Diffstat (limited to 'drivers')
68 files changed, 874 insertions, 505 deletions
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig index fcd867d923ba..d8b1b576556c 100644 --- a/drivers/char/agp/Kconfig +++ b/drivers/char/agp/Kconfig @@ -50,7 +50,7 @@ config AGP_ATI config AGP_AMD tristate "AMD Irongate, 761, and 762 chipset support" - depends on AGP && (X86_32 || ALPHA) + depends on AGP && X86_32 help This option gives you AGP support for the GLX component of X on AMD Irongate, 761, and 762 chipsets. diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c index b1b4362bc648..45681c0ff3b6 100644 --- a/drivers/char/agp/amd-k7-agp.c +++ b/drivers/char/agp/amd-k7-agp.c @@ -41,22 +41,8 @@ static int amd_create_page_map(struct amd_page_map *page_map) if (page_map->real == NULL) return -ENOMEM; -#ifndef CONFIG_X86 - SetPageReserved(virt_to_page(page_map->real)); - global_cache_flush(); - page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), - PAGE_SIZE); - if (page_map->remapped == NULL) { - ClearPageReserved(virt_to_page(page_map->real)); - free_page((unsigned long) page_map->real); - page_map->real = NULL; - return -ENOMEM; - } - global_cache_flush(); -#else set_memory_uc((unsigned long)page_map->real, 1); page_map->remapped = page_map->real; -#endif for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { writel(agp_bridge->scratch_page, page_map->remapped+i); @@ -68,12 +54,7 @@ static int amd_create_page_map(struct amd_page_map *page_map) static void amd_free_page_map(struct amd_page_map *page_map) { -#ifndef CONFIG_X86 - iounmap(page_map->remapped); - ClearPageReserved(virt_to_page(page_map->real)); -#else set_memory_wb((unsigned long)page_map->real, 1); -#endif free_page((unsigned long) page_map->real); } diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 857df10c0428..b0a0dccc98c1 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -774,20 +774,14 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); /* - * If the device has not been properly setup, the following will catch - * the problem and should stop the system from crashing. - * 20030610 - hamish@zot.org - */ - if (pci_enable_device(pdev)) { - dev_err(&pdev->dev, "can't enable PCI device\n"); - agp_put_bridge(bridge); - return -ENODEV; - } - - /* * The following fixes the case where the BIOS has "forgotten" to * provide an address range for the GART. * 20030610 - hamish@zot.org + * This happens before pci_enable_device() intentionally; + * calling pci_enable_device() before assigning the resource + * will result in the GART being disabled on machines with such + * BIOSs (the GART ends up with a BAR starting at 0, which + * conflicts a lot of other devices). */ r = &pdev->resource[0]; if (!r->start && r->end) { @@ -798,6 +792,17 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, } } + /* + * If the device has not been properly setup, the following will catch + * the problem and should stop the system from crashing. 
+ * 20030610 - hamish@zot.org + */ + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, "can't enable PCI device\n"); + agp_put_bridge(bridge); + return -ENODEV; + } + /* Fill in the mode register */ if (cap_ptr) { pci_read_config_dword(pdev, diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 2baa6708e44c..654faa803dcb 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2674,3 +2674,23 @@ out: mutex_unlock(&dev->mode_config.mutex); return ret; } + +void drm_mode_config_reset(struct drm_device *dev) +{ + struct drm_crtc *crtc; + struct drm_encoder *encoder; + struct drm_connector *connector; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + if (crtc->funcs->reset) + crtc->funcs->reset(crtc); + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) + if (encoder->funcs->reset) + encoder->funcs->reset(encoder); + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) + if (connector->funcs->reset) + connector->funcs->reset(connector); +} +EXPORT_SYMBOL(drm_mode_config_reset); diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 952b3d4fb2a6..92369655dca3 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -343,13 +343,12 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, struct drm_encoder *encoder; bool ret = true; - adjusted_mode = drm_mode_duplicate(dev, mode); - crtc->enabled = drm_helper_crtc_in_use(crtc); - if (!crtc->enabled) return true; + adjusted_mode = drm_mode_duplicate(dev, mode); + saved_hwmode = crtc->hwmode; saved_mode = crtc->mode; saved_x = crtc->x; @@ -437,10 +436,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, */ drm_calc_timestamping_constants(crtc); - /* XXX free adjustedmode */ - drm_mode_destroy(dev, adjusted_mode); /* FIXME: add subpixel order */ done: + drm_mode_destroy(dev, adjusted_mode); if (!ret) { crtc->hwmode = saved_hwmode; crtc->mode = saved_mode; @@ -497,14 +495,17 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) crtc_funcs = set->crtc->helper_private; + if (!set->mode) + set->fb = NULL; + if (set->fb) { DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", set->crtc->base.id, set->fb->base.id, (int)set->num_connectors, set->x, set->y); } else { - DRM_DEBUG_KMS("[CRTC:%d] [NOFB] #connectors=%d (x y) (%i %i)\n", - set->crtc->base.id, (int)set->num_connectors, - set->x, set->y); + DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); + set->mode = NULL; + set->num_connectors = 0; } dev = set->crtc->dev; @@ -649,8 +650,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) mode_changed = true; if (mode_changed) { - set->crtc->enabled = (set->mode != NULL); - if (set->mode != NULL) { + set->crtc->enabled = drm_helper_crtc_in_use(set->crtc); + if (set->crtc->enabled) { DRM_DEBUG_KMS("attempting to set mode from" " userspace\n"); drm_mode_debug_printmodeline(set->mode); @@ -665,6 +666,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) ret = -EINVAL; goto fail; } + DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); + for (i = 0; i < set->num_connectors; i++) { + DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, + drm_get_connector_name(set->connectors[i])); + set->connectors[i]->dpms = DRM_MODE_DPMS_ON; + } } drm_helper_disable_unused_functions(dev); } else if (fb_changed) { @@ -681,12 +688,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) goto fail; } } - DRM_DEBUG_KMS("Setting connector 
DPMS state to on\n"); - for (i = 0; i < set->num_connectors; i++) { - DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, - drm_get_connector_name(set->connectors[i])); - set->connectors[i]->dpms = DRM_MODE_DPMS_ON; - } kfree(save_connectors); kfree(save_encoders); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 0054e957203f..3dadfa2a8528 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -1250,7 +1250,7 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc) * Drivers should call this routine in their vblank interrupt handlers to * update the vblank counter and send any signals that may be pending. */ -void drm_handle_vblank(struct drm_device *dev, int crtc) +bool drm_handle_vblank(struct drm_device *dev, int crtc) { u32 vblcount; s64 diff_ns; @@ -1258,7 +1258,7 @@ void drm_handle_vblank(struct drm_device *dev, int crtc) unsigned long irqflags; if (!dev->num_crtcs) - return; + return false; /* Need timestamp lock to prevent concurrent execution with * vblank enable/disable, as this would cause inconsistent @@ -1269,7 +1269,7 @@ void drm_handle_vblank(struct drm_device *dev, int crtc) /* Vblank irq handling disabled. Nothing to do. */ if (!dev->vblank_enabled[crtc]) { spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); - return; + return false; } /* Fetch corresponding timestamp for this vblank interval from @@ -1311,5 +1311,6 @@ void drm_handle_vblank(struct drm_device *dev, int crtc) drm_handle_vblank_events(dev, crtc); spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); + return true; } EXPORT_SYMBOL(drm_handle_vblank); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 66796bb82d3e..cfb56d0ff367 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -354,6 +354,7 @@ static int i915_drm_thaw(struct drm_device *dev) error = i915_gem_init_ringbuffer(dev); mutex_unlock(&dev->struct_mutex); + drm_mode_config_reset(dev); drm_irq_install(dev); /* Resume the modeset for every activated CRTC */ @@ -542,6 +543,7 @@ int i915_reset(struct drm_device *dev, u8 flags) mutex_unlock(&dev->struct_mutex); drm_irq_uninstall(dev); + drm_mode_config_reset(dev); drm_irq_install(dev); mutex_lock(&dev->struct_mutex); } @@ -566,6 +568,14 @@ int i915_reset(struct drm_device *dev, u8 flags) static int __devinit i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + /* Only bind to function 0 of the device. Early generations + * used function 1 as a placeholder for multi-head. This causes + * us confusion instead, especially on the systems where both + * functions have the same PCI-ID! 
+ */ + if (PCI_FUNC(pdev->devfn)) + return -ENODEV; + return drm_get_pci_dev(pdev, ent, &driver); } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 062f353497e6..97f946dcc1aa 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1196,18 +1196,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) intel_finish_page_flip_plane(dev, 1); } - if (pipea_stats & vblank_status) { + if (pipea_stats & vblank_status && + drm_handle_vblank(dev, 0)) { vblank++; - drm_handle_vblank(dev, 0); if (!dev_priv->flip_pending_is_done) { i915_pageflip_stall_check(dev, 0); intel_finish_page_flip(dev, 0); } } - if (pipeb_stats & vblank_status) { + if (pipeb_stats & vblank_status && + drm_handle_vblank(dev, 1)) { vblank++; - drm_handle_vblank(dev, 1); if (!dev_priv->flip_pending_is_done) { i915_pageflip_stall_check(dev, 1); intel_finish_page_flip(dev, 1); diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 17035b87ee46..8a77ff4a7237 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -535,6 +535,15 @@ static int intel_crt_set_property(struct drm_connector *connector, return 0; } +static void intel_crt_reset(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct intel_crt *crt = intel_attached_crt(connector); + + if (HAS_PCH_SPLIT(dev)) + crt->force_hotplug_required = 1; +} + /* * Routines for controlling stuff on the analog port */ @@ -548,6 +557,7 @@ static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = { }; static const struct drm_connector_funcs intel_crt_connector_funcs = { + .reset = intel_crt_reset, .dpms = drm_helper_connector_dpms, .detect = intel_crt_detect, .fill_modes = drm_helper_probe_single_connector_modes, diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d7f237deaaf0..7e42aa586504 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5551,6 +5551,18 @@ cleanup_work: return ret; } +static void intel_crtc_reset(struct drm_crtc *crtc) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + /* Reset flags back to the 'unknown' status so that they + * will be correctly set on the initial modeset. 
+ */ + intel_crtc->cursor_addr = 0; + intel_crtc->dpms_mode = -1; + intel_crtc->active = true; /* force the pipe off on setup_init_config */ +} + static struct drm_crtc_helper_funcs intel_helper_funcs = { .dpms = intel_crtc_dpms, .mode_fixup = intel_crtc_mode_fixup, @@ -5562,6 +5574,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = { }; static const struct drm_crtc_funcs intel_crtc_funcs = { + .reset = intel_crtc_reset, .cursor_set = intel_crtc_cursor_set, .cursor_move = intel_crtc_cursor_move, .gamma_set = intel_crtc_gamma_set, @@ -5652,9 +5665,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; - intel_crtc->cursor_addr = 0; - intel_crtc->dpms_mode = -1; - intel_crtc->active = true; /* force the pipe off on setup_init_config */ + intel_crtc_reset(&intel_crtc->base); if (HAS_PCH_SPLIT(dev)) { intel_helper_funcs.prepare = ironlake_crtc_prepare; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 45cd37652a37..6a09c1413d60 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -473,20 +473,6 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, return false; } - i = 3; - while (status == SDVO_CMD_STATUS_PENDING && i--) { - if (!intel_sdvo_read_byte(intel_sdvo, - SDVO_I2C_CMD_STATUS, - &status)) - return false; - } - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("command returns response %s [%d]\n", - status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???", - status); - return false; - } - return true; } @@ -497,6 +483,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, u8 status; int i; + DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); + /* * The documentation states that all commands will be * processed within 15µs, and that we need only poll @@ -505,14 +493,19 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, * * Check 5 times in case the hardware failed to read the docs. */ - do { + if (!intel_sdvo_read_byte(intel_sdvo, + SDVO_I2C_CMD_STATUS, + &status)) + goto log_fail; + + while (status == SDVO_CMD_STATUS_PENDING && retry--) { + udelay(15); if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS, &status)) - return false; - } while (status == SDVO_CMD_STATUS_PENDING && --retry); + goto log_fail; + } - DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) DRM_LOG_KMS("(%s)", cmd_status_names[status]); else @@ -533,7 +526,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, return true; log_fail: - DRM_LOG_KMS("\n"); + DRM_LOG_KMS("... 
failed\n"); return false; } @@ -550,6 +543,7 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, u8 ddc_bus) { + /* This must be the immediately preceding write before the i2c xfer */ return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &ddc_bus, 1); @@ -557,7 +551,10 @@ static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) { - return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len); + if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len)) + return false; + + return intel_sdvo_read_response(intel_sdvo, NULL, 0); } static bool @@ -859,18 +856,21 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) intel_dip_infoframe_csum(&avi_if); - if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, + if (!intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2)) return false; for (i = 0; i < sizeof(avi_if); i += 8) { - if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, + if (!intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_HBUF_DATA, data, 8)) return false; data++; } - return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, + return intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); } diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index fb846a3fef15..f05c0cddfeca 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -443,7 +443,7 @@ nouveau_hwmon_fini(struct drm_device *dev) struct nouveau_pm_engine *pm = &dev_priv->engine.pm; if (pm->hwmon) { - sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); + sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup); hwmon_device_unregister(pm->hwmon); } #endif diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c index 14e24e906ee8..0ea090f4244a 100644 --- a/drivers/gpu/drm/nouveau/nv50_evo.c +++ b/drivers/gpu/drm/nouveau/nv50_evo.c @@ -283,8 +283,7 @@ nv50_evo_create(struct drm_device *dev) nv50_evo_channel_del(&dev_priv->evo); return ret; } - } else - if (dev_priv->chipset != 0x50) { + } else { ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19, 0, 0xffffffff, 0x00010000); if (ret) { diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 842954fe74c5..b1537000a104 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -555,6 +555,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, dp_clock = dig_connector->dp_clock; } } +/* this might work properly with the new pll algo */ #if 0 /* doesn't work properly on some laptops */ /* use recommended ref_div for ss */ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { @@ -572,6 +573,11 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, adjusted_clock = mode->clock * 2; if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; + /* rv515 needs more testing with this option */ + if (rdev->family != CHIP_RV515) { + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + pll->flags |= RADEON_PLL_IS_LCD; + } } else { if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; @@ -951,8 +957,16 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct 
drm_display_mode /* adjust pixel clock as needed */ adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); - radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, - &ref_div, &post_div); + /* rv515 seems happier with the old algo */ + if (rdev->family == CHIP_RV515) + radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, + &ref_div, &post_div); + else if (ASIC_IS_AVIVO(rdev)) + radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, + &ref_div, &post_div); + else + radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, + &ref_div, &post_div); atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss); diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 677af91b555c..ffdc8332b76e 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -97,26 +97,29 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) } /* get temperature in millidegrees */ -u32 evergreen_get_temp(struct radeon_device *rdev) +int evergreen_get_temp(struct radeon_device *rdev) { u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> ASIC_T_SHIFT; u32 actual_temp = 0; - if ((temp >> 10) & 1) - actual_temp = 0; - else if ((temp >> 9) & 1) + if (temp & 0x400) + actual_temp = -256; + else if (temp & 0x200) actual_temp = 255; - else - actual_temp = (temp >> 1) & 0xff; + else if (temp & 0x100) { + actual_temp = temp & 0x1ff; + actual_temp |= ~0x1ff; + } else + actual_temp = temp & 0xff; - return actual_temp * 1000; + return (actual_temp * 1000) / 2; } -u32 sumo_get_temp(struct radeon_device *rdev) +int sumo_get_temp(struct radeon_device *rdev) { u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff; - u32 actual_temp = (temp >> 1) & 0xff; + int actual_temp = temp - 49; return actual_temp * 1000; } @@ -1182,6 +1185,18 @@ static void evergreen_mc_program(struct radeon_device *rdev) /* * CP. 
*/ +void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) +{ + /* set to DX10/11 mode */ + radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0)); + radeon_ring_write(rdev, 1); + /* FIXME: implement */ + radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); + radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC); + radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); + radeon_ring_write(rdev, ib->length_dw); +} + static int evergreen_cp_load_microcode(struct radeon_device *rdev) { @@ -1233,7 +1248,7 @@ static int evergreen_cp_start(struct radeon_device *rdev) cp_me = 0xff; WREG32(CP_ME_CNTL, cp_me); - r = radeon_ring_lock(rdev, evergreen_default_size + 15); + r = radeon_ring_lock(rdev, evergreen_default_size + 19); if (r) { DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); return r; @@ -1266,6 +1281,11 @@ static int evergreen_cp_start(struct radeon_device *rdev) radeon_ring_write(rdev, 0xffffffff); radeon_ring_write(rdev, 0xffffffff); + radeon_ring_write(rdev, 0xc0026900); + radeon_ring_write(rdev, 0x00000316); + radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ + radeon_ring_write(rdev, 0x00000010); /* */ + radeon_ring_unlock_commit(rdev); return 0; @@ -2072,6 +2092,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); WREG32(VGT_GS_VERTEX_REUSE, 16); + WREG32(PA_SU_LINE_STIPPLE_VALUE, 0); WREG32(PA_SC_LINE_STIPPLE_STATE, 0); WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index d4d4db49a8b8..a1ba4b3053d0 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c @@ -232,7 +232,7 @@ draw_auto(struct radeon_device *rdev) } -/* emits 34 */ +/* emits 36 */ static void set_default_state(struct radeon_device *rdev) { @@ -499,6 +499,10 @@ set_default_state(struct radeon_device *rdev) radeon_ring_write(rdev, 0x00000000); radeon_ring_write(rdev, 0x00000000); + /* set to DX10/11 mode */ + radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0)); + radeon_ring_write(rdev, 1); + /* emit an IB pointing at default state */ dwords = ALIGN(rdev->r600_blit.state_len, 0x10); gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; @@ -679,7 +683,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) /* calculate number of loops correctly */ ring_size = num_loops * dwords_per_loop; /* set default + shaders */ - ring_size += 50; /* shaders + def state */ + ring_size += 52; /* shaders + def state */ ring_size += 10; /* fence emit for VB IB */ ring_size += 5; /* done copy */ ring_size += 10; /* fence emit for done copy */ @@ -687,7 +691,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) if (r) return r; - set_default_state(rdev); /* 34 */ + set_default_state(rdev); /* 36 */ set_shaders(rdev); /* 16 */ return 0; } diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 36d32d83d866..afec1aca2a73 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -240,6 +240,7 @@ #define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) #define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) #define PA_SC_LINE_STIPPLE 0x28A0C +#define PA_SU_LINE_STIPPLE_VALUE 0x8A60 #define PA_SC_LINE_STIPPLE_STATE 0x8B10 #define SCRATCH_REG0 0x8500 @@ -652,6 +653,7 @@ #define PACKET3_DISPATCH_DIRECT 0x15 #define PACKET3_DISPATCH_INDIRECT 
0x16 #define PACKET3_INDIRECT_BUFFER_END 0x17 +#define PACKET3_MODE_CONTROL 0x18 #define PACKET3_SET_PREDICATION 0x20 #define PACKET3_REG_RMW 0x21 #define PACKET3_COND_EXEC 0x22 diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 1e10e3e2ba2a..650672a0f5ad 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -97,12 +97,16 @@ void r600_irq_disable(struct radeon_device *rdev); static void r600_pcie_gen2_enable(struct radeon_device *rdev); /* get temperature in millidegrees */ -u32 rv6xx_get_temp(struct radeon_device *rdev) +int rv6xx_get_temp(struct radeon_device *rdev) { u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> ASIC_T_SHIFT; + int actual_temp = temp & 0xff; - return temp * 1000; + if (temp & 0x100) + actual_temp -= 256; + + return actual_temp * 1000; } void r600_pm_get_dynpm_state(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 71d2a554bbe6..56c48b67ef3d 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -179,10 +179,10 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev); void radeon_atombios_get_power_modes(struct radeon_device *rdev); void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); void rs690_pm_info(struct radeon_device *rdev); -extern u32 rv6xx_get_temp(struct radeon_device *rdev); -extern u32 rv770_get_temp(struct radeon_device *rdev); -extern u32 evergreen_get_temp(struct radeon_device *rdev); -extern u32 sumo_get_temp(struct radeon_device *rdev); +extern int rv6xx_get_temp(struct radeon_device *rdev); +extern int rv770_get_temp(struct radeon_device *rdev); +extern int evergreen_get_temp(struct radeon_device *rdev); +extern int sumo_get_temp(struct radeon_device *rdev); /* * Fences. 
@@ -812,8 +812,7 @@ struct radeon_pm { fixed20_12 sclk; fixed20_12 mclk; fixed20_12 needed_bandwidth; - /* XXX: use a define for num power modes */ - struct radeon_power_state power_state[8]; + struct radeon_power_state *power_state; /* number of valid power states */ int num_power_states; int current_power_state_index; diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 3a1b16186224..e75d63b8e21d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -759,7 +759,7 @@ static struct radeon_asic evergreen_asic = { .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, .gart_set_page = &rs600_gart_set_page, .ring_test = &r600_ring_test, - .ring_ib_execute = &r600_ring_ib_execute, + .ring_ib_execute = &evergreen_ring_ib_execute, .irq_set = &evergreen_irq_set, .irq_process = &evergreen_irq_process, .get_vblank_counter = &evergreen_get_vblank_counter, @@ -805,7 +805,7 @@ static struct radeon_asic sumo_asic = { .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, .gart_set_page = &rs600_gart_set_page, .ring_test = &r600_ring_test, - .ring_ib_execute = &r600_ring_ib_execute, + .ring_ib_execute = &evergreen_ring_ib_execute, .irq_set = &evergreen_irq_set, .irq_process = &evergreen_irq_process, .get_vblank_counter = &evergreen_get_vblank_counter, @@ -848,7 +848,7 @@ static struct radeon_asic btc_asic = { .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, .gart_set_page = &rs600_gart_set_page, .ring_test = &r600_ring_test, - .ring_ib_execute = &r600_ring_ib_execute, + .ring_ib_execute = &evergreen_ring_ib_execute, .irq_set = &evergreen_irq_set, .irq_process = &evergreen_irq_process, .get_vblank_counter = &evergreen_get_vblank_counter, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index e01f07718539..c59bd98a2029 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -355,6 +355,7 @@ int evergreen_resume(struct radeon_device *rdev); bool evergreen_gpu_is_lockup(struct radeon_device *rdev); int evergreen_asic_reset(struct radeon_device *rdev); void evergreen_bandwidth_update(struct radeon_device *rdev); +void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); int evergreen_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_pages, struct radeon_fence *fence); diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 52777902bbcc..5c1cc7ad9a15 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1163,16 +1163,6 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) p1pll->pll_out_min = 64800; else p1pll->pll_out_min = 20000; - } else if (p1pll->pll_out_min > 64800) { - /* Limiting the pll output range is a good thing generally as - * it limits the number of possible pll combinations for a given - * frequency presumably to the ones that work best on each card. - * However, certain duallink DVI monitors seem to like - * pll combinations that would be limited by this at least on - * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per - * family. 
- */ - p1pll->pll_out_min = 64800; } p1pll->pll_in_min = @@ -1987,6 +1977,9 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) num_modes = power_info->info.ucNumOfPowerModeEntries; if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; + rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL); + if (!rdev->pm.power_state) + return state_index; /* last mode is usually default, array is low to high */ for (i = 0; i < num_modes; i++) { rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; @@ -2338,6 +2331,10 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev) power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); + rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * + power_info->pplib.ucNumStates, GFP_KERNEL); + if (!rdev->pm.power_state) + return state_index; /* first mode is usually default, followed by low to high */ for (i = 0; i < power_info->pplib.ucNumStates; i++) { mode_index = 0; @@ -2418,6 +2415,10 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) non_clock_info_array = (struct NonClockInfoArray *) (mode_info->atom_context->bios + data_offset + power_info->pplib.usNonClockInfoArrayOffset); + rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * + state_array->ucNumEntries, GFP_KERNEL); + if (!rdev->pm.power_state) + return state_index; for (i = 0; i < state_array->ucNumEntries; i++) { mode_index = 0; power_state = (union pplib_power_state *)&state_array->states[i]; @@ -2491,19 +2492,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) break; } } else { - /* add the default mode */ - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_DEFAULT; - rdev->pm.power_state[state_index].num_clock_modes = 1; - rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; - rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; - rdev->pm.power_state[state_index].default_clock_mode = - &rdev->pm.power_state[state_index].clock_info[0]; - rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; - rdev->pm.power_state[state_index].pcie_lanes = 16; - rdev->pm.default_power_state_index = state_index; - rdev->pm.power_state[state_index].flags = 0; - state_index++; + rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); + if (rdev->pm.power_state) { + /* add the default mode */ + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.power_state[state_index].num_clock_modes = 1; + rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; + rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; + rdev->pm.power_state[state_index].default_clock_mode = + &rdev->pm.power_state[state_index].clock_info[0]; + rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; + rdev->pm.power_state[state_index].pcie_lanes = 16; + rdev->pm.default_power_state_index = state_index; + rdev->pm.power_state[state_index].flags = 0; + state_index++; + } } rdev->pm.num_power_states = state_index; @@ -2619,7 +2623,7 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE; /* tell the bios not to handle mode switching */ - bios_6_scratch |= 
(ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE); + bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH; if (rdev->family >= CHIP_R600) { WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); @@ -2670,10 +2674,13 @@ void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock) else bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); - if (lock) + if (lock) { bios_6_scratch |= ATOM_S6_CRITICAL_STATE; - else + bios_6_scratch &= ~ATOM_S6_ACC_MODE; + } else { bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE; + bios_6_scratch |= ATOM_S6_ACC_MODE; + } if (rdev->family >= CHIP_R600) WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch); diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 591fcae8f224..d27ef74590cd 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -2442,6 +2442,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) rdev->pm.default_power_state_index = -1; + /* allocate 2 power states */ + rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL); + if (!rdev->pm.power_state) { + rdev->pm.default_power_state_index = state_index; + rdev->pm.num_power_states = 0; + + rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; + rdev->pm.current_clock_mode_index = 0; + return; + } + if (rdev->flags & RADEON_IS_MOBILITY) { offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); if (offset) { diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index d26dabf878d9..2eff98cfd728 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -780,6 +780,115 @@ static int radeon_ddc_dump(struct drm_connector *connector) return ret; } +/* avivo */ +static void avivo_get_fb_div(struct radeon_pll *pll, + u32 target_clock, + u32 post_div, + u32 ref_div, + u32 *fb_div, + u32 *frac_fb_div) +{ + u32 tmp = post_div * ref_div; + + tmp *= target_clock; + *fb_div = tmp / pll->reference_freq; + *frac_fb_div = tmp % pll->reference_freq; +} + +static u32 avivo_get_post_div(struct radeon_pll *pll, + u32 target_clock) +{ + u32 vco, post_div, tmp; + + if (pll->flags & RADEON_PLL_USE_POST_DIV) + return pll->post_div; + + if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) { + if (pll->flags & RADEON_PLL_IS_LCD) + vco = pll->lcd_pll_out_min; + else + vco = pll->pll_out_min; + } else { + if (pll->flags & RADEON_PLL_IS_LCD) + vco = pll->lcd_pll_out_max; + else + vco = pll->pll_out_max; + } + + post_div = vco / target_clock; + tmp = vco % target_clock; + + if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) { + if (tmp) + post_div++; + } else { + if (!tmp) + post_div--; + } + + return post_div; +} + +#define MAX_TOLERANCE 10 + +void radeon_compute_pll_avivo(struct radeon_pll *pll, + u32 freq, + u32 *dot_clock_p, + u32 *fb_div_p, + u32 *frac_fb_div_p, + u32 *ref_div_p, + u32 *post_div_p) +{ + u32 target_clock = freq / 10; + u32 post_div = avivo_get_post_div(pll, target_clock); + u32 ref_div = pll->min_ref_div; + u32 fb_div = 0, frac_fb_div = 0, tmp; + + if (pll->flags & RADEON_PLL_USE_REF_DIV) + ref_div = pll->reference_div; + + if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { + avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div); + frac_fb_div = (100 * frac_fb_div) / pll->reference_freq; + if (frac_fb_div >= 5) { + frac_fb_div -= 5; + frac_fb_div = frac_fb_div / 10; + frac_fb_div++; + } + if (frac_fb_div >= 10) { + fb_div++; + frac_fb_div = 0; + } + } else { + while 
(ref_div <= pll->max_ref_div) { + avivo_get_fb_div(pll, target_clock, post_div, ref_div, + &fb_div, &frac_fb_div); + if (frac_fb_div >= (pll->reference_freq / 2)) + fb_div++; + frac_fb_div = 0; + tmp = (pll->reference_freq * fb_div) / (post_div * ref_div); + tmp = (tmp * 10000) / target_clock; + + if (tmp > (10000 + MAX_TOLERANCE)) + ref_div++; + else if (tmp >= (10000 - MAX_TOLERANCE)) + break; + else + ref_div++; + } + } + + *dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) / + (ref_div * post_div * 10); + *fb_div_p = fb_div; + *frac_fb_div_p = frac_fb_div; + *ref_div_p = ref_div; + *post_div_p = post_div; + DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n", + *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div); +} + +/* pre-avivo */ static inline uint32_t radeon_div(uint64_t n, uint32_t d) { uint64_t mod; @@ -790,13 +899,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d) return n; } -void radeon_compute_pll(struct radeon_pll *pll, - uint64_t freq, - uint32_t *dot_clock_p, - uint32_t *fb_div_p, - uint32_t *frac_fb_div_p, - uint32_t *ref_div_p, - uint32_t *post_div_p) +void radeon_compute_pll_legacy(struct radeon_pll *pll, + uint64_t freq, + uint32_t *dot_clock_p, + uint32_t *fb_div_p, + uint32_t *frac_fb_div_p, + uint32_t *ref_div_p, + uint32_t *post_div_p) { uint32_t min_ref_div = pll->min_ref_div; uint32_t max_ref_div = pll->max_ref_div; @@ -826,6 +935,9 @@ void radeon_compute_pll(struct radeon_pll *pll, pll_out_max = pll->pll_out_max; } + if (pll_out_min > 64800) + pll_out_min = 64800; + if (pll->flags & RADEON_PLL_USE_REF_DIV) min_ref_div = max_ref_div = pll->reference_div; else { @@ -849,7 +961,7 @@ void radeon_compute_pll(struct radeon_pll *pll, max_fractional_feed_div = pll->max_frac_feedback_div; } - for (post_div = max_post_div; post_div >= min_post_div; --post_div) { + for (post_div = min_post_div; post_div <= max_post_div; ++post_div) { uint32_t ref_div; if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) @@ -965,6 +1077,10 @@ void radeon_compute_pll(struct radeon_pll *pll, *frac_fb_div_p = best_frac_feedback_div; *ref_div_p = best_ref_div; *post_div_p = best_post_div; + DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n", + freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div, + best_ref_div, best_post_div); + } static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 5e90984d5ad2..d4a542247618 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -1063,7 +1063,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action) if (!ASIC_IS_DCE4(rdev)) return; - if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) || + if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) && (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) return; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index ace2e6384d40..cf0638c3b7c7 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -778,9 +778,9 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) DRM_DEBUG_KMS("\n"); if (!use_bios_divs) { - radeon_compute_pll(pll, mode->clock, - &freq, &feedback_div, &frac_fb_div, - &reference_div, &post_divider); + radeon_compute_pll_legacy(pll, mode->clock, + &freq, &feedback_div, &frac_fb_div, 
+ &reference_div, &post_divider); for (post_div = &post_divs[0]; post_div->divider; ++post_div) { if (post_div->divider == post_divider) diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 12bdeab91c86..6794cdf91f28 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -149,6 +149,7 @@ struct radeon_tmds_pll { #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) #define RADEON_PLL_USE_POST_DIV (1 << 12) #define RADEON_PLL_IS_LCD (1 << 13) +#define RADEON_PLL_PREFER_MINM_OVER_MAXP (1 << 14) struct radeon_pll { /* reference frequency */ @@ -510,13 +511,21 @@ extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, struct radeon_atom_ss *ss, int id, u32 clock); -extern void radeon_compute_pll(struct radeon_pll *pll, - uint64_t freq, - uint32_t *dot_clock_p, - uint32_t *fb_div_p, - uint32_t *frac_fb_div_p, - uint32_t *ref_div_p, - uint32_t *post_div_p); +extern void radeon_compute_pll_legacy(struct radeon_pll *pll, + uint64_t freq, + uint32_t *dot_clock_p, + uint32_t *fb_div_p, + uint32_t *frac_fb_div_p, + uint32_t *ref_div_p, + uint32_t *post_div_p); + +extern void radeon_compute_pll_avivo(struct radeon_pll *pll, + u32 freq, + u32 *dot_clock_p, + u32 *fb_div_p, + u32 *frac_fb_div_p, + u32 *ref_div_p, + u32 *post_div_p); extern void radeon_setup_encoder_clones(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 3b1b2bf9cdd5..2aed03bde4b2 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -430,7 +430,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, { struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); struct radeon_device *rdev = ddev->dev_private; - u32 temp; + int temp; switch (rdev->pm.int_thermal_type) { case THERMAL_TYPE_RV6XX: @@ -646,6 +646,9 @@ void radeon_pm_fini(struct radeon_device *rdev) #endif } + if (rdev->pm.power_state) + kfree(rdev->pm.power_state); + radeon_hwmon_fini(rdev); } diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 491dc9000655..2211a323db41 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -78,18 +78,23 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) } /* get temperature in millidegrees */ -u32 rv770_get_temp(struct radeon_device *rdev) +int rv770_get_temp(struct radeon_device *rdev) { u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> ASIC_T_SHIFT; - u32 actual_temp = 0; - - if ((temp >> 9) & 1) - actual_temp = 0; - else - actual_temp = (temp >> 1) & 0xff; - - return actual_temp * 1000; + int actual_temp; + + if (temp & 0x400) + actual_temp = -256; + else if (temp & 0x200) + actual_temp = 255; + else if (temp & 0x100) { + actual_temp = temp & 0x1ff; + actual_temp |= ~0x1ff; + } else + actual_temp = temp & 0xff; + + return (actual_temp * 1000) / 2; } void rv770_pm_misc(struct radeon_device *rdev) diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c index f2b5bab5e6a1..1f355bb85e54 100644 --- a/drivers/isdn/icn/icn.c +++ b/drivers/isdn/icn/icn.c @@ -1627,7 +1627,7 @@ __setup("icn=", icn_setup); static int __init icn_init(void) { char *p; - char rev[20]; + char rev[21]; memset(&dev, 0, sizeof(icn_dev)); dev.memaddr = (membase & 0x0ffc000); @@ -1638,6 +1638,7 @@ static int __init icn_init(void) if ((p = strchr(revision, ':'))) { strncpy(rev, p + 1, 20); + rev[20] = '\0'; p = strchr(rev, '$'); if (p) *p = 0; diff --git 
a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c index a699bbf20eb5..3824382faecc 100644 --- a/drivers/net/atl1c/atl1c_main.c +++ b/drivers/net/atl1c/atl1c_main.c @@ -48,6 +48,7 @@ static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)}, {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)}, {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)}, /* required last entry */ { 0 } }; diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index de40d3b7152f..28a32a6c8bf1 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -312,11 +312,9 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up) if (adapter->link_up != link_up) { adapter->link_speed = -1; if (link_up) { - netif_start_queue(netdev); netif_carrier_on(netdev); printk(KERN_INFO "%s: Link up\n", netdev->name); } else { - netif_stop_queue(netdev); netif_carrier_off(netdev); printk(KERN_INFO "%s: Link down\n", netdev->name); } @@ -2628,8 +2626,6 @@ static void be_netdev_init(struct net_device *netdev) netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc, BE_NAPI_WEIGHT); - - netif_stop_queue(netdev); } static void be_unmap_pci_bars(struct be_adapter *adapter) diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index 8e4183717d91..653c62475cb6 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -22,8 +22,8 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.62.00-4" -#define DRV_MODULE_RELDATE "2011/01/18" +#define DRV_MODULE_VERSION "1.62.00-5" +#define DRV_MODULE_RELDATE "2011/01/30" #define BNX2X_BC_VER 0x040200 #define BNX2X_MULTI_QUEUE diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c index 7160ec51093e..dd1210fddfff 100644 --- a/drivers/net/bnx2x/bnx2x_link.c +++ b/drivers/net/bnx2x/bnx2x_link.c @@ -3948,48 +3948,6 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, return rc; } -static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp, - struct bnx2x_phy *phy) -{ - u16 val; - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val); - - if (val == 0) { - /* Mustn't set low power mode in 8073 A0 */ - return; - } - - /* Disable PLL sequencer (use read-modify-write to clear bit 13) */ - bnx2x_cl45_read(bp, phy, - MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val); - val &= ~(1<<13); - bnx2x_cl45_write(bp, phy, - MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val); - - /* PLL controls */ - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077); - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000); - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B); - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240); - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490); - - /* Tx Controls */ - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74); - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041); - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640); - - /* Rx Controls */ - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4); - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249); - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015); - - /* Enable PLL sequencer (use read-modify-write to set bit 13) */ - bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val); - val |= (1<<13); - bnx2x_cl45_write(bp, phy, 
MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val); -} - /******************************************************************/ /* BCM8073 PHY SECTION */ /******************************************************************/ @@ -4148,8 +4106,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, bnx2x_8073_set_pause_cl37(params, phy, vars); - bnx2x_8073_set_xaui_low_power_mode(bp, phy); - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); @@ -6519,6 +6475,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x80); + + /* Tell LED3 to blink on source */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + &val); + val &= ~(7<<6); + val |= (1<<6); /* A83B[8:6]= 1 */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + val); } break; } @@ -7720,10 +7688,13 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, struct bnx2x_phy phy[PORT_MAX]; struct bnx2x_phy *phy_blk[PORT_MAX]; u16 val; - s8 port; + s8 port = 0; s8 port_of_path = 0; - - bnx2x_ext_phy_hw_reset(bp, 0); + u32 swap_val, swap_override; + swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); + swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); + port ^= (swap_val && swap_override); + bnx2x_ext_phy_hw_reset(bp, port); /* PART1 - Reset both phys */ for (port = PORT_MAX - 1; port >= PORT_0; port--) { u32 shmem_base, shmem2_base; diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index 8cdcf5b39d1e..f40740e68ea5 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -2301,15 +2301,10 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters) /* accept matched ucast */ drop_all_ucast = 0; } - if (filters & BNX2X_ACCEPT_MULTICAST) { + if (filters & BNX2X_ACCEPT_MULTICAST) /* accept matched mcast */ drop_all_mcast = 0; - if (IS_MF_SI(bp)) - /* since mcast addresses won't arrive with ovlan, - * fw needs to accept all of them in - * switch-independent mode */ - accp_all_mcast = 1; - } + if (filters & BNX2X_ACCEPT_ALL_UNICAST) { /* accept all mcast */ drop_all_ucast = 0; @@ -5296,10 +5291,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) } } - bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, - bp->common.shmem_base, - bp->common.shmem2_base); - bnx2x_setup_fan_failure_detection(bp); /* clear PXP2 attentions */ @@ -5503,9 +5494,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) bnx2x_init_block(bp, MCP_BLOCK, init_stage); bnx2x_init_block(bp, DMAE_BLOCK, init_stage); - bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, - bp->common.shmem_base, - bp->common.shmem2_base); if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base, bp->common.shmem2_base, port)) { u32 reg_addr = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : @@ -8379,6 +8367,17 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) bp->mdio.prtad = XGXS_EXT_PHY_ADDR(ext_phy_config); + + /* + * Check if hw lock is required to access MDC/MDIO bus to the PHY(s) + * In MF mode, it is set to cover self test cases + */ + if (IS_MF(bp)) + bp->port.need_hw_lock = 1; + else + bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, + bp->common.shmem_base, + bp->common.shmem2_base); } static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 986195eaa57c..5dec456fd4a4 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -23,7 +23,7 @@ config CAN_SLCAN As only the sending and receiving of CAN frames is implemented, this driver should work with the (serial/USB) CAN hardware from: - www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de + www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de Userspace tools to attach the SLCAN line discipline (slcan_attach, slcand) can be found in the can-utils at the SocketCAN SVN, see diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 2532b9631538..57d2ffbbb433 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1109,7 +1109,7 @@ static ssize_t at91_sysfs_set_mb0_id(struct device *dev, return ret; } -static DEVICE_ATTR(mb0_id, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO, at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id); static struct attribute *at91_sysfs_attrs[] = { diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index b9a6d7a5a739..366f5cc050ae 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1618,7 +1618,7 @@ static ssize_t ican3_sysfs_set_term(struct device *dev, return count; } -static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term, +static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term, ican3_sysfs_set_term); static struct attribute *ican3_sysfs_attrs[] = { diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig index 92bd6bdde5e3..8ba81b3ddd90 100644 --- a/drivers/net/can/softing/Kconfig +++ b/drivers/net/can/softing/Kconfig @@ -1,6 +1,6 @@ config CAN_SOFTING tristate "Softing Gmbh CAN generic support" - depends on CAN_DEV + depends on CAN_DEV && HAS_IOMEM ---help--- Support for CAN cards from Softing Gmbh & some cards from Vector Gmbh. 
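The at91_can and janz-ican3 hunks above tighten sysfs permissions by replacing S_IWUGO (world-writable) with S_IWUSR (owner-writable) in DEVICE_ATTR(). A minimal sketch of the resulting pattern is shown here for reference; the backing variable and the show/store bodies are simplified placeholders, not the drivers' actual implementations.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>

static unsigned int example_term;	/* placeholder backing state */

static ssize_t example_show_term(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", example_term);
}

static ssize_t example_set_term(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u", &val) != 1)
		return -EINVAL;
	example_term = val;
	return count;
}

/* S_IWUSR | S_IRUGO: readable by everyone, writable only by the owner (root) */
static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO,
		   example_show_term, example_set_term);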
diff --git a/drivers/net/depca.c b/drivers/net/depca.c index 1b48b68ad4fd..8b0084d17c8c 100644 --- a/drivers/net/depca.c +++ b/drivers/net/depca.c @@ -1094,7 +1094,7 @@ static int depca_rx(struct net_device *dev) } } /* Change buffer ownership for this last frame, back to the adapter */ - for (; lp->rx_old != entry; lp->rx_old = (++lp->rx_old) & lp->rxRingMask) { + for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) { writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base); } writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base); @@ -1103,7 +1103,7 @@ static int depca_rx(struct net_device *dev) /* ** Update entry information */ - lp->rx_new = (++lp->rx_new) & lp->rxRingMask; + lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask; } return 0; @@ -1148,7 +1148,7 @@ static int depca_tx(struct net_device *dev) } /* Update all the pointers */ - lp->tx_old = (++lp->tx_old) & lp->txRingMask; + lp->tx_old = (lp->tx_old + 1) & lp->txRingMask; } return 0; diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c index e1a8216ff692..c05db6046050 100644 --- a/drivers/net/dl2k.c +++ b/drivers/net/dl2k.c @@ -1753,8 +1753,6 @@ rio_close (struct net_device *dev) /* Free all the skbuffs in the queue. */ for (i = 0; i < RX_RING_SIZE; i++) { - np->rx_ring[i].status = 0; - np->rx_ring[i].fraginfo = 0; skb = np->rx_skbuff[i]; if (skb) { pci_unmap_single(np->pdev, @@ -1763,6 +1761,8 @@ rio_close (struct net_device *dev) dev_kfree_skb (skb); np->rx_skbuff[i] = NULL; } + np->rx_ring[i].status = 0; + np->rx_ring[i].fraginfo = 0; } for (i = 0; i < TX_RING_SIZE; i++) { skb = np->tx_skbuff[i]; diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c index 112c5aa9af7f..907b05a1c659 100644 --- a/drivers/net/enc28j60.c +++ b/drivers/net/enc28j60.c @@ -812,7 +812,7 @@ static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE]) if (netif_msg_hw(priv)) printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n", endptr + 1); - enc28j60_mem_read(priv, endptr + 1, sizeof(tsv), tsv); + enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv); } static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg, diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 2541321bad82..9fb59d3f9c92 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -4489,6 +4489,9 @@ static int niu_alloc_channels(struct niu *np) { struct niu_parent *parent = np->parent; int first_rx_channel, first_tx_channel; + int num_rx_rings, num_tx_rings; + struct rx_ring_info *rx_rings; + struct tx_ring_info *tx_rings; int i, port, err; port = np->port; @@ -4498,18 +4501,21 @@ static int niu_alloc_channels(struct niu *np) first_tx_channel += parent->txchan_per_port[i]; } - np->num_rx_rings = parent->rxchan_per_port[port]; - np->num_tx_rings = parent->txchan_per_port[port]; + num_rx_rings = parent->rxchan_per_port[port]; + num_tx_rings = parent->txchan_per_port[port]; - netif_set_real_num_rx_queues(np->dev, np->num_rx_rings); - netif_set_real_num_tx_queues(np->dev, np->num_tx_rings); - - np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info), - GFP_KERNEL); + rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info), + GFP_KERNEL); err = -ENOMEM; - if (!np->rx_rings) + if (!rx_rings) goto out_err; + np->num_rx_rings = num_rx_rings; + smp_wmb(); + np->rx_rings = rx_rings; + + netif_set_real_num_rx_queues(np->dev, num_rx_rings); + for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; @@ -4538,12 +4544,18 @@ static int 
niu_alloc_channels(struct niu *np) return err; } - np->tx_rings = kcalloc(np->num_tx_rings, sizeof(struct tx_ring_info), - GFP_KERNEL); + tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info), + GFP_KERNEL); err = -ENOMEM; - if (!np->tx_rings) + if (!tx_rings) goto out_err; + np->num_tx_rings = num_tx_rings; + smp_wmb(); + np->tx_rings = tx_rings; + + netif_set_real_num_tx_queues(np->dev, num_tx_rings); + for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; @@ -6246,11 +6258,17 @@ static void niu_sync_mac_stats(struct niu *np) static void niu_get_rx_stats(struct niu *np) { unsigned long pkts, dropped, errors, bytes; + struct rx_ring_info *rx_rings; int i; pkts = dropped = errors = bytes = 0; + + rx_rings = ACCESS_ONCE(np->rx_rings); + if (!rx_rings) + goto no_rings; + for (i = 0; i < np->num_rx_rings; i++) { - struct rx_ring_info *rp = &np->rx_rings[i]; + struct rx_ring_info *rp = &rx_rings[i]; niu_sync_rx_discard_stats(np, rp, 0); @@ -6259,6 +6277,8 @@ static void niu_get_rx_stats(struct niu *np) dropped += rp->rx_dropped; errors += rp->rx_errors; } + +no_rings: np->dev->stats.rx_packets = pkts; np->dev->stats.rx_bytes = bytes; np->dev->stats.rx_dropped = dropped; @@ -6268,16 +6288,24 @@ static void niu_get_rx_stats(struct niu *np) static void niu_get_tx_stats(struct niu *np) { unsigned long pkts, errors, bytes; + struct tx_ring_info *tx_rings; int i; pkts = errors = bytes = 0; + + tx_rings = ACCESS_ONCE(np->tx_rings); + if (!tx_rings) + goto no_rings; + for (i = 0; i < np->num_tx_rings; i++) { - struct tx_ring_info *rp = &np->tx_rings[i]; + struct tx_ring_info *rp = &tx_rings[i]; pkts += rp->tx_packets; bytes += rp->tx_bytes; errors += rp->tx_errors; } + +no_rings: np->dev->stats.tx_packets = pkts; np->dev->stats.tx_bytes = bytes; np->dev->stats.tx_errors = errors; @@ -6287,9 +6315,10 @@ static struct net_device_stats *niu_get_stats(struct net_device *dev) { struct niu *np = netdev_priv(dev); - niu_get_rx_stats(np); - niu_get_tx_stats(np); - + if (netif_running(dev)) { + niu_get_rx_stats(np); + niu_get_tx_stats(np); + } return &dev->stats; } diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index 1f42f6ac8551..d3cb77205863 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c @@ -1488,12 +1488,10 @@ static void ei_rx_overrun(struct net_device *dev) /* * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. - * Early datasheets said to poll the reset bit, but now they say that - * it "is not a reliable indicator and subsequently should be ignored." - * We wait at least 10ms. + * We wait at least 2ms. */ - mdelay(10); + mdelay(2); /* * Reset RBCR[01] back to zero as per magic incantation. diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index bde7d61f1930..59ccf0c5c610 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -973,7 +973,8 @@ static void __rtl8169_check_link_status(struct net_device *dev, if (pm) pm_request_resume(&tp->pci_dev->dev); netif_carrier_on(dev); - netif_info(tp, ifup, dev, "link up\n"); + if (net_ratelimit()) + netif_info(tp, ifup, dev, "link up\n"); } else { netif_carrier_off(dev); netif_info(tp, ifdown, dev, "link down\n"); @@ -3757,7 +3758,8 @@ static void rtl_hw_start_8168(struct net_device *dev) RTL_W16(IntrMitigate, 0x5151); /* Work around for RxFIFO overflow. 
*/ - if (tp->mac_version == RTL_GIGA_MAC_VER_11) { + if (tp->mac_version == RTL_GIGA_MAC_VER_11 || + tp->mac_version == RTL_GIGA_MAC_VER_22) { tp->intr_event |= RxFIFOOver | PCSTimeout; tp->intr_event &= ~RxOverflow; } @@ -4639,12 +4641,33 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) break; } - /* Work around for rx fifo overflow */ - if (unlikely(status & RxFIFOOver) && - (tp->mac_version == RTL_GIGA_MAC_VER_11)) { - netif_stop_queue(dev); - rtl8169_tx_timeout(dev); - break; + if (unlikely(status & RxFIFOOver)) { + switch (tp->mac_version) { + /* Work around for rx fifo overflow */ + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_26: + netif_stop_queue(dev); + rtl8169_tx_timeout(dev); + goto done; + /* Testers needed. */ + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + /* Experimental science. Pktgen proof. */ + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_25: + if (status == RxFIFOOver) + goto done; + break; + default: + break; + } } if (unlikely(status & SYSErr)) { @@ -4680,7 +4703,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) (status & RxFIFOOver) ? (status | RxOverflow) : status); status = RTL_R16(IntrStatus); } - +done: return IRQ_RETVAL(handled); } diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c index 01c05f53e2f9..228d4f7a58af 100644 --- a/drivers/net/vxge/vxge-config.c +++ b/drivers/net/vxge/vxge-config.c @@ -3690,7 +3690,7 @@ __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp, if (status != VXGE_HW_OK) goto exit; - if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || + if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) && (rts_table != VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) *data1 = 0; diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c index 0064be7ce5c9..21091c26a9a5 100644 --- a/drivers/net/wireless/ath/ath5k/dma.c +++ b/drivers/net/wireless/ath/ath5k/dma.c @@ -838,9 +838,9 @@ int ath5k_hw_dma_stop(struct ath5k_hw *ah) for (i = 0; i < qmax; i++) { err = ath5k_hw_stop_tx_dma(ah, i); /* -EINVAL -> queue inactive */ - if (err != -EINVAL) + if (err && err != -EINVAL) return err; } - return err; + return 0; } diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c index e5f2b96a4c63..a702817daf72 100644 --- a/drivers/net/wireless/ath/ath5k/pcu.c +++ b/drivers/net/wireless/ath/ath5k/pcu.c @@ -86,7 +86,7 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, if (!ah->ah_bwmode) { dur = ieee80211_generic_frame_duration(sc->hw, NULL, len, rate); - return dur; + return le16_to_cpu(dur); } bitrate = rate->bitrate; @@ -265,8 +265,6 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah) * what rate we should choose to TX ACKs. 
*/ tx_time = ath5k_hw_get_frame_duration(ah, 10, rate); - tx_time = le16_to_cpu(tx_time); - ath5k_hw_reg_write(ah, tx_time, reg); if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)) diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c index f8a7771faee2..f44c84ab5dce 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c @@ -426,9 +426,8 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah, } /* WAR for ASPM system hang */ - if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) { + if (AR_SREV_9285(ah) || AR_SREV_9287(ah)) val |= (AR_WA_BIT6 | AR_WA_BIT7); - } if (AR_SREV_9285E_20(ah)) val |= AR_WA_BIT23; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 38433f9bfe59..0352f0994caa 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -142,9 +142,6 @@ static void ath9k_deinit_priv(struct ath9k_htc_priv *priv) { ath9k_htc_exit_debug(priv->ah); ath9k_hw_deinit(priv->ah); - tasklet_kill(&priv->swba_tasklet); - tasklet_kill(&priv->rx_tasklet); - tasklet_kill(&priv->tx_tasklet); kfree(priv->ah); priv->ah = NULL; } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index f4d576bc3ccd..6bb59958f71e 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -1025,12 +1025,6 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw) int ret = 0; u8 cmd_rsp; - /* Cancel all the running timers/work .. */ - cancel_work_sync(&priv->fatal_work); - cancel_work_sync(&priv->ps_work); - cancel_delayed_work_sync(&priv->ath9k_led_blink_work); - ath9k_led_stop_brightness(priv); - mutex_lock(&priv->mutex); if (priv->op_flags & OP_INVALID) { @@ -1044,8 +1038,23 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw) WMI_CMD(WMI_DISABLE_INTR_CMDID); WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); WMI_CMD(WMI_STOP_RECV_CMDID); + + tasklet_kill(&priv->swba_tasklet); + tasklet_kill(&priv->rx_tasklet); + tasklet_kill(&priv->tx_tasklet); + skb_queue_purge(&priv->tx_queue); + mutex_unlock(&priv->mutex); + + /* Cancel all the running timers/work .. 
*/ + cancel_work_sync(&priv->fatal_work); + cancel_work_sync(&priv->ps_work); + cancel_delayed_work_sync(&priv->ath9k_led_blink_work); + ath9k_led_stop_brightness(priv); + + mutex_lock(&priv->mutex); + /* Remove monitor interface here */ if (ah->opmode == NL80211_IFTYPE_MONITOR) { if (ath9k_htc_remove_monitor_interface(priv)) diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 767d8b86f1e1..087a6a95edd5 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -598,8 +598,6 @@ err_btcoex: err_queues: ath9k_hw_deinit(ah); err_hw: - tasklet_kill(&sc->intr_tq); - tasklet_kill(&sc->bcon_tasklet); kfree(ah); sc->sc_ah = NULL; @@ -807,9 +805,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc) ath9k_hw_deinit(sc->sc_ah); - tasklet_kill(&sc->intr_tq); - tasklet_kill(&sc->bcon_tasklet); - kfree(sc->sc_ah); sc->sc_ah = NULL; } @@ -824,6 +819,8 @@ void ath9k_deinit_device(struct ath_softc *sc) wiphy_rfkill_stop_polling(sc->hw->wiphy); ath_deinit_leds(sc); + ath9k_ps_restore(sc); + for (i = 0; i < sc->num_sec_wiphy; i++) { struct ath_wiphy *aphy = sc->sec_wiphy[i]; if (aphy == NULL) diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index c79c97be6cd4..9040c2ff1909 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -325,6 +325,8 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int { struct ieee80211_hw *hw = sc->hw; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); struct ath_tx_control txctl; int time_left; @@ -342,8 +344,12 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int init_completion(&sc->paprd_complete); sc->paprd_pending = true; txctl.paprd = BIT(chain); - if (ath_tx_start(hw, skb, &txctl) != 0) + + if (ath_tx_start(hw, skb, &txctl) != 0) { + ath_dbg(common, ATH_DBG_XMIT, "PAPRD TX failed\n"); + dev_kfree_skb_any(skb); return false; + } time_left = wait_for_completion_timeout(&sc->paprd_complete, msecs_to_jiffies(ATH_PAPRD_TIMEOUT)); @@ -953,8 +959,6 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw) spin_unlock_bh(&sc->sc_pcu_lock); ath9k_ps_restore(sc); - - ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP); } int ath_reset(struct ath_softc *sc, bool retry_tx) @@ -1309,6 +1313,9 @@ static void ath9k_stop(struct ieee80211_hw *hw) spin_lock_bh(&sc->sc_pcu_lock); + /* prevent tasklets to enable interrupts once we disable them */ + ah->imask &= ~ATH9K_INT_GLOBAL; + /* make sure h/w will not generate any interrupt * before setting the invalid flag. 
*/ ath9k_hw_disable_interrupts(ah); @@ -1326,6 +1333,12 @@ static void ath9k_stop(struct ieee80211_hw *hw) spin_unlock_bh(&sc->sc_pcu_lock); + /* we can now sync irq and kill any running tasklets, since we already + * disabled interrupts and not holding a spin lock */ + synchronize_irq(sc->irq); + tasklet_kill(&sc->intr_tq); + tasklet_kill(&sc->bcon_tasklet); + ath9k_ps_restore(sc); sc->ps_idle = true; diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c index b8433f3a9bc2..62876cd5c41a 100644 --- a/drivers/net/wireless/rtlwifi/efuse.c +++ b/drivers/net/wireless/rtlwifi/efuse.c @@ -726,9 +726,9 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data) } static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, - u8 efuse_data, u8 offset, int *bcontinual, - u8 *write_state, struct pgpkt_struct target_pkt, - int *repeat_times, int *bresult, u8 word_en) + u8 efuse_data, u8 offset, int *bcontinual, + u8 *write_state, struct pgpkt_struct *target_pkt, + int *repeat_times, int *bresult, u8 word_en) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct pgpkt_struct tmp_pkt; @@ -744,8 +744,8 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, tmp_pkt.word_en = tmp_header & 0x0F; tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en); - if (tmp_pkt.offset != target_pkt.offset) { - efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1; + if (tmp_pkt.offset != target_pkt->offset) { + *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1; *write_state = PG_STATE_HEADER; } else { for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) { @@ -756,23 +756,23 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, } if (bdataempty == false) { - efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1; + *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1; *write_state = PG_STATE_HEADER; } else { match_word_en = 0x0F; - if (!((target_pkt.word_en & BIT(0)) | + if (!((target_pkt->word_en & BIT(0)) | (tmp_pkt.word_en & BIT(0)))) match_word_en &= (~BIT(0)); - if (!((target_pkt.word_en & BIT(1)) | + if (!((target_pkt->word_en & BIT(1)) | (tmp_pkt.word_en & BIT(1)))) match_word_en &= (~BIT(1)); - if (!((target_pkt.word_en & BIT(2)) | + if (!((target_pkt->word_en & BIT(2)) | (tmp_pkt.word_en & BIT(2)))) match_word_en &= (~BIT(2)); - if (!((target_pkt.word_en & BIT(3)) | + if (!((target_pkt->word_en & BIT(3)) | (tmp_pkt.word_en & BIT(3)))) match_word_en &= (~BIT(3)); @@ -780,7 +780,7 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, badworden = efuse_word_enable_data_write( hw, *efuse_addr + 1, tmp_pkt.word_en, - target_pkt.data); + target_pkt->data); if (0x0F != (badworden & 0x0F)) { u8 reorg_offset = offset; @@ -791,26 +791,26 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, } tmp_word_en = 0x0F; - if ((target_pkt.word_en & BIT(0)) ^ + if ((target_pkt->word_en & BIT(0)) ^ (match_word_en & BIT(0))) tmp_word_en &= (~BIT(0)); - if ((target_pkt.word_en & BIT(1)) ^ + if ((target_pkt->word_en & BIT(1)) ^ (match_word_en & BIT(1))) tmp_word_en &= (~BIT(1)); - if ((target_pkt.word_en & BIT(2)) ^ + if ((target_pkt->word_en & BIT(2)) ^ (match_word_en & BIT(2))) tmp_word_en &= (~BIT(2)); - if ((target_pkt.word_en & BIT(3)) ^ + if ((target_pkt->word_en & BIT(3)) ^ (match_word_en & BIT(3))) tmp_word_en &= (~BIT(3)); if ((tmp_word_en & 0x0F) != 0x0F) { *efuse_addr = efuse_get_current_size(hw); - target_pkt.offset = offset; - target_pkt.word_en = 
tmp_word_en; + target_pkt->offset = offset; + target_pkt->word_en = tmp_word_en; } else *bcontinual = false; *write_state = PG_STATE_HEADER; @@ -821,8 +821,8 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, } } else { *efuse_addr += (2 * tmp_word_cnts) + 1; - target_pkt.offset = offset; - target_pkt.word_en = word_en; + target_pkt->offset = offset; + target_pkt->word_en = word_en; *write_state = PG_STATE_HEADER; } } @@ -938,7 +938,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw, efuse_write_data_case1(hw, &efuse_addr, efuse_data, offset, &bcontinual, - &write_state, target_pkt, + &write_state, &target_pkt, &repeat_times, &bresult, word_en); else diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c index 46714910f98c..7145ea543783 100644 --- a/drivers/net/wireless/wl12xx/spi.c +++ b/drivers/net/wireless/wl12xx/spi.c @@ -110,9 +110,8 @@ static void wl1271_spi_reset(struct wl1271 *wl) spi_message_add_tail(&t, &m); spi_sync(wl_to_spi(wl), &m); - kfree(cmd); - wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); + kfree(cmd); } static void wl1271_spi_init(struct wl1271 *wl) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 546de5749824..da1f12120346 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -120,6 +120,9 @@ struct netfront_info { unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; + + /* Statistics */ + int rx_gso_checksum_fixup; }; struct netfront_rx_info { @@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np, return cons; } -static int skb_checksum_setup(struct sk_buff *skb) +static int checksum_setup(struct net_device *dev, struct sk_buff *skb) { struct iphdr *iph; unsigned char *th; int err = -EPROTO; + int recalculate_partial_csum = 0; + + /* + * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy + * peers can fail to set NETRXF_csum_blank when sending a GSO + * frame. In this case force the SKB to CHECKSUM_PARTIAL and + * recalculate the partial checksum. + */ + if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { + struct netfront_info *np = netdev_priv(dev); + np->rx_gso_checksum_fixup++; + skb->ip_summed = CHECKSUM_PARTIAL; + recalculate_partial_csum = 1; + } + + /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; if (skb->protocol != htons(ETH_P_IP)) goto out; @@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb) switch (iph->protocol) { case IPPROTO_TCP: skb->csum_offset = offsetof(struct tcphdr, check); + + if (recalculate_partial_csum) { + struct tcphdr *tcph = (struct tcphdr *)th; + tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - iph->ihl*4, + IPPROTO_TCP, 0); + } break; case IPPROTO_UDP: skb->csum_offset = offsetof(struct udphdr, check); + + if (recalculate_partial_csum) { + struct udphdr *udph = (struct udphdr *)th; + udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - iph->ihl*4, + IPPROTO_UDP, 0); + } break; default: if (net_ratelimit()) @@ -829,13 +864,11 @@ static int handle_incoming_queue(struct net_device *dev, /* Ethernet work: Delayed to here as it peeks the header. 
*/ skb->protocol = eth_type_trans(skb, dev); - if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (skb_checksum_setup(skb)) { - kfree_skb(skb); - packets_dropped++; - dev->stats.rx_errors++; - continue; - } + if (checksum_setup(dev, skb)) { + kfree_skb(skb); + packets_dropped++; + dev->stats.rx_errors++; + continue; } dev->stats.rx_packets++; @@ -1632,12 +1665,59 @@ static void netback_changed(struct xenbus_device *dev, } } +static const struct xennet_stat { + char name[ETH_GSTRING_LEN]; + u16 offset; +} xennet_stats[] = { + { + "rx_gso_checksum_fixup", + offsetof(struct netfront_info, rx_gso_checksum_fixup) + }, +}; + +static int xennet_get_sset_count(struct net_device *dev, int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return ARRAY_SIZE(xennet_stats); + default: + return -EINVAL; + } +} + +static void xennet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 * data) +{ + void *np = netdev_priv(dev); + int i; + + for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) + data[i] = *(int *)(np + xennet_stats[i].offset); +} + +static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) + memcpy(data + i * ETH_GSTRING_LEN, + xennet_stats[i].name, ETH_GSTRING_LEN); + break; + } +} + static const struct ethtool_ops xennet_ethtool_ops = { .set_tx_csum = ethtool_op_set_tx_csum, .set_sg = xennet_set_sg, .set_tso = xennet_set_tso, .get_link = ethtool_op_get_link, + + .get_sset_count = xennet_get_sset_count, + .get_ethtool_stats = xennet_get_ethtool_stats, + .get_strings = xennet_get_strings, }; #ifdef CONFIG_SYSFS diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 65ebee0a3266..b6a6356d09b3 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -565,7 +565,7 @@ static int netiucv_callback_connreq(struct iucv_path *path, struct iucv_event ev; int rc; - if (memcmp(iucvMagic, ipuser, sizeof(ipuser))) + if (memcmp(iucvMagic, ipuser, 16)) /* ipuser must match iucvMagic. 
*/ return -EINVAL; rc = -EINVAL; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 29f848bfc12f..019ae58ab913 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -988,16 +988,30 @@ static void qeth_get_channel_path_desc(struct qeth_card *card) chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0); if (chp_dsc != NULL) { /* CHPP field bit 6 == 1 -> single queue */ - if ((chp_dsc->chpp & 0x02) == 0x02) + if ((chp_dsc->chpp & 0x02) == 0x02) { + if ((atomic_read(&card->qdio.state) != + QETH_QDIO_UNINITIALIZED) && + (card->qdio.no_out_queues == 4)) + /* change from 4 to 1 outbound queues */ + qeth_free_qdio_buffers(card); card->qdio.no_out_queues = 1; + if (card->qdio.default_out_queue != 0) + dev_info(&card->gdev->dev, + "Priority Queueing not supported\n"); + card->qdio.default_out_queue = 0; + } else { + if ((atomic_read(&card->qdio.state) != + QETH_QDIO_UNINITIALIZED) && + (card->qdio.no_out_queues == 1)) { + /* change from 1 to 4 outbound queues */ + qeth_free_qdio_buffers(card); + card->qdio.default_out_queue = 2; + } + card->qdio.no_out_queues = 4; + } card->info.func_level = 0x4100 + chp_dsc->desc; kfree(chp_dsc); } - if (card->qdio.no_out_queues == 1) { - card->qdio.default_out_queue = 0; - dev_info(&card->gdev->dev, - "Priority Queueing not supported\n"); - } QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues); QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level); return; @@ -1832,33 +1846,6 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card) } } -static inline int qeth_get_max_mtu_for_card(int cardtype) -{ - switch (cardtype) { - - case QETH_CARD_TYPE_UNKNOWN: - case QETH_CARD_TYPE_OSD: - case QETH_CARD_TYPE_OSN: - case QETH_CARD_TYPE_OSM: - case QETH_CARD_TYPE_OSX: - return 61440; - case QETH_CARD_TYPE_IQD: - return 57344; - default: - return 1500; - } -} - -static inline int qeth_get_mtu_out_of_mpc(int cardtype) -{ - switch (cardtype) { - case QETH_CARD_TYPE_IQD: - return 1; - default: - return 0; - } -} - static inline int qeth_get_mtu_outof_framesize(int framesize) { switch (framesize) { @@ -1881,10 +1868,9 @@ static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu) case QETH_CARD_TYPE_OSD: case QETH_CARD_TYPE_OSM: case QETH_CARD_TYPE_OSX: - return ((mtu >= 576) && (mtu <= 61440)); case QETH_CARD_TYPE_IQD: return ((mtu >= 576) && - (mtu <= card->info.max_mtu + 4096 - 32)); + (mtu <= card->info.max_mtu)); case QETH_CARD_TYPE_OSN: case QETH_CARD_TYPE_UNKNOWN: default: @@ -1907,7 +1893,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, memcpy(&card->token.ulp_filter_r, QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), QETH_MPC_TOKEN_LENGTH); - if (qeth_get_mtu_out_of_mpc(card->info.type)) { + if (card->info.type == QETH_CARD_TYPE_IQD) { memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); mtu = qeth_get_mtu_outof_framesize(framesize); if (!mtu) { @@ -1915,12 +1901,21 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } - card->info.max_mtu = mtu; + if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) { + /* frame size has changed */ + if (card->dev && + ((card->dev->mtu == card->info.initial_mtu) || + (card->dev->mtu > mtu))) + card->dev->mtu = mtu; + qeth_free_qdio_buffers(card); + } card->info.initial_mtu = mtu; + card->info.max_mtu = mtu; card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE; } else { 
card->info.initial_mtu = qeth_get_initial_mtu_for_card(card); - card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type); + card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU( + iob->data); card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; } @@ -3775,6 +3770,47 @@ static inline int qeth_get_qdio_q_format(struct qeth_card *card) } } +static void qeth_determine_capabilities(struct qeth_card *card) +{ + int rc; + int length; + char *prcd; + struct ccw_device *ddev; + int ddev_offline = 0; + + QETH_DBF_TEXT(SETUP, 2, "detcapab"); + ddev = CARD_DDEV(card); + if (!ddev->online) { + ddev_offline = 1; + rc = ccw_device_set_online(ddev); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); + goto out; + } + } + + rc = qeth_read_conf_data(card, (void **) &prcd, &length); + if (rc) { + QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n", + dev_name(&card->gdev->dev), rc); + QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); + goto out_offline; + } + qeth_configure_unitaddr(card, prcd); + qeth_configure_blkt_default(card, prcd); + kfree(prcd); + + rc = qdio_get_ssqd_desc(ddev, &card->ssqd); + if (rc) + QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + +out_offline: + if (ddev_offline == 1) + ccw_device_set_offline(ddev); +out: + return; +} + static int qeth_qdio_establish(struct qeth_card *card) { struct qdio_initialize init_data; @@ -3905,6 +3941,7 @@ int qeth_core_hardsetup_card(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); atomic_set(&card->force_alloc_skb, 0); + qeth_get_channel_path_desc(card); retry: if (retries) QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", @@ -3933,6 +3970,7 @@ retriable: else goto retry; } + qeth_determine_capabilities(card); qeth_init_tokens(card); qeth_init_func_level(card); rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb); @@ -4202,41 +4240,6 @@ void qeth_core_free_discipline(struct qeth_card *card) card->discipline.ccwgdriver = NULL; } -static void qeth_determine_capabilities(struct qeth_card *card) -{ - int rc; - int length; - char *prcd; - - QETH_DBF_TEXT(SETUP, 2, "detcapab"); - rc = ccw_device_set_online(CARD_DDEV(card)); - if (rc) { - QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); - goto out; - } - - - rc = qeth_read_conf_data(card, (void **) &prcd, &length); - if (rc) { - QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n", - dev_name(&card->gdev->dev), rc); - QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); - goto out_offline; - } - qeth_configure_unitaddr(card, prcd); - qeth_configure_blkt_default(card, prcd); - kfree(prcd); - - rc = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd); - if (rc) - QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); - -out_offline: - ccw_device_set_offline(CARD_DDEV(card)); -out: - return; -} - static int qeth_core_probe_device(struct ccwgroup_device *gdev) { struct qeth_card *card; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 2ac8f6aff5a4..ada0fe782373 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -573,13 +573,13 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card, case IPA_RC_L2_DUP_LAYER3_MAC: dev_warn(&card->gdev->dev, "MAC address %pM already exists\n", - card->dev->dev_addr); + cmd->data.setdelmac.mac); break; case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: dev_warn(&card->gdev->dev, "MAC address %pM is not authorized\n", - card->dev->dev_addr); + cmd->data.setdelmac.mac); break; default: break; diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 
65e1cf104943..207b7d742443 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c @@ -60,7 +60,7 @@ static struct iucv_handler smsg_handler = { static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8], u8 ipuser[16]) { - if (strncmp(ipvmid, "*MSG ", sizeof(ipvmid)) != 0) + if (strncmp(ipvmid, "*MSG ", 8) != 0) return -EINVAL; /* Path pending from *MSG. */ return iucv_path_accept(path, &smsg_handler, "SMSGIUCV ", NULL); diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index 475c31ae985c..77b26f5b9c33 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h @@ -2,7 +2,7 @@ ******************************************************************************* ** O.S : Linux ** FILE NAME : arcmsr.h -** BY : Erich Chen +** BY : Nick Cheng ** Description: SCSI RAID Device Driver for ** ARECA RAID Host adapter ******************************************************************************* @@ -46,8 +46,12 @@ struct device_attribute; /*The limit of outstanding scsi command that firmware can handle*/ #define ARCMSR_MAX_OUTSTANDING_CMD 256 -#define ARCMSR_MAX_FREECCB_NUM 320 -#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2010/02/02" +#ifdef CONFIG_XEN + #define ARCMSR_MAX_FREECCB_NUM 160 +#else + #define ARCMSR_MAX_FREECCB_NUM 320 +#endif +#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2010/08/05" #define ARCMSR_SCSI_INITIATOR_ID 255 #define ARCMSR_MAX_XFER_SECTORS 512 #define ARCMSR_MAX_XFER_SECTORS_B 4096 @@ -60,7 +64,6 @@ struct device_attribute; #define ARCMSR_MAX_HBB_POSTQUEUE 264 #define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */ #define ARCMSR_CDB_SG_PAGE_LENGTH 256 -#define SCSI_CMD_ARECA_SPECIFIC 0xE1 #ifndef PCI_DEVICE_ID_ARECA_1880 #define PCI_DEVICE_ID_ARECA_1880 0x1880 #endif diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c index a4e04c50c436..acdae33de521 100644 --- a/drivers/scsi/arcmsr/arcmsr_attr.c +++ b/drivers/scsi/arcmsr/arcmsr_attr.c @@ -2,7 +2,7 @@ ******************************************************************************* ** O.S : Linux ** FILE NAME : arcmsr_attr.c -** BY : Erich Chen +** BY : Nick Cheng ** Description: attributes exported to sysfs and device host ******************************************************************************* ** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 1cadcd6b7da6..984bd527c6c9 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -2,7 +2,7 @@ ******************************************************************************* ** O.S : Linux ** FILE NAME : arcmsr_hba.c -** BY : Erich Chen +** BY : Nick Cheng ** Description: SCSI RAID Device Driver for ** ARECA RAID Host adapter ******************************************************************************* @@ -76,7 +76,7 @@ MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapte MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(ARCMSR_DRIVER_VERSION); static int sleeptime = 10; -static int retrycount = 30; +static int retrycount = 12; wait_queue_head_t wait_q; static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd); @@ -187,7 +187,6 @@ int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd) if (isleep > 0) { msleep(isleep*1000); } - printk(KERN_NOTICE "wake-up\n"); return 0; } @@ -921,7 +920,6 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, } static 
void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error) - { int id, lun; if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) { @@ -948,7 +946,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct Comma , pCCB->startdone , atomic_read(&acb->ccboutstandingcount)); return; - } + } arcmsr_report_ccb_state(acb, pCCB, error); } @@ -981,7 +979,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; /*clear all outbound posted Q*/ - writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, &reg->iop2drv_doorbell); /* clear doorbell interrupt */ + writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */ for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) { writel(0, &reg->done_qbuffer[i]); @@ -1511,7 +1509,6 @@ static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) arcmsr_drain_donequeue(acb, pCCB, error); } } - static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) { uint32_t index; @@ -2106,10 +2103,6 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd, if (atomic_read(&acb->ccboutstandingcount) >= ARCMSR_MAX_OUTSTANDING_CMD) return SCSI_MLQUEUE_HOST_BUSY; - if ((scsicmd == SCSI_CMD_ARECA_SPECIFIC)) { - printk(KERN_NOTICE "Receiveing SCSI_CMD_ARECA_SPECIFIC command..\n"); - return 0; - } ccb = arcmsr_get_freeccb(acb); if (!ccb) return SCSI_MLQUEUE_HOST_BUSY; @@ -2393,6 +2386,7 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, int index, rtn; bool error; polling_hbb_ccb_retry: + poll_count++; /* clear doorbell interrupt */ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); @@ -2663,6 +2657,7 @@ static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){ + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); return; } else { acb->fw_flag = FW_NORMAL; @@ -2670,8 +2665,10 @@ static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb) atomic_set(&acb->rq_map_token, 16); } atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token)); - if (atomic_dec_and_test(&acb->rq_map_token)) + if (atomic_dec_and_test(&acb->rq_map_token)) { + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); return; + } writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); } @@ -2682,15 +2679,18 @@ static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb) { struct MessageUnit_B __iomem *reg = acb->pmuB; if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){ + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); return; } else { acb->fw_flag = FW_NORMAL; if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) { - atomic_set(&acb->rq_map_token,16); + atomic_set(&acb->rq_map_token, 16); } atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token)); - if(atomic_dec_and_test(&acb->rq_map_token)) + if (atomic_dec_and_test(&acb->rq_map_token)) { + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); return; + }
writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell); mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); } @@ -2701,6 +2701,7 @@ static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb) { struct MessageUnit_C __iomem *reg = acb->pmuC; if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) { + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); return; } else { acb->fw_flag = FW_NORMAL; @@ -2708,8 +2709,10 @@ static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb) atomic_set(&acb->rq_map_token, 16); } atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token)); - if (atomic_dec_and_test(&acb->rq_map_token)) + if (atomic_dec_and_test(&acb->rq_map_token)) { + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); return; + } writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); @@ -2897,6 +2900,8 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb) uint32_t intmask_org; uint8_t rtnval = 0x00; int i = 0; + unsigned long flags; + if (atomic_read(&acb->ccboutstandingcount) != 0) { /* disable all outbound interrupt */ intmask_org = arcmsr_disable_outbound_ints(acb); @@ -2907,7 +2912,12 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb) for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { ccb = acb->pccb_pool[i]; if (ccb->startdone == ARCMSR_CCB_START) { - arcmsr_ccb_complete(ccb); + scsi_dma_unmap(ccb->pcmd); + ccb->startdone = ARCMSR_CCB_DONE; + ccb->ccb_flags = 0; + spin_lock_irqsave(&acb->ccblist_lock, flags); + list_add_tail(&ccb->list, &acb->ccb_free_list); + spin_unlock_irqrestore(&acb->ccblist_lock, flags); } } atomic_set(&acb->ccboutstandingcount, 0); @@ -2920,8 +2930,7 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb) static int arcmsr_bus_reset(struct scsi_cmnd *cmd) { - struct AdapterControlBlock *acb = - (struct AdapterControlBlock *)cmd->device->host->hostdata; + struct AdapterControlBlock *acb; uint32_t intmask_org, outbound_doorbell; int retry_count = 0; int rtn = FAILED; @@ -2971,31 +2980,16 @@ sleep_again: atomic_set(&acb->rq_map_token, 16); atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; - init_timer(&acb->eternal_timer); - acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ); - acb->eternal_timer.data = (unsigned long) acb; - acb->eternal_timer.function = &arcmsr_request_device_map; - add_timer(&acb->eternal_timer); + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); acb->acb_flags &= ~ACB_F_BUS_RESET; rtn = SUCCESS; printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n"); } else { acb->acb_flags &= ~ACB_F_BUS_RESET; - if (atomic_read(&acb->rq_map_token) == 0) { - atomic_set(&acb->rq_map_token, 16); - atomic_set(&acb->ante_token_value, 16); - acb->fw_flag = FW_NORMAL; - init_timer(&acb->eternal_timer); - acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ); - acb->eternal_timer.data = (unsigned long) acb; - acb->eternal_timer.function = &arcmsr_request_device_map; - add_timer(&acb->eternal_timer); - } else { - atomic_set(&acb->rq_map_token, 16); - atomic_set(&acb->ante_token_value, 16); - acb->fw_flag = FW_NORMAL; - mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ)); - } + atomic_set(&acb->rq_map_token, 16); + 
atomic_set(&acb->ante_token_value, 16); + acb->fw_flag = FW_NORMAL; + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ)); rtn = SUCCESS; } break; @@ -3007,21 +3001,10 @@ sleep_again: rtn = FAILED; } else { acb->acb_flags &= ~ACB_F_BUS_RESET; - if (atomic_read(&acb->rq_map_token) == 0) { - atomic_set(&acb->rq_map_token, 16); - atomic_set(&acb->ante_token_value, 16); - acb->fw_flag = FW_NORMAL; - init_timer(&acb->eternal_timer); - acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ); - acb->eternal_timer.data = (unsigned long) acb; - acb->eternal_timer.function = &arcmsr_request_device_map; - add_timer(&acb->eternal_timer); - } else { - atomic_set(&acb->rq_map_token, 16); - atomic_set(&acb->ante_token_value, 16); - acb->fw_flag = FW_NORMAL; - mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ)); - } + atomic_set(&acb->rq_map_token, 16); + atomic_set(&acb->ante_token_value, 16); + acb->fw_flag = FW_NORMAL; + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); rtn = SUCCESS; } break; @@ -3067,31 +3050,16 @@ sleep: atomic_set(&acb->rq_map_token, 16); atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; - init_timer(&acb->eternal_timer); - acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ); - acb->eternal_timer.data = (unsigned long) acb; - acb->eternal_timer.function = &arcmsr_request_device_map; - add_timer(&acb->eternal_timer); + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); acb->acb_flags &= ~ACB_F_BUS_RESET; rtn = SUCCESS; printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n"); } else { acb->acb_flags &= ~ACB_F_BUS_RESET; - if (atomic_read(&acb->rq_map_token) == 0) { - atomic_set(&acb->rq_map_token, 16); - atomic_set(&acb->ante_token_value, 16); - acb->fw_flag = FW_NORMAL; - init_timer(&acb->eternal_timer); - acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ); - acb->eternal_timer.data = (unsigned long) acb; - acb->eternal_timer.function = &arcmsr_request_device_map; - add_timer(&acb->eternal_timer); - } else { - atomic_set(&acb->rq_map_token, 16); - atomic_set(&acb->ante_token_value, 16); - acb->fw_flag = FW_NORMAL; - mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ)); - } + atomic_set(&acb->rq_map_token, 16); + atomic_set(&acb->ante_token_value, 16); + acb->fw_flag = FW_NORMAL; + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ)); rtn = SUCCESS; } break; diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 5815cbeb27a6..9a7aaf5f1311 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -646,6 +646,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost) spin_lock_irqsave(shost->host_lock, flags); list_splice_init(&shost->eh_cmd_q, &eh_work_q); + shost->host_eh_scheduled = 0; spin_unlock_irqrestore(shost->host_lock, flags); SAS_DPRINTK("Enter %s\n", __func__); diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index b2a817055b8b..9ead0399808a 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -2176,9 +2176,9 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) /* adjust hba_queue_depth, reply_free_queue_depth, * and queue_size */ - ioc->hba_queue_depth -= queue_diff; - ioc->reply_free_queue_depth -= queue_diff; - queue_size -= queue_diff; + ioc->hba_queue_depth -= (queue_diff / 2); + ioc->reply_free_queue_depth -= (queue_diff / 2); + queue_size = 
facts->MaxReplyDescriptorPostQueueDepth; } ioc->reply_post_queue_depth = queue_size; @@ -3941,6 +3941,8 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc) static void _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) { + mpt2sas_scsih_reset_handler(ioc, reset_phase); + mpt2sas_ctl_reset_handler(ioc, reset_phase); switch (reset_phase) { case MPT2_IOC_PRE_RESET: dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: " @@ -3971,8 +3973,6 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) "MPT2_IOC_DONE_RESET\n", ioc->name, __func__)); break; } - mpt2sas_scsih_reset_handler(ioc, reset_phase); - mpt2sas_ctl_reset_handler(ioc, reset_phase); } /** @@ -4026,6 +4026,7 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, { int r; unsigned long flags; + u8 pe_complete = ioc->wait_for_port_enable_to_complete; dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, __func__)); @@ -4068,6 +4069,14 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, if (r) goto out; _base_reset_handler(ioc, MPT2_IOC_AFTER_RESET); + + /* If this hard reset is called while port enable is active, then + * there is no reason to call make_ioc_operational + */ + if (pe_complete) { + r = -EFAULT; + goto out; + } r = _base_make_ioc_operational(ioc, sleep_flag); if (!r) _base_reset_handler(ioc, MPT2_IOC_DONE_RESET); diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index eda347c57979..5ded3db6e316 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -819,7 +819,7 @@ _scsih_is_end_device(u32 device_info) } /** - * mptscsih_get_scsi_lookup - returns scmd entry + * _scsih_scsi_lookup_get - returns scmd entry * @ioc: per adapter object * @smid: system request message index * @@ -832,6 +832,28 @@ _scsih_scsi_lookup_get(struct MPT2SAS_ADAPTER *ioc, u16 smid) } /** + * _scsih_scsi_lookup_get_clear - returns scmd entry + * @ioc: per adapter object + * @smid: system request message index + * + * Returns the smid stored scmd pointer. + * Then will derefrence the stored scmd pointer. 
+ */ +static inline struct scsi_cmnd * +_scsih_scsi_lookup_get_clear(struct MPT2SAS_ADAPTER *ioc, u16 smid) +{ + unsigned long flags; + struct scsi_cmnd *scmd; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + scmd = ioc->scsi_lookup[smid - 1].scmd; + ioc->scsi_lookup[smid - 1].scmd = NULL; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + return scmd; +} + +/** * _scsih_scsi_lookup_find_by_scmd - scmd lookup * @ioc: per adapter object * @smid: system request message index @@ -2981,9 +3003,6 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc, u16 handle; for (i = 0 ; i < event_data->NumEntries; i++) { - if (event_data->PHY[i].PhyStatus & - MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) - continue; handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); if (!handle) continue; @@ -3210,7 +3229,7 @@ _scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc) u16 count = 0; for (smid = 1; smid <= ioc->scsiio_depth; smid++) { - scmd = _scsih_scsi_lookup_get(ioc, smid); + scmd = _scsih_scsi_lookup_get_clear(ioc, smid); if (!scmd) continue; count++; @@ -3804,7 +3823,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) u32 response_code = 0; mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); - scmd = _scsih_scsi_lookup_get(ioc, smid); + scmd = _scsih_scsi_lookup_get_clear(ioc, smid); if (scmd == NULL) return 1; @@ -5005,6 +5024,12 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc, event_data); #endif + /* In MPI Revision K (0xC), the internal device reset complete was + * implemented, so avoid setting tm_busy flag for older firmware. + */ + if ((ioc->facts.HeaderVersion >> 8) < 0xC) + return; + if (event_data->ReasonCode != MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET && event_data->ReasonCode != @@ -5099,6 +5124,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { struct scsi_cmnd *scmd; + struct scsi_device *sdev; u16 smid, handle; u32 lun; struct MPT2SAS_DEVICE *sas_device_priv_data; @@ -5109,12 +5135,17 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data; #endif u16 ioc_status; + unsigned long flags; + int r; + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "broadcast primative: " "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum, event_data->PortWidth)); dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, __func__)); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + ioc->broadcast_aen_busy = 0; termination_count = 0; query_count = 0; mpi_reply = ioc->tm_cmds.reply; @@ -5122,7 +5153,8 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, scmd = _scsih_scsi_lookup_get(ioc, smid); if (!scmd) continue; - sas_device_priv_data = scmd->device->hostdata; + sdev = scmd->device; + sas_device_priv_data = sdev->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) continue; /* skip hidden raid components */ @@ -5138,6 +5170,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, lun = sas_device_priv_data->lun; query_count++; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun, MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL); ioc->tm_cmds.status = MPT2_CMD_NOT_USED; @@ -5147,14 +5180,20 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, (mpi_reply->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED || mpi_reply->ResponseCode == - 
MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) + MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) { + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); continue; - - mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun, - MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30, NULL); + } + r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, + sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, + scmd); + if (r == FAILED) + sdev_printk(KERN_WARNING, sdev, "task abort: FAILED " + "scmd(%p)\n", scmd); termination_count += le32_to_cpu(mpi_reply->TerminationCount); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); } - ioc->broadcast_aen_busy = 0; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - exit, query_count = %d termination_count = %d\n", @@ -6626,6 +6665,7 @@ _scsih_remove(struct pci_dev *pdev) destroy_workqueue(wq); /* release all the volumes */ + _scsih_ir_shutdown(ioc); list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list, list) { if (raid_device->starget) { diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 9b3ca103135f..f616cefc95ba 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -128,8 +128,7 @@ static void handle_tx(struct vhost_net *net) size_t hdr_size; struct socket *sock; - /* TODO: check that we are running from vhost_worker? - * Not sure it's worth it, it's straight-forward enough. */ + /* TODO: check that we are running from vhost_worker? */ sock = rcu_dereference_check(vq->private_data, 1); if (!sock) return; @@ -306,7 +305,8 @@ static void handle_rx_big(struct vhost_net *net) size_t len, total_len = 0; int err; size_t hdr_size; - struct socket *sock = rcu_dereference(vq->private_data); + /* TODO: check that we are running from vhost_worker? */ + struct socket *sock = rcu_dereference_check(vq->private_data, 1); if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue)) return; @@ -415,7 +415,8 @@ static void handle_rx_mergeable(struct vhost_net *net) int err, headcount; size_t vhost_hlen, sock_hlen; size_t vhost_len, sock_len; - struct socket *sock = rcu_dereference(vq->private_data); + /* TODO: check that we are running from vhost_worker? */ + struct socket *sock = rcu_dereference_check(vq->private_data, 1); if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue)) return; diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 2af44b7b1f3f..b3363ae38518 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -173,9 +173,9 @@ static inline int vhost_has_feature(struct vhost_dev *dev, int bit) { unsigned acked_features; - acked_features = - rcu_dereference_index_check(dev->acked_features, - lockdep_is_held(&dev->mutex)); + /* TODO: check that we are running from vhost_worker or dev mutex is + * held? */ + acked_features = rcu_dereference_index_check(dev->acked_features, 1); return acked_features & (1 << bit); } |
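
The niu hunks above allocate the RX/TX ring arrays and only then publish them to the lockless statistics readers: the ring count is stored first, an smp_wmb() orders that store before the ring pointer becomes visible, and niu_get_rx_stats()/niu_get_tx_stats() snapshot the pointer with ACCESS_ONCE() and bail out while it is still NULL. Below is a minimal sketch of that publication pattern, assuming a kernel build context; the struct and function names are hypothetical and it is not the niu code itself.

/*
 * Illustrative sketch only: a writer publishes a ring array so that a
 * lockless reader never dereferences an unpublished pointer or pairs a
 * published pointer with an unset element count.
 */
#include <linux/slab.h>		/* kcalloc, GFP_KERNEL */
#include <linux/compiler.h>	/* ACCESS_ONCE (kernel context assumed for smp_wmb) */

struct ring_info {
	unsigned long packets;
};

struct dummy_dev {
	int num_rings;			/* written before the pointer below */
	struct ring_info *rings;	/* read locklessly by the stats path */
};

static int publish_rings(struct dummy_dev *d, int count)
{
	struct ring_info *rings;

	rings = kcalloc(count, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	d->num_rings = count;
	smp_wmb();		/* make the count visible before the pointer */
	d->rings = rings;
	return 0;
}

static unsigned long sum_packets(struct dummy_dev *d)
{
	struct ring_info *rings = ACCESS_ONCE(d->rings);
	unsigned long sum = 0;
	int i;

	if (!rings)		/* rings not published yet: nothing to report */
		return 0;
	for (i = 0; i < d->num_rings; i++)
		sum += rings[i].packets;
	return sum;
}

The single snapshot matters: reading the pointer once keeps the loop bound and the array it indexes consistent even if a later reconfiguration republishes the rings.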
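
The xen-netfront hunk earlier in this series handles buggy backends that deliver a GSO frame without marking it NETRXF_csum_blank: the skb is forced to CHECKSUM_PARTIAL and the transport check field is re-seeded with the IPv4 pseudo-header sum so the stack can complete the checksum later. A rough sketch of that fixup for the TCP case follows, assuming a kernel build context and the IP header at skb->data; the helper name is hypothetical, not the netfront function.

/*
 * Illustrative sketch only: seed the TCP checksum field with the IPv4
 * pseudo-header sum so a CHECKSUM_PARTIAL skb can be completed later.
 */
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>		/* IPPROTO_TCP */
#include <net/checksum.h>	/* csum_tcpudp_magic */

static void seed_tcp_partial_csum(struct sk_buff *skb)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct tcphdr *tcph = (struct tcphdr *)(skb->data + iph->ihl * 4);

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = (unsigned char *)tcph - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);

	/* ~sum of the pseudo-header; the payload sum is folded in later */
	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					 skb->len - iph->ihl * 4,
					 IPPROTO_TCP, 0);
}

The UDP case in the patch is identical apart from using struct udphdr and IPPROTO_UDP.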