| field | value | date |
|---|---|---|
| author | David S. Miller <davem@davemloft.net> | 2011-10-07 21:38:43 +0400 |
| committer | David S. Miller <davem@davemloft.net> | 2011-10-07 21:38:43 +0400 |
| commit | 88c5100c28b02c4b2b2c6f6fafbbd76d90f698b9 (patch) | |
| tree | 08c4399e0341f7eb0ccb24e15f2cab687275c2a4 /drivers | |
| parent | 8083f0fc969d9b5353061a7a6f963405057e26b1 (diff) | |
| parent | 3ee72ca99288f1de95ec9c570e43f531c8799f06 (diff) | |
| download | linux-88c5100c28b02c4b2b2c6f6fafbbd76d90f698b9.tar.xz | |
Merge branch 'master' of github.com:davem330/net
Conflicts:
net/batman-adv/soft-interface.c
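
The conflict above was resolved by hand when the merge was committed; the following is a minimal sketch of how a merge like this is typically reproduced and resolved locally. The public git:// fetch URL and the editor step are assumptions for illustration, not taken from the commit itself (the message uses the SSH form github.com:davem330/net).

```
# Fetch the branch named in the merge message (public URL assumed).
git fetch git://github.com/davem330/net master
git merge FETCH_HEAD                       # stops on the conflicted file
$EDITOR net/batman-adv/soft-interface.c    # resolve the conflict markers by hand
git add net/batman-adv/soft-interface.c
git commit                                 # records the merge, including the Conflicts: note
```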
Diffstat (limited to 'drivers')
72 files changed, 539 insertions, 604 deletions
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 2c18d584066d..b97294e2d95b 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -42,6 +42,22 @@ static struct pm_clk_data *__to_pcd(struct device *dev) } /** + * pm_clk_acquire - Acquire a device clock. + * @dev: Device whose clock is to be acquired. + * @ce: PM clock entry corresponding to the clock. + */ +static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) +{ + ce->clk = clk_get(dev, ce->con_id); + if (IS_ERR(ce->clk)) { + ce->status = PCE_STATUS_ERROR; + } else { + ce->status = PCE_STATUS_ACQUIRED; + dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); + } +} + +/** * pm_clk_add - Start using a device clock for power management. * @dev: Device whose clock is going to be used for power management. * @con_id: Connection ID of the clock. @@ -73,6 +89,8 @@ int pm_clk_add(struct device *dev, const char *con_id) } } + pm_clk_acquire(dev, ce); + spin_lock_irq(&pcd->lock); list_add_tail(&ce->node, &pcd->clock_list); spin_unlock_irq(&pcd->lock); @@ -82,17 +100,12 @@ int pm_clk_add(struct device *dev, const char *con_id) /** * __pm_clk_remove - Destroy PM clock entry. * @ce: PM clock entry to destroy. - * - * This routine must be called under the spinlock protecting the PM list of - * clocks corresponding the the @ce's device. */ static void __pm_clk_remove(struct pm_clock_entry *ce) { if (!ce) return; - list_del(&ce->node); - if (ce->status < PCE_STATUS_ERROR) { if (ce->status == PCE_STATUS_ENABLED) clk_disable(ce->clk); @@ -126,18 +139,22 @@ void pm_clk_remove(struct device *dev, const char *con_id) spin_lock_irq(&pcd->lock); list_for_each_entry(ce, &pcd->clock_list, node) { - if (!con_id && !ce->con_id) { - __pm_clk_remove(ce); - break; - } else if (!con_id || !ce->con_id) { + if (!con_id && !ce->con_id) + goto remove; + else if (!con_id || !ce->con_id) continue; - } else if (!strcmp(con_id, ce->con_id)) { - __pm_clk_remove(ce); - break; - } + else if (!strcmp(con_id, ce->con_id)) + goto remove; } spin_unlock_irq(&pcd->lock); + return; + + remove: + list_del(&ce->node); + spin_unlock_irq(&pcd->lock); + + __pm_clk_remove(ce); } /** @@ -175,20 +192,27 @@ void pm_clk_destroy(struct device *dev) { struct pm_clk_data *pcd = __to_pcd(dev); struct pm_clock_entry *ce, *c; + struct list_head list; if (!pcd) return; dev->power.subsys_data = NULL; + INIT_LIST_HEAD(&list); spin_lock_irq(&pcd->lock); list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node) - __pm_clk_remove(ce); + list_move(&ce->node, &list); spin_unlock_irq(&pcd->lock); kfree(pcd); + + list_for_each_entry_safe_reverse(ce, c, &list, node) { + list_del(&ce->node); + __pm_clk_remove(ce); + } } #endif /* CONFIG_PM */ @@ -196,23 +220,6 @@ void pm_clk_destroy(struct device *dev) #ifdef CONFIG_PM_RUNTIME /** - * pm_clk_acquire - Acquire a device clock. - * @dev: Device whose clock is to be acquired. - * @con_id: Connection ID of the clock. - */ -static void pm_clk_acquire(struct device *dev, - struct pm_clock_entry *ce) -{ - ce->clk = clk_get(dev, ce->con_id); - if (IS_ERR(ce->clk)) { - ce->status = PCE_STATUS_ERROR; - } else { - ce->status = PCE_STATUS_ACQUIRED; - dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); - } -} - -/** * pm_clk_suspend - Disable clocks in a device's PM clock list. * @dev: Device to disable the clocks for. 
*/ @@ -230,9 +237,6 @@ int pm_clk_suspend(struct device *dev) spin_lock_irqsave(&pcd->lock, flags); list_for_each_entry_reverse(ce, &pcd->clock_list, node) { - if (ce->status == PCE_STATUS_NONE) - pm_clk_acquire(dev, ce); - if (ce->status < PCE_STATUS_ERROR) { clk_disable(ce->clk); ce->status = PCE_STATUS_ACQUIRED; @@ -262,9 +266,6 @@ int pm_clk_resume(struct device *dev) spin_lock_irqsave(&pcd->lock, flags); list_for_each_entry(ce, &pcd->clock_list, node) { - if (ce->status == PCE_STATUS_NONE) - pm_clk_acquire(dev, ce); - if (ce->status < PCE_STATUS_ERROR) { clk_enable(ce->clk); ce->status = PCE_STATUS_ENABLED; diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index f6595aba4f0f..fa567f1158c2 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -43,6 +43,7 @@ config TCG_NSC config TCG_ATMEL tristate "Atmel TPM Interface" + depends on PPC64 || HAS_IOPORT ---help--- If you have a TPM security chip from Atmel say Yes and it will be accessible from within Linux. To compile this driver diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index caf8012ef47c..9ca5c021d0b6 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -383,6 +383,9 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, u32 count, ordinal; unsigned long stop; + if (bufsiz > TPM_BUFSIZE) + bufsiz = TPM_BUFSIZE; + count = be32_to_cpu(*((__be32 *) (buf + 2))); ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); if (count == 0) @@ -1102,6 +1105,7 @@ ssize_t tpm_read(struct file *file, char __user *buf, { struct tpm_chip *chip = file->private_data; ssize_t ret_size; + int rc; del_singleshot_timer_sync(&chip->user_read_timer); flush_work_sync(&chip->work); @@ -1112,8 +1116,11 @@ ssize_t tpm_read(struct file *file, char __user *buf, ret_size = size; mutex_lock(&chip->buffer_mutex); - if (copy_to_user(buf, chip->data_buffer, ret_size)) + rc = copy_to_user(buf, chip->data_buffer, ret_size); + memset(chip->data_buffer, 0, ret_size); + if (rc) ret_size = -EFAULT; + mutex_unlock(&chip->buffer_mutex); } diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c index 82facc9104c7..4d2464871ada 100644 --- a/drivers/char/tpm/tpm_nsc.c +++ b/drivers/char/tpm/tpm_nsc.c @@ -396,8 +396,6 @@ static void __exit cleanup_nsc(void) if (pdev) { tpm_nsc_remove(&pdev->dev); platform_device_unregister(pdev); - kfree(pdev); - pdev = NULL; } platform_driver_unregister(&nsc_drv); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ce045a8cf82c..f07e4252b708 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -67,11 +67,11 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); MODULE_PARM_DESC(i915_enable_rc6, "Enable power-saving render C-state 6 (default: true)"); -unsigned int i915_enable_fbc __read_mostly = 1; +unsigned int i915_enable_fbc __read_mostly = -1; module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); MODULE_PARM_DESC(i915_enable_fbc, "Enable frame buffer compression for power savings " - "(default: false)"); + "(default: -1 (use per-chip default))"); unsigned int i915_lvds_downclock __read_mostly = 0; module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 56a8554d9039..04411ad2e779 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1799,6 +1799,7 @@ static void intel_update_fbc(struct drm_device *dev) struct 
drm_framebuffer *fb; struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj; + int enable_fbc; DRM_DEBUG_KMS("\n"); @@ -1839,8 +1840,15 @@ static void intel_update_fbc(struct drm_device *dev) intel_fb = to_intel_framebuffer(fb); obj = intel_fb->obj; - if (!i915_enable_fbc) { - DRM_DEBUG_KMS("fbc disabled per module param (default off)\n"); + enable_fbc = i915_enable_fbc; + if (enable_fbc < 0) { + DRM_DEBUG_KMS("fbc set to per-chip default\n"); + enable_fbc = 1; + if (INTEL_INFO(dev)->gen <= 5) + enable_fbc = 0; + } + if (!enable_fbc) { + DRM_DEBUG_KMS("fbc disabled per module param\n"); dev_priv->no_fbc_reason = FBC_MODULE_PARAM; goto out_disable; } @@ -4687,13 +4695,13 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, bpc = 6; /* min is 18bpp */ break; case 24: - bpc = min((unsigned int)8, display_bpc); + bpc = 8; break; case 30: - bpc = min((unsigned int)10, display_bpc); + bpc = 10; break; case 48: - bpc = min((unsigned int)12, display_bpc); + bpc = 12; break; default: DRM_DEBUG("unsupported depth, assuming 24 bits\n"); @@ -4701,10 +4709,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, break; } + display_bpc = min(display_bpc, bpc); + DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", bpc, display_bpc); - *pipe_bpp = bpc * 3; + *pipe_bpp = display_bpc * 3; return display_bpc != bpc; } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0b2ee9d39980..fe1099d8817e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -337,9 +337,6 @@ extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, struct drm_connector *connector, struct intel_load_detect_pipe *old); -extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); -extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); -extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable); extern void intelfb_restore(void); extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 30fe554d8936..6348c499616f 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -92,6 +92,11 @@ struct intel_sdvo { */ uint16_t attached_output; + /* + * Hotplug activation bits for this device + */ + uint8_t hotplug_active[2]; + /** * This is used to select the color range of RBG outputs in HDMI mode. * It is only valid when using TMDS encoding and 8 bit per color mode. @@ -1208,74 +1213,20 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in return true; } -/* No use! 
*/ -#if 0 -struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) -{ - struct drm_connector *connector = NULL; - struct intel_sdvo *iout = NULL; - struct intel_sdvo *sdvo; - - /* find the sdvo connector */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - iout = to_intel_sdvo(connector); - - if (iout->type != INTEL_OUTPUT_SDVO) - continue; - - sdvo = iout->dev_priv; - - if (sdvo->sdvo_reg == SDVOB && sdvoB) - return connector; - - if (sdvo->sdvo_reg == SDVOC && !sdvoB) - return connector; - - } - - return NULL; -} - -int intel_sdvo_supports_hotplug(struct drm_connector *connector) +static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo) { u8 response[2]; - u8 status; - struct intel_sdvo *intel_sdvo; - DRM_DEBUG_KMS("\n"); - - if (!connector) - return 0; - - intel_sdvo = to_intel_sdvo(connector); return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, &response, 2) && response[0]; } -void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) +static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) { - u8 response[2]; - u8 status; - struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector); - - intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); - intel_sdvo_read_response(intel_sdvo, &response, 2); - - if (on) { - intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); - status = intel_sdvo_read_response(intel_sdvo, &response, 2); - - intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); - } else { - response[0] = 0; - response[1] = 0; - intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); - } + struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); - intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); - intel_sdvo_read_response(intel_sdvo, &response, 2); + intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2); } -#endif static bool intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) @@ -2045,6 +1996,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) { struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_connector *connector; + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; @@ -2062,7 +2014,17 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; - connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) { + connector->polled = DRM_CONNECTOR_POLL_HPD; + intel_sdvo->hotplug_active[0] |= 1 << device; + /* Some SDVO devices have one-shot hotplug interrupts. + * Ensure that they get re-enabled when an interrupt happens. + */ + intel_encoder->hot_plug = intel_sdvo_enable_hotplug; + intel_sdvo_enable_hotplug(intel_encoder); + } + else + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; encoder->encoder_type = DRM_MODE_ENCODER_TMDS; connector->connector_type = DRM_MODE_CONNECTOR_DVID; @@ -2569,6 +2531,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) goto err; + /* Set up hotplug command - note paranoia about contents of reply. 
+ * We assume that the hardware is in a sane state, and only touch + * the bits we think we understand. + */ + intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, + &intel_sdvo->hotplug_active, 2); + intel_sdvo->hotplug_active[0] &= ~0x3; + if (intel_sdvo_output_setup(intel_sdvo, intel_sdvo->caps.output_flags) != true) { DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 7ad43c6b1db7..4da23889fea6 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -115,6 +115,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, u8 msg[20]; int msg_bytes = send_bytes + 4; u8 ack; + unsigned retry; if (send_bytes > 16) return -1; @@ -125,20 +126,20 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, msg[3] = (msg_bytes << 4) | (send_bytes - 1); memcpy(&msg[4], send, send_bytes); - while (1) { + for (retry = 0; retry < 4; retry++) { ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_bytes, NULL, 0, delay, &ack); if (ret < 0) return ret; if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) - break; + return send_bytes; else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) udelay(400); else return -EIO; } - return send_bytes; + return -EIO; } static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, @@ -149,26 +150,29 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, int msg_bytes = 4; u8 ack; int ret; + unsigned retry; msg[0] = address; msg[1] = address >> 8; msg[2] = AUX_NATIVE_READ << 4; msg[3] = (msg_bytes << 4) | (recv_bytes - 1); - while (1) { + for (retry = 0; retry < 4; retry++) { ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_bytes, recv, recv_bytes, delay, &ack); - if (ret == 0) - return -EPROTO; if (ret < 0) return ret; if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) return ret; else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) udelay(400); + else if (ret == 0) + return -EPROTO; else return -EIO; } + + return -EIO; } static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index e8a746712b5b..c4ffa14fb2f4 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1590,48 +1590,6 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, return backend_map; } -static void evergreen_program_channel_remap(struct radeon_device *rdev) -{ - u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; - - tmp = RREG32(MC_SHARED_CHMAP); - switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { - case 0: - case 1: - case 2: - case 3: - default: - /* default mapping */ - mc_shared_chremap = 0x00fac688; - break; - } - - switch (rdev->family) { - case CHIP_HEMLOCK: - case CHIP_CYPRESS: - case CHIP_BARTS: - tcp_chan_steer_lo = 0x54763210; - tcp_chan_steer_hi = 0x0000ba98; - break; - case CHIP_JUNIPER: - case CHIP_REDWOOD: - case CHIP_CEDAR: - case CHIP_PALM: - case CHIP_SUMO: - case CHIP_SUMO2: - case CHIP_TURKS: - case CHIP_CAICOS: - default: - tcp_chan_steer_lo = 0x76543210; - tcp_chan_steer_hi = 0x0000ba98; - break; - } - - WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); - WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); - WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); -} - static void evergreen_gpu_init(struct 
radeon_device *rdev) { u32 cc_rb_backend_disable = 0; @@ -2078,8 +2036,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev) WREG32(DMIF_ADDR_CONFIG, gb_addr_config); WREG32(HDP_ADDR_CONFIG, gb_addr_config); - evergreen_program_channel_remap(rdev); - num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; grbm_gfx_index = INSTANCE_BROADCAST_WRITES; diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 99fbd793c08c..8c79ca97753d 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -569,36 +569,6 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev, return backend_map; } -static void cayman_program_channel_remap(struct radeon_device *rdev) -{ - u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; - - tmp = RREG32(MC_SHARED_CHMAP); - switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { - case 0: - case 1: - case 2: - case 3: - default: - /* default mapping */ - mc_shared_chremap = 0x00fac688; - break; - } - - switch (rdev->family) { - case CHIP_CAYMAN: - default: - //tcp_chan_steer_lo = 0x54763210 - tcp_chan_steer_lo = 0x76543210; - tcp_chan_steer_hi = 0x0000ba98; - break; - } - - WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); - WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); - WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); -} - static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev, u32 disable_mask_per_se, u32 max_disable_mask_per_se, @@ -842,8 +812,6 @@ static void cayman_gpu_init(struct radeon_device *rdev) WREG32(DMIF_ADDR_CONFIG, gb_addr_config); WREG32(HDP_ADDR_CONFIG, gb_addr_config); - cayman_program_channel_remap(rdev); - /* primary versions */ WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 5b1837b4aacf..7fcdbbbf2979 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev, radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); radeon_ring_write(rdev, 0); radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); - radeon_ring_write(rdev, cur_pages); - radeon_ring_write(rdev, cur_pages); + radeon_ring_write(rdev, num_gpu_pages); + radeon_ring_write(rdev, num_gpu_pages); radeon_ring_write(rdev, cur_pages | (stride_pixels << 16)); } radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index c4b8741dbf58..bce63fd329d4 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -68,11 +68,11 @@ void radeon_connector_hotplug(struct drm_connector *connector) if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { int saved_dpms = connector->dpms; - if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) && - radeon_dp_needs_link_train(radeon_connector)) - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); - else + /* Only turn off the display it it's physically disconnected */ + if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + else if (radeon_dp_needs_link_train(radeon_connector)) + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); connector->dpms = saved_dpms; } } diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 3189a7efb2e9..fde25c0d65a0 100644 --- 
a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -208,23 +208,25 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, int xorigin = 0, yorigin = 0; int w = radeon_crtc->cursor_width; - if (x < 0) - xorigin = -x + 1; - if (y < 0) - yorigin = -y + 1; - if (xorigin >= CURSOR_WIDTH) - xorigin = CURSOR_WIDTH - 1; - if (yorigin >= CURSOR_HEIGHT) - yorigin = CURSOR_HEIGHT - 1; - if (ASIC_IS_AVIVO(rdev)) { - int i = 0; - struct drm_crtc *crtc_p; - /* avivo cursor are offset into the total surface */ x += crtc->x; y += crtc->y; - DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); + } + DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); + + if (x < 0) { + xorigin = min(-x, CURSOR_WIDTH - 1); + x = 0; + } + if (y < 0) { + yorigin = min(-y, CURSOR_HEIGHT - 1); + y = 0; + } + + if (ASIC_IS_AVIVO(rdev)) { + int i = 0; + struct drm_crtc *crtc_p; /* avivo cursor image can't end on 128 pixel boundary or * go past the end of the frame if both crtcs are enabled @@ -253,16 +255,12 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, radeon_lock_cursor(crtc, true); if (ASIC_IS_DCE4(rdev)) { - WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, - ((xorigin ? 0 : x) << 16) | - (yorigin ? 0 : y)); + WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); } else if (ASIC_IS_AVIVO(rdev)) { - WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, - ((xorigin ? 0 : x) << 16) | - (yorigin ? 0 : y)); + WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset, ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); @@ -276,8 +274,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | yorigin)); WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset, (RADEON_CUR_LOCK - | ((xorigin ? 0 : x) << 16) - | (yorigin ? 
0 : y))); + | (x << 16) + | y)); /* offset is from DISP(2)_BASE_ADDRESS */ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + (yorigin * 256))); diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 319d85d7e759..13690f3eb4a4 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -1507,7 +1507,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) switch (mode) { case DRM_MODE_DPMS_ON: args.ucAction = ATOM_ENABLE; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + /* workaround for DVOOutputControl on some RS690 systems */ + if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) { + u32 reg = RREG32(RADEON_BIOS_3_SCRATCH); + WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + WREG32(RADEON_BIOS_3_SCRATCH, reg); + } else + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { args.ucAction = ATOM_LCD_BLON; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 4720d000d440..b13c2eedc321 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -536,55 +536,6 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev, return backend_map; } -static void rv770_program_channel_remap(struct radeon_device *rdev) -{ - u32 tcp_chan_steer, mc_shared_chremap, tmp; - bool force_no_swizzle; - - switch (rdev->family) { - case CHIP_RV770: - case CHIP_RV730: - force_no_swizzle = false; - break; - case CHIP_RV710: - case CHIP_RV740: - default: - force_no_swizzle = true; - break; - } - - tmp = RREG32(MC_SHARED_CHMAP); - switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { - case 0: - case 1: - default: - /* default mapping */ - mc_shared_chremap = 0x00fac688; - break; - case 2: - case 3: - if (force_no_swizzle) - mc_shared_chremap = 0x00fac688; - else - mc_shared_chremap = 0x00bbc298; - break; - } - - if (rdev->family == CHIP_RV740) - tcp_chan_steer = 0x00ef2a60; - else - tcp_chan_steer = 0x00fac688; - - /* RV770 CE has special chremap setup */ - if (rdev->pdev->device == 0x944e) { - tcp_chan_steer = 0x00b08b08; - mc_shared_chremap = 0x00b08b08; - } - - WREG32(TCP_CHAN_STEER, tcp_chan_steer); - WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); -} - static void rv770_gpu_init(struct radeon_device *rdev) { int i, j, num_qd_pipes; @@ -785,8 +736,6 @@ static void rv770_gpu_init(struct radeon_device *rdev) WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); - rv770_program_channel_remap(rdev); - WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 411257676133..932383786642 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -36,17 +36,25 @@ #include <linux/cpu.h> #include <linux/pci.h> #include <linux/smp.h> +#include <linux/moduleparam.h> #include <asm/msr.h> #include <asm/processor.h> #define DRVNAME "coretemp" +/* + * force_tjmax only matters when TjMax can't be read from the CPU itself. + * When set, it replaces the driver's suboptimal heuristic. 
+ */ +static int force_tjmax; +module_param_named(tjmax, force_tjmax, int, 0444); +MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); + #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ #define NUM_REAL_CORES 16 /* Number of Real cores per cpu */ #define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ -#define MAX_THRESH_ATTRS 3 /* Maximum no of Threshold attrs */ -#define TOTAL_ATTRS (MAX_CORE_ATTRS + MAX_THRESH_ATTRS) +#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) #ifdef CONFIG_SMP @@ -69,8 +77,6 @@ * This value is passed as "id" field to rdmsr/wrmsr functions. * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS, * from where the temperature values should be read. - * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT, - * from where the thresholds are read. * @attr_size: Total number of pre-core attrs displayed in the sysfs. * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data. * Otherwise, temp_data holds coretemp data. @@ -79,13 +85,11 @@ struct temp_data { int temp; int ttarget; - int tmin; int tjmax; unsigned long last_updated; unsigned int cpu; u32 cpu_core_id; u32 status_reg; - u32 intrpt_reg; int attr_size; bool is_pkg_data; bool valid; @@ -143,19 +147,6 @@ static ssize_t show_crit_alarm(struct device *dev, return sprintf(buf, "%d\n", (eax >> 5) & 1); } -static ssize_t show_max_alarm(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - u32 eax, edx; - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - struct platform_data *pdata = dev_get_drvdata(dev); - struct temp_data *tdata = pdata->core_data[attr->index]; - - rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx); - - return sprintf(buf, "%d\n", !!(eax & THERM_STATUS_THRESHOLD1)); -} - static ssize_t show_tjmax(struct device *dev, struct device_attribute *devattr, char *buf) { @@ -174,83 +165,6 @@ static ssize_t show_ttarget(struct device *dev, return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget); } -static ssize_t store_ttarget(struct device *dev, - struct device_attribute *devattr, - const char *buf, size_t count) -{ - struct platform_data *pdata = dev_get_drvdata(dev); - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - struct temp_data *tdata = pdata->core_data[attr->index]; - u32 eax, edx; - unsigned long val; - int diff; - - if (strict_strtoul(buf, 10, &val)) - return -EINVAL; - - /* - * THERM_MASK_THRESHOLD1 is 7 bits wide. Values are entered in terms - * of milli degree celsius. 
Hence don't accept val > (127 * 1000) - */ - if (val > tdata->tjmax || val > 127000) - return -EINVAL; - - diff = (tdata->tjmax - val) / 1000; - - mutex_lock(&tdata->update_lock); - rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx); - eax = (eax & ~THERM_MASK_THRESHOLD1) | - (diff << THERM_SHIFT_THRESHOLD1); - wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx); - tdata->ttarget = val; - mutex_unlock(&tdata->update_lock); - - return count; -} - -static ssize_t show_tmin(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - struct platform_data *pdata = dev_get_drvdata(dev); - - return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tmin); -} - -static ssize_t store_tmin(struct device *dev, - struct device_attribute *devattr, - const char *buf, size_t count) -{ - struct platform_data *pdata = dev_get_drvdata(dev); - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - struct temp_data *tdata = pdata->core_data[attr->index]; - u32 eax, edx; - unsigned long val; - int diff; - - if (strict_strtoul(buf, 10, &val)) - return -EINVAL; - - /* - * THERM_MASK_THRESHOLD0 is 7 bits wide. Values are entered in terms - * of milli degree celsius. Hence don't accept val > (127 * 1000) - */ - if (val > tdata->tjmax || val > 127000) - return -EINVAL; - - diff = (tdata->tjmax - val) / 1000; - - mutex_lock(&tdata->update_lock); - rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx); - eax = (eax & ~THERM_MASK_THRESHOLD0) | - (diff << THERM_SHIFT_THRESHOLD0); - wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx); - tdata->tmin = val; - mutex_unlock(&tdata->update_lock); - - return count; -} - static ssize_t show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { @@ -374,7 +288,6 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) { - /* The 100C is default for both mobile and non mobile CPUs */ int err; u32 eax, edx; u32 val; @@ -385,7 +298,8 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) */ err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); if (err) { - dev_warn(dev, "Unable to read TjMax from CPU.\n"); + if (c->x86_model > 0xe && c->x86_model != 0x1c) + dev_warn(dev, "Unable to read TjMax from CPU %u\n", id); } else { val = (eax >> 16) & 0xff; /* @@ -393,11 +307,17 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) * will be used */ if (val) { - dev_info(dev, "TjMax is %d C.\n", val); + dev_dbg(dev, "TjMax is %d degrees C\n", val); return val * 1000; } } + if (force_tjmax) { + dev_notice(dev, "TjMax forced to %d degrees C by user\n", + force_tjmax); + return force_tjmax * 1000; + } + /* * An assumption is made for early CPUs and unreadable MSR. * NOTE: the calculated value may not be correct. 
@@ -414,21 +334,6 @@ static void __devinit get_ucode_rev_on_cpu(void *edx) rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx); } -static int get_pkg_tjmax(unsigned int cpu, struct device *dev) -{ - int err; - u32 eax, edx, val; - - err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); - if (!err) { - val = (eax >> 16) & 0xff; - if (val) - return val * 1000; - } - dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu); - return 100000; /* Default TjMax: 100 degree celsius */ -} - static int create_name_attr(struct platform_data *pdata, struct device *dev) { sysfs_attr_init(&pdata->name_attr.attr); @@ -442,19 +347,14 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev, int attr_no) { int err, i; - static ssize_t (*rd_ptr[TOTAL_ATTRS]) (struct device *dev, + static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev, struct device_attribute *devattr, char *buf) = { show_label, show_crit_alarm, show_temp, show_tjmax, - show_max_alarm, show_ttarget, show_tmin }; - static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev, - struct device_attribute *devattr, const char *buf, - size_t count) = { NULL, NULL, NULL, NULL, NULL, - store_ttarget, store_tmin }; - static const char *names[TOTAL_ATTRS] = { + show_ttarget }; + static const char *const names[TOTAL_ATTRS] = { "temp%d_label", "temp%d_crit_alarm", "temp%d_input", "temp%d_crit", - "temp%d_max_alarm", "temp%d_max", - "temp%d_max_hyst" }; + "temp%d_max" }; for (i = 0; i < tdata->attr_size; i++) { snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i], @@ -462,10 +362,6 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev, sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr); tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO; - if (rw_ptr[i]) { - tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR; - tdata->sd_attrs[i].dev_attr.store = rw_ptr[i]; - } tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; tdata->sd_attrs[i].index = attr_no; err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr); @@ -481,9 +377,9 @@ exit_free: } -static int __devinit chk_ucode_version(struct platform_device *pdev) +static int __cpuinit chk_ucode_version(unsigned int cpu) { - struct cpuinfo_x86 *c = &cpu_data(pdev->id); + struct cpuinfo_x86 *c = &cpu_data(cpu); int err; u32 edx; @@ -494,17 +390,15 @@ static int __devinit chk_ucode_version(struct platform_device *pdev) */ if (c->x86_model == 0xe && c->x86_mask < 0xc) { /* check for microcode update */ - err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu, + err = smp_call_function_single(cpu, get_ucode_rev_on_cpu, &edx, 1); if (err) { - dev_err(&pdev->dev, - "Cannot determine microcode revision of " - "CPU#%u (%d)!\n", pdev->id, err); + pr_err("Cannot determine microcode revision of " + "CPU#%u (%d)!\n", cpu, err); return -ENODEV; } else if (edx < 0x39) { - dev_err(&pdev->dev, - "Errata AE18 not fixed, update BIOS or " - "microcode of the CPU!\n"); + pr_err("Errata AE18 not fixed, update BIOS or " + "microcode of the CPU!\n"); return -ENODEV; } } @@ -538,8 +432,6 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag) tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS : MSR_IA32_THERM_STATUS; - tdata->intrpt_reg = pkg_flag ? 
MSR_IA32_PACKAGE_THERM_INTERRUPT : - MSR_IA32_THERM_INTERRUPT; tdata->is_pkg_data = pkg_flag; tdata->cpu = cpu; tdata->cpu_core_id = TO_CORE_ID(cpu); @@ -548,11 +440,11 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag) return tdata; } -static int create_core_data(struct platform_data *pdata, - struct platform_device *pdev, +static int create_core_data(struct platform_device *pdev, unsigned int cpu, int pkg_flag) { struct temp_data *tdata; + struct platform_data *pdata = platform_get_drvdata(pdev); struct cpuinfo_x86 *c = &cpu_data(cpu); u32 eax, edx; int err, attr_no; @@ -588,25 +480,21 @@ static int create_core_data(struct platform_data *pdata, goto exit_free; /* We can access status register. Get Critical Temperature */ - if (pkg_flag) - tdata->tjmax = get_pkg_tjmax(pdev->id, &pdev->dev); - else - tdata->tjmax = get_tjmax(c, cpu, &pdev->dev); + tdata->tjmax = get_tjmax(c, cpu, &pdev->dev); /* - * Test if we can access the intrpt register. If so, increase the - * 'size' enough to have ttarget/tmin/max_alarm interfaces. - * Initialize ttarget with bits 16:22 of MSR_IA32_THERM_INTERRUPT + * Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET. + * The target temperature is available on older CPUs but not in this + * register. Atoms don't have the register at all. */ - err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx); - if (!err) { - tdata->attr_size += MAX_THRESH_ATTRS; - tdata->tmin = tdata->tjmax - - ((eax & THERM_MASK_THRESHOLD0) >> - THERM_SHIFT_THRESHOLD0) * 1000; - tdata->ttarget = tdata->tjmax - - ((eax & THERM_MASK_THRESHOLD1) >> - THERM_SHIFT_THRESHOLD1) * 1000; + if (c->x86_model > 0xe && c->x86_model != 0x1c) { + err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, + &eax, &edx); + if (!err) { + tdata->ttarget + = tdata->tjmax - ((eax >> 8) & 0xff) * 1000; + tdata->attr_size++; + } } pdata->core_data[attr_no] = tdata; @@ -618,22 +506,20 @@ static int create_core_data(struct platform_data *pdata, return 0; exit_free: + pdata->core_data[attr_no] = NULL; kfree(tdata); return err; } static void coretemp_add_core(unsigned int cpu, int pkg_flag) { - struct platform_data *pdata; struct platform_device *pdev = coretemp_get_pdev(cpu); int err; if (!pdev) return; - pdata = platform_get_drvdata(pdev); - - err = create_core_data(pdata, pdev, cpu, pkg_flag); + err = create_core_data(pdev, cpu, pkg_flag); if (err) dev_err(&pdev->dev, "Adding Core %u failed\n", cpu); } @@ -657,11 +543,6 @@ static int __devinit coretemp_probe(struct platform_device *pdev) struct platform_data *pdata; int err; - /* Check the microcode version of the CPU */ - err = chk_ucode_version(pdev); - if (err) - return err; - /* Initialize the per-package data structures */ pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL); if (!pdata) @@ -671,7 +552,7 @@ static int __devinit coretemp_probe(struct platform_device *pdev) if (err) goto exit_free; - pdata->phys_proc_id = TO_PHYS_ID(pdev->id); + pdata->phys_proc_id = pdev->id; platform_set_drvdata(pdev, pdata); pdata->hwmon_dev = hwmon_device_register(&pdev->dev); @@ -723,7 +604,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu) mutex_lock(&pdev_list_mutex); - pdev = platform_device_alloc(DRVNAME, cpu); + pdev = platform_device_alloc(DRVNAME, TO_PHYS_ID(cpu)); if (!pdev) { err = -ENOMEM; pr_err("Device allocation failed\n"); @@ -743,7 +624,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu) } pdev_entry->pdev = pdev; - pdev_entry->phys_proc_id = TO_PHYS_ID(cpu); + pdev_entry->phys_proc_id = 
pdev->id; list_add_tail(&pdev_entry->list, &pdev_list); mutex_unlock(&pdev_list_mutex); @@ -804,6 +685,10 @@ static void __cpuinit get_core_online(unsigned int cpu) return; if (!pdev) { + /* Check the microcode version of the CPU */ + if (chk_ucode_version(cpu)) + return; + /* * Alright, we have DTS support. * We are bringing the _first_ core in this pkg diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c index 257957c69d92..4f7c3fc40a89 100644 --- a/drivers/hwmon/ds620.c +++ b/drivers/hwmon/ds620.c @@ -72,7 +72,7 @@ struct ds620_data { char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ - u16 temp[3]; /* Register values, word */ + s16 temp[3]; /* Register values, word */ }; /* diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c index 17cf1ab95521..8c2844e5691c 100644 --- a/drivers/hwmon/w83791d.c +++ b/drivers/hwmon/w83791d.c @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_client *client, struct i2c_board_info *info); static int w83791d_remove(struct i2c_client *client); -static int w83791d_read(struct i2c_client *client, u8 register); -static int w83791d_write(struct i2c_client *client, u8 register, u8 value); +static int w83791d_read(struct i2c_client *client, u8 reg); +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value); static struct w83791d_data *w83791d_update_device(struct device *dev); #ifdef DEBUG diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 274798068a54..16f69be820c7 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -435,7 +435,12 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq) if (!(rq->cmd_flags & REQ_FLUSH)) return BLKPREP_OK; - cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); + if (rq->special) { + cmd = rq->special; + memset(cmd, 0, sizeof(*cmd)); + } else { + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); + } /* FIXME: map struct ide_taskfile on rq->cmd[] */ BUG_ON(cmd == NULL); diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 17bf9d95463c..6cd642aaa4de 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -287,7 +287,7 @@ void __free_ep(struct kref *kref) if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); dst_release(ep->dst); - l2t_release(L2DATA(ep->com.tdev), ep->l2t); + l2t_release(ep->com.tdev, ep->l2t); } kfree(ep); } @@ -1178,7 +1178,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) release_tid(ep->com.tdev, GET_TID(rpl), NULL); cxgb3_free_atid(ep->com.tdev, ep->atid); dst_release(ep->dst); - l2t_release(L2DATA(ep->com.tdev), ep->l2t); + l2t_release(ep->com.tdev, ep->l2t); put_ep(&ep->com); return CPL_RET_BUF_DONE; } @@ -1377,7 +1377,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) if (!child_ep) { printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", __func__); - l2t_release(L2DATA(tdev), l2t); + l2t_release(tdev, l2t); dst_release(dst); goto reject; } @@ -1956,7 +1956,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) if (!err) goto out; - l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t); + l2t_release(h->rdev.t3cdev_p, ep->l2t); fail4: dst_release(ep->dst); fail3: @@ -2127,7 +2127,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new, l2t); dst_hold(new); - 
l2t_release(L2DATA(ep->com.tdev), ep->l2t); + l2t_release(ep->com.tdev, ep->l2t); ep->l2t = l2t; dst_release(old); ep->dst = new; diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 0dc97ec15c28..9dea71849f40 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -1124,11 +1124,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, for (i = 0; i < 8; i++) __set_bit(BTN_0 + i, input_dev->keybit); - if (wacom_wac->features.type != WACOM_21UX2) { - input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); - input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); - } - + input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); + input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 49da55c1528a..8c2a000cf3f5 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1698,6 +1698,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } ti->num_flush_requests = 1; + ti->discard_zeroes_data_unsupported = 1; + return 0; bad: diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 89f73ca22cfa..f84c08029b21 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -81,8 +81,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags> */ if (!strcasecmp(arg_name, "corrupt_bio_byte")) { - if (!argc) + if (!argc) { ti->error = "Feature corrupt_bio_byte requires parameters"; + return -EINVAL; + } r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error); if (r) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index a002dd85db1e..86df8b2cf927 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -449,7 +449,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv, rs->ti->error = "write_mostly option is only valid for RAID1"; return -EINVAL; } - if (value > rs->md.raid_disks) { + if (value >= rs->md.raid_disks) { rs->ti->error = "Invalid write_mostly drive index given"; return -EINVAL; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 986b8754bb08..bc04518e9d8b 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1238,14 +1238,15 @@ static void dm_table_set_integrity(struct dm_table *t) return; template_disk = dm_table_get_integrity_disk(t, true); - if (!template_disk && - blk_integrity_is_initialized(dm_disk(t->md))) { + if (template_disk) + blk_integrity_register(dm_disk(t->md), + blk_get_integrity(template_disk)); + else if (blk_integrity_is_initialized(dm_disk(t->md))) DMWARN("%s: device no longer has a valid integrity profile", dm_device_name(t->md)); - return; - } - blk_integrity_register(dm_disk(t->md), - blk_get_integrity(template_disk)); + else + DMWARN("%s: unable to establish an integrity profile", + dm_device_name(t->md)); } static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, @@ -1282,6 +1283,22 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush) return 0; } +static bool dm_table_discard_zeroes_data(struct dm_table *t) +{ + struct dm_target *ti; + unsigned i = 0; + + /* Ensure that all targets supports discard_zeroes_data. 
*/ + while (i < dm_table_get_num_targets(t)) { + ti = dm_table_get_target(t, i++); + + if (ti->discard_zeroes_data_unsupported) + return 0; + } + + return 1; +} + void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { @@ -1304,6 +1321,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, } blk_queue_flush(q, flush); + if (!dm_table_discard_zeroes_data(t)) + q->limits.discard_zeroes_data = 0; + dm_table_set_integrity(t); /* diff --git a/drivers/md/md.c b/drivers/md/md.c index 5404b2295820..5c95ccb59500 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -61,6 +61,11 @@ static void autostart_arrays(int part); #endif +/* pers_list is a list of registered personalities protected + * by pers_lock. + * pers_lock does extra service to protect accesses to + * mddev->thread when the mutex cannot be held. + */ static LIST_HEAD(pers_list); static DEFINE_SPINLOCK(pers_lock); @@ -739,7 +744,12 @@ static void mddev_unlock(mddev_t * mddev) } else mutex_unlock(&mddev->reconfig_mutex); + /* was we've dropped the mutex we need a spinlock to + * make sur the thread doesn't disappear + */ + spin_lock(&pers_lock); md_wakeup_thread(mddev->thread); + spin_unlock(&pers_lock); } static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) @@ -6429,11 +6439,18 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, return thread; } -void md_unregister_thread(mdk_thread_t *thread) +void md_unregister_thread(mdk_thread_t **threadp) { + mdk_thread_t *thread = *threadp; if (!thread) return; dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); + /* Locking ensures that mddev_unlock does not wake_up a + * non-existent thread + */ + spin_lock(&pers_lock); + *threadp = NULL; + spin_unlock(&pers_lock); kthread_stop(thread->tsk); kfree(thread); @@ -7340,8 +7357,7 @@ static void reap_sync_thread(mddev_t *mddev) mdk_rdev_t *rdev; /* resync has finished, collect result */ - md_unregister_thread(mddev->sync_thread); - mddev->sync_thread = NULL; + md_unregister_thread(&mddev->sync_thread); if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { /* success...*/ diff --git a/drivers/md/md.h b/drivers/md/md.h index 1e586bb4452e..0a309dc29b45 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -560,7 +560,7 @@ extern int register_md_personality(struct mdk_personality *p); extern int unregister_md_personality(struct mdk_personality *p); extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev), mddev_t *mddev, const char *name); -extern void md_unregister_thread(mdk_thread_t *thread); +extern void md_unregister_thread(mdk_thread_t **threadp); extern void md_wakeup_thread(mdk_thread_t *thread); extern void md_check_recovery(mddev_t *mddev); extern void md_write_start(mddev_t *mddev, struct bio *bi); diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 3535c23af288..d5b5fb300171 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -514,8 +514,7 @@ static int multipath_stop (mddev_t *mddev) { multipath_conf_t *conf = mddev->private; - md_unregister_thread(mddev->thread); - mddev->thread = NULL; + md_unregister_thread(&mddev->thread); blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ mempool_destroy(conf->pool); kfree(conf->multipaths); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f4622dd8fc59..d9587dffe533 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2562,8 +2562,7 @@ static int 
stop(mddev_t *mddev) raise_barrier(conf); lower_barrier(conf); - md_unregister_thread(mddev->thread); - mddev->thread = NULL; + md_unregister_thread(&mddev->thread); if (conf->r1bio_pool) mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index d7a8468ddeab..0cd9672cf9cb 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2955,7 +2955,7 @@ static int run(mddev_t *mddev) return 0; out_free_conf: - md_unregister_thread(mddev->thread); + md_unregister_thread(&mddev->thread); if (conf->r10bio_pool) mempool_destroy(conf->r10bio_pool); safe_put_page(conf->tmppage); @@ -2973,8 +2973,7 @@ static int stop(mddev_t *mddev) raise_barrier(conf, 0); lower_barrier(conf); - md_unregister_thread(mddev->thread); - mddev->thread = NULL; + md_unregister_thread(&mddev->thread); blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ if (conf->r10bio_pool) mempool_destroy(conf->r10bio_pool); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 43709fa6b6df..ac5e8b57e50f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4941,8 +4941,7 @@ static int run(mddev_t *mddev) return 0; abort: - md_unregister_thread(mddev->thread); - mddev->thread = NULL; + md_unregister_thread(&mddev->thread); if (conf) { print_raid5_conf(conf); free_conf(conf); @@ -4956,8 +4955,7 @@ static int stop(mddev_t *mddev) { raid5_conf_t *conf = mddev->private; - md_unregister_thread(mddev->thread); - mddev->thread = NULL; + md_unregister_thread(&mddev->thread); if (mddev->queue) mddev->queue->backing_dev_info.congested_fn = NULL; free_conf(conf); diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c index b5ef36222440..b3a5ecdb33ac 100644 --- a/drivers/media/video/omap/omap_vout.c +++ b/drivers/media/video/omap/omap_vout.c @@ -2194,19 +2194,6 @@ static int __init omap_vout_probe(struct platform_device *pdev) "'%s' Display already enabled\n", def_display->name); } - /* set the update mode */ - if (def_display->caps & - OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { - if (dssdrv->enable_te) - dssdrv->enable_te(def_display, 0); - if (dssdrv->set_update_mode) - dssdrv->set_update_mode(def_display, - OMAP_DSS_UPDATE_MANUAL); - } else { - if (dssdrv->set_update_mode) - dssdrv->set_update_mode(def_display, - OMAP_DSS_UPDATE_AUTO); - } } } diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c index 9d3459de04b2..80796eb0c53e 100644 --- a/drivers/media/video/omap3isp/ispccdc.c +++ b/drivers/media/video/omap3isp/ispccdc.c @@ -31,6 +31,7 @@ #include <linux/dma-mapping.h> #include <linux/mm.h> #include <linux/sched.h> +#include <linux/slab.h> #include <media/v4l2-event.h> #include "isp.h" diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c index d29f9c2d0854..e4100b1f68df 100644 --- a/drivers/media/video/uvc/uvc_driver.c +++ b/drivers/media/video/uvc/uvc_driver.c @@ -1961,7 +1961,7 @@ static int __uvc_resume(struct usb_interface *intf, int reset) list_for_each_entry(stream, &dev->streams, list) { if (stream->intf == intf) - return uvc_video_resume(stream); + return uvc_video_resume(stream, reset); } uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface " diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c index 48fea373c25a..29e239911d0e 100644 --- a/drivers/media/video/uvc/uvc_entity.c +++ b/drivers/media/video/uvc/uvc_entity.c @@ -49,7 +49,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain 
*chain, if (remote == NULL) return -EINVAL; - source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING) + source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING) ? (remote->vdev ? &remote->vdev->entity : NULL) : &remote->subdev.entity; if (source == NULL) diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c index 8244167c8915..ffd1158628b6 100644 --- a/drivers/media/video/uvc/uvc_video.c +++ b/drivers/media/video/uvc/uvc_video.c @@ -1104,10 +1104,18 @@ int uvc_video_suspend(struct uvc_streaming *stream) * buffers, making sure userspace applications are notified of the problem * instead of waiting forever. */ -int uvc_video_resume(struct uvc_streaming *stream) +int uvc_video_resume(struct uvc_streaming *stream, int reset) { int ret; + /* If the bus has been reset on resume, set the alternate setting to 0. + * This should be the default value, but some devices crash or otherwise + * misbehave if they don't receive a SET_INTERFACE request before any + * other video control request. + */ + if (reset) + usb_set_interface(stream->dev->udev, stream->intfnum, 0); + stream->frozen = 0; ret = uvc_commit_video(stream, &stream->ctrl); diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h index df32a43ca86a..cbdd49bf8b67 100644 --- a/drivers/media/video/uvc/uvcvideo.h +++ b/drivers/media/video/uvc/uvcvideo.h @@ -638,7 +638,7 @@ extern void uvc_mc_cleanup_entity(struct uvc_entity *entity); /* Video */ extern int uvc_video_init(struct uvc_streaming *stream); extern int uvc_video_suspend(struct uvc_streaming *stream); -extern int uvc_video_resume(struct uvc_streaming *stream); +extern int uvc_video_resume(struct uvc_streaming *stream, int reset); extern int uvc_video_enable(struct uvc_streaming *stream, int enable); extern int uvc_probe_video(struct uvc_streaming *stream, struct uvc_streaming_control *probe); diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c index 06f14008b346..d72156517726 100644 --- a/drivers/media/video/v4l2-dev.c +++ b/drivers/media/video/v4l2-dev.c @@ -173,6 +173,17 @@ static void v4l2_device_release(struct device *cd) media_device_unregister_entity(&vdev->entity); #endif + /* Do not call v4l2_device_put if there is no release callback set. + * Drivers that have no v4l2_device release callback might free the + * v4l2_dev instance in the video_device release callback below, so we + * must perform this check here. + * + * TODO: In the long run all drivers that use v4l2_device should use the + * v4l2_device release callback. This check will then be unnecessary. + */ + if (v4l2_dev->release == NULL) + v4l2_dev = NULL; + /* Release video_device and perform other cleanups as needed. 
*/ vdev->release(vdev); diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c index c72856c41434..e6a2c3b302d4 100644 --- a/drivers/media/video/v4l2-device.c +++ b/drivers/media/video/v4l2-device.c @@ -38,6 +38,7 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev) mutex_init(&v4l2_dev->ioctl_lock); v4l2_prio_init(&v4l2_dev->prio); kref_init(&v4l2_dev->ref); + get_device(dev); v4l2_dev->dev = dev; if (dev == NULL) { /* If dev == NULL, then name must be filled in by the caller */ @@ -93,6 +94,7 @@ void v4l2_device_disconnect(struct v4l2_device *v4l2_dev) if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev) dev_set_drvdata(v4l2_dev->dev, NULL); + put_device(v4l2_dev->dev); v4l2_dev->dev = NULL; } EXPORT_SYMBOL_GPL(v4l2_device_disconnect); diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c index 21131c7b0f1e..563654c9b19e 100644 --- a/drivers/mfd/jz4740-adc.c +++ b/drivers/mfd/jz4740-adc.c @@ -273,7 +273,7 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev) ct->regs.ack = JZ_REG_ADC_STATUS; ct->chip.irq_mask = irq_gc_mask_set_bit; ct->chip.irq_unmask = irq_gc_mask_clr_bit; - ct->chip.irq_ack = irq_gc_ack; + ct->chip.irq_ack = irq_gc_ack_set_bit; irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL); diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c index b928bc14e97b..8b51cd62d067 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.c +++ b/drivers/misc/lis3lv02d/lis3lv02d.c @@ -375,12 +375,14 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3) * both have been read. So the value read will always be correct. * Set BOOT bit to refresh factory tuning values. */ - lis3->read(lis3, CTRL_REG2, ®); - if (lis3->whoami == WAI_12B) - reg |= CTRL2_BDU | CTRL2_BOOT; - else - reg |= CTRL2_BOOT_8B; - lis3->write(lis3, CTRL_REG2, reg); + if (lis3->pdata) { + lis3->read(lis3, CTRL_REG2, ®); + if (lis3->whoami == WAI_12B) + reg |= CTRL2_BDU | CTRL2_BOOT; + else + reg |= CTRL2_BOOT_8B; + lis3->write(lis3, CTRL_REG2, reg); + } /* LIS3 power on delay is quite long */ msleep(lis3->pwron_delay / lis3lv02d_get_odr()); diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index a047eb973e3b..47b928ed08f8 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2168,7 +2168,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work) } re_arm: - queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); + if (!bond->kill_timers) + queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); out: read_unlock(&bond->lock); } diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 7f8b20a34ee3..d4fbd2e62616 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1440,7 +1440,8 @@ void bond_alb_monitor(struct work_struct *work) } re_arm: - queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); + if (!bond->kill_timers) + queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); out: read_unlock(&bond->lock); } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 1dcb07ce5263..6191e6337284 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -774,6 +774,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) read_lock(&bond->lock); + if (bond->kill_timers) + goto out; + /* rejoin all groups on bond device */ __bond_resend_igmp_join_requests(bond->dev); @@ -787,9 +790,9 @@ static 
void bond_resend_igmp_join_requests(struct bonding *bond) __bond_resend_igmp_join_requests(vlan_dev); } - if (--bond->igmp_retrans > 0) + if ((--bond->igmp_retrans > 0) && !bond->kill_timers) queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); - +out: read_unlock(&bond->lock); } @@ -2535,7 +2538,7 @@ void bond_mii_monitor(struct work_struct *work) } re_arm: - if (bond->params.miimon) + if (bond->params.miimon && !bond->kill_timers) queue_delayed_work(bond->wq, &bond->mii_work, msecs_to_jiffies(bond->params.miimon)); out: @@ -2883,7 +2886,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work) } re_arm: - if (bond->params.arp_interval) + if (bond->params.arp_interval && !bond->kill_timers) queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); out: read_unlock(&bond->lock); @@ -3151,7 +3154,7 @@ void bond_activebackup_arp_mon(struct work_struct *work) bond_ab_arp_probe(bond); re_arm: - if (bond->params.arp_interval) + if (bond->params.arp_interval && !bond->kill_timers) queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); out: read_unlock(&bond->lock); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 0b9bd551580b..51bd7485ab18 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -2123,6 +2123,7 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) break; case DCB_CAP_ATTR_DCBX: *cap = BNX2X_DCBX_CAPS; + break; default: rval = -EINVAL; break; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 28bde1610ffb..6486ab8c8fc8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -4937,7 +4937,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) int igu_seg_id; int port = BP_PORT(bp); int func = BP_FUNC(bp); - int reg_offset; + int reg_offset, reg_offset_en5; u64 section; int index; struct hc_sp_status_block_data sp_sb_data; @@ -4960,6 +4960,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); + reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0); for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { int sindex; /* take care of sig[0]..sig[4] */ @@ -4974,7 +4976,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) * and not 16 between the different groups */ bp->attn_group[index].sig[4] = REG_RD(bp, - reg_offset + 0x10 + 0x4*index); + reg_offset_en5 + 0x4*index); else bp->attn_group[index].sig[4] = 0; } @@ -7619,8 +7621,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; u8 *mac_addr = bp->dev->dev_addr; u32 val; + u16 pmc; + /* The mac address is written to entries 1-4 to - preserve entry 0 which is used by the PMF */ + * preserve entry 0 which is used by the PMF + */ u8 entry = (BP_VN(bp) + 1)*8; val = (mac_addr[0] << 8) | mac_addr[1]; @@ -7630,6 +7635,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) (mac_addr[4] << 8) | mac_addr[5]; EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); + /* Enable the PME and clear the status */ + pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc); + pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; + pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc); + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; } else diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 750e8445dac4..fc7bd0f23c0b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -1384,6 +1384,18 @@ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ #define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 #define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 +/* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped + * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC + * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] + * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 + * parity; [31-10] Reserved; */ +#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688 +/* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped + * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC + * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] + * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 + * parity; [31-10] Reserved; */ +#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0 /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu 128 bit vector */ #define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c index 805076c54f1b..da5a5d9b8aff 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c @@ -1146,12 +1146,14 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) if (te && te->ctx && te->client && te->client->redirect) { update_tcb = te->client->redirect(te->ctx, old, new, e); if (update_tcb) { + rcu_read_lock(); l2t_hold(L2DATA(tdev), e); + rcu_read_unlock(); set_l2t_ix(tdev, tid, e); } } } - l2t_release(L2DATA(tdev), e); + l2t_release(tdev, e); } /* @@ -1264,7 +1266,7 @@ int cxgb3_offload_activate(struct adapter *adapter) goto out_free; err = -ENOMEM; - L2DATA(dev) = t3_init_l2t(l2t_capacity); + RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity)); if (!L2DATA(dev)) goto out_free; @@ -1298,16 +1300,24 @@ int cxgb3_offload_activate(struct adapter *adapter) out_free_l2t: t3_free_l2t(L2DATA(dev)); - L2DATA(dev) = NULL; + rcu_assign_pointer(dev->l2opt, NULL); out_free: kfree(t); return err; } +static void clean_l2_data(struct rcu_head *head) +{ + struct l2t_data *d = container_of(head, struct l2t_data, rcu_head); + t3_free_l2t(d); +} + + void cxgb3_offload_deactivate(struct adapter *adapter) { struct t3cdev *tdev = &adapter->tdev; struct t3c_data *t = T3C_DATA(tdev); + struct l2t_data *d; remove_adapter(adapter); if 
(list_empty(&adapter_list)) @@ -1315,8 +1325,11 @@ void cxgb3_offload_deactivate(struct adapter *adapter) free_tid_maps(&t->tid_maps); T3C_DATA(tdev) = NULL; - t3_free_l2t(L2DATA(tdev)); - L2DATA(tdev) = NULL; + rcu_read_lock(); + d = L2DATA(tdev); + rcu_read_unlock(); + rcu_assign_pointer(tdev->l2opt, NULL); + call_rcu(&d->rcu_head, clean_l2_data); if (t->nofail_skb) kfree_skb(t->nofail_skb); kfree(t); diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c index f452c4003253..41540978a173 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c @@ -300,14 +300,21 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, struct net_device *dev) { - struct l2t_entry *e; - struct l2t_data *d = L2DATA(cdev); + struct l2t_entry *e = NULL; + struct l2t_data *d; + int hash; u32 addr = *(u32 *) neigh->primary_key; int ifidx = neigh->dev->ifindex; - int hash = arp_hash(addr, ifidx, d); struct port_info *p = netdev_priv(dev); int smt_idx = p->port_id; + rcu_read_lock(); + d = L2DATA(cdev); + if (!d) + goto done_rcu; + + hash = arp_hash(addr, ifidx, d); + write_lock_bh(&d->lock); for (e = d->l2tab[hash].first; e; e = e->next) if (e->addr == addr && e->ifindex == ifidx && @@ -338,6 +345,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, } done: write_unlock_bh(&d->lock); +done_rcu: + rcu_read_unlock(); return e; } diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h index 7a12d52ed4fc..c5f54796e2cb 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h @@ -76,6 +76,7 @@ struct l2t_data { atomic_t nfree; /* number of free entries */ rwlock_t lock; struct l2t_entry l2tab[0]; + struct rcu_head rcu_head; /* to handle rcu cleanup */ }; typedef void (*arp_failure_handler_func)(struct t3cdev * dev, @@ -99,7 +100,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb, /* * Getting to the L2 data from an offload device. 
*/ -#define L2DATA(dev) ((dev)->l2opt) +#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt)) #define W_TCB_L2T_IX 0 #define S_TCB_L2T_IX 7 @@ -126,15 +127,22 @@ static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb, return t3_l2t_send_slow(dev, skb, e); } -static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e) +static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e) { - if (atomic_dec_and_test(&e->refcnt)) + struct l2t_data *d; + + rcu_read_lock(); + d = L2DATA(t); + + if (atomic_dec_and_test(&e->refcnt) && d) t3_l2e_free(d, e); + + rcu_read_unlock(); } static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) { - if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ + if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ atomic_dec(&d->nfree); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 40b395f932cf..4c8f42afa3c6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3715,6 +3715,9 @@ static int __devinit init_one(struct pci_dev *pdev, setup_debugfs(adapter); } + /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ + pdev->needs_freset = 1; + if (is_offload(adapter)) attach_ulds(adapter); diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 72b84de48756..4da972eaabb4 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -636,8 +636,8 @@ static int ibmveth_open(struct net_device *netdev) netdev_err(netdev, "unable to request irq 0x%x, rc %d\n", netdev->irq, rc); do { - rc = h_free_logical_lan(adapter->vdev->unit_address); - } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); + lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); + } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); goto err_out; } diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 5dc61b4ef3cd..b89f3a684aec 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -1198,6 +1198,8 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), &hw->reg->INT_EN); pch_gbe_stop_receive(adapter); + int_st |= ioread32(&hw->reg->INT_ST); + int_st = int_st & ioread32(&hw->reg->INT_EN); } if (int_st & PCH_GBE_INT_RX_DMA_ERR) adapter->stats.intr_rx_dma_err_count++; @@ -1217,14 +1219,11 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) /* Set Pause packet */ pch_gbe_mac_set_pause_packet(hw); } - if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) - == 0) { - return IRQ_HANDLED; - } } /* When request status is Receive interruption */ - if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) { + if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) || + (adapter->rx_stop_flag == true)) { if (likely(napi_schedule_prep(&adapter->napi))) { /* Enable only Rx Descriptor empty */ atomic_inc(&adapter->irq_sem); @@ -1384,7 +1383,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, struct sk_buff *skb; unsigned int i; unsigned int cleaned_count = 0; - bool cleaned = false; + bool cleaned = true; pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); @@ -1395,7 +1394,6 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { 
pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); - cleaned = true; buffer_info = &tx_ring->buffer_info[i]; skb = buffer_info->skb; @@ -1438,8 +1436,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); /* weight of a sort for tx, to avoid endless transmit cleanup */ - if (cleaned_count++ == PCH_GBE_TX_WEIGHT) + if (cleaned_count++ == PCH_GBE_TX_WEIGHT) { + cleaned = false; break; + } } pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", cleaned_count); @@ -2167,7 +2167,6 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) { struct pch_gbe_adapter *adapter = container_of(napi, struct pch_gbe_adapter, napi); - struct net_device *netdev = adapter->netdev; int work_done = 0; bool poll_end_flag = false; bool cleaned = false; @@ -2175,33 +2174,32 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) pr_debug("budget : %d\n", budget); - /* Keep link state information with original netdev */ - if (!netif_carrier_ok(netdev)) { + pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); + cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); + + if (!cleaned) + work_done = budget; + /* If no Tx and not enough Rx work done, + * exit the polling mode + */ + if (work_done < budget) poll_end_flag = true; - } else { - pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); + + if (poll_end_flag) { + napi_complete(napi); + if (adapter->rx_stop_flag) { + adapter->rx_stop_flag = false; + pch_gbe_start_receive(&adapter->hw); + } + pch_gbe_irq_enable(adapter); + } else if (adapter->rx_stop_flag) { adapter->rx_stop_flag = false; pch_gbe_start_receive(&adapter->hw); int_en = ioread32(&adapter->hw.reg->INT_EN); iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), - &adapter->hw.reg->INT_EN); + &adapter->hw.reg->INT_EN); } - cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); - - if (cleaned) - work_done = budget; - /* If no Tx and not enough Rx work done, - * exit the polling mode - */ - if ((work_done < budget) || !netif_running(netdev)) - poll_end_flag = true; - } - - if (poll_end_flag) { - napi_complete(napi); - pch_gbe_irq_enable(adapter); - } pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", poll_end_flag, work_done, budget); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index b100c90e8507..24cf942e1316 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -239,7 +239,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) dest = macvlan_hash_lookup(port, eth->h_dest); if (dest && dest->mode == MACVLAN_MODE_BRIDGE) { /* send to lowerdev first for its network taps */ - vlan->forward(vlan->lowerdev, skb); + dev_forward_skb(vlan->lowerdev, skb); return NET_XMIT_SUCCESS; } diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index be381c24c4b4..c588a162050f 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -686,7 +686,7 @@ static void decode_rxts(struct dp83640_private *dp83640, prune_rx_ts(dp83640); if (list_empty(&dp83640->rxpool)) { - pr_warning("dp83640: rx timestamp pool is empty\n"); + pr_debug("dp83640: rx timestamp pool is empty\n"); goto out; } rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); @@ -709,7 +709,7 @@ static void decode_txts(struct dp83640_private *dp83640, skb = skb_dequeue(&dp83640->tx_queue); if (!skb) { - pr_warning("dp83640: have timestamp but tx_queue empty\n"); + pr_debug("dp83640: have timestamp but tx_queue empty\n"); return; } ns = phy2txts(phy_txts); diff --git 
a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 0ca86f9ec4ed..182562952c79 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -327,12 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, xenvif_get(vif); rtnl_lock(); - if (netif_running(vif->dev)) - xenvif_up(vif); if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) dev_set_mtu(vif->dev, ETH_DATA_LEN); netdev_update_features(vif->dev); netif_carrier_on(vif->dev); + if (netif_running(vif->dev)) + xenvif_up(vif); rtnl_unlock(); return 0; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 4e84fd4a4312..e9651f0a8817 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; -enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE; +enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF; /* * The default CLS is used if arch didn't set CLS explicitly and not @@ -3568,10 +3568,14 @@ static int __init pci_setup(char *str) pci_hotplug_io_size = memparse(str + 9, &str); } else if (!strncmp(str, "hpmemsize=", 10)) { pci_hotplug_mem_size = memparse(str + 10, &str); + } else if (!strncmp(str, "pcie_bus_tune_off", 17)) { + pcie_bus_config = PCIE_BUS_TUNE_OFF; } else if (!strncmp(str, "pcie_bus_safe", 13)) { pcie_bus_config = PCIE_BUS_SAFE; } else if (!strncmp(str, "pcie_bus_perf", 13)) { pcie_bus_config = PCIE_BUS_PERFORMANCE; + } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) { + pcie_bus_config = PCIE_BUS_PEER2PEER; } else { printk(KERN_ERR "PCI: Unknown option `%s'\n", str); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index f3f94a5c068f..6ab6bd3df4b2 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1458,12 +1458,24 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data) */ void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) { - u8 smpss = mpss; + u8 smpss; if (!pci_is_pcie(bus->self)) return; + if (pcie_bus_config == PCIE_BUS_TUNE_OFF) + return; + + /* FIXME - Peer to peer DMA is possible, though the endpoint would need + * to be aware to the MPS of the destination. To work around this, + * simply force the MPS of the entire system to the smallest possible. + */ + if (pcie_bus_config == PCIE_BUS_PEER2PEER) + smpss = 0; + if (pcie_bus_config == PCIE_BUS_SAFE) { + smpss = mpss; + pcie_find_smpss(bus->self, &smpss); pci_walk_bus(bus, pcie_find_smpss, &smpss); } diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index cbde448f9947..eb3140ee821e 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -654,8 +654,8 @@ static struct io_subchannel_private console_priv; static int console_subchannel_in_use; /* - * Use tpi to get a pending interrupt, call the interrupt handler and - * return a pointer to the subchannel structure. + * Use cio_tpi to get a pending interrupt and call the interrupt handler. + * Return non-zero if an interrupt was processed, zero otherwise. */ static int cio_tpi(void) { @@ -667,6 +667,10 @@ static int cio_tpi(void) tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; if (tpi(NULL) != 1) return 0; + if (tpi_info->adapter_IO) { + do_adapter_IO(tpi_info->isc); + return 1; + } irb = (struct irb *)&S390_lowcore.irb; /* Store interrupt response block to lowcore. 
*/ if (tsch(tpi_info->schid, irb) != 0) diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index b7bd5b0cc7aa..3868ab2397c6 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -1800,10 +1800,12 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ switch (retval) { case SCSI_MLQUEUE_HOST_BUSY: twa_free_request_id(tw_dev, request_id); + twa_unmap_scsi_data(tw_dev, request_id); break; case 1: tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); + twa_unmap_scsi_data(tw_dev, request_id); SCpnt->result = (DID_ERROR << 16); done(SCpnt); retval = 0; diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 8d9dae89f065..3878b7395081 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -837,6 +837,7 @@ config SCSI_ISCI # (temporary): known alpha quality driver depends on EXPERIMENTAL select SCSI_SAS_LIBSAS + select SCSI_SAS_HOST_SMP ---help--- This driver supports the 6Gb/s SAS capabilities of the storage control unit found in the Intel(R) C600 series chipset. diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 3c08f5352b2d..6153a66a8a31 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -88,7 +88,7 @@ obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/ -obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/ +obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/ obj-$(CONFIG_SCSI_LPFC) += lpfc/ obj-$(CONFIG_SCSI_BFA_FC) += bfa/ obj-$(CONFIG_SCSI_PAS16) += pas16.o diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index e7d0d47b9185..e5f2d7d9002e 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1283,6 +1283,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced) kfree(aac->queues); aac->queues = NULL; free_irq(aac->pdev->irq, aac); + if (aac->msi) + pci_disable_msi(aac->pdev); kfree(aac->fsa_dev); aac->fsa_dev = NULL; quirks = aac_get_driver_ident(index)->quirks; diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index bd22041e2789..f58644850333 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -913,7 +913,7 @@ static void l2t_put(struct cxgbi_sock *csk) struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; if (csk->l2t) { - l2t_release(L2DATA(t3dev), csk->l2t); + l2t_release(t3dev, csk->l2t); csk->l2t = NULL; cxgbi_sock_put(csk); } diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index f84084bba2f0..16ad97df5ba6 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -1721,7 +1721,7 @@ static int sas_find_bcast_dev(struct domain_device *dev, list_for_each_entry(ch, &ex->children, siblings) { if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { res = sas_find_bcast_dev(ch, src_dev); - if (src_dev) + if (*src_dev) return res; } } @@ -1769,10 +1769,12 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent, sas_disable_routing(parent, phy->attached_sas_addr); } memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); - sas_port_delete_phy(phy->port, phy->phy); - if (phy->port->num_phys == 0) - sas_port_delete(phy->port); - phy->port = NULL; + if (phy->port) { + sas_port_delete_phy(phy->port, phy->phy); + if (phy->port->num_phys == 0) + sas_port_delete(phy->port); + phy->port = NULL; + } } static int 
sas_discover_bfs_by_root_level(struct domain_device *root, diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 646fc5263d50..8a7591f035e6 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1507,8 +1507,8 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) if (k != blocks_done) { qla_printk(KERN_WARNING, sp->fcport->vha->hw, - "unexpected tag values tag:lba=%x:%lx)\n", - e_ref_tag, lba_s); + "unexpected tag values tag:lba=%x:%llx)\n", + e_ref_tag, (unsigned long long)lba_s); return 1; } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 4cace3f20c04..1e69527f1e4e 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1328,10 +1328,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) qla2x00_sp_compl(ha, sp); } else { ctx = sp->ctx; - if (ctx->type == SRB_LOGIN_CMD || - ctx->type == SRB_LOGOUT_CMD) { - ctx->u.iocb_cmd->free(sp); - } else { + if (ctx->type == SRB_ELS_CMD_RPT || + ctx->type == SRB_ELS_CMD_HST || + ctx->type == SRB_CT_CMD) { struct fc_bsg_job *bsg_job = ctx->u.bsg_job; if (bsg_job->request->msgcode @@ -1343,6 +1342,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) kfree(sp->ctx); mempool_free(sp, ha->srb_mempool); + } else { + ctx->u.iocb_cmd->free(sp); } } } diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index d2407558773f..24cacff57786 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c @@ -825,6 +825,9 @@ static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) { struct device *dev = mspi->dev; + if (!(mspi->flags & SPI_CPM_MODE)) + return; + dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE); dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); cpm_muram_free(cpm_muram_offset(mspi->tx_bd)); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 8ac6542aedcd..fa594d604aca 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -786,9 +786,11 @@ static int __devinit spi_imx_probe(struct platform_device *pdev) int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); if (cs_gpio < 0) cs_gpio = mxc_platform_info->chipselect[i]; + + spi_imx->chipselect[i] = cs_gpio; if (cs_gpio < 0) continue; - spi_imx->chipselect[i] = cs_gpio; + ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME); if (ret) { while (i > 0) { diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 1d23f3831866..6a80749391db 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -50,6 +50,8 @@ #define PCH_RX_THOLD 7 #define PCH_RX_THOLD_MAX 15 +#define PCH_TX_THOLD 2 + #define PCH_MAX_BAUDRATE 5000000 #define PCH_MAX_FIFO_DEPTH 16 @@ -58,6 +60,7 @@ #define PCH_SLEEP_TIME 10 #define SSN_LOW 0x02U +#define SSN_HIGH 0x03U #define SSN_NO_CONTROL 0x00U #define PCH_MAX_CS 0xFF #define PCI_DEVICE_ID_GE_SPI 0x8816 @@ -316,16 +319,19 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, /* if transfer complete interrupt */ if (reg_spsr_val & SPSR_FI_BIT) { - if (tx_index < bpw_len) + if ((tx_index == bpw_len) && (rx_index == tx_index)) { + /* disable interrupts */ + pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); + + /* transfer is completed; + inform pch_spi_process_messages */ + data->transfer_complete = true; + data->transfer_active = false; + wake_up(&data->wait); + } else { dev_err(&data->master->dev, "%s : Transfer is not completed", __func__); - /* disable interrupts */ - 
pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); - - /* transfer is completed;inform pch_spi_process_messages */ - data->transfer_complete = true; - data->transfer_active = false; - wake_up(&data->wait); + } } } @@ -348,16 +354,26 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id) "%s returning due to suspend\n", __func__); return IRQ_NONE; } - if (data->use_dma) - return IRQ_NONE; io_remap_addr = data->io_remap_addr; spsr = io_remap_addr + PCH_SPSR; reg_spsr_val = ioread32(spsr); - if (reg_spsr_val & SPSR_ORF_BIT) - dev_err(&board_dat->pdev->dev, "%s Over run error", __func__); + if (reg_spsr_val & SPSR_ORF_BIT) { + dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__); + if (data->current_msg->complete != 0) { + data->transfer_complete = true; + data->current_msg->status = -EIO; + data->current_msg->complete(data->current_msg->context); + data->bcurrent_msg_processing = false; + data->current_msg = NULL; + data->cur_trans = NULL; + } + } + + if (data->use_dma) + return IRQ_NONE; /* Check if the interrupt is for SPI device */ if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { @@ -756,10 +772,6 @@ static void pch_spi_set_ir(struct pch_spi_data *data) wait_event_interruptible(data->wait, data->transfer_complete); - pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); - dev_dbg(&data->master->dev, - "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); - /* clear all interrupts */ pch_spi_writereg(data->master, PCH_SPSR, pch_spi_readreg(data->master, PCH_SPSR)); @@ -815,10 +827,11 @@ static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw) } } -static void pch_spi_start_transfer(struct pch_spi_data *data) +static int pch_spi_start_transfer(struct pch_spi_data *data) { struct pch_spi_dma_ctrl *dma; unsigned long flags; + int rtn; dma = &data->dma; @@ -833,19 +846,23 @@ static void pch_spi_start_transfer(struct pch_spi_data *data) initiating the transfer. 
*/ dev_dbg(&data->master->dev, "%s:waiting for transfer to get over\n", __func__); - wait_event_interruptible(data->wait, data->transfer_complete); + rtn = wait_event_interruptible_timeout(data->wait, + data->transfer_complete, + msecs_to_jiffies(2 * HZ)); dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent, DMA_FROM_DEVICE); + + dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent, + DMA_FROM_DEVICE); + memset(data->dma.tx_buf_virt, 0, PAGE_SIZE); + async_tx_ack(dma->desc_rx); async_tx_ack(dma->desc_tx); kfree(dma->sg_tx_p); kfree(dma->sg_rx_p); spin_lock_irqsave(&data->lock, flags); - pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); - dev_dbg(&data->master->dev, - "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); /* clear fifo threshold, disable interrupts, disable SPI transfer */ pch_spi_setclr_reg(data->master, PCH_SPCR, 0, @@ -858,6 +875,8 @@ static void pch_spi_start_transfer(struct pch_spi_data *data) pch_spi_clear_fifo(data->master); spin_unlock_irqrestore(&data->lock, flags); + + return rtn; } static void pch_dma_rx_complete(void *arg) @@ -1023,8 +1042,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) /* set receive fifo threshold and transmit fifo threshold */ pch_spi_setclr_reg(data->master, PCH_SPCR, ((size - 1) << SPCR_RFIC_FIELD) | - ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) << - SPCR_TFIC_FIELD), + (PCH_TX_THOLD << SPCR_TFIC_FIELD), MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS); spin_unlock_irqrestore(&data->lock, flags); @@ -1035,13 +1053,20 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) /* offset, length setting */ sg = dma->sg_rx_p; for (i = 0; i < num; i++, sg++) { - if (i == 0) { - sg->offset = 0; + if (i == (num - 2)) { + sg->offset = size * i; + sg->offset = sg->offset * (*bpw / 8); sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem, sg->offset); sg_dma_len(sg) = rem; + } else if (i == (num - 1)) { + sg->offset = size * (i - 1) + rem; + sg->offset = sg->offset * (*bpw / 8); + sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, + sg->offset); + sg_dma_len(sg) = size; } else { - sg->offset = rem + size * (i - 1); + sg->offset = size * i; sg->offset = sg->offset * (*bpw / 8); sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, sg->offset); @@ -1065,6 +1090,16 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) dma->desc_rx = desc_rx; /* TX */ + if (data->bpw_len > PCH_DMA_TRANS_SIZE) { + num = data->bpw_len / PCH_DMA_TRANS_SIZE; + size = PCH_DMA_TRANS_SIZE; + rem = 16; + } else { + num = 1; + size = data->bpw_len; + rem = data->bpw_len; + } + dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC); sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */ /* offset, length setting */ @@ -1162,6 +1197,7 @@ static void pch_spi_process_messages(struct work_struct *pwork) if (data->use_dma) pch_spi_request_dma(data, data->current_msg->spi->bits_per_word); + pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); do { /* If we are already processing a message get the next transfer structure from the message otherwise retrieve @@ -1184,7 +1220,8 @@ static void pch_spi_process_messages(struct work_struct *pwork) if (data->use_dma) { pch_spi_handle_dma(data, &bpw); - pch_spi_start_transfer(data); + if (!pch_spi_start_transfer(data)) + goto out; pch_spi_copy_rx_data_for_dma(data, bpw); } else { pch_spi_set_tx(data, &bpw); @@ -1222,6 +1259,8 @@ static void pch_spi_process_messages(struct work_struct *pwork) } while (data->cur_trans != NULL); +out: + 
pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH); if (data->use_dma) pch_spi_release_dma(data); } diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c index e0c2807b0970..181fa8158a8b 100644 --- a/drivers/zorro/zorro.c +++ b/drivers/zorro/zorro.c @@ -148,10 +148,10 @@ static int __init amiga_zorro_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, bus); - /* Register all devices */ pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n", zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s"); + /* First identify all devices ... */ for (i = 0; i < zorro_num_autocon; i++) { z = &zorro_autocon[i]; z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8); @@ -172,6 +172,11 @@ static int __init amiga_zorro_probe(struct platform_device *pdev) dev_set_name(&z->dev, "%02x", i); z->dev.parent = &bus->dev; z->dev.bus = &zorro_bus_type; + } + + /* ... then register them */ + for (i = 0; i < zorro_num_autocon; i++) { + z = &zorro_autocon[i]; error = device_register(&z->dev); if (error) { dev_err(&bus->dev, "Error registering device %s\n", |