Diffstat (limited to 'drivers/gpu/drm/msm/msm_gpu_devfreq.c')
-rw-r--r-- | drivers/gpu/drm/msm/msm_gpu_devfreq.c | 89 |
1 file changed, 48 insertions, 41 deletions
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index d232f74d2346..62405e980925 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -9,6 +9,7 @@
 
 #include <linux/devfreq.h>
 #include <linux/devfreq_cooling.h>
+#include <linux/units.h>
 
 /*
  * Power Management:
@@ -25,17 +26,6 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
 	 * to something that actually is in the opp table:
 	 */
 	opp = devfreq_recommended_opp(dev, freq, flags);
-
-	/*
-	 * If the GPU is idle, devfreq is not aware, so just ignore
-	 * it's requests
-	 */
-	if (gpu->devfreq.idle_freq) {
-		gpu->devfreq.idle_freq = *freq;
-		dev_pm_opp_put(opp);
-		return 0;
-	}
-
 	if (IS_ERR(opp))
 		return PTR_ERR(opp);
 
@@ -53,9 +43,6 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
 
 static unsigned long get_freq(struct msm_gpu *gpu)
 {
-	if (gpu->devfreq.idle_freq)
-		return gpu->devfreq.idle_freq;
-
 	if (gpu->funcs->gpu_get_freq)
 		return gpu->funcs->gpu_get_freq(gpu);
 
@@ -93,6 +80,7 @@ static struct devfreq_dev_profile msm_devfreq_profile = {
 	.get_cur_freq = msm_devfreq_get_cur_freq,
 };
 
+static void msm_devfreq_boost_work(struct kthread_work *work);
 static void msm_devfreq_idle_work(struct kthread_work *work);
 
 void msm_devfreq_init(struct msm_gpu *gpu)
@@ -103,6 +91,12 @@ void msm_devfreq_init(struct msm_gpu *gpu)
 	if (!gpu->funcs->gpu_busy)
 		return;
 
+	dev_pm_qos_add_request(&gpu->pdev->dev, &df->idle_freq,
+			       DEV_PM_QOS_MAX_FREQUENCY,
+			       PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+	dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
+			       DEV_PM_QOS_MIN_FREQUENCY, 0);
+
 	msm_devfreq_profile.initial_freq = gpu->fast_rate;
 
 	/*
@@ -133,13 +127,19 @@ void msm_devfreq_init(struct msm_gpu *gpu)
 		gpu->cooling = NULL;
 	}
 
+	msm_hrtimer_work_init(&df->boost_work, gpu->worker, msm_devfreq_boost_work,
+			      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	msm_hrtimer_work_init(&df->idle_work, gpu->worker, msm_devfreq_idle_work,
 			      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 }
 
 void msm_devfreq_cleanup(struct msm_gpu *gpu)
 {
+	struct msm_gpu_devfreq *df = &gpu->devfreq;
+
 	devfreq_cooling_unregister(gpu->cooling);
+	dev_pm_qos_remove_request(&df->boost_freq);
+	dev_pm_qos_remove_request(&df->idle_freq);
 }
 
 void msm_devfreq_resume(struct msm_gpu *gpu)
@@ -155,12 +155,40 @@ void msm_devfreq_suspend(struct msm_gpu *gpu)
 	devfreq_suspend_device(gpu->devfreq.devfreq);
 }
 
+static void msm_devfreq_boost_work(struct kthread_work *work)
+{
+	struct msm_gpu_devfreq *df = container_of(work,
+			struct msm_gpu_devfreq, boost_work.work);
+
+	dev_pm_qos_update_request(&df->boost_freq, 0);
+}
+
+void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor)
+{
+	struct msm_gpu_devfreq *df = &gpu->devfreq;
+	uint64_t freq;
+
+	freq = get_freq(gpu);
+	freq *= factor;
+
+	/*
+	 * A nice little trap is that PM QoS operates in terms of KHz,
+	 * while devfreq operates in terms of Hz:
+	 */
+	do_div(freq, HZ_PER_KHZ);
+
+	dev_pm_qos_update_request(&df->boost_freq, freq);
+
+	msm_hrtimer_queue_work(&df->boost_work,
+			       ms_to_ktime(msm_devfreq_profile.polling_ms),
+			       HRTIMER_MODE_REL);
+}
+
 void msm_devfreq_active(struct msm_gpu *gpu)
 {
 	struct msm_gpu_devfreq *df = &gpu->devfreq;
 	struct devfreq_dev_status status;
 	unsigned int idle_time;
-	unsigned long target_freq = df->idle_freq;
 
 	if (!df->devfreq)
 		return;
@@ -170,12 +198,6 @@ void msm_devfreq_active(struct msm_gpu *gpu)
 	 */
 	hrtimer_cancel(&df->idle_work.timer);
 
-	/*
-	 * Hold devfreq lock to synchronize with get_dev_status()/
-	 * target() callbacks
-	 */
-	mutex_lock(&df->devfreq->lock);
-
 	idle_time = ktime_to_ms(ktime_sub(ktime_get(), df->idle_time));
 
 	/*
@@ -184,20 +206,17 @@ void msm_devfreq_active(struct msm_gpu *gpu)
 	 * the governor to ramp up the freq.. so give some boost
 	 */
 	if (idle_time > msm_devfreq_profile.polling_ms) {
-		target_freq *= 2;
+		msm_devfreq_boost(gpu, 2);
 	}
 
-	df->idle_freq = 0;
-
-	msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
+	dev_pm_qos_update_request(&df->idle_freq,
+				  PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
 
 	/*
 	 * Reset the polling interval so we aren't inconsistent
 	 * about freq vs busy/total cycles
 	 */
 	msm_devfreq_get_dev_status(&gpu->pdev->dev, &status);
-
-	mutex_unlock(&df->devfreq->lock);
 }
 
 
@@ -206,23 +225,11 @@ static void msm_devfreq_idle_work(struct kthread_work *work)
 	struct msm_gpu_devfreq *df = container_of(work,
 			struct msm_gpu_devfreq, idle_work.work);
 	struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
-	unsigned long idle_freq, target_freq = 0;
-
-	/*
-	 * Hold devfreq lock to synchronize with get_dev_status()/
-	 * target() callbacks
-	 */
-	mutex_lock(&df->devfreq->lock);
-
-	idle_freq = get_freq(gpu);
-
-	if (gpu->clamp_to_idle)
-		msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
 
 	df->idle_time = ktime_get();
-	df->idle_freq = idle_freq;
 
-	mutex_unlock(&df->devfreq->lock);
+	if (gpu->clamp_to_idle)
+		dev_pm_qos_update_request(&df->idle_freq, 0);
 }
 
 void msm_devfreq_idle(struct msm_gpu *gpu)
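For readers following the boost path above: the one subtle step is the unit conversion in msm_devfreq_boost(), since dev_pm_qos_*() requests are expressed in kHz while get_freq()/devfreq report Hz. Below is a minimal sketch, not part of the commit, of the dev_pm_qos request lifecycle the patch switches to; the example_boost names are hypothetical, while the pm_qos calls, constants, and HZ_PER_KHZ are the ones the patch itself uses.

/*
 * Minimal sketch (not part of the commit) of the dev_pm_qos request
 * lifecycle used above.  The example_boost names are hypothetical; the
 * pm_qos calls, constants and HZ_PER_KHZ are what the patch relies on.
 */
#include <asm/div64.h>
#include <linux/device.h>
#include <linux/pm_qos.h>
#include <linux/units.h>

struct example_boost {
	struct dev_pm_qos_request min_freq;	/* frequency floor, in kHz */
};

static int example_boost_init(struct device *dev, struct example_boost *b)
{
	/* A 0 kHz minimum-frequency request constrains nothing yet. */
	return dev_pm_qos_add_request(dev, &b->min_freq,
				      DEV_PM_QOS_MIN_FREQUENCY, 0);
}

static void example_boost_set(struct example_boost *b,
			      unsigned long cur_hz, unsigned int factor)
{
	u64 freq = (u64)cur_hz * factor;

	/* PM QoS takes kHz; devfreq and clk report Hz. */
	do_div(freq, HZ_PER_KHZ);

	dev_pm_qos_update_request(&b->min_freq, freq);
}

static void example_boost_fini(struct example_boost *b)
{
	dev_pm_qos_remove_request(&b->min_freq);
}

Expressing the boost as a DEV_PM_QOS_MIN_FREQUENCY request and the idle clamp as a DEV_PM_QOS_MAX_FREQUENCY request lets the constraints aggregate with any other frequency requests on the device, instead of overwriting the devfreq target under its lock, which is why the mutex_lock()/idle_freq bookkeeping removed above is no longer needed.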