Diffstat (limited to 'drivers/thermal')
-rw-r--r-- | drivers/thermal/gov_power_allocator.c | 269
-rw-r--r-- | drivers/thermal/thermal_core.c        |  91
-rw-r--r-- | drivers/thermal/thermal_core.h        |   2
-rw-r--r-- | drivers/thermal/thermal_netlink.c     |  24
-rw-r--r-- | drivers/thermal/thermal_sysfs.c       |   9
-rw-r--r-- | drivers/thermal/thermal_trace_ipa.h   |  50
6 files changed, 300 insertions, 145 deletions
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 785fff14223d..7b6aa265ff6a 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -47,6 +47,22 @@ static inline s64 div_frac(s64 x, s64 y)
 }
 
 /**
+ * struct power_actor - internal power information for power actor
+ * @req_power:		requested power value (not weighted)
+ * @max_power:		max allocatable power for this actor
+ * @granted_power:	granted power for this actor
+ * @extra_actor_power:	extra power that this actor can receive
+ * @weighted_req_power:	weighted requested power as input to IPA
+ */
+struct power_actor {
+	u32 req_power;
+	u32 max_power;
+	u32 granted_power;
+	u32 extra_actor_power;
+	u32 weighted_req_power;
+};
+
+/**
  * struct power_allocator_params - parameters for the power allocator governor
  * @allocated_tzp:	whether we have allocated tzp for this thermal zone and
  *			it needs to be freed on unbind
@@ -61,6 +77,10 @@ static inline s64 div_frac(s64 x, s64 y)
  *			@trip_switch_on should be NULL.
  * @trip_max:		last passive trip point of the thermal zone. The
  *			temperature we are controlling for.
+ * @total_weight:	Sum of all thermal instances weights
+ * @num_actors:		number of cooling devices supporting IPA callbacks
+ * @buffer_size:	internal buffer size, to avoid runtime re-calculation
+ * @power:		buffer for all power actors internal power information
  */
 struct power_allocator_params {
 	bool allocated_tzp;
@@ -69,8 +89,19 @@ struct power_allocator_params {
 	u32 sustainable_power;
 	const struct thermal_trip *trip_switch_on;
 	const struct thermal_trip *trip_max;
+	int total_weight;
+	unsigned int num_actors;
+	unsigned int buffer_size;
+	struct power_actor *power;
 };
 
+static bool power_actor_is_valid(struct power_allocator_params *params,
+				 struct thermal_instance *instance)
+{
+	return (instance->trip == params->trip_max &&
+		cdev_is_power_actor(instance->cdev));
+}
+
 /**
  * estimate_sustainable_power() - Estimate the sustainable power of a thermal zone
  * @tz: thermal zone we are operating in
@@ -91,14 +122,10 @@ static u32 estimate_sustainable_power(struct thermal_zone_device *tz)
 	u32 min_power;
 
 	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
-		cdev = instance->cdev;
-
-		if (instance->trip != params->trip_max)
-			continue;
-
-		if (!cdev_is_power_actor(cdev))
+		if (!power_actor_is_valid(params, instance))
 			continue;
 
+		cdev = instance->cdev;
 		if (cdev->ops->state2power(cdev, instance->upper, &min_power))
 			continue;
 
@@ -303,15 +330,10 @@ power_actor_set_power(struct thermal_cooling_device *cdev,
 
 /**
  * divvy_up_power() - divvy the allocated power between the actors
- * @req_power:	each actor's requested power
- * @max_power:	each actor's maximum available power
- * @num_actors:	size of the @req_power, @max_power and @granted_power's array
- * @total_req_power: sum of @req_power
+ * @power:	buffer for all power actors internal power information
+ * @num_actors:	number of power actors in this thermal zone
+ * @total_req_power: sum of all weighted requested power for all actors
  * @power_range:	total allocated power
- * @granted_power:	output array: each actor's granted power
- * @extra_actor_power:	an appropriately sized array to be used in the
- *			function as temporary storage of the extra power given
- *			to the actors
  *
  * This function divides the total allocated power (@power_range)
  * fairly between the actors.  It first tries to give each actor a
@@ -324,15 +346,12 @@ power_actor_set_power(struct thermal_cooling_device *cdev,
  * If any actor received more than their maximum power, then that
  * surplus is re-divvied among the actors based on how far they are
  * from their respective maximums.
- *
- * Granted power for each actor is written to @granted_power, which
- * should've been allocated by the calling function.
  */
-static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
-			   u32 total_req_power, u32 power_range,
-			   u32 *granted_power, u32 *extra_actor_power)
+static void divvy_up_power(struct power_actor *power, int num_actors,
+			   u32 total_req_power, u32 power_range)
 {
-	u32 extra_power, capped_extra_power;
+	u32 capped_extra_power = 0;
+	u32 extra_power = 0;
 	int i;
 
 	/*
@@ -341,24 +360,23 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
 	if (!total_req_power)
 		total_req_power = 1;
 
-	capped_extra_power = 0;
-	extra_power = 0;
 	for (i = 0; i < num_actors; i++) {
-		u64 req_range = (u64)req_power[i] * power_range;
+		struct power_actor *pa = &power[i];
+		u64 req_range = (u64)pa->req_power * power_range;
 
-		granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range,
-							 total_req_power);
+		pa->granted_power = DIV_ROUND_CLOSEST_ULL(req_range,
+							  total_req_power);
 
-		if (granted_power[i] > max_power[i]) {
-			extra_power += granted_power[i] - max_power[i];
-			granted_power[i] = max_power[i];
+		if (pa->granted_power > pa->max_power) {
+			extra_power += pa->granted_power - pa->max_power;
+			pa->granted_power = pa->max_power;
 		}
 
-		extra_actor_power[i] = max_power[i] - granted_power[i];
-		capped_extra_power += extra_actor_power[i];
+		pa->extra_actor_power = pa->max_power - pa->granted_power;
+		capped_extra_power += pa->extra_actor_power;
 	}
 
-	if (!extra_power)
+	if (!extra_power || !capped_extra_power)
 		return;
 
 	/*
@@ -366,120 +384,94 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
 	 * how far they are from the max
 	 */
 	extra_power = min(extra_power, capped_extra_power);
-	if (capped_extra_power > 0)
-		for (i = 0; i < num_actors; i++) {
-			u64 extra_range = (u64)extra_actor_power[i] * extra_power;
-			granted_power[i] += DIV_ROUND_CLOSEST_ULL(extra_range,
-							capped_extra_power);
-		}
+
+	for (i = 0; i < num_actors; i++) {
+		struct power_actor *pa = &power[i];
+		u64 extra_range = pa->extra_actor_power;
+
+		extra_range *= extra_power;
+		pa->granted_power += DIV_ROUND_CLOSEST_ULL(extra_range,
+							   capped_extra_power);
+	}
 }
 
 static int allocate_power(struct thermal_zone_device *tz, int control_temp)
 {
-	u32 *req_power, *max_power, *granted_power, *extra_actor_power;
 	struct power_allocator_params *params = tz->governor_data;
+	unsigned int num_actors = params->num_actors;
+	struct power_actor *power = params->power;
 	struct thermal_cooling_device *cdev;
 	struct thermal_instance *instance;
 	u32 total_weighted_req_power = 0;
 	u32 max_allocatable_power = 0;
 	u32 total_granted_power = 0;
 	u32 total_req_power = 0;
-	u32 *weighted_req_power;
 	u32 power_range, weight;
-	int total_weight = 0;
-	int num_actors = 0;
-	int i = 0;
-
-	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
-		if ((instance->trip == params->trip_max) &&
-		    cdev_is_power_actor(instance->cdev)) {
-			num_actors++;
-			total_weight += instance->weight;
-		}
-	}
+	int i = 0, ret;
 
 	if (!num_actors)
 		return -ENODEV;
 
-	/*
-	 * We need to allocate five arrays of the same size:
-	 * req_power, max_power, granted_power, extra_actor_power and
-	 * weighted_req_power.  They are going to be needed until this
-	 * function returns.  Allocate them all in one go to simplify
-	 * the allocation and deallocation logic.
-	 */
-	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
-	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
-	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
-	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
-	req_power = kcalloc(num_actors * 5, sizeof(*req_power), GFP_KERNEL);
-	if (!req_power)
-		return -ENOMEM;
-
-	max_power = &req_power[num_actors];
-	granted_power = &req_power[2 * num_actors];
-	extra_actor_power = &req_power[3 * num_actors];
-	weighted_req_power = &req_power[4 * num_actors];
+	/* Clean all buffers for new power estimations */
+	memset(power, 0, params->buffer_size);
 
 	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
-		cdev = instance->cdev;
+		struct power_actor *pa = &power[i];
 
-		if (instance->trip != params->trip_max)
+		if (!power_actor_is_valid(params, instance))
 			continue;
 
-		if (!cdev_is_power_actor(cdev))
-			continue;
+		cdev = instance->cdev;
 
-		if (cdev->ops->get_requested_power(cdev, &req_power[i]))
+		ret = cdev->ops->get_requested_power(cdev, &pa->req_power);
+		if (ret)
 			continue;
 
-		if (!total_weight)
+		if (!params->total_weight)
 			weight = 1 << FRAC_BITS;
 		else
 			weight = instance->weight;
 
-		weighted_req_power[i] = frac_to_int(weight * req_power[i]);
+		pa->weighted_req_power = frac_to_int(weight * pa->req_power);
 
-		if (cdev->ops->state2power(cdev, instance->lower,
-					   &max_power[i]))
+		ret = cdev->ops->state2power(cdev, instance->lower,
					     &pa->max_power);
+		if (ret)
 			continue;
 
-		total_req_power += req_power[i];
-		max_allocatable_power += max_power[i];
-		total_weighted_req_power += weighted_req_power[i];
+		total_req_power += pa->req_power;
+		max_allocatable_power += pa->max_power;
+		total_weighted_req_power += pa->weighted_req_power;
 
 		i++;
 	}
 
 	power_range = pid_controller(tz, control_temp, max_allocatable_power);
 
-	divvy_up_power(weighted_req_power, max_power, num_actors,
-		       total_weighted_req_power, power_range, granted_power,
-		       extra_actor_power);
+	divvy_up_power(power, num_actors, total_weighted_req_power,
+		       power_range);
 
 	i = 0;
 	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
-		if (instance->trip != params->trip_max)
-			continue;
+		struct power_actor *pa = &power[i];
 
-		if (!cdev_is_power_actor(instance->cdev))
+		if (!power_actor_is_valid(params, instance))
 			continue;
 
 		power_actor_set_power(instance->cdev, instance,
-				      granted_power[i]);
-		total_granted_power += granted_power[i];
+				      pa->granted_power);
+		total_granted_power += pa->granted_power;
+
+		trace_thermal_power_actor(tz, i, pa->req_power,
+					  pa->granted_power);
 
 		i++;
 	}
 
-	trace_thermal_power_allocator(tz, req_power, total_req_power,
-				      granted_power, total_granted_power,
+	trace_thermal_power_allocator(tz, total_req_power, total_granted_power,
 				      num_actors, power_range,
 				      max_allocatable_power, tz->temperature,
 				      control_temp - tz->temperature);
 
-	kfree(req_power);
-
 	return 0;
 }
 
@@ -549,12 +541,11 @@ static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
 	u32 req_power;
 
 	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
-		cdev = instance->cdev;
-
-		if (instance->trip != params->trip_max ||
-		    !cdev_is_power_actor(instance->cdev))
+		if (!power_actor_is_valid(params, instance))
 			continue;
 
+		cdev = instance->cdev;
+
 		instance->target = 0;
 		mutex_lock(&cdev->lock);
 		/*
@@ -581,8 +572,9 @@ static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
  * power actor API.  The warning should help to investigate the issue, which
  * could be e.g. lack of Energy Model for a given device.
  *
- * Return: 0 on success, -EINVAL if any cooling device does not implement
- * the power actor API.
+ * If all of the cooling devices currently attached to @tz implement the power
+ * actor API, return the number of them (which may be 0, because some cooling
+ * devices may be attached later).  Otherwise, return -EINVAL.
  */
 static int check_power_actors(struct thermal_zone_device *tz,
 			      struct power_allocator_params *params)
@@ -597,13 +589,76 @@ static int check_power_actors(struct thermal_zone_device *tz,
 		if (!cdev_is_power_actor(instance->cdev)) {
 			dev_warn(&tz->device, "power_allocator: %s is not a power actor\n",
 				 instance->cdev->type);
-			ret = -EINVAL;
+			return -EINVAL;
 		}
+
+		ret++;
+	}
+
+	return ret;
+}
+
+static int allocate_actors_buffer(struct power_allocator_params *params,
+				  int num_actors)
+{
+	int ret;
+
+	kfree(params->power);
+
+	/* There might be no cooling devices yet. */
+	if (!num_actors) {
+		ret = -EINVAL;
+		goto clean_state;
+	}
+
+	params->power = kcalloc(num_actors, sizeof(struct power_actor),
+				GFP_KERNEL);
+	if (!params->power) {
+		ret = -ENOMEM;
+		goto clean_state;
 	}
 
+	params->num_actors = num_actors;
+	params->buffer_size = num_actors * sizeof(struct power_actor);
+
+	return 0;
+
+clean_state:
+	params->num_actors = 0;
+	params->buffer_size = 0;
+	params->power = NULL;
+
 	return ret;
 }
 
+static void power_allocator_update_tz(struct thermal_zone_device *tz,
+				      enum thermal_notify_event reason)
+{
+	struct power_allocator_params *params = tz->governor_data;
+	struct thermal_instance *instance;
+	int num_actors = 0;
+
+	switch (reason) {
+	case THERMAL_TZ_BIND_CDEV:
+	case THERMAL_TZ_UNBIND_CDEV:
+		list_for_each_entry(instance, &tz->thermal_instances, tz_node)
+			if (power_actor_is_valid(params, instance))
+				num_actors++;
+
+		if (num_actors == params->num_actors)
+			return;
+
+		allocate_actors_buffer(params, num_actors);
+		break;
+	case THERMAL_INSTANCE_WEIGHT_CHANGED:
+		params->total_weight = 0;
+		list_for_each_entry(instance, &tz->thermal_instances, tz_node)
+			if (power_actor_is_valid(params, instance))
+				params->total_weight += instance->weight;
+		break;
+	default:
+		break;
+	}
+}
+
 /**
  * power_allocator_bind() - bind the power_allocator governor to a thermal zone
  * @tz:		thermal zone to bind it to
@@ -631,12 +686,19 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
 	}
 
 	ret = check_power_actors(tz, params);
-	if (ret) {
+	if (ret < 0) {
 		dev_warn(&tz->device, "power_allocator: binding failed\n");
 		kfree(params);
 		return ret;
 	}
 
+	ret = allocate_actors_buffer(params, ret);
+	if (ret) {
+		dev_warn(&tz->device, "power_allocator: allocation failed\n");
+		kfree(params);
+		return ret;
+	}
+
 	if (!tz->tzp) {
 		tz->tzp = kzalloc(sizeof(*tz->tzp), GFP_KERNEL);
 		if (!tz->tzp) {
@@ -661,6 +723,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
 	return 0;
 
 free_params:
+	kfree(params->power);
 	kfree(params);
 
 	return ret;
@@ -677,6 +740,7 @@ static void power_allocator_unbind(struct thermal_zone_device *tz)
 		tz->tzp = NULL;
 	}
 
+	kfree(params->power);
 	kfree(tz->governor_data);
 	tz->governor_data = NULL;
 }
@@ -715,5 +779,6 @@ static struct thermal_governor thermal_gov_power_allocator = {
 	.bind_to_tz	= power_allocator_bind,
 	.unbind_from_tz	= power_allocator_unbind,
 	.throttle	= power_allocator_throttle,
+	.update_tz	= power_allocator_update_tz,
 };
 THERMAL_GOVERNOR_DECLARE(thermal_gov_power_allocator);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 0d761afb7cbc..fa88d8707241 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -37,8 +37,6 @@ static LIST_HEAD(thermal_governor_list);
 static DEFINE_MUTEX(thermal_list_lock);
 static DEFINE_MUTEX(thermal_governor_lock);
 
-static atomic_t in_suspend;
-
 static struct thermal_governor *def_governor;
 
 /*
@@ -311,6 +309,15 @@ static void handle_non_critical_trips(struct thermal_zone_device *tz,
 		def_governor->throttle(tz, trip);
 }
 
+void thermal_governor_update_tz(struct thermal_zone_device *tz,
+				enum thermal_notify_event reason)
+{
+	if (!tz->governor || !tz->governor->update_tz)
+		return;
+
+	tz->governor->update_tz(tz, reason);
+}
+
 static void thermal_zone_device_halt(struct thermal_zone_device *tz, bool shutdown)
 {
 	/*
@@ -429,9 +436,20 @@ static void update_temperature(struct thermal_zone_device *tz)
 	thermal_genl_sampling_temp(tz->id, temp);
 }
 
+static void thermal_zone_device_check(struct work_struct *work)
+{
+	struct thermal_zone_device *tz = container_of(work, struct
+						      thermal_zone_device,
+						      poll_queue.work);
+	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+}
+
 static void thermal_zone_device_init(struct thermal_zone_device *tz)
 {
 	struct thermal_instance *pos;
+
+	INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check);
+
 	tz->temperature = THERMAL_TEMP_INVALID;
 	tz->prev_low_trip = -INT_MAX;
 	tz->prev_high_trip = INT_MAX;
@@ -444,7 +462,7 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
 {
 	struct thermal_trip *trip;
 
-	if (atomic_read(&in_suspend))
+	if (tz->suspended)
 		return;
 
 	if (!thermal_zone_device_is_enabled(tz))
@@ -528,14 +546,6 @@ void thermal_zone_device_update(struct thermal_zone_device *tz,
 }
 EXPORT_SYMBOL_GPL(thermal_zone_device_update);
 
-static void thermal_zone_device_check(struct work_struct *work)
-{
-	struct thermal_zone_device *tz = container_of(work, struct
-						      thermal_zone_device,
-						      poll_queue.work);
-	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
-}
-
 int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *),
 			      void *data)
 {
@@ -727,6 +737,8 @@ int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
 		list_add_tail(&dev->tz_node, &tz->thermal_instances);
 		list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
 		atomic_set(&tz->need_update, 1);
+
+		thermal_governor_update_tz(tz, THERMAL_TZ_BIND_CDEV);
 	}
 	mutex_unlock(&cdev->lock);
 	mutex_unlock(&tz->lock);
@@ -785,6 +797,9 @@ int thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz,
 		if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
 			list_del(&pos->tz_node);
 			list_del(&pos->cdev_node);
+
+			thermal_governor_update_tz(tz, THERMAL_TZ_UNBIND_CDEV);
+
 			mutex_unlock(&cdev->lock);
 			mutex_unlock(&tz->lock);
 			goto unbind;
@@ -1391,8 +1406,6 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
 	/* Bind cooling devices for this zone */
 	bind_tz(tz);
 
-	INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check);
-
 	thermal_zone_device_init(tz);
 	/* Update the new thermal zone and mark it as already updated. */
 	if (atomic_cmpxchg(&tz->need_update, 1, 0))
@@ -1546,6 +1559,22 @@ exit:
 }
 EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name);
 
+static void thermal_zone_device_resume(struct work_struct *work)
+{
+	struct thermal_zone_device *tz;
+
+	tz = container_of(work, struct thermal_zone_device, poll_queue.work);
+
+	mutex_lock(&tz->lock);
+
+	tz->suspended = false;
+
+	thermal_zone_device_init(tz);
+	__thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+
+	mutex_unlock(&tz->lock);
+}
+
 static int thermal_pm_notify(struct notifier_block *nb,
 			     unsigned long mode, void *_unused)
 {
@@ -1555,17 +1584,43 @@ static int thermal_pm_notify(struct notifier_block *nb,
 	case PM_HIBERNATION_PREPARE:
 	case PM_RESTORE_PREPARE:
 	case PM_SUSPEND_PREPARE:
-		atomic_set(&in_suspend, 1);
+		mutex_lock(&thermal_list_lock);
+
+		list_for_each_entry(tz, &thermal_tz_list, node) {
+			mutex_lock(&tz->lock);
+
+			tz->suspended = true;
+
+			mutex_unlock(&tz->lock);
+		}
+
+		mutex_unlock(&thermal_list_lock);
 		break;
 	case PM_POST_HIBERNATION:
 	case PM_POST_RESTORE:
 	case PM_POST_SUSPEND:
-		atomic_set(&in_suspend, 0);
+		mutex_lock(&thermal_list_lock);
+
 		list_for_each_entry(tz, &thermal_tz_list, node) {
-			thermal_zone_device_init(tz);
-			thermal_zone_device_update(tz,
-						   THERMAL_EVENT_UNSPECIFIED);
+			mutex_lock(&tz->lock);
+
+			cancel_delayed_work(&tz->poll_queue);
+
+			/*
+			 * Replace the work function with the resume one, which
+			 * will restore the original work function and schedule
+			 * the polling work if needed.
+			 */
+			INIT_DELAYED_WORK(&tz->poll_queue,
+					  thermal_zone_device_resume);
+			/* Queue up the work without a delay. */
+			mod_delayed_work(system_freezable_power_efficient_wq,
+					 &tz->poll_queue, 0);
+
+			mutex_unlock(&tz->lock);
 		}
+
+		mutex_unlock(&thermal_list_lock);
 		break;
 	default:
 		break;
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index b5e6743bd157..e6a2b6f97be8 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -115,6 +115,8 @@ int thermal_build_list_of_policies(char *buf);
 void __thermal_zone_device_update(struct thermal_zone_device *tz,
 				  enum thermal_notify_event event);
 void thermal_zone_device_critical_reboot(struct thermal_zone_device *tz);
+void thermal_governor_update_tz(struct thermal_zone_device *tz,
+				enum thermal_notify_event reason);
 
 /* Helpers */
 #define for_each_trip(__tz, __trip)	\
diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c
index 21f00d73acb7..332052e24a86 100644
--- a/drivers/thermal/thermal_netlink.c
+++ b/drivers/thermal/thermal_netlink.c
@@ -13,9 +13,14 @@
 
 #include "thermal_core.h"
 
+enum thermal_genl_multicast_groups {
+	THERMAL_GENL_SAMPLING_GROUP = 0,
+	THERMAL_GENL_EVENT_GROUP = 1,
+};
+
 static const struct genl_multicast_group thermal_genl_mcgrps[] = {
-	{ .name = THERMAL_GENL_SAMPLING_GROUP_NAME, },
-	{ .name = THERMAL_GENL_EVENT_GROUP_NAME, },
+	[THERMAL_GENL_SAMPLING_GROUP] = { .name = THERMAL_GENL_SAMPLING_GROUP_NAME, },
+	[THERMAL_GENL_EVENT_GROUP] = { .name = THERMAL_GENL_EVENT_GROUP_NAME, },
 };
 
 static const struct nla_policy thermal_genl_policy[THERMAL_GENL_ATTR_MAX + 1] = {
@@ -71,6 +76,11 @@ typedef int (*cb_t)(struct param *);
 
 static struct genl_family thermal_gnl_family;
 
+static int thermal_group_has_listeners(enum thermal_genl_multicast_groups group)
+{
+	return genl_has_listeners(&thermal_gnl_family, &init_net, group);
+}
+
 /************************** Sampling encoding *******************************/
 
 int thermal_genl_sampling_temp(int id, int temp)
@@ -78,6 +88,9 @@ int thermal_genl_sampling_temp(int id, int temp)
 	struct sk_buff *skb;
 	void *hdr;
 
+	if (!thermal_group_has_listeners(THERMAL_GENL_SAMPLING_GROUP))
+		return 0;
+
 	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
@@ -95,7 +108,7 @@ int thermal_genl_sampling_temp(int id, int temp)
 
 	genlmsg_end(skb, hdr);
 
-	genlmsg_multicast(&thermal_gnl_family, skb, 0, 0, GFP_KERNEL);
+	genlmsg_multicast(&thermal_gnl_family, skb, 0, THERMAL_GENL_SAMPLING_GROUP, GFP_KERNEL);
 
 	return 0;
 
 out_cancel:
@@ -275,6 +288,9 @@ static int thermal_genl_send_event(enum thermal_genl_event event,
 	int ret = -EMSGSIZE;
 	void *hdr;
 
+	if (!thermal_group_has_listeners(THERMAL_GENL_EVENT_GROUP))
+		return 0;
+
 	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (!msg)
 		return -ENOMEM;
@@ -290,7 +306,7 @@ static int thermal_genl_send_event(enum thermal_genl_event event,
 
 	genlmsg_end(msg, hdr);
 
-	genlmsg_multicast(&thermal_gnl_family, msg, 0, 1, GFP_KERNEL);
+	genlmsg_multicast(&thermal_gnl_family, msg, 0, THERMAL_GENL_EVENT_GROUP, GFP_KERNEL);
 
 	return 0;
 
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index d8ff74a4338a..f4033865b093 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -936,7 +936,16 @@ ssize_t weight_store(struct device *dev, struct device_attribute *attr,
 		return ret;
 
 	instance = container_of(attr, struct thermal_instance, weight_attr);
+
+	/* Don't race with governors using the 'weight' value */
+	mutex_lock(&instance->tz->lock);
+
 	instance->weight = weight;
+
+	thermal_governor_update_tz(instance->tz,
+				   THERMAL_INSTANCE_WEIGHT_CHANGED);
+
+	mutex_unlock(&instance->tz->lock);
 
 	return count;
 }
diff --git a/drivers/thermal/thermal_trace_ipa.h b/drivers/thermal/thermal_trace_ipa.h
index 84568db5421b..b16b5dd863d9 100644
--- a/drivers/thermal/thermal_trace_ipa.h
+++ b/drivers/thermal/thermal_trace_ipa.h
@@ -8,19 +8,14 @@
 #include <linux/tracepoint.h>
 
 TRACE_EVENT(thermal_power_allocator,
-	TP_PROTO(struct thermal_zone_device *tz, u32 *req_power,
-		 u32 total_req_power, u32 *granted_power,
-		 u32 total_granted_power, size_t num_actors,
-		 u32 power_range, u32 max_allocatable_power,
-		 int current_temp, s32 delta_temp),
-	TP_ARGS(tz, req_power, total_req_power, granted_power,
-		total_granted_power, num_actors, power_range,
-		max_allocatable_power, current_temp, delta_temp),
+	TP_PROTO(struct thermal_zone_device *tz, u32 total_req_power,
+		 u32 total_granted_power, int num_actors, u32 power_range,
+		 u32 max_allocatable_power, int current_temp, s32 delta_temp),
+	TP_ARGS(tz, total_req_power, total_granted_power, num_actors,
+		power_range, max_allocatable_power, current_temp, delta_temp),
 	TP_STRUCT__entry(
 		__field(int,           tz_id          )
-		__dynamic_array(u32,   req_power, num_actors    )
 		__field(u32,           total_req_power)
-		__dynamic_array(u32,   granted_power, num_actors)
 		__field(u32,           total_granted_power)
 		__field(size_t,        num_actors     )
 		__field(u32,           power_range    )
@@ -30,11 +25,7 @@ TRACE_EVENT(thermal_power_allocator,
 	),
 	TP_fast_assign(
 		__entry->tz_id = tz->id;
-		memcpy(__get_dynamic_array(req_power), req_power,
-			num_actors * sizeof(*req_power));
 		__entry->total_req_power = total_req_power;
-		memcpy(__get_dynamic_array(granted_power), granted_power,
-			num_actors * sizeof(*granted_power));
 		__entry->total_granted_power = total_granted_power;
 		__entry->num_actors = num_actors;
 		__entry->power_range = power_range;
@@ -43,18 +34,35 @@ TRACE_EVENT(thermal_power_allocator,
 		__entry->delta_temp = delta_temp;
 	),
 
-	TP_printk("thermal_zone_id=%d req_power={%s} total_req_power=%u granted_power={%s} total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%d delta_temperature=%d",
-		__entry->tz_id,
-		__print_array(__get_dynamic_array(req_power),
-			      __entry->num_actors, 4),
-		__entry->total_req_power,
-		__print_array(__get_dynamic_array(granted_power),
-			      __entry->num_actors, 4),
+	TP_printk("thermal_zone_id=%d total_req_power=%u total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%d delta_temperature=%d",
+		__entry->tz_id, __entry->total_req_power, __entry->total_granted_power,
 		__entry->power_range, __entry->max_allocatable_power,
 		__entry->current_temp, __entry->delta_temp)
 );
 
+TRACE_EVENT(thermal_power_actor,
+	TP_PROTO(struct thermal_zone_device *tz, int actor_id, u32 req_power,
+		 u32 granted_power),
+	TP_ARGS(tz, actor_id, req_power, granted_power),
+	TP_STRUCT__entry(
+		__field(int, tz_id)
+		__field(int, actor_id)
+		__field(u32, req_power)
+		__field(u32, granted_power)
+	),
+	TP_fast_assign(
+		__entry->tz_id = tz->id;
+		__entry->actor_id = actor_id;
+		__entry->req_power = req_power;
+		__entry->granted_power = granted_power;
+	),
+
+	TP_printk("thermal_zone_id=%d actor_id=%d req_power=%u granted_power=%u",
+		  __entry->tz_id, __entry->actor_id, __entry->req_power,
+		  __entry->granted_power)
+);
+
 TRACE_EVENT(thermal_power_allocator_pid,
 	TP_PROTO(struct thermal_zone_device *tz, s32 err, s32 err_integral,
 		 s64 p, s64 i, s64 d, s32 output),
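
The reworked divvy_up_power() above keeps the two-pass proportional split described in its kernel-doc: each actor first receives a share of the budget proportional to its (weighted) request, capped at its maximum, and any surplus is then redistributed in proportion to the headroom each actor still has below its maximum. The following standalone user-space sketch is not part of the patch; the plain C types, the div_round_closest() helper and the example numbers are made-up stand-ins for the kernel's u32/u64 helpers, added only so the rounding behaviour can be checked outside the kernel.

/*
 * Standalone illustration of the proportional split performed by the new
 * divvy_up_power(); not kernel code, all values are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

struct power_actor {
	uint32_t req_power;
	uint32_t max_power;
	uint32_t granted_power;
	uint32_t extra_actor_power;
};

/* Stand-in for DIV_ROUND_CLOSEST_ULL(). */
static uint32_t div_round_closest(uint64_t n, uint64_t d)
{
	return (uint32_t)((n + d / 2) / d);
}

static void divvy_up_power(struct power_actor *power, int num_actors,
			   uint32_t total_req_power, uint32_t power_range)
{
	uint32_t capped_extra_power = 0;
	uint32_t extra_power = 0;
	int i;

	if (!total_req_power)
		total_req_power = 1;

	/* First pass: share the budget in proportion to each request,
	 * capping every actor at its maximum. */
	for (i = 0; i < num_actors; i++) {
		struct power_actor *pa = &power[i];
		uint64_t req_range = (uint64_t)pa->req_power * power_range;

		pa->granted_power = div_round_closest(req_range, total_req_power);
		if (pa->granted_power > pa->max_power) {
			extra_power += pa->granted_power - pa->max_power;
			pa->granted_power = pa->max_power;
		}
		pa->extra_actor_power = pa->max_power - pa->granted_power;
		capped_extra_power += pa->extra_actor_power;
	}

	if (!extra_power || !capped_extra_power)
		return;

	/* Second pass: redistribute the surplus in proportion to the
	 * remaining headroom of each actor. */
	if (extra_power > capped_extra_power)
		extra_power = capped_extra_power;
	for (i = 0; i < num_actors; i++) {
		struct power_actor *pa = &power[i];
		uint64_t extra_range = (uint64_t)pa->extra_actor_power * extra_power;

		pa->granted_power += div_round_closest(extra_range,
						       capped_extra_power);
	}
}

int main(void)
{
	/* Hypothetical actors: e.g. two CPU clusters and a GPU. */
	struct power_actor actors[] = {
		{ .req_power = 3000, .max_power = 2000 },
		{ .req_power = 1000, .max_power = 4000 },
		{ .req_power = 2000, .max_power = 3000 },
	};
	uint32_t budget = 4500;

	divvy_up_power(actors, 3, 3000 + 1000 + 2000, budget);

	for (int i = 0; i < 3; i++)
		printf("actor %d: granted %u mW\n", i, actors[i].granted_power);

	return 0;
}

With these made-up requests (3000/1000/2000 mW against a 4500 mW budget and a 2000 mW cap on the first actor), the sketch prints 2000, 921 and 1579 mW, which sum back to the full 4500 mW budget.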
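The new power_allocator_update_tz() callback only reacts to three notifications: on THERMAL_TZ_BIND_CDEV and THERMAL_TZ_UNBIND_CDEV it re-counts the valid power actors and resizes the per-zone buffer only when that count actually changed, and on THERMAL_INSTANCE_WEIGHT_CHANGED it re-sums the instance weights. The short user-space sketch below mirrors that flow; it is not kernel code, and the struct instance / struct params types are simplified stand-ins for the kernel structures.

#include <stdio.h>
#include <stdlib.h>

struct instance {           /* stand-in for struct thermal_instance */
	int is_power_actor;  /* stand-in for power_actor_is_valid() */
	int weight;
};

struct params {             /* stand-in for struct power_allocator_params */
	unsigned int num_actors;
	size_t buffer_size;
	unsigned int *power; /* stand-in for the struct power_actor buffer */
	int total_weight;
};

enum notify_event { BIND_CDEV, UNBIND_CDEV, WEIGHT_CHANGED };

static int allocate_actors_buffer(struct params *p, unsigned int num_actors)
{
	/* Drop the old buffer and start from a clean state. */
	free(p->power);
	p->power = NULL;
	p->num_actors = 0;
	p->buffer_size = 0;

	if (!num_actors)
		return -1;   /* no cooling devices bound yet */

	p->power = calloc(num_actors, sizeof(*p->power));
	if (!p->power)
		return -1;

	p->num_actors = num_actors;
	p->buffer_size = num_actors * sizeof(*p->power);
	return 0;
}

static void update_tz(struct params *p, enum notify_event reason,
		      const struct instance *insts, int n)
{
	unsigned int num_actors = 0;
	int i;

	switch (reason) {
	case BIND_CDEV:
	case UNBIND_CDEV:
		for (i = 0; i < n; i++)
			if (insts[i].is_power_actor)
				num_actors++;
		if (num_actors != p->num_actors)  /* resize only on change */
			allocate_actors_buffer(p, num_actors);
		break;
	case WEIGHT_CHANGED:
		p->total_weight = 0;
		for (i = 0; i < n; i++)
			if (insts[i].is_power_actor)
				p->total_weight += insts[i].weight;
		break;
	}
}

int main(void)
{
	/* Hypothetical zone with two power actors and one plain cdev. */
	struct instance insts[] = { { 1, 256 }, { 0, 0 }, { 1, 768 } };
	struct params p = { 0 };

	update_tz(&p, BIND_CDEV, insts, 3);
	update_tz(&p, WEIGHT_CHANGED, insts, 3);
	printf("actors=%u buffer=%zu bytes total_weight=%d\n",
	       p.num_actors, p.buffer_size, p.total_weight);
	free(p.power);
	return 0;
}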