From ad62e1e67751d21a3e15bca9d3ededc73de2cfa0 Mon Sep 17 00:00:00 2001
From: Ashwin Chaugule
Date: Wed, 17 Feb 2016 13:20:59 -0700
Subject: ACPI / CPPC: Optimize PCC Read Write operations

Previously, send_pcc_cmd() checked whether the PCC operation had
completed before returning from the function, regardless of the PCC
op type (i.e. Read/Write). The cmd type can be used to optimize that
check and avoid needless waiting: for Write ops, the actual writing is
done before send_pcc_cmd() is called, and each subsequent Write checks
whether the channel is free at the entry of send_pcc_cmd() anyway. For
Read cmds, however, we need to wait for the cmd completion bit to be
flipped, since the actual Read ops follow after returning from
send_pcc_cmd(). So, only do the looping check at the end for Read ops.

Also, instead of using udelay() calls, use ktime as a means to check
for deadlines. The deadline by which the Remote should flip the cmd
completion bit is defined as N * Nominal latency, where N is arbitrary
and large enough to work on slow emulators, and the Nominal latency
comes from the ACPI table (PCCT). This works around the CONFIG_HZ
effects on udelay() and also avoids needing different ACPI tables for
Silicon and Emulation platforms.

Signed-off-by: Ashwin Chaugule
Signed-off-by: Prashanth Prakash
Signed-off-by: Rafael J. Wysocki
---
 drivers/acpi/cppc_acpi.c | 101 +++++++++++++++++++++++++++++++++--------------
 1 file changed, 72 insertions(+), 29 deletions(-)

(limited to 'drivers/acpi/cppc_acpi.c')

diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 6730f965b379..026cdd4d491a 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -39,6 +39,7 @@
 #include
 #include
+#include
 #include

 /*
@@ -63,25 +64,53 @@ static struct mbox_chan *pcc_channel;
 static void __iomem *pcc_comm_addr;
 static u64 comm_base_addr;
 static int pcc_subspace_idx = -1;
-static u16 pcc_cmd_delay;
 static bool pcc_channel_acquired;
+static ktime_t deadline;

 /*
  * Arbitrary Retries in case the remote processor is slow to respond
- * to PCC commands.
+ * to PCC commands. Keeping it high enough to cover emulators where
+ * the processors run painfully slow.
  */
 #define NUM_RETRIES 500

+static int check_pcc_chan(void)
+{
+	int ret = -EIO;
+	struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_comm_addr;
+	ktime_t next_deadline = ktime_add(ktime_get(), deadline);
+
+	/* Retry in case the remote processor was too slow to catch up. */
+	while (!ktime_after(ktime_get(), next_deadline)) {
+		if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
+			ret = 0;
+			break;
+		}
+		/*
+		 * Reducing the bus traffic in case this loop takes longer than
+		 * a few retries.
+		 */
+		udelay(3);
+	}
+
+	return ret;
+}
+
 static int send_pcc_cmd(u16 cmd)
 {
-	int retries, result = -EIO;
-	struct acpi_pcct_hw_reduced *pcct_ss = pcc_channel->con_priv;
+	int ret = -EIO;
 	struct acpi_pcct_shared_memory *generic_comm_base =
 		(struct acpi_pcct_shared_memory *) pcc_comm_addr;
-	u32 cmd_latency = pcct_ss->latency;

-	/* Min time OS should wait before sending next command. */
-	udelay(pcc_cmd_delay);
+	/*
+	 * For CMD_WRITE we know for a fact the caller should have checked
+	 * the channel before writing to PCC space
+	 */
+	if (cmd == CMD_READ) {
+		ret = check_pcc_chan();
+		if (ret)
+			return ret;
+	}

 	/* Write to the shared comm region. */
 	writew(cmd, &generic_comm_base->command);

@@ -90,31 +119,30 @@ static int send_pcc_cmd(u16 cmd)
 	writew(0, &generic_comm_base->status);

 	/* Ring doorbell */
-	result = mbox_send_message(pcc_channel, &cmd);
-	if (result < 0) {
+	ret = mbox_send_message(pcc_channel, &cmd);
+	if (ret < 0) {
 		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
-				cmd, result);
-		return result;
+				cmd, ret);
+		return ret;
 	}

-	/* Wait for a nominal time to let platform process command. */
-	udelay(cmd_latency);
-
-	/* Retry in case the remote processor was too slow to catch up. */
-	for (retries = NUM_RETRIES; retries > 0; retries--) {
-		if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
-			result = 0;
-			break;
-		}
-	}
+	/*
+	 * For READs we need to ensure the cmd completed to ensure
+	 * the ensuing read()s can proceed. For WRITEs we dont care
+	 * because the actual write()s are done before coming here
+	 * and the next READ or WRITE will check if the channel
+	 * is busy/free at the entry of this call.
+	 */
+	if (cmd == CMD_READ)
+		ret = check_pcc_chan();

-	mbox_client_txdone(pcc_channel, result);
-	return result;
+	mbox_client_txdone(pcc_channel, ret);
+	return ret;
 }

 static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
 {
-	if (ret)
+	if (ret < 0)
 		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
 			 *(u16 *)msg, ret);
 	else
@@ -306,6 +334,7 @@ static int register_pcc_channel(int pcc_subspace_idx)
 {
 	struct acpi_pcct_hw_reduced *cppc_ss;
 	unsigned int len;
+	u64 usecs_lat;

 	if (pcc_subspace_idx >= 0) {
 		pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
@@ -335,7 +364,14 @@ static int register_pcc_channel(int pcc_subspace_idx)
 	 */
 	comm_base_addr = cppc_ss->base_address;
 	len = cppc_ss->length;
-	pcc_cmd_delay = cppc_ss->min_turnaround_time;
+
+	/*
+	 * cppc_ss->latency is just a Nominal value. In reality
+	 * the remote processor could be much slower to reply.
+	 * So add an arbitrary amount of wait on top of Nominal.
+	 */
+	usecs_lat = NUM_RETRIES * cppc_ss->latency;
+	deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);

 	pcc_comm_addr = acpi_os_ioremap(comm_base_addr, len);
 	if (!pcc_comm_addr) {
@@ -604,7 +640,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
 		(ref_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
 		(nom_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
 		/* Ring doorbell once to update PCC subspace */
-		if (send_pcc_cmd(CMD_READ)) {
+		if (send_pcc_cmd(CMD_READ) < 0) {
 			ret = -EIO;
 			goto out_err;
 		}
@@ -662,7 +698,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
 	if ((delivered_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
 		(reference_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
 		/* Ring doorbell once to update PCC subspace */
-		if (send_pcc_cmd(CMD_READ)) {
+		if (send_pcc_cmd(CMD_READ) < 0) {
 			ret = -EIO;
 			goto out_err;
 		}
@@ -713,6 +749,13 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)

 	spin_lock(&pcc_lock);

+	/* If this is PCC reg, check if channel is free before writing */
+	if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+		ret = check_pcc_chan();
+		if (ret)
+			goto busy_channel;
+	}
+
 	/*
 	 * Skip writing MIN/MAX until Linux knows how to come up with
 	 * useful values.
@@ -722,10 +765,10 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)

 	/* Is this a PCC reg ?*/
 	if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
 		/* Ring doorbell so Remote can get our perf request. */
-		if (send_pcc_cmd(CMD_WRITE))
+		if (send_pcc_cmd(CMD_WRITE) < 0)
 			ret = -EIO;
 	}
-
+busy_channel:
 	spin_unlock(&pcc_lock);

 	return ret;
--
cgit v1.2.3
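
[Editor's note: the deadline-based polling in check_pcc_chan() above is a general technique. The following is a minimal stand-alone user-space C sketch of it, not kernel code: clock_gettime(CLOCK_MONOTONIC) stands in for ktime_get(), and cmd_complete(), NOMINAL_LATENCY_US and the poll counter are hypothetical stubs for the PCC status bit and the PCCT latency field.]

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define NUM_RETRIES        500	/* arbitrary headroom, as in the patch */
#define NOMINAL_LATENCY_US 100	/* would come from the PCCT in ACPI */

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Stub for the PCC "command complete" status bit. */
static int polls;
static bool cmd_complete(void)
{
	return ++polls > 4;	/* pretend the platform finishes eventually */
}

/* Poll until a time deadline expires, not for a fixed iteration count. */
static int check_chan(void)
{
	long long deadline = now_ns() +
		(long long)NUM_RETRIES * NOMINAL_LATENCY_US * 1000;

	while (now_ns() <= deadline) {
		if (cmd_complete())
			return 0;
		usleep(3);	/* back off to reduce bus traffic */
	}
	return -1;		/* remote missed the deadline */
}

int main(void)
{
	printf("check_chan() = %d after %d polls\n", check_chan(), polls);
	return 0;
}

Bounding the wait by elapsed time rather than by iteration count is what makes the timeout independent of CONFIG_HZ and of how slowly an emulator executes the loop.
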
From 77e3d86f2f8c533c9c72fb5585b623b447d9bd57 Mon Sep 17 00:00:00 2001
From: "Prakash, Prashanth"
Date: Wed, 17 Feb 2016 13:21:00 -0700
Subject: ACPI / CPPC: optimized cpc_read and cpc_write

cpc_read and cpc_write are used while holding the pcc_lock spin_lock,
so they need to be as fast as possible. The acpi_os_read/write_memory
APIs linearly search a list for a cached mapping, which is quite
expensive. Since the PCC subspace is already mapped into the virtual
address space during initialization, we can simply add the offset and
access the necessary CPPC registers directly.

This patch, plus similar changes to the PCC driver, reduces the time
per frequency transition from around 200us to about 20us for the CPPC
cpufreq driver.

Signed-off-by: Prashanth Prakash
Acked-by: Ashwin Chaugule
Signed-off-by: Rafael J. Wysocki
---
 drivers/acpi/cppc_acpi.c | 80 ++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 64 insertions(+), 16 deletions(-)

(limited to 'drivers/acpi/cppc_acpi.c')

diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 026cdd4d491a..6970b44d37d5 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -67,6 +67,9 @@ static int pcc_subspace_idx = -1;
 static bool pcc_channel_acquired;
 static ktime_t deadline;

+/* pcc mapped address + header size + offset within PCC subspace */
+#define GET_PCC_VADDR(offs) (pcc_comm_addr + 0x8 + (offs))
+
 /*
  * Arbitrary Retries in case the remote processor is slow to respond
  * to PCC commands. Keeping it high enough to cover emulators where
@@ -582,29 +585,74 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
 }
 EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

-static u64 get_phys_addr(struct cpc_reg *reg)
-{
-	/* PCC communication addr space begins at byte offset 0x8. */
-	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
-		return (u64)comm_base_addr + 0x8 + reg->address;
-	else
-		return reg->address;
-}
+/*
+ * Since cpc_read and cpc_write are called while holding pcc_lock, it should be
+ * as fast as possible. We have already mapped the PCC subspace during init, so
+ * we can directly write to it.
+ */

-static void cpc_read(struct cpc_reg *reg, u64 *val)
+static int cpc_read(struct cpc_reg *reg, u64 *val)
 {
-	u64 addr = get_phys_addr(reg);
+	int ret_val = 0;
+
+	*val = 0;
+	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+		void __iomem *vaddr = GET_PCC_VADDR(reg->address);

-	acpi_os_read_memory((acpi_physical_address)addr,
-			val, reg->bit_width);
+		switch (reg->bit_width) {
+		case 8:
+			*val = readb(vaddr);
+			break;
+		case 16:
+			*val = readw(vaddr);
+			break;
+		case 32:
+			*val = readl(vaddr);
+			break;
+		case 64:
+			*val = readq(vaddr);
+			break;
+		default:
+			pr_debug("Error: Cannot read %u bit width from PCC\n",
+				reg->bit_width);
+			ret_val = -EFAULT;
+		}
+	} else
+		ret_val = acpi_os_read_memory((acpi_physical_address)reg->address,
+					val, reg->bit_width);
+	return ret_val;
 }

-static void cpc_write(struct cpc_reg *reg, u64 val)
+static int cpc_write(struct cpc_reg *reg, u64 val)
 {
-	u64 addr = get_phys_addr(reg);
+	int ret_val = 0;
+
+	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+		void __iomem *vaddr = GET_PCC_VADDR(reg->address);

-	acpi_os_write_memory((acpi_physical_address)addr,
-			val, reg->bit_width);
+		switch (reg->bit_width) {
+		case 8:
+			writeb(val, vaddr);
+			break;
+		case 16:
+			writew(val, vaddr);
+			break;
+		case 32:
+			writel(val, vaddr);
+			break;
+		case 64:
+			writeq(val, vaddr);
+			break;
+		default:
+			pr_debug("Error: Cannot write %u bit width to PCC\n",
+				reg->bit_width);
+			ret_val = -EFAULT;
+			break;
+		}
+	} else
+		ret_val = acpi_os_write_memory((acpi_physical_address)reg->address,
+					val, reg->bit_width);
+	return ret_val;
 }

 /**
--
cgit v1.2.3
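
[Editor's note: a stand-alone user-space sketch of the width-dispatched access that replaces the acpi_os_read_memory() lookup, under these assumptions: a heap buffer stands in for the ioremap()ed PCC subspace, and PCC_HDR_SIZE mirrors the 0x8 header skip in GET_PCC_VADDR(). The names here are illustrative, not the kernel's.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PCC_HDR_SIZE 0x8	/* generic PCC subspace header */

static unsigned char pcc_space[64];	/* stand-in for the mapped subspace */

static int cpc_read_width(uint64_t offs, unsigned int bit_width, uint64_t *val)
{
	const void *vaddr = pcc_space + PCC_HDR_SIZE + offs;

	/* One fixed-size copy from a known mapping; no list search. */
	switch (bit_width) {
	case 8:  *val = *(const uint8_t *)vaddr; break;
	case 16: { uint16_t v; memcpy(&v, vaddr, sizeof(v)); *val = v; break; }
	case 32: { uint32_t v; memcpy(&v, vaddr, sizeof(v)); *val = v; break; }
	case 64: { uint64_t v; memcpy(&v, vaddr, sizeof(v)); *val = v; break; }
	default:
		fprintf(stderr, "cannot read %u bit width\n", bit_width);
		return -1;
	}
	return 0;
}

int main(void)
{
	uint64_t val;

	pcc_space[PCC_HDR_SIZE] = 0x2a;
	if (!cpc_read_width(0, 8, &val))
		printf("read 0x%llx\n", (unsigned long long)val);
	return 0;
}

The savings come entirely from doing the mapping once at init: the per-access cost drops to an offset addition and a fixed-width load.
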
From beee23aebc6650609ef1547f6d813fa5065f74aa Mon Sep 17 00:00:00 2001
From: "Prakash, Prashanth"
Date: Wed, 17 Feb 2016 13:21:02 -0700
Subject: ACPI / CPPC: replace writeX/readX to PCC with relaxed version

We do not have a strict read/write order requirement while accessing
the PCC subspace. The only requirement is that all accesses must be
committed before triggering the PCC doorbell to transfer ownership of
PCC to the platform, and that requirement is enforced by the PCC
driver.

Profiling on a many-core system shows an improvement of about 1.8us on
average per frequency change request (about 10% improvement on
average). Since these operations are executed while holding the
pcc_lock, reducing this time helps the CPPC implementation scale much
better as the number of cores increases.

Signed-off-by: Prashanth Prakash
Acked-by: Ashwin Chaugule
Signed-off-by: Rafael J. Wysocki
---
 drivers/acpi/cppc_acpi.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

(limited to 'drivers/acpi/cppc_acpi.c')

diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 6970b44d37d5..8a0911a13599 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -116,10 +116,10 @@ static int send_pcc_cmd(u16 cmd)
 	}

 	/* Write to the shared comm region. */
-	writew(cmd, &generic_comm_base->command);
+	writew_relaxed(cmd, &generic_comm_base->command);

 	/* Flip CMD COMPLETE bit */
-	writew(0, &generic_comm_base->status);
+	writew_relaxed(0, &generic_comm_base->status);

 	/* Ring doorbell */
 	ret = mbox_send_message(pcc_channel, &cmd);
@@ -601,16 +601,16 @@ static int cpc_read(struct cpc_reg *reg, u64 *val)

 		switch (reg->bit_width) {
 		case 8:
-			*val = readb(vaddr);
+			*val = readb_relaxed(vaddr);
 			break;
 		case 16:
-			*val = readw(vaddr);
+			*val = readw_relaxed(vaddr);
 			break;
 		case 32:
-			*val = readl(vaddr);
+			*val = readl_relaxed(vaddr);
 			break;
 		case 64:
-			*val = readq(vaddr);
+			*val = readq_relaxed(vaddr);
 			break;
 		default:
 			pr_debug("Error: Cannot read %u bit width from PCC\n",
@@ -632,16 +632,16 @@ static int cpc_write(struct cpc_reg *reg, u64 val)

 		switch (reg->bit_width) {
 		case 8:
-			writeb(val, vaddr);
+			writeb_relaxed(val, vaddr);
 			break;
 		case 16:
-			writew(val, vaddr);
+			writew_relaxed(val, vaddr);
 			break;
 		case 32:
-			writel(val, vaddr);
+			writel_relaxed(val, vaddr);
 			break;
 		case 64:
-			writeq(val, vaddr);
+			writeq_relaxed(val, vaddr);
 			break;
 		default:
 			pr_debug("Error: Cannot write %u bit width to PCC\n",
--
cgit v1.2.3
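
[Editor's note: the ordering argument above can be illustrated with C11 atomics — as an analogy only, since readX/writeX_relaxed() are kernel MMIO accessors, not atomics. Relaxed stores fill the shared region in any order; a single release store on the "doorbell" makes all of them visible before the doorbell itself. All names below are hypothetical.]

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t shared_buf[4];	/* stand-in for the PCC region */
static _Atomic uint32_t doorbell;

void post_request(const uint32_t *regs)
{
	/* No ordering is needed among the payload stores themselves. */
	for (int i = 0; i < 4; i++)
		atomic_store_explicit(&shared_buf[i], regs[i],
				      memory_order_relaxed);

	/*
	 * Single ordering point: the release store ensures every store
	 * above is visible before the doorbell value itself.
	 */
	atomic_store_explicit(&doorbell, 1, memory_order_release);
}

In the kernel patch the release-like barrier lives in the PCC mailbox driver's doorbell path, which is why each individual register access can safely be relaxed.
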
From f387e5b901adb8352fcdf031bb2c539e390b92e2 Mon Sep 17 00:00:00 2001
From: "Prakash, Prashanth"
Date: Wed, 17 Feb 2016 13:21:03 -0700
Subject: ACPI / CPPC: use MRTT/MPAR to decide if/when a req can be sent

The ACPI spec defines the Minimum Request Turnaround Time (MRTT) and
the Maximum Periodic Access Rate (MPAR) to prevent the OSPM from
sending more requests than the platform can handle. For further
details on these parameters, please refer to section 14.1.3 of the
ACPI 6.0 spec.

This patch takes MRTT/MPAR into account when deciding if or when a
CPPC request can be sent to the platform, to make sure the CPPC
implementation is compliant with the spec.

Signed-off-by: Prashanth Prakash
Acked-by: Ashwin Chaugule
Signed-off-by: Rafael J. Wysocki
---
 drivers/acpi/cppc_acpi.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 55 insertions(+), 1 deletion(-)

(limited to 'drivers/acpi/cppc_acpi.c')

diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 8a0911a13599..8adac69dba3d 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -66,6 +66,7 @@ static u64 comm_base_addr;
 static int pcc_subspace_idx = -1;
 static bool pcc_channel_acquired;
 static ktime_t deadline;
+static unsigned int pcc_mpar, pcc_mrtt;

 /* pcc mapped address + header size + offset within PCC subspace */
 #define GET_PCC_VADDR(offs) (pcc_comm_addr + 0x8 + (offs))
@@ -85,6 +86,11 @@ static int check_pcc_chan(void)

 	/* Retry in case the remote processor was too slow to catch up. */
 	while (!ktime_after(ktime_get(), next_deadline)) {
+		/*
+		 * Per spec, prior to boot the PCC space wil be initialized by
+		 * platform and should have set the command completion bit when
+		 * PCC can be used by OSPM
+		 */
 		if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
 			ret = 0;
 			break;
@@ -104,6 +110,9 @@ static int send_pcc_cmd(u16 cmd)
 	int ret = -EIO;
 	struct acpi_pcct_shared_memory *generic_comm_base =
 		(struct acpi_pcct_shared_memory *) pcc_comm_addr;
+	static ktime_t last_cmd_cmpl_time, last_mpar_reset;
+	static int mpar_count;
+	unsigned int time_delta;

 	/*
 	 * For CMD_WRITE we know for a fact the caller should have checked
@@ -115,6 +124,41 @@ static int send_pcc_cmd(u16 cmd)
 			return ret;
 	}

+	/*
+	 * Handle the Minimum Request Turnaround Time(MRTT)
+	 * "The minimum amount of time that OSPM must wait after the completion
+	 * of a command before issuing the next command, in microseconds"
+	 */
+	if (pcc_mrtt) {
+		time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
+		if (pcc_mrtt > time_delta)
+			udelay(pcc_mrtt - time_delta);
+	}
+
+	/*
+	 * Handle the non-zero Maximum Periodic Access Rate(MPAR)
+	 * "The maximum number of periodic requests that the subspace channel can
+	 * support, reported in commands per minute. 0 indicates no limitation."
+	 *
+	 * This parameter should be ideally zero or large enough so that it can
+	 * handle maximum number of requests that all the cores in the system can
+	 * collectively generate. If it is not, we will follow the spec and just
+	 * not send the request to the platform after hitting the MPAR limit in
+	 * any 60s window
+	 */
+	if (pcc_mpar) {
+		if (mpar_count == 0) {
+			time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
+			if (time_delta < 60 * MSEC_PER_SEC) {
+				pr_debug("PCC cmd not sent due to MPAR limit");
+				return -EIO;
+			}
+			last_mpar_reset = ktime_get();
+			mpar_count = pcc_mpar;
+		}
+		mpar_count--;
+	}
+
 	/* Write to the shared comm region. */
 	writew_relaxed(cmd, &generic_comm_base->command);

@@ -135,9 +179,17 @@
 	 * because the actual write()s are done before coming here
 	 * and the next READ or WRITE will check if the channel
 	 * is busy/free at the entry of this call.
+	 *
+	 * If Minimum Request Turnaround Time is non-zero, we need
+	 * to record the completion time of both READ and WRITE
+	 * command for proper handling of MRTT, so we need to check
+	 * for pcc_mrtt in addition to CMD_READ
 	 */
-	if (cmd == CMD_READ)
+	if (cmd == CMD_READ || pcc_mrtt) {
 		ret = check_pcc_chan();
+		if (pcc_mrtt)
+			last_cmd_cmpl_time = ktime_get();
+	}

 	mbox_client_txdone(pcc_channel, ret);
 	return ret;
@@ -375,6 +427,8 @@ static int register_pcc_channel(int pcc_subspace_idx)
 	 */
 	usecs_lat = NUM_RETRIES * cppc_ss->latency;
 	deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
+	pcc_mrtt = cppc_ss->min_turnaround_time;
+	pcc_mpar = cppc_ss->max_access_rate;

 	pcc_comm_addr = acpi_os_ioremap(comm_base_addr, len);
 	if (!pcc_comm_addr) {
--
cgit v1.2.3
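
[Editor's note: a stand-alone user-space sketch of the MRTT/MPAR gating added above, under these assumptions: mrtt_us and mpar would come from the PCCT, now_us() replaces ktime, and the doorbell/completion steps are elided. The names are illustrative, not the kernel's.]

#include <stdio.h>
#include <time.h>
#include <unistd.h>

static unsigned int mrtt_us = 50;	/* min request turnaround time */
static unsigned int mpar = 1200;	/* max commands per 60 s window */

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

static int send_cmd(void)
{
	static long long last_cmpl_us, last_window_us;
	static unsigned int budget;

	/* MRTT: space this command out from the previous completion. */
	if (mrtt_us) {
		long long delta = now_us() - last_cmpl_us;

		if (delta < mrtt_us)
			usleep(mrtt_us - delta);
	}

	/* MPAR: allow at most 'mpar' commands in any 60 s window. */
	if (mpar) {
		if (budget == 0) {
			if (now_us() - last_window_us < 60 * 1000000LL)
				return -1;	/* over budget: drop request */
			last_window_us = now_us();
			budget = mpar;
		}
		budget--;
	}

	/* ... ring doorbell and wait for completion here ... */
	last_cmpl_us = now_us();
	return 0;
}

int main(void)
{
	printf("send_cmd() = %d\n", send_cmd());
	return 0;
}

As in the patch, MRTT is enforced by delaying relative to the previous command's completion time, while MPAR is enforced with a refillable per-minute budget that simply drops requests once exhausted.
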