Diffstat (limited to 'drivers/firmware')
-rw-r--r--  drivers/firmware/arm_scmi/Kconfig | 12
-rw-r--r--  drivers/firmware/arm_scmi/Makefile | 3
-rw-r--r--  drivers/firmware/arm_scmi/driver.c | 281
-rw-r--r--  drivers/firmware/arm_scmi/perf.c | 243
-rw-r--r--  drivers/firmware/arm_scmi/powercap.c | 866
-rw-r--r--  drivers/firmware/arm_scmi/protocols.h | 23
-rw-r--r--  drivers/firmware/arm_scmi/scmi_power_control.c | 362
-rw-r--r--  drivers/firmware/arm_scmi/system.c | 17
-rw-r--r--  drivers/firmware/arm_scpi.c | 61
-rw-r--r--  drivers/firmware/cirrus/cs_dsp.c | 107
-rw-r--r--  drivers/firmware/efi/Kconfig | 14
-rw-r--r--  drivers/firmware/efi/Makefile | 1
-rw-r--r--  drivers/firmware/efi/efi-init.c | 1
-rw-r--r--  drivers/firmware/efi/efi-pstore.c | 377
-rw-r--r--  drivers/firmware/efi/efi.c | 127
-rw-r--r--  drivers/firmware/efi/efibc.c | 76
-rw-r--r--  drivers/firmware/efi/efivars.c | 671
-rw-r--r--  drivers/firmware/efi/libstub/riscv-stub.c | 13
-rw-r--r--  drivers/firmware/efi/memmap.c | 5
-rw-r--r--  drivers/firmware/efi/reboot.c | 21
-rw-r--r--  drivers/firmware/efi/vars.c | 1219
-rw-r--r--  drivers/firmware/mtk-adsp-ipc.c | 36
-rw-r--r--  drivers/firmware/qcom_scm-legacy.c | 4
-rw-r--r--  drivers/firmware/qcom_scm.c | 71
-rw-r--r--  drivers/firmware/stratix10-rsu.c | 129
-rw-r--r--  drivers/firmware/stratix10-svc.c | 201
-rw-r--r--  drivers/firmware/tegra/bpmp-debugfs.c | 10
-rw-r--r--  drivers/firmware/tegra/bpmp.c | 6
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c | 16
29 files changed, 2503 insertions, 2470 deletions
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
index 1e7b7fec97d9..a14f65444b35 100644
--- a/drivers/firmware/arm_scmi/Kconfig
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -149,4 +149,16 @@ config ARM_SCMI_POWER_DOMAIN
will be called scmi_pm_domain. Note this may be needed early in boot
before rootfs may be available.
+config ARM_SCMI_POWER_CONTROL
+ tristate "SCMI system power control driver"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ help
+ This enables System Power control logic which binds system shutdown or
+ reboot actions to SCMI System Power notifications generated by SCP
+ firmware.
+
+ This driver can also be built as a module. If so, the module will be
+ called scmi_power_control. Note this may be needed early in boot to catch
+ early shutdown/reboot SCMI requests.
+
endmenu
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 8d4afadda38c..9ea86f8cc8f7 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -7,11 +7,12 @@ scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o
-scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o
+scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o
scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \
$(scmi-transport-y)
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
+obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o
ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy)
# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 8b7ac6663d57..609ebedee9cb 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -19,6 +19,7 @@
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
@@ -60,6 +61,11 @@ static atomic_t transfer_last_id;
static DEFINE_IDR(scmi_requested_devices);
static DEFINE_MUTEX(scmi_requested_devices_mtx);
+/* Track globally the creation of SCMI SystemPower related devices */
+static bool scmi_syspower_registered;
+/* Protect access to scmi_syspower_registered */
+static DEFINE_MUTEX(scmi_syspower_mtx);
+
struct scmi_requested_dev {
const struct scmi_device_id *id_table;
struct list_head node;
@@ -660,6 +666,11 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
smp_store_mb(xfer->priv, priv);
info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
xfer);
+
+ trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "NOTI",
+ xfer->hdr.seq, xfer->hdr.status,
+ xfer->rx.buf, xfer->rx.len);
+
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
@@ -694,6 +705,12 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
smp_store_mb(xfer->priv, priv);
info->desc->ops->fetch_response(cinfo, xfer);
+ trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
+ xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
+ "DLYD" : "RESP",
+ xfer->hdr.seq, xfer->hdr.status,
+ xfer->rx.buf, xfer->rx.len);
+
trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.type);
@@ -827,6 +844,12 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
xfer->state = SCMI_XFER_RESP_OK;
}
spin_unlock_irqrestore(&xfer->lock, flags);
+
+ /* Trace polled replies. */
+ trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
+ "RESP",
+ xfer->hdr.seq, xfer->hdr.status,
+ xfer->rx.buf, xfer->rx.len);
}
} else {
/* And we wait for the response. */
@@ -903,6 +926,10 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
return ret;
}
+ trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "CMND",
+ xfer->hdr.seq, xfer->hdr.status,
+ xfer->tx.buf, xfer->tx.len);
+
ret = scmi_wait_for_message_response(cinfo, xfer);
if (!ret && xfer->hdr.status)
ret = scmi_to_linux_errno(xfer->hdr.status);
@@ -1259,10 +1286,174 @@ out:
return ret;
}
+struct scmi_msg_get_fc_info {
+ __le32 domain;
+ __le32 message_id;
+};
+
+struct scmi_msg_resp_desc_fc {
+ __le32 attr;
+#define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
+#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
+ __le32 rate_limit;
+ __le32 chan_addr_low;
+ __le32 chan_addr_high;
+ __le32 chan_size;
+ __le32 db_addr_low;
+ __le32 db_addr_high;
+ __le32 db_set_lmask;
+ __le32 db_set_hmask;
+ __le32 db_preserve_lmask;
+ __le32 db_preserve_hmask;
+};
+
+static void
+scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
+ u8 describe_id, u32 message_id, u32 valid_size,
+ u32 domain, void __iomem **p_addr,
+ struct scmi_fc_db_info **p_db)
+{
+ int ret;
+ u32 flags;
+ u64 phys_addr;
+ u8 size;
+ void __iomem *addr;
+ struct scmi_xfer *t;
+ struct scmi_fc_db_info *db = NULL;
+ struct scmi_msg_get_fc_info *info;
+ struct scmi_msg_resp_desc_fc *resp;
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ if (!p_addr) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ret = ph->xops->xfer_get_init(ph, describe_id,
+ sizeof(*info), sizeof(*resp), &t);
+ if (ret)
+ goto err_out;
+
+ info = t->tx.buf;
+ info->domain = cpu_to_le32(domain);
+ info->message_id = cpu_to_le32(message_id);
+
+ /*
+ * Bail out on error leaving fc_info addresses zeroed; this includes
+ * the case in which the requested domain/message_id does NOT support
+ * fastchannels at all.
+ */
+ ret = ph->xops->do_xfer(ph, t);
+ if (ret)
+ goto err_xfer;
+
+ resp = t->rx.buf;
+ flags = le32_to_cpu(resp->attr);
+ size = le32_to_cpu(resp->chan_size);
+ if (size != valid_size) {
+ ret = -EINVAL;
+ goto err_xfer;
+ }
+
+ phys_addr = le32_to_cpu(resp->chan_addr_low);
+ phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
+ addr = devm_ioremap(ph->dev, phys_addr, size);
+ if (!addr) {
+ ret = -EADDRNOTAVAIL;
+ goto err_xfer;
+ }
+
+ *p_addr = addr;
+
+ if (p_db && SUPPORTS_DOORBELL(flags)) {
+ db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
+ if (!db) {
+ ret = -ENOMEM;
+ goto err_db;
+ }
+
+ size = 1 << DOORBELL_REG_WIDTH(flags);
+ phys_addr = le32_to_cpu(resp->db_addr_low);
+ phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
+ addr = devm_ioremap(ph->dev, phys_addr, size);
+ if (!addr) {
+ ret = -EADDRNOTAVAIL;
+ goto err_db_mem;
+ }
+
+ db->addr = addr;
+ db->width = size;
+ db->set = le32_to_cpu(resp->db_set_lmask);
+ db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
+ db->mask = le32_to_cpu(resp->db_preserve_lmask);
+ db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
+
+ *p_db = db;
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ dev_dbg(ph->dev,
+ "Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
+ pi->proto->id, message_id, domain);
+
+ return;
+
+err_db_mem:
+ devm_kfree(ph->dev, db);
+
+err_db:
+ *p_addr = NULL;
+
+err_xfer:
+ ph->xops->xfer_put(ph, t);
+
+err_out:
+ dev_warn(ph->dev,
+ "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
+ pi->proto->id, message_id, domain, ret);
+}
+
+#define SCMI_PROTO_FC_RING_DB(w) \
+do { \
+ u##w val = 0; \
+ \
+ if (db->mask) \
+ val = ioread##w(db->addr) & db->mask; \
+ iowrite##w((u##w)db->set | val, db->addr); \
+} while (0)
+
+static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
+{
+ if (!db || !db->addr)
+ return;
+
+ if (db->width == 1)
+ SCMI_PROTO_FC_RING_DB(8);
+ else if (db->width == 2)
+ SCMI_PROTO_FC_RING_DB(16);
+ else if (db->width == 4)
+ SCMI_PROTO_FC_RING_DB(32);
+ else /* db->width == 8 */
+#ifdef CONFIG_64BIT
+ SCMI_PROTO_FC_RING_DB(64);
+#else
+ {
+ u64 val = 0;
+
+ if (db->mask)
+ val = ioread64_hi_lo(db->addr) & db->mask;
+ iowrite64_hi_lo(db->set | val, db->addr);
+ }
+#endif
+}
+
static const struct scmi_proto_helpers_ops helpers_ops = {
.extended_name_get = scmi_common_extended_name_get,
.iter_response_init = scmi_iterator_init,
.iter_response_run = scmi_iterator_run,
+ .fastchannel_init = scmi_common_fastchannel_init,
+ .fastchannel_db_ring = scmi_common_fastchannel_db_ring,
};
/**
@@ -1497,6 +1688,30 @@ static void scmi_devm_release_protocol(struct device *dev, void *res)
scmi_protocol_release(dres->handle, dres->protocol_id);
}
+static struct scmi_protocol_instance __must_check *
+scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id)
+{
+ struct scmi_protocol_instance *pi;
+ struct scmi_protocol_devres *dres;
+
+ dres = devres_alloc(scmi_devm_release_protocol,
+ sizeof(*dres), GFP_KERNEL);
+ if (!dres)
+ return ERR_PTR(-ENOMEM);
+
+ pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
+ if (IS_ERR(pi)) {
+ devres_free(dres);
+ return pi;
+ }
+
+ dres->handle = sdev->handle;
+ dres->protocol_id = protocol_id;
+ devres_add(&sdev->dev, dres);
+
+ return pi;
+}
+
/**
* scmi_devm_protocol_get - Devres managed get protocol operations and handle
* @sdev: A reference to an scmi_device whose embedded struct device is to
@@ -1520,32 +1735,47 @@ scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
struct scmi_protocol_handle **ph)
{
struct scmi_protocol_instance *pi;
- struct scmi_protocol_devres *dres;
- struct scmi_handle *handle = sdev->handle;
if (!ph)
return ERR_PTR(-EINVAL);
- dres = devres_alloc(scmi_devm_release_protocol,
- sizeof(*dres), GFP_KERNEL);
- if (!dres)
- return ERR_PTR(-ENOMEM);
-
- pi = scmi_get_protocol_instance(handle, protocol_id);
- if (IS_ERR(pi)) {
- devres_free(dres);
+ pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
+ if (IS_ERR(pi))
return pi;
- }
-
- dres->handle = handle;
- dres->protocol_id = protocol_id;
- devres_add(&sdev->dev, dres);
*ph = &pi->ph;
return pi->proto->ops;
}
+/**
+ * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
+ * be used for devres accounting.
+ * @protocol_id: The protocol being requested.
+ *
+ * Get hold of a protocol accounting for its usage, possibly triggering its
+ * initialization but without getting access to its protocol specific operations
+ * and handle.
+ *
+ * Being a devres based managed method, protocol hold will be automatically
+ * released, and possibly de-initialized on last user, once the SCMI driver
+ * owning the scmi_device is unbound from it.
+ *
+ * Return: 0 on SUCCESS
+ */
+static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev,
+ u8 protocol_id)
+{
+ struct scmi_protocol_instance *pi;
+
+ pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
+ if (IS_ERR(pi))
+ return PTR_ERR(pi);
+
+ return 0;
+}
+
static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
{
struct scmi_protocol_devres *dres = res;
@@ -1849,21 +2079,39 @@ scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
if (sdev)
return sdev;
+ mutex_lock(&scmi_syspower_mtx);
+ if (prot_id == SCMI_PROTOCOL_SYSTEM && scmi_syspower_registered) {
+ dev_warn(info->dev,
+ "SCMI SystemPower protocol device must be unique !\n");
+ mutex_unlock(&scmi_syspower_mtx);
+
+ return NULL;
+ }
+
pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
sdev = scmi_device_create(np, info->dev, prot_id, name);
if (!sdev) {
dev_err(info->dev, "failed to create %d protocol device\n",
prot_id);
+ mutex_unlock(&scmi_syspower_mtx);
+
return NULL;
}
if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
dev_err(&sdev->dev, "failed to setup transport\n");
scmi_device_destroy(sdev);
+ mutex_unlock(&scmi_syspower_mtx);
+
return NULL;
}
+ if (prot_id == SCMI_PROTOCOL_SYSTEM)
+ scmi_syspower_registered = true;
+
+ mutex_unlock(&scmi_syspower_mtx);
+
return sdev;
}
@@ -2132,6 +2380,7 @@ static int scmi_probe(struct platform_device *pdev)
handle = &info->handle;
handle->dev = info->dev;
handle->version = &info->version;
+ handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
handle->devm_protocol_get = scmi_devm_protocol_get;
handle->devm_protocol_put = scmi_devm_protocol_put;
@@ -2401,6 +2650,7 @@ static int __init scmi_driver_init(void)
scmi_sensors_register();
scmi_voltage_register();
scmi_system_register();
+ scmi_powercap_register();
return platform_driver_register(&scmi_driver);
}
@@ -2417,6 +2667,7 @@ static void __exit scmi_driver_exit(void)
scmi_sensors_unregister();
scmi_voltage_unregister();
scmi_system_unregister();
+ scmi_powercap_unregister();
scmi_bus_exit();
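
For reference, the read-modify-write performed by the new SCMI_PROTO_FC_RING_DB() macro above can be illustrated outside the kernel: only the bits selected by the platform-provided preserve mask survive from the current doorbell value, and the set mask is then ORed in before the write-back. The sketch below is a minimal stand-alone C illustration, assuming a plain in-memory 32-bit "register" in place of the ioremapped doorbell and invented mask values.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct scmi_fc_db_info as used by SCMI_PROTO_FC_RING_DB(). */
struct fc_db_sketch {
        uint32_t *addr; /* would be an ioremapped doorbell in the kernel */
        uint32_t set;   /* db_set_lmask from the DESCRIBE_FASTCHANNEL reply */
        uint32_t mask;  /* db_preserve_lmask from the same reply */
};

static void fc_db_ring_sketch(struct fc_db_sketch *db)
{
        uint32_t val = 0;

        /* Preserve only the bits the platform asked to keep... */
        if (db->mask)
                val = *db->addr & db->mask;
        /* ...then OR in the set mask and write back (iowrite32() in-kernel). */
        *db->addr = db->set | val;
}

int main(void)
{
        uint32_t reg = 0xabcd1234; /* hypothetical current doorbell value */
        struct fc_db_sketch db = {
                .addr = &reg,
                .set  = 0x00000001, /* hypothetical set mask */
                .mask = 0xffff0000, /* hypothetical preserve mask */
        };

        fc_db_ring_sketch(&db);
        printf("doorbell now 0x%08x\n", reg); /* prints 0xabcd0001 */
        return 0;
}
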
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index bbb0331801ff..ecf5c4de851b 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -10,13 +10,14 @@
#include <linux/bits.h>
#include <linux/of.h>
#include <linux/io.h>
-#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/scmi_protocol.h>
#include <linux/sort.h>
+#include <trace/events/scmi.h>
+
#include "protocols.h"
#include "notify.h"
@@ -35,6 +36,12 @@ enum scmi_performance_protocol_cmd {
PERF_DOMAIN_NAME_GET = 0xc,
};
+enum {
+ PERF_FC_LEVEL,
+ PERF_FC_LIMIT,
+ PERF_FC_MAX,
+};
+
struct scmi_opp {
u32 perf;
u32 power;
@@ -115,43 +122,6 @@ struct scmi_msg_resp_perf_describe_levels {
} opp[];
};
-struct scmi_perf_get_fc_info {
- __le32 domain;
- __le32 message_id;
-};
-
-struct scmi_msg_resp_perf_desc_fc {
- __le32 attr;
-#define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
-#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
- __le32 rate_limit;
- __le32 chan_addr_low;
- __le32 chan_addr_high;
- __le32 chan_size;
- __le32 db_addr_low;
- __le32 db_addr_high;
- __le32 db_set_lmask;
- __le32 db_set_hmask;
- __le32 db_preserve_lmask;
- __le32 db_preserve_hmask;
-};
-
-struct scmi_fc_db_info {
- int width;
- u64 set;
- u64 mask;
- void __iomem *addr;
-};
-
-struct scmi_fc_info {
- void __iomem *level_set_addr;
- void __iomem *limit_set_addr;
- void __iomem *level_get_addr;
- void __iomem *limit_get_addr;
- struct scmi_fc_db_info *level_set_db;
- struct scmi_fc_db_info *limit_set_db;
-};
-
struct perf_dom_info {
bool set_limits;
bool set_perf;
@@ -170,8 +140,7 @@ struct perf_dom_info {
struct scmi_perf_info {
u32 version;
int num_domains;
- bool power_scale_mw;
- bool power_scale_uw;
+ enum scmi_power_scale power_scale;
u64 stats_addr;
u32 stats_size;
struct perf_dom_info *dom_info;
@@ -201,9 +170,13 @@ static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
u16 flags = le16_to_cpu(attr->flags);
pi->num_domains = le16_to_cpu(attr->num_domains);
- pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
+
+ if (POWER_SCALE_IN_MILLIWATT(flags))
+ pi->power_scale = SCMI_POWER_MILLIWATTS;
if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3)
- pi->power_scale_uw = POWER_SCALE_IN_MICROWATT(flags);
+ if (POWER_SCALE_IN_MICROWATT(flags))
+ pi->power_scale = SCMI_POWER_MICROWATTS;
+
pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
(u64)le32_to_cpu(attr->stats_addr_high) << 32;
pi->stats_size = le32_to_cpu(attr->stats_size);
@@ -360,40 +333,6 @@ scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
return ret;
}
-#define SCMI_PERF_FC_RING_DB(w) \
-do { \
- u##w val = 0; \
- \
- if (db->mask) \
- val = ioread##w(db->addr) & db->mask; \
- iowrite##w((u##w)db->set | val, db->addr); \
-} while (0)
-
-static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
-{
- if (!db || !db->addr)
- return;
-
- if (db->width == 1)
- SCMI_PERF_FC_RING_DB(8);
- else if (db->width == 2)
- SCMI_PERF_FC_RING_DB(16);
- else if (db->width == 4)
- SCMI_PERF_FC_RING_DB(32);
- else /* db->width == 8 */
-#ifdef CONFIG_64BIT
- SCMI_PERF_FC_RING_DB(64);
-#else
- {
- u64 val = 0;
-
- if (db->mask)
- val = ioread64_hi_lo(db->addr) & db->mask;
- iowrite64_hi_lo(db->set | val, db->addr);
- }
-#endif
-}
-
static int scmi_perf_mb_limits_set(const struct scmi_protocol_handle *ph,
u32 domain, u32 max_perf, u32 min_perf)
{
@@ -426,10 +365,14 @@ static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf)
return -EINVAL;
- if (dom->fc_info && dom->fc_info->limit_set_addr) {
- iowrite32(max_perf, dom->fc_info->limit_set_addr);
- iowrite32(min_perf, dom->fc_info->limit_set_addr + 4);
- scmi_perf_fc_ring_db(dom->fc_info->limit_set_db);
+ if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].set_addr) {
+ struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];
+
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_SET,
+ domain, min_perf, max_perf);
+ iowrite32(max_perf, fci->set_addr);
+ iowrite32(min_perf, fci->set_addr + 4);
+ ph->hops->fastchannel_db_ring(fci->set_db);
return 0;
}
@@ -468,9 +411,13 @@ static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
- if (dom->fc_info && dom->fc_info->limit_get_addr) {
- *max_perf = ioread32(dom->fc_info->limit_get_addr);
- *min_perf = ioread32(dom->fc_info->limit_get_addr + 4);
+ if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) {
+ struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];
+
+ *max_perf = ioread32(fci->get_addr);
+ *min_perf = ioread32(fci->get_addr + 4);
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_GET,
+ domain, *min_perf, *max_perf);
return 0;
}
@@ -505,9 +452,13 @@ static int scmi_perf_level_set(const struct scmi_protocol_handle *ph,
struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
- if (dom->fc_info && dom->fc_info->level_set_addr) {
- iowrite32(level, dom->fc_info->level_set_addr);
- scmi_perf_fc_ring_db(dom->fc_info->level_set_db);
+ if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) {
+ struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL];
+
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_SET,
+ domain, level, 0);
+ iowrite32(level, fci->set_addr);
+ ph->hops->fastchannel_db_ring(fci->set_db);
return 0;
}
@@ -542,8 +493,10 @@ static int scmi_perf_level_get(const struct scmi_protocol_handle *ph,
struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
- if (dom->fc_info && dom->fc_info->level_get_addr) {
- *level = ioread32(dom->fc_info->level_get_addr);
+ if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) {
+ *level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr);
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_GET,
+ domain, *level, 0);
return 0;
}
@@ -572,100 +525,33 @@ static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph,
return ret;
}
-static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
-{
- if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4)
- return true;
- if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8)
- return true;
- return false;
-}
-
-static void
-scmi_perf_domain_desc_fc(const struct scmi_protocol_handle *ph, u32 domain,
- u32 message_id, void __iomem **p_addr,
- struct scmi_fc_db_info **p_db)
-{
- int ret;
- u32 flags;
- u64 phys_addr;
- u8 size;
- void __iomem *addr;
- struct scmi_xfer *t;
- struct scmi_fc_db_info *db;
- struct scmi_perf_get_fc_info *info;
- struct scmi_msg_resp_perf_desc_fc *resp;
-
- if (!p_addr)
- return;
-
- ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_FASTCHANNEL,
- sizeof(*info), sizeof(*resp), &t);
- if (ret)
- return;
-
- info = t->tx.buf;
- info->domain = cpu_to_le32(domain);
- info->message_id = cpu_to_le32(message_id);
-
- ret = ph->xops->do_xfer(ph, t);
- if (ret)
- goto err_xfer;
-
- resp = t->rx.buf;
- flags = le32_to_cpu(resp->attr);
- size = le32_to_cpu(resp->chan_size);
- if (!scmi_perf_fc_size_is_valid(message_id, size))
- goto err_xfer;
-
- phys_addr = le32_to_cpu(resp->chan_addr_low);
- phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
- addr = devm_ioremap(ph->dev, phys_addr, size);
- if (!addr)
- goto err_xfer;
- *p_addr = addr;
-
- if (p_db && SUPPORTS_DOORBELL(flags)) {
- db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
- if (!db)
- goto err_xfer;
-
- size = 1 << DOORBELL_REG_WIDTH(flags);
- phys_addr = le32_to_cpu(resp->db_addr_low);
- phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
- addr = devm_ioremap(ph->dev, phys_addr, size);
- if (!addr)
- goto err_xfer;
-
- db->addr = addr;
- db->width = size;
- db->set = le32_to_cpu(resp->db_set_lmask);
- db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
- db->mask = le32_to_cpu(resp->db_preserve_lmask);
- db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
- *p_db = db;
- }
-err_xfer:
- ph->xops->xfer_put(ph, t);
-}
-
static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
u32 domain, struct scmi_fc_info **p_fc)
{
struct scmi_fc_info *fc;
- fc = devm_kzalloc(ph->dev, sizeof(*fc), GFP_KERNEL);
+ fc = devm_kcalloc(ph->dev, PERF_FC_MAX, sizeof(*fc), GFP_KERNEL);
if (!fc)
return;
- scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_SET,
- &fc->level_set_addr, &fc->level_set_db);
- scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_GET,
- &fc->level_get_addr, NULL);
- scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_SET,
- &fc->limit_set_addr, &fc->limit_set_db);
- scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_GET,
- &fc->limit_get_addr, NULL);
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LEVEL_SET, 4, domain,
+ &fc[PERF_FC_LEVEL].set_addr,
+ &fc[PERF_FC_LEVEL].set_db);
+
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LEVEL_GET, 4, domain,
+ &fc[PERF_FC_LEVEL].get_addr, NULL);
+
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LIMITS_SET, 8, domain,
+ &fc[PERF_FC_LIMIT].set_addr,
+ &fc[PERF_FC_LIMIT].set_db);
+
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LIMITS_GET, 8, domain,
+ &fc[PERF_FC_LIMIT].get_addr, NULL);
+
*p_fc = fc;
}
@@ -789,14 +675,15 @@ static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
dom = pi->dom_info + scmi_dev_domain_id(dev);
- return dom->fc_info && dom->fc_info->level_set_addr;
+ return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr;
}
-static bool scmi_power_scale_mw_get(const struct scmi_protocol_handle *ph)
+static enum scmi_power_scale
+scmi_power_scale_get(const struct scmi_protocol_handle *ph)
{
struct scmi_perf_info *pi = ph->get_priv(ph);
- return pi->power_scale_mw;
+ return pi->power_scale;
}
static const struct scmi_perf_proto_ops perf_proto_ops = {
@@ -811,7 +698,7 @@ static const struct scmi_perf_proto_ops perf_proto_ops = {
.freq_get = scmi_dvfs_freq_get,
.est_power_get = scmi_dvfs_est_power_get,
.fast_switch_possible = scmi_fast_switch_possible,
- .power_scale_mw_get = scmi_power_scale_mw_get,
+ .power_scale_get = scmi_power_scale_get,
};
static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph,
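
The perf.c hunks above replace the power_scale_mw/power_scale_uw booleans with a single enum scmi_power_scale reported through the new power_scale_get() operation. A hypothetical consumer-side helper (not part of this patch) normalising a reported power figure to microwatts could look like the sketch below; the zero/default value for abstract units is an assumption here, since the matching scmi_protocol.h change is outside this diff.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the values used by the new power_scale_get() operation; the
 * zero/default entry for abstract units is an assumption, as the header
 * change is not part of this diff. */
enum scmi_power_scale_sketch {
        SCMI_POWER_BOGOWATTS_SKETCH,    /* assumed: abstract/unspecified units */
        SCMI_POWER_MILLIWATTS_SKETCH,
        SCMI_POWER_MICROWATTS_SKETCH,
};

/* Hypothetical helper: normalise an SCMI-reported power value to microwatts;
 * returns 0 when only abstract units are reported. */
static uint64_t scmi_power_to_uw_sketch(uint32_t power,
                                        enum scmi_power_scale_sketch scale)
{
        switch (scale) {
        case SCMI_POWER_MILLIWATTS_SKETCH:
                return (uint64_t)power * 1000ULL;
        case SCMI_POWER_MICROWATTS_SKETCH:
                return power;
        default:
                return 0;
        }
}

int main(void)
{
        /* 250 mW reported by the platform -> 250000 uW */
        printf("%llu uW\n", (unsigned long long)
               scmi_power_to_uw_sketch(250, SCMI_POWER_MILLIWATTS_SKETCH));
        return 0;
}
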
diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c
new file mode 100644
index 000000000000..83b90bde755c
--- /dev/null
+++ b/drivers/firmware/arm_scmi/powercap.c
@@ -0,0 +1,866 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Powercap Protocol
+ *
+ * Copyright (C) 2022 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications POWERCAP - " fmt
+
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/scmi_protocol.h>
+
+#include <trace/events/scmi.h>
+
+#include "protocols.h"
+#include "notify.h"
+
+enum scmi_powercap_protocol_cmd {
+ POWERCAP_DOMAIN_ATTRIBUTES = 0x3,
+ POWERCAP_CAP_GET = 0x4,
+ POWERCAP_CAP_SET = 0x5,
+ POWERCAP_PAI_GET = 0x6,
+ POWERCAP_PAI_SET = 0x7,
+ POWERCAP_DOMAIN_NAME_GET = 0x8,
+ POWERCAP_MEASUREMENTS_GET = 0x9,
+ POWERCAP_CAP_NOTIFY = 0xa,
+ POWERCAP_MEASUREMENTS_NOTIFY = 0xb,
+ POWERCAP_DESCRIBE_FASTCHANNEL = 0xc,
+};
+
+enum {
+ POWERCAP_FC_CAP,
+ POWERCAP_FC_PAI,
+ POWERCAP_FC_MAX,
+};
+
+struct scmi_msg_resp_powercap_domain_attributes {
+ __le32 attributes;
+#define SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(x) ((x) & BIT(31))
+#define SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(x) ((x) & BIT(30))
+#define SUPPORTS_ASYNC_POWERCAP_CAP_SET(x) ((x) & BIT(29))
+#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(28))
+#define SUPPORTS_POWERCAP_CAP_CONFIGURATION(x) ((x) & BIT(27))
+#define SUPPORTS_POWERCAP_MONITORING(x) ((x) & BIT(26))
+#define SUPPORTS_POWERCAP_PAI_CONFIGURATION(x) ((x) & BIT(25))
+#define SUPPORTS_POWERCAP_FASTCHANNELS(x) ((x) & BIT(22))
+#define POWERCAP_POWER_UNIT(x) \
+ (FIELD_GET(GENMASK(24, 23), (x)))
+#define SUPPORTS_POWER_UNITS_MW(x) \
+ (POWERCAP_POWER_UNIT(x) == 0x2)
+#define SUPPORTS_POWER_UNITS_UW(x) \
+ (POWERCAP_POWER_UNIT(x) == 0x1)
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
+ __le32 min_pai;
+ __le32 max_pai;
+ __le32 pai_step;
+ __le32 min_power_cap;
+ __le32 max_power_cap;
+ __le32 power_cap_step;
+ __le32 sustainable_power;
+ __le32 accuracy;
+ __le32 parent_id;
+};
+
+struct scmi_msg_powercap_set_cap_or_pai {
+ __le32 domain;
+ __le32 flags;
+#define CAP_SET_ASYNC BIT(1)
+#define CAP_SET_IGNORE_DRESP BIT(0)
+ __le32 value;
+};
+
+struct scmi_msg_resp_powercap_cap_set_complete {
+ __le32 domain;
+ __le32 power_cap;
+};
+
+struct scmi_msg_resp_powercap_meas_get {
+ __le32 power;
+ __le32 pai;
+};
+
+struct scmi_msg_powercap_notify_cap {
+ __le32 domain;
+ __le32 notify_enable;
+};
+
+struct scmi_msg_powercap_notify_thresh {
+ __le32 domain;
+ __le32 notify_enable;
+ __le32 power_thresh_low;
+ __le32 power_thresh_high;
+};
+
+struct scmi_powercap_cap_changed_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 power_cap;
+ __le32 pai;
+};
+
+struct scmi_powercap_meas_changed_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 power;
+};
+
+struct scmi_powercap_state {
+ bool meas_notif_enabled;
+ u64 thresholds;
+#define THRESH_LOW(p, id) \
+ (lower_32_bits((p)->states[(id)].thresholds))
+#define THRESH_HIGH(p, id) \
+ (upper_32_bits((p)->states[(id)].thresholds))
+};
+
+struct powercap_info {
+ u32 version;
+ int num_domains;
+ struct scmi_powercap_state *states;
+ struct scmi_powercap_info *powercaps;
+};
+
+static enum scmi_powercap_protocol_cmd evt_2_cmd[] = {
+ POWERCAP_CAP_NOTIFY,
+ POWERCAP_MEASUREMENTS_NOTIFY,
+};
+
+static int scmi_powercap_notify(const struct scmi_protocol_handle *ph,
+ u32 domain, int message_id, bool enable);
+
+static int
+scmi_powercap_attributes_get(const struct scmi_protocol_handle *ph,
+ struct powercap_info *pi)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(u32), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ u32 attributes;
+
+ attributes = get_unaligned_le32(t->rx.buf);
+ pi->num_domains = FIELD_GET(GENMASK(15, 0), attributes);
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static inline int
+scmi_powercap_validate(unsigned int min_val, unsigned int max_val,
+ unsigned int step_val, bool configurable)
+{
+ if (!min_val || !max_val)
+ return -EPROTO;
+
+ if ((configurable && min_val == max_val) ||
+ (!configurable && min_val != max_val))
+ return -EPROTO;
+
+ if (min_val != max_val && !step_val)
+ return -EPROTO;
+
+ return 0;
+}
+
+static int
+scmi_powercap_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ struct powercap_info *pinfo, u32 domain)
+{
+ int ret;
+ u32 flags;
+ struct scmi_xfer *t;
+ struct scmi_powercap_info *dom_info = pinfo->powercaps + domain;
+ struct scmi_msg_resp_powercap_domain_attributes *resp;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_DOMAIN_ATTRIBUTES,
+ sizeof(domain), sizeof(*resp), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain, t->tx.buf);
+ resp = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ flags = le32_to_cpu(resp->attributes);
+
+ dom_info->id = domain;
+ dom_info->notify_powercap_cap_change =
+ SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(flags);
+ dom_info->notify_powercap_measurement_change =
+ SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(flags);
+ dom_info->async_powercap_cap_set =
+ SUPPORTS_ASYNC_POWERCAP_CAP_SET(flags);
+ dom_info->powercap_cap_config =
+ SUPPORTS_POWERCAP_CAP_CONFIGURATION(flags);
+ dom_info->powercap_monitoring =
+ SUPPORTS_POWERCAP_MONITORING(flags);
+ dom_info->powercap_pai_config =
+ SUPPORTS_POWERCAP_PAI_CONFIGURATION(flags);
+ dom_info->powercap_scale_mw =
+ SUPPORTS_POWER_UNITS_MW(flags);
+ dom_info->powercap_scale_uw =
+ SUPPORTS_POWER_UNITS_UW(flags);
+ dom_info->fastchannels =
+ SUPPORTS_POWERCAP_FASTCHANNELS(flags);
+
+ strscpy(dom_info->name, resp->name, SCMI_SHORT_NAME_MAX_SIZE);
+
+ dom_info->min_pai = le32_to_cpu(resp->min_pai);
+ dom_info->max_pai = le32_to_cpu(resp->max_pai);
+ dom_info->pai_step = le32_to_cpu(resp->pai_step);
+ ret = scmi_powercap_validate(dom_info->min_pai,
+ dom_info->max_pai,
+ dom_info->pai_step,
+ dom_info->powercap_pai_config);
+ if (ret) {
+ dev_err(ph->dev,
+ "Platform reported inconsistent PAI config for domain %d - %s\n",
+ dom_info->id, dom_info->name);
+ goto clean;
+ }
+
+ dom_info->min_power_cap = le32_to_cpu(resp->min_power_cap);
+ dom_info->max_power_cap = le32_to_cpu(resp->max_power_cap);
+ dom_info->power_cap_step = le32_to_cpu(resp->power_cap_step);
+ ret = scmi_powercap_validate(dom_info->min_power_cap,
+ dom_info->max_power_cap,
+ dom_info->power_cap_step,
+ dom_info->powercap_cap_config);
+ if (ret) {
+ dev_err(ph->dev,
+ "Platform reported inconsistent CAP config for domain %d - %s\n",
+ dom_info->id, dom_info->name);
+ goto clean;
+ }
+
+ dom_info->sustainable_power =
+ le32_to_cpu(resp->sustainable_power);
+ dom_info->accuracy = le32_to_cpu(resp->accuracy);
+
+ dom_info->parent_id = le32_to_cpu(resp->parent_id);
+ if (dom_info->parent_id != SCMI_POWERCAP_ROOT_ZONE_ID &&
+ (dom_info->parent_id >= pinfo->num_domains ||
+ dom_info->parent_id == dom_info->id)) {
+ dev_err(ph->dev,
+ "Platform reported inconsistent parent ID for domain %d - %s\n",
+ dom_info->id, dom_info->name);
+ ret = -ENODEV;
+ }
+ }
+
+clean:
+ ph->xops->xfer_put(ph, t);
+
+ /*
+ * If supported overwrite short name with the extended one;
+ * on error just carry on and use already provided short name.
+ */
+ if (!ret && SUPPORTS_EXTENDED_NAMES(flags))
+ ph->hops->extended_name_get(ph, POWERCAP_DOMAIN_NAME_GET,
+ domain, dom_info->name,
+ SCMI_MAX_STR_SIZE);
+
+ return ret;
+}
+
+static int scmi_powercap_num_domains_get(const struct scmi_protocol_handle *ph)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ return pi->num_domains;
+}
+
+static const struct scmi_powercap_info *
+scmi_powercap_dom_info_get(const struct scmi_protocol_handle *ph, u32 domain_id)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (domain_id >= pi->num_domains)
+ return NULL;
+
+ return pi->powercaps + domain_id;
+}
+
+static int scmi_powercap_xfer_cap_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *power_cap)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_GET, sizeof(u32),
+ sizeof(u32), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain_id, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ *power_cap = get_unaligned_le32(t->rx.buf);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_powercap_cap_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *power_cap)
+{
+ struct scmi_powercap_info *dom;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (!power_cap || domain_id >= pi->num_domains)
+ return -EINVAL;
+
+ dom = pi->powercaps + domain_id;
+ if (dom->fc_info && dom->fc_info[POWERCAP_FC_CAP].get_addr) {
+ *power_cap = ioread32(dom->fc_info[POWERCAP_FC_CAP].get_addr);
+ trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_GET,
+ domain_id, *power_cap, 0);
+ return 0;
+ }
+
+ return scmi_powercap_xfer_cap_get(ph, domain_id, power_cap);
+}
+
+static int scmi_powercap_xfer_cap_set(const struct scmi_protocol_handle *ph,
+ const struct scmi_powercap_info *pc,
+ u32 power_cap, bool ignore_dresp)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_powercap_set_cap_or_pai *msg;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_SET,
+ sizeof(*msg), 0, &t);
+ if (ret)
+ return ret;
+
+ msg = t->tx.buf;
+ msg->domain = cpu_to_le32(pc->id);
+ msg->flags =
+ cpu_to_le32(FIELD_PREP(CAP_SET_ASYNC, !!pc->async_powercap_cap_set) |
+ FIELD_PREP(CAP_SET_IGNORE_DRESP, !!ignore_dresp));
+ msg->value = cpu_to_le32(power_cap);
+
+ if (!pc->async_powercap_cap_set || ignore_dresp) {
+ ret = ph->xops->do_xfer(ph, t);
+ } else {
+ ret = ph->xops->do_xfer_with_response(ph, t);
+ if (!ret) {
+ struct scmi_msg_resp_powercap_cap_set_complete *resp;
+
+ resp = t->rx.buf;
+ if (le32_to_cpu(resp->domain) == pc->id)
+ dev_dbg(ph->dev,
+ "Powercap ID %d CAP set async to %u\n",
+ pc->id,
+ get_unaligned_le32(&resp->power_cap));
+ else
+ ret = -EPROTO;
+ }
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_powercap_cap_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 power_cap,
+ bool ignore_dresp)
+{
+ const struct scmi_powercap_info *pc;
+
+ pc = scmi_powercap_dom_info_get(ph, domain_id);
+ if (!pc || !pc->powercap_cap_config || !power_cap ||
+ power_cap < pc->min_power_cap ||
+ power_cap > pc->max_power_cap)
+ return -EINVAL;
+
+ if (pc->fc_info && pc->fc_info[POWERCAP_FC_CAP].set_addr) {
+ struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_CAP];
+
+ iowrite32(power_cap, fci->set_addr);
+ ph->hops->fastchannel_db_ring(fci->set_db);
+ trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_SET,
+ domain_id, power_cap, 0);
+ return 0;
+ }
+
+ return scmi_powercap_xfer_cap_set(ph, pc, power_cap, ignore_dresp);
+}
+
+static int scmi_powercap_xfer_pai_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *pai)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_GET, sizeof(u32),
+ sizeof(u32), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain_id, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ *pai = get_unaligned_le32(t->rx.buf);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_powercap_pai_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *pai)
+{
+ struct scmi_powercap_info *dom;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (!pai || domain_id >= pi->num_domains)
+ return -EINVAL;
+
+ dom = pi->powercaps + domain_id;
+ if (dom->fc_info && dom->fc_info[POWERCAP_FC_PAI].get_addr) {
+ *pai = ioread32(dom->fc_info[POWERCAP_FC_PAI].get_addr);
+ trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_GET,
+ domain_id, *pai, 0);
+ return 0;
+ }
+
+ return scmi_powercap_xfer_pai_get(ph, domain_id, pai);
+}
+
+static int scmi_powercap_xfer_pai_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 pai)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_powercap_set_cap_or_pai *msg;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_SET,
+ sizeof(*msg), 0, &t);
+ if (ret)
+ return ret;
+
+ msg = t->tx.buf;
+ msg->domain = cpu_to_le32(domain_id);
+ msg->flags = cpu_to_le32(0);
+ msg->value = cpu_to_le32(pai);
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_powercap_pai_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 pai)
+{
+ const struct scmi_powercap_info *pc;
+
+ pc = scmi_powercap_dom_info_get(ph, domain_id);
+ if (!pc || !pc->powercap_pai_config || !pai ||
+ pai < pc->min_pai || pai > pc->max_pai)
+ return -EINVAL;
+
+ if (pc->fc_info && pc->fc_info[POWERCAP_FC_PAI].set_addr) {
+ struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_PAI];
+
+ trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_SET,
+ domain_id, pai, 0);
+ iowrite32(pai, fci->set_addr);
+ ph->hops->fastchannel_db_ring(fci->set_db);
+ return 0;
+ }
+
+ return scmi_powercap_xfer_pai_set(ph, domain_id, pai);
+}
+
+static int scmi_powercap_measurements_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *average_power,
+ u32 *pai)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_powercap_meas_get *resp;
+ const struct scmi_powercap_info *pc;
+
+ pc = scmi_powercap_dom_info_get(ph, domain_id);
+ if (!pc || !pc->powercap_monitoring || !pai || !average_power)
+ return -EINVAL;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_MEASUREMENTS_GET,
+ sizeof(u32), sizeof(*resp), &t);
+ if (ret)
+ return ret;
+
+ resp = t->rx.buf;
+ put_unaligned_le32(domain_id, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ *average_power = le32_to_cpu(resp->power);
+ *pai = le32_to_cpu(resp->pai);
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int
+scmi_powercap_measurements_threshold_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *power_thresh_low,
+ u32 *power_thresh_high)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (!power_thresh_low || !power_thresh_high ||
+ domain_id >= pi->num_domains)
+ return -EINVAL;
+
+ *power_thresh_low = THRESH_LOW(pi, domain_id);
+ *power_thresh_high = THRESH_HIGH(pi, domain_id);
+
+ return 0;
+}
+
+static int
+scmi_powercap_measurements_threshold_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 power_thresh_low,
+ u32 power_thresh_high)
+{
+ int ret = 0;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (domain_id >= pi->num_domains ||
+ power_thresh_low > power_thresh_high)
+ return -EINVAL;
+
+ /* Anything to do ? */
+ if (THRESH_LOW(pi, domain_id) == power_thresh_low &&
+ THRESH_HIGH(pi, domain_id) == power_thresh_high)
+ return ret;
+
+ pi->states[domain_id].thresholds =
+ (FIELD_PREP(GENMASK_ULL(31, 0), power_thresh_low) |
+ FIELD_PREP(GENMASK_ULL(63, 32), power_thresh_high));
+
+ /* Update thresholds if notification already enabled */
+ if (pi->states[domain_id].meas_notif_enabled)
+ ret = scmi_powercap_notify(ph, domain_id,
+ POWERCAP_MEASUREMENTS_NOTIFY,
+ true);
+
+ return ret;
+}
+
+static const struct scmi_powercap_proto_ops powercap_proto_ops = {
+ .num_domains_get = scmi_powercap_num_domains_get,
+ .info_get = scmi_powercap_dom_info_get,
+ .cap_get = scmi_powercap_cap_get,
+ .cap_set = scmi_powercap_cap_set,
+ .pai_get = scmi_powercap_pai_get,
+ .pai_set = scmi_powercap_pai_set,
+ .measurements_get = scmi_powercap_measurements_get,
+ .measurements_threshold_set = scmi_powercap_measurements_threshold_set,
+ .measurements_threshold_get = scmi_powercap_measurements_threshold_get,
+};
+
+static void scmi_powercap_domain_init_fc(const struct scmi_protocol_handle *ph,
+ u32 domain, struct scmi_fc_info **p_fc)
+{
+ struct scmi_fc_info *fc;
+
+ fc = devm_kcalloc(ph->dev, POWERCAP_FC_MAX, sizeof(*fc), GFP_KERNEL);
+ if (!fc)
+ return;
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_CAP_SET, 4, domain,
+ &fc[POWERCAP_FC_CAP].set_addr,
+ &fc[POWERCAP_FC_CAP].set_db);
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_CAP_GET, 4, domain,
+ &fc[POWERCAP_FC_CAP].get_addr, NULL);
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_PAI_SET, 4, domain,
+ &fc[POWERCAP_FC_PAI].set_addr,
+ &fc[POWERCAP_FC_PAI].set_db);
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_PAI_GET, 4, domain,
+ &fc[POWERCAP_FC_PAI].get_addr, NULL);
+
+ *p_fc = fc;
+}
+
+static int scmi_powercap_notify(const struct scmi_protocol_handle *ph,
+ u32 domain, int message_id, bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ switch (message_id) {
+ case POWERCAP_CAP_NOTIFY:
+ {
+ struct scmi_msg_powercap_notify_cap *notify;
+
+ ret = ph->xops->xfer_get_init(ph, message_id,
+ sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = cpu_to_le32(enable ? BIT(0) : 0);
+ break;
+ }
+ case POWERCAP_MEASUREMENTS_NOTIFY:
+ {
+ u32 low, high;
+ struct scmi_msg_powercap_notify_thresh *notify;
+
+ /*
+ * Note that we have to pick the most recently configured
+ * thresholds to build a proper POWERCAP_MEASUREMENTS_NOTIFY
+ * enable request and we fail, complaining, if no thresholds
+ * were ever set, since this is an indication the API has been
+ * used wrongly.
+ */
+ ret = scmi_powercap_measurements_threshold_get(ph, domain,
+ &low, &high);
+ if (ret)
+ return ret;
+
+ if (enable && !low && !high) {
+ dev_err(ph->dev,
+ "Invalid Measurements Notify thresholds: %u/%u\n",
+ low, high);
+ return -EINVAL;
+ }
+
+ ret = ph->xops->xfer_get_init(ph, message_id,
+ sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = cpu_to_le32(enable ? BIT(0) : 0);
+ notify->power_thresh_low = cpu_to_le32(low);
+ notify->power_thresh_high = cpu_to_le32(high);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int
+scmi_powercap_set_notify_enabled(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret, cmd_id;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd) || src_id >= pi->num_domains)
+ return -EINVAL;
+
+ cmd_id = evt_2_cmd[evt_id];
+ ret = scmi_powercap_notify(ph, src_id, cmd_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+ else if (cmd_id == POWERCAP_MEASUREMENTS_NOTIFY)
+ /*
+ * On success save the current notification enabled state, so
+ * as to be able to properly update the notification thresholds
+ * when they are modified on a domain for which measurement
+ * notifications were currently enabled.
+ *
+ * This is needed because the SCMI Notification core machinery
+ * and API does not support passing per-notification custom
+ * arguments at callback registration time.
+ *
+ * Note that this can be done here with a simple flag since the
+ * SCMI core Notifications code takes care of keeping proper
+ * per-domain enables refcounting, so that this helper function
+ * will be called only once (for enables) when the first user
+ * registers a callback on this domain and once more (disable)
+ * when the last user de-registers its callback.
+ */
+ pi->states[src_id].meas_notif_enabled = enable;
+
+ return ret;
+}
+
+static void *
+scmi_powercap_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ void *rep = NULL;
+
+ switch (evt_id) {
+ case SCMI_EVENT_POWERCAP_CAP_CHANGED:
+ {
+ const struct scmi_powercap_cap_changed_notify_payld *p = payld;
+ struct scmi_powercap_cap_changed_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->power_cap = le32_to_cpu(p->power_cap);
+ r->pai = le32_to_cpu(p->pai);
+ *src_id = r->domain_id;
+ rep = r;
+ break;
+ }
+ case SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED:
+ {
+ const struct scmi_powercap_meas_changed_notify_payld *p = payld;
+ struct scmi_powercap_meas_changed_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->power = le32_to_cpu(p->power);
+ *src_id = r->domain_id;
+ rep = r;
+ break;
+ }
+ default:
+ break;
+ }
+
+ return rep;
+}
+
+static int
+scmi_powercap_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (!pi)
+ return -EINVAL;
+
+ return pi->num_domains;
+}
+
+static const struct scmi_event powercap_events[] = {
+ {
+ .id = SCMI_EVENT_POWERCAP_CAP_CHANGED,
+ .max_payld_sz =
+ sizeof(struct scmi_powercap_cap_changed_notify_payld),
+ .max_report_sz =
+ sizeof(struct scmi_powercap_cap_changed_report),
+ },
+ {
+ .id = SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED,
+ .max_payld_sz =
+ sizeof(struct scmi_powercap_meas_changed_notify_payld),
+ .max_report_sz =
+ sizeof(struct scmi_powercap_meas_changed_report),
+ },
+};
+
+static const struct scmi_event_ops powercap_event_ops = {
+ .get_num_sources = scmi_powercap_get_num_sources,
+ .set_notify_enabled = scmi_powercap_set_notify_enabled,
+ .fill_custom_report = scmi_powercap_fill_custom_report,
+};
+
+static const struct scmi_protocol_events powercap_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &powercap_event_ops,
+ .evts = powercap_events,
+ .num_events = ARRAY_SIZE(powercap_events),
+};
+
+static int
+scmi_powercap_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ int domain, ret;
+ u32 version;
+ struct powercap_info *pinfo;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_dbg(ph->dev, "Powercap Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ ret = scmi_powercap_attributes_get(ph, pinfo);
+ if (ret)
+ return ret;
+
+ pinfo->powercaps = devm_kcalloc(ph->dev, pinfo->num_domains,
+ sizeof(*pinfo->powercaps),
+ GFP_KERNEL);
+ if (!pinfo->powercaps)
+ return -ENOMEM;
+
+ /*
+ * Note that any failure in retrieving any domain attribute leads to
+ * the whole Powercap protocol initialization failure: this way the
+ * reported Powercap domains are all assured, when accessed, to be well
+ * formed and correlated by sane parent-child relationship (if any).
+ */
+ for (domain = 0; domain < pinfo->num_domains; domain++) {
+ ret = scmi_powercap_domain_attributes_get(ph, pinfo, domain);
+ if (ret)
+ return ret;
+
+ if (pinfo->powercaps[domain].fastchannels)
+ scmi_powercap_domain_init_fc(ph, domain,
+ &pinfo->powercaps[domain].fc_info);
+ }
+
+ pinfo->states = devm_kcalloc(ph->dev, pinfo->num_domains,
+ sizeof(*pinfo->states), GFP_KERNEL);
+ if (!pinfo->states)
+ return -ENOMEM;
+
+ pinfo->version = version;
+
+ return ph->set_priv(ph, pinfo);
+}
+
+static const struct scmi_protocol scmi_powercap = {
+ .id = SCMI_PROTOCOL_POWERCAP,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_powercap_protocol_init,
+ .ops = &powercap_proto_ops,
+ .events = &powercap_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(powercap, scmi_powercap)
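
The powercap protocol above keeps both measurement thresholds of a domain packed into one 64-bit scmi_powercap_state field: scmi_powercap_measurements_threshold_set() stores the low threshold in the low word and the high threshold in the high word, and THRESH_LOW()/THRESH_HIGH() recover them via lower_32_bits()/upper_32_bits(). A minimal stand-alone sketch of that packing, with plain-C stand-ins for the kernel helpers and made-up threshold values:

#include <assert.h>
#include <stdint.h>

/* Plain-C stand-ins for the kernel's lower_32_bits()/upper_32_bits(). */
static inline uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
static inline uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
        uint32_t thresh_low = 1500, thresh_high = 4500; /* hypothetical values */

        /* Same layout as scmi_powercap_measurements_threshold_set(). */
        uint64_t packed = (uint64_t)thresh_low | ((uint64_t)thresh_high << 32);

        /* THRESH_LOW()/THRESH_HIGH() unpack the per-domain field this way. */
        assert(lower_32(packed) == thresh_low);
        assert(upper_32(packed) == thresh_high);
        return 0;
}
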
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
index 51c31379f9b3..2f3bf691db7c 100644
--- a/drivers/firmware/arm_scmi/protocols.h
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -215,6 +215,19 @@ struct scmi_iterator_ops {
struct scmi_iterator_state *st, void *priv);
};
+struct scmi_fc_db_info {
+ int width;
+ u64 set;
+ u64 mask;
+ void __iomem *addr;
+};
+
+struct scmi_fc_info {
+ void __iomem *set_addr;
+ void __iomem *get_addr;
+ struct scmi_fc_db_info *set_db;
+};
+
/**
* struct scmi_proto_helpers_ops - References to common protocol helpers
* @extended_name_get: A common helper function to retrieve extended naming
@@ -230,6 +243,9 @@ struct scmi_iterator_ops {
* provided in @ops.
* @iter_response_run: A common helper to trigger the run of a previously
* initialized iterator.
+ * @fastchannel_init: A common helper used to initialize FC descriptors by
+ * gathering FC descriptions from the SCMI platform server.
+ * @fastchannel_db_ring: A common helper to ring a FC doorbell.
*/
struct scmi_proto_helpers_ops {
int (*extended_name_get)(const struct scmi_protocol_handle *ph,
@@ -239,6 +255,12 @@ struct scmi_proto_helpers_ops {
unsigned int max_resources, u8 msg_id,
size_t tx_size, void *priv);
int (*iter_response_run)(void *iter);
+ void (*fastchannel_init)(const struct scmi_protocol_handle *ph,
+ u8 describe_id, u32 message_id,
+ u32 valid_size, u32 domain,
+ void __iomem **p_addr,
+ struct scmi_fc_db_info **p_db);
+ void (*fastchannel_db_ring)(struct scmi_fc_db_info *db);
};
/**
@@ -315,5 +337,6 @@ DECLARE_SCMI_REGISTER_UNREGISTER(reset);
DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
DECLARE_SCMI_REGISTER_UNREGISTER(voltage);
DECLARE_SCMI_REGISTER_UNREGISTER(system);
+DECLARE_SCMI_REGISTER_UNREGISTER(powercap);
#endif /* _SCMI_PROTOCOLS_H */
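
The fastchannel_init helper documented above works by decoding a DESCRIBE_FASTCHANNEL reply: bit 0 of the attributes word advertises a doorbell, bits [2:1] encode the doorbell register width as a power of two in bytes, and the channel (and doorbell) address arrives split into low/high 32-bit words. A stand-alone sketch of that decoding, using invented reply values:

#include <stdint.h>
#include <stdio.h>

/* Field layout from scmi_msg_resp_desc_fc in driver.c:
 *   bit 0    - SUPPORTS_DOORBELL
 *   bits 2:1 - DOORBELL_REG_WIDTH (width = 1 << field, in bytes)
 */
#define FC_SUPPORTS_DOORBELL(attr)  ((attr) & 0x1u)
#define FC_DOORBELL_REG_WIDTH(attr) (((attr) >> 1) & 0x3u)

int main(void)
{
        /* Hypothetical DESCRIBE_FASTCHANNEL reply fields, already LE-decoded. */
        uint32_t attr = 0x5;           /* doorbell present, width field = 2 */
        uint32_t chan_addr_low  = 0x1000a000;
        uint32_t chan_addr_high = 0x00000002;

        uint64_t chan_addr = (uint64_t)chan_addr_low |
                             ((uint64_t)chan_addr_high << 32);
        unsigned int db_width = 1u << FC_DOORBELL_REG_WIDTH(attr);

        printf("doorbell: %s, width %u bytes, channel at 0x%llx\n",
               FC_SUPPORTS_DOORBELL(attr) ? "yes" : "no",
               db_width, (unsigned long long)chan_addr);
        return 0;
}
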
diff --git a/drivers/firmware/arm_scmi/scmi_power_control.c b/drivers/firmware/arm_scmi/scmi_power_control.c
new file mode 100644
index 000000000000..6eb7d2a4b6b1
--- /dev/null
+++ b/drivers/firmware/arm_scmi/scmi_power_control.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCMI Generic SystemPower Control driver.
+ *
+ * Copyright (C) 2020-2022 ARM Ltd.
+ */
+/*
+ * In order to handle platform originated SCMI SystemPower requests (like
+ * shutdowns or cold/warm resets) we register an SCMI Notification notifier
+ * block to react when such SCMI SystemPower events are emitted by platform.
+ *
+ * Once such a notification is received we act accordingly to perform the
+ * required system transition depending on the kind of request.
+ *
+ * Graceful requests are routed to userspace through the same API methods
+ * (orderly_poweroff/reboot()) used by ACPI when handling ACPI Shutdown bus
+ * events.
+ *
+ * Direct forceful requests are not supported since they are not meant to be sent
+ * by the SCMI platform to an OSPM like Linux.
+ *
+ * Additionally, graceful request notifications can carry an optional timeout
+ * field stating the maximum amount of time allowed by the platform for
+ * completion after which they are converted to forceful ones: the assumption
+ * here is that even graceful requests can be upper-bound by a maximum final
+ * timeout strictly enforced by the platform itself which can ultimately cut
+ * the power off at will anytime; in order to avoid such extreme scenario, we
+ * track progress of graceful requests through the means of a reboot notifier
+ * converting timed-out graceful requests to forceful ones, so at least we
+ * try to perform a clean sync and shutdown/restart before the power is cut.
+ *
+ * Given the peculiar nature of SCMI SystemPower protocol, that is being in
+ * charge of triggering system wide shutdown/reboot events, there should be
+ * only one SCMI platform actively emitting SystemPower events.
+ * For this reason the SCMI core takes care to enforce the creation of one
+ * single unique device associated to the SCMI System Power protocol; no matter
+ * how many SCMI platforms are defined on the system, only one can be designated
+ * to support System Power: as a consequence this driver will never be probed
+ * more than once.
+ *
+ * For similar reasons as soon as the first valid SystemPower is received by
+ * this driver and the shutdown/reboot is started, any further notification
+ * possibly emitted by the platform will be ignored.
+ */
+
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/printk.h>
+#include <linux/reboot.h>
+#include <linux/scmi_protocol.h>
+#include <linux/slab.h>
+#include <linux/time64.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#ifndef MODULE
+#include <linux/fs.h>
+#endif
+
+enum scmi_syspower_state {
+ SCMI_SYSPOWER_IDLE,
+ SCMI_SYSPOWER_IN_PROGRESS,
+ SCMI_SYSPOWER_REBOOTING
+};
+
+/**
+ * struct scmi_syspower_conf - Common configuration
+ *
+ * @dev: A reference device
+ * @state: Current SystemPower state
+ * @state_mtx: @state related mutex
+ * @required_transition: The requested transition as described in the received
+ * SCMI SystemPower notification
+ * @userspace_nb: The notifier_block registered against the SCMI SystemPower
+ * notification to start the needed userspace interactions.
+ * @reboot_nb: A notifier_block optionally used to track reboot progress
+ * @forceful_work: A worker used to trigger a forceful transition once a
+ * graceful has timed out.
+ */
+struct scmi_syspower_conf {
+ struct device *dev;
+ enum scmi_syspower_state state;
+ /* Protect access to state */
+ struct mutex state_mtx;
+ enum scmi_system_events required_transition;
+
+ struct notifier_block userspace_nb;
+ struct notifier_block reboot_nb;
+
+ struct delayed_work forceful_work;
+};
+
+#define userspace_nb_to_sconf(x) \
+ container_of(x, struct scmi_syspower_conf, userspace_nb)
+
+#define reboot_nb_to_sconf(x) \
+ container_of(x, struct scmi_syspower_conf, reboot_nb)
+
+#define dwork_to_sconf(x) \
+ container_of(x, struct scmi_syspower_conf, forceful_work)
+
+/**
+ * scmi_reboot_notifier - A reboot notifier to catch an ongoing successful
+ * system transition
+ * @nb: Reference to the related notifier block
+ * @reason: The reason for the ongoing reboot
+ * @__unused: The cmd being executed on a restart request (unused)
+ *
+ * When an ongoing system transition is detected, compatible with the one
+ * requested by SCMI, cancel the delayed work.
+ *
+ * Return: NOTIFY_OK in any case
+ */
+static int scmi_reboot_notifier(struct notifier_block *nb,
+ unsigned long reason, void *__unused)
+{
+ struct scmi_syspower_conf *sc = reboot_nb_to_sconf(nb);
+
+ mutex_lock(&sc->state_mtx);
+ switch (reason) {
+ case SYS_HALT:
+ case SYS_POWER_OFF:
+ if (sc->required_transition == SCMI_SYSTEM_SHUTDOWN)
+ sc->state = SCMI_SYSPOWER_REBOOTING;
+ break;
+ case SYS_RESTART:
+ if (sc->required_transition == SCMI_SYSTEM_COLDRESET ||
+ sc->required_transition == SCMI_SYSTEM_WARMRESET)
+ sc->state = SCMI_SYSPOWER_REBOOTING;
+ break;
+ default:
+ break;
+ }
+
+ if (sc->state == SCMI_SYSPOWER_REBOOTING) {
+ dev_dbg(sc->dev, "Reboot in progress...cancel delayed work.\n");
+ cancel_delayed_work_sync(&sc->forceful_work);
+ }
+ mutex_unlock(&sc->state_mtx);
+
+ return NOTIFY_OK;
+}
+
+/**
+ * scmi_request_forceful_transition - Request forceful SystemPower transition
+ * @sc: A reference to the configuration data
+ *
+ * Initiates the required SystemPower transition without involving userspace:
+ * just trigger the action at the kernel level after issuing an emergency
+ * sync (if possible at all).
+ */
+static inline void
+scmi_request_forceful_transition(struct scmi_syspower_conf *sc)
+{
+ dev_dbg(sc->dev, "Serving forceful request:%d\n",
+ sc->required_transition);
+
+#ifndef MODULE
+ emergency_sync();
+#endif
+ switch (sc->required_transition) {
+ case SCMI_SYSTEM_SHUTDOWN:
+ kernel_power_off();
+ break;
+ case SCMI_SYSTEM_COLDRESET:
+ case SCMI_SYSTEM_WARMRESET:
+ kernel_restart(NULL);
+ break;
+ default:
+ break;
+ }
+}
+
+static void scmi_forceful_work_func(struct work_struct *work)
+{
+ struct scmi_syspower_conf *sc;
+ struct delayed_work *dwork;
+
+ if (system_state > SYSTEM_RUNNING)
+ return;
+
+ dwork = to_delayed_work(work);
+ sc = dwork_to_sconf(dwork);
+
+ dev_dbg(sc->dev, "Graceful request timed out...forcing !\n");
+ mutex_lock(&sc->state_mtx);
+ /* avoid deadlock by unregistering reboot notifier first */
+ unregister_reboot_notifier(&sc->reboot_nb);
+ if (sc->state == SCMI_SYSPOWER_IN_PROGRESS)
+ scmi_request_forceful_transition(sc);
+ mutex_unlock(&sc->state_mtx);
+}
+
+/**
+ * scmi_request_graceful_transition - Request graceful SystemPower transition
+ * @sc: A reference to the configuration data
+ * @timeout_ms: The desired timeout to wait for the shutdown to complete before
+ * system is forcibly shutdown.
+ *
+ * Initiates the required SystemPower transition, requesting userspace
+ * co-operation: it uses the same orderly_ methods used by ACPI Shutdown event
+ * processing.
+ *
+ * It also takes care of registering a reboot notifier and scheduling a delayed
+ * work so that, if userspace actions take too long, a forceful transition is
+ * triggered.
+ */
+static void scmi_request_graceful_transition(struct scmi_syspower_conf *sc,
+ unsigned int timeout_ms)
+{
+ unsigned int adj_timeout_ms = 0;
+
+ if (timeout_ms) {
+ int ret;
+
+ sc->reboot_nb.notifier_call = &scmi_reboot_notifier;
+ ret = register_reboot_notifier(&sc->reboot_nb);
+ if (!ret) {
+ /* Wait only up to 75% of the advertised timeout */
+ adj_timeout_ms = mult_frac(timeout_ms, 3, 4);
+ INIT_DELAYED_WORK(&sc->forceful_work,
+ scmi_forceful_work_func);
+ schedule_delayed_work(&sc->forceful_work,
+ msecs_to_jiffies(adj_timeout_ms));
+ } else {
+ /* Carry on best effort even without a reboot notifier */
+ dev_warn(sc->dev,
+ "Cannot register reboot notifier !\n");
+ }
+ }
+
+ dev_dbg(sc->dev,
+ "Serving graceful req:%d (timeout_ms:%u adj_timeout_ms:%u)\n",
+ sc->required_transition, timeout_ms, adj_timeout_ms);
+
+ switch (sc->required_transition) {
+ case SCMI_SYSTEM_SHUTDOWN:
+ /*
+ * When triggered early at boot-time the 'orderly' call will
+ * partially fail due to the lack of userspace itself, but
+		 * the force=true argument will nevertheless start a
+		 * successful forced shutdown.
+ */
+ orderly_poweroff(true);
+ break;
+ case SCMI_SYSTEM_COLDRESET:
+ case SCMI_SYSTEM_WARMRESET:
+ orderly_reboot();
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * scmi_userspace_notifier - Notifier callback to act on SystemPower
+ * Notifications
+ * @nb: Reference to the related notifier block
+ * @event: The SystemPower notification event id
+ * @data: The SystemPower event report
+ *
+ * This callback is in charge of decoding the received SystemPower report and
+ * acting on it, triggering a graceful or forceful system transition.
+ *
+ * Note that once a valid SCMI SystemPower event starts being served, any
+ * other following SystemPower notification received from the same SCMI
+ * instance (handle) will be ignored.
+ *
+ * Return: NOTIFY_OK once a valid SystemPower event has been successfully
+ * processed.
+ */
+static int scmi_userspace_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct scmi_system_power_state_notifier_report *er = data;
+ struct scmi_syspower_conf *sc = userspace_nb_to_sconf(nb);
+
+ if (er->system_state >= SCMI_SYSTEM_POWERUP) {
+ dev_err(sc->dev, "Ignoring unsupported system_state: 0x%X\n",
+ er->system_state);
+ return NOTIFY_DONE;
+ }
+
+ if (!SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(er->flags)) {
+ dev_err(sc->dev, "Ignoring forceful notification.\n");
+ return NOTIFY_DONE;
+ }
+
+ /*
+	 * Bail out if the system is already shutting down or an SCMI
+	 * SystemPower request is already being served.
+ */
+ if (system_state > SYSTEM_RUNNING)
+ return NOTIFY_DONE;
+ mutex_lock(&sc->state_mtx);
+ if (sc->state != SCMI_SYSPOWER_IDLE) {
+ dev_dbg(sc->dev,
+ "Transition already in progress...ignore.\n");
+ mutex_unlock(&sc->state_mtx);
+ return NOTIFY_DONE;
+ }
+ sc->state = SCMI_SYSPOWER_IN_PROGRESS;
+ mutex_unlock(&sc->state_mtx);
+
+ sc->required_transition = er->system_state;
+
+	/* Leave a trace in the logs of who triggered the shutdown/reboot. */
+ dev_info(sc->dev, "Serving shutdown/reboot request: %d\n",
+ sc->required_transition);
+
+ scmi_request_graceful_transition(sc, er->timeout);
+
+ return NOTIFY_OK;
+}
+
+static int scmi_syspower_probe(struct scmi_device *sdev)
+{
+ int ret;
+ struct scmi_syspower_conf *sc;
+ struct scmi_handle *handle = sdev->handle;
+
+ if (!handle)
+ return -ENODEV;
+
+ ret = handle->devm_protocol_acquire(sdev, SCMI_PROTOCOL_SYSTEM);
+ if (ret)
+ return ret;
+
+ sc = devm_kzalloc(&sdev->dev, sizeof(*sc), GFP_KERNEL);
+ if (!sc)
+ return -ENOMEM;
+
+ sc->state = SCMI_SYSPOWER_IDLE;
+ mutex_init(&sc->state_mtx);
+ sc->required_transition = SCMI_SYSTEM_MAX;
+ sc->userspace_nb.notifier_call = &scmi_userspace_notifier;
+ sc->dev = &sdev->dev;
+
+ return handle->notify_ops->devm_event_notifier_register(sdev,
+ SCMI_PROTOCOL_SYSTEM,
+ SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER,
+ NULL, &sc->userspace_nb);
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_SYSTEM, "syspower" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_system_power_driver = {
+ .name = "scmi-system-power",
+ .probe = scmi_syspower_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_system_power_driver);
+
+MODULE_AUTHOR("Cristian Marussi <cristian.marussi@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI SystemPower Control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c
index 220e399118ad..9383d7584539 100644
--- a/drivers/firmware/arm_scmi/system.c
+++ b/drivers/firmware/arm_scmi/system.c
@@ -27,10 +27,12 @@ struct scmi_system_power_state_notifier_payld {
__le32 agent_id;
__le32 flags;
__le32 system_state;
+ __le32 timeout;
};
struct scmi_system_info {
u32 version;
+ bool graceful_timeout_supported;
};
static int scmi_system_request_notify(const struct scmi_protocol_handle *ph,
@@ -72,17 +74,27 @@ scmi_system_fill_custom_report(const struct scmi_protocol_handle *ph,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
{
+ size_t expected_sz;
const struct scmi_system_power_state_notifier_payld *p = payld;
struct scmi_system_power_state_notifier_report *r = report;
+ struct scmi_system_info *pinfo = ph->get_priv(ph);
+ expected_sz = pinfo->graceful_timeout_supported ?
+ sizeof(*p) : sizeof(*p) - sizeof(__le32);
if (evt_id != SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER ||
- sizeof(*p) != payld_sz)
+ payld_sz != expected_sz)
return NULL;
r->timestamp = timestamp;
r->agent_id = le32_to_cpu(p->agent_id);
r->flags = le32_to_cpu(p->flags);
r->system_state = le32_to_cpu(p->system_state);
+ if (pinfo->graceful_timeout_supported &&
+ r->system_state == SCMI_SYSTEM_SHUTDOWN &&
+ SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(r->flags))
+ r->timeout = le32_to_cpu(p->timeout);
+ else
+ r->timeout = 0x00;
*src_id = 0;
return r;
@@ -129,6 +141,9 @@ static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph)
return -ENOMEM;
pinfo->version = version;
+ if (PROTOCOL_REV_MAJOR(pinfo->version) >= 0x2)
+ pinfo->graceful_timeout_supported = true;
+
return ph->set_priv(ph, pinfo);
}
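
As a worked illustration of the size check above (assuming the three or four 32-bit
little-endian payload fields shown in the first hunk; this sketch is not part of the patch):

	/*
	 * Sketch only: expected SYSTEM_POWER_STATE_NOTIFIER payload sizes.
	 *
	 *   protocol >= 2.0: agent_id + flags + system_state + timeout = 16 bytes
	 *   protocol <  2.0: agent_id + flags + system_state           = 12 bytes
	 *
	 * which is what the graceful_timeout_supported branch of expected_sz encodes.
	 */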
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index ddf0b9ff9e15..435d0e2658a4 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -815,7 +815,7 @@ static int scpi_init_versions(struct scpi_drvinfo *info)
info->firmware_version = le32_to_cpu(caps.platform_version);
}
/* Ignore error if not implemented */
- if (scpi_info->is_legacy && ret == -EOPNOTSUPP)
+ if (info->is_legacy && ret == -EOPNOTSUPP)
return 0;
return ret;
@@ -913,13 +913,14 @@ static int scpi_probe(struct platform_device *pdev)
struct resource res;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ struct scpi_drvinfo *scpi_drvinfo;
- scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL);
- if (!scpi_info)
+ scpi_drvinfo = devm_kzalloc(dev, sizeof(*scpi_drvinfo), GFP_KERNEL);
+ if (!scpi_drvinfo)
return -ENOMEM;
if (of_match_device(legacy_scpi_of_match, &pdev->dev))
- scpi_info->is_legacy = true;
+ scpi_drvinfo->is_legacy = true;
count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
if (count < 0) {
@@ -927,19 +928,19 @@ static int scpi_probe(struct platform_device *pdev)
return -ENODEV;
}
- scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan),
- GFP_KERNEL);
- if (!scpi_info->channels)
+ scpi_drvinfo->channels =
+ devm_kcalloc(dev, count, sizeof(struct scpi_chan), GFP_KERNEL);
+ if (!scpi_drvinfo->channels)
return -ENOMEM;
- ret = devm_add_action(dev, scpi_free_channels, scpi_info);
+ ret = devm_add_action(dev, scpi_free_channels, scpi_drvinfo);
if (ret)
return ret;
- for (; scpi_info->num_chans < count; scpi_info->num_chans++) {
+ for (; scpi_drvinfo->num_chans < count; scpi_drvinfo->num_chans++) {
resource_size_t size;
- int idx = scpi_info->num_chans;
- struct scpi_chan *pchan = scpi_info->channels + idx;
+ int idx = scpi_drvinfo->num_chans;
+ struct scpi_chan *pchan = scpi_drvinfo->channels + idx;
struct mbox_client *cl = &pchan->cl;
struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
@@ -986,45 +987,53 @@ static int scpi_probe(struct platform_device *pdev)
return ret;
}
- scpi_info->commands = scpi_std_commands;
+ scpi_drvinfo->commands = scpi_std_commands;
- platform_set_drvdata(pdev, scpi_info);
+ platform_set_drvdata(pdev, scpi_drvinfo);
- if (scpi_info->is_legacy) {
+ if (scpi_drvinfo->is_legacy) {
/* Replace with legacy variants */
scpi_ops.clk_set_val = legacy_scpi_clk_set_val;
- scpi_info->commands = scpi_legacy_commands;
+ scpi_drvinfo->commands = scpi_legacy_commands;
/* Fill priority bitmap */
for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++)
set_bit(legacy_hpriority_cmds[idx],
- scpi_info->cmd_priority);
+ scpi_drvinfo->cmd_priority);
}
- ret = scpi_init_versions(scpi_info);
+ scpi_info = scpi_drvinfo;
+
+ ret = scpi_init_versions(scpi_drvinfo);
if (ret) {
dev_err(dev, "incorrect or no SCP firmware found\n");
+ scpi_info = NULL;
return ret;
}
- if (scpi_info->is_legacy && !scpi_info->protocol_version &&
- !scpi_info->firmware_version)
+ if (scpi_drvinfo->is_legacy && !scpi_drvinfo->protocol_version &&
+ !scpi_drvinfo->firmware_version)
dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n");
else
dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n",
FIELD_GET(PROTO_REV_MAJOR_MASK,
- scpi_info->protocol_version),
+ scpi_drvinfo->protocol_version),
FIELD_GET(PROTO_REV_MINOR_MASK,
- scpi_info->protocol_version),
+ scpi_drvinfo->protocol_version),
FIELD_GET(FW_REV_MAJOR_MASK,
- scpi_info->firmware_version),
+ scpi_drvinfo->firmware_version),
FIELD_GET(FW_REV_MINOR_MASK,
- scpi_info->firmware_version),
+ scpi_drvinfo->firmware_version),
FIELD_GET(FW_REV_PATCH_MASK,
- scpi_info->firmware_version));
- scpi_info->scpi_ops = &scpi_ops;
+ scpi_drvinfo->firmware_version));
+
+ scpi_drvinfo->scpi_ops = &scpi_ops;
- return devm_of_platform_populate(dev);
+ ret = devm_of_platform_populate(dev);
+ if (ret)
+ scpi_info = NULL;
+
+ return ret;
}
static const struct of_device_id scpi_of_match[] = {
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 7dad6f57d970..81cc3d0f6eec 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -2725,6 +2725,9 @@ void cs_dsp_stop(struct cs_dsp *dsp)
mutex_lock(&dsp->pwr_lock);
+ if (dsp->client_ops->pre_stop)
+ dsp->client_ops->pre_stop(dsp);
+
dsp->running = false;
if (dsp->ops->stop_core)
@@ -3177,6 +3180,110 @@ static const struct cs_dsp_ops cs_dsp_halo_ops = {
.stop_core = cs_dsp_halo_stop_core,
};
+/**
+ * cs_dsp_chunk_write() - Format data to a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ * @nbits: Number of bits to write
+ * @val: Value to write
+ *
+ * This function sequentially writes values into the format required for DSP
+ * memory; it handles both inserting the padding bytes and converting to
+ * big endian. Note that data is only committed to the chunk when a whole DSP
+ * word's worth of data is available.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val)
+{
+ int nwrite, i;
+
+ nwrite = min(CS_DSP_DATA_WORD_BITS - ch->cachebits, nbits);
+
+ ch->cache <<= nwrite;
+ ch->cache |= val >> (nbits - nwrite);
+ ch->cachebits += nwrite;
+ nbits -= nwrite;
+
+ if (ch->cachebits == CS_DSP_DATA_WORD_BITS) {
+ if (cs_dsp_chunk_end(ch))
+ return -ENOSPC;
+
+ ch->cache &= 0xFFFFFF;
+ for (i = 0; i < sizeof(ch->cache); i++, ch->cache <<= BITS_PER_BYTE)
+ *ch->data++ = (ch->cache & 0xFF000000) >> CS_DSP_DATA_WORD_BITS;
+
+ ch->bytes += sizeof(ch->cache);
+ ch->cachebits = 0;
+ }
+
+ if (nbits)
+ return cs_dsp_chunk_write(ch, nbits, val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_chunk_write);
+
+/**
+ * cs_dsp_chunk_flush() - Pad remaining data with zero and commit to chunk
+ * @ch: Pointer to the chunk structure
+ *
+ * As cs_dsp_chunk_write only writes data when a whole DSP word is ready to
+ * be written out, it is possible that some data will remain in the cache; this
+ * function pads that data with zeros up to a whole DSP word and writes it out.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch)
+{
+ if (!ch->cachebits)
+ return 0;
+
+ return cs_dsp_chunk_write(ch, CS_DSP_DATA_WORD_BITS - ch->cachebits, 0);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_chunk_flush);
+
+/**
+ * cs_dsp_chunk_read() - Parse data from a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ * @nbits: Number of bits to read
+ *
+ * This function sequentially reads values from a DSP memory formatted buffer;
+ * it handles both removing the padding bytes and converting from big endian.
+ *
+ * Return: A negative number is returned on error, otherwise the read value.
+ */
+int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits)
+{
+ int nread, i;
+ u32 result;
+
+ if (!ch->cachebits) {
+ if (cs_dsp_chunk_end(ch))
+ return -ENOSPC;
+
+ ch->cache = 0;
+ ch->cachebits = CS_DSP_DATA_WORD_BITS;
+
+ for (i = 0; i < sizeof(ch->cache); i++, ch->cache <<= BITS_PER_BYTE)
+ ch->cache |= *ch->data++;
+
+ ch->bytes += sizeof(ch->cache);
+ }
+
+ nread = min(ch->cachebits, nbits);
+ nbits -= nread;
+
+ result = ch->cache >> ((sizeof(ch->cache) * BITS_PER_BYTE) - nread);
+ ch->cache <<= nread;
+ ch->cachebits -= nread;
+
+ if (nbits)
+ result = (result << nbits) | cs_dsp_chunk_read(ch, nbits);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_chunk_read);
+
MODULE_DESCRIPTION("Cirrus Logic DSP Support");
MODULE_AUTHOR("Simon Trimmer <simont@opensource.cirrus.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 7aa4717cdcac..6cb7384ad2ac 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -2,18 +2,6 @@
menu "EFI (Extensible Firmware Interface) Support"
depends on EFI
-config EFI_VARS
- tristate "EFI Variable Support via sysfs"
- depends on EFI && (X86 || IA64)
- default n
- help
- If you say Y here, you are able to get EFI (Extensible Firmware
- Interface) variable information via sysfs. You may read,
- write, create, and destroy EFI variables through this interface.
- Note that this driver is only retained for compatibility with
- legacy users: new users should use the efivarfs filesystem
- instead.
-
config EFI_ESRT
bool
depends on EFI && !IA64
@@ -22,6 +10,7 @@ config EFI_ESRT
config EFI_VARS_PSTORE
tristate "Register efivars backend for pstore"
depends on PSTORE
+ select UCS2_STRING
default y
help
Say Y here to enable use efivars as a backend to pstore. This
@@ -145,6 +134,7 @@ config EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER
config EFI_BOOTLOADER_CONTROL
tristate "EFI Bootloader Control"
+ select UCS2_STRING
default n
help
This module installs a reboot hook, such that if reboot() is
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index c02ff25dd477..8d151e332584 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -17,7 +17,6 @@ ifneq ($(CONFIG_EFI_CAPSULE_LOADER),)
obj-$(CONFIG_EFI) += capsule.o
endif
obj-$(CONFIG_EFI_PARAMS_FROM_FDT) += fdtparams.o
-obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_ESRT) += esrt.o
obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
obj-$(CONFIG_UEFI_CPER) += cper.o
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index b2c829e95bd1..3928dbff76d0 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -240,6 +240,7 @@ void __init efi_init(void)
* And now, memblock is fully populated, it is time to do capping.
*/
early_init_dt_check_for_usable_mem_range();
+ efi_find_mirror();
efi_esrt_init();
efi_mokvar_table_init();
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 7e771c56c13c..3bddc152fcd4 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -6,6 +6,8 @@
#include <linux/slab.h>
#include <linux/ucs2_string.h>
+MODULE_IMPORT_NS(EFIVAR);
+
#define DUMP_NAME_LEN 66
#define EFIVARS_DATA_SIZE_MAX 1024
@@ -20,18 +22,25 @@ module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
EFI_VARIABLE_BOOTSERVICE_ACCESS | \
EFI_VARIABLE_RUNTIME_ACCESS)
-static LIST_HEAD(efi_pstore_list);
-static DECLARE_WORK(efivar_work, NULL);
-
static int efi_pstore_open(struct pstore_info *psi)
{
- psi->data = NULL;
+ int err;
+
+ err = efivar_lock();
+ if (err)
+ return err;
+
+ psi->data = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL);
+ if (!psi->data)
+ return -ENOMEM;
+
return 0;
}
static int efi_pstore_close(struct pstore_info *psi)
{
- psi->data = NULL;
+ efivar_unlock();
+ kfree(psi->data);
return 0;
}
@@ -40,22 +49,17 @@ static inline u64 generic_id(u64 timestamp, unsigned int part, int count)
return (timestamp * 100 + part) * 1000 + count;
}
-static int efi_pstore_read_func(struct efivar_entry *entry,
- struct pstore_record *record)
+static int efi_pstore_read_func(struct pstore_record *record,
+ efi_char16_t *varname)
{
- efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+ unsigned long wlen, size = EFIVARS_DATA_SIZE_MAX;
char name[DUMP_NAME_LEN], data_type;
- int i;
+ efi_status_t status;
int cnt;
unsigned int part;
- unsigned long size;
u64 time;
- if (efi_guidcmp(entry->var.VendorGuid, vendor))
- return 0;
-
- for (i = 0; i < DUMP_NAME_LEN; i++)
- name[i] = entry->var.VariableName[i];
+ ucs2_as_utf8(name, varname, DUMP_NAME_LEN);
if (sscanf(name, "dump-type%u-%u-%d-%llu-%c",
&record->type, &part, &cnt, &time, &data_type) == 5) {
@@ -95,161 +99,75 @@ static int efi_pstore_read_func(struct efivar_entry *entry,
} else
return 0;
- entry->var.DataSize = 1024;
- __efivar_entry_get(entry, &entry->var.Attributes,
- &entry->var.DataSize, entry->var.Data);
- size = entry->var.DataSize;
- memcpy(record->buf, entry->var.Data,
- (size_t)min_t(unsigned long, EFIVARS_DATA_SIZE_MAX, size));
-
- return size;
-}
-
-/**
- * efi_pstore_scan_sysfs_enter
- * @pos: scanning entry
- * @next: next entry
- * @head: list head
- */
-static void efi_pstore_scan_sysfs_enter(struct efivar_entry *pos,
- struct efivar_entry *next,
- struct list_head *head)
-{
- pos->scanning = true;
- if (&next->list != head)
- next->scanning = true;
-}
-
-/**
- * __efi_pstore_scan_sysfs_exit
- * @entry: deleting entry
- * @turn_off_scanning: Check if a scanning flag should be turned off
- */
-static inline int __efi_pstore_scan_sysfs_exit(struct efivar_entry *entry,
- bool turn_off_scanning)
-{
- if (entry->deleting) {
- list_del(&entry->list);
- efivar_entry_iter_end();
- kfree(entry);
- if (efivar_entry_iter_begin())
- return -EINTR;
- } else if (turn_off_scanning)
- entry->scanning = false;
-
- return 0;
-}
-
-/**
- * efi_pstore_scan_sysfs_exit
- * @pos: scanning entry
- * @next: next entry
- * @head: list head
- * @stop: a flag checking if scanning will stop
- */
-static int efi_pstore_scan_sysfs_exit(struct efivar_entry *pos,
- struct efivar_entry *next,
- struct list_head *head, bool stop)
-{
- int ret = __efi_pstore_scan_sysfs_exit(pos, true);
-
- if (ret)
- return ret;
-
- if (stop)
- ret = __efi_pstore_scan_sysfs_exit(next, &next->list != head);
- return ret;
-}
+ record->buf = kmalloc(size, GFP_KERNEL);
+ if (!record->buf)
+ return -ENOMEM;
-/**
- * efi_pstore_sysfs_entry_iter
- *
- * @record: pstore record to pass to callback
- *
- * You MUST call efivar_entry_iter_begin() before this function, and
- * efivar_entry_iter_end() afterwards.
- *
- */
-static int efi_pstore_sysfs_entry_iter(struct pstore_record *record)
-{
- struct efivar_entry **pos = (struct efivar_entry **)&record->psi->data;
- struct efivar_entry *entry, *n;
- struct list_head *head = &efi_pstore_list;
- int size = 0;
- int ret;
-
- if (!*pos) {
- list_for_each_entry_safe(entry, n, head, list) {
- efi_pstore_scan_sysfs_enter(entry, n, head);
-
- size = efi_pstore_read_func(entry, record);
- ret = efi_pstore_scan_sysfs_exit(entry, n, head,
- size < 0);
- if (ret)
- return ret;
- if (size)
- break;
- }
- *pos = n;
- return size;
+ status = efivar_get_variable(varname, &LINUX_EFI_CRASH_GUID, NULL,
+ &size, record->buf);
+ if (status != EFI_SUCCESS) {
+ kfree(record->buf);
+ return -EIO;
}
- list_for_each_entry_safe_from((*pos), n, head, list) {
- efi_pstore_scan_sysfs_enter((*pos), n, head);
-
- size = efi_pstore_read_func((*pos), record);
- ret = efi_pstore_scan_sysfs_exit((*pos), n, head, size < 0);
- if (ret)
- return ret;
- if (size)
- break;
+ /*
+ * Store the name of the variable in the pstore_record priv field, so
+ * we can reuse it later if we need to delete the EFI variable from the
+ * variable store.
+ */
+ wlen = (ucs2_strnlen(varname, DUMP_NAME_LEN) + 1) * sizeof(efi_char16_t);
+ record->priv = kmemdup(varname, wlen, GFP_KERNEL);
+ if (!record->priv) {
+ kfree(record->buf);
+ return -ENOMEM;
}
- *pos = n;
+
return size;
}
-/**
- * efi_pstore_read
- *
- * This function returns a size of NVRAM entry logged via efi_pstore_write().
- * The meaning and behavior of efi_pstore/pstore are as below.
- *
- * size > 0: Got data of an entry logged via efi_pstore_write() successfully,
- * and pstore filesystem will continue reading subsequent entries.
- * size == 0: Entry was not logged via efi_pstore_write(),
- * and efi_pstore driver will continue reading subsequent entries.
- * size < 0: Failed to get data of entry logging via efi_pstore_write(),
- * and pstore will stop reading entry.
- */
static ssize_t efi_pstore_read(struct pstore_record *record)
{
- ssize_t size;
+ efi_char16_t *varname = record->psi->data;
+ efi_guid_t guid = LINUX_EFI_CRASH_GUID;
+ unsigned long varname_size;
+ efi_status_t status;
- record->buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL);
- if (!record->buf)
- return -ENOMEM;
+ for (;;) {
+ varname_size = EFIVARS_DATA_SIZE_MAX;
- if (efivar_entry_iter_begin()) {
- size = -EINTR;
- goto out;
- }
- size = efi_pstore_sysfs_entry_iter(record);
- efivar_entry_iter_end();
+ /*
+ * If this is the first read() call in the pstore enumeration,
+ * varname will be the empty string, and the GetNextVariable()
+ * runtime service call will return the first EFI variable in
+ * its own enumeration order, ignoring the guid argument.
+ *
+ * Subsequent calls to GetNextVariable() must pass the name and
+ * guid values returned by the previous call, which is why we
+ * store varname in record->psi->data. Given that we only
+ * enumerate variables with the efi-pstore GUID, there is no
+ * need to record the guid return value.
+ */
+ status = efivar_get_next_variable(&varname_size, varname, &guid);
+ if (status == EFI_NOT_FOUND)
+ return 0;
-out:
- if (size <= 0) {
- kfree(record->buf);
- record->buf = NULL;
+ if (status != EFI_SUCCESS)
+ return -EIO;
+
+ /* skip variables that don't concern us */
+ if (efi_guidcmp(guid, LINUX_EFI_CRASH_GUID))
+ continue;
+
+ return efi_pstore_read_func(record, varname);
}
- return size;
}
static int efi_pstore_write(struct pstore_record *record)
{
char name[DUMP_NAME_LEN];
efi_char16_t efi_name[DUMP_NAME_LEN];
- efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
- int i, ret = 0;
+ efi_status_t status;
+ int i;
record->id = generic_id(record->time.tv_sec, record->part,
record->count);
@@ -265,88 +183,26 @@ static int efi_pstore_write(struct pstore_record *record)
for (i = 0; i < DUMP_NAME_LEN; i++)
efi_name[i] = name[i];
- ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES,
- false, record->size, record->psi->buf);
-
- if (record->reason == KMSG_DUMP_OOPS && try_module_get(THIS_MODULE))
- if (!schedule_work(&efivar_work))
- module_put(THIS_MODULE);
-
- return ret;
+ if (efivar_trylock())
+ return -EBUSY;
+ status = efivar_set_variable_locked(efi_name, &LINUX_EFI_CRASH_GUID,
+ PSTORE_EFI_ATTRIBUTES,
+ record->size, record->psi->buf,
+ true);
+ efivar_unlock();
+ return status == EFI_SUCCESS ? 0 : -EIO;
};
-/*
- * Clean up an entry with the same name
- */
-static int efi_pstore_erase_func(struct efivar_entry *entry, void *data)
-{
- efi_char16_t *efi_name = data;
- efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
- unsigned long ucs2_len = ucs2_strlen(efi_name);
-
- if (efi_guidcmp(entry->var.VendorGuid, vendor))
- return 0;
-
- if (ucs2_strncmp(entry->var.VariableName, efi_name, (size_t)ucs2_len))
- return 0;
-
- if (entry->scanning) {
- /*
- * Skip deletion because this entry will be deleted
- * after scanning is completed.
- */
- entry->deleting = true;
- } else
- list_del(&entry->list);
-
- /* found */
- __efivar_entry_delete(entry);
-
- return 1;
-}
-
-static int efi_pstore_erase_name(const char *name)
-{
- struct efivar_entry *entry = NULL;
- efi_char16_t efi_name[DUMP_NAME_LEN];
- int found, i;
-
- for (i = 0; i < DUMP_NAME_LEN; i++) {
- efi_name[i] = name[i];
- if (name[i] == '\0')
- break;
- }
-
- if (efivar_entry_iter_begin())
- return -EINTR;
-
- found = __efivar_entry_iter(efi_pstore_erase_func, &efi_pstore_list,
- efi_name, &entry);
- efivar_entry_iter_end();
-
- if (found && !entry->scanning)
- kfree(entry);
-
- return found ? 0 : -ENOENT;
-}
-
static int efi_pstore_erase(struct pstore_record *record)
{
- char name[DUMP_NAME_LEN];
- int ret;
-
- snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lld",
- record->type, record->part, record->count,
- (long long)record->time.tv_sec);
- ret = efi_pstore_erase_name(name);
- if (ret != -ENOENT)
- return ret;
+ efi_status_t status;
- snprintf(name, sizeof(name), "dump-type%u-%u-%lld",
- record->type, record->part, (long long)record->time.tv_sec);
- ret = efi_pstore_erase_name(name);
+ status = efivar_set_variable(record->priv, &LINUX_EFI_CRASH_GUID,
+ PSTORE_EFI_ATTRIBUTES, 0, NULL);
- return ret;
+ if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
+ return -EIO;
+ return 0;
}
static struct pstore_info efi_pstore_info = {
@@ -360,77 +216,14 @@ static struct pstore_info efi_pstore_info = {
.erase = efi_pstore_erase,
};
-static int efi_pstore_callback(efi_char16_t *name, efi_guid_t vendor,
- unsigned long name_size, void *data)
-{
- struct efivar_entry *entry;
- int ret;
-
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- memcpy(entry->var.VariableName, name, name_size);
- entry->var.VendorGuid = vendor;
-
- ret = efivar_entry_add(entry, &efi_pstore_list);
- if (ret)
- kfree(entry);
-
- return ret;
-}
-
-static int efi_pstore_update_entry(efi_char16_t *name, efi_guid_t vendor,
- unsigned long name_size, void *data)
-{
- struct efivar_entry *entry = data;
-
- if (efivar_entry_find(name, vendor, &efi_pstore_list, false))
- return 0;
-
- memcpy(entry->var.VariableName, name, name_size);
- memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
-
- return 1;
-}
-
-static void efi_pstore_update_entries(struct work_struct *work)
-{
- struct efivar_entry *entry;
- int err;
-
- /* Add new sysfs entries */
- while (1) {
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return;
-
- err = efivar_init(efi_pstore_update_entry, entry,
- false, &efi_pstore_list);
- if (!err)
- break;
-
- efivar_entry_add(entry, &efi_pstore_list);
- }
-
- kfree(entry);
- module_put(THIS_MODULE);
-}
-
static __init int efivars_pstore_init(void)
{
- int ret;
-
- if (!efivars_kobject() || !efivar_supports_writes())
+ if (!efivar_supports_writes())
return 0;
if (efivars_pstore_disable)
return 0;
- ret = efivar_init(efi_pstore_callback, NULL, true, &efi_pstore_list);
- if (ret)
- return ret;
-
efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
if (!efi_pstore_info.buf)
return -ENOMEM;
@@ -443,8 +236,6 @@ static __init int efivars_pstore_init(void)
efi_pstore_info.bufsize = 0;
}
- INIT_WORK(&efivar_work, efi_pstore_update_entries);
-
return 0;
}
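
A brief note on the erase path above, added for clarity (not part of the patch):

	/*
	 * Per the UEFI spec, SetVariable() with DataSize == 0 and without
	 * EFI_VARIABLE_APPEND_WRITE deletes the variable, so the zero-size
	 * efivar_set_variable() call in efi_pstore_erase() removes the record
	 * whose name was stashed in record->priv by efi_pstore_read_func().
	 */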
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 860534bcfdac..e4080ad96089 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -202,7 +202,7 @@ static void generic_ops_unregister(void)
}
#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
-#define EFIVAR_SSDT_NAME_MAX 16
+#define EFIVAR_SSDT_NAME_MAX 16UL
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
{
@@ -219,83 +219,62 @@ static int __init efivar_ssdt_setup(char *str)
}
__setup("efivar_ssdt=", efivar_ssdt_setup);
-static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
- unsigned long name_size, void *data)
-{
- struct efivar_entry *entry;
- struct list_head *list = data;
- char utf8_name[EFIVAR_SSDT_NAME_MAX];
- int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);
-
- ucs2_as_utf8(utf8_name, name, limit - 1);
- if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
- return 0;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return 0;
-
- memcpy(entry->var.VariableName, name, name_size);
- memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));
-
- efivar_entry_add(entry, list);
-
- return 0;
-}
-
static __init int efivar_ssdt_load(void)
{
- LIST_HEAD(entries);
- struct efivar_entry *entry, *aux;
- unsigned long size;
- void *data;
- int ret;
+ unsigned long name_size = 256;
+ efi_char16_t *name = NULL;
+ efi_status_t status;
+ efi_guid_t guid;
if (!efivar_ssdt[0])
return 0;
- ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
-
- list_for_each_entry_safe(entry, aux, &entries, list) {
- pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
- &entry->var.VendorGuid);
+ name = kzalloc(name_size, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
- list_del(&entry->list);
+ for (;;) {
+ char utf8_name[EFIVAR_SSDT_NAME_MAX];
+ unsigned long data_size = 0;
+ void *data;
+ int limit;
- ret = efivar_entry_size(entry, &size);
- if (ret) {
- pr_err("failed to get var size\n");
- goto free_entry;
+ status = efi.get_next_variable(&name_size, name, &guid);
+ if (status == EFI_NOT_FOUND) {
+ break;
+ } else if (status == EFI_BUFFER_TOO_SMALL) {
+ name = krealloc(name, name_size, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ continue;
}
- data = kmalloc(size, GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto free_entry;
- }
+ limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
+ ucs2_as_utf8(utf8_name, name, limit - 1);
+ if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
+ continue;
- ret = efivar_entry_get(entry, NULL, &size, data);
- if (ret) {
- pr_err("failed to get var data\n");
- goto free_data;
- }
+ pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);
- ret = acpi_load_table(data, NULL);
- if (ret) {
- pr_err("failed to load table: %d\n", ret);
- goto free_data;
- }
+ status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
+ if (status != EFI_BUFFER_TOO_SMALL || !data_size)
+ return -EIO;
- goto free_entry;
+ data = kmalloc(data_size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
-free_data:
+ status = efi.get_variable(name, &guid, NULL, &data_size, data);
+ if (status == EFI_SUCCESS) {
+ acpi_status ret = acpi_load_table(data, NULL);
+ if (ret)
+ pr_err("failed to load table: %u\n", ret);
+ } else {
+ pr_err("failed to get var data: 0x%lx\n", status);
+ }
kfree(data);
-
-free_entry:
- kfree(entry);
}
-
- return ret;
+ return 0;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
@@ -446,6 +425,29 @@ err_put:
subsys_initcall(efisubsys_init);
+void __init efi_find_mirror(void)
+{
+ efi_memory_desc_t *md;
+ u64 mirror_size = 0, total_size = 0;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
+ for_each_efi_memory_desc(md) {
+ unsigned long long start = md->phys_addr;
+ unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+
+ total_size += size;
+ if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
+ memblock_mark_mirror(start, size);
+ mirror_size += size;
+ }
+ }
+ if (mirror_size)
+ pr_info("Memory: %lldM/%lldM mirrored memory\n",
+ mirror_size>>20, total_size>>20);
+}
+
/*
* Find the efi memory descriptor for a given physical address. Given a
* physical address, determine if it exists within an EFI Memory Map entry,
@@ -897,6 +899,7 @@ int efi_status_to_err(efi_status_t status)
return err;
}
+EXPORT_SYMBOL_GPL(efi_status_to_err);
static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 15a47539dc56..8ced7af8e56d 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -10,69 +10,51 @@
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/slab.h>
+#include <linux/ucs2_string.h>
-static void efibc_str_to_str16(const char *str, efi_char16_t *str16)
-{
- size_t i;
-
- for (i = 0; i < strlen(str); i++)
- str16[i] = str[i];
-
- str16[i] = '\0';
-}
+#define MAX_DATA_LEN 512
-static int efibc_set_variable(const char *name, const char *value)
+static int efibc_set_variable(efi_char16_t *name, efi_char16_t *value,
+ unsigned long len)
{
- int ret;
- efi_guid_t guid = LINUX_EFI_LOADER_ENTRY_GUID;
- struct efivar_entry *entry;
- size_t size = (strlen(value) + 1) * sizeof(efi_char16_t);
+ efi_status_t status;
- if (size > sizeof(entry->var.Data)) {
- pr_err("value is too large (%zu bytes) for '%s' EFI variable\n", size, name);
- return -EINVAL;
- }
+ status = efi.set_variable(name, &LINUX_EFI_LOADER_ENTRY_GUID,
+ EFI_VARIABLE_NON_VOLATILE
+ | EFI_VARIABLE_BOOTSERVICE_ACCESS
+ | EFI_VARIABLE_RUNTIME_ACCESS,
+ len * sizeof(efi_char16_t), value);
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- pr_err("failed to allocate efivar entry for '%s' EFI variable\n", name);
- return -ENOMEM;
+ if (status != EFI_SUCCESS) {
+ pr_err("failed to set EFI variable: 0x%lx\n", status);
+ return -EIO;
}
-
- efibc_str_to_str16(name, entry->var.VariableName);
- efibc_str_to_str16(value, (efi_char16_t *)entry->var.Data);
- memcpy(&entry->var.VendorGuid, &guid, sizeof(guid));
-
- ret = efivar_entry_set_safe(entry->var.VariableName,
- entry->var.VendorGuid,
- EFI_VARIABLE_NON_VOLATILE
- | EFI_VARIABLE_BOOTSERVICE_ACCESS
- | EFI_VARIABLE_RUNTIME_ACCESS,
- false, size, entry->var.Data);
-
- if (ret)
- pr_err("failed to set %s EFI variable: 0x%x\n",
- name, ret);
-
- kfree(entry);
- return ret;
+ return 0;
}
static int efibc_reboot_notifier_call(struct notifier_block *notifier,
unsigned long event, void *data)
{
- const char *reason = "shutdown";
+ efi_char16_t *reason = event == SYS_RESTART ? L"reboot"
+ : L"shutdown";
+ const u8 *str = data;
+ efi_char16_t *wdata;
+ unsigned long l;
int ret;
- if (event == SYS_RESTART)
- reason = "reboot";
-
- ret = efibc_set_variable("LoaderEntryRebootReason", reason);
+ ret = efibc_set_variable(L"LoaderEntryRebootReason", reason,
+ ucs2_strlen(reason));
if (ret || !data)
return NOTIFY_DONE;
- efibc_set_variable("LoaderEntryOneShot", (char *)data);
+ wdata = kmalloc(MAX_DATA_LEN * sizeof(efi_char16_t), GFP_KERNEL);
+ for (l = 0; l < MAX_DATA_LEN - 1 && str[l] != '\0'; l++)
+ wdata[l] = str[l];
+ wdata[l] = L'\0';
+
+ efibc_set_variable(L"LoaderEntryOneShot", wdata, l);
+ kfree(wdata);
return NOTIFY_DONE;
}
@@ -84,7 +66,7 @@ static int __init efibc_init(void)
{
int ret;
- if (!efivars_kobject() || !efivar_supports_writes())
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE))
return -ENODEV;
ret = register_reboot_notifier(&efibc_reboot_notifier);
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
deleted file mode 100644
index ea0bc39dc965..000000000000
--- a/drivers/firmware/efi/efivars.c
+++ /dev/null
@@ -1,671 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Originally from efivars.c,
- *
- * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
- * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
- *
- * This code takes all variables accessible from EFI runtime and
- * exports them via sysfs
- */
-
-#include <linux/efi.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/ucs2_string.h>
-#include <linux/compat.h>
-
-#define EFIVARS_VERSION "0.08"
-#define EFIVARS_DATE "2004-May-17"
-
-MODULE_AUTHOR("Matt Domsch <Matt_Domsch@Dell.com>");
-MODULE_DESCRIPTION("sysfs interface to EFI Variables");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(EFIVARS_VERSION);
-
-static LIST_HEAD(efivar_sysfs_list);
-
-static struct kset *efivars_kset;
-
-static struct bin_attribute *efivars_new_var;
-static struct bin_attribute *efivars_del_var;
-
-struct compat_efi_variable {
- efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)];
- efi_guid_t VendorGuid;
- __u32 DataSize;
- __u8 Data[1024];
- __u32 Status;
- __u32 Attributes;
-} __packed;
-
-struct efivar_attribute {
- struct attribute attr;
- ssize_t (*show) (struct efivar_entry *entry, char *buf);
- ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
-};
-
-#define EFIVAR_ATTR(_name, _mode, _show, _store) \
-struct efivar_attribute efivar_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode}, \
- .show = _show, \
- .store = _store, \
-};
-
-#define to_efivar_attr(_attr) container_of(_attr, struct efivar_attribute, attr)
-#define to_efivar_entry(obj) container_of(obj, struct efivar_entry, kobj)
-
-/*
- * Prototype for sysfs creation function
- */
-static int
-efivar_create_sysfs_entry(struct efivar_entry *new_var);
-
-static ssize_t
-efivar_guid_read(struct efivar_entry *entry, char *buf)
-{
- struct efi_variable *var = &entry->var;
- char *str = buf;
-
- if (!entry || !buf)
- return 0;
-
- efi_guid_to_str(&var->VendorGuid, str);
- str += strlen(str);
- str += sprintf(str, "\n");
-
- return str - buf;
-}
-
-static ssize_t
-efivar_attr_read(struct efivar_entry *entry, char *buf)
-{
- struct efi_variable *var = &entry->var;
- unsigned long size = sizeof(var->Data);
- char *str = buf;
- int ret;
-
- if (!entry || !buf)
- return -EINVAL;
-
- ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
- var->DataSize = size;
- if (ret)
- return -EIO;
-
- if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
- str += sprintf(str, "EFI_VARIABLE_NON_VOLATILE\n");
- if (var->Attributes & EFI_VARIABLE_BOOTSERVICE_ACCESS)
- str += sprintf(str, "EFI_VARIABLE_BOOTSERVICE_ACCESS\n");
- if (var->Attributes & EFI_VARIABLE_RUNTIME_ACCESS)
- str += sprintf(str, "EFI_VARIABLE_RUNTIME_ACCESS\n");
- if (var->Attributes & EFI_VARIABLE_HARDWARE_ERROR_RECORD)
- str += sprintf(str, "EFI_VARIABLE_HARDWARE_ERROR_RECORD\n");
- if (var->Attributes & EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS)
- str += sprintf(str,
- "EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS\n");
- if (var->Attributes &
- EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS)
- str += sprintf(str,
- "EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS\n");
- if (var->Attributes & EFI_VARIABLE_APPEND_WRITE)
- str += sprintf(str, "EFI_VARIABLE_APPEND_WRITE\n");
- return str - buf;
-}
-
-static ssize_t
-efivar_size_read(struct efivar_entry *entry, char *buf)
-{
- struct efi_variable *var = &entry->var;
- unsigned long size = sizeof(var->Data);
- char *str = buf;
- int ret;
-
- if (!entry || !buf)
- return -EINVAL;
-
- ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
- var->DataSize = size;
- if (ret)
- return -EIO;
-
- str += sprintf(str, "0x%lx\n", var->DataSize);
- return str - buf;
-}
-
-static ssize_t
-efivar_data_read(struct efivar_entry *entry, char *buf)
-{
- struct efi_variable *var = &entry->var;
- unsigned long size = sizeof(var->Data);
- int ret;
-
- if (!entry || !buf)
- return -EINVAL;
-
- ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
- var->DataSize = size;
- if (ret)
- return -EIO;
-
- memcpy(buf, var->Data, var->DataSize);
- return var->DataSize;
-}
-
-static inline int
-sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
- unsigned long size, u32 attributes, u8 *data)
-{
- /*
- * If only updating the variable data, then the name
- * and guid should remain the same
- */
- if (memcmp(name, var->VariableName, sizeof(var->VariableName)) ||
- efi_guidcmp(vendor, var->VendorGuid)) {
- printk(KERN_ERR "efivars: Cannot edit the wrong variable!\n");
- return -EINVAL;
- }
-
- if ((size <= 0) || (attributes == 0)){
- printk(KERN_ERR "efivars: DataSize & Attributes must be valid!\n");
- return -EINVAL;
- }
-
- if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
- efivar_validate(vendor, name, data, size) == false) {
- printk(KERN_ERR "efivars: Malformed variable content\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void
-copy_out_compat(struct efi_variable *dst, struct compat_efi_variable *src)
-{
- memcpy(dst->VariableName, src->VariableName, EFI_VAR_NAME_LEN);
- memcpy(dst->Data, src->Data, sizeof(src->Data));
-
- dst->VendorGuid = src->VendorGuid;
- dst->DataSize = src->DataSize;
- dst->Attributes = src->Attributes;
-}
-
-/*
- * We allow each variable to be edited via rewriting the
- * entire efi variable structure.
- */
-static ssize_t
-efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
-{
- struct efi_variable *new_var, *var = &entry->var;
- efi_char16_t *name;
- unsigned long size;
- efi_guid_t vendor;
- u32 attributes;
- u8 *data;
- int err;
-
- if (!entry || !buf)
- return -EINVAL;
-
- if (in_compat_syscall()) {
- struct compat_efi_variable *compat;
-
- if (count != sizeof(*compat))
- return -EINVAL;
-
- compat = (struct compat_efi_variable *)buf;
- attributes = compat->Attributes;
- vendor = compat->VendorGuid;
- name = compat->VariableName;
- size = compat->DataSize;
- data = compat->Data;
-
- err = sanity_check(var, name, vendor, size, attributes, data);
- if (err)
- return err;
-
- copy_out_compat(&entry->var, compat);
- } else {
- if (count != sizeof(struct efi_variable))
- return -EINVAL;
-
- new_var = (struct efi_variable *)buf;
-
- attributes = new_var->Attributes;
- vendor = new_var->VendorGuid;
- name = new_var->VariableName;
- size = new_var->DataSize;
- data = new_var->Data;
-
- err = sanity_check(var, name, vendor, size, attributes, data);
- if (err)
- return err;
-
- memcpy(&entry->var, new_var, count);
- }
-
- err = efivar_entry_set(entry, attributes, size, data, NULL);
- if (err) {
- printk(KERN_WARNING "efivars: set_variable() failed: status=%d\n", err);
- return -EIO;
- }
-
- return count;
-}
-
-static ssize_t
-efivar_show_raw(struct efivar_entry *entry, char *buf)
-{
- struct efi_variable *var = &entry->var;
- struct compat_efi_variable *compat;
- unsigned long datasize = sizeof(var->Data);
- size_t size;
- int ret;
-
- if (!entry || !buf)
- return 0;
-
- ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data);
- var->DataSize = datasize;
- if (ret)
- return -EIO;
-
- if (in_compat_syscall()) {
- compat = (struct compat_efi_variable *)buf;
-
- size = sizeof(*compat);
- memcpy(compat->VariableName, var->VariableName,
- EFI_VAR_NAME_LEN);
- memcpy(compat->Data, var->Data, sizeof(compat->Data));
-
- compat->VendorGuid = var->VendorGuid;
- compat->DataSize = var->DataSize;
- compat->Attributes = var->Attributes;
- } else {
- size = sizeof(*var);
- memcpy(buf, var, size);
- }
-
- return size;
-}
-
-/*
- * Generic read/write functions that call the specific functions of
- * the attributes...
- */
-static ssize_t efivar_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct efivar_entry *var = to_efivar_entry(kobj);
- struct efivar_attribute *efivar_attr = to_efivar_attr(attr);
- ssize_t ret = -EIO;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- if (efivar_attr->show) {
- ret = efivar_attr->show(var, buf);
- }
- return ret;
-}
-
-static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
- struct efivar_entry *var = to_efivar_entry(kobj);
- struct efivar_attribute *efivar_attr = to_efivar_attr(attr);
- ssize_t ret = -EIO;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- if (efivar_attr->store)
- ret = efivar_attr->store(var, buf, count);
-
- return ret;
-}
-
-static const struct sysfs_ops efivar_attr_ops = {
- .show = efivar_attr_show,
- .store = efivar_attr_store,
-};
-
-static void efivar_release(struct kobject *kobj)
-{
- struct efivar_entry *var = to_efivar_entry(kobj);
- kfree(var);
-}
-
-static EFIVAR_ATTR(guid, 0400, efivar_guid_read, NULL);
-static EFIVAR_ATTR(attributes, 0400, efivar_attr_read, NULL);
-static EFIVAR_ATTR(size, 0400, efivar_size_read, NULL);
-static EFIVAR_ATTR(data, 0400, efivar_data_read, NULL);
-static EFIVAR_ATTR(raw_var, 0600, efivar_show_raw, efivar_store_raw);
-
-static struct attribute *def_attrs[] = {
- &efivar_attr_guid.attr,
- &efivar_attr_size.attr,
- &efivar_attr_attributes.attr,
- &efivar_attr_data.attr,
- &efivar_attr_raw_var.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(def);
-
-static struct kobj_type efivar_ktype = {
- .release = efivar_release,
- .sysfs_ops = &efivar_attr_ops,
- .default_groups = def_groups,
-};
-
-static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
-{
- struct compat_efi_variable *compat = (struct compat_efi_variable *)buf;
- struct efi_variable *new_var = (struct efi_variable *)buf;
- struct efivar_entry *new_entry;
- bool need_compat = in_compat_syscall();
- efi_char16_t *name;
- unsigned long size;
- u32 attributes;
- u8 *data;
- int err;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- if (need_compat) {
- if (count != sizeof(*compat))
- return -EINVAL;
-
- attributes = compat->Attributes;
- name = compat->VariableName;
- size = compat->DataSize;
- data = compat->Data;
- } else {
- if (count != sizeof(*new_var))
- return -EINVAL;
-
- attributes = new_var->Attributes;
- name = new_var->VariableName;
- size = new_var->DataSize;
- data = new_var->Data;
- }
-
- if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
- efivar_validate(new_var->VendorGuid, name, data,
- size) == false) {
- printk(KERN_ERR "efivars: Malformed variable content\n");
- return -EINVAL;
- }
-
- new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
- if (!new_entry)
- return -ENOMEM;
-
- if (need_compat)
- copy_out_compat(&new_entry->var, compat);
- else
- memcpy(&new_entry->var, new_var, sizeof(*new_var));
-
- err = efivar_entry_set(new_entry, attributes, size,
- data, &efivar_sysfs_list);
- if (err) {
- if (err == -EEXIST)
- err = -EINVAL;
- goto out;
- }
-
- if (efivar_create_sysfs_entry(new_entry)) {
- printk(KERN_WARNING "efivars: failed to create sysfs entry.\n");
- kfree(new_entry);
- }
- return count;
-
-out:
- kfree(new_entry);
- return err;
-}
-
-static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
-{
- struct efi_variable *del_var = (struct efi_variable *)buf;
- struct compat_efi_variable *compat;
- struct efivar_entry *entry;
- efi_char16_t *name;
- efi_guid_t vendor;
- int err = 0;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- if (in_compat_syscall()) {
- if (count != sizeof(*compat))
- return -EINVAL;
-
- compat = (struct compat_efi_variable *)buf;
- name = compat->VariableName;
- vendor = compat->VendorGuid;
- } else {
- if (count != sizeof(*del_var))
- return -EINVAL;
-
- name = del_var->VariableName;
- vendor = del_var->VendorGuid;
- }
-
- if (efivar_entry_iter_begin())
- return -EINTR;
- entry = efivar_entry_find(name, vendor, &efivar_sysfs_list, true);
- if (!entry)
- err = -EINVAL;
- else if (__efivar_entry_delete(entry))
- err = -EIO;
-
- if (err) {
- efivar_entry_iter_end();
- return err;
- }
-
- if (!entry->scanning) {
- efivar_entry_iter_end();
- efivar_unregister(entry);
- } else
- efivar_entry_iter_end();
-
- /* It's dead Jim.... */
- return count;
-}
-
-/**
- * efivar_create_sysfs_entry - create a new entry in sysfs
- * @new_var: efivar entry to create
- *
- * Returns 0 on success, negative error code on failure
- */
-static int
-efivar_create_sysfs_entry(struct efivar_entry *new_var)
-{
- int short_name_size;
- char *short_name;
- unsigned long utf8_name_size;
- efi_char16_t *variable_name = new_var->var.VariableName;
- int ret;
-
- /*
- * Length of the variable bytes in UTF8, plus the '-' separator,
- * plus the GUID, plus trailing NUL
- */
- utf8_name_size = ucs2_utf8size(variable_name);
- short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
-
- short_name = kmalloc(short_name_size, GFP_KERNEL);
- if (!short_name)
- return -ENOMEM;
-
- ucs2_as_utf8(short_name, variable_name, short_name_size);
-
- /* This is ugly, but necessary to separate one vendor's
- private variables from another's. */
- short_name[utf8_name_size] = '-';
- efi_guid_to_str(&new_var->var.VendorGuid,
- short_name + utf8_name_size + 1);
-
- new_var->kobj.kset = efivars_kset;
-
- ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype,
- NULL, "%s", short_name);
- kfree(short_name);
- if (ret) {
- kobject_put(&new_var->kobj);
- return ret;
- }
-
- kobject_uevent(&new_var->kobj, KOBJ_ADD);
- if (efivar_entry_add(new_var, &efivar_sysfs_list)) {
- efivar_unregister(new_var);
- return -EINTR;
- }
-
- return 0;
-}
-
-static int
-create_efivars_bin_attributes(void)
-{
- struct bin_attribute *attr;
- int error;
-
- /* new_var */
- attr = kzalloc(sizeof(*attr), GFP_KERNEL);
- if (!attr)
- return -ENOMEM;
-
- attr->attr.name = "new_var";
- attr->attr.mode = 0200;
- attr->write = efivar_create;
- efivars_new_var = attr;
-
- /* del_var */
- attr = kzalloc(sizeof(*attr), GFP_KERNEL);
- if (!attr) {
- error = -ENOMEM;
- goto out_free;
- }
- attr->attr.name = "del_var";
- attr->attr.mode = 0200;
- attr->write = efivar_delete;
- efivars_del_var = attr;
-
- sysfs_bin_attr_init(efivars_new_var);
- sysfs_bin_attr_init(efivars_del_var);
-
- /* Register */
- error = sysfs_create_bin_file(&efivars_kset->kobj, efivars_new_var);
- if (error) {
- printk(KERN_ERR "efivars: unable to create new_var sysfs file"
- " due to error %d\n", error);
- goto out_free;
- }
-
- error = sysfs_create_bin_file(&efivars_kset->kobj, efivars_del_var);
- if (error) {
- printk(KERN_ERR "efivars: unable to create del_var sysfs file"
- " due to error %d\n", error);
- sysfs_remove_bin_file(&efivars_kset->kobj, efivars_new_var);
- goto out_free;
- }
-
- return 0;
-out_free:
- kfree(efivars_del_var);
- efivars_del_var = NULL;
- kfree(efivars_new_var);
- efivars_new_var = NULL;
- return error;
-}
-
-static int efivars_sysfs_callback(efi_char16_t *name, efi_guid_t vendor,
- unsigned long name_size, void *data)
-{
- struct efivar_entry *entry;
-
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- memcpy(entry->var.VariableName, name, name_size);
- memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
-
- efivar_create_sysfs_entry(entry);
-
- return 0;
-}
-
-static int efivar_sysfs_destroy(struct efivar_entry *entry, void *data)
-{
- int err = efivar_entry_remove(entry);
-
- if (err)
- return err;
- efivar_unregister(entry);
- return 0;
-}
-
-static void efivars_sysfs_exit(void)
-{
- /* Remove all entries and destroy */
- int err;
-
- err = __efivar_entry_iter(efivar_sysfs_destroy, &efivar_sysfs_list,
- NULL, NULL);
- if (err) {
- pr_err("efivars: Failed to destroy sysfs entries\n");
- return;
- }
-
- if (efivars_new_var)
- sysfs_remove_bin_file(&efivars_kset->kobj, efivars_new_var);
- if (efivars_del_var)
- sysfs_remove_bin_file(&efivars_kset->kobj, efivars_del_var);
- kfree(efivars_new_var);
- kfree(efivars_del_var);
- kset_unregister(efivars_kset);
-}
-
-static int efivars_sysfs_init(void)
-{
- struct kobject *parent_kobj = efivars_kobject();
- int error = 0;
-
- /* No efivars has been registered yet */
- if (!parent_kobj || !efivar_supports_writes())
- return 0;
-
- printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
- EFIVARS_DATE);
-
- efivars_kset = kset_create_and_add("vars", NULL, parent_kobj);
- if (!efivars_kset) {
- printk(KERN_ERR "efivars: Subsystem registration failed.\n");
- return -ENOMEM;
- }
-
- efivar_init(efivars_sysfs_callback, NULL, true, &efivar_sysfs_list);
-
- error = create_efivars_bin_attributes();
- if (error) {
- efivars_sysfs_exit();
- return error;
- }
-
- return 0;
-}
-
-module_init(efivars_sysfs_init);
-module_exit(efivars_sysfs_exit);
diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c
index 9e85e58d1f27..b450ebf95977 100644
--- a/drivers/firmware/efi/libstub/riscv-stub.c
+++ b/drivers/firmware/efi/libstub/riscv-stub.c
@@ -8,6 +8,7 @@
#include <asm/efi.h>
#include <asm/sections.h>
+#include <asm/unaligned.h>
#include "efistub.h"
@@ -29,7 +30,7 @@ static int get_boot_hartid_from_fdt(void)
{
const void *fdt;
int chosen_node, len;
- const fdt32_t *prop;
+ const void *prop;
fdt = get_efi_config_table(DEVICE_TREE_GUID);
if (!fdt)
@@ -40,10 +41,16 @@ static int get_boot_hartid_from_fdt(void)
return -EINVAL;
prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len);
- if (!prop || len != sizeof(u32))
+ if (!prop)
+ return -EINVAL;
+
+ if (len == sizeof(u32))
+ hartid = (unsigned long) fdt32_to_cpu(*(fdt32_t *)prop);
+ else if (len == sizeof(u64))
+ hartid = (unsigned long) fdt64_to_cpu(__get_unaligned_t(fdt64_t, prop));
+ else
return -EINVAL;
- hartid = fdt32_to_cpu(*prop);
return 0;
}
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 4df55a55da84..6ec7970dbd40 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -59,8 +59,7 @@ static void __init efi_memmap_free(void)
* Depending on whether mm_init() has already been invoked or not,
* either memblock or "normal" page allocation is used.
*
- * Returns the physical address of the allocated memory map on
- * success, zero on failure.
+ * Returns zero on success, a negative error code on failure.
*/
int __init efi_memmap_alloc(unsigned int num_entries,
struct efi_memory_map_data *data)
@@ -245,7 +244,7 @@ int __init efi_memmap_install(struct efi_memory_map_data *data)
* @range: Address range (start, end) to split around
*
* Returns the number of additional EFI memmap entries required to
- * accomodate @range.
+ * accommodate @range.
*/
int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
{
diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c
index 73089a24f04b..ceae84c19d22 100644
--- a/drivers/firmware/efi/reboot.c
+++ b/drivers/firmware/efi/reboot.c
@@ -6,7 +6,7 @@
#include <linux/efi.h>
#include <linux/reboot.h>
-static void (*orig_pm_power_off)(void);
+static struct sys_off_handler *efi_sys_off_handler;
int efi_reboot_quirk_mode = -1;
@@ -51,15 +51,11 @@ bool __weak efi_poweroff_required(void)
return false;
}
-static void efi_power_off(void)
+static int efi_power_off(struct sys_off_data *data)
{
efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
- /*
- * The above call should not return, if it does fall back to
- * the original power off method (typically ACPI poweroff).
- */
- if (orig_pm_power_off)
- orig_pm_power_off();
+
+ return NOTIFY_DONE;
}
static int __init efi_shutdown_init(void)
@@ -68,8 +64,13 @@ static int __init efi_shutdown_init(void)
return -ENODEV;
if (efi_poweroff_required()) {
- orig_pm_power_off = pm_power_off;
- pm_power_off = efi_power_off;
+ /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */
+ efi_sys_off_handler =
+ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_FIRMWARE + 1,
+ efi_power_off, NULL);
+ if (IS_ERR(efi_sys_off_handler))
+ return PTR_ERR(efi_sys_off_handler);
}
return 0;
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index cae590bd08f2..dd74d2ad3184 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -6,306 +6,24 @@
* Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
*/
-#include <linux/capability.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/smp.h>
#include <linux/efi.h>
-#include <linux/sysfs.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
#include <linux/ucs2_string.h>
/* Private pointer to registered efivars */
static struct efivars *__efivars;
-/*
- * efivars_lock protects three things:
- * 1) efivarfs_list and efivars_sysfs_list
- * 2) ->ops calls
- * 3) (un)registration of __efivars
- */
static DEFINE_SEMAPHORE(efivars_lock);
-static bool
-validate_device_path(efi_char16_t *var_name, int match, u8 *buffer,
- unsigned long len)
-{
- struct efi_generic_dev_path *node;
- int offset = 0;
-
- node = (struct efi_generic_dev_path *)buffer;
-
- if (len < sizeof(*node))
- return false;
-
- while (offset <= len - sizeof(*node) &&
- node->length >= sizeof(*node) &&
- node->length <= len - offset) {
- offset += node->length;
-
- if ((node->type == EFI_DEV_END_PATH ||
- node->type == EFI_DEV_END_PATH2) &&
- node->sub_type == EFI_DEV_END_ENTIRE)
- return true;
-
- node = (struct efi_generic_dev_path *)(buffer + offset);
- }
-
- /*
- * If we're here then either node->length pointed past the end
- * of the buffer or we reached the end of the buffer without
- * finding a device path end node.
- */
- return false;
-}
-
-static bool
-validate_boot_order(efi_char16_t *var_name, int match, u8 *buffer,
- unsigned long len)
-{
- /* An array of 16-bit integers */
- if ((len % 2) != 0)
- return false;
-
- return true;
-}
-
-static bool
-validate_load_option(efi_char16_t *var_name, int match, u8 *buffer,
- unsigned long len)
-{
- u16 filepathlength;
- int i, desclength = 0, namelen;
-
- namelen = ucs2_strnlen(var_name, EFI_VAR_NAME_LEN);
-
- /* Either "Boot" or "Driver" followed by four digits of hex */
- for (i = match; i < match+4; i++) {
- if (var_name[i] > 127 ||
- hex_to_bin(var_name[i] & 0xff) < 0)
- return true;
- }
-
- /* Reject it if there's 4 digits of hex and then further content */
- if (namelen > match + 4)
- return false;
-
- /* A valid entry must be at least 8 bytes */
- if (len < 8)
- return false;
-
- filepathlength = buffer[4] | buffer[5] << 8;
-
- /*
- * There's no stored length for the description, so it has to be
- * found by hand
- */
- desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
-
- /* Each boot entry must have a descriptor */
- if (!desclength)
- return false;
-
- /*
- * If the sum of the length of the description, the claimed filepath
- * length and the original header are greater than the length of the
- * variable, it's malformed
- */
- if ((desclength + filepathlength + 6) > len)
- return false;
-
- /*
- * And, finally, check the filepath
- */
- return validate_device_path(var_name, match, buffer + desclength + 6,
- filepathlength);
-}
-
-static bool
-validate_uint16(efi_char16_t *var_name, int match, u8 *buffer,
- unsigned long len)
-{
- /* A single 16-bit integer */
- if (len != 2)
- return false;
-
- return true;
-}
-
-static bool
-validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
- unsigned long len)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- if (buffer[i] > 127)
- return false;
-
- if (buffer[i] == 0)
- return true;
- }
-
- return false;
-}
-
-struct variable_validate {
- efi_guid_t vendor;
- char *name;
- bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
- unsigned long len);
-};
-
-/*
- * This is the list of variables we need to validate, as well as the
- * whitelist for what we think is safe not to default to immutable.
- *
- * If it has a validate() method that's not NULL, it'll go into the
- * validation routine. If not, it is assumed valid, but still used for
- * whitelisting.
- *
- * Note that it's sorted by {vendor,name}, but globbed names must come after
- * any other name with the same prefix.
- */
-static const struct variable_validate variable_validate[] = {
- { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
- { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
- { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
- { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
- { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
- { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
- { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
- { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
- { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
- { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
- { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
- { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
- { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
- { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
- { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
- { LINUX_EFI_CRASH_GUID, "*", NULL },
- { NULL_GUID, "", NULL },
-};
-
-/*
- * Check if @var_name matches the pattern given in @match_name.
- *
- * @var_name: an array of @len non-NUL characters.
- * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
- * final "*" character matches any trailing characters @var_name,
- * including the case when there are none left in @var_name.
- * @match: on output, the number of non-wildcard characters in @match_name
- * that @var_name matches, regardless of the return value.
- * @return: whether @var_name fully matches @match_name.
- */
-static bool
-variable_matches(const char *var_name, size_t len, const char *match_name,
- int *match)
-{
- for (*match = 0; ; (*match)++) {
- char c = match_name[*match];
-
- switch (c) {
- case '*':
- /* Wildcard in @match_name means we've matched. */
- return true;
-
- case '\0':
- /* @match_name has ended. Has @var_name too? */
- return (*match == len);
-
- default:
- /*
- * We've reached a non-wildcard char in @match_name.
- * Continue only if there's an identical character in
- * @var_name.
- */
- if (*match < len && c == var_name[*match])
- continue;
- return false;
- }
- }
-}
-
-bool
-efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
- unsigned long data_size)
-{
- int i;
- unsigned long utf8_size;
- u8 *utf8_name;
-
- utf8_size = ucs2_utf8size(var_name);
- utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
- if (!utf8_name)
- return false;
-
- ucs2_as_utf8(utf8_name, var_name, utf8_size);
- utf8_name[utf8_size] = '\0';
-
- for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
- const char *name = variable_validate[i].name;
- int match = 0;
-
- if (efi_guidcmp(vendor, variable_validate[i].vendor))
- continue;
-
- if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
- if (variable_validate[i].validate == NULL)
- break;
- kfree(utf8_name);
- return variable_validate[i].validate(var_name, match,
- data, data_size);
- }
- }
- kfree(utf8_name);
- return true;
-}
-EXPORT_SYMBOL_GPL(efivar_validate);
-
-bool
-efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
- size_t len)
-{
- int i;
- bool found = false;
- int match = 0;
-
- /*
- * Check if our variable is in the validated variables list
- */
- for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
- if (efi_guidcmp(variable_validate[i].vendor, vendor))
- continue;
-
- if (variable_matches(var_name, len,
- variable_validate[i].name, &match)) {
- found = true;
- break;
- }
- }
-
- /*
- * If it's in our list, it is removable.
- */
- return found;
-}
-EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
-
-static efi_status_t
-check_var_size(u32 attributes, unsigned long size)
+efi_status_t check_var_size(u32 attributes, unsigned long size)
{
const struct efivar_operations *fops;
- if (!__efivars)
- return EFI_UNSUPPORTED;
-
fops = __efivars->ops;
if (!fops->query_variable_store)
@@ -313,15 +31,12 @@ check_var_size(u32 attributes, unsigned long size)
return fops->query_variable_store(attributes, size, false);
}
+EXPORT_SYMBOL_NS_GPL(check_var_size, EFIVAR);
-static efi_status_t
-check_var_size_nonblocking(u32 attributes, unsigned long size)
+efi_status_t check_var_size_nonblocking(u32 attributes, unsigned long size)
{
const struct efivar_operations *fops;
- if (!__efivars)
- return EFI_UNSUPPORTED;
-
fops = __efivars->ops;
if (!fops->query_variable_store)
@@ -329,894 +44,228 @@ check_var_size_nonblocking(u32 attributes, unsigned long size)
return fops->query_variable_store(attributes, size, true);
}
-
-static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor,
- struct list_head *head)
-{
- struct efivar_entry *entry, *n;
- unsigned long strsize1, strsize2;
- bool found = false;
-
- strsize1 = ucs2_strsize(variable_name, 1024);
- list_for_each_entry_safe(entry, n, head, list) {
- strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
- if (strsize1 == strsize2 &&
- !memcmp(variable_name, &(entry->var.VariableName),
- strsize2) &&
- !efi_guidcmp(entry->var.VendorGuid,
- *vendor)) {
- found = true;
- break;
- }
- }
- return found;
-}
-
-/*
- * Returns the size of variable_name, in bytes, including the
- * terminating NULL character, or variable_name_size if no NULL
- * character is found among the first variable_name_size bytes.
- */
-static unsigned long var_name_strnsize(efi_char16_t *variable_name,
- unsigned long variable_name_size)
-{
- unsigned long len;
- efi_char16_t c;
-
- /*
- * The variable name is, by definition, a NULL-terminated
- * string, so make absolutely sure that variable_name_size is
- * the value we expect it to be. If not, return the real size.
- */
- for (len = 2; len <= variable_name_size; len += sizeof(c)) {
- c = variable_name[(len / sizeof(c)) - 1];
- if (!c)
- break;
- }
-
- return min(len, variable_name_size);
-}
-
-/*
- * Print a warning when duplicate EFI variables are encountered and
- * disable the sysfs workqueue since the firmware is buggy.
- */
-static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
- unsigned long len16)
-{
- size_t i, len8 = len16 / sizeof(efi_char16_t);
- char *str8;
-
- str8 = kzalloc(len8, GFP_KERNEL);
- if (!str8)
- return;
-
- for (i = 0; i < len8; i++)
- str8[i] = str16[i];
-
- printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
- str8, vendor_guid);
- kfree(str8);
-}
+EXPORT_SYMBOL_NS_GPL(check_var_size_nonblocking, EFIVAR);
/**
- * efivar_init - build the initial list of EFI variables
- * @func: callback function to invoke for every variable
- * @data: function-specific data to pass to @func
- * @duplicates: error if we encounter duplicates on @head?
- * @head: initialised head of variable list
- *
- * Get every EFI variable from the firmware and invoke @func. @func
- * should call efivar_entry_add() to build the list of variables.
+ * efivars_kobject - get the kobject for the registered efivars
*
- * Returns 0 on success, or a kernel error code on failure.
+ * If efivars_register() has not been called, return NULL; otherwise
+ * return the kobject used at registration time.
*/
-int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
- void *data, bool duplicates, struct list_head *head)
+struct kobject *efivars_kobject(void)
{
- const struct efivar_operations *ops;
- unsigned long variable_name_size = 1024;
- efi_char16_t *variable_name;
- efi_status_t status;
- efi_guid_t vendor_guid;
- int err = 0;
-
if (!__efivars)
- return -EFAULT;
-
- ops = __efivars->ops;
-
- variable_name = kzalloc(variable_name_size, GFP_KERNEL);
- if (!variable_name) {
- printk(KERN_ERR "efivars: Memory allocation failed.\n");
- return -ENOMEM;
- }
-
- if (down_interruptible(&efivars_lock)) {
- err = -EINTR;
- goto free;
- }
-
- /*
- * Per EFI spec, the maximum storage allocated for both
- * the variable name and variable data is 1024 bytes.
- */
-
- do {
- variable_name_size = 1024;
-
- status = ops->get_next_variable(&variable_name_size,
- variable_name,
- &vendor_guid);
- switch (status) {
- case EFI_SUCCESS:
- if (duplicates)
- up(&efivars_lock);
-
- variable_name_size = var_name_strnsize(variable_name,
- variable_name_size);
-
- /*
- * Some firmware implementations return the
- * same variable name on multiple calls to
- * get_next_variable(). Terminate the loop
- * immediately as there is no guarantee that
- * we'll ever see a different variable name,
- * and may end up looping here forever.
- */
- if (duplicates &&
- variable_is_present(variable_name, &vendor_guid,
- head)) {
- dup_variable_bug(variable_name, &vendor_guid,
- variable_name_size);
- status = EFI_NOT_FOUND;
- } else {
- err = func(variable_name, vendor_guid,
- variable_name_size, data);
- if (err)
- status = EFI_NOT_FOUND;
- }
-
- if (duplicates) {
- if (down_interruptible(&efivars_lock)) {
- err = -EINTR;
- goto free;
- }
- }
-
- break;
- case EFI_UNSUPPORTED:
- err = -EOPNOTSUPP;
- status = EFI_NOT_FOUND;
- break;
- case EFI_NOT_FOUND:
- break;
- default:
- printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n",
- status);
- status = EFI_NOT_FOUND;
- break;
- }
-
- } while (status != EFI_NOT_FOUND);
-
- up(&efivars_lock);
-free:
- kfree(variable_name);
+ return NULL;
- return err;
+ return __efivars->kobject;
}
-EXPORT_SYMBOL_GPL(efivar_init);
+EXPORT_SYMBOL_GPL(efivars_kobject);
/**
- * efivar_entry_add - add entry to variable list
- * @entry: entry to add to list
- * @head: list head
+ * efivars_register - register an efivars
+ * @efivars: efivars to register
+ * @ops: efivars operations
+ * @kobject: @efivars-specific kobject
*
- * Returns 0 on success, or a kernel error code on failure.
+ * Only a single efivars can be registered at any time.
*/
-int efivar_entry_add(struct efivar_entry *entry, struct list_head *head)
+int efivars_register(struct efivars *efivars,
+ const struct efivar_operations *ops,
+ struct kobject *kobject)
{
if (down_interruptible(&efivars_lock))
return -EINTR;
- list_add(&entry->list, head);
- up(&efivars_lock);
- return 0;
-}
-EXPORT_SYMBOL_GPL(efivar_entry_add);
+ efivars->ops = ops;
+ efivars->kobject = kobject;
-/**
- * efivar_entry_remove - remove entry from variable list
- * @entry: entry to remove from list
- *
- * Returns 0 on success, or a kernel error code on failure.
- */
-int efivar_entry_remove(struct efivar_entry *entry)
-{
- if (down_interruptible(&efivars_lock))
- return -EINTR;
- list_del(&entry->list);
- up(&efivars_lock);
+ __efivars = efivars;
- return 0;
-}
-EXPORT_SYMBOL_GPL(efivar_entry_remove);
+ pr_info("Registered efivars operations\n");
-/*
- * efivar_entry_list_del_unlock - remove entry from variable list
- * @entry: entry to remove
- *
- * Remove @entry from the variable list and release the list lock.
- *
- * NOTE: slightly weird locking semantics here - we expect to be
- * called with the efivars lock already held, and we release it before
- * returning. This is because this function is usually called after
- * set_variable() while the lock is still held.
- */
-static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
-{
- list_del(&entry->list);
up(&efivars_lock);
-}
-/**
- * __efivar_entry_delete - delete an EFI variable
- * @entry: entry containing EFI variable to delete
- *
- * Delete the variable from the firmware but leave @entry on the
- * variable list.
- *
- * This function differs from efivar_entry_delete() because it does
- * not remove @entry from the variable list. Also, it is safe to be
- * called from within a efivar_entry_iter_begin() and
- * efivar_entry_iter_end() region, unlike efivar_entry_delete().
- *
- * Returns 0 on success, or a converted EFI status code if
- * set_variable() fails.
- */
-int __efivar_entry_delete(struct efivar_entry *entry)
-{
- efi_status_t status;
-
- if (!__efivars)
- return -EINVAL;
-
- status = __efivars->ops->set_variable(entry->var.VariableName,
- &entry->var.VendorGuid,
- 0, 0, NULL);
-
- return efi_status_to_err(status);
-}
-EXPORT_SYMBOL_GPL(__efivar_entry_delete);
-
-/**
- * efivar_entry_delete - delete variable and remove entry from list
- * @entry: entry containing variable to delete
- *
- * Delete the variable from the firmware and remove @entry from the
- * variable list. It is the caller's responsibility to free @entry
- * once we return.
- *
- * Returns 0 on success, -EINTR if we can't grab the semaphore,
- * converted EFI status code if set_variable() fails.
- */
-int efivar_entry_delete(struct efivar_entry *entry)
-{
- const struct efivar_operations *ops;
- efi_status_t status;
-
- if (down_interruptible(&efivars_lock))
- return -EINTR;
-
- if (!__efivars) {
- up(&efivars_lock);
- return -EINVAL;
- }
- ops = __efivars->ops;
- status = ops->set_variable(entry->var.VariableName,
- &entry->var.VendorGuid,
- 0, 0, NULL);
- if (!(status == EFI_SUCCESS || status == EFI_NOT_FOUND)) {
- up(&efivars_lock);
- return efi_status_to_err(status);
- }
-
- efivar_entry_list_del_unlock(entry);
return 0;
}
-EXPORT_SYMBOL_GPL(efivar_entry_delete);
+EXPORT_SYMBOL_GPL(efivars_register);
/**
- * efivar_entry_set - call set_variable()
- * @entry: entry containing the EFI variable to write
- * @attributes: variable attributes
- * @size: size of @data buffer
- * @data: buffer containing variable data
- * @head: head of variable list
- *
- * Calls set_variable() for an EFI variable. If creating a new EFI
- * variable, this function is usually followed by efivar_entry_add().
- *
- * Before writing the variable, the remaining EFI variable storage
- * space is checked to ensure there is enough room available.
- *
- * If @head is not NULL a lookup is performed to determine whether
- * the entry is already on the list.
+ * efivars_unregister - unregister an efivars
+ * @efivars: efivars to unregister
*
- * Returns 0 on success, -EINTR if we can't grab the semaphore,
- * -EEXIST if a lookup is performed and the entry already exists on
- * the list, or a converted EFI status code if set_variable() fails.
+ * The caller must have already removed every entry from the list;
+ * failure to do so is an error.
*/
-int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
- unsigned long size, void *data, struct list_head *head)
+int efivars_unregister(struct efivars *efivars)
{
- const struct efivar_operations *ops;
- efi_status_t status;
- efi_char16_t *name = entry->var.VariableName;
- efi_guid_t vendor = entry->var.VendorGuid;
+ int rv;
if (down_interruptible(&efivars_lock))
return -EINTR;
if (!__efivars) {
- up(&efivars_lock);
- return -EINVAL;
- }
- ops = __efivars->ops;
- if (head && efivar_entry_find(name, vendor, head, false)) {
- up(&efivars_lock);
- return -EEXIST;
- }
-
- status = check_var_size(attributes, size + ucs2_strsize(name, 1024));
- if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
- status = ops->set_variable(name, &vendor,
- attributes, size, data);
-
- up(&efivars_lock);
-
- return efi_status_to_err(status);
-
-}
-EXPORT_SYMBOL_GPL(efivar_entry_set);
-
-/*
- * efivar_entry_set_nonblocking - call set_variable_nonblocking()
- *
- * This function is guaranteed to not block and is suitable for calling
- * from crash/panic handlers.
- *
- * Crucially, this function will not block if it cannot acquire
- * efivars_lock. Instead, it returns -EBUSY.
- */
-static int
-efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
- u32 attributes, unsigned long size, void *data)
-{
- const struct efivar_operations *ops;
- efi_status_t status;
-
- if (down_trylock(&efivars_lock))
- return -EBUSY;
-
- if (!__efivars) {
- up(&efivars_lock);
- return -EINVAL;
- }
-
- status = check_var_size_nonblocking(attributes,
- size + ucs2_strsize(name, 1024));
- if (status != EFI_SUCCESS) {
- up(&efivars_lock);
- return -ENOSPC;
- }
-
- ops = __efivars->ops;
- status = ops->set_variable_nonblocking(name, &vendor, attributes,
- size, data);
-
- up(&efivars_lock);
- return efi_status_to_err(status);
-}
-
-/**
- * efivar_entry_set_safe - call set_variable() if enough space in firmware
- * @name: buffer containing the variable name
- * @vendor: variable vendor guid
- * @attributes: variable attributes
- * @block: can we block in this context?
- * @size: size of @data buffer
- * @data: buffer containing variable data
- *
- * Ensures there is enough free storage in the firmware for this variable, and
- * if so, calls set_variable(). If creating a new EFI variable, this function
- * is usually followed by efivar_entry_add().
- *
- * Returns 0 on success, -ENOSPC if the firmware does not have enough
- * space for set_variable() to succeed, or a converted EFI status code
- * if set_variable() fails.
- */
-int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
- bool block, unsigned long size, void *data)
-{
- const struct efivar_operations *ops;
- efi_status_t status;
- unsigned long varsize;
-
- if (!__efivars)
- return -EINVAL;
-
- ops = __efivars->ops;
- if (!ops->query_variable_store)
- return -ENOSYS;
-
- /*
- * If the EFI variable backend provides a non-blocking
- * ->set_variable() operation and we're in a context where we
- * cannot block, then we need to use it to avoid live-locks,
- * since the implication is that the regular ->set_variable()
- * will block.
- *
- * If no ->set_variable_nonblocking() is provided then
- * ->set_variable() is assumed to be non-blocking.
- */
- if (!block && ops->set_variable_nonblocking)
- return efivar_entry_set_nonblocking(name, vendor, attributes,
- size, data);
-
- varsize = size + ucs2_strsize(name, 1024);
- if (!block) {
- if (down_trylock(&efivars_lock))
- return -EBUSY;
- status = check_var_size_nonblocking(attributes, varsize);
- } else {
- if (down_interruptible(&efivars_lock))
- return -EINTR;
- status = check_var_size(attributes, varsize);
+ printk(KERN_ERR "efivars not registered\n");
+ rv = -EINVAL;
+ goto out;
}
- if (status != EFI_SUCCESS) {
- up(&efivars_lock);
- return -ENOSPC;
+ if (__efivars != efivars) {
+ rv = -EINVAL;
+ goto out;
}
- status = ops->set_variable(name, &vendor, attributes, size, data);
+ pr_info("Unregistered efivars operations\n");
+ __efivars = NULL;
+ rv = 0;
+out:
up(&efivars_lock);
-
- return efi_status_to_err(status);
+ return rv;
}
-EXPORT_SYMBOL_GPL(efivar_entry_set_safe);
+EXPORT_SYMBOL_GPL(efivars_unregister);
-/**
- * efivar_entry_find - search for an entry
- * @name: the EFI variable name
- * @guid: the EFI variable vendor's guid
- * @head: head of the variable list
- * @remove: should we remove the entry from the list?
- *
- * Search for an entry on the variable list that has the EFI variable
- * name @name and vendor guid @guid. If an entry is found on the list
- * and @remove is true, the entry is removed from the list.
- *
- * The caller MUST call efivar_entry_iter_begin() and
- * efivar_entry_iter_end() before and after the invocation of this
- * function, respectively.
- *
- * Returns the entry if found on the list, %NULL otherwise.
- */
-struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
- struct list_head *head, bool remove)
+int efivar_supports_writes(void)
{
- struct efivar_entry *entry, *n;
- int strsize1, strsize2;
- bool found = false;
-
- list_for_each_entry_safe(entry, n, head, list) {
- strsize1 = ucs2_strsize(name, 1024);
- strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
- if (strsize1 == strsize2 &&
- !memcmp(name, &(entry->var.VariableName), strsize1) &&
- !efi_guidcmp(guid, entry->var.VendorGuid)) {
- found = true;
- break;
- }
- }
-
- if (!found)
- return NULL;
-
- if (remove) {
- if (entry->scanning) {
- /*
- * The entry will be deleted
- * after scanning is completed.
- */
- entry->deleting = true;
- } else
- list_del(&entry->list);
- }
-
- return entry;
+ return __efivars && __efivars->ops->set_variable;
}
-EXPORT_SYMBOL_GPL(efivar_entry_find);
+EXPORT_SYMBOL_GPL(efivar_supports_writes);
-/**
- * efivar_entry_size - obtain the size of a variable
- * @entry: entry for this variable
- * @size: location to store the variable's size
+/*
+ * efivar_lock() - obtain the efivar lock, waiting for it if needed
+ * @return 0 on success, error code on failure
*/
-int efivar_entry_size(struct efivar_entry *entry, unsigned long *size)
+int efivar_lock(void)
{
- const struct efivar_operations *ops;
- efi_status_t status;
-
- *size = 0;
-
if (down_interruptible(&efivars_lock))
return -EINTR;
- if (!__efivars) {
+ if (!__efivars->ops) {
up(&efivars_lock);
- return -EINVAL;
+ return -ENODEV;
}
- ops = __efivars->ops;
- status = ops->get_variable(entry->var.VariableName,
- &entry->var.VendorGuid, NULL, size, NULL);
- up(&efivars_lock);
-
- if (status != EFI_BUFFER_TOO_SMALL)
- return efi_status_to_err(status);
-
return 0;
}
-EXPORT_SYMBOL_GPL(efivar_entry_size);
+EXPORT_SYMBOL_NS_GPL(efivar_lock, EFIVAR);
-/**
- * __efivar_entry_get - call get_variable()
- * @entry: read data for this variable
- * @attributes: variable attributes
- * @size: size of @data buffer
- * @data: buffer to store variable data
- *
- * The caller MUST call efivar_entry_iter_begin() and
- * efivar_entry_iter_end() before and after the invocation of this
- * function, respectively.
- */
-int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
- unsigned long *size, void *data)
-{
- efi_status_t status;
-
- if (!__efivars)
- return -EINVAL;
-
- status = __efivars->ops->get_variable(entry->var.VariableName,
- &entry->var.VendorGuid,
- attributes, size, data);
-
- return efi_status_to_err(status);
-}
-EXPORT_SYMBOL_GPL(__efivar_entry_get);
-
-/**
- * efivar_entry_get - call get_variable()
- * @entry: read data for this variable
- * @attributes: variable attributes
- * @size: size of @data buffer
- * @data: buffer to store variable data
+/*
+ * efivar_trylock() - obtain the efivar lock if it is free
+ * @return 0 on success, error code on failure
*/
-int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
- unsigned long *size, void *data)
+int efivar_trylock(void)
{
- efi_status_t status;
-
- if (down_interruptible(&efivars_lock))
- return -EINTR;
-
- if (!__efivars) {
+ if (down_trylock(&efivars_lock))
+ return -EBUSY;
+ if (!__efivars->ops) {
up(&efivars_lock);
- return -EINVAL;
- }
-
- status = __efivars->ops->get_variable(entry->var.VariableName,
- &entry->var.VendorGuid,
- attributes, size, data);
- up(&efivars_lock);
-
- return efi_status_to_err(status);
-}
-EXPORT_SYMBOL_GPL(efivar_entry_get);
-
-/**
- * efivar_entry_set_get_size - call set_variable() and get new size (atomic)
- * @entry: entry containing variable to set and get
- * @attributes: attributes of variable to be written
- * @size: size of data buffer
- * @data: buffer containing data to write
- * @set: did the set_variable() call succeed?
- *
- * This is a pretty special (complex) function. See efivarfs_file_write().
- *
- * Atomically call set_variable() for @entry and if the call is
- * successful, return the new size of the variable from get_variable()
- * in @size. The success of set_variable() is indicated by @set.
- *
- * Returns 0 on success, -EINVAL if the variable data is invalid,
- * -ENOSPC if the firmware does not have enough available space, or a
- * converted EFI status code if either of set_variable() or
- * get_variable() fail.
- *
- * If the EFI variable does not exist when calling set_variable()
- * (EFI_NOT_FOUND), @entry is removed from the variable list.
- */
-int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
- unsigned long *size, void *data, bool *set)
-{
- const struct efivar_operations *ops;
- efi_char16_t *name = entry->var.VariableName;
- efi_guid_t *vendor = &entry->var.VendorGuid;
- efi_status_t status;
- int err;
-
- *set = false;
-
- if (efivar_validate(*vendor, name, data, *size) == false)
- return -EINVAL;
-
- /*
- * The lock here protects the get_variable call, the conditional
- * set_variable call, and removal of the variable from the efivars
- * list (in the case of an authenticated delete).
- */
- if (down_interruptible(&efivars_lock))
- return -EINTR;
-
- if (!__efivars) {
- err = -EINVAL;
- goto out;
- }
-
- /*
- * Ensure that the available space hasn't shrunk below the safe level
- */
- status = check_var_size(attributes, *size + ucs2_strsize(name, 1024));
- if (status != EFI_SUCCESS) {
- if (status != EFI_UNSUPPORTED) {
- err = efi_status_to_err(status);
- goto out;
- }
-
- if (*size > 65536) {
- err = -ENOSPC;
- goto out;
- }
- }
-
- ops = __efivars->ops;
-
- status = ops->set_variable(name, vendor, attributes, *size, data);
- if (status != EFI_SUCCESS) {
- err = efi_status_to_err(status);
- goto out;
+ return -ENODEV;
}
-
- *set = true;
-
- /*
- * Writing to the variable may have caused a change in size (which
- * could either be an append or an overwrite), or the variable to be
- * deleted. Perform a GetVariable() so we can tell what actually
- * happened.
- */
- *size = 0;
- status = ops->get_variable(entry->var.VariableName,
- &entry->var.VendorGuid,
- NULL, size, NULL);
-
- if (status == EFI_NOT_FOUND)
- efivar_entry_list_del_unlock(entry);
- else
- up(&efivars_lock);
-
- if (status && status != EFI_BUFFER_TOO_SMALL)
- return efi_status_to_err(status);
-
return 0;
-
-out:
- up(&efivars_lock);
- return err;
-
}
-EXPORT_SYMBOL_GPL(efivar_entry_set_get_size);
+EXPORT_SYMBOL_NS_GPL(efivar_trylock, EFIVAR);
-/**
- * efivar_entry_iter_begin - begin iterating the variable list
- *
- * Lock the variable list to prevent entry insertion and removal until
- * efivar_entry_iter_end() is called. This function is usually used in
- * conjunction with __efivar_entry_iter() or efivar_entry_iter().
- */
-int efivar_entry_iter_begin(void)
-{
- return down_interruptible(&efivars_lock);
-}
-EXPORT_SYMBOL_GPL(efivar_entry_iter_begin);
-
-/**
- * efivar_entry_iter_end - finish iterating the variable list
- *
- * Unlock the variable list and allow modifications to the list again.
+/*
+ * efivar_unlock() - release the efivar lock
*/
-void efivar_entry_iter_end(void)
+void efivar_unlock(void)
{
up(&efivars_lock);
}
-EXPORT_SYMBOL_GPL(efivar_entry_iter_end);
+EXPORT_SYMBOL_NS_GPL(efivar_unlock, EFIVAR);
-/**
- * __efivar_entry_iter - iterate over variable list
- * @func: callback function
- * @head: head of the variable list
- * @data: function-specific data to pass to callback
- * @prev: entry to begin iterating from
- *
- * Iterate over the list of EFI variables and call @func with every
- * entry on the list. It is safe for @func to remove entries in the
- * list via efivar_entry_delete().
- *
- * You MUST call efivar_entry_iter_begin() before this function, and
- * efivar_entry_iter_end() afterwards.
- *
- * It is possible to begin iteration from an arbitrary entry within
- * the list by passing @prev. @prev is updated on return to point to
- * the last entry passed to @func. To begin iterating from the
- * beginning of the list @prev must be %NULL.
+/*
+ * efivar_get_variable() - retrieve a variable identified by name/vendor
*
- * The restrictions for @func are the same as documented for
- * efivar_entry_iter().
+ * Must be called with efivars_lock held.
*/
-int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
- struct list_head *head, void *data,
- struct efivar_entry **prev)
+efi_status_t efivar_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 *attr, unsigned long *size, void *data)
{
- struct efivar_entry *entry, *n;
- int err = 0;
-
- if (!prev || !*prev) {
- list_for_each_entry_safe(entry, n, head, list) {
- err = func(entry, data);
- if (err)
- break;
- }
-
- if (prev)
- *prev = entry;
-
- return err;
- }
-
-
- list_for_each_entry_safe_continue((*prev), n, head, list) {
- err = func(*prev, data);
- if (err)
- break;
- }
-
- return err;
+ return __efivars->ops->get_variable(name, vendor, attr, size, data);
}
-EXPORT_SYMBOL_GPL(__efivar_entry_iter);
+EXPORT_SYMBOL_NS_GPL(efivar_get_variable, EFIVAR);
-/**
- * efivar_entry_iter - iterate over variable list
- * @func: callback function
- * @head: head of variable list
- * @data: function-specific data to pass to callback
- *
- * Iterate over the list of EFI variables and call @func with every
- * entry on the list. It is safe for @func to remove entries in the
- * list via efivar_entry_delete() while iterating.
+/*
+ * efivar_get_next_variable() - enumerate the next name/vendor pair
*
- * Some notes for the callback function:
- * - a non-zero return value indicates an error and terminates the loop
- * - @func is called from atomic context
+ * Must be called with efivars_lock held.
*/
-int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
- struct list_head *head, void *data)
+efi_status_t efivar_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name, efi_guid_t *vendor)
{
- int err = 0;
-
- err = efivar_entry_iter_begin();
- if (err)
- return err;
- err = __efivar_entry_iter(func, head, data, NULL);
- efivar_entry_iter_end();
-
- return err;
+ return __efivars->ops->get_next_variable(name_size, name, vendor);
}
-EXPORT_SYMBOL_GPL(efivar_entry_iter);
+EXPORT_SYMBOL_NS_GPL(efivar_get_next_variable, EFIVAR);
-/**
- * efivars_kobject - get the kobject for the registered efivars
+/*
+ * efivar_set_variable_blocking() - local helper function for set_variable
*
- * If efivars_register() has not been called we return NULL,
- * otherwise return the kobject used at registration time.
+ * Must be called with efivars_lock held.
*/
-struct kobject *efivars_kobject(void)
+static efi_status_t
+efivar_set_variable_blocking(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size, void *data)
{
- if (!__efivars)
- return NULL;
+ efi_status_t status;
- return __efivars->kobject;
+ if (data_size > 0) {
+ status = check_var_size(attr, data_size +
+ ucs2_strsize(name, 1024));
+ if (status != EFI_SUCCESS)
+ return status;
+ }
+ return __efivars->ops->set_variable(name, vendor, attr, data_size, data);
}
-EXPORT_SYMBOL_GPL(efivars_kobject);
-/**
- * efivars_register - register an efivars
- * @efivars: efivars to register
- * @ops: efivars operations
- * @kobject: @efivars-specific kobject
+/*
+ * efivar_set_variable_locked() - set a variable identified by name/vendor
*
- * Only a single efivars can be registered at any time.
+ * Must be called with efivars_lock held. If @nonblocking is set, it will use
+ * non-blocking primitives so it is guaranteed not to sleep.
*/
-int efivars_register(struct efivars *efivars,
- const struct efivar_operations *ops,
- struct kobject *kobject)
+efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data, bool nonblocking)
{
- if (down_interruptible(&efivars_lock))
- return -EINTR;
-
- efivars->ops = ops;
- efivars->kobject = kobject;
-
- __efivars = efivars;
+ efi_set_variable_t *setvar;
+ efi_status_t status;
- pr_info("Registered efivars operations\n");
+ if (!nonblocking)
+ return efivar_set_variable_blocking(name, vendor, attr,
+ data_size, data);
- up(&efivars_lock);
-
- return 0;
+ /*
+ * If no _nonblocking variant exists, the ordinary one
+ * is assumed to be non-blocking.
+ */
+ setvar = __efivars->ops->set_variable_nonblocking ?:
+ __efivars->ops->set_variable;
+
+ if (data_size > 0) {
+ status = check_var_size_nonblocking(attr, data_size +
+ ucs2_strsize(name, 1024));
+ if (status != EFI_SUCCESS)
+ return status;
+ }
+ return setvar(name, vendor, attr, data_size, data);
}
-EXPORT_SYMBOL_GPL(efivars_register);
+EXPORT_SYMBOL_NS_GPL(efivar_set_variable_locked, EFIVAR);
-/**
- * efivars_unregister - unregister an efivars
- * @efivars: efivars to unregister
+/*
+ * efivar_set_variable() - set a variable identified by name/vendor
*
- * The caller must have already removed every entry from the list,
- * failure to do so is an error.
+ * Can be called without holding the efivars_lock. Will sleep on obtaining the
+ * lock, or on obtaining other locks that are needed in order to complete the
+ * call.
*/
-int efivars_unregister(struct efivars *efivars)
+efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size, void *data)
{
- int rv;
-
- if (down_interruptible(&efivars_lock))
- return -EINTR;
-
- if (!__efivars) {
- printk(KERN_ERR "efivars not registered\n");
- rv = -EINVAL;
- goto out;
- }
-
- if (__efivars != efivars) {
- rv = -EINVAL;
- goto out;
- }
-
- pr_info("Unregistered efivars operations\n");
- __efivars = NULL;
+ efi_status_t status;
- rv = 0;
-out:
- up(&efivars_lock);
- return rv;
-}
-EXPORT_SYMBOL_GPL(efivars_unregister);
+ if (efivar_lock())
+ return EFI_ABORTED;
-int efivar_supports_writes(void)
-{
- return __efivars && __efivars->ops->set_variable;
+ status = efivar_set_variable_blocking(name, vendor, attr, data_size, data);
+ efivar_unlock();
+ return status;
}
-EXPORT_SYMBOL_GPL(efivar_supports_writes);
+EXPORT_SYMBOL_NS_GPL(efivar_set_variable, EFIVAR);
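For reference (not part of this patch), a minimal sketch of how a consumer of the slimmed-down efivar API would take the lock and read a variable. The EFIVAR-namespaced exports must be imported explicitly; example_read_var() is a hypothetical helper, not an interface added by this change.

	#include <linux/efi.h>
	#include <linux/module.h>

	MODULE_IMPORT_NS(EFIVAR);	/* needed for the EFIVAR-namespaced exports */

	/* Hypothetical helper: read one variable while holding efivars_lock. */
	static efi_status_t example_read_var(efi_char16_t *name, efi_guid_t *vendor,
					     unsigned long *size, void *buf)
	{
		efi_status_t status;

		if (efivar_lock())
			return EFI_ABORTED;

		status = efivar_get_variable(name, vendor, NULL, size, buf);
		efivar_unlock();

		return status;
	}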
diff --git a/drivers/firmware/mtk-adsp-ipc.c b/drivers/firmware/mtk-adsp-ipc.c
index cb255a99170c..3c071f814455 100644
--- a/drivers/firmware/mtk-adsp-ipc.c
+++ b/drivers/firmware/mtk-adsp-ipc.c
@@ -12,6 +12,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+static const char * const adsp_mbox_ch_names[MTK_ADSP_MBOX_NUM] = { "rx", "tx" };
+
/*
* mtk_adsp_ipc_send - send ipc cmd to MTK ADSP
*
@@ -72,7 +74,6 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
struct mtk_adsp_ipc *adsp_ipc;
struct mtk_adsp_chan *adsp_chan;
struct mbox_client *cl;
- char *chan_name;
int ret;
int i, j;
@@ -83,12 +84,6 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
return -ENOMEM;
for (i = 0; i < MTK_ADSP_MBOX_NUM; i++) {
- chan_name = kasprintf(GFP_KERNEL, "mbox%d", i);
- if (!chan_name) {
- ret = -ENOMEM;
- goto out;
- }
-
adsp_chan = &adsp_ipc->chans[i];
cl = &adsp_chan->cl;
cl->dev = dev->parent;
@@ -99,17 +94,20 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
adsp_chan->ipc = adsp_ipc;
adsp_chan->idx = i;
- adsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
+ adsp_chan->ch = mbox_request_channel_byname(cl, adsp_mbox_ch_names[i]);
if (IS_ERR(adsp_chan->ch)) {
ret = PTR_ERR(adsp_chan->ch);
if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request mbox chan %d ret %d\n",
- i, ret);
- goto out_free;
- }
+ dev_err(dev, "Failed to request mbox chan %s ret %d\n",
+ adsp_mbox_ch_names[i], ret);
+
+ for (j = 0; j < i; j++) {
+ adsp_chan = &adsp_ipc->chans[j];
+ mbox_free_channel(adsp_chan->ch);
+ }
- dev_dbg(dev, "request mbox chan %s\n", chan_name);
- kfree(chan_name);
+ return ret;
+ }
}
adsp_ipc->dev = dev;
@@ -117,16 +115,6 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
dev_dbg(dev, "MTK ADSP IPC initialized\n");
return 0;
-
-out_free:
- kfree(chan_name);
-out:
- for (j = 0; j < i; j++) {
- adsp_chan = &adsp_ipc->chans[j];
- mbox_free_channel(adsp_chan->ch);
- }
-
- return ret;
}
static int mtk_adsp_ipc_remove(struct platform_device *pdev)
diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c
index 1829ba220576..9f918b9e6f8f 100644
--- a/drivers/firmware/qcom_scm-legacy.c
+++ b/drivers/firmware/qcom_scm-legacy.c
@@ -120,6 +120,9 @@ static void __scm_legacy_do(const struct arm_smccc_args *smc,
/**
* scm_legacy_call() - Sends a command to the SCM and waits for the command to
* finish processing.
+ * @dev: device
+ * @desc: descriptor structure containing arguments and return values
+ * @res: results from SMC call
*
* A note on cache maintenance:
* Note that any buffers that are expected to be accessed by the secure world
@@ -211,6 +214,7 @@ out:
/**
* scm_legacy_call_atomic() - Send an atomic SCM command with up to 5 arguments
* and 3 return values
+ * @unused: unused device pointer (legacy argument, may be NULL)
* @desc: SCM call descriptor containing arguments
* @res: SCM call return values
*
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 3163660fa8e2..cdbfe54c8146 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -7,6 +7,7 @@
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
+#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
@@ -31,8 +32,13 @@ struct qcom_scm {
struct clk *core_clk;
struct clk *iface_clk;
struct clk *bus_clk;
+ struct icc_path *path;
struct reset_controller_dev reset;
+ /* control access to the interconnect path */
+ struct mutex scm_bw_lock;
+ int scm_vote_count;
+
u64 dload_mode_addr;
};
@@ -99,6 +105,42 @@ static void qcom_scm_clk_disable(void)
clk_disable_unprepare(__scm->bus_clk);
}
+static int qcom_scm_bw_enable(void)
+{
+ int ret = 0;
+
+ if (!__scm->path)
+ return 0;
+
+ if (IS_ERR(__scm->path))
+ return -EINVAL;
+
+ mutex_lock(&__scm->scm_bw_lock);
+ if (!__scm->scm_vote_count) {
+ ret = icc_set_bw(__scm->path, 0, UINT_MAX);
+ if (ret < 0) {
+ dev_err(__scm->dev, "failed to set bandwidth request\n");
+ goto err_bw;
+ }
+ }
+ __scm->scm_vote_count++;
+err_bw:
+ mutex_unlock(&__scm->scm_bw_lock);
+
+ return ret;
+}
+
+static void qcom_scm_bw_disable(void)
+{
+ if (IS_ERR_OR_NULL(__scm->path))
+ return;
+
+ mutex_lock(&__scm->scm_bw_lock);
+ if (__scm->scm_vote_count-- == 1)
+ icc_set_bw(__scm->path, 0, 0);
+ mutex_unlock(&__scm->scm_bw_lock);
+}
+
enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);
@@ -444,10 +486,15 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
if (ret)
goto out;
+ ret = qcom_scm_bw_enable();
+ if (ret)
+ return ret;
+
desc.args[1] = mdata_phys;
ret = qcom_scm_call(__scm->dev, &desc, &res);
+ qcom_scm_bw_disable();
qcom_scm_clk_disable();
out:
@@ -507,7 +554,12 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
if (ret)
return ret;
+ ret = qcom_scm_bw_enable();
+ if (ret)
+ return ret;
+
ret = qcom_scm_call(__scm->dev, &desc, &res);
+ qcom_scm_bw_disable();
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -537,7 +589,12 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral)
if (ret)
return ret;
+ ret = qcom_scm_bw_enable();
+ if (ret)
+ return ret;
+
ret = qcom_scm_call(__scm->dev, &desc, &res);
+ qcom_scm_bw_disable();
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -566,8 +623,13 @@ int qcom_scm_pas_shutdown(u32 peripheral)
if (ret)
return ret;
+ ret = qcom_scm_bw_enable();
+ if (ret)
+ return ret;
+
ret = qcom_scm_call(__scm->dev, &desc, &res);
+ qcom_scm_bw_disable();
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -1277,8 +1339,15 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ mutex_init(&scm->scm_bw_lock);
+
clks = (unsigned long)of_device_get_match_data(&pdev->dev);
+ scm->path = devm_of_icc_get(&pdev->dev, NULL);
+ if (IS_ERR(scm->path))
+ return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
+ "failed to acquire interconnect path\n");
+
scm->core_clk = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(scm->core_clk)) {
if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
@@ -1337,7 +1406,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
/*
* If requested enable "download mode", from this point on warmboot
- * will cause the the boot stages to enter download mode, unless
+ * will cause the boot stages to enter download mode, unless
* disabled below by a clean shutdown/reboot.
*/
if (download_mode)
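For reference (not part of this patch), the vote/call/unvote pattern the PAS helpers now follow, written out as a hedged sketch. example_pas_op() is hypothetical; it assumes the qcom_scm_clk_enable()/qcom_scm_clk_disable() and qcom_scm_call() helpers and the __scm pointer already present in this file.

	/* Hypothetical helper mirroring the PAS paths above: vote for clocks and
	 * interconnect bandwidth, make the SCM call, then drop both votes. */
	static int example_pas_op(const struct qcom_scm_desc *desc,
				  struct qcom_scm_res *res)
	{
		int ret;

		ret = qcom_scm_clk_enable();
		if (ret)
			return ret;

		ret = qcom_scm_bw_enable();
		if (ret) {
			qcom_scm_clk_disable();
			return ret;
		}

		ret = qcom_scm_call(__scm->dev, desc, res);

		qcom_scm_bw_disable();
		qcom_scm_clk_disable();

		return ret ? : res->result[0];
	}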
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index 9378075d04e9..e51c95f8d445 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -24,12 +24,16 @@
#define RSU_DCMF1_MASK GENMASK_ULL(63, 32)
#define RSU_DCMF2_MASK GENMASK_ULL(31, 0)
#define RSU_DCMF3_MASK GENMASK_ULL(63, 32)
+#define RSU_DCMF0_STATUS_MASK GENMASK_ULL(15, 0)
+#define RSU_DCMF1_STATUS_MASK GENMASK_ULL(31, 16)
+#define RSU_DCMF2_STATUS_MASK GENMASK_ULL(47, 32)
+#define RSU_DCMF3_STATUS_MASK GENMASK_ULL(63, 48)
#define RSU_TIMEOUT (msecs_to_jiffies(SVC_RSU_REQUEST_TIMEOUT_MS))
#define INVALID_RETRY_COUNTER 0xFF
#define INVALID_DCMF_VERSION 0xFF
-
+#define INVALID_DCMF_STATUS 0xFFFFFFFF
typedef void (*rsu_callback)(struct stratix10_svc_client *client,
struct stratix10_svc_cb_data *data);
@@ -49,6 +53,10 @@ typedef void (*rsu_callback)(struct stratix10_svc_client *client,
* @dcmf_version.dcmf1: Quartus dcmf1 version
* @dcmf_version.dcmf2: Quartus dcmf2 version
* @dcmf_version.dcmf3: Quartus dcmf3 version
+ * @dcmf_status.dcmf0: dcmf0 status
+ * @dcmf_status.dcmf1: dcmf1 status
+ * @dcmf_status.dcmf2: dcmf2 status
+ * @dcmf_status.dcmf3: dcmf3 status
* @retry_counter: the current image's retry counter
* @max_retry: the preset max retry value
*/
@@ -73,6 +81,13 @@ struct stratix10_rsu_priv {
unsigned int dcmf3;
} dcmf_version;
+ struct {
+ unsigned int dcmf0;
+ unsigned int dcmf1;
+ unsigned int dcmf2;
+ unsigned int dcmf3;
+ } dcmf_status;
+
unsigned int retry_counter;
unsigned int max_retry;
};
@@ -129,7 +144,7 @@ static void rsu_command_callback(struct stratix10_svc_client *client,
struct stratix10_rsu_priv *priv = client->priv;
if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
- dev_warn(client->dev, "FW doesn't support notify\n");
+ dev_warn(client->dev, "Secure FW doesn't support notify\n");
else if (data->status == BIT(SVC_STATUS_ERROR))
dev_err(client->dev, "Failure, returned status is %lu\n",
BIT(data->status));
@@ -156,7 +171,7 @@ static void rsu_retry_callback(struct stratix10_svc_client *client,
if (data->status == BIT(SVC_STATUS_OK))
priv->retry_counter = *counter;
else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
- dev_warn(client->dev, "FW doesn't support retry\n");
+ dev_warn(client->dev, "Secure FW doesn't support retry\n");
else
dev_err(client->dev, "Failed to get retry counter %lu\n",
BIT(data->status));
@@ -181,7 +196,7 @@ static void rsu_max_retry_callback(struct stratix10_svc_client *client,
if (data->status == BIT(SVC_STATUS_OK))
priv->max_retry = *max_retry;
else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
- dev_warn(client->dev, "FW doesn't support max retry\n");
+ dev_warn(client->dev, "Secure FW doesn't support max retry\n");
else
dev_err(client->dev, "Failed to get max retry %lu\n",
BIT(data->status));
@@ -216,6 +231,35 @@ static void rsu_dcmf_version_callback(struct stratix10_svc_client *client,
}
/**
+ * rsu_dcmf_status_callback() - Callback from Intel service layer for getting
+ * the DCMF status
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for DCMF status
+ */
+static void rsu_dcmf_status_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ unsigned long long *value = (unsigned long long *)data->kaddr1;
+
+ if (data->status == BIT(SVC_STATUS_OK)) {
+ priv->dcmf_status.dcmf0 = FIELD_GET(RSU_DCMF0_STATUS_MASK,
+ *value);
+ priv->dcmf_status.dcmf1 = FIELD_GET(RSU_DCMF1_STATUS_MASK,
+ *value);
+ priv->dcmf_status.dcmf2 = FIELD_GET(RSU_DCMF2_STATUS_MASK,
+ *value);
+ priv->dcmf_status.dcmf3 = FIELD_GET(RSU_DCMF3_STATUS_MASK,
+ *value);
+ } else {
+ dev_err(client->dev, "failed to get DCMF status\n");
+ }
+
+ complete(&priv->completion);
+}
+
+/**
* rsu_send_msg() - send a message to Intel service layer
* @priv: pointer to rsu private data
* @command: RSU status or update command
@@ -361,7 +405,8 @@ static ssize_t max_retry_show(struct device *dev,
if (!priv)
return -ENODEV;
- return sprintf(buf, "0x%08x\n", priv->max_retry);
+ return scnprintf(buf, PAGE_SIZE, "0x%08x\n", priv->max_retry);
}
static ssize_t dcmf0_show(struct device *dev,
@@ -408,6 +453,61 @@ static ssize_t dcmf3_show(struct device *dev,
return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf3);
}
+static ssize_t dcmf0_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ if (priv->dcmf_status.dcmf0 == INVALID_DCMF_STATUS)
+ return -EIO;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf0);
+}
+
+static ssize_t dcmf1_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ if (priv->dcmf_status.dcmf1 == INVALID_DCMF_STATUS)
+ return -EIO;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf1);
+}
+
+static ssize_t dcmf2_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ if (priv->dcmf_status.dcmf2 == INVALID_DCMF_STATUS)
+ return -EIO;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf2);
+}
+
+static ssize_t dcmf3_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ if (priv->dcmf_status.dcmf3 == INVALID_DCMF_STATUS)
+ return -EIO;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf3);
+}
static ssize_t reboot_image_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -484,6 +584,10 @@ static DEVICE_ATTR_RO(dcmf0);
static DEVICE_ATTR_RO(dcmf1);
static DEVICE_ATTR_RO(dcmf2);
static DEVICE_ATTR_RO(dcmf3);
+static DEVICE_ATTR_RO(dcmf0_status);
+static DEVICE_ATTR_RO(dcmf1_status);
+static DEVICE_ATTR_RO(dcmf2_status);
+static DEVICE_ATTR_RO(dcmf3_status);
static DEVICE_ATTR_WO(reboot_image);
static DEVICE_ATTR_WO(notify);
@@ -500,6 +604,10 @@ static struct attribute *rsu_attrs[] = {
&dev_attr_dcmf1.attr,
&dev_attr_dcmf2.attr,
&dev_attr_dcmf3.attr,
+ &dev_attr_dcmf0_status.attr,
+ &dev_attr_dcmf1_status.attr,
+ &dev_attr_dcmf2_status.attr,
+ &dev_attr_dcmf3_status.attr,
&dev_attr_reboot_image.attr,
&dev_attr_notify.attr,
NULL
@@ -532,6 +640,10 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
priv->dcmf_version.dcmf2 = INVALID_DCMF_VERSION;
priv->dcmf_version.dcmf3 = INVALID_DCMF_VERSION;
priv->max_retry = INVALID_RETRY_COUNTER;
+ priv->dcmf_status.dcmf0 = INVALID_DCMF_STATUS;
+ priv->dcmf_status.dcmf1 = INVALID_DCMF_STATUS;
+ priv->dcmf_status.dcmf2 = INVALID_DCMF_STATUS;
+ priv->dcmf_status.dcmf3 = INVALID_DCMF_STATUS;
mutex_init(&priv->lock);
priv->chan = stratix10_svc_request_channel_byname(&priv->client,
@@ -561,6 +673,13 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
stratix10_svc_free_channel(priv->chan);
}
+ ret = rsu_send_msg(priv, COMMAND_RSU_DCMF_STATUS,
+ 0, rsu_dcmf_status_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting DCMF status %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+
ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
if (ret) {
dev_err(dev, "Error, getting RSU retry %i\n", ret);
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 14663f671323..b4081f4d88a3 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -34,12 +34,13 @@
* timeout is set to 30 seconds (30 * 1000) at Intel Stratix10 SoC.
*/
#define SVC_NUM_DATA_IN_FIFO 32
-#define SVC_NUM_CHANNEL 2
+#define SVC_NUM_CHANNEL 3
#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200
#define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30
/* stratix10 service layer clients */
#define STRATIX10_RSU "stratix10-rsu"
+#define INTEL_FCS "intel-fcs"
typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long, unsigned long,
@@ -53,6 +54,7 @@ struct stratix10_svc_chan;
*/
struct stratix10_svc {
struct platform_device *stratix10_svc_rsu;
+ struct platform_device *intel_svc_fcs;
};
/**
@@ -97,8 +99,10 @@ struct stratix10_svc_data_mem {
/**
* struct stratix10_svc_data - service data structure
* @chan: service channel
- * @paddr: playload physical address
- * @size: playload size
+ * @paddr: physical address of the payload to be processed
+ * @size: size of the payload to be processed
+ * @paddr_output: physical address of processed payload
+ * @size_output: processed payload size
* @command: service command requested by client
* @flag: configuration type (full or partial)
* @arg: args to be passed via registers and not physically mapped buffers
@@ -109,6 +113,8 @@ struct stratix10_svc_data {
struct stratix10_svc_chan *chan;
phys_addr_t paddr;
size_t size;
+ phys_addr_t paddr_output;
+ size_t size_output;
u32 command;
u32 flag;
u64 arg[3];
@@ -246,6 +252,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
{
struct arm_smccc_res res;
int count_in_sec;
+ unsigned long a0, a1, a2;
cb_data->kaddr1 = NULL;
cb_data->kaddr2 = NULL;
@@ -254,24 +261,45 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
pr_debug("%s: polling config status\n", __func__);
+ a0 = INTEL_SIP_SMC_FPGA_CONFIG_ISDONE;
+ a1 = (unsigned long)p_data->paddr;
+ a2 = (unsigned long)p_data->size;
+
+ if (p_data->command == COMMAND_POLL_SERVICE_STATUS)
+ a0 = INTEL_SIP_SMC_SERVICE_COMPLETED;
+
count_in_sec = FPGA_CONFIG_STATUS_TIMEOUT_SEC;
while (count_in_sec) {
- ctrl->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_ISDONE,
- 0, 0, 0, 0, 0, 0, 0, &res);
+ ctrl->invoke_fn(a0, a1, a2, 0, 0, 0, 0, 0, &res);
if ((res.a0 == INTEL_SIP_SMC_STATUS_OK) ||
- (res.a0 == INTEL_SIP_SMC_STATUS_ERROR))
+ (res.a0 == INTEL_SIP_SMC_STATUS_ERROR) ||
+ (res.a0 == INTEL_SIP_SMC_STATUS_REJECTED))
break;
/*
- * configuration is still in progress, wait one second then
+ * request is still in progress, wait one second then
* poll again
*/
msleep(1000);
count_in_sec--;
}
- if (res.a0 == INTEL_SIP_SMC_STATUS_OK && count_in_sec)
+ if (!count_in_sec) {
+ pr_err("%s: poll status timeout\n", __func__);
+ cb_data->status = BIT(SVC_STATUS_BUSY);
+ } else if (res.a0 == INTEL_SIP_SMC_STATUS_OK) {
cb_data->status = BIT(SVC_STATUS_COMPLETED);
+ cb_data->kaddr2 = (res.a2) ?
+ svc_pa_to_va(res.a2) : NULL;
+ cb_data->kaddr3 = (res.a3) ? &res.a3 : NULL;
+ } else {
+ pr_err("%s: poll status error\n", __func__);
+ cb_data->kaddr1 = &res.a1;
+ cb_data->kaddr2 = (res.a2) ?
+ svc_pa_to_va(res.a2) : NULL;
+ cb_data->kaddr3 = (res.a3) ? &res.a3 : NULL;
+ cb_data->status = BIT(SVC_STATUS_ERROR);
+ }
p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data);
}
@@ -296,6 +324,10 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
case COMMAND_RECONFIG:
case COMMAND_RSU_UPDATE:
case COMMAND_RSU_NOTIFY:
+ case COMMAND_FCS_REQUEST_SERVICE:
+ case COMMAND_FCS_SEND_CERTIFICATE:
+ case COMMAND_FCS_DATA_ENCRYPTION:
+ case COMMAND_FCS_DATA_DECRYPTION:
cb_data->status = BIT(SVC_STATUS_OK);
break;
case COMMAND_RECONFIG_DATA_SUBMIT:
@@ -306,15 +338,29 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
break;
case COMMAND_RSU_RETRY:
case COMMAND_RSU_MAX_RETRY:
+ case COMMAND_RSU_DCMF_STATUS:
case COMMAND_FIRMWARE_VERSION:
cb_data->status = BIT(SVC_STATUS_OK);
cb_data->kaddr1 = &res.a1;
break;
+ case COMMAND_SMC_SVC_VERSION:
+ cb_data->status = BIT(SVC_STATUS_OK);
+ cb_data->kaddr1 = &res.a1;
+ cb_data->kaddr2 = &res.a2;
+ break;
case COMMAND_RSU_DCMF_VERSION:
cb_data->status = BIT(SVC_STATUS_OK);
cb_data->kaddr1 = &res.a1;
cb_data->kaddr2 = &res.a2;
break;
+ case COMMAND_FCS_RANDOM_NUMBER_GEN:
+ case COMMAND_FCS_GET_PROVISION_DATA:
+ case COMMAND_POLL_SERVICE_STATUS:
+ cb_data->status = BIT(SVC_STATUS_OK);
+ cb_data->kaddr1 = &res.a1;
+ cb_data->kaddr2 = svc_pa_to_va(res.a2);
+ cb_data->kaddr3 = &res.a3;
+ break;
default:
pr_warn("it shouldn't happen\n");
break;
@@ -341,7 +387,7 @@ static int svc_normal_to_secure_thread(void *data)
struct stratix10_svc_data *pdata;
struct stratix10_svc_cb_data *cbdata;
struct arm_smccc_res res;
- unsigned long a0, a1, a2;
+ unsigned long a0, a1, a2, a3, a4, a5, a6, a7;
int ret_fifo = 0;
pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
@@ -358,6 +404,11 @@ static int svc_normal_to_secure_thread(void *data)
a0 = INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK;
a1 = 0;
a2 = 0;
+ a3 = 0;
+ a4 = 0;
+ a5 = 0;
+ a6 = 0;
+ a7 = 0;
pr_debug("smc_hvc_shm_thread is running\n");
@@ -428,15 +479,74 @@ static int svc_normal_to_secure_thread(void *data)
a1 = 0;
a2 = 0;
break;
+
+ /* for FCS */
+ case COMMAND_FCS_DATA_ENCRYPTION:
+ a0 = INTEL_SIP_SMC_FCS_CRYPTION;
+ a1 = 1;
+ a2 = (unsigned long)pdata->paddr;
+ a3 = (unsigned long)pdata->size;
+ a4 = (unsigned long)pdata->paddr_output;
+ a5 = (unsigned long)pdata->size_output;
+ break;
+ case COMMAND_FCS_DATA_DECRYPTION:
+ a0 = INTEL_SIP_SMC_FCS_CRYPTION;
+ a1 = 0;
+ a2 = (unsigned long)pdata->paddr;
+ a3 = (unsigned long)pdata->size;
+ a4 = (unsigned long)pdata->paddr_output;
+ a5 = (unsigned long)pdata->size_output;
+ break;
+ case COMMAND_FCS_RANDOM_NUMBER_GEN:
+ a0 = INTEL_SIP_SMC_FCS_RANDOM_NUMBER;
+ a1 = (unsigned long)pdata->paddr;
+ a2 = 0;
+ break;
+ case COMMAND_FCS_REQUEST_SERVICE:
+ a0 = INTEL_SIP_SMC_FCS_SERVICE_REQUEST;
+ a1 = (unsigned long)pdata->paddr;
+ a2 = (unsigned long)pdata->size;
+ break;
+ case COMMAND_FCS_SEND_CERTIFICATE:
+ a0 = INTEL_SIP_SMC_FCS_SEND_CERTIFICATE;
+ a1 = (unsigned long)pdata->paddr;
+ a2 = (unsigned long)pdata->size;
+ break;
+ case COMMAND_FCS_GET_PROVISION_DATA:
+ a0 = INTEL_SIP_SMC_FCS_GET_PROVISION_DATA;
+ a1 = (unsigned long)pdata->paddr;
+ a2 = 0;
+ break;
+
+ /* for polling */
+ case COMMAND_POLL_SERVICE_STATUS:
+ a0 = INTEL_SIP_SMC_SERVICE_COMPLETED;
+ a1 = (unsigned long)pdata->paddr;
+ a2 = (unsigned long)pdata->size;
+ break;
+ case COMMAND_RSU_DCMF_STATUS:
+ a0 = INTEL_SIP_SMC_RSU_DCMF_STATUS;
+ a1 = 0;
+ a2 = 0;
+ break;
+ case COMMAND_SMC_SVC_VERSION:
+ a0 = INTEL_SIP_SMC_SVC_VERSION;
+ a1 = 0;
+ a2 = 0;
+ break;
default:
pr_warn("it shouldn't happen\n");
break;
}
pr_debug("%s: before SMC call -- a0=0x%016x a1=0x%016x",
- __func__, (unsigned int)a0, (unsigned int)a1);
+ __func__,
+ (unsigned int)a0,
+ (unsigned int)a1);
pr_debug(" a2=0x%016x\n", (unsigned int)a2);
-
- ctrl->invoke_fn(a0, a1, a2, 0, 0, 0, 0, 0, &res);
+ pr_debug(" a3=0x%016x\n", (unsigned int)a3);
+ pr_debug(" a4=0x%016x\n", (unsigned int)a4);
+ pr_debug(" a5=0x%016x\n", (unsigned int)a5);
+ ctrl->invoke_fn(a0, a1, a2, a3, a4, a5, a6, a7, &res);
pr_debug("%s: after SMC call -- res.a0=0x%016x",
__func__, (unsigned int)res.a0);
@@ -468,6 +578,7 @@ static int svc_normal_to_secure_thread(void *data)
pdata, cbdata);
break;
case COMMAND_RECONFIG_STATUS:
+ case COMMAND_POLL_SERVICE_STATUS:
svc_thread_cmd_config_status(ctrl,
pdata, cbdata);
break;
@@ -478,14 +589,31 @@ static int svc_normal_to_secure_thread(void *data)
break;
case INTEL_SIP_SMC_STATUS_REJECTED:
pr_debug("%s: STATUS_REJECTED\n", __func__);
+ /* for FCS */
+ switch (pdata->command) {
+ case COMMAND_FCS_REQUEST_SERVICE:
+ case COMMAND_FCS_SEND_CERTIFICATE:
+ case COMMAND_FCS_GET_PROVISION_DATA:
+ case COMMAND_FCS_DATA_ENCRYPTION:
+ case COMMAND_FCS_DATA_DECRYPTION:
+ case COMMAND_FCS_RANDOM_NUMBER_GEN:
+ cbdata->status = BIT(SVC_STATUS_INVALID_PARAM);
+ cbdata->kaddr1 = NULL;
+ cbdata->kaddr2 = NULL;
+ cbdata->kaddr3 = NULL;
+ pdata->chan->scl->receive_cb(pdata->chan->scl,
+ cbdata);
+ break;
+ }
break;
case INTEL_SIP_SMC_STATUS_ERROR:
case INTEL_SIP_SMC_RSU_ERROR:
pr_err("%s: STATUS_ERROR\n", __func__);
cbdata->status = BIT(SVC_STATUS_ERROR);
cbdata->kaddr1 = &res.a1;
- cbdata->kaddr2 = NULL;
- cbdata->kaddr3 = NULL;
+ cbdata->kaddr2 = (res.a2) ?
+ svc_pa_to_va(res.a2) : NULL;
+ cbdata->kaddr3 = (res.a3) ? &res.a3 : NULL;
pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
break;
default:
@@ -493,12 +621,10 @@ static int svc_normal_to_secure_thread(void *data)
/*
* be compatible with older version firmware which
- * doesn't support RSU notify or retry
+ * doesn't support newer RSU commands
*/
- if ((pdata->command == COMMAND_RSU_RETRY) ||
- (pdata->command == COMMAND_RSU_MAX_RETRY) ||
- (pdata->command == COMMAND_RSU_NOTIFY) ||
- (pdata->command == COMMAND_FIRMWARE_VERSION)) {
+ if ((pdata->command != COMMAND_RSU_UPDATE) &&
+ (pdata->command != COMMAND_RSU_STATUS)) {
cbdata->status =
BIT(SVC_STATUS_NO_SUPPORT);
cbdata->kaddr1 = NULL;
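
With the relaxed compatibility check, a client can now see four outcomes for a request: OK, INVALID_PARAM (mapped from INTEL_SIP_SMC_STATUS_REJECTED for the FCS commands), ERROR, or NO_SUPPORT when older firmware does not recognize the command. A sketch of the corresponding bitmask test on the client side (not the in-tree FCS client):

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/firmware/intel/stratix10-svc-client.h>

/* Illustrative: translate the status bitmask handed to receive_cb()
 * into an errno for the submitting context. */
static int example_status_to_errno(struct stratix10_svc_cb_data *data)
{
	if (data->status & BIT(SVC_STATUS_OK))
		return 0;
	if (data->status & BIT(SVC_STATUS_INVALID_PARAM))
		return -EINVAL;		/* firmware rejected the request */
	if (data->status & BIT(SVC_STATUS_NO_SUPPORT))
		return -EOPNOTSUPP;	/* older firmware, command unknown */
	return -EIO;			/* SVC_STATUS_ERROR or anything else */
}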
@@ -852,8 +978,19 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
list_for_each_entry(p_mem, &svc_data_mem, node)
if (p_mem->vaddr == p_msg->payload) {
p_data->paddr = p_mem->paddr;
+ p_data->size = p_msg->payload_length;
break;
}
+ if (p_msg->payload_output) {
+ list_for_each_entry(p_mem, &svc_data_mem, node)
+ if (p_mem->vaddr == p_msg->payload_output) {
+ p_data->paddr_output =
+ p_mem->paddr;
+ p_data->size_output =
+ p_msg->payload_length_output;
+ break;
+ }
+ }
}
p_data->command = p_msg->command;
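
stratix10_svc_send() now also resolves an optional output buffer, so cryption-style commands can pass separate source and destination regions to the firmware. A client-side sketch, assuming the payload_output/payload_length_output fields of struct stratix10_svc_client_msg added alongside this change and that stratix10_svc_allocate_memory() returns an ERR_PTR on failure:

#include <linux/err.h>
#include <linux/firmware/intel/stratix10-svc-client.h>

/* Illustrative: submit an FCS encryption request with separate input
 * and output buffers taken from the service memory pool. */
static int example_fcs_encrypt(struct stratix10_svc_chan *chan,
			       void *src, size_t src_sz, size_t dst_sz)
{
	struct stratix10_svc_client_msg msg = {};
	void *out;

	out = stratix10_svc_allocate_memory(chan, dst_sz);
	if (IS_ERR(out))
		return PTR_ERR(out);

	msg.command = COMMAND_FCS_DATA_ENCRYPTION;
	msg.payload = src;		/* must come from the same service memory pool */
	msg.payload_length = src_sz;
	msg.payload_output = out;
	msg.payload_length_output = dst_sz;

	return stratix10_svc_send(chan, &msg);
}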
@@ -1036,6 +1173,11 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
chans[1].name = SVC_CLIENT_RSU;
spin_lock_init(&chans[1].lock);
+ chans[2].scl = NULL;
+ chans[2].ctrl = controller;
+ chans[2].name = SVC_CLIENT_FCS;
+ spin_lock_init(&chans[2].lock);
+
list_add_tail(&controller->node, &svc_ctrl);
platform_set_drvdata(pdev, controller);
@@ -1054,8 +1196,22 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
}
ret = platform_device_add(svc->stratix10_svc_rsu);
- if (ret)
- goto err_put_device;
+ if (ret) {
+ platform_device_put(svc->stratix10_svc_rsu);
+ return ret;
+ }
+
+ svc->intel_svc_fcs = platform_device_alloc(INTEL_FCS, 1);
+ if (!svc->intel_svc_fcs) {
+ dev_err(dev, "failed to allocate %s device\n", INTEL_FCS);
+ return -ENOMEM;
+ }
+
+ ret = platform_device_add(svc->intel_svc_fcs);
+ if (ret) {
+ platform_device_put(svc->intel_svc_fcs);
+ return ret;
+ }
dev_set_drvdata(dev, svc);
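
The probe path registers a second child device for the FCS client using the usual alloc/add pairing, where a failed platform_device_add() must be balanced by platform_device_put() rather than platform_device_unregister(). A generic sketch of that idiom (the helper and its name are illustrative):

#include <linux/err.h>
#include <linux/platform_device.h>

/* Illustrative: the alloc/add/put pattern used for the RSU and FCS
 * child devices above. */
static struct platform_device *example_register_child(const char *name, int id)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc(name, id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	ret = platform_device_add(pdev);
	if (ret) {
		platform_device_put(pdev);	/* add failed: drop the reference */
		return ERR_PTR(ret);
	}

	return pdev;	/* torn down later with platform_device_unregister() */
}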
@@ -1063,8 +1219,6 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
return 0;
-err_put_device:
- platform_device_put(svc->stratix10_svc_rsu);
err_free_kfifo:
kfifo_free(&controller->svc_fifo);
return ret;
@@ -1075,6 +1229,7 @@ static int stratix10_svc_drv_remove(struct platform_device *pdev)
struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
+ platform_device_unregister(svc->intel_svc_fcs);
platform_device_unregister(svc->stratix10_svc_rsu);
kfifo_free(&ctrl->svc_fifo);
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index fd89899aeeed..0c440afd5224 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -474,7 +474,7 @@ static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp,
mode |= attrs & DEBUGFS_S_IWUSR ? 0200 : 0;
dentry = debugfs_create_file(name, mode, parent, bpmp,
&bpmp_debug_fops);
- if (!dentry) {
+ if (IS_ERR(dentry)) {
err = -ENOMEM;
goto out;
}
@@ -725,7 +725,7 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf,
if (t & DEBUGFS_S_ISDIR) {
dentry = debugfs_create_dir(name, parent);
- if (!dentry)
+ if (IS_ERR(dentry))
return -ENOMEM;
err = bpmp_populate_dir(bpmp, seqbuf, dentry, depth+1);
if (err < 0)
@@ -738,7 +738,7 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf,
dentry = debugfs_create_file(name, mode,
parent, bpmp,
&debugfs_fops);
- if (!dentry)
+ if (IS_ERR(dentry))
return -ENOMEM;
}
}
@@ -788,11 +788,11 @@ int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp)
return 0;
root = debugfs_create_dir("bpmp", NULL);
- if (!root)
+ if (IS_ERR(root))
return -ENOMEM;
bpmp->debugfs_mirror = debugfs_create_dir("debug", root);
- if (!bpmp->debugfs_mirror) {
+ if (IS_ERR(bpmp->debugfs_mirror)) {
err = -ENOMEM;
goto out;
}
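
The debugfs conversion above reflects that debugfs_create_dir() and debugfs_create_file() do not return NULL on failure; they return an encoded error pointer (for example -ENODEV when debugfs is disabled), so the old NULL checks could never trigger. A minimal sketch of the IS_ERR() pattern with hypothetical names:

#include <linux/debugfs.h>
#include <linux/err.h>

static u32 example_counter;

/* Illustrative: check debugfs creation results with IS_ERR() rather
 * than a NULL test, matching the change above. */
static struct dentry *example_debugfs_init(void)
{
	struct dentry *root;

	root = debugfs_create_dir("example", NULL);
	if (IS_ERR(root))
		return root;	/* e.g. ERR_PTR(-ENODEV) when debugfs is off */

	debugfs_create_u32("counter", 0444, root, &example_counter);
	return root;
}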
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 5654c5e9862b..037db21de510 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -201,7 +201,7 @@ static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
int err;
if (data && size > 0)
- memcpy(data, channel->ib->data, size);
+ memcpy_fromio(data, channel->ib->data, size);
err = tegra_bpmp_ack_response(channel);
if (err < 0)
@@ -245,7 +245,7 @@ static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
channel->ob->flags = flags;
if (data && size > 0)
- memcpy(channel->ob->data, data, size);
+ memcpy_toio(channel->ob->data, data, size);
return tegra_bpmp_post_request(channel);
}
@@ -420,7 +420,7 @@ void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
channel->ob->code = code;
if (data && size > 0)
- memcpy(channel->ob->data, data, size);
+ memcpy_toio(channel->ob->data, data, size);
err = tegra_bpmp_post_response(channel);
if (WARN_ON(err < 0))
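
The BPMP channel buffers are mapped as device memory (__iomem), so plain memcpy() gives no guarantee about access size or ordering; memcpy_toio()/memcpy_fromio() route the copy through the MMIO accessors instead. A minimal sketch of the pattern with hypothetical wrappers:

#include <linux/io.h>
#include <linux/string.h>

/* Illustrative: copy a message payload into and out of an __iomem
 * mailbox buffer with the MMIO-safe helpers. */
static void example_mbox_write(void __iomem *mbox, const void *data, size_t len)
{
	memcpy_toio(mbox, data, len);
}

static void example_mbox_read(void *data, const void __iomem *mbox, size_t len)
{
	memcpy_fromio(data, mbox, len);
}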
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 7977a494a651..d1f652802181 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -2,7 +2,7 @@
/*
* Xilinx Zynq MPSoC Firmware layer
*
- * Copyright (C) 2014-2021 Xilinx, Inc.
+ * Copyright (C) 2014-2022 Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -340,6 +340,20 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
static u32 pm_api_version;
static u32 pm_tz_version;
+int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset)
+{
+ int ret;
+
+ ret = zynqmp_pm_invoke_fn(TF_A_PM_REGISTER_SGI, sgi_num, reset, 0, 0,
+ NULL);
+ if (!ret)
+ return ret;
+
+ /* try old implementation as fallback strategy if above fails */
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, sgi_num,
+ reset, NULL);
+}
+
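
zynqmp_pm_register_sgi() tries the dedicated TF-A call first and only falls back to the older PM_IOCTL path when that fails, so the kernel keeps working against firmware that predates TF_A_PM_REGISTER_SGI. A usage sketch from a hypothetical consumer, assuming the prototype is exposed via xlnx-zynqmp.h; the SGI number is arbitrary, and the idea that reset = 1 clears a previous registration is inferred from the parameter name rather than confirmed here:

#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/printk.h>

static int example_setup_firmware_sgi(void)
{
	int ret;

	/* SGI 15 is an arbitrary choice for this sketch. */
	ret = zynqmp_pm_register_sgi(15, 0);
	if (ret)
		pr_err("SGI registration failed: %d\n", ret);

	return ret;
}

static void example_teardown_firmware_sgi(void)
{
	zynqmp_pm_register_sgi(0, 1);	/* reset = 1: assumed to clear the registration */
}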
/**
* zynqmp_pm_get_api_version() - Get version number of PMU PM firmware
* @version: Returned version value