Diffstat (limited to 'drivers/misc')
35 files changed, 673 insertions, 379 deletions
diff --git a/drivers/misc/aspeed-lpc-snoop.c b/drivers/misc/aspeed-lpc-snoop.c index cb78c98bc78d..2feb4347d67f 100644 --- a/drivers/misc/aspeed-lpc-snoop.c +++ b/drivers/misc/aspeed-lpc-snoop.c @@ -16,12 +16,15 @@ #include <linux/bitops.h> #include <linux/interrupt.h> +#include <linux/fs.h> #include <linux/kfifo.h> #include <linux/mfd/syscon.h> +#include <linux/miscdevice.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/poll.h> #include <linux/regmap.h> #define DEVICE_NAME "aspeed-lpc-snoop" @@ -59,20 +62,70 @@ struct aspeed_lpc_snoop_model_data { unsigned int has_hicrb_ensnp; }; +struct aspeed_lpc_snoop_channel { + struct kfifo fifo; + wait_queue_head_t wq; + struct miscdevice miscdev; +}; + struct aspeed_lpc_snoop { struct regmap *regmap; int irq; - struct kfifo snoop_fifo[NUM_SNOOP_CHANNELS]; + struct aspeed_lpc_snoop_channel chan[NUM_SNOOP_CHANNELS]; +}; + +static struct aspeed_lpc_snoop_channel *snoop_file_to_chan(struct file *file) +{ + return container_of(file->private_data, + struct aspeed_lpc_snoop_channel, + miscdev); +} + +static ssize_t snoop_file_read(struct file *file, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct aspeed_lpc_snoop_channel *chan = snoop_file_to_chan(file); + unsigned int copied; + int ret = 0; + + if (kfifo_is_empty(&chan->fifo)) { + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + ret = wait_event_interruptible(chan->wq, + !kfifo_is_empty(&chan->fifo)); + if (ret == -ERESTARTSYS) + return -EINTR; + } + ret = kfifo_to_user(&chan->fifo, buffer, count, &copied); + + return ret ? ret : copied; +} + +static unsigned int snoop_file_poll(struct file *file, + struct poll_table_struct *pt) +{ + struct aspeed_lpc_snoop_channel *chan = snoop_file_to_chan(file); + + poll_wait(file, &chan->wq, pt); + return !kfifo_is_empty(&chan->fifo) ? 
POLLIN : 0; +} + +static const struct file_operations snoop_fops = { + .owner = THIS_MODULE, + .read = snoop_file_read, + .poll = snoop_file_poll, + .llseek = noop_llseek, }; /* Save a byte to a FIFO and discard the oldest byte if FIFO is full */ -static void put_fifo_with_discard(struct kfifo *fifo, u8 val) +static void put_fifo_with_discard(struct aspeed_lpc_snoop_channel *chan, u8 val) { - if (!kfifo_initialized(fifo)) + if (!kfifo_initialized(&chan->fifo)) return; - if (kfifo_is_full(fifo)) - kfifo_skip(fifo); - kfifo_put(fifo, val); + if (kfifo_is_full(&chan->fifo)) + kfifo_skip(&chan->fifo); + kfifo_put(&chan->fifo, val); + wake_up_interruptible(&chan->wq); } static irqreturn_t aspeed_lpc_snoop_irq(int irq, void *arg) @@ -97,12 +150,12 @@ static irqreturn_t aspeed_lpc_snoop_irq(int irq, void *arg) if (reg & HICR6_STR_SNP0W) { u8 val = (data & SNPWDR_CH0_MASK) >> SNPWDR_CH0_SHIFT; - put_fifo_with_discard(&lpc_snoop->snoop_fifo[0], val); + put_fifo_with_discard(&lpc_snoop->chan[0], val); } if (reg & HICR6_STR_SNP1W) { u8 val = (data & SNPWDR_CH1_MASK) >> SNPWDR_CH1_SHIFT; - put_fifo_with_discard(&lpc_snoop->snoop_fifo[1], val); + put_fifo_with_discard(&lpc_snoop->chan[1], val); } return IRQ_HANDLED; @@ -139,12 +192,22 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop, const struct aspeed_lpc_snoop_model_data *model_data = of_device_get_match_data(dev); + init_waitqueue_head(&lpc_snoop->chan[channel].wq); /* Create FIFO datastructure */ - rc = kfifo_alloc(&lpc_snoop->snoop_fifo[channel], + rc = kfifo_alloc(&lpc_snoop->chan[channel].fifo, SNOOP_FIFO_SIZE, GFP_KERNEL); if (rc) return rc; + lpc_snoop->chan[channel].miscdev.minor = MISC_DYNAMIC_MINOR; + lpc_snoop->chan[channel].miscdev.name = + devm_kasprintf(dev, GFP_KERNEL, "%s%d", DEVICE_NAME, channel); + lpc_snoop->chan[channel].miscdev.fops = &snoop_fops; + lpc_snoop->chan[channel].miscdev.parent = dev; + rc = misc_register(&lpc_snoop->chan[channel].miscdev); + if (rc) + return rc; + /* Enable LPC snoop channel at requested port */ switch (channel) { case 0: @@ -191,7 +254,8 @@ static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop, return; } - kfifo_free(&lpc_snoop->snoop_fifo[channel]); + kfifo_free(&lpc_snoop->chan[channel].fifo); + misc_deregister(&lpc_snoop->chan[channel].miscdev); } static int aspeed_lpc_snoop_probe(struct platform_device *pdev) diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index e8f1d4bb806a..da445223f4cc 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -80,7 +80,7 @@ static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr) 0xFC, 0); } -int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency) +static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency) { rtsx_pci_write_register(pcr, MSGTXDATA0, MASK_8_BIT_DEF, (u8) (latency & 0xFF)); @@ -143,7 +143,7 @@ int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val) return 0; } -void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active) +static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active) { if (pcr->ops->set_l1off_cfg_sub_d0) pcr->ops->set_l1off_cfg_sub_d0(pcr, active); @@ -162,7 +162,7 @@ static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr) rtsx_set_l1off_sub_cfg_d0(pcr, 1); } -void rtsx_pm_full_on(struct rtsx_pcr *pcr) +static void rtsx_pm_full_on(struct rtsx_pcr *pcr) { if (pcr->ops->full_on) pcr->ops->full_on(pcr); @@ -967,13 +967,13 @@ static void rtsx_pci_card_detect(struct work_struct *work) 
pcr->slots[RTSX_MS_CARD].p_dev); } -void rtsx_pci_process_ocp(struct rtsx_pcr *pcr) +static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr) { if (pcr->ops->process_ocp) pcr->ops->process_ocp(pcr); } -int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr) +static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr) { if (pcr->option.ocp_en) rtsx_pci_process_ocp(pcr); @@ -1094,7 +1094,7 @@ static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr) rtsx_enable_aspm(pcr); } -void rtsx_pm_power_saving(struct rtsx_pcr *pcr) +static void rtsx_pm_power_saving(struct rtsx_pcr *pcr) { if (pcr->ops->power_saving) pcr->ops->power_saving(pcr); diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index 6a7d4a2ad514..840afb398f9e 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -94,8 +94,10 @@ static int at25_ee_read(void *priv, unsigned int offset, switch (at25->addrlen) { default: /* case 3 */ *cp++ = offset >> 16; + /* fall through */ case 2: *cp++ = offset >> 8; + /* fall through */ case 1: case 0: /* can't happen: for better codegen */ *cp++ = offset >> 0; @@ -180,8 +182,10 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) switch (at25->addrlen) { default: /* case 3 */ *cp++ = offset >> 16; + /* fall through */ case 2: *cp++ = offset >> 8; + /* fall through */ case 1: case 0: /* can't happen: for better codegen */ *cp++ = offset >> 0; diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c index 59dc24bb70ec..8a4659518c33 100644 --- a/drivers/misc/eeprom/idt_89hpesx.c +++ b/drivers/misc/eeprom/idt_89hpesx.c @@ -938,7 +938,7 @@ static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf, { struct idt_89hpesx_dev *pdev = filep->private_data; char *colon_ch, *csraddr_str, *csrval_str; - int ret, csraddr_len, csrval_len; + int ret, csraddr_len; u32 csraddr, csrval; char *buf; @@ -974,12 +974,10 @@ static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf, csraddr_str[csraddr_len] = '\0'; /* Register value must follow the colon */ csrval_str = colon_ch + 1; - csrval_len = strnlen(csrval_str, count - csraddr_len); } else /* if (str_colon == NULL) */ { csraddr_str = (char *)buf; /* Just to shut warning up */ csraddr_len = strnlen(csraddr_str, count); csrval_str = NULL; - csrval_len = 0; } /* Convert CSR address to u32 value */ @@ -1130,7 +1128,7 @@ static void idt_get_fw_data(struct idt_89hpesx_dev *pdev) device_for_each_child_node(dev, fwnode) { ee_id = idt_ee_match_id(fwnode); - if (IS_ERR_OR_NULL(ee_id)) { + if (!ee_id) { dev_warn(dev, "Skip unsupported EEPROM device"); continue; } else diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c index 0e32709d1022..fc0cf9a7402e 100644 --- a/drivers/misc/eeprom/max6875.c +++ b/drivers/misc/eeprom/max6875.c @@ -148,7 +148,8 @@ static int max6875_probe(struct i2c_client *client, if (client->addr & 1) return -ENODEV; - if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL))) + data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL); + if (!data) return -ENOMEM; /* A fake client is created on the odd address */ diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h index 1c3967f10f55..120738d6e58b 100644 --- a/drivers/misc/genwqe/card_base.h +++ b/drivers/misc/genwqe/card_base.h @@ -497,7 +497,7 @@ int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m); static inline bool dma_mapping_used(struct dma_mapping *m) { if (!m) - return 0; + return false; 
return m->size != 0; } diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c index f921dd590271..c6b82f09b3ba 100644 --- a/drivers/misc/genwqe/card_debugfs.c +++ b/drivers/misc/genwqe/card_debugfs.c @@ -305,7 +305,6 @@ GENWQE_DEBUGFS_RO(ddcb_info, genwqe_ddcb_info_show); static int genwqe_info_show(struct seq_file *s, void *unused) { struct genwqe_dev *cd = s->private; - u16 val16, type; u64 app_id, slu_id, bitstream = -1; struct pci_dev *pci_dev = cd->pci_dev; @@ -315,9 +314,6 @@ static int genwqe_info_show(struct seq_file *s, void *unused) if (genwqe_is_privileged(cd)) bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM); - val16 = (u16)(slu_id & 0x0fLLU); - type = (u16)((slu_id >> 20) & 0xffLLU); - seq_printf(s, "%s driver version: %s\n" " Device Name/Type: %s %s CardIdx: %d\n" " SLU/APP Config : 0x%016llx/0x%016llx\n" diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c index 0dd6b5ef314a..f453ab82f0d7 100644 --- a/drivers/misc/genwqe/card_dev.c +++ b/drivers/misc/genwqe/card_dev.c @@ -304,14 +304,12 @@ static int genwqe_open(struct inode *inode, struct file *filp) { struct genwqe_dev *cd; struct genwqe_file *cfile; - struct pci_dev *pci_dev; cfile = kzalloc(sizeof(*cfile), GFP_KERNEL); if (cfile == NULL) return -ENOMEM; cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe); - pci_dev = cd->pci_dev; cfile->cd = cd; cfile->filp = filp; cfile->client = NULL; @@ -864,7 +862,6 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req) struct genwqe_dev *cd = cfile->cd; struct genwqe_ddcb_cmd *cmd = &req->cmd; struct dma_mapping *m; - const char *type = "UNKNOWN"; for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58; i++, asiv_offs += 0x08) { @@ -933,11 +930,9 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req) m = genwqe_search_pin(cfile, u_addr, u_size, NULL); if (m != NULL) { - type = "PINNING"; page_offs = (u_addr - (u64)m->u_vaddr)/PAGE_SIZE; } else { - type = "MAPPING"; m = &req->dma_mappings[i]; genwqe_mapping_init(m, diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c index fb83d1375638..8f82bb9d11e2 100644 --- a/drivers/misc/ibmvmc.c +++ b/drivers/misc/ibmvmc.c @@ -273,7 +273,7 @@ static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size, dma_addr_t *dma_handle) { /* allocate memory */ - void *buffer = kzalloc(size, GFP_KERNEL); + void *buffer = kzalloc(size, GFP_ATOMIC); if (!buffer) { *dma_handle = 0; diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 0208c4b027c5..a6f41f96f2a1 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -1,7 +1,7 @@ /* * * Intel Management Engine Interface (Intel MEI) Linux driver - * Copyright (c) 2003-2013, Intel Corporation. + * Copyright (c) 2003-2018, Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -96,8 +96,22 @@ struct mkhi_fwcaps { u8 data[0]; } __packed; +struct mkhi_fw_ver_block { + u16 minor; + u8 major; + u8 platform; + u16 buildno; + u16 hotfix; +} __packed; + +struct mkhi_fw_ver { + struct mkhi_fw_ver_block ver[MEI_MAX_FW_VER_BLOCKS]; +} __packed; + #define MKHI_FWCAPS_GROUP_ID 0x3 #define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6 +#define MKHI_GEN_GROUP_ID 0xFF +#define MKHI_GEN_GET_FW_VERSION_CMD 0x2 struct mkhi_msg_hdr { u8 group_id; u8 command; @@ -139,21 +153,81 @@ static int mei_osver(struct mei_cl_device *cldev) return __mei_cl_send(cldev->cl, buf, size, mode); } +#define MKHI_FWVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \ + sizeof(struct mkhi_fw_ver)) +#define MKHI_FWVER_LEN(__num) (sizeof(struct mkhi_msg_hdr) + \ + sizeof(struct mkhi_fw_ver_block) * (__num)) +#define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */ +static int mei_fwver(struct mei_cl_device *cldev) +{ + char buf[MKHI_FWVER_BUF_LEN]; + struct mkhi_msg *req; + struct mkhi_fw_ver *fwver; + int bytes_recv, ret, i; + + memset(buf, 0, sizeof(buf)); + + req = (struct mkhi_msg *)buf; + req->hdr.group_id = MKHI_GEN_GROUP_ID; + req->hdr.command = MKHI_GEN_GET_FW_VERSION_CMD; + + ret = __mei_cl_send(cldev->cl, buf, sizeof(struct mkhi_msg_hdr), + MEI_CL_IO_TX_BLOCKING); + if (ret < 0) { + dev_err(&cldev->dev, "Could not send ReqFWVersion cmd\n"); + return ret; + } + + ret = 0; + bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), 0, + MKHI_RCV_TIMEOUT); + if (bytes_recv < 0 || (size_t)bytes_recv < MKHI_FWVER_LEN(1)) { + /* + * Should be at least one version block, + * error out if nothing found + */ + dev_err(&cldev->dev, "Could not read FW version\n"); + return -EIO; + } + + fwver = (struct mkhi_fw_ver *)req->data; + memset(cldev->bus->fw_ver, 0, sizeof(cldev->bus->fw_ver)); + for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++) { + if ((size_t)bytes_recv < MKHI_FWVER_LEN(i + 1)) + break; + dev_dbg(&cldev->dev, "FW version%d %d:%d.%d.%d.%d\n", + i, fwver->ver[i].platform, + fwver->ver[i].major, fwver->ver[i].minor, + fwver->ver[i].hotfix, fwver->ver[i].buildno); + + cldev->bus->fw_ver[i].platform = fwver->ver[i].platform; + cldev->bus->fw_ver[i].major = fwver->ver[i].major; + cldev->bus->fw_ver[i].minor = fwver->ver[i].minor; + cldev->bus->fw_ver[i].hotfix = fwver->ver[i].hotfix; + cldev->bus->fw_ver[i].buildno = fwver->ver[i].buildno; + } + + return ret; +} + static void mei_mkhi_fix(struct mei_cl_device *cldev) { int ret; - if (!cldev->bus->hbm_f_os_supported) - return; - ret = mei_cldev_enable(cldev); if (ret) return; - ret = mei_osver(cldev); + ret = mei_fwver(cldev); if (ret < 0) - dev_err(&cldev->dev, "OS version command failed %d\n", ret); + dev_err(&cldev->dev, "FW version command failed %d\n", ret); + if (cldev->bus->hbm_f_os_supported) { + ret = mei_osver(cldev); + if (ret < 0) + dev_err(&cldev->dev, "OS version command failed %d\n", + ret); + } mei_cldev_disable(cldev); } @@ -266,8 +340,8 @@ static int mei_nfc_if_version(struct mei_cl *cl, return -ENOMEM; ret = 0; - bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0); - if (bytes_recv < if_version_length) { + bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0, 0); + if (bytes_recv < 0 || (size_t)bytes_recv < if_version_length) { dev_err(bus->dev, "Could not read IF version\n"); ret = -EIO; goto err; @@ -410,7 +484,7 @@ void mei_cl_bus_dev_fixup(struct mei_cl_device *cldev) { struct 
mei_fixup *f; const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); - int i; + size_t i; for (i = 0; i < ARRAY_SIZE(mei_fixups); i++) { diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index b1133739fb4b..7bba62a72921 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -116,11 +116,12 @@ out: * @buf: buffer to receive * @length: buffer length * @mode: io mode + * @timeout: recv timeout, 0 for infinite timeout * * Return: read size in bytes of < 0 on error */ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, - unsigned int mode) + unsigned int mode, unsigned long timeout) { struct mei_device *bus; struct mei_cl_cb *cb; @@ -158,13 +159,28 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, mutex_unlock(&bus->device_lock); - if (wait_event_interruptible(cl->rx_wait, - (!list_empty(&cl->rd_completed)) || - (!mei_cl_is_connected(cl)))) { - - if (signal_pending(current)) - return -EINTR; - return -ERESTARTSYS; + if (timeout) { + rets = wait_event_interruptible_timeout + (cl->rx_wait, + (!list_empty(&cl->rd_completed)) || + (!mei_cl_is_connected(cl)), + msecs_to_jiffies(timeout)); + if (rets == 0) + return -ETIME; + if (rets < 0) { + if (signal_pending(current)) + return -EINTR; + return -ERESTARTSYS; + } + } else { + if (wait_event_interruptible + (cl->rx_wait, + (!list_empty(&cl->rd_completed)) || + (!mei_cl_is_connected(cl)))) { + if (signal_pending(current)) + return -EINTR; + return -ERESTARTSYS; + } } mutex_lock(&bus->device_lock); @@ -231,7 +247,7 @@ ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, { struct mei_cl *cl = cldev->cl; - return __mei_cl_recv(cl, buf, length, MEI_CL_IO_RX_NONBLOCK); + return __mei_cl_recv(cl, buf, length, MEI_CL_IO_RX_NONBLOCK, 0); } EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock); @@ -248,7 +264,7 @@ ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length) { struct mei_cl *cl = cldev->cl; - return __mei_cl_recv(cl, buf, length, 0); + return __mei_cl_recv(cl, buf, length, 0, 0); } EXPORT_SYMBOL_GPL(mei_cldev_recv); diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 8d6197a88b54..4ab6251d418e 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -863,10 +863,12 @@ int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb, int slots; int ret; - msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request)); + msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request)); slots = mei_hbuf_empty_slots(dev); + if (slots < 0) + return -EOVERFLOW; - if (slots < msg_slots) + if ((u32)slots < msg_slots) return -EMSGSIZE; ret = mei_cl_send_disconnect(cl, cb); @@ -1053,13 +1055,15 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, int slots; int rets; - msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request)); - slots = mei_hbuf_empty_slots(dev); - if (mei_cl_is_other_connecting(cl)) return 0; - if (slots < msg_slots) + msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request)); + slots = mei_hbuf_empty_slots(dev); + if (slots < 0) + return -EOVERFLOW; + + if ((u32)slots < msg_slots) return -EMSGSIZE; rets = mei_cl_send_connect(cl, cb); @@ -1294,10 +1298,12 @@ int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb, int ret; bool request; - msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request)); + msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request)); slots = mei_hbuf_empty_slots(dev); + if (slots < 0) + return -EOVERFLOW; - if (slots < 
msg_slots) + if ((u32)slots < msg_slots) return -EMSGSIZE; request = mei_cl_notify_fop2req(cb->fop_type); @@ -1533,6 +1539,23 @@ nortpm: } /** + * mei_msg_hdr_init - initialize mei message header + * + * @mei_hdr: mei message header + * @cb: message callback structure + */ +static void mei_msg_hdr_init(struct mei_msg_hdr *mei_hdr, struct mei_cl_cb *cb) +{ + mei_hdr->host_addr = mei_cl_host_addr(cb->cl); + mei_hdr->me_addr = mei_cl_me_id(cb->cl); + mei_hdr->length = 0; + mei_hdr->reserved = 0; + mei_hdr->msg_complete = 0; + mei_hdr->dma_ring = 0; + mei_hdr->internal = cb->internal; +} + +/** * mei_cl_irq_write - write a message to device * from the interrupt thread context * @@ -1548,9 +1571,10 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, struct mei_device *dev; struct mei_msg_data *buf; struct mei_msg_hdr mei_hdr; + size_t hdr_len = sizeof(mei_hdr); size_t len; - u32 msg_slots; - int slots; + size_t hbuf_len; + int hbuf_slots; int rets; bool first_chunk; @@ -1572,40 +1596,41 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, return 0; } - slots = mei_hbuf_empty_slots(dev); len = buf->size - cb->buf_idx; - msg_slots = mei_data2slots(len); + hbuf_slots = mei_hbuf_empty_slots(dev); + if (hbuf_slots < 0) { + rets = -EOVERFLOW; + goto err; + } - mei_hdr.host_addr = mei_cl_host_addr(cl); - mei_hdr.me_addr = mei_cl_me_id(cl); - mei_hdr.reserved = 0; - mei_hdr.internal = cb->internal; + hbuf_len = mei_slots2data(hbuf_slots); - if (slots >= msg_slots) { + mei_msg_hdr_init(&mei_hdr, cb); + + /** + * Split the message only if we can write the whole host buffer + * otherwise wait for next time the host buffer is empty. + */ + if (len + hdr_len <= hbuf_len) { mei_hdr.length = len; mei_hdr.msg_complete = 1; - /* Split the message only if we can write the whole host buffer */ - } else if (slots == dev->hbuf_depth) { - msg_slots = slots; - len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); - mei_hdr.length = len; - mei_hdr.msg_complete = 0; + } else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) { + mei_hdr.length = hbuf_len - hdr_len; } else { - /* wait for next time the host buffer is empty */ return 0; } cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n", cb->buf.size, cb->buf_idx); - rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx); + rets = mei_write_message(dev, &mei_hdr, hdr_len, + buf->data + cb->buf_idx, mei_hdr.length); if (rets) goto err; cl->status = 0; cl->writing_state = MEI_WRITING; cb->buf_idx += mei_hdr.length; - cb->completed = mei_hdr.msg_complete == 1; if (first_chunk) { if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) { @@ -1634,13 +1659,16 @@ err: * * Return: number of bytes sent on success, <0 on failure. 
*/ -int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) +ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) { struct mei_device *dev; struct mei_msg_data *buf; struct mei_msg_hdr mei_hdr; - int size; - int rets; + size_t hdr_len = sizeof(mei_hdr); + size_t len; + size_t hbuf_len; + int hbuf_slots; + ssize_t rets; bool blocking; if (WARN_ON(!cl || !cl->dev)) @@ -1652,52 +1680,57 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) dev = cl->dev; buf = &cb->buf; - size = buf->size; + len = buf->size; blocking = cb->blocking; - cl_dbg(dev, cl, "size=%d\n", size); + cl_dbg(dev, cl, "len=%zd\n", len); rets = pm_runtime_get(dev->dev); if (rets < 0 && rets != -EINPROGRESS) { pm_runtime_put_noidle(dev->dev); - cl_err(dev, cl, "rpm: get failed %d\n", rets); + cl_err(dev, cl, "rpm: get failed %zd\n", rets); goto free; } cb->buf_idx = 0; cl->writing_state = MEI_IDLE; - mei_hdr.host_addr = mei_cl_host_addr(cl); - mei_hdr.me_addr = mei_cl_me_id(cl); - mei_hdr.reserved = 0; - mei_hdr.msg_complete = 0; - mei_hdr.internal = cb->internal; rets = mei_cl_tx_flow_ctrl_creds(cl); if (rets < 0) goto err; + mei_msg_hdr_init(&mei_hdr, cb); + if (rets == 0) { cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); - rets = size; + rets = len; goto out; } + if (!mei_hbuf_acquire(dev)) { cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n"); - rets = size; + rets = len; goto out; } - /* Check for a maximum length */ - if (size > mei_hbuf_max_len(dev)) { - mei_hdr.length = mei_hbuf_max_len(dev); - mei_hdr.msg_complete = 0; - } else { - mei_hdr.length = size; + hbuf_slots = mei_hbuf_empty_slots(dev); + if (hbuf_slots < 0) { + rets = -EOVERFLOW; + goto out; + } + + hbuf_len = mei_slots2data(hbuf_slots); + + if (len + hdr_len <= hbuf_len) { + mei_hdr.length = len; mei_hdr.msg_complete = 1; + } else { + mei_hdr.length = hbuf_len - hdr_len; } - rets = mei_write_message(dev, &mei_hdr, buf->data); + rets = mei_write_message(dev, &mei_hdr, hdr_len, + buf->data, mei_hdr.length); if (rets) goto err; @@ -1707,7 +1740,6 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) cl->writing_state = MEI_WRITING; cb->buf_idx = mei_hdr.length; - cb->completed = mei_hdr.msg_complete == 1; out: if (mei_hdr.msg_complete) @@ -1735,7 +1767,7 @@ out: } } - rets = size; + rets = buf->size; err: cl_dbg(dev, cl, "rpm: autosuspend\n"); pm_runtime_mark_last_busy(dev->dev); diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h index 5371df4d8af3..64e318f589b4 100644 --- a/drivers/misc/mei/client.h +++ b/drivers/misc/mei/client.h @@ -202,7 +202,7 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, struct list_head *cmpl_list); int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp); -int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb); +ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb); int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, struct list_head *cmpl_list); diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index c815da91089c..7b5df8fd6c5a 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c @@ -183,6 +183,8 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf, dev->hbm_f_fa_supported); pos += scnprintf(buf + pos, bufsz - pos, "\tOS: %01d\n", dev->hbm_f_os_supported); + pos += scnprintf(buf + pos, bufsz - pos, "\tDR: %01d\n", + dev->hbm_f_dr_supported); } pos += scnprintf(buf + pos, 
bufsz - pos, "pg: %s, %s\n", diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index fe6595fe94f1..09e233d4c0de 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -96,6 +96,20 @@ static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status) } /** + * mei_hbm_write_message - wrapper for sending hbm messages. + * + * @dev: mei device + * @hdr: mei header + * @data: payload + */ +static inline int mei_hbm_write_message(struct mei_device *dev, + struct mei_msg_hdr *hdr, + const void *data) +{ + return mei_write_message(dev, hdr, sizeof(*hdr), data, hdr->length); +} + +/** * mei_hbm_idle - set hbm to idle state * * @dev: the device structure @@ -131,6 +145,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) hdr->me_addr = 0; hdr->length = length; hdr->msg_complete = 1; + hdr->dma_ring = 0; hdr->reserved = 0; hdr->internal = 0; } @@ -174,7 +189,7 @@ static inline int mei_hbm_cl_write(struct mei_device *dev, struct mei_cl *cl, mei_hbm_hdr(&mei_hdr, len); mei_hbm_cl_hdr(cl, hbm_cmd, buf, len); - return mei_write_message(dev, &mei_hdr, buf); + return mei_hbm_write_message(dev, &mei_hdr, buf); } /** @@ -267,7 +282,7 @@ int mei_hbm_start_req(struct mei_device *dev) start_req.host_version.minor_version = HBM_MINOR_VERSION; dev->hbm_state = MEI_HBM_IDLE; - ret = mei_write_message(dev, &mei_hdr, &start_req); + ret = mei_hbm_write_message(dev, &mei_hdr, &start_req); if (ret) { dev_err(dev->dev, "version message write failed: ret = %d\n", ret); @@ -304,7 +319,7 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev) enum_req.flags |= dev->hbm_f_ie_supported ? MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0; - ret = mei_write_message(dev, &mei_hdr, &enum_req); + ret = mei_hbm_write_message(dev, &mei_hdr, &enum_req); if (ret) { dev_err(dev->dev, "enumeration request write failed: ret = %d.\n", ret); @@ -373,7 +388,7 @@ static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status) resp.me_addr = addr; resp.status = status; - ret = mei_write_message(dev, &mei_hdr, &resp); + ret = mei_hbm_write_message(dev, &mei_hdr, &resp); if (ret) dev_err(dev->dev, "add client response write failed: ret = %d\n", ret); @@ -430,7 +445,7 @@ int mei_hbm_cl_notify_req(struct mei_device *dev, req.start = start; - ret = mei_write_message(dev, &mei_hdr, &req); + ret = mei_hbm_write_message(dev, &mei_hdr, &req); if (ret) dev_err(dev->dev, "notify request failed: ret = %d\n", ret); @@ -555,7 +570,7 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx) prop_req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD; prop_req.me_addr = addr; - ret = mei_write_message(dev, &mei_hdr, &prop_req); + ret = mei_hbm_write_message(dev, &mei_hdr, &prop_req); if (ret) { dev_err(dev->dev, "properties request write failed: ret = %d\n", ret); @@ -592,7 +607,7 @@ int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd) memset(&req, 0, len); req.hbm_cmd = pg_cmd; - ret = mei_write_message(dev, &mei_hdr, &req); + ret = mei_hbm_write_message(dev, &mei_hdr, &req); if (ret) dev_err(dev->dev, "power gate command write failed.\n"); return ret; @@ -618,7 +633,7 @@ static int mei_hbm_stop_req(struct mei_device *dev) req.hbm_cmd = HOST_STOP_REQ_CMD; req.reason = DRIVER_STOP_REQUEST; - return mei_write_message(dev, &mei_hdr, &req); + return mei_hbm_write_message(dev, &mei_hdr, &req); } /** @@ -992,6 +1007,12 @@ static void mei_hbm_config_features(struct mei_device *dev) /* OS ver message Support */ if (dev->version.major_version >= HBM_MAJOR_VERSION_OS) dev->hbm_f_os_supported = 1; + + 
/* DMA Ring Support */ + if (dev->version.major_version > HBM_MAJOR_VERSION_DR || + (dev->version.major_version == HBM_MAJOR_VERSION_DR && + dev->version.minor_version >= HBM_MINOR_VERSION_DR)) + dev->hbm_f_dr_supported = 1; } /** diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 334ab02e1de2..0759c3a668de 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -19,6 +19,7 @@ #include <linux/kthread.h> #include <linux/interrupt.h> #include <linux/pm_runtime.h> +#include <linux/sizes.h> #include "mei_dev.h" #include "hbm.h" @@ -228,7 +229,7 @@ static void mei_me_hw_config(struct mei_device *dev) /* Doesn't change in runtime */ hcsr = mei_hcsr_read(dev); - dev->hbuf_depth = (hcsr & H_CBD) >> 24; + hw->hbuf_depth = (hcsr & H_CBD) >> 24; reg = 0; pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg); @@ -490,70 +491,82 @@ static bool mei_me_hbuf_is_empty(struct mei_device *dev) */ static int mei_me_hbuf_empty_slots(struct mei_device *dev) { + struct mei_me_hw *hw = to_me_hw(dev); unsigned char filled_slots, empty_slots; filled_slots = mei_hbuf_filled_slots(dev); - empty_slots = dev->hbuf_depth - filled_slots; + empty_slots = hw->hbuf_depth - filled_slots; /* check for overflow */ - if (filled_slots > dev->hbuf_depth) + if (filled_slots > hw->hbuf_depth) return -EOVERFLOW; return empty_slots; } /** - * mei_me_hbuf_max_len - returns size of hw buffer. + * mei_me_hbuf_depth - returns depth of the hw buffer. * * @dev: the device structure * - * Return: size of hw buffer in bytes + * Return: size of hw buffer in slots */ -static size_t mei_me_hbuf_max_len(const struct mei_device *dev) +static u32 mei_me_hbuf_depth(const struct mei_device *dev) { - return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr); -} + struct mei_me_hw *hw = to_me_hw(dev); + return hw->hbuf_depth; +} /** * mei_me_hbuf_write - writes a message to host hw buffer. * * @dev: the device structure - * @header: mei HECI header of message - * @buf: message payload will be written + * @hdr: header of message + * @hdr_len: header length in bytes: must be multiplication of a slot (4bytes) + * @data: payload + * @data_len: payload length in bytes * - * Return: -EIO if write has failed + * Return: 0 if success, < 0 - otherwise.
*/ static int mei_me_hbuf_write(struct mei_device *dev, - struct mei_msg_hdr *header, - const unsigned char *buf) + const void *hdr, size_t hdr_len, + const void *data, size_t data_len) { unsigned long rem; - unsigned long length = header->length; - u32 *reg_buf = (u32 *)buf; + unsigned long i; + const u32 *reg_buf; u32 dw_cnt; - int i; int empty_slots; - dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header)); + if (WARN_ON(!hdr || !data || hdr_len & 0x3)) + return -EINVAL; + + dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr)); empty_slots = mei_hbuf_empty_slots(dev); dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots); - dw_cnt = mei_data2slots(length); - if (empty_slots < 0 || dw_cnt > empty_slots) + if (empty_slots < 0) + return -EOVERFLOW; + + dw_cnt = mei_data2slots(hdr_len + data_len); + if (dw_cnt > (u32)empty_slots) return -EMSGSIZE; - mei_me_hcbww_write(dev, *((u32 *) header)); + reg_buf = hdr; + for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++) + mei_me_hcbww_write(dev, reg_buf[i]); - for (i = 0; i < length / 4; i++) + reg_buf = data; + for (i = 0; i < data_len / MEI_SLOT_SIZE; i++) mei_me_hcbww_write(dev, reg_buf[i]); - rem = length & 0x3; + rem = data_len & 0x3; if (rem > 0) { u32 reg = 0; - memcpy(&reg, &buf[length - rem], rem); + memcpy(&reg, (const u8 *)data + data_len - rem, rem); mei_me_hcbww_write(dev, reg); } @@ -601,11 +614,11 @@ static int mei_me_count_full_read_slots(struct mei_device *dev) * Return: always 0 */ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer, - unsigned long buffer_length) + unsigned long buffer_length) { u32 *reg_buf = (u32 *)buffer; - for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32)) + for (; buffer_length >= MEI_SLOT_SIZE; buffer_length -= MEI_SLOT_SIZE) *reg_buf++ = mei_me_mecbrw_read(dev); if (buffer_length > 0) { @@ -1314,7 +1327,7 @@ static const struct mei_hw_ops mei_me_hw_ops = { .hbuf_free_slots = mei_me_hbuf_empty_slots, .hbuf_is_ready = mei_me_hbuf_is_empty, - .hbuf_max_len = mei_me_hbuf_max_len, + .hbuf_depth = mei_me_hbuf_depth, .write = mei_me_hbuf_write, @@ -1377,6 +1390,11 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev) .fw_status.status[4] = PCI_CFG_HFS_5, \ .fw_status.status[5] = PCI_CFG_HFS_6 +#define MEI_CFG_DMA_128 \ + .dma_size[DMA_DSCR_HOST] = SZ_128K, \ + .dma_size[DMA_DSCR_DEVICE] = SZ_128K, \ + .dma_size[DMA_DSCR_CTRL] = PAGE_SIZE + /* ICH Legacy devices */ static const struct mei_cfg mei_me_ich_cfg = { MEI_CFG_ICH_HFS, @@ -1409,6 +1427,12 @@ static const struct mei_cfg mei_me_pch8_sps_cfg = { MEI_CFG_FW_SPS, }; +/* Cannon Lake and newer devices */ +static const struct mei_cfg mei_me_pch12_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_DMA_128, +}; + /* * mei_cfg_list - A list of platform platform specific configurations. * Note: has to be synchronized with enum mei_cfg_idx.
@@ -1421,6 +1445,7 @@ static const struct mei_cfg *const mei_cfg_list[] = { [MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg, [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg, [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg, + [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg, }; const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx) diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h index 67892533576e..bbcc5fc106cd 100644 --- a/drivers/misc/mei/hw-me.h +++ b/drivers/misc/mei/hw-me.h @@ -31,10 +31,12 @@ * * @fw_status: FW status * @quirk_probe: device exclusion quirk + * @dma_size: device DMA buffers size */ struct mei_cfg { const struct mei_fw_status fw_status; bool (*quirk_probe)(struct pci_dev *pdev); + size_t dma_size[DMA_DSCR_NUM]; }; @@ -52,12 +54,14 @@ struct mei_cfg { * @mem_addr: io memory address * @pg_state: power gating state * @d0i3_supported: di03 support + * @hbuf_depth: depth of hardware host/write buffer in slots */ struct mei_me_hw { const struct mei_cfg *cfg; void __iomem *mem_addr; enum mei_pg_state pg_state; bool d0i3_supported; + u8 hbuf_depth; }; #define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw) @@ -78,6 +82,7 @@ struct mei_me_hw { * @MEI_ME_PCH8_SPS_CFG: Platform Controller Hub Gen8 and newer * servers platforms with quirk for * SPS firmware exclusion. + * @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer * @MEI_ME_NUM_CFG: Upper Sentinel. */ enum mei_cfg_idx { @@ -88,6 +93,7 @@ enum mei_cfg_idx { MEI_ME_PCH_CPT_PBG_CFG, MEI_ME_PCH8_CFG, MEI_ME_PCH8_SPS_CFG, + MEI_ME_PCH12_CFG, MEI_ME_NUM_CFG, }; diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c index c2c8993e2a51..8449fe0367ff 100644 --- a/drivers/misc/mei/hw-txe.c +++ b/drivers/misc/mei/hw-txe.c @@ -31,6 +31,7 @@ #include "mei-trace.h" +#define TXE_HBUF_DEPTH (PAYLOAD_SIZE / MEI_SLOT_SIZE) /** * mei_txe_reg_read - Reads 32bit data from the txe device @@ -681,9 +682,6 @@ static void mei_txe_hw_config(struct mei_device *dev) struct mei_txe_hw *hw = to_txe_hw(dev); - /* Doesn't change in runtime */ - dev->hbuf_depth = PAYLOAD_SIZE / 4; - hw->aliveness = mei_txe_aliveness_get(dev); hw->readiness = mei_txe_readiness_get(dev); @@ -691,37 +689,34 @@ static void mei_txe_hw_config(struct mei_device *dev) hw->aliveness, hw->readiness); } - /** * mei_txe_write - writes a message to device. * * @dev: the device structure - * @header: header of message - * @buf: message buffer will be written + * @hdr: header of message + * @hdr_len: header length in bytes - must multiplication of a slot (4bytes) + * @data: payload + * @data_len: paylead length in bytes * - * Return: 0 if success, <0 - otherwise. + * Return: 0 if success, < 0 - otherwise. 
*/ - static int mei_txe_write(struct mei_device *dev, - struct mei_msg_hdr *header, - const unsigned char *buf) + const void *hdr, size_t hdr_len, + const void *data, size_t data_len) { struct mei_txe_hw *hw = to_txe_hw(dev); unsigned long rem; - unsigned long length; - int slots = dev->hbuf_depth; - u32 *reg_buf = (u32 *)buf; + const u32 *reg_buf; + u32 slots = TXE_HBUF_DEPTH; u32 dw_cnt; - int i; + unsigned long i, j; - if (WARN_ON(!header || !buf)) + if (WARN_ON(!hdr || !data || hdr_len & 0x3)) return -EINVAL; - length = header->length; - - dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header)); + dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr)); - dw_cnt = mei_data2slots(length); + dw_cnt = mei_data2slots(hdr_len + data_len); if (dw_cnt > slots) return -EMSGSIZE; @@ -739,17 +734,20 @@ static int mei_txe_write(struct mei_device *dev, return -EAGAIN; } - mei_txe_input_payload_write(dev, 0, *((u32 *)header)); + reg_buf = hdr; + for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++) + mei_txe_input_payload_write(dev, i, reg_buf[i]); - for (i = 0; i < length / 4; i++) - mei_txe_input_payload_write(dev, i + 1, reg_buf[i]); + reg_buf = data; + for (j = 0; j < data_len / MEI_SLOT_SIZE; j++) + mei_txe_input_payload_write(dev, i + j, reg_buf[j]); - rem = length & 0x3; + rem = data_len & 0x3; if (rem > 0) { u32 reg = 0; - memcpy(&reg, &buf[length - rem], rem); - mei_txe_input_payload_write(dev, i + 1, reg); + memcpy(&reg, (const u8 *)data + data_len - rem, rem); + mei_txe_input_payload_write(dev, i + j, reg); } /* after each write the whole buffer is consumed */ @@ -762,15 +760,15 @@ static int mei_txe_write(struct mei_device *dev, } /** - * mei_txe_hbuf_max_len - mimics the me hbuf circular buffer + * mei_txe_hbuf_depth - mimics the me hbuf circular buffer * * @dev: the device structure * - * Return: the PAYLOAD_SIZE - 4 + * Return: the TXE_HBUF_DEPTH */ -static size_t mei_txe_hbuf_max_len(const struct mei_device *dev) +static u32 mei_txe_hbuf_depth(const struct mei_device *dev) { - return PAYLOAD_SIZE - sizeof(struct mei_msg_hdr); + return TXE_HBUF_DEPTH; } /** @@ -778,7 +776,7 @@ static size_t mei_txe_hbuf_max_len(const struct mei_device *dev) * * @dev: the device structure * - * Return: always hbuf_depth + * Return: always TXE_HBUF_DEPTH */ static int mei_txe_hbuf_empty_slots(struct mei_device *dev) { @@ -797,7 +795,7 @@ static int mei_txe_hbuf_empty_slots(struct mei_device *dev) static int mei_txe_count_full_read_slots(struct mei_device *dev) { /* read buffers has static size */ - return PAYLOAD_SIZE / 4; + return TXE_HBUF_DEPTH; } /** @@ -839,7 +837,7 @@ static int mei_txe_read(struct mei_device *dev, dev_dbg(dev->dev, "buffer-length = %lu buf[0]0x%08X\n", len, mei_txe_out_data_read(dev, 0)); - for (i = 0; i < len / 4; i++) { + for (i = 0; i < len / MEI_SLOT_SIZE; i++) { /* skip header: index starts from 1 */ reg = mei_txe_out_data_read(dev, i + 1); dev_dbg(dev->dev, "buf[%d] = 0x%08X\n", i, reg); @@ -1140,7 +1138,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) /* Input Ready: Detection if host can write to SeC */ if (test_and_clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause)) { dev->hbuf_is_ready = true; - hw->slots = dev->hbuf_depth; + hw->slots = TXE_HBUF_DEPTH; } if (hw->aliveness && dev->hbuf_is_ready) { @@ -1186,7 +1184,7 @@ static const struct mei_hw_ops mei_txe_hw_ops = { .hbuf_free_slots = mei_txe_hbuf_empty_slots, .hbuf_is_ready = mei_txe_is_input_ready, - .hbuf_max_len = mei_txe_hbuf_max_len, + .hbuf_depth = mei_txe_hbuf_depth, .write = mei_txe_write, diff 
--git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index 5c8286b40b62..65655925791a 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -28,8 +28,6 @@ #define MEI_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */ #define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */ -#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */ - #define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */ #define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */ #define MEI_HBM_TIMEOUT 1 /* 1 second */ @@ -82,6 +80,12 @@ #define HBM_MINOR_VERSION_OS 0 #define HBM_MAJOR_VERSION_OS 2 +/* + * MEI version with dma ring support + */ +#define HBM_MINOR_VERSION_DR 1 +#define HBM_MAJOR_VERSION_DR 2 + /* Host bus message command opcode */ #define MEI_HBM_CMD_OP_MSK 0x7f /* Host bus message command RESPONSE */ @@ -124,6 +128,9 @@ #define MEI_HBM_NOTIFY_RES_CMD 0x90 #define MEI_HBM_NOTIFICATION_CMD 0x11 +#define MEI_HBM_DMA_SETUP_REQ_CMD 0x12 +#define MEI_HBM_DMA_SETUP_RES_CMD 0x92 + /* * MEI Stop Reason * used by hbm_host_stop_request.reason @@ -189,19 +196,27 @@ enum mei_cl_disconnect_status { MEI_CL_DISCONN_SUCCESS = MEI_HBMS_SUCCESS }; -/* - * MEI BUS Interface Section +/** + * struct mei_msg_hdr - MEI BUS Interface Section + * + * @me_addr: device address + * @host_addr: host address + * @length: message length + * @reserved: reserved + * @dma_ring: message is on dma ring + * @internal: message is internal + * @msg_complete: last packet of the message */ struct mei_msg_hdr { u32 me_addr:8; u32 host_addr:8; u32 length:9; - u32 reserved:5; + u32 reserved:4; + u32 dma_ring:1; u32 internal:1; u32 msg_complete:1; } __packed; - struct mei_bus_message { u8 hbm_cmd; u8 data[0]; @@ -451,4 +466,50 @@ struct hbm_notification { u8 reserved[1]; } __packed; +/** + * struct hbm_dma_mem_dscr - dma ring + * + * @addr_hi: the high 32bits of 64 bit address + * @addr_lo: the low 32bits of 64 bit address + * @size : size in bytes (must be power of 2) + */ +struct hbm_dma_mem_dscr { + u32 addr_hi; + u32 addr_lo; + u32 size; +} __packed; + +enum { + DMA_DSCR_HOST = 0, + DMA_DSCR_DEVICE = 1, + DMA_DSCR_CTRL = 2, + DMA_DSCR_NUM, +}; + +/** + * struct hbm_dma_setup_request - dma setup request + * + * @hbm_cmd: bus message command header + * @reserved: reserved for alignment + * @dma_dscr: dma descriptor for HOST, DEVICE, and CTRL + */ +struct hbm_dma_setup_request { + u8 hbm_cmd; + u8 reserved[3]; + struct hbm_dma_mem_dscr dma_dscr[DMA_DSCR_NUM]; +} __packed; + +/** + * struct hbm_dma_setup_response - dma setup response + * + * @hbm_cmd: bus message command header + * @status: 0 on success; otherwise DMA setup failed. 
+ * @reserved: reserved for alignment + */ +struct hbm_dma_setup_response { + u8 hbm_cmd; + u8 status; + u8 reserved[2]; +} __packed; + #endif diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 6649f0d56d2f..5a661cbdf2ae 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -173,10 +173,12 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb, int slots; int ret; + msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_response)); slots = mei_hbuf_empty_slots(dev); - msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response)); + if (slots < 0) + return -EOVERFLOW; - if (slots < msg_slots) + if ((u32)slots < msg_slots) return -EMSGSIZE; ret = mei_hbm_cl_disconnect_rsp(dev, cl); @@ -206,10 +208,12 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, if (!list_empty(&cl->rd_pending)) return 0; - msg_slots = mei_data2slots(sizeof(struct hbm_flow_control)); + msg_slots = mei_hbm2slots(sizeof(struct hbm_flow_control)); slots = mei_hbuf_empty_slots(dev); + if (slots < 0) + return -EOVERFLOW; - if (slots < msg_slots) + if ((u32)slots < msg_slots) return -EMSGSIZE; ret = mei_hbm_cl_flow_control_req(dev, cl); @@ -368,7 +372,10 @@ int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list) return 0; slots = mei_hbuf_empty_slots(dev); - if (slots <= 0) + if (slots < 0) + return -EOVERFLOW; + + if (slots == 0) return -EMSGSIZE; /* complete all waiting for write CB */ diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 7465f17e1559..4d77a6ae183a 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -1,7 +1,7 @@ /* * * Intel Management Engine Interface (Intel MEI) Linux driver - * Copyright (c) 2003-2012, Intel Corporation. + * Copyright (c) 2003-2018, Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -137,7 +137,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, struct mei_device *dev; struct mei_cl_cb *cb = NULL; bool nonblock = !!(file->f_flags & O_NONBLOCK); - int rets; + ssize_t rets; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; @@ -170,7 +170,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, rets = mei_cl_read_start(cl, length, file); if (rets && rets != -EBUSY) { - cl_dbg(dev, cl, "mei start read failure status = %d\n", rets); + cl_dbg(dev, cl, "mei start read failure status = %zd\n", rets); goto out; } @@ -204,7 +204,7 @@ copy_buffer: /* now copy the data to user space */ if (cb->status) { rets = cb->status; - cl_dbg(dev, cl, "read operation failed %d\n", rets); + cl_dbg(dev, cl, "read operation failed %zd\n", rets); goto free; } @@ -236,7 +236,7 @@ free: *offset = 0; out: - cl_dbg(dev, cl, "end mei read rets = %d\n", rets); + cl_dbg(dev, cl, "end mei read rets = %zd\n", rets); mutex_unlock(&dev->device_lock); return rets; } @@ -256,7 +256,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, struct mei_cl *cl = file->private_data; struct mei_cl_cb *cb; struct mei_device *dev; - int rets; + ssize_t rets; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; @@ -312,7 +312,6 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, } } - *offset = 0; cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file); if (!cb) { rets = -ENOMEM; @@ -812,11 +811,39 @@ static ssize_t tx_queue_limit_store(struct device *device, } static DEVICE_ATTR_RW(tx_queue_limit); +/** + * fw_ver_show - display ME FW version + * + * @device: device pointer + * @attr: attribute pointer + * @buf: char out buffer + * + * Return: number of the bytes printed into buf or error + */ +static ssize_t fw_ver_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct mei_device *dev = dev_get_drvdata(device); + struct mei_fw_version *ver; + ssize_t cnt = 0; + int i; + + ver = dev->fw_ver; + + for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++) + cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u:%u.%u.%u.%u\n", + ver[i].platform, ver[i].major, ver[i].minor, + ver[i].hotfix, ver[i].buildno); + return cnt; +} +static DEVICE_ATTR_RO(fw_ver); + static struct attribute *mei_attrs[] = { &dev_attr_fw_status.attr, &dev_attr_hbm_ver.attr, &dev_attr_hbm_ver_drv.attr, &dev_attr_tx_queue_limit.attr, + &dev_attr_fw_ver.attr, NULL }; ATTRIBUTE_GROUPS(mei); diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index be9c48415da9..377397e1b5a5 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -1,7 +1,7 @@ /* * * Intel Management Engine Interface (Intel MEI) Linux driver - * Copyright (c) 2003-2012, Intel Corporation. + * Copyright (c) 2003-2018, Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -26,7 +26,8 @@ #include "hw.h" #include "hbm.h" -#define MEI_RD_MSG_BUF_SIZE (128 * sizeof(u32)) +#define MEI_SLOT_SIZE sizeof(u32) +#define MEI_RD_MSG_BUF_SIZE (128 * MEI_SLOT_SIZE) /* * Number of Maximum MEI Clients @@ -174,7 +175,6 @@ struct mei_cl; * @status: io status of the cb * @internal: communication between driver and FW flag * @blocking: transmission blocking mode - * @completed: the transfer or reception has completed */ struct mei_cl_cb { struct list_head list; @@ -186,7 +186,6 @@ struct mei_cl_cb { int status; u32 internal:1; u32 blocking:1; - u32 completed:1; }; /** @@ -269,7 +268,7 @@ struct mei_cl { * * @hbuf_free_slots : query for write buffer empty slots * @hbuf_is_ready : query if write buffer is empty - * @hbuf_max_len : query for write buffer max len + * @hbuf_depth : query for write buffer depth * * @write : write a message to FW * @@ -299,10 +298,10 @@ struct mei_hw_ops { int (*hbuf_free_slots)(struct mei_device *dev); bool (*hbuf_is_ready)(struct mei_device *dev); - size_t (*hbuf_max_len)(const struct mei_device *dev); + u32 (*hbuf_depth)(const struct mei_device *dev); int (*write)(struct mei_device *dev, - struct mei_msg_hdr *hdr, - const unsigned char *buf); + const void *hdr, size_t hdr_len, + const void *data, size_t data_len); int (*rdbuf_full_slots)(struct mei_device *dev); @@ -317,7 +316,7 @@ void mei_cl_bus_dev_fixup(struct mei_cl_device *dev); ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, unsigned int mode); ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, - unsigned int mode); + unsigned int mode, unsigned long timeout); bool mei_cl_bus_rx_event(struct mei_cl *cl); bool mei_cl_bus_notify_event(struct mei_cl *cl); void mei_cl_bus_remove_devices(struct mei_device *bus); @@ -355,6 +354,25 @@ enum mei_pg_state { const char *mei_pg_state_str(enum mei_pg_state state); /** + * struct mei_fw_version - MEI FW version struct + * + * @platform: platform identifier + * @major: major version field + * @minor: minor version field + * @buildno: build number version field + * @hotfix: hotfix number version field + */ +struct mei_fw_version { + u8 platform; + u8 major; + u16 minor; + u16 buildno; + u16 hotfix; +}; + +#define MEI_MAX_FW_VER_BLOCKS 3 + +/** * struct mei_device - MEI private device struct * * @dev : device on a bus @@ -390,7 +408,6 @@ const char *mei_pg_state_str(enum mei_pg_state state); * @rd_msg_buf : control messages buffer * @rd_msg_hdr : read message header storage * - * @hbuf_depth : depth of hardware host/write buffer is slots * @hbuf_is_ready : query if the host host/write buffer is ready * * @version : HBM protocol version in use @@ -401,6 +418,9 @@ const char *mei_pg_state_str(enum mei_pg_state state); * @hbm_f_fa_supported : hbm feature fixed address client * @hbm_f_ie_supported : hbm feature immediate reply to enum request * @hbm_f_os_supported : hbm feature support OS ver message + * @hbm_f_dr_supported : hbm feature dma ring supported + * + * @fw_ver : FW versions * * @me_clients_rwsem: rw lock over me_clients list * @me_clients : list of FW clients @@ -466,7 +486,6 @@ struct mei_device { u32 rd_msg_hdr; /* write buffer */ - u8 hbuf_depth; bool hbuf_is_ready; struct hbm_version version; @@ -477,6 +496,9 @@ struct mei_device { unsigned int hbm_f_fa_supported:1; unsigned int hbm_f_ie_supported:1; unsigned int hbm_f_os_supported:1; + unsigned int hbm_f_dr_supported:1; 
+ + struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS]; struct rw_semaphore me_clients_rwsem; struct list_head me_clients; @@ -508,8 +530,7 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec) } /** - * mei_data2slots - get slots - number of (dwords) from a message length - * + size of the mei header + * mei_data2slots - get slots number from a message length * * @length: size of the messages in bytes * @@ -517,7 +538,20 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec) */ static inline u32 mei_data2slots(size_t length) { - return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4); + return DIV_ROUND_UP(length, MEI_SLOT_SIZE); +} + +/** + * mei_hbm2slots - get slots number from a hbm message length + * length + size of the mei message header + * + * @length: size of the messages in bytes + * + * Return: number of slots + */ +static inline u32 mei_hbm2slots(size_t length) +{ + return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, MEI_SLOT_SIZE); } /** @@ -529,7 +563,7 @@ static inline u32 mei_data2slots(size_t length) */ static inline u32 mei_slots2data(int slots) { - return slots * 4; + return slots * MEI_SLOT_SIZE; } /* @@ -630,15 +664,16 @@ static inline int mei_hbuf_empty_slots(struct mei_device *dev) return dev->ops->hbuf_free_slots(dev); } -static inline size_t mei_hbuf_max_len(const struct mei_device *dev) +static inline u32 mei_hbuf_depth(const struct mei_device *dev) { - return dev->ops->hbuf_max_len(dev); + return dev->ops->hbuf_depth(dev); } static inline int mei_write_message(struct mei_device *dev, - struct mei_msg_hdr *hdr, const void *buf) + const void *hdr, size_t hdr_len, + const void *data, size_t data_len) { - return dev->ops->write(dev, hdr, buf); + return dev->ops->write(dev, hdr, hdr_len, data, data_len); } static inline u32 mei_read_hdr(const struct mei_device *dev) @@ -681,10 +716,10 @@ static inline void mei_dbgfs_deregister(struct mei_device *dev) {} int mei_register(struct mei_device *dev, struct device *parent); void mei_deregister(struct mei_device *dev); -#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d internal=%1d comp=%1d" +#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d dma=%1d internal=%1d comp=%1d" #define MEI_HDR_PRM(hdr) \ (hdr)->host_addr, (hdr)->me_addr, \ - (hdr)->length, (hdr)->internal, (hdr)->msg_complete + (hdr)->length, (hdr)->dma_ring, (hdr)->internal, (hdr)->msg_complete ssize_t mei_fw_status2str(struct mei_fw_status *fw_sts, char *buf, size_t len); /** diff --git a/drivers/misc/mic/cosm/cosm_main.h b/drivers/misc/mic/cosm/cosm_main.h index f01156fca881..aa78cdf25e40 100644 --- a/drivers/misc/mic/cosm/cosm_main.h +++ b/drivers/misc/mic/cosm/cosm_main.h @@ -45,7 +45,10 @@ struct cosm_msg { u64 id; union { u64 shutdown_status; - struct timespec64 timespec; + struct { + u64 tv_sec; + u64 tv_nsec; + } timespec; }; }; diff --git a/drivers/misc/mic/cosm/cosm_scif_server.c b/drivers/misc/mic/cosm/cosm_scif_server.c index 05a63286741c..e94b7eac4a06 100644 --- a/drivers/misc/mic/cosm/cosm_scif_server.c +++ b/drivers/misc/mic/cosm/cosm_scif_server.c @@ -179,9 +179,13 @@ static void cosm_set_crashed(struct cosm_device *cdev) static void cosm_send_time(struct cosm_device *cdev) { struct cosm_msg msg = { .id = COSM_MSG_SYNC_TIME }; + struct timespec64 ts; int rc; - getnstimeofday64(&msg.timespec); + ktime_get_real_ts64(&ts); + msg.timespec.tv_sec = ts.tv_sec; + msg.timespec.tv_nsec = ts.tv_nsec; + rc = scif_send(cdev->epd, &msg, sizeof(msg), SCIF_SEND_BLOCK); if (rc < 0) dev_err(&cdev->dev, "%s %d scif_send 
failed rc %d\n", diff --git a/drivers/misc/mic/cosm_client/cosm_scif_client.c b/drivers/misc/mic/cosm_client/cosm_scif_client.c index beafc0da4027..225078cb51fd 100644 --- a/drivers/misc/mic/cosm_client/cosm_scif_client.c +++ b/drivers/misc/mic/cosm_client/cosm_scif_client.c @@ -63,7 +63,11 @@ static struct notifier_block cosm_reboot = { /* Set system time from timespec value received from the host */ static void cosm_set_time(struct cosm_msg *msg) { - int rc = do_settimeofday64(&msg->timespec); + struct timespec64 ts = { + .tv_sec = msg->timespec.tv_sec, + .tv_nsec = msg->timespec.tv_nsec, + }; + int rc = do_settimeofday64(&ts); if (rc) dev_err(&client_spdev->dev, "%s: %d settimeofday rc %d\n", diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c index 7b2dddcdd46d..8dd0ccedeb94 100644 --- a/drivers/misc/mic/scif/scif_api.c +++ b/drivers/misc/mic/scif/scif_api.c @@ -187,6 +187,7 @@ int scif_close(scif_epd_t epd) case SCIFEP_ZOMBIE: dev_err(scif_info.mdev.this_device, "SCIFAPI close: zombie state unexpected\n"); + /* fall through */ case SCIFEP_DISCONNECTED: spin_unlock(&ep->lock); scif_unregister_all_windows(epd); @@ -370,11 +371,10 @@ int scif_bind(scif_epd_t epd, u16 pn) goto scif_bind_exit; } } else { - pn = scif_get_new_port(); - if (!pn) { - ret = -ENOSPC; + ret = scif_get_new_port(); + if (ret < 0) goto scif_bind_exit; - } + pn = ret; } ep->state = SCIFEP_BOUND; @@ -648,13 +648,12 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block) err = -EISCONN; break; case SCIFEP_UNBOUND: - ep->port.port = scif_get_new_port(); - if (!ep->port.port) { - err = -ENOSPC; - } else { - ep->port.node = scif_info.nodeid; - ep->conn_async_state = ASYNC_CONN_IDLE; - } + err = scif_get_new_port(); + if (err < 0) + break; + ep->port.port = err; + ep->port.node = scif_info.nodeid; + ep->conn_async_state = ASYNC_CONN_IDLE; /* Fall through */ case SCIFEP_BOUND: /* diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 128d5615c804..05a890ce2ab8 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -656,7 +656,6 @@ xpc_initiate_connect(int ch_number) { short partid; struct xpc_partition *part; - struct xpc_channel *ch; DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS); @@ -664,8 +663,6 @@ xpc_initiate_connect(int ch_number) part = &xpc_partitions[partid]; if (xpc_part_ref(part)) { - ch = &part->channels[ch_number]; - /* * Initiate the establishment of a connection on the * newly registered channel to the remote partition. diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 7284413dabfd..0c3ef6f1df54 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -415,7 +415,6 @@ xpc_discovery(void) int region_size; int max_regions; int nasid; - struct xpc_rsvd_page *rp; unsigned long *discovered_nasids; enum xp_retval ret; @@ -432,8 +431,6 @@ xpc_discovery(void) return; } - rp = (struct xpc_rsvd_page *)xpc_rsvd_page; - /* * The term 'region' in this context refers to the minimum number of * nodes that can comprise an access protection grouping. 
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
index 7b2dddcdd46d..8dd0ccedeb94 100644
--- a/drivers/misc/mic/scif/scif_api.c
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -187,6 +187,7 @@ int scif_close(scif_epd_t epd)
 	case SCIFEP_ZOMBIE:
 		dev_err(scif_info.mdev.this_device,
 			"SCIFAPI close: zombie state unexpected\n");
+		/* fall through */
 	case SCIFEP_DISCONNECTED:
 		spin_unlock(&ep->lock);
 		scif_unregister_all_windows(epd);
@@ -370,11 +371,10 @@ int scif_bind(scif_epd_t epd, u16 pn)
 			goto scif_bind_exit;
 		}
 	} else {
-		pn = scif_get_new_port();
-		if (!pn) {
-			ret = -ENOSPC;
+		ret = scif_get_new_port();
+		if (ret < 0)
 			goto scif_bind_exit;
-		}
+		pn = ret;
 	}
 
 	ep->state = SCIFEP_BOUND;
@@ -648,13 +648,12 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
 		err = -EISCONN;
 		break;
 	case SCIFEP_UNBOUND:
-		ep->port.port = scif_get_new_port();
-		if (!ep->port.port) {
-			err = -ENOSPC;
-		} else {
-			ep->port.node = scif_info.nodeid;
-			ep->conn_async_state = ASYNC_CONN_IDLE;
-		}
+		err = scif_get_new_port();
+		if (err < 0)
+			break;
+		ep->port.port = err;
+		ep->port.node = scif_info.nodeid;
+		ep->conn_async_state = ASYNC_CONN_IDLE;
 		/* Fall through */
 	case SCIFEP_BOUND:
 		/*
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 128d5615c804..05a890ce2ab8 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -656,7 +656,6 @@ xpc_initiate_connect(int ch_number)
 {
 	short partid;
 	struct xpc_partition *part;
-	struct xpc_channel *ch;
 
 	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
 
@@ -664,8 +663,6 @@ xpc_initiate_connect(int ch_number)
 		part = &xpc_partitions[partid];
 
 		if (xpc_part_ref(part)) {
-			ch = &part->channels[ch_number];
-
 			/*
 			 * Initiate the establishment of a connection on the
 			 * newly registered channel to the remote partition.
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 7284413dabfd..0c3ef6f1df54 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -415,7 +415,6 @@ xpc_discovery(void)
 	int region_size;
 	int max_regions;
 	int nasid;
-	struct xpc_rsvd_page *rp;
 	unsigned long *discovered_nasids;
 	enum xp_retval ret;
 
@@ -432,8 +431,6 @@ xpc_discovery(void)
 		return;
 	}
 
-	rp = (struct xpc_rsvd_page *)xpc_rsvd_page;
-
 	/*
 	 * The term 'region' in this context refers to the minimum number of
 	 * nodes that can comprise an access protection grouping. The access
@@ -449,8 +446,10 @@ xpc_discovery(void)
 	switch (region_size) {
 	case 128:
 		max_regions *= 2;
+		/* fall through */
 	case 64:
 		max_regions *= 2;
+		/* fall through */
 	case 32:
 		max_regions *= 2;
 		region_size = 16;
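The new /* fall through */ annotations document that each case in xpc_discovery() is meant to drop into the next one, doubling max_regions once per step down in region size (and silencing fall-through warnings). A runnable sketch of that computation; the base value of 64 is an assumption for illustration, not taken from xpc_partition.c:

/* Sketch of the fall-through doubling above; base count of 64 is assumed. */
#include <stdio.h>

static int max_regions_for(int region_size)
{
	int max_regions = 64;		/* assumed base count */

	switch (region_size) {
	case 128:
		max_regions *= 2;
		/* fall through */
	case 64:
		max_regions *= 2;
		/* fall through */
	case 32:
		max_regions *= 2;
		region_size = 16;
		break;
	}
	return max_regions;
}

int main(void)
{
	int sizes[] = { 16, 32, 64, 128 };

	for (int i = 0; i < 4; i++)
		printf("region_size %3d -> max_regions %d\n",
		       sizes[i], max_regions_for(sizes[i]));
	return 0;
}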
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index c5dc6095686a..74b183baf044 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -391,29 +391,37 @@ static int sram_probe(struct platform_device *pdev)
 	if (IS_ERR(sram->pool))
 		return PTR_ERR(sram->pool);
 
-	ret = sram_reserve_regions(sram, res);
-	if (ret)
-		return ret;
-
 	sram->clk = devm_clk_get(sram->dev, NULL);
 	if (IS_ERR(sram->clk))
 		sram->clk = NULL;
 	else
 		clk_prepare_enable(sram->clk);
 
+	ret = sram_reserve_regions(sram, res);
+	if (ret)
+		goto err_disable_clk;
+
 	platform_set_drvdata(pdev, sram);
 
 	init_func = of_device_get_match_data(&pdev->dev);
 	if (init_func) {
 		ret = init_func();
 		if (ret)
-			return ret;
+			goto err_free_partitions;
 	}
 
 	dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
 		gen_pool_size(sram->pool) / 1024, sram->virt_base);
 
 	return 0;
+
+err_free_partitions:
+	sram_free_partitions(sram);
+err_disable_clk:
+	if (sram->clk)
+		clk_disable_unprepare(sram->clk);
+
+	return ret;
 }
 
 static int sram_remove(struct platform_device *pdev)
diff --git a/drivers/misc/ti-st/Kconfig b/drivers/misc/ti-st/Kconfig
index f34dcc514730..5bb92698bc80 100644
--- a/drivers/misc/ti-st/Kconfig
+++ b/drivers/misc/ti-st/Kconfig
@@ -5,7 +5,8 @@ menu "Texas Instruments shared transport line discipline"
 
 config TI_ST
 	tristate "Shared transport core driver"
-	depends on NET && GPIOLIB && TTY
+	depends on NET && TTY
+	depends on GPIOLIB || COMPILE_TEST
 	select FW_LOADER
 	help
 	  This enables the shared transport core driver for TI
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 5ec3f5a43718..1874ac922166 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -138,7 +138,7 @@ static void kim_int_recv(struct kim_data_s *kim_gdata,
 	const unsigned char *data, long count)
 {
 	const unsigned char *ptr;
-	int len = 0, type = 0;
+	int len = 0;
 	unsigned char *plen;
 
 	pr_debug("%s", __func__);
@@ -183,7 +183,6 @@ static void kim_int_recv(struct kim_data_s *kim_gdata,
 		case 0x04:
 			kim_gdata->rx_state = ST_W4_HEADER;
 			kim_gdata->rx_count = 2;
-			type = *ptr;
 			break;
 		default:
 			pr_info("unknown packet");
@@ -756,14 +755,14 @@ static int kim_probe(struct platform_device *pdev)
 	err = gpio_request(kim_gdata->nshutdown, "kim");
 	if (unlikely(err)) {
 		pr_err(" gpio %d request failed ", kim_gdata->nshutdown);
-		return err;
+		goto err_sysfs_group;
 	}
 
 	/* Configure nShutdown GPIO as output=0 */
 	err = gpio_direction_output(kim_gdata->nshutdown, 0);
 	if (unlikely(err)) {
 		pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
-		return err;
+		goto err_sysfs_group;
 	}
 
 	/* get reference of pdev for request_firmware */
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index adf46072cb37..3fce3b6a3624 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -177,7 +177,7 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1)
 		} else
 			lux = 0;
 	else
-		return -EAGAIN;
+		return 0;
 
 	/* LUX range check */
 	return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
index 80a6f199077c..6c3591cdf855 100644
--- a/drivers/misc/vexpress-syscfg.c
+++ b/drivers/misc/vexpress-syscfg.c
@@ -258,13 +258,9 @@ static int vexpress_syscfg_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&syscfg->funcs);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!devm_request_mem_region(&pdev->dev, res->start,
-			resource_size(res), pdev->name))
-		return -EBUSY;
-
-	syscfg->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
-	if (!syscfg->base)
-		return -EFAULT;
+	syscfg->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(syscfg->base))
+		return PTR_ERR(syscfg->base);
 
 	/* Must use dev.parent (MFD), as that's where DT phandle points at... */
 	bridge = vexpress_config_bridge_register(pdev->dev.parent,
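The sram_probe() reordering above enables the clock before reserving regions and then unwinds in reverse order through the new err_free_partitions/err_disable_clk labels. A minimal userspace sketch of that acquire-in-order, release-in-reverse goto pattern; the enable_clk()/reserve_regions()/init_func() helpers are hypothetical stand-ins, not driver functions:

/* Sketch of the unwind-in-reverse goto pattern used in sram_probe() above. */
#include <stdio.h>
#include <errno.h>

static int enable_clk(void)      { printf("clk enabled\n"); return 0; }
static void disable_clk(void)    { printf("clk disabled\n"); }
static int reserve_regions(void) { printf("regions reserved\n"); return 0; }
static void free_regions(void)   { printf("regions freed\n"); }
static int init_func(void)       { return -ENODEV; /* simulate late failure */ }

static int probe(void)
{
	int ret;

	ret = enable_clk();
	if (ret)
		return ret;

	ret = reserve_regions();
	if (ret)
		goto err_disable_clk;	/* only the clock exists so far */

	ret = init_func();
	if (ret)
		goto err_free_regions;	/* undo both, newest first */

	return 0;

err_free_regions:
	free_regions();
err_disable_clk:
	disable_clk();
	return ret;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}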
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 56c6f79a5c5a..2543ef1ece17 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1,27 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * VMware Balloon driver.
  *
- * Copyright (C) 2000-2014, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; version 2 of the License and no later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Maintained by:	Xavier Deguillard <xdeguillard@vmware.com>
- *			Philip Moltmann <moltmann@vmware.com>
- */
-
-/*
  * This is VMware physical memory management driver for Linux. The driver
  * acts like a "balloon" that can be inflated to reclaim physical pages by
  * reserving them in the guest and invalidating them in the monitor,
@@ -55,25 +37,6 @@ MODULE_ALIAS("vmware_vmmemctl");
 MODULE_LICENSE("GPL");
 
 /*
- * Various constants controlling rate of inflaint/deflating balloon,
- * measured in pages.
- */
-
-/*
- * Rates of memory allocaton when guest experiences memory pressure
- * (driver performs sleeping allocations).
- */
-#define VMW_BALLOON_RATE_ALLOC_MIN	512U
-#define VMW_BALLOON_RATE_ALLOC_MAX	2048U
-#define VMW_BALLOON_RATE_ALLOC_INC	16U
-
-/*
- * When guest is under memory pressure, use a reduced page allocation
- * rate for next several cycles.
- */
-#define VMW_BALLOON_SLOW_CYCLES	4
-
-/*
  * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
  * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use
  * __GFP_NOWARN, to suppress page allocation failure warnings.
@@ -284,12 +247,6 @@ struct vmballoon {
 	/* reset flag */
 	bool reset_required;
 
-	/* adjustment rates (pages per second) */
-	unsigned int rate_alloc;
-
-	/* slowdown page allocations for next few cycles */
-	unsigned int slow_allocation_cycles;
-
 	unsigned long capabilities;
 
 	struct vmballoon_batch_page *batch_page;
@@ -341,7 +298,13 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
 		success = false;
 	}
 
-	if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
+	/*
+	 * 2MB pages are only supported with batching. If batching is for some
+	 * reason disabled, do not use 2MB pages, since otherwise the legacy
+	 * mechanism is used with 2MB pages, causing a failure.
+	 */
+	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
+	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
 		b->supported_page_sizes = 2;
 	else
 		b->supported_page_sizes = 1;
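The reworked check above only advertises 2MB page support when both the 2MB-batched and the plain batched capability bits are present. A tiny sketch of that two-flag gate; the bit values are assumptions for illustration, not the driver's real capability encoding:

/* Two-flag capability gate; flag values below are assumed, not real. */
#include <stdio.h>

#define BALLOON_BATCHED_CMDS	(1u << 1)	/* assumed */
#define BALLOON_BATCHED_2M_CMDS	(1u << 2)	/* assumed */

static unsigned int supported_page_sizes(unsigned long caps)
{
	/* 2MB pages need batching too; fall back to 4KB-only otherwise. */
	if ((caps & BALLOON_BATCHED_2M_CMDS) && (caps & BALLOON_BATCHED_CMDS))
		return 2;
	return 1;
}

int main(void)
{
	printf("%u\n", supported_page_sizes(BALLOON_BATCHED_2M_CMDS));
	printf("%u\n", supported_page_sizes(BALLOON_BATCHED_2M_CMDS |
					    BALLOON_BATCHED_CMDS));
	return 0;
}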
@@ -450,7 +413,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
 
 	pfn32 = (u32)pfn;
 	if (pfn32 != pfn)
-		return -1;
+		return -EINVAL;
 
 	STATS_INC(b->stats.lock[false]);
 
@@ -460,7 +423,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
 
 	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
 	STATS_INC(b->stats.lock_fail[false]);
-	return 1;
+	return -EIO;
 }
 
 static int vmballoon_send_batched_lock(struct vmballoon *b,
@@ -597,11 +560,12 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
 
 	locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
 								target);
-	if (locked > 0) {
+	if (locked) {
 		STATS_INC(b->stats.refused_alloc[false]);
 
-		if (hv_status == VMW_BALLOON_ERROR_RESET ||
-				hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
+		if (locked == -EIO &&
+		    (hv_status == VMW_BALLOON_ERROR_RESET ||
+		     hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
 			vmballoon_free_page(page, false);
 			return -EIO;
 		}
@@ -617,7 +581,7 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
 		} else {
 			vmballoon_free_page(page, false);
 		}
-		return -EIO;
+		return locked;
 	}
 
 	/* track allocated page */
@@ -790,8 +754,6 @@ static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
  */
 static void vmballoon_inflate(struct vmballoon *b)
 {
-	unsigned rate;
-	unsigned int allocations = 0;
 	unsigned int num_pages = 0;
 	int error = 0;
 	gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
@@ -818,17 +780,9 @@ static void vmballoon_inflate(struct vmballoon *b)
 	 * Start with no sleep allocation rate which may be higher
 	 * than sleeping allocation rate.
 	 */
-	if (b->slow_allocation_cycles) {
-		rate = b->rate_alloc;
-		is_2m_pages = false;
-	} else {
-		rate = UINT_MAX;
-		is_2m_pages =
-			b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;
-	}
+	is_2m_pages = b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;
 
-	pr_debug("%s - goal: %d, no-sleep rate: %u, sleep rate: %d\n",
-			__func__, b->target - b->size, rate, b->rate_alloc);
+	pr_debug("%s - goal: %d", __func__, b->target - b->size);
 
 	while (!b->reset_required &&
 		b->size + num_pages * vmballoon_page_size(is_2m_pages)
@@ -861,31 +815,24 @@ static void vmballoon_inflate(struct vmballoon *b)
 			if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
 				/*
 				 * CANSLEEP page allocation failed, so guest
-				 * is under severe memory pressure. Quickly
-				 * decrease allocation rate.
+				 * is under severe memory pressure. We just log
+				 * the event, but do not stop the inflation
+				 * due to its negative impact on performance.
 				 */
-				b->rate_alloc = max(b->rate_alloc / 2,
-						VMW_BALLOON_RATE_ALLOC_MIN);
 				STATS_INC(b->stats.sleep_alloc_fail);
 				break;
 			}
 
 			/*
 			 * NOSLEEP page allocation failed, so the guest is
-			 * under memory pressure. Let us slow down page
-			 * allocations for next few cycles so that the guest
-			 * gets out of memory pressure. Also, if we already
-			 * allocated b->rate_alloc pages, let's pause,
-			 * otherwise switch to sleeping allocations.
+			 * under memory pressure. Slowing down page allocations
+			 * seems to be reasonable, but doing so might actually
+			 * cause the hypervisor to throttle us down, resulting
+			 * in degraded performance. We will count on the
+			 * scheduler and standard memory management mechanisms
+			 * for now.
 			 */
-			b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;
-
-			if (allocations >= b->rate_alloc)
-				break;
-
 			flags = VMW_PAGE_ALLOC_CANSLEEP;
-			/* Lower rate for sleeping allocations. */
-			rate = b->rate_alloc;
 			continue;
 		}
 
@@ -899,28 +846,11 @@ static void vmballoon_inflate(struct vmballoon *b)
 		}
 
 		cond_resched();
-
-		if (allocations >= rate) {
-			/* We allocated enough pages, let's take a break. */
-			break;
-		}
 	}
 
 	if (num_pages > 0)
 		b->ops->lock(b, num_pages, is_2m_pages, &b->target);
 
-	/*
-	 * We reached our goal without failures so try increasing
-	 * allocation rate.
-	 */
-	if (error == 0 && allocations >= b->rate_alloc) {
-		unsigned int mult = allocations / b->rate_alloc;
-
-		b->rate_alloc =
-			min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
-				VMW_BALLOON_RATE_ALLOC_MAX);
-	}
-
 	vmballoon_release_refused_pages(b, true);
 	vmballoon_release_refused_pages(b, false);
 }
@@ -1029,29 +959,30 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b)
  */
 static int vmballoon_vmci_init(struct vmballoon *b)
 {
-	int error = 0;
+	unsigned long error, dummy;
 
-	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
-		error = vmci_doorbell_create(&b->vmci_doorbell,
-				VMCI_FLAG_DELAYED_CB,
-				VMCI_PRIVILEGE_FLAG_RESTRICTED,
-				vmballoon_doorbell, b);
-
-		if (error == VMCI_SUCCESS) {
-			VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
-					b->vmci_doorbell.context,
-					b->vmci_doorbell.resource, error);
-			STATS_INC(b->stats.doorbell_set);
-		}
-	}
+	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
+		return 0;
 
-	if (error != 0) {
-		vmballoon_vmci_cleanup(b);
+	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
+				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
+				     vmballoon_doorbell, b);
 
-		return -EIO;
-	}
+	if (error != VMCI_SUCCESS)
+		goto fail;
+
+	error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
+				   b->vmci_doorbell.resource, dummy);
+
+	STATS_INC(b->stats.doorbell_set);
+
+	if (error != VMW_BALLOON_SUCCESS)
+		goto fail;
 
 	return 0;
+fail:
+	vmballoon_vmci_cleanup(b);
+	return -EIO;
 }
 
 /*
@@ -1114,9 +1045,6 @@ static void vmballoon_work(struct work_struct *work)
 	if (b->reset_required)
 		vmballoon_reset(b);
 
-	if (b->slow_allocation_cycles > 0)
-		b->slow_allocation_cycles--;
-
 	if (!b->reset_required && vmballoon_send_get_target(b, &target)) {
 		/* update target, adjust size */
 		b->target = target;
@@ -1160,11 +1088,6 @@ static int vmballoon_debug_show(struct seq_file *f, void *offset)
 		   "current:            %8d pages\n",
 		   b->target, b->size);
 
-	/* format rate info */
-	seq_printf(f,
-		   "rateSleepAlloc:     %8d pages/sec\n",
-		   b->rate_alloc);
-
 	seq_printf(f,
 		   "\n"
 		   "timer:              %8u\n"
@@ -1271,9 +1194,6 @@ static int __init vmballoon_init(void)
 		INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].refused_pages);
 	}
 
-	/* initialize rates */
-	balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;
-
 	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
 
 	error = vmballoon_debugfs_init(&balloon);
@@ -1289,7 +1209,14 @@ static int __init vmballoon_init(void)
 
 	return 0;
 }
-module_init(vmballoon_init);
+
+/*
+ * Using late_initcall() instead of module_init() allows the balloon to use the
+ * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
+ * VMCI is probed only after the balloon is initialized. If the balloon is used
+ * as a module, late_initcall() is equivalent to module_init().
+ */
+late_initcall(vmballoon_init);
 
 static void __exit vmballoon_exit(void)
 {
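vmballoon_send_lock_page() now reports why it failed: -EINVAL when the PFN cannot be expressed in 32 bits (nothing was sent) and -EIO when the hypervisor rejected the page, and vmballoon_lock_page() only consults hv_status in the -EIO case. A small sketch of that idiom; lock_page() and hv_status here are hypothetical stand-ins, not the driver's functions:

/* Distinct negative errno values let the caller react per failure cause. */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

static int lock_page(uint64_t pfn, int *hv_status)
{
	if (pfn > UINT32_MAX)
		return -EINVAL;		/* cannot even be sent to the hypervisor */

	*hv_status = 1;			/* pretend the hypervisor refused it */
	return -EIO;			/* sent, but rejected */
}

int main(void)
{
	int hv_status = 0;
	int ret = lock_page(1ULL << 40, &hv_status);

	if (ret == -EIO)
		printf("hypervisor error, hv_status=%d\n", hv_status);
	else if (ret)
		printf("local error %d, hv_status not meaningful\n", ret);
	return 0;
}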
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index b4d7774cfe07..bd52f29b4a4e 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -668,7 +668,7 @@ static int qp_host_get_user_memory(u64 produce_uva,
 	retval = get_user_pages_fast((uintptr_t) produce_uva,
 				     produce_q->kernel_if->num_pages, 1,
 				     produce_q->kernel_if->u.h.header_page);
-	if (retval < produce_q->kernel_if->num_pages) {
+	if (retval < (int)produce_q->kernel_if->num_pages) {
 		pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
 			retval);
 		qp_release_pages(produce_q->kernel_if->u.h.header_page,
@@ -680,7 +680,7 @@ static int qp_host_get_user_memory(u64 produce_uva,
 	retval = get_user_pages_fast((uintptr_t) consume_uva,
 				     consume_q->kernel_if->num_pages, 1,
 				     consume_q->kernel_if->u.h.header_page);
-	if (retval < consume_q->kernel_if->num_pages) {
+	if (retval < (int)consume_q->kernel_if->num_pages) {
 		pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
 			retval);
 		qp_release_pages(consume_q->kernel_if->u.h.header_page,
@@ -2214,7 +2214,6 @@ int vmci_qp_broker_map(struct vmci_handle handle,
 {
 	struct qp_broker_entry *entry;
 	const u32 context_id = vmci_ctx_get_id(context);
-	bool is_local = false;
 	int result;
 
 	if (vmci_handle_is_invalid(handle) || !context ||
@@ -2243,7 +2242,6 @@ int vmci_qp_broker_map(struct vmci_handle handle,
 		goto out;
 	}
 
-	is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
 	result = VMCI_SUCCESS;
 
 	if (context_id != VMCI_HOST_CONTEXT_ID) {
@@ -2325,7 +2323,6 @@ int vmci_qp_broker_unmap(struct vmci_handle handle,
 {
 	struct qp_broker_entry *entry;
 	const u32 context_id = vmci_ctx_get_id(context);
-	bool is_local = false;
 	int result;
 
 	if (vmci_handle_is_invalid(handle) || !context ||
@@ -2354,8 +2351,6 @@ int vmci_qp_broker_unmap(struct vmci_handle handle,
 		goto out;
 	}
 
-	is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
-
 	if (context_id != VMCI_HOST_CONTEXT_ID) {
 		qp_acquire_queue_mutex(entry->produce_q);
 		result = qp_save_headers(entry);
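The (int) casts above matter because get_user_pages_fast() can return a negative error code while num_pages is unsigned; without the cast the usual arithmetic conversions turn the negative value into a huge unsigned number and the failure check never fires. A short, standalone demonstration of the pitfall:

/* Why the (int) cast is needed: int vs. unsigned comparison. */
#include <stdio.h>

int main(void)
{
	int retval = -14;		/* e.g. -EFAULT from a failed call */
	unsigned int num_pages = 4;

	if (retval < num_pages)		/* int promoted to unsigned: false
					 * (most compilers warn about this) */
		printf("unsigned compare: failure detected\n");
	else
		printf("unsigned compare: failure missed\n");

	if (retval < (int)num_pages)	/* signed compare: true */
		printf("signed compare: failure detected\n");
	return 0;
}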