Diffstat (limited to 'drivers/misc')
-rw-r--r--  drivers/misc/Kconfig                            |    4
-rw-r--r--  drivers/misc/Makefile                           |    1
-rw-r--r--  drivers/misc/c2port/core.c                      |   29
-rw-r--r--  drivers/misc/cxl/Kconfig                        |    6
-rw-r--r--  drivers/misc/cxl/of.c                           |    2
-rw-r--r--  drivers/misc/cxl/pci.c                          |    2
-rw-r--r--  drivers/misc/cxl/sysfs.c                        |    8
-rw-r--r--  drivers/misc/ds1682.c                           |    8
-rw-r--r--  drivers/misc/eeprom/idt_89hpesx.c               |    6
-rw-r--r--  drivers/misc/eeprom/max6875.c                   |    4
-rw-r--r--  drivers/misc/fastrpc.c                          |   58
-rw-r--r--  drivers/misc/keba/cp500.c                       |   69
-rw-r--r--  drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c |    3
-rw-r--r--  drivers/misc/misc_minor_kunit.c                 |   69
-rw-r--r--  drivers/misc/ntsync.c                           | 1001
-rw-r--r--  drivers/misc/ocxl/sysfs.c                       |    4
-rw-r--r--  drivers/misc/pch_phub.c                         |    8
-rw-r--r--  drivers/misc/pci_endpoint_test.c                |  375
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c                  |    4
-rw-r--r--  drivers/misc/sram.c                             |    8
20 files changed, 1428 insertions, 241 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 09cbe3f0ab1e..56bc72c7ce4a 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -517,7 +517,6 @@ config OPEN_DICE
config NTSYNC
tristate "NT synchronization primitive emulation"
- depends on BROKEN
help
This module provides kernel support for emulation of Windows NT
synchronization primitives. It is not a hardware driver.
@@ -613,8 +612,7 @@ config MARVELL_CN10K_DPI
config MCHP_LAN966X_PCI
tristate "Microchip LAN966x PCIe Support"
depends on PCI
- select OF
- select OF_OVERLAY
+ depends on OF_OVERLAY
select IRQ_DOMAIN
help
This enables the support for the LAN966x PCIe device.
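
Dropping the BROKEN dependency makes NTSYNC selectable again, which is what makes the large ntsync.c rework further down user-visible. As a rough illustration of the resulting user-space API (a sketch against the ioctls added in this diff, not a tested program; see uapi/linux/ntsync.h for the authoritative layout):

/*
 * Minimal user-space sketch of the semaphore API enabled above.
 * Error handling omitted.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ntsync.h>

int main(void)
{
	struct ntsync_sem_args sem_args = { .count = 0, .max = 2 };
	__u32 release_count = 1;
	int dev, sem;

	dev = open("/dev/ntsync", O_RDWR);	/* node is mode 0666, see below */
	sem = ioctl(dev, NTSYNC_IOC_CREATE_SEM, &sem_args);	/* returns an object fd */

	/* Release one count; the previous count is written back in place. */
	ioctl(sem, NTSYNC_IOC_SEM_RELEASE, &release_count);
	printf("count before release: %u\n", release_count);
	return 0;
}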
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 40bf953185c7..545aad06d088 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
+obj-$(CONFIG_TEST_MISC_MINOR) += misc_minor_kunit.o
obj-$(CONFIG_SGI_XP) += sgi-xp/
obj-$(CONFIG_SGI_GRU) += sgi-gru/
obj-$(CONFIG_SMPRO_ERRMON) += smpro-errmon.o
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index 2bb1dd2511f9..fc64474b8241 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -714,7 +714,7 @@ static ssize_t __c2port_read_flash_data(struct c2port_device *dev,
}
static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
@@ -829,7 +829,7 @@ static ssize_t __c2port_write_flash_data(struct c2port_device *dev,
}
static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
@@ -849,8 +849,8 @@ static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
return ret;
}
/* size is computed at run-time */
-static BIN_ATTR(flash_data, 0644, c2port_read_flash_data,
- c2port_write_flash_data, 0);
+static const BIN_ATTR(flash_data, 0644, c2port_read_flash_data,
+ c2port_write_flash_data, 0);
/*
* Class attributes
@@ -869,14 +869,27 @@ static struct attribute *c2port_attrs[] = {
NULL,
};
-static struct bin_attribute *c2port_bin_attrs[] = {
+static const struct bin_attribute *const c2port_bin_attrs[] = {
&bin_attr_flash_data,
NULL,
};
+static size_t c2port_bin_attr_size(struct kobject *kobj,
+ const struct bin_attribute *attr,
+ int i)
+{
+ struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
+
+ if (attr == &bin_attr_flash_data)
+ return c2dev->ops->blocks_num * c2dev->ops->block_size;
+
+ return attr->size;
+}
+
static const struct attribute_group c2port_group = {
.attrs = c2port_attrs,
- .bin_attrs = c2port_bin_attrs,
+ .bin_attrs_new = c2port_bin_attrs,
+ .bin_size = c2port_bin_attr_size,
};
static const struct attribute_group *c2port_groups[] = {
@@ -912,8 +925,7 @@ struct c2port_device *c2port_device_register(char *name,
if (ret < 0)
goto error_idr_alloc;
c2dev->id = ret;
-
- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
+ c2dev->ops = ops;
c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
"c2port%d", c2dev->id);
@@ -924,7 +936,6 @@ struct c2port_device *c2port_device_register(char *name,
dev_set_drvdata(c2dev->dev, c2dev);
strscpy(c2dev->name, name, sizeof(c2dev->name));
- c2dev->ops = ops;
mutex_init(&c2dev->mutex);
/* By default C2 port access is off */
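
The c2port conversion above shows the new sysfs idiom used throughout this series: rather than writing the runtime-computed size into a mutable global bin_attribute before device creation, the attribute stays const and the attribute group supplies a ->bin_size() callback that sysfs consults per device. A condensed sketch of the pattern as it appears in this tree (struct foo_device and its fields are illustrative, not from the tree):

static ssize_t foo_blob_read(struct file *filp, struct kobject *kobj,
			     const struct bin_attribute *attr,
			     char *buf, loff_t off, size_t count)
{
	struct foo_device *foo = dev_get_drvdata(kobj_to_dev(kobj));

	return memory_read_from_buffer(buf, count, &off, foo->blob,
				       foo->blob_len);
}
/* Declared size 0: the real size comes from the ->bin_size() callback. */
static const BIN_ATTR_RO(foo_blob, 0);

static const struct bin_attribute *const foo_bin_attrs[] = {
	&bin_attr_foo_blob,
	NULL,
};

static size_t foo_bin_attr_size(struct kobject *kobj,
				const struct bin_attribute *attr, int i)
{
	struct foo_device *foo = dev_get_drvdata(kobj_to_dev(kobj));

	/* Per-device size; no global attribute state is mutated. */
	return foo->blob_len;
}

static const struct attribute_group foo_group = {
	.bin_attrs_new = foo_bin_attrs,
	.bin_size = foo_bin_attr_size,
};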
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
index 5efc4151bf58..15307f5e4307 100644
--- a/drivers/misc/cxl/Kconfig
+++ b/drivers/misc/cxl/Kconfig
@@ -9,11 +9,13 @@ config CXL_BASE
select PPC_64S_HASH_MMU
config CXL
- tristate "Support for IBM Coherent Accelerators (CXL)"
+ tristate "Support for IBM Coherent Accelerators (CXL) (DEPRECATED)"
depends on PPC_POWERNV && PCI_MSI && EEH
select CXL_BASE
- default m
help
+ The cxl driver is deprecated and will be removed in a future
+ kernel release.
+
Select this option to enable driver support for IBM Coherent
Accelerators (CXL). CXL is otherwise known as Coherent Accelerator
Processor Interface (CAPI). CAPI allows accelerators in FPGAs to be
diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c
index cf6bd8a43056..e26ee85279fa 100644
--- a/drivers/misc/cxl/of.c
+++ b/drivers/misc/cxl/of.c
@@ -295,6 +295,8 @@ int cxl_of_probe(struct platform_device *pdev)
int ret;
int slice = 0, slice_ok = 0;
+ dev_err_once(&pdev->dev, "DEPRECATION: cxl is deprecated and will be removed in a future kernel release\n");
+
pr_devel("in %s\n", __func__);
np = pdev->dev.of_node;
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 3d52f9b92d0d..92bf7c5c7b35 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1726,6 +1726,8 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
int slice;
int rc;
+ dev_err_once(&dev->dev, "DEPRECATED: cxl is deprecated and will be removed in a future kernel release\n");
+
if (cxl_pci_is_vphb_device(dev)) {
dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n");
return -ENODEV;
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 409bd1c39663..b1fc6446bd4b 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -444,7 +444,7 @@ static ssize_t api_version_compatible_show(struct device *device,
}
static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));
@@ -538,7 +538,7 @@ static ssize_t class_show(struct kobject *kobj,
}
static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct afu_config_record *cr = to_cr(kobj);
@@ -620,7 +620,7 @@ static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int c
cr->config_attr.attr.name = "config";
cr->config_attr.attr.mode = S_IRUSR;
cr->config_attr.size = afu->crs_len;
- cr->config_attr.read = afu_read_config;
+ cr->config_attr.read_new = afu_read_config;
rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
&afu->dev.kobj, "cr%i", cr->cr);
@@ -693,7 +693,7 @@ int cxl_sysfs_afu_add(struct cxl_afu *afu)
afu->attr_eb.attr.name = "afu_err_buff";
afu->attr_eb.attr.mode = S_IRUGO;
afu->attr_eb.size = afu->eb_len;
- afu->attr_eb.read = afu_eb_read;
+ afu->attr_eb.read_new = afu_eb_read;
rc = device_create_bin_file(&afu->dev, &afu->attr_eb);
if (rc) {
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index 4175df7ef011..5d5a70a62e98 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -154,7 +154,7 @@ static const struct attribute_group ds1682_group = {
* User data attribute
*/
static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
@@ -172,7 +172,7 @@ static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj,
}
static ssize_t ds1682_eeprom_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
@@ -194,8 +194,8 @@ static const struct bin_attribute ds1682_eeprom_attr = {
.mode = S_IRUGO | S_IWUSR,
},
.size = DS1682_EEPROM_SIZE,
- .read = ds1682_eeprom_read,
- .write = ds1682_eeprom_write,
+ .read_new = ds1682_eeprom_read,
+ .write_new = ds1682_eeprom_write,
};
static int ds1682_nvmem_read(void *priv, unsigned int offset, void *val,
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index 43421fe37d33..1fc632ebf22f 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -847,7 +847,7 @@ err_mutex_unlock:
* @count: Number of bytes to write
*/
static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct idt_89hpesx_dev *pdev;
@@ -871,7 +871,7 @@ static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
* @count: Number of bytes to write
*/
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct idt_89hpesx_dev *pdev;
@@ -1017,7 +1017,7 @@ static ssize_t idt_dbgfs_csr_read(struct file *filep, char __user *ubuf,
* NOTE Size will be changed in compliance with OF node. EEPROM attribute will
* be read-only as well if the corresponding flag is specified in OF node.
*/
-static BIN_ATTR_RW(eeprom, EEPROM_DEF_SIZE);
+static const BIN_ATTR_RW(eeprom, EEPROM_DEF_SIZE);
/*
* csr_dbgfs_ops - CSR debugfs-node read/write operations
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 6fab2ffa736b..1c36ad153e78 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -104,7 +104,7 @@ exit_up:
}
static ssize_t max6875_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
@@ -127,7 +127,7 @@ static const struct bin_attribute user_eeprom_attr = {
.mode = S_IRUGO,
},
.size = USER_EEPROM_SIZE,
- .read = max6875_read,
+ .read_new = max6875_read,
};
static int max6875_probe(struct i2c_client *client)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index d1188cd12ec6..7b7a22c91fe4 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -139,14 +139,14 @@ struct fastrpc_mmap_rsp_msg {
};
struct fastrpc_mmap_req_msg {
- s32 pgid;
+ s32 client_id;
u32 flags;
u64 vaddr;
s32 num;
};
struct fastrpc_mem_map_req_msg {
- s32 pgid;
+ s32 client_id;
s32 fd;
s32 offset;
u32 flags;
@@ -156,20 +156,20 @@ struct fastrpc_mem_map_req_msg {
};
struct fastrpc_munmap_req_msg {
- s32 pgid;
+ s32 client_id;
u64 vaddr;
u64 size;
};
struct fastrpc_mem_unmap_req_msg {
- s32 pgid;
+ s32 client_id;
s32 fd;
u64 vaddrin;
u64 len;
};
struct fastrpc_msg {
- int pid; /* process group id */
+ int client_id; /* process client id */
int tid; /* thread id */
u64 ctx; /* invoke caller context */
u32 handle; /* handle to invoke */
@@ -234,7 +234,7 @@ struct fastrpc_invoke_ctx {
int nbufs;
int retval;
int pid;
- int tgid;
+ int client_id;
u32 sc;
u32 *crc;
u64 ctxid;
@@ -299,7 +299,7 @@ struct fastrpc_user {
struct fastrpc_session_ctx *sctx;
struct fastrpc_buf *init_mem;
- int tgid;
+ int client_id;
int pd;
bool is_secure_dev;
/* Lock for lists */
@@ -614,7 +614,7 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
ctx->sc = sc;
ctx->retval = -1;
ctx->pid = current->pid;
- ctx->tgid = user->tgid;
+ ctx->client_id = user->client_id;
ctx->cctx = cctx;
init_completion(&ctx->work);
INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);
@@ -1115,11 +1115,11 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
int ret;
cctx = fl->cctx;
- msg->pid = fl->tgid;
+ msg->client_id = fl->client_id;
msg->tid = current->pid;
if (kernel)
- msg->pid = 0;
+ msg->client_id = 0;
msg->ctx = ctx->ctxid | fl->pd;
msg->handle = handle;
@@ -1244,7 +1244,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
int err;
bool scm_done = false;
struct {
- int pgid;
+ int client_id;
u32 namelen;
u32 pageslen;
} inbuf;
@@ -1293,7 +1293,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
}
}
- inbuf.pgid = fl->tgid;
+ inbuf.client_id = fl->client_id;
inbuf.namelen = init.namelen;
inbuf.pageslen = 0;
fl->pd = USER_PD;
@@ -1363,7 +1363,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
int memlen;
int err;
struct {
- int pgid;
+ int client_id;
u32 namelen;
u32 filelen;
u32 pageslen;
@@ -1395,7 +1395,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
goto err;
}
- inbuf.pgid = fl->tgid;
+ inbuf.client_id = fl->client_id;
inbuf.namelen = strlen(current->comm) + 1;
inbuf.filelen = init.filelen;
inbuf.pageslen = 1;
@@ -1469,8 +1469,9 @@ err:
}
static struct fastrpc_session_ctx *fastrpc_session_alloc(
- struct fastrpc_channel_ctx *cctx)
+ struct fastrpc_user *fl)
{
+ struct fastrpc_channel_ctx *cctx = fl->cctx;
struct fastrpc_session_ctx *session = NULL;
unsigned long flags;
int i;
@@ -1480,6 +1481,8 @@ static struct fastrpc_session_ctx *fastrpc_session_alloc(
if (!cctx->session[i].used && cctx->session[i].valid) {
cctx->session[i].used = true;
session = &cctx->session[i];
+ /* any non-zero ID will work; session_idx + 1 is the simplest one */
+ fl->client_id = i + 1;
break;
}
}
@@ -1501,12 +1504,12 @@ static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
struct fastrpc_invoke_args args[1];
- int tgid = 0;
+ int client_id = 0;
u32 sc;
- tgid = fl->tgid;
- args[0].ptr = (u64)(uintptr_t) &tgid;
- args[0].length = sizeof(tgid);
+ client_id = fl->client_id;
+ args[0].ptr = (u64)(uintptr_t) &client_id;
+ args[0].length = sizeof(client_id);
args[0].fd = -1;
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);
@@ -1579,11 +1582,10 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
INIT_LIST_HEAD(&fl->maps);
INIT_LIST_HEAD(&fl->mmaps);
INIT_LIST_HEAD(&fl->user);
- fl->tgid = current->tgid;
fl->cctx = cctx;
fl->is_secure_dev = fdevice->secure;
- fl->sctx = fastrpc_session_alloc(cctx);
+ fl->sctx = fastrpc_session_alloc(fl);
if (!fl->sctx) {
dev_err(&cctx->rpdev->dev, "No session available\n");
mutex_destroy(&fl->mutex);
@@ -1647,11 +1649,11 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
{
struct fastrpc_invoke_args args[1];
- int tgid = fl->tgid;
+ int client_id = fl->client_id;
u32 sc;
- args[0].ptr = (u64)(uintptr_t) &tgid;
- args[0].length = sizeof(tgid);
+ args[0].ptr = (u64)(uintptr_t) &client_id;
+ args[0].length = sizeof(client_id);
args[0].fd = -1;
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
fl->pd = pd;
@@ -1803,7 +1805,7 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *
int err;
u32 sc;
- req_msg.pgid = fl->tgid;
+ req_msg.client_id = fl->client_id;
req_msg.size = buf->size;
req_msg.vaddr = buf->raddr;
@@ -1889,7 +1891,7 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
return err;
}
- req_msg.pgid = fl->tgid;
+ req_msg.client_id = fl->client_id;
req_msg.flags = req.flags;
req_msg.vaddr = req.vaddrin;
req_msg.num = sizeof(pages);
@@ -1978,7 +1980,7 @@ static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_me
return -EINVAL;
}
- req_msg.pgid = fl->tgid;
+ req_msg.client_id = fl->client_id;
req_msg.len = map->len;
req_msg.vaddrin = map->raddr;
req_msg.fd = map->fd;
@@ -2031,7 +2033,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
return err;
}
- req_msg.pgid = fl->tgid;
+ req_msg.client_id = fl->client_id;
req_msg.fd = req.fd;
req_msg.offset = req.offset;
req_msg.vaddrin = req.vaddrin;
diff --git a/drivers/misc/keba/cp500.c b/drivers/misc/keba/cp500.c
index 255d3022dae8..d0c6113dcff3 100644
--- a/drivers/misc/keba/cp500.c
+++ b/drivers/misc/keba/cp500.c
@@ -126,8 +126,9 @@ static struct cp500_devs cp520_devices = {
};
struct cp500_nvmem {
- struct nvmem_device *nvmem;
+ struct nvmem_device *base_nvmem;
unsigned int offset;
+ struct nvmem_device *nvmem;
};
struct cp500 {
@@ -581,8 +582,8 @@ static int cp500_nvmem_read(void *priv, unsigned int offset, void *val,
struct cp500_nvmem *nvmem = priv;
int ret;
- ret = nvmem_device_read(nvmem->nvmem, nvmem->offset + offset, bytes,
- val);
+ ret = nvmem_device_read(nvmem->base_nvmem, nvmem->offset + offset,
+ bytes, val);
if (ret != bytes)
return ret;
@@ -595,15 +596,16 @@ static int cp500_nvmem_write(void *priv, unsigned int offset, void *val,
struct cp500_nvmem *nvmem = priv;
int ret;
- ret = nvmem_device_write(nvmem->nvmem, nvmem->offset + offset, bytes,
- val);
+ ret = nvmem_device_write(nvmem->base_nvmem, nvmem->offset + offset,
+ bytes, val);
if (ret != bytes)
return ret;
return 0;
}
-static int cp500_nvmem_register(struct cp500 *cp500, struct nvmem_device *nvmem)
+static int cp500_nvmem_register(struct cp500 *cp500,
+ struct nvmem_device *base_nvmem)
{
struct device *dev = &cp500->pci_dev->dev;
struct nvmem_config nvmem_config = {};
@@ -625,27 +627,52 @@ static int cp500_nvmem_register(struct cp500 *cp500, struct nvmem_device *nvmem)
nvmem_config.reg_read = cp500_nvmem_read;
nvmem_config.reg_write = cp500_nvmem_write;
- cp500->nvmem_cpu.nvmem = nvmem;
+ cp500->nvmem_cpu.base_nvmem = base_nvmem;
cp500->nvmem_cpu.offset = CP500_EEPROM_CPU_OFFSET;
nvmem_config.name = CP500_EEPROM_CPU_NAME;
nvmem_config.size = CP500_EEPROM_CPU_SIZE;
nvmem_config.priv = &cp500->nvmem_cpu;
- tmp = devm_nvmem_register(dev, &nvmem_config);
+ tmp = nvmem_register(&nvmem_config);
if (IS_ERR(tmp))
return PTR_ERR(tmp);
+ cp500->nvmem_cpu.nvmem = tmp;
- cp500->nvmem_user.nvmem = nvmem;
+ cp500->nvmem_user.base_nvmem = base_nvmem;
cp500->nvmem_user.offset = CP500_EEPROM_USER_OFFSET;
nvmem_config.name = CP500_EEPROM_USER_NAME;
nvmem_config.size = CP500_EEPROM_USER_SIZE;
nvmem_config.priv = &cp500->nvmem_user;
- tmp = devm_nvmem_register(dev, &nvmem_config);
- if (IS_ERR(tmp))
+ tmp = nvmem_register(&nvmem_config);
+ if (IS_ERR(tmp)) {
+ nvmem_unregister(cp500->nvmem_cpu.nvmem);
+ cp500->nvmem_cpu.nvmem = NULL;
+
return PTR_ERR(tmp);
+ }
+ cp500->nvmem_user.nvmem = tmp;
return 0;
}
+static void cp500_nvmem_unregister(struct cp500 *cp500)
+{
+ int notified;
+
+ if (cp500->nvmem_user.nvmem) {
+ nvmem_unregister(cp500->nvmem_user.nvmem);
+ cp500->nvmem_user.nvmem = NULL;
+ }
+ if (cp500->nvmem_cpu.nvmem) {
+ nvmem_unregister(cp500->nvmem_cpu.nvmem);
+ cp500->nvmem_cpu.nvmem = NULL;
+ }
+
+ /* CPU and user nvmem use the same base_nvmem; put it only once */
+ notified = atomic_read(&cp500->nvmem_notified);
+ if (notified)
+ nvmem_device_put(cp500->nvmem_cpu.base_nvmem);
+}
+
static int cp500_nvmem_match(struct device *dev, const void *data)
{
const struct cp500 *cp500 = data;
@@ -663,13 +690,6 @@ static int cp500_nvmem_match(struct device *dev, const void *data)
return 0;
}
-static void cp500_devm_nvmem_put(void *data)
-{
- struct nvmem_device *nvmem = data;
-
- nvmem_device_put(nvmem);
-}
-
static int cp500_nvmem(struct notifier_block *nb, unsigned long action,
void *data)
{
@@ -698,10 +718,6 @@ static int cp500_nvmem(struct notifier_block *nb, unsigned long action,
return NOTIFY_DONE;
}
- ret = devm_add_action_or_reset(dev, cp500_devm_nvmem_put, nvmem);
- if (ret)
- return ret;
-
ret = cp500_nvmem_register(cp500, nvmem);
if (ret)
return ret;
@@ -932,12 +948,17 @@ static void cp500_remove(struct pci_dev *pci_dev)
{
struct cp500 *cp500 = pci_get_drvdata(pci_dev);
+ /*
+ * Unregister CPU and user nvmem and put base_nvmem before the parent
+ * auxiliary device of base_nvmem is unregistered.
+ */
+ nvmem_unregister_notifier(&cp500->nvmem_notifier);
+ cp500_nvmem_unregister(cp500);
+
cp500_unregister_auxiliary_devs(cp500);
cp500_disable(cp500);
- nvmem_unregister_notifier(&cp500->nvmem_notifier);
-
pci_set_drvdata(pci_dev, 0);
pci_free_irq_vectors(pci_dev);
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
index 3c1359d8d4e6..04756302b878 100644
--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
@@ -147,6 +147,9 @@ static int pci1xxxx_gpio_set_config(struct gpio_chip *gpio, unsigned int offset,
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
pci1xxx_assign_bit(priv->reg_base, OPENDRAIN_OFFSET(offset), (offset % 32), true);
break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ pci1xxx_assign_bit(priv->reg_base, OPENDRAIN_OFFSET(offset), (offset % 32), false);
+ break;
default:
ret = -ENOTSUPP;
break;
diff --git a/drivers/misc/misc_minor_kunit.c b/drivers/misc/misc_minor_kunit.c
new file mode 100644
index 000000000000..293e0fb7e43e
--- /dev/null
+++ b/drivers/misc/misc_minor_kunit.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+
+/* dynamic minor (2) */
+static struct miscdevice dev_dynamic_minor = {
+ .minor = 2,
+ .name = "dev_dynamic_minor",
+};
+
+/* static minor (LCD_MINOR) */
+static struct miscdevice dev_static_minor = {
+ .minor = LCD_MINOR,
+ .name = "dev_static_minor",
+};
+
+/* misc dynamic minor */
+static struct miscdevice dev_misc_dynamic_minor = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "dev_misc_dynamic_minor",
+};
+
+static void kunit_dynamic_minor(struct kunit *test)
+{
+ int ret;
+
+ ret = misc_register(&dev_dynamic_minor);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 2, dev_dynamic_minor.minor);
+ misc_deregister(&dev_dynamic_minor);
+}
+
+static void kunit_static_minor(struct kunit *test)
+{
+ int ret;
+
+ ret = misc_register(&dev_static_minor);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, LCD_MINOR, dev_static_minor.minor);
+ misc_deregister(&dev_static_minor);
+}
+
+static void kunit_misc_dynamic_minor(struct kunit *test)
+{
+ int ret;
+
+ ret = misc_register(&dev_misc_dynamic_minor);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ misc_deregister(&dev_misc_dynamic_minor);
+}
+
+static struct kunit_case test_cases[] = {
+ KUNIT_CASE(kunit_dynamic_minor),
+ KUNIT_CASE(kunit_static_minor),
+ KUNIT_CASE(kunit_misc_dynamic_minor),
+ {}
+};
+
+static struct kunit_suite test_suite = {
+ .name = "misc_minor_test",
+ .test_cases = test_cases,
+};
+kunit_test_suite(test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vimal Agrawal");
+MODULE_DESCRIPTION("misc minor testing");
diff --git a/drivers/misc/ntsync.c b/drivers/misc/ntsync.c
index 4954553b7baa..999026a1ae04 100644
--- a/drivers/misc/ntsync.c
+++ b/drivers/misc/ntsync.c
@@ -6,11 +6,17 @@
*/
#include <linux/anon_inodes.h>
+#include <linux/atomic.h>
#include <linux/file.h>
#include <linux/fs.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/overflow.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <uapi/linux/ntsync.h>
@@ -19,6 +25,8 @@
enum ntsync_type {
NTSYNC_TYPE_SEM,
+ NTSYNC_TYPE_MUTEX,
+ NTSYNC_TYPE_EVENT,
};
/*
@@ -30,10 +38,13 @@ enum ntsync_type {
*
* Both rely on struct file for reference counting. Individual
* ntsync_obj objects take a reference to the device when created.
+ * Wait operations take a reference to each object being waited on for
+ * the duration of the wait.
*/
struct ntsync_obj {
spinlock_t lock;
+ int dev_locked;
enum ntsync_type type;
@@ -46,22 +57,344 @@ struct ntsync_obj {
__u32 count;
__u32 max;
} sem;
+ struct {
+ __u32 count;
+ pid_t owner;
+ bool ownerdead;
+ } mutex;
+ struct {
+ bool manual;
+ bool signaled;
+ } event;
} u;
+
+ /*
+ * any_waiters is protected by the object lock, but all_waiters is
+ * protected by the device wait_all_lock.
+ */
+ struct list_head any_waiters;
+ struct list_head all_waiters;
+
+ /*
+ * Hint describing how many tasks are queued on this object in a
+ * wait-all operation.
+ *
+ * Any time we do a wake, we may need to wake "all" waiters as well as
+ * "any" waiters. In order to atomically wake "all" waiters, we must
+ * lock all of the objects, and that means grabbing the wait_all_lock
+ * below (and, due to lock ordering rules, before locking this object).
+ * However, wait-all is a rare operation, and grabbing the wait-all
+ * lock for every wake would create unnecessary contention.
+ * Therefore we first check whether all_hint is zero, and, if it is,
+ * we skip trying to wake "all" waiters.
+ *
+ * Since wait requests must originate from user-space threads, we're
+ * limited here by PID_MAX_LIMIT, so there's no risk of overflow.
+ */
+ atomic_t all_hint;
+};
+
+struct ntsync_q_entry {
+ struct list_head node;
+ struct ntsync_q *q;
+ struct ntsync_obj *obj;
+ __u32 index;
+};
+
+struct ntsync_q {
+ struct task_struct *task;
+ __u32 owner;
+
+ /*
+ * Protected via atomic_try_cmpxchg(). Only the thread that wins the
+ * compare-and-swap may actually change object states and wake this
+ * task.
+ */
+ atomic_t signaled;
+
+ bool all;
+ bool ownerdead;
+ __u32 count;
+ struct ntsync_q_entry entries[];
};
struct ntsync_device {
+ /*
+ * Wait-all operations must atomically grab all objects, and be totally
+ * ordered with respect to each other and wait-any operations.
+ * If one thread is trying to acquire several objects, another thread
+ * cannot touch any of those objects at the same time.
+ *
+ * This device-wide lock is used to serialize wait-for-all
+ * operations, and operations on an object that is involved in a
+ * wait-for-all.
+ */
+ struct mutex wait_all_lock;
+
struct file *file;
};
/*
+ * Single objects are locked using obj->lock.
+ *
+ * Multiple objects are 'locked' while holding dev->wait_all_lock.
+ * In this case however, individual objects are not locked by holding
+ * obj->lock, but by setting obj->dev_locked.
+ *
+ * This means that in order to lock a single object, the sequence is slightly
+ * more complicated than usual. Specifically, it needs to check obj->dev_locked
+ * after acquiring obj->lock; if it is set, it needs to drop the lock and
+ * acquire dev->wait_all_lock in order to serialize against the multi-object
+ * operation.
+ */
+
+static void dev_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
+{
+ lockdep_assert_held(&dev->wait_all_lock);
+ lockdep_assert(obj->dev == dev);
+ spin_lock(&obj->lock);
+ /*
+ * By setting obj->dev_locked inside obj->lock, it is ensured that
+ * anyone holding obj->lock must see the value.
+ */
+ obj->dev_locked = 1;
+ spin_unlock(&obj->lock);
+}
+
+static void dev_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
+{
+ lockdep_assert_held(&dev->wait_all_lock);
+ lockdep_assert(obj->dev == dev);
+ spin_lock(&obj->lock);
+ obj->dev_locked = 0;
+ spin_unlock(&obj->lock);
+}
+
+static void obj_lock(struct ntsync_obj *obj)
+{
+ struct ntsync_device *dev = obj->dev;
+
+ for (;;) {
+ spin_lock(&obj->lock);
+ if (likely(!obj->dev_locked))
+ break;
+
+ spin_unlock(&obj->lock);
+ mutex_lock(&dev->wait_all_lock);
+ spin_lock(&obj->lock);
+ /*
+ * obj->dev_locked should be set and cleared under the same
+ * wait_all_lock section; since we now own this lock, it should
+ * be clear.
+ */
+ lockdep_assert(!obj->dev_locked);
+ spin_unlock(&obj->lock);
+ mutex_unlock(&dev->wait_all_lock);
+ }
+}
+
+static void obj_unlock(struct ntsync_obj *obj)
+{
+ spin_unlock(&obj->lock);
+}
+
+static bool ntsync_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
+{
+ bool all;
+
+ obj_lock(obj);
+ all = atomic_read(&obj->all_hint);
+ if (unlikely(all)) {
+ obj_unlock(obj);
+ mutex_lock(&dev->wait_all_lock);
+ dev_lock_obj(dev, obj);
+ }
+
+ return all;
+}
+
+static void ntsync_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj, bool all)
+{
+ if (all) {
+ dev_unlock_obj(dev, obj);
+ mutex_unlock(&dev->wait_all_lock);
+ } else {
+ obj_unlock(obj);
+ }
+}
+
+#define ntsync_assert_held(obj) \
+ lockdep_assert((lockdep_is_held(&(obj)->lock) != LOCK_STATE_NOT_HELD) || \
+ ((lockdep_is_held(&(obj)->dev->wait_all_lock) != LOCK_STATE_NOT_HELD) && \
+ (obj)->dev_locked))
+
+static bool is_signaled(struct ntsync_obj *obj, __u32 owner)
+{
+ ntsync_assert_held(obj);
+
+ switch (obj->type) {
+ case NTSYNC_TYPE_SEM:
+ return !!obj->u.sem.count;
+ case NTSYNC_TYPE_MUTEX:
+ if (obj->u.mutex.owner && obj->u.mutex.owner != owner)
+ return false;
+ return obj->u.mutex.count < UINT_MAX;
+ case NTSYNC_TYPE_EVENT:
+ return obj->u.event.signaled;
+ }
+
+ WARN(1, "bad object type %#x\n", obj->type);
+ return false;
+}
+
+/*
+ * "locked_obj" is an optional pointer to an object which is already locked and
+ * should not be locked again. This is necessary so that changing an object's
+ * state and waking it can be a single atomic operation.
+ */
+static void try_wake_all(struct ntsync_device *dev, struct ntsync_q *q,
+ struct ntsync_obj *locked_obj)
+{
+ __u32 count = q->count;
+ bool can_wake = true;
+ int signaled = -1;
+ __u32 i;
+
+ lockdep_assert_held(&dev->wait_all_lock);
+ if (locked_obj)
+ lockdep_assert(locked_obj->dev_locked);
+
+ for (i = 0; i < count; i++) {
+ if (q->entries[i].obj != locked_obj)
+ dev_lock_obj(dev, q->entries[i].obj);
+ }
+
+ for (i = 0; i < count; i++) {
+ if (!is_signaled(q->entries[i].obj, q->owner)) {
+ can_wake = false;
+ break;
+ }
+ }
+
+ if (can_wake && atomic_try_cmpxchg(&q->signaled, &signaled, 0)) {
+ for (i = 0; i < count; i++) {
+ struct ntsync_obj *obj = q->entries[i].obj;
+
+ switch (obj->type) {
+ case NTSYNC_TYPE_SEM:
+ obj->u.sem.count--;
+ break;
+ case NTSYNC_TYPE_MUTEX:
+ if (obj->u.mutex.ownerdead)
+ q->ownerdead = true;
+ obj->u.mutex.ownerdead = false;
+ obj->u.mutex.count++;
+ obj->u.mutex.owner = q->owner;
+ break;
+ case NTSYNC_TYPE_EVENT:
+ if (!obj->u.event.manual)
+ obj->u.event.signaled = false;
+ break;
+ }
+ }
+ wake_up_process(q->task);
+ }
+
+ for (i = 0; i < count; i++) {
+ if (q->entries[i].obj != locked_obj)
+ dev_unlock_obj(dev, q->entries[i].obj);
+ }
+}
+
+static void try_wake_all_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
+{
+ struct ntsync_q_entry *entry;
+
+ lockdep_assert_held(&dev->wait_all_lock);
+ lockdep_assert(obj->dev_locked);
+
+ list_for_each_entry(entry, &obj->all_waiters, node)
+ try_wake_all(dev, entry->q, obj);
+}
+
+static void try_wake_any_sem(struct ntsync_obj *sem)
+{
+ struct ntsync_q_entry *entry;
+
+ ntsync_assert_held(sem);
+ lockdep_assert(sem->type == NTSYNC_TYPE_SEM);
+
+ list_for_each_entry(entry, &sem->any_waiters, node) {
+ struct ntsync_q *q = entry->q;
+ int signaled = -1;
+
+ if (!sem->u.sem.count)
+ break;
+
+ if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
+ sem->u.sem.count--;
+ wake_up_process(q->task);
+ }
+ }
+}
+
+static void try_wake_any_mutex(struct ntsync_obj *mutex)
+{
+ struct ntsync_q_entry *entry;
+
+ ntsync_assert_held(mutex);
+ lockdep_assert(mutex->type == NTSYNC_TYPE_MUTEX);
+
+ list_for_each_entry(entry, &mutex->any_waiters, node) {
+ struct ntsync_q *q = entry->q;
+ int signaled = -1;
+
+ if (mutex->u.mutex.count == UINT_MAX)
+ break;
+ if (mutex->u.mutex.owner && mutex->u.mutex.owner != q->owner)
+ continue;
+
+ if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
+ if (mutex->u.mutex.ownerdead)
+ q->ownerdead = true;
+ mutex->u.mutex.ownerdead = false;
+ mutex->u.mutex.count++;
+ mutex->u.mutex.owner = q->owner;
+ wake_up_process(q->task);
+ }
+ }
+}
+
+static void try_wake_any_event(struct ntsync_obj *event)
+{
+ struct ntsync_q_entry *entry;
+
+ ntsync_assert_held(event);
+ lockdep_assert(event->type == NTSYNC_TYPE_EVENT);
+
+ list_for_each_entry(entry, &event->any_waiters, node) {
+ struct ntsync_q *q = entry->q;
+ int signaled = -1;
+
+ if (!event->u.event.signaled)
+ break;
+
+ if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
+ if (!event->u.event.manual)
+ event->u.event.signaled = false;
+ wake_up_process(q->task);
+ }
+ }
+}
+
+/*
* Actually change the semaphore state, returning -EOVERFLOW if it is made
* invalid.
*/
-static int post_sem_state(struct ntsync_obj *sem, __u32 count)
+static int release_sem_state(struct ntsync_obj *sem, __u32 count)
{
__u32 sum;
- lockdep_assert_held(&sem->lock);
+ ntsync_assert_held(sem);
if (check_add_overflow(sem->u.sem.count, count, &sum) ||
sum > sem->u.sem.max)
@@ -71,11 +404,13 @@ static int post_sem_state(struct ntsync_obj *sem, __u32 count)
return 0;
}
-static int ntsync_sem_post(struct ntsync_obj *sem, void __user *argp)
+static int ntsync_sem_release(struct ntsync_obj *sem, void __user *argp)
{
+ struct ntsync_device *dev = sem->dev;
__u32 __user *user_args = argp;
__u32 prev_count;
__u32 args;
+ bool all;
int ret;
if (copy_from_user(&args, argp, sizeof(args)))
@@ -84,12 +419,17 @@ static int ntsync_sem_post(struct ntsync_obj *sem, void __user *argp)
if (sem->type != NTSYNC_TYPE_SEM)
return -EINVAL;
- spin_lock(&sem->lock);
+ all = ntsync_lock_obj(dev, sem);
prev_count = sem->u.sem.count;
- ret = post_sem_state(sem, args);
+ ret = release_sem_state(sem, args);
+ if (!ret) {
+ if (all)
+ try_wake_all_obj(dev, sem);
+ try_wake_any_sem(sem);
+ }
- spin_unlock(&sem->lock);
+ ntsync_unlock_obj(dev, sem, all);
if (!ret && put_user(prev_count, user_args))
ret = -EFAULT;
@@ -97,13 +437,229 @@ static int ntsync_sem_post(struct ntsync_obj *sem, void __user *argp)
return ret;
}
-static int ntsync_obj_release(struct inode *inode, struct file *file)
+/*
+ * Actually change the mutex state, returning -EPERM if not the owner.
+ */
+static int unlock_mutex_state(struct ntsync_obj *mutex,
+ const struct ntsync_mutex_args *args)
{
- struct ntsync_obj *obj = file->private_data;
+ ntsync_assert_held(mutex);
+
+ if (mutex->u.mutex.owner != args->owner)
+ return -EPERM;
+
+ if (!--mutex->u.mutex.count)
+ mutex->u.mutex.owner = 0;
+ return 0;
+}
+
+static int ntsync_mutex_unlock(struct ntsync_obj *mutex, void __user *argp)
+{
+ struct ntsync_mutex_args __user *user_args = argp;
+ struct ntsync_device *dev = mutex->dev;
+ struct ntsync_mutex_args args;
+ __u32 prev_count;
+ bool all;
+ int ret;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+ if (!args.owner)
+ return -EINVAL;
+
+ if (mutex->type != NTSYNC_TYPE_MUTEX)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, mutex);
+
+ prev_count = mutex->u.mutex.count;
+ ret = unlock_mutex_state(mutex, &args);
+ if (!ret) {
+ if (all)
+ try_wake_all_obj(dev, mutex);
+ try_wake_any_mutex(mutex);
+ }
+
+ ntsync_unlock_obj(dev, mutex, all);
+
+ if (!ret && put_user(prev_count, &user_args->count))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+/*
+ * Actually change the mutex state to mark its owner as dead,
+ * returning -EPERM if not the owner.
+ */
+static int kill_mutex_state(struct ntsync_obj *mutex, __u32 owner)
+{
+ ntsync_assert_held(mutex);
+
+ if (mutex->u.mutex.owner != owner)
+ return -EPERM;
+
+ mutex->u.mutex.ownerdead = true;
+ mutex->u.mutex.owner = 0;
+ mutex->u.mutex.count = 0;
+ return 0;
+}
+
+static int ntsync_mutex_kill(struct ntsync_obj *mutex, void __user *argp)
+{
+ struct ntsync_device *dev = mutex->dev;
+ __u32 owner;
+ bool all;
+ int ret;
+
+ if (get_user(owner, (__u32 __user *)argp))
+ return -EFAULT;
+ if (!owner)
+ return -EINVAL;
+
+ if (mutex->type != NTSYNC_TYPE_MUTEX)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, mutex);
+
+ ret = kill_mutex_state(mutex, owner);
+ if (!ret) {
+ if (all)
+ try_wake_all_obj(dev, mutex);
+ try_wake_any_mutex(mutex);
+ }
+
+ ntsync_unlock_obj(dev, mutex, all);
+
+ return ret;
+}
+
+static int ntsync_event_set(struct ntsync_obj *event, void __user *argp, bool pulse)
+{
+ struct ntsync_device *dev = event->dev;
+ __u32 prev_state;
+ bool all;
+
+ if (event->type != NTSYNC_TYPE_EVENT)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, event);
+
+ prev_state = event->u.event.signaled;
+ event->u.event.signaled = true;
+ if (all)
+ try_wake_all_obj(dev, event);
+ try_wake_any_event(event);
+ if (pulse)
+ event->u.event.signaled = false;
+
+ ntsync_unlock_obj(dev, event, all);
+
+ if (put_user(prev_state, (__u32 __user *)argp))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ntsync_event_reset(struct ntsync_obj *event, void __user *argp)
+{
+ struct ntsync_device *dev = event->dev;
+ __u32 prev_state;
+ bool all;
+
+ if (event->type != NTSYNC_TYPE_EVENT)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, event);
+
+ prev_state = event->u.event.signaled;
+ event->u.event.signaled = false;
+
+ ntsync_unlock_obj(dev, event, all);
+
+ if (put_user(prev_state, (__u32 __user *)argp))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ntsync_sem_read(struct ntsync_obj *sem, void __user *argp)
+{
+ struct ntsync_sem_args __user *user_args = argp;
+ struct ntsync_device *dev = sem->dev;
+ struct ntsync_sem_args args;
+ bool all;
+
+ if (sem->type != NTSYNC_TYPE_SEM)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, sem);
+
+ args.count = sem->u.sem.count;
+ args.max = sem->u.sem.max;
+
+ ntsync_unlock_obj(dev, sem, all);
+
+ if (copy_to_user(user_args, &args, sizeof(args)))
+ return -EFAULT;
+ return 0;
+}
+static int ntsync_mutex_read(struct ntsync_obj *mutex, void __user *argp)
+{
+ struct ntsync_mutex_args __user *user_args = argp;
+ struct ntsync_device *dev = mutex->dev;
+ struct ntsync_mutex_args args;
+ bool all;
+ int ret;
+
+ if (mutex->type != NTSYNC_TYPE_MUTEX)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, mutex);
+
+ args.count = mutex->u.mutex.count;
+ args.owner = mutex->u.mutex.owner;
+ ret = mutex->u.mutex.ownerdead ? -EOWNERDEAD : 0;
+
+ ntsync_unlock_obj(dev, mutex, all);
+
+ if (copy_to_user(user_args, &args, sizeof(args)))
+ return -EFAULT;
+ return ret;
+}
+
+static int ntsync_event_read(struct ntsync_obj *event, void __user *argp)
+{
+ struct ntsync_event_args __user *user_args = argp;
+ struct ntsync_device *dev = event->dev;
+ struct ntsync_event_args args;
+ bool all;
+
+ if (event->type != NTSYNC_TYPE_EVENT)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, event);
+
+ args.manual = event->u.event.manual;
+ args.signaled = event->u.event.signaled;
+
+ ntsync_unlock_obj(dev, event, all);
+
+ if (copy_to_user(user_args, &args, sizeof(args)))
+ return -EFAULT;
+ return 0;
+}
+
+static void ntsync_free_obj(struct ntsync_obj *obj)
+{
fput(obj->dev->file);
kfree(obj);
+}
+static int ntsync_obj_release(struct inode *inode, struct file *file)
+{
+ ntsync_free_obj(file->private_data);
return 0;
}
@@ -114,8 +670,24 @@ static long ntsync_obj_ioctl(struct file *file, unsigned int cmd,
void __user *argp = (void __user *)parm;
switch (cmd) {
- case NTSYNC_IOC_SEM_POST:
- return ntsync_sem_post(obj, argp);
+ case NTSYNC_IOC_SEM_RELEASE:
+ return ntsync_sem_release(obj, argp);
+ case NTSYNC_IOC_SEM_READ:
+ return ntsync_sem_read(obj, argp);
+ case NTSYNC_IOC_MUTEX_UNLOCK:
+ return ntsync_mutex_unlock(obj, argp);
+ case NTSYNC_IOC_MUTEX_KILL:
+ return ntsync_mutex_kill(obj, argp);
+ case NTSYNC_IOC_MUTEX_READ:
+ return ntsync_mutex_read(obj, argp);
+ case NTSYNC_IOC_EVENT_SET:
+ return ntsync_event_set(obj, argp, false);
+ case NTSYNC_IOC_EVENT_RESET:
+ return ntsync_event_reset(obj, argp);
+ case NTSYNC_IOC_EVENT_PULSE:
+ return ntsync_event_set(obj, argp, true);
+ case NTSYNC_IOC_EVENT_READ:
+ return ntsync_event_read(obj, argp);
default:
return -ENOIOCTLCMD;
}
@@ -140,6 +712,9 @@ static struct ntsync_obj *ntsync_alloc_obj(struct ntsync_device *dev,
obj->dev = dev;
get_file(dev->file);
spin_lock_init(&obj->lock);
+ INIT_LIST_HEAD(&obj->any_waiters);
+ INIT_LIST_HEAD(&obj->all_waiters);
+ atomic_set(&obj->all_hint, 0);
return obj;
}
@@ -165,7 +740,6 @@ static int ntsync_obj_get_fd(struct ntsync_obj *obj)
static int ntsync_create_sem(struct ntsync_device *dev, void __user *argp)
{
- struct ntsync_sem_args __user *user_args = argp;
struct ntsync_sem_args args;
struct ntsync_obj *sem;
int fd;
@@ -182,12 +756,398 @@ static int ntsync_create_sem(struct ntsync_device *dev, void __user *argp)
sem->u.sem.count = args.count;
sem->u.sem.max = args.max;
fd = ntsync_obj_get_fd(sem);
- if (fd < 0) {
- kfree(sem);
- return fd;
+ if (fd < 0)
+ ntsync_free_obj(sem);
+
+ return fd;
+}
+
+static int ntsync_create_mutex(struct ntsync_device *dev, void __user *argp)
+{
+ struct ntsync_mutex_args args;
+ struct ntsync_obj *mutex;
+ int fd;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+ if (!args.owner != !args.count)
+ return -EINVAL;
+
+ mutex = ntsync_alloc_obj(dev, NTSYNC_TYPE_MUTEX);
+ if (!mutex)
+ return -ENOMEM;
+ mutex->u.mutex.count = args.count;
+ mutex->u.mutex.owner = args.owner;
+ fd = ntsync_obj_get_fd(mutex);
+ if (fd < 0)
+ ntsync_free_obj(mutex);
+
+ return fd;
+}
+
+static int ntsync_create_event(struct ntsync_device *dev, void __user *argp)
+{
+ struct ntsync_event_args args;
+ struct ntsync_obj *event;
+ int fd;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+ event = ntsync_alloc_obj(dev, NTSYNC_TYPE_EVENT);
+ if (!event)
+ return -ENOMEM;
+ event->u.event.manual = args.manual;
+ event->u.event.signaled = args.signaled;
+ fd = ntsync_obj_get_fd(event);
+ if (fd < 0)
+ ntsync_free_obj(event);
+
+ return fd;
+}
+
+static struct ntsync_obj *get_obj(struct ntsync_device *dev, int fd)
+{
+ struct file *file = fget(fd);
+ struct ntsync_obj *obj;
+
+ if (!file)
+ return NULL;
+
+ if (file->f_op != &ntsync_obj_fops) {
+ fput(file);
+ return NULL;
+ }
+
+ obj = file->private_data;
+ if (obj->dev != dev) {
+ fput(file);
+ return NULL;
}
- return put_user(fd, &user_args->sem);
+ return obj;
+}
+
+static void put_obj(struct ntsync_obj *obj)
+{
+ fput(obj->file);
+}
+
+static int ntsync_schedule(const struct ntsync_q *q, const struct ntsync_wait_args *args)
+{
+ ktime_t timeout = ns_to_ktime(args->timeout);
+ clockid_t clock = CLOCK_MONOTONIC;
+ ktime_t *timeout_ptr;
+ int ret = 0;
+
+ timeout_ptr = (args->timeout == U64_MAX ? NULL : &timeout);
+
+ if (args->flags & NTSYNC_WAIT_REALTIME)
+ clock = CLOCK_REALTIME;
+
+ do {
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (atomic_read(&q->signaled) != -1) {
+ ret = 0;
+ break;
+ }
+ ret = schedule_hrtimeout_range_clock(timeout_ptr, 0, HRTIMER_MODE_ABS, clock);
+ } while (ret < 0);
+ __set_current_state(TASK_RUNNING);
+
+ return ret;
+}
+
+/*
+ * Allocate and initialize the ntsync_q structure, but do not queue us yet.
+ */
+static int setup_wait(struct ntsync_device *dev,
+ const struct ntsync_wait_args *args, bool all,
+ struct ntsync_q **ret_q)
+{
+ int fds[NTSYNC_MAX_WAIT_COUNT + 1];
+ const __u32 count = args->count;
+ size_t size = array_size(count, sizeof(fds[0]));
+ struct ntsync_q *q;
+ __u32 total_count;
+ __u32 i, j;
+
+ if (args->pad || (args->flags & ~NTSYNC_WAIT_REALTIME))
+ return -EINVAL;
+
+ if (size >= sizeof(fds))
+ return -EINVAL;
+
+ total_count = count;
+ if (args->alert)
+ total_count++;
+
+ if (copy_from_user(fds, u64_to_user_ptr(args->objs), size))
+ return -EFAULT;
+ if (args->alert)
+ fds[count] = args->alert;
+
+ q = kmalloc(struct_size(q, entries, total_count), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+ q->task = current;
+ q->owner = args->owner;
+ atomic_set(&q->signaled, -1);
+ q->all = all;
+ q->ownerdead = false;
+ q->count = count;
+
+ for (i = 0; i < total_count; i++) {
+ struct ntsync_q_entry *entry = &q->entries[i];
+ struct ntsync_obj *obj = get_obj(dev, fds[i]);
+
+ if (!obj)
+ goto err;
+
+ if (all) {
+ /* Check that the objects are all distinct. */
+ for (j = 0; j < i; j++) {
+ if (obj == q->entries[j].obj) {
+ put_obj(obj);
+ goto err;
+ }
+ }
+ }
+
+ entry->obj = obj;
+ entry->q = q;
+ entry->index = i;
+ }
+
+ *ret_q = q;
+ return 0;
+
+err:
+ for (j = 0; j < i; j++)
+ put_obj(q->entries[j].obj);
+ kfree(q);
+ return -EINVAL;
+}
+
+static void try_wake_any_obj(struct ntsync_obj *obj)
+{
+ switch (obj->type) {
+ case NTSYNC_TYPE_SEM:
+ try_wake_any_sem(obj);
+ break;
+ case NTSYNC_TYPE_MUTEX:
+ try_wake_any_mutex(obj);
+ break;
+ case NTSYNC_TYPE_EVENT:
+ try_wake_any_event(obj);
+ break;
+ }
+}
+
+static int ntsync_wait_any(struct ntsync_device *dev, void __user *argp)
+{
+ struct ntsync_wait_args args;
+ __u32 i, total_count;
+ struct ntsync_q *q;
+ int signaled;
+ bool all;
+ int ret;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+ ret = setup_wait(dev, &args, false, &q);
+ if (ret < 0)
+ return ret;
+
+ total_count = args.count;
+ if (args.alert)
+ total_count++;
+
+ /* queue ourselves */
+
+ for (i = 0; i < total_count; i++) {
+ struct ntsync_q_entry *entry = &q->entries[i];
+ struct ntsync_obj *obj = entry->obj;
+
+ all = ntsync_lock_obj(dev, obj);
+ list_add_tail(&entry->node, &obj->any_waiters);
+ ntsync_unlock_obj(dev, obj, all);
+ }
+
+ /*
+ * Check if we are already signaled.
+ *
+ * Note that the API requires that normal objects are checked before
+ * the alert event. Hence we queue the alert event last, and check
+ * objects in order.
+ */
+
+ for (i = 0; i < total_count; i++) {
+ struct ntsync_obj *obj = q->entries[i].obj;
+
+ if (atomic_read(&q->signaled) != -1)
+ break;
+
+ all = ntsync_lock_obj(dev, obj);
+ try_wake_any_obj(obj);
+ ntsync_unlock_obj(dev, obj, all);
+ }
+
+ /* sleep */
+
+ ret = ntsync_schedule(q, &args);
+
+ /* and finally, unqueue */
+
+ for (i = 0; i < total_count; i++) {
+ struct ntsync_q_entry *entry = &q->entries[i];
+ struct ntsync_obj *obj = entry->obj;
+
+ all = ntsync_lock_obj(dev, obj);
+ list_del(&entry->node);
+ ntsync_unlock_obj(dev, obj, all);
+
+ put_obj(obj);
+ }
+
+ signaled = atomic_read(&q->signaled);
+ if (signaled != -1) {
+ struct ntsync_wait_args __user *user_args = argp;
+
+ /* even if we caught a signal, we need to communicate success */
+ ret = q->ownerdead ? -EOWNERDEAD : 0;
+
+ if (put_user(signaled, &user_args->index))
+ ret = -EFAULT;
+ } else if (!ret) {
+ ret = -ETIMEDOUT;
+ }
+
+ kfree(q);
+ return ret;
+}
+
+static int ntsync_wait_all(struct ntsync_device *dev, void __user *argp)
+{
+ struct ntsync_wait_args args;
+ struct ntsync_q *q;
+ int signaled;
+ __u32 i;
+ int ret;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+ ret = setup_wait(dev, &args, true, &q);
+ if (ret < 0)
+ return ret;
+
+ /* queue ourselves */
+
+ mutex_lock(&dev->wait_all_lock);
+
+ for (i = 0; i < args.count; i++) {
+ struct ntsync_q_entry *entry = &q->entries[i];
+ struct ntsync_obj *obj = entry->obj;
+
+ atomic_inc(&obj->all_hint);
+
+ /*
+ * obj->all_waiters is protected by dev->wait_all_lock rather
+ * than obj->lock, so there is no need to acquire obj->lock
+ * here.
+ */
+ list_add_tail(&entry->node, &obj->all_waiters);
+ }
+ if (args.alert) {
+ struct ntsync_q_entry *entry = &q->entries[args.count];
+ struct ntsync_obj *obj = entry->obj;
+
+ dev_lock_obj(dev, obj);
+ list_add_tail(&entry->node, &obj->any_waiters);
+ dev_unlock_obj(dev, obj);
+ }
+
+ /* check if we are already signaled */
+
+ try_wake_all(dev, q, NULL);
+
+ mutex_unlock(&dev->wait_all_lock);
+
+ /*
+ * Check if the alert event is signaled, making sure to do so only
+ * after checking if the other objects are signaled.
+ */
+
+ if (args.alert) {
+ struct ntsync_obj *obj = q->entries[args.count].obj;
+
+ if (atomic_read(&q->signaled) == -1) {
+ bool all = ntsync_lock_obj(dev, obj);
+ try_wake_any_obj(obj);
+ ntsync_unlock_obj(dev, obj, all);
+ }
+ }
+
+ /* sleep */
+
+ ret = ntsync_schedule(q, &args);
+
+ /* and finally, unqueue */
+
+ mutex_lock(&dev->wait_all_lock);
+
+ for (i = 0; i < args.count; i++) {
+ struct ntsync_q_entry *entry = &q->entries[i];
+ struct ntsync_obj *obj = entry->obj;
+
+ /*
+ * obj->all_waiters is protected by dev->wait_all_lock rather
+ * than obj->lock, so there is no need to acquire it here.
+ */
+ list_del(&entry->node);
+
+ atomic_dec(&obj->all_hint);
+
+ put_obj(obj);
+ }
+
+ mutex_unlock(&dev->wait_all_lock);
+
+ if (args.alert) {
+ struct ntsync_q_entry *entry = &q->entries[args.count];
+ struct ntsync_obj *obj = entry->obj;
+ bool all;
+
+ all = ntsync_lock_obj(dev, obj);
+ list_del(&entry->node);
+ ntsync_unlock_obj(dev, obj, all);
+
+ put_obj(obj);
+ }
+
+ signaled = atomic_read(&q->signaled);
+ if (signaled != -1) {
+ struct ntsync_wait_args __user *user_args = argp;
+
+ /* even if we caught a signal, we need to communicate success */
+ ret = q->ownerdead ? -EOWNERDEAD : 0;
+
+ if (put_user(signaled, &user_args->index))
+ ret = -EFAULT;
+ } else if (!ret) {
+ ret = -ETIMEDOUT;
+ }
+
+ kfree(q);
+ return ret;
}
static int ntsync_char_open(struct inode *inode, struct file *file)
@@ -198,6 +1158,8 @@ static int ntsync_char_open(struct inode *inode, struct file *file)
if (!dev)
return -ENOMEM;
+ mutex_init(&dev->wait_all_lock);
+
file->private_data = dev;
dev->file = file;
return nonseekable_open(inode, file);
@@ -219,8 +1181,16 @@ static long ntsync_char_ioctl(struct file *file, unsigned int cmd,
void __user *argp = (void __user *)parm;
switch (cmd) {
+ case NTSYNC_IOC_CREATE_EVENT:
+ return ntsync_create_event(dev, argp);
+ case NTSYNC_IOC_CREATE_MUTEX:
+ return ntsync_create_mutex(dev, argp);
case NTSYNC_IOC_CREATE_SEM:
return ntsync_create_sem(dev, argp);
+ case NTSYNC_IOC_WAIT_ALL:
+ return ntsync_wait_all(dev, argp);
+ case NTSYNC_IOC_WAIT_ANY:
+ return ntsync_wait_any(dev, argp);
default:
return -ENOIOCTLCMD;
}
@@ -238,6 +1208,7 @@ static struct miscdevice ntsync_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = NTSYNC_NAME,
.fops = &ntsync_fops,
+ .mode = 0666,
};
module_misc_device(ntsync_misc);
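
Taken together, the create and wait ioctls compose like this from user space (a sketch only: field names match the struct members referenced in this diff, but consult uapi/linux/ntsync.h before relying on it):

/*
 * Sketch of a wait-any call against the API added above.
 * Error handling omitted; the program is untested.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ntsync.h>

int wait_any_example(int dev)
{
	struct ntsync_event_args ev_args = { .manual = 0, .signaled = 0 };
	struct ntsync_sem_args sem_args = { .count = 1, .max = 1 };
	int fds[2];

	fds[0] = ioctl(dev, NTSYNC_IOC_CREATE_EVENT, &ev_args);
	fds[1] = ioctl(dev, NTSYNC_IOC_CREATE_SEM, &sem_args);

	struct ntsync_wait_args wait = {
		.timeout = UINT64_MAX,	/* U64_MAX means no timeout */
		.objs = (uintptr_t)fds,	/* user pointer to the fd array */
		.count = 2,
		/* owner matters only for mutexes; any value is fine here */
		.owner = (uint32_t)getpid(),
	};

	/*
	 * Returns 0 when an object was consumed; wait.index tells which
	 * one (here the semaphore at index 1, since its count is nonzero).
	 */
	ioctl(dev, NTSYNC_IOC_WAIT_ANY, &wait);
	return wait.index;
}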
diff --git a/drivers/misc/ocxl/sysfs.c b/drivers/misc/ocxl/sysfs.c
index 07520d6e6dc5..e849641687a0 100644
--- a/drivers/misc/ocxl/sysfs.c
+++ b/drivers/misc/ocxl/sysfs.c
@@ -94,7 +94,7 @@ static struct device_attribute afu_attrs[] = {
};
static ssize_t global_mmio_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ocxl_afu *afu = to_afu(kobj_to_dev(kobj));
@@ -155,7 +155,7 @@ int ocxl_sysfs_register_afu(struct ocxl_file_info *info)
info->attr_global_mmio.attr.name = "global_mmio_area";
info->attr_global_mmio.attr.mode = 0600;
info->attr_global_mmio.size = info->afu->config.global_mmio_size;
- info->attr_global_mmio.read = global_mmio_read;
+ info->attr_global_mmio.read_new = global_mmio_read;
info->attr_global_mmio.mmap = global_mmio_mmap;
rc = device_create_bin_file(&info->dev, &info->attr_global_mmio);
if (rc) {
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 8d2b7135738e..6121c0940cd1 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -483,7 +483,7 @@ static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
}
static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
unsigned int rom_signature;
@@ -553,7 +553,7 @@ return_err_nomutex:
}
static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
int err;
@@ -655,8 +655,8 @@ static const struct bin_attribute pch_bin_attr = {
.mode = S_IRUGO | S_IWUSR,
},
.size = PCH_PHUB_OROM_SIZE + 1,
- .read = pch_phub_bin_read,
- .write = pch_phub_bin_write,
+ .read_new = pch_phub_bin_read,
+ .write_new = pch_phub_bin_write,
};
static int pch_phub_probe(struct pci_dev *pdev,
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 3aaaf47fa4ee..4c0f37ad0281 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -69,6 +69,9 @@
#define PCI_ENDPOINT_TEST_FLAGS 0x2c
#define FLAG_USE_DMA BIT(0)
+#define PCI_ENDPOINT_TEST_CAPS 0x30
+#define CAP_UNALIGNED_ACCESS BIT(0)
+
#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define PCI_DEVICE_ID_TI_J7200 0xb00f
#define PCI_DEVICE_ID_TI_AM64 0xb010
@@ -85,7 +88,6 @@
#define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0 0x0031
-#define PCI_VENDOR_ID_ROCKCHIP 0x1d87
#define PCI_DEVICE_ID_ROCKCHIP_RK3588 0x3588
static DEFINE_IDA(pci_endpoint_test_ida);
@@ -166,43 +168,47 @@ static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
test->irq_type = IRQ_TYPE_UNDEFINED;
}
-static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
+static int pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
int type)
{
- int irq = -1;
+ int irq;
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
- bool res = true;
switch (type) {
case IRQ_TYPE_INTX:
irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
- if (irq < 0)
+ if (irq < 0) {
dev_err(dev, "Failed to get Legacy interrupt\n");
+ return irq;
+ }
+
break;
case IRQ_TYPE_MSI:
irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
- if (irq < 0)
+ if (irq < 0) {
dev_err(dev, "Failed to get MSI interrupts\n");
+ return irq;
+ }
+
break;
case IRQ_TYPE_MSIX:
irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
- if (irq < 0)
+ if (irq < 0) {
dev_err(dev, "Failed to get MSI-X interrupts\n");
+ return irq;
+ }
+
break;
default:
dev_err(dev, "Invalid IRQ type selected\n");
- }
-
- if (irq < 0) {
- irq = 0;
- res = false;
+ return -EINVAL;
}
test->irq_type = type;
test->num_irqs = irq;
- return res;
+ return 0;
}
static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
@@ -217,25 +223,25 @@ static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
test->num_irqs = 0;
}
-static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
+static int pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
int i;
- int err;
+ int ret;
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
for (i = 0; i < test->num_irqs; i++) {
- err = devm_request_irq(dev, pci_irq_vector(pdev, i),
+ ret = devm_request_irq(dev, pci_irq_vector(pdev, i),
pci_endpoint_test_irqhandler,
IRQF_SHARED, test->name, test);
- if (err)
+ if (ret)
goto fail;
}
- return true;
+ return 0;
fail:
- switch (irq_type) {
+ switch (test->irq_type) {
case IRQ_TYPE_INTX:
dev_err(dev, "Failed to request IRQ %d for Legacy\n",
pci_irq_vector(pdev, i));
@@ -252,7 +258,10 @@ fail:
break;
}
- return false;
+ test->num_irqs = i;
+ pci_endpoint_test_release_irq(test);
+
+ return ret;
}
static const u32 bar_test_pattern[] = {
@@ -265,9 +274,9 @@ static const u32 bar_test_pattern[] = {
};
static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
- enum pci_barno barno, int offset,
- void *write_buf, void *read_buf,
- int size)
+ enum pci_barno barno,
+ resource_size_t offset, void *write_buf,
+ void *read_buf, int size)
{
memset(write_buf, bar_test_pattern[barno], size);
memcpy_toio(test->bar[barno] + offset, write_buf, size);
@@ -277,16 +286,17 @@ static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
return memcmp(write_buf, read_buf, size);
}
-static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
+static int pci_endpoint_test_bar(struct pci_endpoint_test *test,
enum pci_barno barno)
{
- int j, bar_size, buf_size, iters, remain;
+ resource_size_t bar_size, offset = 0;
void *write_buf __free(kfree) = NULL;
void *read_buf __free(kfree) = NULL;
struct pci_dev *pdev = test->pdev;
+ int buf_size;
if (!test->bar[barno])
- return false;
+ return -ENOMEM;
bar_size = pci_resource_len(pdev, barno);
@@ -301,28 +311,106 @@ static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
write_buf = kmalloc(buf_size, GFP_KERNEL);
if (!write_buf)
- return false;
+ return -ENOMEM;
read_buf = kmalloc(buf_size, GFP_KERNEL);
if (!read_buf)
- return false;
+ return -ENOMEM;
+
+ while (offset < bar_size) {
+ if (pci_endpoint_test_bar_memcmp(test, barno, offset, write_buf,
+ read_buf, buf_size))
+ return -EIO;
+ offset += buf_size;
+ }
+
+ return 0;
+}
+
+static u32 bar_test_pattern_with_offset(enum pci_barno barno, int offset)
+{
+ u32 val;
+
+ /* Keep the BAR pattern in the top byte. */
+ val = bar_test_pattern[barno] & 0xff000000;
+ /* Store the (partial) offset in the remaining bytes. */
+ val |= offset & 0x00ffffff;
+
+ return val;
+}
+
+static void pci_endpoint_test_bars_write_bar(struct pci_endpoint_test *test,
+ enum pci_barno barno)
+{
+ struct pci_dev *pdev = test->pdev;
+ int j, size;
- iters = bar_size / buf_size;
- for (j = 0; j < iters; j++)
- if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * j,
- write_buf, read_buf, buf_size))
- return false;
+ size = pci_resource_len(pdev, barno);
- remain = bar_size % buf_size;
- if (remain)
- if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * iters,
- write_buf, read_buf, remain))
- return false;
+ if (barno == test->test_reg_bar)
+ size = 0x4;
- return true;
+ for (j = 0; j < size; j += 4)
+ writel_relaxed(bar_test_pattern_with_offset(barno, j),
+ test->bar[barno] + j);
}
-static bool pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
+static int pci_endpoint_test_bars_read_bar(struct pci_endpoint_test *test,
+ enum pci_barno barno)
+{
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+ int j, size;
+ u32 val;
+
+ size = pci_resource_len(pdev, barno);
+
+ if (barno == test->test_reg_bar)
+ size = 0x4;
+
+ for (j = 0; j < size; j += 4) {
+ u32 expected = bar_test_pattern_with_offset(barno, j);
+
+ val = readl_relaxed(test->bar[barno] + j);
+ if (val != expected) {
+ dev_err(dev,
+				"BAR%d incorrect data at offset: %#x, got: %#x, expected: %#x\n",
+ barno, j, val, expected);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int pci_endpoint_test_bars(struct pci_endpoint_test *test)
+{
+ enum pci_barno bar;
+ int ret;
+
+ /* Write all BARs in order (without reading). */
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ if (test->bar[bar])
+ pci_endpoint_test_bars_write_bar(test, bar);
+
+ /*
+ * Read all BARs in order (without writing).
+ * If there is an address translation issue on the EP, writing one BAR
+ * might have overwritten another BAR. Ensure that this is not the case.
+	 * (Reading back the BAR directly after writing cannot detect this.)
+ */
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (test->bar[bar]) {
+ ret = pci_endpoint_test_bars_read_bar(test, bar);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
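
To make the comment's aliasing argument concrete, suppose (hypothetically) a buggy endpoint translates BAR0 and BAR1 onto the same backing memory, and assume patterns 0xa0a0a0a0/0xa1a1a1a1:

	writel_relaxed(bar_test_pattern_with_offset(0, 0), test->bar[0]); /* 0xa0000000 */
	writel_relaxed(bar_test_pattern_with_offset(1, 0), test->bar[1]); /* clobbers it: 0xa1000000 */
	/* The separate read pass later sees 0xa1000000 through BAR0 and
	 * fails with -EIO; reading BAR0 back immediately after its own
	 * write would have seen the expected value and missed the bug. */
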
+
+static int pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
{
u32 val;
@@ -334,16 +422,17 @@ static bool pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
val = wait_for_completion_timeout(&test->irq_raised,
msecs_to_jiffies(1000));
if (!val)
- return false;
+ return -ETIMEDOUT;
- return true;
+ return 0;
}
-static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
+static int pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
u16 msi_num, bool msix)
{
- u32 val;
struct pci_dev *pdev = test->pdev;
+ u32 val;
+ int ret;
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI);
@@ -354,9 +443,16 @@ static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
val = wait_for_completion_timeout(&test->irq_raised,
msecs_to_jiffies(1000));
if (!val)
- return false;
+ return -ETIMEDOUT;
+
+ ret = pci_irq_vector(pdev, msi_num - 1);
+ if (ret < 0)
+ return ret;
- return pci_irq_vector(pdev, msi_num - 1) == test->last_irq;
+ if (ret != test->last_irq)
+ return -EIO;
+
+ return 0;
}
static int pci_endpoint_test_validate_xfer_params(struct device *dev,
@@ -375,11 +471,10 @@ static int pci_endpoint_test_validate_xfer_params(struct device *dev,
return 0;
}
-static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
+static int pci_endpoint_test_copy(struct pci_endpoint_test *test,
unsigned long arg)
{
struct pci_endpoint_test_xfer_param param;
- bool ret = false;
void *src_addr;
void *dst_addr;
u32 flags = 0;
@@ -398,17 +493,17 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
int irq_type = test->irq_type;
u32 src_crc32;
u32 dst_crc32;
- int err;
+ int ret;
- err = copy_from_user(&param, (void __user *)arg, sizeof(param));
- if (err) {
+ ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
+ if (ret) {
dev_err(dev, "Failed to get transfer param\n");
- return false;
+ return -EFAULT;
}
- err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
- if (err)
- return false;
+ ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
+ if (ret)
+ return ret;
size = param.size;
@@ -418,22 +513,21 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
- goto err;
+ return -EINVAL;
}
orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_src_addr) {
dev_err(dev, "Failed to allocate source buffer\n");
- ret = false;
- goto err;
+ return -ENOMEM;
}
get_random_bytes(orig_src_addr, size + alignment);
orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
size + alignment, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, orig_src_phys_addr)) {
+ ret = dma_mapping_error(dev, orig_src_phys_addr);
+ if (ret) {
dev_err(dev, "failed to map source buffer address\n");
- ret = false;
goto err_src_phys_addr;
}
@@ -457,15 +551,15 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_dst_addr) {
dev_err(dev, "Failed to allocate destination address\n");
- ret = false;
+ ret = -ENOMEM;
goto err_dst_addr;
}
orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
size + alignment, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, orig_dst_phys_addr)) {
+ ret = dma_mapping_error(dev, orig_dst_phys_addr);
+ if (ret) {
dev_err(dev, "failed to map destination buffer address\n");
- ret = false;
goto err_dst_phys_addr;
}
@@ -498,8 +592,8 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
DMA_FROM_DEVICE);
dst_crc32 = crc32_le(~0, dst_addr, size);
- if (dst_crc32 == src_crc32)
- ret = true;
+ if (dst_crc32 != src_crc32)
+ ret = -EIO;
err_dst_phys_addr:
kfree(orig_dst_addr);
@@ -510,16 +604,13 @@ err_dst_addr:
err_src_phys_addr:
kfree(orig_src_addr);
-
-err:
return ret;
}
-static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
+static int pci_endpoint_test_write(struct pci_endpoint_test *test,
unsigned long arg)
{
struct pci_endpoint_test_xfer_param param;
- bool ret = false;
u32 flags = 0;
bool use_dma;
u32 reg;
@@ -534,17 +625,17 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
int irq_type = test->irq_type;
size_t size;
u32 crc32;
- int err;
+ int ret;
- err = copy_from_user(&param, (void __user *)arg, sizeof(param));
- if (err != 0) {
+ ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
+ if (ret) {
dev_err(dev, "Failed to get transfer param\n");
- return false;
+ return -EFAULT;
}
- err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
- if (err)
- return false;
+ ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
+ if (ret)
+ return ret;
size = param.size;
@@ -554,23 +645,22 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
- goto err;
+ return -EINVAL;
}
orig_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_addr) {
dev_err(dev, "Failed to allocate address\n");
- ret = false;
- goto err;
+ return -ENOMEM;
}
get_random_bytes(orig_addr, size + alignment);
orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev, orig_phys_addr)) {
+ ret = dma_mapping_error(dev, orig_phys_addr);
+ if (ret) {
dev_err(dev, "failed to map source buffer address\n");
- ret = false;
goto err_phys_addr;
}
@@ -603,24 +693,21 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
wait_for_completion(&test->irq_raised);
reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
- if (reg & STATUS_READ_SUCCESS)
- ret = true;
+ if (!(reg & STATUS_READ_SUCCESS))
+ ret = -EIO;
dma_unmap_single(dev, orig_phys_addr, size + alignment,
DMA_TO_DEVICE);
err_phys_addr:
kfree(orig_addr);
-
-err:
return ret;
}
-static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
+static int pci_endpoint_test_read(struct pci_endpoint_test *test,
unsigned long arg)
{
struct pci_endpoint_test_xfer_param param;
- bool ret = false;
u32 flags = 0;
bool use_dma;
size_t size;
@@ -634,17 +721,17 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
size_t alignment = test->alignment;
int irq_type = test->irq_type;
u32 crc32;
- int err;
+ int ret;
- err = copy_from_user(&param, (void __user *)arg, sizeof(param));
- if (err) {
+ ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
+ if (ret) {
dev_err(dev, "Failed to get transfer param\n");
- return false;
+ return -EFAULT;
}
- err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
- if (err)
- return false;
+ ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
+ if (ret)
+ return ret;
size = param.size;
@@ -654,21 +741,20 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
- goto err;
+ return -EINVAL;
}
orig_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_addr) {
dev_err(dev, "Failed to allocate destination address\n");
- ret = false;
- goto err;
+ return -ENOMEM;
}
orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, orig_phys_addr)) {
+ ret = dma_mapping_error(dev, orig_phys_addr);
+ if (ret) {
 		dev_err(dev, "failed to map destination buffer address\n");
- ret = false;
goto err_phys_addr;
}
@@ -700,50 +786,52 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
DMA_FROM_DEVICE);
crc32 = crc32_le(~0, addr, size);
- if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
- ret = true;
+ if (crc32 != pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
+ ret = -EIO;
err_phys_addr:
kfree(orig_addr);
-err:
return ret;
}
-static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
+static int pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
pci_endpoint_test_release_irq(test);
pci_endpoint_test_free_irq_vectors(test);
- return true;
+
+ return 0;
}
-static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
+static int pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
int req_irq_type)
{
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
+ int ret;
if (req_irq_type < IRQ_TYPE_INTX || req_irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
- return false;
+ return -EINVAL;
}
if (test->irq_type == req_irq_type)
- return true;
+ return 0;
pci_endpoint_test_release_irq(test);
pci_endpoint_test_free_irq_vectors(test);
- if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
- goto err;
-
- if (!pci_endpoint_test_request_irq(test))
- goto err;
+ ret = pci_endpoint_test_alloc_irq_vectors(test, req_irq_type);
+ if (ret)
+ return ret;
- return true;
+ ret = pci_endpoint_test_request_irq(test);
+ if (ret) {
+ pci_endpoint_test_free_irq_vectors(test);
+ return ret;
+ }
-err:
- pci_endpoint_test_free_irq_vectors(test);
- return false;
+ irq_type = test->irq_type;
+ return 0;
}
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
@@ -768,6 +856,9 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
goto ret;
ret = pci_endpoint_test_bar(test, bar);
break;
+ case PCITEST_BARS:
+ ret = pci_endpoint_test_bars(test);
+ break;
case PCITEST_INTX_IRQ:
ret = pci_endpoint_test_intx_irq(test);
break;
@@ -805,10 +896,24 @@ static const struct file_operations pci_endpoint_test_fops = {
.unlocked_ioctl = pci_endpoint_test_ioctl,
};
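
With the handlers converted to return 0 or a negative errno, that value now reaches userspace as the ioctl(2) result. A hypothetical userspace sketch (ioctl name from include/uapi/linux/pcitest.h; the device node name follows DRV_MODULE_NAME):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/pcitest.h>

	static int demo_test_bar0(void)
	{
		int fd = open("/dev/pci-endpoint-test.0", O_RDWR);

		if (fd < 0)
			return -1;
		/* On failure ioctl() returns -1 and errno holds the driver's
		 * errno code (e.g. EIO), instead of the old bool-style 0/1. */
		if (ioctl(fd, PCITEST_BAR, 0) < 0)
			perror("PCITEST_BAR");
		close(fd);
		return 0;
	}
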
+static void pci_endpoint_test_get_capabilities(struct pci_endpoint_test *test)
+{
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+ u32 caps;
+
+ caps = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CAPS);
+ dev_dbg(dev, "PCI_ENDPOINT_TEST_CAPS: %#x\n", caps);
+
+ /* CAP_UNALIGNED_ACCESS is set if the EP can do unaligned access */
+ if (caps & CAP_UNALIGNED_ACCESS)
+ test->alignment = 0;
+}
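
Clearing test->alignment matters because the transfer helpers over-allocate their buffers by `alignment` bytes and then realign the DMA address; a simplified sketch of that fixup (condensed and renamed, not the driver's verbatim code):

	/* Advance both the DMA address and the CPU pointer to the next
	 * aligned boundary; with alignment == 0 this is a no-op, so the
	 * size + alignment over-allocation stops costing anything. */
	static void *demo_align(void *buf, dma_addr_t *phys, size_t alignment)
	{
		if (alignment && !IS_ALIGNED(*phys, alignment)) {
			size_t offset = ALIGN(*phys, alignment) - *phys;

			*phys += offset;
			buf += offset;
		}
		return buf;
	}
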
+
static int pci_endpoint_test_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int err;
+ int ret;
int id;
char name[24];
enum pci_barno bar;
@@ -847,24 +952,23 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
- err = pci_enable_device(pdev);
- if (err) {
+ ret = pci_enable_device(pdev);
+ if (ret) {
dev_err(dev, "Cannot enable PCI device\n");
- return err;
+ return ret;
}
- err = pci_request_regions(pdev, DRV_MODULE_NAME);
- if (err) {
+ ret = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (ret) {
dev_err(dev, "Cannot obtain PCI resources\n");
goto err_disable_pdev;
}
pci_set_master(pdev);
- if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
- err = -EINVAL;
+ ret = pci_endpoint_test_alloc_irq_vectors(test, irq_type);
+ if (ret)
goto err_disable_irq;
- }
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
@@ -879,7 +983,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
test->base = test->bar[test_reg_bar];
if (!test->base) {
- err = -ENOMEM;
+ ret = -ENOMEM;
dev_err(dev, "Cannot perform PCI test without BAR%d\n",
test_reg_bar);
goto err_iounmap;
@@ -889,7 +993,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
if (id < 0) {
- err = id;
+ ret = id;
dev_err(dev, "Unable to get id\n");
goto err_iounmap;
}
@@ -897,27 +1001,28 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
test->name = kstrdup(name, GFP_KERNEL);
if (!test->name) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_ida_remove;
}
- if (!pci_endpoint_test_request_irq(test)) {
- err = -EINVAL;
+ ret = pci_endpoint_test_request_irq(test);
+ if (ret)
goto err_kfree_test_name;
- }
+
+ pci_endpoint_test_get_capabilities(test);
misc_device = &test->miscdev;
misc_device->minor = MISC_DYNAMIC_MINOR;
misc_device->name = kstrdup(name, GFP_KERNEL);
if (!misc_device->name) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_release_irq;
}
misc_device->parent = &pdev->dev;
misc_device->fops = &pci_endpoint_test_fops;
- err = misc_register(misc_device);
- if (err) {
+ ret = misc_register(misc_device);
+ if (ret) {
dev_err(dev, "Failed to register device\n");
goto err_kfree_name;
}
@@ -949,7 +1054,7 @@ err_disable_irq:
err_disable_pdev:
pci_disable_device(pdev);
- return err;
+ return ret;
}
static void pci_endpoint_test_remove(struct pci_dev *pdev)
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 61b66e318488..7a3c34306de9 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -93,7 +93,7 @@ int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
static int xpc_disengage_min_timelimit; /* = 0 */
static int xpc_disengage_max_timelimit = 120;
-static struct ctl_table xpc_sys_xpc_hb[] = {
+static const struct ctl_table xpc_sys_xpc_hb[] = {
{
.procname = "hb_interval",
.data = &xpc_hb_interval,
@@ -111,7 +111,7 @@ static struct ctl_table xpc_sys_xpc_hb[] = {
.extra1 = &xpc_hb_check_min_interval,
.extra2 = &xpc_hb_check_max_interval},
};
-static struct ctl_table xpc_sys_xpc[] = {
+static const struct ctl_table xpc_sys_xpc[] = {
{
.procname = "disengage_timelimit",
.data = &xpc_disengage_timelimit,
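
Marking the tables const works because the sysctl core now takes them through a const pointer; a hypothetical registration these tables could feed into (sysctl path and error handling assumed, not taken from this diff):

	static struct ctl_table_header *xpc_sysctl_hdr;

	static int demo_register_xpc_sysctl(void)
	{
		/* register_sysctl() returns NULL on failure. */
		xpc_sysctl_hdr = register_sysctl("xpc", xpc_sys_xpc);
		return xpc_sysctl_hdr ? 0 : -ENOMEM;
	}
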
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index e40b027a88e2..e5069882457e 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -23,7 +23,7 @@
#define SRAM_GRANULARITY 32
static ssize_t sram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t pos, size_t count)
{
struct sram_partition *part;
@@ -38,7 +38,7 @@ static ssize_t sram_read(struct file *filp, struct kobject *kobj,
}
static ssize_t sram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t pos, size_t count)
{
struct sram_partition *part;
@@ -83,8 +83,8 @@ static int sram_add_export(struct sram_dev *sram, struct sram_reserve *block,
return -ENOMEM;
part->battr.attr.mode = S_IRUSR | S_IWUSR;
- part->battr.read = sram_read;
- part->battr.write = sram_write;
+ part->battr.read_new = sram_read;
+ part->battr.write_new = sram_write;
part->battr.size = block->size;
return device_create_bin_file(sram->dev, &part->battr);
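
For context on the read_new/write_new switch: the _new callbacks differ from the legacy ones only in taking a const struct bin_attribute pointer. A minimal compatible callback, with hypothetical names:

	static char demo_data[64];

	static ssize_t demo_read(struct file *filp, struct kobject *kobj,
				 const struct bin_attribute *attr,
				 char *buf, loff_t pos, size_t count)
	{
		/* sysfs clamps pos/count against attr->size before calling. */
		memcpy(buf, demo_data + pos, count);
		return count;
	}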