path: root/drivers/iio/industrialio-buffer.c
author    Nuno Sá <nuno.sa@analog.com>  2022-10-12 18:16:20 +0300
committer Jonathan Cameron <Jonathan.Cameron@huawei.com>  2022-11-23 22:44:00 +0300
commit    16afe125b53f88b855d2713c8ba253d905dcf3cc (patch)
tree      4f62505de7510d5194d1fd9c249ccae56db18005 /drivers/iio/industrialio-buffer.c
parent    6b701cda3632c9cffaea6f79c5fe638800c8f7f1 (diff)
download  linux-16afe125b53f88b855d2713c8ba253d905dcf3cc.tar.xz
iio: core: move 'mlock' to 'struct iio_dev_opaque'
Now that there are no more users accessing 'mlock' directly, we can move it to the iio_dev private structure. Hence, it is now explicit that new drivers should not use this lock directly.

Signed-off-by: Nuno Sá <nuno.sa@analog.com>
Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Link: https://lore.kernel.org/r/20221012151620.1725215-5-nuno.sa@analog.com
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
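Illustrative only, not part of the patch: a minimal sketch of the access pattern the diff below converts the core to, where 'mlock' is reached through the opaque wrapper instead of through struct iio_dev. The helper name iio_core_do_locked() is hypothetical; to_iio_dev_opaque() and struct iio_dev_opaque come from <linux/iio/iio-opaque.h>.

#include <linux/mutex.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>

/* Hypothetical core-side helper; shows the to_iio_dev_opaque() pattern. */
static int iio_core_do_locked(struct iio_dev *indio_dev)
{
        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
        int ret = 0;

        /* 'mlock' now lives in struct iio_dev_opaque, private to the IIO core. */
        mutex_lock(&iio_dev_opaque->mlock);
        /* ... core-only state changes serialized by 'mlock' ... */
        mutex_unlock(&iio_dev_opaque->mlock);

        return ret;
}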
Diffstat (limited to 'drivers/iio/industrialio-buffer.c')
-rw-r--r--  drivers/iio/industrialio-buffer.c | 29
1 file changed, 17 insertions(+), 12 deletions(-)
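For driver code, which after this change cannot reach 'mlock' at all, the usual substitute when a driver needs a "no buffer running" guarantee is the claim/release pair from <linux/iio/iio.h>. A minimal sketch, with the hypothetical my_adc_single_read() standing in for a real driver callback:

/* Hypothetical driver path; my_adc_single_read() is illustrative only. */
static int my_adc_single_read(struct iio_dev *indio_dev, int *val)
{
        int ret;

        /* Returns -EBUSY if buffered capture is currently enabled. */
        ret = iio_device_claim_direct_mode(indio_dev);
        if (ret)
                return ret;

        *val = 0;  /* ... perform a one-shot conversion here ... */

        iio_device_release_direct_mode(indio_dev);
        return 0;
}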
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 228598b82a2f..9cd7db549fcb 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -507,13 +507,14 @@ static ssize_t iio_scan_el_store(struct device *dev,
int ret;
bool state;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_buffer *buffer = this_attr->buffer;
ret = kstrtobool(buf, &state);
if (ret < 0)
return ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&iio_dev_opaque->mlock);
if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
goto error_ret;
@@ -532,7 +533,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
}
error_ret:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&iio_dev_opaque->mlock);
return ret < 0 ? ret : len;
@@ -554,6 +555,7 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
bool state;
@@ -561,14 +563,14 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
if (ret < 0)
return ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&iio_dev_opaque->mlock);
if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
goto error_ret;
}
buffer->scan_timestamp = state;
error_ret:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&iio_dev_opaque->mlock);
return ret ? ret : len;
}
@@ -642,6 +644,7 @@ static ssize_t length_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
unsigned int val;
int ret;
@@ -653,7 +656,7 @@ static ssize_t length_store(struct device *dev, struct device_attribute *attr,
if (val == buffer->length)
return len;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&iio_dev_opaque->mlock);
if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
} else {
@@ -665,7 +668,7 @@ static ssize_t length_store(struct device *dev, struct device_attribute *attr,
if (buffer->length && buffer->length < buffer->watermark)
buffer->watermark = buffer->length;
out:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&iio_dev_opaque->mlock);
return ret ? ret : len;
}
@@ -1256,7 +1259,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
return -EINVAL;
mutex_lock(&iio_dev_opaque->info_exist_lock);
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&iio_dev_opaque->mlock);
if (insert_buffer && iio_buffer_is_active(insert_buffer))
insert_buffer = NULL;
@@ -1277,7 +1280,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
out_unlock:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&iio_dev_opaque->mlock);
mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
@@ -1296,6 +1299,7 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
int ret;
bool requested_state;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
bool inlist;
@@ -1303,7 +1307,7 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&iio_dev_opaque->mlock);
/* Find out if it is in the list */
inlist = iio_buffer_is_active(buffer);
@@ -1317,7 +1321,7 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
ret = __iio_update_buffers(indio_dev, NULL, buffer);
done:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&iio_dev_opaque->mlock);
return (ret < 0) ? ret : len;
}
@@ -1334,6 +1338,7 @@ static ssize_t watermark_store(struct device *dev,
const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
unsigned int val;
int ret;
@@ -1344,7 +1349,7 @@ static ssize_t watermark_store(struct device *dev,
if (!val)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&iio_dev_opaque->mlock);
if (val > buffer->length) {
ret = -EINVAL;
@@ -1358,7 +1363,7 @@ static ssize_t watermark_store(struct device *dev,
buffer->watermark = val;
out:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&iio_dev_opaque->mlock);
return ret ? ret : len;
}