Diffstat (limited to 'drivers/iio/industrialio-buffer.c')
-rw-r--r-- | drivers/iio/industrialio-buffer.c | 149
1 file changed, 90 insertions, 59 deletions
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 4ada5592aa2b..a7d7e5143ed2 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -19,7 +19,9 @@
 #include <linux/sched/signal.h>
 
 #include <linux/iio/iio.h>
+#include <linux/iio/iio-opaque.h>
 #include "iio_core.h"
+#include "iio_core_trigger.h"
 #include <linux/iio/sysfs.h>
 #include <linux/iio/buffer.h>
 #include <linux/iio/buffer_impl.h>
@@ -189,10 +191,12 @@ __poll_t iio_buffer_poll(struct file *filp,
  */
 void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
 {
-	if (!indio_dev->buffer)
+	struct iio_buffer *buffer = indio_dev->buffer;
+
+	if (!buffer)
 		return;
 
-	wake_up(&indio_dev->buffer->pollq);
+	wake_up(&buffer->pollq);
 }
 
 void iio_buffer_init(struct iio_buffer *buffer)
@@ -262,10 +266,11 @@ static ssize_t iio_scan_el_show(struct device *dev,
 {
 	int ret;
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_buffer *buffer = indio_dev->buffer;
 
 	/* Ensure ret is 0 or 1. */
 	ret = !!test_bit(to_iio_dev_attr(attr)->address,
-			 indio_dev->buffer->scan_mask);
+			 buffer->scan_mask);
 
 	return sprintf(buf, "%d\n", ret);
 }
@@ -316,8 +321,7 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
 	const unsigned long *mask;
 	unsigned long *trialmask;
 
-	trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
-			    sizeof(*trialmask), GFP_KERNEL);
+	trialmask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
 	if (trialmask == NULL)
 		return -ENOMEM;
 	if (!indio_dev->masklength) {
@@ -382,7 +386,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
 	if (ret < 0)
 		return ret;
 	mutex_lock(&indio_dev->mlock);
-	if (iio_buffer_is_active(indio_dev->buffer)) {
+	if (iio_buffer_is_active(buffer)) {
 		ret = -EBUSY;
 		goto error_ret;
 	}
@@ -411,7 +415,9 @@ static ssize_t iio_scan_el_ts_show(struct device *dev,
 				   char *buf)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
+	struct iio_buffer *buffer = indio_dev->buffer;
+
+	return sprintf(buf, "%d\n", buffer->scan_timestamp);
 }
 
 static ssize_t iio_scan_el_ts_store(struct device *dev,
@@ -421,6 +427,7 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
 {
 	int ret;
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_buffer *buffer = indio_dev->buffer;
 	bool state;
 
 	ret = strtobool(buf, &state);
@@ -428,11 +435,11 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
 		return ret;
 
 	mutex_lock(&indio_dev->mlock);
-	if (iio_buffer_is_active(indio_dev->buffer)) {
+	if (iio_buffer_is_active(buffer)) {
 		ret = -EBUSY;
 		goto error_ret;
 	}
-	indio_dev->buffer->scan_timestamp = state;
+	buffer->scan_timestamp = state;
 
 error_ret:
 	mutex_unlock(&indio_dev->mlock);
@@ -440,10 +447,10 @@ error_ret:
 }
 
 static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
+					struct iio_buffer *buffer,
 					const struct iio_chan_spec *chan)
 {
 	int ret, attrcount = 0;
-	struct iio_buffer *buffer = indio_dev->buffer;
 
 	ret = __iio_add_chan_devattr("index",
 				     chan,
@@ -519,7 +526,7 @@ static ssize_t iio_buffer_write_length(struct device *dev,
 		return len;
 
 	mutex_lock(&indio_dev->mlock);
-	if (iio_buffer_is_active(indio_dev->buffer)) {
+	if (iio_buffer_is_active(buffer)) {
 		ret = -EBUSY;
 	} else {
 		buffer->access->set_length(buffer, val);
@@ -540,7 +547,9 @@ static ssize_t iio_buffer_show_enable(struct device *dev,
 				      char *buf)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
+	struct iio_buffer *buffer = indio_dev->buffer;
+
+	return sprintf(buf, "%d\n", iio_buffer_is_active(buffer));
 }
 
 static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
@@ -591,8 +600,10 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
 static void iio_buffer_activate(struct iio_dev *indio_dev,
 	struct iio_buffer *buffer)
 {
+	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
 	iio_buffer_get(buffer);
-	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
+	list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
 }
 
 static void iio_buffer_deactivate(struct iio_buffer *buffer)
@@ -604,10 +615,11 @@ static void iio_buffer_deactivate(struct iio_buffer *buffer)
 
 static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
 {
+	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 	struct iio_buffer *buffer, *_buffer;
 
 	list_for_each_entry_safe(buffer, _buffer,
-				 &indio_dev->buffer_list, buffer_list)
+				 &iio_dev_opaque->buffer_list, buffer_list)
 		iio_buffer_deactivate(buffer);
 }
 
@@ -680,6 +692,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
 	struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
 	struct iio_device_config *config)
 {
+	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 	unsigned long *compound_mask;
 	const unsigned long *scan_mask;
 	bool strict_scanmask = false;
@@ -687,6 +700,13 @@ static int iio_verify_update(struct iio_dev *indio_dev,
 	bool scan_timestamp;
 	unsigned int modes;
 
+	if (insert_buffer &&
+	    bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
+		dev_dbg(&indio_dev->dev,
+			"At least one scan element must be enabled first\n");
+		return -EINVAL;
+	}
+
 	memset(config, 0, sizeof(*config));
 	config->watermark = ~0;
 
@@ -695,12 +715,12 @@ static int iio_verify_update(struct iio_dev *indio_dev,
 	 * to verify.
 	 */
 	if (remove_buffer && !insert_buffer &&
-		list_is_singular(&indio_dev->buffer_list))
+		list_is_singular(&iio_dev_opaque->buffer_list))
 		return 0;
 
 	modes = indio_dev->modes;
 
-	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
 		if (buffer == remove_buffer)
 			continue;
 		modes &= buffer->access->modes;
@@ -721,7 +741,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
 		 * Keep things simple for now and only allow a single buffer to
 		 * be connected in hardware mode.
 		 */
-		if (insert_buffer && !list_empty(&indio_dev->buffer_list))
+		if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
 			return -EINVAL;
 		config->mode = INDIO_BUFFER_HARDWARE;
 		strict_scanmask = true;
@@ -741,7 +761,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
 
 	scan_timestamp = false;
 
-	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
 		if (buffer == remove_buffer)
 			continue;
 		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
@@ -887,10 +907,11 @@ error_clear_mux_table:
 
 static int iio_update_demux(struct iio_dev *indio_dev)
 {
+	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 	struct iio_buffer *buffer;
 	int ret;
 
-	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
 		ret = iio_buffer_update_demux(indio_dev, buffer);
 		if (ret < 0)
 			goto error_clear_mux_table;
@@ -898,7 +919,7 @@ static int iio_update_demux(struct iio_dev *indio_dev)
 	return 0;
 
 error_clear_mux_table:
-	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
+	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
 		iio_buffer_demux_free(buffer);
 
 	return ret;
@@ -907,12 +928,14 @@ error_clear_mux_table:
 
 static int iio_enable_buffers(struct iio_dev *indio_dev,
 	struct iio_device_config *config)
 {
+	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 	struct iio_buffer *buffer;
 	int ret;
 
 	indio_dev->active_scan_mask = config->scan_mask;
 	indio_dev->scan_timestamp = config->scan_timestamp;
 	indio_dev->scan_bytes = config->scan_bytes;
+	indio_dev->currentmode = config->mode;
 
 	iio_update_demux(indio_dev);
@@ -942,34 +965,44 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
 		indio_dev->info->hwfifo_set_watermark(indio_dev,
 			config->watermark);
 
-	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
 		ret = iio_buffer_enable(buffer, indio_dev);
 		if (ret)
 			goto err_disable_buffers;
 	}
 
-	indio_dev->currentmode = config->mode;
+	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+		ret = iio_trigger_attach_poll_func(indio_dev->trig,
+						   indio_dev->pollfunc);
+		if (ret)
+			goto err_disable_buffers;
+	}
 
 	if (indio_dev->setup_ops->postenable) {
 		ret = indio_dev->setup_ops->postenable(indio_dev);
 		if (ret) {
 			dev_dbg(&indio_dev->dev,
 			       "Buffer not started: postenable failed (%d)\n", ret);
-			goto err_disable_buffers;
+			goto err_detach_pollfunc;
 		}
 	}
 
 	return 0;
 
+err_detach_pollfunc:
+	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+		iio_trigger_detach_poll_func(indio_dev->trig,
+					     indio_dev->pollfunc);
+	}
 err_disable_buffers:
-	list_for_each_entry_continue_reverse(buffer, &indio_dev->buffer_list,
+	list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
					     buffer_list)
 		iio_buffer_disable(buffer, indio_dev);
 err_run_postdisable:
-	indio_dev->currentmode = INDIO_DIRECT_MODE;
 	if (indio_dev->setup_ops->postdisable)
 		indio_dev->setup_ops->postdisable(indio_dev);
 err_undo_config:
+	indio_dev->currentmode = INDIO_DIRECT_MODE;
 	indio_dev->active_scan_mask = NULL;
 
 	return ret;
@@ -977,12 +1010,13 @@ err_undo_config:
 
 static int iio_disable_buffers(struct iio_dev *indio_dev)
 {
+	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 	struct iio_buffer *buffer;
 	int ret = 0;
 	int ret2;
 
 	/* Wind down existing buffers - iff there are any */
-	if (list_empty(&indio_dev->buffer_list))
+	if (list_empty(&iio_dev_opaque->buffer_list))
 		return 0;
 
 	/*
@@ -998,14 +1032,17 @@ static int iio_disable_buffers(struct iio_dev *indio_dev)
 			ret = ret2;
 	}
 
-	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+		iio_trigger_detach_poll_func(indio_dev->trig,
+					     indio_dev->pollfunc);
+	}
+
+	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
 		ret2 = iio_buffer_disable(buffer, indio_dev);
 		if (ret2 && !ret)
 			ret = ret2;
 	}
 
-	indio_dev->currentmode = INDIO_DIRECT_MODE;
-
 	if (indio_dev->setup_ops->postdisable) {
 		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
 		if (ret2 && !ret)
@@ -1014,6 +1051,7 @@ static int iio_disable_buffers(struct iio_dev *indio_dev)
 
 	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
 	indio_dev->active_scan_mask = NULL;
+	indio_dev->currentmode = INDIO_DIRECT_MODE;
 
 	return ret;
 }
@@ -1022,6 +1060,7 @@ static int __iio_update_buffers(struct iio_dev *indio_dev,
 		       struct iio_buffer *insert_buffer,
 		       struct iio_buffer *remove_buffer)
 {
+	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 	struct iio_device_config new_config;
 	int ret;
 
@@ -1046,7 +1085,7 @@ static int __iio_update_buffers(struct iio_dev *indio_dev,
 		iio_buffer_activate(indio_dev, insert_buffer);
 
 	/* If no buffers in list, we are done */
-	if (list_empty(&indio_dev->buffer_list))
+	if (list_empty(&iio_dev_opaque->buffer_list))
 		return 0;
 
 	ret = iio_enable_buffers(indio_dev, &new_config);
@@ -1123,6 +1162,7 @@ static ssize_t iio_buffer_store_enable(struct device *dev,
 	int ret;
 	bool requested_state;
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_buffer *buffer = indio_dev->buffer;
 	bool inlist;
 
 	ret = strtobool(buf, &requested_state);
@@ -1132,17 +1172,15 @@ static ssize_t iio_buffer_store_enable(struct device *dev,
 	mutex_lock(&indio_dev->mlock);
 
 	/* Find out if it is in the list */
-	inlist = iio_buffer_is_active(indio_dev->buffer);
+	inlist = iio_buffer_is_active(buffer);
 	/* Already in desired state */
 	if (inlist == requested_state)
 		goto done;
 
 	if (requested_state)
-		ret = __iio_update_buffers(indio_dev,
-					 indio_dev->buffer, NULL);
+		ret = __iio_update_buffers(indio_dev, buffer, NULL);
 	else
-		ret = __iio_update_buffers(indio_dev,
-					 NULL, indio_dev->buffer);
+		ret = __iio_update_buffers(indio_dev, NULL, buffer);
 
 done:
 	mutex_unlock(&indio_dev->mlock);
@@ -1184,7 +1222,7 @@ static ssize_t iio_buffer_store_watermark(struct device *dev,
 		goto out;
 	}
 
-	if (iio_buffer_is_active(indio_dev->buffer)) {
+	if (iio_buffer_is_active(buffer)) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -1201,11 +1239,9 @@ static ssize_t iio_dma_show_data_available(struct device *dev,
 					   char *buf)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	size_t bytes;
-
-	bytes = iio_buffer_data_available(indio_dev->buffer);
+	struct iio_buffer *buffer = indio_dev->buffer;
 
-	return sprintf(buf, "%zu\n", bytes);
+	return sprintf(buf, "%zu\n", iio_buffer_data_available(buffer));
 }
 
 static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
@@ -1233,7 +1269,7 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
 	struct iio_dev_attr *p;
 	struct attribute **attr;
 	struct iio_buffer *buffer = indio_dev->buffer;
-	int ret, i, attrn, attrcount, attrcount_orig = 0;
+	int ret, i, attrn, attrcount;
 	const struct iio_chan_spec *channels;
 
 	channels = indio_dev->channels;
@@ -1277,12 +1313,7 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
 
 	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;
 
-	if (buffer->scan_el_attrs != NULL) {
-		attr = buffer->scan_el_attrs->attrs;
-		while (*attr++ != NULL)
-			attrcount_orig++;
-	}
-	attrcount = attrcount_orig;
+	attrcount = 0;
 	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
 	channels = indio_dev->channels;
 	if (channels) {
@@ -1291,7 +1322,7 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
 			if (channels[i].scan_index < 0)
 				continue;
 
-			ret = iio_buffer_add_channel_sysfs(indio_dev,
+			ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
							 &channels[i]);
 			if (ret < 0)
 				goto error_cleanup_dynamic;
@@ -1319,10 +1350,7 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
 		ret = -ENOMEM;
 		goto error_free_scan_mask;
 	}
-	if (buffer->scan_el_attrs)
-		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
-		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
-	attrn = attrcount_orig;
+	attrn = 0;
 
 	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
 		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
@@ -1334,20 +1362,22 @@ error_free_scan_mask:
 	bitmap_free(buffer->scan_mask);
 error_cleanup_dynamic:
 	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
-	kfree(indio_dev->buffer->buffer_group.attrs);
+	kfree(buffer->buffer_group.attrs);
 
 	return ret;
 }
 
 void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
 {
-	if (!indio_dev->buffer)
+	struct iio_buffer *buffer = indio_dev->buffer;
+
+	if (!buffer)
 		return;
 
-	bitmap_free(indio_dev->buffer->scan_mask);
-	kfree(indio_dev->buffer->buffer_group.attrs);
-	kfree(indio_dev->buffer->scan_el_group.attrs);
-	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
+	bitmap_free(buffer->scan_mask);
+	kfree(buffer->buffer_group.attrs);
+	kfree(buffer->scan_el_group.attrs);
+	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
 }
 
 /**
@@ -1404,10 +1434,11 @@ static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
  */
 int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
 {
+	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
 	int ret;
 	struct iio_buffer *buf;
 
-	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
+	list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
 		ret = iio_push_to_buffer(buf, data);
 		if (ret < 0)
 			return ret;