Diffstat (limited to 'drivers/iio/industrialio-buffer.c')
-rw-r--r-- | drivers/iio/industrialio-buffer.c | 410
1 file changed, 217 insertions, 193 deletions
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index f971f79103ec..71333140d42c 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -178,6 +178,80 @@ static ssize_t iio_scan_el_show(struct device *dev,
         return sprintf(buf, "%d\n", ret);
 }
 
+/* Note NULL used as error indicator as it doesn't make sense. */
+static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
+                                          unsigned int masklength,
+                                          const unsigned long *mask)
+{
+        if (bitmap_empty(mask, masklength))
+                return NULL;
+        while (*av_masks) {
+                if (bitmap_subset(mask, av_masks, masklength))
+                        return av_masks;
+                av_masks += BITS_TO_LONGS(masklength);
+        }
+        return NULL;
+}
+
+static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
+        const unsigned long *mask)
+{
+        if (!indio_dev->setup_ops->validate_scan_mask)
+                return true;
+
+        return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
+}
+
+/**
+ * iio_scan_mask_set() - set particular bit in the scan mask
+ * @indio_dev: the iio device
+ * @buffer: the buffer whose scan mask we are interested in
+ * @bit: the bit to be set.
+ *
+ * Note that at this point we have no way of knowing what other
+ * buffers might request, hence this code only verifies that the
+ * individual buffers request is plausible.
+ */
+static int iio_scan_mask_set(struct iio_dev *indio_dev,
+                      struct iio_buffer *buffer, int bit)
+{
+        const unsigned long *mask;
+        unsigned long *trialmask;
+
+        trialmask = kmalloc(sizeof(*trialmask)*
+                            BITS_TO_LONGS(indio_dev->masklength),
+                            GFP_KERNEL);
+
+        if (trialmask == NULL)
+                return -ENOMEM;
+        if (!indio_dev->masklength) {
+                WARN_ON("Trying to set scanmask prior to registering buffer\n");
+                goto err_invalid_mask;
+        }
+        bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
+        set_bit(bit, trialmask);
+
+        if (!iio_validate_scan_mask(indio_dev, trialmask))
+                goto err_invalid_mask;
+
+        if (indio_dev->available_scan_masks) {
+                mask = iio_scan_mask_match(indio_dev->available_scan_masks,
+                                           indio_dev->masklength,
+                                           trialmask);
+                if (!mask)
+                        goto err_invalid_mask;
+        }
+        bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
+
+        kfree(trialmask);
+
+        return 0;
+
+err_invalid_mask:
+        kfree(trialmask);
+        return -EINVAL;
+}
+
 static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
 {
         clear_bit(bit, buffer->scan_mask);
@@ -309,115 +383,19 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
         return ret;
 }
 
-static const char * const iio_scan_elements_group_name = "scan_elements";
-
-int iio_buffer_register(struct iio_dev *indio_dev,
-                        const struct iio_chan_spec *channels,
-                        int num_channels)
-{
-        struct iio_dev_attr *p;
-        struct attribute **attr;
-        struct iio_buffer *buffer = indio_dev->buffer;
-        int ret, i, attrn, attrcount, attrcount_orig = 0;
-
-        if (buffer->attrs)
-                indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;
-
-        if (buffer->scan_el_attrs != NULL) {
-                attr = buffer->scan_el_attrs->attrs;
-                while (*attr++ != NULL)
-                        attrcount_orig++;
-        }
-        attrcount = attrcount_orig;
-        INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
-        if (channels) {
-                /* new magic */
-                for (i = 0; i < num_channels; i++) {
-                        if (channels[i].scan_index < 0)
-                                continue;
-
-                        /* Establish necessary mask length */
-                        if (channels[i].scan_index >
-                            (int)indio_dev->masklength - 1)
-                                indio_dev->masklength
-                                        = channels[i].scan_index + 1;
-
-                        ret = iio_buffer_add_channel_sysfs(indio_dev,
-                                                         &channels[i]);
-                        if (ret < 0)
-                                goto error_cleanup_dynamic;
-                        attrcount += ret;
-                        if (channels[i].type == IIO_TIMESTAMP)
-                                indio_dev->scan_index_timestamp =
-                                        channels[i].scan_index;
-                }
-                if (indio_dev->masklength && buffer->scan_mask == NULL) {
-                        buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
-                                                    sizeof(*buffer->scan_mask),
-                                                    GFP_KERNEL);
-                        if (buffer->scan_mask == NULL) {
-                                ret = -ENOMEM;
-                                goto error_cleanup_dynamic;
-                        }
-                }
-        }
-
-        buffer->scan_el_group.name = iio_scan_elements_group_name;
-
-        buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
-                                              sizeof(buffer->scan_el_group.attrs[0]),
-                                              GFP_KERNEL);
-        if (buffer->scan_el_group.attrs == NULL) {
-                ret = -ENOMEM;
-                goto error_free_scan_mask;
-        }
-        if (buffer->scan_el_attrs)
-                memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
-                       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
-        attrn = attrcount_orig;
-
-        list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
-                buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
-        indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
-
-        return 0;
-
-error_free_scan_mask:
-        kfree(buffer->scan_mask);
-error_cleanup_dynamic:
-        iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
-
-        return ret;
-}
-EXPORT_SYMBOL(iio_buffer_register);
-
-void iio_buffer_unregister(struct iio_dev *indio_dev)
-{
-        kfree(indio_dev->buffer->scan_mask);
-        kfree(indio_dev->buffer->scan_el_group.attrs);
-        iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
-}
-EXPORT_SYMBOL(iio_buffer_unregister);
-
-ssize_t iio_buffer_read_length(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
+static ssize_t iio_buffer_read_length(struct device *dev,
+                                      struct device_attribute *attr,
+                                      char *buf)
 {
         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
         struct iio_buffer *buffer = indio_dev->buffer;
 
-        if (buffer->access->get_length)
-                return sprintf(buf, "%d\n",
-                               buffer->access->get_length(buffer));
-
-        return 0;
+        return sprintf(buf, "%d\n", buffer->length);
 }
-EXPORT_SYMBOL(iio_buffer_read_length);
 
-ssize_t iio_buffer_write_length(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf,
-                                size_t len)
+static ssize_t iio_buffer_write_length(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t len)
 {
         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
         struct iio_buffer *buffer = indio_dev->buffer;
@@ -428,47 +406,28 @@ ssize_t iio_buffer_write_length(struct device *dev,
         if (ret)
                 return ret;
 
-        if (buffer->access->get_length)
-                if (val == buffer->access->get_length(buffer))
-                        return len;
+        if (val == buffer->length)
+                return len;
 
         mutex_lock(&indio_dev->mlock);
         if (iio_buffer_is_active(indio_dev->buffer)) {
                 ret = -EBUSY;
         } else {
-                if (buffer->access->set_length)
-                        buffer->access->set_length(buffer, val);
+                buffer->access->set_length(buffer, val);
                 ret = 0;
         }
         mutex_unlock(&indio_dev->mlock);
 
         return ret ? ret : len;
 }
-EXPORT_SYMBOL(iio_buffer_write_length);
 
-ssize_t iio_buffer_show_enable(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
+static ssize_t iio_buffer_show_enable(struct device *dev,
+                                      struct device_attribute *attr,
+                                      char *buf)
 {
         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
         return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
 }
-EXPORT_SYMBOL(iio_buffer_show_enable);
-
-/* Note NULL used as error indicator as it doesn't make sense. */
-static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
-                                          unsigned int masklength,
-                                          const unsigned long *mask)
-{
-        if (bitmap_empty(mask, masklength))
-                return NULL;
-        while (*av_masks) {
-                if (bitmap_subset(mask, av_masks, masklength))
-                        return av_masks;
-                av_masks += BITS_TO_LONGS(masklength);
-        }
-        return NULL;
-}
 
 static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
                                 const unsigned long *mask, bool timestamp)
@@ -680,6 +639,8 @@ static int __iio_update_buffers(struct iio_dev *indio_dev,
                         indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
                 } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
                         indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
+                } else if (indio_dev->modes & INDIO_BUFFER_SOFTWARE) {
+                        indio_dev->currentmode = INDIO_BUFFER_SOFTWARE;
                 } else { /* Should never be reached */
                         ret = -EINVAL;
                         goto error_run_postdisable;
@@ -755,10 +716,10 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(iio_update_buffers);
 
-ssize_t iio_buffer_store_enable(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf,
-                                size_t len)
+static ssize_t iio_buffer_store_enable(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf,
+                                       size_t len)
 {
         int ret;
         bool requested_state;
@@ -790,83 +751,146 @@ done:
         mutex_unlock(&indio_dev->mlock);
         return (ret < 0) ? ret : len;
 }
-EXPORT_SYMBOL(iio_buffer_store_enable);
 
-/**
- * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
- * @indio_dev: the iio device
- * @mask: scan mask to be checked
- *
- * Return true if exactly one bit is set in the scan mask, false otherwise. It
- * can be used for devices where only one channel can be active for sampling at
- * a time.
- */
-bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
-        const unsigned long *mask)
-{
-        return bitmap_weight(mask, indio_dev->masklength) == 1;
-}
-EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
-
-static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
-        const unsigned long *mask)
-{
-        if (!indio_dev->setup_ops->validate_scan_mask)
-                return true;
+static const char * const iio_scan_elements_group_name = "scan_elements";
 
-        return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
-}
+static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
+                   iio_buffer_write_length);
+static struct device_attribute dev_attr_length_ro = __ATTR(length,
+        S_IRUGO, iio_buffer_read_length, NULL);
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
+                   iio_buffer_show_enable, iio_buffer_store_enable);
 
-/**
- * iio_scan_mask_set() - set particular bit in the scan mask
- * @indio_dev: the iio device
- * @buffer: the buffer whose scan mask we are interested in
- * @bit: the bit to be set.
- *
- * Note that at this point we have no way of knowing what other
- * buffers might request, hence this code only verifies that the
- * individual buffers request is plausible.
- */
-int iio_scan_mask_set(struct iio_dev *indio_dev,
-                      struct iio_buffer *buffer, int bit)
+int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
 {
-        const unsigned long *mask;
-        unsigned long *trialmask;
+        struct iio_dev_attr *p;
+        struct attribute **attr;
+        struct iio_buffer *buffer = indio_dev->buffer;
+        int ret, i, attrn, attrcount, attrcount_orig = 0;
+        const struct iio_chan_spec *channels;
 
-        trialmask = kmalloc(sizeof(*trialmask)*
-                            BITS_TO_LONGS(indio_dev->masklength),
-                            GFP_KERNEL);
+        if (!buffer)
+                return 0;
 
-        if (trialmask == NULL)
+        attrcount = 0;
+        if (buffer->attrs) {
+                while (buffer->attrs[attrcount] != NULL)
+                        attrcount++;
+        }
+
+        buffer->buffer_group.name = "buffer";
+        buffer->buffer_group.attrs = kcalloc(attrcount + 3,
+                        sizeof(*buffer->buffer_group.attrs), GFP_KERNEL);
+        if (!buffer->buffer_group.attrs)
                 return -ENOMEM;
-        if (!indio_dev->masklength) {
-                WARN_ON("Trying to set scanmask prior to registering buffer\n");
-                goto err_invalid_mask;
+
+        if (buffer->access->set_length)
+                buffer->buffer_group.attrs[0] = &dev_attr_length.attr;
+        else
+                buffer->buffer_group.attrs[0] = &dev_attr_length_ro.attr;
+        buffer->buffer_group.attrs[1] = &dev_attr_enable.attr;
+        if (buffer->attrs)
+                memcpy(&buffer->buffer_group.attrs[2], buffer->attrs,
+                        sizeof(*&buffer->buffer_group.attrs) * attrcount);
+        buffer->buffer_group.attrs[attrcount+2] = NULL;
+
+        indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;
+
+        if (buffer->scan_el_attrs != NULL) {
+                attr = buffer->scan_el_attrs->attrs;
+                while (*attr++ != NULL)
+                        attrcount_orig++;
         }
-        bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
-        set_bit(bit, trialmask);
+        attrcount = attrcount_orig;
+        INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
+        channels = indio_dev->channels;
+        if (channels) {
+                /* new magic */
+                for (i = 0; i < indio_dev->num_channels; i++) {
+                        if (channels[i].scan_index < 0)
+                                continue;
 
-        if (!iio_validate_scan_mask(indio_dev, trialmask))
-                goto err_invalid_mask;
+                        /* Establish necessary mask length */
+                        if (channels[i].scan_index >
+                            (int)indio_dev->masklength - 1)
+                                indio_dev->masklength
+                                        = channels[i].scan_index + 1;
 
-        if (indio_dev->available_scan_masks) {
-                mask = iio_scan_mask_match(indio_dev->available_scan_masks,
-                                           indio_dev->masklength,
-                                           trialmask);
-                if (!mask)
-                        goto err_invalid_mask;
+                        ret = iio_buffer_add_channel_sysfs(indio_dev,
+                                                         &channels[i]);
+                        if (ret < 0)
+                                goto error_cleanup_dynamic;
+                        attrcount += ret;
+                        if (channels[i].type == IIO_TIMESTAMP)
+                                indio_dev->scan_index_timestamp =
+                                        channels[i].scan_index;
+                }
+                if (indio_dev->masklength && buffer->scan_mask == NULL) {
+                        buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
+                                                    sizeof(*buffer->scan_mask),
+                                                    GFP_KERNEL);
+                        if (buffer->scan_mask == NULL) {
+                                ret = -ENOMEM;
+                                goto error_cleanup_dynamic;
+                        }
+                }
         }
-        bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
 
-        kfree(trialmask);
+        buffer->scan_el_group.name = iio_scan_elements_group_name;
+
+        buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
+                                              sizeof(buffer->scan_el_group.attrs[0]),
+                                              GFP_KERNEL);
+        if (buffer->scan_el_group.attrs == NULL) {
+                ret = -ENOMEM;
+                goto error_free_scan_mask;
+        }
+        if (buffer->scan_el_attrs)
+                memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
+                       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
+        attrn = attrcount_orig;
+
+        list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
+                buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
+        indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
 
         return 0;
 
-err_invalid_mask:
-        kfree(trialmask);
-        return -EINVAL;
+error_free_scan_mask:
+        kfree(buffer->scan_mask);
+error_cleanup_dynamic:
+        iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
+        kfree(indio_dev->buffer->buffer_group.attrs);
+
+        return ret;
+}
+
+void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
+{
+        if (!indio_dev->buffer)
+                return;
+
+        kfree(indio_dev->buffer->scan_mask);
+        kfree(indio_dev->buffer->buffer_group.attrs);
+        kfree(indio_dev->buffer->scan_el_group.attrs);
+        iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
+}
+
+/**
+ * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
+ * @indio_dev: the iio device
+ * @mask: scan mask to be checked
+ *
+ * Return true if exactly one bit is set in the scan mask, false otherwise. It
+ * can be used for devices where only one channel can be active for sampling at
+ * a time.
+ */
+bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
+        const unsigned long *mask)
+{
+        return bitmap_weight(mask, indio_dev->masklength) == 1;
 }
-EXPORT_SYMBOL_GPL(iio_scan_mask_set);
+EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
 
 int iio_scan_mask_query(struct iio_dev *indio_dev,
                         struct iio_buffer *buffer, int bit)
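
For context: the zero-terminated `available_scan_masks` array walked by `iio_scan_mask_match()`, and the `validate_scan_mask` callback consulted by `iio_validate_scan_mask()`, are both supplied by individual drivers rather than by this file. Below is a minimal sketch of how a hypothetical driver might wire them up; the names, channel numbers, and mask values are illustrative only and are not part of this commit, and each mask entry assumes `masklength` fits in a single `unsigned long`.

```c
#include <linux/bits.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>

/*
 * Hypothetical scan configurations the hardware can capture. Each entry is
 * one mask (here a single unsigned long per entry), and the array ends with
 * a zero mask, which is what the `while (*av_masks)` loop in
 * iio_scan_mask_match() uses as its terminator.
 */
static const unsigned long example_scan_masks[] = {
	BIT(0) | BIT(1),	/* channels 0 and 1 sampled together */
	BIT(2),			/* channel 2 alone */
	0,			/* terminator */
};

/*
 * A device that can only stream one channel at a time can instead (or
 * additionally) reject other requests through the validate_scan_mask
 * callback; the core invokes it from iio_validate_scan_mask() before a
 * trial mask is accepted.
 */
static const struct iio_buffer_setup_ops example_setup_ops = {
	.validate_scan_mask = iio_validate_scan_mask_onehot,
};

static void example_configure_masks(struct iio_dev *indio_dev)
{
	/* Typically done in the driver's probe(), before registration. */
	indio_dev->available_scan_masks = example_scan_masks;
	indio_dev->setup_ops = &example_setup_ops;
}
```

With either hook in place, `iio_scan_mask_set()` (now static in this file) builds a trial bitmap, runs it past the driver's `validate_scan_mask` callback, and then requires it to be a subset of one of the advertised `available_scan_masks` entries before committing it to `buffer->scan_mask`.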