Diffstat (limited to 'drivers/iio/industrialio-buffer.c')
-rw-r--r--  drivers/iio/industrialio-buffer.c | 573
1 file changed, 363 insertions, 210 deletions
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index f971f79103ec..df919f44d513 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -37,11 +37,57 @@ static bool iio_buffer_is_active(struct iio_buffer *buf)
         return !list_empty(&buf->buffer_list);
 }
 
-static bool iio_buffer_data_available(struct iio_buffer *buf)
+static size_t iio_buffer_data_available(struct iio_buffer *buf)
 {
         return buf->access->data_available(buf);
 }
 
+static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
+                                   struct iio_buffer *buf, size_t required)
+{
+        if (!indio_dev->info->hwfifo_flush_to_buffer)
+                return -ENODEV;
+
+        return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
+}
+
+static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
+                             size_t to_wait, int to_flush)
+{
+        size_t avail;
+        int flushed = 0;
+
+        /* wakeup if the device was unregistered */
+        if (!indio_dev->info)
+                return true;
+
+        /* drain the buffer if it was disabled */
+        if (!iio_buffer_is_active(buf)) {
+                to_wait = min_t(size_t, to_wait, 1);
+                to_flush = 0;
+        }
+
+        avail = iio_buffer_data_available(buf);
+
+        if (avail >= to_wait) {
+                /* force a flush for non-blocking reads */
+                if (!to_wait && !avail && to_flush)
+                        iio_buffer_flush_hwfifo(indio_dev, buf, to_flush);
+                return true;
+        }
+
+        if (to_flush)
+                flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
+                                                  to_wait - avail);
+        if (flushed <= 0)
+                return false;
+
+        if (avail + flushed >= to_wait)
+                return true;
+
+        return false;
+}
+
 /**
  * iio_buffer_read_first_n_outer() - chrdev read for buffer access
  *
@@ -53,6 +99,9 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 {
         struct iio_dev *indio_dev = filp->private_data;
         struct iio_buffer *rb = indio_dev->buffer;
+        size_t datum_size;
+        size_t to_wait = 0;
+        size_t to_read;
         int ret;
 
         if (!indio_dev->info)
@@ -61,19 +110,28 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
         if (!rb || !rb->access->read_first_n)
                 return -EINVAL;
 
+        datum_size = rb->bytes_per_datum;
+
+        /*
+         * If datum_size is 0 there will never be anything to read from the
+         * buffer, so signal end of file now.
+         */
+        if (!datum_size)
+                return 0;
+
+        to_read = min_t(size_t, n / datum_size, rb->watermark);
+
+        if (!(filp->f_flags & O_NONBLOCK))
+                to_wait = to_read;
+
         do {
-                if (!iio_buffer_data_available(rb)) {
-                        if (filp->f_flags & O_NONBLOCK)
-                                return -EAGAIN;
+                ret = wait_event_interruptible(rb->pollq,
+                        iio_buffer_ready(indio_dev, rb, to_wait, to_read));
+                if (ret)
+                        return ret;
 
-                        ret = wait_event_interruptible(rb->pollq,
-                                        iio_buffer_data_available(rb) ||
-                                        indio_dev->info == NULL);
-                        if (ret)
-                                return ret;
-                        if (indio_dev->info == NULL)
-                                return -ENODEV;
-                }
+                if (!indio_dev->info)
+                        return -ENODEV;
 
                 ret = rb->access->read_first_n(rb, n, buf);
                 if (ret == 0 && (filp->f_flags & O_NONBLOCK))
@@ -96,9 +154,8 @@ unsigned int iio_buffer_poll(struct file *filp,
                 return -ENODEV;
 
         poll_wait(filp, &rb->pollq, wait);
-        if (iio_buffer_data_available(rb))
+        if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
                 return POLLIN | POLLRDNORM;
-        /* need a way of knowing if there may be enough data... */
         return 0;
 }
 
@@ -123,6 +180,7 @@ void iio_buffer_init(struct iio_buffer *buffer)
         INIT_LIST_HEAD(&buffer->buffer_list);
         init_waitqueue_head(&buffer->pollq);
         kref_init(&buffer->ref);
+        buffer->watermark = 1;
 }
 EXPORT_SYMBOL(iio_buffer_init);
 
@@ -178,6 +236,80 @@ static ssize_t iio_scan_el_show(struct device *dev,
         return sprintf(buf, "%d\n", ret);
 }
 
+/* Note NULL used as error indicator as it doesn't make sense. */
+static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
+                                                unsigned int masklength,
+                                                const unsigned long *mask)
+{
+        if (bitmap_empty(mask, masklength))
+                return NULL;
+        while (*av_masks) {
+                if (bitmap_subset(mask, av_masks, masklength))
+                        return av_masks;
+                av_masks += BITS_TO_LONGS(masklength);
+        }
+        return NULL;
+}
+
+static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
+        const unsigned long *mask)
+{
+        if (!indio_dev->setup_ops->validate_scan_mask)
+                return true;
+
+        return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
+}
+
+/**
+ * iio_scan_mask_set() - set particular bit in the scan mask
+ * @indio_dev: the iio device
+ * @buffer: the buffer whose scan mask we are interested in
+ * @bit: the bit to be set.
+ *
+ * Note that at this point we have no way of knowing what other
+ * buffers might request, hence this code only verifies that the
+ * individual buffers request is plausible.
+ */
+static int iio_scan_mask_set(struct iio_dev *indio_dev,
+                             struct iio_buffer *buffer, int bit)
+{
+        const unsigned long *mask;
+        unsigned long *trialmask;
+
+        trialmask = kmalloc(sizeof(*trialmask)*
+                            BITS_TO_LONGS(indio_dev->masklength),
+                            GFP_KERNEL);
+
+        if (trialmask == NULL)
+                return -ENOMEM;
+        if (!indio_dev->masklength) {
+                WARN_ON("Trying to set scanmask prior to registering buffer\n");
+                goto err_invalid_mask;
+        }
+        bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
+        set_bit(bit, trialmask);
+
+        if (!iio_validate_scan_mask(indio_dev, trialmask))
+                goto err_invalid_mask;
+
+        if (indio_dev->available_scan_masks) {
+                mask = iio_scan_mask_match(indio_dev->available_scan_masks,
+                                           indio_dev->masklength,
+                                           trialmask);
+                if (!mask)
+                        goto err_invalid_mask;
+        }
+        bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
+
+        kfree(trialmask);
+
+        return 0;
+
+err_invalid_mask:
+        kfree(trialmask);
+        return -EINVAL;
+}
+
 static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
 {
         clear_bit(bit, buffer->scan_mask);
@@ -309,115 +441,19 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
         return ret;
 }
 
-static const char * const iio_scan_elements_group_name = "scan_elements";
-
-int iio_buffer_register(struct iio_dev *indio_dev,
-                        const struct iio_chan_spec *channels,
-                        int num_channels)
-{
-        struct iio_dev_attr *p;
-        struct attribute **attr;
-        struct iio_buffer *buffer = indio_dev->buffer;
-        int ret, i, attrn, attrcount, attrcount_orig = 0;
-
-        if (buffer->attrs)
-                indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;
-
-        if (buffer->scan_el_attrs != NULL) {
-                attr = buffer->scan_el_attrs->attrs;
-                while (*attr++ != NULL)
-                        attrcount_orig++;
-        }
-        attrcount = attrcount_orig;
-        INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
-        if (channels) {
-                /* new magic */
-                for (i = 0; i < num_channels; i++) {
-                        if (channels[i].scan_index < 0)
-                                continue;
-
-                        /* Establish necessary mask length */
-                        if (channels[i].scan_index >
-                            (int)indio_dev->masklength - 1)
-                                indio_dev->masklength
-                                        = channels[i].scan_index + 1;
-
-                        ret = iio_buffer_add_channel_sysfs(indio_dev,
-                                                         &channels[i]);
-                        if (ret < 0)
-                                goto error_cleanup_dynamic;
-                        attrcount += ret;
-                        if (channels[i].type == IIO_TIMESTAMP)
-                                indio_dev->scan_index_timestamp =
-                                        channels[i].scan_index;
-                }
-                if (indio_dev->masklength && buffer->scan_mask == NULL) {
-                        buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
-                                                    sizeof(*buffer->scan_mask),
-                                                    GFP_KERNEL);
-                        if (buffer->scan_mask == NULL) {
-                                ret = -ENOMEM;
-                                goto error_cleanup_dynamic;
-                        }
-                }
-        }
-
-        buffer->scan_el_group.name = iio_scan_elements_group_name;
-
-        buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
-                                              sizeof(buffer->scan_el_group.attrs[0]),
-                                              GFP_KERNEL);
-        if (buffer->scan_el_group.attrs == NULL) {
-                ret = -ENOMEM;
-                goto error_free_scan_mask;
-        }
-        if (buffer->scan_el_attrs)
-                memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
-                       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
-        attrn = attrcount_orig;
-
-        list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
-                buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
-        indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
-
-        return 0;
-
-error_free_scan_mask:
-        kfree(buffer->scan_mask);
-error_cleanup_dynamic:
-        iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
-
-        return ret;
-}
-EXPORT_SYMBOL(iio_buffer_register);
-
-void iio_buffer_unregister(struct iio_dev *indio_dev)
-{
-        kfree(indio_dev->buffer->scan_mask);
-        kfree(indio_dev->buffer->scan_el_group.attrs);
-        iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
-}
-EXPORT_SYMBOL(iio_buffer_unregister);
-
-ssize_t iio_buffer_read_length(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
+static ssize_t iio_buffer_read_length(struct device *dev,
+                                      struct device_attribute *attr,
+                                      char *buf)
 {
         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
         struct iio_buffer *buffer = indio_dev->buffer;
 
-        if (buffer->access->get_length)
-                return sprintf(buf, "%d\n",
-                               buffer->access->get_length(buffer));
-
-        return 0;
+        return sprintf(buf, "%d\n", buffer->length);
 }
-EXPORT_SYMBOL(iio_buffer_read_length);
 
-ssize_t iio_buffer_write_length(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf,
-                                size_t len)
+static ssize_t iio_buffer_write_length(struct device *dev,
                                       struct device_attribute *attr,
+                                       const char *buf, size_t len)
 {
         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
         struct iio_buffer *buffer = indio_dev->buffer;
@@ -428,47 +464,33 @@ ssize_t iio_buffer_write_length(struct device *dev,
         if (ret)
                 return ret;
 
-        if (buffer->access->get_length)
-                if (val == buffer->access->get_length(buffer))
-                        return len;
+        if (val == buffer->length)
+                return len;
 
         mutex_lock(&indio_dev->mlock);
         if (iio_buffer_is_active(indio_dev->buffer)) {
                 ret = -EBUSY;
         } else {
-                if (buffer->access->set_length)
-                        buffer->access->set_length(buffer, val);
+                buffer->access->set_length(buffer, val);
                 ret = 0;
         }
+        if (ret)
+                goto out;
+        if (buffer->length && buffer->length < buffer->watermark)
+                buffer->watermark = buffer->length;
+out:
         mutex_unlock(&indio_dev->mlock);
 
         return ret ? ret : len;
 }
-EXPORT_SYMBOL(iio_buffer_write_length);
 
-ssize_t iio_buffer_show_enable(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
+static ssize_t iio_buffer_show_enable(struct device *dev,
+                                      struct device_attribute *attr,
+                                      char *buf)
 {
         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
         return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
 }
-EXPORT_SYMBOL(iio_buffer_show_enable);
-
-/* Note NULL used as error indicator as it doesn't make sense. */
-static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
-                                                unsigned int masklength,
-                                                const unsigned long *mask)
-{
-        if (bitmap_empty(mask, masklength))
-                return NULL;
-        while (*av_masks) {
-                if (bitmap_subset(mask, av_masks, masklength))
-                        return av_masks;
-                av_masks += BITS_TO_LONGS(masklength);
-        }
-        return NULL;
-}
 
 static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
                                 const unsigned long *mask, bool timestamp)
@@ -513,6 +535,7 @@ static void iio_buffer_activate(struct iio_dev *indio_dev,
 static void iio_buffer_deactivate(struct iio_buffer *buffer)
 {
         list_del_init(&buffer->buffer_list);
+        wake_up_interruptible(&buffer->pollq);
         iio_buffer_put(buffer);
 }
 
@@ -670,17 +693,16 @@ static int __iio_update_buffers(struct iio_dev *indio_dev,
                 }
         }
         /* Definitely possible for devices to support both of these. */
-        if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
-                if (!indio_dev->trig) {
-                        printk(KERN_INFO "Buffer not started: no trigger\n");
-                        ret = -EINVAL;
-                        /* Can only occur on first buffer */
-                        goto error_run_postdisable;
-                }
+        if ((indio_dev->modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
                 indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
         } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
                 indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
+        } else if (indio_dev->modes & INDIO_BUFFER_SOFTWARE) {
+                indio_dev->currentmode = INDIO_BUFFER_SOFTWARE;
         } else { /* Should never be reached */
+                /* Can only occur on first buffer */
+                if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
+                        pr_info("Buffer not started: no trigger\n");
                 ret = -EINVAL;
                 goto error_run_postdisable;
         }
@@ -755,10 +777,10 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(iio_update_buffers);
 
-ssize_t iio_buffer_store_enable(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf,
-                                size_t len)
+static ssize_t iio_buffer_store_enable(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf,
+                                       size_t len)
 {
         int ret;
         bool requested_state;
@@ -790,83 +812,204 @@ done:
         mutex_unlock(&indio_dev->mlock);
         return (ret < 0) ? ret : len;
 }
-EXPORT_SYMBOL(iio_buffer_store_enable);
 
-/**
- * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
- * @indio_dev: the iio device
- * @mask: scan mask to be checked
- *
- * Return true if exactly one bit is set in the scan mask, false otherwise. It
- * can be used for devices where only one channel can be active for sampling at
- * a time.
- */
-bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
-        const unsigned long *mask)
+static const char * const iio_scan_elements_group_name = "scan_elements";
+
+static ssize_t iio_buffer_show_watermark(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
 {
-        return bitmap_weight(mask, indio_dev->masklength) == 1;
+        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+        struct iio_buffer *buffer = indio_dev->buffer;
+
+        return sprintf(buf, "%u\n", buffer->watermark);
 }
-EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
 
-static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
-        const unsigned long *mask)
+static ssize_t iio_buffer_store_watermark(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf,
+                                          size_t len)
 {
-        if (!indio_dev->setup_ops->validate_scan_mask)
-                return true;
+        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+        struct iio_buffer *buffer = indio_dev->buffer;
+        unsigned int val;
+        int ret;
 
-        return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
+        ret = kstrtouint(buf, 10, &val);
+        if (ret)
+                return ret;
+        if (!val)
+                return -EINVAL;
+
+        mutex_lock(&indio_dev->mlock);
+
+        if (val > buffer->length) {
+                ret = -EINVAL;
+                goto out;
+        }
+
+        if (iio_buffer_is_active(indio_dev->buffer)) {
+                ret = -EBUSY;
+                goto out;
+        }
+
+        buffer->watermark = val;
+
+        if (indio_dev->info->hwfifo_set_watermark)
+                indio_dev->info->hwfifo_set_watermark(indio_dev, val);
+out:
+        mutex_unlock(&indio_dev->mlock);
+
+        return ret ? ret : len;
 }
 
-/**
- * iio_scan_mask_set() - set particular bit in the scan mask
- * @indio_dev: the iio device
- * @buffer: the buffer whose scan mask we are interested in
- * @bit: the bit to be set.
- *
- * Note that at this point we have no way of knowing what other
- * buffers might request, hence this code only verifies that the
- * individual buffers request is plausible.
- */
-int iio_scan_mask_set(struct iio_dev *indio_dev,
-                      struct iio_buffer *buffer, int bit)
+static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
+                   iio_buffer_write_length);
+static struct device_attribute dev_attr_length_ro = __ATTR(length,
+        S_IRUGO, iio_buffer_read_length, NULL);
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
+                   iio_buffer_show_enable, iio_buffer_store_enable);
+static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
+                   iio_buffer_show_watermark, iio_buffer_store_watermark);
+
+static struct attribute *iio_buffer_attrs[] = {
+        &dev_attr_length.attr,
+        &dev_attr_enable.attr,
+        &dev_attr_watermark.attr,
+};
+
+int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
 {
-        const unsigned long *mask;
-        unsigned long *trialmask;
+        struct iio_dev_attr *p;
+        struct attribute **attr;
+        struct iio_buffer *buffer = indio_dev->buffer;
+        int ret, i, attrn, attrcount, attrcount_orig = 0;
+        const struct iio_chan_spec *channels;
 
-        trialmask = kmalloc(sizeof(*trialmask)*
-                            BITS_TO_LONGS(indio_dev->masklength),
-                            GFP_KERNEL);
+        if (!buffer)
+                return 0;
 
-        if (trialmask == NULL)
+        attrcount = 0;
+        if (buffer->attrs) {
+                while (buffer->attrs[attrcount] != NULL)
+                        attrcount++;
+        }
+
+        attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
+                       sizeof(struct attribute *), GFP_KERNEL);
+        if (!attr)
                 return -ENOMEM;
-        if (!indio_dev->masklength) {
-                WARN_ON("Trying to set scanmask prior to registering buffer\n");
-                goto err_invalid_mask;
+
+        memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
+        if (!buffer->access->set_length)
+                attr[0] = &dev_attr_length_ro.attr;
+
+        if (buffer->attrs)
+                memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
+                       sizeof(struct attribute *) * attrcount);
+
+        attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;
+
+        buffer->buffer_group.name = "buffer";
+        buffer->buffer_group.attrs = attr;
+
+        indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;
+
+        if (buffer->scan_el_attrs != NULL) {
+                attr = buffer->scan_el_attrs->attrs;
+                while (*attr++ != NULL)
+                        attrcount_orig++;
         }
-        bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
-        set_bit(bit, trialmask);
+        attrcount = attrcount_orig;
+        INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
+        channels = indio_dev->channels;
+        if (channels) {
+                /* new magic */
+                for (i = 0; i < indio_dev->num_channels; i++) {
+                        if (channels[i].scan_index < 0)
+                                continue;
 
-        if (!iio_validate_scan_mask(indio_dev, trialmask))
-                goto err_invalid_mask;
+                        /* Establish necessary mask length */
+                        if (channels[i].scan_index >
+                            (int)indio_dev->masklength - 1)
+                                indio_dev->masklength
+                                        = channels[i].scan_index + 1;
 
-        if (indio_dev->available_scan_masks) {
-                mask = iio_scan_mask_match(indio_dev->available_scan_masks,
-                                           indio_dev->masklength,
-                                           trialmask);
-                if (!mask)
-                        goto err_invalid_mask;
+                        ret = iio_buffer_add_channel_sysfs(indio_dev,
+                                                         &channels[i]);
+                        if (ret < 0)
+                                goto error_cleanup_dynamic;
+                        attrcount += ret;
+                        if (channels[i].type == IIO_TIMESTAMP)
+                                indio_dev->scan_index_timestamp =
+                                        channels[i].scan_index;
+                }
+                if (indio_dev->masklength && buffer->scan_mask == NULL) {
+                        buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
+                                                    sizeof(*buffer->scan_mask),
+                                                    GFP_KERNEL);
+                        if (buffer->scan_mask == NULL) {
+                                ret = -ENOMEM;
+                                goto error_cleanup_dynamic;
+                        }
+                }
         }
-        bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
 
-        kfree(trialmask);
+        buffer->scan_el_group.name = iio_scan_elements_group_name;
+
+        buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
+                                              sizeof(buffer->scan_el_group.attrs[0]),
+                                              GFP_KERNEL);
+        if (buffer->scan_el_group.attrs == NULL) {
+                ret = -ENOMEM;
+                goto error_free_scan_mask;
+        }
+        if (buffer->scan_el_attrs)
+                memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
+                       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
+        attrn = attrcount_orig;
+
+        list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
+                buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
+        indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
 
         return 0;
 
-err_invalid_mask:
-        kfree(trialmask);
-        return -EINVAL;
+error_free_scan_mask:
+        kfree(buffer->scan_mask);
+error_cleanup_dynamic:
+        iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
+        kfree(indio_dev->buffer->buffer_group.attrs);
+
+        return ret;
+}
+
+void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
+{
+        if (!indio_dev->buffer)
+                return;
+
+        kfree(indio_dev->buffer->scan_mask);
+        kfree(indio_dev->buffer->buffer_group.attrs);
+        kfree(indio_dev->buffer->scan_el_group.attrs);
+        iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
+}
+
+/**
+ * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
+ * @indio_dev: the iio device
+ * @mask: scan mask to be checked
+ *
+ * Return true if exactly one bit is set in the scan mask, false otherwise. It
+ * can be used for devices where only one channel can be active for sampling at
+ * a time.
+ */
+bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
+        const unsigned long *mask)
+{
+        return bitmap_weight(mask, indio_dev->masklength) == 1;
 }
-EXPORT_SYMBOL_GPL(iio_scan_mask_set);
+EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
 
 int iio_scan_mask_query(struct iio_dev *indio_dev,
                         struct iio_buffer *buffer, int bit)
@@ -913,8 +1056,18 @@ static const void *iio_demux(struct iio_buffer *buffer,
 static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
 {
         const void *dataout = iio_demux(buffer, data);
+        int ret;
+
+        ret = buffer->access->store_to(buffer, dataout);
+        if (ret)
+                return ret;
 
-        return buffer->access->store_to(buffer, dataout);
+        /*
+         * We can't just test for watermark to decide if we wake the poll queue
+         * because read may request less samples than the watermark.
+         */
+        wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM);
+        return 0;
 }
 
 static void iio_buffer_demux_free(struct iio_buffer *buffer)
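
The core only calls the two new hwfifo callbacks when a driver supplies them in its struct iio_info. As a rough, hypothetical sketch of the driver side (not part of this patch; the my_adc_* names are invented and the callback signatures are inferred from the call sites above), a driver with a hardware FIFO would wire them up roughly like this:

/*
 * Hypothetical driver-side sketch, not part of this patch: how a driver with
 * a hardware FIFO might implement the two callbacks industrialio-buffer.c now
 * invokes.  The my_adc_* names are invented and the signatures are assumed
 * from the way the core calls them above.
 */
#include <linux/mutex.h>
#include <linux/iio/iio.h>

struct my_adc_state {
        struct mutex lock;              /* initialised in probe() */
        unsigned int fifo_watermark;
};

static int my_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
{
        struct my_adc_state *st = iio_priv(indio_dev);

        mutex_lock(&st->lock);
        /*
         * Program the device so it raises an interrupt once 'val' samples
         * sit in its FIFO; this skeleton only remembers the value.
         */
        st->fifo_watermark = val;
        mutex_unlock(&st->lock);

        return 0;
}

static int my_adc_flush_to_buffer(struct iio_dev *indio_dev, unsigned int count)
{
        /*
         * Read up to 'count' samples out of the hardware FIFO, push them into
         * the software buffer (e.g. with iio_push_to_buffers()), and return
         * how many were flushed so iio_buffer_ready() can tell whether enough
         * data is now available.  Nothing to flush in this skeleton.
         */
        return 0;
}

static const struct iio_info my_adc_info = {
        .hwfifo_set_watermark = my_adc_set_watermark,
        .hwfifo_flush_to_buffer = my_adc_flush_to_buffer,
        /* read_raw(), driver_module, etc. omitted for brevity */
};

From user space the change is visible as the new buffer/watermark attribute: writing e.g. 64 to /sys/bus/iio/devices/iio:deviceX/buffer/watermark makes poll() on the character device signal POLLIN only once at least 64 samples are buffered, while a blocking read() waits for min(requested, watermark) samples and asks the driver to flush its hardware FIFO through hwfifo_flush_to_buffer when that is needed to reach the target.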