author     Linus Torvalds <torvalds@linux-foundation.org>  2016-01-13 10:59:52 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-01-13 10:59:52 -0800
commit     d6a322774cb7096ca683fc46ddc9482e02ee6133 (patch)
tree       0e5b95d9899eefa5c58f23af8ecadd7e93010586 /drivers/dma/dmaengine.c
parent     4c257ec37bc365614933c7f0a7fe9b0688dfd1e7 (diff)
parent     8b648436eb45c1f561164b24aafd35fb2bee9cfc (diff)
Merge tag 'dmaengine-4.5-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
"This round we have few new features, new driver and updates to few
drivers.
The new features to dmaengine core are:
- A synchronized transfer-termination API for terminating dmaengine
transfers in either a synchronized or an asynchronous fashion, as
required by users. It is now used by the ALSA dmaengine lib and the
img, at_xdma and axi_dmac drivers.
- A universal API for channel requests, starting the consolidation of
the request flows. Its first user is the omap-dma driver.
- Descriptor reuse support, now used by the pxa_dma driver (a short
consumer-side sketch of the termination and reuse APIs follows the
quoted message)
Add/Remove:
- New STM32 DMA driver
- Removal of unused R-Car HPB-DMAC driver
Updates:
- ti-dma-crossbar updates for supporting eDMA
- tegra-apb pm updates
- idma64 updates
- mv_xor updates
- ste_dma updates"
* tag 'dmaengine-4.5-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (54 commits)
dmaengine: mv_xor: add suspend/resume support
dmaengine: mv_xor: de-duplicate mv_chan_set_mode*()
dmaengine: mv_xor: remove mv_xor_chan->current_type field
dmaengine: omap-dma: Add support for DMA filter mapping to slave devices
dmaengine: edma: Add support for DMA filter mapping to slave devices
dmaengine: core: Introduce new, universal API to request a channel
dmaengine: core: Move and merge the code paths using private_candidate
dmaengine: core: Skip mask matching when it is not provided to private_candidate
dmaengine: mdc: Correct terminate_all handling
dmaengine: edma: Add probe callback to edma_tptc_driver
dmaengine: dw: fix potential memory leak in dw_dma_parse_dt()
dmaengine: stm32-dma: Fix unchecked deference of chan->desc
dmaengine: sh: Remove unused R-Car HPB-DMAC driver
dmaengine: usb-dmac: Document SoC specific compatibility strings
ste_dma40: Delete an unnecessary variable initialisation in d40_probe()
ste_dma40: Delete another unnecessary check in d40_probe()
ste_dma40: Delete an unnecessary check before the function call "kmem_cache_destroy"
dmaengine: tegra-apb: Free interrupts before killing tasklets
dmaengine: tegra-apb: Update driver to use GFP_NOWAIT
dmaengine: tegra-apb: Only save channel state for those in use
...
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--  drivers/dma/dmaengine.c  178
1 file changed, 125 insertions(+), 53 deletions(-)
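The dmaengine.c changes below implement the universal request API from
the pull message: private_candidate() learns to accept a NULL mask, its
call sites are consolidated into a new find_candidate() helper,
dma_request_slave_channel_reason() becomes dma_request_chan() and gains
a fallback to per-device DMA filter maps, and dma_request_chan_by_mask()
is added. From a client driver the two new entry points look roughly
like this (a hedged sketch: the "rx" channel name and the probe helper
are hypothetical, and error handling is trimmed):

#include <linux/dmaengine.h>

static int client_probe_dma(struct device *dev)
{
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	/* By name: tries DT, then ACPI, then the dma_slave_map tables.
	 * Returns ERR_PTR(-EPROBE_DEFER) if the provider is not up yet.
	 */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);
	dma_release_channel(chan);

	/* By capability mask, for clients that can use any matching channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_CYCLIC, mask);
	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan))
		return PTR_ERR(chan);
	dma_release_channel(chan);

	return 0;
}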
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 3ecec1445adf..c50a247be2e0 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -43,6 +43,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -265,8 +266,11 @@ static void dma_chan_put(struct dma_chan *chan)
 	module_put(dma_chan_to_owner(chan));
 
 	/* This channel is not in use anymore, free it */
-	if (!chan->client_count && chan->device->device_free_chan_resources)
+	if (!chan->client_count && chan->device->device_free_chan_resources) {
+		/* Make sure all operations have completed */
+		dmaengine_synchronize(chan);
 		chan->device->device_free_chan_resources(chan);
+	}
 
 	/* If the channel is used via a DMA request router, free the mapping */
 	if (chan->router && chan->router->route_free) {
@@ -493,6 +497,7 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 	caps->dst_addr_widths = device->dst_addr_widths;
 	caps->directions = device->directions;
 	caps->residue_granularity = device->residue_granularity;
+	caps->descriptor_reuse = device->descriptor_reuse;
 
 	/*
 	 * Some devices implement only pause (e.g. to get residuum) but no
@@ -511,7 +516,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 {
 	struct dma_chan *chan;
 
-	if (!__dma_device_satisfies_mask(dev, mask)) {
+	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
 		pr_debug("%s: wrong capabilities\n", __func__);
 		return NULL;
 	}
@@ -542,6 +547,42 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 	return NULL;
 }
 
+static struct dma_chan *find_candidate(struct dma_device *device,
+				       const dma_cap_mask_t *mask,
+				       dma_filter_fn fn, void *fn_param)
+{
+	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
+	int err;
+
+	if (chan) {
+		/* Found a suitable channel, try to grab, prep, and return it.
+		 * We first set DMA_PRIVATE to disable balance_ref_count as this
+		 * channel will not be published in the general-purpose
+		 * allocator
+		 */
+		dma_cap_set(DMA_PRIVATE, device->cap_mask);
+		device->privatecnt++;
+		err = dma_chan_get(chan);
+
+		if (err) {
+			if (err == -ENODEV) {
+				pr_debug("%s: %s module removed\n", __func__,
+					 dma_chan_name(chan));
+				list_del_rcu(&device->global_node);
+			} else
+				pr_debug("%s: failed to get %s: (%d)\n",
+					 __func__, dma_chan_name(chan), err);
+
+			if (--device->privatecnt == 0)
+				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+
+			chan = ERR_PTR(err);
+		}
+	}
+
+	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
+}
+
 /**
  * dma_get_slave_channel - try to get specific channel exclusively
  * @chan: target channel
@@ -580,7 +621,6 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
 {
 	dma_cap_mask_t mask;
 	struct dma_chan *chan;
-	int err;
 
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
@@ -588,23 +628,11 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
 	/* lock against __dma_request_channel */
 	mutex_lock(&dma_list_mutex);
 
-	chan = private_candidate(&mask, device, NULL, NULL);
-	if (chan) {
-		dma_cap_set(DMA_PRIVATE, device->cap_mask);
-		device->privatecnt++;
-		err = dma_chan_get(chan);
-		if (err) {
-			pr_debug("%s: failed to get %s: (%d)\n",
-				__func__, dma_chan_name(chan), err);
-			chan = NULL;
-			if (--device->privatecnt == 0)
-				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
-		}
-	}
+	chan = find_candidate(device, &mask, NULL, NULL);
 
 	mutex_unlock(&dma_list_mutex);
 
-	return chan;
+	return IS_ERR(chan) ? NULL : chan;
 }
 EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
 
@@ -621,35 +649,15 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 {
 	struct dma_device *device, *_d;
 	struct dma_chan *chan = NULL;
-	int err;
 
 	/* Find a channel */
 	mutex_lock(&dma_list_mutex);
 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
-		chan = private_candidate(mask, device, fn, fn_param);
-		if (chan) {
-			/* Found a suitable channel, try to grab, prep, and
-			 * return it.  We first set DMA_PRIVATE to disable
-			 * balance_ref_count as this channel will not be
-			 * published in the general-purpose allocator
-			 */
-			dma_cap_set(DMA_PRIVATE, device->cap_mask);
-			device->privatecnt++;
-			err = dma_chan_get(chan);
+		chan = find_candidate(device, mask, fn, fn_param);
+		if (!IS_ERR(chan))
+			break;
 
-			if (err == -ENODEV) {
-				pr_debug("%s: %s module removed\n",
-					 __func__, dma_chan_name(chan));
-				list_del_rcu(&device->global_node);
-			} else if (err)
-				pr_debug("%s: failed to get %s: (%d)\n",
-					 __func__, dma_chan_name(chan), err);
-			else
-				break;
-			if (--device->privatecnt == 0)
-				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
-			chan = NULL;
-		}
+		chan = NULL;
 	}
 	mutex_unlock(&dma_list_mutex);
 
@@ -662,27 +670,73 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 }
 EXPORT_SYMBOL_GPL(__dma_request_channel);
 
+static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
+						    const char *name,
+						    struct device *dev)
+{
+	int i;
+
+	if (!device->filter.mapcnt)
+		return NULL;
+
+	for (i = 0; i < device->filter.mapcnt; i++) {
+		const struct dma_slave_map *map = &device->filter.map[i];
+
+		if (!strcmp(map->devname, dev_name(dev)) &&
+		    !strcmp(map->slave, name))
+			return map;
+	}
+
+	return NULL;
+}
+
 /**
- * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
+ * dma_request_chan - try to allocate an exclusive slave channel
  * @dev:	pointer to client device structure
  * @name:	slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
-struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
-						  const char *name)
+struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 {
+	struct dma_device *d, *_d;
+	struct dma_chan *chan = NULL;
+
 	/* If device-tree is present get slave info from here */
 	if (dev->of_node)
-		return of_dma_request_slave_channel(dev->of_node, name);
+		chan = of_dma_request_slave_channel(dev->of_node, name);
 
 	/* If device was enumerated by ACPI get slave info from here */
-	if (ACPI_HANDLE(dev))
-		return acpi_dma_request_slave_chan_by_name(dev, name);
+	if (has_acpi_companion(dev) && !chan)
+		chan = acpi_dma_request_slave_chan_by_name(dev, name);
+
+	if (chan) {
+		/* Valid channel found or requester need to be deferred */
+		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
+			return chan;
+	}
 
-	return ERR_PTR(-ENODEV);
+	/* Try to find the channel via the DMA filter map(s) */
+	mutex_lock(&dma_list_mutex);
+	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
+		dma_cap_mask_t mask;
+		const struct dma_slave_map *map = dma_filter_match(d, name, dev);
+
+		if (!map)
+			continue;
+
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+
+		chan = find_candidate(d, &mask, d->filter.fn, map->param);
+		if (!IS_ERR(chan))
+			break;
+	}
+	mutex_unlock(&dma_list_mutex);
+
+	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 }
-EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
+EXPORT_SYMBOL_GPL(dma_request_chan);
 
 /**
  * dma_request_slave_channel - try to allocate an exclusive slave channel
@@ -694,17 +748,35 @@ EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
 struct dma_chan *dma_request_slave_channel(struct device *dev,
 					   const char *name)
 {
-	struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
+	struct dma_chan *ch = dma_request_chan(dev, name);
 	if (IS_ERR(ch))
 		return NULL;
 
-	dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
-	ch->device->privatecnt++;
-
 	return ch;
 }
 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
 
+/**
+ * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
+ * @mask: capabilities that the channel must satisfy
+ *
+ * Returns pointer to appropriate DMA channel on success or an error pointer.
+ */
+struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
+{
+	struct dma_chan *chan;
+
+	if (!mask)
+		return ERR_PTR(-ENODEV);
+
+	chan = __dma_request_channel(mask, NULL, NULL);
+	if (!chan)
+		chan = ERR_PTR(-ENODEV);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
+
 void dma_release_channel(struct dma_chan *chan)
 {
 	mutex_lock(&dma_list_mutex);
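The dma_filter_match() helper added above matches per-device
dma_slave_map tables that a DMA provider registers on legacy (non-DT,
non-ACPI) platforms. A hedged provider-side sketch is below; the map
contents and device names are purely illustrative (the real tables live
in the omap-dma and edma patches of this series), while the filter.map,
filter.mapcnt and filter.fn fields are the ones the new core code reads:

#include <linux/kernel.h>
#include <linux/dmaengine.h>

/* Illustrative only: maps (consumer device, channel name) -> filter param */
static const struct dma_slave_map example_map[] = {
	{ .devname = "musb-hdrc.0", .slave = "rx", .param = (void *)1 },
	{ .devname = "musb-hdrc.0", .slave = "tx", .param = (void *)2 },
};

static void example_register_filter_map(struct dma_device *ddev,
					dma_filter_fn fn)
{
	ddev->filter.map = example_map;
	ddev->filter.mapcnt = ARRAY_SIZE(example_map);
	ddev->filter.fn = fn;	/* the provider's dma_filter_fn */
}

On a request by name, dma_request_chan() falls through the DT and ACPI
lookups to dma_filter_match(), and a hit hands map->param to the
provider's filter function via find_candidate().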