Diffstat (limited to 'drivers'): 112 files changed, 1384 insertions, 960 deletions
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index cd827625cf07..01504c819e8f 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -684,7 +684,7 @@ static int acpi_sbs_add(struct acpi_device *device) if (!sbs_manager_broken) { result = acpi_manager_get_info(sbs); if (!result) { - sbs->manager_present = 0; + sbs->manager_present = 1; for (id = 0; id < MAX_SBS_BAT; ++id) if ((sbs->batteries_supported & (1 << id))) acpi_battery_add(sbs, id); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 812523330a78..ec6c5c6e1ac9 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -2264,6 +2264,11 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) result, xferred); if (!img_request->result) img_request->result = result; + /* + * Need to end I/O on the entire obj_request worth of + * bytes in case of error. + */ + xferred = obj_request->length; } /* Image object requests don't own their page array */ diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 7a73a279e179..61c417b9e53f 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -158,9 +158,18 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, int entered_state; struct cpuidle_state *target_state = &drv->states[index]; + bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP); ktime_t time_start, time_end; s64 diff; + /* + * Tell the time framework to switch to a broadcast timer because our + * local timer will be shut down. If a local timer is used from another + * CPU as a broadcast timer, this call may fail if it is not available. + */ + if (broadcast && tick_broadcast_enter()) + return -EBUSY; + trace_cpu_idle_rcuidle(index, dev->cpu); time_start = ktime_get(); @@ -169,6 +178,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, time_end = ktime_get(); trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); + if (broadcast) { + if (WARN_ON_ONCE(!irqs_disabled())) + local_irq_disable(); + + tick_broadcast_exit(); + } + if (!cpuidle_state_is_coupled(dev, drv, entered_state)) local_irq_enable(); diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 681651aed320..0205ade0ba32 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -441,6 +441,7 @@ config IMG_MDC_DMA config XGENE_DMA tristate "APM X-Gene DMA support" + depends on ARCH_XGENE || COMPILE_TEST select DMA_ENGINE select DMA_ENGINE_RAID select ASYNC_TX_ENABLE_CHANNEL_SWITCH diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 49d396ec06e5..5de3cf453f35 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -474,7 +474,7 @@ static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x, u32 val = readl(ch->reg_config); val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK | - PL080_CONFIG_TC_IRQ_MASK); + PL080_CONFIG_TC_IRQ_MASK); writel(val, ch->reg_config); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 9e5949696b1b..052a2bd1a5cf 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -539,7 +539,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, } /** - * dma_request_slave_channel - try to get specific channel exclusively + * dma_get_slave_channel - try to get specific channel exclusively * @chan: target channel */ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) @@ -578,11 +578,15 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device) chan = private_candidate(&mask, device, NULL, 
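The cpuidle hunk above moves the broadcast-timer hand-off into the core: a state flagged CPUIDLE_FLAG_TIMER_STOP shuts down the CPU-local timer, so wakeup duty must first be handed to a broadcast clock event device, and entry must fail gracefully when no such device can cover the CPU. A minimal sketch of that flow; the function name and shape are illustrative, not the kernel's code:

	#include <linux/cpuidle.h>
	#include <linux/tick.h>

	/* Illustrative only: mirrors the flow now in cpuidle_enter_state(). */
	static int enter_timer_stop_state(struct cpuidle_device *dev,
					  struct cpuidle_state *state)
	{
		bool broadcast = !!(state->flags & CPUIDLE_FLAG_TIMER_STOP);

		/* Switch to a broadcast timer; this may fail if no broadcast
		 * device is available to stand in for the local timer. */
		if (broadcast && tick_broadcast_enter())
			return -EBUSY;

		/* ... architecture-specific low-power entry here ... */

		if (broadcast)
			tick_broadcast_exit();	/* take the local timer back */

		return 0;
	}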
NULL); if (chan) { + dma_cap_set(DMA_PRIVATE, device->cap_mask); + device->privatecnt++; err = dma_chan_get(chan); if (err) { pr_debug("%s: failed to get %s: (%d)\n", __func__, dma_chan_name(chan), err); chan = NULL; + if (--device->privatecnt == 0) + dma_cap_clear(DMA_PRIVATE, device->cap_mask); } } @@ -647,7 +651,7 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, EXPORT_SYMBOL_GPL(__dma_request_channel); /** - * dma_request_slave_channel - try to allocate an exclusive slave channel + * dma_request_slave_channel_reason - try to allocate an exclusive slave channel * @dev: pointer to client device structure * @name: slave channel name * @@ -835,6 +839,8 @@ int dma_async_device_register(struct dma_device *device) !device->device_prep_dma_pq); BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val); + BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && + !device->device_prep_dma_memset); BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt); BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) && diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 24e5290faa32..57ff46284f15 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -1364,7 +1364,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev) return ret; } -static struct platform_device_id ep93xx_dma_driver_ids[] = { +static const struct platform_device_id ep93xx_dma_driver_ids[] = { { "ep93xx-dma-m2p", 0 }, { "ep93xx-dma-m2m", 1 }, { }, diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index eed405976ea9..865501fcc67d 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -193,7 +193,7 @@ struct imxdma_filter_data { int request; }; -static struct platform_device_id imx_dma_devtype[] = { +static const struct platform_device_id imx_dma_devtype[] = { { .name = "imx1-dma", .driver_data = IMX1_DMA, diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 62bbd79338e0..77b6aab04f47 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -420,7 +420,7 @@ static struct sdma_driver_data sdma_imx6q = { .script_addrs = &sdma_script_imx6q, }; -static struct platform_device_id sdma_devtypes[] = { +static const struct platform_device_id sdma_devtypes[] = { { .name = "imx25-sdma", .driver_data = (unsigned long)&sdma_imx25, diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 1c56001df676..fbaf1ead2597 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -19,6 +19,7 @@ #include <linux/dma-mapping.h> #include <linux/spinlock.h> #include <linux/interrupt.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/memory.h> #include <linux/clk.h> @@ -30,6 +31,11 @@ #include "dmaengine.h" #include "mv_xor.h" +enum mv_xor_mode { + XOR_MODE_IN_REG, + XOR_MODE_IN_DESC, +}; + static void mv_xor_issue_pending(struct dma_chan *chan); #define to_mv_xor_chan(chan) \ @@ -56,18 +62,30 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, hw_desc->byte_count = byte_count; } -static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc, - u32 next_desc_addr) +static void mv_desc_set_mode(struct mv_xor_desc_slot *desc) { struct mv_xor_desc *hw_desc = desc->hw_desc; - BUG_ON(hw_desc->phy_next_desc); - hw_desc->phy_next_desc = next_desc_addr; + + switch (desc->type) { + case DMA_XOR: + case DMA_INTERRUPT: + hw_desc->desc_command |= XOR_DESC_OPERATION_XOR; + break; + case DMA_MEMCPY: + hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY; + break; + 
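Several hunks in this series (ep93xx, imx-dma, imx-sdma, and more below) constify their platform_device_id tables. This is safe because the platform core only reads the table through a const pointer. A generic sketch with hypothetical "foo" names:

	#include <linux/mod_devicetable.h>
	#include <linux/platform_device.h>

	static const struct platform_device_id foo_dma_ids[] = {
		{ "foo-dma-m2p", 0 },	/* .driver_data selects the variant */
		{ "foo-dma-m2m", 1 },
		{ /* sentinel */ },
	};
	MODULE_DEVICE_TABLE(platform, foo_dma_ids);

	static struct platform_driver foo_dma_driver = {
		.id_table = foo_dma_ids,	/* declared const in the core */
		/* .probe, .remove, .driver, ... */
	};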
default: + BUG(); + return; + } } -static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc) +static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc, + u32 next_desc_addr) { struct mv_xor_desc *hw_desc = desc->hw_desc; - hw_desc->phy_next_desc = 0; + BUG_ON(hw_desc->phy_next_desc); + hw_desc->phy_next_desc = next_desc_addr; } static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc, @@ -104,7 +122,7 @@ static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan) return intr_cause; } -static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) +static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan) { u32 val; @@ -114,14 +132,14 @@ static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) writel_relaxed(val, XOR_INTR_CAUSE(chan)); } -static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan) +static void mv_chan_clear_err_status(struct mv_xor_chan *chan) { u32 val = 0xFFFF0000 >> (chan->idx * 16); writel_relaxed(val, XOR_INTR_CAUSE(chan)); } -static void mv_set_mode(struct mv_xor_chan *chan, - enum dma_transaction_type type) +static void mv_chan_set_mode(struct mv_xor_chan *chan, + enum dma_transaction_type type) { u32 op_mode; u32 config = readl_relaxed(XOR_CONFIG(chan)); @@ -144,6 +162,25 @@ static void mv_set_mode(struct mv_xor_chan *chan, config &= ~0x7; config |= op_mode; + if (IS_ENABLED(__BIG_ENDIAN)) + config |= XOR_DESCRIPTOR_SWAP; + else + config &= ~XOR_DESCRIPTOR_SWAP; + + writel_relaxed(config, XOR_CONFIG(chan)); + chan->current_type = type; +} + +static void mv_chan_set_mode_to_desc(struct mv_xor_chan *chan) +{ + u32 op_mode; + u32 config = readl_relaxed(XOR_CONFIG(chan)); + + op_mode = XOR_OPERATION_MODE_IN_DESC; + + config &= ~0x7; + config |= op_mode; + #if defined(__BIG_ENDIAN) config |= XOR_DESCRIPTOR_SWAP; #else @@ -151,7 +188,6 @@ static void mv_set_mode(struct mv_xor_chan *chan, #endif writel_relaxed(config, XOR_CONFIG(chan)); - chan->current_type = type; } static void mv_chan_activate(struct mv_xor_chan *chan) @@ -171,28 +207,13 @@ static char mv_chan_is_busy(struct mv_xor_chan *chan) return (state == 1) ? 
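One caveat with the IS_ENABLED(__BIG_ENDIAN) test added above: IS_ENABLED() only reports true for macros defined to 1, the convention for Kconfig CONFIG_* symbols, while __BIG_ENDIAN comes from the byteorder headers and is defined to 4321, so that test appears to always evaluate to false. The preprocessor form that mv_chan_set_mode_to_desc() keeps in the same hunk is the reliable spelling:

	/* Endianness check as kept in mv_chan_set_mode_to_desc(); the
	 * IS_ENABLED() variant would never set the swap bit, because
	 * __BIG_ENDIAN is not a CONFIG_* symbol defined to 1. */
	#if defined(__BIG_ENDIAN)
		config |= XOR_DESCRIPTOR_SWAP;
	#else
		config &= ~XOR_DESCRIPTOR_SWAP;
	#endif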
1 : 0; } -/** - * mv_xor_free_slots - flags descriptor slots for reuse - * @slot: Slot to free - * Caller must hold &mv_chan->lock while calling this function - */ -static void mv_xor_free_slots(struct mv_xor_chan *mv_chan, - struct mv_xor_desc_slot *slot) -{ - dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n", - __func__, __LINE__, slot); - - slot->slot_used = 0; - -} - /* - * mv_xor_start_new_chain - program the engine to operate on new chain headed by - * sw_desc + * mv_chan_start_new_chain - program the engine to operate on new + * chain headed by sw_desc * Caller must hold &mv_chan->lock while calling this function */ -static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, - struct mv_xor_desc_slot *sw_desc) +static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan, + struct mv_xor_desc_slot *sw_desc) { dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n", __func__, __LINE__, sw_desc); @@ -205,8 +226,9 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, } static dma_cookie_t -mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, - struct mv_xor_chan *mv_chan, dma_cookie_t cookie) +mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc, + struct mv_xor_chan *mv_chan, + dma_cookie_t cookie) { BUG_ON(desc->async_tx.cookie < 0); @@ -230,93 +252,110 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, } static int -mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan) +mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan) { struct mv_xor_desc_slot *iter, *_iter; dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__); list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, - completed_node) { + node) { - if (async_tx_test_ack(&iter->async_tx)) { - list_del(&iter->completed_node); - mv_xor_free_slots(mv_chan, iter); - } + if (async_tx_test_ack(&iter->async_tx)) + list_move_tail(&iter->node, &mv_chan->free_slots); } return 0; } static int -mv_xor_clean_slot(struct mv_xor_desc_slot *desc, - struct mv_xor_chan *mv_chan) +mv_desc_clean_slot(struct mv_xor_desc_slot *desc, + struct mv_xor_chan *mv_chan) { dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n", __func__, __LINE__, desc, desc->async_tx.flags); - list_del(&desc->chain_node); + /* the client is allowed to attach dependent operations * until 'ack' is set */ - if (!async_tx_test_ack(&desc->async_tx)) { + if (!async_tx_test_ack(&desc->async_tx)) /* move this slot to the completed_slots */ - list_add_tail(&desc->completed_node, &mv_chan->completed_slots); - return 0; - } + list_move_tail(&desc->node, &mv_chan->completed_slots); + else + list_move_tail(&desc->node, &mv_chan->free_slots); - mv_xor_free_slots(mv_chan, desc); return 0; } /* This function must be called with the mv_xor_chan spinlock held */ -static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) +static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan) { struct mv_xor_desc_slot *iter, *_iter; dma_cookie_t cookie = 0; int busy = mv_chan_is_busy(mv_chan); u32 current_desc = mv_chan_get_current_desc(mv_chan); - int seen_current = 0; + int current_cleaned = 0; + struct mv_xor_desc *hw_desc; dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__); dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc); - mv_xor_clean_completed_slots(mv_chan); + mv_chan_clean_completed_slots(mv_chan); /* free completed slots from the chain starting with * the oldest descriptor */ list_for_each_entry_safe(iter, _iter, &mv_chan->chain, - chain_node) { - prefetch(_iter); - 
prefetch(&_iter->async_tx); + node) { - /* do not advance past the current descriptor loaded into the - * hardware channel, subsequent descriptors are either in - * process or have not been submitted - */ - if (seen_current) - break; + /* clean finished descriptors */ + hw_desc = iter->hw_desc; + if (hw_desc->status & XOR_DESC_SUCCESS) { + cookie = mv_desc_run_tx_complete_actions(iter, mv_chan, + cookie); - /* stop the search if we reach the current descriptor and the - * channel is busy - */ - if (iter->async_tx.phys == current_desc) { - seen_current = 1; - if (busy) + /* done processing desc, clean slot */ + mv_desc_clean_slot(iter, mv_chan); + + /* break if we did cleaned the current */ + if (iter->async_tx.phys == current_desc) { + current_cleaned = 1; break; + } + } else { + if (iter->async_tx.phys == current_desc) { + current_cleaned = 0; + break; + } } - - cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie); - - if (mv_xor_clean_slot(iter, mv_chan)) - break; } if ((busy == 0) && !list_empty(&mv_chan->chain)) { - struct mv_xor_desc_slot *chain_head; - chain_head = list_entry(mv_chan->chain.next, - struct mv_xor_desc_slot, - chain_node); - - mv_xor_start_new_chain(mv_chan, chain_head); + if (current_cleaned) { + /* + * current descriptor cleaned and removed, run + * from list head + */ + iter = list_entry(mv_chan->chain.next, + struct mv_xor_desc_slot, + node); + mv_chan_start_new_chain(mv_chan, iter); + } else { + if (!list_is_last(&iter->node, &mv_chan->chain)) { + /* + * descriptors are still waiting after + * current, trigger them + */ + iter = list_entry(iter->node.next, + struct mv_xor_desc_slot, + node); + mv_chan_start_new_chain(mv_chan, iter); + } else { + /* + * some descriptors are still waiting + * to be cleaned + */ + tasklet_schedule(&mv_chan->irq_tasklet); + } + } } if (cookie > 0) @@ -328,56 +367,35 @@ static void mv_xor_tasklet(unsigned long data) struct mv_xor_chan *chan = (struct mv_xor_chan *) data; spin_lock_bh(&chan->lock); - mv_xor_slot_cleanup(chan); + mv_chan_slot_cleanup(chan); spin_unlock_bh(&chan->lock); } static struct mv_xor_desc_slot * -mv_xor_alloc_slot(struct mv_xor_chan *mv_chan) +mv_chan_alloc_slot(struct mv_xor_chan *mv_chan) { - struct mv_xor_desc_slot *iter, *_iter; - int retry = 0; + struct mv_xor_desc_slot *iter; - /* start search from the last allocated descrtiptor - * if a contiguous allocation can not be found start searching - * from the beginning of the list - */ -retry: - if (retry == 0) - iter = mv_chan->last_used; - else - iter = list_entry(&mv_chan->all_slots, - struct mv_xor_desc_slot, - slot_node); - - list_for_each_entry_safe_continue( - iter, _iter, &mv_chan->all_slots, slot_node) { - - prefetch(_iter); - prefetch(&_iter->async_tx); - if (iter->slot_used) { - /* give up after finding the first busy slot - * on the second pass through the list - */ - if (retry) - break; - continue; - } + spin_lock_bh(&mv_chan->lock); + + if (!list_empty(&mv_chan->free_slots)) { + iter = list_first_entry(&mv_chan->free_slots, + struct mv_xor_desc_slot, + node); + + list_move_tail(&iter->node, &mv_chan->allocated_slots); + + spin_unlock_bh(&mv_chan->lock); /* pre-ack descriptor */ async_tx_ack(&iter->async_tx); - - iter->slot_used = 1; - INIT_LIST_HEAD(&iter->chain_node); iter->async_tx.cookie = -EBUSY; - mv_chan->last_used = iter; - mv_desc_clear_next_desc(iter); return iter; } - if (!retry++) - goto retry; + + spin_unlock_bh(&mv_chan->lock); /* try to free some slots if the allocation fails */ tasklet_schedule(&mv_chan->irq_tasklet); @@ 
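The cleanup rework above hinges on a single list_head per software descriptor: a slot sits on exactly one channel list at any time, so the old slot_node/chain_node/completed_node trio collapses into one node moved with list_move_tail(). A sketch of the allocation step under that model (names follow the driver; the fragment itself is illustrative):

	/*
	 * Life cycle: free_slots -> allocated_slots -> chain ->
	 * completed_slots -> free_slots.
	 */
	spin_lock_bh(&mv_chan->lock);
	if (!list_empty(&mv_chan->free_slots)) {
		slot = list_first_entry(&mv_chan->free_slots,
					struct mv_xor_desc_slot, node);
		/* no unlinking needed: a move re-chains the node */
		list_move_tail(&slot->node, &mv_chan->allocated_slots);
	}
	spin_unlock_bh(&mv_chan->lock);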
-403,14 +421,14 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) cookie = dma_cookie_assign(tx); if (list_empty(&mv_chan->chain)) - list_add_tail(&sw_desc->chain_node, &mv_chan->chain); + list_move_tail(&sw_desc->node, &mv_chan->chain); else { new_hw_chain = 0; old_chain_tail = list_entry(mv_chan->chain.prev, struct mv_xor_desc_slot, - chain_node); - list_add_tail(&sw_desc->chain_node, &mv_chan->chain); + node); + list_move_tail(&sw_desc->node, &mv_chan->chain); dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n", &old_chain_tail->async_tx.phys); @@ -431,7 +449,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) } if (new_hw_chain) - mv_xor_start_new_chain(mv_chan, sw_desc); + mv_chan_start_new_chain(mv_chan, sw_desc); spin_unlock_bh(&mv_chan->lock); @@ -463,26 +481,20 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) dma_async_tx_descriptor_init(&slot->async_tx, chan); slot->async_tx.tx_submit = mv_xor_tx_submit; - INIT_LIST_HEAD(&slot->chain_node); - INIT_LIST_HEAD(&slot->slot_node); + INIT_LIST_HEAD(&slot->node); dma_desc = mv_chan->dma_desc_pool; slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE; slot->idx = idx++; spin_lock_bh(&mv_chan->lock); mv_chan->slots_allocated = idx; - list_add_tail(&slot->slot_node, &mv_chan->all_slots); + list_add_tail(&slot->node, &mv_chan->free_slots); spin_unlock_bh(&mv_chan->lock); } - if (mv_chan->slots_allocated && !mv_chan->last_used) - mv_chan->last_used = list_entry(mv_chan->all_slots.next, - struct mv_xor_desc_slot, - slot_node); - dev_dbg(mv_chan_to_devp(mv_chan), - "allocated %d descriptor slots last_used: %p\n", - mv_chan->slots_allocated, mv_chan->last_used); + "allocated %d descriptor slots\n", + mv_chan->slots_allocated); return mv_chan->slots_allocated ? : -ENOMEM; } @@ -503,16 +515,17 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, "%s src_cnt: %d len: %u dest %pad flags: %ld\n", __func__, src_cnt, len, &dest, flags); - spin_lock_bh(&mv_chan->lock); - sw_desc = mv_xor_alloc_slot(mv_chan); + sw_desc = mv_chan_alloc_slot(mv_chan); if (sw_desc) { sw_desc->type = DMA_XOR; sw_desc->async_tx.flags = flags; mv_desc_init(sw_desc, dest, len, flags); + if (mv_chan->op_in_desc == XOR_MODE_IN_DESC) + mv_desc_set_mode(sw_desc); while (src_cnt--) mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]); } - spin_unlock_bh(&mv_chan->lock); + dev_dbg(mv_chan_to_devp(mv_chan), "%s sw_desc %p async_tx %p \n", __func__, sw_desc, &sw_desc->async_tx); @@ -556,25 +569,29 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan) spin_lock_bh(&mv_chan->lock); - mv_xor_slot_cleanup(mv_chan); + mv_chan_slot_cleanup(mv_chan); list_for_each_entry_safe(iter, _iter, &mv_chan->chain, - chain_node) { + node) { in_use_descs++; - list_del(&iter->chain_node); + list_move_tail(&iter->node, &mv_chan->free_slots); } list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, - completed_node) { + node) { in_use_descs++; - list_del(&iter->completed_node); + list_move_tail(&iter->node, &mv_chan->free_slots); + } + list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots, + node) { + in_use_descs++; + list_move_tail(&iter->node, &mv_chan->free_slots); } list_for_each_entry_safe_reverse( - iter, _iter, &mv_chan->all_slots, slot_node) { - list_del(&iter->slot_node); + iter, _iter, &mv_chan->free_slots, node) { + list_del(&iter->node); kfree(iter); mv_chan->slots_allocated--; } - mv_chan->last_used = NULL; dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n", __func__, 
mv_chan->slots_allocated); @@ -603,13 +620,13 @@ static enum dma_status mv_xor_status(struct dma_chan *chan, return ret; spin_lock_bh(&mv_chan->lock); - mv_xor_slot_cleanup(mv_chan); + mv_chan_slot_cleanup(mv_chan); spin_unlock_bh(&mv_chan->lock); return dma_cookie_status(chan, cookie, txstate); } -static void mv_dump_xor_regs(struct mv_xor_chan *chan) +static void mv_chan_dump_regs(struct mv_xor_chan *chan) { u32 val; @@ -632,8 +649,8 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan) dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val); } -static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, - u32 intr_cause) +static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan, + u32 intr_cause) { if (intr_cause & XOR_INT_ERR_DECODE) { dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n"); @@ -643,7 +660,7 @@ static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n", chan->idx, intr_cause); - mv_dump_xor_regs(chan); + mv_chan_dump_regs(chan); WARN_ON(1); } @@ -655,11 +672,11 @@ static irqreturn_t mv_xor_interrupt_handler(int irq, void *data) dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause); if (intr_cause & XOR_INTR_ERRORS) - mv_xor_err_interrupt_handler(chan, intr_cause); + mv_chan_err_interrupt_handler(chan, intr_cause); tasklet_schedule(&chan->irq_tasklet); - mv_xor_device_clear_eoc_cause(chan); + mv_chan_clear_eoc_cause(chan); return IRQ_HANDLED; } @@ -678,7 +695,7 @@ static void mv_xor_issue_pending(struct dma_chan *chan) * Perform a transaction to verify the HW works. */ -static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) +static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan) { int i, ret; void *src, *dest; @@ -787,7 +804,7 @@ out: #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */ static int -mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) +mv_chan_xor_self_test(struct mv_xor_chan *mv_chan) { int i, src_idx, ret; struct page *dest; @@ -951,7 +968,7 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan) static struct mv_xor_chan * mv_xor_channel_add(struct mv_xor_device *xordev, struct platform_device *pdev, - int idx, dma_cap_mask_t cap_mask, int irq) + int idx, dma_cap_mask_t cap_mask, int irq, int op_in_desc) { int ret = 0; struct mv_xor_chan *mv_chan; @@ -963,6 +980,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev, mv_chan->idx = idx; mv_chan->irq = irq; + mv_chan->op_in_desc = op_in_desc; dma_dev = &mv_chan->dmadev; @@ -1014,7 +1032,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev, mv_chan); /* clear errors before enabling interrupts */ - mv_xor_device_clear_err_status(mv_chan); + mv_chan_clear_err_status(mv_chan); ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler, 0, dev_name(&pdev->dev), mv_chan); @@ -1023,32 +1041,37 @@ mv_xor_channel_add(struct mv_xor_device *xordev, mv_chan_unmask_interrupts(mv_chan); - mv_set_mode(mv_chan, DMA_XOR); + if (mv_chan->op_in_desc == XOR_MODE_IN_DESC) + mv_chan_set_mode_to_desc(mv_chan); + else + mv_chan_set_mode(mv_chan, DMA_XOR); spin_lock_init(&mv_chan->lock); INIT_LIST_HEAD(&mv_chan->chain); INIT_LIST_HEAD(&mv_chan->completed_slots); - INIT_LIST_HEAD(&mv_chan->all_slots); + INIT_LIST_HEAD(&mv_chan->free_slots); + INIT_LIST_HEAD(&mv_chan->allocated_slots); mv_chan->dmachan.device = dma_dev; dma_cookie_init(&mv_chan->dmachan); list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels); if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { - ret = 
mv_xor_memcpy_self_test(mv_chan); + ret = mv_chan_memcpy_self_test(mv_chan); dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); if (ret) goto err_free_irq; } if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { - ret = mv_xor_xor_self_test(mv_chan); + ret = mv_chan_xor_self_test(mv_chan); dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); if (ret) goto err_free_irq; } - dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n", + dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n", + mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode", dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); @@ -1097,6 +1120,13 @@ mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, writel(0, base + WINDOW_OVERRIDE_CTRL(1)); } +static const struct of_device_id mv_xor_dt_ids[] = { + { .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG }, + { .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC }, + {}, +}; +MODULE_DEVICE_TABLE(of, mv_xor_dt_ids); + static int mv_xor_probe(struct platform_device *pdev) { const struct mbus_dram_target_info *dram; @@ -1104,6 +1134,7 @@ static int mv_xor_probe(struct platform_device *pdev) struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev); struct resource *res; int i, ret; + int op_in_desc; dev_notice(&pdev->dev, "Marvell shared XOR driver\n"); @@ -1148,11 +1179,15 @@ static int mv_xor_probe(struct platform_device *pdev) if (pdev->dev.of_node) { struct device_node *np; int i = 0; + const struct of_device_id *of_id = + of_match_device(mv_xor_dt_ids, + &pdev->dev); for_each_child_of_node(pdev->dev.of_node, np) { struct mv_xor_chan *chan; dma_cap_mask_t cap_mask; int irq; + op_in_desc = (int)of_id->data; dma_cap_zero(cap_mask); if (of_property_read_bool(np, "dmacap,memcpy")) @@ -1169,7 +1204,7 @@ static int mv_xor_probe(struct platform_device *pdev) } chan = mv_xor_channel_add(xordev, pdev, i, - cap_mask, irq); + cap_mask, irq, op_in_desc); if (IS_ERR(chan)) { ret = PTR_ERR(chan); irq_dispose_mapping(irq); @@ -1198,7 +1233,8 @@ static int mv_xor_probe(struct platform_device *pdev) } chan = mv_xor_channel_add(xordev, pdev, i, - cd->cap_mask, irq); + cd->cap_mask, irq, + XOR_MODE_IN_REG); if (IS_ERR(chan)) { ret = PTR_ERR(chan); goto err_channel_add; @@ -1244,14 +1280,6 @@ static int mv_xor_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_OF -static const struct of_device_id mv_xor_dt_ids[] = { - { .compatible = "marvell,orion-xor", }, - {}, -}; -MODULE_DEVICE_TABLE(of, mv_xor_dt_ids); -#endif - static struct platform_driver mv_xor_driver = { .probe = mv_xor_probe, .remove = mv_xor_remove, diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index 91958dba39a2..b7455b42137b 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h @@ -19,7 +19,7 @@ #include <linux/dmaengine.h> #include <linux/interrupt.h> -#define MV_XOR_POOL_SIZE PAGE_SIZE +#define MV_XOR_POOL_SIZE (MV_XOR_SLOT_SIZE * 3072) #define MV_XOR_SLOT_SIZE 64 #define MV_XOR_THRESHOLD 1 #define MV_XOR_MAX_CHANNELS 2 @@ -30,7 +30,13 @@ /* Values for the XOR_CONFIG register */ #define XOR_OPERATION_MODE_XOR 0 #define XOR_OPERATION_MODE_MEMCPY 2 +#define XOR_OPERATION_MODE_IN_DESC 7 #define XOR_DESCRIPTOR_SWAP BIT(14) +#define XOR_DESC_SUCCESS 0x40000000 + +#define XOR_DESC_OPERATION_XOR (0 << 24) +#define XOR_DESC_OPERATION_CRC32C (1 << 24) +#define XOR_DESC_OPERATION_MEMCPY (2 << 24) #define XOR_DESC_DMA_OWNED BIT(31) #define 
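The probe path above selects the operating mode from the matched compatible string, via the .data field of mv_xor_dt_ids. A condensed sketch of the idiom (error handling elided; the cast through unsigned long to silence pointer-size warnings is an editorial choice, the hunk casts straight to int):

	const struct of_device_id *of_id =
		of_match_device(mv_xor_dt_ids, &pdev->dev);
	int op_in_desc = XOR_MODE_IN_REG;

	if (of_id)	/* .data holds XOR_MODE_IN_REG or XOR_MODE_IN_DESC */
		op_in_desc = (int)(unsigned long)of_id->data;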
XOR_DESC_EOD_INT_EN BIT(31) @@ -88,13 +94,14 @@ struct mv_xor_device { * @mmr_base: memory mapped register base * @idx: the index of the xor channel * @chain: device chain view of the descriptors + * @free_slots: free slots usable by the channel + * @allocated_slots: slots allocated by the driver * @completed_slots: slots completed by HW but still need to be acked * @device: parent device * @common: common dmaengine channel object members - * @last_used: place holder for allocation to continue from where it left off - * @all_slots: complete domain of slots usable by the channel * @slots_allocated: records the actual size of the descriptor slot pool * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs + * @op_in_desc: new mode of driver, each op is writen to descriptor. */ struct mv_xor_chan { int pending; @@ -105,16 +112,17 @@ struct mv_xor_chan { int irq; enum dma_transaction_type current_type; struct list_head chain; + struct list_head free_slots; + struct list_head allocated_slots; struct list_head completed_slots; dma_addr_t dma_desc_pool; void *dma_desc_pool_virt; size_t pool_size; struct dma_device dmadev; struct dma_chan dmachan; - struct mv_xor_desc_slot *last_used; - struct list_head all_slots; int slots_allocated; struct tasklet_struct irq_tasklet; + int op_in_desc; char dummy_src[MV_XOR_MIN_BYTE_COUNT]; char dummy_dst[MV_XOR_MIN_BYTE_COUNT]; dma_addr_t dummy_src_addr, dummy_dst_addr; @@ -122,9 +130,7 @@ struct mv_xor_chan { /** * struct mv_xor_desc_slot - software descriptor - * @slot_node: node on the mv_xor_chan.all_slots list - * @chain_node: node on the mv_xor_chan.chain list - * @completed_node: node on the mv_xor_chan.completed_slots list + * @node: node on the mv_xor_chan lists * @hw_desc: virtual address of the hardware descriptor chain * @phys: hardware address of the hardware descriptor chain * @slot_used: slot in use or not @@ -133,12 +139,9 @@ struct mv_xor_chan { * @async_tx: support for the async_tx api */ struct mv_xor_desc_slot { - struct list_head slot_node; - struct list_head chain_node; - struct list_head completed_node; + struct list_head node; enum dma_transaction_type type; void *hw_desc; - u16 slot_used; u16 idx; struct dma_async_tx_descriptor async_tx; }; diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 829ec686dac3..60de35251da5 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -170,7 +170,7 @@ static struct mxs_dma_type mxs_dma_types[] = { } }; -static struct platform_device_id mxs_dma_ids[] = { +static const struct platform_device_id mxs_dma_ids[] = { { .name = "imx23-dma-apbh", .driver_data = (kernel_ulong_t) &mxs_dma_types[0], diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 88b77c98365d..2b5a198ac77e 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -1455,7 +1455,7 @@ static int nbpf_remove(struct platform_device *pdev) return 0; } -static struct platform_device_id nbpf_ids[] = { +static const struct platform_device_id nbpf_ids[] = { {"nbpfaxi64dmac1b4", (kernel_ulong_t)&nbpf_cfg[NBPF1B4]}, {"nbpfaxi64dmac1b8", (kernel_ulong_t)&nbpf_cfg[NBPF1B8]}, {"nbpfaxi64dmac1b16", (kernel_ulong_t)&nbpf_cfg[NBPF1B16]}, diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 7f154c138200..249445c8a4c6 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -366,7 +366,7 @@ static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d, struct omap_sg *sg = d->sg + idx; unsigned cxsa, cxei, cxfi; - if (d->dir == DMA_DEV_TO_MEM) { + if (d->dir == DMA_DEV_TO_MEM || 
d->dir == DMA_MEM_TO_MEM) { cxsa = CDSA; cxei = CDEI; cxfi = CDFI; @@ -412,7 +412,7 @@ static void omap_dma_start_desc(struct omap_chan *c) if (dma_omap1()) omap_dma_chan_write(c, CCR2, d->ccr >> 16); - if (d->dir == DMA_DEV_TO_MEM) { + if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) { cxsa = CSSA; cxei = CSEI; cxfi = CSFI; @@ -953,6 +953,51 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( return vchan_tx_prep(&c->vc, &d->vd, flags); } +static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy( + struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long tx_flags) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + struct omap_desc *d; + uint8_t data_type; + + d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + data_type = __ffs((src | dest | len)); + if (data_type > CSDP_DATA_TYPE_32) + data_type = CSDP_DATA_TYPE_32; + + d->dir = DMA_MEM_TO_MEM; + d->dev_addr = src; + d->fi = 0; + d->es = data_type; + d->sg[0].en = len / BIT(data_type); + d->sg[0].fn = 1; + d->sg[0].addr = dest; + d->sglen = 1; + d->ccr = c->ccr; + d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC; + + d->cicr = CICR_DROP_IE; + if (tx_flags & DMA_PREP_INTERRUPT) + d->cicr |= CICR_FRAME_IE; + + d->csdp = data_type; + + if (dma_omap1()) { + d->cicr |= CICR_TOUT_IE; + d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF; + } else { + d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED; + d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; + d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64; + } + + return vchan_tx_prep(&c->vc, &d->vd, tx_flags); +} + static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg) { struct omap_chan *c = to_omap_dma_chan(chan); @@ -1098,12 +1143,14 @@ static int omap_dma_probe(struct platform_device *pdev) dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); + dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask); od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; od->ddev.device_free_chan_resources = omap_dma_free_chan_resources; od->ddev.device_tx_status = omap_dma_tx_status; od->ddev.device_issue_pending = omap_dma_issue_pending; od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; + od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy; od->ddev.device_config = omap_dma_slave_config; od->ddev.device_pause = omap_dma_pause; od->ddev.device_resume = omap_dma_resume; diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index a7d9d3029b14..3781f327eedc 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -1424,8 +1424,8 @@ static int pl330_submit_req(struct pl330_thread *thrd, goto xfer_exit; if (ret > pl330->mcbufsz / 2) { - dev_info(pl330->ddma.dev, "%s:%d Trying increasing mcbufsz\n", - __func__, __LINE__); + dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz (%i/%i)\n", + __func__, __LINE__, ret, pl330->mcbufsz / 2); ret = -ENOMEM; goto xfer_exit; } @@ -2581,12 +2581,14 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, { struct dma_pl330_desc *desc; struct dma_pl330_chan *pch = to_pchan(chan); - struct pl330_dmac *pl330 = pch->dmac; + struct pl330_dmac *pl330; int burst; if (unlikely(!pch || !len)) return NULL; + pl330 = pch->dmac; + desc = __pl330_prep_dma_memcpy(pch, dst, src, len); if (!desc) return NULL; diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 01dcaf21b988..17ccdfd28f37 100644 --- 
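In the new omap_dma_prep_dma_memcpy() above, the element size falls out of the lowest set bit of (src | dest | len): every operand is aligned to at least that power of two, so it bounds the widest safe access. A worked example, assuming the usual CSDP_DATA_TYPE_8/16/32 encoding of 0/1/2:

	/* src = 0x1000, dest = 0x2004, len = 0x100:
	 * src | dest | len = 0x3104, __ffs() = 2, so 32-bit elements
	 * (CSDP_DATA_TYPE_32) are the widest common alignment. */
	data_type = __ffs(src | dest | len);
	if (data_type > CSDP_DATA_TYPE_32)
		data_type = CSDP_DATA_TYPE_32;	/* cap at 32-bit */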
a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -1168,7 +1168,7 @@ static struct soc_data soc_s3c2443 = { .has_clocks = true, }; -static struct platform_device_id s3c24xx_dma_driver_ids[] = { +static const struct platform_device_id s3c24xx_dma_driver_ids[] = { { .name = "s3c2410-dma", .driver_data = (kernel_ulong_t)&soc_s3c2410, diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index a18d16cc4795..d5423a669ea0 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -183,7 +183,7 @@ struct rcar_dmac { unsigned int n_channels; struct rcar_dmac_chan *channels; - unsigned long modules[256 / BITS_PER_LONG]; + DECLARE_BITMAP(modules, 256); }; #define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine) diff --git a/drivers/dma/sh/shdma-r8a73a4.c b/drivers/dma/sh/shdma-r8a73a4.c index 4fb99970a3ea..96ea3828c3eb 100644 --- a/drivers/dma/sh/shdma-r8a73a4.c +++ b/drivers/dma/sh/shdma-r8a73a4.c @@ -11,7 +11,7 @@ #include "shdma-arm.h" -const unsigned int dma_ts_shift[] = SH_DMAE_TS_SHIFT; +static const unsigned int dma_ts_shift[] = SH_DMAE_TS_SHIFT; static const struct sh_dmae_slave_config dma_slaves[] = { { diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index f705798ce3eb..ebd8a5f398b0 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c @@ -673,6 +673,7 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec, * Power management */ +#ifdef CONFIG_PM static int usb_dmac_runtime_suspend(struct device *dev) { struct usb_dmac *dmac = dev_get_drvdata(dev); @@ -690,6 +691,7 @@ static int usb_dmac_runtime_resume(struct device *dev) return usb_dmac_init(dmac); } +#endif /* CONFIG_PM */ static const struct dev_pm_ops usb_dmac_pm = { SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume, diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index a1afda43b8ef..8c5186cc9f63 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -23,8 +23,13 @@ #include "dmaengine.h" +#define SIRFSOC_DMA_VER_A7V1 1 +#define SIRFSOC_DMA_VER_A7V2 2 +#define SIRFSOC_DMA_VER_A6 4 + #define SIRFSOC_DMA_DESCRIPTORS 16 #define SIRFSOC_DMA_CHANNELS 16 +#define SIRFSOC_DMA_TABLE_NUM 256 #define SIRFSOC_DMA_CH_ADDR 0x00 #define SIRFSOC_DMA_CH_XLEN 0x04 @@ -35,15 +40,44 @@ #define SIRFSOC_DMA_CH_VALID 0x140 #define SIRFSOC_DMA_CH_INT 0x144 #define SIRFSOC_DMA_INT_EN 0x148 -#define SIRFSOC_DMA_INT_EN_CLR 0x14C +#define SIRFSOC_DMA_INT_EN_CLR 0x14C #define SIRFSOC_DMA_CH_LOOP_CTRL 0x150 -#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR 0x15C +#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR 0x154 +#define SIRFSOC_DMA_WIDTH_ATLAS7 0x10 +#define SIRFSOC_DMA_VALID_ATLAS7 0x14 +#define SIRFSOC_DMA_INT_ATLAS7 0x18 +#define SIRFSOC_DMA_INT_EN_ATLAS7 0x1c +#define SIRFSOC_DMA_LOOP_CTRL_ATLAS7 0x20 +#define SIRFSOC_DMA_CUR_DATA_ADDR 0x34 +#define SIRFSOC_DMA_MUL_ATLAS7 0x38 +#define SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7 0x158 +#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7 0x15C +#define SIRFSOC_DMA_IOBG_SCMD_EN 0x800 +#define SIRFSOC_DMA_EARLY_RESP_SET 0x818 +#define SIRFSOC_DMA_EARLY_RESP_CLR 0x81C #define SIRFSOC_DMA_MODE_CTRL_BIT 4 #define SIRFSOC_DMA_DIR_CTRL_BIT 5 +#define SIRFSOC_DMA_MODE_CTRL_BIT_ATLAS7 2 +#define SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7 3 +#define SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7 4 +#define SIRFSOC_DMA_TAB_NUM_ATLAS7 7 +#define SIRFSOC_DMA_CHAIN_INT_BIT_ATLAS7 5 +#define SIRFSOC_DMA_CHAIN_FLAG_SHIFT_ATLAS7 25 +#define SIRFSOC_DMA_CHAIN_ADDR_SHIFT 32 + +#define SIRFSOC_DMA_INT_FINI_INT_ATLAS7 BIT(0) +#define 
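The rcar-dmac change above swaps an open-coded array for DECLARE_BITMAP(). Both forms allocate identical storage when the bit count divides BITS_PER_LONG, but the macro also rounds odd sizes up and signals that the field belongs to the bitmap API:

	#include <linux/bitmap.h>

	/* Expands to: unsigned long modules[BITS_TO_LONGS(256)];
	 * i.e. 4 longs on 64-bit, 8 on 32-bit - the same storage as the
	 * replaced "unsigned long modules[256 / BITS_PER_LONG]". */
	DECLARE_BITMAP(modules, 256);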
SIRFSOC_DMA_INT_CNT_INT_ATLAS7 BIT(1) +#define SIRFSOC_DMA_INT_PAU_INT_ATLAS7 BIT(2) +#define SIRFSOC_DMA_INT_LOOP_INT_ATLAS7 BIT(3) +#define SIRFSOC_DMA_INT_INV_INT_ATLAS7 BIT(4) +#define SIRFSOC_DMA_INT_END_INT_ATLAS7 BIT(5) +#define SIRFSOC_DMA_INT_ALL_ATLAS7 0x3F /* xlen and dma_width register is in 4 bytes boundary */ #define SIRFSOC_DMA_WORD_LEN 4 +#define SIRFSOC_DMA_XLEN_MAX_V1 0x800 +#define SIRFSOC_DMA_XLEN_MAX_V2 0x1000 struct sirfsoc_dma_desc { struct dma_async_tx_descriptor desc; @@ -56,7 +90,9 @@ struct sirfsoc_dma_desc { int width; /* DMA width */ int dir; bool cyclic; /* is loop DMA? */ + bool chain; /* is chain DMA? */ u32 addr; /* DMA buffer address */ + u64 chain_table[SIRFSOC_DMA_TABLE_NUM]; /* chain tbl */ }; struct sirfsoc_dma_chan { @@ -87,10 +123,25 @@ struct sirfsoc_dma { void __iomem *base; int irq; struct clk *clk; - bool is_marco; + int type; + void (*exec_desc)(struct sirfsoc_dma_desc *sdesc, + int cid, int burst_mode, void __iomem *base); struct sirfsoc_dma_regs regs_save; }; +struct sirfsoc_dmadata { + void (*exec)(struct sirfsoc_dma_desc *sdesc, + int cid, int burst_mode, void __iomem *base); + int type; +}; + +enum sirfsoc_dma_chain_flag { + SIRFSOC_DMA_CHAIN_NORMAL = 0x01, + SIRFSOC_DMA_CHAIN_PAUSE = 0x02, + SIRFSOC_DMA_CHAIN_LOOP = 0x03, + SIRFSOC_DMA_CHAIN_END = 0x04 +}; + #define DRV_NAME "sirfsoc_dma" static int sirfsoc_dma_runtime_suspend(struct device *dev); @@ -109,48 +160,105 @@ static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c) return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]); } +static void sirfsoc_dma_execute_hw_a7v2(struct sirfsoc_dma_desc *sdesc, + int cid, int burst_mode, void __iomem *base) +{ + if (sdesc->chain) { + /* DMA v2 HW chain mode */ + writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) | + (sdesc->chain << + SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) | + (0x8 << SIRFSOC_DMA_TAB_NUM_ATLAS7) | 0x3, + base + SIRFSOC_DMA_CH_CTRL); + } else { + /* DMA v2 legacy mode */ + writel_relaxed(sdesc->xlen, base + SIRFSOC_DMA_CH_XLEN); + writel_relaxed(sdesc->ylen, base + SIRFSOC_DMA_CH_YLEN); + writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_ATLAS7); + writel_relaxed((sdesc->width*((sdesc->ylen+1)>>1)), + base + SIRFSOC_DMA_MUL_ATLAS7); + writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) | + (sdesc->chain << + SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) | + 0x3, base + SIRFSOC_DMA_CH_CTRL); + } + writel_relaxed(sdesc->chain ? 
SIRFSOC_DMA_INT_END_INT_ATLAS7 : + (SIRFSOC_DMA_INT_FINI_INT_ATLAS7 | + SIRFSOC_DMA_INT_LOOP_INT_ATLAS7), + base + SIRFSOC_DMA_INT_EN_ATLAS7); + writel(sdesc->addr, base + SIRFSOC_DMA_CH_ADDR); + if (sdesc->cyclic) + writel(0x10001, base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7); +} + +static void sirfsoc_dma_execute_hw_a7v1(struct sirfsoc_dma_desc *sdesc, + int cid, int burst_mode, void __iomem *base) +{ + writel_relaxed(1, base + SIRFSOC_DMA_IOBG_SCMD_EN); + writel_relaxed((1 << cid), base + SIRFSOC_DMA_EARLY_RESP_SET); + writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4); + writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) | + (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT), + base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL); + writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN); + writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN); + writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) | + (1 << cid), base + SIRFSOC_DMA_INT_EN); + writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR); + if (sdesc->cyclic) { + writel((1 << cid) | 1 << (cid + 16) | + readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7), + base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7); + } + +} + +static void sirfsoc_dma_execute_hw_a6(struct sirfsoc_dma_desc *sdesc, + int cid, int burst_mode, void __iomem *base) +{ + writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4); + writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) | + (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT), + base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL); + writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN); + writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN); + writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) | + (1 << cid), base + SIRFSOC_DMA_INT_EN); + writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR); + if (sdesc->cyclic) { + writel((1 << cid) | 1 << (cid + 16) | + readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL), + base + SIRFSOC_DMA_CH_LOOP_CTRL); + } + +} + /* Execute all queued DMA descriptors */ static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan) { struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); int cid = schan->chan.chan_id; struct sirfsoc_dma_desc *sdesc = NULL; + void __iomem *base; /* * lock has been held by functions calling this, so we don't hold * lock again */ - + base = sdma->base; sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc, - node); + node); /* Move the first queued descriptor to active list */ list_move_tail(&sdesc->node, &schan->active); - /* Start the DMA transfer */ - writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 + - cid * 4); - writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) | - (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT), - sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL); - writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 + - SIRFSOC_DMA_CH_XLEN); - writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 + - SIRFSOC_DMA_CH_YLEN); - writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) | - (1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); + if (sdma->type == SIRFSOC_DMA_VER_A7V2) + cid = 0; - /* - * writel has an implict memory write barrier to make sure data is - * flushed into memory before starting DMA - */ - writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR); + /* Start the DMA transfer */ + sdma->exec_desc(sdesc, cid, schan->mode, base); - if (sdesc->cyclic) { - writel((1 << cid) | 1 << (cid + 16) | - 
readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL), - sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); + if (sdesc->cyclic) schan->happened_cyclic = schan->completed_cyclic = 0; - } } /* Interrupt handler */ @@ -160,27 +268,65 @@ static irqreturn_t sirfsoc_dma_irq(int irq, void *data) struct sirfsoc_dma_chan *schan; struct sirfsoc_dma_desc *sdesc = NULL; u32 is; + bool chain; int ch; + void __iomem *reg; + + switch (sdma->type) { + case SIRFSOC_DMA_VER_A6: + case SIRFSOC_DMA_VER_A7V1: + is = readl(sdma->base + SIRFSOC_DMA_CH_INT); + reg = sdma->base + SIRFSOC_DMA_CH_INT; + while ((ch = fls(is) - 1) >= 0) { + is &= ~(1 << ch); + writel_relaxed(1 << ch, reg); + schan = &sdma->channels[ch]; + spin_lock(&schan->lock); + sdesc = list_first_entry(&schan->active, + struct sirfsoc_dma_desc, node); + if (!sdesc->cyclic) { + /* Execute queued descriptors */ + list_splice_tail_init(&schan->active, + &schan->completed); + dma_cookie_complete(&sdesc->desc); + if (!list_empty(&schan->queued)) + sirfsoc_dma_execute(schan); + } else + schan->happened_cyclic++; + spin_unlock(&schan->lock); + } + break; - is = readl(sdma->base + SIRFSOC_DMA_CH_INT); - while ((ch = fls(is) - 1) >= 0) { - is &= ~(1 << ch); - writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT); - schan = &sdma->channels[ch]; + case SIRFSOC_DMA_VER_A7V2: + is = readl(sdma->base + SIRFSOC_DMA_INT_ATLAS7); + reg = sdma->base + SIRFSOC_DMA_INT_ATLAS7; + writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7, reg); + schan = &sdma->channels[0]; spin_lock(&schan->lock); - - sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, - node); + sdesc = list_first_entry(&schan->active, + struct sirfsoc_dma_desc, node); if (!sdesc->cyclic) { - /* Execute queued descriptors */ - list_splice_tail_init(&schan->active, &schan->completed); - if (!list_empty(&schan->queued)) - sirfsoc_dma_execute(schan); - } else + chain = sdesc->chain; + if ((chain && (is & SIRFSOC_DMA_INT_END_INT_ATLAS7)) || + (!chain && + (is & SIRFSOC_DMA_INT_FINI_INT_ATLAS7))) { + /* Execute queued descriptors */ + list_splice_tail_init(&schan->active, + &schan->completed); + dma_cookie_complete(&sdesc->desc); + if (!list_empty(&schan->queued)) + sirfsoc_dma_execute(schan); + } + } else if (sdesc->cyclic && (is & + SIRFSOC_DMA_INT_LOOP_INT_ATLAS7)) schan->happened_cyclic++; spin_unlock(&schan->lock); + break; + + default: + break; } /* Schedule tasklet */ @@ -227,16 +373,15 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma) schan->chan.completed_cookie = last_cookie; spin_unlock_irqrestore(&schan->lock, flags); } else { - /* for cyclic channel, desc is always in active list */ - sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, - node); - - if (!sdesc || (sdesc && !sdesc->cyclic)) { - /* without active cyclic DMA */ + if (list_empty(&schan->active)) { spin_unlock_irqrestore(&schan->lock, flags); continue; } + /* for cyclic channel, desc is always in active list */ + sdesc = list_first_entry(&schan->active, + struct sirfsoc_dma_desc, node); + /* cyclic DMA */ happened_cyclic = schan->happened_cyclic; spin_unlock_irqrestore(&schan->lock, flags); @@ -307,20 +452,32 @@ static int sirfsoc_dma_terminate_all(struct dma_chan *chan) spin_lock_irqsave(&schan->lock, flags); - if (!sdma->is_marco) { - writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) & - ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); - writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL) - & ~((1 << cid) | 1 << (cid + 16)), - sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); - } else { + 
switch (sdma->type) { + case SIRFSOC_DMA_VER_A7V1: writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR); writel_relaxed((1 << cid) | 1 << (cid + 16), - sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR); + sdma->base + + SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7); + writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID); + break; + case SIRFSOC_DMA_VER_A7V2: + writel_relaxed(0, sdma->base + SIRFSOC_DMA_INT_EN_ATLAS7); + writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7); + writel_relaxed(0, sdma->base + SIRFSOC_DMA_VALID_ATLAS7); + break; + case SIRFSOC_DMA_VER_A6: + writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) & + ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); + writel_relaxed(readl_relaxed(sdma->base + + SIRFSOC_DMA_CH_LOOP_CTRL) & + ~((1 << cid) | 1 << (cid + 16)), + sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); + writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID); + break; + default: + break; } - writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID); - list_splice_tail_init(&schan->active, &schan->free); list_splice_tail_init(&schan->queued, &schan->free); @@ -338,13 +495,25 @@ static int sirfsoc_dma_pause_chan(struct dma_chan *chan) spin_lock_irqsave(&schan->lock, flags); - if (!sdma->is_marco) - writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL) - & ~((1 << cid) | 1 << (cid + 16)), - sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); - else + switch (sdma->type) { + case SIRFSOC_DMA_VER_A7V1: writel_relaxed((1 << cid) | 1 << (cid + 16), - sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR); + sdma->base + + SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7); + break; + case SIRFSOC_DMA_VER_A7V2: + writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7); + break; + case SIRFSOC_DMA_VER_A6: + writel_relaxed(readl_relaxed(sdma->base + + SIRFSOC_DMA_CH_LOOP_CTRL) & + ~((1 << cid) | 1 << (cid + 16)), + sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); + break; + + default: + break; + } spin_unlock_irqrestore(&schan->lock, flags); @@ -359,14 +528,25 @@ static int sirfsoc_dma_resume_chan(struct dma_chan *chan) unsigned long flags; spin_lock_irqsave(&schan->lock, flags); - - if (!sdma->is_marco) - writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL) - | ((1 << cid) | 1 << (cid + 16)), - sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); - else + switch (sdma->type) { + case SIRFSOC_DMA_VER_A7V1: writel_relaxed((1 << cid) | 1 << (cid + 16), - sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); + sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7); + break; + case SIRFSOC_DMA_VER_A7V2: + writel_relaxed(0x10001, + sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7); + break; + case SIRFSOC_DMA_VER_A6: + writel_relaxed(readl_relaxed(sdma->base + + SIRFSOC_DMA_CH_LOOP_CTRL) | + ((1 << cid) | 1 << (cid + 16)), + sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); + break; + + default: + break; + } spin_unlock_irqrestore(&schan->lock, flags); @@ -473,14 +653,31 @@ sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, spin_lock_irqsave(&schan->lock, flags); - sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, - node); - dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) * - (sdesc->width * SIRFSOC_DMA_WORD_LEN); + if (list_empty(&schan->active)) { + ret = dma_cookie_status(chan, cookie, txstate); + dma_set_residue(txstate, 0); + spin_unlock_irqrestore(&schan->lock, flags); + return ret; + } + sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, node); + if (sdesc->cyclic) + dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) * + (sdesc->width * SIRFSOC_DMA_WORD_LEN); 
+ else + dma_request_bytes = sdesc->xlen * SIRFSOC_DMA_WORD_LEN; ret = dma_cookie_status(chan, cookie, txstate); - dma_pos = readl_relaxed(sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR) - << 2; + + if (sdma->type == SIRFSOC_DMA_VER_A7V2) + cid = 0; + + if (sdma->type == SIRFSOC_DMA_VER_A7V2) { + dma_pos = readl_relaxed(sdma->base + SIRFSOC_DMA_CUR_DATA_ADDR); + } else { + dma_pos = readl_relaxed( + sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR) << 2; + } + residue = dma_request_bytes - (dma_pos - sdesc->addr); dma_set_residue(txstate, residue); @@ -647,6 +844,7 @@ static int sirfsoc_dma_probe(struct platform_device *op) struct dma_device *dma; struct sirfsoc_dma *sdma; struct sirfsoc_dma_chan *schan; + struct sirfsoc_dmadata *data; struct resource res; ulong regs_start, regs_size; u32 id; @@ -657,9 +855,11 @@ static int sirfsoc_dma_probe(struct platform_device *op) dev_err(dev, "Memory exhausted!\n"); return -ENOMEM; } - - if (of_device_is_compatible(dn, "sirf,marco-dmac")) - sdma->is_marco = true; + data = (struct sirfsoc_dmadata *) + (of_match_device(op->dev.driver->of_match_table, + &op->dev)->data); + sdma->exec_desc = data->exec; + sdma->type = data->type; if (of_property_read_u32(dn, "cell-index", &id)) { dev_err(dev, "Fail to get DMAC index\n"); @@ -816,6 +1016,8 @@ static int sirfsoc_dma_pm_suspend(struct device *dev) struct sirfsoc_dma_chan *schan; int ch; int ret; + int count; + u32 int_offset; /* * if we were runtime-suspended before, resume to enable clock @@ -827,11 +1029,19 @@ static int sirfsoc_dma_pm_suspend(struct device *dev) return ret; } + if (sdma->type == SIRFSOC_DMA_VER_A7V2) { + count = 1; + int_offset = SIRFSOC_DMA_INT_EN_ATLAS7; + } else { + count = SIRFSOC_DMA_CHANNELS; + int_offset = SIRFSOC_DMA_INT_EN; + } + /* * DMA controller will lose all registers while suspending * so we need to save registers for active channels */ - for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) { + for (ch = 0; ch < count; ch++) { schan = &sdma->channels[ch]; if (list_empty(&schan->active)) continue; @@ -841,7 +1051,7 @@ static int sirfsoc_dma_pm_suspend(struct device *dev) save->ctrl[ch] = readl_relaxed(sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL); } - save->interrupt_en = readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN); + save->interrupt_en = readl_relaxed(sdma->base + int_offset); /* Disable clock */ sirfsoc_dma_runtime_suspend(dev); @@ -857,14 +1067,27 @@ static int sirfsoc_dma_pm_resume(struct device *dev) struct sirfsoc_dma_chan *schan; int ch; int ret; + int count; + u32 int_offset; + u32 width_offset; /* Enable clock before accessing register */ ret = sirfsoc_dma_runtime_resume(dev); if (ret < 0) return ret; - writel_relaxed(save->interrupt_en, sdma->base + SIRFSOC_DMA_INT_EN); - for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) { + if (sdma->type == SIRFSOC_DMA_VER_A7V2) { + count = 1; + int_offset = SIRFSOC_DMA_INT_EN_ATLAS7; + width_offset = SIRFSOC_DMA_WIDTH_ATLAS7; + } else { + count = SIRFSOC_DMA_CHANNELS; + int_offset = SIRFSOC_DMA_INT_EN; + width_offset = SIRFSOC_DMA_WIDTH_0; + } + + writel_relaxed(save->interrupt_en, sdma->base + int_offset); + for (ch = 0; ch < count; ch++) { schan = &sdma->channels[ch]; if (list_empty(&schan->active)) continue; @@ -872,15 +1095,21 @@ static int sirfsoc_dma_pm_resume(struct device *dev) struct sirfsoc_dma_desc, node); writel_relaxed(sdesc->width, - sdma->base + SIRFSOC_DMA_WIDTH_0 + ch * 4); + sdma->base + width_offset + ch * 4); writel_relaxed(sdesc->xlen, sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN); writel_relaxed(sdesc->ylen, 
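sirf-dma's probe above retires the old is_marco flag in favor of per-SoC match data carrying both a version tag and a hardware "exec" hook, so the submission hot path becomes one indirect call instead of repeated flag tests. A condensed sketch (fields as in the driver; NULL-checking of the match is elided as in the hunk):

	struct sirfsoc_dmadata *data = (struct sirfsoc_dmadata *)
		(of_match_device(op->dev.driver->of_match_table,
				 &op->dev)->data);

	sdma->exec_desc = data->exec;	/* a6 / a7v1 / a7v2 program routine */
	sdma->type = data->type;

	/* later, in sirfsoc_dma_execute(): */
	sdma->exec_desc(sdesc, cid, schan->mode, base);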
sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN); writel_relaxed(save->ctrl[ch], sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL); - writel_relaxed(sdesc->addr >> 2, - sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR); + if (sdma->type == SIRFSOC_DMA_VER_A7V2) { + writel_relaxed(sdesc->addr, + sdma->base + SIRFSOC_DMA_CH_ADDR); + } else { + writel_relaxed(sdesc->addr >> 2, + sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR); + + } } /* if we were runtime-suspended before, suspend again */ @@ -896,9 +1125,25 @@ static const struct dev_pm_ops sirfsoc_dma_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume) }; +struct sirfsoc_dmadata sirfsoc_dmadata_a6 = { + .exec = sirfsoc_dma_execute_hw_a6, + .type = SIRFSOC_DMA_VER_A6, +}; + +struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = { + .exec = sirfsoc_dma_execute_hw_a7v1, + .type = SIRFSOC_DMA_VER_A7V1, +}; + +struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = { + .exec = sirfsoc_dma_execute_hw_a7v2, + .type = SIRFSOC_DMA_VER_A7V2, +}; + static const struct of_device_id sirfsoc_dma_match[] = { - { .compatible = "sirf,prima2-dmac", }, - { .compatible = "sirf,marco-dmac", }, + { .compatible = "sirf,prima2-dmac", .data = &sirfsoc_dmadata_a6,}, + { .compatible = "sirf,atlas7-dmac", .data = &sirfsoc_dmadata_a7v1,}, + { .compatible = "sirf,atlas7-dmac-v2", .data = &sirfsoc_dmadata_a7v2,}, {}, }; @@ -925,7 +1170,7 @@ static void __exit sirfsoc_dma_exit(void) subsys_initcall(sirfsoc_dma_init); module_exit(sirfsoc_dma_exit); -MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, " - "Barry Song <baohua.song@csr.com>"); +MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>"); +MODULE_AUTHOR("Barry Song <baohua.song@csr.com>"); MODULE_DESCRIPTION("SIRFSOC DMA control driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 11e536586812..842ff97c2cfb 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -891,9 +891,21 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = { .nr_max_vchans = 37, }; +/* + * The H3 has 12 physical channels, a maximum DRQ port id of 27, + * and a total of 34 usable source and destination endpoints. 
+ */ + +static struct sun6i_dma_config sun8i_h3_dma_cfg = { + .nr_max_channels = 12, + .nr_max_requests = 27, + .nr_max_vchans = 34, +}; + static const struct of_device_id sun6i_dma_match[] = { { .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg }, { .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg }, + { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, { /* sentinel */ } }; diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index 6f80432a3f0a..7d2c17d8d30f 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) spin_lock_irqsave(&vc->lock, flags); cookie = dma_cookie_assign(tx); - list_add_tail(&vd->node, &vc->desc_submitted); + list_move_tail(&vd->node, &vc->desc_submitted); spin_unlock_irqrestore(&vc->lock, flags); dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", @@ -83,8 +83,10 @@ static void vchan_complete(unsigned long arg) cb_data = vd->tx.callback_param; list_del(&vd->node); - - vc->desc_free(vd); + if (async_tx_test_ack(&vd->tx)) + list_add(&vd->node, &vc->desc_allocated); + else + vc->desc_free(vd); if (cb) cb(cb_data); @@ -96,9 +98,13 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) while (!list_empty(head)) { struct virt_dma_desc *vd = list_first_entry(head, struct virt_dma_desc, node); - list_del(&vd->node); - dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); - vc->desc_free(vd); + if (async_tx_test_ack(&vd->tx)) { + list_move_tail(&vd->node, &vc->desc_allocated); + } else { + dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); + list_del(&vd->node); + vc->desc_free(vd); + } } } EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); @@ -108,6 +114,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) dma_cookie_init(&vc->chan); spin_lock_init(&vc->lock); + INIT_LIST_HEAD(&vc->desc_allocated); INIT_LIST_HEAD(&vc->desc_submitted); INIT_LIST_HEAD(&vc->desc_issued); INIT_LIST_HEAD(&vc->desc_completed); diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index 181b95267866..189e75dbcb15 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -29,6 +29,7 @@ struct virt_dma_chan { spinlock_t lock; /* protected by vc.lock */ + struct list_head desc_allocated; struct list_head desc_submitted; struct list_head desc_issued; struct list_head desc_completed; @@ -55,11 +56,16 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan struct virt_dma_desc *vd, unsigned long tx_flags) { extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); + unsigned long flags; dma_async_tx_descriptor_init(&vd->tx, &vc->chan); vd->tx.flags = tx_flags; vd->tx.tx_submit = vchan_tx_submit; + spin_lock_irqsave(&vc->lock, flags); + list_add_tail(&vd->node, &vc->desc_allocated); + spin_unlock_irqrestore(&vc->lock, flags); + return &vd->tx; } @@ -122,7 +128,8 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) } /** - * vchan_get_all_descriptors - obtain all submitted and issued descriptors + * vchan_get_all_descriptors - obtain all allocated, submitted and issued + * descriptors * vc: virtual channel to get descriptors from * head: list of descriptors found * @@ -134,6 +141,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, struct list_head *head) { + list_splice_tail_init(&vc->desc_allocated, 
head); list_splice_tail_init(&vc->desc_submitted, head); list_splice_tail_init(&vc->desc_issued, head); list_splice_tail_init(&vc->desc_completed, head); @@ -141,11 +149,14 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) { + struct virt_dma_desc *vd; unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&vc->lock, flags); vchan_get_all_descriptors(vc, &head); + list_for_each_entry(vd, &head, node) + async_tx_clear_ack(&vd->tx); spin_unlock_irqrestore(&vc->lock, flags); vchan_dma_desc_free_list(vc, &head); diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index f52e37502254..620fd55ec766 100755 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -124,32 +124,8 @@ #define XGENE_DMA_DESC_ELERR_POS 46 #define XGENE_DMA_DESC_RTYPE_POS 56 #define XGENE_DMA_DESC_LERR_POS 60 -#define XGENE_DMA_DESC_FLYBY_POS 4 #define XGENE_DMA_DESC_BUFLEN_POS 48 #define XGENE_DMA_DESC_HOENQ_NUM_POS 48 - -#define XGENE_DMA_DESC_NV_SET(m) \ - (((u64 *)(m))[0] |= XGENE_DMA_DESC_NV_BIT) -#define XGENE_DMA_DESC_IN_SET(m) \ - (((u64 *)(m))[0] |= XGENE_DMA_DESC_IN_BIT) -#define XGENE_DMA_DESC_RTYPE_SET(m, v) \ - (((u64 *)(m))[0] |= ((u64)(v) << XGENE_DMA_DESC_RTYPE_POS)) -#define XGENE_DMA_DESC_BUFADDR_SET(m, v) \ - (((u64 *)(m))[0] |= (v)) -#define XGENE_DMA_DESC_BUFLEN_SET(m, v) \ - (((u64 *)(m))[0] |= ((u64)(v) << XGENE_DMA_DESC_BUFLEN_POS)) -#define XGENE_DMA_DESC_C_SET(m) \ - (((u64 *)(m))[1] |= XGENE_DMA_DESC_C_BIT) -#define XGENE_DMA_DESC_FLYBY_SET(m, v) \ - (((u64 *)(m))[2] |= ((v) << XGENE_DMA_DESC_FLYBY_POS)) -#define XGENE_DMA_DESC_MULTI_SET(m, v, i) \ - (((u64 *)(m))[2] |= ((u64)(v) << (((i) + 1) * 8))) -#define XGENE_DMA_DESC_DR_SET(m) \ - (((u64 *)(m))[2] |= XGENE_DMA_DESC_DR_BIT) -#define XGENE_DMA_DESC_DST_ADDR_SET(m, v) \ - (((u64 *)(m))[3] |= (v)) -#define XGENE_DMA_DESC_H0ENQ_NUM_SET(m, v) \ - (((u64 *)(m))[3] |= ((u64)(v) << XGENE_DMA_DESC_HOENQ_NUM_POS)) #define XGENE_DMA_DESC_ELERR_RD(m) \ (((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3) #define XGENE_DMA_DESC_LERR_RD(m) \ @@ -158,14 +134,7 @@ (((elerr) << 4) | (lerr)) /* X-Gene DMA descriptor empty s/w signature */ -#define XGENE_DMA_DESC_EMPTY_INDEX 0 #define XGENE_DMA_DESC_EMPTY_SIGNATURE ~0ULL -#define XGENE_DMA_DESC_SET_EMPTY(m) \ - (((u64 *)(m))[XGENE_DMA_DESC_EMPTY_INDEX] = \ - XGENE_DMA_DESC_EMPTY_SIGNATURE) -#define XGENE_DMA_DESC_IS_EMPTY(m) \ - (((u64 *)(m))[XGENE_DMA_DESC_EMPTY_INDEX] == \ - XGENE_DMA_DESC_EMPTY_SIGNATURE) /* X-Gene DMA configurable parameters defines */ #define XGENE_DMA_RING_NUM 512 @@ -184,7 +153,7 @@ #define XGENE_DMA_XOR_ALIGNMENT 6 /* 64 Bytes */ #define XGENE_DMA_MAX_XOR_SRC 5 #define XGENE_DMA_16K_BUFFER_LEN_CODE 0x0 -#define XGENE_DMA_INVALID_LEN_CODE 0x7800 +#define XGENE_DMA_INVALID_LEN_CODE 0x7800000000000000ULL /* X-Gene DMA descriptor error codes */ #define ERR_DESC_AXI 0x01 @@ -214,10 +183,10 @@ #define ERR_DESC_SRC_INT 0xB /* X-Gene DMA flyby operation code */ -#define FLYBY_2SRC_XOR 0x8 -#define FLYBY_3SRC_XOR 0x9 -#define FLYBY_4SRC_XOR 0xA -#define FLYBY_5SRC_XOR 0xB +#define FLYBY_2SRC_XOR 0x80 +#define FLYBY_3SRC_XOR 0x90 +#define FLYBY_4SRC_XOR 0xA0 +#define FLYBY_5SRC_XOR 0xB0 /* X-Gene DMA SW descriptor flags */ #define XGENE_DMA_FLAG_64B_DESC BIT(0) @@ -238,10 +207,10 @@ dev_err(chan->dev, "%s: " fmt, chan->name, ##arg) struct xgene_dma_desc_hw { - u64 m0; - u64 m1; - u64 m2; - u64 m3; + __le64 m0; + __le64 m1; + __le64 m2; + __le64 m3; }; enum xgene_dma_ring_cfgsize { @@ -388,18 
+357,11 @@ static bool is_pq_enabled(struct xgene_dma *pdma) return !(val & XGENE_DMA_PQ_DISABLE_MASK); } -static void xgene_dma_cpu_to_le64(u64 *desc, int count) -{ - int i; - - for (i = 0; i < count; i++) - desc[i] = cpu_to_le64(desc[i]); -} - -static u16 xgene_dma_encode_len(u32 len) +static u64 xgene_dma_encode_len(size_t len) { return (len < XGENE_DMA_MAX_BYTE_CNT) ? - len : XGENE_DMA_16K_BUFFER_LEN_CODE; + ((u64)len << XGENE_DMA_DESC_BUFLEN_POS) : + XGENE_DMA_16K_BUFFER_LEN_CODE; } static u8 xgene_dma_encode_xor_flyby(u32 src_cnt) @@ -424,34 +386,50 @@ static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring) return XGENE_DMA_RING_DESC_CNT(ring_state); } -static void xgene_dma_set_src_buffer(void *ext8, size_t *len, +static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len, dma_addr_t *paddr) { size_t nbytes = (*len < XGENE_DMA_MAX_BYTE_CNT) ? *len : XGENE_DMA_MAX_BYTE_CNT; - XGENE_DMA_DESC_BUFADDR_SET(ext8, *paddr); - XGENE_DMA_DESC_BUFLEN_SET(ext8, xgene_dma_encode_len(nbytes)); + *ext8 |= cpu_to_le64(*paddr); + *ext8 |= cpu_to_le64(xgene_dma_encode_len(nbytes)); *len -= nbytes; *paddr += nbytes; } -static void xgene_dma_invalidate_buffer(void *ext8) +static void xgene_dma_invalidate_buffer(__le64 *ext8) { - XGENE_DMA_DESC_BUFLEN_SET(ext8, XGENE_DMA_INVALID_LEN_CODE); + *ext8 |= cpu_to_le64(XGENE_DMA_INVALID_LEN_CODE); } -static void *xgene_dma_lookup_ext8(u64 *desc, int idx) +static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx) { - return (idx % 2) ? (desc + idx - 1) : (desc + idx + 1); + switch (idx) { + case 0: + return &desc->m1; + case 1: + return &desc->m0; + case 2: + return &desc->m3; + case 3: + return &desc->m2; + default: + pr_err("Invalid dma descriptor index\n"); + } + + return NULL; } -static void xgene_dma_init_desc(void *desc, u16 dst_ring_num) +static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc, + u16 dst_ring_num) { - XGENE_DMA_DESC_C_SET(desc); /* Coherent IO */ - XGENE_DMA_DESC_IN_SET(desc); - XGENE_DMA_DESC_H0ENQ_NUM_SET(desc, dst_ring_num); - XGENE_DMA_DESC_RTYPE_SET(desc, XGENE_DMA_RING_OWNER_DMA); + desc->m0 |= cpu_to_le64(XGENE_DMA_DESC_IN_BIT); + desc->m0 |= cpu_to_le64((u64)XGENE_DMA_RING_OWNER_DMA << + XGENE_DMA_DESC_RTYPE_POS); + desc->m1 |= cpu_to_le64(XGENE_DMA_DESC_C_BIT); + desc->m3 |= cpu_to_le64((u64)dst_ring_num << + XGENE_DMA_DESC_HOENQ_NUM_POS); } static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan, @@ -459,7 +437,7 @@ static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan, dma_addr_t dst, dma_addr_t src, size_t len) { - void *desc1, *desc2; + struct xgene_dma_desc_hw *desc1, *desc2; int i; /* Get 1st descriptor */ @@ -467,23 +445,21 @@ static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan, xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num); /* Set destination address */ - XGENE_DMA_DESC_DR_SET(desc1); - XGENE_DMA_DESC_DST_ADDR_SET(desc1, dst); + desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT); + desc1->m3 |= cpu_to_le64(dst); /* Set 1st source address */ - xgene_dma_set_src_buffer(desc1 + 8, &len, &src); + xgene_dma_set_src_buffer(&desc1->m1, &len, &src); - if (len <= 0) { - desc2 = NULL; - goto skip_additional_src; - } + if (!len) + return; /* * We need to split this source buffer, * and need to use 2nd descriptor */ desc2 = &desc_sw->desc2; - XGENE_DMA_DESC_NV_SET(desc1); + desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT); /* Set 2nd to 5th source address */ for (i = 0; i < 4 && len; i++) @@ -496,12 +472,6 @@ static void xgene_dma_prep_cpy_desc(struct 
xgene_dma_chan *chan, /* Updated flag that we have prepared 64B descriptor */ desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC; - -skip_additional_src: - /* Hardware stores descriptor in little endian format */ - xgene_dma_cpu_to_le64(desc1, 4); - if (desc2) - xgene_dma_cpu_to_le64(desc2, 4); } static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan, @@ -510,7 +480,7 @@ static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan, u32 src_cnt, size_t *nbytes, const u8 *scf) { - void *desc1, *desc2; + struct xgene_dma_desc_hw *desc1, *desc2; size_t len = *nbytes; int i; @@ -521,28 +491,24 @@ static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan, xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num); /* Set destination address */ - XGENE_DMA_DESC_DR_SET(desc1); - XGENE_DMA_DESC_DST_ADDR_SET(desc1, *dst); + desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT); + desc1->m3 |= cpu_to_le64(*dst); /* We have multiple source addresses, so need to set NV bit*/ - XGENE_DMA_DESC_NV_SET(desc1); + desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT); /* Set flyby opcode */ - XGENE_DMA_DESC_FLYBY_SET(desc1, xgene_dma_encode_xor_flyby(src_cnt)); + desc1->m2 |= cpu_to_le64(xgene_dma_encode_xor_flyby(src_cnt)); /* Set 1st to 5th source addresses */ for (i = 0; i < src_cnt; i++) { len = *nbytes; - xgene_dma_set_src_buffer((i == 0) ? (desc1 + 8) : + xgene_dma_set_src_buffer((i == 0) ? &desc1->m1 : xgene_dma_lookup_ext8(desc2, i - 1), &len, &src[i]); - XGENE_DMA_DESC_MULTI_SET(desc1, scf[i], i); + desc1->m2 |= cpu_to_le64((scf[i] << ((i + 1) * 8))); } - /* Hardware stores descriptor in little endian format */ - xgene_dma_cpu_to_le64(desc1, 4); - xgene_dma_cpu_to_le64(desc2, 4); - /* Update meta data */ *nbytes = len; *dst += XGENE_DMA_MAX_BYTE_CNT; @@ -738,7 +704,7 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, * xgene_chan_xfer_ld_pending - push any pending transactions to hw * @chan : X-Gene DMA channel * - * LOCKING: must hold chan->desc_lock + * LOCKING: must hold chan->lock */ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) { @@ -808,7 +774,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan) desc_hw = &ring->desc_hw[ring->head]; /* Check if this descriptor has been completed */ - if (unlikely(XGENE_DMA_DESC_IS_EMPTY(desc_hw))) + if (unlikely(le64_to_cpu(desc_hw->m0) == + XGENE_DMA_DESC_EMPTY_SIGNATURE)) break; if (++ring->head == ring->slots) @@ -842,7 +809,7 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan) iowrite32(-1, ring->cmd); /* Mark this hw descriptor as processed */ - XGENE_DMA_DESC_SET_EMPTY(desc_hw); + desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE); xgene_dma_run_tx_complete_actions(chan, desc_sw); @@ -889,7 +856,7 @@ static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan) * @chan: X-Gene DMA channel * @list: the list to free * - * LOCKING: must hold chan->desc_lock + * LOCKING: must hold chan->lock */ static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan, struct list_head *list) @@ -900,15 +867,6 @@ static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan, xgene_dma_clean_descriptor(chan, desc); } -static void xgene_dma_free_tx_desc_list(struct xgene_dma_chan *chan, - struct list_head *list) -{ - struct xgene_dma_desc_sw *desc, *_desc; - - list_for_each_entry_safe(desc, _desc, list, node) - xgene_dma_clean_descriptor(chan, desc); -} - static void xgene_dma_free_chan_resources(struct dma_chan *dchan) { struct xgene_dma_chan *chan = to_dma_chan(dchan); @@ -985,7 
+943,7 @@ fail: if (!first) return NULL; - xgene_dma_free_tx_desc_list(chan, &first->tx_list); + xgene_dma_free_desc_list(chan, &first->tx_list); return NULL; } @@ -1093,7 +1051,7 @@ fail: if (!first) return NULL; - xgene_dma_free_tx_desc_list(chan, &first->tx_list); + xgene_dma_free_desc_list(chan, &first->tx_list); return NULL; } @@ -1141,7 +1099,7 @@ fail: if (!first) return NULL; - xgene_dma_free_tx_desc_list(chan, &first->tx_list); + xgene_dma_free_desc_list(chan, &first->tx_list); return NULL; } @@ -1218,7 +1176,7 @@ fail: if (!first) return NULL; - xgene_dma_free_tx_desc_list(chan, &first->tx_list); + xgene_dma_free_desc_list(chan, &first->tx_list); return NULL; } @@ -1316,7 +1274,6 @@ static void xgene_dma_setup_ring(struct xgene_dma_ring *ring) { void *ring_cfg = ring->state; u64 addr = ring->desc_paddr; - void *desc; u32 i, val; ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE; @@ -1358,8 +1315,10 @@ static void xgene_dma_setup_ring(struct xgene_dma_ring *ring) /* Set empty signature to DMA Rx ring descriptors */ for (i = 0; i < ring->slots; i++) { + struct xgene_dma_desc_hw *desc; + desc = &ring->desc_hw[i]; - XGENE_DMA_DESC_SET_EMPTY(desc); + desc->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE); } /* Enable DMA Rx ring interrupt */ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3da1af46625c..773d1d24e604 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -6074,6 +6074,8 @@ enum skl_disp_power_wells { #define GTFIFOCTL 0x120008 #define GT_FIFO_FREE_ENTRIES_MASK 0x7f #define GT_FIFO_NUM_RESERVED_ENTRIES 20 +#define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12) +#define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11) #define HSW_IDICR 0x9008 #define IDIHASHMSK(x) (((x) & 0x3f) << 16) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index ab5cc94588e1..ff2a74651dd4 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -360,6 +360,14 @@ static void __intel_uncore_early_sanitize(struct drm_device *dev, __raw_i915_write32(dev_priv, GTFIFODBG, __raw_i915_read32(dev_priv, GTFIFODBG)); + /* WaDisableShadowRegForCpd:chv */ + if (IS_CHERRYVIEW(dev)) { + __raw_i915_write32(dev_priv, GTFIFOCTL, + __raw_i915_read32(dev_priv, GTFIFOCTL) | + GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | + GT_FIFO_CTL_RC6_POLICY_STALL); + } + intel_uncore_forcewake_reset(dev, restore_forcewake); } diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index dac78ad24b31..42b2ea3fdcf3 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -580,6 +580,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, else radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; + /* if there is no audio, set MINM_OVER_MAXP */ + if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) + radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; if (rdev->family < CHIP_RV770) radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; /* use frac fb div on APUs */ diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index f57c1ab617bc..dd39f434b4a7 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -1761,17 +1761,15 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder 
*radeon_encoder = to_radeon_encoder(encoder); - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); int encoder_mode = atombios_get_encoder_mode(encoder); DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", radeon_encoder->encoder_id, mode, radeon_encoder->devices, radeon_encoder->active_device); - if (connector && (radeon_audio != 0) && + if ((radeon_audio != 0) && ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || - (ENCODER_MODE_IS_DP(encoder_mode) && - drm_detect_monitor_audio(radeon_connector_edid(connector))))) + ENCODER_MODE_IS_DP(encoder_mode))) radeon_audio_dpms(encoder, mode); switch (radeon_encoder->encoder_id) { diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 3adc2afe32aa..68fd9fc677e3 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -295,28 +295,3 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev, WREG32(DCCG_AUDIO_DTO1_MODULE, clock); } } - -void dce6_dp_enable(struct drm_encoder *encoder, bool enable) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - - if (!dig || !dig->afmt) - return; - - if (enable) { - WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, - EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); - WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, - EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ - EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ - EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ - EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ - } else { - WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); - } - - dig->afmt->enabled = enable; -} diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index c18d4ecbd95d..0926739c9fa7 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -219,13 +219,9 @@ void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset, WREG32(AFMT_AVI_INFO3 + offset, frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24)); - WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset, - HDMI_AVI_INFO_SEND | /* enable AVI info frames */ - HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */ - WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset, - HDMI_AVI_INFO_LINE(2), /* anything other than 0 */ - ~HDMI_AVI_INFO_LINE_MASK); + HDMI_AVI_INFO_LINE(2), /* anything other than 0 */ + ~HDMI_AVI_INFO_LINE_MASK); } void dce4_hdmi_audio_set_dto(struct radeon_device *rdev, @@ -370,9 +366,13 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset) WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset, AFMT_AUDIO_CHANNEL_ENABLE(0xff)); + WREG32(HDMI_AUDIO_PACKET_CONTROL + offset, + HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */ + HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */ + /* allow 60958 channel status and send audio packets fields to be updated */ - WREG32(AFMT_AUDIO_PACKET_CONTROL + offset, - AFMT_AUDIO_SAMPLE_SEND | AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE); + WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset, + AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE); } @@ -398,17 +398,26 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) return; if 
(enable) { - WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, - HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */ - - WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, - HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */ - HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */ + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); - WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, - HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ - HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ + if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { + WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, + HDMI_AVI_INFO_SEND | /* enable AVI info frames */ + HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */ + HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ + HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ + WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, + AFMT_AUDIO_SAMPLE_SEND); + } else { + WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, + HDMI_AVI_INFO_SEND | /* enable AVI info frames */ + HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */ + WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, + ~AFMT_AUDIO_SAMPLE_SEND); + } } else { + WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, + ~AFMT_AUDIO_SAMPLE_SEND); WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0); } @@ -424,20 +433,24 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); if (!dig || !dig->afmt) return; - if (enable) { + if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *dig_connector; uint32_t val; + WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, + AFMT_AUDIO_SAMPLE_SEND); + WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); - if (radeon_connector->con_priv) { + if (!ASIC_IS_DCE6(rdev) && radeon_connector->con_priv) { dig_connector = radeon_connector->con_priv; val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset); val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf); @@ -457,6 +470,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable) EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ } else { WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); + WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, + ~AFMT_AUDIO_SAMPLE_SEND); } dig->afmt->enabled = enable; diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index dd6606b8e23c..e85894ade95c 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -228,12 +228,13 @@ void r600_set_avi_packet(struct radeon_device *rdev, u32 offset, WREG32(HDMI0_AVI_INFO3 + offset, frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24)); + WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset, + HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 
*/ + WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset, - HDMI0_AVI_INFO_SEND | /* enable AVI info frames */ - HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */ + HDMI0_AVI_INFO_SEND | /* enable AVI info frames */ + HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */ - WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset, - HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */ } /* diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index 48d49e651a30..8b82abb78df1 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c @@ -102,7 +102,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); void evergreen_dp_enable(struct drm_encoder *encoder, bool enable); -void dce6_dp_enable(struct drm_encoder *encoder, bool enable); static const u32 pin_offsets[7] = { @@ -240,7 +239,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = { .set_avi_packet = evergreen_set_avi_packet, .set_audio_packet = dce4_set_audio_packet, .mode_set = radeon_audio_dp_mode_set, - .dpms = dce6_dp_enable, + .dpms = evergreen_dp_enable, }; static void radeon_audio_interface_init(struct radeon_device *rdev) @@ -461,30 +460,33 @@ void radeon_audio_detect(struct drm_connector *connector, if (!connector || !connector->encoder) return; + if (!radeon_encoder_is_digital(connector->encoder)) + return; + rdev = connector->encoder->dev->dev_private; radeon_encoder = to_radeon_encoder(connector->encoder); dig = radeon_encoder->enc_priv; - if (status == connector_status_connected) { - struct radeon_connector *radeon_connector; - int sink_type; - - if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) { - radeon_encoder->audio = NULL; - return; - } + if (!dig->afmt) + return; - radeon_connector = to_radeon_connector(connector); - sink_type = radeon_dp_getsinktype(radeon_connector); + if (status == connector_status_connected) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort && - sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) + radeon_dp_getsinktype(radeon_connector) == + CONNECTOR_OBJECT_ID_DISPLAYPORT) radeon_encoder->audio = rdev->audio.dp_funcs; else radeon_encoder->audio = rdev->audio.hdmi_funcs; dig->afmt->pin = radeon_audio_get_pin(connector->encoder); - radeon_audio_enable(rdev, dig->afmt->pin, 0xf); + if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { + radeon_audio_enable(rdev, dig->afmt->pin, 0xf); + } else { + radeon_audio_enable(rdev, dig->afmt->pin, 0); + dig->afmt->pin = NULL; + } } else { radeon_audio_enable(rdev, dig->afmt->pin, 0); dig->afmt->pin = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index cebb65e07e1d..d17d251dbd4f 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1379,8 +1379,10 @@ out: /* updated in get modes as well since we need to know if it's analog or digital */ radeon_connector_update_scratch_regs(connector, ret); - if (radeon_audio != 0) + if (radeon_audio != 0) { + radeon_connector_get_edid(connector); radeon_audio_detect(connector, ret); + } exit: pm_runtime_mark_last_busy(connector->dev->dev); @@ -1717,8 +1719,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force) radeon_connector_update_scratch_regs(connector, 
ret); - if (radeon_audio != 0) + if (radeon_audio != 0) { + radeon_connector_get_edid(connector); radeon_audio_detect(connector, ret); + } out: pm_runtime_mark_last_busy(connector->dev->dev); diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 4d0f96cc3da4..ab39b85e0f76 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -88,7 +88,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) p->dma_reloc_idx = 0; /* FIXME: we assume that each relocs use 4 dwords */ p->nrelocs = chunk->length_dw / 4; - p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL); + p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list)); if (p->relocs == NULL) { return -ENOMEM; } @@ -428,7 +428,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo } } kfree(parser->track); - kfree(parser->relocs); + drm_free_large(parser->relocs); drm_free_large(parser->vm_bos); for (i = 0; i < parser->nchunks; i++) drm_free_large(parser->chunks[i].kdata); diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index 01701376b239..535bf404b725 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -135,7 +135,7 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, while (it) { struct radeon_mn_node *node; struct radeon_bo *bo; - int r; + long r; node = container_of(it, struct radeon_mn_node, it); it = interval_tree_iter_next(it, start, end); @@ -144,19 +144,19 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, r = radeon_bo_reserve(bo, true); if (r) { - DRM_ERROR("(%d) failed to reserve user bo\n", r); + DRM_ERROR("(%ld) failed to reserve user bo\n", r); continue; } r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false, MAX_SCHEDULE_TIMEOUT); - if (r) - DRM_ERROR("(%d) failed to wait for user bo\n", r); + if (r <= 0) + DRM_ERROR("(%ld) failed to wait for user bo\n", r); radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); if (r) - DRM_ERROR("(%d) failed to validate user bo\n", r); + DRM_ERROR("(%ld) failed to validate user bo\n", r); radeon_bo_unreserve(bo); } diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 2a5a4a9e772d..de42fc4a22b8 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -473,6 +473,23 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, } mutex_lock(&vm->mutex); + soffset /= RADEON_GPU_PAGE_SIZE; + eoffset /= RADEON_GPU_PAGE_SIZE; + if (soffset || eoffset) { + struct interval_tree_node *it; + it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1); + if (it && it != &bo_va->it) { + struct radeon_bo_va *tmp; + tmp = container_of(it, struct radeon_bo_va, it); + /* bo and tmp overlap, invalid offset */ + dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with " + "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, + soffset, tmp->bo, tmp->it.start, tmp->it.last); + mutex_unlock(&vm->mutex); + return -EINVAL; + } + } + if (bo_va->it.start || bo_va->it.last) { if (bo_va->addr) { /* add a clone of the bo_va to clear the old address */ @@ -490,6 +507,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, spin_lock(&vm->status_lock); list_add(&tmp->vm_status, &vm->freed); spin_unlock(&vm->status_lock); + + bo_va->addr = 0; } interval_tree_remove(&bo_va->it, &vm->va); @@ -497,21 +516,7 @@ int 
radeon_vm_bo_set_addr(struct radeon_device *rdev, bo_va->it.last = 0; } - soffset /= RADEON_GPU_PAGE_SIZE; - eoffset /= RADEON_GPU_PAGE_SIZE; if (soffset || eoffset) { - struct interval_tree_node *it; - it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1); - if (it) { - struct radeon_bo_va *tmp; - tmp = container_of(it, struct radeon_bo_va, it); - /* bo and tmp overlap, invalid offset */ - dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with " - "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, - soffset, tmp->bo, tmp->it.start, tmp->it.last); - mutex_unlock(&vm->mutex); - return -EINVAL; - } bo_va->it.start = soffset; bo_va->it.last = eoffset - 1; interval_tree_insert(&bo_va->it, &vm->va); @@ -1107,7 +1112,8 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev, list_del(&bo_va->bo_list); mutex_lock(&vm->mutex); - interval_tree_remove(&bo_va->it, &vm->va); + if (bo_va->it.start || bo_va->it.last) + interval_tree_remove(&bo_va->it, &vm->va); spin_lock(&vm->status_lock); list_del(&bo_va->vm_status); diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index b35bccfeef79..ff8b83f5e929 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2924,6 +2924,7 @@ struct si_dpm_quirk { static struct si_dpm_quirk si_dpm_quirk_list[] = { /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, { 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index ccb0ce073ef2..4557f335a8a5 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -1409,7 +1409,7 @@ static int vop_bind(struct device *dev, struct device *master, void *data) struct vop *vop; struct resource *res; size_t alloc_size; - int ret; + int ret, irq; of_id = of_match_device(vop_driver_dt_match, dev); vop_data = of_id->data; @@ -1445,11 +1445,12 @@ static int vop_bind(struct device *dev, struct device *master, void *data) return ret; } - vop->irq = platform_get_irq(pdev, 0); - if (vop->irq < 0) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) { dev_err(dev, "cannot find irq for vop\n"); - return vop->irq; + return irq; } + vop->irq = (unsigned int)irq; spin_lock_init(&vop->reg_lock); spin_lock_init(&vop->irq_lock); diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index c8a18e4ee9dc..720ceeb7fa9b 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1298,21 +1298,22 @@ static int table_load(struct dm_ioctl *param, size_t param_size) goto err_unlock_md_type; } - if (dm_get_md_type(md) == DM_TYPE_NONE) + if (dm_get_md_type(md) == DM_TYPE_NONE) { /* Initial table load: acquire type of table. 
*/ dm_set_md_type(md, dm_table_get_type(t)); - else if (dm_get_md_type(md) != dm_table_get_type(t)) { + + /* setup md->queue to reflect md's type (may block) */ + r = dm_setup_md_queue(md); + if (r) { + DMWARN("unable to set up device queue for new table."); + goto err_unlock_md_type; + } + } else if (dm_get_md_type(md) != dm_table_get_type(t)) { DMWARN("can't change device type after initial table load."); r = -EINVAL; goto err_unlock_md_type; } - /* setup md->queue to reflect md's type (may block) */ - r = dm_setup_md_queue(md); - if (r) { - DMWARN("unable to set up device queue for new table."); - goto err_unlock_md_type; - } dm_unlock_md_type(md); /* stage inactive table */ diff --git a/drivers/md/dm.c b/drivers/md/dm.c index f8c7ca3e8947..a930b72314ac 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1082,18 +1082,26 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue) dm_put(md); } -static void free_rq_clone(struct request *clone) +static void free_rq_clone(struct request *clone, bool must_be_mapped) { struct dm_rq_target_io *tio = clone->end_io_data; struct mapped_device *md = tio->md; + WARN_ON_ONCE(must_be_mapped && !clone->q); + blk_rq_unprep_clone(clone); - if (clone->q->mq_ops) + if (md->type == DM_TYPE_MQ_REQUEST_BASED) + /* stacked on blk-mq queue(s) */ tio->ti->type->release_clone_rq(clone); else if (!md->queue->mq_ops) /* request_fn queue stacked on request_fn queue(s) */ free_clone_request(md, clone); + /* + * NOTE: for the blk-mq queue stacked on request_fn queue(s) case: + * no need to call free_clone_request() because we leverage blk-mq by + * allocating the clone at the end of the blk-mq pdu (see: clone_rq) + */ if (!md->queue->mq_ops) free_rq_tio(tio); @@ -1124,7 +1132,7 @@ static void dm_end_request(struct request *clone, int error) rq->sense_len = clone->sense_len; } - free_rq_clone(clone); + free_rq_clone(clone, true); if (!rq->q->mq_ops) blk_end_request_all(rq, error); else @@ -1143,7 +1151,7 @@ static void dm_unprep_request(struct request *rq) } if (clone) - free_rq_clone(clone); + free_rq_clone(clone, false); } /* @@ -2662,9 +2670,6 @@ static int dm_init_request_based_queue(struct mapped_device *md) { struct request_queue *q = NULL; - if (md->queue->elevator) - return 0; - /* Fully initialize the queue */ q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL); if (!q) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 78dde56ae6e6..d5fe5d5f490f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -82,6 +82,8 @@ #include <net/bond_3ad.h> #include <net/bond_alb.h> +#include "bonding_priv.h" + /*---------------------------- Module parameters ----------------------------*/ /* monitor all links that often (in milliseconds). <=0 disables monitoring */ @@ -4542,6 +4544,8 @@ unsigned int bond_get_num_tx_queues(void) int bond_create(struct net *net, const char *name) { struct net_device *bond_dev; + struct bonding *bond; + struct alb_bond_info *bond_info; int res; rtnl_lock(); @@ -4555,6 +4559,14 @@ int bond_create(struct net *net, const char *name) return -ENOMEM; } + /* + * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX. + * It is set to 0 by default which is wrong. 
+ */ + bond = netdev_priv(bond_dev); + bond_info = &(BOND_ALB_INFO(bond)); + bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX; + dev_net_set(bond_dev, net); bond_dev->rtnl_link_ops = &bond_link_ops; diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index 62694cfc05b6..b20b35acb47d 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -4,6 +4,7 @@ #include <net/netns/generic.h> #include <net/bonding.h> +#include "bonding_priv.h" static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) diff --git a/drivers/net/bonding/bonding_priv.h b/drivers/net/bonding/bonding_priv.h new file mode 100644 index 000000000000..5a4d81a9437c --- /dev/null +++ b/drivers/net/bonding/bonding_priv.h @@ -0,0 +1,25 @@ +/* + * Bond several ethernet interfaces into a Cisco, running 'Etherchannel'. + * + * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes + * NCM: Network and Communications Management, Inc. + * + * BUT, I'm the one who modified it for ethernet, so: + * (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov + * + * This software may be used and distributed according to the terms + * of the GNU Public License, incorporated herein by reference. + * + */ + +#ifndef _BONDING_PRIV_H +#define _BONDING_PRIV_H + +#define DRV_VERSION "3.7.1" +#define DRV_RELDATE "April 27, 2011" +#define DRV_NAME "bonding" +#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" + +#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" + +#endif diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 58808f651452..e8c96b8e86f4 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -112,7 +112,7 @@ config PCH_CAN config CAN_GRCAN tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices" - depends on OF + depends on OF && HAS_DMA ---help--- Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN. 
Note that the driver supports little endian, even though little diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 4643914859b2..8b17a9065b0b 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -1102,7 +1102,7 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv, if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR)) { - netdev_err(priv->netdev, "Unknow error (flags: 0x%02x)\n", + netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n", msg->u.rx_can_header.flag); stats->rx_errors++; diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c index b36ee9e0d220..d686b9cac29f 100644 --- a/drivers/net/ethernet/8390/etherh.c +++ b/drivers/net/ethernet/8390/etherh.c @@ -523,7 +523,7 @@ static int etherh_addr(char *addr, struct expansion_card *ec) char *s; if (!ecard_readchunk(&cd, ec, 0xf5, 0)) { - printk(KERN_ERR "%s: unable to read podule description string\n", + printk(KERN_ERR "%s: unable to read module description string\n", dev_name(&ec->dev)); goto no_addr; } diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h index eba070f16782..89cd11d86642 100644 --- a/drivers/net/ethernet/altera/altera_msgdmahw.h +++ b/drivers/net/ethernet/altera/altera_msgdmahw.h @@ -58,15 +58,12 @@ struct msgdma_extended_desc { /* Tx buffer control flags */ #define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \ - MSGDMA_DESC_CTL_TR_ERR_IRQ | \ MSGDMA_DESC_CTL_GO) -#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_TR_ERR_IRQ | \ - MSGDMA_DESC_CTL_GO) +#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_GO) #define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \ MSGDMA_DESC_CTL_TR_COMP_IRQ | \ - MSGDMA_DESC_CTL_TR_ERR_IRQ | \ MSGDMA_DESC_CTL_GO) #define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \ diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 90a76306ad0f..da48e66377b5 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -391,6 +391,12 @@ static int tse_rx(struct altera_tse_private *priv, int limit) "RCV pktstatus %08X pktlength %08X\n", pktstatus, pktlength); + /* DMA transfer from TSE starts with 2 additional bytes for + * IP payload alignment. Status returned by get_rx_status() + * contains DMA transfer length. Packet is 2 bytes shorter. + */ + pktlength -= 2; + count++; next_entry = (++priv->rx_cons) % priv->rx_ring_size; @@ -777,6 +783,8 @@ static int init_phy(struct net_device *dev) struct altera_tse_private *priv = netdev_priv(dev); struct phy_device *phydev; struct device_node *phynode; + bool fixed_link = false; + int rc = 0; /* Avoid init phy in case of no phy present */ if (!priv->phy_iface) @@ -789,13 +797,32 @@ static int init_phy(struct net_device *dev) phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0); if (!phynode) { - netdev_dbg(dev, "no phy-handle found\n"); - if (!priv->mdio) { - netdev_err(dev, - "No phy-handle nor local mdio specified\n"); - return -ENODEV; + /* check if a fixed-link is defined in device-tree */ + if (of_phy_is_fixed_link(priv->device->of_node)) { + rc = of_phy_register_fixed_link(priv->device->of_node); + if (rc < 0) { + netdev_err(dev, "cannot register fixed PHY\n"); + return rc; + } + + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. 
+ */ + phynode = of_node_get(priv->device->of_node); + fixed_link = true; + + netdev_dbg(dev, "fixed-link detected\n"); + phydev = of_phy_connect(dev, phynode, + &altera_tse_adjust_link, + 0, priv->phy_iface); + } else { + netdev_dbg(dev, "no phy-handle found\n"); + if (!priv->mdio) { + netdev_err(dev, "No phy-handle nor local mdio specified\n"); + return -ENODEV; + } + phydev = connect_local_phy(dev); } - phydev = connect_local_phy(dev); } else { netdev_dbg(dev, "phy-handle found\n"); phydev = of_phy_connect(dev, phynode, @@ -819,10 +846,10 @@ static int init_phy(struct net_device *dev) /* Broken HW is sometimes missing the pull-up resistor on the * MDIO line, which results in reads to non-existent devices returning * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent - * device as well. + * device as well. If a fixed-link is used the phy_id is always 0. * Note: phydev->phy_id is the result of reading the UID PHY registers. */ - if (phydev->phy_id == 0) { + if ((phydev->phy_id == 0) && !fixed_link) { netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id); phy_disconnect(phydev); return -ENODEV; diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index c638c85f3954..089c269637b7 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -179,7 +179,7 @@ config SUNLANCE config AMD_XGBE tristate "AMD 10GbE Ethernet driver" - depends on (OF_NET || ACPI) && HAS_IOMEM + depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA select PHYLIB select AMD_XGBE_PHY select BITREVERSE diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig index 8e262e2b39b6..dea29ee24da4 100644 --- a/drivers/net/ethernet/arc/Kconfig +++ b/drivers/net/ethernet/arc/Kconfig @@ -25,8 +25,7 @@ config ARC_EMAC_CORE config ARC_EMAC tristate "ARC EMAC support" select ARC_EMAC_CORE - depends on OF_IRQ - depends on OF_NET + depends on OF_IRQ && OF_NET && HAS_DMA ---help--- On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x non-standard on-chip ethernet device ARC EMAC 10/100 is used. @@ -35,7 +34,7 @@ config ARC_EMAC config EMAC_ROCKCHIP tristate "Rockchip EMAC support" select ARC_EMAC_CORE - depends on OF_IRQ && OF_NET && REGULATOR + depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA ---help--- Support for Rockchip RK3066/RK3188 EMAC ethernet controllers. 
This selects Rockchip SoC glue layer support for the diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h index 74df16aef793..88a6271de5bc 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h @@ -129,7 +129,7 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw); #define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8 #define TWSI_CTRL_SW_LDSTART 0x800 #define TWSI_CTRL_HW_LDSTART 0x1000 -#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x0x7F +#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F #define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15 #define TWSI_CTRL_LD_EXIST 0x400000 #define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3 diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 7e3d87a88c76..e2c043eabbf3 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -543,7 +543,7 @@ struct bcm_sysport_tx_counters { u32 jbr; /* RO # of xmited jabber count*/ u32 bytes; /* RO # of xmited byte count */ u32 pok; /* RO # of xmited good pkt */ - u32 uc; /* RO (0x0x4f0)# of xmited unitcast pkt */ + u32 uc; /* RO (0x4f0) # of xmited unicast pkt */ }; struct bcm_sysport_mib { diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index de77d3a74abc..21e3c38c7c75 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1260,7 +1260,7 @@ static int bgmac_poll(struct napi_struct *napi, int weight) /* Poll again if more events arrived in the meantime */ if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX)) - return handled; + return weight; if (handled < weight) { napi_complete(napi); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 355d5fea5be9..a3b0f7a0c61e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -521,6 +521,7 @@ struct bnx2x_fp_txdata { }; enum bnx2x_tpa_mode_t { + TPA_MODE_DISABLED, TPA_MODE_LRO, TPA_MODE_GRO }; @@ -589,7 +590,6 @@ struct bnx2x_fastpath { /* TPA related */ struct bnx2x_agg_info *tpa_info; - u8 disable_tpa; #ifdef BNX2X_STOP_ON_ERROR u64 tpa_queue_used; #endif @@ -1545,9 +1545,7 @@ struct bnx2x { #define USING_MSIX_FLAG (1 << 5) #define USING_MSI_FLAG (1 << 6) #define DISABLE_MSI_FLAG (1 << 7) -#define TPA_ENABLE_FLAG (1 << 8) #define NO_MCP_FLAG (1 << 9) -#define GRO_ENABLE_FLAG (1 << 10) #define MF_FUNC_DIS (1 << 11) #define OWN_CNIC_IRQ (1 << 12) #define NO_ISCSI_OOO_FLAG (1 << 13) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 2f63467bce46..a8bb8f664d3d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -947,10 +947,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) u16 frag_size, pages; #ifdef BNX2X_STOP_ON_ERROR /* sanity check */ - if (fp->disable_tpa && + if (fp->mode == TPA_MODE_DISABLED && (CQE_TYPE_START(cqe_fp_type) || CQE_TYPE_STOP(cqe_fp_type))) - BNX2X_ERR("START/STOP packet while disable_tpa type %x\n", + BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n", CQE_TYPE(cqe_fp_type)); #endif @@ -1396,7 +1396,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) DP(NETIF_MSG_IFUP, "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); - if (!fp->disable_tpa) { + if (fp->mode != TPA_MODE_DISABLED) { /* Fill the per-aggregation pool */ for (i = 0; i 
< MAX_AGG_QS(bp); i++) { struct bnx2x_agg_info *tpa_info = @@ -1410,7 +1410,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n", j); bnx2x_free_tpa_pool(bp, fp, i); - fp->disable_tpa = 1; + fp->mode = TPA_MODE_DISABLED; break; } dma_unmap_addr_set(first_buf, mapping, 0); @@ -1438,7 +1438,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) ring_prod); bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); - fp->disable_tpa = 1; + fp->mode = TPA_MODE_DISABLED; ring_prod = 0; break; } @@ -1560,7 +1560,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) bnx2x_free_rx_bds(fp); - if (!fp->disable_tpa) + if (fp->mode != TPA_MODE_DISABLED) bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); } } @@ -2477,17 +2477,19 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index) /* set the tpa flag for each queue. The tpa flag determines the queue * minimal size so it must be set prior to queue memory allocation */ - fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG || - (bp->flags & GRO_ENABLE_FLAG && - bnx2x_mtu_allows_gro(bp->dev->mtu))); - if (bp->flags & TPA_ENABLE_FLAG) + if (bp->dev->features & NETIF_F_LRO) fp->mode = TPA_MODE_LRO; - else if (bp->flags & GRO_ENABLE_FLAG) + else if (bp->dev->features & NETIF_F_GRO && + bnx2x_mtu_allows_gro(bp->dev->mtu)) fp->mode = TPA_MODE_GRO; + else + fp->mode = TPA_MODE_DISABLED; - /* We don't want TPA on an FCoE L2 ring */ - if (IS_FCOE_FP(fp)) - fp->disable_tpa = 1; + /* We don't want TPA if it's disabled in bp + * or if this is an FCoE L2 ring. + */ + if (bp->disable_tpa || IS_FCOE_FP(fp)) + fp->mode = TPA_MODE_DISABLED; } int bnx2x_load_cnic(struct bnx2x *bp) @@ -2608,7 +2610,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* * Zero fastpath structures preserving invariants like napi, which are * allocated only once, fp index, max_cos, bp pointer. - * Also set fp->disable_tpa and txdata_ptr. + * Also set fp->mode and txdata_ptr. */ DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); for_each_queue(bp, i) @@ -3247,7 +3249,7 @@ int bnx2x_low_latency_recv(struct napi_struct *napi) if ((bp->state == BNX2X_STATE_CLOSED) || (bp->state == BNX2X_STATE_ERROR) || - (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG))) + (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO))) return LL_FLUSH_FAILED; if (!bnx2x_fp_lock_poll(fp)) @@ -4543,7 +4545,7 @@ alloc_mem_err: * In these cases we disable the queue * Min size is different for OOO, TPA and non-TPA queues */ - if (ring_size < (fp->disable_tpa ? + if (ring_size < (fp->mode == TPA_MODE_DISABLED ? MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) { /* release memory allocated for this queue */ bnx2x_free_fp_mem_at(bp, index); @@ -4809,66 +4811,71 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); + if (pci_num_vf(bp->pdev)) { + netdev_features_t changed = dev->features ^ features; + + /* Revert the requested changes in features if they + * would require internal reload of PF in bnx2x_set_features(). 
+ */ + if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) { + features &= ~NETIF_F_RXCSUM; + features |= dev->features & NETIF_F_RXCSUM; + } + + if (changed & NETIF_F_LOOPBACK) { + features &= ~NETIF_F_LOOPBACK; + features |= dev->features & NETIF_F_LOOPBACK; + } + } + /* TPA requires Rx CSUM offloading */ if (!(features & NETIF_F_RXCSUM)) { features &= ~NETIF_F_LRO; features &= ~NETIF_F_GRO; } - /* Note: do not disable SW GRO in kernel when HW GRO is off */ - if (bp->disable_tpa) - features &= ~NETIF_F_LRO; - return features; } int bnx2x_set_features(struct net_device *dev, netdev_features_t features) { struct bnx2x *bp = netdev_priv(dev); - u32 flags = bp->flags; - u32 changes; + netdev_features_t changes = features ^ dev->features; bool bnx2x_reload = false; + int rc; - if (features & NETIF_F_LRO) - flags |= TPA_ENABLE_FLAG; - else - flags &= ~TPA_ENABLE_FLAG; - - if (features & NETIF_F_GRO) - flags |= GRO_ENABLE_FLAG; - else - flags &= ~GRO_ENABLE_FLAG; - - if (features & NETIF_F_LOOPBACK) { - if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { - bp->link_params.loopback_mode = LOOPBACK_BMAC; - bnx2x_reload = true; - } - } else { - if (bp->link_params.loopback_mode != LOOPBACK_NONE) { - bp->link_params.loopback_mode = LOOPBACK_NONE; - bnx2x_reload = true; + /* VFs or non SRIOV PFs should be able to change loopback feature */ + if (!pci_num_vf(bp->pdev)) { + if (features & NETIF_F_LOOPBACK) { + if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { + bp->link_params.loopback_mode = LOOPBACK_BMAC; + bnx2x_reload = true; + } + } else { + if (bp->link_params.loopback_mode != LOOPBACK_NONE) { + bp->link_params.loopback_mode = LOOPBACK_NONE; + bnx2x_reload = true; + } } } - changes = flags ^ bp->flags; - /* if GRO is changed while LRO is enabled, don't force a reload */ - if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG)) - changes &= ~GRO_ENABLE_FLAG; + if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO)) + changes &= ~NETIF_F_GRO; /* if GRO is changed while HW TPA is off, don't force a reload */ - if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa) - changes &= ~GRO_ENABLE_FLAG; + if ((changes & NETIF_F_GRO) && bp->disable_tpa) + changes &= ~NETIF_F_GRO; if (changes) bnx2x_reload = true; - bp->flags = flags; - if (bnx2x_reload) { - if (bp->recovery_state == BNX2X_RECOVERY_DONE) - return bnx2x_reload_if_running(dev); + if (bp->recovery_state == BNX2X_RECOVERY_DONE) { + dev->features = features; + rc = bnx2x_reload_if_running(dev); + return rc ? rc : 1; + } /* else: bnx2x_nic_load() will be called at end of recovery */ } @@ -4931,6 +4938,11 @@ int bnx2x_resume(struct pci_dev *pdev) } bp = netdev_priv(dev); + if (pci_num_vf(bp->pdev)) { + DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n"); + return -EPERM; + } + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { BNX2X_ERR("Handling parity error recovery. 
Try again later\n"); return -EAGAIN; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index adcacda7af7b..d7a71758e876 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -969,7 +969,7 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, { int i; - if (fp->disable_tpa) + if (fp->mode == TPA_MODE_DISABLED) return; for (i = 0; i < last; i++) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index e3d853cab7c9..48ed005ba73f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1843,6 +1843,12 @@ static int bnx2x_set_ringparam(struct net_device *dev, "set ring params command parameters: rx_pending = %d, tx_pending = %d\n", ering->rx_pending, ering->tx_pending); + if (pci_num_vf(bp->pdev)) { + DP(BNX2X_MSG_IOV, + "VFs are enabled, can not change ring parameters\n"); + return -EPERM; + } + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { DP(BNX2X_MSG_ETHTOOL, "Handling parity error recovery. Try again later\n"); @@ -2899,6 +2905,12 @@ static void bnx2x_self_test(struct net_device *dev, u8 is_serdes, link_up; int rc, cnt = 0; + if (pci_num_vf(bp->pdev)) { + DP(BNX2X_MSG_IOV, + "VFs are enabled, can not perform self test\n"); + return; + } + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { netdev_err(bp->dev, "Handling parity error recovery. Try again later\n"); @@ -3468,6 +3480,11 @@ static int bnx2x_set_channels(struct net_device *dev, channels->rx_count, channels->tx_count, channels->other_count, channels->combined_count); + if (pci_num_vf(bp->pdev)) { + DP(BNX2X_MSG_IOV, "VFs are enabled, can not set channels\n"); + return -EPERM; + } + /* We don't support separate rx / tx channels. * We don't allow setting 'other' channels. */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index b9f85fccb419..556dcc162a62 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -3128,7 +3128,7 @@ static unsigned long bnx2x_get_q_flags(struct bnx2x *bp, __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags); } - if (!fp->disable_tpa) { + if (fp->mode != TPA_MODE_DISABLED) { __set_bit(BNX2X_Q_FLG_TPA, &flags); __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags); if (fp->mode == TPA_MODE_GRO) @@ -3176,7 +3176,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, u16 sge_sz = 0; u16 tpa_agg_size = 0; - if (!fp->disable_tpa) { + if (fp->mode != TPA_MODE_DISABLED) { pause->sge_th_lo = SGE_TH_LO(bp); pause->sge_th_hi = SGE_TH_HI(bp); @@ -3304,7 +3304,7 @@ static void bnx2x_pf_init(struct bnx2x *bp) /* This flag is relevant for E1x only. * E2 doesn't have a TPA configuration in a function level. */ - flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0; + flags |= (bp->dev->features & NETIF_F_LRO) ? 
FUNC_FLG_TPA : 0; func_init.func_flgs = flags; func_init.pf_id = BP_FUNC(bp); @@ -12107,11 +12107,8 @@ static int bnx2x_init_bp(struct bnx2x *bp) /* Set TPA flags */ if (bp->disable_tpa) { - bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); + bp->dev->hw_features &= ~NETIF_F_LRO; bp->dev->features &= ~NETIF_F_LRO; - } else { - bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); - bp->dev->features |= NETIF_F_LRO; } if (CHIP_IS_E1(bp)) @@ -13371,6 +13368,12 @@ static int bnx2x_init_one(struct pci_dev *pdev, bool is_vf; int cnic_cnt; + /* Management FW 'remembers' living interfaces. Allow it some time + * to forget previously living interfaces, allowing a proper re-load. + */ + if (is_kdump_kernel()) + msleep(5000); + /* An estimated maximum supported CoS number according to the chip * version. * We will try to roughly estimate the maximum number of CoSes this chip diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 15b2d1647560..06b8c0d8fd3b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -594,7 +594,7 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req)); /* select tpa mode to request */ - if (!fp->disable_tpa) { + if (fp->mode != TPA_MODE_DISABLED) { flags |= VFPF_QUEUE_FLG_TPA; flags |= VFPF_QUEUE_FLG_TPA_IPV6; if (fp->mode == TPA_MODE_GRO) diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 1270b189a9a2..069952fa5d64 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -18129,7 +18129,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, rtnl_lock(); - tp->pcierr_recovery = true; + /* We needn't recover from permanent error */ + if (state == pci_channel_io_frozen) + tp->pcierr_recovery = true; /* We probably don't have netdev yet */ if (!netdev || !netif_running(netdev)) diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 9f5387249f24..4104d49f005d 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -707,6 +707,9 @@ static void gem_rx_refill(struct macb *bp) /* properly align Ethernet header */ skb_reserve(skb, NET_IP_ALIGN); + } else { + bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED); + bp->rx_ring[entry].ctrl = 0; } } @@ -1473,9 +1476,9 @@ static void macb_init_rings(struct macb *bp) for (i = 0; i < TX_RING_SIZE; i++) { bp->queues[0].tx_ring[i].addr = 0; bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); - bp->queues[0].tx_head = 0; - bp->queues[0].tx_tail = 0; } + bp->queues[0].tx_head = 0; + bp->queues[0].tx_tail = 0; bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); bp->rx_tail = 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 5959e3ae72da..e8578a742f2a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -492,7 +492,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, memoffset = (mtype * (edc_size * 1024 * 1024)); else { mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap, - MA_EXT_MEMORY1_BAR_A)); + MA_EXT_MEMORY0_BAR_A)); memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; } diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index fb0bc3c3620e..a6dcbf850c1f 100644 --- 
a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4846,7 +4846,8 @@ err: } static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, u32 filter_mask) + struct net_device *dev, u32 filter_mask, + int nlflags) { struct be_adapter *adapter = netdev_priv(dev); int status = 0; @@ -4868,7 +4869,7 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, return ndo_dflt_bridge_getlink(skb, pid, seq, dev, hsw_mode == PORT_FWD_TYPE_VEPA ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB, - 0, 0); + 0, 0, nlflags); } #ifdef CONFIG_BE2NET_VXLAN diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index f6a3a7abd468..66d47e448e4d 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -988,7 +988,10 @@ fec_restart(struct net_device *ndev) rcntl |= 0x40000000 | 0x00000020; /* RGMII, RMII or MII */ - if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII) + if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || + fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || + fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) rcntl |= (1 << 6); else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) rcntl |= (1 << 8); diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 291c87036e17..2a0dc127df3f 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -3347,7 +3347,7 @@ static int ehea_register_memory_hooks(void) { int ret = 0; - if (atomic_inc_and_test(&ehea_memory_hooks_registered)) + if (atomic_inc_return(&ehea_memory_hooks_registered) > 1) return 0; ret = ehea_create_busmap(); @@ -3381,12 +3381,14 @@ out3: out2: unregister_reboot_notifier(&ehea_reboot_nb); out: + atomic_dec(&ehea_memory_hooks_registered); return ret; } static void ehea_unregister_memory_hooks(void) { - if (atomic_read(&ehea_memory_hooks_registered)) + /* Only remove the hooks if we've registered them */ + if (atomic_read(&ehea_memory_hooks_registered) == 0) return; unregister_reboot_notifier(&ehea_reboot_nb); diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index cd7675ac5bf9..18134766a114 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1238,7 +1238,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) return -EINVAL; for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) - if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) + if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) break; if (i == IBMVETH_NUM_BUFF_POOLS) @@ -1257,7 +1257,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { adapter->rx_buff_pool[i].active = 1; - if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { + if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) { dev->mtu = new_mtu; vio_cmo_set_dev_desired(viodev, ibmveth_get_desired_dma diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 24481cd7e59a..a54c14491e3b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8053,10 +8053,10 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, #ifdef HAVE_BRIDGE_FILTER static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device 
*dev, - u32 __always_unused filter_mask) + u32 __always_unused filter_mask, int nlflags) #else static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev) + struct net_device *dev, int nlflags) #endif /* HAVE_BRIDGE_FILTER */ { struct i40e_netdev_priv *np = netdev_priv(dev); @@ -8078,7 +8078,8 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, if (!veb) return 0; - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode); + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, + nlflags); } #endif /* HAVE_BRIDGE_ATTRIBS */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index d3f4b0ceb3f7..5be12a00e1f4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8044,7 +8044,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, - u32 filter_mask) + u32 filter_mask, int nlflags) { struct ixgbe_adapter *adapter = netdev_priv(dev); @@ -8052,7 +8052,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, return 0; return ndo_dflt_bridge_getlink(skb, pid, seq, dev, - adapter->bridge_mode, 0, 0); + adapter->bridge_mode, 0, 0, nlflags); } static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index af829c578400..7ace07dad6a3 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1508,7 +1508,8 @@ static int pxa168_eth_probe(struct platform_device *pdev) np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); if (!np) { dev_err(&pdev->dev, "missing phy-handle\n"); - return -EINVAL; + err = -EINVAL; + goto err_netdev; } of_property_read_u32(np, "reg", &pep->phy_addr); pep->phy_intf = of_get_phy_mode(pdev->dev.of_node); @@ -1526,7 +1527,7 @@ static int pxa168_eth_probe(struct platform_device *pdev) pep->smi_bus = mdiobus_alloc(); if (pep->smi_bus == NULL) { err = -ENOMEM; - goto err_base; + goto err_netdev; } pep->smi_bus->priv = pep; pep->smi_bus->name = "pxa168_eth smi"; @@ -1551,13 +1552,10 @@ err_mdiobus: mdiobus_unregister(pep->smi_bus); err_free_mdio: mdiobus_free(pep->smi_bus); -err_base: - iounmap(pep->base); err_netdev: free_netdev(dev); err_clk: - clk_disable(clk); - clk_put(clk); + clk_disable_unprepare(clk); return err; } @@ -1574,13 +1572,9 @@ static int pxa168_eth_remove(struct platform_device *pdev) if (pep->phy) phy_disconnect(pep->phy); if (pep->clk) { - clk_disable(pep->clk); - clk_put(pep->clk); - pep->clk = NULL; + clk_disable_unprepare(pep->clk); } - iounmap(pep->base); - pep->base = NULL; mdiobus_unregister(pep->smi_bus); mdiobus_free(pep->smi_bus); unregister_netdev(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 3f44e2bbb982..a2ddf3d75ff8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1102,20 +1102,21 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc) struct mlx4_en_priv *priv = netdev_priv(dev); /* check if requested function is supported by the device */ - if ((hfunc == ETH_RSS_HASH_TOP && - !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) || - (hfunc == ETH_RSS_HASH_XOR && - 
!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))) - return -EINVAL; + if (hfunc == ETH_RSS_HASH_TOP) { + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) + return -EINVAL; + if (!(dev->features & NETIF_F_RXHASH)) + en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n"); + return 0; + } else if (hfunc == ETH_RSS_HASH_XOR) { + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)) + return -EINVAL; + if (dev->features & NETIF_F_RXHASH) + en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n"); + return 0; + } - priv->rss_hash_fn = hfunc; - if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH)) - en_warn(priv, - "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n"); - if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH)) - en_warn(priv, - "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n"); - return 0; + return -EINVAL; } static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key, @@ -1189,6 +1190,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, priv->prof->rss_rings = rss_rings; if (key) memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE); + if (hfunc != ETH_RSS_HASH_NO_CHANGE) + priv->rss_hash_fn = hfunc; if (port_up) { err = mlx4_en_start_port(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 0f1afc085d58..32f5ec737472 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1467,6 +1467,7 @@ static void mlx4_en_service_task(struct work_struct *work) if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) mlx4_en_ptp_overflow_check(mdev); + mlx4_en_recover_from_oom(priv); queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY); } @@ -1721,7 +1722,7 @@ mac_err: cq_err: while (rx_index--) { mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); - mlx4_en_free_affinity_hint(priv, i); + mlx4_en_free_affinity_hint(priv, rx_index); } for (i = 0; i < priv->rx_ring_num; i++) mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 4fdd3c37e47b..2a77a6b19121 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -244,6 +244,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp); } +static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring) +{ + BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size); + return ring->prod == ring->cons; +} + static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) { *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); @@ -315,8 +321,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, ring->cons, ring->prod); /* Unmap and free Rx buffers */ - BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size); - while (ring->cons != ring->prod) { + while (!mlx4_en_is_ring_empty(ring)) { index = ring->cons & ring->size_mask; en_dbg(DRV, priv, "Processing descriptor:%d\n", index); mlx4_en_free_rx_desc(priv, ring, index); @@ -491,6 +496,23 @@ err_allocator: return err; } +/* We recover from out of memory by scheduling our napi poll + * function (mlx4_en_process_cq), which tries to 
allocate + * all missing RX buffers (call to mlx4_en_refill_rx_buffers). + */ +void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv) +{ + int ring; + + if (!priv->port_up) + return; + + for (ring = 0; ring < priv->rx_ring_num; ring++) { + if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) + napi_reschedule(&priv->rx_cq[ring]->napi); + } +} + void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, u32 size, u16 stride) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 1783705273d8..f7bf312fb443 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -143,8 +143,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type; ring->queue_index = queue_index; - if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index)) - cpumask_set_cpu(queue_index, &ring->affinity_mask); + if (queue_index < priv->num_tx_rings_p_up) + cpumask_set_cpu_local_first(queue_index, + priv->mdev->dev->numa_node, + &ring->affinity_mask); *pring = ring; return 0; @@ -213,7 +215,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, &ring->qp, &ring->qp_state); - if (!user_prio && cpu_online(ring->queue_index)) + if (!cpumask_empty(&ring->affinity_mask)) netif_set_xps_queue(priv->dev, &ring->affinity_mask, ring->queue_index); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index a4079811b176..e30bf57ad7a1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -56,11 +56,13 @@ MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)"); #define MLX4_GET(dest, source, offset) \ do { \ void *__p = (char *) (source) + (offset); \ + u64 val; \ switch (sizeof (dest)) { \ case 1: (dest) = *(u8 *) __p; break; \ case 2: (dest) = be16_to_cpup(__p); break; \ case 4: (dest) = be32_to_cpup(__p); break; \ - case 8: (dest) = be64_to_cpup(__p); break; \ + case 8: val = get_unaligned((u64 *)__p); \ + (dest) = be64_to_cpu(val); break; \ default: __buggy_use_of_MLX4_GET(); \ } \ } while (0) @@ -1605,9 +1607,17 @@ static void get_board_id(void *vsd, char *board_id) * swaps each 4-byte word before passing it back to * us. Therefore we need to swab it before printing. 
*/ - for (i = 0; i < 4; ++i) - ((u32 *) board_id)[i] = - swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4)); + u32 *bid_u32 = (u32 *)board_id; + + for (i = 0; i < 4; ++i) { + u32 *addr; + u32 val; + + addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4); + val = get_unaligned(addr); + val = swab32(val); + put_unaligned(val, &bid_u32[i]); + } } } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 9de30216b146..d021f079f181 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -774,6 +774,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev); +void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv); int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, u32 size, u16 stride, int node); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 1412f5af05ec..2bae50292dcd 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -69,11 +69,7 @@ #include <net/ip.h> #include <net/tcp.h> #include <asm/byteorder.h> -#include <asm/io.h> #include <asm/processor.h> -#ifdef CONFIG_MTRR -#include <asm/mtrr.h> -#endif #include <net/busy_poll.h> #include "myri10ge_mcp.h" @@ -242,8 +238,7 @@ struct myri10ge_priv { unsigned int rdma_tags_available; int intr_coal_delay; __be32 __iomem *intr_coal_delay_ptr; - int mtrr; - int wc_enabled; + int wc_cookie; int down_cnt; wait_queue_head_t down_wq; struct work_struct watchdog_work; @@ -1905,7 +1900,7 @@ static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = { "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", "tx_heartbeat_errors", "tx_window_errors", /* device-specific stats */ - "tx_boundary", "WC", "irq", "MSI", "MSIX", + "tx_boundary", "irq", "MSI", "MSIX", "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", "serial_number", "watchdog_resets", #ifdef CONFIG_MYRI10GE_DCA @@ -1984,7 +1979,6 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, data[i] = ((u64 *)&link_stats)[i]; data[i++] = (unsigned int)mgp->tx_boundary; - data[i++] = (unsigned int)mgp->wc_enabled; data[i++] = (unsigned int)mgp->pdev->irq; data[i++] = (unsigned int)mgp->msi_enabled; data[i++] = (unsigned int)mgp->msix_enabled; @@ -4040,14 +4034,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mgp->board_span = pci_resource_len(pdev, 0); mgp->iomem_base = pci_resource_start(pdev, 0); - mgp->mtrr = -1; - mgp->wc_enabled = 0; -#ifdef CONFIG_MTRR - mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span, - MTRR_TYPE_WRCOMB, 1); - if (mgp->mtrr >= 0) - mgp->wc_enabled = 1; -#endif + mgp->wc_cookie = arch_phys_wc_add(mgp->iomem_base, mgp->board_span); mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span); if (mgp->sram == NULL) { dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n", @@ -4146,14 +4133,14 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto abort_with_state; } if (mgp->msix_enabled) - dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n", + dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, MTRR %s, WC Enabled\n", mgp->num_slices, mgp->tx_boundary, mgp->fw_name, - (mgp->wc_enabled ? "Enabled" : "Disabled")); + (mgp->wc_cookie > 0 ? 
"Enabled" : "Disabled")); else - dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", + dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, MTRR %s, WC Enabled\n", mgp->msi_enabled ? "MSI" : "xPIC", pdev->irq, mgp->tx_boundary, mgp->fw_name, - (mgp->wc_enabled ? "Enabled" : "Disabled")); + (mgp->wc_cookie > 0 ? "Enabled" : "Disabled")); board_number++; return 0; @@ -4175,10 +4162,7 @@ abort_with_ioremap: iounmap(mgp->sram); abort_with_mtrr: -#ifdef CONFIG_MTRR - if (mgp->mtrr >= 0) - mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); -#endif + arch_phys_wc_del(mgp->wc_cookie); dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), mgp->cmd, mgp->cmd_bus); @@ -4220,11 +4204,7 @@ static void myri10ge_remove(struct pci_dev *pdev) pci_restore_state(pdev); iounmap(mgp->sram); - -#ifdef CONFIG_MTRR - if (mgp->mtrr >= 0) - mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); -#endif + arch_phys_wc_del(mgp->wc_cookie); myri10ge_free_slices(mgp); kfree(mgp->msix_vectors); dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 5c4068353f66..8da7c3faf817 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c @@ -135,7 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter) int i, j; struct nx_host_tx_ring *tx_ring = adapter->tx_ring; - spin_lock(&adapter->tx_clean_lock); + spin_lock_bh(&adapter->tx_clean_lock); cmd_buf = tx_ring->cmd_buf_arr; for (i = 0; i < tx_ring->num_desc; i++) { buffrag = cmd_buf->frag_array; @@ -159,7 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter) } cmd_buf++; } - spin_unlock(&adapter->tx_clean_lock); + spin_unlock_bh(&adapter->tx_clean_lock); } void netxen_free_sw_resources(struct netxen_adapter *adapter) diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index a570a60533be..ec251531bd9f 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -4176,14 +4176,15 @@ static int rocker_port_bridge_setlink(struct net_device *dev, static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, - u32 filter_mask) + u32 filter_mask, int nlflags) { struct rocker_port *rocker_port = netdev_priv(dev); u16 mode = BRIDGE_MODE_UNDEF; u32 mask = BR_LEARNING | BR_LEARNING_SYNC; return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, - rocker_port->brport_flags, mask); + rocker_port->brport_flags, mask, + nlflags); } static int rocker_port_get_phys_port_name(struct net_device *dev, diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 2bef655279f3..9b7e0a34c98b 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -1765,7 +1765,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); - if (ndev && slave->open) + if (ndev && slave->open && + slave->link_interface != SGMII_LINK_MAC_PHY && + slave->link_interface != XGMII_LINK_MAC_PHY) netif_carrier_on(ndev); } else { writel(mac_control, GBE_REG_ADDR(slave, emac_regs, @@ -1773,7 +1775,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev, cpsw_ale_control_set(gbe_dev->ale, slave->port_num, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); - if (ndev) + if (ndev && + slave->link_interface != SGMII_LINK_MAC_PHY && + slave->link_interface != 
XGMII_LINK_MAC_PHY) netif_carrier_off(ndev); } diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index a10b31664709..41071d32bc8e 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -128,7 +128,6 @@ struct ndis_tcp_ip_checksum_info; struct hv_netvsc_packet { /* Bookkeeping stuff */ u32 status; - bool part_of_skb; bool is_data_pkt; bool xmit_more; /* from skb */ @@ -612,6 +611,15 @@ struct multi_send_data { u32 count; /* counter of batched packets */ }; +/* The context of the netvsc device */ +struct net_device_context { + /* point back to our device context */ + struct hv_device *device_ctx; + struct delayed_work dwork; + struct work_struct work; + u32 msg_enable; /* debug level */ +}; + /* Per netvsc device */ struct netvsc_device { struct hv_device *dev; @@ -667,6 +675,9 @@ struct netvsc_device { struct multi_send_data msd[NR_CPUS]; u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ u32 pkt_align; /* alignment bytes, e.g. 8 */ + + /* The net device context */ + struct net_device_context *nd_ctx; }; /* NdisInitialize message */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 2e8ad0636b46..2d9ef533cc48 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -889,11 +889,6 @@ int netvsc_send(struct hv_device *device, } else { packet->page_buf_cnt = 0; packet->total_data_buflen += msd_len; - if (!packet->part_of_skb) { - skb = (struct sk_buff *)(unsigned long)packet-> - send_completion_tid; - packet->send_completion_tid = 0; - } } if (msdp->pkt) @@ -1197,6 +1192,9 @@ int netvsc_device_add(struct hv_device *device, void *additional_info) */ ndev = net_device->ndev; + /* Add netvsc_device context to netvsc_device */ + net_device->nd_ctx = netdev_priv(ndev); + /* Initialize the NetVSC channel extension */ init_completion(&net_device->channel_init_wait); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index a3a9d3898a6e..5993c7e2d723 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -40,18 +40,21 @@ #include "hyperv_net.h" -struct net_device_context { - /* point back to our device context */ - struct hv_device *device_ctx; - struct delayed_work dwork; - struct work_struct work; -}; #define RING_SIZE_MIN 64 static int ring_size = 128; module_param(ring_size, int, S_IRUGO); MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); +static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | + NETIF_MSG_TX_ERR; + +static int debug = -1; +module_param(debug, int, S_IRUGO); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + static void do_set_multicast(struct work_struct *w) { struct net_device_context *ndevctx = @@ -235,9 +238,6 @@ void netvsc_xmit_completion(void *context) struct sk_buff *skb = (struct sk_buff *) (unsigned long)packet->send_completion_tid; - if (!packet->part_of_skb) - kfree(packet); - if (skb) dev_kfree_skb_any(skb); } @@ -389,7 +389,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) u32 net_trans_info; u32 hash; u32 skb_length; - u32 head_room; u32 pkt_sz; struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; @@ -402,7 +401,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) check_size: skb_length = skb->len; - head_room = skb_headroom(skb); num_data_pgs = netvsc_get_slots(skb) + 2; if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) { 
net_alert_ratelimited("packet too big: %u pages (%u bytes)\n", @@ -421,20 +419,14 @@ check_size: pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE; - if (head_room < pkt_sz) { - packet = kmalloc(pkt_sz, GFP_ATOMIC); - if (!packet) { - /* out of memory, drop packet */ - netdev_err(net, "unable to alloc hv_netvsc_packet\n"); - ret = -ENOMEM; - goto drop; - } - packet->part_of_skb = false; - } else { - /* Use the headroom for building up the packet */ - packet = (struct hv_netvsc_packet *)skb->head; - packet->part_of_skb = true; + ret = skb_cow_head(skb, pkt_sz); + if (ret) { + netdev_err(net, "unable to alloc hv_netvsc_packet\n"); + ret = -ENOMEM; + goto drop; } + /* Use the headroom for building up the packet */ + packet = (struct hv_netvsc_packet *)skb->head; packet->status = 0; packet->xmit_more = skb->xmit_more; @@ -591,8 +583,6 @@ drop: net->stats.tx_bytes += skb_length; net->stats.tx_packets++; } else { - if (packet && !packet->part_of_skb) - kfree(packet); if (ret != -EAGAIN) { dev_kfree_skb_any(skb); net->stats.tx_dropped++; @@ -888,6 +878,11 @@ static int netvsc_probe(struct hv_device *dev, net_device_ctx = netdev_priv(net); net_device_ctx->device_ctx = dev; + net_device_ctx->msg_enable = netif_msg_init(debug, default_msg); + if (netif_msg_probe(net_device_ctx)) + netdev_dbg(net, "netvsc msg_enable: %d\n", + net_device_ctx->msg_enable); + hv_set_drvdata(dev, net); INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); INIT_WORK(&net_device_ctx->work, do_set_multicast); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 0d92efefd796..9118cea91882 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -429,7 +429,8 @@ int rndis_filter_receive(struct hv_device *dev, rndis_msg = pkt->data; - dump_rndis_message(dev, rndis_msg); + if (netif_msg_rx_err(net_dev->nd_ctx)) + dump_rndis_message(dev, rndis_msg); switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 49ce7ece5af3..c9cb486c753d 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -80,7 +80,8 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) * assume the pin serves as pull-up. If direction is * output, the default value is high. 
*/ - gpio_set_value(bitbang->mdo, 1 ^ bitbang->mdo_active_low); + gpio_set_value_cansleep(bitbang->mdo, + 1 ^ bitbang->mdo_active_low); return; } @@ -96,7 +97,8 @@ static int mdio_get(struct mdiobb_ctrl *ctrl) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - return gpio_get_value(bitbang->mdio) ^ bitbang->mdio_active_low; + return gpio_get_value_cansleep(bitbang->mdio) ^ + bitbang->mdio_active_low; } static void mdio_set(struct mdiobb_ctrl *ctrl, int what) @@ -105,9 +107,11 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what) container_of(ctrl, struct mdio_gpio_info, ctrl); if (bitbang->mdo) - gpio_set_value(bitbang->mdo, what ^ bitbang->mdo_active_low); + gpio_set_value_cansleep(bitbang->mdo, + what ^ bitbang->mdo_active_low); else - gpio_set_value(bitbang->mdio, what ^ bitbang->mdio_active_low); + gpio_set_value_cansleep(bitbang->mdio, + what ^ bitbang->mdio_active_low); } static void mdc_set(struct mdiobb_ctrl *ctrl, int what) @@ -115,7 +119,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - gpio_set_value(bitbang->mdc, what ^ bitbang->mdc_active_low); + gpio_set_value_cansleep(bitbang->mdc, what ^ bitbang->mdc_active_low); } static struct mdiobb_ops mdio_gpio_ops = { diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c index 1a87a585e74d..66edd99bc302 100644 --- a/drivers/net/phy/mdio-mux-gpio.c +++ b/drivers/net/phy/mdio-mux-gpio.c @@ -12,33 +12,30 @@ #include <linux/module.h> #include <linux/phy.h> #include <linux/mdio-mux.h> -#include <linux/of_gpio.h> +#include <linux/gpio/consumer.h> #define DRV_VERSION "1.1" #define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver" -#define MDIO_MUX_GPIO_MAX_BITS 8 - struct mdio_mux_gpio_state { - struct gpio_desc *gpio[MDIO_MUX_GPIO_MAX_BITS]; - unsigned int num_gpios; + struct gpio_descs *gpios; void *mux_handle; }; static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, void *data) { - int values[MDIO_MUX_GPIO_MAX_BITS]; - unsigned int n; struct mdio_mux_gpio_state *s = data; + int values[s->gpios->ndescs]; + unsigned int n; if (current_child == desired_child) return 0; - for (n = 0; n < s->num_gpios; n++) { + for (n = 0; n < s->gpios->ndescs; n++) values[n] = (desired_child >> n) & 1; - } - gpiod_set_array_cansleep(s->num_gpios, s->gpio, values); + + gpiod_set_array_cansleep(s->gpios->ndescs, s->gpios->desc, values); return 0; } @@ -46,56 +43,33 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, static int mdio_mux_gpio_probe(struct platform_device *pdev) { struct mdio_mux_gpio_state *s; - int num_gpios; - unsigned int n; int r; - if (!pdev->dev.of_node) - return -ENODEV; - - num_gpios = of_gpio_count(pdev->dev.of_node); - if (num_gpios <= 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS) - return -ENODEV; - s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; - s->num_gpios = num_gpios; - - for (n = 0; n < num_gpios; ) { - struct gpio_desc *gpio = gpiod_get_index(&pdev->dev, NULL, n, - GPIOD_OUT_LOW); - if (IS_ERR(gpio)) { - r = PTR_ERR(gpio); - goto err; - } - s->gpio[n] = gpio; - n++; - } + s->gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW); + if (IS_ERR(s->gpios)) + return PTR_ERR(s->gpios); r = mdio_mux_init(&pdev->dev, mdio_mux_gpio_switch_fn, &s->mux_handle, s); - if (r == 0) { - pdev->dev.platform_data = s; - return 0; - } -err: - while (n) { - n--; - gpiod_put(s->gpio[n]); + if (r != 0) { + 
gpiod_put_array(s->gpios); + return r; } - return r; + + pdev->dev.platform_data = s; + return 0; } static int mdio_mux_gpio_remove(struct platform_device *pdev) { - unsigned int n; struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev); mdio_mux_uninit(s->mux_handle); - for (n = 0; n < s->num_gpios; n++) - gpiod_put(s->gpio[n]); + gpiod_put_array(s->gpios); return 0; } diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c index 911b21602ff2..05005c660d4d 100644 --- a/drivers/net/ppp/ppp_mppe.c +++ b/drivers/net/ppp/ppp_mppe.c @@ -478,7 +478,6 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, struct blkcipher_desc desc = { .tfm = state->arc4 }; unsigned ccount; int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; - int sanity = 0; struct scatterlist sg_in[1], sg_out[1]; if (isize <= PPP_HDRLEN + MPPE_OVHD) { @@ -514,31 +513,19 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, "mppe_decompress[%d]: ENCRYPTED bit not set!\n", state->unit); state->sanity_errors += 100; - sanity = 1; + goto sanity_error; } if (!state->stateful && !flushed) { printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in " "stateless mode!\n", state->unit); state->sanity_errors += 100; - sanity = 1; + goto sanity_error; } if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) { printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on " "flag packet!\n", state->unit); state->sanity_errors += 100; - sanity = 1; - } - - if (sanity) { - if (state->sanity_errors < SANITY_MAX) - return DECOMP_ERROR; - else - /* - * Take LCP down if the peer is sending too many bogons. - * We don't want to do this for a single or just a few - * instances since it could just be due to packet corruption. - */ - return DECOMP_FATALERROR; + goto sanity_error; } /* @@ -546,6 +533,13 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, */ if (!state->stateful) { + /* Discard late packet */ + if ((ccount - state->ccount) % MPPE_CCOUNT_SPACE + > MPPE_CCOUNT_SPACE / 2) { + state->sanity_errors++; + goto sanity_error; + } + /* RFC 3078, sec 8.1. Rekey for every packet. */ while (state->ccount != ccount) { mppe_rekey(state, 0); @@ -649,6 +643,16 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, state->sanity_errors >>= 1; return osize; + +sanity_error: + if (state->sanity_errors < SANITY_MAX) + return DECOMP_ERROR; + else + /* Take LCP down if the peer is sending too many bogons. + * We don't want to do this for a single or just a few + * instances since it could just be due to packet corruption. 
+ */ + return DECOMP_FATALERROR; } /* diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 154116aafd0d..27a5f954f8e9 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -730,12 +730,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, /* Only change unicasts */ if (!(is_multicast_ether_addr(f->eth_addr) || is_zero_ether_addr(f->eth_addr))) { - int rc = vxlan_fdb_replace(f, ip, port, vni, + notify |= vxlan_fdb_replace(f, ip, port, vni, ifindex); - - if (rc < 0) - return rc; - notify |= rc; } else return -EOPNOTSUPP; } diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index c43aca69fb30..0fc3fe5fd5b8 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -667,6 +667,8 @@ static struct raw3215_info *raw3215_alloc_info(void) info->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA); info->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA); if (!info->buffer || !info->inbuf) { + kfree(info->inbuf); + kfree(info->buffer); kfree(info); return NULL; } diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 7600639db4c4..add419d6ff34 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -149,7 +149,6 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset); static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg); static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id); static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code); -static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id); /* Functions */ @@ -1340,11 +1339,11 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance) } /* Now complete the io */ + scsi_dma_unmap(cmd); + cmd->scsi_done(cmd); tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); tw_dev->posted_request_count--; - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); - twa_unmap_scsi_data(tw_dev, request_id); } /* Check for valid status after each drain */ @@ -1402,26 +1401,6 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm } } /* End twa_load_sgl() */ -/* This function will perform a pci-dma mapping for a scatter gather list */ -static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id) -{ - int use_sg; - struct scsi_cmnd *cmd = tw_dev->srb[request_id]; - - use_sg = scsi_dma_map(cmd); - if (!use_sg) - return 0; - else if (use_sg < 0) { - TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list"); - return 0; - } - - cmd->SCp.phase = TW_PHASE_SGLIST; - cmd->SCp.have_data_in = use_sg; - - return use_sg; -} /* End twa_map_scsi_sg_data() */ - /* This function will poll for a response interrupt of a request */ static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds) { @@ -1600,9 +1579,11 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev) (tw_dev->state[i] != TW_S_INITIAL) && (tw_dev->state[i] != TW_S_COMPLETED)) { if (tw_dev->srb[i]) { - tw_dev->srb[i]->result = (DID_RESET << 16); - tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); - twa_unmap_scsi_data(tw_dev, i); + struct scsi_cmnd *cmd = tw_dev->srb[i]; + + cmd->result = (DID_RESET << 16); + scsi_dma_unmap(cmd); + cmd->scsi_done(cmd); } } } @@ -1781,21 +1762,18 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ /* Save the scsi command for use by the ISR */ 
tw_dev->srb[request_id] = SCpnt; - /* Initialize phase to zero */ - SCpnt->SCp.phase = TW_PHASE_INITIAL; - retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); switch (retval) { case SCSI_MLQUEUE_HOST_BUSY: + scsi_dma_unmap(SCpnt); twa_free_request_id(tw_dev, request_id); - twa_unmap_scsi_data(tw_dev, request_id); break; case 1: - tw_dev->state[request_id] = TW_S_COMPLETED; - twa_free_request_id(tw_dev, request_id); - twa_unmap_scsi_data(tw_dev, request_id); SCpnt->result = (DID_ERROR << 16); + scsi_dma_unmap(SCpnt); done(SCpnt); + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); retval = 0; } out: @@ -1863,8 +1841,8 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH); } else { - sg_count = twa_map_scsi_sg_data(tw_dev, request_id); - if (sg_count == 0) + sg_count = scsi_dma_map(srb); + if (sg_count < 0) goto out; scsi_for_each_sg(srb, sg, sg_count, i) { @@ -1979,15 +1957,6 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code) return(table[index].text); } /* End twa_string_lookup() */ -/* This function will perform a pci-dma unmap */ -static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id) -{ - struct scsi_cmnd *cmd = tw_dev->srb[request_id]; - - if (cmd->SCp.phase == TW_PHASE_SGLIST) - scsi_dma_unmap(cmd); -} /* End twa_unmap_scsi_data() */ - /* This function gets called when a disk is coming on-line */ static int twa_slave_configure(struct scsi_device *sdev) { diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h index 040f7214e5b7..0fdc83cfa0e1 100644 --- a/drivers/scsi/3w-9xxx.h +++ b/drivers/scsi/3w-9xxx.h @@ -324,11 +324,6 @@ static twa_message_type twa_error_table[] = { #define TW_CURRENT_DRIVER_BUILD 0 #define TW_CURRENT_DRIVER_BRANCH 0 -/* Phase defines */ -#define TW_PHASE_INITIAL 0 -#define TW_PHASE_SINGLE 1 -#define TW_PHASE_SGLIST 2 - /* Misc defines */ #define TW_9550SX_DRAIN_COMPLETED 0xFFFF #define TW_SECTOR_SIZE 512 diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index 2361772d5909..f8374850f714 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -290,26 +290,6 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id) return 0; } /* End twl_post_command_packet() */ -/* This function will perform a pci-dma mapping for a scatter gather list */ -static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id) -{ - int use_sg; - struct scsi_cmnd *cmd = tw_dev->srb[request_id]; - - use_sg = scsi_dma_map(cmd); - if (!use_sg) - return 0; - else if (use_sg < 0) { - TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list"); - return 0; - } - - cmd->SCp.phase = TW_PHASE_SGLIST; - cmd->SCp.have_data_in = use_sg; - - return use_sg; -} /* End twl_map_scsi_sg_data() */ - /* This function hands scsi cdb's to the firmware */ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg) { @@ -357,8 +337,8 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, if (!sglistarg) { /* Map sglist from scsi layer to cmd packet */ if (scsi_sg_count(srb)) { - sg_count = twl_map_scsi_sg_data(tw_dev, request_id); - if (sg_count == 0) + sg_count = scsi_dma_map(srb); + if (sg_count <= 0) goto out; scsi_for_each_sg(srb, sg, sg_count, i) { 
@@ -1102,15 +1082,6 @@ out: return retval; } /* End twl_initialize_device_extension() */ -/* This function will perform a pci-dma unmap */ -static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id) -{ - struct scsi_cmnd *cmd = tw_dev->srb[request_id]; - - if (cmd->SCp.phase == TW_PHASE_SGLIST) - scsi_dma_unmap(cmd); -} /* End twl_unmap_scsi_data() */ - /* This function will handle attention interrupts */ static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev) { @@ -1251,11 +1222,11 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance) } /* Now complete the io */ + scsi_dma_unmap(cmd); + cmd->scsi_done(cmd); tw_dev->state[request_id] = TW_S_COMPLETED; twl_free_request_id(tw_dev, request_id); tw_dev->posted_request_count--; - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); - twl_unmap_scsi_data(tw_dev, request_id); } /* Check for another response interrupt */ @@ -1400,10 +1371,12 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res if ((tw_dev->state[i] != TW_S_FINISHED) && (tw_dev->state[i] != TW_S_INITIAL) && (tw_dev->state[i] != TW_S_COMPLETED)) { - if (tw_dev->srb[i]) { - tw_dev->srb[i]->result = (DID_RESET << 16); - tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); - twl_unmap_scsi_data(tw_dev, i); + struct scsi_cmnd *cmd = tw_dev->srb[i]; + + if (cmd) { + cmd->result = (DID_RESET << 16); + scsi_dma_unmap(cmd); + cmd->scsi_done(cmd); } } } @@ -1507,9 +1480,6 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ /* Save the scsi command for use by the ISR */ tw_dev->srb[request_id] = SCpnt; - /* Initialize phase to zero */ - SCpnt->SCp.phase = TW_PHASE_INITIAL; - retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); if (retval) { tw_dev->state[request_id] = TW_S_COMPLETED; diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h index d474892701d4..fec6449c7595 100644 --- a/drivers/scsi/3w-sas.h +++ b/drivers/scsi/3w-sas.h @@ -103,10 +103,6 @@ static char *twl_aen_severity_table[] = #define TW_CURRENT_DRIVER_BUILD 0 #define TW_CURRENT_DRIVER_BRANCH 0 -/* Phase defines */ -#define TW_PHASE_INITIAL 0 -#define TW_PHASE_SGLIST 2 - /* Misc defines */ #define TW_SECTOR_SIZE 512 #define TW_MAX_UNITS 32 diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index c75f2048319f..2940bd769936 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -1271,32 +1271,6 @@ static int tw_initialize_device_extension(TW_Device_Extension *tw_dev) return 0; } /* End tw_initialize_device_extension() */ -static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) -{ - int use_sg; - - dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n"); - - use_sg = scsi_dma_map(cmd); - if (use_sg < 0) { - printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n"); - return 0; - } - - cmd->SCp.phase = TW_PHASE_SGLIST; - cmd->SCp.have_data_in = use_sg; - - return use_sg; -} /* End tw_map_scsi_sg_data() */ - -static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) -{ - dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n"); - - if (cmd->SCp.phase == TW_PHASE_SGLIST) - scsi_dma_unmap(cmd); -} /* End tw_unmap_scsi_data() */ - /* This function will reset a device extension */ static int tw_reset_device_extension(TW_Device_Extension *tw_dev) { @@ -1319,8 +1293,8 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev) srb = tw_dev->srb[i]; if (srb != NULL) { srb->result = (DID_RESET << 16); - 
tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); - tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[i]); + scsi_dma_unmap(srb); + srb->scsi_done(srb); } } } @@ -1767,8 +1741,8 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id) command_packet->byte8.io.lba = lba; command_packet->byte6.block_count = num_sectors; - use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]); - if (!use_sg) + use_sg = scsi_dma_map(srb); + if (use_sg <= 0) return 1; scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) { @@ -1955,9 +1929,6 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c /* Save the scsi command for use by the ISR */ tw_dev->srb[request_id] = SCpnt; - /* Initialize phase to zero */ - SCpnt->SCp.phase = TW_PHASE_INITIAL; - switch (*command) { case READ_10: case READ_6: @@ -2185,12 +2156,11 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance) /* Now complete the io */ if ((error != TW_ISR_DONT_COMPLETE)) { + scsi_dma_unmap(tw_dev->srb[request_id]); + tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); tw_dev->state[request_id] = TW_S_COMPLETED; tw_state_request_finish(tw_dev, request_id); tw_dev->posted_request_count--; - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); - - tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]); } } diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h index 29b0b84ed69e..6f65e663d393 100644 --- a/drivers/scsi/3w-xxxx.h +++ b/drivers/scsi/3w-xxxx.h @@ -195,11 +195,6 @@ static unsigned char tw_sense_table[][4] = #define TW_AEN_SMART_FAIL 0x000F #define TW_AEN_SBUF_FAIL 0x0024 -/* Phase defines */ -#define TW_PHASE_INITIAL 0 -#define TW_PHASE_SINGLE 1 -#define TW_PHASE_SGLIST 2 - /* Misc defines */ #define TW_ALIGNMENT_6000 64 /* 64 bytes */ #define TW_ALIGNMENT_7000 4 /* 4 bytes */ diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c index ec432763a29a..b95d2779f467 100644 --- a/drivers/scsi/aha1542.c +++ b/drivers/scsi/aha1542.c @@ -375,9 +375,10 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) u8 lun = cmd->device->lun; unsigned long flags; int bufflen = scsi_bufflen(cmd); - int mbo; + int mbo, sg_count; struct mailbox *mb = aha1542->mb; struct ccb *ccb = aha1542->ccb; + struct chain *cptr; if (*cmd->cmnd == REQUEST_SENSE) { /* Don't do the command - we have the sense data already */ @@ -397,6 +398,13 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len); } #endif + if (bufflen) { /* allocate memory before taking host_lock */ + sg_count = scsi_sg_count(cmd); + cptr = kmalloc(sizeof(*cptr) * sg_count, GFP_KERNEL | GFP_DMA); + if (!cptr) + return SCSI_MLQUEUE_HOST_BUSY; + } + /* Use the outgoing mailboxes in a round-robin fashion, because this is how the host adapter will scan for them */ @@ -441,19 +449,10 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) if (bufflen) { struct scatterlist *sg; - struct chain *cptr; - int i, sg_count = scsi_sg_count(cmd); + int i; ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */ - cmd->host_scribble = kmalloc(sizeof(*cptr)*sg_count, - GFP_KERNEL | GFP_DMA); - cptr = (struct chain *) cmd->host_scribble; - if (cptr == NULL) { - /* free the claimed mailbox slot */ - aha1542->int_cmds[mbo] = NULL; - spin_unlock_irqrestore(sh->host_lock, flags); - return SCSI_MLQUEUE_HOST_BUSY; - } + cmd->host_scribble = (void *)cptr; 
scsi_for_each_sg(cmd, sg, sg_count, i) { any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg)) + sg->offset); diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 262ab837a704..9f77d23239a2 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -226,6 +226,7 @@ static struct { {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, {"Promise", "", NULL, BLIST_SPARSELUN}, + {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN}, {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN}, diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 60aae01caa89..6efab1c455e1 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -897,6 +897,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, */ if (*bflags & BLIST_MAX_512) blk_queue_max_hw_sectors(sdev->request_queue, 512); + /* + * Max 1024 sector transfer length for targets that report incorrect + * max/optimal lengths and relied on the old block layer safe default + */ + else if (*bflags & BLIST_MAX_1024) + blk_queue_max_hw_sectors(sdev->request_queue, 1024); /* * Some devices may not want to have a start command automatically diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c index cd4c293f0dd0..fe8875f0d7be 100644 --- a/drivers/sh/pm_runtime.c +++ b/drivers/sh/pm_runtime.c @@ -80,9 +80,10 @@ static int __init sh_pm_runtime_init(void) if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { if (!of_machine_is_compatible("renesas,emev2") && !of_machine_is_compatible("renesas,r7s72100") && - !of_machine_is_compatible("renesas,r8a73a4") && #ifndef CONFIG_PM_GENERIC_DOMAINS_OF + !of_machine_is_compatible("renesas,r8a73a4") && !of_machine_is_compatible("renesas,r8a7740") && + !of_machine_is_compatible("renesas,sh73a0") && #endif !of_machine_is_compatible("renesas,r8a7778") && !of_machine_is_compatible("renesas,r8a7779") && @@ -90,9 +91,7 @@ static int __init sh_pm_runtime_init(void) !of_machine_is_compatible("renesas,r8a7791") && !of_machine_is_compatible("renesas,r8a7792") && !of_machine_is_compatible("renesas,r8a7793") && - !of_machine_is_compatible("renesas,r8a7794") && - !of_machine_is_compatible("renesas,sh7372") && - !of_machine_is_compatible("renesas,sh73a0")) + !of_machine_is_compatible("renesas,r8a7794")) return 0; } diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 08da4d3e2162..46bcebba54b2 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1998,6 +1998,8 @@ pci_wch_ch38x_setup(struct serial_private *priv, #define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250 #define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470 +#define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358 + /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588 @@ -2520,6 +2522,13 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { .subdevice = PCI_ANY_ID, .setup = pci_xr17v35x_setup, }, + { + .vendor = PCI_VENDOR_ID_EXAR, + .device = PCI_DEVICE_ID_EXAR_XR17V8358, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_xr17v35x_setup, + }, /* * Xircom cards */ @@ -2999,6 +3008,7 @@ enum pci_board_num_t { pbn_exar_XR17V352, pbn_exar_XR17V354, pbn_exar_XR17V358, + pbn_exar_XR17V8358, pbn_exar_ibm_saturn, 
pbn_pasemi_1682M, pbn_ni8430_2, @@ -3685,6 +3695,14 @@ static struct pciserial_board pci_boards[] = { .reg_shift = 0, .first_offset = 0, }, + [pbn_exar_XR17V8358] = { + .flags = FL_BASE0, + .num_ports = 16, + .base_baud = 7812500, + .uart_offset = 0x400, + .reg_shift = 0, + .first_offset = 0, + }, [pbn_exar_ibm_saturn] = { .flags = FL_BASE0, .num_ports = 1, @@ -5080,7 +5098,7 @@ static struct pci_device_id serial_pci_tbl[] = { 0, 0, pbn_exar_XR17C158 }, /* - * Exar Corp. XR17V35[248] Dual/Quad/Octal PCIe UARTs + * Exar Corp. XR17V[48]35[248] Dual/Quad/Octal/Hexa PCIe UARTs */ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V352, PCI_ANY_ID, PCI_ANY_ID, @@ -5094,7 +5112,10 @@ static struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_exar_XR17V358 }, - + { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V8358, + PCI_ANY_ID, PCI_ANY_ID, + 0, + 0, pbn_exar_XR17V8358 }, /* * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke) */ diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index d58fe4763d9e..27dade29646b 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -880,6 +880,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port) config.direction = DMA_MEM_TO_DEV; config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; config.dst_addr = port->mapbase + ATMEL_US_THR; + config.dst_maxburst = 1; ret = dmaengine_slave_config(atmel_port->chan_tx, &config); @@ -1059,6 +1060,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port) config.direction = DMA_DEV_TO_MEM; config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; config.src_addr = port->mapbase + ATMEL_US_RHR; + config.src_maxburst = 1; ret = dmaengine_slave_config(atmel_port->chan_rx, &config); diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c index 5b73afb9f9f3..137381e649e5 100644 --- a/drivers/tty/serial/of_serial.c +++ b/drivers/tty/serial/of_serial.c @@ -346,7 +346,6 @@ static const struct of_device_id of_platform_serial_table[] = { { .compatible = "ibm,qpace-nwp-serial", .data = (void *)PORT_NWPSERIAL, }, #endif - { .type = "serial", .data = (void *)PORT_UNKNOWN, }, { /* end of list */ }, }; diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index cf08876922f1..a0ae942d9562 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1068,8 +1068,9 @@ static int s3c64xx_serial_startup(struct uart_port *port) spin_lock_irqsave(&port->lock, flags); ufcon = rd_regl(port, S3C2410_UFCON); - ufcon |= S3C2410_UFCON_RESETRX | S3C2410_UFCON_RESETTX | - S5PV210_UFCON_RXTRIG8; + ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8; + if (!uart_console(port)) + ufcon |= S3C2410_UFCON_RESETTX; wr_regl(port, S3C2410_UFCON, ufcon); enable_rx_pio(ourport); diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index eb5b03be9dfd..0b7bb12dfc68 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1770,7 +1770,7 @@ static const struct file_operations uart_proc_fops = { * @port: the port to write the message * @s: array of characters * @count: number of characters in string to write - * @write: function to write character to port + * @putchar: function to write character to port */ void uart_console_write(struct uart_port *port, const char *s, unsigned int count, diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c index 708eead850b0..b1c6bd3d483f 100644 --- a/drivers/tty/serial/uartlite.c +++ 
b/drivers/tty/serial/uartlite.c @@ -632,7 +632,8 @@ MODULE_DEVICE_TABLE(of, ulite_of_match); static int ulite_probe(struct platform_device *pdev) { - struct resource *res, *res2; + struct resource *res; + int irq; int id = pdev->id; #ifdef CONFIG_OF const __be32 *prop; @@ -646,11 +647,11 @@ static int ulite_probe(struct platform_device *pdev) if (!res) return -ENODEV; - res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res2) - return -ENODEV; + irq = platform_get_irq(pdev, 0); + if (irq <= 0) + return -ENXIO; - return ulite_assign(&pdev->dev, id, res->start, res2->start); + return ulite_assign(&pdev->dev, id, res->start, irq); } static int ulite_remove(struct platform_device *pdev) diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index f218ec658f5d..3ddbac767db3 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1331,9 +1331,9 @@ static SIMPLE_DEV_PM_OPS(cdns_uart_dev_pm_ops, cdns_uart_suspend, */ static int cdns_uart_probe(struct platform_device *pdev) { - int rc, id; + int rc, id, irq; struct uart_port *port; - struct resource *res, *res2; + struct resource *res; struct cdns_uart *cdns_uart_data; cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data), @@ -1380,9 +1380,9 @@ static int cdns_uart_probe(struct platform_device *pdev) goto err_out_clk_disable; } - res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res2) { - rc = -ENODEV; + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + rc = -ENXIO; goto err_out_clk_disable; } @@ -1411,7 +1411,7 @@ static int cdns_uart_probe(struct platform_device *pdev) * and triggers invocation of the config_port() entry point. */ port->mapbase = res->start; - port->irq = res2->start; + port->irq = irq; port->dev = &pdev->dev; port->uartclk = clk_get_rate(cdns_uart_data->uartclk); port->private_data = cdns_uart_data; diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index 632fc8152061..8e53fe469664 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c @@ -536,7 +536,7 @@ EXPORT_SYMBOL(tty_termios_hw_change); * Locking: termios_rwsem */ -static int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios) +int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios) { struct ktermios old_termios; struct tty_ldisc *ld; @@ -569,6 +569,7 @@ static int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios) up_write(&tty->termios_rwsem); return 0; } +EXPORT_SYMBOL_GPL(tty_set_termios); /** * set_termios - set termios values for a tty diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c index 083acf45ad5a..19d655a743b5 100644 --- a/drivers/usb/chipidea/otg_fsm.c +++ b/drivers/usb/chipidea/otg_fsm.c @@ -520,7 +520,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on) { struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm); - mutex_unlock(&fsm->lock); if (on) { ci_role_stop(ci); ci_role_start(ci, CI_ROLE_HOST); @@ -529,7 +528,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on) hw_device_reset(ci); ci_role_start(ci, CI_ROLE_GADGET); } - mutex_lock(&fsm->lock); return 0; } @@ -537,12 +535,10 @@ static int ci_otg_start_gadget(struct otg_fsm *fsm, int on) { struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm); - mutex_unlock(&fsm->lock); if (on) usb_gadget_vbus_connect(&ci->gadget); else usb_gadget_vbus_disconnect(&ci->gadget); - mutex_lock(&fsm->lock); return 0; } diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c 
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 632fc8152061..8e53fe469664 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -536,7 +536,7 @@ EXPORT_SYMBOL(tty_termios_hw_change);
  *	Locking: termios_rwsem
  */
 
-static int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
+int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
 {
 	struct ktermios old_termios;
 	struct tty_ldisc *ld;
@@ -569,6 +569,7 @@ static int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
 	up_write(&tty->termios_rwsem);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(tty_set_termios);
 
 /**
  *	set_termios	-	set termios values for a tty
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index 083acf45ad5a..19d655a743b5 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -520,7 +520,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on)
 {
 	struct ci_hdrc	*ci = container_of(fsm, struct ci_hdrc, fsm);
 
-	mutex_unlock(&fsm->lock);
 	if (on) {
 		ci_role_stop(ci);
 		ci_role_start(ci, CI_ROLE_HOST);
@@ -529,7 +528,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on)
 		hw_device_reset(ci);
 		ci_role_start(ci, CI_ROLE_GADGET);
 	}
-	mutex_lock(&fsm->lock);
 
 	return 0;
 }
@@ -537,12 +535,10 @@ static int ci_otg_start_gadget(struct otg_fsm *fsm, int on)
 {
 	struct ci_hdrc	*ci = container_of(fsm, struct ci_hdrc, fsm);
 
-	mutex_unlock(&fsm->lock);
 	if (on)
 		usb_gadget_vbus_connect(&ci->gadget);
 	else
 		usb_gadget_vbus_disconnect(&ci->gadget);
-	mutex_lock(&fsm->lock);
 
 	return 0;
 }
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 3e15add665e2..5c8f58114677 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1142,11 +1142,16 @@ static int acm_probe(struct usb_interface *intf,
 	}
 
 	while (buflen > 0) {
+		elength = buffer[0];
+		if (!elength) {
+			dev_err(&intf->dev, "skipping garbage byte\n");
+			elength = 1;
+			goto next_desc;
+		}
 		if (buffer[1] != USB_DT_CS_INTERFACE) {
 			dev_err(&intf->dev, "skipping garbage\n");
 			goto next_desc;
 		}
-		elength = buffer[0];
 
 		switch (buffer[2]) {
 		case USB_CDC_UNION_TYPE: /* we've found it */
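Note: the cdc-acm hunk above reorders the descriptor parse loop so the length byte (bLength, buffer[0]) is validated before anything else; previously a zero-length entry would reach next_desc with elength still holding the previous descriptor's length, or stall the walk entirely. A standalone sketch of that walk; the function name, sample buffer, and the extra truncation check are illustrative additions, while the bLength-first ordering is what the hunk establishes:

#include <stdint.h>
#include <stdio.h>

#define USB_DT_CS_INTERFACE	0x24	/* class-specific interface descriptor */

/* Walk a buffer of descriptors laid out as { bLength, bDescriptorType, ... }. */
static void walk_cs_descriptors(const uint8_t *buf, int len)
{
	while (len > 0) {
		int elength = buf[0];	/* bLength, checked first */

		if (!elength) {
			/* A zero bLength would loop forever; step one byte. */
			fprintf(stderr, "skipping garbage byte\n");
			elength = 1;
		} else if (elength > len) {
			break;		/* truncated descriptor, stop */
		} else if (elength >= 3 && buf[1] == USB_DT_CS_INTERFACE) {
			printf("CS_INTERFACE subtype 0x%02x, %d bytes\n",
			       buf[2], elength);
		}

		buf += elength;
		len -= elength;
	}
}

int main(void)
{
	/* A valid 5-byte CS_INTERFACE entry followed by one garbage 0 byte. */
	const uint8_t sample[] = { 0x05, 0x24, 0x06, 0x00, 0x01, 0x00 };

	walk_cs_descriptors(sample, sizeof(sample));
	return 0;
}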
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index 9db74ca7e5b9..275c92e53a59 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -88,13 +88,20 @@ static int ehci_msm_probe(struct platform_device *pdev)
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(hcd->regs)) {
-		ret = PTR_ERR(hcd->regs);
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to get memory resource\n");
+		ret = -ENODEV;
 		goto put_hcd;
 	}
 
+	hcd->rsrc_start = res->start;
 	hcd->rsrc_len = resource_size(res);
+	hcd->regs = devm_ioremap(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len);
+	if (!hcd->regs) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		ret = -ENOMEM;
+		goto put_hcd;
+	}
 
 	/*
 	 * OTG driver takes care of PHY initialization, clock management,
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index 9893d696fc97..f58caa9e6a27 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -51,7 +51,8 @@ static int uas_find_endpoints(struct usb_host_interface *alt,
 }
 
 static int uas_use_uas_driver(struct usb_interface *intf,
-			      const struct usb_device_id *id)
+			      const struct usb_device_id *id,
+			      unsigned long *flags_ret)
 {
 	struct usb_host_endpoint *eps[4] = { };
 	struct usb_device *udev = interface_to_usbdev(intf);
@@ -73,7 +74,7 @@ static int uas_use_uas_driver(struct usb_interface *intf,
 	 * this writing the following versions exist:
 	 * ASM1051 - no uas support version
 	 * ASM1051 - with broken (*) uas support
-	 * ASM1053 - with working uas support
+	 * ASM1053 - with working uas support, but problems with large xfers
 	 * ASM1153 - with working uas support
 	 *
 	 * Devices with these chips re-use a number of device-ids over the
@@ -103,6 +104,9 @@ static int uas_use_uas_driver(struct usb_interface *intf,
 		} else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) {
 			/* Possibly an ASM1051, disable uas */
 			flags |= US_FL_IGNORE_UAS;
+		} else {
+			/* ASM1053, these have issues with large transfers */
+			flags |= US_FL_MAX_SECTORS_240;
 		}
 	}
 
@@ -132,5 +136,8 @@ static int uas_use_uas_driver(struct usb_interface *intf,
 		return 0;
 	}
 
+	if (flags_ret)
+		*flags_ret = flags;
+
 	return 1;
 }
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 6cdabdc119a7..6d3122afeed3 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -759,7 +759,10 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
 
 static int uas_slave_alloc(struct scsi_device *sdev)
 {
-	sdev->hostdata = (void *)sdev->host->hostdata;
+	struct uas_dev_info *devinfo =
+		(struct uas_dev_info *)sdev->host->hostdata;
+
+	sdev->hostdata = devinfo;
 
 	/* USB has unusual DMA-alignment requirements: Although the
 	 * starting address of each scatter-gather element doesn't matter,
@@ -778,6 +781,11 @@ static int uas_slave_alloc(struct scsi_device *sdev)
 	 */
 	blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
 
+	if (devinfo->flags & US_FL_MAX_SECTORS_64)
+		blk_queue_max_hw_sectors(sdev->request_queue, 64);
+	else if (devinfo->flags & US_FL_MAX_SECTORS_240)
+		blk_queue_max_hw_sectors(sdev->request_queue, 240);
+
 	return 0;
 }
 
@@ -887,8 +895,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
 	struct Scsi_Host *shost = NULL;
 	struct uas_dev_info *devinfo;
 	struct usb_device *udev = interface_to_usbdev(intf);
+	unsigned long dev_flags;
 
-	if (!uas_use_uas_driver(intf, id))
+	if (!uas_use_uas_driver(intf, id, &dev_flags))
 		return -ENODEV;
 
 	if (uas_switch_interface(udev, intf))
@@ -910,8 +919,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
 	devinfo->udev = udev;
 	devinfo->resetting = 0;
 	devinfo->shutdown = 0;
-	devinfo->flags = id->driver_info;
-	usb_stor_adjust_quirks(udev, &devinfo->flags);
+	devinfo->flags = dev_flags;
 	init_usb_anchor(&devinfo->cmd_urbs);
 	init_usb_anchor(&devinfo->sense_urbs);
 	init_usb_anchor(&devinfo->data_urbs);
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 5600c33fcadb..6c10c888f35f 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -479,7 +479,8 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
 			US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT |
 			US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
 			US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
-			US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES);
+			US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
+			US_FL_MAX_SECTORS_240);
 
 	p = quirks;
 	while (*p) {
@@ -520,6 +521,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
 		case 'f':
 			f |= US_FL_NO_REPORT_OPCODES;
 			break;
+		case 'g':
+			f |= US_FL_MAX_SECTORS_240;
+			break;
 		case 'h':
 			f |= US_FL_CAPACITY_HEURISTICS;
 			break;
@@ -1080,7 +1084,7 @@ static int storage_probe(struct usb_interface *intf,
 
 	/* If uas is enabled and this device can do uas then ignore it. */
 #if IS_ENABLED(CONFIG_USB_UAS)
-	if (uas_use_uas_driver(intf, id))
+	if (uas_use_uas_driver(intf, id, NULL))
 		return -ENXIO;
 #endif
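Note: the usb.c hunks do two things: they add US_FL_MAX_SECTORS_240 to the mask of flags a user may set and map the new quirk letter 'g' onto it, so the limit can be forced from the "quirks=" module parameter. A standalone sketch of that letter-to-flag parsing; the flag values and the subset of letters are illustrative, only 'f', 'g', and 'h' appear in the hunk above:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's US_FL_* bits. */
#define FL_NO_REPORT_OPCODES	(1UL << 0)
#define FL_MAX_SECTORS_240	(1UL << 1)
#define FL_CAPACITY_HEURISTICS	(1UL << 2)

/* Map quirk letters (the part after vid:pid: in "quirks=") to flag bits. */
static unsigned long parse_quirk_letters(const char *p)
{
	unsigned long f = 0;

	for (; *p; p++) {
		switch (*p) {
		case 'f':
			f |= FL_NO_REPORT_OPCODES;
			break;
		case 'g':
			f |= FL_MAX_SECTORS_240;	/* new letter */
			break;
		case 'h':
			f |= FL_CAPACITY_HEURISTICS;
			break;
		default:
			break;	/* other letters omitted from this sketch */
		}
	}
	return f;
}

int main(void)
{
	/* e.g. usb-storage.quirks=0bda:8153:g would reach here as "g" */
	printf("flags for \"g\": %#lx\n", parse_quirk_letters("g"));
	return 0;
}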