author | Paul Mundt <lethal@linux-sh.org> | 2011-01-07 10:29:26 +0900
committer | Paul Mundt <lethal@linux-sh.org> | 2011-01-07 10:29:26 +0900
commit | 5e93c6b4ecd78b1bab49bad1dc2f6ed7ec0115ee (patch)
tree | 4f4e321a1ca0baf64d8af528080c71f93495a7d7 /drivers/net/vxge
parent | 98d27b8abf413a310df6676f7d2128ada1cccc08 (diff)
parent | 3c0cb7c31c206aaedb967e44b98442bbeb17a6c4 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6 into rmobile-latest
Conflicts:
	arch/arm/mach-shmobile/Kconfig
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'drivers/net/vxge')
-rw-r--r-- | drivers/net/vxge/vxge-config.c | 3604
-rw-r--r-- | drivers/net/vxge/vxge-config.h | 169
-rw-r--r-- | drivers/net/vxge/vxge-ethtool.c | 112
-rw-r--r-- | drivers/net/vxge/vxge-main.c | 1106
-rw-r--r-- | drivers/net/vxge/vxge-main.h | 86
-rw-r--r-- | drivers/net/vxge/vxge-reg.h | 33
-rw-r--r-- | drivers/net/vxge/vxge-traffic.c | 775
-rw-r--r-- | drivers/net/vxge/vxge-traffic.h | 49
-rw-r--r-- | drivers/net/vxge/vxge-version.h | 33
9 files changed, 3143 insertions, 2824 deletions
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c index 906a3ca3676b..01c05f53e2f9 100644 --- a/drivers/net/vxge/vxge-config.c +++ b/drivers/net/vxge/vxge-config.c @@ -19,109 +19,128 @@ #include "vxge-traffic.h" #include "vxge-config.h" - -static enum vxge_hw_status -__vxge_hw_fifo_create( - struct __vxge_hw_vpath_handle *vpath_handle, - struct vxge_hw_fifo_attr *attr); - -static enum vxge_hw_status -__vxge_hw_fifo_abort( - struct __vxge_hw_fifo *fifoh); - -static enum vxge_hw_status -__vxge_hw_fifo_reset( - struct __vxge_hw_fifo *ringh); - -static enum vxge_hw_status -__vxge_hw_fifo_delete( - struct __vxge_hw_vpath_handle *vpath_handle); - -static struct __vxge_hw_blockpool_entry * -__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev, - u32 size); +#include "vxge-main.h" + +#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \ + status = __vxge_hw_vpath_stats_access(vpath, \ + VXGE_HW_STATS_OP_READ, \ + offset, \ + &val64); \ + if (status != VXGE_HW_OK) \ + return status; \ +} static void -__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev, - struct __vxge_hw_blockpool_entry *entry); - -static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh, - void *block_addr, - u32 length, - struct pci_dev *dma_h, - struct pci_dev *acc_handle); - -static enum vxge_hw_status -__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, - struct __vxge_hw_blockpool *blockpool, - u32 pool_size, - u32 pool_max); +vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg) +{ + u64 val64; -static void -__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool); + val64 = readq(&vp_reg->rxmac_vcfg0); + val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); + writeq(val64, &vp_reg->rxmac_vcfg0); + val64 = readq(&vp_reg->rxmac_vcfg0); +} -static void * -__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev, - u32 size, - struct vxge_hw_mempool_dma *dma_object); +/* + * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle + */ +int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id) +{ + struct vxge_hw_vpath_reg __iomem *vp_reg; + struct __vxge_hw_virtualpath *vpath; + u64 val64, rxd_count, rxd_spat; + int count = 0, total_count = 0; -static void -__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev, - void *memblock, - u32 size, - struct vxge_hw_mempool_dma *dma_object); + vpath = &hldev->virtual_paths[vp_id]; + vp_reg = vpath->vp_reg; + vxge_hw_vpath_set_zero_rx_frm_len(vp_reg); -static struct __vxge_hw_channel* -__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, - enum __vxge_hw_channel_type type, u32 length, - u32 per_dtr_space, void *userdata); + /* Check that the ring controller for this vpath has enough free RxDs + * to send frames to the host. This is done by reading the + * PRC_RXD_DOORBELL_VPn register and comparing the read value to the + * RXD_SPAT value for the vpath. + */ + val64 = readq(&vp_reg->prc_cfg6); + rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1; + /* Use a factor of 2 when comparing rxd_count against rxd_spat for some + * leg room. 
+ */ + rxd_spat *= 2; -static void -__vxge_hw_channel_free( - struct __vxge_hw_channel *channel); + do { + mdelay(1); -static enum vxge_hw_status -__vxge_hw_channel_initialize( - struct __vxge_hw_channel *channel); + rxd_count = readq(&vp_reg->prc_rxd_doorbell); -static enum vxge_hw_status -__vxge_hw_channel_reset( - struct __vxge_hw_channel *channel); + /* Check that the ring controller for this vpath does + * not have any frame in its pipeline. + */ + val64 = readq(&vp_reg->frm_in_progress_cnt); + if ((rxd_count <= rxd_spat) || (val64 > 0)) + count = 0; + else + count++; + total_count++; + } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) && + (total_count < VXGE_HW_MAX_POLLING_COUNT)); -static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp); + if (total_count >= VXGE_HW_MAX_POLLING_COUNT) + printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n", + __func__); -static enum vxge_hw_status -__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config); + return total_count; +} -static enum vxge_hw_status -__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config); +/* vxge_hw_device_wait_receive_idle - This function waits until all frames + * stored in the frame buffer for each vpath assigned to the given + * function (hldev) have been sent to the host. + */ +void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev) +{ + int i, total_count = 0; -static void -__vxge_hw_device_id_get(struct __vxge_hw_device *hldev); + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (!(hldev->vpaths_deployed & vxge_mBIT(i))) + continue; -static void -__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev); + total_count += vxge_hw_vpath_wait_receive_idle(hldev, i); + if (total_count >= VXGE_HW_MAX_POLLING_COUNT) + break; + } +} +/* + * __vxge_hw_device_register_poll + * Will poll certain register for specified amount of time. + * Will poll until masked bit is not cleared. 
+ */ static enum vxge_hw_status -__vxge_hw_vpath_card_info_get( - u32 vp_id, - struct vxge_hw_vpath_reg __iomem *vpath_reg, - struct vxge_hw_device_hw_info *hw_info); +__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis) +{ + u64 val64; + u32 i = 0; + enum vxge_hw_status ret = VXGE_HW_FAIL; -static enum vxge_hw_status -__vxge_hw_device_initialize(struct __vxge_hw_device *hldev); + udelay(10); -static void -__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev); + do { + val64 = readq(reg); + if (!(val64 & mask)) + return VXGE_HW_OK; + udelay(100); + } while (++i <= 9); -static enum vxge_hw_status -__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev); + i = 0; + do { + val64 = readq(reg); + if (!(val64 & mask)) + return VXGE_HW_OK; + mdelay(1); + } while (++i <= max_millis); -static enum vxge_hw_status -__vxge_hw_device_register_poll( - void __iomem *reg, - u64 mask, u32 max_millis); + return ret; +} static inline enum vxge_hw_status __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr, @@ -129,139 +148,258 @@ __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr, { __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr); wmb(); - __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr); wmb(); - return __vxge_hw_device_register_poll(addr, mask, max_millis); + return __vxge_hw_device_register_poll(addr, mask, max_millis); } -static struct vxge_hw_mempool* -__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size, - u32 item_size, u32 private_size, u32 items_initial, - u32 items_max, struct vxge_hw_mempool_cbs *mp_callback, - void *userdata); -static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool); - static enum vxge_hw_status -__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath, - struct vxge_hw_vpath_stats_hw_info *hw_stats); +vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action, + u32 fw_memo, u32 offset, u64 *data0, u64 *data1, + u64 *steer_ctrl) +{ + struct vxge_hw_vpath_reg __iomem *vp_reg; + enum vxge_hw_status status; + u64 val64; + u32 retry = 0, max_retry = 100; -static enum vxge_hw_status -vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle); + vp_reg = vpath->vp_reg; -static enum vxge_hw_status -__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg); + if (vpath->vp_open) { + max_retry = 3; + spin_lock(&vpath->lock); + } -static u64 -__vxge_hw_vpath_pci_func_mode_get(u32 vp_id, - struct vxge_hw_vpath_reg __iomem *vpath_reg); + writeq(*data0, &vp_reg->rts_access_steer_data0); + writeq(*data1, &vp_reg->rts_access_steer_data1); + wmb(); -static u32 -__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg); + val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | + *steer_ctrl; -static enum vxge_hw_status -__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg, - u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]); + status = __vxge_hw_pio_mem_write64(val64, + &vp_reg->rts_access_steer_ctrl, + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, + VXGE_HW_DEF_DEVICE_POLL_MILLIS); + + /* The __vxge_hw_device_register_poll can udelay for a significant + * amount of time, blocking other proccess from the CPU. If it delays + * for ~5secs, a NMI error can occur. 
A way around this is to give up + * the processor via msleep, but this is not allowed is under lock. + * So, only allow it to sleep for ~4secs if open. Otherwise, delay for + * 1sec and sleep for 10ms until the firmware operation has completed + * or timed-out. + */ + while ((status != VXGE_HW_OK) && retry++ < max_retry) { + if (!vpath->vp_open) + msleep(20); + status = __vxge_hw_device_register_poll( + &vp_reg->rts_access_steer_ctrl, + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, + VXGE_HW_DEF_DEVICE_POLL_MILLIS); + } -static enum vxge_hw_status -__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath); + if (status != VXGE_HW_OK) + goto out; + val64 = readq(&vp_reg->rts_access_steer_ctrl); + if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { + *data0 = readq(&vp_reg->rts_access_steer_data0); + *data1 = readq(&vp_reg->rts_access_steer_data1); + *steer_ctrl = val64; + } else + status = VXGE_HW_FAIL; -static enum vxge_hw_status -__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id); +out: + if (vpath->vp_open) + spin_unlock(&vpath->lock); + return status; +} -static enum vxge_hw_status -__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg, - struct vxge_hw_device_hw_info *hw_info); +enum vxge_hw_status +vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major, + u32 *minor, u32 *build) +{ + u64 data0 = 0, data1 = 0, steer_ctrl = 0; + struct __vxge_hw_virtualpath *vpath; + enum vxge_hw_status status; -static enum vxge_hw_status -__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id); + vpath = &hldev->virtual_paths[hldev->first_vp_id]; -static void -__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id); + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_FW_UPGRADE_ACTION, + VXGE_HW_FW_UPGRADE_MEMO, + VXGE_HW_FW_UPGRADE_OFFSET_READ, + &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) + return status; -static enum vxge_hw_status -__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, - u32 operation, u32 offset, u64 *stat); + *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0); + *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0); + *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0); -static enum vxge_hw_status -__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath, - struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats); + return status; +} -static enum vxge_hw_status -__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, - struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats); +enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev) +{ + u64 data0 = 0, data1 = 0, steer_ctrl = 0; + struct __vxge_hw_virtualpath *vpath; + enum vxge_hw_status status; + u32 ret; -/* - * __vxge_hw_channel_allocate - Allocate memory for channel - * This function allocates required memory for the channel and various arrays - * in the channel - */ -struct __vxge_hw_channel* -__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, - enum __vxge_hw_channel_type type, - u32 length, u32 per_dtr_space, void *userdata) + vpath = &hldev->virtual_paths[hldev->first_vp_id]; + + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_FW_UPGRADE_ACTION, + VXGE_HW_FW_UPGRADE_MEMO, + VXGE_HW_FW_UPGRADE_OFFSET_COMMIT, + &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__); + goto exit; + } + + ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F; + if (ret != 1) { 
+ vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d", + __func__, ret); + status = VXGE_HW_FAIL; + } + +exit: + return status; +} + +enum vxge_hw_status +vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size) { - struct __vxge_hw_channel *channel; - struct __vxge_hw_device *hldev; - int size = 0; - u32 vp_id; + u64 data0 = 0, data1 = 0, steer_ctrl = 0; + struct __vxge_hw_virtualpath *vpath; + enum vxge_hw_status status; + int ret_code, sec_code; - hldev = vph->vpath->hldev; - vp_id = vph->vpath->vp_id; + vpath = &hldev->virtual_paths[hldev->first_vp_id]; - switch (type) { - case VXGE_HW_CHANNEL_TYPE_FIFO: - size = sizeof(struct __vxge_hw_fifo); - break; - case VXGE_HW_CHANNEL_TYPE_RING: - size = sizeof(struct __vxge_hw_ring); - break; - default: - break; + /* send upgrade start command */ + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_FW_UPGRADE_ACTION, + VXGE_HW_FW_UPGRADE_MEMO, + VXGE_HW_FW_UPGRADE_OFFSET_START, + &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed", + __func__); + return status; } - channel = kzalloc(size, GFP_KERNEL); - if (channel == NULL) - goto exit0; - INIT_LIST_HEAD(&channel->item); + /* Transfer fw image to adapter 16 bytes at a time */ + for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) { + steer_ctrl = 0; - channel->common_reg = hldev->common_reg; - channel->first_vp_id = hldev->first_vp_id; - channel->type = type; - channel->devh = hldev; - channel->vph = vph; - channel->userdata = userdata; - channel->per_dtr_space = per_dtr_space; - channel->length = length; - channel->vp_id = vp_id; + /* The next 128bits of fwdata to be loaded onto the adapter */ + data0 = *((u64 *)fwdata); + data1 = *((u64 *)fwdata + 1); - channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); - if (channel->work_arr == NULL) - goto exit1; + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_FW_UPGRADE_ACTION, + VXGE_HW_FW_UPGRADE_MEMO, + VXGE_HW_FW_UPGRADE_OFFSET_SEND, + &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed", + __func__); + goto out; + } - channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); - if (channel->free_arr == NULL) - goto exit1; - channel->free_ptr = length; + ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0); + switch (ret_code) { + case VXGE_HW_FW_UPGRADE_OK: + /* All OK, send next 16 bytes. */ + break; + case VXGE_FW_UPGRADE_BYTES2SKIP: + /* skip bytes in the stream */ + fwdata += (data0 >> 8) & 0xFFFFFFFF; + break; + case VXGE_HW_FW_UPGRADE_DONE: + goto out; + case VXGE_HW_FW_UPGRADE_ERR: + sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0); + switch (sec_code) { + case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: + case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: + printk(KERN_ERR + "corrupted data from .ncf file\n"); + break; + case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: + case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: + case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: + case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: + case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: + printk(KERN_ERR "invalid .ncf file\n"); + break; + case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: + printk(KERN_ERR "buffer overflow\n"); + break; + case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: + printk(KERN_ERR "failed to flash the image\n"); + break; + case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: + printk(KERN_ERR + "generic error. 
Unknown error type\n"); + break; + default: + printk(KERN_ERR "Unknown error of type %d\n", + sec_code); + break; + } + status = VXGE_HW_FAIL; + goto out; + default: + printk(KERN_ERR "Unknown FW error: %d\n", ret_code); + status = VXGE_HW_FAIL; + goto out; + } + /* point to next 16 bytes */ + fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE; + } +out: + return status; +} - channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); - if (channel->reserve_arr == NULL) - goto exit1; - channel->reserve_ptr = length; - channel->reserve_top = 0; +enum vxge_hw_status +vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev, + struct eprom_image *img) +{ + u64 data0 = 0, data1 = 0, steer_ctrl = 0; + struct __vxge_hw_virtualpath *vpath; + enum vxge_hw_status status; + int i; - channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); - if (channel->orig_arr == NULL) - goto exit1; + vpath = &hldev->virtual_paths[hldev->first_vp_id]; - return channel; -exit1: - __vxge_hw_channel_free(channel); + for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) { + data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i); + data1 = steer_ctrl = 0; -exit0: - return NULL; + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, + VXGE_HW_FW_API_GET_EPROM_REV, + 0, &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) + break; + + img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0); + img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0); + img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0); + img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0); + } + + return status; } /* @@ -269,7 +407,7 @@ exit0: * This function deallocates memory from the channel and various arrays * in the channel */ -void __vxge_hw_channel_free(struct __vxge_hw_channel *channel) +static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel) { kfree(channel->work_arr); kfree(channel->free_arr); @@ -283,7 +421,7 @@ void __vxge_hw_channel_free(struct __vxge_hw_channel *channel) * This function initializes a channel by properly setting the * various references */ -enum vxge_hw_status +static enum vxge_hw_status __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel) { u32 i; @@ -318,7 +456,7 @@ __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel) * __vxge_hw_channel_reset - Resets a channel * This function resets a channel by properly setting the various references */ -enum vxge_hw_status +static enum vxge_hw_status __vxge_hw_channel_reset(struct __vxge_hw_channel *channel) { u32 i; @@ -345,8 +483,7 @@ __vxge_hw_channel_reset(struct __vxge_hw_channel *channel) * Initialize certain PCI/PCI-X configuration registers * with recommended values. Save config space for future hw resets. */ -void -__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev) +static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev) { u16 cmd = 0; @@ -358,39 +495,7 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev) pci_save_state(hldev->pdev); } -/* - * __vxge_hw_device_register_poll - * Will poll certain register for specified amount of time. - * Will poll until masked bit is not cleared. 
- */ -static enum vxge_hw_status -__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis) -{ - u64 val64; - u32 i = 0; - enum vxge_hw_status ret = VXGE_HW_FAIL; - - udelay(10); - - do { - val64 = readq(reg); - if (!(val64 & mask)) - return VXGE_HW_OK; - udelay(100); - } while (++i <= 9); - - i = 0; - do { - val64 = readq(reg); - if (!(val64 & mask)) - return VXGE_HW_OK; - mdelay(1); - } while (++i <= max_millis); - - return ret; -} - - /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset +/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset * in progress * This routine checks the vpath reset in progress register is turned zero */ @@ -405,6 +510,60 @@ __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog) } /* + * _hw_legacy_swapper_set - Set the swapper bits for the legacy secion. + * Set the swapper bits appropriately for the lagacy section. + */ +static enum vxge_hw_status +__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg) +{ + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + + val64 = readq(&legacy_reg->toc_swapper_fb); + + wmb(); + + switch (val64) { + case VXGE_HW_SWAPPER_INITIAL_VALUE: + return status; + + case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED: + writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, + &legacy_reg->pifm_rd_swap_en); + writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, + &legacy_reg->pifm_rd_flip_en); + writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, + &legacy_reg->pifm_wr_swap_en); + writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, + &legacy_reg->pifm_wr_flip_en); + break; + + case VXGE_HW_SWAPPER_BYTE_SWAPPED: + writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, + &legacy_reg->pifm_rd_swap_en); + writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, + &legacy_reg->pifm_wr_swap_en); + break; + + case VXGE_HW_SWAPPER_BIT_FLIPPED: + writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, + &legacy_reg->pifm_rd_flip_en); + writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, + &legacy_reg->pifm_wr_flip_en); + break; + } + + wmb(); + + val64 = readq(&legacy_reg->toc_swapper_fb); + + if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE) + status = VXGE_HW_ERR_SWAPPER_CTRL; + + return status; +} + +/* * __vxge_hw_device_toc_get * This routine sets the swapper and reads the toc pointer and returns the * memory mapped address of the toc @@ -435,7 +594,7 @@ exit: * register location pointers in the device object. It waits until the ric is * completed initializing registers. */ -enum vxge_hw_status +static enum vxge_hw_status __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev) { u64 val64; @@ -496,26 +655,6 @@ exit: } /* - * __vxge_hw_device_id_get - * This routine returns sets the device id and revision numbers into the device - * structure - */ -void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev) -{ - u64 val64; - - val64 = readq(&hldev->common_reg->titan_asic_id); - hldev->device_id = - (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64); - - hldev->major_revision = - (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64); - - hldev->minor_revision = - (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64); -} - -/* * __vxge_hw_device_access_rights_get: Get Access Rights of the driver * This routine returns the Access Rights of the driver */ @@ -568,10 +707,25 @@ __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id) } /* + * __vxge_hw_vpath_func_id_get - Get the function id of the vpath. + * Returns the function number of the vpath. 
+ */ +static u32 +__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg) +{ + u64 val64; + + val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1); + + return + (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64); +} + +/* * __vxge_hw_device_host_info_get * This routine returns the host type assignments */ -void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev) +static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev) { u64 val64; u32 i; @@ -584,16 +738,18 @@ void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev) hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments); for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { - if (!(hldev->vpath_assignments & vxge_mBIT(i))) continue; hldev->func_id = - __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]); + __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]); hldev->access_rights = __vxge_hw_device_access_rights_get( hldev->host_type, hldev->func_id); + hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN; + hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i]; + hldev->first_vp_id = i; break; } @@ -634,7 +790,8 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev) * __vxge_hw_device_initialize * Initialize Titan-V hardware. */ -enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev) +static enum vxge_hw_status +__vxge_hw_device_initialize(struct __vxge_hw_device *hldev) { enum vxge_hw_status status = VXGE_HW_OK; @@ -650,6 +807,196 @@ exit: return status; } +/* + * __vxge_hw_vpath_fw_ver_get - Get the fw version + * Returns FW Version + */ +static enum vxge_hw_status +__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath, + struct vxge_hw_device_hw_info *hw_info) +{ + struct vxge_hw_device_version *fw_version = &hw_info->fw_version; + struct vxge_hw_device_date *fw_date = &hw_info->fw_date; + struct vxge_hw_device_version *flash_version = &hw_info->flash_version; + struct vxge_hw_device_date *flash_date = &hw_info->flash_date; + u64 data0, data1 = 0, steer_ctrl = 0; + enum vxge_hw_status status; + + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, + 0, &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) + goto exit; + + fw_date->day = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0); + fw_date->month = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0); + fw_date->year = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0); + + snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d", + fw_date->month, fw_date->day, fw_date->year); + + fw_version->major = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0); + fw_version->minor = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0); + fw_version->build = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0); + + snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", + fw_version->major, fw_version->minor, fw_version->build); + + flash_date->day = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1); + flash_date->month = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1); + flash_date->year = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1); + + snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d", + flash_date->month, flash_date->day, flash_date->year); + + flash_version->major = + (u32) 
VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1); + flash_version->minor = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1); + flash_version->build = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1); + + snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", + flash_version->major, flash_version->minor, + flash_version->build); + +exit: + return status; +} + +/* + * __vxge_hw_vpath_card_info_get - Get the serial numbers, + * part number and product description. + */ +static enum vxge_hw_status +__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath, + struct vxge_hw_device_hw_info *hw_info) +{ + enum vxge_hw_status status; + u64 data0, data1 = 0, steer_ctrl = 0; + u8 *serial_number = hw_info->serial_number; + u8 *part_number = hw_info->part_number; + u8 *product_desc = hw_info->product_desc; + u32 i, j = 0; + + data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER; + + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, + 0, &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) + return status; + + ((u64 *)serial_number)[0] = be64_to_cpu(data0); + ((u64 *)serial_number)[1] = be64_to_cpu(data1); + + data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER; + data1 = steer_ctrl = 0; + + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, + 0, &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) + return status; + + ((u64 *)part_number)[0] = be64_to_cpu(data0); + ((u64 *)part_number)[1] = be64_to_cpu(data1); + + for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0; + i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) { + data0 = i; + data1 = steer_ctrl = 0; + + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, + 0, &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) + return status; + + ((u64 *)product_desc)[j++] = be64_to_cpu(data0); + ((u64 *)product_desc)[j++] = be64_to_cpu(data1); + } + + return status; +} + +/* + * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode + * Returns pci function mode + */ +static enum vxge_hw_status +__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath, + struct vxge_hw_device_hw_info *hw_info) +{ + u64 data0, data1 = 0, steer_ctrl = 0; + enum vxge_hw_status status; + + data0 = 0; + + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_FW_API_GET_FUNC_MODE, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, + 0, &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) + return status; + + hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0); + return status; +} + +/* + * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath + * from MAC address table. 
+ */ +static enum vxge_hw_status +__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath, + u8 *macaddr, u8 *macaddr_mask) +{ + u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY, + data0 = 0, data1 = 0, steer_ctrl = 0; + enum vxge_hw_status status; + int i; + + do { + status = vxge_hw_vpath_fw_api(vpath, action, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA, + 0, &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) + goto exit; + + data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0); + data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK( + data1); + + for (i = ETH_ALEN; i > 0; i--) { + macaddr[i - 1] = (u8) (data0 & 0xFF); + data0 >>= 8; + + macaddr_mask[i - 1] = (u8) (data1 & 0xFF); + data1 >>= 8; + } + + action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY; + data0 = 0, data1 = 0, steer_ctrl = 0; + + } while (!is_valid_ether_addr(macaddr)); +exit: + return status; +} + /** * vxge_hw_device_hw_info_get - Get the hw information * Returns the vpath mask that has the bits set for each vpath allocated @@ -665,9 +1012,9 @@ vxge_hw_device_hw_info_get(void __iomem *bar0, struct vxge_hw_toc_reg __iomem *toc; struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg; struct vxge_hw_common_reg __iomem *common_reg; - struct vxge_hw_vpath_reg __iomem *vpath_reg; struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; enum vxge_hw_status status; + struct __vxge_hw_virtualpath vpath; memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info)); @@ -693,7 +1040,6 @@ vxge_hw_device_hw_info_get(void __iomem *bar0, (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { - if (!((hw_info->vpath_mask) & vxge_mBIT(i))) continue; @@ -702,7 +1048,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0, vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *) (bar0 + val64); - hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg); + hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg); if (__vxge_hw_device_access_rights_get(hw_info->host_type, hw_info->func_id) & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) { @@ -718,16 +1064,19 @@ vxge_hw_device_hw_info_get(void __iomem *bar0, val64 = readq(&toc->toc_vpath_pointer[i]); - vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64); + vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *) + (bar0 + val64); + vpath.vp_open = 0; - hw_info->function_mode = - __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg); + status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info); + if (status != VXGE_HW_OK) + goto exit; - status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info); + status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info); if (status != VXGE_HW_OK) goto exit; - status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info); + status = __vxge_hw_vpath_card_info_get(&vpath, hw_info); if (status != VXGE_HW_OK) goto exit; @@ -735,14 +1084,15 @@ vxge_hw_device_hw_info_get(void __iomem *bar0, } for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { - if (!((hw_info->vpath_mask) & vxge_mBIT(i))) continue; val64 = readq(&toc->toc_vpath_pointer[i]); - vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64); + vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *) + (bar0 + val64); + vpath.vp_open = 0; - status = __vxge_hw_vpath_addr_get(i, vpath_reg, + status = __vxge_hw_vpath_addr_get(&vpath, hw_info->mac_addrs[i], hw_info->mac_addr_masks[i]); if (status != VXGE_HW_OK) @@ -753,6 +1103,218 @@ exit: } /* + * __vxge_hw_blockpool_destroy - Deallocates the block pool + */ +static void 
__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) +{ + struct __vxge_hw_device *hldev; + struct list_head *p, *n; + u16 ret; + + if (blockpool == NULL) { + ret = 1; + goto exit; + } + + hldev = blockpool->hldev; + + list_for_each_safe(p, n, &blockpool->free_block_list) { + pci_unmap_single(hldev->pdev, + ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, + ((struct __vxge_hw_blockpool_entry *)p)->length, + PCI_DMA_BIDIRECTIONAL); + + vxge_os_dma_free(hldev->pdev, + ((struct __vxge_hw_blockpool_entry *)p)->memblock, + &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); + + list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); + kfree(p); + blockpool->pool_size--; + } + + list_for_each_safe(p, n, &blockpool->free_entry_list) { + list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); + kfree((void *)p); + } + ret = 0; +exit: + return; +} + +/* + * __vxge_hw_blockpool_create - Create block pool + */ +static enum vxge_hw_status +__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, + struct __vxge_hw_blockpool *blockpool, + u32 pool_size, + u32 pool_max) +{ + u32 i; + struct __vxge_hw_blockpool_entry *entry = NULL; + void *memblock; + dma_addr_t dma_addr; + struct pci_dev *dma_handle; + struct pci_dev *acc_handle; + enum vxge_hw_status status = VXGE_HW_OK; + + if (blockpool == NULL) { + status = VXGE_HW_FAIL; + goto blockpool_create_exit; + } + + blockpool->hldev = hldev; + blockpool->block_size = VXGE_HW_BLOCK_SIZE; + blockpool->pool_size = 0; + blockpool->pool_max = pool_max; + blockpool->req_out = 0; + + INIT_LIST_HEAD(&blockpool->free_block_list); + INIT_LIST_HEAD(&blockpool->free_entry_list); + + for (i = 0; i < pool_size + pool_max; i++) { + entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry), + GFP_KERNEL); + if (entry == NULL) { + __vxge_hw_blockpool_destroy(blockpool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto blockpool_create_exit; + } + list_add(&entry->item, &blockpool->free_entry_list); + } + + for (i = 0; i < pool_size; i++) { + memblock = vxge_os_dma_malloc( + hldev->pdev, + VXGE_HW_BLOCK_SIZE, + &dma_handle, + &acc_handle); + if (memblock == NULL) { + __vxge_hw_blockpool_destroy(blockpool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto blockpool_create_exit; + } + + dma_addr = pci_map_single(hldev->pdev, memblock, + VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL); + if (unlikely(pci_dma_mapping_error(hldev->pdev, + dma_addr))) { + vxge_os_dma_free(hldev->pdev, memblock, &acc_handle); + __vxge_hw_blockpool_destroy(blockpool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto blockpool_create_exit; + } + + if (!list_empty(&blockpool->free_entry_list)) + entry = (struct __vxge_hw_blockpool_entry *) + list_first_entry(&blockpool->free_entry_list, + struct __vxge_hw_blockpool_entry, + item); + + if (entry == NULL) + entry = + kzalloc(sizeof(struct __vxge_hw_blockpool_entry), + GFP_KERNEL); + if (entry != NULL) { + list_del(&entry->item); + entry->length = VXGE_HW_BLOCK_SIZE; + entry->memblock = memblock; + entry->dma_addr = dma_addr; + entry->acc_handle = acc_handle; + entry->dma_handle = dma_handle; + list_add(&entry->item, + &blockpool->free_block_list); + blockpool->pool_size++; + } else { + __vxge_hw_blockpool_destroy(blockpool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto blockpool_create_exit; + } + } + +blockpool_create_exit: + return status; +} + +/* + * __vxge_hw_device_fifo_config_check - Check fifo configuration. 
+ * Check the fifo configuration + */ +static enum vxge_hw_status +__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) +{ + if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || + (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS)) + return VXGE_HW_BADCFG_FIFO_BLOCKS; + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_device_vpath_config_check - Check vpath configuration. + * Check the vpath configuration + */ +static enum vxge_hw_status +__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) +{ + enum vxge_hw_status status; + + if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) || + (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX)) + return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH; + + status = __vxge_hw_device_fifo_config_check(&vp_config->fifo); + if (status != VXGE_HW_OK) + return status; + + if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) && + ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) || + (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU))) + return VXGE_HW_BADCFG_VPATH_MTU; + + if ((vp_config->rpa_strip_vlan_tag != + VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) && + (vp_config->rpa_strip_vlan_tag != + VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) && + (vp_config->rpa_strip_vlan_tag != + VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE)) + return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG; + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_device_config_check - Check device configuration. + * Check the device configuration + */ +static enum vxge_hw_status +__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) +{ + u32 i; + enum vxge_hw_status status; + + if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && + (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) && + (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && + (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF)) + return VXGE_HW_BADCFG_INTR_MODE; + + if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) && + (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE)) + return VXGE_HW_BADCFG_RTS_MAC_EN; + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + status = __vxge_hw_device_vpath_config_check( + &new_config->vp_config[i]); + if (status != VXGE_HW_OK) + return status; + } + + return VXGE_HW_OK; +} + +/* * vxge_hw_device_initialize - Initialize Titan device. * Initialize Titan device. Note that all the arguments of this public API * are 'IN', including @hldev. 
Driver cooperates with @@ -776,14 +1338,12 @@ vxge_hw_device_initialize( if (status != VXGE_HW_OK) goto exit; - hldev = (struct __vxge_hw_device *) - vmalloc(sizeof(struct __vxge_hw_device)); + hldev = vzalloc(sizeof(struct __vxge_hw_device)); if (hldev == NULL) { status = VXGE_HW_ERR_OUT_OF_MEMORY; goto exit; } - memset(hldev, 0, sizeof(struct __vxge_hw_device)); hldev->magic = VXGE_HW_DEVICE_MAGIC; vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL); @@ -806,7 +1366,6 @@ vxge_hw_device_initialize( vfree(hldev); goto exit; } - __vxge_hw_device_id_get(hldev); __vxge_hw_device_host_info_get(hldev); @@ -814,7 +1373,6 @@ vxge_hw_device_initialize( nblocks++; for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { - if (!(hldev->vpath_assignments & vxge_mBIT(i))) continue; @@ -839,7 +1397,6 @@ vxge_hw_device_initialize( } status = __vxge_hw_device_initialize(hldev); - if (status != VXGE_HW_OK) { vxge_hw_device_terminate(hldev); goto exit; @@ -865,6 +1422,242 @@ vxge_hw_device_terminate(struct __vxge_hw_device *hldev) } /* + * __vxge_hw_vpath_stats_access - Get the statistics from the given location + * and offset and perform an operation + */ +static enum vxge_hw_status +__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, + u32 operation, u32 offset, u64 *stat) +{ + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_hw_vpath_reg __iomem *vp_reg; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { + status = VXGE_HW_ERR_VPATH_NOT_OPEN; + goto vpath_stats_access_exit; + } + + vp_reg = vpath->vp_reg; + + val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) | + VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE | + VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset); + + status = __vxge_hw_pio_mem_write64(val64, + &vp_reg->xmac_stats_access_cmd, + VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE, + vpath->hldev->config.device_poll_millis); + if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ)) + *stat = readq(&vp_reg->xmac_stats_access_data); + else + *stat = 0; + +vpath_stats_access_exit: + return status; +} + +/* + * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath + */ +static enum vxge_hw_status +__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath, + struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats) +{ + u64 *val64; + int i; + u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET; + enum vxge_hw_status status = VXGE_HW_OK; + + val64 = (u64 *)vpath_tx_stats; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { + status = VXGE_HW_ERR_VPATH_NOT_OPEN; + goto exit; + } + + for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) { + status = __vxge_hw_vpath_stats_access(vpath, + VXGE_HW_STATS_OP_READ, + offset, val64); + if (status != VXGE_HW_OK) + goto exit; + offset++; + val64++; + } +exit: + return status; +} + +/* + * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath + */ +static enum vxge_hw_status +__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, + struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats) +{ + u64 *val64; + enum vxge_hw_status status = VXGE_HW_OK; + int i; + u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET; + val64 = (u64 *) vpath_rx_stats; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { + status = VXGE_HW_ERR_VPATH_NOT_OPEN; + goto exit; + } + for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) { + status = __vxge_hw_vpath_stats_access(vpath, + VXGE_HW_STATS_OP_READ, + offset >> 3, val64); + if (status != VXGE_HW_OK) + goto exit; + + offset += 8; + val64++; + } +exit: + 
return status; +} + +/* + * __vxge_hw_vpath_stats_get - Get the vpath hw statistics. + */ +static enum vxge_hw_status +__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath, + struct vxge_hw_vpath_stats_hw_info *hw_stats) +{ + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_hw_vpath_reg __iomem *vp_reg; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { + status = VXGE_HW_ERR_VPATH_NOT_OPEN; + goto exit; + } + vp_reg = vpath->vp_reg; + + val64 = readq(&vp_reg->vpath_debug_stats0); + hw_stats->ini_num_mwr_sent = + (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64); + + val64 = readq(&vp_reg->vpath_debug_stats1); + hw_stats->ini_num_mrd_sent = + (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64); + + val64 = readq(&vp_reg->vpath_debug_stats2); + hw_stats->ini_num_cpl_rcvd = + (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64); + + val64 = readq(&vp_reg->vpath_debug_stats3); + hw_stats->ini_num_mwr_byte_sent = + VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64); + + val64 = readq(&vp_reg->vpath_debug_stats4); + hw_stats->ini_num_cpl_byte_rcvd = + VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64); + + val64 = readq(&vp_reg->vpath_debug_stats5); + hw_stats->wrcrdtarb_xoff = + (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64); + + val64 = readq(&vp_reg->vpath_debug_stats6); + hw_stats->rdcrdtarb_xoff = + (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64); + + val64 = readq(&vp_reg->vpath_genstats_count01); + hw_stats->vpath_genstats_count0 = + (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0( + val64); + + val64 = readq(&vp_reg->vpath_genstats_count01); + hw_stats->vpath_genstats_count1 = + (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1( + val64); + + val64 = readq(&vp_reg->vpath_genstats_count23); + hw_stats->vpath_genstats_count2 = + (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2( + val64); + + val64 = readq(&vp_reg->vpath_genstats_count01); + hw_stats->vpath_genstats_count3 = + (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3( + val64); + + val64 = readq(&vp_reg->vpath_genstats_count4); + hw_stats->vpath_genstats_count4 = + (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4( + val64); + + val64 = readq(&vp_reg->vpath_genstats_count5); + hw_stats->vpath_genstats_count5 = + (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5( + val64); + + status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats); + if (status != VXGE_HW_OK) + goto exit; + + status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats); + if (status != VXGE_HW_OK) + goto exit; + + VXGE_HW_VPATH_STATS_PIO_READ( + VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET); + + hw_stats->prog_event_vnum0 = + (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64); + + hw_stats->prog_event_vnum1 = + (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64); + + VXGE_HW_VPATH_STATS_PIO_READ( + VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET); + + hw_stats->prog_event_vnum2 = + (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64); + + hw_stats->prog_event_vnum3 = + (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64); + + val64 = readq(&vp_reg->rx_multi_cast_stats); + hw_stats->rx_multi_cast_frame_discard = + (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64); + + val64 = readq(&vp_reg->rx_frm_transferred); + hw_stats->rx_frm_transferred = + (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64); + + val64 = 
readq(&vp_reg->rxd_returned); + hw_stats->rxd_returned = + (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64); + + val64 = readq(&vp_reg->dbg_stats_rx_mpa); + hw_stats->rx_mpa_len_fail_frms = + (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64); + hw_stats->rx_mpa_mrk_fail_frms = + (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64); + hw_stats->rx_mpa_crc_fail_frms = + (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64); + + val64 = readq(&vp_reg->dbg_stats_rx_fau); + hw_stats->rx_permitted_frms = + (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64); + hw_stats->rx_vp_reset_discarded_frms = + (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64); + hw_stats->rx_wol_frms = + (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64); + + val64 = readq(&vp_reg->tx_vp_reset_discarded_frms); + hw_stats->tx_vp_reset_discarded_frms = + (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS( + val64); +exit: + return status; +} + +/* * vxge_hw_device_stats_get - Get the device hw statistics. * Returns the vpath h/w stats for the device. */ @@ -876,7 +1669,6 @@ vxge_hw_device_stats_get(struct __vxge_hw_device *hldev, enum vxge_hw_status status = VXGE_HW_OK; for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { - if (!(hldev->vpaths_deployed & vxge_mBIT(i)) || (hldev->virtual_paths[i].vp_open == VXGE_HW_VP_NOT_OPEN)) @@ -1031,7 +1823,6 @@ vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev, status = vxge_hw_device_xmac_aggr_stats_get(hldev, 0, &xmac_stats->aggr_stats[0]); - if (status != VXGE_HW_OK) goto exit; @@ -1165,7 +1956,6 @@ exit: * It can be used to set or reset Pause frame generation or reception * support of the NIC. */ - enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev, u32 port, u32 tx, u32 rx) { @@ -1407,190 +2197,359 @@ exit: } /* - * __vxge_hw_ring_create - Create a Ring - * This function creates Ring and initializes it. 
- * + * __vxge_hw_channel_allocate - Allocate memory for channel + * This function allocates required memory for the channel and various arrays + * in the channel */ -static enum vxge_hw_status -__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, - struct vxge_hw_ring_attr *attr) +static struct __vxge_hw_channel * +__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, + enum __vxge_hw_channel_type type, + u32 length, u32 per_dtr_space, + void *userdata) { - enum vxge_hw_status status = VXGE_HW_OK; - struct __vxge_hw_ring *ring; - u32 ring_length; - struct vxge_hw_ring_config *config; + struct __vxge_hw_channel *channel; struct __vxge_hw_device *hldev; + int size = 0; u32 vp_id; - struct vxge_hw_mempool_cbs ring_mp_callback; - if ((vp == NULL) || (attr == NULL)) { + hldev = vph->vpath->hldev; + vp_id = vph->vpath->vp_id; + + switch (type) { + case VXGE_HW_CHANNEL_TYPE_FIFO: + size = sizeof(struct __vxge_hw_fifo); + break; + case VXGE_HW_CHANNEL_TYPE_RING: + size = sizeof(struct __vxge_hw_ring); + break; + default: + break; + } + + channel = kzalloc(size, GFP_KERNEL); + if (channel == NULL) + goto exit0; + INIT_LIST_HEAD(&channel->item); + + channel->common_reg = hldev->common_reg; + channel->first_vp_id = hldev->first_vp_id; + channel->type = type; + channel->devh = hldev; + channel->vph = vph; + channel->userdata = userdata; + channel->per_dtr_space = per_dtr_space; + channel->length = length; + channel->vp_id = vp_id; + + channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); + if (channel->work_arr == NULL) + goto exit1; + + channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); + if (channel->free_arr == NULL) + goto exit1; + channel->free_ptr = length; + + channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); + if (channel->reserve_arr == NULL) + goto exit1; + channel->reserve_ptr = length; + channel->reserve_top = 0; + + channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); + if (channel->orig_arr == NULL) + goto exit1; + + return channel; +exit1: + __vxge_hw_channel_free(channel); + +exit0: + return NULL; +} + +/* + * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async + * Adds a block to block pool + */ +static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh, + void *block_addr, + u32 length, + struct pci_dev *dma_h, + struct pci_dev *acc_handle) +{ + struct __vxge_hw_blockpool *blockpool; + struct __vxge_hw_blockpool_entry *entry = NULL; + dma_addr_t dma_addr; + enum vxge_hw_status status = VXGE_HW_OK; + u32 req_out; + + blockpool = &devh->block_pool; + + if (block_addr == NULL) { + blockpool->req_out--; status = VXGE_HW_FAIL; goto exit; } - hldev = vp->vpath->hldev; - vp_id = vp->vpath->vp_id; + dma_addr = pci_map_single(devh->pdev, block_addr, length, + PCI_DMA_BIDIRECTIONAL); - config = &hldev->config.vp_config[vp_id].ring; + if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { + vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); + blockpool->req_out--; + status = VXGE_HW_FAIL; + goto exit; + } - ring_length = config->ring_blocks * - vxge_hw_ring_rxds_per_block_get(config->buffer_mode); + if (!list_empty(&blockpool->free_entry_list)) + entry = (struct __vxge_hw_blockpool_entry *) + list_first_entry(&blockpool->free_entry_list, + struct __vxge_hw_blockpool_entry, + item); - ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp, - VXGE_HW_CHANNEL_TYPE_RING, - ring_length, - attr->per_rxd_space, - attr->userdata); + if (entry == NULL) + entry = vmalloc(sizeof(struct 
__vxge_hw_blockpool_entry)); + else + list_del(&entry->item); - if (ring == NULL) { + if (entry != NULL) { + entry->length = length; + entry->memblock = block_addr; + entry->dma_addr = dma_addr; + entry->acc_handle = acc_handle; + entry->dma_handle = dma_h; + list_add(&entry->item, &blockpool->free_block_list); + blockpool->pool_size++; + status = VXGE_HW_OK; + } else status = VXGE_HW_ERR_OUT_OF_MEMORY; - goto exit; - } - vp->vpath->ringh = ring; - ring->vp_id = vp_id; - ring->vp_reg = vp->vpath->vp_reg; - ring->common_reg = hldev->common_reg; - ring->stats = &vp->vpath->sw_stats->ring_stats; - ring->config = config; - ring->callback = attr->callback; - ring->rxd_init = attr->rxd_init; - ring->rxd_term = attr->rxd_term; - ring->buffer_mode = config->buffer_mode; - ring->rxds_limit = config->rxds_limit; + blockpool->req_out--; - ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); - ring->rxd_priv_size = - sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space; - ring->per_rxd_space = attr->per_rxd_space; + req_out = blockpool->req_out; +exit: + return; +} - ring->rxd_priv_size = - ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) / - VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; +static inline void +vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size) +{ + gfp_t flags; + void *vaddr; - /* how many RxDs can fit into one block. Depends on configured - * buffer_mode. */ - ring->rxds_per_block = - vxge_hw_ring_rxds_per_block_get(config->buffer_mode); + if (in_interrupt()) + flags = GFP_ATOMIC | GFP_DMA; + else + flags = GFP_KERNEL | GFP_DMA; - /* calculate actual RxD block private size */ - ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block; - ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc; - ring->mempool = __vxge_hw_mempool_create(hldev, - VXGE_HW_BLOCK_SIZE, - VXGE_HW_BLOCK_SIZE, - ring->rxdblock_priv_size, - ring->config->ring_blocks, - ring->config->ring_blocks, - &ring_mp_callback, - ring); + vaddr = kmalloc((size), flags); - if (ring->mempool == NULL) { - __vxge_hw_ring_delete(vp); - return VXGE_HW_ERR_OUT_OF_MEMORY; - } + vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev); +} - status = __vxge_hw_channel_initialize(&ring->channel); - if (status != VXGE_HW_OK) { - __vxge_hw_ring_delete(vp); - goto exit; +/* + * __vxge_hw_blockpool_blocks_add - Request additional blocks + */ +static +void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool) +{ + u32 nreq = 0, i; + + if ((blockpool->pool_size + blockpool->req_out) < + VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) { + nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE; + blockpool->req_out += nreq; } - /* Note: - * Specifying rxd_init callback means two things: - * 1) rxds need to be initialized by driver at channel-open time; - * 2) rxds need to be posted at channel-open time - * (that's what the initial_replenish() below does) - * Currently we don't have a case when the 1) is done without the 2). 
- */ - if (ring->rxd_init) { - status = vxge_hw_ring_replenish(ring); - if (status != VXGE_HW_OK) { - __vxge_hw_ring_delete(vp); + for (i = 0; i < nreq; i++) + vxge_os_dma_malloc_async( + ((struct __vxge_hw_device *)blockpool->hldev)->pdev, + blockpool->hldev, VXGE_HW_BLOCK_SIZE); +} + +/* + * __vxge_hw_blockpool_malloc - Allocate a memory block from pool + * Allocates a block of memory of given size, either from block pool + * or by calling vxge_os_dma_malloc() + */ +static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, + struct vxge_hw_mempool_dma *dma_object) +{ + struct __vxge_hw_blockpool_entry *entry = NULL; + struct __vxge_hw_blockpool *blockpool; + void *memblock = NULL; + enum vxge_hw_status status = VXGE_HW_OK; + + blockpool = &devh->block_pool; + + if (size != blockpool->block_size) { + + memblock = vxge_os_dma_malloc(devh->pdev, size, + &dma_object->handle, + &dma_object->acc_handle); + + if (memblock == NULL) { + status = VXGE_HW_ERR_OUT_OF_MEMORY; goto exit; } - } - /* initial replenish will increment the counter in its post() routine, - * we have to reset it */ - ring->stats->common_stats.usage_cnt = 0; + dma_object->addr = pci_map_single(devh->pdev, memblock, size, + PCI_DMA_BIDIRECTIONAL); + + if (unlikely(pci_dma_mapping_error(devh->pdev, + dma_object->addr))) { + vxge_os_dma_free(devh->pdev, memblock, + &dma_object->acc_handle); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + + } else { + + if (!list_empty(&blockpool->free_block_list)) + entry = (struct __vxge_hw_blockpool_entry *) + list_first_entry(&blockpool->free_block_list, + struct __vxge_hw_blockpool_entry, + item); + + if (entry != NULL) { + list_del(&entry->item); + dma_object->addr = entry->dma_addr; + dma_object->handle = entry->dma_handle; + dma_object->acc_handle = entry->acc_handle; + memblock = entry->memblock; + + list_add(&entry->item, + &blockpool->free_entry_list); + blockpool->pool_size--; + } + + if (memblock != NULL) + __vxge_hw_blockpool_blocks_add(blockpool); + } exit: - return status; + return memblock; } /* - * __vxge_hw_ring_abort - Returns the RxD - * This function terminates the RxDs of ring + * __vxge_hw_blockpool_blocks_remove - Free additional blocks */ -static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring) +static void +__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool) { - void *rxdh; - struct __vxge_hw_channel *channel; - - channel = &ring->channel; + struct list_head *p, *n; - for (;;) { - vxge_hw_channel_dtr_try_complete(channel, &rxdh); + list_for_each_safe(p, n, &blockpool->free_block_list) { - if (rxdh == NULL) + if (blockpool->pool_size < blockpool->pool_max) break; - vxge_hw_channel_dtr_complete(channel); + pci_unmap_single( + ((struct __vxge_hw_device *)blockpool->hldev)->pdev, + ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, + ((struct __vxge_hw_blockpool_entry *)p)->length, + PCI_DMA_BIDIRECTIONAL); - if (ring->rxd_term) - ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED, - channel->userdata); + vxge_os_dma_free( + ((struct __vxge_hw_device *)blockpool->hldev)->pdev, + ((struct __vxge_hw_blockpool_entry *)p)->memblock, + &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); - vxge_hw_channel_dtr_free(channel, rxdh); - } + list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); - return VXGE_HW_OK; + list_add(p, &blockpool->free_entry_list); + + blockpool->pool_size--; + + } } /* - * __vxge_hw_ring_reset - Resets the ring - * This function resets the ring during vpath reset operation + * 
__vxge_hw_blockpool_free - Frees the memory allocated with + * __vxge_hw_blockpool_malloc */ -static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) +static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh, + void *memblock, u32 size, + struct vxge_hw_mempool_dma *dma_object) { + struct __vxge_hw_blockpool_entry *entry = NULL; + struct __vxge_hw_blockpool *blockpool; enum vxge_hw_status status = VXGE_HW_OK; - struct __vxge_hw_channel *channel; - channel = &ring->channel; + blockpool = &devh->block_pool; - __vxge_hw_ring_abort(ring); + if (size != blockpool->block_size) { + pci_unmap_single(devh->pdev, dma_object->addr, size, + PCI_DMA_BIDIRECTIONAL); + vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); + } else { - status = __vxge_hw_channel_reset(channel); + if (!list_empty(&blockpool->free_entry_list)) + entry = (struct __vxge_hw_blockpool_entry *) + list_first_entry(&blockpool->free_entry_list, + struct __vxge_hw_blockpool_entry, + item); - if (status != VXGE_HW_OK) - goto exit; + if (entry == NULL) + entry = vmalloc(sizeof( + struct __vxge_hw_blockpool_entry)); + else + list_del(&entry->item); - if (ring->rxd_init) { - status = vxge_hw_ring_replenish(ring); - if (status != VXGE_HW_OK) - goto exit; + if (entry != NULL) { + entry->length = size; + entry->memblock = memblock; + entry->dma_addr = dma_object->addr; + entry->acc_handle = dma_object->acc_handle; + entry->dma_handle = dma_object->handle; + list_add(&entry->item, + &blockpool->free_block_list); + blockpool->pool_size++; + status = VXGE_HW_OK; + } else + status = VXGE_HW_ERR_OUT_OF_MEMORY; + + if (status == VXGE_HW_OK) + __vxge_hw_blockpool_blocks_remove(blockpool); } -exit: - return status; } /* - * __vxge_hw_ring_delete - Removes the ring - * This function freeup the memory pool and removes the ring + * __vxge_hw_mempool_destroy */ -static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp) +static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool) { - struct __vxge_hw_ring *ring = vp->vpath->ringh; + u32 i, j; + struct __vxge_hw_device *devh = mempool->devh; - __vxge_hw_ring_abort(ring); + for (i = 0; i < mempool->memblocks_allocated; i++) { + struct vxge_hw_mempool_dma *dma_object; - if (ring->mempool) - __vxge_hw_mempool_destroy(ring->mempool); + vxge_assert(mempool->memblocks_arr[i]); + vxge_assert(mempool->memblocks_dma_arr + i); - vp->vpath->ringh = NULL; - __vxge_hw_channel_free(&ring->channel); + dma_object = mempool->memblocks_dma_arr + i; - return VXGE_HW_OK; + for (j = 0; j < mempool->items_per_memblock; j++) { + u32 index = i * mempool->items_per_memblock + j; + + /* to skip last partially filled(if any) memblock */ + if (index >= mempool->items_current) + break; + } + + vfree(mempool->memblocks_priv_arr[i]); + + __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i], + mempool->memblock_size, dma_object); + } + + vfree(mempool->items_arr); + vfree(mempool->memblocks_dma_arr); + vfree(mempool->memblocks_priv_arr); + vfree(mempool->memblocks_arr); + vfree(mempool); } /* @@ -1627,15 +2586,12 @@ __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate, * allocate new memblock and its private part at once. * This helps to minimize memory usage a lot. 
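The free path above is the mirror image of the allocation path: buffers that are not block-sized bypass the pool entirely, while block-sized ones rejoin the free list, after which the pool is trimmed whenever it sits at or above pool_max (that is what __vxge_hw_blockpool_blocks_remove() does). A minimal userspace model of that policy, with a singly linked list standing in for list_head and all names invented:

#include <stdio.h>
#include <stdlib.h>

struct block { struct block *next; };

struct pool {
	struct block *free_list;
	size_t block_size, pool_size, pool_max;
};

static void pool_free(struct pool *p, void *mem, size_t size)
{
	struct block *b = mem;

	if (size != p->block_size) {
		free(mem);			/* odd size: bypass the pool */
		return;
	}

	b->next = p->free_list;			/* block-sized: re-pool it */
	p->free_list = b;
	p->pool_size++;

	while (p->pool_size >= p->pool_max) {	/* trim, like blocks_remove() */
		b = p->free_list;
		p->free_list = b->next;
		free(b);
		p->pool_size--;
	}
}

int main(void)
{
	struct pool p = { NULL, 4096, 0, 2 };
	int i;

	for (i = 0; i < 4; i++)
		pool_free(&p, malloc(4096), 4096);
	printf("pooled=%zu\n", p.pool_size);	/* 1: trimmed on reaching pool_max */
	return 0;
}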
*/ mempool->memblocks_priv_arr[i] = - vmalloc(mempool->items_priv_size * n_items); + vzalloc(mempool->items_priv_size * n_items); if (mempool->memblocks_priv_arr[i] == NULL) { status = VXGE_HW_ERR_OUT_OF_MEMORY; goto exit; } - memset(mempool->memblocks_priv_arr[i], 0, - mempool->items_priv_size * n_items); - /* allocate DMA-capable memblock */ mempool->memblocks_arr[i] = __vxge_hw_blockpool_malloc(mempool->devh, @@ -1686,16 +2642,15 @@ exit: * with size enough to hold %items_initial number of items. Memory is * DMA-able but client must map/unmap before interoperating with the device. */ -static struct vxge_hw_mempool* -__vxge_hw_mempool_create( - struct __vxge_hw_device *devh, - u32 memblock_size, - u32 item_size, - u32 items_priv_size, - u32 items_initial, - u32 items_max, - struct vxge_hw_mempool_cbs *mp_callback, - void *userdata) +static struct vxge_hw_mempool * +__vxge_hw_mempool_create(struct __vxge_hw_device *devh, + u32 memblock_size, + u32 item_size, + u32 items_priv_size, + u32 items_initial, + u32 items_max, + struct vxge_hw_mempool_cbs *mp_callback, + void *userdata) { enum vxge_hw_status status = VXGE_HW_OK; u32 memblocks_to_allocate; @@ -1707,13 +2662,11 @@ __vxge_hw_mempool_create( goto exit; } - mempool = (struct vxge_hw_mempool *) - vmalloc(sizeof(struct vxge_hw_mempool)); + mempool = vzalloc(sizeof(struct vxge_hw_mempool)); if (mempool == NULL) { status = VXGE_HW_ERR_OUT_OF_MEMORY; goto exit; } - memset(mempool, 0, sizeof(struct vxge_hw_mempool)); mempool->devh = devh; mempool->memblock_size = memblock_size; @@ -1733,53 +2686,43 @@ __vxge_hw_mempool_create( /* allocate array of memblocks */ mempool->memblocks_arr = - (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); + vzalloc(sizeof(void *) * mempool->memblocks_max); if (mempool->memblocks_arr == NULL) { __vxge_hw_mempool_destroy(mempool); status = VXGE_HW_ERR_OUT_OF_MEMORY; mempool = NULL; goto exit; } - memset(mempool->memblocks_arr, 0, - sizeof(void *) * mempool->memblocks_max); /* allocate array of private parts of items per memblocks */ mempool->memblocks_priv_arr = - (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); + vzalloc(sizeof(void *) * mempool->memblocks_max); if (mempool->memblocks_priv_arr == NULL) { __vxge_hw_mempool_destroy(mempool); status = VXGE_HW_ERR_OUT_OF_MEMORY; mempool = NULL; goto exit; } - memset(mempool->memblocks_priv_arr, 0, - sizeof(void *) * mempool->memblocks_max); /* allocate array of memblocks DMA objects */ - mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *) - vmalloc(sizeof(struct vxge_hw_mempool_dma) * + mempool->memblocks_dma_arr = + vzalloc(sizeof(struct vxge_hw_mempool_dma) * mempool->memblocks_max); - if (mempool->memblocks_dma_arr == NULL) { __vxge_hw_mempool_destroy(mempool); status = VXGE_HW_ERR_OUT_OF_MEMORY; mempool = NULL; goto exit; } - memset(mempool->memblocks_dma_arr, 0, - sizeof(struct vxge_hw_mempool_dma) * - mempool->memblocks_max); /* allocate hash array of items */ - mempool->items_arr = - (void **) vmalloc(sizeof(void *) * mempool->items_max); + mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max); if (mempool->items_arr == NULL) { __vxge_hw_mempool_destroy(mempool); status = VXGE_HW_ERR_OUT_OF_MEMORY; mempool = NULL; goto exit; } - memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max); /* calculate initial number of memblocks */ memblocks_to_allocate = (mempool->items_initial + @@ -1801,122 +2744,188 @@ exit: } /* - * vxge_hw_mempool_destroy + * __vxge_hw_ring_abort - Returns the RxD + * This function 
terminates the RxDs of ring */ -static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool) +static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring) { - u32 i, j; - struct __vxge_hw_device *devh = mempool->devh; - - for (i = 0; i < mempool->memblocks_allocated; i++) { - struct vxge_hw_mempool_dma *dma_object; + void *rxdh; + struct __vxge_hw_channel *channel; - vxge_assert(mempool->memblocks_arr[i]); - vxge_assert(mempool->memblocks_dma_arr + i); + channel = &ring->channel; - dma_object = mempool->memblocks_dma_arr + i; + for (;;) { + vxge_hw_channel_dtr_try_complete(channel, &rxdh); - for (j = 0; j < mempool->items_per_memblock; j++) { - u32 index = i * mempool->items_per_memblock + j; + if (rxdh == NULL) + break; - /* to skip last partially filled(if any) memblock */ - if (index >= mempool->items_current) - break; - } + vxge_hw_channel_dtr_complete(channel); - vfree(mempool->memblocks_priv_arr[i]); + if (ring->rxd_term) + ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED, + channel->userdata); - __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i], - mempool->memblock_size, dma_object); + vxge_hw_channel_dtr_free(channel, rxdh); } - vfree(mempool->items_arr); + return VXGE_HW_OK; +} - vfree(mempool->memblocks_dma_arr); +/* + * __vxge_hw_ring_reset - Resets the ring + * This function resets the ring during vpath reset operation + */ +static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_channel *channel; - vfree(mempool->memblocks_priv_arr); + channel = &ring->channel; - vfree(mempool->memblocks_arr); + __vxge_hw_ring_abort(ring); - vfree(mempool); + status = __vxge_hw_channel_reset(channel); + + if (status != VXGE_HW_OK) + goto exit; + + if (ring->rxd_init) { + status = vxge_hw_ring_replenish(ring); + if (status != VXGE_HW_OK) + goto exit; + } +exit: + return status; } /* - * __vxge_hw_device_fifo_config_check - Check fifo configuration. - * Check the fifo configuration + * __vxge_hw_ring_delete - Removes the ring + * This function freeup the memory pool and removes the ring */ -enum vxge_hw_status -__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) +static enum vxge_hw_status +__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp) { - if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || - (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS)) - return VXGE_HW_BADCFG_FIFO_BLOCKS; + struct __vxge_hw_ring *ring = vp->vpath->ringh; + + __vxge_hw_ring_abort(ring); + + if (ring->mempool) + __vxge_hw_mempool_destroy(ring->mempool); + + vp->vpath->ringh = NULL; + __vxge_hw_channel_free(&ring->channel); return VXGE_HW_OK; } /* - * __vxge_hw_device_vpath_config_check - Check vpath configuration. - * Check the vpath configuration + * __vxge_hw_ring_create - Create a Ring + * This function creates Ring and initializes it. 
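Note that __vxge_hw_ring_abort() above and __vxge_hw_fifo_abort() later in this patch share one drain-loop shape: pop completed descriptors until the channel is empty, give the driver a final rxd_term/txdl_term callback on each, then free the slot. The same loop, self-contained and with invented names:

#include <stdio.h>

#define NDESC 4

static int completed[NDESC] = { 1, 1, 1, 0 };	/* fake completion state */
static int head;

static void *try_complete(void)
{
	return (head < NDESC && completed[head]) ? &completed[head] : NULL;
}

static void channel_abort(void)
{
	void *dtr;

	for (;;) {
		dtr = try_complete();	/* vxge_hw_channel_dtr_try_complete() */
		if (dtr == NULL)
			break;
		printf("term callback for slot %d\n", head);	/* rxd_term */
		head++;			/* vxge_hw_channel_dtr_free() */
	}
}

int main(void)
{
	channel_abort();
	return 0;
}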
*/ static enum vxge_hw_status -__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) +__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, + struct vxge_hw_ring_attr *attr) { - enum vxge_hw_status status; + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_ring *ring; + u32 ring_length; + struct vxge_hw_ring_config *config; + struct __vxge_hw_device *hldev; + u32 vp_id; + struct vxge_hw_mempool_cbs ring_mp_callback; - if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) || - (vp_config->min_bandwidth > - VXGE_HW_VPATH_BANDWIDTH_MAX)) - return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH; + if ((vp == NULL) || (attr == NULL)) { + status = VXGE_HW_FAIL; + goto exit; + } - status = __vxge_hw_device_fifo_config_check(&vp_config->fifo); - if (status != VXGE_HW_OK) - return status; + hldev = vp->vpath->hldev; + vp_id = vp->vpath->vp_id; - if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) && - ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) || - (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU))) - return VXGE_HW_BADCFG_VPATH_MTU; + config = &hldev->config.vp_config[vp_id].ring; - if ((vp_config->rpa_strip_vlan_tag != - VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) && - (vp_config->rpa_strip_vlan_tag != - VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) && - (vp_config->rpa_strip_vlan_tag != - VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE)) - return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG; + ring_length = config->ring_blocks * + vxge_hw_ring_rxds_per_block_get(config->buffer_mode); - return VXGE_HW_OK; -} + ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp, + VXGE_HW_CHANNEL_TYPE_RING, + ring_length, + attr->per_rxd_space, + attr->userdata); + if (ring == NULL) { + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } -/* - * __vxge_hw_device_config_check - Check device configuration. - * Check the device configuration - */ -enum vxge_hw_status -__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) -{ - u32 i; - enum vxge_hw_status status; + vp->vpath->ringh = ring; + ring->vp_id = vp_id; + ring->vp_reg = vp->vpath->vp_reg; + ring->common_reg = hldev->common_reg; + ring->stats = &vp->vpath->sw_stats->ring_stats; + ring->config = config; + ring->callback = attr->callback; + ring->rxd_init = attr->rxd_init; + ring->rxd_term = attr->rxd_term; + ring->buffer_mode = config->buffer_mode; + ring->rxds_limit = config->rxds_limit; - if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && - (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) && - (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && - (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF)) - return VXGE_HW_BADCFG_INTR_MODE; + ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); + ring->rxd_priv_size = + sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space; + ring->per_rxd_space = attr->per_rxd_space; - if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) && - (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE)) - return VXGE_HW_BADCFG_RTS_MAC_EN; + ring->rxd_priv_size = + ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) / + VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; - for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { - status = __vxge_hw_device_vpath_config_check( - &new_config->vp_config[i]); - if (status != VXGE_HW_OK) - return status; + /* how many RxDs can fit into one block. Depends on configured + * buffer_mode. 
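The rxd_priv_size arithmetic a few lines above is the usual round-up-to-a-multiple idiom, (x + align - 1) / align * align. Assuming, purely for illustration, a 128-byte VXGE_CACHE_LINE_SIZE (the real value is in the vxge headers), a raw private size of 200 bytes becomes 256, so each RxD's private area starts on a cache-line boundary:

#include <stdio.h>

/* Round x up to the next multiple of align (align need not be a power of two). */
static unsigned round_up_to(unsigned x, unsigned align)
{
	return (x + align - 1) / align * align;
}

int main(void)
{
	unsigned cache_line = 128;	/* illustrative, not necessarily VXGE_CACHE_LINE_SIZE */

	printf("%u -> %u\n", 200u, round_up_to(200, cache_line));	/* 200 -> 256 */
	return 0;
}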
*/ + ring->rxds_per_block = + vxge_hw_ring_rxds_per_block_get(config->buffer_mode); + + /* calculate actual RxD block private size */ + ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block; + ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc; + ring->mempool = __vxge_hw_mempool_create(hldev, + VXGE_HW_BLOCK_SIZE, + VXGE_HW_BLOCK_SIZE, + ring->rxdblock_priv_size, + ring->config->ring_blocks, + ring->config->ring_blocks, + &ring_mp_callback, + ring); + if (ring->mempool == NULL) { + __vxge_hw_ring_delete(vp); + return VXGE_HW_ERR_OUT_OF_MEMORY; } - return VXGE_HW_OK; + status = __vxge_hw_channel_initialize(&ring->channel); + if (status != VXGE_HW_OK) { + __vxge_hw_ring_delete(vp); + goto exit; + } + + /* Note: + * Specifying rxd_init callback means two things: + * 1) rxds need to be initialized by driver at channel-open time; + * 2) rxds need to be posted at channel-open time + * (that's what the initial_replenish() below does) + * Currently we don't have a case when the 1) is done without the 2). + */ + if (ring->rxd_init) { + status = vxge_hw_ring_replenish(ring); + if (status != VXGE_HW_OK) { + __vxge_hw_ring_delete(vp); + goto exit; + } + } + + /* initial replenish will increment the counter in its post() routine, + * we have to reset it */ + ring->stats->common_stats.usage_cnt = 0; +exit: + return status; } /* @@ -1938,7 +2947,6 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config) device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT; for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { - device_config->vp_config[i].vp_id = i; device_config->vp_config[i].min_bandwidth = @@ -2078,61 +3086,6 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config) } /* - * _hw_legacy_swapper_set - Set the swapper bits for the legacy secion. - * Set the swapper bits appropriately for the lagacy section. - */ -static enum vxge_hw_status -__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg) -{ - u64 val64; - enum vxge_hw_status status = VXGE_HW_OK; - - val64 = readq(&legacy_reg->toc_swapper_fb); - - wmb(); - - switch (val64) { - - case VXGE_HW_SWAPPER_INITIAL_VALUE: - return status; - - case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED: - writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, - &legacy_reg->pifm_rd_swap_en); - writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, - &legacy_reg->pifm_rd_flip_en); - writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, - &legacy_reg->pifm_wr_swap_en); - writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, - &legacy_reg->pifm_wr_flip_en); - break; - - case VXGE_HW_SWAPPER_BYTE_SWAPPED: - writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, - &legacy_reg->pifm_rd_swap_en); - writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, - &legacy_reg->pifm_wr_swap_en); - break; - - case VXGE_HW_SWAPPER_BIT_FLIPPED: - writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, - &legacy_reg->pifm_rd_flip_en); - writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, - &legacy_reg->pifm_wr_flip_en); - break; - } - - wmb(); - - val64 = readq(&legacy_reg->toc_swapper_fb); - - if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE) - status = VXGE_HW_ERR_SWAPPER_CTRL; - - return status; -} - -/* * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath. * Set the swapper bits appropriately for the vpath. */ @@ -2156,9 +3109,8 @@ __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg) * Set the swapper bits appropriately for the vpath. 
*/ static enum vxge_hw_status -__vxge_hw_kdfc_swapper_set( - struct vxge_hw_legacy_reg __iomem *legacy_reg, - struct vxge_hw_vpath_reg __iomem *vpath_reg) +__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg, + struct vxge_hw_vpath_reg __iomem *vpath_reg) { u64 val64; @@ -2408,6 +3360,69 @@ exit: } /* + * __vxge_hw_fifo_abort - Returns the TxD + * This function terminates the TxDs of fifo + */ +static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo) +{ + void *txdlh; + + for (;;) { + vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh); + + if (txdlh == NULL) + break; + + vxge_hw_channel_dtr_complete(&fifo->channel); + + if (fifo->txdl_term) { + fifo->txdl_term(txdlh, + VXGE_HW_TXDL_STATE_POSTED, + fifo->channel.userdata); + } + + vxge_hw_channel_dtr_free(&fifo->channel, txdlh); + } + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_fifo_reset - Resets the fifo + * This function resets the fifo during vpath reset operation + */ +static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo) +{ + enum vxge_hw_status status = VXGE_HW_OK; + + __vxge_hw_fifo_abort(fifo); + status = __vxge_hw_channel_reset(&fifo->channel); + + return status; +} + +/* + * __vxge_hw_fifo_delete - Removes the FIFO + * This function freeup the memory pool and removes the FIFO + */ +static enum vxge_hw_status +__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp) +{ + struct __vxge_hw_fifo *fifo = vp->vpath->fifoh; + + __vxge_hw_fifo_abort(fifo); + + if (fifo->mempool) + __vxge_hw_mempool_destroy(fifo->mempool); + + vp->vpath->fifoh = NULL; + + __vxge_hw_channel_free(&fifo->channel); + + return VXGE_HW_OK; +} + +/* * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD * list callback * This function is callback passed to __vxge_hw_mempool_create to create memory @@ -2453,7 +3468,7 @@ __vxge_hw_fifo_mempool_item_alloc( * __vxge_hw_fifo_create - Create a FIFO * This function creates FIFO and initializes it. 
*/ -enum vxge_hw_status +static enum vxge_hw_status __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, struct vxge_hw_fifo_attr *attr) { @@ -2572,68 +3587,6 @@ exit: } /* - * __vxge_hw_fifo_abort - Returns the TxD - * This function terminates the TxDs of fifo - */ -static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo) -{ - void *txdlh; - - for (;;) { - vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh); - - if (txdlh == NULL) - break; - - vxge_hw_channel_dtr_complete(&fifo->channel); - - if (fifo->txdl_term) { - fifo->txdl_term(txdlh, - VXGE_HW_TXDL_STATE_POSTED, - fifo->channel.userdata); - } - - vxge_hw_channel_dtr_free(&fifo->channel, txdlh); - } - - return VXGE_HW_OK; -} - -/* - * __vxge_hw_fifo_reset - Resets the fifo - * This function resets the fifo during vpath reset operation - */ -static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo) -{ - enum vxge_hw_status status = VXGE_HW_OK; - - __vxge_hw_fifo_abort(fifo); - status = __vxge_hw_channel_reset(&fifo->channel); - - return status; -} - -/* - * __vxge_hw_fifo_delete - Removes the FIFO - * This function freeup the memory pool and removes the FIFO - */ -enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp) -{ - struct __vxge_hw_fifo *fifo = vp->vpath->fifoh; - - __vxge_hw_fifo_abort(fifo); - - if (fifo->mempool) - __vxge_hw_mempool_destroy(fifo->mempool); - - vp->vpath->fifoh = NULL; - - __vxge_hw_channel_free(&fifo->channel); - - return VXGE_HW_OK; -} - -/* * __vxge_hw_vpath_pci_read - Read the content of given address * in pci config space. * Read from the vpath pci config space. @@ -2675,297 +3628,6 @@ exit: return status; } -/* - * __vxge_hw_vpath_func_id_get - Get the function id of the vpath. - * Returns the function number of the vpath. - */ -static u32 -__vxge_hw_vpath_func_id_get(u32 vp_id, - struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg) -{ - u64 val64; - - val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1); - - return - (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64); -} - -/* - * __vxge_hw_read_rts_ds - Program RTS steering critieria - */ -static inline void -__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg, - u64 dta_struct_sel) -{ - writeq(0, &vpath_reg->rts_access_steer_ctrl); - wmb(); - writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0); - writeq(0, &vpath_reg->rts_access_steer_data1); - wmb(); -} - - -/* - * __vxge_hw_vpath_card_info_get - Get the serial numbers, - * part number and product description. 
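The removed __vxge_hw_vpath_card_info_get() below fetches each 16-byte memo field (serial number, part number, product description) as two 64-bit data registers and stores them straight into the string buffer after be64_to_cpu(), because the firmware packs the ASCII most-significant byte first. A userspace equivalent, with Linux's be64toh standing in for be64_to_cpu and a fabricated sample serial string:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Fabricated register contents: ASCII packed most-significant byte
	 * first, the way the firmware memo entries return strings. */
	uint64_t data1 = 0x565847452d534552ULL;	/* "VXGE-SER" */
	uint64_t data2 = 0x49414c2d30303432ULL;	/* "IAL-0042" */
	char serial[17];
	uint64_t tmp;

	tmp = be64toh(data1);			/* be64_to_cpu() in the driver */
	memcpy(serial, &tmp, 8);
	tmp = be64toh(data2);
	memcpy(serial + 8, &tmp, 8);
	serial[16] = '\0';

	printf("%s\n", serial);			/* VXGE-SERIAL-0042 */
	return 0;
}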
- */ -static enum vxge_hw_status -__vxge_hw_vpath_card_info_get( - u32 vp_id, - struct vxge_hw_vpath_reg __iomem *vpath_reg, - struct vxge_hw_device_hw_info *hw_info) -{ - u32 i, j; - u64 val64; - u64 data1 = 0ULL; - u64 data2 = 0ULL; - enum vxge_hw_status status = VXGE_HW_OK; - u8 *serial_number = hw_info->serial_number; - u8 *part_number = hw_info->part_number; - u8 *product_desc = hw_info->product_desc; - - __vxge_hw_read_rts_ds(vpath_reg, - VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER); - - val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( - VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | - VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | - VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); - - status = __vxge_hw_pio_mem_write64(val64, - &vpath_reg->rts_access_steer_ctrl, - VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, - VXGE_HW_DEF_DEVICE_POLL_MILLIS); - - if (status != VXGE_HW_OK) - return status; - - val64 = readq(&vpath_reg->rts_access_steer_ctrl); - - if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { - data1 = readq(&vpath_reg->rts_access_steer_data0); - ((u64 *)serial_number)[0] = be64_to_cpu(data1); - - data2 = readq(&vpath_reg->rts_access_steer_data1); - ((u64 *)serial_number)[1] = be64_to_cpu(data2); - status = VXGE_HW_OK; - } else - *serial_number = 0; - - __vxge_hw_read_rts_ds(vpath_reg, - VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER); - - val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( - VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | - VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | - VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); - - status = __vxge_hw_pio_mem_write64(val64, - &vpath_reg->rts_access_steer_ctrl, - VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, - VXGE_HW_DEF_DEVICE_POLL_MILLIS); - - if (status != VXGE_HW_OK) - return status; - - val64 = readq(&vpath_reg->rts_access_steer_ctrl); - - if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { - - data1 = readq(&vpath_reg->rts_access_steer_data0); - ((u64 *)part_number)[0] = be64_to_cpu(data1); - - data2 = readq(&vpath_reg->rts_access_steer_data1); - ((u64 *)part_number)[1] = be64_to_cpu(data2); - - status = VXGE_HW_OK; - - } else - *part_number = 0; - - j = 0; - - for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0; - i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) { - - __vxge_hw_read_rts_ds(vpath_reg, i); - - val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( - VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | - VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | - VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); - - status = __vxge_hw_pio_mem_write64(val64, - &vpath_reg->rts_access_steer_ctrl, - VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, - VXGE_HW_DEF_DEVICE_POLL_MILLIS); - - if (status != VXGE_HW_OK) - return status; - - val64 = readq(&vpath_reg->rts_access_steer_ctrl); - - if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { - - data1 = readq(&vpath_reg->rts_access_steer_data0); - ((u64 *)product_desc)[j++] = be64_to_cpu(data1); - - data2 = readq(&vpath_reg->rts_access_steer_data1); - ((u64 *)product_desc)[j++] = be64_to_cpu(data2); - - status = VXGE_HW_OK; - } else - *product_desc = 0; - } - - return status; -} - -/* - * __vxge_hw_vpath_fw_ver_get - Get the fw version - * Returns FW Version - */ -static 
enum vxge_hw_status -__vxge_hw_vpath_fw_ver_get( - u32 vp_id, - struct vxge_hw_vpath_reg __iomem *vpath_reg, - struct vxge_hw_device_hw_info *hw_info) -{ - u64 val64; - u64 data1 = 0ULL; - u64 data2 = 0ULL; - struct vxge_hw_device_version *fw_version = &hw_info->fw_version; - struct vxge_hw_device_date *fw_date = &hw_info->fw_date; - struct vxge_hw_device_version *flash_version = &hw_info->flash_version; - struct vxge_hw_device_date *flash_date = &hw_info->flash_date; - enum vxge_hw_status status = VXGE_HW_OK; - - val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( - VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) | - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | - VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | - VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); - - status = __vxge_hw_pio_mem_write64(val64, - &vpath_reg->rts_access_steer_ctrl, - VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, - VXGE_HW_DEF_DEVICE_POLL_MILLIS); - - if (status != VXGE_HW_OK) - goto exit; - - val64 = readq(&vpath_reg->rts_access_steer_ctrl); - - if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { - - data1 = readq(&vpath_reg->rts_access_steer_data0); - data2 = readq(&vpath_reg->rts_access_steer_data1); - - fw_date->day = - (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY( - data1); - fw_date->month = - (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH( - data1); - fw_date->year = - (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR( - data1); - - snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d", - fw_date->month, fw_date->day, fw_date->year); - - fw_version->major = - (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1); - fw_version->minor = - (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1); - fw_version->build = - (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1); - - snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", - fw_version->major, fw_version->minor, fw_version->build); - - flash_date->day = - (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2); - flash_date->month = - (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2); - flash_date->year = - (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2); - - snprintf(flash_date->date, VXGE_HW_FW_STRLEN, - "%2.2d/%2.2d/%4.4d", - flash_date->month, flash_date->day, flash_date->year); - - flash_version->major = - (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2); - flash_version->minor = - (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2); - flash_version->build = - (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2); - - snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", - flash_version->major, flash_version->minor, - flash_version->build); - - status = VXGE_HW_OK; - - } else - status = VXGE_HW_FAIL; -exit: - return status; -} - -/* - * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode - * Returns pci function mode - */ -static u64 -__vxge_hw_vpath_pci_func_mode_get( - u32 vp_id, - struct vxge_hw_vpath_reg __iomem *vpath_reg) -{ - u64 val64; - u64 data1 = 0ULL; - enum vxge_hw_status status = VXGE_HW_OK; - - __vxge_hw_read_rts_ds(vpath_reg, - VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE); - - val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( - VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | - VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | - VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); - 
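All of these removed helpers drive the same strobe handshake: compose a command word from ACTION, DATA_STRUCT_SEL and OFFSET fields, write it with the STROBE bit set via __vxge_hw_pio_mem_write64() (which, as used here, polls until the strobe clears or the poll budget expires), then check RMACJ_STATUS before reading the data registers. A stub model of the write-and-poll half; the bit position and helper names are illustrative, and the fake device clears the strobe after a few reads:

#include <stdint.h>
#include <stdio.h>

#define STROBE (1ULL << 63)	/* stand-in for the real strobe bit */

static uint64_t ctrl_reg;	/* stands in for rts_access_steer_ctrl */

static uint64_t read_ctrl(void)
{
	static int polls;

	if (++polls >= 3)
		ctrl_reg &= ~STROBE;	/* device "consumes" the command */
	return ctrl_reg;
}

/* Write a command with the strobe set, then poll until the device clears it. */
static int write_and_poll(uint64_t cmd, int max_polls)
{
	ctrl_reg = cmd | STROBE;	/* writeq() in the driver */
	while (max_polls-- > 0)
		if (!(read_ctrl() & STROBE))
			return 0;	/* done; caller checks RMACJ_STATUS */
	return -1;			/* timeout */
}

int main(void)
{
	printf("status=%d\n", write_and_poll(0x1, 100));
	return 0;
}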
- status = __vxge_hw_pio_mem_write64(val64, - &vpath_reg->rts_access_steer_ctrl, - VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, - VXGE_HW_DEF_DEVICE_POLL_MILLIS); - - if (status != VXGE_HW_OK) - goto exit; - - val64 = readq(&vpath_reg->rts_access_steer_ctrl); - - if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { - data1 = readq(&vpath_reg->rts_access_steer_data0); - status = VXGE_HW_OK; - } else { - data1 = 0; - status = VXGE_HW_FAIL; - } -exit: - return data1; -} - /** * vxge_hw_device_flick_link_led - Flick (blink) link LED. * @hldev: HW device. @@ -2974,37 +3636,24 @@ exit: * Flicker the link LED. */ enum vxge_hw_status -vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, - u64 on_off) +vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off) { - u64 val64; - enum vxge_hw_status status = VXGE_HW_OK; - struct vxge_hw_vpath_reg __iomem *vp_reg; + struct __vxge_hw_virtualpath *vpath; + u64 data0, data1 = 0, steer_ctrl = 0; + enum vxge_hw_status status; if (hldev == NULL) { status = VXGE_HW_ERR_INVALID_DEVICE; goto exit; } - vp_reg = hldev->vpath_reg[hldev->first_vp_id]; + vpath = &hldev->virtual_paths[hldev->first_vp_id]; - writeq(0, &vp_reg->rts_access_steer_ctrl); - wmb(); - writeq(on_off, &vp_reg->rts_access_steer_data0); - writeq(0, &vp_reg->rts_access_steer_data1); - wmb(); - - val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( - VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) | - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | - VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | - VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); - - status = __vxge_hw_pio_mem_write64(val64, - &vp_reg->rts_access_steer_ctrl, - VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, - VXGE_HW_DEF_DEVICE_POLL_MILLIS); + data0 = on_off; + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, + 0, &data0, &data1, &steer_ctrl); exit: return status; } @@ -3013,63 +3662,38 @@ exit: * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables */ enum vxge_hw_status -__vxge_hw_vpath_rts_table_get( - struct __vxge_hw_vpath_handle *vp, - u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2) +__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp, + u32 action, u32 rts_table, u32 offset, + u64 *data0, u64 *data1) { - u64 val64; - struct __vxge_hw_virtualpath *vpath; - struct vxge_hw_vpath_reg __iomem *vp_reg; - - enum vxge_hw_status status = VXGE_HW_OK; + enum vxge_hw_status status; + u64 steer_ctrl = 0; if (vp == NULL) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } - vpath = vp->vpath; - vp_reg = vpath->vp_reg; - - val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) | - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) | - VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | - VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset); - if ((rts_table == - VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) || + VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) || (rts_table == - VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) || + VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) || (rts_table == - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) || + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) || (rts_table == - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) { - val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL; + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) { + steer_ctrl = 
VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL; } - status = __vxge_hw_pio_mem_write64(val64, - &vp_reg->rts_access_steer_ctrl, - VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, - vpath->hldev->config.device_poll_millis); - + status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset, + data0, data1, &steer_ctrl); if (status != VXGE_HW_OK) goto exit; - val64 = readq(&vp_reg->rts_access_steer_ctrl); - - if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { - - *data1 = readq(&vp_reg->rts_access_steer_data0); - - if ((rts_table == - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || - (rts_table == - VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) { - *data2 = readq(&vp_reg->rts_access_steer_data1); - } - status = VXGE_HW_OK; - } else - status = VXGE_HW_FAIL; + if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || + (rts_table != + VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) + *data1 = 0; exit: return status; } @@ -3078,107 +3702,27 @@ exit: * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables */ enum vxge_hw_status -__vxge_hw_vpath_rts_table_set( - struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table, - u32 offset, u64 data1, u64 data2) +__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action, + u32 rts_table, u32 offset, u64 steer_data0, + u64 steer_data1) { - u64 val64; - struct __vxge_hw_virtualpath *vpath; - enum vxge_hw_status status = VXGE_HW_OK; - struct vxge_hw_vpath_reg __iomem *vp_reg; + u64 data0, data1 = 0, steer_ctrl = 0; + enum vxge_hw_status status; if (vp == NULL) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } - vpath = vp->vpath; - vp_reg = vpath->vp_reg; - - writeq(data1, &vp_reg->rts_access_steer_data0); - wmb(); + data0 = steer_data0; if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || (rts_table == - VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) { - writeq(data2, &vp_reg->rts_access_steer_data1); - wmb(); - } - - val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) | - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) | - VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | - VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset); - - status = __vxge_hw_pio_mem_write64(val64, - &vp_reg->rts_access_steer_ctrl, - VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, - vpath->hldev->config.device_poll_millis); - - if (status != VXGE_HW_OK) - goto exit; - - val64 = readq(&vp_reg->rts_access_steer_ctrl); - - if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) - status = VXGE_HW_OK; - else - status = VXGE_HW_FAIL; -exit: - return status; -} - -/* - * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath - * from MAC address table. 
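The removed __vxge_hw_vpath_addr_get() below recovers the 6-byte MAC address and its mask from the low bytes of the two data registers, most-significant byte first. Its unpack loop as a standalone function, fed a sample value:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Unpack a MAC address from the low 48 bits of a register value,
 * most-significant byte first, exactly as the driver's loop does. */
static void unpack_mac(uint64_t data, uint8_t mac[ETH_ALEN])
{
	int i;

	for (i = ETH_ALEN; i > 0; i--) {
		mac[i - 1] = (uint8_t)(data & 0xFF);
		data >>= 8;
	}
}

int main(void)
{
	uint8_t mac[ETH_ALEN];

	unpack_mac(0x001122334455ULL, mac);	/* sample value, not hardware data */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}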
- */ -static enum vxge_hw_status -__vxge_hw_vpath_addr_get( - u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg, - u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]) -{ - u32 i; - u64 val64; - u64 data1 = 0ULL; - u64 data2 = 0ULL; - enum vxge_hw_status status = VXGE_HW_OK; - - val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( - VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) | - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) | - VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | - VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); - - status = __vxge_hw_pio_mem_write64(val64, - &vpath_reg->rts_access_steer_ctrl, - VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, - VXGE_HW_DEF_DEVICE_POLL_MILLIS); - - if (status != VXGE_HW_OK) - goto exit; - - val64 = readq(&vpath_reg->rts_access_steer_ctrl); - - if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { + VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) + data1 = steer_data1; - data1 = readq(&vpath_reg->rts_access_steer_data0); - data2 = readq(&vpath_reg->rts_access_steer_data1); - - data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1); - data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK( - data2); - - for (i = ETH_ALEN; i > 0; i--) { - macaddr[i-1] = (u8)(data1 & 0xFF); - data1 >>= 8; - - macaddr_mask[i-1] = (u8)(data2 & 0xFF); - data2 >>= 8; - } - status = VXGE_HW_OK; - } else - status = VXGE_HW_FAIL; + status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset, + &data0, &data1, &steer_ctrl); exit: return status; } @@ -3204,6 +3748,8 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set( VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG, 0, &data0, &data1); + if (status != VXGE_HW_OK) + goto exit; data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3)); @@ -3771,10 +4317,10 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) vp_reg = vpath->vp_reg; config = vpath->vp_config; - writeq((u64)0, &vp_reg->tim_dest_addr); - writeq((u64)0, &vp_reg->tim_vpath_map); - writeq((u64)0, &vp_reg->tim_bitmap); - writeq((u64)0, &vp_reg->tim_remap); + writeq(0, &vp_reg->tim_dest_addr); + writeq(0, &vp_reg->tim_vpath_map); + writeq(0, &vp_reg->tim_bitmap); + writeq(0, &vp_reg->tim_remap); if (config->ring.enable == VXGE_HW_RING_ENABLE) writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM( @@ -3876,8 +4422,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); - val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL( - config->tti.util_sel); + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id); } if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { @@ -3981,8 +4526,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); - val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL( - config->rti.util_sel); + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id); } if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { @@ -4003,11 +4547,15 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]); writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]); + val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150); + val64 |= 
VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0); + val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3); + writeq(val64, &vp_reg->tim_wrkld_clc); + return status; } -void -vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id) +void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id) { struct __vxge_hw_virtualpath *vpath; struct vxge_hw_vpath_reg __iomem *vp_reg; @@ -4018,17 +4566,15 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id) vp_reg = vpath->vp_reg; config = vpath->vp_config; - if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) { + if (config->fifo.enable == VXGE_HW_FIFO_ENABLE && + config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) { + config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE; val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); - - if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) { - config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE; - val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; - writeq(val64, - &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); - } + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; + writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); } } + /* * __vxge_hw_vpath_initialize * This routine is the final phase of init which initializes the @@ -4052,22 +4598,18 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) vp_reg = vpath->vp_reg; status = __vxge_hw_vpath_swapper_set(vpath->vp_reg); - if (status != VXGE_HW_OK) goto exit; status = __vxge_hw_vpath_mac_configure(hldev, vp_id); - if (status != VXGE_HW_OK) goto exit; status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id); - if (status != VXGE_HW_OK) goto exit; status = __vxge_hw_vpath_tim_configure(hldev, vp_id); - if (status != VXGE_HW_OK) goto exit; @@ -4075,7 +4617,6 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) /* Get MRRS value from device control */ status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32); - if (status == VXGE_HW_OK) { val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12; val64 &= @@ -4099,6 +4640,28 @@ exit: } /* + * __vxge_hw_vp_terminate - Terminate Virtual Path structure + * This routine closes all channels it opened and freeup memory + */ +static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id) +{ + struct __vxge_hw_virtualpath *vpath; + + vpath = &hldev->virtual_paths[vp_id]; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) + goto exit; + + VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0, + vpath->hldev->tim_int_mask1, vpath->vp_id); + hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL; + + memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); +exit: + return; +} + +/* * __vxge_hw_vp_initialize - Initialize Virtual Path structure * This routine is the initial phase of init which resets the vpath and * initializes the software support structures. 
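vxge_hw_vpath_tti_ci_set() just above is a guarded read-modify-write: only when the fifo is enabled and CI is not already on does it record the change in the software config and OR the bit into tim_cfg1. The same shape with plain variables in place of readq()/writeq(); the bit position is invented, the real layout being in vxge-reg.h:

#include <stdint.h>
#include <stdio.h>

#define TIMER_CI (1ULL << 7)	/* illustrative bit, not the real TIM_CFG1 layout */

static uint64_t tim_cfg1;	/* stands in for tim_cfg1_int_num[...INTR_TX] */

static void set_timer_ci(int fifo_enabled, int *ci_enabled)
{
	uint64_t val64;

	if (!fifo_enabled || *ci_enabled)
		return;			/* mirrors the early-out above */

	*ci_enabled = 1;		/* remember in the software config first */
	val64 = tim_cfg1;		/* readq() in the driver */
	val64 |= TIMER_CI;
	tim_cfg1 = val64;		/* writeq() in the driver */
}

int main(void)
{
	int ci = 0;

	set_timer_ci(1, &ci);
	printf("ci=%d cfg=0x%llx\n", ci, (unsigned long long)tim_cfg1);
	return 0;
}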
@@ -4117,6 +4680,7 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, vpath = &hldev->virtual_paths[vp_id]; + spin_lock_init(&hldev->virtual_paths[vp_id].lock); vpath->vp_id = vp_id; vpath->vp_open = VXGE_HW_VP_OPEN; vpath->hldev = hldev; @@ -4127,14 +4691,12 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, __vxge_hw_vpath_reset(hldev, vp_id); status = __vxge_hw_vpath_reset_check(vpath); - if (status != VXGE_HW_OK) { memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); goto exit; } status = __vxge_hw_vpath_mgmt_read(hldev, vpath); - if (status != VXGE_HW_OK) { memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); goto exit; @@ -4148,7 +4710,6 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, hldev->tim_int_mask1, vp_id); status = __vxge_hw_vpath_initialize(hldev, vp_id); - if (status != VXGE_HW_OK) __vxge_hw_vp_terminate(hldev, vp_id); exit: @@ -4156,29 +4717,6 @@ exit: } /* - * __vxge_hw_vp_terminate - Terminate Virtual Path structure - * This routine closes all channels it opened and freeup memory - */ -static void -__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id) -{ - struct __vxge_hw_virtualpath *vpath; - - vpath = &hldev->virtual_paths[vp_id]; - - if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) - goto exit; - - VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0, - vpath->hldev->tim_int_mask1, vpath->vp_id); - hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL; - - memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); -exit: - return; -} - -/* * vxge_hw_vpath_mtu_set - Set MTU. * Set new MTU value. Example, to use jumbo frames: * vxge_hw_vpath_mtu_set(my_device, 9600); @@ -4215,6 +4753,64 @@ exit: } /* + * vxge_hw_vpath_stats_enable - Enable vpath h/wstatistics. + * Enable the DMA vpath statistics. The function is to be called to re-enable + * the adapter to update stats into the host memory + */ +static enum vxge_hw_status +vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_virtualpath *vpath; + + vpath = vp->vpath; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { + status = VXGE_HW_ERR_VPATH_NOT_OPEN; + goto exit; + } + + memcpy(vpath->hw_stats_sav, vpath->hw_stats, + sizeof(struct vxge_hw_vpath_stats_hw_info)); + + status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats); +exit: + return status; +} + +/* + * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool + * This function allocates a block from block pool or from the system + */ +static struct __vxge_hw_blockpool_entry * +__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size) +{ + struct __vxge_hw_blockpool_entry *entry = NULL; + struct __vxge_hw_blockpool *blockpool; + + blockpool = &devh->block_pool; + + if (size == blockpool->block_size) { + + if (!list_empty(&blockpool->free_block_list)) + entry = (struct __vxge_hw_blockpool_entry *) + list_first_entry(&blockpool->free_block_list, + struct __vxge_hw_blockpool_entry, + item); + + if (entry != NULL) { + list_del(&entry->item); + blockpool->pool_size--; + } + } + + if (entry != NULL) + __vxge_hw_blockpool_blocks_add(blockpool); + + return entry; +} + +/* * vxge_hw_vpath_open - Open a virtual path on a given adapter * This function is used to open access to virtual path of an * adapter for offload, GRO operations. 
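vxge_hw_vpath_stats_enable() above copies the current hardware counters into hw_stats_sav before refreshing hw_stats from the device, presumably so callers can diff the fresh values against the last snapshot. The pattern in miniature, with a fake device refresh and invented names:

#include <stdio.h>
#include <string.h>

struct stats { unsigned long rx_frms, tx_frms; };

static struct stats hw_stats, hw_stats_sav;

static void hw_refresh(struct stats *s)	/* fake device update */
{
	s->rx_frms += 10;
	s->tx_frms += 5;
}

static void stats_enable(void)
{
	memcpy(&hw_stats_sav, &hw_stats, sizeof(hw_stats));	/* keep snapshot */
	hw_refresh(&hw_stats);					/* then refetch */
}

int main(void)
{
	stats_enable();
	stats_enable();
	printf("delta rx=%lu\n", hw_stats.rx_frms - hw_stats_sav.rx_frms);
	return 0;
}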
This function returns @@ -4238,19 +4834,15 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev, status = __vxge_hw_vp_initialize(hldev, attr->vp_id, &hldev->config.vp_config[attr->vp_id]); - if (status != VXGE_HW_OK) goto vpath_open_exit1; - vp = (struct __vxge_hw_vpath_handle *) - vmalloc(sizeof(struct __vxge_hw_vpath_handle)); + vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle)); if (vp == NULL) { status = VXGE_HW_ERR_OUT_OF_MEMORY; goto vpath_open_exit2; } - memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle)); - vp->vpath = vpath; if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { @@ -4273,7 +4865,6 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev, vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev, VXGE_HW_BLOCK_SIZE); - if (vpath->stats_block == NULL) { status = VXGE_HW_ERR_OUT_OF_MEMORY; goto vpath_open_exit8; @@ -4332,19 +4923,20 @@ vpath_open_exit1: * This function is used to close access to virtual path opened * earlier. */ -void -vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) +void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) { - struct __vxge_hw_virtualpath *vpath = NULL; + struct __vxge_hw_virtualpath *vpath = vp->vpath; + struct __vxge_hw_ring *ring = vpath->ringh; + struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev); u64 new_count, val64, val164; - struct __vxge_hw_ring *ring; - vpath = vp->vpath; - ring = vpath->ringh; + if (vdev->titan1) { + new_count = readq(&vpath->vp_reg->rxdmem_size); + new_count &= 0x1fff; + } else + new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8; - new_count = readq(&vpath->vp_reg->rxdmem_size); - new_count &= 0x1fff; - val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count)); + val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count); writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164), &vpath->vp_reg->prc_rxd_doorbell); @@ -4367,6 +4959,29 @@ vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) } /* + * __vxge_hw_blockpool_block_free - Frees a block from block pool + * @devh: Hal device + * @entry: Entry of block to be freed + * + * This function frees a block from block pool + */ +static void +__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh, + struct __vxge_hw_blockpool_entry *entry) +{ + struct __vxge_hw_blockpool *blockpool; + + blockpool = &devh->block_pool; + + if (entry->length == blockpool->block_size) { + list_add(&entry->item, &blockpool->free_block_list); + blockpool->pool_size++; + } + + __vxge_hw_blockpool_blocks_remove(blockpool); +} + +/* * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open * This function is used to close access to virtual path opened * earlier. @@ -4414,7 +5029,9 @@ enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp) __vxge_hw_vp_terminate(devh, vp_id); + spin_lock(&vpath->lock); vpath->vp_open = VXGE_HW_VP_NOT_OPEN; + spin_unlock(&vpath->lock); vpath_close_exit: return status; @@ -4515,730 +5132,3 @@ vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp) __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), &hldev->common_reg->cmn_rsthdlr_cfg1); } - -/* - * vxge_hw_vpath_stats_enable - Enable vpath h/wstatistics. - * Enable the DMA vpath statistics. 
The function is to be called to re-enable - * the adapter to update stats into the host memory - */ -static enum vxge_hw_status -vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp) -{ - enum vxge_hw_status status = VXGE_HW_OK; - struct __vxge_hw_virtualpath *vpath; - - vpath = vp->vpath; - - if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { - status = VXGE_HW_ERR_VPATH_NOT_OPEN; - goto exit; - } - - memcpy(vpath->hw_stats_sav, vpath->hw_stats, - sizeof(struct vxge_hw_vpath_stats_hw_info)); - - status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats); -exit: - return status; -} - -/* - * __vxge_hw_vpath_stats_access - Get the statistics from the given location - * and offset and perform an operation - */ -static enum vxge_hw_status -__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, - u32 operation, u32 offset, u64 *stat) -{ - u64 val64; - enum vxge_hw_status status = VXGE_HW_OK; - struct vxge_hw_vpath_reg __iomem *vp_reg; - - if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { - status = VXGE_HW_ERR_VPATH_NOT_OPEN; - goto vpath_stats_access_exit; - } - - vp_reg = vpath->vp_reg; - - val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) | - VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE | - VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset); - - status = __vxge_hw_pio_mem_write64(val64, - &vp_reg->xmac_stats_access_cmd, - VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE, - vpath->hldev->config.device_poll_millis); - - if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ)) - *stat = readq(&vp_reg->xmac_stats_access_data); - else - *stat = 0; - -vpath_stats_access_exit: - return status; -} - -/* - * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath - */ -static enum vxge_hw_status -__vxge_hw_vpath_xmac_tx_stats_get( - struct __vxge_hw_virtualpath *vpath, - struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats) -{ - u64 *val64; - int i; - u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET; - enum vxge_hw_status status = VXGE_HW_OK; - - val64 = (u64 *) vpath_tx_stats; - - if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { - status = VXGE_HW_ERR_VPATH_NOT_OPEN; - goto exit; - } - - for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) { - status = __vxge_hw_vpath_stats_access(vpath, - VXGE_HW_STATS_OP_READ, - offset, val64); - if (status != VXGE_HW_OK) - goto exit; - offset++; - val64++; - } -exit: - return status; -} - -/* - * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath - */ -static enum vxge_hw_status -__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, - struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats) -{ - u64 *val64; - enum vxge_hw_status status = VXGE_HW_OK; - int i; - u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET; - val64 = (u64 *) vpath_rx_stats; - - if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { - status = VXGE_HW_ERR_VPATH_NOT_OPEN; - goto exit; - } - for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) { - status = __vxge_hw_vpath_stats_access(vpath, - VXGE_HW_STATS_OP_READ, - offset >> 3, val64); - if (status != VXGE_HW_OK) - goto exit; - - offset += 8; - val64++; - } -exit: - return status; -} - -/* - * __vxge_hw_vpath_stats_get - Get the vpath hw statistics. 
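The two removed XMAC helpers above treat the stats structure as a flat array of u64 counters and fetch one word per strobe cycle; the tx loop advances its offset by one per word while the rx loop advances by eight and shifts right by three at the call site, which appears to be the same offset expressed in different units. A compact model of the tx loop; the struct fields and counter source are stand-ins:

#include <stdint.h>
#include <stdio.h>

struct tx_stats {		/* tiny stand-in for vxge_hw_xmac_vpath_tx_stats */
	uint64_t frms;
	uint64_t octets;
	uint64_t discards;
};

/* Pretend device counter read, indexed by word offset. */
static uint64_t hw_read_counter(uint32_t offset)
{
	return 1000 + offset;
}

static void read_tx_stats(struct tx_stats *s)
{
	uint64_t *val64 = (uint64_t *)s;
	uint32_t offset = 0;	/* VXGE_HW_STATS_VPATH_TX_OFFSET in the driver */
	size_t i;

	for (i = 0; i < sizeof(*s) / 8; i++) {
		*val64 = hw_read_counter(offset);	/* one strobe cycle each */
		offset++;
		val64++;
	}
}

int main(void)
{
	struct tx_stats s;

	read_tx_stats(&s);
	printf("%llu %llu %llu\n", (unsigned long long)s.frms,
	       (unsigned long long)s.octets, (unsigned long long)s.discards);
	return 0;
}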
- */ -static enum vxge_hw_status -__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath, - struct vxge_hw_vpath_stats_hw_info *hw_stats) -{ - u64 val64; - enum vxge_hw_status status = VXGE_HW_OK; - struct vxge_hw_vpath_reg __iomem *vp_reg; - - if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { - status = VXGE_HW_ERR_VPATH_NOT_OPEN; - goto exit; - } - vp_reg = vpath->vp_reg; - - val64 = readq(&vp_reg->vpath_debug_stats0); - hw_stats->ini_num_mwr_sent = - (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64); - - val64 = readq(&vp_reg->vpath_debug_stats1); - hw_stats->ini_num_mrd_sent = - (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64); - - val64 = readq(&vp_reg->vpath_debug_stats2); - hw_stats->ini_num_cpl_rcvd = - (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64); - - val64 = readq(&vp_reg->vpath_debug_stats3); - hw_stats->ini_num_mwr_byte_sent = - VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64); - - val64 = readq(&vp_reg->vpath_debug_stats4); - hw_stats->ini_num_cpl_byte_rcvd = - VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64); - - val64 = readq(&vp_reg->vpath_debug_stats5); - hw_stats->wrcrdtarb_xoff = - (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64); - - val64 = readq(&vp_reg->vpath_debug_stats6); - hw_stats->rdcrdtarb_xoff = - (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64); - - val64 = readq(&vp_reg->vpath_genstats_count01); - hw_stats->vpath_genstats_count0 = - (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0( - val64); - - val64 = readq(&vp_reg->vpath_genstats_count01); - hw_stats->vpath_genstats_count1 = - (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1( - val64); - - val64 = readq(&vp_reg->vpath_genstats_count23); - hw_stats->vpath_genstats_count2 = - (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2( - val64); - - val64 = readq(&vp_reg->vpath_genstats_count01); - hw_stats->vpath_genstats_count3 = - (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3( - val64); - - val64 = readq(&vp_reg->vpath_genstats_count4); - hw_stats->vpath_genstats_count4 = - (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4( - val64); - - val64 = readq(&vp_reg->vpath_genstats_count5); - hw_stats->vpath_genstats_count5 = - (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5( - val64); - - status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats); - if (status != VXGE_HW_OK) - goto exit; - - status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats); - if (status != VXGE_HW_OK) - goto exit; - - VXGE_HW_VPATH_STATS_PIO_READ( - VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET); - - hw_stats->prog_event_vnum0 = - (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64); - - hw_stats->prog_event_vnum1 = - (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64); - - VXGE_HW_VPATH_STATS_PIO_READ( - VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET); - - hw_stats->prog_event_vnum2 = - (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64); - - hw_stats->prog_event_vnum3 = - (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64); - - val64 = readq(&vp_reg->rx_multi_cast_stats); - hw_stats->rx_multi_cast_frame_discard = - (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64); - - val64 = readq(&vp_reg->rx_frm_transferred); - hw_stats->rx_frm_transferred = - (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64); - - val64 = readq(&vp_reg->rxd_returned); - hw_stats->rxd_returned = - (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64); - 
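Registers such as dbg_stats_rx_mpa below pack several 16-bit counters into a single 64-bit word, and each VXGE_HW_..._GET_* macro is a shift-and-mask over it, with bit 0 conventionally the most significant bit (the vxge_bVALn numbering). A generic extractor over a fabricated register image; the field positions are made up, the real layouts living in vxge-reg.h:

#include <stdint.h>
#include <stdio.h>

/* Extract 'len' bits (len < 64) starting at bit 'msb' counted from the left,
 * i.e. bit 0 is the most significant bit of the 64-bit register. */
static uint64_t get_field(uint64_t reg, unsigned msb, unsigned len)
{
	return (reg >> (64 - msb - len)) & ((1ULL << len) - 1);
}

int main(void)
{
	uint64_t dbg = 0x0001000200030000ULL;	/* fabricated register image */

	/* Three 16-bit counters packed high-to-low (illustrative layout). */
	printf("a=%llu b=%llu c=%llu\n",
	       (unsigned long long)get_field(dbg, 0, 16),
	       (unsigned long long)get_field(dbg, 16, 16),
	       (unsigned long long)get_field(dbg, 32, 16));
	return 0;
}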
- val64 = readq(&vp_reg->dbg_stats_rx_mpa); - hw_stats->rx_mpa_len_fail_frms = - (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64); - hw_stats->rx_mpa_mrk_fail_frms = - (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64); - hw_stats->rx_mpa_crc_fail_frms = - (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64); - - val64 = readq(&vp_reg->dbg_stats_rx_fau); - hw_stats->rx_permitted_frms = - (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64); - hw_stats->rx_vp_reset_discarded_frms = - (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64); - hw_stats->rx_wol_frms = - (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64); - - val64 = readq(&vp_reg->tx_vp_reset_discarded_frms); - hw_stats->tx_vp_reset_discarded_frms = - (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS( - val64); -exit: - return status; -} - - -static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, - unsigned long size) -{ - gfp_t flags; - void *vaddr; - - if (in_interrupt()) - flags = GFP_ATOMIC | GFP_DMA; - else - flags = GFP_KERNEL | GFP_DMA; - - vaddr = kmalloc((size), flags); - - vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev); -} - -static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr, - struct pci_dev **p_dma_acch) -{ - unsigned long misaligned = *(unsigned long *)p_dma_acch; - u8 *tmp = (u8 *)vaddr; - tmp -= misaligned; - kfree((void *)tmp); -} - -/* - * __vxge_hw_blockpool_create - Create block pool - */ - -enum vxge_hw_status -__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, - struct __vxge_hw_blockpool *blockpool, - u32 pool_size, - u32 pool_max) -{ - u32 i; - struct __vxge_hw_blockpool_entry *entry = NULL; - void *memblock; - dma_addr_t dma_addr; - struct pci_dev *dma_handle; - struct pci_dev *acc_handle; - enum vxge_hw_status status = VXGE_HW_OK; - - if (blockpool == NULL) { - status = VXGE_HW_FAIL; - goto blockpool_create_exit; - } - - blockpool->hldev = hldev; - blockpool->block_size = VXGE_HW_BLOCK_SIZE; - blockpool->pool_size = 0; - blockpool->pool_max = pool_max; - blockpool->req_out = 0; - - INIT_LIST_HEAD(&blockpool->free_block_list); - INIT_LIST_HEAD(&blockpool->free_entry_list); - - for (i = 0; i < pool_size + pool_max; i++) { - entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry), - GFP_KERNEL); - if (entry == NULL) { - __vxge_hw_blockpool_destroy(blockpool); - status = VXGE_HW_ERR_OUT_OF_MEMORY; - goto blockpool_create_exit; - } - list_add(&entry->item, &blockpool->free_entry_list); - } - - for (i = 0; i < pool_size; i++) { - - memblock = vxge_os_dma_malloc( - hldev->pdev, - VXGE_HW_BLOCK_SIZE, - &dma_handle, - &acc_handle); - - if (memblock == NULL) { - __vxge_hw_blockpool_destroy(blockpool); - status = VXGE_HW_ERR_OUT_OF_MEMORY; - goto blockpool_create_exit; - } - - dma_addr = pci_map_single(hldev->pdev, memblock, - VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL); - - if (unlikely(pci_dma_mapping_error(hldev->pdev, - dma_addr))) { - - vxge_os_dma_free(hldev->pdev, memblock, &acc_handle); - __vxge_hw_blockpool_destroy(blockpool); - status = VXGE_HW_ERR_OUT_OF_MEMORY; - goto blockpool_create_exit; - } - - if (!list_empty(&blockpool->free_entry_list)) - entry = (struct __vxge_hw_blockpool_entry *) - list_first_entry(&blockpool->free_entry_list, - struct __vxge_hw_blockpool_entry, - item); - - if (entry == NULL) - entry = - kzalloc(sizeof(struct __vxge_hw_blockpool_entry), - GFP_KERNEL); - if (entry != NULL) { - list_del(&entry->item); - entry->length = VXGE_HW_BLOCK_SIZE; - entry->memblock = memblock; - 
entry->dma_addr = dma_addr; - entry->acc_handle = acc_handle; - entry->dma_handle = dma_handle; - list_add(&entry->item, - &blockpool->free_block_list); - blockpool->pool_size++; - } else { - __vxge_hw_blockpool_destroy(blockpool); - status = VXGE_HW_ERR_OUT_OF_MEMORY; - goto blockpool_create_exit; - } - } - -blockpool_create_exit: - return status; -} - -/* - * __vxge_hw_blockpool_destroy - Deallocates the block pool - */ - -void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) -{ - - struct __vxge_hw_device *hldev; - struct list_head *p, *n; - u16 ret; - - if (blockpool == NULL) { - ret = 1; - goto exit; - } - - hldev = blockpool->hldev; - - list_for_each_safe(p, n, &blockpool->free_block_list) { - - pci_unmap_single(hldev->pdev, - ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, - ((struct __vxge_hw_blockpool_entry *)p)->length, - PCI_DMA_BIDIRECTIONAL); - - vxge_os_dma_free(hldev->pdev, - ((struct __vxge_hw_blockpool_entry *)p)->memblock, - &((struct __vxge_hw_blockpool_entry *) p)->acc_handle); - - list_del( - &((struct __vxge_hw_blockpool_entry *)p)->item); - kfree(p); - blockpool->pool_size--; - } - - list_for_each_safe(p, n, &blockpool->free_entry_list) { - list_del( - &((struct __vxge_hw_blockpool_entry *)p)->item); - kfree((void *)p); - } - ret = 0; -exit: - return; -} - -/* - * __vxge_hw_blockpool_blocks_add - Request additional blocks - */ -static -void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool) -{ - u32 nreq = 0, i; - - if ((blockpool->pool_size + blockpool->req_out) < - VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) { - nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE; - blockpool->req_out += nreq; - } - - for (i = 0; i < nreq; i++) - vxge_os_dma_malloc_async( - ((struct __vxge_hw_device *)blockpool->hldev)->pdev, - blockpool->hldev, VXGE_HW_BLOCK_SIZE); -} - -/* - * __vxge_hw_blockpool_blocks_remove - Free additional blocks - */ -static -void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool) -{ - struct list_head *p, *n; - - list_for_each_safe(p, n, &blockpool->free_block_list) { - - if (blockpool->pool_size < blockpool->pool_max) - break; - - pci_unmap_single( - ((struct __vxge_hw_device *)blockpool->hldev)->pdev, - ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, - ((struct __vxge_hw_blockpool_entry *)p)->length, - PCI_DMA_BIDIRECTIONAL); - - vxge_os_dma_free( - ((struct __vxge_hw_device *)blockpool->hldev)->pdev, - ((struct __vxge_hw_blockpool_entry *)p)->memblock, - &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); - - list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); - - list_add(p, &blockpool->free_entry_list); - - blockpool->pool_size--; - - } -} - -/* - * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async - * Adds a block to block pool - */ -static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh, - void *block_addr, - u32 length, - struct pci_dev *dma_h, - struct pci_dev *acc_handle) -{ - struct __vxge_hw_blockpool *blockpool; - struct __vxge_hw_blockpool_entry *entry = NULL; - dma_addr_t dma_addr; - enum vxge_hw_status status = VXGE_HW_OK; - u32 req_out; - - blockpool = &devh->block_pool; - - if (block_addr == NULL) { - blockpool->req_out--; - status = VXGE_HW_FAIL; - goto exit; - } - - dma_addr = pci_map_single(devh->pdev, block_addr, length, - PCI_DMA_BIDIRECTIONAL); - - if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { - - vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); - blockpool->req_out--; - status = VXGE_HW_FAIL; - goto exit; - } - - - if 
(!list_empty(&blockpool->free_entry_list)) - entry = (struct __vxge_hw_blockpool_entry *) - list_first_entry(&blockpool->free_entry_list, - struct __vxge_hw_blockpool_entry, - item); - - if (entry == NULL) - entry = (struct __vxge_hw_blockpool_entry *) - vmalloc(sizeof(struct __vxge_hw_blockpool_entry)); - else - list_del(&entry->item); - - if (entry != NULL) { - entry->length = length; - entry->memblock = block_addr; - entry->dma_addr = dma_addr; - entry->acc_handle = acc_handle; - entry->dma_handle = dma_h; - list_add(&entry->item, &blockpool->free_block_list); - blockpool->pool_size++; - status = VXGE_HW_OK; - } else - status = VXGE_HW_ERR_OUT_OF_MEMORY; - - blockpool->req_out--; - - req_out = blockpool->req_out; -exit: - return; -} - -/* - * __vxge_hw_blockpool_malloc - Allocate a memory block from pool - * Allocates a block of memory of given size, either from block pool - * or by calling vxge_os_dma_malloc() - */ -void * -__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, - struct vxge_hw_mempool_dma *dma_object) -{ - struct __vxge_hw_blockpool_entry *entry = NULL; - struct __vxge_hw_blockpool *blockpool; - void *memblock = NULL; - enum vxge_hw_status status = VXGE_HW_OK; - - blockpool = &devh->block_pool; - - if (size != blockpool->block_size) { - - memblock = vxge_os_dma_malloc(devh->pdev, size, - &dma_object->handle, - &dma_object->acc_handle); - - if (memblock == NULL) { - status = VXGE_HW_ERR_OUT_OF_MEMORY; - goto exit; - } - - dma_object->addr = pci_map_single(devh->pdev, memblock, size, - PCI_DMA_BIDIRECTIONAL); - - if (unlikely(pci_dma_mapping_error(devh->pdev, - dma_object->addr))) { - vxge_os_dma_free(devh->pdev, memblock, - &dma_object->acc_handle); - status = VXGE_HW_ERR_OUT_OF_MEMORY; - goto exit; - } - - } else { - - if (!list_empty(&blockpool->free_block_list)) - entry = (struct __vxge_hw_blockpool_entry *) - list_first_entry(&blockpool->free_block_list, - struct __vxge_hw_blockpool_entry, - item); - - if (entry != NULL) { - list_del(&entry->item); - dma_object->addr = entry->dma_addr; - dma_object->handle = entry->dma_handle; - dma_object->acc_handle = entry->acc_handle; - memblock = entry->memblock; - - list_add(&entry->item, - &blockpool->free_entry_list); - blockpool->pool_size--; - } - - if (memblock != NULL) - __vxge_hw_blockpool_blocks_add(blockpool); - } -exit: - return memblock; -} - -/* - * __vxge_hw_blockpool_free - Frees the memory allocated with - __vxge_hw_blockpool_malloc - */ -void -__vxge_hw_blockpool_free(struct __vxge_hw_device *devh, - void *memblock, u32 size, - struct vxge_hw_mempool_dma *dma_object) -{ - struct __vxge_hw_blockpool_entry *entry = NULL; - struct __vxge_hw_blockpool *blockpool; - enum vxge_hw_status status = VXGE_HW_OK; - - blockpool = &devh->block_pool; - - if (size != blockpool->block_size) { - pci_unmap_single(devh->pdev, dma_object->addr, size, - PCI_DMA_BIDIRECTIONAL); - vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); - } else { - - if (!list_empty(&blockpool->free_entry_list)) - entry = (struct __vxge_hw_blockpool_entry *) - list_first_entry(&blockpool->free_entry_list, - struct __vxge_hw_blockpool_entry, - item); - - if (entry == NULL) - entry = (struct __vxge_hw_blockpool_entry *) - vmalloc(sizeof( - struct __vxge_hw_blockpool_entry)); - else - list_del(&entry->item); - - if (entry != NULL) { - entry->length = size; - entry->memblock = memblock; - entry->dma_addr = dma_object->addr; - entry->acc_handle = dma_object->acc_handle; - entry->dma_handle = dma_object->handle; -
list_add(&entry->item, - &blockpool->free_block_list); - blockpool->pool_size++; - status = VXGE_HW_OK; - } else - status = VXGE_HW_ERR_OUT_OF_MEMORY; - - if (status == VXGE_HW_OK) - __vxge_hw_blockpool_blocks_remove(blockpool); - } -} - -/* - * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool - * This function allocates a block from block pool or from the system - */ -struct __vxge_hw_blockpool_entry * -__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size) -{ - struct __vxge_hw_blockpool_entry *entry = NULL; - struct __vxge_hw_blockpool *blockpool; - - blockpool = &devh->block_pool; - - if (size == blockpool->block_size) { - - if (!list_empty(&blockpool->free_block_list)) - entry = (struct __vxge_hw_blockpool_entry *) - list_first_entry(&blockpool->free_block_list, - struct __vxge_hw_blockpool_entry, - item); - - if (entry != NULL) { - list_del(&entry->item); - blockpool->pool_size--; - } - } - - if (entry != NULL) - __vxge_hw_blockpool_blocks_add(blockpool); - - return entry; -} - -/* - * __vxge_hw_blockpool_block_free - Frees a block from block pool - * @devh: Hal device - * @entry: Entry of block to be freed - * - * This function frees a block from block pool - */ -void -__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh, - struct __vxge_hw_blockpool_entry *entry) -{ - struct __vxge_hw_blockpool *blockpool; - - blockpool = &devh->block_pool; - - if (entry->length == blockpool->block_size) { - list_add(&entry->item, &blockpool->free_block_list); - blockpool->pool_size++; - } - - __vxge_hw_blockpool_blocks_remove(blockpool); -} diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h index 5c00861b6c2c..e249e288d160 100644 --- a/drivers/net/vxge/vxge-config.h +++ b/drivers/net/vxge/vxge-config.h @@ -20,13 +20,6 @@ #define VXGE_CACHE_LINE_SIZE 128 #endif -#define vxge_os_vaprintf(level, mask, fmt, ...) { \ - char buff[255]; \ - snprintf(buff, 255, fmt, __VA_ARGS__); \ - printk(buff); \ - printk("\n"); \ -} - #ifndef VXGE_ALIGN #define VXGE_ALIGN(adrs, size) \ (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1)) @@ -36,8 +29,16 @@ #define VXGE_HW_MAX_MTU 9600 #define VXGE_HW_DEFAULT_MTU 1500 -#ifdef VXGE_DEBUG_ASSERT +#define VXGE_HW_MAX_ROM_IMAGES 8 +struct eprom_image { + u8 is_valid:1; + u8 index; + u8 type; + u16 version; +}; + +#ifdef VXGE_DEBUG_ASSERT /** * vxge_assert * @test: C-condition to check @@ -48,16 +49,13 @@ * compilation * time. */ -#define vxge_assert(test) { \ - if (!(test)) \ - vxge_os_bug("bad cond: "#test" at %s:%d\n", \ - __FILE__, __LINE__); } +#define vxge_assert(test) BUG_ON(!(test)) #else #define vxge_assert(test) #endif /* end of VXGE_DEBUG_ASSERT */ /** - * enum enum vxge_debug_level + * enum vxge_debug_level * @VXGE_NONE: debug disabled * @VXGE_ERR: all errors going to be logged out * @VXGE_TRACE: all errors plus all kind of verbose tracing print outs @@ -159,6 +157,47 @@ enum vxge_hw_device_link_state { }; /** + * enum enum vxge_hw_fw_upgrade_code - FW upgrade return codes. + * @VXGE_HW_FW_UPGRADE_OK: All OK send next 16 bytes + * @VXGE_HW_FW_UPGRADE_DONE: upload completed + * @VXGE_HW_FW_UPGRADE_ERR: upload error + * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream + * + */ +enum vxge_hw_fw_upgrade_code { + VXGE_HW_FW_UPGRADE_OK = 0, + VXGE_HW_FW_UPGRADE_DONE = 1, + VXGE_HW_FW_UPGRADE_ERR = 2, + VXGE_FW_UPGRADE_BYTES2SKIP = 3 +}; + +/** + * enum enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes. 
+ * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data + * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow + * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file + * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file + * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file + * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file + * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data + * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file + * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error unknown type + * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash image check failed + */ +enum vxge_hw_fw_upgrade_err_code { + VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1, + VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2, + VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3, + VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4, + VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5, + VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6, + VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7, + VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8, + VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9, + VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10 +}; + +/** * struct vxge_hw_device_date - Date Format * @day: Day * @month: Month @@ -275,9 +314,9 @@ struct vxge_hw_ring_config { #define VXGE_HW_RING_DEFAULT 1 u32 ring_blocks; -#define VXGE_HW_MIN_RING_BLOCKS 1 -#define VXGE_HW_MAX_RING_BLOCKS 128 -#define VXGE_HW_DEF_RING_BLOCKS 2 +#define VXGE_HW_MIN_RING_BLOCKS 1 +#define VXGE_HW_MAX_RING_BLOCKS 128 +#define VXGE_HW_DEF_RING_BLOCKS 2 u32 buffer_mode; #define VXGE_HW_RING_RXD_BUFFER_MODE_1 1 @@ -465,7 +504,6 @@ struct vxge_hw_device_config { * See also: vxge_hw_driver_initialize(). */ struct vxge_hw_uld_cbs { - void (*link_up)(struct __vxge_hw_device *devh); void (*link_down)(struct __vxge_hw_device *devh); void (*crit_err)(struct __vxge_hw_device *devh, @@ -652,6 +690,7 @@ struct __vxge_hw_virtualpath { struct vxge_hw_vpath_stats_hw_info *hw_stats; struct vxge_hw_vpath_stats_hw_info *hw_stats_sav; struct vxge_hw_vpath_stats_sw_info *sw_stats; + spinlock_t lock; }; /* @@ -661,7 +700,7 @@ struct __vxge_hw_virtualpath { * * This structure is used to store the callback information. */ -struct __vxge_hw_vpath_handle{ +struct __vxge_hw_vpath_handle { struct list_head item; struct __vxge_hw_virtualpath *vpath; }; @@ -674,9 +713,6 @@ struct __vxge_hw_vpath_handle{ /** * struct __vxge_hw_device - Hal device object * @magic: Magic Number - * @device_id: PCI Device Id of the adapter - * @major_revision: PCI Device major revision - * @minor_revision: PCI Device minor revision * @bar0: BAR0 virtual address. 
* @pdev: Physical device handle * @config: Confguration passed by the LL driver at initialization @@ -688,9 +724,6 @@ struct __vxge_hw_device { u32 magic; #define VXGE_HW_DEVICE_MAGIC 0x12345678 #define VXGE_HW_DEVICE_DEAD 0xDEADDEAD - u16 device_id; - u8 major_revision; - u8 minor_revision; void __iomem *bar0; struct pci_dev *pdev; struct net_device *ndev; @@ -731,6 +764,7 @@ struct __vxge_hw_device { u32 debug_level; u32 level_err; u32 level_trace; + u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES]; }; #define VXGE_HW_INFO_LEN 64 @@ -781,8 +815,8 @@ struct vxge_hw_device_hw_info { u8 serial_number[VXGE_HW_INFO_LEN]; u8 part_number[VXGE_HW_INFO_LEN]; u8 product_desc[VXGE_HW_INFO_LEN]; - u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; - u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; + u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; + u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; }; /** @@ -829,20 +863,10 @@ struct vxge_hw_device_attr { loc, \ offset, \ &val64); \ - \ if (status != VXGE_HW_OK) \ return status; \ } -#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \ - status = __vxge_hw_vpath_stats_access(vpath, \ - VXGE_HW_STATS_OP_READ, \ - offset, \ - &val64); \ - if (status != VXGE_HW_OK) \ - return status; \ -} - /* * struct __vxge_hw_ring - Ring channel. * @channel: Channel "base" of this ring, the common part of all HW @@ -1114,7 +1138,7 @@ struct __vxge_hw_non_offload_db_wrapper { * lookup to determine the transmit port. * 01: Send on physical Port1. * 10: Send on physical Port0. - * 11: Send on both ports. + * 11: Send on both ports. * Bits 18 to 21 - Reserved * Bits 22 to 23 - Gather_Code. This field is set by the host and * is used to describe how individual buffers comprise a frame. @@ -1413,12 +1437,12 @@ enum vxge_hw_rth_algoritms { * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get(). */ struct vxge_hw_rth_hash_types { - u8 hash_type_tcpipv4_en; - u8 hash_type_ipv4_en; - u8 hash_type_tcpipv6_en; - u8 hash_type_ipv6_en; - u8 hash_type_tcpipv6ex_en; - u8 hash_type_ipv6ex_en; + u8 hash_type_tcpipv4_en:1, + hash_type_ipv4_en:1, + hash_type_tcpipv6_en:1, + hash_type_ipv6_en:1, + hash_type_tcpipv6ex_en:1, + hash_type_ipv6ex_en:1; }; void vxge_hw_device_debug_set( @@ -1893,6 +1917,15 @@ out: return vaddr; } +static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr, + struct pci_dev **p_dma_acch) +{ + unsigned long misaligned = *(unsigned long *)p_dma_acch; + u8 *tmp = (u8 *)vaddr; + tmp -= misaligned; + kfree((void *)tmp); +} + /* * __vxge_hw_mempool_item_priv - will return pointer on per item private space */ @@ -1962,7 +1995,6 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set( void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp); - #ifndef readq static inline u64 readq(void __iomem *addr) { @@ -2000,7 +2032,7 @@ enum vxge_hw_status vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask); /** - * vxge_debug + * vxge_debug_ll * @level: level of debug verbosity. * @mask: mask for the debug * @buf: Circular buffer for tracing @@ -2012,26 +2044,13 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask); * may be compiled out if DEBUG macro was never defined. * See also: enum vxge_debug_level{}. */ - -#define vxge_trace_aux(level, mask, fmt, ...) \ -{\ - vxge_os_vaprintf(level, mask, fmt, __VA_ARGS__);\ -} - -#define vxge_debug(module, level, mask, fmt, ...) 
{ \ -if ((level >= VXGE_TRACE && ((module & VXGE_DEBUG_TRACE_MASK) == module)) || \ - (level >= VXGE_ERR && ((module & VXGE_DEBUG_ERR_MASK) == module))) {\ - if ((mask & VXGE_DEBUG_MASK) == mask)\ - vxge_trace_aux(level, mask, fmt, __VA_ARGS__); \ -} \ -} - #if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK) -#define vxge_debug_ll(level, mask, fmt, ...) \ -{\ - vxge_debug(VXGE_COMPONENT_LL, level, mask, fmt, __VA_ARGS__);\ -} - +#define vxge_debug_ll(level, mask, fmt, ...) do { \ + if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \ + (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\ + if ((mask & VXGE_DEBUG_MASK) == mask) \ + printk(fmt "\n", __VA_ARGS__); \ +} while (0) #else #define vxge_debug_ll(level, mask, fmt, ...) #endif @@ -2051,4 +2070,26 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set( enum vxge_hw_status __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id); + +#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5 +#define VXGE_HW_MAX_POLLING_COUNT 100 + +void +vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev); + +enum vxge_hw_status +vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major, + u32 *minor, u32 *build); + +enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev); + +enum vxge_hw_status +vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf, + int size); + +enum vxge_hw_status +vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev, + struct eprom_image *eprom_image_data); + +int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id); #endif diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c index b67746eef923..1dd3a21b3a43 100644 --- a/drivers/net/vxge/vxge-ethtool.c +++ b/drivers/net/vxge/vxge-ethtool.c @@ -11,7 +11,7 @@ * Virtualized Server Adapter. * Copyright(c) 2002-2010 Exar Corp. ******************************************************************************/ -#include<linux/ethtool.h> +#include <linux/ethtool.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/etherdevice.h> @@ -29,7 +29,6 @@ * Return value: * 0 on success. */ - static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info) { /* We currently only support 10Gb/FULL */ @@ -79,10 +78,9 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info) * Returns driver specific information like name, version etc. to ethtool. */ static void vxge_ethtool_gdrvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) + struct ethtool_drvinfo *info) { - struct vxgedev *vdev; - vdev = (struct vxgedev *)netdev_priv(dev); + struct vxgedev *vdev = netdev_priv(dev); strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME)); strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION)); strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN); @@ -104,15 +102,14 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev, * buffer area.
*/ static void vxge_ethtool_gregs(struct net_device *dev, - struct ethtool_regs *regs, void *space) + struct ethtool_regs *regs, void *space) { int index, offset; enum vxge_hw_status status; u64 reg; - u64 *reg_space = (u64 *) space; - struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); - struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) - pci_get_drvdata(vdev->pdev); + u64 *reg_space = (u64 *)space; + struct vxgedev *vdev = netdev_priv(dev); + struct __vxge_hw_device *hldev = vdev->devh; regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; regs->version = vdev->pdev->subsystem_device; @@ -147,9 +144,8 @@ static void vxge_ethtool_gregs(struct net_device *dev, */ static int vxge_ethtool_idnic(struct net_device *dev, u32 data) { - struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); - struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) - pci_get_drvdata(vdev->pdev); + struct vxgedev *vdev = netdev_priv(dev); + struct __vxge_hw_device *hldev = vdev->devh; vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON); msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME); @@ -168,11 +164,10 @@ static int vxge_ethtool_idnic(struct net_device *dev, u32 data) * void */ static void vxge_ethtool_getpause_data(struct net_device *dev, - struct ethtool_pauseparam *ep) + struct ethtool_pauseparam *ep) { - struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); - struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) - pci_get_drvdata(vdev->pdev); + struct vxgedev *vdev = netdev_priv(dev); + struct __vxge_hw_device *hldev = vdev->devh; vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause); } @@ -188,11 +183,10 @@ static void vxge_ethtool_getpause_data(struct net_device *dev, * int, returns 0 on Success */ static int vxge_ethtool_setpause_data(struct net_device *dev, - struct ethtool_pauseparam *ep) + struct ethtool_pauseparam *ep) { - struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); - struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) - pci_get_drvdata(vdev->pdev); + struct vxgedev *vdev = netdev_priv(dev); + struct __vxge_hw_device *hldev = vdev->devh; vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause); @@ -209,9 +203,8 @@ static void vxge_get_ethtool_stats(struct net_device *dev, enum vxge_hw_status status; enum vxge_hw_status swstatus; struct vxge_vpath *vpath = NULL; - - struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); - struct __vxge_hw_device *hldev = vdev->devh; + struct vxgedev *vdev = netdev_priv(dev); + struct __vxge_hw_device *hldev = vdev->devh; struct vxge_hw_xmac_stats *xmac_stats; struct vxge_hw_device_stats_sw_info *sw_stats; struct vxge_hw_device_stats_hw_info *hw_stats; @@ -574,12 +567,12 @@ static void vxge_get_ethtool_stats(struct net_device *dev, kfree(hw_stats); } -static void vxge_ethtool_get_strings(struct net_device *dev, - u32 stringset, u8 *data) +static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset, + u8 *data) { int stat_size = 0; int i, j; - struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + struct vxgedev *vdev = netdev_priv(dev); switch (stringset) { case ETH_SS_STATS: vxge_add_string("VPATH STATISTICS%s\t\t\t", @@ -1066,21 +1059,21 @@ static void vxge_ethtool_get_strings(struct net_device *dev, static int vxge_ethtool_get_regs_len(struct net_device *dev) { - struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + struct vxgedev *vdev = netdev_priv(dev); return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; } 
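For context, the two callbacks above work as a pair: userspace first asks how large a register dump to expect via get_regs_len(), which here reports one struct vxge_hw_vpath_reg per configured vpath, and then requests the dump itself via get_regs(). Below is a minimal userspace sketch of that sequence over the legacy SIOCETHTOOL ioctl; it is not part of this patch, and the interface name "eth0" plus the bare-bones error handling are placeholders for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_regs *regs;
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */

	/* ETHTOOL_GDRVINFO fills drvinfo.regdump_len from the driver's
	 * get_regs_len() callback (no_of_vpath * sizeof(vxge_hw_vpath_reg)). */
	ifr.ifr_data = (void *)&drvinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	regs = calloc(1, sizeof(*regs) + drvinfo.regdump_len);
	if (!regs)
		return 1;
	regs->cmd = ETHTOOL_GREGS;
	regs->len = drvinfo.regdump_len;

	/* ETHTOOL_GREGS invokes get_regs(), which copies one 64-bit
	 * register block per vpath into the trailing buffer. */
	ifr.ifr_data = (void *)regs;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	printf("dumped %u bytes of vpath registers\n", regs->len);
	free(regs);
	return 0;
}

This is roughly the path the stock ethtool utility takes for "ethtool -d", which is why regs->len must be kept consistent with get_regs_len().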
static u32 vxge_get_rx_csum(struct net_device *dev) { - struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + struct vxgedev *vdev = netdev_priv(dev); return vdev->rx_csum; } static int vxge_set_rx_csum(struct net_device *dev, u32 data) { - struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + struct vxgedev *vdev = netdev_priv(dev); if (data) vdev->rx_csum = 1; @@ -1102,7 +1095,7 @@ static int vxge_ethtool_op_set_tso(struct net_device *dev, u32 data) static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset) { - struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + struct vxgedev *vdev = netdev_priv(dev); switch (sset) { case ETH_SS_STATS: @@ -1119,6 +1112,59 @@ static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset) } } +static int vxge_set_flags(struct net_device *dev, u32 data) +{ + struct vxgedev *vdev = netdev_priv(dev); + enum vxge_hw_status status; + + if (data & ~ETH_FLAG_RXHASH) + return -EOPNOTSUPP; + + if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en) + return 0; + + if (netif_running(dev) || (vdev->config.rth_steering == NO_STEERING)) + return -EINVAL; + + vdev->devh->config.rth_en = !!(data & ETH_FLAG_RXHASH); + + /* Enabling RTH requires some of the logic in vxge_device_register and a + * vpath reset. Due to these restrictions, only allow modification + * while the interface is down. + */ + status = vxge_reset_all_vpaths(vdev); + if (status != VXGE_HW_OK) { + vdev->devh->config.rth_en = !vdev->devh->config.rth_en; + return -EFAULT; + } + + if (vdev->devh->config.rth_en) + dev->features |= NETIF_F_RXHASH; + else + dev->features &= ~NETIF_F_RXHASH; + + return 0; +} + +static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms) +{ + struct vxgedev *vdev = netdev_priv(dev); + + if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) { + printk(KERN_INFO "Single Function Mode is required to flash the" + " firmware\n"); + return -EINVAL; + } + + if (netif_running(dev)) { + printk(KERN_INFO "Interface %s must be down to flash the " + "firmware\n", dev->name); + return -EBUSY; + } + + return vxge_fw_upgrade(vdev, parms->data, 1); +} + static const struct ethtool_ops vxge_ethtool_ops = { .get_settings = vxge_ethtool_gset, .set_settings = vxge_ethtool_sset, @@ -1131,7 +1177,7 @@ static const struct ethtool_ops vxge_ethtool_ops = { .get_rx_csum = vxge_get_rx_csum, .set_rx_csum = vxge_set_rx_csum, .get_tx_csum = ethtool_op_get_tx_csum, - .set_tx_csum = ethtool_op_set_tx_hw_csum, + .set_tx_csum = ethtool_op_set_tx_ipv6_csum, .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_tso = ethtool_op_get_tso, @@ -1140,6 +1186,8 @@ static const struct ethtool_ops vxge_ethtool_ops = { .phys_id = vxge_ethtool_idnic, .get_sset_count = vxge_ethtool_get_sset_count, .get_ethtool_stats = vxge_get_ethtool_stats, + .set_flags = vxge_set_flags, + .flash_device = vxge_fw_flash, }; void vxge_initialize_ethtool_ops(struct net_device *ndev) diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c index 813829f3d024..1ac9b568f1b0 100644 --- a/drivers/net/vxge/vxge-main.c +++ b/drivers/net/vxge/vxge-main.c @@ -50,6 +50,8 @@ #include <net/ip.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> +#include <linux/firmware.h> +#include <linux/net_tstamp.h> #include "vxge-main.h" #include "vxge-reg.h" @@ -82,16 +84,6 @@ module_param_array(bw_percentage, uint, NULL, 0); static struct vxge_drv_config *driver_config; -static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, - struct macInfo 
*mac); -static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, - struct macInfo *mac); -static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac); -static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac); -static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath); -static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath); -static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev); - static inline int is_vxge_card_up(struct vxgedev *vdev) { return test_bit(__VXGE_STATE_CARD_UP, &vdev->state); @@ -148,11 +140,10 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev) * This function is called during interrupt context to notify link up state * change. */ -static void -vxge_callback_link_up(struct __vxge_hw_device *hldev) +static void vxge_callback_link_up(struct __vxge_hw_device *hldev) { struct net_device *dev = hldev->ndev; - struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + struct vxgedev *vdev = netdev_priv(dev); vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); @@ -172,11 +163,10 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev) * This function is called during interrupt context to notify link down state * change. */ -static void -vxge_callback_link_down(struct __vxge_hw_device *hldev) +static void vxge_callback_link_down(struct __vxge_hw_device *hldev) { struct net_device *dev = hldev->ndev; - struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + struct vxgedev *vdev = netdev_priv(dev); vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); @@ -195,7 +185,7 @@ vxge_callback_link_down(struct __vxge_hw_device *hldev) * * Allocate SKB. */ -static struct sk_buff* +static struct sk_buff * vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size) { struct net_device *dev; @@ -369,7 +359,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, u8 t_code, void *userdata) { struct vxge_ring *ring = (struct vxge_ring *)userdata; - struct net_device *dev = ring->ndev; + struct net_device *dev = ring->ndev; unsigned int dma_sizes; void *first_dtr = NULL; int dtr_cnt = 0; @@ -413,7 +403,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, prefetch((char *)skb + L1_CACHE_BYTES); if (unlikely(t_code)) { - if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) != VXGE_HW_OK) { @@ -436,9 +425,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, } if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) { - if (vxge_rx_alloc(dtr, ring, data_size) != NULL) { - if (!vxge_rx_map(dtr, ring)) { skb_put(skb, pkt_length); @@ -513,6 +500,23 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, else skb_checksum_none_assert(skb); + + if (ring->rx_hwts) { + struct skb_shared_hwtstamps *skb_hwts; + u32 ns = *(u32 *)(skb->head + pkt_length); + + skb_hwts = skb_hwtstamps(skb); + skb_hwts->hwtstamp = ns_to_ktime(ns); + skb_hwts->syststamp.tv64 = 0; + } + + /* rth_hash_type and rth_it_hit are non-zero regardless of + * whether rss is enabled. Only the rth_value is zero/non-zero + * if rss is disabled/enabled, so key off of that. 
+ */ + if (ext_info.rth_value) + skb->rxhash = ext_info.rth_value; + vxge_rx_complete(ring, skb, ext_info.vlan, pkt_length, &ext_info); @@ -660,6 +664,65 @@ static enum vxge_hw_status vxge_search_mac_addr_in_list( return FALSE; } +static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac) +{ + struct vxge_mac_addrs *new_mac_entry; + u8 *mac_address = NULL; + + if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT) + return TRUE; + + new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC); + if (!new_mac_entry) { + vxge_debug_mem(VXGE_ERR, + "%s: memory allocation failed", + VXGE_DRIVER_NAME); + return FALSE; + } + + list_add(&new_mac_entry->item, &vpath->mac_addr_list); + + /* Copy the new mac address to the list */ + mac_address = (u8 *)&new_mac_entry->macaddr; + memcpy(mac_address, mac->macaddr, ETH_ALEN); + + new_mac_entry->state = mac->state; + vpath->mac_addr_cnt++; + + /* Is this a multicast address */ + if (0x01 & mac->macaddr[0]) + vpath->mcast_addr_cnt++; + + return TRUE; +} + +/* Add a mac address to DA table */ +static enum vxge_hw_status +vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_vpath *vpath; + enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode; + + if (0x01 & mac->macaddr[0]) /* multicast address */ + duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE; + else + duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE; + + vpath = &vdev->vpaths[mac->vpath_no]; + status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr, + mac->macmask, duplicate_mode); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "DA config add entry failed for vpath:%d", + vpath->device_id); + } else + if (FALSE == vxge_mac_list_add(vpath, mac)) + status = -EPERM; + + return status; +} + static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header) { struct macInfo mac_info; @@ -670,7 +733,7 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header) struct vxge_vpath *vpath = NULL; struct __vxge_hw_device *hldev; - hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); + hldev = pci_get_drvdata(vdev->pdev); mac_address = (u8 *)&mac_addr; memcpy(mac_address, mac_header, ETH_ALEN); @@ -769,7 +832,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } - vdev = (struct vxgedev *)netdev_priv(dev); + vdev = netdev_priv(dev); if (unlikely(!is_vxge_card_up(vdev))) { vxge_debug_tx(VXGE_ERR, @@ -1005,6 +1068,50 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata) "%s:%d Exiting...", __func__, __LINE__); } +static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac) +{ + struct list_head *entry, *next; + u64 del_mac = 0; + u8 *mac_address = (u8 *) (&del_mac); + + /* Copy the mac address to delete from the list */ + memcpy(mac_address, mac->macaddr, ETH_ALEN); + + list_for_each_safe(entry, next, &vpath->mac_addr_list) { + if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) { + list_del(entry); + kfree((struct vxge_mac_addrs *)entry); + vpath->mac_addr_cnt--; + + /* Is this a multicast address */ + if (0x01 & mac->macaddr[0]) + vpath->mcast_addr_cnt--; + return TRUE; + } + } + + return FALSE; +} + +/* delete a mac address from DA table */ +static enum vxge_hw_status +vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_vpath *vpath; + + vpath = &vdev->vpaths[mac->vpath_no]; + status = vxge_hw_vpath_mac_addr_delete(vpath->handle, 
mac->macaddr, + mac->macmask); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "DA config delete entry failed for vpath:%d", + vpath->device_id); + } else + vxge_mac_list_del(vpath, mac); + return status; +} + /** * vxge_set_multicast * @dev: pointer to the device structure @@ -1034,7 +1141,7 @@ static void vxge_set_multicast(struct net_device *dev) vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); - vdev = (struct vxgedev *)netdev_priv(dev); + vdev = netdev_priv(dev); hldev = (struct __vxge_hw_device *)vdev->devh; if (unlikely(!is_vxge_card_up(vdev))) @@ -1094,7 +1201,7 @@ static void vxge_set_multicast(struct net_device *dev) /* Delete previous MC's */ for (i = 0; i < mcast_cnt; i++) { list_for_each_safe(entry, next, list_head) { - mac_entry = (struct vxge_mac_addrs *) entry; + mac_entry = (struct vxge_mac_addrs *)entry; /* Copy the mac address to delete */ mac_address = (u8 *)&mac_entry->macaddr; memcpy(mac_info.macaddr, mac_address, ETH_ALEN); @@ -1137,7 +1244,7 @@ _set_all_mcast: /* Delete previous MC's */ for (i = 0; i < mcast_cnt; i++) { list_for_each_safe(entry, next, list_head) { - mac_entry = (struct vxge_mac_addrs *) entry; + mac_entry = (struct vxge_mac_addrs *)entry; /* Copy the mac address to delete */ mac_address = (u8 *)&mac_entry->macaddr; memcpy(mac_info.macaddr, mac_address, ETH_ALEN); @@ -1184,14 +1291,14 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p) { struct sockaddr *addr = p; struct vxgedev *vdev; - struct __vxge_hw_device *hldev; + struct __vxge_hw_device *hldev; enum vxge_hw_status status = VXGE_HW_OK; struct macInfo mac_info_new, mac_info_old; int vpath_idx = 0; vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); - vdev = (struct vxgedev *)netdev_priv(dev); + vdev = netdev_priv(dev); hldev = vdev->devh; if (!is_valid_ether_addr(addr->sa_data)) @@ -1292,8 +1399,13 @@ static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id) static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) { struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; + struct __vxge_hw_device *hldev; int msix_id; + hldev = pci_get_drvdata(vdev->pdev); + + vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id); + vxge_hw_vpath_intr_disable(vpath->handle); if (vdev->config.intr_type == INTA) @@ -1310,6 +1422,95 @@ static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) } } +/* list all mac addresses from DA table */ +static enum vxge_hw_status +vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac) +{ + enum vxge_hw_status status = VXGE_HW_OK; + unsigned char macmask[ETH_ALEN]; + unsigned char macaddr[ETH_ALEN]; + + status = vxge_hw_vpath_mac_addr_get(vpath->handle, + macaddr, macmask); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "DA config list entry failed for vpath:%d", + vpath->device_id); + return status; + } + + while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) { + status = vxge_hw_vpath_mac_addr_get_next(vpath->handle, + macaddr, macmask); + if (status != VXGE_HW_OK) + break; + } + + return status; +} + +/* Store all mac addresses from the list to the DA table */ +static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct macInfo mac_info; + u8 *mac_address = NULL; + struct list_head *entry, *next; + + memset(&mac_info, 0, sizeof(struct macInfo)); + + if (vpath->is_open) { + list_for_each_safe(entry, next, &vpath->mac_addr_list) { + mac_address = + (u8 *)& + ((struct vxge_mac_addrs 
*)entry)->macaddr; + memcpy(mac_info.macaddr, mac_address, ETH_ALEN); + ((struct vxge_mac_addrs *)entry)->state = + VXGE_LL_MAC_ADDR_IN_DA_TABLE; + /* does this mac address already exist in da table? */ + status = vxge_search_mac_addr_in_da_table(vpath, + &mac_info); + if (status != VXGE_HW_OK) { + /* Add this mac address to the DA table */ + status = vxge_hw_vpath_mac_addr_add( + vpath->handle, mac_info.macaddr, + mac_info.macmask, + VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "DA add entry failed for vpath:%d", + vpath->device_id); + ((struct vxge_mac_addrs *)entry)->state + = VXGE_LL_MAC_ADDR_IN_LIST; + } + } + } + } + + return status; +} + +/* Store all vlan ids from the list to the vid table */ +static enum vxge_hw_status +vxge_restore_vpath_vid_table(struct vxge_vpath *vpath) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct vxgedev *vdev = vpath->vdev; + u16 vid; + + if (vdev->vlgrp && vpath->is_open) { + + for (vid = 0; vid < VLAN_N_VID; vid++) { + if (!vlan_group_get_device(vdev->vlgrp, vid)) + continue; + /* Add these vlan to the vid table */ + status = vxge_hw_vpath_vid_add(vpath->handle, vid); + } + } + + return status; +} + /* * vxge_reset_vpath * @vdev: pointer to vdev @@ -1405,12 +1606,16 @@ static int do_vxge_reset(struct vxgedev *vdev, int event) } if (event == VXGE_LL_FULL_RESET) { + netif_carrier_off(vdev->ndev); + /* wait for all the vpath reset to complete */ for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { while (test_bit(vp_id, &vdev->vp_reset)) msleep(50); } + netif_carrier_on(vdev->ndev); + /* if execution mode is set to debug, don't reset the adapter */ if (unlikely(vdev->exec_mode)) { vxge_debug_init(VXGE_ERR, @@ -1423,6 +1628,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event) } if (event == VXGE_LL_FULL_RESET) { + vxge_hw_device_wait_receive_idle(vdev->devh); vxge_hw_device_intr_disable(vdev->devh); switch (vdev->cric_err_event) { @@ -1563,9 +1769,14 @@ out: * * driver may reset the chip on events of serr, eccerr, etc */ -static int vxge_reset(struct vxgedev *vdev) +static void vxge_reset(struct work_struct *work) { - return do_vxge_reset(vdev, VXGE_LL_FULL_RESET); + struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task); + + if (!netif_running(vdev->ndev)) + return; + + do_vxge_reset(vdev, VXGE_LL_FULL_RESET); } /** @@ -1608,8 +1819,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget) int budget_org = budget; struct vxge_ring *ring; - struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) - pci_get_drvdata(vdev->pdev); + struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev); for (i = 0; i < vdev->no_of_vpath; i++) { ring = &vdev->vpaths[i].ring; @@ -1645,11 +1855,11 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget) */ static void vxge_netpoll(struct net_device *dev) { - struct __vxge_hw_device *hldev; + struct __vxge_hw_device *hldev; struct vxgedev *vdev; - vdev = (struct vxgedev *)netdev_priv(dev); - hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev); + vdev = netdev_priv(dev); + hldev = pci_get_drvdata(vdev->pdev); vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); @@ -1689,15 +1899,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev) mtable[index] = index % vdev->no_of_vpath; } - /* Fill RTH hash types */ - hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4; - hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4; - hash_types.hash_type_tcpipv6_en = 
vdev->config.rth_hash_type_tcpipv6; - hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6; - hash_types.hash_type_tcpipv6ex_en = - vdev->config.rth_hash_type_tcpipv6ex; - hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex; - /* set indirection table, bucket-to-vpath mapping */ status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles, vdev->no_of_vpath, @@ -1710,19 +1911,27 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev) return status; } + /* Fill RTH hash types */ + hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4; + hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4; + hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6; + hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6; + hash_types.hash_type_tcpipv6ex_en = + vdev->config.rth_hash_type_tcpipv6ex; + hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex; + /* - * Because the itable_set() method uses the active_table field - * for the target virtual path the RTH config should be updated - * for all VPATHs. The h/w only uses the lowest numbered VPATH - * when steering frames. - */ + * Because the itable_set() method uses the active_table field + * for the target virtual path the RTH config should be updated + * for all VPATHs. The h/w only uses the lowest numbered VPATH + * when steering frames. + */ for (index = 0; index < vdev->no_of_vpath; index++) { status = vxge_hw_vpath_rts_rth_set( vdev->vpaths[index].handle, vdev->config.rth_algorithm, &hash_types, vdev->config.rth_bkt_sz); - if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "RTH configuration failed for vpath:%d", @@ -1734,201 +1943,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev) return status; } -static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac) -{ - struct vxge_mac_addrs *new_mac_entry; - u8 *mac_address = NULL; - - if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT) - return TRUE; - - new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC); - if (!new_mac_entry) { - vxge_debug_mem(VXGE_ERR, - "%s: memory allocation failed", - VXGE_DRIVER_NAME); - return FALSE; - } - - list_add(&new_mac_entry->item, &vpath->mac_addr_list); - - /* Copy the new mac address to the list */ - mac_address = (u8 *)&new_mac_entry->macaddr; - memcpy(mac_address, mac->macaddr, ETH_ALEN); - - new_mac_entry->state = mac->state; - vpath->mac_addr_cnt++; - - /* Is this a multicast address */ - if (0x01 & mac->macaddr[0]) - vpath->mcast_addr_cnt++; - - return TRUE; -} - -/* Add a mac address to DA table */ -static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, - struct macInfo *mac) -{ - enum vxge_hw_status status = VXGE_HW_OK; - struct vxge_vpath *vpath; - enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode; - - if (0x01 & mac->macaddr[0]) /* multicast address */ - duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE; - else - duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE; - - vpath = &vdev->vpaths[mac->vpath_no]; - status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr, - mac->macmask, duplicate_mode); - if (status != VXGE_HW_OK) { - vxge_debug_init(VXGE_ERR, - "DA config add entry failed for vpath:%d", - vpath->device_id); - } else - if (FALSE == vxge_mac_list_add(vpath, mac)) - status = -EPERM; - - return status; -} - -static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac) -{ - struct list_head *entry, *next; - u64 del_mac = 0; - u8 *mac_address = (u8 *) 
(&del_mac); - - /* Copy the mac address to delete from the list */ - memcpy(mac_address, mac->macaddr, ETH_ALEN); - - list_for_each_safe(entry, next, &vpath->mac_addr_list) { - if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) { - list_del(entry); - kfree((struct vxge_mac_addrs *)entry); - vpath->mac_addr_cnt--; - - /* Is this a multicast address */ - if (0x01 & mac->macaddr[0]) - vpath->mcast_addr_cnt--; - return TRUE; - } - } - - return FALSE; -} -/* delete a mac address from DA table */ -static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, - struct macInfo *mac) -{ - enum vxge_hw_status status = VXGE_HW_OK; - struct vxge_vpath *vpath; - - vpath = &vdev->vpaths[mac->vpath_no]; - status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr, - mac->macmask); - if (status != VXGE_HW_OK) { - vxge_debug_init(VXGE_ERR, - "DA config delete entry failed for vpath:%d", - vpath->device_id); - } else - vxge_mac_list_del(vpath, mac); - return status; -} - -/* list all mac addresses from DA table */ -enum vxge_hw_status -static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, - struct macInfo *mac) -{ - enum vxge_hw_status status = VXGE_HW_OK; - unsigned char macmask[ETH_ALEN]; - unsigned char macaddr[ETH_ALEN]; - - status = vxge_hw_vpath_mac_addr_get(vpath->handle, - macaddr, macmask); - if (status != VXGE_HW_OK) { - vxge_debug_init(VXGE_ERR, - "DA config list entry failed for vpath:%d", - vpath->device_id); - return status; - } - - while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) { - - status = vxge_hw_vpath_mac_addr_get_next(vpath->handle, - macaddr, macmask); - if (status != VXGE_HW_OK) - break; - } - - return status; -} - -/* Store all vlan ids from the list to the vid table */ -static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath) -{ - enum vxge_hw_status status = VXGE_HW_OK; - struct vxgedev *vdev = vpath->vdev; - u16 vid; - - if (vdev->vlgrp && vpath->is_open) { - - for (vid = 0; vid < VLAN_N_VID; vid++) { - if (!vlan_group_get_device(vdev->vlgrp, vid)) - continue; - /* Add these vlan to the vid table */ - status = vxge_hw_vpath_vid_add(vpath->handle, vid); - } - } - - return status; -} - -/* Store all mac addresses from the list to the DA table */ -static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath) -{ - enum vxge_hw_status status = VXGE_HW_OK; - struct macInfo mac_info; - u8 *mac_address = NULL; - struct list_head *entry, *next; - - memset(&mac_info, 0, sizeof(struct macInfo)); - - if (vpath->is_open) { - - list_for_each_safe(entry, next, &vpath->mac_addr_list) { - mac_address = - (u8 *)& - ((struct vxge_mac_addrs *)entry)->macaddr; - memcpy(mac_info.macaddr, mac_address, ETH_ALEN); - ((struct vxge_mac_addrs *)entry)->state = - VXGE_LL_MAC_ADDR_IN_DA_TABLE; - /* does this mac address already exist in da table? 
*/ - status = vxge_search_mac_addr_in_da_table(vpath, - &mac_info); - if (status != VXGE_HW_OK) { - /* Add this mac address to the DA table */ - status = vxge_hw_vpath_mac_addr_add( - vpath->handle, mac_info.macaddr, - mac_info.macmask, - VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE); - if (status != VXGE_HW_OK) { - vxge_debug_init(VXGE_ERR, - "DA add entry failed for vpath:%d", - vpath->device_id); - ((struct vxge_mac_addrs *)entry)->state - = VXGE_LL_MAC_ADDR_IN_LIST; - } - } - } - } - - return status; -} - /* reset vpaths */ -static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) +enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) { enum vxge_hw_status status = VXGE_HW_OK; struct vxge_vpath *vpath; @@ -1988,8 +2004,23 @@ static int vxge_open_vpaths(struct vxgedev *vdev) for (i = 0; i < vdev->no_of_vpath; i++) { vpath = &vdev->vpaths[i]; - vxge_assert(vpath->is_configured); + + if (!vdev->titan1) { + struct vxge_hw_vp_config *vcfg; + vcfg = &vdev->devh->config.vp_config[vpath->device_id]; + + vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A; + vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B; + vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C; + vcfg->tti.uec_a = TTI_T1A_TX_UFC_A; + vcfg->tti.uec_b = TTI_T1A_TX_UFC_B; + vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu); + vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu); + vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL; + vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL; + } + attr.vp_id = vpath->device_id; attr.fifo_attr.callback = vxge_xmit_compl; attr.fifo_attr.txdl_term = vxge_tx_term; @@ -2004,6 +2035,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev) vpath->ring.ndev = vdev->ndev; vpath->ring.pdev = vdev->pdev; + status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle); if (status == VXGE_HW_OK) { vpath->fifo.handle = @@ -2024,6 +2056,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev) vdev->config.fifo_indicate_max_pkts; vpath->ring.rx_vector_no = 0; vpath->ring.rx_csum = vdev->rx_csum; + vpath->ring.rx_hwts = vdev->rx_hwts; vpath->is_open = 1; vdev->vp_handles[i] = vpath->handle; vpath->ring.gro_enable = vdev->config.gro_enable; @@ -2031,11 +2064,10 @@ static int vxge_open_vpaths(struct vxgedev *vdev) vdev->stats.vpaths_open++; } else { vdev->stats.vpath_open_fail++; - vxge_debug_init(VXGE_ERR, - "%s: vpath: %d failed to open " - "with status: %d", - vdev->ndev->name, vpath->device_id, - status); + vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to " + "open with status: %d", + vdev->ndev->name, vpath->device_id, + status); vxge_close_vpaths(vdev, 0); return -EPERM; } @@ -2043,6 +2075,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev) vp_id = vpath->handle->vpath->vp_id; vdev->vpaths_deployed |= vxge_mBIT(vp_id); } + return VXGE_HW_OK; } @@ -2062,21 +2095,20 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id) struct __vxge_hw_device *hldev; u64 reason; enum vxge_hw_status status; - struct vxgedev *vdev = (struct vxgedev *) dev_id;; + struct vxgedev *vdev = (struct vxgedev *)dev_id; vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__); dev = vdev->ndev; - hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev); + hldev = pci_get_drvdata(vdev->pdev); if (pci_channel_offline(vdev->pdev)) return IRQ_NONE; if (unlikely(!is_vxge_card_up(vdev))) - return IRQ_NONE; + return IRQ_HANDLED; - status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, - &reason); + status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason); if (status == VXGE_HW_OK) { vxge_hw_device_mask_all(hldev); @@ -2301,8 +2333,8 @@ static 
void vxge_rem_msix_isr(struct vxgedev *vdev) static void vxge_rem_isr(struct vxgedev *vdev) { - struct __vxge_hw_device *hldev; - hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); + struct __vxge_hw_device *hldev; + hldev = pci_get_drvdata(vdev->pdev); #ifdef CONFIG_PCI_MSI if (vdev->config.intr_type == MSI_X) { @@ -2529,8 +2561,7 @@ static void vxge_poll_vp_lockup(unsigned long data) * Return value: '0' on success and an appropriate (-)ve integer as * defined in errno.h file on failure. */ -static int -vxge_open(struct net_device *dev) +static int vxge_open(struct net_device *dev) { enum vxge_hw_status status; struct vxgedev *vdev; @@ -2539,11 +2570,12 @@ vxge_open(struct net_device *dev) int ret = 0; int i; u64 val64, function_mode; + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", dev->name, __func__, __LINE__); - vdev = (struct vxgedev *)netdev_priv(dev); - hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); + vdev = netdev_priv(dev); + hldev = pci_get_drvdata(vdev->pdev); function_mode = vdev->config.device_hw_info.function_mode; /* make sure you have link off by default every time Nic is @@ -2598,6 +2630,8 @@ vxge_open(struct net_device *dev) goto out2; } } + printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name, + hldev->config.rth_en ? "enabled" : "disabled"); for (i = 0; i < vdev->no_of_vpath; i++) { vpath = &vdev->vpaths[i]; @@ -2683,9 +2717,10 @@ vxge_open(struct net_device *dev) vxge_os_timer(vdev->vp_reset_timer, vxge_poll_vp_reset, vdev, (HZ/2)); - if (vdev->vp_lockup_timer.function == NULL) - vxge_os_timer(vdev->vp_lockup_timer, - vxge_poll_vp_lockup, vdev, (HZ/2)); + /* There is no need to check for RxD leak and RxD lookup on Titan1A */ + if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL) + vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev, + HZ / 2); set_bit(__VXGE_STATE_CARD_UP, &vdev->state); @@ -2767,8 +2802,8 @@ static int do_vxge_close(struct net_device *dev, int do_io) vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", dev->name, __func__, __LINE__); - vdev = (struct vxgedev *)netdev_priv(dev); - hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); + vdev = netdev_priv(dev); + hldev = pci_get_drvdata(vdev->pdev); if (unlikely(!is_vxge_card_up(vdev))) return 0; @@ -2778,7 +2813,6 @@ static int do_vxge_close(struct net_device *dev, int do_io) while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) msleep(50); - clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); if (do_io) { /* Put the vpath back in normal mode */ vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id); @@ -2789,7 +2823,6 @@ static int do_vxge_close(struct net_device *dev, int do_io) struct vxge_hw_mrpcim_reg, rts_mgr_cbasin_cfg), &val64); - if (status == VXGE_HW_OK) { val64 &= ~vpath_vector; status = vxge_hw_mgmt_reg_write(vdev->devh, @@ -2818,10 +2851,17 @@ static int do_vxge_close(struct net_device *dev, int do_io) smp_wmb(); } - del_timer_sync(&vdev->vp_lockup_timer); + + if (vdev->titan1) + del_timer_sync(&vdev->vp_lockup_timer); del_timer_sync(&vdev->vp_reset_timer); + if (do_io) + vxge_hw_device_wait_receive_idle(hldev); + + clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); + /* Disable napi */ if (vdev->config.intr_type != MSI_X) napi_disable(&vdev->napi); @@ -2838,8 +2878,6 @@ static int do_vxge_close(struct net_device *dev, int do_io) if (do_io) vxge_hw_device_intr_disable(vdev->devh); - mdelay(1000); - vxge_rem_isr(vdev); vxge_napi_del_all(vdev); @@ -2868,8 +2906,7 @@ static int do_vxge_close(struct net_device *dev, int do_io) * 
Return value: '0' on success and an appropriate (-)ve integer as * defined in errno.h file on failure. */ -static int -vxge_close(struct net_device *dev) +static int vxge_close(struct net_device *dev) { do_vxge_close(dev, 1); return 0; @@ -2943,9 +2980,7 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes; net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors; net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast; - net_stats->rx_dropped += - vdev->vpaths[k].ring.stats.rx_dropped; - + net_stats->rx_dropped += vdev->vpaths[k].ring.stats.rx_dropped; net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms; net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes; net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors; @@ -2954,6 +2989,101 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) return net_stats; } +static enum vxge_hw_status vxge_timestamp_config(struct vxgedev *vdev, + int enable) +{ + enum vxge_hw_status status; + u64 val64; + + /* Timestamp is passed to the driver via the FCS, therefore we + * must disable the FCS stripping by the adapter. Since this is + * required for the driver to load (due to a hardware bug), + * there is no need to do anything special here. + */ + if (enable) + val64 = VXGE_HW_XMAC_TIMESTAMP_EN | + VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) | + VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0); + else + val64 = 0; + + status = vxge_hw_mgmt_reg_write(vdev->devh, + vxge_hw_mgmt_reg_type_mrpcim, + 0, + offsetof(struct vxge_hw_mrpcim_reg, + xmac_timestamp), + val64); + vxge_hw_device_flush_io(vdev->devh); + return status; +} + +static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data) +{ + struct hwtstamp_config config; + enum vxge_hw_status status; + int i; + + if (copy_from_user(&config, data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + /* Transmit HW Timestamp not supported */ + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + break; + case HWTSTAMP_TX_ON: + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + status = vxge_timestamp_config(vdev, 0); + if (status != VXGE_HW_OK) + return -EFAULT; + + vdev->rx_hwts = 0; + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + status = vxge_timestamp_config(vdev, 1); + if (status != VXGE_HW_OK) + return -EFAULT; + + vdev->rx_hwts = 1; + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + + default: + return -ERANGE; + } + + for (i = 0; i < vdev->no_of_vpath; i++) + vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts; + + if (copy_to_user(data, &config, sizeof(config))) + return -EFAULT; + + return 0; +} + /** * vxge_ioctl * @dev: Device pointer. 
@@ -2966,7 +3096,20 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) */ static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - return -EOPNOTSUPP; + struct vxgedev *vdev = netdev_priv(dev); + int ret; + + switch (cmd) { + case SIOCSHWTSTAMP: + ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data); + if (ret) + return ret; + break; + default: + return -EOPNOTSUPP; + } + + return 0; } /** @@ -2977,18 +3120,17 @@ static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) * This function is triggered if the Tx Queue is stopped * for a pre-defined amount of time when the Interface is still up. */ -static void -vxge_tx_watchdog(struct net_device *dev) +static void vxge_tx_watchdog(struct net_device *dev) { struct vxgedev *vdev; vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); - vdev = (struct vxgedev *)netdev_priv(dev); + vdev = netdev_priv(dev); vdev->cric_err_event = VXGE_HW_EVENT_RESET_START; - vxge_reset(vdev); + schedule_work(&vdev->reset_task); vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__); } @@ -3012,7 +3154,7 @@ vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); - vdev = (struct vxgedev *)netdev_priv(dev); + vdev = netdev_priv(dev); vpath = &vdev->vpaths[0]; if ((NULL == grp) && (vpath->is_open)) { @@ -3061,7 +3203,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) struct vxge_vpath *vpath; int vp_id; - vdev = (struct vxgedev *)netdev_priv(dev); + vdev = netdev_priv(dev); /* Add these vlan to the vid table */ for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { @@ -3088,7 +3230,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); - vdev = (struct vxgedev *)netdev_priv(dev); + vdev = netdev_priv(dev); vlan_group_set_device(vdev->vlgrp, vid, NULL); @@ -3110,21 +3252,31 @@ static const struct net_device_ops vxge_netdev_ops = { .ndo_start_xmit = vxge_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_multicast_list = vxge_set_multicast, - .ndo_do_ioctl = vxge_ioctl, - .ndo_set_mac_address = vxge_set_mac_addr, .ndo_change_mtu = vxge_change_mtu, .ndo_vlan_rx_register = vxge_vlan_rx_register, .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid, .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid, - .ndo_tx_timeout = vxge_tx_watchdog, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = vxge_netpoll, #endif }; +static int __devinit vxge_device_revision(struct vxgedev *vdev) +{ + int ret; + u8 revision; + + ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision); + if (ret) + return -EIO; + + vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION); + return 0; +} + static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, struct vxge_config *config, int high_dma, int no_of_vpath, @@ -3163,6 +3315,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, vdev->pdev = hldev->pdev; memcpy(&vdev->config, config, sizeof(struct vxge_config)); vdev->rx_csum = 1; /* Enable Rx CSUM by default. 
*/ + vdev->rx_hwts = 0; + + ret = vxge_device_revision(vdev); + if (ret < 0) + goto _out1; SET_NETDEV_DEV(ndev, &vdev->pdev->dev); @@ -3175,9 +3332,15 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, ndev->netdev_ops = &vxge_netdev_ops; ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT; + INIT_WORK(&vdev->reset_task, vxge_reset); vxge_initialize_ethtool_ops(ndev); + if (vdev->config.rth_steering != NO_STEERING) { + ndev->features |= NETIF_F_RXHASH; + hldev->config.rth_en = VXGE_HW_RTH_ENABLE; + } + /* Allocate memory for vpath */ vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) * no_of_vpath, GFP_KERNEL); @@ -3191,7 +3354,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, ndev->features |= NETIF_F_SG; - ndev->features |= NETIF_F_HW_CSUM; + ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; vxge_debug_init(vxge_hw_device_trace_level_get(hldev), "%s : checksuming enabled", __func__); @@ -3227,6 +3390,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, "%s: Ethernet device registered", ndev->name); + hldev->ndev = ndev; *vdev_out = vdev; /* Resetting the Device stats */ @@ -3261,36 +3425,29 @@ _out0: * * This function will unregister and free network device */ -static void -vxge_device_unregister(struct __vxge_hw_device *hldev) +static void vxge_device_unregister(struct __vxge_hw_device *hldev) { struct vxgedev *vdev; struct net_device *dev; char buf[IFNAMSIZ]; -#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \ - (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)) - u32 level_trace; -#endif dev = hldev->ndev; vdev = netdev_priv(dev); -#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \ - (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)) - level_trace = vdev->level_trace; -#endif - vxge_debug_entryexit(level_trace, - "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); - memcpy(buf, vdev->ndev->name, IFNAMSIZ); + vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name, + __func__, __LINE__); + + strncpy(buf, dev->name, IFNAMSIZ); + + flush_work_sync(&vdev->reset_task); /* in 2.6 will call stop() if device is up */ unregister_netdev(dev); - flush_scheduled_work(); - - vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf); - vxge_debug_entryexit(level_trace, - "%s: %s:%d Exiting...", buf, __func__, __LINE__); + vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered", + buf); + vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf, + __func__, __LINE__); } /* @@ -3304,7 +3461,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev, enum vxge_hw_event type, u64 vp_id) { struct net_device *dev = hldev->ndev; - struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + struct vxgedev *vdev = netdev_priv(dev); struct vxge_vpath *vpath = NULL; int vpath_idx; @@ -3527,9 +3684,9 @@ static int __devinit vxge_config_vpaths( device_config->vp_config[i].tti.timer_ac_en = VXGE_HW_TIM_TIMER_AC_ENABLE; - /* For msi-x with napi (each vector - has a handler of its own) - - Set CI to OFF for all vpaths */ + /* For msi-x with napi (each vector has a handler of its own) - + * Set CI to OFF for all vpaths + */ device_config->vp_config[i].tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_DISABLE; @@ -3559,10 +3716,13 @@ static int __devinit vxge_config_vpaths( device_config->vp_config[i].ring.ring_blocks = VXGE_HW_DEF_RING_BLOCKS; + device_config->vp_config[i].ring.buffer_mode = VXGE_HW_RING_RXD_BUFFER_MODE_1; + device_config->vp_config[i].ring.rxds_limit = VXGE_HW_DEF_RING_RXDS_LIMIT; + 
device_config->vp_config[i].ring.scatter_mode = VXGE_HW_RING_SCATTER_MODE_A; @@ -3642,6 +3802,7 @@ static void __devinit vxge_device_config_init( device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX; break; } + /* Timer period between device poll */ device_config->device_poll_millis = VXGE_TIMER_DELAY; @@ -3653,16 +3814,10 @@ static void __devinit vxge_device_config_init( vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ", __func__); - vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d", - device_config->dma_blockpool_initial); - vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d", - device_config->dma_blockpool_max); vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d", device_config->intr_mode); vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d", device_config->device_poll_millis); - vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d", - device_config->rts_mac_en); vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d", device_config->rth_en); vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d", @@ -3751,9 +3906,6 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask) vxge_debug_init(VXGE_TRACE, "%s: MAC Address learning enabled", vdev->ndev->name); - vxge_debug_init(VXGE_TRACE, - "%s: Rx doorbell mode enabled", vdev->ndev->name); - for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { if (!vxge_bVALn(vpath_mask, i, 1)) continue; @@ -3766,14 +3918,6 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask) ((struct __vxge_hw_device *)(vdev->devh))-> config.vp_config[i].rpa_strip_vlan_tag ? "Enabled" : "Disabled"); - vxge_debug_init(VXGE_TRACE, - "%s: Ring blocks : %d", vdev->ndev->name, - ((struct __vxge_hw_device *)(vdev->devh))-> - config.vp_config[i].ring.ring_blocks); - vxge_debug_init(VXGE_TRACE, - "%s: Fifo blocks : %d", vdev->ndev->name, - ((struct __vxge_hw_device *)(vdev->devh))-> - config.vp_config[i].fifo.fifo_blocks); vxge_debug_ll_config(VXGE_TRACE, "%s: Max frags : %d", vdev->ndev->name, ((struct __vxge_hw_device *)(vdev->devh))-> @@ -3813,8 +3957,7 @@ static int vxge_pm_resume(struct pci_dev *pdev) static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { - struct __vxge_hw_device *hldev = - (struct __vxge_hw_device *) pci_get_drvdata(pdev); + struct __vxge_hw_device *hldev = pci_get_drvdata(pdev); struct net_device *netdev = hldev->ndev; netif_device_detach(netdev); @@ -3843,8 +3986,7 @@ static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev, */ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) { - struct __vxge_hw_device *hldev = - (struct __vxge_hw_device *) pci_get_drvdata(pdev); + struct __vxge_hw_device *hldev = pci_get_drvdata(pdev); struct net_device *netdev = hldev->ndev; struct vxgedev *vdev = netdev_priv(netdev); @@ -3855,7 +3997,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) } pci_set_master(pdev); - vxge_reset(vdev); + do_vxge_reset(vdev, VXGE_LL_FULL_RESET); return PCI_ERS_RESULT_RECOVERED; } @@ -3869,8 +4011,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) */ static void vxge_io_resume(struct pci_dev *pdev) { - struct __vxge_hw_device *hldev = - (struct __vxge_hw_device *) pci_get_drvdata(pdev); + struct __vxge_hw_device *hldev = pci_get_drvdata(pdev); struct net_device *netdev = hldev->ndev; if (netif_running(netdev)) { @@ -3914,6 +4055,156 @@ static inline u32 vxge_get_num_vfs(u64 function_mode) return num_functions; } +int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int 
override) +{ + struct __vxge_hw_device *hldev = vdev->devh; + u32 maj, min, bld, cmaj, cmin, cbld; + enum vxge_hw_status status; + const struct firmware *fw; + int ret; + + ret = request_firmware(&fw, fw_name, &vdev->pdev->dev); + if (ret) { + vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found", + VXGE_DRIVER_NAME, fw_name); + goto out; + } + + /* Load the new firmware onto the adapter */ + status = vxge_update_fw_image(hldev, fw->data, fw->size); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s: FW image download to adapter failed '%s'.", + VXGE_DRIVER_NAME, fw_name); + ret = -EIO; + goto out; + } + + /* Read the version of the new firmware */ + status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s: Upgrade read version failed '%s'.", + VXGE_DRIVER_NAME, fw_name); + ret = -EIO; + goto out; + } + + cmaj = vdev->config.device_hw_info.fw_version.major; + cmin = vdev->config.device_hw_info.fw_version.minor; + cbld = vdev->config.device_hw_info.fw_version.build; + /* It's possible the version in /lib/firmware is not the latest version. + * If so, we could get into a loop of trying to upgrade to the latest + * and flashing the older version. + */ + if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) && + !override) { + ret = -EINVAL; + goto out; + } + + printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n", + maj, min, bld); + + /* Flash the adapter with the new firmware */ + status = vxge_hw_flash_fw(hldev); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.", + VXGE_DRIVER_NAME, fw_name); + ret = -EIO; + goto out; + } + + printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be " + "hard reset before using, thus requiring a system reboot or a " + "hotplug event.\n"); + +out: + return ret; +} + +static int vxge_probe_fw_update(struct vxgedev *vdev) +{ + u32 maj, min, bld; + int ret, gpxe = 0; + char *fw_name; + + maj = vdev->config.device_hw_info.fw_version.major; + min = vdev->config.device_hw_info.fw_version.minor; + bld = vdev->config.device_hw_info.fw_version.build; + + if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER) + return 0; + + /* Ignore the build number when determining if the current firmware is + * "too new" to load the driver + */ + if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) { + vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known " + "version, unable to load driver\n", + VXGE_DRIVER_NAME); + return -EINVAL; + } + + /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to + * work with this driver. 
+ */ + if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) { + vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be " + "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld); + return -EINVAL; + } + + /* If file not specified, determine gPXE or not */ + if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) { + int i; + for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) + if (vdev->devh->eprom_versions[i]) { + gpxe = 1; + break; + } + } + if (gpxe) + fw_name = "vxge/X3fw-pxe.ncf"; + else + fw_name = "vxge/X3fw.ncf"; + + ret = vxge_fw_upgrade(vdev, fw_name, 0); + /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on + * probe, so ignore them + */ + if (ret != -EINVAL && ret != -ENOENT) + return -EIO; + else + ret = 0; + + if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) > + VXGE_FW_VER(maj, min, 0)) { + vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to" + " be used with this driver.\n" + "Please get the latest version from " + "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE", + VXGE_DRIVER_NAME, maj, min, bld); + return -EINVAL; + } + + return ret; +} + +static int __devinit is_sriov_initialized(struct pci_dev *pdev) +{ + int pos; + u16 ctrl; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (pos) { + pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl); + if (ctrl & PCI_SRIOV_CTRL_VFE) + return 1; + } + return 0; +} + /** * vxge_probe * @pdev : structure containing the PCI related information of the device. @@ -3928,7 +4219,7 @@ static inline u32 vxge_get_num_vfs(u64 function_mode) static int __devinit vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) { - struct __vxge_hw_device *hldev; + struct __vxge_hw_device *hldev; enum vxge_hw_status status; int ret; int high_dma = 0; @@ -3951,9 +4242,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) attr.pdev = pdev; /* In SRIOV-17 mode, functions of the same adapter - * can be deployed on different buses */ - if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) || - (device != PCI_SLOT(pdev->devfn)))) + * can be deployed on different buses + */ + if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) && + !pdev->is_virtfn) new_device = 1; bus = pdev->bus->number; @@ -3971,6 +4263,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) driver_config->config_dev_cnt = 0; driver_config->total_dev_cnt = 0; } + /* Now making the CPU based no of vpath calculation * applicable for individual functions as well. */ @@ -3993,11 +4286,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) goto _exit0; } - ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL); + ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL); if (!ll_config) { ret = -ENOMEM; vxge_debug_init(VXGE_ERR, - "ll_config : malloc failed %s %d", + "device_config : malloc failed %s %d", __FILE__, __LINE__); goto _exit0; } @@ -4041,7 +4334,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) goto _exit1; } - if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) { + if (pci_request_region(pdev, 0, VXGE_DRIVER_NAME)) { vxge_debug_init(VXGE_ERR, "%s : request regions failed", __func__); ret = -ENODEV; @@ -4072,16 +4365,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) goto _exit3; } - if (ll_config->device_hw_info.fw_version.major != - VXGE_DRIVER_FW_VERSION_MAJOR) { - vxge_debug_init(VXGE_ERR, - "%s: Incorrect firmware version." 
- "Please upgrade the firmware to version 1.x.x", - VXGE_DRIVER_NAME); - ret = -EINVAL; - goto _exit3; - } - vpath_mask = ll_config->device_hw_info.vpath_mask; if (vpath_mask == 0) { vxge_debug_ll_config(VXGE_TRACE, @@ -4110,14 +4393,13 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) num_vfs = vxge_get_num_vfs(function_mode) - 1; /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ - if (is_sriov(function_mode) && (max_config_dev > 1) && - (ll_config->intr_type != INTA) && - (is_privileged == VXGE_HW_OK)) { - ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs) - ? (max_config_dev - 1) : num_vfs); + if (is_sriov(function_mode) && !is_sriov_initialized(pdev) && + (ll_config->intr_type != INTA)) { + ret = pci_enable_sriov(pdev, num_vfs); if (ret) vxge_debug_ll_config(VXGE_ERR, "Failed in enabling SRIOV mode: %d\n", ret); + /* No need to fail out, as an error here is non-fatal */ } /* @@ -4145,11 +4427,37 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) goto _exit3; } + if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major, + ll_config->device_hw_info.fw_version.minor, + ll_config->device_hw_info.fw_version.build) >= + VXGE_EPROM_FW_VER) { + struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES]; + + status = vxge_hw_vpath_eprom_img_ver_get(hldev, img); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed", + VXGE_DRIVER_NAME); + /* This is a non-fatal error, continue */ + } + + for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) { + hldev->eprom_versions[i] = img[i].version; + if (!img[i].is_valid) + break; + vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version " + "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i, + VXGE_EPROM_IMG_MAJOR(img[i].version), + VXGE_EPROM_IMG_MINOR(img[i].version), + VXGE_EPROM_IMG_FIX(img[i].version), + VXGE_EPROM_IMG_BUILD(img[i].version)); + } + } + /* if FCS stripping is not disabled in MAC fail driver load */ - if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) { - vxge_debug_init(VXGE_ERR, - "%s: FCS stripping is not disabled in MAC" - " failing driver load", VXGE_DRIVER_NAME); + status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC" + " failing driver load", VXGE_DRIVER_NAME); ret = -EINVAL; goto _exit4; } @@ -4163,28 +4471,32 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; ll_config->addr_learn_en = addr_learn_en; ll_config->rth_algorithm = RTH_ALG_JENKINS; - ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4; - ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE; - ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE; - ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE; - ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; - ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; + ll_config->rth_hash_type_tcpipv4 = 1; + ll_config->rth_hash_type_ipv4 = 0; + ll_config->rth_hash_type_tcpipv6 = 0; + ll_config->rth_hash_type_ipv6 = 0; + ll_config->rth_hash_type_tcpipv6ex = 0; + ll_config->rth_hash_type_ipv6ex = 0; ll_config->rth_bkt_sz = RTH_BUCKET_SIZE; ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; - if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath, - &vdev)) { + ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath, + &vdev); + 
if (ret) { ret = -EINVAL; goto _exit4; } + ret = vxge_probe_fw_update(vdev); + if (ret) + goto _exit5; + vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL); VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), vxge_hw_device_trace_level_get(hldev)); /* set private HW device info */ - hldev->ndev = vdev->ndev; vdev->mtu = VXGE_HW_DEFAULT_MTU; vdev->bar0 = attr.bar0; vdev->max_vpath_supported = max_vpath_supported; @@ -4278,15 +4590,13 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) /* Copy the station mac address to the list */ for (i = 0; i < vdev->no_of_vpath; i++) { - entry = (struct vxge_mac_addrs *) - kzalloc(sizeof(struct vxge_mac_addrs), - GFP_KERNEL); + entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL); if (NULL == entry) { vxge_debug_init(VXGE_ERR, "%s: mac_addr_list : memory allocation failed", vdev->ndev->name); ret = -EPERM; - goto _exit5; + goto _exit6; } macaddr = (u8 *)&entry->macaddr; memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN); @@ -4326,10 +4636,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) kfree(ll_config); return 0; -_exit5: +_exit6: for (i = 0; i < vdev->no_of_vpath; i++) vxge_free_mac_add_list(&vdev->vpaths[i]); - +_exit5: vxge_device_unregister(hldev); _exit4: pci_disable_sriov(pdev); @@ -4337,7 +4647,7 @@ _exit4: _exit3: iounmap(attr.bar0); _exit2: - pci_release_regions(pdev); + pci_release_region(pdev, 0); _exit1: pci_disable_device(pdev); _exit0: @@ -4354,34 +4664,25 @@ _exit0: * Description: This function is called by the Pci subsystem to release a * PCI device and free up all resource held up by the device. */ -static void __devexit -vxge_remove(struct pci_dev *pdev) +static void __devexit vxge_remove(struct pci_dev *pdev) { - struct __vxge_hw_device *hldev; + struct __vxge_hw_device *hldev; struct vxgedev *vdev = NULL; struct net_device *dev; int i = 0; -#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \ - (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)) - u32 level_trace; -#endif - hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev); + hldev = pci_get_drvdata(pdev); if (hldev == NULL) return; + dev = hldev->ndev; vdev = netdev_priv(dev); -#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \ - (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)) - level_trace = vdev->level_trace; -#endif - vxge_debug_entryexit(level_trace, - "%s:%d", __func__, __LINE__); + vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__); - vxge_debug_init(level_trace, - "%s : removing PCI device...", __func__); + vxge_debug_init(vdev->level_trace, "%s : removing PCI device...", + __func__); vxge_device_unregister(hldev); for (i = 0; i < vdev->no_of_vpath; i++) { @@ -4394,21 +4695,19 @@ vxge_remove(struct pci_dev *pdev) iounmap(vdev->bar0); - pci_disable_sriov(pdev); - /* we are safe to free it now */ free_netdev(dev); - vxge_debug_init(level_trace, - "%s:%d Device unregistered", __func__, __LINE__); + vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered", + __func__, __LINE__); vxge_hw_device_terminate(hldev); pci_disable_device(pdev); - pci_release_regions(pdev); + pci_release_region(pdev, 0); pci_set_drvdata(pdev, NULL); - vxge_debug_entryexit(level_trace, - "%s:%d Exiting...", __func__, __LINE__); + vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__, + __LINE__); } static struct pci_error_handlers vxge_err_handler = { @@ -4444,6 +4743,10 @@ vxge_starter(void) return -ENOMEM; ret = pci_register_driver(&vxge_driver); + if (ret) { + kfree(driver_config); + goto err; + } if 
(driver_config->config_dev_cnt && (driver_config->config_dev_cnt != driver_config->total_dev_cnt)) @@ -4451,10 +4754,7 @@ vxge_starter(void) "%s: Configured %d of %d devices", VXGE_DRIVER_NAME, driver_config->config_dev_cnt, driver_config->total_dev_cnt); - - if (ret) - kfree(driver_config); - +err: return ret; } diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h index de64536cb7d0..5746fedc356f 100644 --- a/drivers/net/vxge/vxge-main.h +++ b/drivers/net/vxge/vxge-main.h @@ -29,6 +29,9 @@ #define PCI_DEVICE_ID_TITAN_WIN 0x5733 #define PCI_DEVICE_ID_TITAN_UNI 0x5833 +#define VXGE_HW_TITAN1_PCI_REVISION 1 +#define VXGE_HW_TITAN1A_PCI_REVISION 2 + #define VXGE_USE_DEFAULT 0xffffffff #define VXGE_HW_VPATH_MSIX_ACTIVE 4 #define VXGE_ALARM_MSIX_ID 2 @@ -53,11 +56,13 @@ #define VXGE_TTI_BTIMER_VAL 250000 -#define VXGE_TTI_LTIMER_VAL 1000 -#define VXGE_TTI_RTIMER_VAL 0 -#define VXGE_RTI_BTIMER_VAL 250 -#define VXGE_RTI_LTIMER_VAL 100 -#define VXGE_RTI_RTIMER_VAL 0 +#define VXGE_TTI_LTIMER_VAL 1000 +#define VXGE_T1A_TTI_LTIMER_VAL 80 +#define VXGE_TTI_RTIMER_VAL 0 +#define VXGE_T1A_TTI_RTIMER_VAL 400 +#define VXGE_RTI_BTIMER_VAL 250 +#define VXGE_RTI_LTIMER_VAL 100 +#define VXGE_RTI_RTIMER_VAL 0 #define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH #define VXGE_ISR_POLLING_CNT 8 #define VXGE_MAX_CONFIG_DEV 0xFF @@ -76,14 +81,32 @@ #define TTI_TX_UFC_B 40 #define TTI_TX_UFC_C 60 #define TTI_TX_UFC_D 100 +#define TTI_T1A_TX_UFC_A 30 +#define TTI_T1A_TX_UFC_B 80 +/* Slope - (max_mtu - min_mtu)/(max_mtu_ufc - min_mtu_ufc) */ +/* Slope - 93 */ +/* 60 - 9k Mtu, 140 - 1.5k mtu */ +#define TTI_T1A_TX_UFC_C(mtu) (60 + ((VXGE_HW_MAX_MTU - mtu) / 93)) + +/* Slope - 37 */ +/* 100 - 9k Mtu, 300 - 1.5k mtu */ +#define TTI_T1A_TX_UFC_D(mtu) (100 + ((VXGE_HW_MAX_MTU - mtu) / 37)) + + +#define RTI_RX_URANGE_A 5 +#define RTI_RX_URANGE_B 15 +#define RTI_RX_URANGE_C 40 +#define RTI_T1A_RX_URANGE_A 1 +#define RTI_T1A_RX_URANGE_B 20 +#define RTI_T1A_RX_URANGE_C 50 +#define RTI_RX_UFC_A 1 +#define RTI_RX_UFC_B 5 +#define RTI_RX_UFC_C 10 +#define RTI_RX_UFC_D 15 +#define RTI_T1A_RX_UFC_B 20 +#define RTI_T1A_RX_UFC_C 50 +#define RTI_T1A_RX_UFC_D 60 -#define RTI_RX_URANGE_A 5 -#define RTI_RX_URANGE_B 15 -#define RTI_RX_URANGE_C 40 -#define RTI_RX_UFC_A 1 -#define RTI_RX_UFC_B 5 -#define RTI_RX_UFC_C 10 -#define RTI_RX_UFC_D 15 /* Milli secs timer period */ #define VXGE_TIMER_DELAY 10000 @@ -145,15 +168,15 @@ struct vxge_config { int addr_learn_en; - int rth_steering; - int rth_algorithm; - int rth_hash_type_tcpipv4; - int rth_hash_type_ipv4; - int rth_hash_type_tcpipv6; - int rth_hash_type_ipv6; - int rth_hash_type_tcpipv6ex; - int rth_hash_type_ipv6ex; - int rth_bkt_sz; + u32 rth_steering:2, + rth_algorithm:2, + rth_hash_type_tcpipv4:1, + rth_hash_type_ipv4:1, + rth_hash_type_tcpipv6:1, + rth_hash_type_ipv6:1, + rth_hash_type_tcpipv6ex:1, + rth_hash_type_ipv6ex:1, + rth_bkt_sz:8; int rth_jhash_golden_ratio; int tx_steering_type; int fifo_indicate_max_pkts; @@ -248,8 +271,9 @@ struct vxge_ring { */ int driver_id; - /* copy of the flag indicating whether rx_csum is to be used */ - u32 rx_csum; + /* copy of the flag indicating whether rx_csum is to be used */ + u32 rx_csum:1, + rx_hwts:1; int pkts_processed; int budget; @@ -281,8 +305,8 @@ struct vxge_vpath { int is_configured; int is_open; struct vxgedev *vdev; - u8 (macaddr)[ETH_ALEN]; - u8 (macmask)[ETH_ALEN]; + u8 macaddr[ETH_ALEN]; + u8 macmask[ETH_ALEN]; #define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048 /* mac addresses currently programmed into NIC */ 
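The TTI_T1A_TX_UFC_C/D macros in the vxge-main.h hunk above derive the Titan1A Tx utilization-based frame counts by linear interpolation on MTU, per the slope comments. A standalone sketch reproducing the endpoint values quoted there (the 9000-byte ceiling is an illustrative stand-in; the driver interpolates from VXGE_HW_MAX_MTU defined elsewhere):

	#include <stdio.h>

	#define MAX_MTU 9000	/* assumption for illustration only */

	/* mirror TTI_T1A_TX_UFC_C(mtu) and TTI_T1A_TX_UFC_D(mtu) */
	static int ufc_c(int mtu) { return 60 + (MAX_MTU - mtu) / 93; }
	static int ufc_d(int mtu) { return 100 + (MAX_MTU - mtu) / 37; }

	int main(void)
	{
		/* prints 60/140 and 100/302, matching the 60-140 and
		 * 100-~300 endpoints quoted in the macro comments */
		printf("UFC_C: %d @9k, %d @1.5k\n", ufc_c(9000), ufc_c(1500));
		printf("UFC_D: %d @9k, %d @1.5k\n", ufc_d(9000), ufc_d(1500));
		return 0;
	}
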
@@ -327,7 +351,9 @@ struct vxgedev { u16 all_multi_flg; /* A flag indicating whether rx_csum is to be used or not. */ - u32 rx_csum; + u32 rx_csum:1, + rx_hwts:1, + titan1:1; struct vxge_msix_entry *vxge_entries; struct msix_entry *entries; @@ -369,6 +395,7 @@ struct vxgedev { u32 level_err; u32 level_trace; char fw_version[VXGE_HW_FW_STRLEN]; + struct work_struct reset_task; }; struct vxge_rx_priv { @@ -387,8 +414,6 @@ struct vxge_tx_priv { static int p = val; \ module_param(p, int, 0) -#define vxge_os_bug(fmt...) { printk(fmt); BUG(); } - #define vxge_os_timer(timer, handle, arg, exp) do { \ init_timer(&timer); \ timer.function = handle; \ @@ -396,7 +421,10 @@ struct vxge_tx_priv { mod_timer(&timer, (jiffies + exp)); \ } while (0); -extern void vxge_initialize_ethtool_ops(struct net_device *ndev); +void vxge_initialize_ethtool_ops(struct net_device *ndev); +enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev); +int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override); + /** * #define VXGE_DEBUG_INIT: debug for initialization functions * #define VXGE_DEBUG_TX : debug transmit related functions diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h index 3dd5c9615ef9..3e658b175947 100644 --- a/drivers/net/vxge/vxge-reg.h +++ b/drivers/net/vxge/vxge-reg.h @@ -49,6 +49,33 @@ #define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17 #define VXGE_HW_TITAN_VPATH_REG_SPACES 17 +#define VXGE_HW_FW_API_GET_EPROM_REV 31 + +#define VXGE_EPROM_IMG_MAJOR(val) (u32) vxge_bVALn(val, 48, 4) +#define VXGE_EPROM_IMG_MINOR(val) (u32) vxge_bVALn(val, 52, 4) +#define VXGE_EPROM_IMG_FIX(val) (u32) vxge_bVALn(val, 56, 4) +#define VXGE_EPROM_IMG_BUILD(val) (u32) vxge_bVALn(val, 60, 4) + +#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val) vxge_bVALn(val, 16, 8) +#define VXGE_HW_GET_EPROM_IMAGE_VALID(val) vxge_bVALn(val, 31, 1) +#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val) vxge_bVALn(val, 40, 8) +#define VXGE_HW_GET_EPROM_IMAGE_REV(val) vxge_bVALn(val, 48, 16) +#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val) vxge_vBIT(val, 16, 8) + +#define VXGE_HW_FW_API_GET_FUNC_MODE 29 +#define VXGE_HW_GET_FUNC_MODE_VAL(val) (val & 0xFF) + +#define VXGE_HW_FW_UPGRADE_MEMO 13 +#define VXGE_HW_FW_UPGRADE_ACTION 16 +#define VXGE_HW_FW_UPGRADE_OFFSET_START 2 +#define VXGE_HW_FW_UPGRADE_OFFSET_SEND 3 +#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT 4 +#define VXGE_HW_FW_UPGRADE_OFFSET_READ 5 + +#define VXGE_HW_FW_UPGRADE_BLK_SIZE 16 +#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val) (val & 0xff) +#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val) ((val >> 8) & 0xff) + #define VXGE_HW_ASIC_MODE_RESERVED 0 #define VXGE_HW_ASIC_MODE_NO_IOV 1 #define VXGE_HW_ASIC_MODE_SR_IOV 2 @@ -165,13 +192,13 @@ #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2 #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3 #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5 -#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6 +#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6 #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7 #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8 #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9 #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10 #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11 -#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12 +#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12 #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13 #define 
VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \ @@ -437,6 +464,7 @@ #define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \ vxge_bVALn(bits, 48, 16) #define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16) +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8) #define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\ vxge_bVALn(bits, 0, 18) @@ -3998,6 +4026,7 @@ struct vxge_hw_vpath_reg { #define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9) #define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9) #define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9) +#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val) vxge_bVALn(val, 36, 9) /*0x00a78*/ u64 prc_cfg7; #define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2) #define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11) diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c index 4bdb611a6842..4c10d6c4075f 100644 --- a/drivers/net/vxge/vxge-traffic.c +++ b/drivers/net/vxge/vxge-traffic.c @@ -17,13 +17,6 @@ #include "vxge-config.h" #include "vxge-main.h" -static enum vxge_hw_status -__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, - u32 vp_id, enum vxge_hw_event type); -static enum vxge_hw_status -__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath, - u32 skip_alarms); - /* * vxge_hw_vpath_intr_enable - Enable vpath interrupts. * @vp: Virtual Path handle. @@ -419,6 +412,384 @@ void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev) } /** + * __vxge_hw_device_handle_error - Handle error + * @hldev: HW device + * @vp_id: Vpath Id + * @type: Error type. Please see enum vxge_hw_event{} + * + * Handle error. + */ +static enum vxge_hw_status +__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id, + enum vxge_hw_event type) +{ + switch (type) { + case VXGE_HW_EVENT_UNKNOWN: + break; + case VXGE_HW_EVENT_RESET_START: + case VXGE_HW_EVENT_RESET_COMPLETE: + case VXGE_HW_EVENT_LINK_DOWN: + case VXGE_HW_EVENT_LINK_UP: + goto out; + case VXGE_HW_EVENT_ALARM_CLEARED: + goto out; + case VXGE_HW_EVENT_ECCERR: + case VXGE_HW_EVENT_MRPCIM_ECCERR: + goto out; + case VXGE_HW_EVENT_FIFO_ERR: + case VXGE_HW_EVENT_VPATH_ERR: + case VXGE_HW_EVENT_CRITICAL_ERR: + case VXGE_HW_EVENT_SERR: + break; + case VXGE_HW_EVENT_SRPCIM_SERR: + case VXGE_HW_EVENT_MRPCIM_SERR: + goto out; + case VXGE_HW_EVENT_SLOT_FREEZE: + break; + default: + vxge_assert(0); + goto out; + } + + /* notify driver */ + if (hldev->uld_callbacks.crit_err) + hldev->uld_callbacks.crit_err( + (struct __vxge_hw_device *)hldev, + type, vp_id); +out: + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_device_handle_link_down_ind + * @hldev: HW device handle. + * + * Link down indication handler. The function is invoked by HW when + * Titan indicates that the link is down. + */ +static enum vxge_hw_status +__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev) +{ + /* + * If the previous link state is not down, return. + */ + if (hldev->link_state == VXGE_HW_LINK_DOWN) + goto exit; + + hldev->link_state = VXGE_HW_LINK_DOWN; + + /* notify driver */ + if (hldev->uld_callbacks.link_down) + hldev->uld_callbacks.link_down(hldev); +exit: + return VXGE_HW_OK; +} + +/* + * __vxge_hw_device_handle_link_up_ind + * @hldev: HW device handle. + * + * Link up indication handler. The function is invoked by HW when + * Titan indicates that the link is up for programmable amount of time. 
+ */ +static enum vxge_hw_status +__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev) +{ + /* + * If the previous link state is not down, return. + */ + if (hldev->link_state == VXGE_HW_LINK_UP) + goto exit; + + hldev->link_state = VXGE_HW_LINK_UP; + + /* notify driver */ + if (hldev->uld_callbacks.link_up) + hldev->uld_callbacks.link_up(hldev); +exit: + return VXGE_HW_OK; +} + +/* + * __vxge_hw_vpath_alarm_process - Process Alarms. + * @vpath: Virtual Path. + * @skip_alarms: Do not clear the alarms + * + * Process vpath alarms. + * + */ +static enum vxge_hw_status +__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath, + u32 skip_alarms) +{ + u64 val64; + u64 alarm_status; + u64 pic_status; + struct __vxge_hw_device *hldev = NULL; + enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN; + u64 mask64; + struct vxge_hw_vpath_stats_sw_info *sw_stats; + struct vxge_hw_vpath_reg __iomem *vp_reg; + + if (vpath == NULL) { + alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, + alarm_event); + goto out2; + } + + hldev = vpath->hldev; + vp_reg = vpath->vp_reg; + alarm_status = readq(&vp_reg->vpath_general_int_status); + + if (alarm_status == VXGE_HW_ALL_FOXES) { + alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE, + alarm_event); + goto out; + } + + sw_stats = vpath->sw_stats; + + if (alarm_status & ~( + VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT | + VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT | + VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT | + VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) { + sw_stats->error_stats.unknown_alarms++; + + alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, + alarm_event); + goto out; + } + + if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) { + + val64 = readq(&vp_reg->xgmac_vp_int_status); + + if (val64 & + VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) { + + val64 = readq(&vp_reg->asic_ntwk_vp_err_reg); + + if (((val64 & + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) && + (!(val64 & + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) || + ((val64 & + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) && + (!(val64 & + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) + ))) { + sw_stats->error_stats.network_sustained_fault++; + + writeq( + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT, + &vp_reg->asic_ntwk_vp_err_mask); + + __vxge_hw_device_handle_link_down_ind(hldev); + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_LINK_DOWN, alarm_event); + } + + if (((val64 & + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) && + (!(val64 & + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) || + ((val64 & + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) && + (!(val64 & + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) + ))) { + + sw_stats->error_stats.network_sustained_ok++; + + writeq( + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK, + &vp_reg->asic_ntwk_vp_err_mask); + + __vxge_hw_device_handle_link_up_ind(hldev); + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_LINK_UP, alarm_event); + } + + writeq(VXGE_HW_INTR_MASK_ALL, + &vp_reg->asic_ntwk_vp_err_reg); + + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_ALARM_CLEARED, alarm_event); + + if (skip_alarms) + return VXGE_HW_OK; + } + } + + if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) { + + pic_status = readq(&vp_reg->vpath_ppif_int_status); + + if (pic_status & + VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) { + + val64 = readq(&vp_reg->general_errors_reg); + mask64 = readq(&vp_reg->general_errors_mask); + + if ((val64 & + VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) & + 
~mask64) { + sw_stats->error_stats.ini_serr_det++; + + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_SERR, alarm_event); + } + + if ((val64 & + VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) & + ~mask64) { + sw_stats->error_stats.dblgen_fifo0_overflow++; + + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_FIFO_ERR, alarm_event); + } + + if ((val64 & + VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) & + ~mask64) + sw_stats->error_stats.statsb_pif_chain_error++; + + if ((val64 & + VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) & + ~mask64) + sw_stats->error_stats.statsb_drop_timeout++; + + if ((val64 & + VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) & + ~mask64) + sw_stats->error_stats.target_illegal_access++; + + if (!skip_alarms) { + writeq(VXGE_HW_INTR_MASK_ALL, + &vp_reg->general_errors_reg); + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_ALARM_CLEARED, + alarm_event); + } + } + + if (pic_status & + VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) { + + val64 = readq(&vp_reg->kdfcctl_errors_reg); + mask64 = readq(&vp_reg->kdfcctl_errors_mask); + + if ((val64 & + VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) & + ~mask64) { + sw_stats->error_stats.kdfcctl_fifo0_overwrite++; + + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_FIFO_ERR, + alarm_event); + } + + if ((val64 & + VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) & + ~mask64) { + sw_stats->error_stats.kdfcctl_fifo0_poison++; + + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_FIFO_ERR, + alarm_event); + } + + if ((val64 & + VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) & + ~mask64) { + sw_stats->error_stats.kdfcctl_fifo0_dma_error++; + + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_FIFO_ERR, + alarm_event); + } + + if (!skip_alarms) { + writeq(VXGE_HW_INTR_MASK_ALL, + &vp_reg->kdfcctl_errors_reg); + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_ALARM_CLEARED, + alarm_event); + } + } + + } + + if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) { + + val64 = readq(&vp_reg->wrdma_alarm_status); + + if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) { + + val64 = readq(&vp_reg->prc_alarm_reg); + mask64 = readq(&vp_reg->prc_alarm_mask); + + if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)& + ~mask64) + sw_stats->error_stats.prc_ring_bumps++; + + if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) & + ~mask64) { + sw_stats->error_stats.prc_rxdcm_sc_err++; + + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_VPATH_ERR, + alarm_event); + } + + if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT) + & ~mask64) { + sw_stats->error_stats.prc_rxdcm_sc_abort++; + + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_VPATH_ERR, + alarm_event); + } + + if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR) + & ~mask64) { + sw_stats->error_stats.prc_quanta_size_err++; + + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_VPATH_ERR, + alarm_event); + } + + if (!skip_alarms) { + writeq(VXGE_HW_INTR_MASK_ALL, + &vp_reg->prc_alarm_reg); + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_ALARM_CLEARED, + alarm_event); + } + } + } +out: + hldev->stats.sw_dev_err_stats.vpath_alarms++; +out2: + if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) || + (alarm_event == VXGE_HW_EVENT_UNKNOWN)) + return VXGE_HW_OK; + + __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event); + + if (alarm_event == VXGE_HW_EVENT_SERR) + return VXGE_HW_ERR_CRITICAL; + + return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ? + VXGE_HW_ERR_SLOT_FREEZE : + (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? 
VXGE_HW_ERR_FIFO : + VXGE_HW_ERR_VPATH; +} + +/** * vxge_hw_device_begin_irq - Begin IRQ processing. * @hldev: HW device handle. * @skip_alarms: Do not clear the alarms @@ -513,108 +884,6 @@ exit: return ret; } -/* - * __vxge_hw_device_handle_link_up_ind - * @hldev: HW device handle. - * - * Link up indication handler. The function is invoked by HW when - * Titan indicates that the link is up for programmable amount of time. - */ -static enum vxge_hw_status -__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev) -{ - /* - * If the previous link state is not down, return. - */ - if (hldev->link_state == VXGE_HW_LINK_UP) - goto exit; - - hldev->link_state = VXGE_HW_LINK_UP; - - /* notify driver */ - if (hldev->uld_callbacks.link_up) - hldev->uld_callbacks.link_up(hldev); -exit: - return VXGE_HW_OK; -} - -/* - * __vxge_hw_device_handle_link_down_ind - * @hldev: HW device handle. - * - * Link down indication handler. The function is invoked by HW when - * Titan indicates that the link is down. - */ -static enum vxge_hw_status -__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev) -{ - /* - * If the previous link state is not down, return. - */ - if (hldev->link_state == VXGE_HW_LINK_DOWN) - goto exit; - - hldev->link_state = VXGE_HW_LINK_DOWN; - - /* notify driver */ - if (hldev->uld_callbacks.link_down) - hldev->uld_callbacks.link_down(hldev); -exit: - return VXGE_HW_OK; -} - -/** - * __vxge_hw_device_handle_error - Handle error - * @hldev: HW device - * @vp_id: Vpath Id - * @type: Error type. Please see enum vxge_hw_event{} - * - * Handle error. - */ -static enum vxge_hw_status -__vxge_hw_device_handle_error( - struct __vxge_hw_device *hldev, - u32 vp_id, - enum vxge_hw_event type) -{ - switch (type) { - case VXGE_HW_EVENT_UNKNOWN: - break; - case VXGE_HW_EVENT_RESET_START: - case VXGE_HW_EVENT_RESET_COMPLETE: - case VXGE_HW_EVENT_LINK_DOWN: - case VXGE_HW_EVENT_LINK_UP: - goto out; - case VXGE_HW_EVENT_ALARM_CLEARED: - goto out; - case VXGE_HW_EVENT_ECCERR: - case VXGE_HW_EVENT_MRPCIM_ECCERR: - goto out; - case VXGE_HW_EVENT_FIFO_ERR: - case VXGE_HW_EVENT_VPATH_ERR: - case VXGE_HW_EVENT_CRITICAL_ERR: - case VXGE_HW_EVENT_SERR: - break; - case VXGE_HW_EVENT_SRPCIM_SERR: - case VXGE_HW_EVENT_MRPCIM_SERR: - goto out; - case VXGE_HW_EVENT_SLOT_FREEZE: - break; - default: - vxge_assert(0); - goto out; - } - - /* notify driver */ - if (hldev->uld_callbacks.crit_err) - hldev->uld_callbacks.crit_err( - (struct __vxge_hw_device *)hldev, - type, vp_id); -out: - - return VXGE_HW_OK; -} - /** * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the * condition that has caused the Tx and RX interrupt. @@ -699,8 +968,8 @@ _alloc_after_swap: * Posts a dtr to work array. 
* */ -static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, - void *dtrh) +static void +vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh) { vxge_assert(channel->work_arr[channel->post_index] == NULL); @@ -911,10 +1180,6 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh) */ void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh) { - struct __vxge_hw_channel *channel; - - channel = &ring->channel; - wmb(); vxge_hw_ring_rxd_post_post(ring, rxdh); } @@ -975,7 +1240,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed( *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0); /* check whether it is not the end */ - if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) { + if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) { vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control != 0); @@ -1868,284 +2133,6 @@ exit: } /* - * __vxge_hw_vpath_alarm_process - Process Alarms. - * @vpath: Virtual Path. - * @skip_alarms: Do not clear the alarms - * - * Process vpath alarms. - * - */ -static enum vxge_hw_status -__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath, - u32 skip_alarms) -{ - u64 val64; - u64 alarm_status; - u64 pic_status; - struct __vxge_hw_device *hldev = NULL; - enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN; - u64 mask64; - struct vxge_hw_vpath_stats_sw_info *sw_stats; - struct vxge_hw_vpath_reg __iomem *vp_reg; - - if (vpath == NULL) { - alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, - alarm_event); - goto out2; - } - - hldev = vpath->hldev; - vp_reg = vpath->vp_reg; - alarm_status = readq(&vp_reg->vpath_general_int_status); - - if (alarm_status == VXGE_HW_ALL_FOXES) { - alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE, - alarm_event); - goto out; - } - - sw_stats = vpath->sw_stats; - - if (alarm_status & ~( - VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT | - VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT | - VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT | - VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) { - sw_stats->error_stats.unknown_alarms++; - - alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, - alarm_event); - goto out; - } - - if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) { - - val64 = readq(&vp_reg->xgmac_vp_int_status); - - if (val64 & - VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) { - - val64 = readq(&vp_reg->asic_ntwk_vp_err_reg); - - if (((val64 & - VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) && - (!(val64 & - VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) || - ((val64 & - VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) && - (!(val64 & - VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) - ))) { - sw_stats->error_stats.network_sustained_fault++; - - writeq( - VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT, - &vp_reg->asic_ntwk_vp_err_mask); - - __vxge_hw_device_handle_link_down_ind(hldev); - alarm_event = VXGE_HW_SET_LEVEL( - VXGE_HW_EVENT_LINK_DOWN, alarm_event); - } - - if (((val64 & - VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) && - (!(val64 & - VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) || - ((val64 & - VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) && - (!(val64 & - VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) - ))) { - - sw_stats->error_stats.network_sustained_ok++; - - writeq( - VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK, - &vp_reg->asic_ntwk_vp_err_mask); - - __vxge_hw_device_handle_link_up_ind(hldev); - alarm_event = VXGE_HW_SET_LEVEL( - VXGE_HW_EVENT_LINK_UP, alarm_event); - } - - writeq(VXGE_HW_INTR_MASK_ALL, - 
&vp_reg->asic_ntwk_vp_err_reg); - - alarm_event = VXGE_HW_SET_LEVEL( - VXGE_HW_EVENT_ALARM_CLEARED, alarm_event); - - if (skip_alarms) - return VXGE_HW_OK; - } - } - - if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) { - - pic_status = readq(&vp_reg->vpath_ppif_int_status); - - if (pic_status & - VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) { - - val64 = readq(&vp_reg->general_errors_reg); - mask64 = readq(&vp_reg->general_errors_mask); - - if ((val64 & - VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) & - ~mask64) { - sw_stats->error_stats.ini_serr_det++; - - alarm_event = VXGE_HW_SET_LEVEL( - VXGE_HW_EVENT_SERR, alarm_event); - } - - if ((val64 & - VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) & - ~mask64) { - sw_stats->error_stats.dblgen_fifo0_overflow++; - - alarm_event = VXGE_HW_SET_LEVEL( - VXGE_HW_EVENT_FIFO_ERR, alarm_event); - } - - if ((val64 & - VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) & - ~mask64) - sw_stats->error_stats.statsb_pif_chain_error++; - - if ((val64 & - VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) & - ~mask64) - sw_stats->error_stats.statsb_drop_timeout++; - - if ((val64 & - VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) & - ~mask64) - sw_stats->error_stats.target_illegal_access++; - - if (!skip_alarms) { - writeq(VXGE_HW_INTR_MASK_ALL, - &vp_reg->general_errors_reg); - alarm_event = VXGE_HW_SET_LEVEL( - VXGE_HW_EVENT_ALARM_CLEARED, - alarm_event); - } - } - - if (pic_status & - VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) { - - val64 = readq(&vp_reg->kdfcctl_errors_reg); - mask64 = readq(&vp_reg->kdfcctl_errors_mask); - - if ((val64 & - VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) & - ~mask64) { - sw_stats->error_stats.kdfcctl_fifo0_overwrite++; - - alarm_event = VXGE_HW_SET_LEVEL( - VXGE_HW_EVENT_FIFO_ERR, - alarm_event); - } - - if ((val64 & - VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) & - ~mask64) { - sw_stats->error_stats.kdfcctl_fifo0_poison++; - - alarm_event = VXGE_HW_SET_LEVEL( - VXGE_HW_EVENT_FIFO_ERR, - alarm_event); - } - - if ((val64 & - VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) & - ~mask64) { - sw_stats->error_stats.kdfcctl_fifo0_dma_error++; - - alarm_event = VXGE_HW_SET_LEVEL( - VXGE_HW_EVENT_FIFO_ERR, - alarm_event); - } - - if (!skip_alarms) { - writeq(VXGE_HW_INTR_MASK_ALL, - &vp_reg->kdfcctl_errors_reg); - alarm_event = VXGE_HW_SET_LEVEL( - VXGE_HW_EVENT_ALARM_CLEARED, - alarm_event); - } - } - - } - - if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) { - - val64 = readq(&vp_reg->wrdma_alarm_status); - - if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) { - - val64 = readq(&vp_reg->prc_alarm_reg); - mask64 = readq(&vp_reg->prc_alarm_mask); - - if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)& - ~mask64) - sw_stats->error_stats.prc_ring_bumps++; - - if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) & - ~mask64) { - sw_stats->error_stats.prc_rxdcm_sc_err++; - - alarm_event = VXGE_HW_SET_LEVEL( - VXGE_HW_EVENT_VPATH_ERR, - alarm_event); - } - - if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT) - & ~mask64) { - sw_stats->error_stats.prc_rxdcm_sc_abort++; - - alarm_event = VXGE_HW_SET_LEVEL( - VXGE_HW_EVENT_VPATH_ERR, - alarm_event); - } - - if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR) - & ~mask64) { - sw_stats->error_stats.prc_quanta_size_err++; - - alarm_event = VXGE_HW_SET_LEVEL( - VXGE_HW_EVENT_VPATH_ERR, - alarm_event); - } - - if (!skip_alarms) { - writeq(VXGE_HW_INTR_MASK_ALL, - &vp_reg->prc_alarm_reg); - alarm_event = VXGE_HW_SET_LEVEL( - 
VXGE_HW_EVENT_ALARM_CLEARED, - alarm_event); - } - } - } -out: - hldev->stats.sw_dev_err_stats.vpath_alarms++; -out2: - if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) || - (alarm_event == VXGE_HW_EVENT_UNKNOWN)) - return VXGE_HW_OK; - - __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event); - - if (alarm_event == VXGE_HW_EVENT_SERR) - return VXGE_HW_ERR_CRITICAL; - - return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ? - VXGE_HW_ERR_SLOT_FREEZE : - (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO : - VXGE_HW_ERR_VPATH; -} - -/* * vxge_hw_vpath_alarm_process - Process Alarms. * @vpath: Virtual Path. * @skip_alarms: Do not clear the alarms diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h index 9890d4d596d0..8c3103fb6442 100644 --- a/drivers/net/vxge/vxge-traffic.h +++ b/drivers/net/vxge/vxge-traffic.h @@ -1904,34 +1904,6 @@ enum vxge_hw_ring_tcode { VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF }; -/** - * enum enum vxge_hw_ring_hash_type - RTH hash types - * @VXGE_HW_RING_HASH_TYPE_NONE: No Hash - * @VXGE_HW_RING_HASH_TYPE_TCP_IPV4: TCP IPv4 - * @VXGE_HW_RING_HASH_TYPE_UDP_IPV4: UDP IPv4 - * @VXGE_HW_RING_HASH_TYPE_IPV4: IPv4 - * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6: TCP IPv6 - * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6: UDP IPv6 - * @VXGE_HW_RING_HASH_TYPE_IPV6: IPv6 - * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX: TCP IPv6 extension - * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX: UDP IPv6 extension - * @VXGE_HW_RING_HASH_TYPE_IPV6_EX: IPv6 extension - * - * RTH hash types - */ -enum vxge_hw_ring_hash_type { - VXGE_HW_RING_HASH_TYPE_NONE = 0x0, - VXGE_HW_RING_HASH_TYPE_TCP_IPV4 = 0x1, - VXGE_HW_RING_HASH_TYPE_UDP_IPV4 = 0x2, - VXGE_HW_RING_HASH_TYPE_IPV4 = 0x3, - VXGE_HW_RING_HASH_TYPE_TCP_IPV6 = 0x4, - VXGE_HW_RING_HASH_TYPE_UDP_IPV6 = 0x5, - VXGE_HW_RING_HASH_TYPE_IPV6 = 0x6, - VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX = 0x7, - VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX = 0x8, - VXGE_HW_RING_HASH_TYPE_IPV6_EX = 0x9 -}; - enum vxge_hw_status vxge_hw_ring_rxd_reserve( struct __vxge_hw_ring *ring_handle, void **rxdh); @@ -2109,10 +2081,6 @@ struct __vxge_hw_ring_rxd_priv { #endif }; -/* ========================= FIFO PRIVATE API ============================= */ - -struct vxge_hw_fifo_attr; - struct vxge_hw_mempool_cbs { void (*item_func_alloc)( struct vxge_hw_mempool *mempoolh, @@ -2186,27 +2154,27 @@ enum vxge_hw_vpath_mac_addr_add_mode { enum vxge_hw_status vxge_hw_vpath_mac_addr_add( struct __vxge_hw_vpath_handle *vpath_handle, - u8 (macaddr)[ETH_ALEN], - u8 (macaddr_mask)[ETH_ALEN], + u8 *macaddr, + u8 *macaddr_mask, enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode); enum vxge_hw_status vxge_hw_vpath_mac_addr_get( struct __vxge_hw_vpath_handle *vpath_handle, - u8 (macaddr)[ETH_ALEN], - u8 (macaddr_mask)[ETH_ALEN]); + u8 *macaddr, + u8 *macaddr_mask); enum vxge_hw_status vxge_hw_vpath_mac_addr_get_next( struct __vxge_hw_vpath_handle *vpath_handle, - u8 (macaddr)[ETH_ALEN], - u8 (macaddr_mask)[ETH_ALEN]); + u8 *macaddr, + u8 *macaddr_mask); enum vxge_hw_status vxge_hw_vpath_mac_addr_delete( struct __vxge_hw_vpath_handle *vpath_handle, - u8 (macaddr)[ETH_ALEN], - u8 (macaddr_mask)[ETH_ALEN]); + u8 *macaddr, + u8 *macaddr_mask); enum vxge_hw_status vxge_hw_vpath_vid_add( @@ -2313,6 +2281,7 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh); int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel); + void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id); diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h index 
53fefe137368..ad2f99b9bcf3 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -15,8 +15,35 @@
 #define VXGE_VERSION_H
 
 #define VXGE_VERSION_MAJOR "2"
-#define VXGE_VERSION_MINOR "0"
-#define VXGE_VERSION_FIX "9"
-#define VXGE_VERSION_BUILD "20840"
+#define VXGE_VERSION_MINOR "5"
+#define VXGE_VERSION_FIX "1"
+#define VXGE_VERSION_BUILD "22082"
 #define VXGE_VERSION_FOR "k"
+
+#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
+
+#define VXGE_DEAD_FW_VER_MAJOR 1
+#define VXGE_DEAD_FW_VER_MINOR 4
+#define VXGE_DEAD_FW_VER_BUILD 4
+
+#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \
+				     VXGE_DEAD_FW_VER_MINOR, \
+				     VXGE_DEAD_FW_VER_BUILD)
+
+#define VXGE_EPROM_FW_VER_MAJOR 1
+#define VXGE_EPROM_FW_VER_MINOR 6
+#define VXGE_EPROM_FW_VER_BUILD 1
+
+#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \
+				      VXGE_EPROM_FW_VER_MINOR, \
+				      VXGE_EPROM_FW_VER_BUILD)
+
+#define VXGE_CERT_FW_VER_MAJOR 1
+#define VXGE_CERT_FW_VER_MINOR 8
+#define VXGE_CERT_FW_VER_BUILD 1
+
+#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
+				     VXGE_CERT_FW_VER_MINOR, \
+				     VXGE_CERT_FW_VER_BUILD)
+
 #endif
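The packed-version macro above allots one byte per field, so firmware levels compare with plain integer operators. A short sketch (values taken from the definitions in this header) of the comparisons vxge_probe_fw_update() performs:

	#include <stdio.h>

	#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))

	int main(void)
	{
		unsigned int cert = VXGE_FW_VER(1, 8, 1);	/* VXGE_CERT_FW_VER */
		unsigned int dev = VXGE_FW_VER(1, 6, 1);	/* e.g. a device running 1.6.1 */

		printf("cert=0x%06x dev=0x%06x\n", cert, dev);

		/* the build byte is masked off for the "too new" check */
		if ((dev & ~0xffu) > cert)
			printf("firmware newer than this driver supports\n");
		else if (dev != cert)
			printf("upgrade to the certified 1.8.1 image is attempted\n");
		return 0;
	}
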