Diffstat (limited to 'drivers/net/ethernet/qlogic/qede')
 drivers/net/ethernet/qlogic/qede/qede.h         |   7
 drivers/net/ethernet/qlogic/qede/qede_ethtool.c |   6
 drivers/net/ethernet/qlogic/qede/qede_filter.c  | 663
 drivers/net/ethernet/qlogic/qede/qede_fp.c      | 227
 drivers/net/ethernet/qlogic/qede/qede_main.c    | 186
 5 files changed, 725 insertions(+), 364 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 9935978c5542..d7ed0d3dbf71 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -75,6 +75,7 @@ struct qede_stats_common {
 	u64 rx_bcast_pkts;
 	u64 mftag_filter_discards;
 	u64 mac_filter_discards;
+	u64 gft_filter_drop;
 	u64 tx_ucast_bytes;
 	u64 tx_mcast_bytes;
 	u64 tx_bcast_bytes;
@@ -87,6 +88,7 @@ struct qede_stats_common {
 	u64 coalesced_aborts_num;
 	u64 non_coalesced_pkts;
 	u64 coalesced_bytes;
+	u64 link_change_count;
 
 	/* port */
 	u64 rx_64_byte_packets;
@@ -290,15 +292,12 @@ struct qede_agg_info {
 	 * aggregation.
 	 */
 	struct sw_rx_data buffer;
-	dma_addr_t buffer_mapping;
-
 	struct sk_buff *skb;
 
 	/* We need some structs from the start cookie until termination */
 	u16 vlan_tag;
-	u16 start_cqe_bd_len;
-	u8 start_cqe_placement_offset;
 
+	bool tpa_start_fail;
 	u8 state;
 	u8 frag_id;
 
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index ecbf1ded7a39..f4a0f8ff8261 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -161,6 +161,7 @@ static const struct {
 	QEDE_STAT(no_buff_discards),
 	QEDE_PF_STAT(mftag_filter_discards),
 	QEDE_PF_STAT(mac_filter_discards),
+	QEDE_PF_STAT(gft_filter_drop),
 	QEDE_STAT(tx_err_drop_pkts),
 	QEDE_STAT(ttl0_discard),
 	QEDE_STAT(packet_too_big_discard),
@@ -170,6 +171,8 @@ static const struct {
 	QEDE_STAT(coalesced_aborts_num),
 	QEDE_STAT(non_coalesced_pkts),
 	QEDE_STAT(coalesced_bytes),
+
+	QEDE_STAT(link_change_count),
 };
 
 #define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
@@ -1508,7 +1511,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
 		len = le16_to_cpu(fp_cqe->len_on_first_bd);
 		data_ptr = (u8 *)(page_address(sw_rx_data->data) +
 				  fp_cqe->placement_offset +
-				  sw_rx_data->page_offset);
+				  sw_rx_data->page_offset +
+				  rxq->rx_headroom);
 		if (ether_addr_equal(data_ptr,  edev->ndev->dev_addr) &&
 		    ether_addr_equal(data_ptr + ETH_ALEN,
 				     edev->ndev->dev_addr)) {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 6687e04d1558..e9e088d9c815 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -38,6 +38,7 @@
 #include <linux/qed/qed_if.h>
 #include "qede.h"
 
+#define QEDE_FILTER_PRINT_MAX_LEN	(64)
 struct qede_arfs_tuple {
 	union {
 		__be32 src_ipv4;
@@ -51,6 +52,18 @@ struct qede_arfs_tuple {
 	__be16  dst_port;
 	__be16  eth_proto;
 	u8      ip_proto;
+
+	/* Describe filtering mode needed for this kind of filter */
+	enum qed_filter_config_mode mode;
+
+	/* Used to compare new/old filters. Return true if IPs match */
+	bool (*ip_comp)(struct qede_arfs_tuple *a, struct qede_arfs_tuple *b);
+
+	/* Given an address into ethhdr build a header from tuple info */
+	void (*build_hdr)(struct qede_arfs_tuple *t, void *header);
+
+	/* Stringify the tuple for a print into the provided buffer */
+	void (*stringify)(struct qede_arfs_tuple *t, void *buffer);
 };
 
 struct qede_arfs_fltr_node {
@@ -73,9 +86,11 @@ struct qede_arfs_fltr_node {
 	u16 sw_id;
 	u16 rxq_id;
 	u16 next_rxq_id;
+	u8 vfid;
 	bool filter_op;
 	bool used;
 	u8 fw_rc;
+	bool b_is_drop;
 	struct hlist_node node;
 };
 
@@ -90,7 +105,9 @@ struct qede_arfs {
 	spinlock_t arfs_list_lock;
 	unsigned long *arfs_fltr_bmap;
 	int filter_count;
-	bool enable;
+
+	/* Currently configured filtering mode */
+	enum qed_filter_config_mode mode;
 };
 
 static void qede_configure_arfs_fltr(struct qede_dev *edev,
@@ -109,12 +126,22 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
 	params.length = n->buf_len;
 	params.qid = rxq_id;
 	params.b_is_add = add_fltr;
+	params.b_is_drop = n->b_is_drop;
+
+	if (n->vfid) {
+		params.b_is_vf = true;
+		params.vf_id = n->vfid - 1;
+	}
 
-	DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
-		   "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
-		   add_fltr ? "Adding" : "Deleting",
-		   n->flow_id, n->sw_id, ntohs(n->tuple.src_port),
-		   ntohs(n->tuple.dst_port), rxq_id);
+	if (n->tuple.stringify) {
+		char tuple_buffer[QEDE_FILTER_PRINT_MAX_LEN];
+
+		n->tuple.stringify(&n->tuple, tuple_buffer);
+		DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
+			   "%s sw_id[0x%x]: %s [vf %u queue %d]\n",
+			   add_fltr ? "Adding" : "Deleting",
+			   n->sw_id, tuple_buffer, n->vfid, rxq_id);
+	}
 
 	n->used = true;
 	n->filter_op = add_fltr;
@@ -145,14 +172,13 @@ qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
 	INIT_HLIST_NODE(&fltr->node);
 	hlist_add_head(&fltr->node,
 		       QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));
-	edev->arfs->filter_count++;
-
-	if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
-		enum qed_filter_config_mode mode;
 
-		mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
-		edev->ops->configure_arfs_searcher(edev->cdev, mode);
-		edev->arfs->enable = true;
+	edev->arfs->filter_count++;
+	if (edev->arfs->filter_count == 1 &&
+	    edev->arfs->mode == QED_FILTER_CONFIG_MODE_DISABLE) {
+		edev->ops->configure_arfs_searcher(edev->cdev,
+						   fltr->tuple.mode);
+		edev->arfs->mode = fltr->tuple.mode;
 	}
 
 	return 0;
@@ -167,14 +193,15 @@ qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
 			 fltr->buf_len, DMA_TO_DEVICE);
 
 	qede_free_arfs_filter(edev, fltr);
-	edev->arfs->filter_count--;
 
-	if (!edev->arfs->filter_count && edev->arfs->enable) {
+	edev->arfs->filter_count--;
+	if (!edev->arfs->filter_count &&
+	    edev->arfs->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
 		enum qed_filter_config_mode mode;
 
 		mode = QED_FILTER_CONFIG_MODE_DISABLE;
-		edev->arfs->enable = false;
 		edev->ops->configure_arfs_searcher(edev->cdev, mode);
+		edev->arfs->mode = QED_FILTER_CONFIG_MODE_DISABLE;
 	}
 }
 
@@ -264,25 +291,17 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
 		}
 	}
 
+#ifdef CONFIG_RFS_ACCEL
 	spin_lock_bh(&edev->arfs->arfs_list_lock);
 
-	if (!edev->arfs->filter_count) {
-		if (edev->arfs->enable) {
-			enum qed_filter_config_mode mode;
-
-			mode = QED_FILTER_CONFIG_MODE_DISABLE;
-			edev->arfs->enable = false;
-			edev->ops->configure_arfs_searcher(edev->cdev, mode);
-		}
-#ifdef CONFIG_RFS_ACCEL
-	} else {
+	if (edev->arfs->filter_count) {
 		set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
 		schedule_delayed_work(&edev->sp_task,
 				      QEDE_SP_TASK_POLL_DELAY);
-#endif
 	}
 
 	spin_unlock_bh(&edev->arfs->arfs_list_lock);
+#endif
 }
 
 /* This function waits until all aRFS filters get deleted and freed.
@@ -512,6 +531,7 @@ int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	eth->h_proto = skb->protocol;
 	n->tuple.eth_proto = skb->protocol;
 	n->tuple.ip_proto = ip_proto;
+	n->tuple.mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
 	memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));
 
 	rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx);
@@ -550,8 +570,7 @@ void qede_force_mac(void *dev, u8 *mac, bool forced)
 
 	__qede_lock(edev);
 
-	/* MAC hints take effect only if we haven't set one already */
-	if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) {
+	if (!is_valid_ether_addr(mac)) {
 		__qede_unlock(edev);
 		return;
 	}
@@ -1161,6 +1180,10 @@ int qede_set_mac_addr(struct net_device *ndev, void *p)
 	if (edev->state != QEDE_STATE_OPEN) {
 		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
 			   "The device is currently down\n");
+		/* Ask PF to explicitly update a copy in bulletin board */
+		if (IS_VF(edev) && edev->ops->req_bulletin_update_mac)
+			edev->ops->req_bulletin_update_mac(edev->cdev,
+							   ndev->dev_addr);
 		goto out;
 	}
 
@@ -1336,38 +1359,6 @@ qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location)
 	return NULL;
 }
 
-static bool
-qede_compare_user_flow_ips(struct qede_arfs_fltr_node *tpos,
-			   struct ethtool_rx_flow_spec *fsp,
-			   __be16 proto)
-{
-	if (proto == htons(ETH_P_IP)) {
-		struct ethtool_tcpip4_spec *ip;
-
-		ip = &fsp->h_u.tcp_ip4_spec;
-
-		if (tpos->tuple.src_ipv4 == ip->ip4src &&
-		    tpos->tuple.dst_ipv4 == ip->ip4dst)
-			return true;
-		else
-			return false;
-	} else {
-		struct ethtool_tcpip6_spec *ip6;
-		struct in6_addr *src;
-
-		ip6 = &fsp->h_u.tcp_ip6_spec;
-		src = &tpos->tuple.src_ipv6;
-
-		if (!memcmp(src, &ip6->ip6src, sizeof(struct in6_addr)) &&
-		    !memcmp(&tpos->tuple.dst_ipv6, &ip6->ip6dst,
-			    sizeof(struct in6_addr)))
-			return true;
-		else
-			return false;
-	}
-	return false;
-}
-
 int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
 			  u32 *rule_locs)
 {
@@ -1452,102 +1443,444 @@ int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
 
 	fsp->ring_cookie = fltr->rxq_id;
 
+	if (fltr->vfid) {
+		fsp->ring_cookie |= ((u64)fltr->vfid) <<
+					ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+	}
+
+	if (fltr->b_is_drop)
+		fsp->ring_cookie = RX_CLS_FLOW_DISC;
 unlock:
 	__qede_unlock(edev);
 	return rc;
 }
 
 static int
-qede_validate_and_check_flow_exist(struct qede_dev *edev,
-				   struct ethtool_rx_flow_spec *fsp,
-				   int *min_hlen)
+qede_poll_arfs_filter_config(struct qede_dev *edev,
+			     struct qede_arfs_fltr_node *fltr)
 {
-	__be16 src_port = 0x0, dst_port = 0x0;
-	struct qede_arfs_fltr_node *fltr;
-	struct hlist_node *temp;
-	struct hlist_head *head;
-	__be16 eth_proto;
-	u8 ip_proto;
+	int count = QEDE_ARFS_POLL_COUNT;
 
-	if (fsp->location >= QEDE_RFS_MAX_FLTR ||
-	    fsp->ring_cookie >= QEDE_RSS_COUNT(edev))
-		return -EINVAL;
+	while (fltr->used && count) {
+		msleep(20);
+		count--;
+	}
+
+	if (count == 0 || fltr->fw_rc) {
+		DP_NOTICE(edev, "Timeout in polling filter config\n");
+		qede_dequeue_fltr_and_config_searcher(edev, fltr);
+		return -EIO;
+	}
+
+	return fltr->fw_rc;
+}
+
+static int qede_flow_get_min_header_size(struct qede_arfs_tuple *t)
+{
+	int size = ETH_HLEN;
+
+	if (t->eth_proto == htons(ETH_P_IP))
+		size += sizeof(struct iphdr);
+	else
+		size += sizeof(struct ipv6hdr);
+
+	if (t->ip_proto == IPPROTO_TCP)
+		size += sizeof(struct tcphdr);
+	else
+		size += sizeof(struct udphdr);
+
+	return size;
+}
+
+static bool qede_flow_spec_ipv4_cmp(struct qede_arfs_tuple *a,
+				    struct qede_arfs_tuple *b)
+{
+	if (a->eth_proto != htons(ETH_P_IP) ||
+	    b->eth_proto != htons(ETH_P_IP))
+		return false;
+
+	return (a->src_ipv4 == b->src_ipv4) &&
+	       (a->dst_ipv4 == b->dst_ipv4);
+}
+
+static void qede_flow_build_ipv4_hdr(struct qede_arfs_tuple *t,
+				     void *header)
+{
+	__be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct iphdr));
+	struct iphdr *ip = (struct iphdr *)(header + ETH_HLEN);
+	struct ethhdr *eth = (struct ethhdr *)header;
+
+	eth->h_proto = t->eth_proto;
+	ip->saddr = t->src_ipv4;
+	ip->daddr = t->dst_ipv4;
+	ip->version = 0x4;
+	ip->ihl = 0x5;
+	ip->protocol = t->ip_proto;
+	ip->tot_len = cpu_to_be16(qede_flow_get_min_header_size(t) - ETH_HLEN);
+
+	/* ports is weakly typed to suit both TCP and UDP ports */
+	ports[0] = t->src_port;
+	ports[1] = t->dst_port;
+}
+
+static void qede_flow_stringify_ipv4_hdr(struct qede_arfs_tuple *t,
+					 void *buffer)
+{
+	const char *prefix = t->ip_proto == IPPROTO_TCP ? "TCP" : "UDP";
+
+	snprintf(buffer, QEDE_FILTER_PRINT_MAX_LEN,
+		 "%s %pI4 (%04x) -> %pI4 (%04x)",
+		 prefix, &t->src_ipv4, t->src_port,
+		 &t->dst_ipv4, t->dst_port);
+}
+
+static bool qede_flow_spec_ipv6_cmp(struct qede_arfs_tuple *a,
+				    struct qede_arfs_tuple *b)
+{
+	if (a->eth_proto != htons(ETH_P_IPV6) ||
+	    b->eth_proto != htons(ETH_P_IPV6))
+		return false;
+
+	if (memcmp(&a->src_ipv6, &b->src_ipv6, sizeof(struct in6_addr)))
+		return false;
 
-	if (fsp->flow_type == TCP_V4_FLOW) {
-		*min_hlen += sizeof(struct iphdr) +
-			sizeof(struct tcphdr);
-		eth_proto = htons(ETH_P_IP);
-		ip_proto = IPPROTO_TCP;
-	} else if (fsp->flow_type == UDP_V4_FLOW) {
-		*min_hlen += sizeof(struct iphdr) +
-			sizeof(struct udphdr);
-		eth_proto = htons(ETH_P_IP);
-		ip_proto = IPPROTO_UDP;
-	} else if (fsp->flow_type == TCP_V6_FLOW) {
-		*min_hlen += sizeof(struct ipv6hdr) +
-			sizeof(struct tcphdr);
-		eth_proto = htons(ETH_P_IPV6);
-		ip_proto = IPPROTO_TCP;
-	} else if (fsp->flow_type == UDP_V6_FLOW) {
-		*min_hlen += sizeof(struct ipv6hdr) +
-			sizeof(struct udphdr);
-		eth_proto = htons(ETH_P_IPV6);
-		ip_proto = IPPROTO_UDP;
+	if (memcmp(&a->dst_ipv6, &b->dst_ipv6, sizeof(struct in6_addr)))
+		return false;
+
+	return true;
+}
+
+static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t,
+				     void *header)
+{
+	__be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct ipv6hdr));
+	struct ipv6hdr *ip6 = (struct ipv6hdr *)(header + ETH_HLEN);
+	struct ethhdr *eth = (struct ethhdr *)header;
+
+	eth->h_proto = t->eth_proto;
+	memcpy(&ip6->saddr, &t->src_ipv6, sizeof(struct in6_addr));
+	memcpy(&ip6->daddr, &t->dst_ipv6, sizeof(struct in6_addr));
+	ip6->version = 0x6;
+
+	if (t->ip_proto == IPPROTO_TCP) {
+		ip6->nexthdr = NEXTHDR_TCP;
+		ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
 	} else {
-		DP_NOTICE(edev, "Unsupported flow type = 0x%x\n",
-			  fsp->flow_type);
-		return -EPROTONOSUPPORT;
+		ip6->nexthdr = NEXTHDR_UDP;
+		ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
 	}
 
-	if (eth_proto == htons(ETH_P_IP)) {
-		src_port = fsp->h_u.tcp_ip4_spec.psrc;
-		dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+	/* ports is weakly typed to suit both TCP and UDP ports */
+	ports[0] = t->src_port;
+	ports[1] = t->dst_port;
+}
+
+/* Validate fields which are set and not accepted by the driver */
+static int qede_flow_spec_validate_unused(struct qede_dev *edev,
+					  struct ethtool_rx_flow_spec *fs)
+{
+	if (fs->flow_type & FLOW_MAC_EXT) {
+		DP_INFO(edev, "Don't support MAC extensions\n");
+		return -EOPNOTSUPP;
+	}
+
+	if ((fs->flow_type & FLOW_EXT) &&
+	    (fs->h_ext.vlan_etype || fs->h_ext.vlan_tci)) {
+		DP_INFO(edev, "Don't support vlan-based classification\n");
+		return -EOPNOTSUPP;
+	}
+
+	if ((fs->flow_type & FLOW_EXT) &&
+	    (fs->h_ext.data[0] || fs->h_ext.data[1])) {
+		DP_INFO(edev, "Don't support user defined data\n");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
+					       struct qede_arfs_tuple *t,
+					       struct ethtool_rx_flow_spec *fs)
+{
+	if ((fs->h_u.tcp_ip4_spec.ip4src &
+	     fs->m_u.tcp_ip4_spec.ip4src) != fs->h_u.tcp_ip4_spec.ip4src) {
+		DP_INFO(edev, "Don't support IP-masks\n");
+		return -EOPNOTSUPP;
+	}
+
+	if ((fs->h_u.tcp_ip4_spec.ip4dst &
+	     fs->m_u.tcp_ip4_spec.ip4dst) != fs->h_u.tcp_ip4_spec.ip4dst) {
+		DP_INFO(edev, "Don't support IP-masks\n");
+		return -EOPNOTSUPP;
+	}
+
+	if ((fs->h_u.tcp_ip4_spec.psrc &
+	     fs->m_u.tcp_ip4_spec.psrc) != fs->h_u.tcp_ip4_spec.psrc) {
+		DP_INFO(edev, "Don't support port-masks\n");
+		return -EOPNOTSUPP;
+	}
+
+	if ((fs->h_u.tcp_ip4_spec.pdst &
+	     fs->m_u.tcp_ip4_spec.pdst) != fs->h_u.tcp_ip4_spec.pdst) {
+		DP_INFO(edev, "Don't support port-masks\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (fs->h_u.tcp_ip4_spec.tos) {
+		DP_INFO(edev, "Don't support tos\n");
+		return -EOPNOTSUPP;
+	}
+
+	t->eth_proto = htons(ETH_P_IP);
+	t->src_ipv4 = fs->h_u.tcp_ip4_spec.ip4src;
+	t->dst_ipv4 = fs->h_u.tcp_ip4_spec.ip4dst;
+	t->src_port = fs->h_u.tcp_ip4_spec.psrc;
+	t->dst_port = fs->h_u.tcp_ip4_spec.pdst;
+
+	/* We must either have a valid 4-tuple or only dst port
+	 * or only src ip as an input
+	 */
+	if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
+		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+	} else if (!t->src_port && t->dst_port &&
+		   !t->src_ipv4 && !t->dst_ipv4) {
+		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
+	} else if (!t->src_port && !t->dst_port &&
+		   !t->dst_ipv4 && t->src_ipv4) {
+		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
 	} else {
-		src_port = fsp->h_u.tcp_ip6_spec.psrc;
-		dst_port = fsp->h_u.tcp_ip6_spec.pdst;
+		DP_INFO(edev, "Invalid N-tuple\n");
+		return -EOPNOTSUPP;
 	}
 
-	head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
-	hlist_for_each_entry_safe(fltr, temp, head, node) {
-		if ((fltr->tuple.ip_proto == ip_proto &&
-		     fltr->tuple.eth_proto == eth_proto &&
-		     qede_compare_user_flow_ips(fltr, fsp, eth_proto) &&
-		     fltr->tuple.src_port == src_port &&
-		     fltr->tuple.dst_port == dst_port) ||
-		    fltr->sw_id == fsp->location)
-			return -EEXIST;
+	t->ip_comp = qede_flow_spec_ipv4_cmp;
+	t->build_hdr = qede_flow_build_ipv4_hdr;
+	t->stringify = qede_flow_stringify_ipv4_hdr;
+
+	return 0;
+}
+
+static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev,
+					 struct qede_arfs_tuple *t,
+					 struct ethtool_rx_flow_spec *fs)
+{
+	t->ip_proto = IPPROTO_TCP;
+
+	if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int qede_flow_spec_to_tuple_udpv4(struct qede_dev *edev,
+					 struct qede_arfs_tuple *t,
+					 struct ethtool_rx_flow_spec *fs)
+{
+	t->ip_proto = IPPROTO_UDP;
+
+	if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
+					       struct qede_arfs_tuple *t,
+					       struct ethtool_rx_flow_spec *fs)
+{
+	struct in6_addr zero_addr;
+	void *p;
+
+	p = &zero_addr;
+	memset(p, 0, sizeof(zero_addr));
+
+	if ((fs->h_u.tcp_ip6_spec.psrc &
+	     fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) {
+		DP_INFO(edev, "Don't support port-masks\n");
+		return -EOPNOTSUPP;
+	}
+
+	if ((fs->h_u.tcp_ip6_spec.pdst &
+	     fs->m_u.tcp_ip6_spec.pdst) != fs->h_u.tcp_ip6_spec.pdst) {
+		DP_INFO(edev, "Don't support port-masks\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (fs->h_u.tcp_ip6_spec.tclass) {
+		DP_INFO(edev, "Don't support tclass\n");
+		return -EOPNOTSUPP;
 	}
 
+	t->eth_proto = htons(ETH_P_IPV6);
+	memcpy(&t->src_ipv6, &fs->h_u.tcp_ip6_spec.ip6src,
+	       sizeof(struct in6_addr));
+	memcpy(&t->dst_ipv6, &fs->h_u.tcp_ip6_spec.ip6dst,
+	       sizeof(struct in6_addr));
+	t->src_port = fs->h_u.tcp_ip6_spec.psrc;
+	t->dst_port = fs->h_u.tcp_ip6_spec.pdst;
+
+	/* We must make sure we have a valid 4-tuple or only dest port
+	 * or only src ip as an input
+	 */
+	if (t->src_port && t->dst_port &&
+	    memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
+	    memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
+		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+	} else if (!t->src_port && t->dst_port &&
+		   !memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
+		   !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
+		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
+	} else if (!t->src_port && !t->dst_port &&
+		   !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr)) &&
+		   memcmp(&t->src_ipv6, p, sizeof(struct in6_addr))) {
+		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
+	} else {
+		DP_INFO(edev, "Invalid N-tuple\n");
+		return -EOPNOTSUPP;
+	}
+
+	t->ip_comp = qede_flow_spec_ipv6_cmp;
+	t->build_hdr = qede_flow_build_ipv6_hdr;
+
 	return 0;
 }
 
-static int
-qede_poll_arfs_filter_config(struct qede_dev *edev,
-			     struct qede_arfs_fltr_node *fltr)
+static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev,
+					 struct qede_arfs_tuple *t,
+					 struct ethtool_rx_flow_spec *fs)
 {
-	int count = QEDE_ARFS_POLL_COUNT;
+	t->ip_proto = IPPROTO_TCP;
 
-	while (fltr->used && count) {
-		msleep(20);
-		count--;
+	if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int qede_flow_spec_to_tuple_udpv6(struct qede_dev *edev,
+					 struct qede_arfs_tuple *t,
+					 struct ethtool_rx_flow_spec *fs)
+{
+	t->ip_proto = IPPROTO_UDP;
+
+	if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int qede_flow_spec_to_tuple(struct qede_dev *edev,
+				   struct qede_arfs_tuple *t,
+				   struct ethtool_rx_flow_spec *fs)
+{
+	memset(t, 0, sizeof(*t));
+
+	if (qede_flow_spec_validate_unused(edev, fs))
+		return -EOPNOTSUPP;
+
+	switch ((fs->flow_type & ~FLOW_EXT)) {
+	case TCP_V4_FLOW:
+		return qede_flow_spec_to_tuple_tcpv4(edev, t, fs);
+	case UDP_V4_FLOW:
+		return qede_flow_spec_to_tuple_udpv4(edev, t, fs);
+	case TCP_V6_FLOW:
+		return qede_flow_spec_to_tuple_tcpv6(edev, t, fs);
+	case UDP_V6_FLOW:
+		return qede_flow_spec_to_tuple_udpv6(edev, t, fs);
+	default:
+		DP_VERBOSE(edev, NETIF_MSG_IFUP,
+			   "Can't support flow of type %08x\n", fs->flow_type);
+		return -EOPNOTSUPP;
 	}
 
-	if (count == 0 || fltr->fw_rc) {
-		qede_dequeue_fltr_and_config_searcher(edev, fltr);
-		return -EIO;
+	return 0;
+}
+
+static int qede_flow_spec_validate(struct qede_dev *edev,
+				   struct ethtool_rx_flow_spec *fs,
+				   struct qede_arfs_tuple *t)
+{
+	if (fs->location >= QEDE_RFS_MAX_FLTR) {
+		DP_INFO(edev, "Location out-of-bounds\n");
+		return -EINVAL;
 	}
 
-	return fltr->fw_rc;
+	/* Check location isn't already in use */
+	if (test_bit(fs->location, edev->arfs->arfs_fltr_bmap)) {
+		DP_INFO(edev, "Location already in use\n");
+		return -EINVAL;
+	}
+
+	/* Check if the filtering-mode could support the filter */
+	if (edev->arfs->filter_count &&
+	    edev->arfs->mode != t->mode) {
+		DP_INFO(edev,
+			"flow_spec would require filtering mode %08x, but %08x is configured\n",
+			t->mode, edev->arfs->filter_count);
+		return -EINVAL;
+	}
+
+	/* If drop requested then no need to validate other data */
+	if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+		return 0;
+
+	if (ethtool_get_flow_spec_ring_vf(fs->ring_cookie))
+		return 0;
+
+	if (fs->ring_cookie >= QEDE_RSS_COUNT(edev)) {
+		DP_INFO(edev, "Queue out-of-bounds\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Must be called while qede lock is held */
+static struct qede_arfs_fltr_node *
+qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t)
+{
+	struct qede_arfs_fltr_node *fltr;
+	struct hlist_node *temp;
+	struct hlist_head *head;
+
+	head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
+
+	hlist_for_each_entry_safe(fltr, temp, head, node) {
+		if (fltr->tuple.ip_proto == t->ip_proto &&
+		    fltr->tuple.src_port == t->src_port &&
+		    fltr->tuple.dst_port == t->dst_port &&
+		    t->ip_comp(&fltr->tuple, t))
+			return fltr;
+	}
+
+	return NULL;
+}
+
+static void qede_flow_set_destination(struct qede_dev *edev,
+				      struct qede_arfs_fltr_node *n,
+				      struct ethtool_rx_flow_spec *fs)
+{
+	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
+		n->b_is_drop = true;
+		return;
+	}
+
+	n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+	n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie);
+	n->next_rxq_id = n->rxq_id;
+
+	if (n->vfid)
+		DP_VERBOSE(edev, QED_MSG_SP,
+			   "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1);
 }
 
 int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
 {
 	struct ethtool_rx_flow_spec *fsp = &info->fs;
 	struct qede_arfs_fltr_node *n;
-	int min_hlen = ETH_HLEN, rc;
-	struct ethhdr *eth;
-	struct iphdr *ip;
-	__be16 *ports;
+	struct qede_arfs_tuple t;
+	int min_hlen, rc;
 
 	__qede_lock(edev);
 
@@ -1556,16 +1889,28 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
 		goto unlock;
 	}
 
-	rc = qede_validate_and_check_flow_exist(edev, fsp, &min_hlen);
+	/* Translate the flow specification into something fitting our DB */
+	rc = qede_flow_spec_to_tuple(edev, &t, fsp);
 	if (rc)
 		goto unlock;
 
+	/* Make sure location is valid and filter isn't already set */
+	rc = qede_flow_spec_validate(edev, fsp, &t);
+	if (rc)
+		goto unlock;
+
+	if (qede_flow_find_fltr(edev, &t)) {
+		rc = -EINVAL;
+		goto unlock;
+	}
+
 	n = kzalloc(sizeof(*n), GFP_KERNEL);
 	if (!n) {
 		rc = -ENOMEM;
 		goto unlock;
 	}
 
+	min_hlen = qede_flow_get_min_header_size(&t);
 	n->data = kzalloc(min_hlen, GFP_KERNEL);
 	if (!n->data) {
 		kfree(n);
@@ -1576,68 +1921,13 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
 	n->sw_id = fsp->location;
 	set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
 	n->buf_len = min_hlen;
-	n->rxq_id = fsp->ring_cookie;
-	n->next_rxq_id = n->rxq_id;
 
-	eth = (struct ethhdr *)n->data;
-
-	if (info->fs.flow_type == TCP_V4_FLOW ||
-	    info->fs.flow_type == UDP_V4_FLOW) {
-		ports = (__be16 *)(n->data + ETH_HLEN +
-				   sizeof(struct iphdr));
-		eth->h_proto = htons(ETH_P_IP);
-		n->tuple.eth_proto = htons(ETH_P_IP);
-		n->tuple.src_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4src;
-		n->tuple.dst_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4dst;
-		n->tuple.src_port = info->fs.h_u.tcp_ip4_spec.psrc;
-		n->tuple.dst_port = info->fs.h_u.tcp_ip4_spec.pdst;
-		ports[0] = n->tuple.src_port;
-		ports[1] = n->tuple.dst_port;
-		ip = (struct iphdr *)(n->data + ETH_HLEN);
-		ip->saddr = info->fs.h_u.tcp_ip4_spec.ip4src;
-		ip->daddr = info->fs.h_u.tcp_ip4_spec.ip4dst;
-		ip->version = 0x4;
-		ip->ihl = 0x5;
-
-		if (info->fs.flow_type == TCP_V4_FLOW) {
-			n->tuple.ip_proto = IPPROTO_TCP;
-			ip->protocol = IPPROTO_TCP;
-		} else {
-			n->tuple.ip_proto = IPPROTO_UDP;
-			ip->protocol = IPPROTO_UDP;
-		}
-		ip->tot_len = cpu_to_be16(min_hlen - ETH_HLEN);
-	} else {
-		struct ipv6hdr *ip6;
-
-		ip6 = (struct ipv6hdr *)(n->data + ETH_HLEN);
-		ports = (__be16 *)(n->data + ETH_HLEN +
-				   sizeof(struct ipv6hdr));
-		eth->h_proto = htons(ETH_P_IPV6);
-		n->tuple.eth_proto = htons(ETH_P_IPV6);
-		memcpy(&n->tuple.src_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6src,
-		       sizeof(struct in6_addr));
-		memcpy(&n->tuple.dst_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6dst,
-		       sizeof(struct in6_addr));
-		n->tuple.src_port = info->fs.h_u.tcp_ip6_spec.psrc;
-		n->tuple.dst_port = info->fs.h_u.tcp_ip6_spec.pdst;
-		ports[0] = n->tuple.src_port;
-		ports[1] = n->tuple.dst_port;
-		memcpy(&ip6->saddr, &n->tuple.src_ipv6,
-		       sizeof(struct in6_addr));
-		memcpy(&ip6->daddr, &n->tuple.dst_ipv6,
-		       sizeof(struct in6_addr));
-		ip6->version = 0x6;
+	memcpy(&n->tuple, &t, sizeof(n->tuple));
 
-		if (info->fs.flow_type == TCP_V6_FLOW) {
-			n->tuple.ip_proto = IPPROTO_TCP;
-			ip6->nexthdr = NEXTHDR_TCP;
-			ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
-		} else {
-			n->tuple.ip_proto = IPPROTO_UDP;
-			ip6->nexthdr = NEXTHDR_UDP;
-			ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
-		}
-	}
+	qede_flow_set_destination(edev, n, fsp);
+
+	/* Build a minimal header according to the flow */
+	n->tuple.build_hdr(&n->tuple, n->data);
 
 	rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
 	if (rc)
@@ -1647,6 +1937,7 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
 	rc = qede_poll_arfs_filter_config(edev, n);
 unlock:
 	__qede_unlock(edev);
+
 	return rc;
 }
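The rework above routes every ethtool n-tuple rule through qede_flow_spec_to_tuple() and qede_flow_spec_validate(). A minimal userspace sketch of driving that path through the standard ETHTOOL_SRXCLSRLINS ioctl follows; it is not part of the commit, and the interface name "eth0", port 80 and location 0 are assumed values. It installs a dst-port-only drop rule, which exercises the new RX_CLS_FLOW_DISC handling and the QED_FILTER_CONFIG_MODE_L4_PORT selection.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd, rc;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	/* Only the destination port is given; per
	 * qede_flow_spec_to_tuple_ipv4_common() above this selects
	 * QED_FILTER_CONFIG_MODE_L4_PORT.
	 */
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
	nfc.fs.m_u.tcp_ip4_spec.pdst = 0xffff;	/* full mask: all bits compared */
	/* RX_CLS_FLOW_DISC requests the new b_is_drop behaviour */
	nfc.fs.ring_cookie = RX_CLS_FLOW_DISC;
	nfc.fs.location = 0;	/* assumed free slot */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed ifname */
	ifr.ifr_data = (void *)&nfc;

	rc = ioctl(fd, SIOCETHTOOL, &ifr);
	if (rc < 0)
		perror("ETHTOOL_SRXCLSRLINS");

	close(fd);
	return rc < 0;
}

Steering to a VF instead of dropping would encode the VF index into ring_cookie at ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF, which qede_flow_set_destination() above decodes with ethtool_get_flow_spec_ring_vf().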
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 14941303189d..6c702399b801 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -660,7 +660,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
 	/* Add one frag and update the appropriate fields in the skb */
 	skb_fill_page_desc(skb, tpa_info->frag_id++,
-			   current_bd->data, current_bd->page_offset,
+			   current_bd->data,
+			   current_bd->page_offset + rxq->rx_headroom,
 			   len_on_bd);
 
 	if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
@@ -671,8 +672,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
 		goto out;
 	}
 
-	qed_chain_consume(&rxq->rx_bd_ring);
-	rxq->sw_rx_cons++;
+	qede_rx_bd_ring_consume(rxq);
 
 	skb->data_len += len_on_bd;
 	skb->truesize += rxq->rx_buf_seg_size;
@@ -721,64 +721,129 @@ static u8 qede_check_tunn_csum(u16 flag)
 	return QEDE_CSUM_UNNECESSARY | tcsum;
 }
 
+static inline struct sk_buff *
+qede_build_skb(struct qede_rx_queue *rxq,
+	       struct sw_rx_data *bd, u16 len, u16 pad)
+{
+	struct sk_buff *skb;
+	void *buf;
+
+	buf = page_address(bd->data) + bd->page_offset;
+	skb = build_skb(buf, rxq->rx_buf_seg_size);
+
+	skb_reserve(skb, pad);
+	skb_put(skb, len);
+
+	return skb;
+}
+
+static struct sk_buff *
+qede_tpa_rx_build_skb(struct qede_dev *edev,
+		      struct qede_rx_queue *rxq,
+		      struct sw_rx_data *bd, u16 len, u16 pad,
+		      bool alloc_skb)
+{
+	struct sk_buff *skb;
+
+	skb = qede_build_skb(rxq, bd, len, pad);
+	bd->page_offset += rxq->rx_buf_seg_size;
+
+	if (bd->page_offset == PAGE_SIZE) {
+		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
+			DP_NOTICE(edev,
+				  "Failed to allocate RX buffer for tpa start\n");
+			bd->page_offset -= rxq->rx_buf_seg_size;
+			page_ref_inc(bd->data);
+			dev_kfree_skb_any(skb);
+			return NULL;
+		}
+	} else {
+		page_ref_inc(bd->data);
+		qede_reuse_page(rxq, bd);
+	}
+
+	/* We've consumed the first BD and prepared an SKB */
+	qede_rx_bd_ring_consume(rxq);
+
+	return skb;
+}
+
+static struct sk_buff *
+qede_rx_build_skb(struct qede_dev *edev,
+		  struct qede_rx_queue *rxq,
+		  struct sw_rx_data *bd, u16 len, u16 pad)
+{
+	struct sk_buff *skb = NULL;
+
+	/* For smaller frames still need to allocate skb, memcpy
+	 * data and benefit in reusing the page segment instead of
+	 * un-mapping it.
+	 */
+	if ((len + pad <= edev->rx_copybreak)) {
+		unsigned int offset = bd->page_offset + pad;
+
+		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
+		if (unlikely(!skb))
+			return NULL;
+
+		skb_reserve(skb, pad);
+		memcpy(skb_put(skb, len),
+		       page_address(bd->data) + offset, len);
+		qede_reuse_page(rxq, bd);
+		goto out;
+	}
+
+	skb = qede_build_skb(rxq, bd, len, pad);
+
+	if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
+		/* Incr page ref count to reuse on allocation failure so
+		 * that it doesn't get freed while freeing SKB [as its
+		 * already mapped there].
+		 */
+		page_ref_inc(bd->data);
+		dev_kfree_skb_any(skb);
+		return NULL;
+	}
+out:
+	/* We've consumed the first BD and prepared an SKB */
+	qede_rx_bd_ring_consume(rxq);
+
+	return skb;
+}
+
 static void qede_tpa_start(struct qede_dev *edev,
 			   struct qede_rx_queue *rxq,
 			   struct eth_fast_path_rx_tpa_start_cqe *cqe)
 {
 	struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
-	struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
-	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
-	struct sw_rx_data *replace_buf = &tpa_info->buffer;
-	dma_addr_t mapping = tpa_info->buffer_mapping;
 	struct sw_rx_data *sw_rx_data_cons;
-	struct sw_rx_data *sw_rx_data_prod;
+	u16 pad;
 
 	sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
-	sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+	pad = cqe->placement_offset + rxq->rx_headroom;
 
-	/* Use pre-allocated replacement buffer - we can't release the agg.
-	 * start until its over and we don't want to risk allocation failing
-	 * here, so re-allocate when aggregation will be over.
-	 */
-	sw_rx_data_prod->mapping = replace_buf->mapping;
-
-	sw_rx_data_prod->data = replace_buf->data;
-	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
-	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
-	sw_rx_data_prod->page_offset = replace_buf->page_offset;
-
-	rxq->sw_rx_prod++;
+	tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons,
+					      le16_to_cpu(cqe->len_on_first_bd),
+					      pad, false);
+	tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset;
+	tpa_info->buffer.mapping = sw_rx_data_cons->mapping;
 
-	/* move partial skb from cons to pool (don't unmap yet)
-	 * save mapping, incase we drop the packet later on.
-	 */
-	tpa_info->buffer = *sw_rx_data_cons;
-	mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
-			   le32_to_cpu(rx_bd_cons->addr.lo));
-
-	tpa_info->buffer_mapping = mapping;
-	rxq->sw_rx_cons++;
-
-	/* set tpa state to start only if we are able to allocate skb
-	 * for this aggregation, otherwise mark as error and aggregation will
-	 * be dropped
-	 */
-	tpa_info->skb = netdev_alloc_skb(edev->ndev,
-					 le16_to_cpu(cqe->len_on_first_bd));
 	if (unlikely(!tpa_info->skb)) {
 		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
+
+		/* Consume from ring but do not produce since
+		 * this might be used by FW still, it will be re-used
+		 * at TPA end.
+		 */
+		tpa_info->tpa_start_fail = true;
+		qede_rx_bd_ring_consume(rxq);
 		tpa_info->state = QEDE_AGG_STATE_ERROR;
 		goto cons_buf;
 	}
 
-	/* Start filling in the aggregation info */
-	skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
 	tpa_info->frag_id = 0;
 	tpa_info->state = QEDE_AGG_STATE_START;
 
-	/* Store some information from first CQE */
-	tpa_info->start_cqe_placement_offset = cqe->placement_offset;
-	tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
 	if ((le16_to_cpu(cqe->pars_flags.flags) >>
 	     PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
 	    PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
@@ -899,6 +964,10 @@ static int qede_tpa_end(struct qede_dev *edev,
 	tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
 	skb = tpa_info->skb;
 
+	if (tpa_info->buffer.page_offset == PAGE_SIZE)
+		dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
+			       PAGE_SIZE, rxq->data_direction);
+
 	for (i = 0; cqe->len_list[i]; i++)
 		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
 				   le16_to_cpu(cqe->len_list[i]));
@@ -919,11 +988,6 @@ static int qede_tpa_end(struct qede_dev *edev,
 			  "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
 			  le16_to_cpu(cqe->total_packet_len), skb->len);
 
-	memcpy(skb->data,
-	       page_address(tpa_info->buffer.data) +
-	       tpa_info->start_cqe_placement_offset +
-	       tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);
-
 	/* Finalize the SKB */
 	skb->protocol = eth_type_trans(skb, edev->ndev);
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -940,6 +1004,12 @@ static int qede_tpa_end(struct qede_dev *edev,
 	return 1;
 err:
 	tpa_info->state = QEDE_AGG_STATE_NONE;
+
+	if (tpa_info->tpa_start_fail) {
+		qede_reuse_page(rxq, &tpa_info->buffer);
+		tpa_info->tpa_start_fail = false;
+	}
+
 	dev_kfree_skb_any(tpa_info->skb);
 	tpa_info->skb = NULL;
 	return 0;
@@ -1058,65 +1128,6 @@ static bool qede_rx_xdp(struct qede_dev *edev,
 	return false;
 }
 
-static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
-					    struct qede_rx_queue *rxq,
-					    struct sw_rx_data *bd, u16 len,
-					    u16 pad)
-{
-	unsigned int offset = bd->page_offset + pad;
-	struct skb_frag_struct *frag;
-	struct page *page = bd->data;
-	unsigned int pull_len;
-	struct sk_buff *skb;
-	unsigned char *va;
-
-	/* Allocate a new SKB with a sufficient large header len */
-	skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
-	if (unlikely(!skb))
-		return NULL;
-
-	/* Copy data into SKB - if it's small, we can simply copy it and
-	 * re-use the already allcoated & mapped memory.
-	 */
-	if (len + pad <= edev->rx_copybreak) {
-		skb_put_data(skb, page_address(page) + offset, len);
-		qede_reuse_page(rxq, bd);
-		goto out;
-	}
-
-	frag = &skb_shinfo(skb)->frags[0];
-
-	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-			page, offset, len, rxq->rx_buf_seg_size);
-
-	va = skb_frag_address(frag);
-	pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
-
-	/* Align the pull_len to optimize memcpy */
-	memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
-
-	/* Correct the skb & frag sizes offset after the pull */
-	skb_frag_size_sub(frag, pull_len);
-	frag->page_offset += pull_len;
-	skb->data_len -= pull_len;
-	skb->tail += pull_len;
-
-	if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
-		/* Incr page ref count to reuse on allocation failure so
-		 * that it doesn't get freed while freeing SKB [as its
-		 * already mapped there].
-		 */
-		page_ref_inc(page);
-		dev_kfree_skb_any(skb);
-		return NULL;
-	}
-
-out:
-	/* We've consumed the first BD and prepared an SKB */
-	qede_rx_bd_ring_consume(rxq);
-
-	return skb;
-}
-
 static int qede_rx_build_jumbo(struct qede_dev *edev,
 			       struct qede_rx_queue *rxq,
 			       struct sk_buff *skb,
@@ -1157,7 +1168,7 @@ static int qede_rx_build_jumbo(struct qede_dev *edev,
 				PAGE_SIZE, DMA_FROM_DEVICE);
 
 		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
-				   bd->data, 0, cur_size);
+				   bd->data, rxq->rx_headroom, cur_size);
 
 		skb->truesize += PAGE_SIZE;
 		skb->data_len += cur_size;
@@ -1256,7 +1267,7 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
 	/* Basic validation passed; Need to prepare an SKB. This would also
 	 * guarantee to finally consume the first BD upon success.
 	 */
-	skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
+	skb = qede_rx_build_skb(edev, rxq, bd, len, pad);
 	if (!skb) {
 		rxq->rx_alloc_errors++;
 		qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
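With the switch to build_skb(), each page segment must now carry NET_SKB_PAD (or XDP_PACKET_HEADROOM) up front plus tail room for the shared info that build_skb() places at the end of the buffer; qede_alloc_mem_rxq() in qede_main.c below folds both into rx_buf_seg_size. A standalone sketch of that arithmetic follows; it is not part of the commit, and the constants are typical x86_64 values assumed for illustration, not taken from the driver.

#include <stdio.h>

#define PAGE_SIZE	4096
#define NET_SKB_PAD	64	/* assumed: max(32, L1_CACHE_BYTES) */
#define SHINFO_SIZE	320	/* assumed SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

static unsigned int roundup_pow_of_two(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	/* assumed NET_IP_ALIGN + ETH_OVERHEAD + 1500-byte MTU */
	unsigned int rx_buf_size = 1522;
	/* headroom plus tail room, as computed in qede_alloc_mem_rxq() */
	unsigned int size = NET_SKB_PAD + SHINFO_SIZE;
	unsigned int seg;

	if (rx_buf_size + size > PAGE_SIZE)
		rx_buf_size = PAGE_SIZE - size;

	/* non-XDP path: round the whole segment, not just the payload */
	seg = roundup_pow_of_two(size + rx_buf_size);
	printf("rx_buf_seg_size = %u => %u buffers per page\n",
	       seg, PAGE_SIZE / seg);	/* prints 2048 => 2 */
	return 0;
}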
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index a01e7d6e5442..6a796040a32c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -133,6 +133,9 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
 static void qede_remove(struct pci_dev *pdev);
 static void qede_shutdown(struct pci_dev *pdev);
 static void qede_link_update(void *dev, struct qed_link_output *link);
+static void qede_get_eth_tlv_data(void *edev, void *data);
+static void qede_get_generic_tlv_data(void *edev,
+				      struct qed_generic_tlvs *data);
 
 /* The qede lock is used to protect driver state change and driver flows that
  * are not reentrant.
@@ -199,7 +202,7 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
 	/* Enable/Disable Tx switching for PF */
 	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
-	    qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
+	    !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
 		vport_params->vport_id = 0;
 		vport_params->update_tx_switching_flg = 1;
 		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
@@ -228,6 +231,8 @@ static struct qed_eth_cb_ops qede_ll_ops = {
 		.arfs_filter_op = qede_arfs_filter_op,
 #endif
 		.link_update = qede_link_update,
+		.get_generic_tlv_data = qede_get_generic_tlv_data,
+		.get_protocol_tlv_data = qede_get_eth_tlv_data,
 	},
 	.force_mac = qede_force_mac,
 	.ports_update = qede_udp_ports_update,
@@ -342,6 +347,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
 	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
 	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
 	p_common->mac_filter_discards = stats.common.mac_filter_discards;
+	p_common->gft_filter_drop = stats.common.gft_filter_drop;
 
 	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
 	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
@@ -393,6 +399,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
 	p_common->brb_truncates = stats.common.brb_truncates;
 	p_common->brb_discards = stats.common.brb_discards;
 	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
+	p_common->link_change_count = stats.common.link_change_count;
 
 	if (QEDE_IS_BB(edev)) {
 		struct qede_stats_bb *p_bb = &edev->stats.bb;
@@ -1066,13 +1073,12 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
 
 	DP_INFO(edev, "Starting qede_remove\n");
 
+	qede_rdma_dev_remove(edev);
 	unregister_netdev(ndev);
 	cancel_delayed_work_sync(&edev->sp_task);
 
 	qede_ptp_disable(edev);
 
-	qede_rdma_dev_remove(edev);
-
 	edev->ops->common->set_power_state(cdev, PCI_D0);
 
 	pci_set_drvdata(pdev, NULL);
@@ -1197,30 +1203,8 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
 	}
 }
 
-static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
-{
-	int i;
-
-	if (edev->gro_disable)
-		return;
-
-	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
-		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
-		struct sw_rx_data *replace_buf = &tpa_info->buffer;
-
-		if (replace_buf->data) {
-			dma_unmap_page(&edev->pdev->dev,
-				       replace_buf->mapping,
-				       PAGE_SIZE, DMA_FROM_DEVICE);
-			__free_page(replace_buf->data);
-		}
-	}
-}
-
 static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
 {
-	qede_free_sge_mem(edev, rxq);
-
 	/* Free rx buffers */
 	qede_free_rx_buffers(edev, rxq);
 
@@ -1232,45 +1216,15 @@ static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
 	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
 }
 
-static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
+static void qede_set_tpa_param(struct qede_rx_queue *rxq)
 {
-	dma_addr_t mapping;
 	int i;
 
-	if (edev->gro_disable)
-		return 0;
-
 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
 		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
-		struct sw_rx_data *replace_buf = &tpa_info->buffer;
 
-		replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
-		if (unlikely(!replace_buf->data)) {
-			DP_NOTICE(edev,
-				  "Failed to allocate TPA skb pool [replacement buffer]\n");
-			goto err;
-		}
-
-		mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
-				       PAGE_SIZE, DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
-			DP_NOTICE(edev,
-				  "Failed to map TPA replacement buffer\n");
-			goto err;
-		}
-
-		replace_buf->mapping = mapping;
-		tpa_info->buffer.page_offset = 0;
-		tpa_info->buffer_mapping = mapping;
 		tpa_info->state = QEDE_AGG_STATE_NONE;
 	}
-
-	return 0;
-err:
-	qede_free_sge_mem(edev, rxq);
-	edev->gro_disable = 1;
-	edev->ndev->features &= ~NETIF_F_GRO_HW;
-	return -ENOMEM;
 }
 
 /* This function allocates all memory needed per Rx queue */
@@ -1281,19 +1235,24 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
 	rxq->num_rx_buffers = edev->q_num_rx_buffers;
 
 	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
-	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0;
+
+	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
+	size = rxq->rx_headroom +
+	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	/* Make sure that the headroom and payload fit in a single page */
-	if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE)
-		rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom;
+	if (rxq->rx_buf_size + size > PAGE_SIZE)
+		rxq->rx_buf_size = PAGE_SIZE - size;
 
-	/* Segment size to spilt a page in multiple equal parts,
+	/* Segment size to spilt a page in multiple equal parts ,
 	 * unless XDP is used in which case we'd use the entire page.
 	 */
-	if (!edev->xdp_prog)
-		rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
-	else
+	if (!edev->xdp_prog) {
+		size = size + rxq->rx_buf_size;
+		rxq->rx_buf_seg_size = roundup_pow_of_two(size);
+	} else {
 		rxq->rx_buf_seg_size = PAGE_SIZE;
+	}
 
 	/* Allocate the parallel driver ring for Rx buffers */
 	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
@@ -1337,7 +1296,8 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
 		}
 	}
 
-	rc = qede_alloc_sge_mem(edev, rxq);
+	if (!edev->gro_disable)
+		qede_set_tpa_param(rxq);
 err:
 	return rc;
 }
@@ -1928,7 +1888,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 	vport_update_params->update_vport_active_flg = 1;
 	vport_update_params->vport_active_flg = 1;
 
-	if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
+	if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
 	    qed_info->tx_switching) {
 		vport_update_params->update_tx_switching_flg = 1;
 		vport_update_params->tx_switching_flg = 1;
@@ -2178,3 +2138,99 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
 		}
 	}
 }
+
+static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+	struct netdev_queue *netdev_txq;
+
+	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+	if (netif_xmit_stopped(netdev_txq))
+		return true;
+
+	return false;
+}
+
+static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
+{
+	struct qede_dev *edev = dev;
+	struct netdev_hw_addr *ha;
+	int i;
+
+	if (edev->ndev->features & NETIF_F_IP_CSUM)
+		data->feat_flags |= QED_TLV_IP_CSUM;
+	if (edev->ndev->features & NETIF_F_TSO)
+		data->feat_flags |= QED_TLV_LSO;
+
+	ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
+	memset(data->mac[1], 0, ETH_ALEN);
+	memset(data->mac[2], 0, ETH_ALEN);
+	/* Copy the first two UC macs */
+	netif_addr_lock_bh(edev->ndev);
+	i = 1;
+	netdev_for_each_uc_addr(ha, edev->ndev) {
+		ether_addr_copy(data->mac[i++], ha->addr);
+		if (i == QED_TLV_MAC_COUNT)
+			break;
+	}
+
+	netif_addr_unlock_bh(edev->ndev);
+}
+
+static void qede_get_eth_tlv_data(void *dev, void *data)
+{
+	struct qed_mfw_tlv_eth *etlv = data;
+	struct qede_dev *edev = dev;
+	struct qede_fastpath *fp;
+	int i;
+
+	etlv->lso_maxoff_size = 0XFFFF;
+	etlv->lso_maxoff_size_set = true;
+	etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
+	etlv->lso_minseg_size_set = true;
+	etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
+	etlv->prom_mode_set = true;
+	etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
+	etlv->tx_descr_size_set = true;
+	etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
+	etlv->rx_descr_size_set = true;
+	etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
+	etlv->iov_offload_set = true;
+
+	/* Fill information regarding queues; Should be done under the qede
+	 * lock to guarantee those don't change beneath our feet.
+	 */
+	etlv->txqs_empty = true;
+	etlv->rxqs_empty = true;
+	etlv->num_txqs_full = 0;
+	etlv->num_rxqs_full = 0;
+
+	__qede_lock(edev);
+	for_each_queue(i) {
+		fp = &edev->fp_array[i];
+		if (fp->type & QEDE_FASTPATH_TX) {
+			if (fp->txq->sw_tx_cons != fp->txq->sw_tx_prod)
+				etlv->txqs_empty = false;
+			if (qede_is_txq_full(edev, fp->txq))
+				etlv->num_txqs_full++;
+		}
+		if (fp->type & QEDE_FASTPATH_RX) {
+			if (qede_has_rx_work(fp->rxq))
+				etlv->rxqs_empty = false;
+
+			/* This one is a bit tricky; Firmware might stop
+			 * placing packets if ring is not yet full.
+			 * Give an approximation.
+			 */
+			if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
+			    qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
+			    RX_RING_SIZE - 100)
+				etlv->num_rxqs_full++;
+		}
+	}
+	__qede_unlock(edev);
+
+	etlv->txqs_empty_set = true;
+	etlv->rxqs_empty_set = true;
+	etlv->num_txqs_full_set = true;
+	etlv->num_rxqs_full_set = true;
+}