Diffstat (limited to 'drivers/net/ethernet/chelsio')
-rw-r--r--  drivers/net/ethernet/chelsio/Kconfig                 |   2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cxgb2.c            |   7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/sge.c              |  17
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/subr.c             |  13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/tp.c               |   2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/common.h          |   7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c      |  14
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c   |   4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/t3_hw.c           |   2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h           | 136
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c      | 494
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h       |  23
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c             |  32
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.h             |   3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c             |   6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c           |  56
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h          |  67
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h         |  73
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h        | 459
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c  |  22
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c           |   8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h   |   4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c       |   7
23 files changed, 1333 insertions, 125 deletions
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 2de50f95798f..d40c994a4f6a 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_CHELSIO
bool "Chelsio devices"
default y
- depends on PCI || INET
+ depends on PCI
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 1d17c92f2dda..c8fdeaae56c0 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -974,8 +974,7 @@ static const struct net_device_ops cxgb_netdev_ops = {
#endif
};
-static int __devinit init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int version_printed;
@@ -1332,7 +1331,7 @@ static inline void t1_sw_reset(struct pci_dev *pdev)
pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}
-static void __devexit remove_one(struct pci_dev *pdev)
+static void remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct adapter *adapter = dev->ml_priv;
@@ -1361,7 +1360,7 @@ static struct pci_driver driver = {
.name = DRV_NAME,
.id_table = t1_pci_tbl,
.probe = init_one,
- .remove = __devexit_p(remove_one),
+ .remove = remove_one,
};
static int __init t1_init_module(void)
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 47a84359d4e4..d84872e88171 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -367,18 +367,6 @@ void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
#endif /* 0 */
-
-/*
- * get_clock() implements a ns clock (see ktime_get)
- */
-static inline ktime_t get_clock(void)
-{
- struct timespec ts;
-
- ktime_get_ts(&ts);
- return timespec_to_ktime(ts);
-}
-
/*
* tx_sched_init() allocates resources and does basic initialization.
*/
@@ -411,7 +399,7 @@ static int tx_sched_init(struct sge *sge)
static inline int sched_update_avail(struct sge *sge)
{
struct sched *s = sge->tx_sched;
- ktime_t now = get_clock();
+ ktime_t now = ktime_get();
unsigned int i;
long long delta_time_ns;
@@ -2071,8 +2059,7 @@ static void espibug_workaround(unsigned long data)
/*
* Creates a t1_sge structure and returns suggested resource parameters.
*/
-struct sge * __devinit t1_sge_create(struct adapter *adapter,
- struct sge_params *p)
+struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p)
{
struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
int i;
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index 8a43c7e19701..e0a03a31e7c4 100644
--- a/drivers/net/ethernet/chelsio/cxgb/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -892,8 +892,8 @@ static void power_sequence_xpak(adapter_t* adapter)
}
}
-int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
- struct adapter_params *p)
+int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+ struct adapter_params *p)
{
p->chip_version = bi->chip_term;
p->is_asic = (p->chip_version != CHBT_TERM_FPGA);
@@ -992,7 +992,7 @@ out_err:
/*
* Determine a card's PCI mode.
*/
-static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p)
+static void get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p)
{
static const unsigned short speed_map[] = { 33, 66, 100, 133 };
u32 pci_mode;
@@ -1028,8 +1028,8 @@ void t1_free_sw_modules(adapter_t *adapter)
t1_espi_destroy(adapter->espi);
}
-static void __devinit init_link_config(struct link_config *lc,
- const struct board_info *bi)
+static void init_link_config(struct link_config *lc,
+ const struct board_info *bi)
{
lc->supported = bi->caps;
lc->requested_speed = lc->speed = SPEED_INVALID;
@@ -1049,8 +1049,7 @@ static void __devinit init_link_config(struct link_config *lc,
* Allocate and initialize the data structures that hold the SW state of
* the Terminator HW modules.
*/
-int __devinit t1_init_sw_modules(adapter_t *adapter,
- const struct board_info *bi)
+int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi)
{
unsigned int i;
diff --git a/drivers/net/ethernet/chelsio/cxgb/tp.c b/drivers/net/ethernet/chelsio/cxgb/tp.c
index 8bed4a59e65f..b146acabf982 100644
--- a/drivers/net/ethernet/chelsio/cxgb/tp.c
+++ b/drivers/net/ethernet/chelsio/cxgb/tp.c
@@ -55,7 +55,7 @@ void t1_tp_destroy(struct petp *tp)
kfree(tp);
}
-struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p)
+struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p)
{
struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index df01b6343241..8c82248ce416 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -42,10 +42,9 @@
#include <linux/mdio.h>
#include "version.h"
-#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
-#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
-#define CH_ALERT(adap, fmt, ...) \
- dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
+#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ##__VA_ARGS__)
+#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ##__VA_ARGS__)
+#define CH_ALERT(adap, fmt, ...) dev_alert(&adap->pdev->dev, fmt, ##__VA_ARGS__)
/*
* More powerful macro that selectively prints messages based on msg_enable.
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 9c9f3260344a..f15ee326d5c1 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3078,7 +3078,7 @@ static void set_nqsets(struct adapter *adap)
}
}
-static int __devinit cxgb_enable_msix(struct adapter *adap)
+static int cxgb_enable_msix(struct adapter *adap)
{
struct msix_entry entries[SGE_QSETS + 1];
int vectors;
@@ -3108,8 +3108,7 @@ static int __devinit cxgb_enable_msix(struct adapter *adap)
return err;
}
-static void __devinit print_port_info(struct adapter *adap,
- const struct adapter_info *ai)
+static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
{
static const char *pci_variant[] = {
"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
@@ -3165,7 +3164,7 @@ static const struct net_device_ops cxgb_netdev_ops = {
#endif
};
-static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
+static void cxgb3_init_iscsi_mac(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
@@ -3176,8 +3175,7 @@ static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
-static int __devinit init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int version_printed;
@@ -3381,7 +3379,7 @@ out:
return err;
}
-static void __devexit remove_one(struct pci_dev *pdev)
+static void remove_one(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
@@ -3425,7 +3423,7 @@ static struct pci_driver driver = {
.name = DRV_NAME,
.id_table = cxgb3_pci_tbl,
.probe = init_one,
- .remove = __devexit_p(remove_one),
+ .remove = remove_one,
.err_handler = &t3_err_handler,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 2dbbcbb450d3..942dace361d2 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -1382,7 +1382,7 @@ static inline int adap2type(struct adapter *adapter)
return type;
}
-void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
+void cxgb3_adapter_ofld(struct adapter *adapter)
{
struct t3cdev *tdev = &adapter->tdev;
@@ -1396,7 +1396,7 @@ void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
register_tdev(tdev);
}
-void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
+void cxgb3_adapter_unofld(struct adapter *adapter)
{
struct t3cdev *tdev = &adapter->tdev;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index aef45d3113ba..3dee68612c9e 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -3307,7 +3307,7 @@ static void config_pcie(struct adapter *adap)
G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
log2_width = fls(adap->params.pci.width) - 1;
acklat = ack_lat[log2_width][pldsize];
- if (val & 1) /* check LOsEnable */
+ if (val & PCI_EXP_LNKCTL_ASPM_L0S) /* check LOsEnable */
acklat += fst_trn_tx * 4;
rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 378988b5709a..6db997c78a5f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -35,6 +35,8 @@
#ifndef __CXGB4_H__
#define __CXGB4_H__
+#include "t4_hw.h"
+
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
@@ -212,6 +214,8 @@ struct tp_err_stats {
struct tp_params {
unsigned int ntxchan; /* # of Tx channels */
unsigned int tre; /* log2 of core clocks per TP tick */
+ unsigned short tx_modq_map; /* TX modulation scheduler queue to */
+ /* channel map */
uint32_t dack_re; /* DACK timer resolution */
unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
@@ -526,6 +530,7 @@ struct adapter {
struct net_device *port[MAX_NPORTS];
u8 chan_map[NCHAN]; /* channel -> port map */
+ u32 filter_mode;
unsigned int l2t_start;
unsigned int l2t_end;
struct l2t_data *l2t;
@@ -545,6 +550,129 @@ struct adapter {
spinlock_t stats_lock;
};
+/* Defined bit width of user definable filter tuples
+ */
+#define ETHTYPE_BITWIDTH 16
+#define FRAG_BITWIDTH 1
+#define MACIDX_BITWIDTH 9
+#define FCOE_BITWIDTH 1
+#define IPORT_BITWIDTH 3
+#define MATCHTYPE_BITWIDTH 3
+#define PROTO_BITWIDTH 8
+#define TOS_BITWIDTH 8
+#define PF_BITWIDTH 8
+#define VF_BITWIDTH 8
+#define IVLAN_BITWIDTH 16
+#define OVLAN_BITWIDTH 16
+
+/* Filter matching rules. These consist of a set of ingress packet field
+ * (value, mask) tuples. The associated ingress packet field matches the
+ * tuple when ((field & mask) == value). (Thus a wildcard "don't care" field
+ * rule can be constructed by specifying a tuple of (0, 0).) A filter rule
+ * matches an ingress packet when all of the individual field
+ * matching rules are true.
+ *
+ * Partial field masks are always valid, however, while it may be easy to
+ * understand their meanings for some fields (e.g. IP address to match a
+ * subnet), for others making sensible partial masks is less intuitive (e.g.
+ * MPS match type) ...
+ *
+ * Most of the following data structures are modeled on T4 capabilities.
+ * Drivers for earlier chips use the subsets which make sense for those chips.
+ * We really need to come up with a hardware-independent mechanism to
+ * represent hardware filter capabilities ...
+ */
+struct ch_filter_tuple {
+ /* Compressed header matching field rules. The TP_VLAN_PRI_MAP
+ * register selects which of these fields will participate in the
+ * filter match rules -- up to a maximum of 36 bits. Because
+ * TP_VLAN_PRI_MAP is a global register, all filters must use the same
+ * set of fields.
+ */
+ uint32_t ethtype:ETHTYPE_BITWIDTH; /* Ethernet type */
+ uint32_t frag:FRAG_BITWIDTH; /* IP fragmentation header */
+ uint32_t ivlan_vld:1; /* inner VLAN valid */
+ uint32_t ovlan_vld:1; /* outer VLAN valid */
+ uint32_t pfvf_vld:1; /* PF/VF valid */
+ uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */
+ uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */
+ uint32_t iport:IPORT_BITWIDTH; /* ingress port */
+ uint32_t matchtype:MATCHTYPE_BITWIDTH; /* MPS match type */
+ uint32_t proto:PROTO_BITWIDTH; /* protocol type */
+ uint32_t tos:TOS_BITWIDTH; /* TOS/Traffic Type */
+ uint32_t pf:PF_BITWIDTH; /* PCI-E PF ID */
+ uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */
+ uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */
+ uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */
+
+ /* Uncompressed header matching field rules. These are always
+ * available for field rules.
+ */
+ uint8_t lip[16]; /* local IP address (IPv4 in [3:0]) */
+ uint8_t fip[16]; /* foreign IP address (IPv4 in [3:0]) */
+ uint16_t lport; /* local port */
+ uint16_t fport; /* foreign port */
+};
+
+/* A filter ioctl command.
+ */
+struct ch_filter_specification {
+ /* Administrative fields for filter.
+ */
+ uint32_t hitcnts:1; /* count filter hits in TCB */
+ uint32_t prio:1; /* filter has priority over active/server */
+
+ /* Fundamental filter typing. This is the one element of filter
+ * matching that doesn't exist as a (value, mask) tuple.
+ */
+ uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */
+
+ /* Packet dispatch information. Ingress packets which match the
+ * filter rules will be dropped, passed to the host or switched back
+ * out as egress packets.
+ */
+ uint32_t action:2; /* drop, pass, switch */
+
+ uint32_t rpttid:1; /* report TID in RSS hash field */
+
+ uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */
+ uint32_t iq:10; /* ingress queue */
+
+ uint32_t maskhash:1; /* dirsteer=0: store RSS hash in TCB */
+ uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
+ /* 1 => TCB contains IQ ID */
+
+ /* Switch proxy/rewrite fields. An ingress packet which matches a
+ * filter with "switch" set will be looped back out as an egress
+ * packet -- potentially with some Ethernet header rewriting.
+ */
+ uint32_t eport:2; /* egress port to switch packet out */
+ uint32_t newdmac:1; /* rewrite destination MAC address */
+ uint32_t newsmac:1; /* rewrite source MAC address */
+ uint32_t newvlan:2; /* rewrite VLAN Tag */
+ uint8_t dmac[ETH_ALEN]; /* new destination MAC address */
+ uint8_t smac[ETH_ALEN]; /* new source MAC address */
+ uint16_t vlan; /* VLAN Tag to insert */
+
+ /* Filter rule value/mask pairs.
+ */
+ struct ch_filter_tuple val;
+ struct ch_filter_tuple mask;
+};
+
+enum {
+ FILTER_PASS = 0, /* default */
+ FILTER_DROP,
+ FILTER_SWITCH
+};
+
+enum {
+ VLAN_NOCHANGE = 0, /* default */
+ VLAN_REMOVE,
+ VLAN_INSERT,
+ VLAN_REWRITE
+};
+
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
return readl(adap->regs + reg_addr);
@@ -701,6 +829,12 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, const u32 *vals,
unsigned int nregs, unsigned int start_idx);
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals, unsigned int nregs,
+ unsigned int start_idx);
+
+struct fw_filter_wr;
+
void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);
@@ -737,6 +871,8 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
const unsigned short *alpha, const unsigned short *beta);
+void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
+
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
const u8 *addr);
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
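
The ch_filter_tuple/ch_filter_specification definitions added to cxgb4.h above describe a filter as a set of (value, mask) pairs, where (0, 0) acts as a wildcard. As an illustrative sketch only (not part of this patch), a caller could fill a specification like this to drop fragmented IPv4 packets arriving on ingress port 0:

	struct ch_filter_specification fs;

	memset(&fs, 0, sizeof(fs));	/* unset fields stay (0, 0) wildcards */
	fs.type = 0;			/* 0 => IPv4 */
	fs.action = FILTER_DROP;	/* drop matching packets */
	fs.hitcnts = 1;			/* count filter hits in the TCB */
	fs.val.frag = 1;		/* match fragmented packets ... */
	fs.mask.frag = 1;		/* ... with the frag bit significant */
	fs.val.iport = 0;		/* ingress port 0 ... */
	fs.mask.iport = 0x7;		/* ... all IPORT_BITWIDTH bits significant */
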
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0df1284df497..f0718e1a8369 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -175,6 +175,30 @@ enum {
MIN_FL_ENTRIES = 16
};
+/* Host shadow copy of ingress filter entry. This is in host native format
+ * and doesn't match the ordering or bit order, etc. of the hardware of the
+ * firmware command. The use of bit-field structure elements is purely to
+ * remind ourselves of the field size limitations and save memory in the case
+ * where the filter table is large.
+ */
+struct filter_entry {
+ /* Administrative fields for filter.
+ */
+ u32 valid:1; /* filter allocated and valid */
+ u32 locked:1; /* filter is administratively locked */
+
+ u32 pending:1; /* filter action is pending firmware reply */
+ u32 smtidx:8; /* Source MAC Table index for smac */
+ struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
+
+ /* The filter itself. Most of this is a straight copy of information
+ * provided by the extended ioctl(). Some fields are translated to
+ * internal forms -- for instance the Ingress Queue ID passed in from
+ * the ioctl() is translated into the Absolute Ingress Queue ID.
+ */
+ struct ch_filter_specification fs;
+};
+
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -325,6 +349,9 @@ enum {
static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
+module_param(tp_vlan_pri_map, uint, 0644);
+MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
+
static struct dentry *cxgb4_debugfs_root;
static LIST_HEAD(adapter_list);
@@ -506,8 +533,67 @@ static int link_start(struct net_device *dev)
return ret;
}
-/*
- * Response queue handler for the FW event queue.
+/* Clear a filter and release any of its resources that we own. This also
+ * clears the filter's "pending" status.
+ */
+static void clear_filter(struct adapter *adap, struct filter_entry *f)
+{
+ /* If the new or old filter has loopback rewriting rules then we'll
+ * need to free any existing Layer Two Table (L2T) entries of the old
+ * filter rule. The firmware will handle freeing up any Source MAC
+ * Table (SMT) entries used for rewriting Source MAC Addresses in
+ * loopback rules.
+ */
+ if (f->l2t)
+ cxgb4_l2t_release(f->l2t);
+
+ /* The zeroing of the filter rule below clears the filter valid,
+ * pending, locked flags, l2t pointer, etc. so it's all we need for
+ * this operation.
+ */
+ memset(f, 0, sizeof(*f));
+}
+
+/* Handle a filter write/deletion reply.
+ */
+static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
+{
+ unsigned int idx = GET_TID(rpl);
+ unsigned int nidx = idx - adap->tids.ftid_base;
+ unsigned int ret;
+ struct filter_entry *f;
+
+ if (idx >= adap->tids.ftid_base && nidx <
+ (adap->tids.nftids + adap->tids.nsftids)) {
+ idx = nidx;
+ ret = GET_TCB_COOKIE(rpl->cookie);
+ f = &adap->tids.ftid_tab[idx];
+
+ if (ret == FW_FILTER_WR_FLT_DELETED) {
+ /* Clear the filter when we get confirmation from the
+ * hardware that the filter has been deleted.
+ */
+ clear_filter(adap, f);
+ } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
+ dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
+ idx);
+ clear_filter(adap, f);
+ } else if (ret == FW_FILTER_WR_FLT_ADDED) {
+ f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
+ f->pending = 0; /* asynchronous setup completed */
+ f->valid = 1;
+ } else {
+ /* Something went wrong. Issue a warning about the
+ * problem and clear everything out.
+ */
+ dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
+ idx, ret);
+ clear_filter(adap, f);
+ }
+ }
+}
+
+/* Response queue handler for the FW event queue.
*/
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl)
@@ -542,6 +628,10 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
const struct cpl_l2t_write_rpl *p = (void *)rsp;
do_l2t_write_rpl(q->adap, p);
+ } else if (opcode == CPL_SET_TCB_RPL) {
+ const struct cpl_set_tcb_rpl *p = (void *)rsp;
+
+ filter_rpl(q->adap, p);
} else
dev_err(q->adap->pdev_dev,
"unexpected CPL %#x on FW event queue\n", opcode);
@@ -983,6 +1073,148 @@ static void t4_free_mem(void *addr)
kfree(addr);
}
+/* Send a Work Request to write the filter at a specified index. We construct
+ * a Firmware Filter Work Request to have the work done and put the indicated
+ * filter into "pending" mode which will prevent any further actions against
+ * it till we get a reply from the firmware on the completion status of the
+ * request.
+ */
+static int set_filter_wr(struct adapter *adapter, int fidx)
+{
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct sk_buff *skb;
+ struct fw_filter_wr *fwr;
+ unsigned int ftid;
+
+ /* If the new filter requires loopback Destination MAC and/or VLAN
+ * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
+ * the filter.
+ */
+ if (f->fs.newdmac || f->fs.newvlan) {
+ /* allocate L2T entry for new filter */
+ f->l2t = t4_l2t_alloc_switching(adapter->l2t);
+ if (f->l2t == NULL)
+ return -EAGAIN;
+ if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
+ f->fs.eport, f->fs.dmac)) {
+ cxgb4_l2t_release(f->l2t);
+ f->l2t = NULL;
+ return -ENOMEM;
+ }
+ }
+
+ ftid = adapter->tids.ftid_base + fidx;
+
+ skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
+ fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
+ memset(fwr, 0, sizeof(*fwr));
+
+ /* It would be nice to put most of the following in t4_hw.c but most
+ * of the work is translating the cxgbtool ch_filter_specification
+ * into the Work Request and the definition of that structure is
+ * currently in cxgbtool.h which isn't appropriate to pull into the
+ * common code. We may eventually try to come up with a more neutral
+ * filter specification structure but for now it's easiest to simply
+ * put this fairly direct code in line ...
+ */
+ fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
+ fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
+ fwr->tid_to_iq =
+ htonl(V_FW_FILTER_WR_TID(ftid) |
+ V_FW_FILTER_WR_RQTYPE(f->fs.type) |
+ V_FW_FILTER_WR_NOREPLY(0) |
+ V_FW_FILTER_WR_IQ(f->fs.iq));
+ fwr->del_filter_to_l2tix =
+ htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
+ V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
+ V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
+ V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
+ V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
+ V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
+ V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
+ V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
+ V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
+ V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
+ V_FW_FILTER_WR_PRIO(f->fs.prio) |
+ V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
+ fwr->ethtype = htons(f->fs.val.ethtype);
+ fwr->ethtypem = htons(f->fs.mask.ethtype);
+ fwr->frag_to_ovlan_vldm =
+ (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
+ V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
+ V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
+ V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
+ V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
+ V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
+ fwr->smac_sel = 0;
+ fwr->rx_chan_rx_rpl_iq =
+ htons(V_FW_FILTER_WR_RX_CHAN(0) |
+ V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
+ fwr->maci_to_matchtypem =
+ htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
+ V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
+ V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
+ V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
+ V_FW_FILTER_WR_PORT(f->fs.val.iport) |
+ V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
+ V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
+ V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
+ fwr->ptcl = f->fs.val.proto;
+ fwr->ptclm = f->fs.mask.proto;
+ fwr->ttyp = f->fs.val.tos;
+ fwr->ttypm = f->fs.mask.tos;
+ fwr->ivlan = htons(f->fs.val.ivlan);
+ fwr->ivlanm = htons(f->fs.mask.ivlan);
+ fwr->ovlan = htons(f->fs.val.ovlan);
+ fwr->ovlanm = htons(f->fs.mask.ovlan);
+ memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
+ memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
+ memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
+ memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
+ fwr->lp = htons(f->fs.val.lport);
+ fwr->lpm = htons(f->fs.mask.lport);
+ fwr->fp = htons(f->fs.val.fport);
+ fwr->fpm = htons(f->fs.mask.fport);
+ if (f->fs.newsmac)
+ memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
+
+ /* Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
+ t4_ofld_send(adapter, skb);
+ return 0;
+}
+
+/* Delete the filter at a specified index.
+ */
+static int del_filter_wr(struct adapter *adapter, int fidx)
+{
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct sk_buff *skb;
+ struct fw_filter_wr *fwr;
+ unsigned int len, ftid;
+
+ len = sizeof(*fwr);
+ ftid = adapter->tids.ftid_base + fidx;
+
+ skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
+ fwr = (struct fw_filter_wr *)__skb_put(skb, len);
+ t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
+
+ /* Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ t4_mgmt_tx(adapter, skb);
+ return 0;
+}
+
static inline int is_offload(const struct adapter *adap)
{
return adap->params.offload;
@@ -2148,8 +2380,8 @@ static const struct file_operations mem_debugfs_fops = {
.llseek = default_llseek,
};
-static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
- unsigned int idx, unsigned int size_mb)
+static void add_debugfs_mem(struct adapter *adap, const char *name,
+ unsigned int idx, unsigned int size_mb)
{
struct dentry *de;
@@ -2159,7 +2391,7 @@ static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
de->d_inode->i_size = size_mb << 20;
}
-static int __devinit setup_debugfs(struct adapter *adap)
+static int setup_debugfs(struct adapter *adap)
{
int i;
@@ -2195,7 +2427,7 @@ int cxgb4_alloc_atid(struct tid_info *t, void *data)
if (t->afree) {
union aopen_entry *p = t->afree;
- atid = p - t->atid_tab;
+ atid = (p - t->atid_tab) + t->atid_base;
t->afree = p->next;
p->data = data;
t->atids_in_use++;
@@ -2210,7 +2442,7 @@ EXPORT_SYMBOL(cxgb4_alloc_atid);
*/
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
- union aopen_entry *p = &t->atid_tab[atid];
+ union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
spin_lock_bh(&t->atid_lock);
p->next = t->afree;
@@ -2249,8 +2481,34 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
-/*
- * Release a server TID.
+/* Allocate a server filter TID and set it to the supplied value.
+ */
+int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
+{
+ int stid;
+
+ spin_lock_bh(&t->stid_lock);
+ if (family == PF_INET) {
+ stid = find_next_zero_bit(t->stid_bmap,
+ t->nstids + t->nsftids, t->nstids);
+ if (stid < (t->nstids + t->nsftids))
+ __set_bit(stid, t->stid_bmap);
+ else
+ stid = -1;
+ } else {
+ stid = -1;
+ }
+ if (stid >= 0) {
+ t->stid_tab[stid].data = data;
+ stid += t->stid_base;
+ t->stids_in_use++;
+ }
+ spin_unlock_bh(&t->stid_lock);
+ return stid;
+}
+EXPORT_SYMBOL(cxgb4_alloc_sftid);
+
+/* Release a server TID.
*/
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
@@ -2362,18 +2620,26 @@ EXPORT_SYMBOL(cxgb4_remove_tid);
static int tid_init(struct tid_info *t)
{
size_t size;
+ unsigned int stid_bmap_size;
unsigned int natids = t->natids;
- size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
+ stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
+ size = t->ntids * sizeof(*t->tid_tab) +
+ natids * sizeof(*t->atid_tab) +
t->nstids * sizeof(*t->stid_tab) +
- BITS_TO_LONGS(t->nstids) * sizeof(long);
+ t->nsftids * sizeof(*t->stid_tab) +
+ stid_bmap_size * sizeof(long) +
+ t->nftids * sizeof(*t->ftid_tab) +
+ t->nsftids * sizeof(*t->ftid_tab);
+
t->tid_tab = t4_alloc_mem(size);
if (!t->tid_tab)
return -ENOMEM;
t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
- t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
+ t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
+ t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
spin_lock_init(&t->stid_lock);
spin_lock_init(&t->atid_lock);
@@ -2388,7 +2654,7 @@ static int tid_init(struct tid_info *t)
t->atid_tab[natids - 1].next = &t->atid_tab[natids];
t->afree = t->atid_tab;
}
- bitmap_zero(t->stid_bmap, t->nstids);
+ bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
return 0;
}
@@ -2404,7 +2670,8 @@ static int tid_init(struct tid_info *t)
* Returns <0 on error and one of the %NET_XMIT_* values on success.
*/
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
- __be32 sip, __be16 sport, unsigned int queue)
+ __be32 sip, __be16 sport, __be16 vlan,
+ unsigned int queue)
{
unsigned int chan;
struct sk_buff *skb;
@@ -2750,6 +3017,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
{
void *handle;
struct cxgb4_lld_info lli;
+ unsigned short i;
lli.pdev = adap->pdev;
lli.l2t = adap->l2t;
@@ -2776,10 +3044,16 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
(adap->fn * 4));
+ lli.filt_mode = adap->filter_mode;
+ /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
+ for (i = 0; i < NCHAN; i++)
+ lli.tx_modq[i] = i;
lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
lli.fw_vers = adap->params.fw_vers;
lli.dbfifo_int_thresh = dbfifo_int_thresh;
+ lli.sge_pktshift = adap->sge.pktshift;
+ lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
handle = ulds[uld].add(&lli);
if (IS_ERR(handle)) {
@@ -2999,6 +3273,126 @@ static int cxgb_close(struct net_device *dev)
return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
}
+/* Return an error number if the indicated filter isn't writable ...
+ */
+static int writable_filter(struct filter_entry *f)
+{
+ if (f->locked)
+ return -EPERM;
+ if (f->pending)
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Delete the filter at the specified index (if valid). This checks for all
+ * the common problems with doing this, like the filter being locked, currently
+ * pending in another operation, etc.
+ */
+static int delete_filter(struct adapter *adapter, unsigned int fidx)
+{
+ struct filter_entry *f;
+ int ret;
+
+ if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
+ return -EINVAL;
+
+ f = &adapter->tids.ftid_tab[fidx];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+ if (f->valid)
+ return del_filter_wr(adapter, fidx);
+
+ return 0;
+}
+
+int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
+ __be32 sip, __be16 sport, __be16 vlan,
+ unsigned int queue, unsigned char port, unsigned char mask)
+{
+ int ret;
+ struct filter_entry *f;
+ struct adapter *adap;
+ int i;
+ u8 *val;
+
+ adap = netdev2adap(dev);
+
+ /* Adjust stid to correct filter index */
+ stid -= adap->tids.nstids;
+ stid += adap->tids.nftids;
+
+ /* Check to make sure the filter requested is writable ...
+ */
+ f = &adap->tids.ftid_tab[stid];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ /* Clear out any old resources being used by the filter before
+ * we start constructing the new filter.
+ */
+ if (f->valid)
+ clear_filter(adap, f);
+
+ /* Clear out filter specifications */
+ memset(&f->fs, 0, sizeof(struct ch_filter_specification));
+ f->fs.val.lport = cpu_to_be16(sport);
+ f->fs.mask.lport = ~0;
+ val = (u8 *)&sip;
+ if ((val[0] | val[1] | val[2] | val[3]) != 0) {
+ for (i = 0; i < 4; i++) {
+ f->fs.val.lip[i] = val[i];
+ f->fs.mask.lip[i] = ~0;
+ }
+ if (adap->filter_mode & F_PORT) {
+ f->fs.val.iport = port;
+ f->fs.mask.iport = mask;
+ }
+ }
+
+ f->fs.dirsteer = 1;
+ f->fs.iq = queue;
+ /* Mark filter as locked */
+ f->locked = 1;
+ f->fs.rpttid = 1;
+
+ ret = set_filter_wr(adap, stid);
+ if (ret) {
+ clear_filter(adap, f);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(cxgb4_create_server_filter);
+
+int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
+ unsigned int queue, bool ipv6)
+{
+ int ret;
+ struct filter_entry *f;
+ struct adapter *adap;
+
+ adap = netdev2adap(dev);
+
+ /* Adjust stid to correct filter index */
+ stid -= adap->tids.nstids;
+ stid += adap->tids.nftids;
+
+ f = &adap->tids.ftid_tab[stid];
+ /* Unlock the filter */
+ f->locked = 0;
+
+ ret = delete_filter(adap, stid);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(cxgb4_remove_server_filter);
+
static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *ns)
{
@@ -3203,7 +3597,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
memset(c, 0, sizeof(*c));
c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
- c->retval_len16 = htonl(FW_LEN16(*c));
+ c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
if (ret < 0)
return ret;
@@ -3245,6 +3639,34 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
v = t4_read_reg(adap, TP_PIO_DATA);
t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
+ /* first 4 Tx modulation queues point to consecutive Tx channels */
+ adap->params.tp.tx_modq_map = 0xE4;
+ t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
+ V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
+
+ /* associate each Tx modulation queue with consecutive Tx channels */
+ v = 0x84218421;
+ t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &v, 1, A_TP_TX_SCHED_HDR);
+ t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &v, 1, A_TP_TX_SCHED_FIFO);
+ t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &v, 1, A_TP_TX_SCHED_PCMD);
+
+#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
+ if (is_offload(adap)) {
+ t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
+ V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+ t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
+ V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+ }
+
/* get basic stuff going */
return t4_early_init(adap, adap->fn);
}
@@ -3397,7 +3819,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ);
- caps_cmd.retval_len16 =
+ caps_cmd.cfvalid_to_len16 =
htonl(FW_CAPS_CONFIG_CMD_CFVALID |
FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
@@ -3422,7 +3844,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE);
- caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
NULL);
if (ret < 0)
@@ -3497,7 +3919,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
memset(&caps_cmd, 0, sizeof(caps_cmd));
caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
- caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
if (ret < 0)
@@ -3929,7 +4351,7 @@ static int adap_init0(struct adapter *adap)
memset(&caps_cmd, 0, sizeof(caps_cmd));
caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
- caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
if (ret < 0)
@@ -4035,6 +4457,10 @@ static int adap_init0(struct adapter *adap)
for (j = 0; j < NCHAN; j++)
adap->params.tp.tx_modq[j] = j;
+ t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &adap->filter_mode, 1,
+ TP_VLAN_PRI_MAP);
+
adap->flags |= FW_OK;
return 0;
@@ -4173,7 +4599,7 @@ static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
* of ports we found and the number of available CPUs. Most settings can be
* modified by the admin prior to actual use.
*/
-static void __devinit cfg_queues(struct adapter *adap)
+static void cfg_queues(struct adapter *adap)
{
struct sge *s = &adap->sge;
int i, q10g = 0, n10g = 0, qidx = 0;
@@ -4257,7 +4683,7 @@ static void __devinit cfg_queues(struct adapter *adap)
* Reduce the number of Ethernet queues across all ports to at most n.
* n provides at least one queue per port.
*/
-static void __devinit reduce_ethqs(struct adapter *adap, int n)
+static void reduce_ethqs(struct adapter *adap, int n)
{
int i;
struct port_info *pi;
@@ -4284,7 +4710,7 @@ static void __devinit reduce_ethqs(struct adapter *adap, int n)
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2
-static int __devinit enable_msix(struct adapter *adap)
+static int enable_msix(struct adapter *adap)
{
int ofld_need = 0;
int i, err, want, need;
@@ -4333,7 +4759,7 @@ static int __devinit enable_msix(struct adapter *adap)
#undef EXTRA_VECS
-static int __devinit init_rss(struct adapter *adap)
+static int init_rss(struct adapter *adap)
{
unsigned int i, j;
@@ -4349,7 +4775,7 @@ static int __devinit init_rss(struct adapter *adap)
return 0;
}
-static void __devinit print_port_info(const struct net_device *dev)
+static void print_port_info(const struct net_device *dev)
{
static const char *base[] = {
"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
@@ -4386,7 +4812,7 @@ static void __devinit print_port_info(const struct net_device *dev)
adap->params.vpd.sn, adap->params.vpd.ec);
}
-static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
+static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
@@ -4419,8 +4845,7 @@ static void free_some_resources(struct adapter *adapter)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
-static int __devinit init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int func, i, err;
struct port_info *pi;
@@ -4640,7 +5065,7 @@ sriov:
return err;
}
-static void __devexit remove_one(struct pci_dev *pdev)
+static void remove_one(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
@@ -4662,6 +5087,17 @@ static void __devexit remove_one(struct pci_dev *pdev)
if (adapter->debugfs_root)
debugfs_remove_recursive(adapter->debugfs_root);
+ /* If we allocated filters, free up state associated with any
+ * valid filters ...
+ */
+ if (adapter->tids.ftid_tab) {
+ struct filter_entry *f = &adapter->tids.ftid_tab[0];
+ for (i = 0; i < (adapter->tids.nftids +
+ adapter->tids.nsftids); i++, f++)
+ if (f->valid)
+ clear_filter(adapter, f);
+ }
+
if (adapter->flags & FULL_INIT_DONE)
cxgb_down(adapter);
@@ -4680,7 +5116,7 @@ static struct pci_driver cxgb4_driver = {
.name = KBUILD_MODNAME,
.id_table = cxgb4_pci_tbl,
.probe = init_one,
- .remove = __devexit_p(remove_one),
+ .remove = remove_one,
.err_handler = &cxgb4_eeh,
};
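
The cxgb4_alloc_sftid() and cxgb4_create_server_filter() exports added above let upper-layer drivers steer a listening endpoint's packets to a chosen ingress queue. A minimal usage sketch under assumed caller context (the helper name and its error handling are illustrative, not part of this patch):

	/* Hypothetical helper: reserve a server filter TID and install a
	 * filter steering sip:sport to ingress queue 'queue'.  vlan, port
	 * and mask are left at 0 (no VLAN match, any ingress port).
	 */
	static int example_steer_listener(struct net_device *dev,
					  struct tid_info *tids, __be32 sip,
					  __be16 sport, unsigned int queue)
	{
		int stid = cxgb4_alloc_sftid(tids, PF_INET, NULL);

		if (stid < 0)
			return -ENOMEM;	/* no free server filter TIDs */

		return cxgb4_create_server_filter(dev, stid, sip, sport, 0,
						  queue, 0, 0);
	}
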
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 39bec73ff87c..e2bbc7f3e2de 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -38,6 +38,7 @@
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
#include <linux/atomic.h>
/* CPL message priority levels */
@@ -97,7 +98,9 @@ struct tid_info {
union aopen_entry *atid_tab;
unsigned int natids;
+ unsigned int atid_base;
+ struct filter_entry *ftid_tab;
unsigned int nftids;
unsigned int ftid_base;
unsigned int aftid_base;
@@ -129,7 +132,7 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
{
stid -= t->stid_base;
- return stid < t->nstids ? t->stid_tab[stid].data : NULL;
+ return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
}
static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
@@ -141,6 +144,7 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
int cxgb4_alloc_atid(struct tid_info *t, void *data);
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
+int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data);
void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
@@ -148,8 +152,14 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
struct in6_addr;
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
- __be32 sip, __be16 sport, unsigned int queue);
-
+ __be32 sip, __be16 sport, __be16 vlan,
+ unsigned int queue);
+int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
+ __be32 sip, __be16 sport, __be16 vlan,
+ unsigned int queue,
+ unsigned char port, unsigned char mask);
+int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
+ unsigned int queue, bool ipv6);
static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
{
skb_set_queue_mapping(skb, (queue << 1) | prio);
@@ -221,9 +231,16 @@ struct cxgb4_lld_info {
unsigned int iscsi_iolen; /* iSCSI max I/O length */
unsigned short udb_density; /* # of user DB/page */
unsigned short ucq_density; /* # of user CQs/page */
+ unsigned short filt_mode; /* filter optional components */
+ unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */
+ /* scheduler queue */
void __iomem *gts_reg; /* address of GTS register */
void __iomem *db_reg; /* address of kernel doorbell */
int dbfifo_int_thresh; /* doorbell fifo int threshold */
+ unsigned int sge_pktshift; /* Padding between CPL and */
+ /* packet data */
+ bool enable_fw_ofld_conn; /* Enable connection through fw */
+ /* WR */
};
struct cxgb4_uld_info {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 6ac77a62f361..29878098101e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -484,6 +484,38 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
handle_failed_resolution(adap, arpq);
}
+/* Allocate an L2T entry for use by a switching rule. Such entries need to be
+ * explicitly freed and while busy they are not on any hash chain, so normal
+ * address resolution updates do not see them.
+ */
+struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
+{
+ struct l2t_entry *e;
+
+ write_lock_bh(&d->lock);
+ e = alloc_l2e(d);
+ if (e) {
+ spin_lock(&e->lock); /* avoid race with t4_l2t_free */
+ e->state = L2T_STATE_SWITCHING;
+ atomic_set(&e->refcnt, 1);
+ spin_unlock(&e->lock);
+ }
+ write_unlock_bh(&d->lock);
+ return e;
+}
+
+/* Sets/updates the contents of a switching L2T entry that has been allocated
+ * with an earlier call to @t4_l2t_alloc_switching.
+ */
+int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
+ u8 port, u8 *eth_addr)
+{
+ e->vlan = vlan;
+ e->lport = port;
+ memcpy(e->dmac, eth_addr, ETH_ALEN);
+ return write_l2e(adap, e, 0);
+}
+
struct l2t_data *t4_init_l2t(void)
{
int i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 02b31d0c6410..108c0f1fce1c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -100,6 +100,9 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
unsigned int priority);
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
+struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
+int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
+ u8 port, u8 *eth_addr);
struct l2t_data *t4_init_l2t(void);
void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 3ecc087d732d..fe9a2ea3588b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -508,7 +508,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
if (q->pend_cred >= 8) {
wmb();
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO |
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
q->pend_cred &= 7;
}
@@ -2082,10 +2082,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
goto fl_nomem;
flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
- c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
+ c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN(1) |
FW_IQ_CMD_FL0FETCHRO(1) |
FW_IQ_CMD_FL0DATARO(1) |
- FW_IQ_CMD_FL0PADEN);
+ FW_IQ_CMD_FL0PADEN(1));
c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
FW_IQ_CMD_FL0FBMAX(3));
c.fl0size = htons(flsz);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 730ae2cfa49e..22f3af5166bf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -109,7 +109,7 @@ void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
* Reads registers that are accessed indirectly through an address/data
* register pair.
*/
-static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, u32 *vals,
unsigned int nregs, unsigned int start_idx)
{
@@ -648,12 +648,12 @@ static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
- if (t4_read_reg(adapter, SF_OP) & BUSY)
+ if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
return -EBUSY;
cont = cont ? SF_CONT : 0;
lock = lock ? SF_LOCK : 0;
t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
- ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
+ ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
if (!ret)
*valp = t4_read_reg(adapter, SF_DATA);
return ret;
@@ -676,14 +676,14 @@ static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
{
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
- if (t4_read_reg(adapter, SF_OP) & BUSY)
+ if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
return -EBUSY;
cont = cont ? SF_CONT : 0;
lock = lock ? SF_LOCK : 0;
t4_write_reg(adapter, SF_DATA, val);
t4_write_reg(adapter, SF_OP, lock |
cont | BYTECNT(byte_cnt - 1) | OP_WR);
- return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
+ return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
}
/**
@@ -2003,7 +2003,7 @@ void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
*
* Initialize the congestion control parameters.
*/
-static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
+static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
a[9] = 2;
@@ -2252,14 +2252,14 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
t4_write_reg(adap, EPIO_REG(DATA0), mask0);
t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
+ if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
return -ETIMEDOUT;
/* write CRC */
t4_write_reg(adap, EPIO_REG(DATA0), crc);
t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
+ if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
return -ETIMEDOUT;
}
#undef EPIO_REG
@@ -2268,6 +2268,26 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
return 0;
}
+/* t4_mk_filtdelwr - create a delete filter WR
+ * @ftid: the filter ID
+ * @wr: the filter work request to populate
+ * @qid: ingress queue to receive the delete notification
+ *
+ * Creates a filter work request to delete the supplied filter. If @qid is
+ * negative the delete notification is suppressed.
+ */
+void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
+{
+ memset(wr, 0, sizeof(*wr));
+ wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
+ wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
+ wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
+ V_FW_FILTER_WR_NOREPLY(qid < 0));
+ wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
+ if (qid >= 0)
+ wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
+}
+
#define INIT_CMD(var, cmd, rd_wr) do { \
(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
FW_CMD_REQUEST | FW_CMD_##rd_wr); \
@@ -2405,7 +2425,7 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
retry:
memset(&c, 0, sizeof(c));
INIT_CMD(c, HELLO, WRITE);
- c.err_to_mbasyncnot = htonl(
+ c.err_to_clearinit = htonl(
FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
@@ -2426,7 +2446,7 @@ retry:
return ret;
}
- v = ntohl(c.err_to_mbasyncnot);
+ v = ntohl(c.err_to_clearinit);
master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
if (state) {
if (v & FW_HELLO_CMD_ERR)
@@ -2774,7 +2794,7 @@ int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ);
- caps_cmd.retval_len16 =
+ caps_cmd.cfvalid_to_len16 =
htonl(FW_CAPS_CONFIG_CMD_CFVALID |
FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
@@ -2797,7 +2817,7 @@ int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE);
- caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
}
@@ -3440,8 +3460,7 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
return 0;
}
-static void __devinit get_pci_mode(struct adapter *adapter,
- struct pci_params *p)
+static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
u16 val;
@@ -3460,8 +3479,7 @@ static void __devinit get_pci_mode(struct adapter *adapter,
* Initializes the SW state maintained for each link, including the link's
* capabilities and default speed/flow-control/autonegotiation settings.
*/
-static void __devinit init_link_config(struct link_config *lc,
- unsigned int caps)
+static void init_link_config(struct link_config *lc, unsigned int caps)
{
lc->supported = caps;
lc->requested_speed = 0;
@@ -3485,7 +3503,7 @@ int t4_wait_dev_ready(struct adapter *adap)
return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
}
-static int __devinit get_flash_params(struct adapter *adap)
+static int get_flash_params(struct adapter *adap)
{
int ret;
u32 info;
@@ -3521,7 +3539,7 @@ static int __devinit get_flash_params(struct adapter *adap)
* values for some adapter tunables, take PHYs out of reset, and
* initialize the MDIO interface.
*/
-int __devinit t4_prep_adapter(struct adapter *adapter)
+int t4_prep_adapter(struct adapter *adapter)
{
int ret;
@@ -3549,7 +3567,7 @@ int __devinit t4_prep_adapter(struct adapter *adapter)
return 0;
}
-int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
u8 addr[6];
int ret, i, j = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index eb71b8250b91..261d17703adc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -193,8 +193,24 @@ struct work_request_hdr {
__be64 wr_lo;
};
+/* wr_hi fields */
+#define S_WR_OP 24
+#define V_WR_OP(x) ((__u64)(x) << S_WR_OP)
+
#define WR_HDR struct work_request_hdr wr
+/* option 0 fields */
+#define S_MSS_IDX 60
+#define M_MSS_IDX 0xF
+#define V_MSS_IDX(x) ((__u64)(x) << S_MSS_IDX)
+#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
+
+/* option 2 fields */
+#define S_RSS_QUEUE 0
+#define M_RSS_QUEUE 0x3FF
+#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE)
+#define G_RSS_QUEUE(x) (((x) >> S_RSS_QUEUE) & M_RSS_QUEUE)
+
struct cpl_pass_open_req {
WR_HDR;
union opcode_tid ot;
@@ -204,12 +220,14 @@ struct cpl_pass_open_req {
__be32 peer_ip;
__be64 opt0;
#define TX_CHAN(x) ((x) << 2)
+#define NO_CONG(x) ((x) << 4)
#define DELACK(x) ((x) << 5)
#define ULP_MODE(x) ((x) << 8)
#define RCV_BUFSIZ(x) ((x) << 12)
#define DSCP(x) ((x) << 22)
#define SMAC_SEL(x) ((u64)(x) << 28)
#define L2T_IDX(x) ((u64)(x) << 36)
+#define TCAM_BYPASS(x) ((u64)(x) << 48)
#define NAGLE(x) ((u64)(x) << 49)
#define WND_SCALE(x) ((u64)(x) << 50)
#define KEEP_ALIVE(x) ((u64)(x) << 54)
@@ -247,8 +265,10 @@ struct cpl_pass_accept_rpl {
#define RSS_QUEUE_VALID (1 << 10)
#define RX_COALESCE_VALID(x) ((x) << 11)
#define RX_COALESCE(x) ((x) << 12)
+#define PACE(x) ((x) << 16)
#define TX_QUEUE(x) ((x) << 23)
#define RX_CHANNEL(x) ((x) << 26)
+#define CCTRL_ECN(x) ((x) << 27)
#define WND_SCALE_EN(x) ((x) << 28)
#define TSTAMPS_EN(x) ((x) << 29)
#define SACK_EN(x) ((x) << 30)
@@ -292,6 +312,9 @@ struct cpl_pass_establish {
union opcode_tid ot;
__be32 rsvd;
__be32 tos_stid;
+#define PASS_OPEN_TID(x) ((x) << 0)
+#define PASS_OPEN_TOS(x) ((x) << 24)
+#define GET_PASS_OPEN_TID(x) (((x) >> 0) & 0xFFFFFF)
#define GET_POPEN_TID(x) ((x) & 0xffffff)
#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
__be16 mac_idx;
@@ -332,6 +355,7 @@ struct cpl_set_tcb_field {
__be16 word_cookie;
#define TCB_WORD(x) ((x) << 0)
#define TCB_COOKIE(x) ((x) << 5)
+#define GET_TCB_COOKIE(x) (((x) >> 5) & 7)
__be64 mask;
__be64 val;
};
@@ -536,6 +560,37 @@ struct cpl_rx_pkt {
__be16 err_vec;
};
+/* rx_pkt.l2info fields */
+#define S_RX_ETHHDR_LEN 0
+#define M_RX_ETHHDR_LEN 0x1F
+#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
+#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)
+
+#define S_RX_MACIDX 8
+#define M_RX_MACIDX 0x1FF
+#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
+#define G_RX_MACIDX(x) (((x) >> S_RX_MACIDX) & M_RX_MACIDX)
+
+#define S_RXF_SYN 21
+#define V_RXF_SYN(x) ((x) << S_RXF_SYN)
+#define F_RXF_SYN V_RXF_SYN(1U)
+
+#define S_RX_CHAN 28
+#define M_RX_CHAN 0xF
+#define V_RX_CHAN(x) ((x) << S_RX_CHAN)
+#define G_RX_CHAN(x) (((x) >> S_RX_CHAN) & M_RX_CHAN)
+
+/* rx_pkt.hdr_len fields */
+#define S_RX_TCPHDR_LEN 0
+#define M_RX_TCPHDR_LEN 0x3F
+#define V_RX_TCPHDR_LEN(x) ((x) << S_RX_TCPHDR_LEN)
+#define G_RX_TCPHDR_LEN(x) (((x) >> S_RX_TCPHDR_LEN) & M_RX_TCPHDR_LEN)
+
+#define S_RX_IPHDR_LEN 6
+#define M_RX_IPHDR_LEN 0x3FF
+#define V_RX_IPHDR_LEN(x) ((x) << S_RX_IPHDR_LEN)
+#define G_RX_IPHDR_LEN(x) (((x) >> S_RX_IPHDR_LEN) & M_RX_IPHDR_LEN)
+
struct cpl_trace_pkt {
u8 opcode;
u8 intf;
@@ -634,6 +689,17 @@ struct cpl_fw6_msg {
/* cpl_fw6_msg.type values */
enum {
FW6_TYPE_CMD_RPL = 0,
+ FW6_TYPE_WR_RPL = 1,
+ FW6_TYPE_CQE = 2,
+ FW6_TYPE_OFLD_CONNECTION_WR_RPL = 3,
+};
+
+struct cpl_fw6_msg_ofld_connection_wr_rpl {
+ __u64 cookie;
+ __be32 tid; /* or atid in case of active failure */
+ __u8 t_state;
+ __u8 retval;
+ __u8 rsvd[2];
};
enum {
@@ -658,6 +724,7 @@ struct ulptx_sgl {
__be32 cmd_nsge;
#define ULPTX_CMD(x) ((x) << 24)
#define ULPTX_NSGE(x) ((x) << 0)
+#define ULPTX_MORE (1U << 23)
__be32 len0;
__be64 addr0;
struct ulptx_sge_pair sge[0];
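
The new S_/M_/V_/G_ defines follow the usual Chelsio bitfield convention: V_*() places a value at the field's shift, G_*() extracts it with the matching mask. A minimal decoding sketch, assuming cpl_rx_pkt carries the __be32 l2info and __be16 hdr_len words these macros describe; the helper itself is hypothetical:

static void example_parse_rx_pkt(const struct cpl_rx_pkt *pkt)
{
	u32 l2info  = be32_to_cpu(pkt->l2info);
	u16 hdr_len = be16_to_cpu(pkt->hdr_len);

	unsigned int chan    = G_RX_CHAN(l2info);        /* bits 31:28 */
	unsigned int eth_len = G_RX_ETHHDR_LEN(l2info);  /* bits 4:0   */
	unsigned int ip_len  = G_RX_IPHDR_LEN(hdr_len);  /* bits 15:6  */
	unsigned int tcp_len = G_RX_TCPHDR_LEN(hdr_len); /* bits 5:0   */
	bool syn             = (l2info & F_RXF_SYN) != 0;

	pr_debug("rx: chan %u eth %u ip %u tcp %u%s\n",
		 chan, eth_len, ip_len, tcp_len, syn ? " (SYN)" : "");
}
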
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a1a8b57200f6..83ec5f7844ac 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -67,7 +67,7 @@
#define QID_MASK 0xffff8000U
#define QID_SHIFT 15
#define QID(x) ((x) << QID_SHIFT)
-#define DBPRIO 0x00004000U
+#define DBPRIO(x) ((x) << 14)
#define PIDX_MASK 0x00003fffU
#define PIDX_SHIFT 0
#define PIDX(x) ((x) << PIDX_SHIFT)
@@ -193,6 +193,12 @@
#define SGE_FL_BUFFER_SIZE1 0x1048
#define SGE_FL_BUFFER_SIZE2 0x104c
#define SGE_FL_BUFFER_SIZE3 0x1050
+#define SGE_FL_BUFFER_SIZE4 0x1054
+#define SGE_FL_BUFFER_SIZE5 0x1058
+#define SGE_FL_BUFFER_SIZE6 0x105c
+#define SGE_FL_BUFFER_SIZE7 0x1060
+#define SGE_FL_BUFFER_SIZE8 0x1064
+
#define SGE_INGRESS_RX_THRESHOLD 0x10a0
#define THRESHOLD_0_MASK 0x3f000000U
#define THRESHOLD_0_SHIFT 24
@@ -217,6 +223,17 @@
#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift)
#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
+#define SGE_DBFIFO_STATUS 0x10a4
+#define HP_INT_THRESH_SHIFT 28
+#define HP_INT_THRESH_MASK 0xfU
+#define HP_INT_THRESH(x) ((x) << HP_INT_THRESH_SHIFT)
+#define LP_INT_THRESH_SHIFT 12
+#define LP_INT_THRESH_MASK 0xfU
+#define LP_INT_THRESH(x) ((x) << LP_INT_THRESH_SHIFT)
+
+#define SGE_DOORBELL_CONTROL 0x10a8
+#define ENABLE_DROP (1 << 13)
+
#define SGE_TIMER_VALUE_0_AND_1 0x10b8
#define TIMERVALUE0_MASK 0xffff0000U
#define TIMERVALUE0_SHIFT 16
@@ -277,6 +294,10 @@
#define A_SGE_CTXT_CMD 0x11fc
#define A_SGE_DBQ_CTXT_BADDR 0x1084
+#define PCIE_PF_CFG 0x40
+#define AIVEC(x) ((x) << 4)
+#define AIVEC_MASK 0x3ffU
+
#define PCIE_PF_CLI 0x44
#define PCIE_INT_CAUSE 0x3004
#define UNXSPLCPLERR 0x20000000U
@@ -322,6 +343,13 @@
#define PCIE_MEM_ACCESS_OFFSET 0x306c
#define PCIE_FW 0x30b8
+#define PCIE_FW_ERR 0x80000000U
+#define PCIE_FW_INIT 0x40000000U
+#define PCIE_FW_HALT 0x20000000U
+#define PCIE_FW_MASTER_VLD 0x00008000U
+#define PCIE_FW_MASTER(x) ((x) << 12)
+#define PCIE_FW_MASTER_MASK 0x7
+#define PCIE_FW_MASTER_GET(x) (((x) >> 12) & PCIE_FW_MASTER_MASK)
#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
#define RNPP 0x80000000U
@@ -432,6 +460,9 @@
#define MBOWNER(x) ((x) << MBOWNER_SHIFT)
#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT)
+#define CIM_PF_HOST_INT_ENABLE 0x288
+#define MBMSGRDYINTEN(x) ((x) << 19)
+
#define CIM_PF_HOST_INT_CAUSE 0x28c
#define MBMSGRDYINT 0x00080000U
@@ -922,7 +953,7 @@
#define SF_DATA 0x193f8
#define SF_OP 0x193fc
-#define BUSY 0x80000000U
+#define SF_BUSY 0x80000000U
#define SF_LOCK 0x00000010U
#define SF_CONT 0x00000008U
#define BYTECNT_MASK 0x00000006U
@@ -981,6 +1012,7 @@
#define I2CM 0x00000002U
#define CIM 0x00000001U
+#define PL_INT_ENABLE 0x19410
#define PL_INT_MAP0 0x19414
#define PL_RST 0x19428
#define PIORST 0x00000002U
@@ -1032,4 +1064,41 @@
#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
#define XGMAC_PORT_INT_CAUSE 0x10dc
+
+#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28
+
+#define A_TP_TX_MOD_CHANNEL_WEIGHT 0x7e34
+
+#define S_TX_MOD_QUEUE_REQ_MAP 0
+#define M_TX_MOD_QUEUE_REQ_MAP 0xffffU
+#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
+
+#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x7e30
+
+#define S_TX_MODQ_WEIGHT3 24
+#define M_TX_MODQ_WEIGHT3 0xffU
+#define V_TX_MODQ_WEIGHT3(x) ((x) << S_TX_MODQ_WEIGHT3)
+
+#define S_TX_MODQ_WEIGHT2 16
+#define M_TX_MODQ_WEIGHT2 0xffU
+#define V_TX_MODQ_WEIGHT2(x) ((x) << S_TX_MODQ_WEIGHT2)
+
+#define S_TX_MODQ_WEIGHT1 8
+#define M_TX_MODQ_WEIGHT1 0xffU
+#define V_TX_MODQ_WEIGHT1(x) ((x) << S_TX_MODQ_WEIGHT1)
+
+#define S_TX_MODQ_WEIGHT0 0
+#define M_TX_MODQ_WEIGHT0 0xffU
+#define V_TX_MODQ_WEIGHT0(x) ((x) << S_TX_MODQ_WEIGHT0)
+
+#define A_TP_TX_SCHED_HDR 0x23
+
+#define A_TP_TX_SCHED_FIFO 0x24
+
+#define A_TP_TX_SCHED_PCMD 0x25
+
+#define S_PORT 1
+#define V_PORT(x) ((x) << S_PORT)
+#define F_PORT V_PORT(1U)
+
#endif /* __T4_REGS_H */
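
DBPRIO is now a parameterised field rather than a bare flag, so it composes with QID()/PIDX() on doorbell writes, and the new SGE_DBFIFO_STATUS and SGE_DOORBELL_CONTROL registers expose the doorbell-drop machinery. A rough PF-side sketch built on the driver's existing t4_write_reg()/t4_set_reg_field() accessors and SGE_PF_KDOORBELL offset; the threshold values are illustrative only:

static void example_db_setup(struct adapter *adap, unsigned int qid,
			     unsigned int pidx)
{
	/* high-priority kernel doorbell: DBPRIO(1) instead of the old flag */
	t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
		     DBPRIO(1) | QID(qid) | PIDX(pidx));

	/* interrupt thresholds for the high/low priority doorbell FIFOs */
	t4_set_reg_field(adap, SGE_DBFIFO_STATUS,
			 HP_INT_THRESH(HP_INT_THRESH_MASK) |
			 LP_INT_THRESH(LP_INT_THRESH_MASK),
			 HP_INT_THRESH(5) | LP_INT_THRESH(5));

	/* drop doorbells instead of stalling when the FIFO overflows */
	t4_set_reg_field(adap, SGE_DOORBELL_CONTROL, ENABLE_DROP, ENABLE_DROP);
}
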
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index a6364632b490..a0dcccd846c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -35,6 +35,45 @@
#ifndef _T4FW_INTERFACE_H_
#define _T4FW_INTERFACE_H_
+enum fw_retval {
+ FW_SUCCESS = 0, /* completed successfully */

+ FW_EPERM = 1, /* operation not permitted */
+ FW_ENOENT = 2, /* no such file or directory */
+ FW_EIO = 5, /* input/output error; hw bad */
+ FW_ENOEXEC = 8, /* exec format error; inv microcode */
+ FW_EAGAIN = 11, /* try again */
+ FW_ENOMEM = 12, /* out of memory */
+ FW_EFAULT = 14, /* bad address; fw bad */
+ FW_EBUSY = 16, /* resource busy */
+ FW_EEXIST = 17, /* file exists */
+ FW_EINVAL = 22, /* invalid argument */
+ FW_ENOSPC = 28, /* no space left on device */
+ FW_ENOSYS = 38, /* functionality not implemented */
+ FW_EPROTO = 71, /* protocol error */
+ FW_EADDRINUSE = 98, /* address already in use */
+ FW_EADDRNOTAVAIL = 99, /* cannot assign requested address */
+ FW_ENETDOWN = 100, /* network is down */
+ FW_ENETUNREACH = 101, /* network is unreachable */
+ FW_ENOBUFS = 105, /* no buffer space available */
+ FW_ETIMEDOUT = 110, /* timeout */
+ FW_EINPROGRESS = 115, /* fw internal */
+ FW_SCSI_ABORT_REQUESTED = 128, /* */
+ FW_SCSI_ABORT_TIMEDOUT = 129, /* */
+ FW_SCSI_ABORTED = 130, /* */
+ FW_SCSI_CLOSE_REQUESTED = 131, /* */
+ FW_ERR_LINK_DOWN = 132, /* */
+ FW_RDEV_NOT_READY = 133, /* */
+ FW_ERR_RDEV_LOST = 134, /* */
+ FW_ERR_RDEV_LOGO = 135, /* */
+ FW_FCOE_NO_XCHG = 136, /* */
+ FW_SCSI_RSP_ERR = 137, /* */
+ FW_ERR_RDEV_IMPL_LOGO = 138, /* */
+ FW_SCSI_UNDER_FLOW_ERR = 139, /* */
+ FW_SCSI_OVER_FLOW_ERR = 140, /* */
+ FW_SCSI_DDP_ERR = 141, /* DDP error*/
+ FW_SCSI_TASK_ERR = 142, /* No SCSI tasks available */
+};
+
#define FW_T4VF_SGE_BASE_ADDR 0x0000
#define FW_T4VF_MPS_BASE_ADDR 0x0100
#define FW_T4VF_PL_BASE_ADDR 0x0200
@@ -46,6 +85,7 @@ enum fw_wr_opcodes {
FW_ULPTX_WR = 0x04,
FW_TP_WR = 0x05,
FW_ETH_TX_PKT_WR = 0x08,
+ FW_OFLD_CONNECTION_WR = 0x2f,
FW_FLOWC_WR = 0x0a,
FW_OFLD_TX_DATA_WR = 0x0b,
FW_CMD_WR = 0x10,
@@ -68,6 +108,7 @@ struct fw_wr_hdr {
};
#define FW_WR_OP(x) ((x) << 24)
+#define FW_WR_OP_GET(x) (((x) >> 24) & 0xff)
#define FW_WR_ATOMIC(x) ((x) << 23)
#define FW_WR_FLUSH(x) ((x) << 22)
#define FW_WR_COMPL(x) ((x) << 21)
@@ -80,6 +121,282 @@ struct fw_wr_hdr {
#define FW_WR_LEN16(x) ((x) << 0)
#define HW_TPL_FR_MT_PR_IV_P_FC 0X32B
+#define HW_TPL_FR_MT_PR_OV_P_FC 0X327
+
+/* filter wr reply code in cookie in CPL_SET_TCB_RPL */
+enum fw_filter_wr_cookie {
+ FW_FILTER_WR_SUCCESS,
+ FW_FILTER_WR_FLT_ADDED,
+ FW_FILTER_WR_FLT_DELETED,
+ FW_FILTER_WR_SMT_TBL_FULL,
+ FW_FILTER_WR_EINVAL,
+};
+
+struct fw_filter_wr {
+ __be32 op_pkd;
+ __be32 len16_pkd;
+ __be64 r3;
+ __be32 tid_to_iq;
+ __be32 del_filter_to_l2tix;
+ __be16 ethtype;
+ __be16 ethtypem;
+ __u8 frag_to_ovlan_vldm;
+ __u8 smac_sel;
+ __be16 rx_chan_rx_rpl_iq;
+ __be32 maci_to_matchtypem;
+ __u8 ptcl;
+ __u8 ptclm;
+ __u8 ttyp;
+ __u8 ttypm;
+ __be16 ivlan;
+ __be16 ivlanm;
+ __be16 ovlan;
+ __be16 ovlanm;
+ __u8 lip[16];
+ __u8 lipm[16];
+ __u8 fip[16];
+ __u8 fipm[16];
+ __be16 lp;
+ __be16 lpm;
+ __be16 fp;
+ __be16 fpm;
+ __be16 r7;
+ __u8 sma[6];
+};
+
+#define S_FW_FILTER_WR_TID 12
+#define M_FW_FILTER_WR_TID 0xfffff
+#define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID)
+#define G_FW_FILTER_WR_TID(x) \
+ (((x) >> S_FW_FILTER_WR_TID) & M_FW_FILTER_WR_TID)
+
+#define S_FW_FILTER_WR_RQTYPE 11
+#define M_FW_FILTER_WR_RQTYPE 0x1
+#define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE)
+#define G_FW_FILTER_WR_RQTYPE(x) \
+ (((x) >> S_FW_FILTER_WR_RQTYPE) & M_FW_FILTER_WR_RQTYPE)
+#define F_FW_FILTER_WR_RQTYPE V_FW_FILTER_WR_RQTYPE(1U)
+
+#define S_FW_FILTER_WR_NOREPLY 10
+#define M_FW_FILTER_WR_NOREPLY 0x1
+#define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY)
+#define G_FW_FILTER_WR_NOREPLY(x) \
+ (((x) >> S_FW_FILTER_WR_NOREPLY) & M_FW_FILTER_WR_NOREPLY)
+#define F_FW_FILTER_WR_NOREPLY V_FW_FILTER_WR_NOREPLY(1U)
+
+#define S_FW_FILTER_WR_IQ 0
+#define M_FW_FILTER_WR_IQ 0x3ff
+#define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ)
+#define G_FW_FILTER_WR_IQ(x) \
+ (((x) >> S_FW_FILTER_WR_IQ) & M_FW_FILTER_WR_IQ)
+
+#define S_FW_FILTER_WR_DEL_FILTER 31
+#define M_FW_FILTER_WR_DEL_FILTER 0x1
+#define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER)
+#define G_FW_FILTER_WR_DEL_FILTER(x) \
+ (((x) >> S_FW_FILTER_WR_DEL_FILTER) & M_FW_FILTER_WR_DEL_FILTER)
+#define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U)
+
+#define S_FW_FILTER_WR_RPTTID 25
+#define M_FW_FILTER_WR_RPTTID 0x1
+#define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID)
+#define G_FW_FILTER_WR_RPTTID(x) \
+ (((x) >> S_FW_FILTER_WR_RPTTID) & M_FW_FILTER_WR_RPTTID)
+#define F_FW_FILTER_WR_RPTTID V_FW_FILTER_WR_RPTTID(1U)
+
+#define S_FW_FILTER_WR_DROP 24
+#define M_FW_FILTER_WR_DROP 0x1
+#define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP)
+#define G_FW_FILTER_WR_DROP(x) \
+ (((x) >> S_FW_FILTER_WR_DROP) & M_FW_FILTER_WR_DROP)
+#define F_FW_FILTER_WR_DROP V_FW_FILTER_WR_DROP(1U)
+
+#define S_FW_FILTER_WR_DIRSTEER 23
+#define M_FW_FILTER_WR_DIRSTEER 0x1
+#define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER)
+#define G_FW_FILTER_WR_DIRSTEER(x) \
+ (((x) >> S_FW_FILTER_WR_DIRSTEER) & M_FW_FILTER_WR_DIRSTEER)
+#define F_FW_FILTER_WR_DIRSTEER V_FW_FILTER_WR_DIRSTEER(1U)
+
+#define S_FW_FILTER_WR_MASKHASH 22
+#define M_FW_FILTER_WR_MASKHASH 0x1
+#define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH)
+#define G_FW_FILTER_WR_MASKHASH(x) \
+ (((x) >> S_FW_FILTER_WR_MASKHASH) & M_FW_FILTER_WR_MASKHASH)
+#define F_FW_FILTER_WR_MASKHASH V_FW_FILTER_WR_MASKHASH(1U)
+
+#define S_FW_FILTER_WR_DIRSTEERHASH 21
+#define M_FW_FILTER_WR_DIRSTEERHASH 0x1
+#define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH)
+#define G_FW_FILTER_WR_DIRSTEERHASH(x) \
+ (((x) >> S_FW_FILTER_WR_DIRSTEERHASH) & M_FW_FILTER_WR_DIRSTEERHASH)
+#define F_FW_FILTER_WR_DIRSTEERHASH V_FW_FILTER_WR_DIRSTEERHASH(1U)
+
+#define S_FW_FILTER_WR_LPBK 20
+#define M_FW_FILTER_WR_LPBK 0x1
+#define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK)
+#define G_FW_FILTER_WR_LPBK(x) \
+ (((x) >> S_FW_FILTER_WR_LPBK) & M_FW_FILTER_WR_LPBK)
+#define F_FW_FILTER_WR_LPBK V_FW_FILTER_WR_LPBK(1U)
+
+#define S_FW_FILTER_WR_DMAC 19
+#define M_FW_FILTER_WR_DMAC 0x1
+#define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC)
+#define G_FW_FILTER_WR_DMAC(x) \
+ (((x) >> S_FW_FILTER_WR_DMAC) & M_FW_FILTER_WR_DMAC)
+#define F_FW_FILTER_WR_DMAC V_FW_FILTER_WR_DMAC(1U)
+
+#define S_FW_FILTER_WR_SMAC 18
+#define M_FW_FILTER_WR_SMAC 0x1
+#define V_FW_FILTER_WR_SMAC(x) ((x) << S_FW_FILTER_WR_SMAC)
+#define G_FW_FILTER_WR_SMAC(x) \
+ (((x) >> S_FW_FILTER_WR_SMAC) & M_FW_FILTER_WR_SMAC)
+#define F_FW_FILTER_WR_SMAC V_FW_FILTER_WR_SMAC(1U)
+
+#define S_FW_FILTER_WR_INSVLAN 17
+#define M_FW_FILTER_WR_INSVLAN 0x1
+#define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN)
+#define G_FW_FILTER_WR_INSVLAN(x) \
+ (((x) >> S_FW_FILTER_WR_INSVLAN) & M_FW_FILTER_WR_INSVLAN)
+#define F_FW_FILTER_WR_INSVLAN V_FW_FILTER_WR_INSVLAN(1U)
+
+#define S_FW_FILTER_WR_RMVLAN 16
+#define M_FW_FILTER_WR_RMVLAN 0x1
+#define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN)
+#define G_FW_FILTER_WR_RMVLAN(x) \
+ (((x) >> S_FW_FILTER_WR_RMVLAN) & M_FW_FILTER_WR_RMVLAN)
+#define F_FW_FILTER_WR_RMVLAN V_FW_FILTER_WR_RMVLAN(1U)
+
+#define S_FW_FILTER_WR_HITCNTS 15
+#define M_FW_FILTER_WR_HITCNTS 0x1
+#define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS)
+#define G_FW_FILTER_WR_HITCNTS(x) \
+ (((x) >> S_FW_FILTER_WR_HITCNTS) & M_FW_FILTER_WR_HITCNTS)
+#define F_FW_FILTER_WR_HITCNTS V_FW_FILTER_WR_HITCNTS(1U)
+
+#define S_FW_FILTER_WR_TXCHAN 13
+#define M_FW_FILTER_WR_TXCHAN 0x3
+#define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN)
+#define G_FW_FILTER_WR_TXCHAN(x) \
+ (((x) >> S_FW_FILTER_WR_TXCHAN) & M_FW_FILTER_WR_TXCHAN)
+
+#define S_FW_FILTER_WR_PRIO 12
+#define M_FW_FILTER_WR_PRIO 0x1
+#define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO)
+#define G_FW_FILTER_WR_PRIO(x) \
+ (((x) >> S_FW_FILTER_WR_PRIO) & M_FW_FILTER_WR_PRIO)
+#define F_FW_FILTER_WR_PRIO V_FW_FILTER_WR_PRIO(1U)
+
+#define S_FW_FILTER_WR_L2TIX 0
+#define M_FW_FILTER_WR_L2TIX 0xfff
+#define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX)
+#define G_FW_FILTER_WR_L2TIX(x) \
+ (((x) >> S_FW_FILTER_WR_L2TIX) & M_FW_FILTER_WR_L2TIX)
+
+#define S_FW_FILTER_WR_FRAG 7
+#define M_FW_FILTER_WR_FRAG 0x1
+#define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG)
+#define G_FW_FILTER_WR_FRAG(x) \
+ (((x) >> S_FW_FILTER_WR_FRAG) & M_FW_FILTER_WR_FRAG)
+#define F_FW_FILTER_WR_FRAG V_FW_FILTER_WR_FRAG(1U)
+
+#define S_FW_FILTER_WR_FRAGM 6
+#define M_FW_FILTER_WR_FRAGM 0x1
+#define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM)
+#define G_FW_FILTER_WR_FRAGM(x) \
+ (((x) >> S_FW_FILTER_WR_FRAGM) & M_FW_FILTER_WR_FRAGM)
+#define F_FW_FILTER_WR_FRAGM V_FW_FILTER_WR_FRAGM(1U)
+
+#define S_FW_FILTER_WR_IVLAN_VLD 5
+#define M_FW_FILTER_WR_IVLAN_VLD 0x1
+#define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD)
+#define G_FW_FILTER_WR_IVLAN_VLD(x) \
+ (((x) >> S_FW_FILTER_WR_IVLAN_VLD) & M_FW_FILTER_WR_IVLAN_VLD)
+#define F_FW_FILTER_WR_IVLAN_VLD V_FW_FILTER_WR_IVLAN_VLD(1U)
+
+#define S_FW_FILTER_WR_OVLAN_VLD 4
+#define M_FW_FILTER_WR_OVLAN_VLD 0x1
+#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD)
+#define G_FW_FILTER_WR_OVLAN_VLD(x) \
+ (((x) >> S_FW_FILTER_WR_OVLAN_VLD) & M_FW_FILTER_WR_OVLAN_VLD)
+#define F_FW_FILTER_WR_OVLAN_VLD V_FW_FILTER_WR_OVLAN_VLD(1U)
+
+#define S_FW_FILTER_WR_IVLAN_VLDM 3
+#define M_FW_FILTER_WR_IVLAN_VLDM 0x1
+#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM)
+#define G_FW_FILTER_WR_IVLAN_VLDM(x) \
+ (((x) >> S_FW_FILTER_WR_IVLAN_VLDM) & M_FW_FILTER_WR_IVLAN_VLDM)
+#define F_FW_FILTER_WR_IVLAN_VLDM V_FW_FILTER_WR_IVLAN_VLDM(1U)
+
+#define S_FW_FILTER_WR_OVLAN_VLDM 2
+#define M_FW_FILTER_WR_OVLAN_VLDM 0x1
+#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM)
+#define G_FW_FILTER_WR_OVLAN_VLDM(x) \
+ (((x) >> S_FW_FILTER_WR_OVLAN_VLDM) & M_FW_FILTER_WR_OVLAN_VLDM)
+#define F_FW_FILTER_WR_OVLAN_VLDM V_FW_FILTER_WR_OVLAN_VLDM(1U)
+
+#define S_FW_FILTER_WR_RX_CHAN 15
+#define M_FW_FILTER_WR_RX_CHAN 0x1
+#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN)
+#define G_FW_FILTER_WR_RX_CHAN(x) \
+ (((x) >> S_FW_FILTER_WR_RX_CHAN) & M_FW_FILTER_WR_RX_CHAN)
+#define F_FW_FILTER_WR_RX_CHAN V_FW_FILTER_WR_RX_CHAN(1U)
+
+#define S_FW_FILTER_WR_RX_RPL_IQ 0
+#define M_FW_FILTER_WR_RX_RPL_IQ 0x3ff
+#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ)
+#define G_FW_FILTER_WR_RX_RPL_IQ(x) \
+ (((x) >> S_FW_FILTER_WR_RX_RPL_IQ) & M_FW_FILTER_WR_RX_RPL_IQ)
+
+#define S_FW_FILTER_WR_MACI 23
+#define M_FW_FILTER_WR_MACI 0x1ff
+#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI)
+#define G_FW_FILTER_WR_MACI(x) \
+ (((x) >> S_FW_FILTER_WR_MACI) & M_FW_FILTER_WR_MACI)
+
+#define S_FW_FILTER_WR_MACIM 14
+#define M_FW_FILTER_WR_MACIM 0x1ff
+#define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM)
+#define G_FW_FILTER_WR_MACIM(x) \
+ (((x) >> S_FW_FILTER_WR_MACIM) & M_FW_FILTER_WR_MACIM)
+
+#define S_FW_FILTER_WR_FCOE 13
+#define M_FW_FILTER_WR_FCOE 0x1
+#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE)
+#define G_FW_FILTER_WR_FCOE(x) \
+ (((x) >> S_FW_FILTER_WR_FCOE) & M_FW_FILTER_WR_FCOE)
+#define F_FW_FILTER_WR_FCOE V_FW_FILTER_WR_FCOE(1U)
+
+#define S_FW_FILTER_WR_FCOEM 12
+#define M_FW_FILTER_WR_FCOEM 0x1
+#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM)
+#define G_FW_FILTER_WR_FCOEM(x) \
+ (((x) >> S_FW_FILTER_WR_FCOEM) & M_FW_FILTER_WR_FCOEM)
+#define F_FW_FILTER_WR_FCOEM V_FW_FILTER_WR_FCOEM(1U)
+
+#define S_FW_FILTER_WR_PORT 9
+#define M_FW_FILTER_WR_PORT 0x7
+#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT)
+#define G_FW_FILTER_WR_PORT(x) \
+ (((x) >> S_FW_FILTER_WR_PORT) & M_FW_FILTER_WR_PORT)
+
+#define S_FW_FILTER_WR_PORTM 6
+#define M_FW_FILTER_WR_PORTM 0x7
+#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM)
+#define G_FW_FILTER_WR_PORTM(x) \
+ (((x) >> S_FW_FILTER_WR_PORTM) & M_FW_FILTER_WR_PORTM)
+
+#define S_FW_FILTER_WR_MATCHTYPE 3
+#define M_FW_FILTER_WR_MATCHTYPE 0x7
+#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE)
+#define G_FW_FILTER_WR_MATCHTYPE(x) \
+ (((x) >> S_FW_FILTER_WR_MATCHTYPE) & M_FW_FILTER_WR_MATCHTYPE)
+
+#define S_FW_FILTER_WR_MATCHTYPEM 0
+#define M_FW_FILTER_WR_MATCHTYPEM 0x7
+#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM)
+#define G_FW_FILTER_WR_MATCHTYPEM(x) \
+ (((x) >> S_FW_FILTER_WR_MATCHTYPEM) & M_FW_FILTER_WR_MATCHTYPEM)
struct fw_ulptx_wr {
__be32 op_to_compl;
@@ -99,6 +416,108 @@ struct fw_eth_tx_pkt_wr {
__be64 r3;
};
+struct fw_ofld_connection_wr {
+ __be32 op_compl;
+ __be32 len16_pkd;
+ __u64 cookie;
+ __be64 r2;
+ __be64 r3;
+ struct fw_ofld_connection_le {
+ __be32 version_cpl;
+ __be32 filter;
+ __be32 r1;
+ __be16 lport;
+ __be16 pport;
+ union fw_ofld_connection_leip {
+ struct fw_ofld_connection_le_ipv4 {
+ __be32 pip;
+ __be32 lip;
+ __be64 r0;
+ __be64 r1;
+ __be64 r2;
+ } ipv4;
+ struct fw_ofld_connection_le_ipv6 {
+ __be64 pip_hi;
+ __be64 pip_lo;
+ __be64 lip_hi;
+ __be64 lip_lo;
+ } ipv6;
+ } u;
+ } le;
+ struct fw_ofld_connection_tcb {
+ __be32 t_state_to_astid;
+ __be16 cplrxdataack_cplpassacceptrpl;
+ __be16 rcv_adv;
+ __be32 rcv_nxt;
+ __be32 tx_max;
+ __be64 opt0;
+ __be32 opt2;
+ __be32 r1;
+ __be64 r2;
+ __be64 r3;
+ } tcb;
+};
+
+#define S_FW_OFLD_CONNECTION_WR_VERSION 31
+#define M_FW_OFLD_CONNECTION_WR_VERSION 0x1
+#define V_FW_OFLD_CONNECTION_WR_VERSION(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_VERSION)
+#define G_FW_OFLD_CONNECTION_WR_VERSION(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_VERSION) & \
+ M_FW_OFLD_CONNECTION_WR_VERSION)
+#define F_FW_OFLD_CONNECTION_WR_VERSION \
+ V_FW_OFLD_CONNECTION_WR_VERSION(1U)
+
+#define S_FW_OFLD_CONNECTION_WR_CPL 30
+#define M_FW_OFLD_CONNECTION_WR_CPL 0x1
+#define V_FW_OFLD_CONNECTION_WR_CPL(x) ((x) << S_FW_OFLD_CONNECTION_WR_CPL)
+#define G_FW_OFLD_CONNECTION_WR_CPL(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_CPL) & M_FW_OFLD_CONNECTION_WR_CPL)
+#define F_FW_OFLD_CONNECTION_WR_CPL V_FW_OFLD_CONNECTION_WR_CPL(1U)
+
+#define S_FW_OFLD_CONNECTION_WR_T_STATE 28
+#define M_FW_OFLD_CONNECTION_WR_T_STATE 0xf
+#define V_FW_OFLD_CONNECTION_WR_T_STATE(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_T_STATE)
+#define G_FW_OFLD_CONNECTION_WR_T_STATE(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_T_STATE) & \
+ M_FW_OFLD_CONNECTION_WR_T_STATE)
+
+#define S_FW_OFLD_CONNECTION_WR_RCV_SCALE 24
+#define M_FW_OFLD_CONNECTION_WR_RCV_SCALE 0xf
+#define V_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_RCV_SCALE)
+#define G_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_RCV_SCALE) & \
+ M_FW_OFLD_CONNECTION_WR_RCV_SCALE)
+
+#define S_FW_OFLD_CONNECTION_WR_ASTID 0
+#define M_FW_OFLD_CONNECTION_WR_ASTID 0xffffff
+#define V_FW_OFLD_CONNECTION_WR_ASTID(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_ASTID)
+#define G_FW_OFLD_CONNECTION_WR_ASTID(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_ASTID) & M_FW_OFLD_CONNECTION_WR_ASTID)
+
+#define S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 15
+#define M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 0x1
+#define V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK)
+#define G_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) & \
+ M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK)
+#define F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK \
+ V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(1U)
+
+#define S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 14
+#define M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 0x1
+#define V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL)
+#define G_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) & \
+ M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL)
+#define F_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL \
+ V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(1U)
+
enum fw_flowc_mnem {
FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
FW_FLOWC_MNEM_CH,
@@ -222,6 +641,7 @@ struct fw_cmd_hdr {
#define FW_CMD_OP(x) ((x) << 24)
#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff)
#define FW_CMD_REQUEST (1U << 23)
+#define FW_CMD_REQUEST_GET(x) (((x) >> 23) & 0x1)
#define FW_CMD_READ (1U << 22)
#define FW_CMD_WRITE (1U << 21)
#define FW_CMD_EXEC (1U << 20)
@@ -229,6 +649,7 @@ struct fw_cmd_hdr {
#define FW_CMD_RETVAL(x) ((x) << 8)
#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff)
#define FW_CMD_LEN16(x) ((x) << 0)
+#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
enum fw_ldst_addrspc {
FW_LDST_ADDRSPC_FIRMWARE = 0x0001,
@@ -241,7 +662,8 @@ enum fw_ldst_addrspc {
FW_LDST_ADDRSPC_TP_MIB = 0x0012,
FW_LDST_ADDRSPC_MDIO = 0x0018,
FW_LDST_ADDRSPC_MPS = 0x0020,
- FW_LDST_ADDRSPC_FUNC = 0x0028
+ FW_LDST_ADDRSPC_FUNC = 0x0028,
+ FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029,
};
enum fw_ldst_mps_fid {
@@ -303,6 +725,16 @@ struct fw_ldst_cmd {
__be64 data0;
__be64 data1;
} func;
+ struct fw_ldst_pcie {
+ u8 ctrl_to_fn;
+ u8 bnum;
+ u8 r;
+ u8 ext_r;
+ u8 select_naccess;
+ u8 pcie_fn;
+ __be16 nset_pkd;
+ __be32 data[12];
+ } pcie;
} u;
};
@@ -312,6 +744,9 @@ struct fw_ldst_cmd {
#define FW_LDST_CMD_FID(x) ((x) << 15)
#define FW_LDST_CMD_CTL(x) ((x) << 0)
#define FW_LDST_CMD_RPLCPF(x) ((x) << 0)
+#define FW_LDST_CMD_LC (1U << 4)
+#define FW_LDST_CMD_NACCESS(x) ((x) << 0)
+#define FW_LDST_CMD_FN(x) ((x) << 0)
struct fw_reset_cmd {
__be32 op_to_write;
@@ -333,7 +768,7 @@ enum fw_hellow_cmd {
struct fw_hello_cmd {
__be32 op_to_write;
__be32 retval_len16;
- __be32 err_to_mbasyncnot;
+ __be32 err_to_clearinit;
#define FW_HELLO_CMD_ERR (1U << 31)
#define FW_HELLO_CMD_INIT (1U << 30)
#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29)
@@ -343,6 +778,7 @@ struct fw_hello_cmd {
#define FW_HELLO_CMD_MBMASTER(x) ((x) << FW_HELLO_CMD_MBMASTER_SHIFT)
#define FW_HELLO_CMD_MBMASTER_GET(x) \
(((x) >> FW_HELLO_CMD_MBMASTER_SHIFT) & FW_HELLO_CMD_MBMASTER_MASK)
+#define FW_HELLO_CMD_MBASYNCNOTINT(x) ((x) << 23)
#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20)
#define FW_HELLO_CMD_STAGE(x) ((x) << 17)
#define FW_HELLO_CMD_CLEARINIT (1U << 16)
@@ -428,6 +864,7 @@ enum fw_caps_config_iscsi {
enum fw_caps_config_fcoe {
FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001,
FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002,
+ FW_CAPS_CONFIG_FCOE_CTRL_OFLD = 0x00000004,
};
enum fw_memtype_cf {
@@ -440,7 +877,7 @@ enum fw_memtype_cf {
struct fw_caps_config_cmd {
__be32 op_to_write;
- __be32 retval_len16;
+ __be32 cfvalid_to_len16;
__be32 r2;
__be32 hwmbitmap;
__be16 nbmcaps;
@@ -701,8 +1138,8 @@ struct fw_iq_cmd {
#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6)
#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4)
#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3)
-#define FW_IQ_CMD_FL0PADEN (1U << 2)
-#define FW_IQ_CMD_FL0PACKEN (1U << 1)
+#define FW_IQ_CMD_FL0PADEN(x) ((x) << 2)
+#define FW_IQ_CMD_FL0PACKEN(x) ((x) << 1)
#define FW_IQ_CMD_FL0CONGEN (1U << 0)
#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15)
@@ -1190,6 +1627,14 @@ enum fw_port_dcb_cfg_rc {
FW_PORT_DCB_CFG_ERROR = 0x1
};
+enum fw_port_dcb_type {
+ FW_PORT_DCB_TYPE_PGID = 0x00,
+ FW_PORT_DCB_TYPE_PGRATE = 0x01,
+ FW_PORT_DCB_TYPE_PRIORATE = 0x02,
+ FW_PORT_DCB_TYPE_PFC = 0x03,
+ FW_PORT_DCB_TYPE_APP_ID = 0x04,
+};
+
struct fw_port_cmd {
__be32 op_to_portid;
__be32 action_to_len16;
@@ -1257,6 +1702,7 @@ struct fw_port_cmd {
#define FW_PORT_CMD_TXIPG(x) ((x) << 19)
#define FW_PORT_CMD_LSTATUS (1U << 31)
+#define FW_PORT_CMD_LSTATUS_GET(x) (((x) >> 31) & 0x1)
#define FW_PORT_CMD_LSPEED(x) ((x) << 24)
#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f)
#define FW_PORT_CMD_TXPAUSE (1U << 23)
@@ -1305,6 +1751,9 @@ enum fw_port_module_type {
FW_PORT_MOD_TYPE_TWINAX_PASSIVE,
FW_PORT_MOD_TYPE_TWINAX_ACTIVE,
FW_PORT_MOD_TYPE_LRM,
+ FW_PORT_MOD_TYPE_ERROR = FW_PORT_CMD_MODTYPE_MASK - 3,
+ FW_PORT_MOD_TYPE_UNKNOWN = FW_PORT_CMD_MODTYPE_MASK - 2,
+ FW_PORT_MOD_TYPE_NOTSUPPORTED = FW_PORT_CMD_MODTYPE_MASK - 1,
FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK
};
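
FW_LEN16() removes the need to spell out sizeof()/16 by hand when filling a firmware command's LEN16 field, and the new *_GET() helpers decode the same header words on the reply side. A hedged sketch of the usual mailbox pattern, modelled on the existing reset command and the driver's t4_wr_mbox() accessor; the helper name is hypothetical:

static int example_fw_reset(struct adapter *adap, unsigned int mbox)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
				    FW_CMD_REQUEST | FW_CMD_WRITE);
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));	/* sizeof(c) / 16 */

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
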
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 9dad56101e23..0188df705719 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2023,7 +2023,7 @@ static struct cxgb4vf_debugfs_entry debugfs_files[] = {
* Set up our /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the
* directory (debugfs_root) has already been set up.
*/
-static int __devinit setup_debugfs(struct adapter *adapter)
+static int setup_debugfs(struct adapter *adapter)
{
int i;
@@ -2064,7 +2064,7 @@ static void cleanup_debugfs(struct adapter *adapter)
* adapter parameters we're going to be using and initialize basic adapter
* hardware support.
*/
-static int __devinit adap_init0(struct adapter *adapter)
+static int adap_init0(struct adapter *adapter)
{
struct vf_resources *vfres = &adapter->params.vfres;
struct sge_params *sge_params = &adapter->params.sge;
@@ -2266,7 +2266,7 @@ static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
* be modified by the admin via ethtool and cxgbtool prior to the adapter
* being brought up for the first time.
*/
-static void __devinit cfg_queues(struct adapter *adapter)
+static void cfg_queues(struct adapter *adapter)
{
struct sge *s = &adapter->sge;
int q10g, n10g, qidx, pidx, qs;
@@ -2361,7 +2361,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
* Reduce the number of Ethernet queues across all ports to at most n.
* n provides at least one queue per port.
*/
-static void __devinit reduce_ethqs(struct adapter *adapter, int n)
+static void reduce_ethqs(struct adapter *adapter, int n)
{
int i;
struct port_info *pi;
@@ -2400,7 +2400,7 @@ static void __devinit reduce_ethqs(struct adapter *adapter, int n)
* for our "extras". Note that this process may lower the maximum number of
* allowed Queue Sets ...
*/
-static int __devinit enable_msix(struct adapter *adapter)
+static int enable_msix(struct adapter *adapter)
{
int i, err, want, need;
struct msix_entry entries[MSIX_ENTRIES];
@@ -2462,8 +2462,8 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
* state needed to manage the device. This routine is called "init_one" in
* the PF Driver ...
*/
-static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int cxgb4vf_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static int version_printed;
@@ -2769,7 +2769,7 @@ err_disable_device:
* "probe" routine and quiesce the device (disable interrupts, etc.). (Note
* that this is called "remove_one" in the PF Driver.)
*/
-static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
+static void cxgb4vf_pci_remove(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
@@ -2835,7 +2835,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
* "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
* delivery.
*/
-static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
+static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
{
struct adapter *adapter;
int pidx;
@@ -2905,8 +2905,8 @@ static struct pci_driver cxgb4vf_driver = {
.name = KBUILD_MODNAME,
.id_table = cxgb4vf_pci_tbl,
.probe = cxgb4vf_pci_probe,
- .remove = __devexit_p(cxgb4vf_pci_remove),
- .shutdown = __devexit_p(cxgb4vf_pci_shutdown),
+ .remove = cxgb4vf_pci_remove,
+ .shutdown = cxgb4vf_pci_shutdown,
};
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index f16745f4b36b..92170d50d9d8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -536,7 +536,7 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
wmb();
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
- DBPRIO |
+ DBPRIO(1) |
QID(fl->cntxt_id) |
PIDX(fl->pend_cred / FL_PER_EQ_UNIT));
fl->pend_cred %= FL_PER_EQ_UNIT;
@@ -952,7 +952,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
* Warn if we write doorbells with the wrong priority and write
* descriptors before telling HW.
*/
- WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO);
+ WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO(1));
wmb();
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
QID(tq->cntxt_id) | PIDX(n));
@@ -2126,8 +2126,8 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
cmd.iqns_to_fl0congen =
cpu_to_be32(
FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) |
- FW_IQ_CMD_FL0PACKEN |
- FW_IQ_CMD_FL0PADEN);
+ FW_IQ_CMD_FL0PACKEN(1) |
+ FW_IQ_CMD_FL0PADEN(1));
cmd.fl0dcaen_to_fl0cidxfthresh =
cpu_to_be16(
FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index a65c80aed1f2..283f9d0d37fd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -232,8 +232,8 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
}
-int __devinit t4vf_wait_dev_ready(struct adapter *);
-int __devinit t4vf_port_init(struct adapter *, int);
+int t4vf_wait_dev_ready(struct adapter *);
+int t4vf_port_init(struct adapter *, int);
int t4vf_fw_reset(struct adapter *);
int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index fe3fd3dad6f7..7127c7b9efde 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -46,7 +46,7 @@
* returning a value other than all 1's). Return an error if it doesn't
* become ready ...
*/
-int __devinit t4vf_wait_dev_ready(struct adapter *adapter)
+int t4vf_wait_dev_ready(struct adapter *adapter)
{
const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
const u32 notready1 = 0xffffffff;
@@ -253,8 +253,7 @@ static int hash_mac_addr(const u8 *addr)
* Initializes the SW state maintained for each link, including the link's
* capabilities and default speed/flow-control/autonegotiation settings.
*/
-static void __devinit init_link_config(struct link_config *lc,
- unsigned int caps)
+static void init_link_config(struct link_config *lc, unsigned int caps)
{
lc->supported = caps;
lc->requested_speed = 0;
@@ -275,7 +274,7 @@ static void __devinit init_link_config(struct link_config *lc,
* @adapter: the adapter
* @pidx: the adapter port index
*/
-int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
+int t4vf_port_init(struct adapter *adapter, int pidx)
{
struct port_info *pi = adap2pinfo(adapter, pidx);
struct fw_vi_cmd vi_cmd, vi_rpl;