/* bnx2x_vfpf.h: Broadcom Everest network driver.
*
* Copyright (c) 2011-2012 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*
* Maintained by: Eilon Greenstein <eilong@broadcom.com>
* Written by: Ariel Elior <ariele@broadcom.com>
*/
#ifndef VF_PF_IF_H
#define VF_PF_IF_H
/* Common definitions for all HVs */
struct vf_pf_resc_request {
        u8 num_rxqs;
        u8 num_txqs;
        u8 num_sbs;
        u8 num_mac_filters;
        u8 num_vlan_filters;
        u8 num_mc_filters; /* No limit so superfluous */
};

struct hw_sb_info {
        u8 hw_sb_id;    /* aka absolute igu id, used to ack the sb */
        u8 sb_qid;      /* used to update DHC for sb */
};

/* HW VF-PF channel definitions
* A.K.A VF-PF mailbox
*/
#define TLV_BUFFER_SIZE                 1024

#define VFPF_QUEUE_FLG_TPA              0x0001
#define VFPF_QUEUE_FLG_TPA_IPV6         0x0002
#define VFPF_QUEUE_FLG_TPA_GRO          0x0004
#define VFPF_QUEUE_FLG_CACHE_ALIGN      0x0008
#define VFPF_QUEUE_FLG_STATS            0x0010
#define VFPF_QUEUE_FLG_OV               0x0020
#define VFPF_QUEUE_FLG_VLAN             0x0040
#define VFPF_QUEUE_FLG_COS              0x0080
#define VFPF_QUEUE_FLG_HC               0x0100
#define VFPF_QUEUE_FLG_DHC              0x0200

#define VFPF_QUEUE_DROP_IP_CS_ERR       (1 << 0)
#define VFPF_QUEUE_DROP_TCP_CS_ERR      (1 << 1)
#define VFPF_QUEUE_DROP_TTL0            (1 << 2)
#define VFPF_QUEUE_DROP_UDP_CS_ERR      (1 << 3)

enum {
        PFVF_STATUS_WAITING = 0,
        PFVF_STATUS_SUCCESS,
        PFVF_STATUS_FAILURE,
        PFVF_STATUS_NOT_SUPPORTED,
        PFVF_STATUS_NO_RESOURCE
};

/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
        u16 type;
        u16 length;
};

/* header of first vf->pf tlv carries the offset used to calculate response
 * buffer address
 */
struct vfpf_first_tlv {
        struct channel_tlv tl;
        u32 resp_msg_offset;
};

/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
        struct channel_tlv tl;
        u8 status;
        u8 padding[3];
};

/* response tlv used for most tlvs */
struct pfvf_general_resp_tlv {
        struct pfvf_tlv hdr;
};

/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
        struct channel_tlv tl;
        u8 padding[4];
};
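
/* The structures above define the generic framing of the mailbox: every
 * vf->pf message begins with a vfpf_first_tlv and the list is terminated by
 * a channel_list_end_tlv. A minimal illustrative sketch follows (not part of
 * the driver interface; the helper name is an assumption): filling the common
 * request header, where 'type' is one of the CHANNEL_TLV_* values defined at
 * the end of this file and 'resp_offset' is where the VF expects the PF to
 * write its reply within the shared mailbox buffer.
 */
static inline void vfpf_prep_first_tlv(struct vfpf_first_tlv *first,
                                       u16 type, u16 length, u32 resp_offset)
{
        first->tl.type = type;          /* request kind */
        first->tl.length = length;      /* size of the specific request tlv */
        first->resp_msg_offset = resp_offset;
}
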
/* Acquire */
struct vfpf_acquire_tlv {
        struct vfpf_first_tlv first_tlv;

        struct vf_pf_vfdev_info {
                /* the following fields are for debug purposes */
                u8 vf_id;       /* ME register value */
                u8 vf_os;       /* e.g. Linux, W2K8 */
                u8 padding[2];
        } vfdev_info;

        struct vf_pf_resc_request resc_request;

        aligned_u64 bulletin_addr;
};

/* acquire response tlv - carries the allocated resources */
struct pfvf_acquire_resp_tlv {
        struct pfvf_tlv hdr;

        struct pf_vf_pfdev_info {
                u32 chip_num;
                u32 pf_cap;
#define PFVF_CAP_RSS            0x00000001
#define PFVF_CAP_DHC            0x00000002
#define PFVF_CAP_TPA            0x00000004
                char fw_ver[32];
                u16 db_size;
                u8 indices_per_sb;
                u8 padding;
        } pfdev_info;

        struct pf_vf_resc {
                /* in case of status NO_RESOURCE in message hdr, pf will fill
                 * this struct with suggested amount of resources for next
                 * acquire request
                 */
#define PFVF_MAX_QUEUES_PER_VF          16
#define PFVF_MAX_SBS_PER_VF             16
                struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
                u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
                u8 num_rxqs;
                u8 num_txqs;
                u8 num_sbs;
                u8 num_mac_filters;
                u8 num_vlan_filters;
                u8 num_mc_filters;
                u8 permanent_mac_addr[ETH_ALEN];
                u8 current_mac_addr[ETH_ALEN];
                u8 padding[2];
        } resc;
};
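
/* As the comment in pf_vf_resc notes, a PFVF_STATUS_NO_RESOURCE response
 * carries the amounts the PF is able to grant. A minimal illustrative sketch
 * (the helper name and flow are assumptions, not the driver's actual
 * implementation): the VF copies the suggested amounts into the resc_request
 * of its next acquire attempt.
 */
static inline void vfpf_adjust_resc_request(struct vf_pf_resc_request *req,
                                            const struct pfvf_acquire_resp_tlv *resp)
{
        if (resp->hdr.status != PFVF_STATUS_NO_RESOURCE)
                return;         /* nothing to shrink */

        req->num_rxqs = resp->resc.num_rxqs;
        req->num_txqs = resp->resc.num_txqs;
        req->num_sbs = resp->resc.num_sbs;
        req->num_mac_filters = resp->resc.num_mac_filters;
        req->num_vlan_filters = resp->resc.num_vlan_filters;
        req->num_mc_filters = resp->resc.num_mc_filters;
}
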
/* Init VF */
struct vfpf_init_tlv {
        struct vfpf_first_tlv first_tlv;

        aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */
        aligned_u64 spq_addr;
        aligned_u64 stats_addr;
};

/* Setup Queue */
struct vfpf_setup_q_tlv {
        struct vfpf_first_tlv first_tlv;

        struct vf_pf_rxq_params {
                /* physical addresses */
                aligned_u64 rcq_addr;
                aligned_u64 rcq_np_addr;
                aligned_u64 rxq_addr;
                aligned_u64 sge_addr;

                /* sb + hc info */
                u8 vf_sb;               /* index in hw_sbs[] */
                u8 sb_index;            /* Index in the SB */
                u16 hc_rate;            /* desired interrupts per sec. */
                                        /* valid iff VFPF_QUEUE_FLG_HC */
                /* rx buffer info */
                u16 mtu;
                u16 buf_sz;
                u16 flags;              /* VFPF_QUEUE_FLG_X flags */
                u16 stat_id;            /* valid iff VFPF_QUEUE_FLG_STATS */

                /* valid iff VFPF_QUEUE_FLG_TPA */
                u16 sge_buf_sz;
                u16 tpa_agg_sz;
                u8 max_sge_pkt;
                u8 drop_flags;          /* VFPF_QUEUE_DROP_X, for Linux VMs
                                         * all the flags are turned off
                                         */
                u8 cache_line_log;      /* VFPF_QUEUE_FLG_CACHE_ALIGN */
                u8 padding;
        } rxq;

        struct vf_pf_txq_params {
                /* physical addresses */
                aligned_u64 txq_addr;

                /* sb + hc info */
                u8 vf_sb;               /* index in hw_sbs[] */
                u8 sb_index;            /* Index in the SB */
                u16 hc_rate;            /* desired interrupts per sec. */
                                        /* valid iff VFPF_QUEUE_FLG_HC */
                u32 flags;              /* VFPF_QUEUE_FLG_X flags */
                u16 stat_id;            /* valid iff VFPF_QUEUE_FLG_STATS */
                u8 traffic_type;        /* see in setup_context() */
                u8 padding;
        } txq;

        u8 vf_qid;                      /* index in hw_qid[] */
        u8 param_valid;
#define VFPF_RXQ_VALID          0x01
#define VFPF_TXQ_VALID          0x02
        u8 padding[2];
};
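
/* A setup_q request may describe the rx queue, the tx queue, or both;
 * param_valid tells the PF which embedded parameter block to read. A minimal
 * illustrative sketch (helper name is an assumption):
 */
static inline void vfpf_setup_q_mark_valid(struct vfpf_setup_q_tlv *setup_q,
                                           u8 vf_qid, int has_rxq, int has_txq)
{
        setup_q->vf_qid = vf_qid;       /* index in hw_qid[] from the acquire response */
        setup_q->param_valid = 0;
        if (has_rxq)
                setup_q->param_valid |= VFPF_RXQ_VALID;
        if (has_txq)
                setup_q->param_valid |= VFPF_TXQ_VALID;
}
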
/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
        u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID    0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID    0x02
#define VFPF_Q_FILTER_SET_MAC           0x100   /* set/clear */
        u8 mac[ETH_ALEN];
        u16 vlan_tag;
};

/* configure queue filters */
struct vfpf_set_q_filters_tlv {
        struct vfpf_first_tlv first_tlv;

        u32 flags;
#define VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED     0x01
#define VFPF_SET_Q_FILTERS_MULTICAST_CHANGED    0x02
#define VFPF_SET_Q_FILTERS_RX_MASK_CHANGED      0x04

        u8 vf_qid;                      /* index in hw_qid[] */
        u8 n_mac_vlan_filters;
        u8 n_multicast;
        u8 padding;

#define PFVF_MAX_MAC_FILTERS            16
#define PFVF_MAX_VLAN_FILTERS           16
#define PFVF_MAX_FILTERS (PFVF_MAX_MAC_FILTERS +\
                          PFVF_MAX_VLAN_FILTERS)
        struct vfpf_q_mac_vlan_filter filters[PFVF_MAX_FILTERS];

#define PFVF_MAX_MULTICAST_PER_VF       32
        u8 multicast[PFVF_MAX_MULTICAST_PER_VF][ETH_ALEN];

        u32 rx_mask;    /* see mask constants at the top of the file */
};
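
/* Queuing a single "set MAC" filter in a set_q_filters request; the
 * MAC_VLAN_CHANGED bit in 'flags' tells the PF that the filters array is
 * populated. A minimal illustrative sketch (helper name is an assumption;
 * the caller is expected to have zeroed the message first):
 */
static inline int vfpf_add_set_mac_filter(struct vfpf_set_q_filters_tlv *msg,
                                          u8 vf_qid, const u8 *mac)
{
        struct vfpf_q_mac_vlan_filter *fl;
        int i;

        if (msg->n_mac_vlan_filters >= PFVF_MAX_FILTERS)
                return -1;      /* no room left in this request */

        fl = &msg->filters[msg->n_mac_vlan_filters++];
        fl->flags = VFPF_Q_FILTER_DEST_MAC_VALID | VFPF_Q_FILTER_SET_MAC;
        for (i = 0; i < ETH_ALEN; i++)
                fl->mac[i] = mac[i];

        msg->vf_qid = vf_qid;
        msg->flags |= VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
        return 0;
}
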
/* release the VF's acquired resources */
struct vfpf_release_tlv {
        struct vfpf_first_tlv first_tlv;
        u16 vf_id;
        u8 padding[2];
};

struct tlv_buffer_size {
        u8 tlv_buffer[TLV_BUFFER_SIZE];
};

union vfpf_tlvs {
        struct vfpf_first_tlv first_tlv;
        struct vfpf_acquire_tlv acquire;
        struct vfpf_init_tlv init;
        struct vfpf_setup_q_tlv setup_q;
        struct vfpf_set_q_filters_tlv set_q_filters;
        struct vfpf_release_tlv release;
        struct channel_list_end_tlv list_end;
        struct tlv_buffer_size tlv_buf_size;
};

union pfvf_tlvs {
        struct pfvf_general_resp_tlv general_resp;
        struct pfvf_acquire_resp_tlv acquire_resp;
        struct channel_list_end_tlv list_end;
        struct tlv_buffer_size tlv_buf_size;
};

#define MAX_TLVS_IN_LIST 50

enum channel_tlvs {
        CHANNEL_TLV_NONE,
        CHANNEL_TLV_ACQUIRE,
        CHANNEL_TLV_INIT,
        CHANNEL_TLV_SETUP_Q,
        CHANNEL_TLV_SET_Q_FILTERS,
        CHANNEL_TLV_RELEASE,
        CHANNEL_TLV_LIST_END,
        CHANNEL_TLV_MAX
};
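
/* Either side can locate a specific tlv in a received list by stepping
 * through the buffer with channel_tlv.length until CHANNEL_TLV_LIST_END,
 * bounded by MAX_TLVS_IN_LIST to guard against a malformed list. A minimal
 * illustrative sketch (helper name is an assumption):
 */
static inline struct channel_tlv *vfpf_search_tlv_list(void *tlvs_list,
                                                       u16 req_tlv_type)
{
        struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
        int i;

        for (i = 0; i < MAX_TLVS_IN_LIST; i++) {
                if (tlv->type == req_tlv_type)
                        return tlv;
                if (tlv->type == CHANNEL_TLV_LIST_END || !tlv->length)
                        break;  /* end of list or malformed entry */
                tlv = (struct channel_tlv *)((u8 *)tlv + tlv->length);
        }
        return NULL;
}
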
#endif /* VF_PF_IF_H */