Diffstat (limited to 'drivers/net/ethernet/netronome')
42 files changed, 2562 insertions, 451 deletions
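The largest functional change in this series is the multi-entry control-message cache for offloaded BPF map operations (bpf/cmsg.c below). Firmware that advertises NFP_BPF_CAP_TYPE_CMSG_MULTI_ENT may return several map entries per reply, and the driver caps each batch at NFP_BPF_MAP_CACHE_CNT (4) or at whatever fits in the control channel MTU, whichever is smaller. The following standalone sketch (not part of the patch) mirrors that sizing arithmetic from nfp_bpf_ctrl_cmsg_cache_cnt(); the MTU, header, key and value sizes used in main() are made-up example numbers, and the message headers are reduced to plain byte counts rather than the real cmsg_req_map_op/cmsg_reply_map_op structs.

#include <stdio.h>

#define NFP_BPF_MAP_CACHE_CNT 4u /* hard cap, from bpf/main.h in the patch */

static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a < b ? a : b;

	return m < c ? m : c;
}

/* How many key/value pairs fit in one request and one reply, capped at
 * NFP_BPF_MAP_CACHE_CNT -- the same calculation the driver performs when
 * the firmware supports multi-entry control messages.
 */
static unsigned int cmsg_cache_cnt(unsigned int mtu,
				   unsigned int req_hdr_sz,
				   unsigned int reply_hdr_sz,
				   unsigned int key_sz, unsigned int val_sz)
{
	unsigned int entry_sz = key_sz + val_sz;
	unsigned int req_max = (mtu - req_hdr_sz) / entry_sz;
	unsigned int reply_max = (mtu - reply_hdr_sz) / entry_sz;

	return min3u(req_max, reply_max, NFP_BPF_MAP_CACHE_CNT);
}

int main(void)
{
	/* Example numbers only: a 9000 B control channel MTU, 16 B message
	 * headers, and a map with 8 B keys and 64 B values.
	 */
	printf("%u entries per cmsg\n", cmsg_cache_cnt(9000, 16, 16, 8, 64));
	return 0;
}

With these example sizes all four cache slots fit comfortably; on a small control MTU the per-message entry count drops accordingly, which is why nfp_bpf_start() in the patch also rejects MTUs below nfp_bpf_ctrl_cmsg_min_mtu().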
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig index bac5be4d4f43..a3f68a718813 100644 --- a/drivers/net/ethernet/netronome/Kconfig +++ b/drivers/net/ethernet/netronome/Kconfig @@ -31,6 +31,7 @@ config NFP_APP_FLOWER bool "NFP4000/NFP6000 TC Flower offload support" depends on NFP depends on NET_SWITCHDEV + depends on IPV6!=m || NFP=m default y ---help--- Enable driver support for TC Flower offload on NFP4000 and NFP6000. diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 2805641965f3..d31772ae511d 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -17,6 +17,7 @@ nfp-objs := \ nfpcore/nfp_target.o \ ccm.o \ ccm_mbox.o \ + devlink_param.o \ nfp_asm.o \ nfp_app.o \ nfp_app_nic.o \ diff --git a/drivers/net/ethernet/netronome/nfp/bpf/Makefile b/drivers/net/ethernet/netronome/nfp/bpf/Makefile deleted file mode 100644 index 805fa28f391a..000000000000 --- a/drivers/net/ethernet/netronome/nfp/bpf/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# kbuild requires Makefile in a directory to build individual objects diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c index bc9850e4ec5e..0e2db6ea79e9 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c @@ -6,6 +6,7 @@ #include <linux/bug.h> #include <linux/jiffies.h> #include <linux/skbuff.h> +#include <linux/timekeeping.h> #include "../ccm.h" #include "../nfp_app.h" @@ -175,29 +176,151 @@ nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply, return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n]; } +static bool nfp_bpf_ctrl_op_cache_invalidate(enum nfp_ccm_type op) +{ + return op == NFP_CCM_TYPE_BPF_MAP_UPDATE || + op == NFP_CCM_TYPE_BPF_MAP_DELETE; +} + +static bool nfp_bpf_ctrl_op_cache_capable(enum nfp_ccm_type op) +{ + return op == NFP_CCM_TYPE_BPF_MAP_LOOKUP || + op == NFP_CCM_TYPE_BPF_MAP_GETNEXT; +} + +static bool nfp_bpf_ctrl_op_cache_fill(enum nfp_ccm_type op) +{ + return op == NFP_CCM_TYPE_BPF_MAP_GETFIRST || + op == NFP_CCM_TYPE_BPF_MAP_GETNEXT; +} + +static unsigned int +nfp_bpf_ctrl_op_cache_get(struct nfp_bpf_map *nfp_map, enum nfp_ccm_type op, + const u8 *key, u8 *out_key, u8 *out_value, + u32 *cache_gen) +{ + struct bpf_map *map = &nfp_map->offmap->map; + struct nfp_app_bpf *bpf = nfp_map->bpf; + unsigned int i, count, n_entries; + struct cmsg_reply_map_op *reply; + + n_entries = nfp_bpf_ctrl_op_cache_fill(op) ? 
bpf->cmsg_cache_cnt : 1; + + spin_lock(&nfp_map->cache_lock); + *cache_gen = nfp_map->cache_gen; + if (nfp_map->cache_blockers) + n_entries = 1; + + if (nfp_bpf_ctrl_op_cache_invalidate(op)) + goto exit_block; + if (!nfp_bpf_ctrl_op_cache_capable(op)) + goto exit_unlock; + + if (!nfp_map->cache) + goto exit_unlock; + if (nfp_map->cache_to < ktime_get_ns()) + goto exit_invalidate; + + reply = (void *)nfp_map->cache->data; + count = be32_to_cpu(reply->count); + + for (i = 0; i < count; i++) { + void *cached_key; + + cached_key = nfp_bpf_ctrl_reply_key(bpf, reply, i); + if (memcmp(cached_key, key, map->key_size)) + continue; + + if (op == NFP_CCM_TYPE_BPF_MAP_LOOKUP) + memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, i), + map->value_size); + if (op == NFP_CCM_TYPE_BPF_MAP_GETNEXT) { + if (i + 1 == count) + break; + + memcpy(out_key, + nfp_bpf_ctrl_reply_key(bpf, reply, i + 1), + map->key_size); + } + + n_entries = 0; + goto exit_unlock; + } + goto exit_unlock; + +exit_block: + nfp_map->cache_blockers++; +exit_invalidate: + dev_consume_skb_any(nfp_map->cache); + nfp_map->cache = NULL; +exit_unlock: + spin_unlock(&nfp_map->cache_lock); + return n_entries; +} + +static void +nfp_bpf_ctrl_op_cache_put(struct nfp_bpf_map *nfp_map, enum nfp_ccm_type op, + struct sk_buff *skb, u32 cache_gen) +{ + bool blocker, filler; + + blocker = nfp_bpf_ctrl_op_cache_invalidate(op); + filler = nfp_bpf_ctrl_op_cache_fill(op); + if (blocker || filler) { + u64 to = 0; + + if (filler) + to = ktime_get_ns() + NFP_BPF_MAP_CACHE_TIME_NS; + + spin_lock(&nfp_map->cache_lock); + if (blocker) { + nfp_map->cache_blockers--; + nfp_map->cache_gen++; + } + if (filler && !nfp_map->cache_blockers && + nfp_map->cache_gen == cache_gen) { + nfp_map->cache_to = to; + swap(nfp_map->cache, skb); + } + spin_unlock(&nfp_map->cache_lock); + } + + dev_consume_skb_any(skb); +} + static int nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op, u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value) { struct nfp_bpf_map *nfp_map = offmap->dev_priv; + unsigned int n_entries, reply_entries, count; struct nfp_app_bpf *bpf = nfp_map->bpf; struct bpf_map *map = &offmap->map; struct cmsg_reply_map_op *reply; struct cmsg_req_map_op *req; struct sk_buff *skb; + u32 cache_gen; int err; /* FW messages have no space for more than 32 bits of flags */ if (flags >> 32) return -EOPNOTSUPP; + /* Handle op cache */ + n_entries = nfp_bpf_ctrl_op_cache_get(nfp_map, op, key, out_key, + out_value, &cache_gen); + if (!n_entries) + return 0; + skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1); - if (!skb) - return -ENOMEM; + if (!skb) { + err = -ENOMEM; + goto err_cache_put; + } req = (void *)skb->data; req->tid = cpu_to_be32(nfp_map->tid); - req->count = cpu_to_be32(1); + req->count = cpu_to_be32(n_entries); req->flags = cpu_to_be32(flags); /* Copy inputs */ @@ -207,16 +330,38 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op, memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value, map->value_size); - skb = nfp_ccm_communicate(&bpf->ccm, skb, op, - nfp_bpf_cmsg_map_reply_size(bpf, 1)); - if (IS_ERR(skb)) - return PTR_ERR(skb); + skb = nfp_ccm_communicate(&bpf->ccm, skb, op, 0); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + goto err_cache_put; + } + + if (skb->len < sizeof(*reply)) { + cmsg_warn(bpf, "cmsg drop - type 0x%02x too short %d!\n", + op, skb->len); + err = -EIO; + goto err_free; + } reply = (void *)skb->data; + count = be32_to_cpu(reply->count); err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr); + /* FW 
responds with message sized to hold the good entries, + * plus one extra entry if there was an error. + */ + reply_entries = count + !!err; + if (n_entries > 1 && count) + err = 0; if (err) goto err_free; + if (skb->len != nfp_bpf_cmsg_map_reply_size(bpf, reply_entries)) { + cmsg_warn(bpf, "cmsg drop - type 0x%02x too short %d for %d entries!\n", + op, skb->len, reply_entries); + err = -EIO; + goto err_free; + } + /* Copy outputs */ if (out_key) memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0), @@ -225,11 +370,13 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op, memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0), map->value_size); - dev_consume_skb_any(skb); + nfp_bpf_ctrl_op_cache_put(nfp_map, op, skb, cache_gen); return 0; err_free: dev_kfree_skb_any(skb); +err_cache_put: + nfp_bpf_ctrl_op_cache_put(nfp_map, op, NULL, cache_gen); return err; } @@ -267,11 +414,29 @@ int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap, key, NULL, 0, next_key, NULL); } +unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf) +{ + return max(nfp_bpf_cmsg_map_req_size(bpf, 1), + nfp_bpf_cmsg_map_reply_size(bpf, 1)); +} + unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf) { - return max3((unsigned int)NFP_NET_DEFAULT_MTU, - nfp_bpf_cmsg_map_req_size(bpf, 1), - nfp_bpf_cmsg_map_reply_size(bpf, 1)); + return max3(NFP_NET_DEFAULT_MTU, + nfp_bpf_cmsg_map_req_size(bpf, NFP_BPF_MAP_CACHE_CNT), + nfp_bpf_cmsg_map_reply_size(bpf, NFP_BPF_MAP_CACHE_CNT)); +} + +unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf) +{ + unsigned int mtu, req_max, reply_max, entry_sz; + + mtu = bpf->app->ctrl->dp.mtu; + entry_sz = bpf->cmsg_key_sz + bpf->cmsg_val_sz; + req_max = (mtu - sizeof(struct cmsg_req_map_op)) / entry_sz; + reply_max = (mtu - sizeof(struct cmsg_reply_map_op)) / entry_sz; + + return min3(req_max, reply_max, NFP_BPF_MAP_CACHE_CNT); } void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h index 06c4286bd79e..a83a0ad5e27d 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h @@ -24,6 +24,7 @@ enum bpf_cap_tlv_type { NFP_BPF_CAP_TYPE_QUEUE_SELECT = 5, NFP_BPF_CAP_TYPE_ADJUST_TAIL = 6, NFP_BPF_CAP_TYPE_ABI_VERSION = 7, + NFP_BPF_CAP_TYPE_CMSG_MULTI_ENT = 8, }; struct nfp_bpf_cap_tlv_func { diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 4054b70d7719..0a721f6e8676 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1163,7 +1163,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool clr_gpr, lmem_step step) { s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off; - bool first = true, last; + bool first = true, narrow_ld, last; bool needs_inc = false; swreg stack_off_reg; u8 prev_gpr = 255; @@ -1209,13 +1209,22 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, needs_inc = true; } + + narrow_ld = clr_gpr && size < 8; + if (lm3) { + unsigned int nop_cnt; + emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3); - /* For size < 4 one slot will be filled by zeroing of upper. */ - wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3); + /* For size < 4 one slot will be filled by zeroing of upper, + * but be careful, that zeroing could be eliminated by zext + * optimization. 
+ */ + nop_cnt = narrow_ld && meta->flags & FLAG_INSN_DO_ZEXT ? 2 : 3; + wrp_nops(nfp_prog, nop_cnt); } - if (clr_gpr && size < 8) + if (narrow_ld) wrp_zext(nfp_prog, meta, gpr); while (size) { @@ -2643,17 +2652,17 @@ static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, switch (meta->insn.off) { case offsetof(struct __sk_buff, len): - if (size != FIELD_SIZEOF(struct __sk_buff, len)) + if (size != sizeof_field(struct __sk_buff, len)) return -EOPNOTSUPP; wrp_mov(nfp_prog, dst, plen_reg(nfp_prog)); break; case offsetof(struct __sk_buff, data): - if (size != FIELD_SIZEOF(struct __sk_buff, data)) + if (size != sizeof_field(struct __sk_buff, data)) return -EOPNOTSUPP; wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); break; case offsetof(struct __sk_buff, data_end): - if (size != FIELD_SIZEOF(struct __sk_buff, data_end)) + if (size != sizeof_field(struct __sk_buff, data_end)) return -EOPNOTSUPP; emit_alu(nfp_prog, dst, plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); @@ -2674,12 +2683,12 @@ static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, switch (meta->insn.off) { case offsetof(struct xdp_md, data): - if (size != FIELD_SIZEOF(struct xdp_md, data)) + if (size != sizeof_field(struct xdp_md, data)) return -EOPNOTSUPP; wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); break; case offsetof(struct xdp_md, data_end): - if (size != FIELD_SIZEOF(struct xdp_md, data_end)) + if (size != sizeof_field(struct xdp_md, data_end)) return -EOPNOTSUPP; emit_alu(nfp_prog, dst, plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); @@ -3943,7 +3952,7 @@ static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog) static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog) { struct nfp_insn_meta *meta1, *meta2; - const s32 exp_mask[] = { + static const s32 exp_mask[] = { [BPF_B] = 0x000000ffU, [BPF_H] = 0x0000ffffU, [BPF_W] = 0xffffffffU, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 1c9fb11470df..11c83a99b014 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -15,7 +15,7 @@ const struct rhashtable_params nfp_bpf_maps_neutral_params = { .nelem_hint = 4, - .key_len = FIELD_SIZEOF(struct bpf_map, id), + .key_len = sizeof_field(struct bpf_map, id), .key_offset = offsetof(struct nfp_bpf_neutral_map, map_id), .head_offset = offsetof(struct nfp_bpf_neutral_map, l), .automatic_shrinking = true, @@ -300,6 +300,14 @@ nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value, } static int +nfp_bpf_parse_cap_cmsg_multi_ent(struct nfp_app_bpf *bpf, void __iomem *value, + u32 length) +{ + bpf->cmsg_multi_ent = true; + return 0; +} + +static int nfp_bpf_parse_cap_abi_version(struct nfp_app_bpf *bpf, void __iomem *value, u32 length) { @@ -375,6 +383,11 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app) length)) goto err_release_free; break; + case NFP_BPF_CAP_TYPE_CMSG_MULTI_ENT: + if (nfp_bpf_parse_cap_cmsg_multi_ent(app->priv, value, + length)) + goto err_release_free; + break; default: nfp_dbg(cpp, "unknown BPF capability: %d\n", type); break; @@ -415,6 +428,25 @@ static void nfp_bpf_ndo_uninit(struct nfp_app *app, struct net_device *netdev) bpf_offload_dev_netdev_unregister(bpf->bpf_dev, netdev); } +static int nfp_bpf_start(struct nfp_app *app) +{ + struct nfp_app_bpf *bpf = app->priv; + + if (app->ctrl->dp.mtu < nfp_bpf_ctrl_cmsg_min_mtu(bpf)) { + nfp_err(bpf->app->cpp, + "ctrl channel MTU below min required %u < %u\n", + 
app->ctrl->dp.mtu, nfp_bpf_ctrl_cmsg_min_mtu(bpf)); + return -EINVAL; + } + + if (bpf->cmsg_multi_ent) + bpf->cmsg_cache_cnt = nfp_bpf_ctrl_cmsg_cache_cnt(bpf); + else + bpf->cmsg_cache_cnt = 1; + + return 0; +} + static int nfp_bpf_init(struct nfp_app *app) { struct nfp_app_bpf *bpf; @@ -488,6 +520,7 @@ const struct nfp_app_type app_bpf = { .init = nfp_bpf_init, .clean = nfp_bpf_clean, + .start = nfp_bpf_start, .check_mtu = nfp_bpf_check_mtu, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 57d6ff51e980..fac9c6f9e197 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -99,6 +99,7 @@ enum pkt_vec { * @maps_neutral: hash table of offload-neutral maps (on pointer) * * @abi_version: global BPF ABI version + * @cmsg_cache_cnt: number of entries to read for caching * * @adjust_head: adjust head capability * @adjust_head.flags: extra flags for adjust head @@ -124,6 +125,7 @@ enum pkt_vec { * @pseudo_random: FW initialized the pseudo-random machinery (CSRs) * @queue_select: BPF can set the RX queue ID in packet vector * @adjust_tail: BPF can simply trunc packet size for adjust tail + * @cmsg_multi_ent: FW can pack multiple map entries in a single cmsg */ struct nfp_app_bpf { struct nfp_app *app; @@ -134,6 +136,8 @@ struct nfp_app_bpf { unsigned int cmsg_key_sz; unsigned int cmsg_val_sz; + unsigned int cmsg_cache_cnt; + struct list_head map_list; unsigned int maps_in_use; unsigned int map_elems_in_use; @@ -169,6 +173,7 @@ struct nfp_app_bpf { bool pseudo_random; bool queue_select; bool adjust_tail; + bool cmsg_multi_ent; }; enum nfp_bpf_map_use { @@ -183,11 +188,21 @@ struct nfp_bpf_map_word { unsigned char non_zero_update :1; }; +#define NFP_BPF_MAP_CACHE_CNT 4U +#define NFP_BPF_MAP_CACHE_TIME_NS (250 * 1000) + /** * struct nfp_bpf_map - private per-map data attached to BPF maps for offload * @offmap: pointer to the offloaded BPF map * @bpf: back pointer to bpf app private structure * @tid: table id identifying map on datapath + * + * @cache_lock: protects @cache_blockers, @cache_to, @cache + * @cache_blockers: number of ops in flight which block caching + * @cache_gen: counter incremented by every blocker on exit + * @cache_to: time when cache will no longer be valid (ns) + * @cache: skb with cached response + * * @l: link on the nfp_app_bpf->map_list list * @use_map: map of how the value is used (in 4B chunks) */ @@ -195,6 +210,13 @@ struct nfp_bpf_map { struct bpf_offloaded_map *offmap; struct nfp_app_bpf *bpf; u32 tid; + + spinlock_t cache_lock; + u32 cache_blockers; + u32 cache_gen; + u64 cache_to; + struct sk_buff *cache; + struct list_head l; struct nfp_bpf_map_word use_map[]; }; @@ -564,7 +586,9 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv); +unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf); unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf); +unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf); long long int nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map); void diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 39c9fec222b4..ac02369174a9 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -46,9 +46,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog 
*nfp_prog, /* Grab a single ref to the map for our record. The prog destroy ndo * happens after free_used_maps(). */ - map = bpf_map_inc(map, false); - if (IS_ERR(map)) - return PTR_ERR(map); + bpf_map_inc(map); record = kmalloc(sizeof(*record), GFP_KERNEL); if (!record) { @@ -376,7 +374,7 @@ nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap) } use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) * - FIELD_SIZEOF(struct nfp_bpf_map, use_map[0]); + sizeof_field(struct nfp_bpf_map, use_map[0]); nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER); if (!nfp_map) @@ -385,6 +383,7 @@ nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap) offmap->dev_priv = nfp_map; nfp_map->offmap = offmap; nfp_map->bpf = bpf; + spin_lock_init(&nfp_map->cache_lock); res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map); if (res < 0) { @@ -407,6 +406,8 @@ nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap) struct nfp_bpf_map *nfp_map = offmap->dev_priv; nfp_bpf_ctrl_free_map(bpf, nfp_map); + dev_consume_skb_any(nfp_map->cache); + WARN_ON_ONCE(nfp_map->cache_blockers); list_del_init(&nfp_map->l); bpf->map_elems_in_use -= offmap->map.max_entries; bpf->maps_in_use--; @@ -457,8 +458,8 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data, return -EINVAL; rcu_read_lock(); - record = rhashtable_lookup_fast(&bpf->maps_neutral, &map_id, - nfp_bpf_maps_neutral_params); + record = rhashtable_lookup(&bpf->maps_neutral, &map_id, + nfp_bpf_maps_neutral_params); if (!record || map_id_full > U32_MAX) { rcu_read_unlock(); cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n", diff --git a/drivers/net/ethernet/netronome/nfp/ccm.h b/drivers/net/ethernet/netronome/nfp/ccm.h index a460c75522be..d81d450be50e 100644 --- a/drivers/net/ethernet/netronome/nfp/ccm.h +++ b/drivers/net/ethernet/netronome/nfp/ccm.h @@ -26,6 +26,7 @@ enum nfp_ccm_type { NFP_CCM_TYPE_CRYPTO_ADD = 10, NFP_CCM_TYPE_CRYPTO_DEL = 11, NFP_CCM_TYPE_CRYPTO_UPDATE = 12, + NFP_CCM_TYPE_CRYPTO_RESYNC = 13, __NFP_CCM_TYPE_MAX, }; diff --git a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h index 60372ddf69f0..bffe58bb2f27 100644 --- a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h +++ b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h @@ -4,6 +4,10 @@ #ifndef NFP_CRYPTO_H #define NFP_CRYPTO_H 1 +struct net_device; +struct nfp_net; +struct nfp_net_tls_resync_req; + struct nfp_net_tls_offload_ctx { __be32 fw_handle[2]; @@ -17,11 +21,22 @@ struct nfp_net_tls_offload_ctx { #ifdef CONFIG_TLS_DEVICE int nfp_net_tls_init(struct nfp_net *nn); +int nfp_net_tls_rx_resync_req(struct net_device *netdev, + struct nfp_net_tls_resync_req *req, + void *pkt, unsigned int pkt_len); #else static inline int nfp_net_tls_init(struct nfp_net *nn) { return 0; } + +static inline int +nfp_net_tls_rx_resync_req(struct net_device *netdev, + struct nfp_net_tls_resync_req *req, + void *pkt, unsigned int pkt_len) +{ + return -EOPNOTSUPP; +} #endif #endif diff --git a/drivers/net/ethernet/netronome/nfp/crypto/fw.h b/drivers/net/ethernet/netronome/nfp/crypto/fw.h index 67413d946c4a..8d1458896bcb 100644 --- a/drivers/net/ethernet/netronome/nfp/crypto/fw.h +++ b/drivers/net/ethernet/netronome/nfp/crypto/fw.h @@ -9,6 +9,14 @@ #define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC 0 #define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC 1 +struct nfp_net_tls_resync_req { + __be32 fw_handle[2]; + __be32 tcp_seq; + u8 l3_offset; + u8 
l4_offset; + u8 resv[2]; +}; + struct nfp_crypto_reply_simple { struct nfp_ccm_hdr hdr; __be32 error; diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c index 96a96b35c0ca..7c50e3dfb9d5 100644 --- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c +++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c @@ -5,6 +5,7 @@ #include <linux/ipv6.h> #include <linux/skbuff.h> #include <linux/string.h> +#include <net/inet6_hashtables.h> #include <net/tls.h> #include "../ccm.h" @@ -391,8 +392,9 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk, if (direction == TLS_OFFLOAD_CTX_DIR_TX) return 0; - tls_offload_rx_resync_set_type(sk, - TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT); + if (!nn->tlv_caps.tls_resync_ss) + tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT); + return 0; err_fw_remove: @@ -424,6 +426,7 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq, struct nfp_net *nn = netdev_priv(netdev); struct nfp_net_tls_offload_ctx *ntls; struct nfp_crypto_req_update *req; + enum nfp_ccm_type type; struct sk_buff *skb; gfp_t flags; int err; @@ -442,15 +445,18 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq, req->tcp_seq = cpu_to_be32(seq); memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no)); + type = NFP_CCM_TYPE_CRYPTO_UPDATE; if (direction == TLS_OFFLOAD_CTX_DIR_TX) { - err = nfp_net_tls_communicate_simple(nn, skb, "sync", - NFP_CCM_TYPE_CRYPTO_UPDATE); + err = nfp_net_tls_communicate_simple(nn, skb, "sync", type); if (err) return err; ntls->next_seq = seq; } else { - nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE, + if (nn->tlv_caps.tls_resync_ss) + type = NFP_CCM_TYPE_CRYPTO_RESYNC; + nfp_ccm_mbox_post(nn, skb, type, sizeof(struct nfp_crypto_reply_simple)); + atomic_inc(&nn->ktls_rx_resync_sent); } return 0; @@ -462,6 +468,79 @@ static const struct tlsdev_ops nfp_net_tls_ops = { .tls_dev_resync = nfp_net_tls_resync, }; +int nfp_net_tls_rx_resync_req(struct net_device *netdev, + struct nfp_net_tls_resync_req *req, + void *pkt, unsigned int pkt_len) +{ + struct nfp_net *nn = netdev_priv(netdev); + struct nfp_net_tls_offload_ctx *ntls; + struct ipv6hdr *ipv6h; + struct tcphdr *th; + struct iphdr *iph; + struct sock *sk; + __be32 tcp_seq; + int err; + + iph = pkt + req->l3_offset; + ipv6h = pkt + req->l3_offset; + th = pkt + req->l4_offset; + + if ((u8 *)&th[1] > (u8 *)pkt + pkt_len) { + netdev_warn_once(netdev, "invalid TLS RX resync request (l3_off: %hhu l4_off: %hhu pkt_len: %u)\n", + req->l3_offset, req->l4_offset, pkt_len); + err = -EINVAL; + goto err_cnt_ign; + } + + switch (iph->version) { + case 4: + sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo, + iph->saddr, th->source, iph->daddr, + th->dest, netdev->ifindex); + break; +#if IS_ENABLED(CONFIG_IPV6) + case 6: + sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo, + &ipv6h->saddr, th->source, + &ipv6h->daddr, ntohs(th->dest), + netdev->ifindex, 0); + break; +#endif + default: + netdev_warn_once(netdev, "invalid TLS RX resync request (l3_off: %hhu l4_off: %hhu ipver: %u)\n", + req->l3_offset, req->l4_offset, iph->version); + err = -EINVAL; + goto err_cnt_ign; + } + + err = 0; + if (!sk) + goto err_cnt_ign; + if (!tls_is_sk_rx_device_offloaded(sk) || + sk->sk_shutdown & RCV_SHUTDOWN) + goto err_put_sock; + + ntls = tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_RX); + /* some FW versions can't report the handle and report 0s */ + if (memchr_inv(&req->fw_handle, 0, sizeof(req->fw_handle)) && + 
memcmp(&req->fw_handle, &ntls->fw_handle, sizeof(ntls->fw_handle))) + goto err_put_sock; + + /* copy to ensure alignment */ + memcpy(&tcp_seq, &req->tcp_seq, sizeof(tcp_seq)); + tls_offload_rx_resync_request(sk, tcp_seq); + atomic_inc(&nn->ktls_rx_resync_req); + + sock_gen_put(sk); + return 0; + +err_put_sock: + sock_gen_put(sk); +err_cnt_ign: + atomic_inc(&nn->ktls_rx_resync_ign); + return err; +} + static int nfp_net_tls_reset(struct nfp_net *nn) { struct nfp_crypto_req_reset *req; diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c new file mode 100644 index 000000000000..36491835ac65 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2019 Netronome Systems, Inc. */ + +#include <net/devlink.h> + +#include "nfpcore/nfp.h" +#include "nfpcore/nfp_nsp.h" +#include "nfp_main.h" + +/** + * struct nfp_devlink_param_u8_arg - Devlink u8 parameter get/set arguments + * @hwinfo_name: HWinfo key name + * @default_hi_val: Default HWinfo value if HWinfo doesn't exist + * @invalid_dl_val: Devlink value to use if HWinfo is unknown/invalid. + * -errno if there is no unknown/invalid value available + * @hi_to_dl: HWinfo to devlink value mapping + * @dl_to_hi: Devlink to hwinfo value mapping + * @max_dl_val: Maximum devlink value supported, for validation only + * @max_hi_val: Maximum HWinfo value supported, for validation only + */ +struct nfp_devlink_param_u8_arg { + const char *hwinfo_name; + const char *default_hi_val; + int invalid_dl_val; + u8 hi_to_dl[4]; + u8 dl_to_hi[4]; + u8 max_dl_val; + u8 max_hi_val; +}; + +static const struct nfp_devlink_param_u8_arg nfp_devlink_u8_args[] = { + [DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY] = { + .hwinfo_name = "app_fw_from_flash", + .default_hi_val = NFP_NSP_APP_FW_LOAD_DEFAULT, + .invalid_dl_val = + DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_UNKNOWN, + .hi_to_dl = { + [NFP_NSP_APP_FW_LOAD_DISK] = + DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DISK, + [NFP_NSP_APP_FW_LOAD_FLASH] = + DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH, + [NFP_NSP_APP_FW_LOAD_PREF] = + DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER, + }, + .dl_to_hi = { + [DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER] = + NFP_NSP_APP_FW_LOAD_PREF, + [DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH] = + NFP_NSP_APP_FW_LOAD_FLASH, + [DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DISK] = + NFP_NSP_APP_FW_LOAD_DISK, + }, + .max_dl_val = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DISK, + .max_hi_val = NFP_NSP_APP_FW_LOAD_PREF, + }, + [DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE] = { + .hwinfo_name = "abi_drv_reset", + .default_hi_val = NFP_NSP_DRV_RESET_DEFAULT, + .invalid_dl_val = + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_UNKNOWN, + .hi_to_dl = { + [NFP_NSP_DRV_RESET_ALWAYS] = + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_ALWAYS, + [NFP_NSP_DRV_RESET_NEVER] = + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_NEVER, + [NFP_NSP_DRV_RESET_DISK] = + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_DISK, + }, + .dl_to_hi = { + [DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_ALWAYS] = + NFP_NSP_DRV_RESET_ALWAYS, + [DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_NEVER] = + NFP_NSP_DRV_RESET_NEVER, + [DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_DISK] = + NFP_NSP_DRV_RESET_DISK, + }, + .max_dl_val = DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_DISK, + .max_hi_val = NFP_NSP_DRV_RESET_NEVER, + } +}; + +static int +nfp_devlink_param_u8_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ 
+ const struct nfp_devlink_param_u8_arg *arg; + struct nfp_pf *pf = devlink_priv(devlink); + struct nfp_nsp *nsp; + char hwinfo[32]; + long value; + int err; + + if (id >= ARRAY_SIZE(nfp_devlink_u8_args)) + return -EOPNOTSUPP; + + arg = &nfp_devlink_u8_args[id]; + + nsp = nfp_nsp_open(pf->cpp); + if (IS_ERR(nsp)) { + err = PTR_ERR(nsp); + nfp_warn(pf->cpp, "can't access NSP: %d\n", err); + return err; + } + + snprintf(hwinfo, sizeof(hwinfo), arg->hwinfo_name); + err = nfp_nsp_hwinfo_lookup_optional(nsp, hwinfo, sizeof(hwinfo), + arg->default_hi_val); + if (err) { + nfp_warn(pf->cpp, "HWinfo lookup failed: %d\n", err); + goto exit_close_nsp; + } + + err = kstrtol(hwinfo, 0, &value); + if (err || value < 0 || value > arg->max_hi_val) { + nfp_warn(pf->cpp, "HWinfo '%s' value %li invalid\n", + arg->hwinfo_name, value); + + if (arg->invalid_dl_val >= 0) + ctx->val.vu8 = arg->invalid_dl_val; + else + err = arg->invalid_dl_val; + + goto exit_close_nsp; + } + + ctx->val.vu8 = arg->hi_to_dl[value]; + +exit_close_nsp: + nfp_nsp_close(nsp); + return err; +} + +static int +nfp_devlink_param_u8_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + const struct nfp_devlink_param_u8_arg *arg; + struct nfp_pf *pf = devlink_priv(devlink); + struct nfp_nsp *nsp; + char hwinfo[32]; + int err; + + if (id >= ARRAY_SIZE(nfp_devlink_u8_args)) + return -EOPNOTSUPP; + + arg = &nfp_devlink_u8_args[id]; + + nsp = nfp_nsp_open(pf->cpp); + if (IS_ERR(nsp)) { + err = PTR_ERR(nsp); + nfp_warn(pf->cpp, "can't access NSP: %d\n", err); + return err; + } + + /* Note the value has already been validated. */ + snprintf(hwinfo, sizeof(hwinfo), "%s=%u", + arg->hwinfo_name, arg->dl_to_hi[ctx->val.vu8]); + err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo)); + if (err) { + nfp_warn(pf->cpp, "HWinfo set failed: %d\n", err); + goto exit_close_nsp; + } + +exit_close_nsp: + nfp_nsp_close(nsp); + return err; +} + +static int +nfp_devlink_param_u8_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + const struct nfp_devlink_param_u8_arg *arg; + + if (id >= ARRAY_SIZE(nfp_devlink_u8_args)) + return -EOPNOTSUPP; + + arg = &nfp_devlink_u8_args[id]; + + if (val.vu8 > arg->max_dl_val) { + NL_SET_ERR_MSG_MOD(extack, "parameter out of range"); + return -EINVAL; + } + + if (val.vu8 == arg->invalid_dl_val) { + NL_SET_ERR_MSG_MOD(extack, "unknown/invalid value specified"); + return -EINVAL; + } + + return 0; +} + +static const struct devlink_param nfp_devlink_params[] = { + DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + nfp_devlink_param_u8_get, + nfp_devlink_param_u8_set, + nfp_devlink_param_u8_validate), + DEVLINK_PARAM_GENERIC(RESET_DEV_ON_DRV_PROBE, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + nfp_devlink_param_u8_get, + nfp_devlink_param_u8_set, + nfp_devlink_param_u8_validate), +}; + +static int nfp_devlink_supports_params(struct nfp_pf *pf) +{ + struct nfp_nsp *nsp; + bool supported; + int err; + + nsp = nfp_nsp_open(pf->cpp); + if (IS_ERR(nsp)) { + err = PTR_ERR(nsp); + dev_err(&pf->pdev->dev, "Failed to access the NSP: %d\n", err); + return err; + } + + supported = nfp_nsp_has_hwinfo_lookup(nsp) && + nfp_nsp_has_hwinfo_set(nsp); + + nfp_nsp_close(nsp); + return supported; +} + +int nfp_devlink_params_register(struct nfp_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + int err; + + err = nfp_devlink_supports_params(pf); + if (err <= 0) + return err; + + err = devlink_params_register(devlink, nfp_devlink_params, 
+ ARRAY_SIZE(nfp_devlink_params)); + if (err) + return err; + + devlink_params_publish(devlink); + return 0; +} + +void nfp_devlink_params_unregister(struct nfp_pf *pf) +{ + int err; + + err = nfp_devlink_supports_params(pf); + if (err <= 0) + return; + + devlink_params_unregister(priv_to_devlink(pf), nfp_devlink_params, + ARRAY_SIZE(nfp_devlink_params)); +} diff --git a/drivers/net/ethernet/netronome/nfp/flower/Makefile b/drivers/net/ethernet/netronome/nfp/flower/Makefile deleted file mode 100644 index 805fa28f391a..000000000000 --- a/drivers/net/ethernet/netronome/nfp/flower/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# kbuild requires Makefile in a directory to build individual objects diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 5a54fe848de4..c06600fb47ff 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -2,10 +2,12 @@ /* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/bitfield.h> +#include <linux/mpls.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_csum.h> #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_mirred.h> +#include <net/tc_act/tc_mpls.h> #include <net/tc_act/tc_pedit.h> #include <net/tc_act/tc_vlan.h> #include <net/tc_act/tc_tunnel_key.h> @@ -20,11 +22,86 @@ #define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01) #define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04) #define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800) -#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS IP_TUNNEL_INFO_TX -#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \ +#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS (IP_TUNNEL_INFO_TX | \ + IP_TUNNEL_INFO_IPV6) +#define NFP_FL_SUPPORTED_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \ NFP_FL_TUNNEL_KEY | \ NFP_FL_TUNNEL_GENEVE_OPT) +static int +nfp_fl_push_mpls(struct nfp_fl_push_mpls *push_mpls, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) +{ + size_t act_size = sizeof(struct nfp_fl_push_mpls); + u32 mpls_lse = 0; + + push_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_MPLS; + push_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ; + + /* BOS is optional in the TC action but required for offload. */ + if (act->mpls_push.bos != ACT_MPLS_BOS_NOT_SET) { + mpls_lse |= act->mpls_push.bos << MPLS_LS_S_SHIFT; + } else { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: BOS field must explicitly be set for MPLS push"); + return -EOPNOTSUPP; + } + + /* Leave MPLS TC as a default value of 0 if not explicitly set. */ + if (act->mpls_push.tc != ACT_MPLS_TC_NOT_SET) + mpls_lse |= act->mpls_push.tc << MPLS_LS_TC_SHIFT; + + /* Proto, label and TTL are enforced and verified for MPLS push. 
*/ + mpls_lse |= act->mpls_push.label << MPLS_LS_LABEL_SHIFT; + mpls_lse |= act->mpls_push.ttl << MPLS_LS_TTL_SHIFT; + push_mpls->ethtype = act->mpls_push.proto; + push_mpls->lse = cpu_to_be32(mpls_lse); + + return 0; +} + +static void +nfp_fl_pop_mpls(struct nfp_fl_pop_mpls *pop_mpls, + const struct flow_action_entry *act) +{ + size_t act_size = sizeof(struct nfp_fl_pop_mpls); + + pop_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_POP_MPLS; + pop_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ; + pop_mpls->ethtype = act->mpls_pop.proto; +} + +static void +nfp_fl_set_mpls(struct nfp_fl_set_mpls *set_mpls, + const struct flow_action_entry *act) +{ + size_t act_size = sizeof(struct nfp_fl_set_mpls); + u32 mpls_lse = 0, mpls_mask = 0; + + set_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_SET_MPLS; + set_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ; + + if (act->mpls_mangle.label != ACT_MPLS_LABEL_NOT_SET) { + mpls_lse |= act->mpls_mangle.label << MPLS_LS_LABEL_SHIFT; + mpls_mask |= MPLS_LS_LABEL_MASK; + } + if (act->mpls_mangle.tc != ACT_MPLS_TC_NOT_SET) { + mpls_lse |= act->mpls_mangle.tc << MPLS_LS_TC_SHIFT; + mpls_mask |= MPLS_LS_TC_MASK; + } + if (act->mpls_mangle.bos != ACT_MPLS_BOS_NOT_SET) { + mpls_lse |= act->mpls_mangle.bos << MPLS_LS_S_SHIFT; + mpls_mask |= MPLS_LS_S_MASK; + } + if (act->mpls_mangle.ttl) { + mpls_lse |= act->mpls_mangle.ttl << MPLS_LS_TTL_SHIFT; + mpls_mask |= MPLS_LS_TTL_MASK; + } + + set_mpls->lse = cpu_to_be32(mpls_lse); + set_mpls->lse_mask = cpu_to_be32(mpls_mask); +} + static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan) { size_t act_size = sizeof(struct nfp_fl_pop_vlan); @@ -97,7 +174,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, struct nfp_fl_payload *nfp_flow, bool last, struct net_device *in_dev, enum nfp_flower_tun_type tun_type, int *tun_out_cnt, - struct netlink_ext_ack *extack) + bool pkt_host, struct netlink_ext_ack *extack) { size_t act_size = sizeof(struct nfp_fl_output); struct nfp_flower_priv *priv = app->priv; @@ -142,6 +219,20 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, return gid; } output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid); + } else if (nfp_flower_internal_port_can_offload(app, out_dev)) { + if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware"); + return -EOPNOTSUPP; + } + + if (nfp_flow->pre_tun_rule.dev || !pkt_host) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules require single egress dev and ptype HOST action"); + return -EOPNOTSUPP; + } + + nfp_flow->pre_tun_rule.dev = out_dev; + + return 0; } else { /* Set action output parameters. 
*/ output->flags = cpu_to_be16(tmp_flags); @@ -304,19 +395,26 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len, } static int -nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun, - const struct flow_action_entry *act, - struct nfp_fl_pre_tunnel *pre_tun, - enum nfp_flower_tun_type tun_type, - struct net_device *netdev, struct netlink_ext_ack *extack) +nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun, + const struct flow_action_entry *act, + struct nfp_fl_pre_tunnel *pre_tun, + enum nfp_flower_tun_type tun_type, + struct net_device *netdev, struct netlink_ext_ack *extack) { - size_t act_size = sizeof(struct nfp_fl_set_ipv4_tun); const struct ip_tunnel_info *ip_tun = act->tunnel; + bool ipv6 = ip_tunnel_info_af(ip_tun) == AF_INET6; + size_t act_size = sizeof(struct nfp_fl_set_tun); struct nfp_flower_priv *priv = app->priv; u32 tmp_set_ip_tun_type_index = 0; /* Currently support one pre-tunnel so index is always 0. */ int pretun_idx = 0; + if (!IS_ENABLED(CONFIG_IPV6) && ipv6) + return -EOPNOTSUPP; + + if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) + return -EOPNOTSUPP; + BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM || NFP_FL_TUNNEL_KEY != TUNNEL_KEY || NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT); @@ -327,19 +425,35 @@ nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun, return -EOPNOTSUPP; } - set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL; + set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_TUNNEL; set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ; /* Set tunnel type and pre-tunnel index. */ tmp_set_ip_tun_type_index |= - FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) | - FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx); + FIELD_PREP(NFP_FL_TUNNEL_TYPE, tun_type) | + FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx); set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index); set_tun->tun_id = ip_tun->key.tun_id; if (ip_tun->key.ttl) { set_tun->ttl = ip_tun->key.ttl; +#ifdef CONFIG_IPV6 + } else if (ipv6) { + struct net *net = dev_net(netdev); + struct flowi6 flow = {}; + struct dst_entry *dst; + + flow.daddr = ip_tun->key.u.ipv6.dst; + flow.flowi4_proto = IPPROTO_UDP; + dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &flow, NULL); + if (!IS_ERR(dst)) { + set_tun->ttl = ip6_dst_hoplimit(dst); + dst_release(dst); + } else { + set_tun->ttl = net->ipv6.devconf_all->hop_limit; + } +#endif } else { struct net *net = dev_net(netdev); struct flowi4 flow = {}; @@ -365,7 +479,7 @@ nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun, set_tun->tos = ip_tun->key.tos; if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) || - ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS) { + ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload"); return -EOPNOTSUPP; } @@ -377,7 +491,12 @@ nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun, } /* Complete pre_tunnel action. 
*/ - pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst; + if (ipv6) { + pre_tun->flags |= cpu_to_be16(NFP_FL_PRE_TUN_IPV6); + pre_tun->ipv6_dst = ip_tun->key.u.ipv6.dst; + } else { + pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst; + } return 0; } @@ -809,7 +928,7 @@ nfp_flower_output_action(struct nfp_app *app, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, bool last, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, - int *out_cnt, u32 *csum_updated, + int *out_cnt, u32 *csum_updated, bool pkt_host, struct netlink_ext_ack *extack) { struct nfp_flower_priv *priv = app->priv; @@ -831,7 +950,7 @@ nfp_flower_output_action(struct nfp_app *app, output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len]; err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type, - tun_out_cnt, extack); + tun_out_cnt, pkt_host, extack); if (err) return err; @@ -863,30 +982,37 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, struct net_device *netdev, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, int *out_cnt, u32 *csum_updated, - struct nfp_flower_pedit_acts *set_act, + struct nfp_flower_pedit_acts *set_act, bool *pkt_host, struct netlink_ext_ack *extack, int act_idx) { - struct nfp_fl_set_ipv4_tun *set_tun; struct nfp_fl_pre_tunnel *pre_tun; + struct nfp_fl_set_tun *set_tun; struct nfp_fl_push_vlan *psh_v; + struct nfp_fl_push_mpls *psh_m; struct nfp_fl_pop_vlan *pop_v; + struct nfp_fl_pop_mpls *pop_m; + struct nfp_fl_set_mpls *set_m; int err; switch (act->id) { case FLOW_ACTION_DROP: nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP); break; + case FLOW_ACTION_REDIRECT_INGRESS: case FLOW_ACTION_REDIRECT: err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev, true, tun_type, tun_out_cnt, - out_cnt, csum_updated, extack); + out_cnt, csum_updated, *pkt_host, + extack); if (err) return err; break; + case FLOW_ACTION_MIRRED_INGRESS: case FLOW_ACTION_MIRRED: err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev, false, tun_type, tun_out_cnt, - out_cnt, csum_updated, extack); + out_cnt, csum_updated, *pkt_host, + extack); if (err) return err; break; @@ -935,7 +1061,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, * If none, the packet falls back before applying other actions. 
*/ if (*a_len + sizeof(struct nfp_fl_pre_tunnel) + - sizeof(struct nfp_fl_set_ipv4_tun) > NFP_FL_MAX_A_SIZ) { + sizeof(struct nfp_fl_set_tun) > NFP_FL_MAX_A_SIZ) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap"); return -EOPNOTSUPP; } @@ -949,11 +1075,11 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, return err; set_tun = (void *)&nfp_fl->action_data[*a_len]; - err = nfp_fl_set_ipv4_tun(app, set_tun, act, pre_tun, - *tun_type, netdev, extack); + err = nfp_fl_set_tun(app, set_tun, act, pre_tun, *tun_type, + netdev, extack); if (err) return err; - *a_len += sizeof(struct nfp_fl_set_ipv4_tun); + *a_len += sizeof(struct nfp_fl_set_tun); } break; case FLOW_ACTION_TUNNEL_DECAP: @@ -975,6 +1101,54 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, */ *csum_updated &= ~act->csum_flags; break; + case FLOW_ACTION_MPLS_PUSH: + if (*a_len + + sizeof(struct nfp_fl_push_mpls) > NFP_FL_MAX_A_SIZ) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push MPLS"); + return -EOPNOTSUPP; + } + + psh_m = (struct nfp_fl_push_mpls *)&nfp_fl->action_data[*a_len]; + nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); + + err = nfp_fl_push_mpls(psh_m, act, extack); + if (err) + return err; + *a_len += sizeof(struct nfp_fl_push_mpls); + break; + case FLOW_ACTION_MPLS_POP: + if (*a_len + + sizeof(struct nfp_fl_pop_mpls) > NFP_FL_MAX_A_SIZ) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop MPLS"); + return -EOPNOTSUPP; + } + + pop_m = (struct nfp_fl_pop_mpls *)&nfp_fl->action_data[*a_len]; + nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); + + nfp_fl_pop_mpls(pop_m, act); + *a_len += sizeof(struct nfp_fl_pop_mpls); + break; + case FLOW_ACTION_MPLS_MANGLE: + if (*a_len + + sizeof(struct nfp_fl_set_mpls) > NFP_FL_MAX_A_SIZ) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at set MPLS"); + return -EOPNOTSUPP; + } + + set_m = (struct nfp_fl_set_mpls *)&nfp_fl->action_data[*a_len]; + nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); + + nfp_fl_set_mpls(set_m, act); + *a_len += sizeof(struct nfp_fl_set_mpls); + break; + case FLOW_ACTION_PTYPE: + /* TC ptype skbedit sets PACKET_HOST for ingress redirect. */ + if (act->ptype != PACKET_HOST) + return -EOPNOTSUPP; + + *pkt_host = true; + break; default: /* Currently we do not handle any other actions. 
*/ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list"); @@ -1030,6 +1204,7 @@ int nfp_flower_compile_action(struct nfp_app *app, struct nfp_flower_pedit_acts set_act; enum nfp_flower_tun_type tun_type; struct flow_action_entry *act; + bool pkt_host = false; u32 csum_updated = 0; memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); @@ -1046,7 +1221,7 @@ int nfp_flower_compile_action(struct nfp_app *app, err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len, netdev, &tun_type, &tun_out_cnt, &out_cnt, &csum_updated, - &set_act, extack, i); + &set_act, &pkt_host, extack, i); if (err) return err; act_cnt++; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index d5bbe3d6048b..a595ddb92bff 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -260,9 +260,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) type = cmsg_hdr->type; switch (type) { - case NFP_FLOWER_CMSG_TYPE_PORT_REIFY: - nfp_flower_cmsg_portreify_rx(app, skb); - break; case NFP_FLOWER_CMSG_TYPE_PORT_MOD: nfp_flower_cmsg_portmod_rx(app, skb); break; @@ -273,11 +270,17 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) } goto err_default; case NFP_FLOWER_CMSG_TYPE_NO_NEIGH: - nfp_tunnel_request_route(app, skb); + nfp_tunnel_request_route_v4(app, skb); + break; + case NFP_FLOWER_CMSG_TYPE_NO_NEIGH_V6: + nfp_tunnel_request_route_v6(app, skb); break; case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS: nfp_tunnel_keep_alive(app, skb); break; + case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS_V6: + nfp_tunnel_keep_alive_v6(app, skb); + break; case NFP_FLOWER_CMSG_TYPE_QOS_STATS: nfp_flower_stats_rlim_reply(app, skb); break; @@ -328,8 +331,7 @@ nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type) struct nfp_flower_priv *priv = app->priv; struct sk_buff_head *skb_head; - if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY || - type == NFP_FLOWER_CMSG_TYPE_PORT_MOD) + if (type == NFP_FLOWER_CMSG_TYPE_PORT_MOD) skb_head = &priv->cmsg_skbs_high; else skb_head = &priv->cmsg_skbs_low; @@ -365,9 +367,14 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) nfp_flower_process_mtu_ack(app, skb)) { /* Handle MTU acks outside wq to prevent RTNL conflict. */ dev_consume_skb_any(skb); - } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) { + } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH || + cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6) { /* Acks from the NFP that the route is added - ignore. */ dev_consume_skb_any(skb); + } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) { + /* Handle REIFY acks outside wq to prevent RTNL conflict. 
*/ + nfp_flower_cmsg_portreify_rx(app, skb); + dev_consume_skb_any(skb); } else { nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type); } diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 0f1706ae5bfc..9b50d76bbc09 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -26,6 +26,7 @@ #define NFP_FLOWER_LAYER2_GRE BIT(0) #define NFP_FLOWER_LAYER2_GENEVE BIT(5) #define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6) +#define NFP_FLOWER_LAYER2_TUN_IPV6 BIT(7) #define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13) #define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12) @@ -63,13 +64,17 @@ #define NFP_FL_MAX_GENEVE_OPT_ACT 32 #define NFP_FL_MAX_GENEVE_OPT_CNT 64 #define NFP_FL_MAX_GENEVE_OPT_KEY 32 +#define NFP_FL_MAX_GENEVE_OPT_KEY_V6 8 /* Action opcodes */ #define NFP_FL_ACTION_OPCODE_OUTPUT 0 #define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1 #define NFP_FL_ACTION_OPCODE_POP_VLAN 2 -#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6 +#define NFP_FL_ACTION_OPCODE_PUSH_MPLS 3 +#define NFP_FL_ACTION_OPCODE_POP_MPLS 4 +#define NFP_FL_ACTION_OPCODE_SET_TUNNEL 6 #define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7 +#define NFP_FL_ACTION_OPCODE_SET_MPLS 8 #define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9 #define NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS 10 #define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11 @@ -96,8 +101,8 @@ /* Tunnel ports */ #define NFP_FL_PORT_TYPE_TUN 0x50000000 -#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4) -#define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0) +#define NFP_FL_TUNNEL_TYPE GENMASK(7, 4) +#define NFP_FL_PRE_TUN_INDEX GENMASK(2, 0) #define NFP_FLOWER_WORKQ_MAX_SKBS 30000 @@ -203,13 +208,16 @@ struct nfp_fl_pre_lag { struct nfp_fl_pre_tunnel { struct nfp_fl_act_head head; - __be16 reserved; - __be32 ipv4_dst; - /* reserved for use with IPv6 addresses */ - __be32 extra[3]; + __be16 flags; + union { + __be32 ipv4_dst; + struct in6_addr ipv6_dst; + }; }; -struct nfp_fl_set_ipv4_tun { +#define NFP_FL_PRE_TUN_IPV6 BIT(0) + +struct nfp_fl_set_tun { struct nfp_fl_act_head head; __be16 reserved; __be64 tun_id __packed; @@ -217,7 +225,8 @@ struct nfp_fl_set_ipv4_tun { __be16 tun_flags; u8 ttl; u8 tos; - __be32 extra; + __be16 outer_vlan_tpid; + __be16 outer_vlan_tci; u8 tun_len; u8 res2; __be16 tun_proto; @@ -232,6 +241,24 @@ struct nfp_fl_push_geneve { u8 opt_data[]; }; +struct nfp_fl_push_mpls { + struct nfp_fl_act_head head; + __be16 ethtype; + __be32 lse; +}; + +struct nfp_fl_pop_mpls { + struct nfp_fl_act_head head; + __be16 ethtype; +}; + +struct nfp_fl_set_mpls { + struct nfp_fl_act_head head; + __be16 reserved; + __be32 lse_mask; + __be32 lse; +}; + /* Metadata with L2 (1W/4B) * ---------------------------------------------------------------- * 3 2 1 @@ -365,6 +392,11 @@ struct nfp_flower_tun_ipv4 { __be32 dst; }; +struct nfp_flower_tun_ipv6 { + struct in6_addr src; + struct in6_addr dst; +}; + struct nfp_flower_tun_ip_ext { u8 tos; u8 ttl; @@ -394,6 +426,42 @@ struct nfp_flower_ipv4_udp_tun { __be32 tun_id; }; +/* Flow Frame IPv6 UDP TUNNEL --> Tunnel details (11W/44B) + * ----------------------------------------------------------------- + * 3 2 1 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ipv6_addr_src, 31 - 0 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ipv6_addr_src, 63 - 32 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ipv6_addr_src, 95 - 
64 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ipv6_addr_src, 127 - 96 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ipv6_addr_dst, 31 - 0 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ipv6_addr_dst, 63 - 32 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ipv6_addr_dst, 95 - 64 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ipv6_addr_dst, 127 - 96 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Reserved | tos | ttl | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Reserved | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | VNI | Reserved | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +struct nfp_flower_ipv6_udp_tun { + struct nfp_flower_tun_ipv6 ipv6; + __be16 reserved1; + struct nfp_flower_tun_ip_ext ip_ext; + __be32 reserved2; + __be32 tun_id; +}; + /* Flow Frame GRE TUNNEL --> Tunnel details (6W/24B) * ----------------------------------------------------------------- * 3 2 1 @@ -423,6 +491,46 @@ struct nfp_flower_ipv4_gre_tun { __be32 reserved2; }; +/* Flow Frame GRE TUNNEL V6 --> Tunnel details (12W/48B) + * ----------------------------------------------------------------- + * 3 2 1 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ipv6_addr_src, 31 - 0 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ipv6_addr_src, 63 - 32 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ipv6_addr_src, 95 - 64 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ipv6_addr_src, 127 - 96 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ipv6_addr_dst, 31 - 0 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ipv6_addr_dst, 63 - 32 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ipv6_addr_dst, 95 - 64 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ipv6_addr_dst, 127 - 96 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | tun_flags | tos | ttl | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Reserved | Ethertype | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Key | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Reserved | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +struct nfp_flower_ipv6_gre_tun { + struct nfp_flower_tun_ipv6 ipv6; + __be16 tun_flags; + struct nfp_flower_tun_ip_ext ip_ext; + __be16 reserved1; + __be16 ethertype; + __be32 tun_key; + __be32 reserved2; +}; + struct nfp_flower_geneve_options { u8 data[NFP_FL_MAX_GENEVE_OPT_KEY]; }; @@ -462,6 +570,11 @@ enum nfp_flower_cmsg_type_port { NFP_FLOWER_CMSG_TYPE_QOS_MOD = 18, NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19, NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20, + NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE = 21, + NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6 = 22, + NFP_FLOWER_CMSG_TYPE_NO_NEIGH_V6 = 23, + NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 = 24, + NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS_V6 = 25, NFP_FLOWER_CMSG_TYPE_MAX = 32, }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index eb846133943b..d8ad9346a26a 100644 --- 
a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -400,6 +400,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL); if (!repr_priv) { err = -ENOMEM; + nfp_repr_free(repr); goto err_reprs_clean; } @@ -413,6 +414,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, port = nfp_port_alloc(app, port_type, repr); if (IS_ERR(port)) { err = PTR_ERR(port); + kfree(repr_priv); nfp_repr_free(repr); goto err_reprs_clean; } @@ -433,6 +435,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, err = nfp_repr_init(app, repr, port_id, port, priv->nn->dp.netdev); if (err) { + kfree(repr_priv); nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; @@ -515,6 +518,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL); if (!repr_priv) { err = -ENOMEM; + nfp_repr_free(repr); goto err_reprs_clean; } @@ -525,11 +529,13 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr); if (IS_ERR(port)) { err = PTR_ERR(port); + kfree(repr_priv); nfp_repr_free(repr); goto err_reprs_clean; } err = nfp_port_init_phy_port(app->pf, app, port, i); if (err) { + kfree(repr_priv); nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; @@ -542,6 +548,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) err = nfp_repr_init(app, repr, cmsg_port_id, port, priv->nn->dp.netdev); if (err) { + kfree(repr_priv); nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; @@ -781,6 +788,7 @@ static int nfp_flower_init(struct nfp_app *app) INIT_LIST_HEAD(&app_priv->indr_block_cb_priv); INIT_LIST_HEAD(&app_priv->non_repr_priv); + app_priv->pre_tun_rule_cnt = 0; return 0; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index af9441d5787f..d55d0d33bc45 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -24,7 +24,7 @@ struct nfp_app; #define NFP_FL_STAT_ID_MU_NUM GENMASK(31, 22) #define NFP_FL_STAT_ID_STAT GENMASK(21, 0) -#define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \ +#define NFP_FL_STATS_ELEM_RS sizeof_field(struct nfp_fl_stats_id, \ init_unalloc) #define NFP_FLOWER_MASK_ENTRY_RS 256 #define NFP_FLOWER_MASK_ELEMENT_RS 1 @@ -42,6 +42,8 @@ struct nfp_app; #define NFP_FL_FEATS_VLAN_PCP BIT(3) #define NFP_FL_FEATS_VF_RLIM BIT(4) #define NFP_FL_FEATS_FLOW_MOD BIT(5) +#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6) +#define NFP_FL_FEATS_IPV6_TUN BIT(7) #define NFP_FL_FEATS_FLOW_MERGE BIT(30) #define NFP_FL_FEATS_LAG BIT(31) @@ -61,18 +63,26 @@ struct nfp_fl_stats_id { * struct nfp_fl_tunnel_offloads - priv data for tunnel offloads * @offloaded_macs: Hashtable of the offloaded MAC addresses * @ipv4_off_list: List of IPv4 addresses to offload - * @neigh_off_list: List of neighbour offloads + * @ipv6_off_list: List of IPv6 addresses to offload + * @neigh_off_list_v4: List of IPv4 neighbour offloads + * @neigh_off_list_v6: List of IPv6 neighbour offloads * @ipv4_off_lock: Lock for the IPv4 address list - * @neigh_off_lock: Lock for the neighbour address list + * @ipv6_off_lock: Lock for the IPv6 address list + * @neigh_off_lock_v4: Lock for the IPv4 neighbour address list + * @neigh_off_lock_v6: Lock for the IPv6 neighbour address list * @mac_off_ids: IDA to manage id assignment for 
offloaded MACs * @neigh_nb: Notifier to monitor neighbour state */ struct nfp_fl_tunnel_offloads { struct rhashtable offloaded_macs; struct list_head ipv4_off_list; - struct list_head neigh_off_list; + struct list_head ipv6_off_list; + struct list_head neigh_off_list_v4; + struct list_head neigh_off_list_v6; struct mutex ipv4_off_lock; - spinlock_t neigh_off_lock; + struct mutex ipv6_off_lock; + spinlock_t neigh_off_lock_v4; + spinlock_t neigh_off_lock_v6; struct ida mac_off_ids; struct notifier_block neigh_nb; }; @@ -162,6 +172,7 @@ struct nfp_fl_internal_ports { * @qos_stats_work: Workqueue for qos stats processing * @qos_rate_limiters: Current active qos rate limiters * @qos_stats_lock: Lock on qos stats updates + * @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded */ struct nfp_flower_priv { struct nfp_app *app; @@ -193,6 +204,7 @@ struct nfp_flower_priv { struct delayed_work qos_stats_work; unsigned int qos_rate_limiters; spinlock_t qos_stats_lock; /* Protect the qos stats */ + int pre_tun_rule_cnt; }; /** @@ -218,6 +230,7 @@ struct nfp_fl_qos { * @block_shared: Flag indicating if offload applies to shared blocks * @mac_list: List entry of reprs that share the same offloaded MAC * @qos_table: Stored info on filters implementing qos + * @on_bridge: Indicates if the repr is attached to a bridge */ struct nfp_flower_repr_priv { struct nfp_repr *nfp_repr; @@ -227,6 +240,7 @@ struct nfp_flower_repr_priv { bool block_shared; struct list_head mac_list; struct nfp_fl_qos qos_table; + bool on_bridge; }; /** @@ -268,18 +282,36 @@ struct nfp_fl_stats { u64 used; }; +/** + * struct nfp_ipv6_addr_entry - cached IPv6 addresses + * @ipv6_addr: IP address + * @ref_count: number of rules currently using this IP + * @list: list pointer + */ +struct nfp_ipv6_addr_entry { + struct in6_addr ipv6_addr; + int ref_count; + struct list_head list; +}; + struct nfp_fl_payload { struct nfp_fl_rule_metadata meta; unsigned long tc_flower_cookie; struct rhash_head fl_node; struct rcu_head rcu; __be32 nfp_tun_ipv4_addr; + struct nfp_ipv6_addr_entry *nfp_tun_ipv6; struct net_device *ingress_dev; char *unmasked_data; char *mask_data; char *action_data; struct list_head linked_flows; bool in_hw; + struct { + struct net_device *dev; + __be16 vlan_tci; + __be16 port_idx; + } pre_tun_rule; }; struct nfp_fl_payload_link { @@ -333,6 +365,11 @@ static inline bool nfp_flower_is_merge_flow(struct nfp_fl_payload *flow_pay) return flow_pay->tc_flower_cookie == (unsigned long)flow_pay; } +static inline bool nfp_flower_is_supported_bridge(struct net_device *netdev) +{ + return netif_is_ovs_master(netdev); +} + int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, unsigned int host_ctx_split); void nfp_flower_metadata_cleanup(struct nfp_app *app); @@ -381,8 +418,14 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app, unsigned long event, void *ptr); void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4); void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4); -void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb); +void +nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry); +struct nfp_ipv6_addr_entry * +nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6); +void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb); +void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb); void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb); +void nfp_tunnel_keep_alive_v6(struct nfp_app *app, 
struct sk_buff *skb); void nfp_flower_lag_init(struct nfp_fl_lag *lag); void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag); int nfp_flower_lag_reset(struct nfp_fl_lag *lag); @@ -415,4 +458,8 @@ void nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev); u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app, struct net_device *netdev); +int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, + struct nfp_fl_payload *flow); +int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app, + struct nfp_fl_payload *flow); #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 9cc3ba17ff69..546bc01d507d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -10,9 +10,8 @@ static void nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext, struct nfp_flower_meta_tci *msk, - struct flow_cls_offload *flow, u8 key_type) + struct flow_rule *rule, u8 key_type) { - struct flow_rule *rule = flow_cls_offload_flow_rule(flow); u16 tmp_tci; memset(ext, 0, sizeof(struct nfp_flower_meta_tci)); @@ -77,11 +76,8 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port, static void nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext, - struct nfp_flower_mac_mpls *msk, - struct flow_cls_offload *flow) + struct nfp_flower_mac_mpls *msk, struct flow_rule *rule) { - struct flow_rule *rule = flow_cls_offload_flow_rule(flow); - memset(ext, 0, sizeof(struct nfp_flower_mac_mpls)); memset(msk, 0, sizeof(struct nfp_flower_mac_mpls)); @@ -130,10 +126,8 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext, static void nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext, struct nfp_flower_tp_ports *msk, - struct flow_cls_offload *flow) + struct flow_rule *rule) { - struct flow_rule *rule = flow_cls_offload_flow_rule(flow); - memset(ext, 0, sizeof(struct nfp_flower_tp_ports)); memset(msk, 0, sizeof(struct nfp_flower_tp_ports)); @@ -150,11 +144,8 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext, static void nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext, - struct nfp_flower_ip_ext *msk, - struct flow_cls_offload *flow) + struct nfp_flower_ip_ext *msk, struct flow_rule *rule) { - struct flow_rule *rule = flow_cls_offload_flow_rule(flow); - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; @@ -224,10 +215,8 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext, static void nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext, - struct nfp_flower_ipv4 *msk, - struct flow_cls_offload *flow) + struct nfp_flower_ipv4 *msk, struct flow_rule *rule) { - struct flow_rule *rule = flow_cls_offload_flow_rule(flow); struct flow_match_ipv4_addrs match; memset(ext, 0, sizeof(struct nfp_flower_ipv4)); @@ -241,16 +230,13 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext, msk->ipv4_dst = match.mask->dst; } - nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow); + nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule); } static void nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext, - struct nfp_flower_ipv6 *msk, - struct flow_cls_offload *flow) + struct nfp_flower_ipv6 *msk, struct flow_rule *rule) { - struct flow_rule *rule = flow_cls_offload_flow_rule(flow); - memset(ext, 0, sizeof(struct nfp_flower_ipv6)); memset(msk, 0, sizeof(struct nfp_flower_ipv6)); @@ -264,16 +250,15 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext, msk->ipv6_dst = match.mask->dst; } - 
nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow); + nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule); } static int -nfp_flower_compile_geneve_opt(void *ext, void *msk, - struct flow_cls_offload *flow) +nfp_flower_compile_geneve_opt(void *ext, void *msk, struct flow_rule *rule) { struct flow_match_enc_opts match; - flow_rule_match_enc_opts(flow->rule, &match); + flow_rule_match_enc_opts(rule, &match); memcpy(ext, match.key->data, match.key->len); memcpy(msk, match.mask->data, match.mask->len); @@ -283,10 +268,8 @@ nfp_flower_compile_geneve_opt(void *ext, void *msk, static void nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext, struct nfp_flower_tun_ipv4 *msk, - struct flow_cls_offload *flow) + struct flow_rule *rule) { - struct flow_rule *rule = flow_cls_offload_flow_rule(flow); - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { struct flow_match_ipv4_addrs match; @@ -299,12 +282,26 @@ nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext, } static void +nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext, + struct nfp_flower_tun_ipv6 *msk, + struct flow_rule *rule) +{ + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_enc_ipv6_addrs(rule, &match); + ext->src = match.key->src; + ext->dst = match.key->dst; + msk->src = match.mask->src; + msk->dst = match.mask->dst; + } +} + +static void nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext, struct nfp_flower_tun_ip_ext *msk, - struct flow_cls_offload *flow) + struct flow_rule *rule) { - struct flow_rule *rule = flow_cls_offload_flow_rule(flow); - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) { struct flow_match_ip match; @@ -317,57 +314,97 @@ nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext, } static void -nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext, - struct nfp_flower_ipv4_gre_tun *msk, - struct flow_cls_offload *flow) +nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk, + struct flow_rule *rule) { - struct flow_rule *rule = flow_cls_offload_flow_rule(flow); - - memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun)); - memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun)); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_match_enc_keyid match; + u32 vni; - /* NVGRE is the only supported GRE tunnel type */ - ext->ethertype = cpu_to_be16(ETH_P_TEB); - msk->ethertype = cpu_to_be16(~0); + flow_rule_match_enc_keyid(rule, &match); + vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET; + *key = cpu_to_be32(vni); + vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET; + *key_msk = cpu_to_be32(vni); + } +} +static void +nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags, + __be16 *flags_msk, struct flow_rule *rule) +{ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { struct flow_match_enc_keyid match; flow_rule_match_enc_keyid(rule, &match); - ext->tun_key = match.key->keyid; - msk->tun_key = match.mask->keyid; + *key = match.key->keyid; + *key_msk = match.mask->keyid; - ext->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY); - msk->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY); + *flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY); + *flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY); } +} + +static void +nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext, + struct nfp_flower_ipv4_gre_tun *msk, + struct flow_rule *rule) +{ + memset(ext, 0, 
sizeof(struct nfp_flower_ipv4_gre_tun)); + memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun)); + + /* NVGRE is the only supported GRE tunnel type */ + ext->ethertype = cpu_to_be16(ETH_P_TEB); + msk->ethertype = cpu_to_be16(~0); - nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow); - nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow); + nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule); + nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule); + nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key, + &ext->tun_flags, &msk->tun_flags, rule); } static void nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext, struct nfp_flower_ipv4_udp_tun *msk, - struct flow_cls_offload *flow) + struct flow_rule *rule) { - struct flow_rule *rule = flow_cls_offload_flow_rule(flow); - memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun)); memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun)); - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { - struct flow_match_enc_keyid match; - u32 temp_vni; + nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule); + nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule); + nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule); +} - flow_rule_match_enc_keyid(rule, &match); - temp_vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET; - ext->tun_id = cpu_to_be32(temp_vni); - temp_vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET; - msk->tun_id = cpu_to_be32(temp_vni); - } +static void +nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext, + struct nfp_flower_ipv6_udp_tun *msk, + struct flow_rule *rule) +{ + memset(ext, 0, sizeof(struct nfp_flower_ipv6_udp_tun)); + memset(msk, 0, sizeof(struct nfp_flower_ipv6_udp_tun)); + + nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule); + nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule); + nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule); +} + +static void +nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext, + struct nfp_flower_ipv6_gre_tun *msk, + struct flow_rule *rule) +{ + memset(ext, 0, sizeof(struct nfp_flower_ipv6_gre_tun)); + memset(msk, 0, sizeof(struct nfp_flower_ipv6_gre_tun)); + + /* NVGRE is the only supported GRE tunnel type */ + ext->ethertype = cpu_to_be16(ETH_P_TEB); + msk->ethertype = cpu_to_be16(~0); - nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow); - nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow); + nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule); + nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule); + nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key, + &ext->tun_flags, &msk->tun_flags, rule); } int nfp_flower_compile_flow_match(struct nfp_app *app, @@ -378,6 +415,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, enum nfp_flower_tun_type tun_type, struct netlink_ext_ack *extack) { + struct flow_rule *rule = flow_cls_offload_flow_rule(flow); u32 port_id; int err; u8 *ext; @@ -393,7 +431,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext, (struct nfp_flower_meta_tci *)msk, - flow, key_ls->key_layer); + rule, key_ls->key_layer); ext += sizeof(struct nfp_flower_meta_tci); msk += sizeof(struct nfp_flower_meta_tci); @@ -425,7 +463,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) { 
nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext, (struct nfp_flower_mac_mpls *)msk, - flow); + rule); ext += sizeof(struct nfp_flower_mac_mpls); msk += sizeof(struct nfp_flower_mac_mpls); } @@ -433,7 +471,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) { nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext, (struct nfp_flower_tp_ports *)msk, - flow); + rule); ext += sizeof(struct nfp_flower_tp_ports); msk += sizeof(struct nfp_flower_tp_ports); } @@ -441,7 +479,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) { nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext, (struct nfp_flower_ipv4 *)msk, - flow); + rule); ext += sizeof(struct nfp_flower_ipv4); msk += sizeof(struct nfp_flower_ipv4); } @@ -449,43 +487,83 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) { nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext, (struct nfp_flower_ipv6 *)msk, - flow); + rule); ext += sizeof(struct nfp_flower_ipv6); msk += sizeof(struct nfp_flower_ipv6); } if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) { - __be32 tun_dst; - - nfp_flower_compile_ipv4_gre_tun((void *)ext, (void *)msk, flow); - tun_dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst; - ext += sizeof(struct nfp_flower_ipv4_gre_tun); - msk += sizeof(struct nfp_flower_ipv4_gre_tun); - - /* Store the tunnel destination in the rule data. - * This must be present and be an exact match. - */ - nfp_flow->nfp_tun_ipv4_addr = tun_dst; - nfp_tunnel_add_ipv4_off(app, tun_dst); + if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) { + struct nfp_flower_ipv6_gre_tun *gre_match; + struct nfp_ipv6_addr_entry *entry; + struct in6_addr *dst; + + nfp_flower_compile_ipv6_gre_tun((void *)ext, + (void *)msk, rule); + gre_match = (struct nfp_flower_ipv6_gre_tun *)ext; + dst = &gre_match->ipv6.dst; + ext += sizeof(struct nfp_flower_ipv6_gre_tun); + msk += sizeof(struct nfp_flower_ipv6_gre_tun); + + entry = nfp_tunnel_add_ipv6_off(app, dst); + if (!entry) + return -EOPNOTSUPP; + + nfp_flow->nfp_tun_ipv6 = entry; + } else { + __be32 dst; + + nfp_flower_compile_ipv4_gre_tun((void *)ext, + (void *)msk, rule); + dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst; + ext += sizeof(struct nfp_flower_ipv4_gre_tun); + msk += sizeof(struct nfp_flower_ipv4_gre_tun); + + /* Store the tunnel destination in the rule data. + * This must be present and be an exact match. + */ + nfp_flow->nfp_tun_ipv4_addr = dst; + nfp_tunnel_add_ipv4_off(app, dst); + } } if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN || key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) { - __be32 tun_dst; - - nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow); - tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst; - ext += sizeof(struct nfp_flower_ipv4_udp_tun); - msk += sizeof(struct nfp_flower_ipv4_udp_tun); - - /* Store the tunnel destination in the rule data. - * This must be present and be an exact match. 
- */ - nfp_flow->nfp_tun_ipv4_addr = tun_dst; - nfp_tunnel_add_ipv4_off(app, tun_dst); + if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) { + struct nfp_flower_ipv6_udp_tun *udp_match; + struct nfp_ipv6_addr_entry *entry; + struct in6_addr *dst; + + nfp_flower_compile_ipv6_udp_tun((void *)ext, + (void *)msk, rule); + udp_match = (struct nfp_flower_ipv6_udp_tun *)ext; + dst = &udp_match->ipv6.dst; + ext += sizeof(struct nfp_flower_ipv6_udp_tun); + msk += sizeof(struct nfp_flower_ipv6_udp_tun); + + entry = nfp_tunnel_add_ipv6_off(app, dst); + if (!entry) + return -EOPNOTSUPP; + + nfp_flow->nfp_tun_ipv6 = entry; + } else { + __be32 dst; + + nfp_flower_compile_ipv4_udp_tun((void *)ext, + (void *)msk, rule); + dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst; + ext += sizeof(struct nfp_flower_ipv4_udp_tun); + msk += sizeof(struct nfp_flower_ipv4_udp_tun); + + /* Store the tunnel destination in the rule data. + * This must be present and be an exact match. + */ + nfp_flow->nfp_tun_ipv4_addr = dst; + nfp_tunnel_add_ipv4_off(app, dst); + } if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) { - err = nfp_flower_compile_geneve_opt(ext, msk, flow); + err = nfp_flower_compile_geneve_opt(ext, msk, rule); if (err) return err; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 7c4a15e967df..5defd31d481c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -65,17 +65,17 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id) freed_stats_id = priv->stats_ring_size; /* Check for unallocated entries first. */ if (priv->stats_ids.init_unalloc > 0) { - if (priv->active_mem_unit == priv->total_mem_units) { - priv->stats_ids.init_unalloc--; - priv->active_mem_unit = 0; - } - *stats_context_id = FIELD_PREP(NFP_FL_STAT_ID_STAT, priv->stats_ids.init_unalloc - 1) | FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, priv->active_mem_unit); - priv->active_mem_unit++; + + if (++priv->active_mem_unit == priv->total_mem_units) { + priv->stats_ids.init_unalloc--; + priv->active_mem_unit = 0; + } + return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index e209f150c5f2..7ca5c1becfcf 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -54,6 +54,10 @@ (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) +#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \ + (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \ + BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) + #define NFP_FLOWER_MERGE_FIELDS \ (NFP_FLOWER_LAYER_PORT | \ NFP_FLOWER_LAYER_MAC | \ @@ -61,6 +65,12 @@ NFP_FLOWER_LAYER_IPV4 | \ NFP_FLOWER_LAYER_IPV6) +#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \ + (NFP_FLOWER_LAYER_PORT | \ + NFP_FLOWER_LAYER_MAC | \ + NFP_FLOWER_LAYER_IPV4 | \ + NFP_FLOWER_LAYER_IPV6) + struct nfp_flower_merge_check { union { struct { @@ -141,10 +151,11 @@ static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f) static int nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts, - u32 *key_layer_two, int *key_size, + u32 *key_layer_two, int *key_size, bool ipv6, struct netlink_ext_ack *extack) { - if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) { + if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY || + (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) { NL_SET_ERR_MSG_MOD(extack, 
"unsupported offload: geneve options exceed maximum length"); return -EOPNOTSUPP; } @@ -162,7 +173,7 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports, struct flow_dissector_key_enc_opts *enc_op, u32 *key_layer_two, u8 *key_layer, int *key_size, struct nfp_flower_priv *priv, - enum nfp_flower_tun_type *tun_type, + enum nfp_flower_tun_type *tun_type, bool ipv6, struct netlink_ext_ack *extack) { int err; @@ -171,7 +182,15 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports, case htons(IANA_VXLAN_UDP_PORT): *tun_type = NFP_FL_TUNNEL_VXLAN; *key_layer |= NFP_FLOWER_LAYER_VXLAN; - *key_size += sizeof(struct nfp_flower_ipv4_udp_tun); + + if (ipv6) { + *key_layer |= NFP_FLOWER_LAYER_EXT_META; + *key_size += sizeof(struct nfp_flower_ext_meta); + *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6; + *key_size += sizeof(struct nfp_flower_ipv6_udp_tun); + } else { + *key_size += sizeof(struct nfp_flower_ipv4_udp_tun); + } if (enc_op) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels"); @@ -187,7 +206,13 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports, *key_layer |= NFP_FLOWER_LAYER_EXT_META; *key_size += sizeof(struct nfp_flower_ext_meta); *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE; - *key_size += sizeof(struct nfp_flower_ipv4_udp_tun); + + if (ipv6) { + *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6; + *key_size += sizeof(struct nfp_flower_ipv6_udp_tun); + } else { + *key_size += sizeof(struct nfp_flower_ipv4_udp_tun); + } if (!enc_op) break; @@ -195,8 +220,8 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports, NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload"); return -EOPNOTSUPP; } - err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, - key_size, extack); + err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size, + ipv6, extack); if (err) return err; break; @@ -232,6 +257,8 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, /* If any tun dissector is used then the required set must be used. 
*/ if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR && + (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R) + != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R && (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported"); @@ -263,8 +290,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { struct flow_match_enc_opts enc_op = { NULL, NULL }; struct flow_match_ipv4_addrs ipv4_addrs; + struct flow_match_ipv6_addrs ipv6_addrs; struct flow_match_control enc_ctl; struct flow_match_ports enc_ports; + bool ipv6_tun = false; flow_rule_match_enc_control(rule, &enc_ctl); @@ -272,38 +301,62 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported"); return -EOPNOTSUPP; } - if (enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) { - NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only IPv4 tunnels are supported"); + + ipv6_tun = enc_ctl.key->addr_type == + FLOW_DISSECTOR_KEY_IPV6_ADDRS; + if (ipv6_tun && + !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels"); return -EOPNOTSUPP; } - /* These fields are already verified as used. */ - flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs); - if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) { - NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported"); + if (!ipv6_tun && + enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6"); return -EOPNOTSUPP; } + if (ipv6_tun) { + flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs); + if (memchr_inv(&ipv6_addrs.mask->dst, 0xff, + sizeof(ipv6_addrs.mask->dst))) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported"); + return -EOPNOTSUPP; + } + } else { + flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs); + if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported"); + return -EOPNOTSUPP; + } + } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) flow_rule_match_enc_opts(rule, &enc_op); - if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) { /* check if GRE, which has no enc_ports */ - if (netif_is_gretap(netdev)) { - *tun_type = NFP_FL_TUNNEL_GRE; - key_layer |= NFP_FLOWER_LAYER_EXT_META; - key_size += sizeof(struct nfp_flower_ext_meta); - key_layer_two |= NFP_FLOWER_LAYER2_GRE; - key_size += - sizeof(struct nfp_flower_ipv4_gre_tun); + if (!netif_is_gretap(netdev)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels"); + return -EOPNOTSUPP; + } - if (enc_op.key) { - NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels"); - return -EOPNOTSUPP; - } + *tun_type = NFP_FL_TUNNEL_GRE; + key_layer |= NFP_FLOWER_LAYER_EXT_META; + key_size += sizeof(struct nfp_flower_ext_meta); + key_layer_two |= NFP_FLOWER_LAYER2_GRE; + + if (ipv6_tun) { + key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6; + key_size += + sizeof(struct nfp_flower_ipv6_udp_tun); } else { - NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact 
match on L4 destination port is required for non-GRE tunnels"); + key_size += + sizeof(struct nfp_flower_ipv4_udp_tun); + } + + if (enc_op.key) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels"); return -EOPNOTSUPP; } } else { @@ -318,7 +371,8 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, &key_layer_two, &key_layer, &key_size, priv, - tun_type, extack); + tun_type, ipv6_tun, + extack); if (err) return err; @@ -486,9 +540,11 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer) goto err_free_mask; flow_pay->nfp_tun_ipv4_addr = 0; + flow_pay->nfp_tun_ipv6 = NULL; flow_pay->meta.flags = 0; INIT_LIST_HEAD(&flow_pay->linked_flows); flow_pay->in_hw = false; + flow_pay->pre_tun_rule.dev = NULL; return flow_pay; @@ -511,10 +567,12 @@ nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow, struct nfp_fl_set_ip4_addrs *ipv4_add; struct nfp_fl_set_ipv6_addr *ipv6_add; struct nfp_fl_push_vlan *push_vlan; + struct nfp_fl_pre_tunnel *pre_tun; struct nfp_fl_set_tport *tport; struct nfp_fl_set_eth *eth; struct nfp_fl_act_head *a; unsigned int act_off = 0; + bool ipv6_tun = false; u8 act_id = 0; u8 *ports; int i; @@ -536,14 +594,18 @@ nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow, case NFP_FL_ACTION_OPCODE_POP_VLAN: merge->tci = cpu_to_be16(0); break; - case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL: + case NFP_FL_ACTION_OPCODE_SET_TUNNEL: /* New tunnel header means l2 to l4 can be matched. */ eth_broadcast_addr(&merge->l2.mac_dst[0]); eth_broadcast_addr(&merge->l2.mac_src[0]); memset(&merge->l4, 0xff, sizeof(struct nfp_flower_tp_ports)); - memset(&merge->ipv4, 0xff, - sizeof(struct nfp_flower_ipv4)); + if (ipv6_tun) + memset(&merge->ipv6, 0xff, + sizeof(struct nfp_flower_ipv6)); + else + memset(&merge->ipv4, 0xff, + sizeof(struct nfp_flower_ipv4)); break; case NFP_FL_ACTION_OPCODE_SET_ETHERNET: eth = (struct nfp_fl_set_eth *)a; @@ -591,6 +653,10 @@ nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow, ports[i] |= tport->tp_port_mask[i]; break; case NFP_FL_ACTION_OPCODE_PRE_TUNNEL: + pre_tun = (struct nfp_fl_pre_tunnel *)a; + ipv6_tun = be16_to_cpu(pre_tun->flags) & + NFP_FL_PRE_TUN_IPV6; + break; case NFP_FL_ACTION_OPCODE_PRE_LAG: case NFP_FL_ACTION_OPCODE_PUSH_GENEVE: break; @@ -732,28 +798,62 @@ nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len, return act_off; } -static int nfp_fl_verify_post_tun_acts(char *acts, int len) +static int +nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan) { struct nfp_fl_act_head *a; unsigned int act_off = 0; while (act_off < len) { a = (struct nfp_fl_act_head *)&acts[act_off]; - if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) + + if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off) + *vlan = (struct nfp_fl_push_vlan *)a; + else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) return -EOPNOTSUPP; act_off += a->len_lw << NFP_FL_LW_SIZ; } + /* Ensure any VLAN push also has an egress action. 
*/ + if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan)) + return -EOPNOTSUPP; + return 0; } static int +nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan) +{ + struct nfp_fl_set_tun *tun; + struct nfp_fl_act_head *a; + unsigned int act_off = 0; + + while (act_off < len) { + a = (struct nfp_fl_act_head *)&acts[act_off]; + + if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) { + tun = (struct nfp_fl_set_tun *)a; + tun->outer_vlan_tpid = vlan->vlan_tpid; + tun->outer_vlan_tci = vlan->vlan_tci; + + return 0; + } + + act_off += a->len_lw << NFP_FL_LW_SIZ; + } + + /* Return error if no tunnel action is found. */ + return -EOPNOTSUPP; +} + +static int nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1, struct nfp_fl_payload *sub_flow2, struct nfp_fl_payload *merge_flow) { unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2; + struct nfp_fl_push_vlan *post_tun_push_vlan = NULL; bool tunnel_act = false; char *merge_act; int err; @@ -790,18 +890,36 @@ nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1, sub2_act_len -= pre_off2; /* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes - * a tunnel, sub_flow 2 can only have output actions for a valid merge. + * a tunnel, there are restrictions on what sub_flow 2 actions lead to a + * valid merge. */ if (tunnel_act) { char *post_tun_acts = &sub_flow2->action_data[pre_off2]; - err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len); + err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len, + &post_tun_push_vlan); if (err) return err; + + if (post_tun_push_vlan) { + pre_off2 += sizeof(*post_tun_push_vlan); + sub2_act_len -= sizeof(*post_tun_push_vlan); + } } /* Copy remaining actions from sub_flows 1 and 2. */ memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len); + + if (post_tun_push_vlan) { + /* Update tunnel action in merge to include VLAN push. */ + err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len, + post_tun_push_vlan); + if (err) + return err; + + merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan); + } + merge_act += sub1_act_len; memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len); @@ -945,6 +1063,113 @@ err_destroy_merge_flow: } /** + * nfp_flower_validate_pre_tun_rule() + * @app: Pointer to the APP handle + * @flow: Pointer to NFP flow representation of rule + * @extack: Netlink extended ACK report + * + * Verifies the flow as a pre-tunnel rule. + * + * Return: negative value on error, 0 if verified. 
+ */ +static int +nfp_flower_validate_pre_tun_rule(struct nfp_app *app, + struct nfp_fl_payload *flow, + struct netlink_ext_ack *extack) +{ + struct nfp_flower_meta_tci *meta_tci; + struct nfp_flower_mac_mpls *mac; + struct nfp_fl_act_head *act; + u8 *mask = flow->mask_data; + bool vlan = false; + int act_offset; + u8 key_layer; + + meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data; + if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) { + u16 vlan_tci = be16_to_cpu(meta_tci->tci); + + vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT; + flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); + vlan = true; + } else { + flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); + } + + key_layer = meta_tci->nfp_flow_key_layer; + if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields"); + return -EOPNOTSUPP; + } + + if (!(key_layer & NFP_FLOWER_LAYER_MAC)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required"); + return -EOPNOTSUPP; + } + + /* Skip fields known to exist. */ + mask += sizeof(struct nfp_flower_meta_tci); + mask += sizeof(struct nfp_flower_in_port); + + /* Ensure destination MAC address is fully matched. */ + mac = (struct nfp_flower_mac_mpls *)mask; + if (!is_broadcast_ether_addr(&mac->mac_dst[0])) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked"); + return -EOPNOTSUPP; + } + + if (key_layer & NFP_FLOWER_LAYER_IPV4 || + key_layer & NFP_FLOWER_LAYER_IPV6) { + /* Flags and proto fields have same offset in IPv4 and IPv6. */ + int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags); + int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto); + int size; + int i; + + size = key_layer & NFP_FLOWER_LAYER_IPV4 ? + sizeof(struct nfp_flower_ipv4) : + sizeof(struct nfp_flower_ipv6); + + mask += sizeof(struct nfp_flower_mac_mpls); + + /* Ensure proto and flags are the only IP layer fields. */ + for (i = 0; i < size; i++) + if (mask[i] && i != ip_flags && i != ip_proto) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header"); + return -EOPNOTSUPP; + } + } + + /* Action must be a single egress or pop_vlan and egress. */ + act_offset = 0; + act = (struct nfp_fl_act_head *)&flow->action_data[act_offset]; + if (vlan) { + if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action"); + return -EOPNOTSUPP; + } + + act_offset += act->len_lw << NFP_FL_LW_SIZ; + act = (struct nfp_fl_act_head *)&flow->action_data[act_offset]; + } + + if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected"); + return -EOPNOTSUPP; + } + + act_offset += act->len_lw << NFP_FL_LW_SIZ; + + /* Ensure there are no more actions after egress. */ + if (act_offset != flow->meta.act_len) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action"); + return -EOPNOTSUPP; + } + + return 0; +} + +/** * nfp_flower_add_offload() - Adds a new flow to hardware. * @app: Pointer to the APP handle * @netdev: netdev structure. 
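For illustration only (not part of the patch): the pre-tunnel validation above and nfp_fl_verify_post_tun_acts() both walk the compiled action list the same way - actions are packed back to back in action_data, each starting with a struct nfp_fl_act_head whose len_lw field gives the action length in 32-bit long-words, advanced by len_lw << NFP_FL_LW_SIZ. A minimal standalone sketch of that walk follows; the opcode values and NFP_FL_LW_SIZ below are placeholder stand-ins for the driver's real definitions, kept only so the snippet compiles on its own.

#include <stdbool.h>
#include <stdint.h>

/* Reduced stand-ins for the driver definitions used by the walk below;
 * the numeric values are placeholders, only the layout matters here.
 */
#define NFP_FL_LW_SIZ			2	/* len_lw is in 32-bit long-words */
#define NFP_FL_ACTION_OPCODE_POP_VLAN	2
#define NFP_FL_ACTION_OPCODE_OUTPUT	0

struct nfp_fl_act_head {
	uint8_t jump_id;	/* action opcode */
	uint8_t len_lw;		/* action length in long-words */
};

/* Accept only the action shape a pre-tunnel rule may have: an optional
 * leading VLAN pop (when the match carried a VLAN tag) followed by a
 * single egress action, with nothing after it.
 */
static bool pre_tun_actions_ok(const uint8_t *acts, int len, bool vlan)
{
	const struct nfp_fl_act_head *a = (const void *)acts;
	int off = 0;

	if (vlan) {
		if (a->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN)
			return false;
		off += a->len_lw << NFP_FL_LW_SIZ;
		a = (const void *)&acts[off];
	}

	if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
		return false;
	off += a->len_lw << NFP_FL_LW_SIZ;

	return off == len;	/* egress must be the last action */
}

int main(void)
{
	/* Fake list: a one long-word VLAN pop followed by a two long-word output. */
	uint8_t acts[12] = { 0 };
	struct nfp_fl_act_head *a = (struct nfp_fl_act_head *)acts;

	a->jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
	a->len_lw = 1;
	a = (struct nfp_fl_act_head *)&acts[4];
	a->jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
	a->len_lw = 2;

	return pre_tun_actions_ok(acts, sizeof(acts), true) ? 0 : 1;
}

The driver performs the same walk directly over flow->action_data with its real opcode definitions, as shown in the validation function above and in the merge-action checks earlier in offload.c.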
@@ -994,6 +1219,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (err) goto err_destroy_flow; + if (flow_pay->pre_tun_rule.dev) { + err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack); + if (err) + goto err_destroy_flow; + } + err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack); if (err) goto err_destroy_flow; @@ -1006,8 +1237,11 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, goto err_release_metadata; } - err = nfp_flower_xmit_flow(app, flow_pay, - NFP_FLOWER_CMSG_TYPE_FLOW_ADD); + if (flow_pay->pre_tun_rule.dev) + err = nfp_flower_xmit_pre_tun_flow(app, flow_pay); + else + err = nfp_flower_xmit_flow(app, flow_pay, + NFP_FLOWER_CMSG_TYPE_FLOW_ADD); if (err) goto err_remove_rhash; @@ -1028,6 +1262,8 @@ err_remove_rhash: err_release_metadata: nfp_modify_flow_metadata(app, flow_pay); err_destroy_flow: + if (flow_pay->nfp_tun_ipv6) + nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6); kfree(flow_pay->action_data); kfree(flow_pay->mask_data); kfree(flow_pay->unmasked_data); @@ -1144,13 +1380,19 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, if (nfp_flow->nfp_tun_ipv4_addr) nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr); + if (nfp_flow->nfp_tun_ipv6) + nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6); + if (!nfp_flow->in_hw) { err = 0; goto err_free_merge_flow; } - err = nfp_flower_xmit_flow(app, nfp_flow, - NFP_FLOWER_CMSG_TYPE_FLOW_DEL); + if (nfp_flow->pre_tun_rule.dev) + err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow); + else + err = nfp_flower_xmit_flow(app, nfp_flow, + NFP_FLOWER_CMSG_TYPE_FLOW_DEL); /* Fall through on error. */ err_free_merge_flow: @@ -1409,13 +1651,21 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, struct nfp_flower_priv *priv = app->priv; struct flow_block_cb *block_cb; - if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && - !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && - nfp_flower_internal_port_can_offload(app, netdev))) + if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && + !nfp_flower_internal_port_can_offload(app, netdev)) || + (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && + nfp_flower_internal_port_can_offload(app, netdev))) return -EOPNOTSUPP; switch (f->command) { case FLOW_BLOCK_BIND: + cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev); + if (cb_priv && + flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb, + cb_priv, + &nfp_block_cb_list)) + return -EBUSY; + cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL); if (!cb_priv) return -ENOMEM; @@ -1479,16 +1729,17 @@ int nfp_flower_reg_indir_block_handler(struct nfp_app *app, return NOTIFY_OK; if (event == NETDEV_REGISTER) { - err = __tc_indr_block_cb_register(netdev, app, - nfp_flower_indr_setup_tc_cb, - app); + err = __flow_indr_block_cb_register(netdev, app, + nfp_flower_indr_setup_tc_cb, + app); if (err) nfp_flower_cmsg_warn(app, "Indirect block reg failed - %s\n", netdev->name); } else if (event == NETDEV_UNREGISTER) { - __tc_indr_block_cb_unregister(netdev, - nfp_flower_indr_setup_tc_cb, app); + __flow_indr_block_cb_unregister(netdev, + nfp_flower_indr_setup_tc_cb, + app); } return NOTIFY_OK; diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index a7a80f4b722a..2df3deedf9fd 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ 
-15,6 +15,24 @@ #define NFP_FL_MAX_ROUTES 32 +#define NFP_TUN_PRE_TUN_RULE_LIMIT 32 +#define NFP_TUN_PRE_TUN_RULE_DEL 0x1 +#define NFP_TUN_PRE_TUN_IDX_BIT 0x8 + +/** + * struct nfp_tun_pre_run_rule - rule matched before decap + * @flags: options for the rule offset + * @port_idx: index of destination MAC address for the rule + * @vlan_tci: VLAN info associated with MAC + * @host_ctx_id: stats context of rule to update + */ +struct nfp_tun_pre_tun_rule { + __be32 flags; + __be16 port_idx; + __be16 vlan_tci; + __be32 host_ctx_id; +}; + /** * struct nfp_tun_active_tuns - periodic message of active tunnels * @seq: sequence number of the message @@ -37,6 +55,25 @@ struct nfp_tun_active_tuns { }; /** + * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels + * @seq: sequence number of the message + * @count: number of tunnels report in message + * @flags: options part of the request + * @tun_info.ipv6: dest IPv6 address of active route + * @tun_info.egress_port: port the encapsulated packet egressed + * @tun_info: tunnels that have sent traffic in reported period + */ +struct nfp_tun_active_tuns_v6 { + __be32 seq; + __be32 count; + __be32 flags; + struct route_ip_info_v6 { + struct in6_addr ipv6; + __be32 egress_port; + } tun_info[]; +}; + +/** * struct nfp_tun_neigh - neighbour/route entry on the NFP * @dst_ipv4: destination IPv4 address * @src_ipv4: source IPv4 address @@ -53,6 +90,22 @@ struct nfp_tun_neigh { }; /** + * struct nfp_tun_neigh_v6 - neighbour/route entry on the NFP + * @dst_ipv6: destination IPv6 address + * @src_ipv6: source IPv6 address + * @dst_addr: destination MAC address + * @src_addr: source MAC address + * @port_id: NFP port to output packet on - associated with source IPv6 + */ +struct nfp_tun_neigh_v6 { + struct in6_addr dst_ipv6; + struct in6_addr src_ipv6; + u8 dst_addr[ETH_ALEN]; + u8 src_addr[ETH_ALEN]; + __be32 port_id; +}; + +/** * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup * @ingress_port: ingress port of packet that signalled request * @ipv4_addr: destination ipv4 address for route @@ -65,13 +118,23 @@ struct nfp_tun_req_route_ipv4 { }; /** - * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP - * @ipv4_addr: destination of route + * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup + * @ingress_port: ingress port of packet that signalled request + * @ipv6_addr: destination ipv6 address for route + */ +struct nfp_tun_req_route_ipv6 { + __be32 ingress_port; + struct in6_addr ipv6_addr; +}; + +/** + * struct nfp_offloaded_route - routes that are offloaded to the NFP * @list: list pointer + * @ip_add: destination of route - can be IPv4 or IPv6 */ -struct nfp_ipv4_route_entry { - __be32 ipv4_addr; +struct nfp_offloaded_route { struct list_head list; + u8 ip_add[]; }; #define NFP_FL_IPV4_ADDRS_MAX 32 @@ -98,6 +161,18 @@ struct nfp_ipv4_addr_entry { struct list_head list; }; +#define NFP_FL_IPV6_ADDRS_MAX 4 + +/** + * struct nfp_tun_ipv6_addr - set the IP address list on the NFP + * @count: number of IPs populated in the array + * @ipv6_addr: array of IPV6_ADDRS_MAX 128 bit IPv6 addresses + */ +struct nfp_tun_ipv6_addr { + __be32 count; + struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX]; +}; + #define NFP_TUN_MAC_OFFLOAD_DEL_FLAG 0x2 /** @@ -124,11 +199,12 @@ enum nfp_flower_mac_offload_cmd { /** * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC - * @ht_node: Hashtable entry - * @addr: Offloaded MAC address - * @index: Offloaded index for given MAC address - * 
@ref_count: Number of devs using this MAC address - * @repr_list: List of reprs sharing this MAC address + * @ht_node: Hashtable entry + * @addr: Offloaded MAC address + * @index: Offloaded index for given MAC address + * @ref_count: Number of devs using this MAC address + * @repr_list: List of reprs sharing this MAC address + * @bridge_count: Number of bridge/internal devs with MAC */ struct nfp_tun_offloaded_mac { struct rhash_head ht_node; @@ -136,6 +212,7 @@ struct nfp_tun_offloaded_mac { u16 index; int ref_count; struct list_head repr_list; + int bridge_count; }; static const struct rhashtable_params offloaded_macs_params = { @@ -186,6 +263,49 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb) rcu_read_unlock(); } +void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_IPV6) + struct nfp_tun_active_tuns_v6 *payload; + struct net_device *netdev; + int count, i, pay_len; + struct neighbour *n; + void *ipv6_add; + u32 port; + + payload = nfp_flower_cmsg_get_data(skb); + count = be32_to_cpu(payload->count); + if (count > NFP_FL_IPV6_ADDRS_MAX) { + nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n"); + return; + } + + pay_len = nfp_flower_cmsg_get_data_len(skb); + if (pay_len != struct_size(payload, tun_info, count)) { + nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n"); + return; + } + + rcu_read_lock(); + for (i = 0; i < count; i++) { + ipv6_add = &payload->tun_info[i].ipv6; + port = be32_to_cpu(payload->tun_info[i].egress_port); + netdev = nfp_app_dev_get(app, port, NULL); + if (!netdev) + continue; + + n = neigh_lookup(&nd_tbl, ipv6_add, netdev); + if (!n) + continue; + + /* Update the used timestamp of neighbour */ + neigh_event_send(n, NULL); + neigh_release(n); + } + rcu_read_unlock(); +#endif +} + static int nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata, gfp_t flag) @@ -204,71 +324,126 @@ nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata, return 0; } -static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr) +static bool +__nfp_tun_has_route(struct list_head *route_list, spinlock_t *list_lock, + void *add, int add_len) { - struct nfp_flower_priv *priv = app->priv; - struct nfp_ipv4_route_entry *entry; - struct list_head *ptr, *storage; + struct nfp_offloaded_route *entry; - spin_lock_bh(&priv->tun.neigh_off_lock); - list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) { - entry = list_entry(ptr, struct nfp_ipv4_route_entry, list); - if (entry->ipv4_addr == ipv4_addr) { - spin_unlock_bh(&priv->tun.neigh_off_lock); + spin_lock_bh(list_lock); + list_for_each_entry(entry, route_list, list) + if (!memcmp(entry->ip_add, add, add_len)) { + spin_unlock_bh(list_lock); return true; } - } - spin_unlock_bh(&priv->tun.neigh_off_lock); + spin_unlock_bh(list_lock); return false; } -static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr) +static int +__nfp_tun_add_route_to_cache(struct list_head *route_list, + spinlock_t *list_lock, void *add, int add_len) { - struct nfp_flower_priv *priv = app->priv; - struct nfp_ipv4_route_entry *entry; - struct list_head *ptr, *storage; + struct nfp_offloaded_route *entry; - spin_lock_bh(&priv->tun.neigh_off_lock); - list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) { - entry = list_entry(ptr, struct nfp_ipv4_route_entry, list); - if (entry->ipv4_addr == ipv4_addr) { - spin_unlock_bh(&priv->tun.neigh_off_lock); - return; + 
spin_lock_bh(list_lock); + list_for_each_entry(entry, route_list, list) + if (!memcmp(entry->ip_add, add, add_len)) { + spin_unlock_bh(list_lock); + return 0; } - } - entry = kmalloc(sizeof(*entry), GFP_ATOMIC); + + entry = kmalloc(sizeof(*entry) + add_len, GFP_ATOMIC); if (!entry) { - spin_unlock_bh(&priv->tun.neigh_off_lock); - nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n"); - return; + spin_unlock_bh(list_lock); + return -ENOMEM; } - entry->ipv4_addr = ipv4_addr; - list_add_tail(&entry->list, &priv->tun.neigh_off_list); - spin_unlock_bh(&priv->tun.neigh_off_lock); + memcpy(entry->ip_add, add, add_len); + list_add_tail(&entry->list, route_list); + spin_unlock_bh(list_lock); + + return 0; } -static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr) +static void +__nfp_tun_del_route_from_cache(struct list_head *route_list, + spinlock_t *list_lock, void *add, int add_len) { - struct nfp_flower_priv *priv = app->priv; - struct nfp_ipv4_route_entry *entry; - struct list_head *ptr, *storage; + struct nfp_offloaded_route *entry; - spin_lock_bh(&priv->tun.neigh_off_lock); - list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) { - entry = list_entry(ptr, struct nfp_ipv4_route_entry, list); - if (entry->ipv4_addr == ipv4_addr) { + spin_lock_bh(list_lock); + list_for_each_entry(entry, route_list, list) + if (!memcmp(entry->ip_add, add, add_len)) { list_del(&entry->list); kfree(entry); break; } - } - spin_unlock_bh(&priv->tun.neigh_off_lock); + spin_unlock_bh(list_lock); +} + +static bool nfp_tun_has_route_v4(struct nfp_app *app, __be32 *ipv4_addr) +{ + struct nfp_flower_priv *priv = app->priv; + + return __nfp_tun_has_route(&priv->tun.neigh_off_list_v4, + &priv->tun.neigh_off_lock_v4, ipv4_addr, + sizeof(*ipv4_addr)); +} + +static bool +nfp_tun_has_route_v6(struct nfp_app *app, struct in6_addr *ipv6_addr) +{ + struct nfp_flower_priv *priv = app->priv; + + return __nfp_tun_has_route(&priv->tun.neigh_off_list_v6, + &priv->tun.neigh_off_lock_v6, ipv6_addr, + sizeof(*ipv6_addr)); +} + +static void +nfp_tun_add_route_to_cache_v4(struct nfp_app *app, __be32 *ipv4_addr) +{ + struct nfp_flower_priv *priv = app->priv; + + __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v4, + &priv->tun.neigh_off_lock_v4, ipv4_addr, + sizeof(*ipv4_addr)); +} + +static void +nfp_tun_add_route_to_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr) +{ + struct nfp_flower_priv *priv = app->priv; + + __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v6, + &priv->tun.neigh_off_lock_v6, ipv6_addr, + sizeof(*ipv6_addr)); +} + +static void +nfp_tun_del_route_from_cache_v4(struct nfp_app *app, __be32 *ipv4_addr) +{ + struct nfp_flower_priv *priv = app->priv; + + __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v4, + &priv->tun.neigh_off_lock_v4, ipv4_addr, + sizeof(*ipv4_addr)); +} + +static void +nfp_tun_del_route_from_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr) +{ + struct nfp_flower_priv *priv = app->priv; + + __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v6, + &priv->tun.neigh_off_lock_v6, ipv6_addr, + sizeof(*ipv6_addr)); } static void -nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, - struct flowi4 *flow, struct neighbour *neigh, gfp_t flag) +nfp_tun_write_neigh_v4(struct net_device *netdev, struct nfp_app *app, + struct flowi4 *flow, struct neighbour *neigh, gfp_t flag) { struct nfp_tun_neigh payload; u32 port_id; @@ -282,7 +457,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, /* If entry 
has expired send dst IP with all other fields 0. */ if (!(neigh->nud_state & NUD_VALID) || neigh->dead) { - nfp_tun_del_route_from_cache(app, payload.dst_ipv4); + nfp_tun_del_route_from_cache_v4(app, &payload.dst_ipv4); /* Trigger ARP to verify invalid neighbour state. */ neigh_event_send(neigh, NULL); goto send_msg; @@ -294,7 +469,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, neigh_ha_snapshot(payload.dst_addr, neigh, netdev); payload.port_id = cpu_to_be32(port_id); /* Add destination of new route to NFP cache. */ - nfp_tun_add_route_to_cache(app, payload.dst_ipv4); + nfp_tun_add_route_to_cache_v4(app, &payload.dst_ipv4); send_msg: nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH, @@ -302,16 +477,54 @@ send_msg: (unsigned char *)&payload, flag); } +static void +nfp_tun_write_neigh_v6(struct net_device *netdev, struct nfp_app *app, + struct flowi6 *flow, struct neighbour *neigh, gfp_t flag) +{ + struct nfp_tun_neigh_v6 payload; + u32 port_id; + + port_id = nfp_flower_get_port_id_from_netdev(app, netdev); + if (!port_id) + return; + + memset(&payload, 0, sizeof(struct nfp_tun_neigh_v6)); + payload.dst_ipv6 = flow->daddr; + + /* If entry has expired send dst IP with all other fields 0. */ + if (!(neigh->nud_state & NUD_VALID) || neigh->dead) { + nfp_tun_del_route_from_cache_v6(app, &payload.dst_ipv6); + /* Trigger probe to verify invalid neighbour state. */ + neigh_event_send(neigh, NULL); + goto send_msg; + } + + /* Have a valid neighbour so populate rest of entry. */ + payload.src_ipv6 = flow->saddr; + ether_addr_copy(payload.src_addr, netdev->dev_addr); + neigh_ha_snapshot(payload.dst_addr, neigh, netdev); + payload.port_id = cpu_to_be32(port_id); + /* Add destination of new route to NFP cache. */ + nfp_tun_add_route_to_cache_v6(app, &payload.dst_ipv6); + +send_msg: + nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6, + sizeof(struct nfp_tun_neigh_v6), + (unsigned char *)&payload, flag); +} + static int nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, void *ptr) { struct nfp_flower_priv *app_priv; struct netevent_redirect *redir; - struct flowi4 flow = {}; + struct flowi4 flow4 = {}; + struct flowi6 flow6 = {}; struct neighbour *n; struct nfp_app *app; struct rtable *rt; + bool ipv6 = false; int err; switch (event) { @@ -326,38 +539,62 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, return NOTIFY_DONE; } - flow.daddr = *(__be32 *)n->primary_key; + if (n->tbl->family == AF_INET6) + ipv6 = true; - /* Only concerned with route changes for representors. */ - if (!nfp_netdev_is_nfp_repr(n->dev)) - return NOTIFY_DONE; + if (ipv6) + flow6.daddr = *(struct in6_addr *)n->primary_key; + else + flow4.daddr = *(__be32 *)n->primary_key; app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb); app = app_priv->app; + if (!nfp_netdev_is_nfp_repr(n->dev) && + !nfp_flower_internal_port_can_offload(app, n->dev)) + return NOTIFY_DONE; + /* Only concerned with changes to routes already added to NFP. */ - if (!nfp_tun_has_route(app, flow.daddr)) + if ((ipv6 && !nfp_tun_has_route_v6(app, &flow6.daddr)) || + (!ipv6 && !nfp_tun_has_route_v4(app, &flow4.daddr))) return NOTIFY_DONE; #if IS_ENABLED(CONFIG_INET) - /* Do a route lookup to populate flow data. 
*/ - rt = ip_route_output_key(dev_net(n->dev), &flow); - err = PTR_ERR_OR_ZERO(rt); - if (err) + if (ipv6) { +#if IS_ENABLED(CONFIG_IPV6) + struct dst_entry *dst; + + dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(n->dev), NULL, + &flow6, NULL); + if (IS_ERR(dst)) + return NOTIFY_DONE; + + dst_release(dst); + flow6.flowi6_proto = IPPROTO_UDP; + nfp_tun_write_neigh_v6(n->dev, app, &flow6, n, GFP_ATOMIC); +#else return NOTIFY_DONE; +#endif /* CONFIG_IPV6 */ + } else { + /* Do a route lookup to populate flow data. */ + rt = ip_route_output_key(dev_net(n->dev), &flow4); + err = PTR_ERR_OR_ZERO(rt); + if (err) + return NOTIFY_DONE; - ip_rt_put(rt); + ip_rt_put(rt); + + flow4.flowi4_proto = IPPROTO_UDP; + nfp_tun_write_neigh_v4(n->dev, app, &flow4, n, GFP_ATOMIC); + } #else return NOTIFY_DONE; -#endif - - flow.flowi4_proto = IPPROTO_UDP; - nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC); +#endif /* CONFIG_INET */ return NOTIFY_OK; } -void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb) +void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb) { struct nfp_tun_req_route_ipv4 *payload; struct net_device *netdev; @@ -391,7 +628,7 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb) ip_rt_put(rt); if (!n) goto fail_rcu_unlock; - nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC); + nfp_tun_write_neigh_v4(n->dev, app, &flow, n, GFP_ATOMIC); neigh_release(n); rcu_read_unlock(); return; @@ -401,6 +638,48 @@ fail_rcu_unlock: nfp_flower_cmsg_warn(app, "Requested route not found.\n"); } +void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb) +{ + struct nfp_tun_req_route_ipv6 *payload; + struct net_device *netdev; + struct flowi6 flow = {}; + struct dst_entry *dst; + struct neighbour *n; + + payload = nfp_flower_cmsg_get_data(skb); + + rcu_read_lock(); + netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL); + if (!netdev) + goto fail_rcu_unlock; + + flow.daddr = payload->ipv6_addr; + flow.flowi6_proto = IPPROTO_UDP; + +#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) + dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow, + NULL); + if (IS_ERR(dst)) + goto fail_rcu_unlock; +#else + goto fail_rcu_unlock; +#endif + + n = dst_neigh_lookup(dst, &flow.daddr); + dst_release(dst); + if (!n) + goto fail_rcu_unlock; + + nfp_tun_write_neigh_v6(n->dev, app, &flow, n, GFP_ATOMIC); + neigh_release(n); + rcu_read_unlock(); + return; + +fail_rcu_unlock: + rcu_read_unlock(); + nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n"); +} + static void nfp_tun_write_ipv4_list(struct nfp_app *app) { struct nfp_flower_priv *priv = app->priv; @@ -482,6 +761,78 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4) nfp_tun_write_ipv4_list(app); } +static void nfp_tun_write_ipv6_list(struct nfp_app *app) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_ipv6_addr_entry *entry; + struct nfp_tun_ipv6_addr payload; + int count = 0; + + memset(&payload, 0, sizeof(struct nfp_tun_ipv6_addr)); + mutex_lock(&priv->tun.ipv6_off_lock); + list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) { + if (count >= NFP_FL_IPV6_ADDRS_MAX) { + nfp_flower_cmsg_warn(app, "Too many IPv6 tunnel endpoint addresses, some cannot be offloaded.\n"); + break; + } + payload.ipv6_addr[count++] = entry->ipv6_addr; + } + mutex_unlock(&priv->tun.ipv6_off_lock); + payload.count = cpu_to_be32(count); + + nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6, + sizeof(struct nfp_tun_ipv6_addr), + 
&payload, GFP_KERNEL); +} + +struct nfp_ipv6_addr_entry * +nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_ipv6_addr_entry *entry; + + mutex_lock(&priv->tun.ipv6_off_lock); + list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) + if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) { + entry->ref_count++; + mutex_unlock(&priv->tun.ipv6_off_lock); + return entry; + } + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + mutex_unlock(&priv->tun.ipv6_off_lock); + nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n"); + return NULL; + } + entry->ipv6_addr = *ipv6; + entry->ref_count = 1; + list_add_tail(&entry->list, &priv->tun.ipv6_off_list); + mutex_unlock(&priv->tun.ipv6_off_lock); + + nfp_tun_write_ipv6_list(app); + + return entry; +} + +void +nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry) +{ + struct nfp_flower_priv *priv = app->priv; + bool freed = false; + + mutex_lock(&priv->tun.ipv6_off_lock); + if (!--entry->ref_count) { + list_del(&entry->list); + kfree(entry); + freed = true; + } + mutex_unlock(&priv->tun.ipv6_off_lock); + + if (freed) + nfp_tun_write_ipv6_list(app); +} + static int __nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del) { @@ -556,6 +907,8 @@ nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry, list_del(&repr_priv->mac_list); list_add_tail(&repr_priv->mac_list, &entry->repr_list); + } else if (nfp_flower_is_supported_bridge(netdev)) { + entry->bridge_count++; } entry->ref_count++; @@ -572,20 +925,35 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) { - nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod); - return 0; + if (entry->bridge_count || + !nfp_flower_is_supported_bridge(netdev)) { + nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, + netdev, mod); + return 0; + } + + /* MAC is global but matches need to go to pre_tun table. */ + nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT; } - /* Assign a global index if non-repr or MAC address is now shared. */ - if (entry || !port) { - ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0, - NFP_MAX_MAC_INDEX, GFP_KERNEL); - if (ida_idx < 0) - return ida_idx; + if (!nfp_mac_idx) { + /* Assign a global index if non-repr or MAC is now shared. 
*/ + if (entry || !port) { + ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0, + NFP_MAX_MAC_INDEX, GFP_KERNEL); + if (ida_idx < 0) + return ida_idx; - nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx); - } else { - nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port); + nfp_mac_idx = + nfp_tunnel_get_global_mac_idx_from_ida(ida_idx); + + if (nfp_flower_is_supported_bridge(netdev)) + nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT; + + } else { + nfp_mac_idx = + nfp_tunnel_get_mac_idx_from_phy_port_id(port); + } } if (!entry) { @@ -654,6 +1022,25 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev, list_del(&repr_priv->mac_list); } + if (nfp_flower_is_supported_bridge(netdev)) { + entry->bridge_count--; + + if (!entry->bridge_count && entry->ref_count) { + u16 nfp_mac_idx; + + nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT; + if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, + false)) { + nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n", + netdev_name(netdev)); + return 0; + } + + entry->index = nfp_mac_idx; + return 0; + } + } + /* If MAC is now used by 1 repr set the offloaded MAC index to port. */ if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) { u16 nfp_mac_idx; @@ -713,6 +1100,9 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev, return 0; repr_priv = repr->app_priv; + if (repr_priv->on_bridge) + return 0; + mac_offloaded = &repr_priv->mac_offloaded; off_mac = &repr_priv->offloaded_mac_addr[0]; port = nfp_repr_get_port_id(netdev); @@ -828,10 +1218,119 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app, if (err) nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n", netdev_name(netdev)); + } else if (event == NETDEV_CHANGEUPPER) { + /* If a repr is attached to a bridge then tunnel packets + * entering the physical port are directed through the bridge + * datapath and cannot be directly detunneled. Therefore, + * associated offloaded MACs and indexes should not be used + * by fw for detunneling. 
+ */ + struct netdev_notifier_changeupper_info *info = ptr; + struct net_device *upper = info->upper_dev; + struct nfp_flower_repr_priv *repr_priv; + struct nfp_repr *repr; + + if (!nfp_netdev_is_nfp_repr(netdev) || + !nfp_flower_is_supported_bridge(upper)) + return NOTIFY_OK; + + repr = netdev_priv(netdev); + if (repr->app != app) + return NOTIFY_OK; + + repr_priv = repr->app_priv; + + if (info->linking) { + if (nfp_tunnel_offload_mac(app, netdev, + NFP_TUNNEL_MAC_OFFLOAD_DEL)) + nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n", + netdev_name(netdev)); + repr_priv->on_bridge = true; + } else { + repr_priv->on_bridge = false; + + if (!(netdev->flags & IFF_UP)) + return NOTIFY_OK; + + if (nfp_tunnel_offload_mac(app, netdev, + NFP_TUNNEL_MAC_OFFLOAD_ADD)) + nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n", + netdev_name(netdev)); + } } return NOTIFY_OK; } +int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, + struct nfp_fl_payload *flow) +{ + struct nfp_flower_priv *app_priv = app->priv; + struct nfp_tun_offloaded_mac *mac_entry; + struct nfp_tun_pre_tun_rule payload; + struct net_device *internal_dev; + int err; + + if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT) + return -ENOSPC; + + memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule)); + + internal_dev = flow->pre_tun_rule.dev; + payload.vlan_tci = flow->pre_tun_rule.vlan_tci; + payload.host_ctx_id = flow->meta.host_ctx_id; + + /* Lookup MAC index for the pre-tunnel rule egress device. + * Note that because the device is always an internal port, it will + * have a constant global index so does not need to be tracked. + */ + mac_entry = nfp_tunnel_lookup_offloaded_macs(app, + internal_dev->dev_addr); + if (!mac_entry) + return -ENOENT; + + payload.port_idx = cpu_to_be16(mac_entry->index); + + /* Copy mac id and vlan to flow - dev may not exist at delete time. */ + flow->pre_tun_rule.vlan_tci = payload.vlan_tci; + flow->pre_tun_rule.port_idx = payload.port_idx; + + err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE, + sizeof(struct nfp_tun_pre_tun_rule), + (unsigned char *)&payload, GFP_KERNEL); + if (err) + return err; + + app_priv->pre_tun_rule_cnt++; + + return 0; +} + +int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app, + struct nfp_fl_payload *flow) +{ + struct nfp_flower_priv *app_priv = app->priv; + struct nfp_tun_pre_tun_rule payload; + u32 tmp_flags = 0; + int err; + + memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule)); + + tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL; + payload.flags = cpu_to_be32(tmp_flags); + payload.vlan_tci = flow->pre_tun_rule.vlan_tci; + payload.port_idx = flow->pre_tun_rule.port_idx; + + err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE, + sizeof(struct nfp_tun_pre_tun_rule), + (unsigned char *)&payload, GFP_KERNEL); + if (err) + return err; + + app_priv->pre_tun_rule_cnt--; + + return 0; +} + int nfp_tunnel_config_start(struct nfp_app *app) { struct nfp_flower_priv *priv = app->priv; @@ -845,13 +1344,17 @@ int nfp_tunnel_config_start(struct nfp_app *app) ida_init(&priv->tun.mac_off_ids); - /* Initialise priv data for IPv4 offloading. */ + /* Initialise priv data for IPv4/v6 offloading. */ mutex_init(&priv->tun.ipv4_off_lock); INIT_LIST_HEAD(&priv->tun.ipv4_off_list); + mutex_init(&priv->tun.ipv6_off_lock); + INIT_LIST_HEAD(&priv->tun.ipv6_off_list); /* Initialise priv data for neighbour offloading. 
*/ - spin_lock_init(&priv->tun.neigh_off_lock); - INIT_LIST_HEAD(&priv->tun.neigh_off_list); + spin_lock_init(&priv->tun.neigh_off_lock_v4); + INIT_LIST_HEAD(&priv->tun.neigh_off_list_v4); + spin_lock_init(&priv->tun.neigh_off_lock_v6); + INIT_LIST_HEAD(&priv->tun.neigh_off_list_v6); priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler; err = register_netevent_notifier(&priv->tun.neigh_nb); @@ -866,9 +1369,11 @@ int nfp_tunnel_config_start(struct nfp_app *app) void nfp_tunnel_config_stop(struct nfp_app *app) { + struct nfp_offloaded_route *route_entry, *temp; struct nfp_flower_priv *priv = app->priv; - struct nfp_ipv4_route_entry *route_entry; struct nfp_ipv4_addr_entry *ip_entry; + struct nfp_tun_neigh_v6 ipv6_route; + struct nfp_tun_neigh ipv4_route; struct list_head *ptr, *storage; unregister_netevent_notifier(&priv->tun.neigh_nb); @@ -882,12 +1387,35 @@ void nfp_tunnel_config_stop(struct nfp_app *app) kfree(ip_entry); } - /* Free any memory that may be occupied by the route list. */ - list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) { - route_entry = list_entry(ptr, struct nfp_ipv4_route_entry, - list); + mutex_destroy(&priv->tun.ipv6_off_lock); + + /* Free memory in the route list and remove entries from fw cache. */ + list_for_each_entry_safe(route_entry, temp, + &priv->tun.neigh_off_list_v4, list) { + memset(&ipv4_route, 0, sizeof(ipv4_route)); + memcpy(&ipv4_route.dst_ipv4, &route_entry->ip_add, + sizeof(ipv4_route.dst_ipv4)); list_del(&route_entry->list); kfree(route_entry); + + nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH, + sizeof(struct nfp_tun_neigh), + (unsigned char *)&ipv4_route, + GFP_KERNEL); + } + + list_for_each_entry_safe(route_entry, temp, + &priv->tun.neigh_off_list_v6, list) { + memset(&ipv6_route, 0, sizeof(ipv6_route)); + memcpy(&ipv6_route.dst_ipv6, &route_entry->ip_add, + sizeof(ipv6_route.dst_ipv6)); + list_del(&route_entry->list); + kfree(route_entry); + + nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6, + sizeof(struct nfp_tun_neigh), + (unsigned char *)&ipv6_route, + GFP_KERNEL); } /* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 60e57f08de80..4d282fc56009 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -352,7 +352,7 @@ nfp_net_fw_request(struct pci_dev *pdev, struct nfp_pf *pf, const char *name) err = request_firmware_direct(&fw, name, &pdev->dev); nfp_info(pf->cpp, " %s: %s\n", - name, err ? "not found" : "found, loading..."); + name, err ? 
"not found" : "found"); if (err) return NULL; @@ -430,8 +430,35 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) return nfp_net_fw_request(pdev, pf, fw_name); } +static int +nfp_get_fw_policy_value(struct pci_dev *pdev, struct nfp_nsp *nsp, + const char *key, const char *default_val, int max_val, + int *value) +{ + char hwinfo[64]; + long hi_val; + int err; + + snprintf(hwinfo, sizeof(hwinfo), key); + err = nfp_nsp_hwinfo_lookup_optional(nsp, hwinfo, sizeof(hwinfo), + default_val); + if (err) + return err; + + err = kstrtol(hwinfo, 0, &hi_val); + if (err || hi_val < 0 || hi_val > max_val) { + dev_warn(&pdev->dev, + "Invalid value '%s' from '%s', ignoring\n", + hwinfo, key); + err = kstrtol(default_val, 0, &hi_val); + } + + *value = hi_val; + return err; +} + /** - * nfp_net_fw_load() - Load the firmware image + * nfp_fw_load() - Load the firmware image * @pdev: PCI Device structure * @pf: NFP PF Device structure * @nsp: NFP SP handle @@ -441,44 +468,107 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) static int nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) { - const struct firmware *fw; + bool do_reset, fw_loaded = false; + const struct firmware *fw = NULL; + int err, reset, policy, ifcs = 0; + char *token, *ptr; + char hwinfo[64]; u16 interface; - int err; + + snprintf(hwinfo, sizeof(hwinfo), "abi_drv_load_ifc"); + err = nfp_nsp_hwinfo_lookup_optional(nsp, hwinfo, sizeof(hwinfo), + NFP_NSP_DRV_LOAD_IFC_DEFAULT); + if (err) + return err; interface = nfp_cpp_interface(pf->cpp); - if (NFP_CPP_INTERFACE_UNIT_of(interface) != 0) { - /* Only Unit 0 should reset or load firmware */ + ptr = hwinfo; + while ((token = strsep(&ptr, ","))) { + unsigned long interface_hi; + + err = kstrtoul(token, 0, &interface_hi); + if (err) { + dev_err(&pdev->dev, + "Failed to parse interface '%s': %d\n", + token, err); + return err; + } + + ifcs++; + if (interface == interface_hi) + break; + } + + if (!token) { dev_info(&pdev->dev, "Firmware will be loaded by partner\n"); return 0; } + err = nfp_get_fw_policy_value(pdev, nsp, "abi_drv_reset", + NFP_NSP_DRV_RESET_DEFAULT, + NFP_NSP_DRV_RESET_NEVER, &reset); + if (err) + return err; + + err = nfp_get_fw_policy_value(pdev, nsp, "app_fw_from_flash", + NFP_NSP_APP_FW_LOAD_DEFAULT, + NFP_NSP_APP_FW_LOAD_PREF, &policy); + if (err) + return err; + fw = nfp_net_fw_find(pdev, pf); - if (!fw) { - if (nfp_nsp_has_stored_fw_load(nsp)) - nfp_nsp_load_stored_fw(nsp); - return 0; + do_reset = reset == NFP_NSP_DRV_RESET_ALWAYS || + (fw && reset == NFP_NSP_DRV_RESET_DISK); + + if (do_reset) { + dev_info(&pdev->dev, "Soft-resetting the NFP\n"); + err = nfp_nsp_device_soft_reset(nsp); + if (err < 0) { + dev_err(&pdev->dev, + "Failed to soft reset the NFP: %d\n", err); + goto exit_release_fw; + } } - dev_info(&pdev->dev, "Soft-reset, loading FW image\n"); - err = nfp_nsp_device_soft_reset(nsp); - if (err < 0) { - dev_err(&pdev->dev, "Failed to soft reset the NFP: %d\n", - err); - goto exit_release_fw; - } + if (fw && policy != NFP_NSP_APP_FW_LOAD_FLASH) { + if (nfp_nsp_has_fw_loaded(nsp) && nfp_nsp_fw_loaded(nsp)) + goto exit_release_fw; - err = nfp_nsp_load_fw(nsp, fw); - if (err < 0) { - dev_err(&pdev->dev, "FW loading failed: %d\n", err); - goto exit_release_fw; + err = nfp_nsp_load_fw(nsp, fw); + if (err < 0) { + dev_err(&pdev->dev, "FW loading failed: %d\n", + err); + goto exit_release_fw; + } + dev_info(&pdev->dev, "Finished loading FW image\n"); + fw_loaded = true; + } else if (policy != NFP_NSP_APP_FW_LOAD_DISK && + 
nfp_nsp_has_stored_fw_load(nsp)) { + + /* Don't propagate this error to stick with legacy driver + * behavior, failure will be detected later during init. + */ + if (!nfp_nsp_load_stored_fw(nsp)) + dev_info(&pdev->dev, "Finished loading stored FW image\n"); + + /* Don't flag the fw_loaded in this case since other devices + * may reuse the firmware when configured this way + */ + } else { + dev_warn(&pdev->dev, "Didn't load firmware, please update flash or reconfigure card\n"); } - dev_info(&pdev->dev, "Finished loading FW image\n"); - exit_release_fw: release_firmware(fw); - return err < 0 ? err : 1; + /* We don't want to unload firmware when other devices may still be + * dependent on it, which could be the case if there are multiple + * devices that could load firmware. + */ + if (fw_loaded && ifcs == 1) + pf->unload_fw_on_remove = true; + + return err < 0 ? err : fw_loaded; } static void @@ -704,7 +794,7 @@ err_net_remove: err_fw_unload: kfree(pf->rtbl); nfp_mip_close(pf->mip); - if (pf->fw_loaded) + if (pf->unload_fw_on_remove) nfp_fw_unload(pf); kfree(pf->eth_tbl); kfree(pf->nspi); @@ -744,7 +834,7 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) vfree(pf->dumpspec); kfree(pf->rtbl); nfp_mip_close(pf->mip); - if (unload_fw && pf->fw_loaded) + if (unload_fw && pf->unload_fw_on_remove) nfp_fw_unload(pf); destroy_workqueue(pf->wq); @@ -815,6 +905,8 @@ static void __exit nfp_main_exit(void) module_init(nfp_main_init); module_exit(nfp_main_exit); +MODULE_FIRMWARE("netronome/nic_AMDA0058-0011_2x40.nffw"); +MODULE_FIRMWARE("netronome/nic_AMDA0058-0012_2x40.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_1x40.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_4x10.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0096-0001_2x10.nffw"); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index b7211f200d22..5d5812fd9317 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -64,6 +64,7 @@ struct nfp_dumpspec { * @limit_vfs: Number of VFs supported by firmware (~0 for PCI limit) * @num_vfs: Number of SR-IOV VFs enabled * @fw_loaded: Is the firmware loaded? + * @unload_fw_on_remove:Do we need to unload firmware on driver removal? 
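As an aside on the firmware-load policy introduced above: a minimal standalone sketch (not the driver code itself) of how the three hwinfo-driven inputs combine in nfp_fw_load() — whether a disk image was found, the 'abi_drv_reset' policy and the 'app_fw_from_flash' policy. The constant values mirror NFP_NSP_DRV_RESET_* and NFP_NSP_APP_FW_LOAD_* from nfp_nsp.h; the helper name and printed messages are invented for the example.

#include <stdbool.h>
#include <stdio.h>

enum drv_reset { RESET_DISK = 0, RESET_ALWAYS = 1, RESET_NEVER = 2 };
enum fw_load   { LOAD_DISK = 0, LOAD_FLASH = 1, LOAD_PREF = 2 };

/* Mirrors the decision flow of nfp_fw_load() in the hunk above. */
static void fw_load_plan(bool disk_fw_found, enum drv_reset reset,
			 enum fw_load policy, bool stored_fw_available)
{
	/* Soft-reset when always requested, or when requested for disk
	 * loads and a disk image was actually found.
	 */
	bool do_reset = reset == RESET_ALWAYS ||
			(disk_fw_found && reset == RESET_DISK);

	printf("soft reset: %s\n", do_reset ? "yes" : "no");

	if (disk_fw_found && policy != LOAD_FLASH)
		printf("load the FW image found on disk\n");
	else if (policy != LOAD_DISK && stored_fw_available)
		printf("ask the NSP to load the FW stored on flash\n");
	else
		printf("no FW loaded - update flash or reconfigure the card\n");
}

int main(void)
{
	/* Default policy: reset only for disk loads, prefer the disk image. */
	fw_load_plan(true, RESET_DISK, LOAD_PREF, true);
	return 0;
}

With the defaults this reports a soft reset followed by loading the on-disk image; only when no image is found on disk (or 'app_fw_from_flash' forces flash) does the driver fall back to the NSP-stored firmware.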
* @ctrl_vnic: Pointer to the control vNIC if available * @mip: MIP handle * @rtbl: RTsym table @@ -110,6 +111,7 @@ struct nfp_pf { unsigned int num_vfs; bool fw_loaded; + bool unload_fw_on_remove; struct nfp_net *ctrl_vnic; @@ -185,4 +187,7 @@ int nfp_shared_buf_pool_get(struct nfp_pf *pf, unsigned int sb, u16 pool_index, int nfp_shared_buf_pool_set(struct nfp_pf *pf, unsigned int sb, u16 pool_index, u32 size, enum devlink_sb_threshold_type threshold_type); + +int nfp_devlink_params_register(struct nfp_pf *pf); +void nfp_devlink_params_unregister(struct nfp_pf *pf); #endif /* NFP_MAIN_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 5d6c3738b494..ff4438478ea9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -66,7 +66,7 @@ #define NFP_NET_MAX_DMA_BITS 40 /* Default size for MTU and freelist buffer sizes */ -#define NFP_NET_DEFAULT_MTU 1500 +#define NFP_NET_DEFAULT_MTU 1500U /* Maximum number of bytes prepended to a packet */ #define NFP_NET_MAX_PREPEND 64 @@ -586,6 +586,9 @@ struct nfp_net_dp { * @ktls_conn_id_gen: Trivial generator for kTLS connection ids (for TX) * @ktls_no_space: Counter of firmware rejecting kTLS connection due to * lack of space + * @ktls_rx_resync_req: Counter of TLS RX resync requested + * @ktls_rx_resync_ign: Counter of TLS RX resync requests ignored + * @ktls_rx_resync_sent: Counter of TLS RX resync completed * @mbox_cmsg: Common Control Message via vNIC mailbox state * @mbox_cmsg.queue: CCM mbox queue of pending messages * @mbox_cmsg.wq: CCM mbox wait queue of waiting processes @@ -674,6 +677,9 @@ struct nfp_net { atomic64_t ktls_conn_id_gen; atomic_t ktls_no_space; + atomic_t ktls_rx_resync_req; + atomic_t ktls_rx_resync_ign; + atomic_t ktls_rx_resync_sent; struct { struct sk_buff_head queue; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 9903805717da..9bfb3b077bc1 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -47,6 +47,7 @@ #include "nfp_net_sriov.h" #include "nfp_port.h" #include "crypto/crypto.h" +#include "crypto/fw.h" /** * nfp_net_get_fw_version() - Read and parse the FW version @@ -872,7 +873,8 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, /* jump forward, a TX may have gotten lost, need to sync TX */ if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4) - tls_offload_tx_resync_request(nskb->sk); + tls_offload_tx_resync_request(nskb->sk, seq, + ntls->next_seq); *nr_frags = 0; return nskb; @@ -975,7 +977,7 @@ static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle) static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); - const struct skb_frag_struct *frag; + const skb_frag_t *frag; int f, nr_frags, wr_idx, md_bytes; struct nfp_net_tx_ring *tx_ring; struct nfp_net_r_vector *r_vec; @@ -1155,7 +1157,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget) todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p); while (todo--) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag; struct nfp_net_tx_buf *tx_buf; struct sk_buff *skb; int fidx, nr_frags; @@ -1270,7 +1272,7 @@ static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring) static void nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) { - const struct 
skb_frag_struct *frag; + const skb_frag_t *frag; struct netdev_queue *nd_q; while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) { @@ -1320,17 +1322,11 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) netdev_tx_reset_queue(nd_q); } -static void nfp_net_tx_timeout(struct net_device *netdev) +static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct nfp_net *nn = netdev_priv(netdev); - int i; - for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) { - if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i))) - continue; - nn_warn(nn, "TX timeout on ring: %d\n", i); - } - nn_warn(nn, "TX watchdog timeout\n"); + nn_warn(nn, "TX watchdog timeout on ring: %u\n", txqueue); } /* Receive processing @@ -1666,9 +1662,9 @@ nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta, &rx_hash->hash); } -static void * +static bool nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta, - void *data, int meta_len) + void *data, void *pkt, unsigned int pkt_len, int meta_len) { u32 meta_info; @@ -1698,14 +1694,20 @@ nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta, (__force __wsum)__get_unaligned_cpu32(data); data += 4; break; + case NFP_NET_META_RESYNC_INFO: + if (nfp_net_tls_rx_resync_req(netdev, data, pkt, + pkt_len)) + return NULL; + data += sizeof(struct nfp_net_tls_resync_req); + break; default: - return NULL; + return true; } meta_info >>= NFP_NET_META_FIELD_SIZE; } - return data; + return data != pkt; } static void @@ -1890,12 +1892,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) nfp_net_set_hash_desc(dp->netdev, &meta, rxbuf->frag + meta_off, rxd); } else if (meta_len) { - void *end; - - end = nfp_net_parse_meta(dp->netdev, &meta, - rxbuf->frag + meta_off, - meta_len); - if (unlikely(end != rxbuf->frag + pkt_off)) { + if (unlikely(nfp_net_parse_meta(dp->netdev, &meta, + rxbuf->frag + meta_off, + rxbuf->frag + pkt_off, + pkt_len, meta_len))) { nn_dp_warn(dp, "invalid RX packet metadata\n"); nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); @@ -4116,14 +4116,7 @@ int nfp_net_init(struct nfp_net *nn) /* Set default MTU and Freelist buffer size */ if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) { - if (nn->app->ctrl_mtu <= nn->max_mtu) { - nn->dp.mtu = nn->app->ctrl_mtu; - } else { - if (nn->app->ctrl_mtu != NFP_APP_CTRL_MTU_MAX) - nn_warn(nn, "app requested MTU above max supported %u > %u\n", - nn->app->ctrl_mtu, nn->max_mtu); - nn->dp.mtu = nn->max_mtu; - } + nn->dp.mtu = min(nn->app->ctrl_mtu, nn->max_mtu); } else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) { nn->dp.mtu = nn->max_mtu; } else { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c index d835c14b7257..c3a763134e79 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c @@ -17,6 +17,30 @@ static void nfp_net_tlv_caps_reset(struct nfp_net_tlv_caps *caps) caps->mbox_len = NFP_NET_CFG_MBOX_VAL_MAX_SZ; } +static bool +nfp_net_tls_parse_crypto_ops(struct device *dev, struct nfp_net_tlv_caps *caps, + u8 __iomem *ctrl_mem, u8 __iomem *data, + unsigned int length, unsigned int offset, + bool rx_stream_scan) +{ + /* Ignore the legacy TLV if new one was already parsed */ + if (caps->tls_resync_ss && !rx_stream_scan) + return true; + + if (length < 32) { + dev_err(dev, + "CRYPTO OPS TLV should be at least 32B, is %dB offset:%u\n", + length, offset); + return false; 
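Stepping back to the RX metadata change above: a small standalone sketch (not the driver routine) of how the metadata prepend is walked. A 32-bit word packs 4-bit type codes consumed LSB-first, and each code tells the parser how many bytes of metadata follow; the new RESYNC_INFO code simply consumes a TLS resync request structure. The 4-bit field width and the CSUM/RESYNC_INFO codes follow nfp_net_ctrl.h; the buffer size and names below are made up for the example.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define META_FIELD_SIZE  4	/* bits per type code */
#define META_CSUM        6	/* checksum complete: 4B of data */
#define META_RESYNC_INFO 8	/* TLS RX resync request */

static bool walk_rx_meta(const uint8_t *data, const uint8_t *pkt,
			 uint32_t meta_info, size_t resync_req_sz)
{
	while (meta_info) {
		switch (meta_info & ((1 << META_FIELD_SIZE) - 1)) {
		case META_CSUM:
			data += 4;		/* checksum complete value */
			break;
		case META_RESYNC_INFO:
			data += resync_req_sz;	/* resync request payload */
			break;
		default:
			return false;		/* unknown field - drop */
		}
		meta_info >>= META_FIELD_SIZE;
	}
	/* Valid only if the metadata ends exactly where the packet starts. */
	return data == pkt;
}

int main(void)
{
	uint8_t meta[4 + 16] = { 0 };	/* 4B csum + hypothetical 16B resync req */
	/* Two fields, LSB-first: CSUM first, then RESYNC_INFO. */
	uint32_t meta_info = (META_RESYNC_INFO << META_FIELD_SIZE) | META_CSUM;

	printf("%s\n", walk_rx_meta(meta, meta + sizeof(meta), meta_info, 16) ?
	       "meta ok" : "meta invalid");
	return 0;
}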
+ } + + caps->crypto_ops = readl(data); + caps->crypto_enable_off = data - ctrl_mem + 16; + caps->tls_resync_ss = rx_stream_scan; + + return true; +} + int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem, struct nfp_net_tlv_caps *caps) { @@ -104,15 +128,25 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem, caps->mbox_cmsg_types = readl(data); break; case NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS: - if (length < 32) { - dev_err(dev, - "CRYPTO OPS TLV should be at least 32B, is %dB offset:%u\n", - length, offset); + if (!nfp_net_tls_parse_crypto_ops(dev, caps, ctrl_mem, + data, length, offset, + false)) return -EINVAL; + break; + case NFP_NET_CFG_TLV_TYPE_VNIC_STATS: + if ((data - ctrl_mem) % 8) { + dev_warn(dev, "VNIC STATS TLV misaligned, ignoring offset:%u len:%u\n", + offset, length); + break; } - - caps->crypto_ops = readl(data); - caps->crypto_enable_off = data - ctrl_mem + 16; + caps->vnic_stats_off = data - ctrl_mem; + caps->vnic_stats_cnt = length / 10; + break; + case NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN: + if (!nfp_net_tls_parse_crypto_ops(dev, caps, ctrl_mem, + data, length, offset, + true)) + return -EINVAL; break; default: if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr)) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index ee6b24e4eacd..3d61a8cb60b0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -45,6 +45,7 @@ #define NFP_NET_META_PORTID 5 #define NFP_NET_META_CSUM 6 /* checksum complete type */ #define NFP_NET_META_CONN_HANDLE 7 +#define NFP_NET_META_RESYNC_INFO 8 /* RX resync info request */ #define NFP_META_PORT_ID_CTRL ~0U @@ -479,6 +480,22 @@ * 8 words, bitmaps of supported and enabled crypto operations. * First 16B (4 words) contains a bitmap of supported crypto operations, * and next 16B contain the enabled operations. + * This capability is made obsolete by ones with better sync methods. + * + * %NFP_NET_CFG_TLV_TYPE_VNIC_STATS: + * Variable, per-vNIC statistics, data should be 8B aligned (FW should insert + * zero-length RESERVED TLV to pad). + * TLV data has two sections. First is an array of statistics' IDs (2B each). + * Second 8B statistics themselves. Statistics are 8B aligned, meaning there + * may be a padding between sections. + * Number of statistics can be determined as floor(tlv.length / (2 + 8)). + * This TLV overwrites %NFP_NET_CFG_STATS_* values (statistics in this TLV + * duplicate the old ones, so driver should be careful not to unnecessarily + * render both). + * + * %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN: + * Same as %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS, but crypto TLS does stream scan + * RX sync, rather than kernel-assisted sync. 
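To make the VNIC_STATS TLV layout just described concrete, here is a tiny standalone sketch (not driver code) of the offset arithmetic: floor(len / 10) statistics, 2B IDs first, then 8B values starting at the next 8B-aligned offset. The function name and the sample length are invented for the example.

#include <stdint.h>
#include <stdio.h>

static void vnic_stats_layout(uint32_t len)
{
	uint32_t cnt = len / (2 + 8);		 /* number of statistics */
	uint32_t ids_end = cnt * 2;		 /* 2B ID per statistic */
	uint32_t vals_off = (ids_end + 7) & ~7U; /* values are 8B aligned */

	printf("%u stats: IDs at [0, %u), values at [%u, %u)\n",
	       cnt, ids_end, vals_off, vals_off + cnt * 8);
}

int main(void)
{
	/* A 104B TLV (20B of IDs, 4B padding, 80B of values) holds 10 stats. */
	vnic_stats_layout(104);
	return 0;
}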
*/ #define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0 #define NFP_NET_CFG_TLV_TYPE_RESERVED 1 @@ -490,6 +507,8 @@ #define NFP_NET_CFG_TLV_TYPE_REPR_CAP 7 #define NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES 10 #define NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS 11 /* see crypto/fw.h */ +#define NFP_NET_CFG_TLV_TYPE_VNIC_STATS 12 +#define NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN 13 struct device; @@ -502,6 +521,9 @@ struct device; * @mbox_cmsg_types: cmsgs which can be passed through the mailbox * @crypto_ops: supported crypto operations * @crypto_enable_off: offset of crypto ops enable region + * @vnic_stats_off: offset of vNIC stats area + * @vnic_stats_cnt: number of vNIC stats + * @tls_resync_ss: TLS resync will be performed via stream scan */ struct nfp_net_tlv_caps { u32 me_freq_mhz; @@ -511,6 +533,9 @@ struct nfp_net_tlv_caps { u32 mbox_cmsg_types; u32 crypto_ops; unsigned int crypto_enable_off; + unsigned int vnic_stats_off; + unsigned int vnic_stats_cnt; + unsigned int tls_resync_ss:1; }; int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index ab7f2498e1c4..553c708694e8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -159,19 +159,13 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir) else strcpy(name, "ctrl-vnic"); nn->debugfs_dir = debugfs_create_dir(name, ddir); - if (IS_ERR_OR_NULL(nn->debugfs_dir)) - return; /* Create queue debugging sub-tree */ queues = debugfs_create_dir("queue", nn->debugfs_dir); - if (IS_ERR_OR_NULL(queues)) - return; rx = debugfs_create_dir("rx", queues); tx = debugfs_create_dir("tx", queues); xdp = debugfs_create_dir("xdp", queues); - if (IS_ERR_OR_NULL(rx) || IS_ERR_OR_NULL(tx) || IS_ERR_OR_NULL(xdp)) - return; for (i = 0; i < min(nn->max_rx_rings, nn->max_r_vecs); i++) { sprintf(name, "%d", i); @@ -190,16 +184,7 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir) struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev) { - struct dentry *dev_dir; - - if (IS_ERR_OR_NULL(nfp_dir)) - return NULL; - - dev_dir = debugfs_create_dir(pci_name(pdev), nfp_dir); - if (IS_ERR_OR_NULL(dev_dir)) - return NULL; - - return dev_dir; + return debugfs_create_dir(pci_name(pdev), nfp_dir); } void nfp_net_debugfs_dir_clean(struct dentry **dir) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 1b840ee47339..d648e32c0520 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -148,11 +148,33 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = { { "tx_pause_frames_class7", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7, }, }; +static const char nfp_tlv_stat_names[][ETH_GSTRING_LEN] = { + [1] = "dev_rx_discards", + [2] = "dev_rx_errors", + [3] = "dev_rx_bytes", + [4] = "dev_rx_uc_bytes", + [5] = "dev_rx_mc_bytes", + [6] = "dev_rx_bc_bytes", + [7] = "dev_rx_pkts", + [8] = "dev_rx_mc_pkts", + [9] = "dev_rx_bc_pkts", + + [10] = "dev_tx_discards", + [11] = "dev_tx_errors", + [12] = "dev_tx_bytes", + [13] = "dev_tx_uc_bytes", + [14] = "dev_tx_mc_bytes", + [15] = "dev_tx_bc_bytes", + [16] = "dev_tx_pkts", + [17] = "dev_tx_mc_pkts", + [18] = "dev_tx_bc_pkts", +}; + #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats) #define NN_ET_SWITCH_STATS_LEN 9 #define NN_RVEC_GATHER_STATS 13 #define 
NN_RVEC_PER_Q_STATS 3 -#define NN_CTRL_PATH_STATS 1 +#define NN_CTRL_PATH_STATS 4 #define SFP_SFF_REV_COMPLIANCE 1 @@ -454,6 +476,9 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) data = nfp_pr_et(data, "tx_tls_drop_no_sync_data"); data = nfp_pr_et(data, "hw_tls_no_space"); + data = nfp_pr_et(data, "rx_tls_resync_req_ok"); + data = nfp_pr_et(data, "rx_tls_resync_req_ign"); + data = nfp_pr_et(data, "rx_tls_resync_sent"); return data; } @@ -502,6 +527,9 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) *data++ = gathered_stats[j]; *data++ = atomic_read(&nn->ktls_no_space); + *data++ = atomic_read(&nn->ktls_rx_resync_req); + *data++ = atomic_read(&nn->ktls_rx_resync_ign); + *data++ = atomic_read(&nn->ktls_rx_resync_sent); return data; } @@ -560,6 +588,65 @@ nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs) return data; } +static unsigned int nfp_vnic_get_tlv_stats_count(struct nfp_net *nn) +{ + return nn->tlv_caps.vnic_stats_cnt + nn->max_r_vecs * 4; +} + +static u8 *nfp_vnic_get_tlv_stats_strings(struct nfp_net *nn, u8 *data) +{ + unsigned int i, id; + u8 __iomem *mem; + u64 id_word = 0; + + mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off; + for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++) { + if (!(i % 4)) + id_word = readq(mem + i * 2); + + id = (u16)id_word; + id_word >>= 16; + + if (id < ARRAY_SIZE(nfp_tlv_stat_names) && + nfp_tlv_stat_names[id][0]) { + memcpy(data, nfp_tlv_stat_names[id], ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } else { + data = nfp_pr_et(data, "dev_unknown_stat%u", id); + } + } + + for (i = 0; i < nn->max_r_vecs; i++) { + data = nfp_pr_et(data, "rxq_%u_pkts", i); + data = nfp_pr_et(data, "rxq_%u_bytes", i); + data = nfp_pr_et(data, "txq_%u_pkts", i); + data = nfp_pr_et(data, "txq_%u_bytes", i); + } + + return data; +} + +static u64 *nfp_vnic_get_tlv_stats(struct nfp_net *nn, u64 *data) +{ + u8 __iomem *mem; + unsigned int i; + + mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off; + mem += roundup(2 * nn->tlv_caps.vnic_stats_cnt, 8); + for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++) + *data++ = readq(mem + i * 8); + + mem = nn->dp.ctrl_bar; + for (i = 0; i < nn->max_r_vecs; i++) { + *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i)); + *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8); + *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i)); + *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8); + } + + return data; +} + static unsigned int nfp_mac_get_stats_count(struct net_device *netdev) { struct nfp_port *port; @@ -609,8 +696,12 @@ static void nfp_net_get_strings(struct net_device *netdev, switch (stringset) { case ETH_SS_STATS: data = nfp_vnic_get_sw_stats_strings(netdev, data); - data = nfp_vnic_get_hw_stats_strings(data, nn->max_r_vecs, - false); + if (!nn->tlv_caps.vnic_stats_off) + data = nfp_vnic_get_hw_stats_strings(data, + nn->max_r_vecs, + false); + else + data = nfp_vnic_get_tlv_stats_strings(nn, data); data = nfp_mac_get_stats_strings(netdev, data); data = nfp_app_port_get_stats_strings(nn->port, data); break; @@ -624,7 +715,11 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats, struct nfp_net *nn = netdev_priv(netdev); data = nfp_vnic_get_sw_stats(netdev, data); - data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->max_r_vecs); + if (!nn->tlv_caps.vnic_stats_off) + data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, + nn->max_r_vecs); + else + data = nfp_vnic_get_tlv_stats(nn, data); data = nfp_mac_get_stats(netdev, data); data = 
nfp_app_port_get_stats(nn->port, data); } @@ -632,13 +727,18 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats, static int nfp_net_get_sset_count(struct net_device *netdev, int sset) { struct nfp_net *nn = netdev_priv(netdev); + unsigned int cnt; switch (sset) { case ETH_SS_STATS: - return nfp_vnic_get_sw_stats_count(netdev) + - nfp_vnic_get_hw_stats_count(nn->max_r_vecs) + - nfp_mac_get_stats_count(netdev) + - nfp_app_port_get_stats_count(nn->port); + cnt = nfp_vnic_get_sw_stats_count(netdev); + if (!nn->tlv_caps.vnic_stats_off) + cnt += nfp_vnic_get_hw_stats_count(nn->max_r_vecs); + else + cnt += nfp_vnic_get_tlv_stats_count(nn); + cnt += nfp_mac_get_stats_count(netdev); + cnt += nfp_app_port_get_stats_count(nn->port); + return cnt; default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 986464d4a206..921db40047d7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -205,10 +205,8 @@ nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar, ctrl_bar += NFP_PF_CSR_SLICE_SIZE; /* Kill the vNIC if app init marked it as invalid */ - if (nn->port && nn->port->type == NFP_PORT_INVALID) { + if (nn->port && nn->port->type == NFP_PORT_INVALID) nfp_net_pf_free_vnic(pf, nn); - continue; - } } if (list_empty(&pf->vnics)) @@ -711,6 +709,10 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_devlink_unreg; + err = nfp_devlink_params_register(pf); + if (err) + goto err_shared_buf_unreg; + mutex_lock(&pf->lock); pf->ddir = nfp_net_debugfs_device_add(pf->pdev); @@ -744,6 +746,8 @@ err_free_vnics: err_clean_ddir: nfp_net_debugfs_dir_clean(&pf->ddir); mutex_unlock(&pf->lock); + nfp_devlink_params_unregister(pf); +err_shared_buf_unreg: nfp_shared_buf_unregister(pf); err_devlink_unreg: cancel_work_sync(&pf->port_refresh_work); @@ -773,6 +777,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf) mutex_unlock(&pf->lock); + nfp_devlink_params_unregister(pf); nfp_shared_buf_unregister(pf); devlink_unregister(priv_to_devlink(pf)); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 1eef446036d6..79d72c88bbef 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -299,22 +299,6 @@ static void nfp_repr_clean(struct nfp_repr *repr) nfp_port_free(repr->port); } -static struct lock_class_key nfp_repr_netdev_xmit_lock_key; -static struct lock_class_key nfp_repr_netdev_addr_lock_key; - -static void nfp_repr_set_lockdep_class_one(struct net_device *dev, - struct netdev_queue *txq, - void *_unused) -{ - lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key); -} - -static void nfp_repr_set_lockdep_class(struct net_device *dev) -{ - lockdep_set_class(&dev->addr_list_lock, &nfp_repr_netdev_addr_lock_key); - netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL); -} - int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, u32 cmsg_port_id, struct nfp_port *port, struct net_device *pf_netdev) @@ -324,8 +308,6 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, u32 repr_cap = nn->tlv_caps.repr_cap; int err; - nfp_repr_set_lockdep_class(netdev); - repr->port = port; repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL); if (!repr->dst) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c 
b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index e4977cdf7678..c0e2f4394aef 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -106,7 +106,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, * first NFP_NET_CFG_BAR_SZ of the BAR. This keeps the code * the identical for PF and VF drivers. */ - ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CTRL_BAR), + ctrl_bar = ioremap(pci_resource_start(pdev, NFP_NET_CTRL_BAR), NFP_NET_CFG_BAR_SZ); if (!ctrl_bar) { dev_err(&pdev->dev, @@ -200,7 +200,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, bar_sz = (rx_bar_off + rx_bar_sz) - bar_off; map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off; - vf->q_bar = ioremap_nocache(map_addr, bar_sz); + vf->q_bar = ioremap(map_addr, bar_sz); if (!vf->q_bar) { nn_err(nn, "Failed to map resource %d\n", tx_bar_no); err = -EIO; @@ -216,7 +216,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, /* TX queues */ map_addr = pci_resource_start(pdev, tx_bar_no) + tx_bar_off; - nn->tx_bar = ioremap_nocache(map_addr, tx_bar_sz); + nn->tx_bar = ioremap(map_addr, tx_bar_sz); if (!nn->tx_bar) { nn_err(nn, "Failed to map resource %d\n", tx_bar_no); err = -EIO; @@ -225,7 +225,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, /* RX queues */ map_addr = pci_resource_start(pdev, rx_bar_no) + rx_bar_off; - nn->rx_bar = ioremap_nocache(map_addr, rx_bar_sz); + nn->rx_bar = ioremap(map_addr, rx_bar_sz); if (!nn->rx_bar) { nn_err(nn, "Failed to map resource %d\n", rx_bar_no); err = -EIO; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/Makefile b/drivers/net/ethernet/netronome/nfp/nfpcore/Makefile deleted file mode 100644 index 805fa28f391a..000000000000 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# kbuild requires Makefile in a directory to build individual objects diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/Makefile b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/Makefile deleted file mode 100644 index 805fa28f391a..000000000000 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# kbuild requires Makefile in a directory to build individual objects diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 85d46f206b3c..b454db283aef 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -611,7 +611,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) /* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */ bar = &nfp->bar[0]; if (nfp_bar_resource_len(bar) >= NFP_PCI_MIN_MAP_SIZE) - bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar), + bar->iomem = ioremap(nfp_bar_resource_start(bar), nfp_bar_resource_len(bar)); if (bar->iomem) { int pf; @@ -677,7 +677,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) } bar = &nfp->bar[4 + i]; - bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar), + bar->iomem = ioremap(nfp_bar_resource_start(bar), nfp_bar_resource_len(bar)); if (bar->iomem) { msg += snprintf(msg, end - msg, @@ -858,7 +858,7 @@ static int nfp6000_area_acquire(struct nfp_cpp_area *area) priv->iomem = priv->bar->iomem + priv->bar_offset; else /* Must have been too big. Sub-allocate. 
*/ - priv->iomem = ioremap_nocache(priv->phys, priv->size); + priv->iomem = ioremap(priv->phys, priv->size); if (IS_ERR_OR_NULL(priv->iomem)) { dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n", diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c index 3cfecf105bde..85734c6badf5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c @@ -24,8 +24,9 @@ /* NFP6000 PL */ #define NFP_PL_DEVICE_ID 0x00000004 #define NFP_PL_DEVICE_ID_MASK GENMASK(7, 0) - -#define NFP6000_ARM_GCSR_SOFTMODEL0 0x00400144 +#define NFP_PL_DEVICE_PART_MASK GENMASK(31, 16) +#define NFP_PL_DEVICE_MODEL_MASK (NFP_PL_DEVICE_PART_MASK | \ + NFP_PL_DEVICE_ID_MASK) /** * nfp_cpp_readl() - Read a u32 word from a CPP location @@ -120,22 +121,17 @@ int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id, */ int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model) { - const u32 arm_id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, 0, 0); u32 reg; int err; - err = nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, model); - if (err < 0) - return err; - - /* The PL's PluDeviceID revision code is authoratative */ - *model &= ~0xff; err = nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) + NFP_PL_DEVICE_ID, ®); if (err < 0) return err; - *model |= (NFP_PL_DEVICE_ID_MASK & reg) - 0x10; + *model = reg & NFP_PL_DEVICE_MODEL_MASK; + if (*model & NFP_PL_DEVICE_ID_MASK) + *model -= 0x10; return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 9a08623c325d..f18e787fa9ad 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -96,6 +96,8 @@ enum nfp_nsp_cmd { SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */ SPCODE_FW_STORED = 16, /* If no FW loaded, load flash app FW */ SPCODE_HWINFO_LOOKUP = 17, /* Lookup HWinfo with overwrites etc. */ + SPCODE_HWINFO_SET = 18, /* Set HWinfo entry */ + SPCODE_FW_LOADED = 19, /* Is application firmware loaded */ SPCODE_VERSIONS = 21, /* Report FW versions */ SPCODE_READ_SFF_EEPROM = 22, /* Read module EEPROM */ }; @@ -143,6 +145,8 @@ struct nfp_nsp { * @option: NFP SP Command Argument * @buf: NFP SP Buffer Address * @error_cb: Callback for interpreting option if error occurred + * @error_quiet:Don't print command error/warning. Protocol errors are still + * logged. 
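As a quick illustration of the PluDeviceID-based model derivation earlier in this hunk: bits 31:16 and 7:0 of the register are kept (NFP_PL_DEVICE_MODEL_MASK) and a non-zero low byte is adjusted down by 0x10. The standalone sketch below repeats that arithmetic; the register value used in main() is purely hypothetical.

#include <stdint.h>
#include <stdio.h>

#define PL_DEVICE_ID_MASK    0x000000ffU	/* GENMASK(7, 0)   */
#define PL_DEVICE_PART_MASK  0xffff0000U	/* GENMASK(31, 16) */
#define PL_DEVICE_MODEL_MASK (PL_DEVICE_PART_MASK | PL_DEVICE_ID_MASK)

static uint32_t model_from_pludeviceid(uint32_t reg)
{
	uint32_t model = reg & PL_DEVICE_MODEL_MASK;

	if (model & PL_DEVICE_ID_MASK)
		model -= 0x10;
	return model;
}

int main(void)
{
	/* Hypothetical register value: prints "model 0x38000001". */
	printf("model 0x%08x\n", model_from_pludeviceid(0x38000111));
	return 0;
}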
*/ struct nfp_nsp_command_arg { u16 code; @@ -151,6 +155,7 @@ struct nfp_nsp_command_arg { u32 option; u64 buf; void (*error_cb)(struct nfp_nsp *state, u32 ret_val); + bool error_quiet; }; /** @@ -405,8 +410,10 @@ __nfp_nsp_command(struct nfp_nsp *state, const struct nfp_nsp_command_arg *arg) err = FIELD_GET(NSP_STATUS_RESULT, reg); if (err) { - nfp_warn(cpp, "Result (error) code set: %d (%d) command: %d\n", - -err, (int)ret_val, arg->code); + if (!arg->error_quiet) + nfp_warn(cpp, "Result (error) code set: %d (%d) command: %d\n", + -err, (int)ret_val, arg->code); + if (arg->error_cb) arg->error_cb(state, ret_val); else @@ -891,12 +898,14 @@ int nfp_nsp_load_stored_fw(struct nfp_nsp *state) } static int -__nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size) +__nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size, + bool optional) { struct nfp_nsp_command_buf_arg hwinfo_lookup = { { .code = SPCODE_HWINFO_LOOKUP, .option = size, + .error_quiet = optional, }, .in_buf = buf, .in_size = size, @@ -913,7 +922,7 @@ int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size) size = min_t(u32, size, NFP_HWINFO_LOOKUP_SIZE); - err = __nfp_nsp_hwinfo_lookup(state, buf, size); + err = __nfp_nsp_hwinfo_lookup(state, buf, size, false); if (err) return err; @@ -925,6 +934,66 @@ int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size) return 0; } +int nfp_nsp_hwinfo_lookup_optional(struct nfp_nsp *state, void *buf, + unsigned int size, const char *default_val) +{ + int err; + + /* Ensure that the default value is usable irrespective of whether + * it is actually going to be used. + */ + if (strnlen(default_val, size) == size) + return -EINVAL; + + if (!nfp_nsp_has_hwinfo_lookup(state)) { + strcpy(buf, default_val); + return 0; + } + + size = min_t(u32, size, NFP_HWINFO_LOOKUP_SIZE); + + err = __nfp_nsp_hwinfo_lookup(state, buf, size, true); + if (err) { + if (err == -ENOENT) { + strcpy(buf, default_val); + return 0; + } + + nfp_err(state->cpp, "NSP HWinfo lookup failed: %d\n", err); + return err; + } + + if (strnlen(buf, size) == size) { + nfp_err(state->cpp, "NSP HWinfo value not NULL-terminated\n"); + return -EINVAL; + } + + return 0; +} + +int nfp_nsp_hwinfo_set(struct nfp_nsp *state, void *buf, unsigned int size) +{ + struct nfp_nsp_command_buf_arg hwinfo_set = { + { + .code = SPCODE_HWINFO_SET, + .option = size, + }, + .in_buf = buf, + .in_size = size, + }; + + return nfp_nsp_command_buf(state, &hwinfo_set); +} + +int nfp_nsp_fw_loaded(struct nfp_nsp *state) +{ + const struct nfp_nsp_command_arg arg = { + .code = SPCODE_FW_LOADED, + }; + + return __nfp_nsp_command(state, &arg); +} + int nfp_nsp_versions(struct nfp_nsp *state, void *buf, unsigned int size) { struct nfp_nsp_command_buf_arg versions = { diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h index 22ee6985ee1c..1531c1870020 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -22,6 +22,10 @@ int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw); int nfp_nsp_mac_reinit(struct nfp_nsp *state); int nfp_nsp_load_stored_fw(struct nfp_nsp *state); int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size); +int nfp_nsp_hwinfo_lookup_optional(struct nfp_nsp *state, void *buf, + unsigned int size, const char *default_val); +int nfp_nsp_hwinfo_set(struct nfp_nsp *state, void *buf, 
unsigned int size); +int nfp_nsp_fw_loaded(struct nfp_nsp *state); int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index, unsigned int offset, void *data, unsigned int len, unsigned int *read_len); @@ -41,6 +45,16 @@ static inline bool nfp_nsp_has_hwinfo_lookup(struct nfp_nsp *state) return nfp_nsp_get_abi_ver_minor(state) > 24; } +static inline bool nfp_nsp_has_hwinfo_set(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 25; +} + +static inline bool nfp_nsp_has_fw_loaded(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 25; +} + static inline bool nfp_nsp_has_versions(struct nfp_nsp *state) { return nfp_nsp_get_abi_ver_minor(state) > 27; @@ -88,6 +102,21 @@ enum nfp_eth_fec { #define NFP_FEC_REED_SOLOMON BIT(NFP_FEC_REED_SOLOMON_BIT) #define NFP_FEC_DISABLED BIT(NFP_FEC_DISABLED_BIT) +/* Defines the valid values of the 'abi_drv_reset' hwinfo key */ +#define NFP_NSP_DRV_RESET_DISK 0 +#define NFP_NSP_DRV_RESET_ALWAYS 1 +#define NFP_NSP_DRV_RESET_NEVER 2 +#define NFP_NSP_DRV_RESET_DEFAULT "0" + +/* Defines the valid values of the 'app_fw_from_flash' hwinfo key */ +#define NFP_NSP_APP_FW_LOAD_DISK 0 +#define NFP_NSP_APP_FW_LOAD_FLASH 1 +#define NFP_NSP_APP_FW_LOAD_PREF 2 +#define NFP_NSP_APP_FW_LOAD_DEFAULT "2" + +/* Define the default value for the 'abi_drv_load_ifc' key */ +#define NFP_NSP_DRV_LOAD_IFC_DEFAULT "0x10ff" + /** * struct nfp_eth_table - ETH table information * @count: number of table entries diff --git a/drivers/net/ethernet/netronome/nfp/nic/Makefile b/drivers/net/ethernet/netronome/nfp/nic/Makefile deleted file mode 100644 index 805fa28f391a..000000000000 --- a/drivers/net/ethernet/netronome/nfp/nic/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# kbuild requires Makefile in a directory to build individual objects |
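Closing the firmware-policy picture: the 'abi_drv_load_ifc' hwinfo key defined above (default "0x10ff") is a comma-separated list of PCIe interface IDs allowed to load firmware; nfp_fw_load() walks the list, defers to a partner device when the local interface is not listed, and uses the number of entries walked when deciding whether firmware should be unloaded on remove. A minimal standalone sketch of that matching, with invented helper names and ordinary C string handling in place of the kernel helpers:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return true if 'interface' appears in the comma-separated 'list';
 * '*ifcs' counts the entries seen up to and including a match, which the
 * driver consults when deciding whether to unload FW on remove.
 */
static bool interface_may_load_fw(const char *list, unsigned long interface,
				  int *ifcs)
{
	char *dup = strdup(list);
	bool found = false;
	char *token;

	*ifcs = 0;
	for (token = strtok(dup, ","); token; token = strtok(NULL, ",")) {
		(*ifcs)++;
		if (strtoul(token, NULL, 0) == interface) {
			found = true;
			break;
		}
	}
	free(dup);
	return found;
}

int main(void)
{
	int ifcs;

	/* With the default list only interface 0x10ff loads firmware. */
	printf("0x10ff -> %d\n", interface_may_load_fw("0x10ff", 0x10ff, &ifcs));
	printf("0x20ff -> %d\n", interface_may_load_fw("0x10ff", 0x20ff, &ifcs));
	return 0;
}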