161 files changed, 2139 insertions, 539 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-net-qmi b/Documentation/ABI/testing/sysfs-class-net-qmi index 7122d6264c49..c310db4ccbc2 100644 --- a/Documentation/ABI/testing/sysfs-class-net-qmi +++ b/Documentation/ABI/testing/sysfs-class-net-qmi @@ -29,7 +29,7 @@ Contact: Bjørn Mork <bjorn@mork.no> Description: Unsigned integer. - Write a number ranging from 1 to 127 to add a qmap mux + Write a number ranging from 1 to 254 to add a qmap mux based network device, supported by recent Qualcomm based modems. @@ -46,5 +46,5 @@ Contact: Bjørn Mork <bjorn@mork.no> Description: Unsigned integer. - Write a number ranging from 1 to 127 to delete a previously + Write a number ranging from 1 to 254 to delete a previously created qmap mux based network device. diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt index 188c8bd4eb67..5a0111d4de58 100644 --- a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt +++ b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt @@ -4,6 +4,7 @@ Required properties: - compatible: Should be one of the following: - "microchip,mcp2510" for MCP2510. - "microchip,mcp2515" for MCP2515. + - "microchip,mcp25625" for MCP25625. - reg: SPI chip select. - clocks: The clock feeding the CAN controller. - interrupts: Should contain IRQ line for the CAN controller. diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst index e14d7d40fc75..50bccbf68308 100644 --- a/Documentation/networking/af_xdp.rst +++ b/Documentation/networking/af_xdp.rst @@ -316,16 +316,16 @@ A: When a netdev of a physical NIC is initialized, Linux usually all the traffic, you can force the netdev to only have 1 queue, queue id 0, and then bind to queue 0. You can use ethtool to do this:: - sudo ethtool -L <interface> combined 1 + sudo ethtool -L <interface> combined 1 If you want to only see part of the traffic, you can program the NIC through ethtool to filter out your traffic to a single queue id that you can bind your XDP socket to. Here is one example in which UDP traffic to and from port 4242 are sent to queue 2:: - sudo ethtool -N <interface> rx-flow-hash udp4 fn - sudo ethtool -N <interface> flow-type udp4 src-port 4242 dst-port \ - 4242 action 2 + sudo ethtool -N <interface> rx-flow-hash udp4 fn + sudo ethtool -N <interface> flow-type udp4 src-port 4242 dst-port \ + 4242 action 2 A number of other ways are possible all up to the capabilitites of the NIC you have. diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 14fe93049d28..22f6b8b1110a 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -255,6 +255,14 @@ tcp_base_mss - INTEGER Path MTU discovery (MTU probing). If MTU probing is enabled, this is the initial MSS used by the connection. +tcp_min_snd_mss - INTEGER + TCP SYN and SYNACK messages usually advertise an ADVMSS option, + as described in RFC 1122 and RFC 6691. + If this ADVMSS option is smaller than tcp_min_snd_mss, + it is silently capped to tcp_min_snd_mss. + + Default : 48 (at least 8 bytes of payload per segment) + tcp_congestion_control - STRING Set the congestion control algorithm to be used for new connections. 
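The tcp_min_snd_mss entry added above amounts to a floor on the MSS a connection will use when sending: any smaller peer-advertised value is silently raised to the configured minimum. A minimal sketch of that clamp, using hypothetical names rather than the kernel's actual helpers:

/*
 * Illustrative sketch only: enforce the tcp_min_snd_mss floor on a
 * peer-advertised MSS. Function and parameter names are hypothetical,
 * not the kernel's real implementation.
 */
static unsigned int clamp_snd_mss(unsigned int advmss,
				  unsigned int min_snd_mss)
{
	/* Default floor is 48: at least 8 bytes of payload per segment. */
	return advmss < min_snd_mss ? min_snd_mss : advmss;
}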
The algorithm "reno" is always available, but @@ -772,6 +780,14 @@ tcp_challenge_ack_limit - INTEGER in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks) Default: 100 +tcp_rx_skb_cache - BOOLEAN + Controls a per TCP socket cache of one skb, that might help + performance of some workloads. This might be dangerous + on systems with a lot of TCP sockets, since it increases + memory usage. + + Default: 0 (disabled) + UDP variables: udp_l3mdev_accept - BOOLEAN diff --git a/Documentation/networking/rds.txt b/Documentation/networking/rds.txt index 0235ae69af2a..f2a0147c933d 100644 --- a/Documentation/networking/rds.txt +++ b/Documentation/networking/rds.txt @@ -389,7 +389,7 @@ Multipath RDS (mprds) a common (to all paths) part, and a per-path struct rds_conn_path. All I/O workqs and reconnect threads are driven from the rds_conn_path. Transports such as TCP that are multipath capable may then set up a - TPC socket per rds_conn_path, and this is managed by the transport via + TCP socket per rds_conn_path, and this is managed by the transport via the transport privatee cp_transport_data pointer. Transports announce themselves as multipath capable by setting the diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 493c5c943acd..2291daf39cd1 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -338,6 +338,7 @@ #define PPC_INST_MADDLD 0x10000033 #define PPC_INST_DIVWU 0x7c000396 #define PPC_INST_DIVD 0x7c0003d2 +#define PPC_INST_DIVDU 0x7c000392 #define PPC_INST_RLWINM 0x54000000 #define PPC_INST_RLWINM_DOT 0x54000001 #define PPC_INST_RLWIMI 0x50000000 diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 6026a7af031d..55d4377ccfae 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -112,7 +112,7 @@ ___PPC_RA(a) | IMM_L(i)) #define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | ___PPC_RT(d) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_DIVD(d, a, b) EMIT(PPC_INST_DIVD | ___PPC_RT(d) | \ +#define PPC_DIVDU(d, a, b) EMIT(PPC_INST_DIVDU | ___PPC_RT(d) | \ ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_AND(d, a, b) EMIT(PPC_INST_AND | ___PPC_RA(d) | \ ___PPC_RS(a) | ___PPC_RB(b)) diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 63d05c499cac..c2ee6041f02c 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -395,12 +395,12 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */ case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */ if (BPF_OP(code) == BPF_MOD) { - PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg); + PPC_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg); PPC_MULD(b2p[TMP_REG_1], src_reg, b2p[TMP_REG_1]); PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]); } else - PPC_DIVD(dst_reg, dst_reg, src_reg); + PPC_DIVDU(dst_reg, dst_reg, src_reg); break; case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */ case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */ @@ -428,7 +428,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, break; case BPF_ALU64: if (BPF_OP(code) == BPF_MOD) { - PPC_DIVD(b2p[TMP_REG_2], dst_reg, + PPC_DIVDU(b2p[TMP_REG_2], dst_reg, b2p[TMP_REG_1]); PPC_MULD(b2p[TMP_REG_1], b2p[TMP_REG_1], @@ -436,7 +436,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]); } else - PPC_DIVD(dst_reg, dst_reg, + PPC_DIVDU(dst_reg, dst_reg, b2p[TMP_REG_1]); break; } diff 
--git a/arch/riscv/net/bpf_jit_comp.c b/arch/riscv/net/bpf_jit_comp.c index 80b12aa5e10d..426d5c33ea90 100644 --- a/arch/riscv/net/bpf_jit_comp.c +++ b/arch/riscv/net/bpf_jit_comp.c @@ -751,22 +751,32 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, case BPF_ALU | BPF_ADD | BPF_X: case BPF_ALU64 | BPF_ADD | BPF_X: emit(is64 ? rv_add(rd, rd, rs) : rv_addw(rd, rd, rs), ctx); + if (!is64) + emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_SUB | BPF_X: case BPF_ALU64 | BPF_SUB | BPF_X: emit(is64 ? rv_sub(rd, rd, rs) : rv_subw(rd, rd, rs), ctx); + if (!is64) + emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_AND | BPF_X: case BPF_ALU64 | BPF_AND | BPF_X: emit(rv_and(rd, rd, rs), ctx); + if (!is64) + emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_OR | BPF_X: case BPF_ALU64 | BPF_OR | BPF_X: emit(rv_or(rd, rd, rs), ctx); + if (!is64) + emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_XOR | BPF_X: case BPF_ALU64 | BPF_XOR | BPF_X: emit(rv_xor(rd, rd, rs), ctx); + if (!is64) + emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_MUL | BPF_X: case BPF_ALU64 | BPF_MUL | BPF_X: @@ -789,14 +799,20 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, case BPF_ALU | BPF_LSH | BPF_X: case BPF_ALU64 | BPF_LSH | BPF_X: emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx); + if (!is64) + emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_RSH | BPF_X: case BPF_ALU64 | BPF_RSH | BPF_X: emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx); + if (!is64) + emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_ARSH | BPF_X: case BPF_ALU64 | BPF_ARSH | BPF_X: emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx); + if (!is64) + emit_zext_32(rd, ctx); break; /* dst = -dst */ @@ -804,6 +820,8 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, case BPF_ALU64 | BPF_NEG: emit(is64 ? rv_sub(rd, RV_REG_ZERO, rd) : rv_subw(rd, RV_REG_ZERO, rd), ctx); + if (!is64) + emit_zext_32(rd, ctx); break; /* dst = BSWAP##imm(dst) */ @@ -958,14 +976,20 @@ out_be: case BPF_ALU | BPF_LSH | BPF_K: case BPF_ALU64 | BPF_LSH | BPF_K: emit(is64 ? rv_slli(rd, rd, imm) : rv_slliw(rd, rd, imm), ctx); + if (!is64) + emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_RSH | BPF_K: case BPF_ALU64 | BPF_RSH | BPF_K: emit(is64 ? rv_srli(rd, rd, imm) : rv_srliw(rd, rd, imm), ctx); + if (!is64) + emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_ARSH | BPF_K: case BPF_ALU64 | BPF_ARSH | BPF_K: emit(is64 ? rv_srai(rd, rd, imm) : rv_sraiw(rd, rd, imm), ctx); + if (!is64) + emit_zext_32(rd, ctx); break; /* JUMP off */ diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 32bfab4e21eb..eaaed5bfc4a4 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -186,9 +186,7 @@ struct jit_context { #define BPF_MAX_INSN_SIZE 128 #define BPF_INSN_SAFETY 64 -#define AUX_STACK_SPACE 40 /* Space for RBX, R13, R14, R15, tailcnt */ - -#define PROLOGUE_SIZE 37 +#define PROLOGUE_SIZE 20 /* * Emit x86-64 prologue code for BPF program and check its size. 
@@ -199,44 +197,19 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf) u8 *prog = *pprog; int cnt = 0; - /* push rbp */ - EMIT1(0x55); - - /* mov rbp,rsp */ - EMIT3(0x48, 0x89, 0xE5); - - /* sub rsp, rounded_stack_depth + AUX_STACK_SPACE */ - EMIT3_off32(0x48, 0x81, 0xEC, - round_up(stack_depth, 8) + AUX_STACK_SPACE); - - /* sub rbp, AUX_STACK_SPACE */ - EMIT4(0x48, 0x83, 0xED, AUX_STACK_SPACE); - - /* mov qword ptr [rbp+0],rbx */ - EMIT4(0x48, 0x89, 0x5D, 0); - /* mov qword ptr [rbp+8],r13 */ - EMIT4(0x4C, 0x89, 0x6D, 8); - /* mov qword ptr [rbp+16],r14 */ - EMIT4(0x4C, 0x89, 0x75, 16); - /* mov qword ptr [rbp+24],r15 */ - EMIT4(0x4C, 0x89, 0x7D, 24); - + EMIT1(0x55); /* push rbp */ + EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ + /* sub rsp, rounded_stack_depth */ + EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8)); + EMIT1(0x53); /* push rbx */ + EMIT2(0x41, 0x55); /* push r13 */ + EMIT2(0x41, 0x56); /* push r14 */ + EMIT2(0x41, 0x57); /* push r15 */ if (!ebpf_from_cbpf) { - /* - * Clear the tail call counter (tail_call_cnt): for eBPF tail - * calls we need to reset the counter to 0. It's done in two - * instructions, resetting RAX register to 0, and moving it - * to the counter location. - */ - - /* xor eax, eax */ - EMIT2(0x31, 0xc0); - /* mov qword ptr [rbp+32], rax */ - EMIT4(0x48, 0x89, 0x45, 32); - + /* zero init tail_call_cnt */ + EMIT2(0x6a, 0x00); BUILD_BUG_ON(cnt != PROLOGUE_SIZE); } - *pprog = prog; } @@ -281,13 +254,13 @@ static void emit_bpf_tail_call(u8 **pprog) * if (tail_call_cnt > MAX_TAIL_CALL_CNT) * goto out; */ - EMIT2_off32(0x8B, 0x85, 36); /* mov eax, dword ptr [rbp + 36] */ + EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */ EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ #define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE) EMIT2(X86_JA, OFFSET2); /* ja out */ label2 = cnt; EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ - EMIT2_off32(0x89, 0x85, 36); /* mov dword ptr [rbp + 36], eax */ + EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */ /* prog = array->ptrs[index]; */ EMIT4_off32(0x48, 0x8B, 0x84, 0xD6, /* mov rax, [rsi + rdx * 8 + offsetof(...)] */ @@ -1036,19 +1009,14 @@ emit_jmp: seen_exit = true; /* Update cleanup_addr */ ctx->cleanup_addr = proglen; - /* mov rbx, qword ptr [rbp+0] */ - EMIT4(0x48, 0x8B, 0x5D, 0); - /* mov r13, qword ptr [rbp+8] */ - EMIT4(0x4C, 0x8B, 0x6D, 8); - /* mov r14, qword ptr [rbp+16] */ - EMIT4(0x4C, 0x8B, 0x75, 16); - /* mov r15, qword ptr [rbp+24] */ - EMIT4(0x4C, 0x8B, 0x7D, 24); - - /* add rbp, AUX_STACK_SPACE */ - EMIT4(0x48, 0x83, 0xC5, AUX_STACK_SPACE); - EMIT1(0xC9); /* leave */ - EMIT1(0xC3); /* ret */ + if (!bpf_prog_was_classic(bpf_prog)) + EMIT1(0x5B); /* get rid of tail_call_cnt */ + EMIT2(0x41, 0x5F); /* pop r15 */ + EMIT2(0x41, 0x5E); /* pop r14 */ + EMIT2(0x41, 0x5D); /* pop r13 */ + EMIT1(0x5B); /* pop rbx */ + EMIT1(0xC9); /* leave */ + EMIT1(0xC3); /* ret */ break; default: diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 1c66fb2ad76b..f2fe344593d5 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -166,7 +166,7 @@ #define FLEXCAN_MB_CNT_LENGTH(x) (((x) & 0xf) << 16) #define FLEXCAN_MB_CNT_TIMESTAMP(x) ((x) & 0xffff) -#define FLEXCAN_TIMEOUT_US (50) +#define FLEXCAN_TIMEOUT_US (250) /* FLEXCAN hardware feature flags * @@ -1583,9 +1583,6 @@ static int flexcan_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "failed to setup stop-mode\n"); } - 
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n", - priv->regs, dev->irq); - return 0; failed_register: diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 9b449400376b..deb274a19ba0 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -822,6 +822,27 @@ static int m_can_poll(struct napi_struct *napi, int quota) if (!irqstatus) goto end; + /* Errata workaround for issue "Needless activation of MRAF irq" + * During frame reception while the MCAN is in Error Passive state + * and the Receive Error Counter has the value MCAN_ECR.REC = 127, + * it may happen that MCAN_IR.MRAF is set although there was no + * Message RAM access failure. + * If MCAN_IR.MRAF is enabled, an interrupt to the Host CPU is generated + * The Message RAM Access Failure interrupt routine needs to check + * whether MCAN_ECR.RP = ’1’ and MCAN_ECR.REC = 127. + * In this case, reset MCAN_IR.MRAF. No further action is required. + */ + if ((priv->version <= 31) && (irqstatus & IR_MRAF) && + (m_can_read(priv, M_CAN_ECR) & ECR_RP)) { + struct can_berr_counter bec; + + __m_can_get_berr_counter(dev, &bec); + if (bec.rxerr == 127) { + m_can_write(priv, M_CAN_IR, IR_MRAF); + irqstatus &= ~IR_MRAF; + } + } + psr = m_can_read(priv, M_CAN_PSR); if (irqstatus & IR_ERR_STATE) work_done += m_can_handle_state_errors(dev, psr); diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig index 2e7e535e9237..1c50788055cb 100644 --- a/drivers/net/can/spi/Kconfig +++ b/drivers/net/can/spi/Kconfig @@ -9,9 +9,10 @@ config CAN_HI311X Driver for the Holt HI311x SPI CAN controllers. config CAN_MCP251X - tristate "Microchip MCP251x SPI CAN controllers" + tristate "Microchip MCP251x and MCP25625 SPI CAN controllers" depends on HAS_DMA ---help--- - Driver for the Microchip MCP251x SPI CAN controllers. + Driver for the Microchip MCP251x and MCP25625 SPI CAN + controllers. 
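With MCP25625 added to the Kconfig text above, the board-info example quoted further down in this diff (in the mcp251x.c header comment) gains a third valid modalias. The following is only a hedged sketch of how a non-DT board might register the chip over SPI; the platform data, oscillator frequency, and IRQ line are placeholders, not values taken from this diff:

#include <linux/spi/spi.h>
#include <linux/can/platform/mcp251x.h>

/* Oscillator frequency of the board's crystal; 8 MHz is a placeholder. */
static struct mcp251x_platform_data mcp251x_info = {
	.oscillator_frequency	= 8000000,
};

static struct spi_board_info mcp25625_board_info[] = {
	{
		.modalias	= "mcp25625",	/* or "mcp2510"/"mcp2515" */
		.platform_data	= &mcp251x_info,
		.irq		= IRQ_EINT13,	/* board-specific placeholder */
		.max_speed_hz	= 2 * 1000 * 1000,
	},
};

Such an array would typically be handed to spi_register_board_info() from the board's init code.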
endmenu diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 78e29fa06fe6..44e99e3d7134 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * CAN bus driver for Microchip 251x CAN Controller with SPI Interface + * CAN bus driver for Microchip 251x/25625 CAN Controller with SPI Interface * * MCP2510 support and bug fixes by Christian Pellegrin * <chripell@evolware.org> @@ -28,7 +28,7 @@ * static struct spi_board_info spi_board_info[] = { * { * .modalias = "mcp2510", - * // or "mcp2515" depending on your controller + * // "mcp2515" or "mcp25625" depending on your controller * .platform_data = &mcp251x_info, * .irq = IRQ_EINT13, * .max_speed_hz = 2*1000*1000, @@ -224,6 +224,7 @@ static const struct can_bittiming_const mcp251x_bittiming_const = { enum mcp251x_model { CAN_MCP251X_MCP2510 = 0x2510, CAN_MCP251X_MCP2515 = 0x2515, + CAN_MCP251X_MCP25625 = 0x25625, }; struct mcp251x_priv { @@ -266,7 +267,6 @@ static inline int mcp251x_is_##_model(struct spi_device *spi) \ } MCP251X_IS(2510); -MCP251X_IS(2515); static void mcp251x_clean(struct net_device *net) { @@ -625,7 +625,7 @@ static int mcp251x_hw_reset(struct spi_device *spi) /* Wait for oscillator startup timer after reset */ mdelay(MCP251X_OST_DELAY_MS); - + reg = mcp251x_read_reg(spi, CANSTAT); if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF) return -ENODEV; @@ -806,9 +806,8 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) /* receive buffer 0 */ if (intf & CANINTF_RX0IF) { mcp251x_hw_rx(spi, 0); - /* - * Free one buffer ASAP - * (The MCP2515 does this automatically.) + /* Free one buffer ASAP + * (The MCP2515/25625 does this automatically.) */ if (mcp251x_is_2510(spi)) mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00); @@ -817,7 +816,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) /* receive buffer 1 */ if (intf & CANINTF_RX1IF) { mcp251x_hw_rx(spi, 1); - /* the MCP2515 does this automatically */ + /* The MCP2515/25625 does this automatically. */ if (mcp251x_is_2510(spi)) clear_intf |= CANINTF_RX1IF; } @@ -992,6 +991,10 @@ static const struct of_device_id mcp251x_of_match[] = { .compatible = "microchip,mcp2515", .data = (void *)CAN_MCP251X_MCP2515, }, + { + .compatible = "microchip,mcp25625", + .data = (void *)CAN_MCP251X_MCP25625, + }, { } }; MODULE_DEVICE_TABLE(of, mcp251x_of_match); @@ -1005,6 +1008,10 @@ static const struct spi_device_id mcp251x_id_table[] = { .name = "mcp2515", .driver_data = (kernel_ulong_t)CAN_MCP251X_MCP2515, }, + { + .name = "mcp25625", + .driver_data = (kernel_ulong_t)CAN_MCP251X_MCP25625, + }, { } }; MODULE_DEVICE_TABLE(spi, mcp251x_id_table); @@ -1245,5 +1252,5 @@ module_spi_driver(mcp251x_can_driver); MODULE_AUTHOR("Chris Elston <celston@katalix.com>, " "Christian Pellegrin <chripell@evolware.org>"); -MODULE_DESCRIPTION("Microchip 251x CAN driver"); +MODULE_DESCRIPTION("Microchip 251x/25625 CAN driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig index ac3522b77303..4b3d0ddcda79 100644 --- a/drivers/net/can/usb/Kconfig +++ b/drivers/net/can/usb/Kconfig @@ -102,12 +102,6 @@ config CAN_PEAK_USB (see also http://www.peak-system.com). -config CAN_MCBA_USB - tristate "Microchip CAN BUS Analyzer interface" - ---help--- - This driver supports the CAN BUS Analyzer interface - from Microchip (http://www.microchip.com/development-tools/). 
- config CAN_UCAN tristate "Theobroma Systems UCAN interface" ---help--- diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index f2024404b8d6..63203ff452b5 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -1435,7 +1435,7 @@ static const struct xcan_devtype_data xcan_canfd_data = { XCAN_FLAG_RXMNF | XCAN_FLAG_TX_MAILBOXES | XCAN_FLAG_RX_FIFO_MULTI, - .bittiming_const = &xcan_bittiming_const, + .bittiming_const = &xcan_bittiming_const_canfd, .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD, .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD, .bus_clk_name = "s_axi_aclk", diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index fefb6aaa82ba..d99dc6de0006 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -9,8 +9,8 @@ obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o -obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek.o -realtek-objs := realtek-smi.o rtl8366.o rtl8366rb.o +obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o +realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 39dace8e3512..f46086fa9064 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -83,6 +83,9 @@ static void ksz_mib_read_work(struct work_struct *work) int i; for (i = 0; i < dev->mib_port_cnt; i++) { + if (dsa_is_unused_port(dev->ds, i)) + continue; + p = &dev->ports[i]; mib = &p->mib; mutex_lock(&mib->cnt_mutex); diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi-core.c index ad41ec63cc9f..dc0509c02d29 100644 --- a/drivers/net/dsa/realtek-smi.c +++ b/drivers/net/dsa/realtek-smi-core.c @@ -40,7 +40,7 @@ #include <linux/bitops.h> #include <linux/if_bridge.h> -#include "realtek-smi.h" +#include "realtek-smi-core.h" #define REALTEK_SMI_ACK_RETRY_COUNT 5 #define REALTEK_SMI_HW_STOP_DELAY 25 /* msecs */ diff --git a/drivers/net/dsa/realtek-smi.h b/drivers/net/dsa/realtek-smi-core.h index 9a63b51e1d82..9a63b51e1d82 100644 --- a/drivers/net/dsa/realtek-smi.h +++ b/drivers/net/dsa/realtek-smi-core.h diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index 6dedd43442cc..ca3d17e43ed8 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -11,7 +11,7 @@ #include <linux/if_bridge.h> #include <net/dsa.h> -#include "realtek-smi.h" +#include "realtek-smi-core.h" int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used) { @@ -307,7 +307,8 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) struct rtl8366_vlan_4k vlan4k; int ret; - if (!smi->ops->is_vlan_valid(smi, port)) + /* Use VLAN nr port + 1 since VLAN0 is not valid */ + if (!smi->ops->is_vlan_valid(smi, port + 1)) return -EINVAL; dev_info(smi->dev, "%s filtering on port %d\n", @@ -318,12 +319,12 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) * The hardware support filter ID (FID) 0..7, I have no clue how to * support this in the driver when the callback only says on/off. 
*/ - ret = smi->ops->get_vlan_4k(smi, port, &vlan4k); + ret = smi->ops->get_vlan_4k(smi, port + 1, &vlan4k); if (ret) return ret; /* Just set the filter to FID 1 for now then */ - ret = rtl8366_set_vlan(smi, port, + ret = rtl8366_set_vlan(smi, port + 1, vlan4k.member, vlan4k.untag, 1); diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c index 40b3974970c6..a268085ffad2 100644 --- a/drivers/net/dsa/rtl8366rb.c +++ b/drivers/net/dsa/rtl8366rb.c @@ -20,7 +20,7 @@ #include <linux/of_irq.h> #include <linux/regmap.h> -#include "realtek-smi.h" +#include "realtek-smi-core.h" #define RTL8366RB_PORT_NUM_CPU 5 #define RTL8366RB_NUM_PORTS 6 diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index bb09319feedf..2a3e2450968e 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig @@ -50,7 +50,7 @@ config XSURF100 tristate "Amiga XSurf 100 AX88796/NE2000 clone support" depends on ZORRO select AX88796 - select ASIX_PHY + select AX88796B_PHY help This driver is for the Individual Computers X-Surf 100 Ethernet card (based on the Asix AX88796 chip). If you have such a card, diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index fb308b330dc1..8a6785173228 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1101,7 +1101,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type); break; case ETHTOOL_GRXRINGS: - cmd->data = adapter->num_rx_qs - 1; + cmd->data = adapter->num_rx_qs; break; default: return -EINVAL; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 3da392bfd659..3da680073265 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -428,9 +428,10 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter) if (rx_pool->buff_size != be64_to_cpu(size_array[i])) { free_long_term_buff(adapter, &rx_pool->long_term_buff); rx_pool->buff_size = be64_to_cpu(size_array[i]); - alloc_long_term_buff(adapter, &rx_pool->long_term_buff, - rx_pool->size * - rx_pool->buff_size); + rc = alloc_long_term_buff(adapter, + &rx_pool->long_term_buff, + rx_pool->size * + rx_pool->buff_size); } else { rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff); @@ -696,9 +697,9 @@ static int init_tx_pools(struct net_device *netdev) return rc; } - init_one_tx_pool(netdev, &adapter->tso_pool[i], - IBMVNIC_TSO_BUFS, - IBMVNIC_TSO_BUF_SZ); + rc = init_one_tx_pool(netdev, &adapter->tso_pool[i], + IBMVNIC_TSO_BUFS, + IBMVNIC_TSO_BUF_SZ); if (rc) { release_tx_pools(adapter); return rc; @@ -1745,7 +1746,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, ibmvnic_cleanup(netdev); - if (adapter->reset_reason != VNIC_RESET_MOBILITY && + if (reset_state == VNIC_OPEN && + adapter->reset_reason != VNIC_RESET_MOBILITY && adapter->reset_reason != VNIC_RESET_FAILOVER) { rc = __ibmvnic_close(netdev); if (rc) @@ -1844,6 +1846,9 @@ static int do_reset(struct ibmvnic_adapter *adapter, return 0; } + /* refresh device's multicast list */ + ibmvnic_set_multi(netdev); + /* kick napi */ for (i = 0; i < adapter->req_rx_queues; i++) napi_schedule(&adapter->napi[i]); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 392fd895f278..ae2240074d8e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ 
b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -1905,8 +1905,7 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv) } /* Find tcam entry with matched pair <vid,port> */ -static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid, - u16 mask) +static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask) { unsigned char byte[2], enable[2]; struct mvpp2_prs_entry pe; @@ -1914,13 +1913,13 @@ static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid, int tid; /* Go through the all entries with MVPP2_PRS_LU_VID */ - for (tid = MVPP2_PE_VID_FILT_RANGE_START; - tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) { - if (!priv->prs_shadow[tid].valid || - priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); + tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { + if (!port->priv->prs_shadow[tid].valid || + port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + mvpp2_prs_init_from_hw(port->priv, &pe, tid); mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); @@ -1950,7 +1949,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) memset(&pe, 0, sizeof(pe)); /* Scan TCAM and see if entry with this <vid,port> already exist */ - tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask); + tid = mvpp2_prs_vid_range_find(port, vid, mask); reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); if (reg_val & MVPP2_DSA_EXTENDED) @@ -2008,7 +2007,7 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) int tid; /* Scan TCAM and see if entry with this <vid,port> already exist */ - tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff); + tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); /* No such entry */ if (tid < 0) @@ -2026,8 +2025,10 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { - if (priv->prs_shadow[tid].valid) - mvpp2_prs_vid_entry_remove(port, tid); + if (priv->prs_shadow[tid].valid) { + mvpp2_prs_hw_inv(priv, tid); + priv->prs_shadow[tid].valid = false; + } } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index d2ab8cd8ad9f..e94686c42000 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -441,6 +441,10 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_CREATE_GENERAL_OBJECT: case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT: case MLX5_CMD_OP_QUERY_GENERAL_OBJECT: + case MLX5_CMD_OP_CREATE_UCTX: + case MLX5_CMD_OP_DESTROY_UCTX: + case MLX5_CMD_OP_CREATE_UMEM: + case MLX5_CMD_OP_DESTROY_UMEM: case MLX5_CMD_OP_ALLOC_MEMIC: *status = MLX5_DRIVER_STATUS_ABORTED; *synd = MLX5_DRIVER_SYND; @@ -629,6 +633,10 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(ALLOC_MEMIC); MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC); MLX5_COMMAND_STR_CASE(QUERY_HOST_PARAMS); + MLX5_COMMAND_STR_CASE(CREATE_UCTX); + MLX5_COMMAND_STR_CASE(DESTROY_UCTX); + MLX5_COMMAND_STR_CASE(CREATE_UMEM); + MLX5_COMMAND_STR_CASE(DESTROY_UMEM); default: return "unknown command opcode"; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index ebc046fa97d3..f6b1da99e6c2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -248,11 +248,32 @@ void mlx5_unregister_interface(struct mlx5_interface *intf) } EXPORT_SYMBOL(mlx5_unregister_interface); +/* Must be called with intf_mutex held */ +static bool mlx5_has_added_dev_by_protocol(struct mlx5_core_dev *mdev, int protocol) +{ + struct mlx5_device_context *dev_ctx; + struct mlx5_interface *intf; + bool found = false; + + list_for_each_entry(intf, &intf_list, list) { + if (intf->protocol == protocol) { + dev_ctx = mlx5_get_device(intf, &mdev->priv); + if (dev_ctx && test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) + found = true; + break; + } + } + + return found; +} + void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol) { mutex_lock(&mlx5_intf_mutex); - mlx5_remove_dev_by_protocol(mdev, protocol); - mlx5_add_dev_by_protocol(mdev, protocol); + if (mlx5_has_added_dev_by_protocol(mdev, protocol)) { + mlx5_remove_dev_by_protocol(mdev, protocol); + mlx5_add_dev_by_protocol(mdev, protocol); + } mutex_unlock(&mlx5_intf_mutex); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 3a183d690e23..cc6797e24571 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -385,6 +385,7 @@ struct mlx5e_txqsq { /* control path */ struct mlx5_wq_ctrl wq_ctrl; struct mlx5e_channel *channel; + int ch_ix; int txq_ix; u32 rate_limit; struct work_struct recover_work; @@ -1112,6 +1113,7 @@ void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti) netdev_features_t mlx5e_features_check(struct sk_buff *skb, struct net_device *netdev, netdev_features_t features); +int mlx5e_set_features(struct net_device *netdev, netdev_features_t features); #ifdef CONFIG_MLX5_ESWITCH int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac); int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index fe5d4d7f15ed..231e7cdfc6f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -11,24 +11,25 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv, struct net_device **route_dev, struct net_device **out_dev) { + struct net_device *uplink_dev, *uplink_upper, *real_dev; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct net_device *uplink_dev, *uplink_upper; bool dst_is_lag_dev; + real_dev = is_vlan_dev(dev) ? 
vlan_dev_real_dev(dev) : dev; uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); uplink_upper = netdev_master_upper_dev_get(uplink_dev); dst_is_lag_dev = (uplink_upper && netif_is_lag_master(uplink_upper) && - dev == uplink_upper && + real_dev == uplink_upper && mlx5_lag_is_sriov(priv->mdev)); /* if the egress device isn't on the same HW e-switch or * it's a LAG device, use the uplink */ - if (!netdev_port_same_parent_id(priv->netdev, dev) || + if (!netdev_port_same_parent_id(priv->netdev, real_dev) || dst_is_lag_dev) { - *route_dev = uplink_dev; - *out_dev = *route_dev; + *route_dev = dev; + *out_dev = uplink_dev; } else { *route_dev = dev; if (is_vlan_dev(*route_dev)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index c65cefd84eda..a8e8350b38aa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1082,6 +1082,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, sq->clock = &mdev->clock; sq->mkey_be = c->mkey_be; sq->channel = c; + sq->ch_ix = c->ix; sq->txq_ix = txq_ix; sq->uar_map = mdev->mlx5e_res.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; @@ -3635,8 +3636,7 @@ static int mlx5e_handle_feature(struct net_device *netdev, return 0; } -static int mlx5e_set_features(struct net_device *netdev, - netdev_features_t features) +int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t oper_features = netdev->features; int err = 0; @@ -5108,6 +5108,11 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv) struct mlx5e_priv *priv = vpriv; struct net_device *netdev = priv->netdev; +#ifdef CONFIG_MLX5_ESWITCH + if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) + return; +#endif + if (!netif_device_present(netdev)) return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 9aea9c5b2ce8..2f406b161bcf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1351,6 +1351,7 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = { .ndo_get_vf_stats = mlx5e_get_vf_stats, .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan, .ndo_get_port_parent_id = mlx5e_rep_get_port_parent_id, + .ndo_set_features = mlx5e_set_features, }; bool mlx5e_eswitch_rep(struct net_device *netdev) @@ -1425,10 +1426,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) netdev->watchdog_timeo = 15 * HZ; + netdev->features |= NETIF_F_NETNS_LOCAL; - netdev->features |= NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL; - netdev->hw_features |= NETIF_F_HW_TC; - + netdev->hw_features |= NETIF_F_HW_TC; netdev->hw_features |= NETIF_F_SG; netdev->hw_features |= NETIF_F_IP_CSUM; netdev->hw_features |= NETIF_F_IPV6_CSUM; @@ -1437,7 +1437,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_TSO6; netdev->hw_features |= NETIF_F_RXCSUM; - if (rep->vport != MLX5_VPORT_UPLINK) + if (rep->vport == MLX5_VPORT_UPLINK) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + else netdev->features |= NETIF_F_VLAN_CHALLENGED; netdev->features |= netdev->hw_features; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 31cd02f11499..e40c60d1631f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2812,9 +2812,6 @@ static int 
parse_tc_fdb_actions(struct mlx5e_priv *priv, if (!flow_action_has_entries(flow_action)) return -EINVAL; - attr->in_rep = rpriv->rep; - attr->in_mdev = priv->mdev; - flow_action_for_each(i, act, flow_action) { switch (act->id) { case FLOW_ACTION_DROP: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 195a7d903cec..701e5dc75bb0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -113,13 +113,13 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { - int channel_ix = netdev_pick_tx(dev, skb, NULL); + int txq_ix = netdev_pick_tx(dev, skb, NULL); struct mlx5e_priv *priv = netdev_priv(dev); u16 num_channels; int up = 0; if (!netdev_get_num_tc(dev)) - return channel_ix; + return txq_ix; #ifdef CONFIG_MLX5_CORE_EN_DCB if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP) @@ -129,14 +129,14 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, if (skb_vlan_tag_present(skb)) up = skb_vlan_tag_get_prio(skb); - /* channel_ix can be larger than num_channels since + /* txq_ix can be larger than num_channels since * dev->num_real_tx_queues = num_channels * num_tc */ num_channels = priv->channels.params.num_channels; - if (channel_ix >= num_channels) - channel_ix = reciprocal_scale(channel_ix, num_channels); + if (txq_ix >= num_channels) + txq_ix = priv->txq2sq[txq_ix]->ch_ix; - return priv->channel_tc2txq[channel_ix][up]; + return priv->channel_tc2txq[txq_ix][up]; } static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index e8002bfc1e8f..7ed63ed657c7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -997,7 +997,7 @@ static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port, MLXSW_REG_ZERO(spaft, payload); mlxsw_reg_spaft_local_port_set(payload, local_port); mlxsw_reg_spaft_allow_untagged_set(payload, allow_untagged); - mlxsw_reg_spaft_allow_prio_tagged_set(payload, true); + mlxsw_reg_spaft_allow_prio_tagged_set(payload, allow_untagged); mlxsw_reg_spaft_allow_tagged_set(payload, true); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index dfe6b44baf63..23204356ad88 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4280,13 +4280,16 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) } } +#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe + static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) { char slcr_pl[MLXSW_REG_SLCR_LEN]; u32 seed; int err; - seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0); + seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), + MLXSW_SP_LAG_SEED_INIT); mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | MLXSW_REG_SLCR_LAG_HASH_DMAC | MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 8512dd49e420..1537f70bc26d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -437,8 +437,8 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = { MLXSW_SP1_SB_PR_CPU_SIZE, true, false), }; 
-#define MLXSW_SP2_SB_PR_INGRESS_SIZE 40960000 -#define MLXSW_SP2_SB_PR_EGRESS_SIZE 40960000 +#define MLXSW_SP2_SB_PR_INGRESS_SIZE 38128752 +#define MLXSW_SP2_SB_PR_EGRESS_SIZE 38128752 #define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000) /* Order according to mlxsw_sp2_sb_pool_dess */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 15f804453cd6..96b23c856f4d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -247,8 +247,8 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp, match.mask->tos & 0x3); mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP, - match.key->tos >> 6, - match.mask->tos >> 6); + match.key->tos >> 2, + match.mask->tos >> 2); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 1cda8a248b12..ef554739dd54 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2363,7 +2363,7 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work) static void mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry, - bool removing); + bool removing, bool dead); static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding) { @@ -2507,7 +2507,8 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work) memcpy(neigh_entry->ha, ha, ETH_ALEN); mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected); - mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected); + mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected, + dead); if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list)) mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); @@ -3472,13 +3473,79 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, nh->update = 1; } +static int +mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry) +{ + struct neighbour *n, *old_n = neigh_entry->key.n; + struct mlxsw_sp_nexthop *nh; + bool entry_connected; + u8 nud_state, dead; + int err; + + nh = list_first_entry(&neigh_entry->nexthop_list, + struct mlxsw_sp_nexthop, neigh_list_node); + + n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev); + if (!n) { + n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr, + nh->rif->dev); + if (IS_ERR(n)) + return PTR_ERR(n); + neigh_event_send(n, NULL); + } + + mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); + neigh_entry->key.n = n; + err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); + if (err) + goto err_neigh_entry_insert; + + read_lock_bh(&n->lock); + nud_state = n->nud_state; + dead = n->dead; + read_unlock_bh(&n->lock); + entry_connected = nud_state & NUD_VALID && !dead; + + list_for_each_entry(nh, &neigh_entry->nexthop_list, + neigh_list_node) { + neigh_release(old_n); + neigh_clone(n); + __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected); + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); + } + + neigh_release(n); + + return 0; + +err_neigh_entry_insert: + neigh_entry->key.n = old_n; + mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); + neigh_release(n); + return err; +} + static void mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry, - bool removing) + bool removing, bool dead) { 
struct mlxsw_sp_nexthop *nh; + if (list_empty(&neigh_entry->nexthop_list)) + return; + + if (dead) { + int err; + + err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp, + neigh_entry); + if (err) + dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n"); + return; + } + list_for_each_entry(nh, &neigh_entry->nexthop_list, neigh_list_node) { __mlxsw_sp_nexthop_neigh_update(nh, removing); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index b82b684f52ce..36a3bd30cfd9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1867,6 +1867,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) napi_gro_receive(&rx_ring->r_vec->napi, skb); } else { skb->dev = netdev; + skb_reset_network_header(skb); __skb_push(skb, ETH_HLEN); dev_queue_xmit(skb); } diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 98d1a45c0606..25770122c219 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -395,7 +395,7 @@ static int geneve_udp_encap_err_lookup(struct sock *sk, struct sk_buff *skb) u8 zero_vni[3] = { 0 }; u8 *vni = zero_vni; - if (skb->len < GENEVE_BASE_HLEN) + if (!pskb_may_pull(skb, skb_transport_offset(skb) + GENEVE_BASE_HLEN)) return -EINVAL; geneveh = geneve_hdr(skb); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 03ea5a7ed3a4..afdcc5664ea6 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2407,7 +2407,7 @@ static struct hv_driver netvsc_drv = { .probe = netvsc_probe, .remove = netvsc_remove, .driver = { - .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .probe_type = PROBE_FORCE_SYNCHRONOUS, }, }; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index f99f27800fdb..1d406c6df790 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -254,7 +254,7 @@ config AQUANTIA_PHY ---help--- Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 -config ASIX_PHY +config AX88796B_PHY tristate "Asix PHYs" help Currently supports the Asix Electronics PHY found in the X-Surf 100 diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 27d7f9f3b0de..5b5c8669499e 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -52,7 +52,7 @@ ifdef CONFIG_HWMON aquantia-objs += aquantia_hwmon.o endif obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o -obj-$(CONFIG_ASIX_PHY) += asix.o +obj-$(CONFIG_AX88796B_PHY) += ax88796b.o obj-$(CONFIG_AT803X_PHY) += at803x.o obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o diff --git a/drivers/net/phy/asix.c b/drivers/net/phy/ax88796b.c index 79bf7ef1fcfd..79bf7ef1fcfd 100644 --- a/drivers/net/phy/asix.c +++ b/drivers/net/phy/ax88796b.c diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index d9a6699abe59..780c10ee359b 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -22,6 +22,7 @@ #include <linux/usb/cdc.h> #include <linux/usb/usbnet.h> #include <linux/usb/cdc-wdm.h> +#include <linux/u64_stats_sync.h> /* This driver supports wwan (3G/LTE/?) 
devices using a vendor * specific management protocol called Qualcomm MSM Interface (QMI) - @@ -75,6 +76,7 @@ struct qmimux_hdr { struct qmimux_priv { struct net_device *real_dev; u8 mux_id; + struct pcpu_sw_netstats __percpu *stats64; }; static int qmimux_open(struct net_device *dev) @@ -101,19 +103,65 @@ static netdev_tx_t qmimux_start_xmit(struct sk_buff *skb, struct net_device *dev struct qmimux_priv *priv = netdev_priv(dev); unsigned int len = skb->len; struct qmimux_hdr *hdr; + netdev_tx_t ret; hdr = skb_push(skb, sizeof(struct qmimux_hdr)); hdr->pad = 0; hdr->mux_id = priv->mux_id; hdr->pkt_len = cpu_to_be16(len); skb->dev = priv->real_dev; - return dev_queue_xmit(skb); + ret = dev_queue_xmit(skb); + + if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { + struct pcpu_sw_netstats *stats64 = this_cpu_ptr(priv->stats64); + + u64_stats_update_begin(&stats64->syncp); + stats64->tx_packets++; + stats64->tx_bytes += len; + u64_stats_update_end(&stats64->syncp); + } else { + dev->stats.tx_dropped++; + } + + return ret; +} + +static void qmimux_get_stats64(struct net_device *net, + struct rtnl_link_stats64 *stats) +{ + struct qmimux_priv *priv = netdev_priv(net); + unsigned int start; + int cpu; + + netdev_stats_to_stats64(stats, &net->stats); + + for_each_possible_cpu(cpu) { + struct pcpu_sw_netstats *stats64; + u64 rx_packets, rx_bytes; + u64 tx_packets, tx_bytes; + + stats64 = per_cpu_ptr(priv->stats64, cpu); + + do { + start = u64_stats_fetch_begin_irq(&stats64->syncp); + rx_packets = stats64->rx_packets; + rx_bytes = stats64->rx_bytes; + tx_packets = stats64->tx_packets; + tx_bytes = stats64->tx_bytes; + } while (u64_stats_fetch_retry_irq(&stats64->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + } } static const struct net_device_ops qmimux_netdev_ops = { - .ndo_open = qmimux_open, - .ndo_stop = qmimux_stop, - .ndo_start_xmit = qmimux_start_xmit, + .ndo_open = qmimux_open, + .ndo_stop = qmimux_stop, + .ndo_start_xmit = qmimux_start_xmit, + .ndo_get_stats64 = qmimux_get_stats64, }; static void qmimux_setup(struct net_device *dev) @@ -153,7 +201,7 @@ static bool qmimux_has_slaves(struct usbnet *dev) static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { - unsigned int len, offset = 0; + unsigned int len, offset = 0, pad_len, pkt_len; struct qmimux_hdr *hdr; struct net_device *net; struct sk_buff *skbn; @@ -171,10 +219,16 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb) if (hdr->pad & 0x80) goto skip; + /* extract padding length and check for valid length info */ + pad_len = hdr->pad & 0x3f; + if (len == 0 || pad_len >= len) + goto skip; + pkt_len = len - pad_len; + net = qmimux_find_dev(dev, hdr->mux_id); if (!net) goto skip; - skbn = netdev_alloc_skb(net, len); + skbn = netdev_alloc_skb(net, pkt_len); if (!skbn) return 0; skbn->dev = net; @@ -191,9 +245,20 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb) goto skip; } - skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, len); - if (netif_rx(skbn) != NET_RX_SUCCESS) + skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, pkt_len); + if (netif_rx(skbn) != NET_RX_SUCCESS) { + net->stats.rx_errors++; return 0; + } else { + struct pcpu_sw_netstats *stats64; + struct qmimux_priv *priv = netdev_priv(net); + + stats64 = this_cpu_ptr(priv->stats64); + u64_stats_update_begin(&stats64->syncp); + stats64->rx_packets++; + stats64->rx_bytes += pkt_len; + 
u64_stats_update_end(&stats64->syncp); + } skip: offset += len + qmimux_hdr_sz; @@ -217,6 +282,12 @@ static int qmimux_register_device(struct net_device *real_dev, u8 mux_id) priv->mux_id = mux_id; priv->real_dev = real_dev; + priv->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!priv->stats64) { + err = -ENOBUFS; + goto out_free_newdev; + } + err = register_netdevice(new_dev); if (err < 0) goto out_free_newdev; @@ -241,13 +312,15 @@ out_free_newdev: return err; } -static void qmimux_unregister_device(struct net_device *dev) +static void qmimux_unregister_device(struct net_device *dev, + struct list_head *head) { struct qmimux_priv *priv = netdev_priv(dev); struct net_device *real_dev = priv->real_dev; + free_percpu(priv->stats64); netdev_upper_dev_unlink(real_dev, dev); - unregister_netdevice(dev); + unregister_netdevice_queue(dev, head); /* Get rid of the reference to real_dev */ dev_put(real_dev); @@ -356,8 +429,8 @@ static ssize_t add_mux_store(struct device *d, struct device_attribute *attr, c if (kstrtou8(buf, 0, &mux_id)) return -EINVAL; - /* mux_id [1 - 0x7f] range empirically found */ - if (mux_id < 1 || mux_id > 0x7f) + /* mux_id [1 - 254] for compatibility with ip(8) and the rmnet driver */ + if (mux_id < 1 || mux_id > 254) return -EINVAL; if (!rtnl_trylock()) @@ -418,7 +491,7 @@ static ssize_t del_mux_store(struct device *d, struct device_attribute *attr, c ret = -EINVAL; goto err; } - qmimux_unregister_device(del_dev); + qmimux_unregister_device(del_dev, NULL); if (!qmimux_has_slaves(dev)) info->flags &= ~QMI_WWAN_FLAG_MUX; @@ -1428,6 +1501,7 @@ static void qmi_wwan_disconnect(struct usb_interface *intf) struct qmi_wwan_state *info; struct list_head *iter; struct net_device *ldev; + LIST_HEAD(list); /* called twice if separate control and data intf */ if (!dev) @@ -1440,8 +1514,9 @@ static void qmi_wwan_disconnect(struct usb_interface *intf) } rcu_read_lock(); netdev_for_each_upper_dev_rcu(dev->net, ldev, iter) - qmimux_unregister_device(ldev); + qmimux_unregister_device(ldev, &list); rcu_read_unlock(); + unregister_netdevice_many(&list); rtnl_unlock(); info->flags &= ~QMI_WWAN_FLAG_MUX; } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 5994d5415a03..4c9bc29fe3d5 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1766,7 +1766,7 @@ static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb) struct vxlanhdr *hdr; __be32 vni; - if (skb->len < VXLAN_HLEN) + if (!pskb_may_pull(skb, skb_transport_offset(skb) + VXLAN_HLEN)) return -EINVAL; hdr = vxlan_hdr(skb); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 5f52e40a2903..33d7bc5500db 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -2747,3 +2747,42 @@ void iwl_fw_dbg_periodic_trig_handler(struct timer_list *t) jiffies + msecs_to_jiffies(collect_interval)); } } + +#define FSEQ_REG(x) { .addr = (x), .str = #x, } + +void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt) +{ + struct iwl_trans *trans = fwrt->trans; + unsigned long flags; + int i; + struct { + u32 addr; + const char *str; + } fseq_regs[] = { + FSEQ_REG(FSEQ_ERROR_CODE), + FSEQ_REG(FSEQ_TOP_INIT_VERSION), + FSEQ_REG(FSEQ_CNVIO_INIT_VERSION), + FSEQ_REG(FSEQ_OTP_VERSION), + FSEQ_REG(FSEQ_TOP_CONTENT_VERSION), + FSEQ_REG(FSEQ_ALIVE_TOKEN), + FSEQ_REG(FSEQ_CNVI_ID), + FSEQ_REG(FSEQ_CNVR_ID), + FSEQ_REG(CNVI_AUX_MISC_CHIP), + FSEQ_REG(CNVR_AUX_MISC_CHIP), + 
FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM), + FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR), + }; + + if (!iwl_trans_grab_nic_access(trans, &flags)) + return; + + IWL_ERR(fwrt, "Fseq Registers:\n"); + + for (i = 0; i < ARRAY_SIZE(fseq_regs); i++) + IWL_ERR(fwrt, "0x%08X | %s\n", + iwl_read_prph_no_grab(trans, fseq_regs[i].addr), + fseq_regs[i].str); + + iwl_trans_release_nic_access(trans, &flags); +} +IWL_EXPORT_SYMBOL(iwl_fw_error_print_fseq_regs); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 2a9e560a906b..fd0ad220e961 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -471,4 +471,6 @@ static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt) } void iwl_fw_dbg_periodic_trig_handler(struct timer_list *t); + +void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt); #endif /* __iwl_fw_dbg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 852d3cbfc719..fba242284507 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1597,7 +1597,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) goto free; out_free_fw: - iwl_dealloc_ucode(drv); release_firmware(ucode_raw); out_unbind: complete(&drv->request_firmware_complete); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 8e6a0c363c0d..8d930bfe0727 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -395,7 +395,11 @@ enum { WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000, }; -#define AUX_MISC_REG 0xA200B0 +#define CNVI_AUX_MISC_CHIP 0xA200B0 +#define CNVR_AUX_MISC_CHIP 0xA2B800 +#define CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM 0xA29890 +#define CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR 0xA29938 + enum { HW_STEP_LOCATION_BITS = 24, }; @@ -408,7 +412,12 @@ enum aux_misc_master1_en { #define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800 #define RSA_ENABLE 0xA24B08 #define PREG_AUX_BUS_WPROT_0 0xA04CC0 -#define PREG_PRPH_WPROT_0 0xA04CE0 + +/* device family 9000 WPROT register */ +#define PREG_PRPH_WPROT_9000 0xA04CE0 +/* device family 22000 WPROT register */ +#define PREG_PRPH_WPROT_22000 0xA04D00 + #define SB_CPU_1_STATUS 0xA01E30 #define SB_CPU_2_STATUS 0xA01E34 #define UMAG_SB_CPU_1_STATUS 0xA038C0 @@ -442,4 +451,13 @@ enum { #define UREG_DOORBELL_TO_ISR6 0xA05C04 #define UREG_DOORBELL_TO_ISR6_NMI_BIT BIT(0) + +#define FSEQ_ERROR_CODE 0xA340C8 +#define FSEQ_TOP_INIT_VERSION 0xA34038 +#define FSEQ_CNVIO_INIT_VERSION 0xA3403C +#define FSEQ_OTP_VERSION 0xA340FC +#define FSEQ_TOP_CONTENT_VERSION 0xA340F4 +#define FSEQ_ALIVE_TOKEN 0xA340F0 +#define FSEQ_CNVI_ID 0xA3408C +#define FSEQ_CNVR_ID 0xA34090 #endif /* __iwl_prph_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 60f5d337f16d..e7e68fb2bd29 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1972,26 +1972,6 @@ out: } } -static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm) -{ -#ifdef CONFIG_IWLWIFI_DEBUGFS - const struct fw_img *img = &mvm->fw->img[IWL_UCODE_WOWLAN]; - u32 len = img->sec[IWL_UCODE_SECTION_DATA].len; - u32 offs = img->sec[IWL_UCODE_SECTION_DATA].offset; - - if (!mvm->store_d3_resume_sram) - return; - - if (!mvm->d3_resume_sram) { - 
mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL); - if (!mvm->d3_resume_sram) - return; - } - - iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len); -#endif -} - static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { @@ -2054,8 +2034,6 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) } iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); - /* query SRAM first in case we want event logging */ - iwl_mvm_read_d3_sram(mvm); if (iwl_mvm_check_rt_status(mvm, vif)) { set_bit(STATUS_FW_ERROR, &mvm->trans->status); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index d4ff6b44de2c..5b1bb76c5d28 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1557,59 +1557,6 @@ static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm, } #endif -#ifdef CONFIG_PM_SLEEP -static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - int store; - - if (sscanf(buf, "%d", &store) != 1) - return -EINVAL; - - mvm->store_d3_resume_sram = store; - - return count; -} - -static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - const struct fw_img *img; - int ofs, len, pos = 0; - size_t bufsz, ret; - char *buf; - u8 *ptr = mvm->d3_resume_sram; - - img = &mvm->fw->img[IWL_UCODE_WOWLAN]; - len = img->sec[IWL_UCODE_SECTION_DATA].len; - - bufsz = len * 4 + 256; - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - pos += scnprintf(buf, bufsz, "D3 SRAM capture: %sabled\n", - mvm->store_d3_resume_sram ? "en" : "dis"); - - if (ptr) { - for (ofs = 0; ofs < len; ofs += 16) { - pos += scnprintf(buf + pos, bufsz - pos, - "0x%.4x %16ph\n", ofs, ptr + ofs); - } - } else { - pos += scnprintf(buf + pos, bufsz - pos, - "(no data captured)\n"); - } - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - - kfree(buf); - - return ret; -} -#endif - #define PRINT_MVM_REF(ref) do { \ if (mvm->refs[ref]) \ pos += scnprintf(buf + pos, bufsz - pos, \ @@ -1940,9 +1887,6 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256); #endif -#ifdef CONFIG_PM_SLEEP -MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8); -#endif #ifdef CONFIG_ACPI MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile); #endif @@ -2159,7 +2103,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) #endif #ifdef CONFIG_PM_SLEEP - MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400); debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir, &mvm->d3_wake_sysassert); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index ab68b5d53ec9..153717587aeb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -311,6 +311,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, int ret; enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img; static const u16 alive_cmd[] = { MVM_ALIVE }; + bool run_in_rfkill = + ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm); if (ucode_type == IWL_UCODE_REGULAR && iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) && @@ -328,7 +330,12 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, alive_cmd, ARRAY_SIZE(alive_cmd), 
iwl_alive_fn, &alive_data); - ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT); + /* + * We want to load the INIT firmware even in RFKILL + * For the unified firmware case, the ucode_type is not + * INIT, but we still need to run it. + */ + ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill); if (ret) { iwl_fw_set_current_image(&mvm->fwrt, old_type); iwl_remove_notification(&mvm->notif_wait, &alive_wait); @@ -433,7 +440,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) * commands */ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP, - INIT_EXTENDED_CFG_CMD), 0, + INIT_EXTENDED_CFG_CMD), + CMD_SEND_IN_RFKILL, sizeof(init_cfg), &init_cfg); if (ret) { IWL_ERR(mvm, "Failed to run init config command: %d\n", @@ -457,7 +465,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) } ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, - NVM_ACCESS_COMPLETE), 0, + NVM_ACCESS_COMPLETE), + CMD_SEND_IN_RFKILL, sizeof(nvm_complete), &nvm_complete); if (ret) { IWL_ERR(mvm, "Failed to run complete NVM access: %d\n", @@ -482,6 +491,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) } } + mvm->rfkill_safe_init_done = true; + return 0; error: @@ -526,7 +537,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) lockdep_assert_held(&mvm->mutex); - if (WARN_ON_ONCE(mvm->calibrating)) + if (WARN_ON_ONCE(mvm->rfkill_safe_init_done)) return 0; iwl_init_notification_wait(&mvm->notif_wait, @@ -576,7 +587,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) goto remove_notif; } - mvm->calibrating = true; + mvm->rfkill_safe_init_done = true; /* Send TX valid antennas before triggering calibrations */ ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); @@ -612,7 +623,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) remove_notif: iwl_remove_notification(&mvm->notif_wait, &calib_wait); out: - mvm->calibrating = false; + mvm->rfkill_safe_init_done = false; if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) { /* we want to debug INIT and we have no NVM - fake */ mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) + diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 5c52469288be..fdbabca0280e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1209,7 +1209,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) mvm->scan_status = 0; mvm->ps_disabled = false; - mvm->calibrating = false; + mvm->rfkill_safe_init_done = false; /* just in case one was running */ iwl_mvm_cleanup_roc_te(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 8dc2a9850bc5..02efcf2189c4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -880,7 +880,7 @@ struct iwl_mvm { struct iwl_mvm_vif *bf_allowed_vif; bool hw_registered; - bool calibrating; + bool rfkill_safe_init_done; bool support_umac_log; u32 ampdu_ref; @@ -1039,8 +1039,6 @@ struct iwl_mvm { #ifdef CONFIG_IWLWIFI_DEBUGFS bool d3_wake_sysassert; bool d3_test_active; - bool store_d3_resume_sram; - void *d3_resume_sram; u32 d3_test_pme_ptr; struct ieee80211_vif *keep_vif; u32 last_netdetect_scans; /* no. 
of scans in the last net-detect wake */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index acd2fda12466..fad3bf563712 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -918,9 +918,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) kfree(mvm->error_recovery_buf); mvm->error_recovery_buf = NULL; -#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS) - kfree(mvm->d3_resume_sram); -#endif iwl_trans_op_mode_leave(mvm->trans); iwl_phy_db_free(mvm->phy_db); @@ -1212,7 +1209,8 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - bool calibrating = READ_ONCE(mvm->calibrating); + bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done); + bool unified = iwl_mvm_has_unified_ucode(mvm); if (state) set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); @@ -1221,15 +1219,23 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) iwl_mvm_set_rfkill_state(mvm); - /* iwl_run_init_mvm_ucode is waiting for results, abort it */ - if (calibrating) + /* iwl_run_init_mvm_ucode is waiting for results, abort it. */ + if (rfkill_safe_init_done) iwl_abort_notification_waits(&mvm->notif_wait); /* + * Don't ask the transport to stop the firmware. We'll do it + * after cfg80211 takes us down. + */ + if (unified) + return false; + + /* * Stop the device if we run OPERATIONAL firmware or if we are in the * middle of the calibrations. */ - return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || calibrating); + return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || + rfkill_safe_init_done); } static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 659e21b2d4e7..be62f499c595 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -441,7 +441,8 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, */ sta->max_amsdu_len = max_amsdu_len; - ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, sizeof(cfg_cmd), + &cfg_cmd); if (ret) IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index b9914efc55c4..cc56ab88fb43 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -596,6 +596,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) iwl_mvm_dump_lmac_error_log(mvm, 1); iwl_mvm_dump_umac_error_log(mvm); + + iwl_fw_error_print_fseq_regs(&mvm->fwrt); } int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index b513037dc066..85973dd57234 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -928,7 +928,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) MSIX_HW_INT_CAUSES_REG_RF_KILL); } - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_9000) { + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_9000) { 
/* * On 9000-series devices this bit isn't enabled by default, so * when we power down the device we need set the bit to allow it diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 803fcbac4152..dfa1bed124aa 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1698,26 +1698,26 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, return 0; } -static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) +static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u32 hpm; - int err; - - lockdep_assert_held(&trans_pcie->mutex); + u32 hpm, wprot; - err = iwl_pcie_prepare_card_hw(trans); - if (err) { - IWL_ERR(trans, "Error while preparing HW: %d\n", err); - return err; + switch (trans->cfg->device_family) { + case IWL_DEVICE_FAMILY_9000: + wprot = PREG_PRPH_WPROT_9000; + break; + case IWL_DEVICE_FAMILY_22000: + wprot = PREG_PRPH_WPROT_22000; + break; + default: + return 0; } hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG); if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) { - int wfpm_val = iwl_read_umac_prph_no_grab(trans, - PREG_PRPH_WPROT_0); + u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot); - if (wfpm_val & PREG_WFPM_ACCESS) { + if (wprot_val & PREG_WFPM_ACCESS) { IWL_ERR(trans, "Error, can not clear persistence bit\n"); return -EPERM; @@ -1726,6 +1726,26 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) hpm & ~PERSISTENCE_BIT); } + return 0; +} + +static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int err; + + lockdep_assert_held(&trans_pcie->mutex); + + err = iwl_pcie_prepare_card_hw(trans); + if (err) { + IWL_ERR(trans, "Error while preparing HW: %d\n", err); + return err; + } + + err = iwl_trans_pcie_clear_persistence_bit(trans); + if (err) + return err; + iwl_trans_pcie_sw_reset(trans); err = iwl_pcie_apm_init(trans); @@ -3526,7 +3546,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, hw_step |= ENABLE_WFPM; iwl_write_umac_prph_no_grab(trans, WFPM_CTRL_REG, hw_step); - hw_step = iwl_read_prph_no_grab(trans, AUX_MISC_REG); + hw_step = iwl_read_prph_no_grab(trans, + CNVI_AUX_MISC_CHIP); hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF; if (hw_step == 0x3) trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) | @@ -3577,7 +3598,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && - (trans->cfg != &iwl_ax200_cfg_cc || + ((trans->cfg != &iwl_ax200_cfg_cc && + trans->cfg != &killer1650x_2ax_cfg && + trans->cfg != &killer1650w_2ax_cfg) || trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { u32 hw_status; diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 60ca13e0f15b..b5274d1f30fa 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -3851,6 +3851,7 @@ static int __init init_mac80211_hwsim(void) break; case HWSIM_REGTEST_STRICT_ALL: param.reg_strict = true; + /* fall through */ case HWSIM_REGTEST_DRIVER_REG_ALL: param.reg_alpha2 = hwsim_alpha2s[0]; break; diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c index 6845eb57b39a..653d347a9a19 100644 --- 
a/drivers/net/wireless/marvell/mwifiex/ie.c +++ b/drivers/net/wireless/marvell/mwifiex/ie.c @@ -329,6 +329,8 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv, struct ieee80211_vendor_ie *vendorhdr; u16 gen_idx = MWIFIEX_AUTO_IDX_MASK, ie_len = 0; int left_len, parsed_len = 0; + unsigned int token_len; + int err = 0; if (!info->tail || !info->tail_len) return 0; @@ -344,6 +346,12 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv, */ while (left_len > sizeof(struct ieee_types_header)) { hdr = (void *)(info->tail + parsed_len); + token_len = hdr->len + sizeof(struct ieee_types_header); + if (token_len > left_len) { + err = -EINVAL; + goto out; + } + switch (hdr->element_id) { case WLAN_EID_SSID: case WLAN_EID_SUPP_RATES: @@ -361,17 +369,20 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv, if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WMM, (const u8 *)hdr, - hdr->len + sizeof(struct ieee_types_header))) + token_len)) break; /* fall through */ default: - memcpy(gen_ie->ie_buffer + ie_len, hdr, - hdr->len + sizeof(struct ieee_types_header)); - ie_len += hdr->len + sizeof(struct ieee_types_header); + if (ie_len + token_len > IEEE_MAX_IE_SIZE) { + err = -EINVAL; + goto out; + } + memcpy(gen_ie->ie_buffer + ie_len, hdr, token_len); + ie_len += token_len; break; } - left_len -= hdr->len + sizeof(struct ieee_types_header); - parsed_len += hdr->len + sizeof(struct ieee_types_header); + left_len -= token_len; + parsed_len += token_len; } /* parse only WPA vendor IE from tail, WMM IE is configured by @@ -381,15 +392,17 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv, WLAN_OUI_TYPE_MICROSOFT_WPA, info->tail, info->tail_len); if (vendorhdr) { - memcpy(gen_ie->ie_buffer + ie_len, vendorhdr, - vendorhdr->len + sizeof(struct ieee_types_header)); - ie_len += vendorhdr->len + sizeof(struct ieee_types_header); + token_len = vendorhdr->len + sizeof(struct ieee_types_header); + if (ie_len + token_len > IEEE_MAX_IE_SIZE) { + err = -EINVAL; + goto out; + } + memcpy(gen_ie->ie_buffer + ie_len, vendorhdr, token_len); + ie_len += token_len; } - if (!ie_len) { - kfree(gen_ie); - return 0; - } + if (!ie_len) + goto out; gen_ie->ie_index = cpu_to_le16(gen_idx); gen_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON | @@ -399,13 +412,15 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv, if (mwifiex_update_uap_custom_ie(priv, gen_ie, &gen_idx, NULL, NULL, NULL, NULL)) { - kfree(gen_ie); - return -1; + err = -EINVAL; + goto out; } priv->gen_idx = gen_idx; + + out: kfree(gen_ie); - return 0; + return err; } /* This function parses different IEs-head & tail IEs, beacon IEs, diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 935778ec9a1b..c269a0de9413 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -1247,6 +1247,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, } switch (element_id) { case WLAN_EID_SSID: + if (element_len > IEEE80211_MAX_SSID_LEN) + return -EINVAL; bss_entry->ssid.ssid_len = element_len; memcpy(bss_entry->ssid.ssid, (current_ptr + 2), element_len); @@ -1256,6 +1258,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, break; case WLAN_EID_SUPP_RATES: + if (element_len > MWIFIEX_SUPPORTED_RATES) + return -EINVAL; memcpy(bss_entry->data_rates, current_ptr + 2, element_len); memcpy(bss_entry->supported_rates, current_ptr + 
2, @@ -1265,6 +1269,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, break; case WLAN_EID_FH_PARAMS: + if (element_len + 2 < sizeof(*fh_param_set)) + return -EINVAL; fh_param_set = (struct ieee_types_fh_param_set *) current_ptr; memcpy(&bss_entry->phy_param_set.fh_param_set, @@ -1273,6 +1279,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, break; case WLAN_EID_DS_PARAMS: + if (element_len + 2 < sizeof(*ds_param_set)) + return -EINVAL; ds_param_set = (struct ieee_types_ds_param_set *) current_ptr; @@ -1284,6 +1292,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, break; case WLAN_EID_CF_PARAMS: + if (element_len + 2 < sizeof(*cf_param_set)) + return -EINVAL; cf_param_set = (struct ieee_types_cf_param_set *) current_ptr; memcpy(&bss_entry->ss_param_set.cf_param_set, @@ -1292,6 +1302,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, break; case WLAN_EID_IBSS_PARAMS: + if (element_len + 2 < sizeof(*ibss_param_set)) + return -EINVAL; ibss_param_set = (struct ieee_types_ibss_param_set *) current_ptr; @@ -1301,10 +1313,14 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, break; case WLAN_EID_ERP_INFO: + if (!element_len) + return -EINVAL; bss_entry->erp_flags = *(current_ptr + 2); break; case WLAN_EID_PWR_CONSTRAINT: + if (!element_len) + return -EINVAL; bss_entry->local_constraint = *(current_ptr + 2); bss_entry->sensed_11h = true; break; @@ -1345,6 +1361,9 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, break; case WLAN_EID_VENDOR_SPECIFIC: + if (element_len + 2 < sizeof(vendor_ie->vend_hdr)) + return -EINVAL; + vendor_ie = (struct ieee_types_vendor_specific *) current_ptr; diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index cf4265cda224..628477971213 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -8,7 +8,8 @@ #include "reg.h" #include "debug.h" -void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev, struct sk_buff *skb) +static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev, + struct sk_buff *skb) { struct rtw_c2h_cmd *c2h; u8 sub_cmd_id; @@ -47,7 +48,8 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb) } } -void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev, u8 *h2c) +static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev, + u8 *h2c) { u8 box; u8 box_state; diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index f447361f7573..b2dac4609138 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -162,7 +162,8 @@ static void rtw_watch_dog_work(struct work_struct *work) rtwdev->stats.tx_cnt = 0; rtwdev->stats.rx_cnt = 0; - rtw_iterate_vifs(rtwdev, rtw_vif_watch_dog_iter, &data); + /* use atomic version to avoid taking local->iflist_mtx mutex */ + rtw_iterate_vifs_atomic(rtwdev, rtw_vif_watch_dog_iter, &data); /* fw supports only one station associated to enter lps, if there are * more than two stations associated to the AP, then we can not enter diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c index 4381b360b5b5..404d89432c96 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.c +++ b/drivers/net/wireless/realtek/rtw88/phy.c @@ -144,10 +144,10 @@ static void rtw_phy_stat_rssi_iter(void *data, struct ieee80211_sta *sta) struct rtw_phy_stat_iter_data *iter_data = data; struct 
rtw_dev *rtwdev = iter_data->rtwdev; struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; - u8 rssi, rssi_level; + u8 rssi; rssi = ewma_rssi_read(&si->avg_rssi); - rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi); + si->rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi); rtw_fw_send_rssi_info(rtwdev, si); @@ -423,6 +423,11 @@ static u64 rtw_phy_db_2_linear(u8 power_db) u8 i, j; u64 linear; + if (power_db > 96) + power_db = 96; + else if (power_db < 1) + return 1; + /* 1dB ~ 96dB */ i = (power_db - 1) >> 3; j = (power_db - 1) - (i << 3); @@ -848,12 +853,13 @@ u8 rtw_vht_2s_rates[] = { DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7, DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9 }; -u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates); -u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates); -u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates); -u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates); -u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates); -u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates); + +static u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates); +static u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates); +static u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates); +static u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates); +static u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates); +static u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates); u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = { rtw_cck_rates, rtw_ofdm_rates, rtw_ht_1s_rates, rtw_ht_2s_rates, diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index f9c67ed473d1..b42cd50b837e 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -929,11 +929,15 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter) u32 addr; u8 *data; + data = kzalloc(RSI_9116_REG_SIZE, GFP_KERNEL); + if (!data) + return -ENOMEM; + status = rsi_sdio_master_access_msword(adapter, TA_BASE_ADDR); if (status < 0) { rsi_dbg(ERR_ZONE, "Unable to set ms word to common reg\n"); - return status; + goto err; } rsi_dbg(INIT_ZONE, "%s: Bring TA out of reset\n", __func__); @@ -944,7 +948,7 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter) RSI_9116_REG_SIZE); if (status < 0) { rsi_dbg(ERR_ZONE, "Unable to hold TA threads\n"); - return status; + goto err; } put_unaligned_le32(TA_SOFT_RST_CLR, data); @@ -954,7 +958,7 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter) RSI_9116_REG_SIZE); if (status < 0) { rsi_dbg(ERR_ZONE, "Unable to get TA out of reset\n"); - return status; + goto err; } put_unaligned_le32(TA_PC_ZERO, data); @@ -964,7 +968,8 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter) RSI_9116_REG_SIZE); if (status < 0) { rsi_dbg(ERR_ZONE, "Unable to Reset TA PC value\n"); - return -EINVAL; + status = -EINVAL; + goto err; } put_unaligned_le32(TA_RELEASE_THREAD_VALUE, data); @@ -974,17 +979,19 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter) RSI_9116_REG_SIZE); if (status < 0) { rsi_dbg(ERR_ZONE, "Unable to release TA threads\n"); - return status; + goto err; } status = rsi_sdio_master_access_msword(adapter, MISC_CFG_BASE_ADDR); if (status < 0) { rsi_dbg(ERR_ZONE, "Unable to set ms word to common reg\n"); - return status; + goto err; } rsi_dbg(INIT_ZONE, "***** TA Reset done *****\n"); - return 0; +err: + kfree(data); + return status; } static struct rsi_host_intf_ops sdio_host_intf_ops = { diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index cb3c6b3b89c8..a7f7a98ec39d 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -238,6 
+238,12 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx) +#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL) + +#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL) + #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \ ({ \ int __ret = 0; \ @@ -339,6 +345,8 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, #define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; }) #define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; }) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 5df8e9e2a393..b92ef9f73e42 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -600,7 +600,6 @@ void bpf_map_area_free(void *base); void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); extern int sysctl_unprivileged_bpf_disabled; -extern int sysctl_bpf_stats_enabled; int bpf_map_new_fd(struct bpf_map *map, int flags); int bpf_prog_new_fd(struct bpf_prog *prog); diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 6411c624f63a..2d2e55dfea94 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -123,11 +123,20 @@ int mac_link_state(struct net_device *ndev, * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. * @state: a pointer to a &struct phylink_link_state. * + * Note - not all members of @state are valid. In particular, + * @state->lp_advertising, @state->link, @state->an_complete are never + * guaranteed to be correct, and so any mac_config() implementation must + * never reference these fields. + * * The action performed depends on the currently selected mode: * * %MLO_AN_FIXED, %MLO_AN_PHY: * Configure the specified @state->speed, @state->duplex and - * @state->pause (%MLO_PAUSE_TX / %MLO_PAUSE_RX) mode. + * @state->pause (%MLO_PAUSE_TX / %MLO_PAUSE_RX) modes over a link + * specified by @state->interface. @state->advertising may be used, + * but is not required. Other members of @state must be ignored. + * + * Valid state members: interface, speed, duplex, pause, advertising. * * %MLO_AN_INBAND: * place the link in an inband negotiation mode (such as 802.3z @@ -150,6 +159,8 @@ int mac_link_state(struct net_device *ndev, * responsible for reading the configuration word and configuring * itself accordingly. * + * Valid state members: interface, an_enabled, pause, advertising. + * * Implementations are expected to update the MAC to reflect the * requested settings - i.o.w., if nothing has changed between two * calls, no action is expected. 
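As an illustration of the mac_config() rules documented above, here is a minimal sketch of a callback that consults only the state members listed as valid for each mode. It assumes the ndev-based phylink callback signature used at this point in the tree; struct my_mac and the my_mac_*() helpers are placeholders for driver-specific register accessors, not real kernel APIs.

static void my_mac_config(struct net_device *ndev, unsigned int mode,
			  const struct phylink_link_state *state)
{
	struct my_mac *mac = netdev_priv(ndev);

	switch (mode) {
	case MLO_AN_FIXED:
	case MLO_AN_PHY:
		/* Valid members: interface, speed, duplex, pause,
		 * advertising. Never look at state->link,
		 * state->lp_advertising or state->an_complete here.
		 */
		my_mac_set_mode(mac, state->interface, state->speed,
				state->duplex);
		my_mac_set_pause(mac, state->pause & MLO_PAUSE_TX,
				 state->pause & MLO_PAUSE_RX);
		break;
	case MLO_AN_INBAND:
		/* Valid members: interface, an_enabled, pause, advertising.
		 * The resolved link parameters arrive later through the
		 * in-band control word, not through this call.
		 */
		my_mac_set_inband_an(mac, state->interface,
				     state->an_enabled);
		break;
	}
}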
If only flow control settings have diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 178a3933a71b..50ced8aba9db 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -351,6 +351,8 @@ static inline void sk_psock_update_proto(struct sock *sk, static inline void sk_psock_restore_proto(struct sock *sk, struct sk_psock *psock) { + sk->sk_write_space = psock->saved_write_space; + if (psock->sk_proto) { sk->sk_prot = psock->sk_proto; psock->sk_proto = NULL; diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index b769ecfcc3bd..aadd310769d0 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -63,6 +63,9 @@ extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, void __user *, size_t *, loff_t *); extern int proc_do_large_bitmap(struct ctl_table *, int, void __user *, size_t *, loff_t *); +extern int proc_do_static_key(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); /* * Register a set of sysctl names by calling register_sysctl_table diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 711361af9ce0..9a478a0cd3a2 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -484,4 +484,8 @@ static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss) return (user_mss && user_mss < mss) ? user_mss : mss; } + +int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount, + int shiftlen); + #endif /* _LINUX_TCP_H */ diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 2f67ae854ff0..becdad576859 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -310,6 +310,22 @@ static inline struct inet6_dev *__in6_dev_get(const struct net_device *dev) } /** + * __in6_dev_stats_get - get inet6_dev pointer for stats + * @dev: network device + * @skb: skb for original incoming interface if needed + * + * Caller must hold rcu_read_lock or RTNL, because this function + * does not take a reference on the inet6_dev. + */ +static inline struct inet6_dev *__in6_dev_stats_get(const struct net_device *dev, + const struct sk_buff *skb) +{ + if (netif_is_l3_master(dev)) + dev = dev_get_by_index_rcu(dev_net(dev), inet6_iif(skb)); + return __in6_dev_get(dev); +} + +/** * __in6_dev_get_safely - get inet6_dev pointer from netdevice + * @dev: network device * diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 87dae868707e..948139690a58 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -3839,7 +3839,8 @@ struct cfg80211_ops { * on wiphy_new(), but can be changed by the driver if it has a good * reason to override the default * @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station - * on a VLAN interface) + * on a VLAN interface). This flag also serves an extra purpose of + * supporting 4ADDR AP mode on devices which do not support AP/VLAN iftype. * @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station * @WIPHY_FLAG_CONTROL_PORT_PROTOCOL: This device supports setting the * control port protocol ethertype.
The device also honours the diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 7c5a8d9a8d2a..dfabc0503446 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -46,6 +46,7 @@ struct flow_dissector_key_tags { struct flow_dissector_key_vlan { u16 vlan_id:12, + vlan_dei:1, vlan_priority:3; __be16 vlan_tpid; }; diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 7698460a3dd1..623cfbb7b8dc 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -117,6 +117,7 @@ struct netns_ipv4 { #endif int sysctl_tcp_mtu_probing; int sysctl_tcp_base_mss; + int sysctl_tcp_min_snd_mss; int sysctl_tcp_probe_threshold; u32 sysctl_tcp_probe_interval; diff --git a/include/net/sock.h b/include/net/sock.h index e9d769c04637..6cbc16136357 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1463,12 +1463,14 @@ static inline void sk_mem_uncharge(struct sock *sk, int size) __sk_mem_reclaim(sk, 1 << 20); } +DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key); static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) { sock_set_flag(sk, SOCK_QUEUE_SHRUNK); sk->sk_wmem_queued -= skb->truesize; sk_mem_uncharge(sk, skb->truesize); - if (!sk->sk_tx_skb_cache && !skb_cloned(skb)) { + if (static_branch_unlikely(&tcp_tx_skb_cache_key) && + !sk->sk_tx_skb_cache && !skb_cloned(skb)) { skb_zcopy_clear(skb, true); sk->sk_tx_skb_cache = skb; return; @@ -2433,13 +2435,11 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags) * This routine must be called with interrupts disabled or with the socket * locked so that the sk_buff queue operation is ok. */ +DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key); static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) { __skb_unlink(skb, &sk->sk_receive_queue); - if ( -#ifdef CONFIG_RPS - !static_branch_unlikely(&rps_needed) && -#endif + if (static_branch_unlikely(&tcp_rx_skb_cache_key) && !sk->sk_rx_skb_cache) { sk->sk_rx_skb_cache = skb; skb_orphan(skb); @@ -2534,6 +2534,8 @@ extern int sysctl_optmem_max; extern __u32 sysctl_wmem_default; extern __u32 sysctl_rmem_default; +DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key); + static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto) { /* Does this proto have per netns sysctl_wmem ? */ diff --git a/include/net/tcp.h b/include/net/tcp.h index ac2f53fbfa6b..582c0caa9811 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -51,6 +51,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define MAX_TCP_HEADER (128 + MAX_HEADER) #define MAX_TCP_OPTION_SPACE 40 +#define TCP_MIN_SND_MSS 48 +#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE) /* * Never offer a window over 32767 without using window scaling. 
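A quick worked check of the two macros introduced above: with MAX_TCP_OPTION_SPACE at 40 bytes, TCP_MIN_SND_MSS of 48 leaves TCP_MIN_GSO_SIZE = 48 - 40 = 8 bytes of payload per segment, the floor that the tcp_min_snd_mss sysctl and tcp_skb_shift() changes later in this series rely on. The snippet is a standalone userspace mirror of the macro arithmetic, not kernel code:

#include <stdio.h>

#define MAX_TCP_OPTION_SPACE	40
#define TCP_MIN_SND_MSS		48
#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)

int main(void)
{
	/* tcp_init() adds BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE),
	 * i.e. the minimum sender MSS must always leave room for a full
	 * option block plus at least some payload.
	 */
	printf("minimum payload per segment: %d bytes\n", TCP_MIN_GSO_SIZE);
	return 0;
}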
Some diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 63e0cf66f01a..a8b823c30b43 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -192,6 +192,8 @@ enum bpf_attach_type { BPF_LIRC_MODE2, BPF_FLOW_DISSECTOR, BPF_CGROUP_SYSCTL, + BPF_CGROUP_UDP4_RECVMSG, + BPF_CGROUP_UDP6_RECVMSG, __MAX_BPF_ATTACH_TYPE }; @@ -3376,8 +3378,8 @@ struct bpf_raw_tracepoint_args { /* DIRECT: Skip the FIB rules and go to FIB table associated with device * OUTPUT: Do lookup from egress perspective; default is ingress */ -#define BPF_FIB_LOOKUP_DIRECT BIT(0) -#define BPF_FIB_LOOKUP_OUTPUT BIT(1) +#define BPF_FIB_LOOKUP_DIRECT (1U << 0) +#define BPF_FIB_LOOKUP_OUTPUT (1U << 1) enum { BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 86dc24a96c90..fd42c1316d3d 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -283,6 +283,7 @@ enum LINUX_MIB_TCPACKCOMPRESSED, /* TCPAckCompressed */ LINUX_MIB_TCPZEROWINDOWDROP, /* TCPZeroWindowDrop */ LINUX_MIB_TCPRCVQDROP, /* TCPRcvQDrop */ + LINUX_MIB_TCPWQUEUETOOBIG, /* TCPWqueueTooBig */ __LINUX_MIB_MAX }; diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 7c473f208a10..080e2bb644cc 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2097,7 +2097,6 @@ int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to, DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key); EXPORT_SYMBOL(bpf_stats_enabled_key); -int sysctl_bpf_stats_enabled __read_mostly; /* All definitions of tracepoints related to BPF. */ #define CREATE_TRACE_POINTS diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 15dbc15c5b0c..cd8297b3bdb9 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -178,6 +178,7 @@ static void dev_map_free(struct bpf_map *map) if (!dev) continue; + free_percpu(dev->bulkq); dev_put(dev->dev); kfree(dev); } @@ -273,6 +274,7 @@ void __dev_map_flush(struct bpf_map *map) unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed); u32 bit; + rcu_read_lock(); for_each_set_bit(bit, bitmap, map->max_entries) { struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]); struct xdp_bulk_queue *bq; @@ -283,11 +285,12 @@ void __dev_map_flush(struct bpf_map *map) if (unlikely(!dev)) continue; - __clear_bit(bit, bitmap); - bq = this_cpu_ptr(dev->bulkq); bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true); + + __clear_bit(bit, bitmap); } + rcu_read_unlock(); } /* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or @@ -380,6 +383,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev) int cpu; + rcu_read_lock(); for_each_online_cpu(cpu) { bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu); __clear_bit(dev->bit, bitmap); @@ -387,6 +391,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev) bq = per_cpu_ptr(dev->bulkq, cpu); bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false); } + rcu_read_unlock(); } } diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index e61630c2e50b..864e2a496376 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -716,9 +716,14 @@ find_leftmost: * have exact two children, so this function will never return NULL. 
*/ for (node = search_root; node;) { - if (!(node->flags & LPM_TREE_NODE_FLAG_IM)) + if (node->flags & LPM_TREE_NODE_FLAG_IM) { + node = rcu_dereference(node->child[0]); + } else { next_node = node; - node = rcu_dereference(node->child[0]); + node = rcu_dereference(node->child[0]); + if (!node) + node = rcu_dereference(next_node->child[1]); + } } do_copy: next_key->prefixlen = next_node->prefixlen; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index ef63d26622f2..42d17f730780 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1573,6 +1573,8 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type, case BPF_CGROUP_INET6_CONNECT: case BPF_CGROUP_UDP4_SENDMSG: case BPF_CGROUP_UDP6_SENDMSG: + case BPF_CGROUP_UDP4_RECVMSG: + case BPF_CGROUP_UDP6_RECVMSG: return 0; default: return -EINVAL; @@ -1867,6 +1869,8 @@ static int bpf_prog_attach(const union bpf_attr *attr) case BPF_CGROUP_INET6_CONNECT: case BPF_CGROUP_UDP4_SENDMSG: case BPF_CGROUP_UDP6_SENDMSG: + case BPF_CGROUP_UDP4_RECVMSG: + case BPF_CGROUP_UDP6_RECVMSG: ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR; break; case BPF_CGROUP_SOCK_OPS: @@ -1952,6 +1956,8 @@ static int bpf_prog_detach(const union bpf_attr *attr) case BPF_CGROUP_INET6_CONNECT: case BPF_CGROUP_UDP4_SENDMSG: case BPF_CGROUP_UDP6_SENDMSG: + case BPF_CGROUP_UDP4_RECVMSG: + case BPF_CGROUP_UDP6_RECVMSG: ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR; break; case BPF_CGROUP_SOCK_OPS: @@ -2003,6 +2009,8 @@ static int bpf_prog_query(const union bpf_attr *attr, case BPF_CGROUP_INET6_CONNECT: case BPF_CGROUP_UDP4_SENDMSG: case BPF_CGROUP_UDP6_SENDMSG: + case BPF_CGROUP_UDP4_RECVMSG: + case BPF_CGROUP_UDP6_RECVMSG: case BPF_CGROUP_SOCK_OPS: case BPF_CGROUP_DEVICE: case BPF_CGROUP_SYSCTL: diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index d15cc4fafa89..a5c369e60343 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5353,9 +5353,12 @@ static int check_return_code(struct bpf_verifier_env *env) struct tnum range = tnum_range(0, 1); switch (env->prog->type) { + case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: + if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || + env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG) + range = tnum_range(1, 1); case BPF_PROG_TYPE_CGROUP_SKB: case BPF_PROG_TYPE_CGROUP_SOCK: - case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: case BPF_PROG_TYPE_SOCK_OPS: case BPF_PROG_TYPE_CGROUP_DEVICE: case BPF_PROG_TYPE_CGROUP_SYSCTL: @@ -5372,16 +5375,17 @@ static int check_return_code(struct bpf_verifier_env *env) } if (!tnum_in(range, reg->var_off)) { + char tn_buf[48]; + verbose(env, "At program exit the register R0 "); if (!tnum_is_unknown(reg->var_off)) { - char tn_buf[48]; - tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "has value %s", tn_buf); } else { verbose(env, "has unknown scalar value"); } - verbose(env, " should have been 0 or 1\n"); + tnum_strn(tn_buf, sizeof(tn_buf), range); + verbose(env, " should have been in %s\n", tn_buf); return -EINVAL; } return 0; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 7d1008be6173..1beca96fb625 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -230,11 +230,6 @@ static int proc_dostring_coredump(struct ctl_table *table, int write, #endif static int proc_dopipe_max_size(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); -#ifdef CONFIG_BPF_SYSCALL -static int proc_dointvec_minmax_bpf_stats(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); -#endif #ifdef CONFIG_MAGIC_SYSRQ /* Note: 
sysrq code uses its own private copy */ @@ -1253,12 +1248,10 @@ static struct ctl_table kern_table[] = { }, { .procname = "bpf_stats_enabled", - .data = &sysctl_bpf_stats_enabled, - .maxlen = sizeof(sysctl_bpf_stats_enabled), + .data = &bpf_stats_enabled_key.key, + .maxlen = sizeof(bpf_stats_enabled_key), .mode = 0644, - .proc_handler = proc_dointvec_minmax_bpf_stats, - .extra1 = &zero, - .extra2 = &one, + .proc_handler = proc_do_static_key, }, #endif #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) @@ -3374,26 +3367,35 @@ int proc_do_large_bitmap(struct ctl_table *table, int write, #endif /* CONFIG_PROC_SYSCTL */ -#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_SYSCTL) -static int proc_dointvec_minmax_bpf_stats(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos) +#if defined(CONFIG_SYSCTL) +int proc_do_static_key(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) { - int ret, bpf_stats = *(int *)table->data; - struct ctl_table tmp = *table; + struct static_key *key = (struct static_key *)table->data; + static DEFINE_MUTEX(static_key_mutex); + int val, ret; + struct ctl_table tmp = { + .data = &val, + .maxlen = sizeof(val), + .mode = table->mode, + .extra1 = &zero, + .extra2 = &one, + }; if (write && !capable(CAP_SYS_ADMIN)) return -EPERM; - tmp.data = &bpf_stats; + mutex_lock(&static_key_mutex); + val = static_key_enabled(key); ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); if (write && !ret) { - *(int *)table->data = bpf_stats; - if (bpf_stats) - static_branch_enable(&bpf_stats_enabled_key); + if (val) + static_key_enable(key); else - static_branch_disable(&bpf_stats_enabled_key); + static_key_disable(key); } + mutex_unlock(&static_key_mutex); return ret; } #endif diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index f92d6ad5e080..1c9a4745e596 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -410,8 +410,6 @@ static const struct bpf_func_proto bpf_perf_event_read_value_proto = { .arg4_type = ARG_CONST_SIZE, }; -static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd); - static __always_inline u64 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, u64 flags, struct perf_sample_data *sd) @@ -442,24 +440,50 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, return perf_event_output(event, sd, regs); } +/* + * Support executing tracepoints in normal, irq, and nmi context that each call + * bpf_perf_event_output + */ +struct bpf_trace_sample_data { + struct perf_sample_data sds[3]; +}; + +static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds); +static DEFINE_PER_CPU(int, bpf_trace_nest_level); BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, u64, flags, void *, data, u64, size) { - struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd); + struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds); + int nest_level = this_cpu_inc_return(bpf_trace_nest_level); struct perf_raw_record raw = { .frag = { .size = size, .data = data, }, }; + struct perf_sample_data *sd; + int err; - if (unlikely(flags & ~(BPF_F_INDEX_MASK))) - return -EINVAL; + if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) { + err = -EBUSY; + goto out; + } + + sd = &sds->sds[nest_level - 1]; + + if (unlikely(flags & ~(BPF_F_INDEX_MASK))) { + err = -EINVAL; + goto out; + } perf_sample_data_init(sd, 0, 0); sd->raw = &raw; - return __bpf_perf_event_output(regs, map, flags, sd); + err = 
__bpf_perf_event_output(regs, map, flags, sd); + +out: + this_cpu_dec(bpf_trace_nest_level); + return err; } static const struct bpf_func_proto bpf_perf_event_output_proto = { @@ -822,16 +846,48 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) /* * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp * to avoid potential recursive reuse issue when/if tracepoints are added - * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack + * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack. + * + * Since raw tracepoints run despite bpf_prog_active, support concurrent usage + * in normal, irq, and nmi context. */ -static DEFINE_PER_CPU(struct pt_regs, bpf_raw_tp_regs); +struct bpf_raw_tp_regs { + struct pt_regs regs[3]; +}; +static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs); +static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level); +static struct pt_regs *get_bpf_raw_tp_regs(void) +{ + struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs); + int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level); + + if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) { + this_cpu_dec(bpf_raw_tp_nest_level); + return ERR_PTR(-EBUSY); + } + + return &tp_regs->regs[nest_level - 1]; +} + +static void put_bpf_raw_tp_regs(void) +{ + this_cpu_dec(bpf_raw_tp_nest_level); +} + BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args, struct bpf_map *, map, u64, flags, void *, data, u64, size) { - struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs); + struct pt_regs *regs = get_bpf_raw_tp_regs(); + int ret; + + if (IS_ERR(regs)) + return PTR_ERR(regs); perf_fetch_caller_regs(regs); - return ____bpf_perf_event_output(regs, map, flags, data, size); + ret = ____bpf_perf_event_output(regs, map, flags, data, size); + + put_bpf_raw_tp_regs(); + return ret; } static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { @@ -848,12 +904,18 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args, struct bpf_map *, map, u64, flags) { - struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs); + struct pt_regs *regs = get_bpf_raw_tp_regs(); + int ret; + + if (IS_ERR(regs)) + return PTR_ERR(regs); perf_fetch_caller_regs(regs); /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */ - return bpf_get_stackid((unsigned long) regs, (unsigned long) map, - flags, 0, 0); + ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map, + flags, 0, 0); + put_bpf_raw_tp_regs(); + return ret; } static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = { @@ -868,11 +930,17 @@ static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = { BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args, void *, buf, u32, size, u64, flags) { - struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs); + struct pt_regs *regs = get_bpf_raw_tp_regs(); + int ret; + + if (IS_ERR(regs)) + return PTR_ERR(regs); perf_fetch_caller_regs(regs); - return bpf_get_stack((unsigned long) regs, (unsigned long) buf, - (unsigned long) size, flags, 0); + ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf, + (unsigned long) size, flags, 0); + put_bpf_raw_tp_regs(); + return ret; } static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = { diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c index 09fdd0aac4b9..b40e0bce67ea 100644 --- a/net/ax25/ax25_route.c +++ b/net/ax25/ax25_route.c @@ 
-426,9 +426,11 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) } if (ax25->sk != NULL) { + local_bh_disable(); bh_lock_sock(ax25->sk); sock_reset_flag(ax25->sk, SOCK_ZAPPED); bh_unlock_sock(ax25->sk); + local_bh_enable(); } put: diff --git a/net/can/af_can.c b/net/can/af_can.c index e8fd5dc1780a..80281ef2ccbd 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -99,6 +99,7 @@ EXPORT_SYMBOL(can_ioctl); static void can_sock_destruct(struct sock *sk) { skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_error_queue); } static const struct can_proto *can_get_proto(int protocol) @@ -952,6 +953,8 @@ static struct pernet_operations can_pernet_ops __read_mostly = { static __init int can_init(void) { + int err; + /* check for correct padding to be able to use the structs similarly */ BUILD_BUG_ON(offsetof(struct can_frame, can_dlc) != offsetof(struct canfd_frame, len) || @@ -965,15 +968,31 @@ static __init int can_init(void) if (!rcv_cache) return -ENOMEM; - register_pernet_subsys(&can_pernet_ops); + err = register_pernet_subsys(&can_pernet_ops); + if (err) + goto out_pernet; /* protocol register */ - sock_register(&can_family_ops); - register_netdevice_notifier(&can_netdev_notifier); + err = sock_register(&can_family_ops); + if (err) + goto out_sock; + err = register_netdevice_notifier(&can_netdev_notifier); + if (err) + goto out_notifier; + dev_add_pack(&can_packet); dev_add_pack(&canfd_packet); return 0; + +out_notifier: + sock_unregister(PF_CAN); +out_sock: + unregister_pernet_subsys(&can_pernet_ops); +out_pernet: + kmem_cache_destroy(rcv_cache); + + return err; } static __exit void can_exit(void) diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index cc9597a87770..d1c4e1f3be2c 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -633,7 +633,8 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr) return ERR_PTR(-ENOMEM); bpf_map_init_from_attr(&smap->map, attr); - smap->bucket_log = ilog2(roundup_pow_of_two(num_possible_cpus())); + /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */ + smap->bucket_log = max_t(u32, 1, ilog2(roundup_pow_of_two(num_possible_cpus()))); nbuckets = 1U << smap->bucket_log; smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets, GFP_USER | __GFP_NOWARN); diff --git a/net/core/dev.c b/net/core/dev.c index eb7fb6daa1ef..d6edd218babd 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4923,8 +4923,36 @@ skip_classify: } if (unlikely(skb_vlan_tag_present(skb))) { - if (skb_vlan_tag_get_id(skb)) +check_vlan_id: + if (skb_vlan_tag_get_id(skb)) { + /* Vlan id is non 0 and vlan_do_receive() above couldn't + * find vlan device. + */ skb->pkt_type = PACKET_OTHERHOST; + } else if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || + skb->protocol == cpu_to_be16(ETH_P_8021AD)) { + /* Outer header is 802.1P with vlan 0, inner header is + * 802.1Q or 802.1AD and vlan_do_receive() above could + * not find vlan dev for vlan id 0. + */ + __vlan_hwaccel_clear_tag(skb); + skb = skb_vlan_untag(skb); + if (unlikely(!skb)) + goto out; + if (vlan_do_receive(&skb)) + /* After stripping off 802.1P header with vlan 0 + * vlan dev is found for inner header. + */ + goto another_round; + else if (unlikely(!skb)) + goto out; + else + /* We have stripped outer 802.1P vlan 0 header. + * But could not find vlan dev. + * check again for vlan id to set OTHERHOST. 
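To make the frame layout handled by the new vlan-0 branch in __netif_receive_skb_core() concrete, the sketch below (standalone userspace C, with made-up field values) lays out a priority-tagged packet: an outer 802.1Q tag whose VID is 0 and which only carries the PCP/DEI bits, followed by an inner tag with the real VLAN ID. The receive path above strips the outer VID-0 tag and retries vlan_do_receive() for the inner one; only if that still fails does it re-check the VLAN ID and mark the skb PACKET_OTHERHOST.

#include <stdio.h>
#include <stdint.h>

#define TPID_8021Q	0x8100
#define TPID_8021AD	0x88A8

struct vlan_tag {
	uint16_t tpid;	/* 802.1Q or 802.1AD ethertype */
	uint16_t tci;	/* PCP(3 bits) | DEI(1 bit) | VID(12 bits) */
};

static void print_tag(const char *which, struct vlan_tag t)
{
	printf("%s: tpid=0x%04x pcp=%u dei=%u vid=%u\n", which, t.tpid,
	       t.tci >> 13, (t.tci >> 12) & 1, t.tci & 0x0fff);
}

int main(void)
{
	/* Outer "802.1P" tag: VID 0, only the priority bits matter. */
	struct vlan_tag outer = { TPID_8021Q, 5 << 13 };
	/* Inner tag carries the real VLAN, here VID 100 (arbitrary). */
	struct vlan_tag inner = { TPID_8021Q, 100 };

	print_tag("outer", outer);	/* vid=0   -> untag and loop again  */
	print_tag("inner", inner);	/* vid=100 -> vlan_do_receive()     */
	return 0;
}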
+ */ + goto check_vlan_id; + } /* Note: we might in the future use prio bits * and set skb->priority like in vlan_do_receive() * For the time being, just ignore Priority Code Point diff --git a/net/core/ethtool.c b/net/core/ethtool.c index d08b1e19ce9c..4d1011b2e24f 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -3020,6 +3020,11 @@ ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input) match->mask.vlan.vlan_id = ntohs(ext_m_spec->vlan_tci) & 0x0fff; + match->key.vlan.vlan_dei = + !!(ext_h_spec->vlan_tci & htons(0x1000)); + match->mask.vlan.vlan_dei = + !!(ext_m_spec->vlan_tci & htons(0x1000)); + match->key.vlan.vlan_priority = (ntohs(ext_h_spec->vlan_tci) & 0xe000) >> 13; match->mask.vlan.vlan_priority = diff --git a/net/core/filter.c b/net/core/filter.c index cd09bf5d21f4..f615e42cf4ef 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5300,7 +5300,13 @@ __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, struct net *net; int sdif; - family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6; + if (len == sizeof(tuple->ipv4)) + family = AF_INET; + else if (len == sizeof(tuple->ipv6)) + family = AF_INET6; + else + return NULL; + if (unlikely(family == AF_UNSPEC || flags || !((s32)netns_id < 0 || netns_id <= S32_MAX))) goto out; @@ -5333,8 +5339,14 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto, netns_id, flags); - if (sk) + if (sk) { sk = sk_to_full_sk(sk); + if (!sk_fullsock(sk)) { + if (!sock_flag(sk, SOCK_RCU_FREE)) + sock_gen_put(sk); + return NULL; + } + } return sk; } @@ -5365,8 +5377,14 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id, flags); - if (sk) + if (sk) { sk = sk_to_full_sk(sk); + if (!sk_fullsock(sk)) { + if (!sock_flag(sk, SOCK_RCU_FREE)) + sock_gen_put(sk); + return NULL; + } + } return sk; } @@ -6726,6 +6744,7 @@ static bool sock_addr_is_valid_access(int off, int size, case BPF_CGROUP_INET4_BIND: case BPF_CGROUP_INET4_CONNECT: case BPF_CGROUP_UDP4_SENDMSG: + case BPF_CGROUP_UDP4_RECVMSG: break; default: return false; @@ -6736,6 +6755,7 @@ static bool sock_addr_is_valid_access(int off, int size, case BPF_CGROUP_INET6_BIND: case BPF_CGROUP_INET6_CONNECT: case BPF_CGROUP_UDP6_SENDMSG: + case BPF_CGROUP_UDP6_RECVMSG: break; default: return false; diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 0e2c07355463..9e7fc929bc50 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -3203,6 +3203,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos) } void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags) + __acquires(tbl->lock) __acquires(rcu_bh) { struct neigh_seq_state *state = seq->private; @@ -3213,6 +3214,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl rcu_read_lock_bh(); state->nht = rcu_dereference_bh(tbl->nht); + read_lock(&tbl->lock); return *pos ? 
neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN; } @@ -3246,8 +3248,13 @@ out: EXPORT_SYMBOL(neigh_seq_next); void neigh_seq_stop(struct seq_file *seq, void *v) + __releases(tbl->lock) __releases(rcu_bh) { + struct neigh_seq_state *state = seq->private; + struct neigh_table *tbl = state->tbl; + + read_unlock(&tbl->lock); rcu_read_unlock_bh(); } EXPORT_SYMBOL(neigh_seq_stop); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 47c1aa9ee045..c8cd99c3603f 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2337,6 +2337,7 @@ do_frag_list: kv.iov_base = skb->data + offset; kv.iov_len = slen; memset(&msg, 0, sizeof(msg)); + msg.msg_flags = MSG_DONTWAIT; ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen); if (ret <= 0) diff --git a/net/core/sock.c b/net/core/sock.c index 2b3701958486..af09a23e4822 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1850,6 +1850,9 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) goto out; } RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); +#ifdef CONFIG_BPF_SYSCALL + RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL); +#endif newsk->sk_err = 0; newsk->sk_err_soft = 0; @@ -2320,6 +2323,7 @@ static void sk_leave_memory_pressure(struct sock *sk) /* On 32bit arches, an skb frag is limited to 2^15 */ #define SKB_FRAG_PAGE_ORDER get_order(32768) +DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key); /** * skb_page_frag_refill - check that a page_frag contains enough room @@ -2344,7 +2348,8 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp) } pfrag->offset = 0; - if (SKB_FRAG_PAGE_ORDER) { + if (SKB_FRAG_PAGE_ORDER && + !static_branch_unlikely(&net_high_order_alloc_disable_key)) { /* Avoid direct reclaim but allow kswapd to wake */ pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP | __GFP_NOWARN | diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 1a2685694abd..f9204719aeee 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -562,6 +562,13 @@ static struct ctl_table net_core_table[] = { .extra1 = &zero, .extra2 = &two, }, + { + .procname = "high_order_alloc_disable", + .data = &net_high_order_alloc_disable_key.key, + .maxlen = sizeof(net_high_order_alloc_disable_key), + .mode = 0644, + .proc_handler = proc_do_static_key, + }, { } }; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index b80410673915..bfa49a88d03a 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -964,7 +964,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table, { struct net_device *dev; struct fib_result res; - int err; + int err = 0; if (nh->fib_nh_flags & RTNH_F_ONLINK) { unsigned int addr_type; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 8c9189a41b13..16f9159234a2 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -918,7 +918,7 @@ static int __ip_append_data(struct sock *sk, uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb)); if (!uarg) return -ENOBUFS; - extra_uref = !skb; /* only extra ref if !MSG_MORE */ + extra_uref = !skb_zcopy(skb); /* only ref on new uarg */ if (rt->dst.dev->features & NETIF_F_SG && csummode == CHECKSUM_PARTIAL) { paged = true; diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 4370f4246e86..073273b751f8 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -287,6 +287,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPAckCompressed", LINUX_MIB_TCPACKCOMPRESSED), SNMP_MIB_ITEM("TCPZeroWindowDrop", 
LINUX_MIB_TCPZEROWINDOWDROP), SNMP_MIB_ITEM("TCPRcvQDrop", LINUX_MIB_TCPRCVQDROP), + SNMP_MIB_ITEM("TCPWqueueTooBig", LINUX_MIB_TCPWQUEUETOOBIG), SNMP_MIB_SENTINEL }; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 875867b64d6a..b6f14af926fa 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -39,6 +39,8 @@ static int ip_local_port_range_min[] = { 1, 1 }; static int ip_local_port_range_max[] = { 65535, 65535 }; static int tcp_adv_win_scale_min = -31; static int tcp_adv_win_scale_max = 31; +static int tcp_min_snd_mss_min = TCP_MIN_SND_MSS; +static int tcp_min_snd_mss_max = 65535; static int ip_privileged_port_min; static int ip_privileged_port_max = 65535; static int ip_ttl_min = 1; @@ -559,6 +561,18 @@ static struct ctl_table ipv4_table[] = { .extra1 = &sysctl_fib_sync_mem_min, .extra2 = &sysctl_fib_sync_mem_max, }, + { + .procname = "tcp_rx_skb_cache", + .data = &tcp_rx_skb_cache_key.key, + .mode = 0644, + .proc_handler = proc_do_static_key, + }, + { + .procname = "tcp_tx_skb_cache", + .data = &tcp_tx_skb_cache_key.key, + .mode = 0644, + .proc_handler = proc_do_static_key, + }, { } }; @@ -758,6 +772,15 @@ static struct ctl_table ipv4_net_table[] = { .proc_handler = proc_dointvec, }, { + .procname = "tcp_min_snd_mss", + .data = &init_net.ipv4.sysctl_tcp_min_snd_mss, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &tcp_min_snd_mss_min, + .extra2 = &tcp_min_snd_mss_max, + }, + { .procname = "tcp_probe_threshold", .data = &init_net.ipv4.sysctl_tcp_probe_threshold, .maxlen = sizeof(int), diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f12d500ec85c..7dc9ab84bb69 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -317,6 +317,11 @@ struct tcp_splice_state { unsigned long tcp_memory_pressure __read_mostly; EXPORT_SYMBOL_GPL(tcp_memory_pressure); +DEFINE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key); +EXPORT_SYMBOL(tcp_rx_skb_cache_key); + +DEFINE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key); + void tcp_enter_memory_pressure(struct sock *sk) { unsigned long val; @@ -3868,6 +3873,7 @@ void __init tcp_init(void) unsigned long limit; unsigned int i; + BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE); BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb)); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 08a477e74cf3..d95ee40df6c2 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1302,7 +1302,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev, TCP_SKB_CB(skb)->seq += shifted; tcp_skb_pcount_add(prev, pcount); - BUG_ON(tcp_skb_pcount(skb) < pcount); + WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount); tcp_skb_pcount_add(skb, -pcount); /* When we're adding to gso_segs == 1, gso_size will be zero, @@ -1368,6 +1368,21 @@ static int skb_can_shift(const struct sk_buff *skb) return !skb_headlen(skb) && skb_is_nonlinear(skb); } +int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, + int pcount, int shiftlen) +{ + /* TCP min gso_size is 8 bytes (TCP_MIN_GSO_SIZE) + * Since TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, we need + * to make sure not storing more than 65535 * 8 bytes per skb, + * even if current MSS is bigger. + */ + if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE)) + return 0; + if (unlikely(tcp_skb_pcount(to) + pcount > 65535)) + return 0; + return skb_shift(to, from, shiftlen); +} + /* Try collapsing SACK blocks spanning across multiple skbs to a single * skb. 
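The two bounds enforced by the new tcp_skb_shift() helper above come from field widths: TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, so a collapsed skb may never account for more than 65535 segments, and each segment carries at least TCP_MIN_GSO_SIZE (8) bytes of payload, giving a byte ceiling of 65535 * 8. The standalone mirror below reuses the same two checks with plain integers; it is an illustration, not the kernel function:

#include <stdio.h>

/* Same guards as tcp_skb_shift(): refuse the shift if it would exceed
 * 65535 segments (tcp_gso_segs is u16) or 65535 * 8 bytes of payload
 * (8 = TCP_MIN_GSO_SIZE, the minimum payload per segment).
 */
static int can_shift(unsigned int to_len, unsigned int to_pcount,
		     unsigned int pcount, unsigned int shiftlen)
{
	if (to_len + shiftlen >= 65535u * 8)
		return 0;
	if (to_pcount + pcount > 65535u)
		return 0;
	return 1;
}

int main(void)
{
	printf("byte ceiling: %u\n", 65535u * 8);		/* 524280 */
	printf("near the cap: %d\n", can_shift(524000, 65000, 100, 200));
	printf("segment overflow: %d\n", can_shift(1000, 65500, 100, 200));
	return 0;
}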
*/ @@ -1473,7 +1488,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una)) goto fallback; - if (!skb_shift(prev, skb, len)) + if (!tcp_skb_shift(prev, skb, pcount, len)) goto fallback; if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack)) goto out; @@ -1491,11 +1506,10 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, goto out; len = skb->len; - if (skb_shift(prev, skb, len)) { - pcount += tcp_skb_pcount(skb); - tcp_shifted_skb(sk, prev, skb, state, tcp_skb_pcount(skb), + pcount = tcp_skb_pcount(skb); + if (tcp_skb_shift(prev, skb, pcount, len)) + tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, 0); - } out: return prev; @@ -2648,7 +2662,7 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack, struct tcp_sock *tp = tcp_sk(sk); bool recovered = !before(tp->snd_una, tp->high_seq); - if ((flag & FLAG_SND_UNA_ADVANCED) && + if ((flag & FLAG_SND_UNA_ADVANCED || tp->fastopen_rsk) && tcp_try_undo_loss(sk, false)) return; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index bc86f9735f45..cfa81190a1b1 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2628,6 +2628,7 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.sysctl_tcp_ecn_fallback = 1; net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS; + net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS; net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD; net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index f429e856e263..00c01a01b547 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1296,6 +1296,11 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, if (nsize < 0) nsize = 0; + if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf)) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG); + return -ENOMEM; + } + if (skb_unclone(skb, gfp)) return -ENOMEM; @@ -1454,8 +1459,7 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) mss_now -= icsk->icsk_ext_hdr_len; /* Then reserve room for full set of TCP options and 8 bytes of data */ - if (mss_now < 48) - mss_now = 48; + mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss); return mss_now; } @@ -2747,7 +2751,7 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) if (next_skb_size <= skb_availroom(skb)) skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size), next_skb_size); - else if (!skb_shift(skb, next_skb, next_skb_size)) + else if (!tcp_skb_shift(skb, next_skb, 1, next_skb_size)) return false; } tcp_highest_sack_replace(sk, next_skb, skb); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 5bad937ce779..c801cd37cc2a 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -155,6 +155,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1; mss = min(net->ipv4.sysctl_tcp_base_mss, mss); mss = max(mss, 68 - tcp_sk(sk)->tcp_header_len); + mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss); icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); } tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 7c6228fbf5dd..eed59c847722 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -498,7 +498,11 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, struct sock *udp4_lib_lookup_skb(struct sk_buff *skb, __be16 sport, 
__be16 dport) { - return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table); + const struct iphdr *iph = ip_hdr(skb); + + return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport, + iph->daddr, dport, inet_iif(skb), + inet_sdif(skb), &udp_table, NULL); } EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb); @@ -1773,6 +1777,10 @@ try_again: sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); *addr_len = sizeof(*sin); + + if (cgroup_bpf_enabled) + BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, + (struct sockaddr *)sin); } if (udp_sk(sk)->gro_enabled) diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index bafdd04a768d..375b4b4f9bf5 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -393,23 +393,28 @@ relookup_failed: return ERR_PTR(err); } -static int icmp6_iif(const struct sk_buff *skb) +static struct net_device *icmp6_dev(const struct sk_buff *skb) { - int iif = skb->dev->ifindex; + struct net_device *dev = skb->dev; /* for local traffic to local address, skb dev is the loopback * device. Check if there is a dst attached to the skb and if so * get the real device index. Same is needed for replies to a link * local address on a device enslaved to an L3 master device */ - if (unlikely(iif == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) { + if (unlikely(dev->ifindex == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) { const struct rt6_info *rt6 = skb_rt6_info(skb); if (rt6) - iif = rt6->rt6i_idev->dev->ifindex; + dev = rt6->rt6i_idev->dev; } - return iif; + return dev; +} + +static int icmp6_iif(const struct sk_buff *skb) +{ + return icmp6_dev(skb)->ifindex; } /* @@ -810,7 +815,7 @@ out: static int icmpv6_rcv(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); - struct net_device *dev = skb->dev; + struct net_device *dev = icmp6_dev(skb); struct inet6_dev *idev = __in6_dev_get(dev); const struct in6_addr *saddr, *daddr; struct icmp6hdr *hdr; diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 2f3eb7dc45da..545e339b8c4f 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -250,9 +250,9 @@ struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label) rcu_read_lock_bh(); for_each_sk_fl_rcu(np, sfl) { struct ip6_flowlabel *fl = sfl->fl; - if (fl->label == label) { + + if (fl->label == label && atomic_inc_not_zero(&fl->users)) { fl->lastuse = jiffies; - atomic_inc(&fl->users); rcu_read_unlock_bh(); return fl; } @@ -618,7 +618,8 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) goto done; } fl1 = sfl->fl; - atomic_inc(&fl1->users); + if (!atomic_inc_not_zero(&fl1->users)) + fl1 = NULL; break; } } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 934c88f128ab..834475717110 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1340,7 +1340,7 @@ emsgsize: uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb)); if (!uarg) return -ENOBUFS; - extra_uref = !skb; /* only extra ref if !MSG_MORE */ + extra_uref = !skb_zcopy(skb); /* only ref on new uarg */ if (rt->dst.dev->features & NETIF_F_SG && csummode == CHECKSUM_PARTIAL) { paged = true; diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 22369694b2fb..b2b2c0c38b87 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -298,7 +298,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb, skb_network_header_len(skb)); rcu_read_lock(); - __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS); + __IP6_INC_STATS(net, __in6_dev_stats_get(dev, skb), 
IPSTATS_MIB_REASMOKS); rcu_read_unlock(); fq->q.rb_fragments = RB_ROOT; fq->q.fragments_tail = NULL; @@ -312,7 +312,7 @@ out_oom: net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n"); out_fail: rcu_read_lock(); - __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); + __IP6_INC_STATS(net, __in6_dev_stats_get(dev, skb), IPSTATS_MIB_REASMFAILS); rcu_read_unlock(); inet_frag_kill(&fq->q); return -1; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index b3418a7c5c74..70b01bd95022 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -239,7 +239,7 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb, return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport, &iph->daddr, dport, inet6_iif(skb), - inet6_sdif(skb), &udp_table, skb); + inet6_sdif(skb), &udp_table, NULL); } EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb); @@ -365,6 +365,10 @@ try_again: inet6_iif(skb)); } *addr_len = sizeof(*sin6); + + if (cgroup_bpf_enabled) + BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, + (struct sockaddr *)sin6); } if (udp_sk(sk)->gro_enabled) @@ -511,7 +515,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, struct net *net = dev_net(skb->dev); sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source, - inet6_iif(skb), inet6_sdif(skb), udptable, skb); + inet6_iif(skb), inet6_sdif(skb), udptable, NULL); if (!sk) { /* No socket for error: try tunnels before discarding */ sk = ERR_PTR(-ENOENT); diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c index 03f0cd872dce..5d2d1f746b91 100644 --- a/net/lapb/lapb_iface.c +++ b/net/lapb/lapb_iface.c @@ -177,6 +177,7 @@ int lapb_unregister(struct net_device *dev) lapb = __lapb_devtostruct(dev); if (!lapb) goto out; + lapb_put(lapb); lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 073a8235ae1b..a86fcae279a6 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1435,7 +1435,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata) rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); - if (WARN_ON(!chanctx_conf)) { + if (WARN_ON_ONCE(!chanctx_conf)) { rcu_read_unlock(); return NULL; } @@ -2038,6 +2038,13 @@ void __ieee80211_flush_queues(struct ieee80211_local *local, static inline bool ieee80211_can_run_worker(struct ieee80211_local *local) { /* + * It's unsafe to try to do any work during reconfigure flow. + * When the flow ends the work will be requeued. + */ + if (local->in_reconfig) + return false; + + /* * If quiescing is set, we are racing with __ieee80211_suspend. * __ieee80211_suspend flushes the workers after setting quiescing, * and we check quiescing / suspended before enqueing new workers. 
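The local->in_reconfig check added above pairs with a hunk later in this series (net/mac80211/util.c, ieee80211_reconfig()) that re-queues every interface's work once reconfiguration finishes. As a rough, single-threaded userspace analogue of that defer-and-requeue pattern (illustrative only, not mac80211 code; all names below are made up):

#include <stdbool.h>
#include <stdio.h>

#define MAX_PENDING 8

typedef void (*work_fn)(void);

static bool in_reconfig;              /* stands in for local->in_reconfig */
static work_fn pending[MAX_PENDING];  /* work parked during reconfig */
static int npending;

static bool can_run_worker(void)
{
	/* like ieee80211_can_run_worker(): refuse to run while reconfiguring */
	return !in_reconfig;
}

static void queue_work(work_fn fn)
{
	if (!can_run_worker()) {
		if (npending < MAX_PENDING)
			pending[npending++] = fn;  /* park it, do not drop it */
		return;
	}
	fn();
}

static void reconfig_done(void)
{
	in_reconfig = false;
	while (npending > 0)               /* re-queue everything parked */
		queue_work(pending[--npending]);
}

static void hello(void) { puts("deferred work ran"); }

int main(void)
{
	in_reconfig = true;
	queue_work(hello);   /* parked: reconfig still in progress */
	reconfig_done();     /* prints "deferred work ran" */
	return 0;
}

Parking the work rather than running or discarding it is what keeps driver state consistent: nothing touches the hardware mid-reconfigure, and nothing queued during that window is lost.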
@@ -2225,6 +2232,9 @@ void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy, const u8 *addr); void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata); void ieee80211_tdls_chsw_work(struct work_struct *wk); +void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata, + const u8 *peer, u16 reason); +const char *ieee80211_get_reason_code_string(u16 reason_code); extern const struct ethtool_ops ieee80211_ethtool_ops; diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 20bf9db7a388..89f09a09efdb 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -268,11 +268,9 @@ int ieee80211_set_tx_key(struct ieee80211_key *key) { struct sta_info *sta = key->sta; struct ieee80211_local *local = key->local; - struct ieee80211_key *old; assert_key_lock(local); - old = key_mtx_dereference(local, sta->ptk[sta->ptk_idx]); sta->ptk_idx = key->conf.keyidx; ieee80211_check_fast_xmit(sta); diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 766e5e5bab8a..fe44f0d98de0 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -929,6 +929,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) /* flush STAs and mpaths on this iface */ sta_info_flush(sdata); + ieee80211_free_keys(sdata, true); mesh_path_flush_by_iface(sdata); /* stop the beacon */ @@ -1220,7 +1221,8 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata) ifmsh->chsw_ttl = 0; /* Remove the CSA and MCSP elements from the beacon */ - tmp_csa_settings = rcu_dereference(ifmsh->csa); + tmp_csa_settings = rcu_dereference_protected(ifmsh->csa, + lockdep_is_held(&sdata->wdev.mtx)); RCU_INIT_POINTER(ifmsh->csa, NULL); if (tmp_csa_settings) kfree_rcu(tmp_csa_settings, rcu_head); @@ -1242,6 +1244,8 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata, struct mesh_csa_settings *tmp_csa_settings; int ret = 0; + lockdep_assert_held(&sdata->wdev.mtx); + tmp_csa_settings = kmalloc(sizeof(*tmp_csa_settings), GFP_ATOMIC); if (!tmp_csa_settings) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index b7a9fe3d5fcb..383b0df100e4 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2963,7 +2963,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, #define case_WLAN(type) \ case WLAN_REASON_##type: return #type -static const char *ieee80211_get_reason_code_string(u16 reason_code) +const char *ieee80211_get_reason_code_string(u16 reason_code) { switch (reason_code) { case_WLAN(UNSPECIFIED); @@ -3028,6 +3028,11 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, if (len < 24 + 2) return; + if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) { + ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code); + return; + } + if (ifmgd->associated && ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) { const u8 *bssid = ifmgd->associated->bssid; @@ -3077,6 +3082,11 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); + if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) { + ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code); + return; + } + sdata_info(sdata, "disassociated from %pM (Reason: %u=%s)\n", mgmt->sa, reason_code, ieee80211_get_reason_code_string(reason_code)); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 25577ede2986..fd3740000e87 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3831,6 +3831,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) case NL80211_IFTYPE_STATION: if 
(!bssid && !sdata->u.mgd.use_4addr) return false; + if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta) + return false; if (multicast) return true; return ether_addr_equal(sdata->vif.addr, hdr->addr1); diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index ca97e1598c28..fca1f5477396 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -1993,3 +1993,26 @@ void ieee80211_tdls_chsw_work(struct work_struct *wk) } rtnl_unlock(); } + +void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata, + const u8 *peer, u16 reason) +{ + struct ieee80211_sta *sta; + + rcu_read_lock(); + sta = ieee80211_find_sta(&sdata->vif, peer); + if (!sta || !sta->tdls) { + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + tdls_dbg(sdata, "disconnected from TDLS peer %pM (Reason: %u=%s)\n", + peer, reason, + ieee80211_get_reason_code_string(reason)); + + ieee80211_tdls_oper_request(&sdata->vif, peer, + NL80211_TDLS_TEARDOWN, + WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE, + GFP_ATOMIC); +} diff --git a/net/mac80211/util.c b/net/mac80211/util.c index cba4633cd6cf..e2edc2a3cc8b 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2480,6 +2480,10 @@ int ieee80211_reconfig(struct ieee80211_local *local) mutex_lock(&local->mtx); ieee80211_start_next_roc(local); mutex_unlock(&local->mtx); + + /* Requeue all works */ + list_for_each_entry(sdata, &local->interfaces, list) + ieee80211_queue_work(&local->hw, &sdata->work); } ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, @@ -3795,7 +3799,9 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, } /* Always allow software iftypes */ - if (local->hw.wiphy->software_iftypes & BIT(iftype)) { + if (local->hw.wiphy->software_iftypes & BIT(iftype) || + (iftype == NL80211_IFTYPE_AP_VLAN && + local->hw.wiphy->flags & WIPHY_FLAG_4ADDR_AP)) { if (radar_detect) return -EINVAL; return 0; diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 58d0b258b684..5dd48f0a4b1b 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -1175,7 +1175,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_key *key = rx->key; struct ieee80211_mmie_16 *mmie; - u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN]; + u8 aad[GMAC_AAD_LEN], *mic, ipn[6], nonce[GMAC_NONCE_LEN]; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; if (!ieee80211_is_mgmt(hdr->frame_control)) @@ -1206,13 +1206,18 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx) memcpy(nonce, hdr->addr2, ETH_ALEN); memcpy(nonce + ETH_ALEN, ipn, 6); + mic = kmalloc(GMAC_MIC_LEN, GFP_ATOMIC); + if (!mic) + return RX_DROP_UNUSABLE; if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce, skb->data + 24, skb->len - 24, mic) < 0 || crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { key->u.aes_gmac.icverrors++; + kfree(mic); return RX_DROP_UNUSABLE; } + kfree(mic); } memcpy(key->u.aes_gmac.rx_pn, ipn, 6); diff --git a/net/mpls/Kconfig b/net/mpls/Kconfig index d9391beea980..d1ad69b7942a 100644 --- a/net/mpls/Kconfig +++ b/net/mpls/Kconfig @@ -26,6 +26,7 @@ config NET_MPLS_GSO config MPLS_ROUTING tristate "MPLS: routing support" depends on NET_IP_TUNNEL || NET_IP_TUNNEL=n + depends on PROC_SYSCTL ---help--- Add support for forwarding of mpls packets. 
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c index 500596130760..d25e91d7bdc1 100644 --- a/net/mpls/mpls_iptunnel.c +++ b/net/mpls/mpls_iptunnel.c @@ -23,7 +23,7 @@ #include "internal.h" static const struct nla_policy mpls_iptunnel_policy[MPLS_IPTUNNEL_MAX + 1] = { - [MPLS_IPTUNNEL_DST] = { .type = NLA_U32 }, + [MPLS_IPTUNNEL_DST] = { .len = sizeof(u32) }, [MPLS_IPTUNNEL_TTL] = { .type = NLA_U8 }, }; diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 1180b3e58a0a..ea64c90b14e8 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -911,7 +911,8 @@ static int nfc_genl_deactivate_target(struct sk_buff *skb, u32 device_idx, target_idx; int rc; - if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || + !info->attrs[NFC_ATTR_TARGET_INDEX]) return -EINVAL; device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 758bb1da31df..d2437b5b2f6a 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -157,7 +157,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms) { struct vport *vport; struct internal_dev *internal_dev; + struct net_device *dev; int err; + bool free_vport = true; vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms); if (IS_ERR(vport)) { @@ -165,8 +167,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms) goto error; } - vport->dev = alloc_netdev(sizeof(struct internal_dev), - parms->name, NET_NAME_USER, do_setup); + dev = alloc_netdev(sizeof(struct internal_dev), + parms->name, NET_NAME_USER, do_setup); + vport->dev = dev; if (!vport->dev) { err = -ENOMEM; goto error_free_vport; @@ -187,8 +190,10 @@ static struct vport *internal_dev_create(const struct vport_parms *parms) rtnl_lock(); err = register_netdevice(vport->dev); - if (err) + if (err) { + free_vport = false; goto error_unlock; + } dev_set_promiscuity(vport->dev, 1); rtnl_unlock(); @@ -198,11 +203,12 @@ static struct vport *internal_dev_create(const struct vport_parms *parms) error_unlock: rtnl_unlock(); - free_percpu(vport->dev->tstats); + free_percpu(dev->tstats); error_free_netdev: - free_netdev(vport->dev); + free_netdev(dev); error_free_vport: - ovs_vport_free(vport); + if (free_vport) + ovs_vport_free(vport); error: return ERR_PTR(err); } diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index c388372df0e2..eedd5786c084 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -320,10 +320,13 @@ static int fl_init(struct tcf_proto *tp) return rhashtable_init(&head->ht, &mask_ht_params); } -static void fl_mask_free(struct fl_flow_mask *mask) +static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done) { - WARN_ON(!list_empty(&mask->filters)); - rhashtable_destroy(&mask->ht); + /* temporary masks don't have their filters list and ht initialized */ + if (mask_init_done) { + WARN_ON(!list_empty(&mask->filters)); + rhashtable_destroy(&mask->ht); + } kfree(mask); } @@ -332,7 +335,15 @@ static void fl_mask_free_work(struct work_struct *work) struct fl_flow_mask *mask = container_of(to_rcu_work(work), struct fl_flow_mask, rwork); - fl_mask_free(mask); + fl_mask_free(mask, true); +} + +static void fl_uninit_mask_free_work(struct work_struct *work) +{ + struct fl_flow_mask *mask = container_of(to_rcu_work(work), + struct fl_flow_mask, rwork); + + fl_mask_free(mask, false); } static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask 
*mask) @@ -1346,9 +1357,6 @@ static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head, if (err) goto errout_destroy; - /* Wait until any potential concurrent users of mask are finished */ - synchronize_rcu(); - spin_lock(&head->masks_lock); list_add_tail_rcu(&newmask->list, &head->masks); spin_unlock(&head->masks_lock); @@ -1375,11 +1383,7 @@ static int fl_check_assign_mask(struct cls_fl_head *head, /* Insert mask as temporary node to prevent concurrent creation of mask * with same key. Any concurrent lookups with same key will return - * -EAGAIN because mask's refcnt is zero. It is safe to insert - * stack-allocated 'mask' to masks hash table because we call - * synchronize_rcu() before returning from this function (either in case - * of error or after replacing it with heap-allocated mask in - * fl_create_new_mask()). + * -EAGAIN because mask's refcnt is zero. */ fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht, &mask->ht_node, @@ -1414,8 +1418,6 @@ static int fl_check_assign_mask(struct cls_fl_head *head, errout_cleanup: rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params); - /* Wait until any potential concurrent users of mask are finished */ - synchronize_rcu(); return ret; } @@ -1644,7 +1646,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, *arg = fnew; kfree(tb); - kfree(mask); + tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work); return 0; errout_ht: @@ -1664,7 +1666,7 @@ errout: errout_tb: kfree(tb); errout_mask_alloc: - kfree(mask); + tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work); errout_fold: if (fold) __fl_put(fold); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index f17908f5c4f3..9b0e5b0d701a 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2583,6 +2583,8 @@ do_addr_param: case SCTP_PARAM_STATE_COOKIE: asoc->peer.cookie_len = ntohs(param.p->length) - sizeof(struct sctp_paramhdr); + if (asoc->peer.cookie) + kfree(asoc->peer.cookie); asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp); if (!asoc->peer.cookie) retval = 0; @@ -2647,6 +2649,8 @@ do_addr_param: goto fall_through; /* Save peer's random parameter */ + if (asoc->peer.peer_random) + kfree(asoc->peer.peer_random); asoc->peer.peer_random = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_random) { @@ -2660,6 +2664,8 @@ do_addr_param: goto fall_through; /* Save peer's HMAC list */ + if (asoc->peer.peer_hmacs) + kfree(asoc->peer.peer_hmacs); asoc->peer.peer_hmacs = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_hmacs) { @@ -2675,6 +2681,8 @@ do_addr_param: if (!ep->auth_enable) goto fall_through; + if (asoc->peer.peer_chunks) + kfree(asoc->peer.peer_chunks); asoc->peer.peer_chunks = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_chunks) diff --git a/net/tipc/group.c b/net/tipc/group.c index 992be6113676..5f98d38bcf08 100644 --- a/net/tipc/group.c +++ b/net/tipc/group.c @@ -218,6 +218,7 @@ void tipc_group_delete(struct net *net, struct tipc_group *grp) rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) { tipc_group_proto_xmit(grp, m, GRP_LEAVE_MSG, &xmitq); + __skb_queue_purge(&m->deferredq); list_del(&m->list); kfree(m); } diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 960494f437ac..455a782c7658 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1143,7 +1143,6 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page, full_record = false; record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size; - 
copied = 0; copy = size; if (copy >= record_room) { copy = record_room; diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c index 0a6fe0649945..fb2df6e068fa 100644 --- a/net/vmw_vsock/hyperv_transport.c +++ b/net/vmw_vsock/hyperv_transport.c @@ -335,8 +335,8 @@ static void hvs_open_connection(struct vmbus_channel *chan) struct sockaddr_vm addr; struct sock *sk, *new = NULL; - struct vsock_sock *vnew; - struct hvsock *hvs, *hvs_new; + struct vsock_sock *vnew = NULL; + struct hvsock *hvs, *hvs_new = NULL; int ret; if_type = &chan->offermsg.offer.if_type; diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index f3f3d06cb6d8..e30f53728725 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -871,8 +871,10 @@ virtio_transport_recv_connected(struct sock *sk, if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND) vsk->peer_shutdown |= SEND_SHUTDOWN; if (vsk->peer_shutdown == SHUTDOWN_MASK && - vsock_stream_has_data(vsk) <= 0) + vsock_stream_has_data(vsk) <= 0) { + sock_set_flag(sk, SOCK_DONE); sk->sk_state = TCP_CLOSING; + } if (le32_to_cpu(pkt->hdr.flags)) sk->sk_state_change(sk); break; diff --git a/net/wireless/Makefile b/net/wireless/Makefile index 72a224ce8e0a..2eee93985ab0 100644 --- a/net/wireless/Makefile +++ b/net/wireless/Makefile @@ -39,6 +39,7 @@ $(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \ @(set -e; \ allf=""; \ for f in $^ ; do \ + test -f $$f || continue;\ # similar to hexdump -v -e '1/1 "0x%.2x," "\n"' \ thisf=$$(od -An -v -tx1 < $$f | \ sed -e 's/ /\n/g' | \ diff --git a/net/wireless/core.c b/net/wireless/core.c index 037816163e70..53ad3dbb76fe 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -514,7 +514,7 @@ use_default_name: &rdev->rfkill_ops, rdev); if (!rdev->rfkill) { - kfree(rdev); + wiphy_free(&rdev->wiphy); return NULL; } @@ -1397,8 +1397,12 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, } break; case NETDEV_PRE_UP: - if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) + if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)) && + !(wdev->iftype == NL80211_IFTYPE_AP_VLAN && + rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP && + wdev->use_4addr)) return notifier_from_errno(-EOPNOTSUPP); + if (rfkill_blocked(rdev->rfkill)) return notifier_from_errno(-ERFKILL); break; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c391b560d986..520d437aa8d1 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -304,8 +304,11 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 }, - [NL80211_ATTR_MAC] = { .len = ETH_ALEN }, - [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN }, + [NL80211_ATTR_MAC] = { .type = NLA_EXACT_LEN_WARN, .len = ETH_ALEN }, + [NL80211_ATTR_PREV_BSSID] = { + .type = NLA_EXACT_LEN_WARN, + .len = ETH_ALEN + }, [NL80211_ATTR_KEY] = { .type = NLA_NESTED, }, [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY, @@ -356,7 +359,10 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED }, [NL80211_ATTR_SUPPORT_MESH_AUTH] = { .type = NLA_FLAG }, - [NL80211_ATTR_HT_CAPABILITY] = { .len = NL80211_HT_CAPABILITY_LEN }, + [NL80211_ATTR_HT_CAPABILITY] = { + .type = NLA_EXACT_LEN_WARN, + .len = NL80211_HT_CAPABILITY_LEN + }, [NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 }, [NL80211_ATTR_IE] = 
NLA_POLICY_VALIDATE_FN(NLA_BINARY, @@ -386,7 +392,10 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, [NL80211_ATTR_PID] = { .type = NLA_U32 }, [NL80211_ATTR_4ADDR] = { .type = NLA_U8 }, - [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN }, + [NL80211_ATTR_PMKID] = { + .type = NLA_EXACT_LEN_WARN, + .len = WLAN_PMKID_LEN + }, [NL80211_ATTR_DURATION] = { .type = NLA_U32 }, [NL80211_ATTR_COOKIE] = { .type = NLA_U64 }, [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED }, @@ -448,7 +457,10 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_WDEV] = { .type = NLA_U64 }, [NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 }, [NL80211_ATTR_AUTH_DATA] = { .type = NLA_BINARY, }, - [NL80211_ATTR_VHT_CAPABILITY] = { .len = NL80211_VHT_CAPABILITY_LEN }, + [NL80211_ATTR_VHT_CAPABILITY] = { + .type = NLA_EXACT_LEN_WARN, + .len = NL80211_VHT_CAPABILITY_LEN + }, [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 }, [NL80211_ATTR_P2P_CTWINDOW] = NLA_POLICY_MAX(NLA_U8, 127), [NL80211_ATTR_P2P_OPPPS] = NLA_POLICY_MAX(NLA_U8, 1), @@ -484,7 +496,10 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_VENDOR_DATA] = { .type = NLA_BINARY }, [NL80211_ATTR_QOS_MAP] = { .type = NLA_BINARY, .len = IEEE80211_QOS_MAP_LEN_MAX }, - [NL80211_ATTR_MAC_HINT] = { .len = ETH_ALEN }, + [NL80211_ATTR_MAC_HINT] = { + .type = NLA_EXACT_LEN_WARN, + .len = ETH_ALEN + }, [NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 }, [NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 }, [NL80211_ATTR_SOCKET_OWNER] = { .type = NLA_FLAG }, @@ -495,7 +510,10 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_UPS - 1), [NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 }, [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 }, - [NL80211_ATTR_MAC_MASK] = { .len = ETH_ALEN }, + [NL80211_ATTR_MAC_MASK] = { + .type = NLA_EXACT_LEN_WARN, + .len = ETH_ALEN + }, [NL80211_ATTR_WIPHY_SELF_MANAGED_REG] = { .type = NLA_FLAG }, [NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 }, [NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 }, @@ -507,15 +525,21 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_MU_MIMO_GROUP_DATA] = { .len = VHT_MUMIMO_GROUPS_DATA_LEN }, - [NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR] = { .len = ETH_ALEN }, + [NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR] = { + .type = NLA_EXACT_LEN_WARN, + .len = ETH_ALEN + }, [NL80211_ATTR_NAN_MASTER_PREF] = NLA_POLICY_MIN(NLA_U8, 1), [NL80211_ATTR_BANDS] = { .type = NLA_U32 }, [NL80211_ATTR_NAN_FUNC] = { .type = NLA_NESTED }, [NL80211_ATTR_FILS_KEK] = { .type = NLA_BINARY, .len = FILS_MAX_KEK_LEN }, - [NL80211_ATTR_FILS_NONCES] = { .len = 2 * FILS_NONCE_LEN }, + [NL80211_ATTR_FILS_NONCES] = { + .type = NLA_EXACT_LEN_WARN, + .len = 2 * FILS_NONCE_LEN + }, [NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED] = { .type = NLA_FLAG, }, - [NL80211_ATTR_BSSID] = { .len = ETH_ALEN }, + [NL80211_ATTR_BSSID] = { .type = NLA_EXACT_LEN_WARN, .len = ETH_ALEN }, [NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] = { .type = NLA_S8 }, [NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST] = { .len = sizeof(struct nl80211_bss_select_rssi_adjust) @@ -528,7 +552,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] = { .type = NLA_U16 }, [NL80211_ATTR_FILS_ERP_RRK] = { .type = NLA_BINARY, .len = FILS_ERP_MAX_RRK_LEN }, - [NL80211_ATTR_FILS_CACHE_ID] = { .len = 2 }, + [NL80211_ATTR_FILS_CACHE_ID] = { .type = NLA_EXACT_LEN_WARN, .len = 2 }, 
[NL80211_ATTR_PMK] = { .type = NLA_BINARY, .len = PMK_MAX_LEN }, [NL80211_ATTR_SCHED_SCAN_MULTI] = { .type = NLA_FLAG }, [NL80211_ATTR_EXTERNAL_AUTH_SUPPORT] = { .type = NLA_FLAG }, @@ -589,10 +613,13 @@ static const struct nla_policy nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = { [NL80211_WOWLAN_TCP_SRC_IPV4] = { .type = NLA_U32 }, [NL80211_WOWLAN_TCP_DST_IPV4] = { .type = NLA_U32 }, - [NL80211_WOWLAN_TCP_DST_MAC] = { .len = ETH_ALEN }, + [NL80211_WOWLAN_TCP_DST_MAC] = { + .type = NLA_EXACT_LEN_WARN, + .len = ETH_ALEN + }, [NL80211_WOWLAN_TCP_SRC_PORT] = { .type = NLA_U16 }, [NL80211_WOWLAN_TCP_DST_PORT] = { .type = NLA_U16 }, - [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = { .len = 1 }, + [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = { .type = NLA_MIN_LEN, .len = 1 }, [NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ] = { .len = sizeof(struct nl80211_wowlan_tcp_data_seq) }, @@ -600,8 +627,8 @@ nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = { .len = sizeof(struct nl80211_wowlan_tcp_data_token) }, [NL80211_WOWLAN_TCP_DATA_INTERVAL] = { .type = NLA_U32 }, - [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = { .len = 1 }, - [NL80211_WOWLAN_TCP_WAKE_MASK] = { .len = 1 }, + [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = { .type = NLA_MIN_LEN, .len = 1 }, + [NL80211_WOWLAN_TCP_WAKE_MASK] = { .type = NLA_MIN_LEN, .len = 1 }, }; #endif /* CONFIG_PM */ @@ -619,9 +646,18 @@ nl80211_coalesce_policy[NUM_NL80211_ATTR_COALESCE_RULE] = { /* policy for GTK rekey offload attributes */ static const struct nla_policy nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = { - [NL80211_REKEY_DATA_KEK] = { .len = NL80211_KEK_LEN }, - [NL80211_REKEY_DATA_KCK] = { .len = NL80211_KCK_LEN }, - [NL80211_REKEY_DATA_REPLAY_CTR] = { .len = NL80211_REPLAY_CTR_LEN }, + [NL80211_REKEY_DATA_KEK] = { + .type = NLA_EXACT_LEN_WARN, + .len = NL80211_KEK_LEN, + }, + [NL80211_REKEY_DATA_KCK] = { + .type = NLA_EXACT_LEN_WARN, + .len = NL80211_KCK_LEN, + }, + [NL80211_REKEY_DATA_REPLAY_CTR] = { + .type = NLA_EXACT_LEN_WARN, + .len = NL80211_REPLAY_CTR_LEN + }, }; static const struct nla_policy @@ -635,7 +671,10 @@ static const struct nla_policy nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = { [NL80211_SCHED_SCAN_MATCH_ATTR_SSID] = { .type = NLA_BINARY, .len = IEEE80211_MAX_SSID_LEN }, - [NL80211_SCHED_SCAN_MATCH_ATTR_BSSID] = { .len = ETH_ALEN }, + [NL80211_SCHED_SCAN_MATCH_ATTR_BSSID] = { + .type = NLA_EXACT_LEN_WARN, + .len = ETH_ALEN + }, [NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 }, [NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI] = NLA_POLICY_NESTED(nl80211_match_band_rssi_policy), @@ -667,7 +706,10 @@ nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = { [NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE] = { .type = NLA_FLAG }, [NL80211_NAN_FUNC_FOLLOW_UP_ID] = { .type = NLA_U8 }, [NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] = { .type = NLA_U8 }, - [NL80211_NAN_FUNC_FOLLOW_UP_DEST] = { .len = ETH_ALEN }, + [NL80211_NAN_FUNC_FOLLOW_UP_DEST] = { + .type = NLA_EXACT_LEN_WARN, + .len = ETH_ALEN + }, [NL80211_NAN_FUNC_CLOSE_RANGE] = { .type = NLA_FLAG }, [NL80211_NAN_FUNC_TTL] = { .type = NLA_U32 }, [NL80211_NAN_FUNC_SERVICE_INFO] = { .type = NLA_BINARY, @@ -3420,8 +3462,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_IFTYPE]) type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); - if (!rdev->ops->add_virtual_intf || - !(rdev->wiphy.interface_modes & (1 << type))) + if (!rdev->ops->add_virtual_intf) return -EOPNOTSUPP; if ((type == NL80211_IFTYPE_P2P_DEVICE || type == NL80211_IFTYPE_NAN || @@ -3440,6 +3481,11 @@ 
static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) return err; } + if (!(rdev->wiphy.interface_modes & (1 << type)) && + !(type == NL80211_IFTYPE_AP_VLAN && params.use_4addr && + rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP)) + return -EOPNOTSUPP; + err = nl80211_parse_mon_options(rdev, type, info, &params); if (err < 0) return err; @@ -4057,7 +4103,10 @@ static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = { .len = NL80211_MAX_SUPP_RATES }, [NL80211_TXRATE_HT] = { .type = NLA_BINARY, .len = NL80211_MAX_SUPP_HT_RATES }, - [NL80211_TXRATE_VHT] = { .len = sizeof(struct nl80211_txrate_vht)}, + [NL80211_TXRATE_VHT] = { + .type = NLA_EXACT_LEN_WARN, + .len = sizeof(struct nl80211_txrate_vht), + }, [NL80211_TXRATE_GI] = { .type = NLA_U8 }, }; @@ -4856,8 +4905,10 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, struct nlattr *sinfoattr, *bss_param; hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); - if (!hdr) + if (!hdr) { + cfg80211_sinfo_release_content(sinfo); return -1; + } if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) || diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c index 1b190475359a..c09fbf09549d 100644 --- a/net/wireless/pmsr.c +++ b/net/wireless/pmsr.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 - 2019 Intel Corporation */ #ifndef __PMSR_H #define __PMSR_H @@ -448,7 +448,7 @@ static int nl80211_pmsr_send_result(struct sk_buff *msg, if (res->ap_tsf_valid && nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_AP_TSF, - res->host_time, NL80211_PMSR_RESP_ATTR_PAD)) + res->ap_tsf, NL80211_PMSR_RESP_ATTR_PAD)) goto error; if (res->final && nla_put_flag(msg, NL80211_PMSR_RESP_ATTR_FINAL)) goto error; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index c04f5451f89b..aa571d727903 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -1601,12 +1601,12 @@ static void cfg80211_parse_mbssid_data(struct wiphy *wiphy, continue; } - if (seen_indices & BIT(mbssid_index_ie[2])) + if (seen_indices & BIT_ULL(mbssid_index_ie[2])) /* We don't support legacy split of a profile */ net_dbg_ratelimited("Partial info for BSSID index %d\n", mbssid_index_ie[2]); - seen_indices |= BIT(mbssid_index_ie[2]); + seen_indices |= BIT_ULL(mbssid_index_ie[2]); non_tx_data->bssid_index = mbssid_index_ie[2]; non_tx_data->max_bssid_indicator = elem->data[0]; diff --git a/net/wireless/util.c b/net/wireless/util.c index cf63b635afc0..1c39d6a2e850 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -1246,7 +1246,7 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate) if (rate->he_dcm) result /= 2; - return result; + return result / 10000; } u32 cfg80211_calculate_bitrate(struct rate_info *rate) @@ -1998,7 +1998,7 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, continue; if (supp >= mcs_encoding) { - max_vht_nss = i; + max_vht_nss = i + 1; break; } } diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index 2b18223e7eb8..9c6de4f114f8 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -143,6 +143,9 @@ static void xdp_umem_clear_dev(struct xdp_umem *umem) struct netdev_bpf bpf; int err; + if (!umem->dev) + return; + if (umem->zc) { bpf.command = XDP_SETUP_XSK_UMEM; bpf.xsk.umem = NULL; @@ -156,11 +159,9 @@ static void xdp_umem_clear_dev(struct xdp_umem *umem) WARN(1, "failed to disable umem!\n"); } - if (umem->dev) { - rtnl_lock(); -
xdp_clear_umem_at_qid(umem->dev, umem->queue_id); - rtnl_unlock(); - } + rtnl_lock(); + xdp_clear_umem_at_qid(umem->dev, umem->queue_id); + rtnl_unlock(); if (umem->zc) { dev_put(umem->dev); diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c index eae7b635343d..6e87cc831e84 100644 --- a/samples/bpf/bpf_load.c +++ b/samples/bpf/bpf_load.c @@ -678,7 +678,7 @@ void read_trace_pipe(void) static char buf[4096]; ssize_t sz; - sz = read(trace_fd, buf, sizeof(buf)); + sz = read(trace_fd, buf, sizeof(buf) - 1); if (sz > 0) { buf[sz] = 0; puts(buf); diff --git a/samples/bpf/task_fd_query_user.c b/samples/bpf/task_fd_query_user.c index aff2b4ae914e..e39938058223 100644 --- a/samples/bpf/task_fd_query_user.c +++ b/samples/bpf/task_fd_query_user.c @@ -216,7 +216,7 @@ static int test_debug_fs_uprobe(char *binary_path, long offset, bool is_return) { const char *event_type = "uprobe"; struct perf_event_attr attr = {}; - char buf[256], event_alias[256]; + char buf[256], event_alias[sizeof("test_1234567890")]; __u64 probe_offset, probe_addr; __u32 len, prog_id, fd_type; int err, res, kfd, efd; diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst index ac26876389c2..e744b3e4e56a 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst @@ -29,7 +29,7 @@ CGROUP COMMANDS | *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* } | *ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** | | **bind4** | **bind6** | **post_bind4** | **post_bind6** | **connect4** | **connect6** | -| **sendmsg4** | **sendmsg6** | **sysctl** } +| **sendmsg4** | **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** } | *ATTACH_FLAGS* := { **multi** | **override** } DESCRIPTION @@ -86,6 +86,10 @@ DESCRIPTION unconnected udp4 socket (since 4.18); **sendmsg6** call to sendto(2), sendmsg(2), sendmmsg(2) for an unconnected udp6 socket (since 4.18); + **recvmsg4** call to recvfrom(2), recvmsg(2), recvmmsg(2) for + an unconnected udp4 socket (since 5.2); + **recvmsg6** call to recvfrom(2), recvmsg(2), recvmmsg(2) for + an unconnected udp6 socket (since 5.2); **sysctl** sysctl access (since 5.2). 
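The two new attach points can be exercised from userspace like any other cgroup attach type. A minimal sketch using libbpf's bpf_prog_attach() (not part of this series; it assumes uapi headers that already define the new attach types, that prog_fd refers to an already-loaded BPF_PROG_TYPE_CGROUP_SOCK_ADDR program, and the cgroup path is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>	/* bpf_prog_attach(), enum bpf_attach_type */

/* Attach an already-loaded cgroup/recvmsg4 program to a cgroup. */
static int attach_recvmsg4(int prog_fd, const char *cgroup_path)
{
	int cg_fd, err;

	cg_fd = open(cgroup_path, O_RDONLY);
	if (cg_fd < 0) {
		perror("open cgroup");
		return -1;
	}

	err = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_UDP4_RECVMSG, 0);
	if (err)
		perror("bpf_prog_attach");

	close(cg_fd);
	return err;
}

Detaching should work symmetrically via bpf_prog_detach2() with the same attach type; the recvmsg4/recvmsg6 keywords added to bpftool below wrap the same operation.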
**bpftool cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG* diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index e8118544d118..018ecef8dc13 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst @@ -40,7 +40,7 @@ PROG COMMANDS | **lwt_seg6local** | **sockops** | **sk_skb** | **sk_msg** | **lirc_mode2** | | **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** | | **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** | -| **cgroup/sysctl** +| **cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/sysctl** | } | *ATTACH_TYPE* := { | **msg_verdict** | **stream_verdict** | **stream_parser** | **flow_dissector** diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index 50e402a5a9c8..4300adf6e5ab 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -371,6 +371,7 @@ _bpftool() lirc_mode2 cgroup/bind4 cgroup/bind6 \ cgroup/connect4 cgroup/connect6 \ cgroup/sendmsg4 cgroup/sendmsg6 \ + cgroup/recvmsg4 cgroup/recvmsg6 \ cgroup/post_bind4 cgroup/post_bind6 \ cgroup/sysctl" -- \ "$cur" ) ) @@ -666,7 +667,7 @@ _bpftool() attach|detach) local ATTACH_TYPES='ingress egress sock_create sock_ops \ device bind4 bind6 post_bind4 post_bind6 connect4 \ - connect6 sendmsg4 sendmsg6 sysctl' + connect6 sendmsg4 sendmsg6 recvmsg4 recvmsg6 sysctl' local ATTACH_FLAGS='multi override' local PROG_TYPE='id pinned tag' case $prev in @@ -676,7 +677,7 @@ _bpftool() ;; ingress|egress|sock_create|sock_ops|device|bind4|bind6|\ post_bind4|post_bind6|connect4|connect6|sendmsg4|\ - sendmsg6|sysctl) + sendmsg6|recvmsg4|recvmsg6|sysctl) COMPREPLY=( $( compgen -W "$PROG_TYPE" -- \ "$cur" ) ) return 0 diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c index 7e22f115c8c1..73ec8ea33fb4 100644 --- a/tools/bpf/bpftool/cgroup.c +++ b/tools/bpf/bpftool/cgroup.c @@ -25,7 +25,8 @@ " ATTACH_TYPE := { ingress | egress | sock_create |\n" \ " sock_ops | device | bind4 | bind6 |\n" \ " post_bind4 | post_bind6 | connect4 |\n" \ - " connect6 | sendmsg4 | sendmsg6 | sysctl }" + " connect6 | sendmsg4 | sendmsg6 |\n" \ + " recvmsg4 | recvmsg6 | sysctl }" static const char * const attach_type_strings[] = { [BPF_CGROUP_INET_INGRESS] = "ingress", @@ -42,6 +43,8 @@ static const char * const attach_type_strings[] = { [BPF_CGROUP_UDP4_SENDMSG] = "sendmsg4", [BPF_CGROUP_UDP6_SENDMSG] = "sendmsg6", [BPF_CGROUP_SYSCTL] = "sysctl", + [BPF_CGROUP_UDP4_RECVMSG] = "recvmsg4", + [BPF_CGROUP_UDP6_RECVMSG] = "recvmsg6", [__MAX_BPF_ATTACH_TYPE] = NULL, }; diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 3ec82904ccec..5da5a7311f13 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -716,12 +716,14 @@ static int dump_map_elem(int fd, void *key, void *value, return 0; if (json_output) { + jsonw_start_object(json_wtr); jsonw_name(json_wtr, "key"); print_hex_data_json(key, map_info->key_size); jsonw_name(json_wtr, "value"); jsonw_start_object(json_wtr); jsonw_string_field(json_wtr, "error", strerror(lookup_errno)); jsonw_end_object(json_wtr); + jsonw_end_object(json_wtr); } else { const char *msg = NULL; diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 26336bad0442..7a4e21a31523 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -1063,7 +1063,8 @@ static int do_help(int argc, char **argv) " 
sk_reuseport | flow_dissector | cgroup/sysctl |\n" " cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n" " cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n" - " cgroup/sendmsg4 | cgroup/sendmsg6 }\n" + " cgroup/sendmsg4 | cgroup/sendmsg6 | cgroup/recvmsg4 |\n" + " cgroup/recvmsg6 }\n" " ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n" " flow_dissector }\n" " " HELP_SPEC_OPTIONS "\n" diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 63e0cf66f01a..a8b823c30b43 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -192,6 +192,8 @@ enum bpf_attach_type { BPF_LIRC_MODE2, BPF_FLOW_DISSECTOR, BPF_CGROUP_SYSCTL, + BPF_CGROUP_UDP4_RECVMSG, + BPF_CGROUP_UDP6_RECVMSG, __MAX_BPF_ATTACH_TYPE }; @@ -3376,8 +3378,8 @@ struct bpf_raw_tracepoint_args { /* DIRECT: Skip the FIB rules and go to FIB table associated with device * OUTPUT: Do lookup from egress perspective; default is ingress */ -#define BPF_FIB_LOOKUP_DIRECT BIT(0) -#define BPF_FIB_LOOKUP_OUTPUT BIT(1) +#define BPF_FIB_LOOKUP_DIRECT (1U << 0) +#define BPF_FIB_LOOKUP_OUTPUT (1U << 1) enum { BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 197b574406b3..151f7ac1882e 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1645,14 +1645,16 @@ static int bpf_object__probe_btf_func(struct bpf_object *obj) /* FUNC x */ /* [3] */ BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2), }; - int res; + int btf_fd; - res = libbpf__probe_raw_btf((char *)types, sizeof(types), - strs, sizeof(strs)); - if (res < 0) - return res; - if (res > 0) + btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs)); + if (btf_fd >= 0) { obj->caps.btf_func = 1; + close(btf_fd); + return 1; + } + return 0; } @@ -1670,14 +1672,16 @@ static int bpf_object__probe_btf_datasec(struct bpf_object *obj) BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), BTF_VAR_SECINFO_ENC(2, 0, 4), }; - int res; + int btf_fd; - res = libbpf__probe_raw_btf((char *)types, sizeof(types), - strs, sizeof(strs)); - if (res < 0) - return res; - if (res > 0) + btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs)); + if (btf_fd >= 0) { obj->caps.btf_datasec = 1; + close(btf_fd); + return 1; + } + return 0; } @@ -3206,6 +3210,10 @@ static const struct { BPF_CGROUP_UDP4_SENDMSG), BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG), + BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, + BPF_CGROUP_UDP4_RECVMSG), + BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, + BPF_CGROUP_UDP6_RECVMSG), BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_CGROUP_SYSCTL), }; diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index f3025b4d90e1..dfab8012185c 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -34,7 +34,7 @@ do { \ #define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__) #define pr_debug(fmt, ...) 
__pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__) -int libbpf__probe_raw_btf(const char *raw_types, size_t types_len, - const char *str_sec, size_t str_len); +int libbpf__load_raw_btf(const char *raw_types, size_t types_len, + const char *str_sec, size_t str_len); #endif /* __LIBBPF_LIBBPF_INTERNAL_H */ diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index 5e2aa83f637a..6635a31a7a16 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -133,8 +133,8 @@ bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex) return errno != EINVAL && errno != EOPNOTSUPP; } -int libbpf__probe_raw_btf(const char *raw_types, size_t types_len, - const char *str_sec, size_t str_len) +int libbpf__load_raw_btf(const char *raw_types, size_t types_len, + const char *str_sec, size_t str_len) { struct btf_header hdr = { .magic = BTF_MAGIC, @@ -157,14 +157,9 @@ int libbpf__probe_raw_btf(const char *raw_types, size_t types_len, memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len); btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false); - if (btf_fd < 0) { - free(raw_btf); - return 0; - } - close(btf_fd); free(raw_btf); - return 1; + return btf_fd; } static int load_sk_storage_btf(void) @@ -190,7 +185,7 @@ static int load_sk_storage_btf(void) BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */ }; - return libbpf__probe_raw_btf((char *)types, sizeof(types), + return libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs)); } diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 66f2dca1dee1..e36356e2377e 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -21,8 +21,8 @@ LDLIBS += -lcap -lelf -lrt -lpthread # Order correspond to 'make run_tests' order TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \ - test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \ - test_socket_cookie test_cgroup_storage test_select_reuseport test_section_names \ + test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \ + test_cgroup_storage test_select_reuseport test_section_names \ test_netcnt test_tcpnotify_user test_sock_fields test_sysctl BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c))) @@ -63,7 +63,8 @@ TEST_PROGS_EXTENDED := with_addr.sh \ # Compile but not part of 'make run_tests' TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \ - flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user + flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \ + test_lirc_mode2_user include ../lib.mk diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index fbd1d88a6095..c938283ac232 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -3,6 +3,7 @@ #include <error.h> #include <linux/if.h> #include <linux/if_tun.h> +#include <sys/uio.h> #define CHECK_FLOW_KEYS(desc, got, expected) \ CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0, \ diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c index 02d7c871862a..006be3963977 100644 --- a/tools/testing/selftests/bpf/test_lpm_map.c +++ b/tools/testing/selftests/bpf/test_lpm_map.c @@ -573,13 +573,13 @@ static void 
test_lpm_get_next_key(void) /* add one more element (total two) */ key_p->prefixlen = 24; - inet_pton(AF_INET, "192.168.0.0", key_p->data); + inet_pton(AF_INET, "192.168.128.0", key_p->data); assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0); memset(key_p, 0, key_size); assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0); assert(key_p->prefixlen == 24 && key_p->data[0] == 192 && - key_p->data[1] == 168 && key_p->data[2] == 0); + key_p->data[1] == 168 && key_p->data[2] == 128); memset(next_key_p, 0, key_size); assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); @@ -592,7 +592,7 @@ static void test_lpm_get_next_key(void) /* Add one more element (total three) */ key_p->prefixlen = 24; - inet_pton(AF_INET, "192.168.128.0", key_p->data); + inet_pton(AF_INET, "192.168.0.0", key_p->data); assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0); memset(key_p, 0, key_size); @@ -643,6 +643,41 @@ static void test_lpm_get_next_key(void) assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 && errno == ENOENT); + /* Add one more element (total five) */ + key_p->prefixlen = 28; + inet_pton(AF_INET, "192.168.1.128", key_p->data); + assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0); + + memset(key_p, 0, key_size); + assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0); + assert(key_p->prefixlen == 24 && key_p->data[0] == 192 && + key_p->data[1] == 168 && key_p->data[2] == 0); + + memset(next_key_p, 0, key_size); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); + assert(next_key_p->prefixlen == 28 && next_key_p->data[0] == 192 && + next_key_p->data[1] == 168 && next_key_p->data[2] == 1 && + next_key_p->data[3] == 128); + + memcpy(key_p, next_key_p, key_size); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); + assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 && + next_key_p->data[1] == 168 && next_key_p->data[2] == 1); + + memcpy(key_p, next_key_p, key_size); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); + assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 && + next_key_p->data[1] == 168 && next_key_p->data[2] == 128); + + memcpy(key_p, next_key_p, key_size); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); + assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 && + next_key_p->data[1] == 168); + + memcpy(key_p, next_key_p, key_size); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 && + errno == ENOENT); + /* no exact matching key should return the first one in post order */ key_p->prefixlen = 22; inet_pton(AF_INET, "192.168.1.0", key_p->data); diff --git a/tools/testing/selftests/bpf/test_section_names.c b/tools/testing/selftests/bpf/test_section_names.c index bebd4fbca1f4..dee2f2eceb0f 100644 --- a/tools/testing/selftests/bpf/test_section_names.c +++ b/tools/testing/selftests/bpf/test_section_names.c @@ -120,6 +120,16 @@ static struct sec_name_test tests[] = { {0, BPF_CGROUP_UDP6_SENDMSG}, }, { + "cgroup/recvmsg4", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG}, + {0, BPF_CGROUP_UDP4_RECVMSG}, + }, + { + "cgroup/recvmsg6", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG}, + {0, BPF_CGROUP_UDP6_RECVMSG}, + }, + { "cgroup/sysctl", {0, BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_CGROUP_SYSCTL}, {0, BPF_CGROUP_SYSCTL}, diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c index 3f110eaaf29c..4ecde2392327 100644 --- a/tools/testing/selftests/bpf/test_sock_addr.c +++ 
b/tools/testing/selftests/bpf/test_sock_addr.c @@ -76,6 +76,7 @@ struct sock_addr_test { enum { LOAD_REJECT, ATTACH_REJECT, + ATTACH_OKAY, SYSCALL_EPERM, SYSCALL_ENOTSUPP, SUCCESS, @@ -88,9 +89,13 @@ static int connect4_prog_load(const struct sock_addr_test *test); static int connect6_prog_load(const struct sock_addr_test *test); static int sendmsg_allow_prog_load(const struct sock_addr_test *test); static int sendmsg_deny_prog_load(const struct sock_addr_test *test); +static int recvmsg_allow_prog_load(const struct sock_addr_test *test); +static int recvmsg_deny_prog_load(const struct sock_addr_test *test); static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test); +static int recvmsg4_rw_asm_prog_load(const struct sock_addr_test *test); static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test); static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test); +static int recvmsg6_rw_asm_prog_load(const struct sock_addr_test *test); static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test); static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test); static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test); @@ -507,6 +512,92 @@ static struct sock_addr_test tests[] = { SRC6_REWRITE_IP, SYSCALL_EPERM, }, + + /* recvmsg */ + { + "recvmsg4: return code ok", + recvmsg_allow_prog_load, + BPF_CGROUP_UDP4_RECVMSG, + BPF_CGROUP_UDP4_RECVMSG, + AF_INET, + SOCK_DGRAM, + NULL, + 0, + NULL, + 0, + NULL, + ATTACH_OKAY, + }, + { + "recvmsg4: return code !ok", + recvmsg_deny_prog_load, + BPF_CGROUP_UDP4_RECVMSG, + BPF_CGROUP_UDP4_RECVMSG, + AF_INET, + SOCK_DGRAM, + NULL, + 0, + NULL, + 0, + NULL, + LOAD_REJECT, + }, + { + "recvmsg6: return code ok", + recvmsg_allow_prog_load, + BPF_CGROUP_UDP6_RECVMSG, + BPF_CGROUP_UDP6_RECVMSG, + AF_INET6, + SOCK_DGRAM, + NULL, + 0, + NULL, + 0, + NULL, + ATTACH_OKAY, + }, + { + "recvmsg6: return code !ok", + recvmsg_deny_prog_load, + BPF_CGROUP_UDP6_RECVMSG, + BPF_CGROUP_UDP6_RECVMSG, + AF_INET6, + SOCK_DGRAM, + NULL, + 0, + NULL, + 0, + NULL, + LOAD_REJECT, + }, + { + "recvmsg4: rewrite IP & port (asm)", + recvmsg4_rw_asm_prog_load, + BPF_CGROUP_UDP4_RECVMSG, + BPF_CGROUP_UDP4_RECVMSG, + AF_INET, + SOCK_DGRAM, + SERV4_REWRITE_IP, + SERV4_REWRITE_PORT, + SERV4_REWRITE_IP, + SERV4_REWRITE_PORT, + SERV4_IP, + SUCCESS, + }, + { + "recvmsg6: rewrite IP & port (asm)", + recvmsg6_rw_asm_prog_load, + BPF_CGROUP_UDP6_RECVMSG, + BPF_CGROUP_UDP6_RECVMSG, + AF_INET6, + SOCK_DGRAM, + SERV6_REWRITE_IP, + SERV6_REWRITE_PORT, + SERV6_REWRITE_IP, + SERV6_REWRITE_PORT, + SERV6_IP, + SUCCESS, + }, }; static int mk_sockaddr(int domain, const char *ip, unsigned short port, @@ -765,8 +856,8 @@ static int connect6_prog_load(const struct sock_addr_test *test) return load_path(test, CONNECT6_PROG_PATH); } -static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test, - int32_t rc) +static int xmsg_ret_only_prog_load(const struct sock_addr_test *test, + int32_t rc) { struct bpf_insn insns[] = { /* return rc */ @@ -778,12 +869,22 @@ static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test, static int sendmsg_allow_prog_load(const struct sock_addr_test *test) { - return sendmsg_ret_only_prog_load(test, /*rc*/ 1); + return xmsg_ret_only_prog_load(test, /*rc*/ 1); } static int sendmsg_deny_prog_load(const struct sock_addr_test *test) { - return sendmsg_ret_only_prog_load(test, /*rc*/ 0); + return xmsg_ret_only_prog_load(test, /*rc*/ 0); +} + +static int recvmsg_allow_prog_load(const struct 
sock_addr_test *test) +{ + return xmsg_ret_only_prog_load(test, /*rc*/ 1); +} + +static int recvmsg_deny_prog_load(const struct sock_addr_test *test) +{ + return xmsg_ret_only_prog_load(test, /*rc*/ 0); } static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test) @@ -838,6 +939,47 @@ static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test) return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn)); } +static int recvmsg4_rw_asm_prog_load(const struct sock_addr_test *test) +{ + struct sockaddr_in src4_rw_addr; + + if (mk_sockaddr(AF_INET, SERV4_IP, SERV4_PORT, + (struct sockaddr *)&src4_rw_addr, + sizeof(src4_rw_addr)) == -1) + return -1; + + struct bpf_insn insns[] = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + + /* if (sk.family == AF_INET && */ + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock_addr, family)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET, 6), + + /* sk.type == SOCK_DGRAM) { */ + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock_addr, type)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_DGRAM, 4), + + /* user_ip4 = src4_rw_addr.sin_addr */ + BPF_MOV32_IMM(BPF_REG_7, src4_rw_addr.sin_addr.s_addr), + BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7, + offsetof(struct bpf_sock_addr, user_ip4)), + + /* user_port = src4_rw_addr.sin_port */ + BPF_MOV32_IMM(BPF_REG_7, src4_rw_addr.sin_port), + BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7, + offsetof(struct bpf_sock_addr, user_port)), + /* } */ + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }; + + return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn)); +} + static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test) { return load_path(test, SENDMSG4_PROG_PATH); @@ -901,6 +1043,39 @@ static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test) return sendmsg6_rw_dst_asm_prog_load(test, SERV6_REWRITE_IP); } +static int recvmsg6_rw_asm_prog_load(const struct sock_addr_test *test) +{ + struct sockaddr_in6 src6_rw_addr; + + if (mk_sockaddr(AF_INET6, SERV6_IP, SERV6_PORT, + (struct sockaddr *)&src6_rw_addr, + sizeof(src6_rw_addr)) == -1) + return -1; + + struct bpf_insn insns[] = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + + /* if (sk.family == AF_INET6) { */ + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock_addr, family)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET6, 10), + + STORE_IPV6(user_ip6, src6_rw_addr.sin6_addr.s6_addr32), + + /* user_port = src6_rw_addr.sin6_port */ + BPF_MOV32_IMM(BPF_REG_7, src6_rw_addr.sin6_port), + BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7, + offsetof(struct bpf_sock_addr, user_port)), + /* } */ + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }; + + return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn)); +} + static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test) { return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP); @@ -1282,13 +1457,13 @@ out: return err; } -static int run_sendmsg_test_case(const struct sock_addr_test *test) +static int run_xmsg_test_case(const struct sock_addr_test *test, int max_cmsg) { socklen_t addr_len = sizeof(struct sockaddr_storage); - struct sockaddr_storage expected_src_addr; - struct sockaddr_storage requested_addr; struct sockaddr_storage expected_addr; - struct sockaddr_storage real_src_addr; + struct sockaddr_storage server_addr; + struct sockaddr_storage sendmsg_addr; + struct sockaddr_storage recvmsg_addr; int clientfd = -1; int servfd = -1; int set_cmsg; @@ -1297,20
+1472,19 @@ static int run_sendmsg_test_case(const struct sock_addr_test *test) if (test->type != SOCK_DGRAM) goto err; - if (init_addrs(test, &requested_addr, &expected_addr, - &expected_src_addr)) + if (init_addrs(test, &sendmsg_addr, &server_addr, &expected_addr)) goto err; /* Prepare server to sendmsg to */ - servfd = start_server(test->type, &expected_addr, addr_len); + servfd = start_server(test->type, &server_addr, addr_len); if (servfd == -1) goto err; - for (set_cmsg = 0; set_cmsg <= 1; ++set_cmsg) { + for (set_cmsg = 0; set_cmsg <= max_cmsg; ++set_cmsg) { if (clientfd >= 0) close(clientfd); - clientfd = sendmsg_to_server(test->type, &requested_addr, + clientfd = sendmsg_to_server(test->type, &sendmsg_addr, addr_len, set_cmsg, /*flags*/0, &err); if (err) @@ -1330,10 +1504,10 @@ static int run_sendmsg_test_case(const struct sock_addr_test *test) * specific packet may differ from the one used by default and * returned by getsockname(2). */ - if (recvmsg_from_client(servfd, &real_src_addr) == -1) + if (recvmsg_from_client(servfd, &recvmsg_addr) == -1) goto err; - if (cmp_addr(&real_src_addr, &expected_src_addr, /*cmp_port*/0)) + if (cmp_addr(&recvmsg_addr, &expected_addr, /*cmp_port*/0)) goto err; } @@ -1366,6 +1540,9 @@ static int run_test_case(int cgfd, const struct sock_addr_test *test) goto out; } else if (test->expected_result == ATTACH_REJECT || err) { goto err; + } else if (test->expected_result == ATTACH_OKAY) { + err = 0; + goto out; } switch (test->attach_type) { @@ -1379,7 +1556,11 @@ static int run_test_case(int cgfd, const struct sock_addr_test *test) break; case BPF_CGROUP_UDP4_SENDMSG: case BPF_CGROUP_UDP6_SENDMSG: - err = run_sendmsg_test_case(test); + err = run_xmsg_test_case(test, 1); + break; + case BPF_CGROUP_UDP4_RECVMSG: + case BPF_CGROUP_UDP6_RECVMSG: + err = run_xmsg_test_case(test, 0); break; default: goto err; diff --git a/tools/testing/selftests/bpf/verifier/div_overflow.c b/tools/testing/selftests/bpf/verifier/div_overflow.c index bd3f38dbe796..acab4f00819f 100644 --- a/tools/testing/selftests/bpf/verifier/div_overflow.c +++ b/tools/testing/selftests/bpf/verifier/div_overflow.c @@ -29,8 +29,11 @@ "DIV64 overflow, check 1", .insns = { BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_LD_IMM64(BPF_REG_0, LLONG_MIN), - BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), + BPF_LD_IMM64(BPF_REG_2, LLONG_MIN), + BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 1), + BPF_MOV32_IMM(BPF_REG_0, 1), BPF_EXIT_INSN(), }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, @@ -40,8 +43,11 @@ { "DIV64 overflow, check 2", .insns = { - BPF_LD_IMM64(BPF_REG_0, LLONG_MIN), - BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1), + BPF_LD_IMM64(BPF_REG_1, LLONG_MIN), + BPF_ALU64_IMM(BPF_DIV, BPF_REG_1, -1), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_1, 1), + BPF_MOV32_IMM(BPF_REG_0, 1), BPF_EXIT_INSN(), }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, diff --git a/tools/testing/selftests/bpf/verifier/subreg.c b/tools/testing/selftests/bpf/verifier/subreg.c new file mode 100644 index 000000000000..4c4133c80440 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/subreg.c @@ -0,0 +1,533 @@ +/* This file contains sub-register zero extension checks for insns defining + * sub-registers, meaning: + * - All insns under BPF_ALU class. Their BPF_ALU32 variants or narrow width + * forms (BPF_END) could define sub-registers. + * - Narrow direct loads, BPF_B/H/W | BPF_LDX. 
+ * - BPF_LD is not exposed to JIT back-ends, so no need for testing. + * + * "get_prandom_u32" is used to initialize low 32-bit of some registers to + * prevent potential optimizations done by verifier or JIT back-ends which could + * optimize register back into constant when range info shows one register is a + * constant. + */ +{ + "add32 reg zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL), + BPF_ALU32_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "add32 imm zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + /* An insn could have no effect on the low 32-bit, for example: + * a = a + 0 + * a = a | 0 + * a = a & -1 + * But, they should still zero high 32-bit. + */ + BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, -2), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "sub32 reg zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL), + BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "sub32 imm zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "mul32 reg zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL), + BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "mul32 imm zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, -1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), + 
BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "div32 reg zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_0, -1), + BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "div32 imm zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 2), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "or32 reg zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL), + BPF_ALU32_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "or32 imm zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "and32 reg zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0), + BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL), + BPF_ALU32_REG(BPF_AND, BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "and32 imm zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -2), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "lsh32 reg zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_ALU32_REG(BPF_LSH, BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + 
BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "lsh32 imm zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "rsh32 reg zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_ALU32_REG(BPF_RSH, BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "rsh32 imm zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "neg32 reg zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_NEG, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "mod32 reg zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_0, -1), + BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "mod32 imm zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 2), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "xor32 reg zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL), + BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, 
+}, +{ + "xor32 imm zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_XOR, BPF_REG_0, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "mov32 reg zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0), + BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL), + BPF_MOV32_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "mov32 imm zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_MOV32_IMM(BPF_REG_0, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "arsh32 reg zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "arsh32 imm zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "end16 (to_le) reg zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), + BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 16), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "end32 (to_le) reg zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), + BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 32), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "end16 (to_be) reg zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 
BPF_FUNC_get_prandom_u32), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), + BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 16), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "end32 (to_be) reg zero extend check", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), + BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 32), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "ldx_b zero extend check", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4), + BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "ldx_h zero extend check", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4), + BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_6, 0), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "ldx_w zero extend check", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4), + BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), + BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh index 1c30f302a1e7..5c39e5f6a480 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh @@ -28,6 +28,7 @@ ALL_TESTS=" vlan_interface_uppers_test bridge_extern_learn_test neigh_offload_test + nexthop_offload_test devlink_reload_test " NUM_NETIFS=2 @@ -607,6 +608,52 @@ neigh_offload_test() ip -4 address del 192.0.2.1/24 dev $swp1 } +nexthop_offload_test() +{ + # Test that IPv4 and IPv6 nexthops are marked as offloaded + RET=0 + + sysctl_set net.ipv6.conf.$swp2.keep_addr_on_down 1 + simple_if_init $swp1 192.0.2.1/24 2001:db8:1::1/64 + simple_if_init $swp2 192.0.2.2/24 2001:db8:1::2/64 + setup_wait + + ip -4 route add 198.51.100.0/24 vrf v$swp1 \ + nexthop via 192.0.2.2 dev $swp1 + ip -6 route add 2001:db8:2::/64 vrf v$swp1 \ + nexthop via 2001:db8:1::2 dev $swp1 + + ip -4 route show 198.51.100.0/24 vrf v$swp1 | grep -q offload + check_err $? "ipv4 nexthop not marked as offloaded when should" + ip -6 route show 2001:db8:2::/64 vrf v$swp1 | grep -q offload + check_err $? 
"ipv6 nexthop not marked as offloaded when should" + + ip link set dev $swp2 down + sleep 1 + + ip -4 route show 198.51.100.0/24 vrf v$swp1 | grep -q offload + check_fail $? "ipv4 nexthop marked as offloaded when should not" + ip -6 route show 2001:db8:2::/64 vrf v$swp1 | grep -q offload + check_fail $? "ipv6 nexthop marked as offloaded when should not" + + ip link set dev $swp2 up + setup_wait + + ip -4 route show 198.51.100.0/24 vrf v$swp1 | grep -q offload + check_err $? "ipv4 nexthop not marked as offloaded after neigh add" + ip -6 route show 2001:db8:2::/64 vrf v$swp1 | grep -q offload + check_err $? "ipv6 nexthop not marked as offloaded after neigh add" + + log_test "nexthop offload indication" + + ip -6 route del 2001:db8:2::/64 vrf v$swp1 + ip -4 route del 198.51.100.0/24 vrf v$swp1 + + simple_if_fini $swp2 192.0.2.2/24 2001:db8:1::2/64 + simple_if_fini $swp1 192.0.2.1/24 2001:db8:1::1/64 + sysctl_restore net.ipv6.conf.$swp2.keep_addr_on_down +} + devlink_reload_test() { # Test that after executing all the above configuration tests, a diff --git a/tools/testing/selftests/net/forwarding/tc_flower.sh b/tools/testing/selftests/net/forwarding/tc_flower.sh index 29bcfa84aec7..124803eea4a9 100755 --- a/tools/testing/selftests/net/forwarding/tc_flower.sh +++ b/tools/testing/selftests/net/forwarding/tc_flower.sh @@ -2,7 +2,8 @@ # SPDX-License-Identifier: GPL-2.0 ALL_TESTS="match_dst_mac_test match_src_mac_test match_dst_ip_test \ - match_src_ip_test match_ip_flags_test match_pcp_test match_vlan_test" + match_src_ip_test match_ip_flags_test match_pcp_test match_vlan_test \ + match_ip_tos_test" NUM_NETIFS=2 source tc_common.sh source lib.sh @@ -276,6 +277,39 @@ match_vlan_test() log_test "VLAN match ($tcflags)" } +match_ip_tos_test() +{ + RET=0 + + tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \ + $tcflags dst_ip 192.0.2.2 ip_tos 0x20 action drop + tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \ + $tcflags dst_ip 192.0.2.2 ip_tos 0x18 action drop + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t ip tos=18 -q + + tc_check_packets "dev $h2 ingress" 101 1 + check_fail $? "Matched on a wrong filter (0x18)" + + tc_check_packets "dev $h2 ingress" 102 1 + check_err $? "Did not match on correct filter (0x18)" + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t ip tos=20 -q + + tc_check_packets "dev $h2 ingress" 102 2 + check_fail $? "Matched on a wrong filter (0x20)" + + tc_check_packets "dev $h2 ingress" 101 1 + check_err $? "Did not match on correct filter (0x20)" + + tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower + tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower + + log_test "ip_tos match ($tcflags)" +} + setup_prepare() { h1=${NETIFS[p1]} |