Diffstat (limited to 'drivers/thunderbolt')
-rw-r--r-- | drivers/thunderbolt/Kconfig | 11
-rw-r--r-- | drivers/thunderbolt/Makefile | 6
-rw-r--r-- | drivers/thunderbolt/cap.c | 17
-rw-r--r-- | drivers/thunderbolt/ctl.c | 50
-rw-r--r-- | drivers/thunderbolt/ctl.h | 3
-rw-r--r-- | drivers/thunderbolt/eeprom.c | 152
-rw-r--r-- | drivers/thunderbolt/icm.c | 317
-rw-r--r-- | drivers/thunderbolt/lc.c | 193
-rw-r--r-- | drivers/thunderbolt/nhi.c | 137
-rw-r--r-- | drivers/thunderbolt/nhi.h | 24
-rw-r--r-- | drivers/thunderbolt/nhi_ops.c | 178
-rw-r--r-- | drivers/thunderbolt/nhi_regs.h | 37
-rw-r--r-- | drivers/thunderbolt/path.c | 52
-rw-r--r-- | drivers/thunderbolt/switch.c | 1065
-rw-r--r-- | drivers/thunderbolt/tb.c | 539
-rw-r--r-- | drivers/thunderbolt/tb.h | 182
-rw-r--r-- | drivers/thunderbolt/tb_msgs.h | 24
-rw-r--r-- | drivers/thunderbolt/tb_regs.h | 160
-rw-r--r-- | drivers/thunderbolt/tmu.c | 383
-rw-r--r-- | drivers/thunderbolt/tunnel.c | 531
-rw-r--r-- | drivers/thunderbolt/tunnel.h | 19
-rw-r--r-- | drivers/thunderbolt/usb4.c | 764
-rw-r--r-- | drivers/thunderbolt/xdomain.c | 13
23 files changed, 4340 insertions, 517 deletions
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig index fd9adca898ff..1eb757e8df3b 100644 --- a/drivers/thunderbolt/Kconfig +++ b/drivers/thunderbolt/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only -menuconfig THUNDERBOLT - tristate "Thunderbolt support" +menuconfig USB4 + tristate "Unified support for USB4 and Thunderbolt" depends on PCI depends on X86 || COMPILE_TEST select APPLE_PROPERTIES if EFI_STUB && X86 @@ -9,9 +9,10 @@ menuconfig THUNDERBOLT select CRYPTO_HASH select NVMEM help - Thunderbolt Controller driver. This driver is required if you - want to hotplug Thunderbolt devices on Apple hardware or on PCs - with Intel Falcon Ridge or newer. + USB4 and Thunderbolt driver. USB4 is the public specification + based on the Thunderbolt 3 protocol. This driver is required if + you want to hotplug Thunderbolt and USB4 compliant devices on + Apple hardware or on PCs with Intel Falcon Ridge or newer. To compile this driver as a module, choose M here. The module will be called thunderbolt. diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile index 3f55cb3c81b2..eae28dd45250 100644 --- a/drivers/thunderbolt/Makefile +++ b/drivers/thunderbolt/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-${CONFIG_THUNDERBOLT} := thunderbolt.o -thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o -thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o +obj-${CONFIG_USB4} := thunderbolt.o +thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o +thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c index 8bf8e031f0bc..19db6cdc5b70 100644 --- a/drivers/thunderbolt/cap.c +++ b/drivers/thunderbolt/cap.c @@ -33,9 +33,9 @@ static int tb_port_enable_tmu(struct tb_port *port, bool enable) * Legacy devices need to have TMU access enabled before port * space can be fully accessed. */ - if (tb_switch_is_lr(sw)) + if (tb_switch_is_light_ridge(sw)) offset = 0x26; - else if (tb_switch_is_er(sw)) + else if (tb_switch_is_eagle_ridge(sw)) offset = 0x2a; else return 0; @@ -60,7 +60,7 @@ static void tb_port_dummy_read(struct tb_port *port) * reading stale data on next read perform one dummy read after * port capabilities are walked. */ - if (tb_switch_is_lr(port->sw)) { + if (tb_switch_is_light_ridge(port->sw)) { u32 dummy; tb_port_read(port, &dummy, TB_CFG_PORT, 0, 1); @@ -113,7 +113,16 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap) return ret; } -static int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap) +/** + * tb_switch_find_cap() - Find switch capability + * @sw: Switch to find the capability for + * @cap: Capability to look for + * + * Returns offset to start of capability or %-ENOENT if no such + * capability was found. Negative errno is returned if there was an + * error.
+ */ +int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap) { int offset = sw->config.first_cap_offset; diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c index 2427d73be731..f77ceae5c7d7 100644 --- a/drivers/thunderbolt/ctl.c +++ b/drivers/thunderbolt/ctl.c @@ -708,19 +708,26 @@ void tb_ctl_stop(struct tb_ctl *ctl) /* public interface, commands */ /** - * tb_cfg_error() - send error packet + * tb_cfg_ack_plug() - Ack hot plug/unplug event + * @ctl: Control channel to use + * @route: Router that originated the event + * @port: Port where the hot plug/unplug happened + * @unplug: Ack hot plug or unplug * - * Return: Returns 0 on success or an error code on failure. + * Call this as response for hot plug/unplug event to ack it. + * Returns %0 on success or an error code on failure. */ -int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port, - enum tb_cfg_error error) +int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug) { struct cfg_error_pkg pkg = { .header = tb_cfg_make_header(route), .port = port, - .error = error, + .error = TB_CFG_ERROR_ACK_PLUG_EVENT, + .pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG + : TB_CFG_ERROR_PG_HOT_PLUG, }; - tb_ctl_dbg(ctl, "resetting error on %llx:%x.\n", route, port); + tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n", + unplug ? "un" : "", route, port); return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR); } @@ -930,6 +937,23 @@ struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer, return res; } +static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space, + const struct tb_cfg_result *res) +{ + /* + * For unimplemented ports access to port config space may return + * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is + * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so + * that the caller can mark the port as disabled. 
+ */ + if (space == TB_CFG_PORT && + res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE) + return -ENODEV; + + tb_cfg_print_error(ctl, res); + return -EIO; +} + int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, enum tb_cfg_space space, u32 offset, u32 length) { @@ -942,12 +966,11 @@ int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, case 1: /* Thunderbolt error, tb_error holds the actual number */ - tb_cfg_print_error(ctl, &res); - return -EIO; + return tb_cfg_get_error(ctl, space, &res); case -ETIMEDOUT: - tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n", - space, offset); + tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n", + route, space, offset); break; default: @@ -969,12 +992,11 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port, case 1: /* Thunderbolt error, tb_error holds the actual number */ - tb_cfg_print_error(ctl, &res); - return -EIO; + return tb_cfg_get_error(ctl, space, &res); case -ETIMEDOUT: - tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n", - space, offset); + tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n", + route, space, offset); break; default: diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h index 2f1a1e111110..97cb03b38953 100644 --- a/drivers/thunderbolt/ctl.h +++ b/drivers/thunderbolt/ctl.h @@ -123,8 +123,7 @@ static inline struct tb_cfg_header tb_cfg_make_header(u64 route) return header; } -int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port, - enum tb_cfg_error error); +int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug); struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route, int timeout_msec); struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer, diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c index 81e8ac4c5805..921d164b3f35 100644 --- a/drivers/thunderbolt/eeprom.c +++ b/drivers/thunderbolt/eeprom.c @@ -131,12 +131,51 @@ static int tb_eeprom_in(struct tb_switch *sw, u8 *val) } /** + * tb_eeprom_get_drom_offset - get drom offset within eeprom + */ +static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset) +{ + struct tb_cap_plug_events cap; + int res; + + if (!sw->cap_plug_events) { + tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n"); + return -ENODEV; + } + res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events, + sizeof(cap) / 4); + if (res) + return res; + + if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) { + tb_sw_warn(sw, "no NVM\n"); + return -ENODEV; + } + + if (cap.drom_offset > 0xffff) { + tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n", + cap.drom_offset); + return -ENXIO; + } + *offset = cap.drom_offset; + return 0; +} + +/** * tb_eeprom_read_n - read count bytes from offset into val */ static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val, size_t count) { + u16 drom_offset; int i, res; + + res = tb_eeprom_get_drom_offset(sw, &drom_offset); + if (res) + return res; + + offset += drom_offset; + res = tb_eeprom_active(sw, true); if (res) return res; @@ -239,36 +278,6 @@ struct tb_drom_entry_port { /** - * tb_eeprom_get_drom_offset - get drom offset within eeprom - */ -static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset) -{ - struct tb_cap_plug_events cap; - int res; - if (!sw->cap_plug_events) { - tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n"); - return -ENOSYS; - } - res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, 
sw->cap_plug_events, - sizeof(cap) / 4); - if (res) - return res; - - if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) { - tb_sw_warn(sw, "no NVM\n"); - return -ENOSYS; - } - - if (cap.drom_offset > 0xffff) { - tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n", - cap.drom_offset); - return -ENXIO; - } - *offset = cap.drom_offset; - return 0; -} - -/** * tb_drom_read_uid_only - read uid directly from drom * * Does not use the cached copy in sw->drom. Used during resume to check switch @@ -277,17 +286,11 @@ static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset) int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid) { u8 data[9]; - u16 drom_offset; u8 crc; - int res = tb_eeprom_get_drom_offset(sw, &drom_offset); - if (res) - return res; - - if (drom_offset == 0) - return -ENODEV; + int res; /* read uid */ - res = tb_eeprom_read_n(sw, drom_offset, data, 9); + res = tb_eeprom_read_n(sw, 0, data, 9); if (res) return res; @@ -414,7 +417,7 @@ static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size) struct device *dev = &sw->tb->nhi->pdev->dev; int len, res; - len = device_property_read_u8_array(dev, "ThunderboltDROM", NULL, 0); + len = device_property_count_u8(dev, "ThunderboltDROM"); if (len < 0 || len < sizeof(struct tb_drom_header)) return -EINVAL; @@ -484,12 +487,42 @@ err_free: return ret; } +static int usb4_copy_host_drom(struct tb_switch *sw, u16 *size) +{ + int ret; + + ret = usb4_switch_drom_read(sw, 14, size, sizeof(*size)); + if (ret) + return ret; + + /* Size includes CRC8 + UID + CRC32 */ + *size += 1 + 8 + 4; + sw->drom = kzalloc(*size, GFP_KERNEL); + if (!sw->drom) + return -ENOMEM; + + ret = usb4_switch_drom_read(sw, 0, sw->drom, *size); + if (ret) { + kfree(sw->drom); + sw->drom = NULL; + } + + return ret; +} + +static int tb_drom_read_n(struct tb_switch *sw, u16 offset, u8 *val, + size_t count) +{ + if (tb_switch_is_usb4(sw)) + return usb4_switch_drom_read(sw, offset, val, count); + return tb_eeprom_read_n(sw, offset, val, count); +} + /** * tb_drom_read - copy drom to sw->drom and parse it */ int tb_drom_read(struct tb_switch *sw) { - u16 drom_offset; u16 size; u32 crc; struct tb_drom_header *header; @@ -510,33 +543,26 @@ int tb_drom_read(struct tb_switch *sw) goto parse; /* - * The root switch contains only a dummy drom (header only, - * no entries). Hardcode the configuration here. + * USB4 hosts may support reading DROM through router + * operations. */ - tb_drom_read_uid_only(sw, &sw->uid); - - sw->ports[1].link_nr = 0; - sw->ports[2].link_nr = 1; - sw->ports[1].dual_link_port = &sw->ports[2]; - sw->ports[2].dual_link_port = &sw->ports[1]; - - sw->ports[3].link_nr = 0; - sw->ports[4].link_nr = 1; - sw->ports[3].dual_link_port = &sw->ports[4]; - sw->ports[4].dual_link_port = &sw->ports[3]; - - /* Port 5 is inaccessible on this gen 1 controller */ - if (sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE) - sw->ports[5].disabled = true; + if (tb_switch_is_usb4(sw)) { + usb4_switch_read_uid(sw, &sw->uid); + if (!usb4_copy_host_drom(sw, &size)) + goto parse; + } else { + /* + * The root switch contains only a dummy drom + * (header only, no entries). Hardcode the + * configuration here. 
+ */ + tb_drom_read_uid_only(sw, &sw->uid); + } return 0; } - res = tb_eeprom_get_drom_offset(sw, &drom_offset); - if (res) - return res; - - res = tb_eeprom_read_n(sw, drom_offset + 14, (u8 *) &size, 2); + res = tb_drom_read_n(sw, 14, (u8 *) &size, 2); if (res) return res; size &= 0x3ff; @@ -550,7 +576,7 @@ int tb_drom_read(struct tb_switch *sw) sw->drom = kzalloc(size, GFP_KERNEL); if (!sw->drom) return -ENOMEM; - res = tb_eeprom_read_n(sw, drom_offset, sw->drom, size); + res = tb_drom_read_n(sw, 0, sw->drom, size); if (res) goto err; diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index fbdcef56a676..13e88109742e 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -11,6 +11,7 @@ #include <linux/delay.h> #include <linux/mutex.h> +#include <linux/moduleparam.h> #include <linux/pci.h> #include <linux/pm_runtime.h> #include <linux/platform_data/x86/apple.h> @@ -43,6 +44,10 @@ #define ICM_APPROVE_TIMEOUT 10000 /* ms */ #define ICM_MAX_LINK 4 +static bool start_icm; +module_param(start_icm, bool, 0444); +MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)"); + /** * struct icm - Internal connection manager private data * @request_lock: Makes sure only one message is sent to ICM at a time @@ -55,16 +60,20 @@ * @safe_mode: ICM is in safe mode * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported) * @rpm: Does the controller support runtime PM (RTD3) + * @can_upgrade_nvm: Can the NVM firmware be upgraded on this controller + * @veto: Is RTD3 veto in effect * @is_supported: Checks if we can support ICM on this controller * @cio_reset: Trigger CIO reset * @get_mode: Read and return the ICM firmware mode (optional) * @get_route: Find a route string for given switch * @save_devices: Ask ICM to save devices to ACL when suspending (optional) * @driver_ready: Send driver ready message to ICM + * @set_uuid: Set UUID for the root switch (optional) * @device_connected: Handle device connected ICM message * @device_disconnected: Handle device disconnected ICM message * @xdomain_connected: Handle XDomain connected ICM message * @xdomain_disconnected: Handle XDomain disconnected ICM message + * @rtd3_veto: Handle RTD3 veto notification ICM message */ struct icm { struct mutex request_lock; @@ -74,6 +83,8 @@ struct icm { int vnd_cap; bool safe_mode; bool rpm; + bool can_upgrade_nvm; + bool veto; bool (*is_supported)(struct tb *tb); int (*cio_reset)(struct tb *tb); int (*get_mode)(struct tb *tb); @@ -82,6 +93,7 @@ struct icm { int (*driver_ready)(struct tb *tb, enum tb_security_level *security_level, size_t *nboot_acl, bool *rpm); + void (*set_uuid)(struct tb *tb); void (*device_connected)(struct tb *tb, const struct icm_pkg_header *hdr); void (*device_disconnected)(struct tb *tb, @@ -90,6 +102,7 @@ struct icm { const struct icm_pkg_header *hdr); void (*xdomain_disconnected)(struct tb *tb, const struct icm_pkg_header *hdr); + void (*rtd3_veto)(struct tb *tb, const struct icm_pkg_header *hdr); }; struct icm_notification { @@ -139,6 +152,17 @@ static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size) return NULL; } +static bool intel_vss_is_rtd3(const void *ep_name, size_t size) +{ + const struct intel_vss *vss; + + vss = parse_intel_vss(ep_name, size); + if (vss) + return !!(vss->flags & INTEL_VSS_FLAGS_RTD3); + + return false; +} + static inline struct tb *icm_to_tb(struct icm *icm) { return ((void *)icm - sizeof(struct tb)); @@ -294,6 +318,51 @@ static int icm_request(struct tb *tb, const
void *request, size_t request_size, return -ETIMEDOUT; } +/* + * If rescan is queued to run (we are resuming), postpone it to give the + * firmware some more time to send device connected notifications for next + * devices in the chain. + */ +static void icm_postpone_rescan(struct tb *tb) +{ + struct icm *icm = tb_priv(tb); + + if (delayed_work_pending(&icm->rescan_work)) + mod_delayed_work(tb->wq, &icm->rescan_work, + msecs_to_jiffies(500)); +} + +static void icm_veto_begin(struct tb *tb) +{ + struct icm *icm = tb_priv(tb); + + if (!icm->veto) { + icm->veto = true; + /* Keep the domain powered while veto is in effect */ + pm_runtime_get(&tb->dev); + } +} + +static void icm_veto_end(struct tb *tb) +{ + struct icm *icm = tb_priv(tb); + + if (icm->veto) { + icm->veto = false; + /* Allow the domain suspend now */ + pm_runtime_mark_last_busy(&tb->dev); + pm_runtime_put_autosuspend(&tb->dev); + } +} + +static bool icm_firmware_running(const struct tb_nhi *nhi) +{ + u32 val; + + val = ioread32(nhi->iobase + REG_FW_STS); + return !!(val & REG_FW_STS_ICM_EN); +} + static bool icm_fr_is_supported(struct tb *tb) { return !x86_apple_machine; @@ -517,52 +586,42 @@ static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) return 0; } -static void add_switch(struct tb_switch *parent_sw, u64 route, - const uuid_t *uuid, const u8 *ep_name, - size_t ep_name_size, u8 connection_id, u8 connection_key, - u8 link, u8 depth, enum tb_security_level security_level, - bool authorized, bool boot) +static struct tb_switch *alloc_switch(struct tb_switch *parent_sw, u64 route, + const uuid_t *uuid) { - const struct intel_vss *vss; + struct tb *tb = parent_sw->tb; struct tb_switch *sw; - pm_runtime_get_sync(&parent_sw->dev); - - sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route); - if (IS_ERR(sw)) - goto out; + sw = tb_switch_alloc(tb, &parent_sw->dev, route); + if (IS_ERR(sw)) { + tb_warn(tb, "failed to allocate switch at %llx\n", route); + return sw; + } sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL); if (!sw->uuid) { - tb_sw_warn(sw, "cannot allocate memory for switch\n"); tb_switch_put(sw); - goto out; + return ERR_PTR(-ENOMEM); } - sw->connection_id = connection_id; - sw->connection_key = connection_key; - sw->link = link; - sw->depth = depth; - sw->authorized = authorized; - sw->security_level = security_level; - sw->boot = boot; + init_completion(&sw->rpm_complete); + return sw; +} - vss = parse_intel_vss(ep_name, ep_name_size); - if (vss) - sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3); +static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw) +{ + u64 route = tb_route(sw); + int ret; /* Link the two switches now */ tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw); - if (tb_switch_add(sw)) { + ret = tb_switch_add(sw); + if (ret) tb_port_at(tb_route(sw), parent_sw)->remote = NULL; - tb_switch_put(sw); - } -out: - pm_runtime_mark_last_busy(&parent_sw->dev); - pm_runtime_put_autosuspend(&parent_sw->dev); + return ret; } static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw, @@ -646,14 +705,16 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) (const struct icm_fr_event_device_connected *)hdr; enum tb_security_level security_level; struct tb_switch *sw, *parent_sw; + bool boot, dual_lane, speed_gen3; struct icm *icm = tb_priv(tb); bool authorized = false; struct tb_xdomain *xd; u8 link, depth; - bool boot; u64 route; int ret; + 
icm_postpone_rescan(tb); + link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> ICM_LINK_INFO_DEPTH_SHIFT; @@ -661,6 +722,8 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> ICM_FLAGS_SLEVEL_SHIFT; boot = pkg->link_info & ICM_LINK_INFO_BOOT; + dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE; + speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3; if (pkg->link_info & ICM_LINK_INFO_REJECTED) { tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n", @@ -758,10 +821,27 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) return; } - add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name, - sizeof(pkg->ep_name), pkg->connection_id, - pkg->connection_key, link, depth, security_level, - authorized, boot); + pm_runtime_get_sync(&parent_sw->dev); + + sw = alloc_switch(parent_sw, route, &pkg->ep_uuid); + if (!IS_ERR(sw)) { + sw->connection_id = pkg->connection_id; + sw->connection_key = pkg->connection_key; + sw->link = link; + sw->depth = depth; + sw->authorized = authorized; + sw->security_level = security_level; + sw->boot = boot; + sw->link_speed = speed_gen3 ? 20 : 10; + sw->link_width = dual_lane ? 2 : 1; + sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name)); + + if (add_switch(parent_sw, sw)) + tb_switch_put(sw); + } + + pm_runtime_mark_last_busy(&parent_sw->dev); + pm_runtime_put_autosuspend(&parent_sw->dev); tb_switch_put(parent_sw); } @@ -1084,16 +1164,19 @@ static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) } static void -icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) +__icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr, + bool force_rtd3) { const struct icm_tr_event_device_connected *pkg = (const struct icm_tr_event_device_connected *)hdr; + bool authorized, boot, dual_lane, speed_gen3; enum tb_security_level security_level; struct tb_switch *sw, *parent_sw; struct tb_xdomain *xd; - bool authorized, boot; u64 route; + icm_postpone_rescan(tb); + /* * Currently we don't use the QoS information coming with the * device connected message so simply just ignore that extra @@ -1107,6 +1190,8 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> ICM_FLAGS_SLEVEL_SHIFT; boot = pkg->link_info & ICM_LINK_INFO_BOOT; + dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE; + speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3; if (pkg->link_info & ICM_LINK_INFO_REJECTED) { tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n", @@ -1149,14 +1234,38 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) return; } - add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name, - sizeof(pkg->ep_name), pkg->connection_id, - 0, 0, 0, security_level, authorized, boot); + pm_runtime_get_sync(&parent_sw->dev); + + sw = alloc_switch(parent_sw, route, &pkg->ep_uuid); + if (!IS_ERR(sw)) { + sw->connection_id = pkg->connection_id; + sw->authorized = authorized; + sw->security_level = security_level; + sw->boot = boot; + sw->link_speed = speed_gen3 ? 20 : 10; + sw->link_width = dual_lane ? 
2 : 1; + sw->rpm = force_rtd3; + if (!sw->rpm) + sw->rpm = intel_vss_is_rtd3(pkg->ep_name, + sizeof(pkg->ep_name)); + + if (add_switch(parent_sw, sw)) + tb_switch_put(sw); + } + + pm_runtime_mark_last_busy(&parent_sw->dev); + pm_runtime_put_autosuspend(&parent_sw->dev); tb_switch_put(parent_sw); } static void +icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) +{ + __icm_tr_device_connected(tb, hdr, false); +} + +static void icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) { const struct icm_tr_event_device_disconnected *pkg = @@ -1285,9 +1394,12 @@ static bool icm_ar_is_supported(struct tb *tb) /* * Starting from Alpine Ridge we can use ICM on Apple machines * as well. We just need to reset and re-enable it first. + * However, only start it if explicitly asked by the user. */ - if (!x86_apple_machine) + if (icm_firmware_running(tb->nhi)) return true; + if (!start_icm) + return false; /* * Find the upstream PCIe port in case we need to do reset @@ -1466,6 +1578,61 @@ static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids, return 0; } +static int +icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level, + size_t *nboot_acl, bool *rpm) +{ + struct icm_tr_pkg_driver_ready_response reply; + struct icm_pkg_driver_ready request = { + .hdr.code = ICM_DRIVER_READY, + }; + int ret; + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, 20000); + if (ret) + return ret; + + /* Ice Lake always supports RTD3 */ + if (rpm) + *rpm = true; + + return 0; +} + +static void icm_icl_set_uuid(struct tb *tb) +{ + struct tb_nhi *nhi = tb->nhi; + u32 uuid[4]; + + pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]); + pci_read_config_dword(nhi->pdev, VS_CAP_11, &uuid[1]); + uuid[2] = 0xffffffff; + uuid[3] = 0xffffffff; + + tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); +} + +static void +icm_icl_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) +{ + __icm_tr_device_connected(tb, hdr, true); +} + +static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr) +{ + const struct icm_icl_event_rtd3_veto *pkg = + (const struct icm_icl_event_rtd3_veto *)hdr; + + tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason); + + if (pkg->veto_reason) + icm_veto_begin(tb); + else + icm_veto_end(tb); +} + static void icm_handle_notification(struct work_struct *work) { struct icm_notification *n = container_of(work, typeof(*n), work); @@ -1493,6 +1660,9 @@ static void icm_handle_notification(struct work_struct *work) case ICM_EVENT_XDOMAIN_DISCONNECTED: icm->xdomain_disconnected(tb, n->pkg); break; + case ICM_EVENT_RTD3_VETO: + icm->rtd3_veto(tb, n->pkg); + break; } } @@ -1582,8 +1752,7 @@ static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi) u32 val; /* Check if the ICM firmware is already running */ - val = ioread32(nhi->iobase + REG_FW_STS); - if (val & REG_FW_STS_ICM_EN) + if (icm_firmware_running(nhi)) return 0; dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n"); @@ -1771,14 +1940,12 @@ static int icm_suspend(struct tb *tb) */ static void icm_unplug_children(struct tb_switch *sw) { - unsigned int i; + struct tb_port *port; if (tb_route(sw)) sw->is_unplugged = true; - for (i = 1; i <= sw->config.max_port_number; i++) { - struct tb_port *port = &sw->ports[i]; - + tb_switch_for_each_port(sw, port) { if (port->xdomain) port->xdomain->is_unplugged = true; else if (tb_port_has_remote(port)) @@ -1814,11 +1981,9 @@ static void 
remove_unplugged_switch(struct tb_switch *sw) static void icm_free_unplugged_children(struct tb_switch *sw) { - unsigned int i; - - for (i = 1; i <= sw->config.max_port_number; i++) { - struct tb_port *port = &sw->ports[i]; + struct tb_port *port; + tb_switch_for_each_port(sw, port) { if (port->xdomain && port->xdomain->is_unplugged) { tb_xdomain_remove(port->xdomain); port->xdomain = NULL; @@ -1851,6 +2016,13 @@ static void icm_complete(struct tb *tb) if (tb->nhi->going_away) return; + /* + * If RTD3 was vetoed before we entered system suspend allow it + * again now before driver ready is sent. Firmware sends a new RTD3 + * veto if it is still the case after we have sent it driver ready + * command. + */ + icm_veto_end(tb); icm_unplug_children(tb->root_switch); /* @@ -1913,14 +2085,12 @@ static int icm_start(struct tb *tb) if (IS_ERR(tb->root_switch)) return PTR_ERR(tb->root_switch); - /* - * NVM upgrade has not been tested on Apple systems and they - * don't provide images publicly either. To be on the safe side - * prevent root switch NVM upgrade on Macs for now. - */ - tb->root_switch->no_nvm_upgrade = x86_apple_machine; + tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm; tb->root_switch->rpm = icm->rpm; + if (icm->set_uuid) + icm->set_uuid(tb); + ret = tb_switch_add(tb->root_switch); if (ret) { tb_switch_put(tb->root_switch); @@ -2005,6 +2175,19 @@ static const struct tb_cm_ops icm_tr_ops = { .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, }; +/* Ice Lake */ +static const struct tb_cm_ops icm_icl_ops = { + .driver_ready = icm_driver_ready, + .start = icm_start, + .stop = icm_stop, + .complete = icm_complete, + .runtime_suspend = icm_runtime_suspend, + .runtime_resume = icm_runtime_resume, + .handle_event = icm_handle_event, + .approve_xdomain_paths = icm_tr_approve_xdomain_paths, + .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, +}; + struct tb *icm_probe(struct tb_nhi *nhi) { struct icm *icm; @@ -2021,6 +2204,7 @@ struct tb *icm_probe(struct tb_nhi *nhi) switch (nhi->pdev->device) { case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: + icm->can_upgrade_nvm = true; icm->is_supported = icm_fr_is_supported; icm->get_route = icm_fr_get_route; icm->save_devices = icm_fr_save_devices; @@ -2038,6 +2222,13 @@ struct tb *icm_probe(struct tb_nhi *nhi) case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI: case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI: icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; + /* + * NVM upgrade has not been tested on Apple systems and + * they don't provide images publicly either. To be on + * the safe side prevent root switch NVM upgrade on Macs + * for now. 
+ */ + icm->can_upgrade_nvm = !x86_apple_machine; icm->is_supported = icm_ar_is_supported; icm->cio_reset = icm_ar_cio_reset; icm->get_mode = icm_ar_get_mode; @@ -2054,6 +2245,7 @@ struct tb *icm_probe(struct tb_nhi *nhi) case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI: case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI: icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; + icm->can_upgrade_nvm = !x86_apple_machine; icm->is_supported = icm_ar_is_supported; icm->cio_reset = icm_tr_cio_reset; icm->get_mode = icm_ar_get_mode; @@ -2064,6 +2256,19 @@ struct tb *icm_probe(struct tb_nhi *nhi) icm->xdomain_disconnected = icm_tr_xdomain_disconnected; tb->cm_ops = &icm_tr_ops; break; + + case PCI_DEVICE_ID_INTEL_ICL_NHI0: + case PCI_DEVICE_ID_INTEL_ICL_NHI1: + icm->is_supported = icm_fr_is_supported; + icm->driver_ready = icm_icl_driver_ready; + icm->set_uuid = icm_icl_set_uuid; + icm->device_connected = icm_icl_device_connected; + icm->device_disconnected = icm_tr_device_disconnected; + icm->xdomain_connected = icm_tr_xdomain_connected; + icm->xdomain_disconnected = icm_tr_xdomain_disconnected; + icm->rtd3_veto = icm_icl_rtd3_veto; + tb->cm_ops = &icm_icl_ops; + break; } if (!icm->is_supported || !icm->is_supported(tb)) { diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c index ae1e92611c3e..bd44d50246d2 100644 --- a/drivers/thunderbolt/lc.c +++ b/drivers/thunderbolt/lc.c @@ -94,7 +94,7 @@ int tb_lc_configure_link(struct tb_switch *sw) struct tb_port *up, *down; int ret; - if (!sw->config.enabled || !tb_route(sw)) + if (!tb_route(sw) || tb_switch_is_icm(sw)) return 0; up = tb_upstream_port(sw); @@ -124,7 +124,7 @@ void tb_lc_unconfigure_link(struct tb_switch *sw) { struct tb_port *up, *down; - if (sw->is_unplugged || !sw->config.enabled || !tb_route(sw)) + if (sw->is_unplugged || !tb_route(sw) || tb_switch_is_icm(sw)) return; up = tb_upstream_port(sw); @@ -177,3 +177,192 @@ int tb_lc_set_sleep(struct tb_switch *sw) return 0; } + +/** + * tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch + * @sw: Switch to check + * + * Checks whether the conditions for lane bonding from the parent to + * @sw are met. + */ +bool tb_lc_lane_bonding_possible(struct tb_switch *sw) +{ + struct tb_port *up; + int cap, ret; + u32 val; + + if (sw->generation < 2) + return false; + + up = tb_upstream_port(sw); + cap = find_port_lc_cap(up); + if (cap < 0) + return false; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1); + if (ret) + return false; + + return !!(val & TB_LC_PORT_ATTR_BE); +} + +static int tb_lc_dp_sink_from_port(const struct tb_switch *sw, + struct tb_port *in) +{ + struct tb_port *port; + + /* The first DP IN port is sink 0 and second is sink 1 */ + tb_switch_for_each_port(sw, port) { + if (tb_port_is_dpin(port)) + return in != port; + } + + return -EINVAL; +} + +static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink) +{ + u32 val, alloc; + int ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->cap_lc + TB_LC_SNK_ALLOCATION, 1); + if (ret) + return ret; + + /* + * Sink is available for CM/SW to use if the allocation value is + * either 0 or 1.
+ */ + if (!sink) { + alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK; + if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM) + return 0; + } else { + alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >> + TB_LC_SNK_ALLOCATION_SNK1_SHIFT; + if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM) + return 0; + } + + return -EBUSY; +} + +/** + * tb_lc_dp_sink_query() - Is DP sink available for DP IN port + * @sw: Switch whose DP sink is queried + * @in: DP IN port to check + * + * Queries through LC SNK_ALLOCATION registers whether DP sink is available + * for the given DP IN port or not. + */ +bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in) +{ + int sink; + + /* + * For older generations sink is always available as there is no + * allocation mechanism. + */ + if (sw->generation < 3) + return true; + + sink = tb_lc_dp_sink_from_port(sw, in); + if (sink < 0) + return false; + + return !tb_lc_dp_sink_available(sw, sink); +} + +/** + * tb_lc_dp_sink_alloc() - Allocate DP sink + * @sw: Switch whose DP sink is allocated + * @in: DP IN port the DP sink is allocated for + * + * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the + * resource is available and allocation is successful returns %0. In all + * other cases returns negative errno. In particular %-EBUSY is returned if + * the resource was not available. + */ +int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in) +{ + int ret, sink; + u32 val; + + if (sw->generation < 3) + return 0; + + sink = tb_lc_dp_sink_from_port(sw, in); + if (sink < 0) + return sink; + + ret = tb_lc_dp_sink_available(sw, sink); + if (ret) + return ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->cap_lc + TB_LC_SNK_ALLOCATION, 1); + if (ret) + return ret; + + if (!sink) { + val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK; + val |= TB_LC_SNK_ALLOCATION_SNK0_CM; + } else { + val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK; + val |= TB_LC_SNK_ALLOCATION_SNK1_CM << + TB_LC_SNK_ALLOCATION_SNK1_SHIFT; + } + + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->cap_lc + TB_LC_SNK_ALLOCATION, 1); + + if (ret) + return ret; + + tb_port_dbg(in, "sink %d allocated\n", sink); + return 0; +} + +/** + * tb_lc_dp_sink_dealloc() - De-allocate DP sink + * @sw: Switch whose DP sink is de-allocated + * @in: DP IN port whose DP sink is de-allocated + * + * De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
+ */ +int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in) +{ + int ret, sink; + u32 val; + + if (sw->generation < 3) + return 0; + + sink = tb_lc_dp_sink_from_port(sw, in); + if (sink < 0) + return sink; + + /* Needs to be owned by CM/SW */ + ret = tb_lc_dp_sink_available(sw, sink); + if (ret) + return ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->cap_lc + TB_LC_SNK_ALLOCATION, 1); + if (ret) + return ret; + + if (!sink) + val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK; + else + val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK; + + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->cap_lc + TB_LC_SNK_ALLOCATION, 1); + if (ret) + return ret; + + tb_port_dbg(in, "sink %d de-allocated\n", sink); + return 0; +} diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index 27fbe62c7ddd..1be491ecbb45 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -16,6 +16,7 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/delay.h> +#include <linux/property.h> #include "nhi.h" #include "nhi_regs.h" @@ -143,9 +144,20 @@ static void __iomem *ring_options_base(struct tb_ring *ring) return io; } -static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset) +static void ring_iowrite_cons(struct tb_ring *ring, u16 cons) { - iowrite16(value, ring_desc_base(ring) + offset); + /* + * The other 16-bits in the register is read-only and writes to it + * are ignored by the hardware so we can save one ioread32() by + * filling the read-only bits with zeroes. + */ + iowrite32(cons, ring_desc_base(ring) + 8); +} + +static void ring_iowrite_prod(struct tb_ring *ring, u16 prod) +{ + /* See ring_iowrite_cons() above for explanation */ + iowrite32(prod << 16, ring_desc_base(ring) + 8); } static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset) @@ -197,7 +209,10 @@ static void ring_write_descriptors(struct tb_ring *ring) descriptor->sof = frame->sof; } ring->head = (ring->head + 1) % ring->size; - ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8); + if (ring->is_tx) + ring_iowrite_prod(ring, ring->head); + else + ring_iowrite_cons(ring, ring->head); } } @@ -662,7 +677,7 @@ void tb_ring_stop(struct tb_ring *ring) ring_iowrite32options(ring, 0, 0); ring_iowrite64desc(ring, 0, 0); - ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8); + ring_iowrite32desc(ring, 0, 8); ring_iowrite32desc(ring, 0, 12); ring->head = 0; ring->tail = 0; @@ -845,12 +860,52 @@ static irqreturn_t nhi_msi(int irq, void *data) return IRQ_HANDLED; } -static int nhi_suspend_noirq(struct device *dev) +static int __nhi_suspend_noirq(struct device *dev, bool wakeup) { struct pci_dev *pdev = to_pci_dev(dev); struct tb *tb = pci_get_drvdata(pdev); + struct tb_nhi *nhi = tb->nhi; + int ret; + + ret = tb_domain_suspend_noirq(tb); + if (ret) + return ret; + + if (nhi->ops && nhi->ops->suspend_noirq) { + ret = nhi->ops->suspend_noirq(tb->nhi, wakeup); + if (ret) + return ret; + } + + return 0; +} + +static int nhi_suspend_noirq(struct device *dev) +{ + return __nhi_suspend_noirq(dev, device_may_wakeup(dev)); +} + +static bool nhi_wake_supported(struct pci_dev *pdev) +{ + u8 val; + + /* + * If power rails are sustainable for wakeup from S4 this + * property is set by the BIOS. 
+ */ + if (!device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val)) + return !!val; + + return true; +} + +static int nhi_poweroff_noirq(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + bool wakeup; - return tb_domain_suspend_noirq(tb); + wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev); + return __nhi_suspend_noirq(dev, wakeup); } static void nhi_enable_int_throttling(struct tb_nhi *nhi) @@ -873,16 +928,24 @@ static int nhi_resume_noirq(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct tb *tb = pci_get_drvdata(pdev); + struct tb_nhi *nhi = tb->nhi; + int ret; /* * Check that the device is still there. It may be that the user * unplugged last device which causes the host controller to go * away on PCs. */ - if (!pci_device_is_present(pdev)) - tb->nhi->going_away = true; - else + if (!pci_device_is_present(pdev)) { + nhi->going_away = true; + } else { + if (nhi->ops && nhi->ops->resume_noirq) { + ret = nhi->ops->resume_noirq(nhi); + if (ret) + return ret; + } nhi_enable_int_throttling(tb->nhi); + } return tb_domain_resume_noirq(tb); } @@ -915,16 +978,35 @@ static int nhi_runtime_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct tb *tb = pci_get_drvdata(pdev); + struct tb_nhi *nhi = tb->nhi; + int ret; - return tb_domain_runtime_suspend(tb); + ret = tb_domain_runtime_suspend(tb); + if (ret) + return ret; + + if (nhi->ops && nhi->ops->runtime_suspend) { + ret = nhi->ops->runtime_suspend(tb->nhi); + if (ret) + return ret; + } + return 0; } static int nhi_runtime_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct tb *tb = pci_get_drvdata(pdev); + struct tb_nhi *nhi = tb->nhi; + int ret; - nhi_enable_int_throttling(tb->nhi); + if (nhi->ops && nhi->ops->runtime_resume) { + ret = nhi->ops->runtime_resume(nhi); + if (ret) + return ret; + } + + nhi_enable_int_throttling(nhi); return tb_domain_runtime_resume(tb); } @@ -952,6 +1034,9 @@ static void nhi_shutdown(struct tb_nhi *nhi) flush_work(&nhi->interrupt_work); } ida_destroy(&nhi->msix_ida); + + if (nhi->ops && nhi->ops->shutdown) + nhi->ops->shutdown(nhi); } static int nhi_init_msi(struct tb_nhi *nhi) @@ -996,12 +1081,27 @@ static int nhi_init_msi(struct tb_nhi *nhi) return 0; } +static bool nhi_imr_valid(struct pci_dev *pdev) +{ + u8 val; + + if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val)) + return !!val; + + return true; +} + static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct tb_nhi *nhi; struct tb *tb; int res; + if (!nhi_imr_valid(pdev)) { + dev_warn(&pdev->dev, "firmware image not valid, aborting\n"); + return -ENODEV; + } + res = pcim_enable_device(pdev); if (res) { dev_err(&pdev->dev, "cannot enable PCI device, aborting\n"); return res; } @@ -1019,6 +1119,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM; nhi->pdev = pdev; + nhi->ops = (const struct tb_nhi_ops *)id->driver_data; /* cannot fail - table is allocated in pcim_iomap_regions */ nhi->iobase = pcim_iomap_table(pdev)[0]; nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff; @@ -1051,6 +1152,12 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); + if (nhi->ops && nhi->ops->init) { + res = nhi->ops->init(nhi); + if (res) + return res; + } + tb = icm_probe(nhi); if (!tb) tb = tb_probe(nhi); @@ -1111,6 +1218,7 @@ static const struct dev_pm_ops nhi_pm_ops = { .restore_noirq = nhi_resume_noirq, .suspend = nhi_suspend, .freeze = nhi_suspend, +
.poweroff_noirq = nhi_poweroff_noirq, .poweroff = nhi_suspend, .complete = nhi_complete, .runtime_suspend = nhi_runtime_suspend, @@ -1158,6 +1266,13 @@ static struct pci_device_id nhi_ids[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + + /* Any USB4 compliant host */ + { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) }, { 0,} }; diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h index 1b5d47ecd3ed..5d276ee9b38e 100644 --- a/drivers/thunderbolt/nhi.h +++ b/drivers/thunderbolt/nhi.h @@ -30,6 +30,26 @@ enum nhi_mailbox_cmd { int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data); enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi); +/** + * struct tb_nhi_ops - NHI specific optional operations + * @init: NHI specific initialization + * @suspend_noirq: NHI specific suspend_noirq hook + * @resume_noirq: NHI specific resume_noirq hook + * @runtime_suspend: NHI specific runtime_suspend hook + * @runtime_resume: NHI specific runtime_resume hook + * @shutdown: NHI specific shutdown + */ +struct tb_nhi_ops { + int (*init)(struct tb_nhi *nhi); + int (*suspend_noirq)(struct tb_nhi *nhi, bool wakeup); + int (*resume_noirq)(struct tb_nhi *nhi); + int (*runtime_suspend)(struct tb_nhi *nhi); + int (*runtime_resume)(struct tb_nhi *nhi); + void (*shutdown)(struct tb_nhi *nhi); +}; + +extern const struct tb_nhi_ops icl_nhi_ops; + /* * PCI IDs used in this driver from Win Ridge forward. There is no * need for the PCI quirk anymore as we will use ICM also on Apple @@ -51,5 +71,9 @@ enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi); #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE 0x15ea #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI 0x15eb #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef +#define PCI_DEVICE_ID_INTEL_ICL_NHI1 0x8a0d +#define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17 + +#define PCI_CLASS_SERIAL_USB_USB4 0x0c0340 #endif diff --git a/drivers/thunderbolt/nhi_ops.c b/drivers/thunderbolt/nhi_ops.c new file mode 100644 index 000000000000..6795851aac95 --- /dev/null +++ b/drivers/thunderbolt/nhi_ops.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NHI specific operations + * + * Copyright (C) 2019, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/delay.h> +#include <linux/suspend.h> + +#include "nhi.h" +#include "nhi_regs.h" +#include "tb.h" + +/* Ice Lake specific NHI operations */ + +#define ICL_LC_MAILBOX_TIMEOUT 500 /* ms */ + +static int check_for_device(struct device *dev, void *data) +{ + return tb_is_switch(dev); +} + +static bool icl_nhi_is_device_connected(struct tb_nhi *nhi) +{ + struct tb *tb = pci_get_drvdata(nhi->pdev); + int ret; + + ret = device_for_each_child(&tb->root_switch->dev, NULL, + check_for_device); + return ret > 0; +} + +static int icl_nhi_force_power(struct tb_nhi *nhi, bool power) +{ + u32 vs_cap; + + /* + * The Thunderbolt host controller is present always in Ice Lake + * but the firmware may not be loaded and running (depending + * whether there is device connected and so on). 
Each time the + * controller is used we need to "Force Power" it first and wait + * for the firmware to indicate it is up and running. This "Force + * Power" is really not about actually powering on/off the + * controller so it is accessible even if "Force Power" is off. + * + * The actual power management happens inside shared ACPI power + * resources using standard ACPI methods. + */ + pci_read_config_dword(nhi->pdev, VS_CAP_22, &vs_cap); + if (power) { + vs_cap &= ~VS_CAP_22_DMA_DELAY_MASK; + vs_cap |= 0x22 << VS_CAP_22_DMA_DELAY_SHIFT; + vs_cap |= VS_CAP_22_FORCE_POWER; + } else { + vs_cap &= ~VS_CAP_22_FORCE_POWER; + } + pci_write_config_dword(nhi->pdev, VS_CAP_22, vs_cap); + + if (power) { + unsigned int retries = 10; + u32 val; + + /* Wait until the firmware tells it is up and running */ + do { + pci_read_config_dword(nhi->pdev, VS_CAP_9, &val); + if (val & VS_CAP_9_FW_READY) + return 0; + msleep(250); + } while (--retries); + + return -ETIMEDOUT; + } + + return 0; +} + +static void icl_nhi_lc_mailbox_cmd(struct tb_nhi *nhi, enum icl_lc_mailbox_cmd cmd) +{ + u32 data; + + data = (cmd << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK; + pci_write_config_dword(nhi->pdev, VS_CAP_19, data | VS_CAP_19_VALID); +} + +static int icl_nhi_lc_mailbox_cmd_complete(struct tb_nhi *nhi, int timeout) +{ + unsigned long end; + u32 data; + + if (!timeout) + goto clear; + + end = jiffies + msecs_to_jiffies(timeout); + do { + pci_read_config_dword(nhi->pdev, VS_CAP_18, &data); + if (data & VS_CAP_18_DONE) + goto clear; + msleep(100); + } while (time_before(jiffies, end)); + + return -ETIMEDOUT; + +clear: + /* Clear the valid bit */ + pci_write_config_dword(nhi->pdev, VS_CAP_19, 0); + return 0; +} + +static void icl_nhi_set_ltr(struct tb_nhi *nhi) +{ + u32 max_ltr, ltr; + + pci_read_config_dword(nhi->pdev, VS_CAP_16, &max_ltr); + max_ltr &= 0xffff; + /* Program the same value for both snoop and no-snoop */ + ltr = max_ltr << 16 | max_ltr; + pci_write_config_dword(nhi->pdev, VS_CAP_15, ltr); +} + +static int icl_nhi_suspend(struct tb_nhi *nhi) +{ + int ret; + + if (icl_nhi_is_device_connected(nhi)) + return 0; + + /* + * If there is no device connected we need to perform both: a + * handshake through LC mailbox and force power down before + * entering D3. + */ + icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET); + ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT); + if (ret) + return ret; + + return icl_nhi_force_power(nhi, false); +} + +static int icl_nhi_suspend_noirq(struct tb_nhi *nhi, bool wakeup) +{ + enum icl_lc_mailbox_cmd cmd; + + if (!pm_suspend_via_firmware()) + return icl_nhi_suspend(nhi); + + cmd = wakeup ? 
ICL_LC_GO2SX : ICL_LC_GO2SX_NO_WAKE; + icl_nhi_lc_mailbox_cmd(nhi, cmd); + return icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT); +} + +static int icl_nhi_resume(struct tb_nhi *nhi) +{ + int ret; + + ret = icl_nhi_force_power(nhi, true); + if (ret) + return ret; + + icl_nhi_set_ltr(nhi); + return 0; +} + +static void icl_nhi_shutdown(struct tb_nhi *nhi) +{ + icl_nhi_force_power(nhi, false); +} + +const struct tb_nhi_ops icl_nhi_ops = { + .init = icl_nhi_resume, + .suspend_noirq = icl_nhi_suspend_noirq, + .resume_noirq = icl_nhi_resume, + .runtime_suspend = icl_nhi_suspend, + .runtime_resume = icl_nhi_resume, + .shutdown = icl_nhi_shutdown, +}; diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h index a60bd98c1d04..0d4970dcef84 100644 --- a/drivers/thunderbolt/nhi_regs.h +++ b/drivers/thunderbolt/nhi_regs.h @@ -124,4 +124,41 @@ struct ring_desc { #define REG_FW_STS_ICM_EN_INVERT BIT(1) #define REG_FW_STS_ICM_EN BIT(0) +/* ICL NHI VSEC registers */ + +/* FW ready */ +#define VS_CAP_9 0xc8 +#define VS_CAP_9_FW_READY BIT(31) +/* UUID */ +#define VS_CAP_10 0xcc +#define VS_CAP_11 0xd0 +/* LTR */ +#define VS_CAP_15 0xe0 +#define VS_CAP_16 0xe4 +/* TBT2PCIe */ +#define VS_CAP_18 0xec +#define VS_CAP_18_DONE BIT(0) +/* PCIe2TBT */ +#define VS_CAP_19 0xf0 +#define VS_CAP_19_VALID BIT(0) +#define VS_CAP_19_CMD_SHIFT 1 +#define VS_CAP_19_CMD_MASK GENMASK(7, 1) +/* Force power */ +#define VS_CAP_22 0xfc +#define VS_CAP_22_FORCE_POWER BIT(1) +#define VS_CAP_22_DMA_DELAY_MASK GENMASK(31, 24) +#define VS_CAP_22_DMA_DELAY_SHIFT 24 + +/** + * enum icl_lc_mailbox_cmd - ICL specific LC mailbox commands + * @ICL_LC_GO2SX: Ask LC to enter Sx with wake + * @ICL_LC_GO2SX_NO_WAKE: Ask LC to enter Sx without wake + * @ICL_LC_PREPARE_FOR_RESET: Prepare LC for reset + */ +enum icl_lc_mailbox_cmd { + ICL_LC_GO2SX = 0x02, + ICL_LC_GO2SX_NO_WAKE = 0x03, + ICL_LC_PREPARE_FOR_RESET = 0x21, +}; + #endif diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c index afe5f8391ebf..ad58559ea88e 100644 --- a/drivers/thunderbolt/path.c +++ b/drivers/thunderbolt/path.c @@ -220,7 +220,8 @@ err: * Creates path between two ports starting with given @src_hopid. Reserves * HopIDs for each port (they can be different from @src_hopid depending on * how many HopIDs each port already have reserved). If there are dual - * links on the path, prioritizes using @link_nr. + * links on the path, prioritizes using @link_nr but takes into account + * that the lanes may be bonded. * * Return: Returns a tb_path on success or NULL on failure. */ @@ -259,7 +260,9 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid, if (!in_port) goto err; - if (in_port->dual_link_port && in_port->link_nr != link_nr) + /* When lanes are bonded the primary link must be used */ + if (!in_port->bonded && in_port->dual_link_port && + in_port->link_nr != link_nr) in_port = in_port->dual_link_port; ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid); @@ -271,8 +274,27 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid, if (!out_port) goto err; - if (out_port->dual_link_port && out_port->link_nr != link_nr) - out_port = out_port->dual_link_port; + /* + * Pick up right port when going from non-bonded to + * bonded or from bonded to non-bonded. + */ + if (out_port->dual_link_port) { + if (!in_port->bonded && out_port->bonded && + out_port->link_nr) { + /* + * Use primary link when going from + * non-bonded to bonded.
+ */ + out_port = out_port->dual_link_port; + } else if (!out_port->bonded && + out_port->link_nr != link_nr) { + /* + * If out port is not bonded follow + * link_nr. + */ + out_port = out_port->dual_link_port; + } + } if (i == num_hops - 1) ret = tb_port_alloc_out_hopid(out_port, dst_hopid, @@ -535,3 +557,25 @@ bool tb_path_is_invalid(struct tb_path *path) } return false; } + +/** + * tb_path_switch_on_path() - Does the path go through certain switch + * @path: Path to check + * @sw: Switch to check + * + * Goes over all hops on path and checks if @sw is any of them. + * Direction does not matter. + */ +bool tb_path_switch_on_path(const struct tb_path *path, + const struct tb_switch *sw) +{ + int i; + + for (i = 0; i < path->path_length; i++) { + if (path->hops[i].in_port->sw == sw || + path->hops[i].out_port->sw == sw) + return true; + } + + return false; +} diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 5668a44e0653..ad5479f21174 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -163,12 +163,14 @@ static int nvm_validate_and_write(struct tb_switch *sw) image_size -= hdr_size; } + if (tb_switch_is_usb4(sw)) + return usb4_switch_nvm_write(sw, 0, buf, image_size); return dma_port_flash_write(sw->dma_port, 0, buf, image_size); } -static int nvm_authenticate_host(struct tb_switch *sw) +static int nvm_authenticate_host_dma_port(struct tb_switch *sw) { - int ret; + int ret = 0; /* * Root switch NVM upgrade requires that we disconnect the @@ -176,6 +178,8 @@ static int nvm_authenticate_host(struct tb_switch *sw) * already). */ if (!sw->safe_mode) { + u32 status; + ret = tb_domain_disconnect_all_paths(sw->tb); if (ret) return ret; @@ -184,7 +188,16 @@ static int nvm_authenticate_host(struct tb_switch *sw) * everything goes well so getting timeout is expected. */ ret = dma_port_flash_update_auth(sw->dma_port); - return ret == -ETIMEDOUT ? 0 : ret; + if (!ret || ret == -ETIMEDOUT) + return 0; + + /* + * Any error from update auth operation requires power + * cycling of the host router. + */ + tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n"); + if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0) + nvm_set_auth_status(sw, status); } /* @@ -192,16 +205,24 @@ static int nvm_authenticate_host(struct tb_switch *sw) * switch. */ dma_port_power_cycle(sw->dma_port); - return 0; + return ret; } -static int nvm_authenticate_device(struct tb_switch *sw) +static int nvm_authenticate_device_dma_port(struct tb_switch *sw) { int ret, retries = 10; ret = dma_port_flash_update_auth(sw->dma_port); - if (ret && ret != -ETIMEDOUT) + switch (ret) { + case 0: + case -ETIMEDOUT: + case -EACCES: + case -EINVAL: + /* Power cycle is required */ + break; + default: return ret; + } /* * Poll here for the authentication status. It takes some time @@ -232,6 +253,78 @@ static int nvm_authenticate_device(struct tb_switch *sw) return -ETIMEDOUT; } +static void nvm_authenticate_start_dma_port(struct tb_switch *sw) +{ + struct pci_dev *root_port; + + /* + * During host router NVM upgrade we should not allow root port to + * go into D3cold because some root ports cannot trigger PME + * itself. To be on the safe side keep the root port in D0 during + * the whole upgrade process. 
+ */ + root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev); + if (root_port) + pm_runtime_get_noresume(&root_port->dev); +} + +static void nvm_authenticate_complete_dma_port(struct tb_switch *sw) +{ + struct pci_dev *root_port; + + root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev); + if (root_port) + pm_runtime_put(&root_port->dev); +} + +static inline bool nvm_readable(struct tb_switch *sw) +{ + if (tb_switch_is_usb4(sw)) { + /* + * USB4 devices must support NVM operations but it is + * optional for hosts. Therefore we query the NVM sector + * size here and if it is supported assume NVM + * operations are implemented. + */ + return usb4_switch_nvm_sector_size(sw) > 0; + } + + /* Thunderbolt 2 and 3 devices support NVM through DMA port */ + return !!sw->dma_port; +} + +static inline bool nvm_upgradeable(struct tb_switch *sw) +{ + if (sw->no_nvm_upgrade) + return false; + return nvm_readable(sw); +} + +static inline int nvm_read(struct tb_switch *sw, unsigned int address, + void *buf, size_t size) +{ + if (tb_switch_is_usb4(sw)) + return usb4_switch_nvm_read(sw, address, buf, size); + return dma_port_flash_read(sw->dma_port, address, buf, size); +} + +static int nvm_authenticate(struct tb_switch *sw) +{ + int ret; + + if (tb_switch_is_usb4(sw)) + return usb4_switch_nvm_authenticate(sw); + + if (!tb_route(sw)) { + nvm_authenticate_start_dma_port(sw); + ret = nvm_authenticate_host_dma_port(sw); + } else { + ret = nvm_authenticate_device_dma_port(sw); + } + + return ret; +} + static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val, size_t bytes) { @@ -245,7 +338,7 @@ static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val, goto out; } - ret = dma_port_flash_read(sw->dma_port, offset, val, bytes); + ret = nvm_read(sw, offset, val, bytes); mutex_unlock(&sw->tb->lock); out: @@ -322,9 +415,21 @@ static int tb_switch_nvm_add(struct tb_switch *sw) u32 val; int ret; - if (!sw->dma_port) + if (!nvm_readable(sw)) return 0; + /* + * The NVM format of non-Intel hardware is not known so + * currently restrict NVM upgrade for Intel hardware. We may + * relax this in the future when we learn other NVM formats. 
+ */ + if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) { + dev_info(&sw->dev, + "NVM format of vendor %#x is not known, disabling NVM upgrade\n", + sw->config.vendor_id); + return 0; + } + nvm = kzalloc(sizeof(*nvm), GFP_KERNEL); if (!nvm) return -ENOMEM; @@ -339,8 +444,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw) if (!sw->safe_mode) { u32 nvm_size, hdr_size; - ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val, - sizeof(val)); + ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val)); if (ret) goto err_ida; @@ -348,8 +452,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw) nvm_size = (SZ_1M << (val & 7)) / 8; nvm_size = (nvm_size - hdr_size) / 2; - ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val, - sizeof(val)); + ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val)); if (ret) goto err_ida; @@ -364,12 +467,14 @@ static int tb_switch_nvm_add(struct tb_switch *sw) nvm->active = nvm_dev; } - nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false); - if (IS_ERR(nvm_dev)) { - ret = PTR_ERR(nvm_dev); - goto err_nvm_active; + if (!sw->no_nvm_upgrade) { + nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false); + if (IS_ERR(nvm_dev)) { + ret = PTR_ERR(nvm_dev); + goto err_nvm_active; + } + nvm->non_active = nvm_dev; } - nvm->non_active = nvm_dev; sw->nvm = nvm; return 0; @@ -398,7 +503,8 @@ static void tb_switch_nvm_remove(struct tb_switch *sw) if (!nvm->authenticating) nvm_clear_auth_status(sw); - nvmem_unregister(nvm->non_active); + if (nvm->non_active) + nvmem_unregister(nvm->non_active); if (nvm->active) nvmem_unregister(nvm->active); ida_simple_remove(&nvm_ida, nvm->id); @@ -550,17 +656,17 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits) if (credits == 0 || port->sw->is_unplugged) return 0; - nfc_credits = port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK; + nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK; nfc_credits += credits; - tb_port_dbg(port, "adding %d NFC credits to %lu", - credits, port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK); + tb_port_dbg(port, "adding %d NFC credits to %lu", credits, + port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK); - port->config.nfc_credits &= ~TB_PORT_NFC_CREDITS_MASK; + port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK; port->config.nfc_credits |= nfc_credits; return tb_port_write(port, &port->config.nfc_credits, - TB_CFG_PORT, 4, 1); + TB_CFG_PORT, ADP_CS_4, 1); } /** @@ -575,14 +681,14 @@ int tb_port_set_initial_credits(struct tb_port *port, u32 credits) u32 data; int ret; - ret = tb_port_read(port, &data, TB_CFG_PORT, 5, 1); + ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1); if (ret) return ret; - data &= ~TB_PORT_LCA_MASK; - data |= (credits << TB_PORT_LCA_SHIFT) & TB_PORT_LCA_MASK; + data &= ~ADP_CS_5_LCA_MASK; + data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK; - return tb_port_write(port, &data, TB_CFG_PORT, 5, 1); + return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1); } /** @@ -598,6 +704,24 @@ int tb_port_clear_counter(struct tb_port *port, int counter) } /** + * tb_port_unlock() - Unlock downstream port + * @port: Port to unlock + * + * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the + * downstream router accessible for CM. 
+ */ +int tb_port_unlock(struct tb_port *port) +{ + if (tb_switch_is_icm(port->sw)) + return 0; + if (!tb_port_is_null(port)) + return -EINVAL; + if (tb_switch_is_usb4(port->sw)) + return usb4_port_unlock(port); + return 0; +} + +/** * tb_init_port() - initialize a port * * This is a helper method for tb_switch_alloc. Does not check or initialize @@ -611,8 +735,14 @@ static int tb_init_port(struct tb_port *port) int cap; res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8); - if (res) + if (res) { + if (res == -ENODEV) { + tb_dbg(port->sw->tb, " Port %d: not implemented\n", + port->port); + return 0; + } return res; + } /* Port 0 is the switch itself and has no PHY. */ if (port->config.type == TB_TYPE_PORT && port->port != 0) { @@ -622,6 +752,10 @@ static int tb_init_port(struct tb_port *port) port->cap_phy = cap; else tb_port_WARN(port, "non switch port without a PHY\n"); + + cap = tb_port_find_cap(port, TB_PORT_CAP_USB4); + if (cap > 0) + port->cap_usb4 = cap; } else if (port->port != 0) { cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP); if (cap > 0) @@ -636,6 +770,7 @@ static int tb_init_port(struct tb_port *port) ida_init(&port->out_hopids); } + INIT_LIST_HEAD(&port->list); return 0; } @@ -766,6 +901,132 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, return next; } +static int tb_port_get_link_speed(struct tb_port *port) +{ + u32 val, speed; + int ret; + + if (!port->cap_phy) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >> + LANE_ADP_CS_1_CURRENT_SPEED_SHIFT; + return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10; +} + +static int tb_port_get_link_width(struct tb_port *port) +{ + u32 val; + int ret; + + if (!port->cap_phy) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >> + LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT; +} + +static bool tb_port_is_width_supported(struct tb_port *port, int width) +{ + u32 phy, widths; + int ret; + + if (!port->cap_phy) + return false; + + ret = tb_port_read(port, &phy, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_0, 1); + if (ret) + return ret; + + widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >> + LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT; + + return !!(widths & width); +} + +static int tb_port_set_link_width(struct tb_port *port, unsigned int width) +{ + u32 val; + int ret; + + if (!port->cap_phy) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK; + switch (width) { + case 1: + val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE << + LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; + break; + case 2: + val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL << + LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; + break; + default: + return -EINVAL; + } + + val |= LANE_ADP_CS_1_LB; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); +} + +static int tb_port_lane_bonding_enable(struct tb_port *port) +{ + int ret; + + /* + * Enable lane bonding for both links if not already enabled by + * for example the boot firmware. 
+ */ + ret = tb_port_get_link_width(port); + if (ret == 1) { + ret = tb_port_set_link_width(port, 2); + if (ret) + return ret; + } + + ret = tb_port_get_link_width(port->dual_link_port); + if (ret == 1) { + ret = tb_port_set_link_width(port->dual_link_port, 2); + if (ret) { + tb_port_set_link_width(port, 1); + return ret; + } + } + + port->bonded = true; + port->dual_link_port->bonded = true; + + return 0; +} + +static void tb_port_lane_bonding_disable(struct tb_port *port) +{ + port->dual_link_port->bonded = false; + port->bonded = false; + + tb_port_set_link_width(port->dual_link_port, 1); + tb_port_set_link_width(port, 1); +} + /** * tb_port_is_enabled() - Is the adapter port enabled * @port: Port to check @@ -781,12 +1042,47 @@ bool tb_port_is_enabled(struct tb_port *port) case TB_TYPE_DP_HDMI_OUT: return tb_dp_port_is_enabled(port); + case TB_TYPE_USB3_UP: + case TB_TYPE_USB3_DOWN: + return tb_usb3_port_is_enabled(port); + default: return false; } } /** + * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled + * @port: USB3 adapter port to check + */ +bool tb_usb3_port_is_enabled(struct tb_port *port) +{ + u32 data; + + if (tb_port_read(port, &data, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_0, 1)) + return false; + + return !!(data & ADP_USB3_CS_0_PE); +} + +/** + * tb_usb3_port_enable() - Enable USB3 adapter port + * @port: USB3 adapter port to enable + * @enable: Enable/disable the USB3 adapter + */ +int tb_usb3_port_enable(struct tb_port *port, bool enable) +{ + u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V) + : ADP_USB3_CS_0_V; + + if (!port->cap_adap) + return -ENXIO; + return tb_port_write(port, &word, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_0, 1); +} + +/** * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled * @port: PCIe port to check */ @@ -794,10 +1090,11 @@ bool tb_pci_port_is_enabled(struct tb_port *port) { u32 data; - if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1)) + if (tb_port_read(port, &data, TB_CFG_PORT, + port->cap_adap + ADP_PCIE_CS_0, 1)) return false; - return !!(data & TB_PCI_EN); + return !!(data & ADP_PCIE_CS_0_PE); } /** @@ -807,10 +1104,11 @@ bool tb_pci_port_is_enabled(struct tb_port *port) */ int tb_pci_port_enable(struct tb_port *port, bool enable) { - u32 word = enable ? TB_PCI_EN : 0x0; + u32 word = enable ? 
ADP_PCIE_CS_0_PE : 0x0;

 if (!port->cap_adap)
 return -ENXIO;
- return tb_port_write(port, &word, TB_CFG_PORT, port->cap_adap, 1);
+ return tb_port_write(port, &word, TB_CFG_PORT,
+ port->cap_adap + ADP_PCIE_CS_0, 1);
 }

 /**
@@ -824,11 +1122,12 @@ int tb_dp_port_hpd_is_active(struct tb_port *port)
 u32 data;
 int ret;

- ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 2, 1);
+ ret = tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
 if (ret)
 return ret;

- return !!(data & TB_DP_HDP);
+ return !!(data & ADP_DP_CS_2_HDP);
 }

 /**
@@ -842,12 +1141,14 @@ int tb_dp_port_hpd_clear(struct tb_port *port)
 u32 data;
 int ret;

- ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
+ ret = tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_3, 1);
 if (ret)
 return ret;

- data |= TB_DP_HPDC;
- return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
+ data |= ADP_DP_CS_3_HDPC;
+ return tb_port_write(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_3, 1);
 }

 /**
@@ -865,20 +1166,23 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
 u32 data[2];
 int ret;

- ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
- ARRAY_SIZE(data));
+ ret = tb_port_read(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
 if (ret)
 return ret;

- data[0] &= ~TB_DP_VIDEO_HOPID_MASK;
- data[1] &= ~(TB_DP_AUX_RX_HOPID_MASK | TB_DP_AUX_TX_HOPID_MASK);
+ data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
+ data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
+ data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;

- data[0] |= (video << TB_DP_VIDEO_HOPID_SHIFT) & TB_DP_VIDEO_HOPID_MASK;
- data[1] |= aux_tx & TB_DP_AUX_TX_HOPID_MASK;
- data[1] |= (aux_rx << TB_DP_AUX_RX_HOPID_SHIFT) & TB_DP_AUX_RX_HOPID_MASK;
+ data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
+ ADP_DP_CS_0_VIDEO_HOPID_MASK;
+ data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
+ data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
+ ADP_DP_CS_1_AUX_RX_HOPID_MASK;

- return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
- ARRAY_SIZE(data));
+ return tb_port_write(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
 }

 /**
@@ -887,12 +1191,13 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
 */
 bool tb_dp_port_is_enabled(struct tb_port *port)
 {
- u32 data;
+ u32 data[2];

- if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
+ if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
+ ARRAY_SIZE(data)))
 return false;

- return !!(data & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
+ return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
 }

 /**
@@ -905,37 +1210,57 @@ bool tb_dp_port_is_enabled(struct tb_port *port)
 */
 int tb_dp_port_enable(struct tb_port *port, bool enable)
 {
- u32 data;
+ u32 data[2];
 int ret;

- ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1);
+ ret = tb_port_read(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
 if (ret)
 return ret;

 if (enable)
- data |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
+ data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
 else
- data &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);
+ data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

- return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap, 1);
+ return tb_port_write(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
 }

 /* switch utility functions */

-static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
+static const char
*tb_switch_generation_name(const struct tb_switch *sw) { - tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n", - sw->vendor_id, sw->device_id, sw->revision, - sw->thunderbolt_version); - tb_dbg(tb, " Max Port Number: %d\n", sw->max_port_number); + switch (sw->generation) { + case 1: + return "Thunderbolt 1"; + case 2: + return "Thunderbolt 2"; + case 3: + return "Thunderbolt 3"; + case 4: + return "USB4"; + default: + return "Unknown"; + } +} + +static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw) +{ + const struct tb_regs_switch_header *regs = &sw->config; + + tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n", + tb_switch_generation_name(sw), regs->vendor_id, regs->device_id, + regs->revision, regs->thunderbolt_version); + tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number); tb_dbg(tb, " Config:\n"); tb_dbg(tb, " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n", - sw->upstream_port_number, sw->depth, - (((u64) sw->route_hi) << 32) | sw->route_lo, - sw->enabled, sw->plug_events_delay); + regs->upstream_port_number, regs->depth, + (((u64) regs->route_hi) << 32) | regs->route_lo, + regs->enabled, regs->plug_events_delay); tb_dbg(tb, " unknown1: %#x unknown4: %#x\n", - sw->__unknown1, sw->__unknown4); + regs->__unknown1, regs->__unknown4); } /** @@ -974,7 +1299,7 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active) u32 data; int res; - if (!sw->config.enabled) + if (tb_switch_is_icm(sw)) return 0; sw->config.plug_events_delay = 0xff; @@ -982,6 +1307,10 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active) if (res) return res; + /* Plug events are always enabled in USB4 */ + if (tb_switch_is_usb4(sw)) + return 0; + res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1); if (res) return res; @@ -1022,13 +1351,6 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) if (sw->authorized) goto unlock; - /* - * Make sure there is no PCIe rescan ongoing when a new PCIe - * tunnel is created. Otherwise the PCIe rescan code might find - * the new tunnel too early. - */ - pci_lock_rescan_remove(); - switch (val) { /* Approve switch */ case 1: @@ -1048,8 +1370,6 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) break; } - pci_unlock_rescan_remove(); - if (!ret) { sw->authorized = val; /* Notify status change to the userspace */ @@ -1111,6 +1431,15 @@ device_name_show(struct device *dev, struct device_attribute *attr, char *buf) } static DEVICE_ATTR_RO(device_name); +static ssize_t +generation_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct tb_switch *sw = tb_to_switch(dev); + + return sprintf(buf, "%u\n", sw->generation); +} +static DEVICE_ATTR_RO(generation); + static ssize_t key_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1163,30 +1492,36 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR(key, 0600, key_show, key_store); -static void nvm_authenticate_start(struct tb_switch *sw) +static ssize_t speed_show(struct device *dev, struct device_attribute *attr, + char *buf) { - struct pci_dev *root_port; + struct tb_switch *sw = tb_to_switch(dev); - /* - * During host router NVM upgrade we should not allow root port to - * go into D3cold because some root ports cannot trigger PME - * itself. To be on the safe side keep the root port in D0 during - * the whole upgrade process. 
- */ - root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev); - if (root_port) - pm_runtime_get_noresume(&root_port->dev); + return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed); } -static void nvm_authenticate_complete(struct tb_switch *sw) +/* + * Currently all lanes must run at the same speed but we expose here + * both directions to allow possible asymmetric links in the future. + */ +static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL); +static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL); + +static ssize_t lanes_show(struct device *dev, struct device_attribute *attr, + char *buf) { - struct pci_dev *root_port; + struct tb_switch *sw = tb_to_switch(dev); - root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev); - if (root_port) - pm_runtime_put(&root_port->dev); + return sprintf(buf, "%u\n", sw->link_width); } +/* + * Currently link has same amount of lanes both directions (1 or 2) but + * expose them separately to allow possible asymmetric links in the future. + */ +static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL); +static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL); + static ssize_t nvm_authenticate_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1235,19 +1570,7 @@ static ssize_t nvm_authenticate_store(struct device *dev, goto exit_unlock; sw->nvm->authenticating = true; - - if (!tb_route(sw)) { - /* - * Keep root port from suspending as long as the - * NVM upgrade process is running. - */ - nvm_authenticate_start(sw); - ret = nvm_authenticate_host(sw); - if (ret) - nvm_authenticate_complete(sw); - } else { - ret = nvm_authenticate_device(sw); - } + ret = nvm_authenticate(sw); } exit_unlock: @@ -1316,9 +1639,14 @@ static struct attribute *switch_attrs[] = { &dev_attr_boot.attr, &dev_attr_device.attr, &dev_attr_device_name.attr, + &dev_attr_generation.attr, &dev_attr_key.attr, &dev_attr_nvm_authenticate.attr, &dev_attr_nvm_version.attr, + &dev_attr_rx_speed.attr, + &dev_attr_rx_lanes.attr, + &dev_attr_tx_speed.attr, + &dev_attr_tx_lanes.attr, &dev_attr_vendor.attr, &dev_attr_vendor_name.attr, &dev_attr_unique_id.attr, @@ -1331,15 +1659,37 @@ static umode_t switch_attr_is_visible(struct kobject *kobj, struct device *dev = container_of(kobj, struct device, kobj); struct tb_switch *sw = tb_to_switch(dev); - if (attr == &dev_attr_key.attr) { + if (attr == &dev_attr_device.attr) { + if (!sw->device) + return 0; + } else if (attr == &dev_attr_device_name.attr) { + if (!sw->device_name) + return 0; + } else if (attr == &dev_attr_vendor.attr) { + if (!sw->vendor) + return 0; + } else if (attr == &dev_attr_vendor_name.attr) { + if (!sw->vendor_name) + return 0; + } else if (attr == &dev_attr_key.attr) { if (tb_route(sw) && sw->tb->security_level == TB_SECURITY_SECURE && sw->security_level == TB_SECURITY_SECURE) return attr->mode; return 0; - } else if (attr == &dev_attr_nvm_authenticate.attr || - attr == &dev_attr_nvm_version.attr) { - if (sw->dma_port) + } else if (attr == &dev_attr_rx_speed.attr || + attr == &dev_attr_rx_lanes.attr || + attr == &dev_attr_tx_speed.attr || + attr == &dev_attr_tx_lanes.attr) { + if (tb_route(sw)) + return attr->mode; + return 0; + } else if (attr == &dev_attr_nvm_authenticate.attr) { + if (nvm_upgradeable(sw)) + return attr->mode; + return 0; + } else if (attr == &dev_attr_nvm_version.attr) { + if (nvm_readable(sw)) return attr->mode; return 0; } else if (attr == &dev_attr_boot.attr) { @@ -1364,14 +1714,14 @@ static const struct attribute_group *switch_groups[] = { static void tb_switch_release(struct device *dev) { struct 
tb_switch *sw = tb_to_switch(dev); - int i; + struct tb_port *port; dma_port_free(sw->dma_port); - for (i = 1; i <= sw->config.max_port_number; i++) { - if (!sw->ports[i].disabled) { - ida_destroy(&sw->ports[i].in_hopids); - ida_destroy(&sw->ports[i].out_hopids); + tb_switch_for_each_port(sw, port) { + if (!port->disabled) { + ida_destroy(&port->in_hopids); + ida_destroy(&port->out_hopids); } } @@ -1446,9 +1796,14 @@ static int tb_switch_get_generation(struct tb_switch *sw) case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: + case PCI_DEVICE_ID_INTEL_ICL_NHI0: + case PCI_DEVICE_ID_INTEL_ICL_NHI1: return 3; default: + if (tb_switch_is_usb4(sw)) + return 4; + /* * For unknown switches assume generation to be 1 to be * on the safe side. @@ -1459,6 +1814,19 @@ static int tb_switch_get_generation(struct tb_switch *sw) } } +static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth) +{ + int max_depth; + + if (tb_switch_is_usb4(sw) || + (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch))) + max_depth = USB4_SWITCH_MAX_DEPTH; + else + max_depth = TB_SWITCH_MAX_DEPTH; + + return depth > max_depth; +} + /** * tb_switch_alloc() - allocate a switch * @tb: Pointer to the owning domain @@ -1480,10 +1848,16 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, int upstream_port; int i, ret, depth; - /* Make sure we do not exceed maximum topology limit */ + /* Unlock the downstream port so we can access the switch below */ + if (route) { + struct tb_switch *parent_sw = tb_to_switch(parent); + struct tb_port *down; + + down = tb_port_at(route, parent_sw); + tb_port_unlock(down); + } + depth = tb_route_length(route); - if (depth > TB_SWITCH_MAX_DEPTH) - return ERR_PTR(-EADDRNOTAVAIL); upstream_port = tb_cfg_get_upstream_port(tb->ctl, route); if (upstream_port < 0) @@ -1498,8 +1872,10 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, if (ret) goto err_free_sw_ports; + sw->generation = tb_switch_get_generation(sw); + tb_dbg(tb, "current switch config:\n"); - tb_dump_switch(tb, &sw->config); + tb_dump_switch(tb, sw); /* configure switch */ sw->config.upstream_port_number = upstream_port; @@ -1508,6 +1884,12 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, sw->config.route_lo = lower_32_bits(route); sw->config.enabled = 0; + /* Make sure we do not exceed maximum topology limit */ + if (tb_switch_exceeds_max_depth(sw, depth)) { + ret = -EADDRNOTAVAIL; + goto err_free_sw_ports; + } + /* initialize ports */ sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports), GFP_KERNEL); @@ -1522,14 +1904,9 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, sw->ports[i].port = i; } - sw->generation = tb_switch_get_generation(sw); - ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS); - if (ret < 0) { - tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n"); - goto err_free_sw_ports; - } - sw->cap_plug_events = ret; + if (ret > 0) + sw->cap_plug_events = ret; ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER); if (ret > 0) @@ -1600,7 +1977,8 @@ tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route) * * Call this function before the switch is added to the system. It will * upload configuration to the switch and makes it available for the - * connection manager to use. + * connection manager to use. 
Can be called to the switch again after + * resume from low power states to re-initialize it. * * Return: %0 in case of success and negative errno in case of failure */ @@ -1611,21 +1989,50 @@ int tb_switch_configure(struct tb_switch *sw) int ret; route = tb_route(sw); - tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n", - route, tb_route_length(route), sw->config.upstream_port_number); - if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) - tb_sw_warn(sw, "unknown switch vendor id %#x\n", - sw->config.vendor_id); + tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n", + sw->config.enabled ? "restoring " : "initializing", route, + tb_route_length(route), sw->config.upstream_port_number); sw->config.enabled = 1; - /* upload configuration */ - ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3); - if (ret) - return ret; + if (tb_switch_is_usb4(sw)) { + /* + * For USB4 devices, we need to program the CM version + * accordingly so that it knows to expose all the + * additional capabilities. + */ + sw->config.cmuv = USB4_VERSION_1_0; + + /* Enumerate the switch */ + ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, + ROUTER_CS_1, 4); + if (ret) + return ret; + + ret = usb4_switch_setup(sw); + if (ret) + return ret; + + ret = usb4_switch_configure_link(sw); + } else { + if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) + tb_sw_warn(sw, "unknown switch vendor id %#x\n", + sw->config.vendor_id); - ret = tb_lc_configure_link(sw); + if (!sw->cap_plug_events) { + tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n"); + return -ENODEV; + } + + /* Enumerate the switch */ + ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, + ROUTER_CS_1, 3); + if (ret) + return ret; + + ret = tb_lc_configure_link(sw); + } if (ret) return ret; @@ -1634,18 +2041,32 @@ int tb_switch_configure(struct tb_switch *sw) static int tb_switch_set_uuid(struct tb_switch *sw) { + bool uid = false; u32 uuid[4]; int ret; if (sw->uuid) return 0; - /* - * The newer controllers include fused UUID as part of link - * controller specific registers - */ - ret = tb_lc_read_uuid(sw, uuid); - if (ret) { + if (tb_switch_is_usb4(sw)) { + ret = usb4_switch_read_uid(sw, &sw->uid); + if (ret) + return ret; + uid = true; + } else { + /* + * The newer controllers include fused UUID as part of + * link controller specific registers + */ + ret = tb_lc_read_uuid(sw, uuid); + if (ret) { + if (ret != -EINVAL) + return ret; + uid = true; + } + } + + if (uid) { /* * ICM generates UUID based on UID and fills the upper * two words with ones. This is not strictly following @@ -1670,13 +2091,16 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) int ret; switch (sw->generation) { - case 3: - break; - case 2: /* Only root switch can be upgraded */ if (tb_route(sw)) return 0; + + /* fallthrough */ + case 3: + ret = tb_switch_set_uuid(sw); + if (ret) + return ret; break; default: @@ -1689,13 +2113,30 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) break; } - if (sw->no_nvm_upgrade) + /* Root switch DMA port requires running firmware */ + if (!tb_route(sw) && !tb_switch_is_icm(sw)) return 0; sw->dma_port = dma_port_alloc(sw); if (!sw->dma_port) return 0; + if (sw->no_nvm_upgrade) + return 0; + + /* + * If there is status already set then authentication failed + * when the dma_port_flash_update_auth() returned. Power cycling + * is not needed (it was done already) so only thing we do here + * is to unblock runtime PM of the root port. 
+ */ + nvm_get_auth_status(sw, &status); + if (status) { + if (!tb_route(sw)) + nvm_authenticate_complete_dma_port(sw); + return 0; + } + /* * Check status of the previous flash authentication. If there * is one we need to power cycle the switch in any case to make @@ -1707,13 +2148,10 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) /* Now we can allow root port to suspend again */ if (!tb_route(sw)) - nvm_authenticate_complete(sw); + nvm_authenticate_complete_dma_port(sw); if (status) { tb_sw_info(sw, "switch flash authentication failed\n"); - ret = tb_switch_set_uuid(sw); - if (ret) - return ret; nvm_set_auth_status(sw, status); } @@ -1727,6 +2165,155 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) return -ESHUTDOWN; } +static void tb_switch_default_link_ports(struct tb_switch *sw) +{ + int i; + + for (i = 1; i <= sw->config.max_port_number; i += 2) { + struct tb_port *port = &sw->ports[i]; + struct tb_port *subordinate; + + if (!tb_port_is_null(port)) + continue; + + /* Check for the subordinate port */ + if (i == sw->config.max_port_number || + !tb_port_is_null(&sw->ports[i + 1])) + continue; + + /* Link them if not already done so (by DROM) */ + subordinate = &sw->ports[i + 1]; + if (!port->dual_link_port && !subordinate->dual_link_port) { + port->link_nr = 0; + port->dual_link_port = subordinate; + subordinate->link_nr = 1; + subordinate->dual_link_port = port; + + tb_sw_dbg(sw, "linked ports %d <-> %d\n", + port->port, subordinate->port); + } + } +} + +static bool tb_switch_lane_bonding_possible(struct tb_switch *sw) +{ + const struct tb_port *up = tb_upstream_port(sw); + + if (!up->dual_link_port || !up->dual_link_port->remote) + return false; + + if (tb_switch_is_usb4(sw)) + return usb4_switch_lane_bonding_possible(sw); + return tb_lc_lane_bonding_possible(sw); +} + +static int tb_switch_update_link_attributes(struct tb_switch *sw) +{ + struct tb_port *up; + bool change = false; + int ret; + + if (!tb_route(sw) || tb_switch_is_icm(sw)) + return 0; + + up = tb_upstream_port(sw); + + ret = tb_port_get_link_speed(up); + if (ret < 0) + return ret; + if (sw->link_speed != ret) + change = true; + sw->link_speed = ret; + + ret = tb_port_get_link_width(up); + if (ret < 0) + return ret; + if (sw->link_width != ret) + change = true; + sw->link_width = ret; + + /* Notify userspace that there is possible link attribute change */ + if (device_is_registered(&sw->dev) && change) + kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE); + + return 0; +} + +/** + * tb_switch_lane_bonding_enable() - Enable lane bonding + * @sw: Switch to enable lane bonding + * + * Connection manager can call this function to enable lane bonding of a + * switch. If conditions are correct and both switches support the feature, + * lanes are bonded. It is safe to call this to any switch. 
+ */ +int tb_switch_lane_bonding_enable(struct tb_switch *sw) +{ + struct tb_switch *parent = tb_to_switch(sw->dev.parent); + struct tb_port *up, *down; + u64 route = tb_route(sw); + int ret; + + if (!route) + return 0; + + if (!tb_switch_lane_bonding_possible(sw)) + return 0; + + up = tb_upstream_port(sw); + down = tb_port_at(route, parent); + + if (!tb_port_is_width_supported(up, 2) || + !tb_port_is_width_supported(down, 2)) + return 0; + + ret = tb_port_lane_bonding_enable(up); + if (ret) { + tb_port_warn(up, "failed to enable lane bonding\n"); + return ret; + } + + ret = tb_port_lane_bonding_enable(down); + if (ret) { + tb_port_warn(down, "failed to enable lane bonding\n"); + tb_port_lane_bonding_disable(up); + return ret; + } + + tb_switch_update_link_attributes(sw); + + tb_sw_dbg(sw, "lane bonding enabled\n"); + return ret; +} + +/** + * tb_switch_lane_bonding_disable() - Disable lane bonding + * @sw: Switch whose lane bonding to disable + * + * Disables lane bonding between @sw and parent. This can be called even + * if lanes were not bonded originally. + */ +void tb_switch_lane_bonding_disable(struct tb_switch *sw) +{ + struct tb_switch *parent = tb_to_switch(sw->dev.parent); + struct tb_port *up, *down; + + if (!tb_route(sw)) + return; + + up = tb_upstream_port(sw); + if (!up->bonded) + return; + + down = tb_port_at(tb_route(sw), parent); + + tb_port_lane_bonding_disable(up); + tb_port_lane_bonding_disable(down); + + tb_switch_update_link_attributes(sw); + tb_sw_dbg(sw, "lane bonding disabled\n"); +} + /** * tb_switch_add() - Add a switch to the domain * @sw: Switch to add @@ -1751,21 +2338,25 @@ int tb_switch_add(struct tb_switch *sw) * configuration based mailbox. */ ret = tb_switch_add_dma_port(sw); - if (ret) + if (ret) { + dev_err(&sw->dev, "failed to add DMA port\n"); return ret; + } if (!sw->safe_mode) { /* read drom */ ret = tb_drom_read(sw); if (ret) { - tb_sw_warn(sw, "tb_eeprom_read_rom failed\n"); + dev_err(&sw->dev, "reading DROM failed\n"); return ret; } tb_sw_dbg(sw, "uid: %#llx\n", sw->uid); ret = tb_switch_set_uuid(sw); - if (ret) + if (ret) { + dev_err(&sw->dev, "failed to set UUID\n"); return ret; + } for (i = 0; i <= sw->config.max_port_number; i++) { if (sw->ports[i].disabled) { @@ -1773,14 +2364,28 @@ int tb_switch_add(struct tb_switch *sw) continue; } ret = tb_init_port(&sw->ports[i]); - if (ret) + if (ret) { + dev_err(&sw->dev, "failed to initialize port %d\n", i); return ret; + } } + + tb_switch_default_link_ports(sw); + + ret = tb_switch_update_link_attributes(sw); + if (ret) + return ret; + + ret = tb_switch_tmu_init(sw); + if (ret) + return ret; } ret = device_add(&sw->dev); - if (ret) + if (ret) { + dev_err(&sw->dev, "failed to add device: %d\n", ret); return ret; + } if (tb_route(sw)) { dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n", @@ -1792,6 +2397,7 @@ int tb_switch_add(struct tb_switch *sw) ret = tb_switch_nvm_add(sw); if (ret) { + dev_err(&sw->dev, "failed to add NVM devices\n"); device_del(&sw->dev); return ret; } @@ -1818,7 +2424,7 @@ int tb_switch_add(struct tb_switch *sw) */ void tb_switch_remove(struct tb_switch *sw) { - int i; + struct tb_port *port; if (sw->rpm) { pm_runtime_get_sync(&sw->dev); @@ -1826,19 +2432,23 @@ void tb_switch_remove(struct tb_switch *sw) } /* port 0 is the switch itself and never has a remote */ - for (i = 1; i <= sw->config.max_port_number; i++) { - if (tb_port_has_remote(&sw->ports[i])) { - tb_switch_remove(sw->ports[i].remote->sw); - sw->ports[i].remote = NULL; - } else if (sw->ports[i].xdomain) { 
- tb_xdomain_remove(sw->ports[i].xdomain); - sw->ports[i].xdomain = NULL; + tb_switch_for_each_port(sw, port) { + if (tb_port_has_remote(port)) { + tb_switch_remove(port->remote->sw); + port->remote = NULL; + } else if (port->xdomain) { + tb_xdomain_remove(port->xdomain); + port->xdomain = NULL; } } if (!sw->is_unplugged) tb_plug_events_active(sw, false); - tb_lc_unconfigure_link(sw); + + if (tb_switch_is_usb4(sw)) + usb4_switch_unconfigure_link(sw); + else + tb_lc_unconfigure_link(sw); tb_switch_nvm_remove(sw); @@ -1852,7 +2462,8 @@ void tb_switch_remove(struct tb_switch *sw) */ void tb_sw_set_unplugged(struct tb_switch *sw) { - int i; + struct tb_port *port; + if (sw == sw->tb->root_switch) { tb_sw_WARN(sw, "cannot unplug root switch\n"); return; @@ -1862,17 +2473,19 @@ void tb_sw_set_unplugged(struct tb_switch *sw) return; } sw->is_unplugged = true; - for (i = 0; i <= sw->config.max_port_number; i++) { - if (tb_port_has_remote(&sw->ports[i])) - tb_sw_set_unplugged(sw->ports[i].remote->sw); - else if (sw->ports[i].xdomain) - sw->ports[i].xdomain->is_unplugged = true; + tb_switch_for_each_port(sw, port) { + if (tb_port_has_remote(port)) + tb_sw_set_unplugged(port->remote->sw); + else if (port->xdomain) + port->xdomain->is_unplugged = true; } } int tb_switch_resume(struct tb_switch *sw) { - int i, err; + struct tb_port *port; + int err; + tb_sw_dbg(sw, "resuming switch\n"); /* @@ -1893,7 +2506,10 @@ int tb_switch_resume(struct tb_switch *sw) return err; } - err = tb_drom_read_uid_only(sw, &uid); + if (tb_switch_is_usb4(sw)) + err = usb4_switch_read_uid(sw, &uid); + else + err = tb_drom_read_uid_only(sw, &uid); if (err) { tb_sw_warn(sw, "uid read failed\n"); return err; @@ -1906,23 +2522,12 @@ int tb_switch_resume(struct tb_switch *sw) } } - /* upload configuration */ - err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3); - if (err) - return err; - - err = tb_lc_configure_link(sw); - if (err) - return err; - - err = tb_plug_events_active(sw, true); + err = tb_switch_configure(sw); if (err) return err; /* check for surviving downstream switches */ - for (i = 1; i <= sw->config.max_port_number; i++) { - struct tb_port *port = &sw->ports[i]; - + tb_switch_for_each_port(sw, port) { if (!tb_port_has_remote(port) && !port->xdomain) continue; @@ -1933,8 +2538,14 @@ int tb_switch_resume(struct tb_switch *sw) tb_sw_set_unplugged(port->remote->sw); else if (port->xdomain) port->xdomain->is_unplugged = true; - } else if (tb_port_has_remote(port)) { - if (tb_switch_resume(port->remote->sw)) { + } else if (tb_port_has_remote(port) || port->xdomain) { + /* + * Always unlock the port so the downstream + * switch/domain is accessible. 
+ */ + if (tb_port_unlock(port)) + tb_port_warn(port, "failed to unlock port\n"); + if (port->remote && tb_switch_resume(port->remote->sw)) { tb_port_warn(port, "lost during suspend, disconnecting\n"); tb_sw_set_unplugged(port->remote->sw); @@ -1946,17 +2557,75 @@ int tb_switch_resume(struct tb_switch *sw) void tb_switch_suspend(struct tb_switch *sw) { - int i, err; + struct tb_port *port; + int err; + err = tb_plug_events_active(sw, false); if (err) return; - for (i = 1; i <= sw->config.max_port_number; i++) { - if (tb_port_has_remote(&sw->ports[i])) - tb_switch_suspend(sw->ports[i].remote->sw); + tb_switch_for_each_port(sw, port) { + if (tb_port_has_remote(port)) + tb_switch_suspend(port->remote->sw); } - tb_lc_set_sleep(sw); + if (tb_switch_is_usb4(sw)) + usb4_switch_set_sleep(sw); + else + tb_lc_set_sleep(sw); +} + +/** + * tb_switch_query_dp_resource() - Query availability of DP resource + * @sw: Switch whose DP resource is queried + * @in: DP IN port + * + * Queries availability of DP resource for DP tunneling using switch + * specific means. Returns %true if resource is available. + */ +bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) +{ + if (tb_switch_is_usb4(sw)) + return usb4_switch_query_dp_resource(sw, in); + return tb_lc_dp_sink_query(sw, in); +} + +/** + * tb_switch_alloc_dp_resource() - Allocate available DP resource + * @sw: Switch whose DP resource is allocated + * @in: DP IN port + * + * Allocates DP resource for DP tunneling. The resource must be + * available for this to succeed (see tb_switch_query_dp_resource()). + * Returns %0 in success and negative errno otherwise. + */ +int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) +{ + if (tb_switch_is_usb4(sw)) + return usb4_switch_alloc_dp_resource(sw, in); + return tb_lc_dp_sink_alloc(sw, in); +} + +/** + * tb_switch_dealloc_dp_resource() - De-allocate DP resource + * @sw: Switch whose DP resource is de-allocated + * @in: DP IN port + * + * De-allocates DP resource that was previously allocated for DP + * tunneling. 
+ */ +void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) +{ + int ret; + + if (tb_switch_is_usb4(sw)) + ret = usb4_switch_dealloc_dp_resource(sw, in); + else + ret = tb_lc_dp_sink_dealloc(sw, in); + + if (ret) + tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n", + in->port); } struct tb_sw_lookup { @@ -2069,6 +2738,24 @@ struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route) return NULL; } +/** + * tb_switch_find_port() - return the first port of @type on @sw or NULL + * @sw: Switch to find the port from + * @type: Port type to look for + */ +struct tb_port *tb_switch_find_port(struct tb_switch *sw, + enum tb_port_type type) +{ + struct tb_port *port; + + tb_switch_for_each_port(sw, port) { + if (port->config.type == type) + return port; + } + + return NULL; +} + void tb_switch_exit(void) { ida_destroy(&nvm_ida); diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 1f7a9e1cc09c..107cd232f486 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -9,7 +9,6 @@ #include <linux/slab.h> #include <linux/errno.h> #include <linux/delay.h> -#include <linux/platform_data/x86/apple.h> #include "tb.h" #include "tb_regs.h" @@ -18,6 +17,7 @@ /** * struct tb_cm - Simple Thunderbolt connection manager * @tunnel_list: List of active tunnels + * @dp_resources: List of available DP resources for DP tunneling * @hotplug_active: tb_handle_hotplug will stop progressing plug * events and exit if this is not set (it needs to * acquire the lock one more time). Used to drain wq @@ -25,6 +25,7 @@ */ struct tb_cm { struct list_head tunnel_list; + struct list_head dp_resources; bool hotplug_active; }; @@ -56,17 +57,51 @@ static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug) /* enumeration & hot plug handling */ +static void tb_add_dp_resources(struct tb_switch *sw) +{ + struct tb_cm *tcm = tb_priv(sw->tb); + struct tb_port *port; + + tb_switch_for_each_port(sw, port) { + if (!tb_port_is_dpin(port)) + continue; + + if (!tb_switch_query_dp_resource(sw, port)) + continue; + + list_add_tail(&port->list, &tcm->dp_resources); + tb_port_dbg(port, "DP IN resource available\n"); + } +} + +static void tb_remove_dp_resources(struct tb_switch *sw) +{ + struct tb_cm *tcm = tb_priv(sw->tb); + struct tb_port *port, *tmp; + + /* Clear children resources first */ + tb_switch_for_each_port(sw, port) { + if (tb_port_has_remote(port)) + tb_remove_dp_resources(port->remote->sw); + } + + list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) { + if (port->sw == sw) { + tb_port_dbg(port, "DP OUT resource unavailable\n"); + list_del_init(&port->list); + } + } +} + static void tb_discover_tunnels(struct tb_switch *sw) { struct tb *tb = sw->tb; struct tb_cm *tcm = tb_priv(tb); struct tb_port *port; - int i; - for (i = 1; i <= sw->config.max_port_number; i++) { + tb_switch_for_each_port(sw, port) { struct tb_tunnel *tunnel = NULL; - port = &sw->ports[i]; switch (port->config.type) { case TB_TYPE_DP_HDMI_IN: tunnel = tb_tunnel_discover_dp(tb, port); @@ -76,6 +111,10 @@ static void tb_discover_tunnels(struct tb_switch *sw) tunnel = tb_tunnel_discover_pci(tb, port); break; + case TB_TYPE_USB3_DOWN: + tunnel = tb_tunnel_discover_usb3(tb, port); + break; + default: break; } @@ -95,9 +134,9 @@ static void tb_discover_tunnels(struct tb_switch *sw) list_add_tail(&tunnel->list, &tcm->tunnel_list); } - for (i = 1; i <= sw->config.max_port_number; i++) { - if (tb_port_has_remote(&sw->ports[i])) - 
tb_discover_tunnels(sw->ports[i].remote->sw); + tb_switch_for_each_port(sw, port) { + if (tb_port_has_remote(port)) + tb_discover_tunnels(port->remote->sw); } } @@ -123,6 +162,137 @@ static void tb_scan_xdomain(struct tb_port *port) } } +static int tb_enable_tmu(struct tb_switch *sw) +{ + int ret; + + /* If it is already enabled in correct mode, don't touch it */ + if (tb_switch_tmu_is_enabled(sw)) + return 0; + + ret = tb_switch_tmu_disable(sw); + if (ret) + return ret; + + ret = tb_switch_tmu_post_time(sw); + if (ret) + return ret; + + return tb_switch_tmu_enable(sw); +} + +/** + * tb_find_unused_port() - return the first inactive port on @sw + * @sw: Switch to find the port on + * @type: Port type to look for + */ +static struct tb_port *tb_find_unused_port(struct tb_switch *sw, + enum tb_port_type type) +{ + struct tb_port *port; + + tb_switch_for_each_port(sw, port) { + if (tb_is_upstream_port(port)) + continue; + if (port->config.type != type) + continue; + if (!port->cap_adap) + continue; + if (tb_port_is_enabled(port)) + continue; + return port; + } + return NULL; +} + +static struct tb_port *tb_find_usb3_down(struct tb_switch *sw, + const struct tb_port *port) +{ + struct tb_port *down; + + down = usb4_switch_map_usb3_down(sw, port); + if (down) { + if (WARN_ON(!tb_port_is_usb3_down(down))) + goto out; + if (WARN_ON(tb_usb3_port_is_enabled(down))) + goto out; + + return down; + } + +out: + return tb_find_unused_port(sw, TB_TYPE_USB3_DOWN); +} + +static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw) +{ + struct tb_switch *parent = tb_switch_parent(sw); + struct tb_port *up, *down, *port; + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel; + + up = tb_switch_find_port(sw, TB_TYPE_USB3_UP); + if (!up) + return 0; + + /* + * Look up available down port. Since we are chaining it should + * be found right above this switch. + */ + port = tb_port_at(tb_route(sw), parent); + down = tb_find_usb3_down(parent, port); + if (!down) + return 0; + + if (tb_route(parent)) { + struct tb_port *parent_up; + /* + * Check first that the parent switch has its upstream USB3 + * port enabled. Otherwise the chain is not complete and + * there is no point setting up a new tunnel. 
+ */ + parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP); + if (!parent_up || !tb_port_is_enabled(parent_up)) + return 0; + } + + tunnel = tb_tunnel_alloc_usb3(tb, up, down); + if (!tunnel) + return -ENOMEM; + + if (tb_tunnel_activate(tunnel)) { + tb_port_info(up, + "USB3 tunnel activation failed, aborting\n"); + tb_tunnel_free(tunnel); + return -EIO; + } + + list_add_tail(&tunnel->list, &tcm->tunnel_list); + return 0; +} + +static int tb_create_usb3_tunnels(struct tb_switch *sw) +{ + struct tb_port *port; + int ret; + + if (tb_route(sw)) { + ret = tb_tunnel_usb3(sw->tb, sw); + if (ret) + return ret; + } + + tb_switch_for_each_port(sw, port) { + if (!tb_port_has_remote(port)) + continue; + ret = tb_create_usb3_tunnels(port->remote->sw); + if (ret) + return ret; + } + + return 0; +} + static void tb_scan_port(struct tb_port *port); /** @@ -130,9 +300,10 @@ static void tb_scan_port(struct tb_port *port); */ static void tb_scan_switch(struct tb_switch *sw) { - int i; - for (i = 1; i <= sw->config.max_port_number; i++) - tb_scan_port(&sw->ports[i]); + struct tb_port *port; + + tb_switch_for_each_port(sw, port) + tb_scan_port(port); } /** @@ -217,11 +388,28 @@ static void tb_scan_port(struct tb_port *port) upstream_port->dual_link_port->remote = port->dual_link_port; } + /* Enable lane bonding if supported */ + if (tb_switch_lane_bonding_enable(sw)) + tb_sw_warn(sw, "failed to enable lane bonding\n"); + + if (tb_enable_tmu(sw)) + tb_sw_warn(sw, "failed to enable TMU\n"); + + /* + * Create USB 3.x tunnels only when the switch is plugged to the + * domain. This is because we scan the domain also during discovery + * and want to discover existing USB 3.x tunnels before we create + * any new. + */ + if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw)) + tb_sw_warn(sw, "USB3 tunnel creation failed\n"); + tb_scan_switch(sw); } -static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type, - struct tb_port *src_port, struct tb_port *dst_port) +static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type, + struct tb_port *src_port, + struct tb_port *dst_port) { struct tb_cm *tcm = tb_priv(tb); struct tb_tunnel *tunnel; @@ -230,14 +418,32 @@ static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type, if (tunnel->type == type && ((src_port && src_port == tunnel->src_port) || (dst_port && dst_port == tunnel->dst_port))) { - tb_tunnel_deactivate(tunnel); - list_del(&tunnel->list); - tb_tunnel_free(tunnel); - return 0; + return tunnel; } } - return -ENODEV; + return NULL; +} + +static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel) +{ + if (!tunnel) + return; + + tb_tunnel_deactivate(tunnel); + list_del(&tunnel->list); + + /* + * In case of DP tunnel make sure the DP IN resource is deallocated + * properly. 
+ */ + if (tb_tunnel_is_dp(tunnel)) { + struct tb_port *in = tunnel->src_port; + + tb_switch_dealloc_dp_resource(in->sw, in); + } + + tb_tunnel_free(tunnel); } /** @@ -250,11 +456,8 @@ static void tb_free_invalid_tunnels(struct tb *tb) struct tb_tunnel *n; list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { - if (tb_tunnel_is_invalid(tunnel)) { - tb_tunnel_deactivate(tunnel); - list_del(&tunnel->list); - tb_tunnel_free(tunnel); - } + if (tb_tunnel_is_invalid(tunnel)) + tb_deactivate_and_free_tunnel(tunnel); } } @@ -263,14 +466,15 @@ static void tb_free_invalid_tunnels(struct tb *tb) */ static void tb_free_unplugged_children(struct tb_switch *sw) { - int i; - for (i = 1; i <= sw->config.max_port_number; i++) { - struct tb_port *port = &sw->ports[i]; + struct tb_port *port; + tb_switch_for_each_port(sw, port) { if (!tb_port_has_remote(port)) continue; if (port->remote->sw->is_unplugged) { + tb_remove_dp_resources(port->remote->sw); + tb_switch_lane_bonding_disable(port->remote->sw); tb_switch_remove(port->remote->sw); port->remote = NULL; if (port->dual_link_port) @@ -281,54 +485,18 @@ static void tb_free_unplugged_children(struct tb_switch *sw) } } -/** - * tb_find_port() - return the first port of @type on @sw or NULL - * @sw: Switch to find the port from - * @type: Port type to look for - */ -static struct tb_port *tb_find_port(struct tb_switch *sw, - enum tb_port_type type) -{ - int i; - for (i = 1; i <= sw->config.max_port_number; i++) - if (sw->ports[i].config.type == type) - return &sw->ports[i]; - return NULL; -} - -/** - * tb_find_unused_port() - return the first inactive port on @sw - * @sw: Switch to find the port on - * @type: Port type to look for - */ -static struct tb_port *tb_find_unused_port(struct tb_switch *sw, - enum tb_port_type type) -{ - int i; - - for (i = 1; i <= sw->config.max_port_number; i++) { - if (tb_is_upstream_port(&sw->ports[i])) - continue; - if (sw->ports[i].config.type != type) - continue; - if (!sw->ports[i].cap_adap) - continue; - if (tb_port_is_enabled(&sw->ports[i])) - continue; - return &sw->ports[i]; - } - return NULL; -} - static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, const struct tb_port *port) { + struct tb_port *down = NULL; + /* * To keep plugging devices consistently in the same PCIe - * hierarchy, do mapping here for root switch downstream PCIe - * ports. + * hierarchy, do mapping here for switch downstream PCIe ports. */ - if (!tb_route(sw)) { + if (tb_switch_is_usb4(sw)) { + down = usb4_switch_map_pcie_down(sw, port); + } else if (!tb_route(sw)) { int phy_port = tb_phy_port_from_link(port->port); int index; @@ -336,64 +504,192 @@ static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, * Hard-coded Thunderbolt port to PCIe down port mapping * per controller. */ - if (tb_switch_is_cr(sw)) + if (tb_switch_is_cactus_ridge(sw) || + tb_switch_is_alpine_ridge(sw)) index = !phy_port ? 6 : 7; - else if (tb_switch_is_fr(sw)) + else if (tb_switch_is_falcon_ridge(sw)) index = !phy_port ? 6 : 8; + else if (tb_switch_is_titan_ridge(sw)) + index = !phy_port ? 
8 : 9; else goto out; /* Validate the hard-coding */ if (WARN_ON(index > sw->config.max_port_number)) goto out; - if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index]))) + + down = &sw->ports[index]; + } + + if (down) { + if (WARN_ON(!tb_port_is_pcie_down(down))) goto out; - if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index]))) + if (WARN_ON(tb_pci_port_is_enabled(down))) goto out; - return &sw->ports[index]; + return down; } out: return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN); } -static int tb_tunnel_dp(struct tb *tb, struct tb_port *out) +static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in, + struct tb_port *out) { - struct tb_cm *tcm = tb_priv(tb); struct tb_switch *sw = out->sw; struct tb_tunnel *tunnel; - struct tb_port *in; + int bw, available_bw = 40000; - if (tb_port_is_enabled(out)) - return 0; + while (sw && sw != in->sw) { + bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */ + /* Leave 10% guard band */ + bw -= bw / 10; - do { - sw = tb_to_switch(sw->dev.parent); - if (!sw) - return 0; - in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN); - } while (!in); + /* + * Check for any active DP tunnels that go through this + * switch and reduce their consumed bandwidth from + * available. + */ + list_for_each_entry(tunnel, &tcm->tunnel_list, list) { + int consumed_bw; - tunnel = tb_tunnel_alloc_dp(tb, in, out); + if (!tb_tunnel_switch_on_path(tunnel, sw)) + continue; + + consumed_bw = tb_tunnel_consumed_bandwidth(tunnel); + if (consumed_bw < 0) + return consumed_bw; + + bw -= consumed_bw; + } + + if (bw < available_bw) + available_bw = bw; + + sw = tb_switch_parent(sw); + } + + return available_bw; +} + +static void tb_tunnel_dp(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_port *port, *in, *out; + struct tb_tunnel *tunnel; + int available_bw; + + /* + * Find pair of inactive DP IN and DP OUT adapters and then + * establish a DP tunnel between them. 
+ */ + tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n"); + + in = NULL; + out = NULL; + list_for_each_entry(port, &tcm->dp_resources, list) { + if (tb_port_is_enabled(port)) { + tb_port_dbg(port, "in use\n"); + continue; + } + + tb_port_dbg(port, "available\n"); + + if (!in && tb_port_is_dpin(port)) + in = port; + else if (!out && tb_port_is_dpout(port)) + out = port; + } + + if (!in) { + tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n"); + return; + } + if (!out) { + tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n"); + return; + } + + if (tb_switch_alloc_dp_resource(in->sw, in)) { + tb_port_dbg(in, "no resource available for DP IN, not tunneling\n"); + return; + } + + /* Calculate available bandwidth between in and out */ + available_bw = tb_available_bw(tcm, in, out); + if (available_bw < 0) { + tb_warn(tb, "failed to determine available bandwidth\n"); + return; + } + + tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n", + available_bw); + + tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw); if (!tunnel) { - tb_port_dbg(out, "DP tunnel allocation failed\n"); - return -ENOMEM; + tb_port_dbg(out, "could not allocate DP tunnel\n"); + goto dealloc_dp; } if (tb_tunnel_activate(tunnel)) { tb_port_info(out, "DP tunnel activation failed, aborting\n"); tb_tunnel_free(tunnel); - return -EIO; + goto dealloc_dp; } list_add_tail(&tunnel->list, &tcm->tunnel_list); - return 0; + return; + +dealloc_dp: + tb_switch_dealloc_dp_resource(in->sw, in); } -static void tb_teardown_dp(struct tb *tb, struct tb_port *out) +static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port) { - tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out); + struct tb_port *in, *out; + struct tb_tunnel *tunnel; + + if (tb_port_is_dpin(port)) { + tb_port_dbg(port, "DP IN resource unavailable\n"); + in = port; + out = NULL; + } else { + tb_port_dbg(port, "DP OUT resource unavailable\n"); + in = NULL; + out = port; + } + + tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out); + tb_deactivate_and_free_tunnel(tunnel); + list_del_init(&port->list); + + /* + * See if there is another DP OUT port that can be used for + * to create another tunnel. + */ + tb_tunnel_dp(tb); +} + +static void tb_dp_resource_available(struct tb *tb, struct tb_port *port) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_port *p; + + if (tb_port_is_enabled(port)) + return; + + list_for_each_entry(p, &tcm->dp_resources, list) { + if (p == port) + return; + } + + tb_port_dbg(port, "DP %s resource available\n", + tb_port_is_dpin(port) ? 
"IN" : "OUT"); + list_add_tail(&port->list, &tcm->dp_resources); + + /* Look for suitable DP IN <-> DP OUT pairs now */ + tb_tunnel_dp(tb); } static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) @@ -403,7 +699,7 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) struct tb_switch *parent_sw; struct tb_tunnel *tunnel; - up = tb_find_port(sw, TB_TYPE_PCIE_UP); + up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); if (!up) return 0; @@ -441,7 +737,7 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) sw = tb_to_switch(xd->dev.parent); dst_port = tb_port_at(xd->route, sw); - nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI); + nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); mutex_lock(&tb->lock); tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring, @@ -468,6 +764,7 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) { struct tb_port *dst_port; + struct tb_tunnel *tunnel; struct tb_switch *sw; sw = tb_to_switch(xd->dev.parent); @@ -478,7 +775,8 @@ static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) * case of cable disconnect) so it is fine if we cannot find it * here anymore. */ - tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port); + tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port); + tb_deactivate_and_free_tunnel(tunnel); } static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) @@ -533,10 +831,15 @@ static void tb_handle_hotplug(struct work_struct *work) tb_port_dbg(port, "switch unplugged\n"); tb_sw_set_unplugged(port->remote->sw); tb_free_invalid_tunnels(tb); + tb_remove_dp_resources(port->remote->sw); + tb_switch_tmu_disable(port->remote->sw); + tb_switch_lane_bonding_disable(port->remote->sw); tb_switch_remove(port->remote->sw); port->remote = NULL; if (port->dual_link_port) port->dual_link_port->remote = NULL; + /* Maybe we can create another DP tunnel */ + tb_tunnel_dp(tb); } else if (port->xdomain) { struct tb_xdomain *xd = tb_xdomain_get(port->xdomain); @@ -553,8 +856,8 @@ static void tb_handle_hotplug(struct work_struct *work) port->xdomain = NULL; __tb_disconnect_xdomain_paths(tb, xd); tb_xdomain_put(xd); - } else if (tb_port_is_dpout(port)) { - tb_teardown_dp(tb, port); + } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { + tb_dp_resource_unavailable(tb, port); } else { tb_port_dbg(port, "got unplug event for disconnected port, ignoring\n"); @@ -567,8 +870,8 @@ static void tb_handle_hotplug(struct work_struct *work) tb_scan_port(port); if (!port->remote) tb_port_dbg(port, "hotplug: no switch found\n"); - } else if (tb_port_is_dpout(port)) { - tb_tunnel_dp(tb, port); + } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { + tb_dp_resource_available(tb, port); } } @@ -597,8 +900,7 @@ static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, route = tb_cfg_get_route(&pkg->header); - if (tb_cfg_error(tb->ctl, route, pkg->port, - TB_CFG_ERROR_ACK_PLUG_EVENT)) { + if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) { tb_warn(tb, "could not ack plug event on %llx:%x\n", route, pkg->port); } @@ -677,10 +979,19 @@ static int tb_start(struct tb *tb) return ret; } + /* Enable TMU if it is off */ + tb_switch_tmu_enable(tb->root_switch); /* Full scan to discover devices added before the driver was loaded. 
*/ tb_scan_switch(tb->root_switch); /* Find out tunnels created by the boot firmware */ tb_discover_tunnels(tb->root_switch); + /* + * If the boot firmware did not create USB 3.x tunnels create them + * now for the whole topology. + */ + tb_create_usb3_tunnels(tb->root_switch); + /* Add DP IN resources for the root switch */ + tb_add_dp_resources(tb->root_switch); /* Make the discovered switches available to the userspace */ device_for_each_child(&tb->root_switch->dev, NULL, tb_scan_finalize_switch); @@ -702,6 +1013,24 @@ static int tb_suspend_noirq(struct tb *tb) return 0; } +static void tb_restore_children(struct tb_switch *sw) +{ + struct tb_port *port; + + if (tb_enable_tmu(sw)) + tb_sw_warn(sw, "failed to restore TMU configuration\n"); + + tb_switch_for_each_port(sw, port) { + if (!tb_port_has_remote(port)) + continue; + + if (tb_switch_lane_bonding_enable(port->remote->sw)) + dev_warn(&sw->dev, "failed to restore lane bonding\n"); + + tb_restore_children(port->remote->sw); + } +} + static int tb_resume_noirq(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); @@ -715,6 +1044,7 @@ static int tb_resume_noirq(struct tb *tb) tb_switch_resume(tb->root_switch); tb_free_invalid_tunnels(tb); tb_free_unplugged_children(tb->root_switch); + tb_restore_children(tb->root_switch); list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) tb_tunnel_restart(tunnel); if (!list_empty(&tcm->tunnel_list)) { @@ -734,11 +1064,10 @@ static int tb_resume_noirq(struct tb *tb) static int tb_free_unplugged_xdomains(struct tb_switch *sw) { - int i, ret = 0; - - for (i = 1; i <= sw->config.max_port_number; i++) { - struct tb_port *port = &sw->ports[i]; + struct tb_port *port; + int ret = 0; + tb_switch_for_each_port(sw, port) { if (tb_is_upstream_port(port)) continue; if (port->xdomain && port->xdomain->is_unplugged) { @@ -783,9 +1112,6 @@ struct tb *tb_probe(struct tb_nhi *nhi) struct tb_cm *tcm; struct tb *tb; - if (!x86_apple_machine) - return NULL; - tb = tb_domain_alloc(nhi, sizeof(*tcm)); if (!tb) return NULL; @@ -795,6 +1121,7 @@ struct tb *tb_probe(struct tb_nhi *nhi) tcm = tb_priv(tb); INIT_LIST_HEAD(&tcm->tunnel_list); + INIT_LIST_HEAD(&tcm->dp_resources); return tb; } diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 6407d529871d..2eb2bcd3cca3 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -44,6 +44,39 @@ struct tb_switch_nvm { #define TB_SWITCH_KEY_SIZE 32 #define TB_SWITCH_MAX_DEPTH 6 +#define USB4_SWITCH_MAX_DEPTH 5 + +/** + * enum tb_switch_tmu_rate - TMU refresh rate + * @TB_SWITCH_TMU_RATE_OFF: %0 (Disable Time Sync handshake) + * @TB_SWITCH_TMU_RATE_HIFI: %16 us time interval between successive + * transmission of the Delay Request TSNOS + * (Time Sync Notification Ordered Set) on a Link + * @TB_SWITCH_TMU_RATE_NORMAL: %1 ms time interval between successive + * transmission of the Delay Request TSNOS on + * a Link + */ +enum tb_switch_tmu_rate { + TB_SWITCH_TMU_RATE_OFF = 0, + TB_SWITCH_TMU_RATE_HIFI = 16, + TB_SWITCH_TMU_RATE_NORMAL = 1000, +}; + +/** + * struct tb_switch_tmu - Structure holding switch TMU configuration + * @cap: Offset to the TMU capability (%0 if not found) + * @has_ucap: Does the switch support uni-directional mode + * @rate: TMU refresh rate related to upstream switch. In case of root + * switch this holds the domain rate. + * @unidirectional: Is the TMU in uni-directional or bi-directional mode + * related to upstream switch. Don't case for root switch. 
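+ *
+ * Embedded in &struct tb_switch as @tmu and filled in by
+ * tb_switch_tmu_init() before any of the other TMU helpers are used.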
+ */ +struct tb_switch_tmu { + int cap; + bool has_ucap; + enum tb_switch_tmu_rate rate; + bool unidirectional; +}; /** * struct tb_switch - a thunderbolt switch @@ -54,6 +87,7 @@ struct tb_switch_nvm { * mailbox this will hold the pointer to that (%NULL * otherwise). If set it also means the switch has * upgradeable NVM. + * @tmu: The switch TMU configuration * @tb: Pointer to the domain the switch belongs to * @uid: Unique ID of the switch * @uuid: UUID of the switch (or %NULL if not supported) @@ -61,6 +95,8 @@ struct tb_switch_nvm { * @device: Device ID of the switch * @vendor_name: Name of the vendor (or %NULL if not known) * @device_name: Name of the device (or %NULL if not known) + * @link_speed: Speed of the link in Gb/s + * @link_width: Width of the link (1 or 2) * @generation: Switch Thunderbolt generation * @cap_plug_events: Offset to the plug events capability (%0 if not found) * @cap_lc: Offset to the link controller capability (%0 if not found) @@ -90,6 +126,7 @@ struct tb_switch { struct tb_regs_switch_header config; struct tb_port *ports; struct tb_dma_port *dma_port; + struct tb_switch_tmu tmu; struct tb *tb; u64 uid; uuid_t *uuid; @@ -97,6 +134,8 @@ struct tb_switch { u16 device; const char *vendor_name; const char *device_name; + unsigned int link_speed; + unsigned int link_width; unsigned int generation; int cap_plug_events; int cap_lc; @@ -124,14 +163,18 @@ struct tb_switch { * @remote: Remote port (%NULL if not connected) * @xdomain: Remote host (%NULL if not connected) * @cap_phy: Offset, zero if not found + * @cap_tmu: Offset of the adapter specific TMU capability (%0 if not present) * @cap_adap: Offset of the adapter specific capability (%0 if not present) + * @cap_usb4: Offset to the USB4 port capability (%0 if not present) * @port: Port number on switch * @disabled: Disabled by eeprom + * @bonded: true if the port is bonded (two lanes combined as one) * @dual_link_port: If the switch is connected using two ports, points * to the other port. * @link_nr: Is this primary or secondary port on the dual_link. 
* @in_hopids: Currently allocated input HopIDs * @out_hopids: Currently allocated output HopIDs + * @list: Used to link ports to DP resources list */ struct tb_port { struct tb_regs_port_header config; @@ -139,13 +182,17 @@ struct tb_port { struct tb_port *remote; struct tb_xdomain *xdomain; int cap_phy; + int cap_tmu; int cap_adap; + int cap_usb4; u8 port; bool disabled; + bool bonded; struct tb_port *dual_link_port; u8 link_nr:1; struct ida in_hopids; struct ida out_hopids; + struct list_head list; }; /** @@ -385,6 +432,16 @@ static inline bool tb_port_is_dpout(const struct tb_port *port) return port && port->config.type == TB_TYPE_DP_HDMI_OUT; } +static inline bool tb_port_is_usb3_down(const struct tb_port *port) +{ + return port && port->config.type == TB_TYPE_USB3_DOWN; +} + +static inline bool tb_port_is_usb3_up(const struct tb_port *port) +{ + return port && port->config.type == TB_TYPE_USB3_UP; +} + static inline int tb_sw_read(struct tb_switch *sw, void *buffer, enum tb_cfg_space space, u32 offset, u32 length) { @@ -399,7 +456,7 @@ static inline int tb_sw_read(struct tb_switch *sw, void *buffer, length); } -static inline int tb_sw_write(struct tb_switch *sw, void *buffer, +static inline int tb_sw_write(struct tb_switch *sw, const void *buffer, enum tb_cfg_space space, u32 offset, u32 length) { if (sw->is_unplugged) @@ -525,11 +582,24 @@ void tb_switch_suspend(struct tb_switch *sw); int tb_switch_resume(struct tb_switch *sw); int tb_switch_reset(struct tb *tb, u64 route); void tb_sw_set_unplugged(struct tb_switch *sw); +struct tb_port *tb_switch_find_port(struct tb_switch *sw, + enum tb_port_type type); struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth); struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid); struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route); +/** + * tb_switch_for_each_port() - Iterate over each switch port + * @sw: Switch whose ports to iterate + * @p: Port used as iterator + * + * Iterates over each switch port skipping the control port (port %0). 
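+ *
+ * A minimal usage sketch (hypothetical caller, assuming @sw points to a
+ * valid switch):
+ *
+ *	struct tb_port *port;
+ *
+ *	tb_switch_for_each_port(sw, port) {
+ *		if (tb_port_is_dpin(port))
+ *			tb_port_dbg(port, "DP IN adapter\n");
+ *	}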
+ */ +#define tb_switch_for_each_port(sw, p) \ + for ((p) = &(sw)->ports[1]; \ + (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++) + static inline struct tb_switch *tb_switch_get(struct tb_switch *sw) { if (sw) @@ -559,17 +629,17 @@ static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw) return tb_to_switch(sw->dev.parent); } -static inline bool tb_switch_is_lr(const struct tb_switch *sw) +static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw) { return sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE; } -static inline bool tb_switch_is_er(const struct tb_switch *sw) +static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw) { return sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE; } -static inline bool tb_switch_is_cr(const struct tb_switch *sw) +static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw) { switch (sw->config.device_id) { case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: @@ -580,7 +650,7 @@ static inline bool tb_switch_is_cr(const struct tb_switch *sw) } } -static inline bool tb_switch_is_fr(const struct tb_switch *sw) +static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw) { switch (sw->config.device_id) { case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: @@ -591,10 +661,79 @@ static inline bool tb_switch_is_fr(const struct tb_switch *sw) } } +static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw) +{ + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: + return true; + default: + return false; + } +} + +static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw) +{ + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: + return true; + default: + return false; + } +} + +/** + * tb_switch_is_usb4() - Is the switch USB4 compliant + * @sw: Switch to check + * + * Returns true if the @sw is USB4 compliant router, false otherwise. + */ +static inline bool tb_switch_is_usb4(const struct tb_switch *sw) +{ + return sw->config.thunderbolt_version == USB4_VERSION_1_0; +} + +/** + * tb_switch_is_icm() - Is the switch handled by ICM firmware + * @sw: Switch to check + * + * In case there is a need to differentiate whether ICM firmware or SW CM + * is handling @sw this function can be called. It is valid to call this + * after tb_switch_alloc() and tb_switch_configure() has been called + * (latter only for SW CM case). 
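+ *
+ * Code paths that only make sense for the software connection manager
+ * typically bail out early, as tb_switch_tmu_init() does:
+ *
+ *	if (tb_switch_is_icm(sw))
+ *		return 0;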
+ */ +static inline bool tb_switch_is_icm(const struct tb_switch *sw) +{ + return !sw->config.enabled; +} + +int tb_switch_lane_bonding_enable(struct tb_switch *sw); +void tb_switch_lane_bonding_disable(struct tb_switch *sw); + +bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in); +int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in); +void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in); + +int tb_switch_tmu_init(struct tb_switch *sw); +int tb_switch_tmu_post_time(struct tb_switch *sw); +int tb_switch_tmu_disable(struct tb_switch *sw); +int tb_switch_tmu_enable(struct tb_switch *sw); + +static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw) +{ + return sw->tmu.rate == TB_SWITCH_TMU_RATE_HIFI && + !sw->tmu.unidirectional; +} + int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged); int tb_port_add_nfc_credits(struct tb_port *port, int credits); int tb_port_set_initial_credits(struct tb_port *port, u32 credits); int tb_port_clear_counter(struct tb_port *port, int counter); +int tb_port_unlock(struct tb_port *port); int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid); void tb_port_release_in_hopid(struct tb_port *port, int hopid); int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid); @@ -603,9 +742,13 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, struct tb_port *prev); int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec); +int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap); int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap); bool tb_port_is_enabled(struct tb_port *port); +bool tb_usb3_port_is_enabled(struct tb_port *port); +int tb_usb3_port_enable(struct tb_port *port, bool enable); + bool tb_pci_port_is_enabled(struct tb_port *port); int tb_pci_port_enable(struct tb_port *port, bool enable); @@ -626,6 +769,8 @@ void tb_path_free(struct tb_path *path); int tb_path_activate(struct tb_path *path); void tb_path_deactivate(struct tb_path *path); bool tb_path_is_invalid(struct tb_path *path); +bool tb_path_switch_on_path(const struct tb_path *path, + const struct tb_switch *sw); int tb_drom_read(struct tb_switch *sw); int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid); @@ -634,6 +779,10 @@ int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid); int tb_lc_configure_link(struct tb_switch *sw); void tb_lc_unconfigure_link(struct tb_switch *sw); int tb_lc_set_sleep(struct tb_switch *sw); +bool tb_lc_lane_bonding_possible(struct tb_switch *sw); +bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in); +int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in); +int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in); static inline int tb_route_length(u64 route) { @@ -663,4 +812,27 @@ void tb_xdomain_remove(struct tb_xdomain *xd); struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link, u8 depth); +int usb4_switch_setup(struct tb_switch *sw); +int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid); +int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, + size_t size); +int usb4_switch_configure_link(struct tb_switch *sw); +void usb4_switch_unconfigure_link(struct tb_switch *sw); +bool usb4_switch_lane_bonding_possible(struct tb_switch *sw); +int usb4_switch_set_sleep(struct tb_switch *sw); +int usb4_switch_nvm_sector_size(struct tb_switch *sw); +int usb4_switch_nvm_read(struct tb_switch 
*sw, unsigned int address, void *buf, + size_t size); +int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address, + const void *buf, size_t size); +int usb4_switch_nvm_authenticate(struct tb_switch *sw); +bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in); +int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in); +int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in); +struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, + const struct tb_port *port); +struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw, + const struct tb_port *port); + +int usb4_port_unlock(struct tb_port *port); #endif diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h index afbe1d29bb03..fc208c567953 100644 --- a/drivers/thunderbolt/tb_msgs.h +++ b/drivers/thunderbolt/tb_msgs.h @@ -67,9 +67,13 @@ struct cfg_error_pkg { u32 zero1:4; u32 port:6; u32 zero2:2; /* Both should be zero, still they are different fields. */ - u32 zero3:16; + u32 zero3:14; + u32 pg:2; } __packed; +#define TB_CFG_ERROR_PG_HOT_PLUG 0x2 +#define TB_CFG_ERROR_PG_HOT_UNPLUG 0x3 + /* TB_CFG_PKG_EVENT */ struct cfg_event_pkg { struct tb_cfg_header header; @@ -104,10 +108,11 @@ enum icm_pkg_code { }; enum icm_event_code { - ICM_EVENT_DEVICE_CONNECTED = 3, - ICM_EVENT_DEVICE_DISCONNECTED = 4, - ICM_EVENT_XDOMAIN_CONNECTED = 6, - ICM_EVENT_XDOMAIN_DISCONNECTED = 7, + ICM_EVENT_DEVICE_CONNECTED = 0x3, + ICM_EVENT_DEVICE_DISCONNECTED = 0x4, + ICM_EVENT_XDOMAIN_CONNECTED = 0x6, + ICM_EVENT_XDOMAIN_DISCONNECTED = 0x7, + ICM_EVENT_RTD3_VETO = 0xa, }; struct icm_pkg_header { @@ -121,6 +126,8 @@ struct icm_pkg_header { #define ICM_FLAGS_NO_KEY BIT(1) #define ICM_FLAGS_SLEVEL_SHIFT 3 #define ICM_FLAGS_SLEVEL_MASK GENMASK(4, 3) +#define ICM_FLAGS_DUAL_LANE BIT(5) +#define ICM_FLAGS_SPEED_GEN3 BIT(7) #define ICM_FLAGS_WRITE BIT(7) struct icm_pkg_driver_ready { @@ -463,6 +470,13 @@ struct icm_tr_pkg_disconnect_xdomain_response { uuid_t remote_uuid; }; +/* Ice Lake messages */ + +struct icm_icl_event_rtd3_veto { + struct icm_pkg_header hdr; + u32 veto_reason; +}; + /* XDomain messages */ struct tb_xdomain_header { diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index deb9d4a977b9..c29c5075525a 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -26,6 +26,7 @@ #define TB_MAX_CONFIG_RW_LENGTH 60 enum tb_switch_cap { + TB_SWITCH_CAP_TMU = 0x03, TB_SWITCH_CAP_VSE = 0x05, }; @@ -41,6 +42,7 @@ enum tb_port_cap { TB_PORT_CAP_TIME1 = 0x03, TB_PORT_CAP_ADAP = 0x04, TB_PORT_CAP_VSE = 0x05, + TB_PORT_CAP_USB4 = 0x06, }; enum tb_port_state { @@ -164,10 +166,52 @@ struct tb_regs_switch_header { * milliseconds. Writing 0x00 is interpreted * as 255ms. 
*/ - u32 __unknown4:16; + u32 cmuv:8; + u32 __unknown4:8; u32 thunderbolt_version:8; } __packed; +/* USB4 version 1.0 */ +#define USB4_VERSION_1_0 0x20 + +#define ROUTER_CS_1 0x01 +#define ROUTER_CS_4 0x04 +#define ROUTER_CS_5 0x05 +#define ROUTER_CS_5_SLP BIT(0) +#define ROUTER_CS_5_C3S BIT(23) +#define ROUTER_CS_5_PTO BIT(24) +#define ROUTER_CS_5_UTO BIT(25) +#define ROUTER_CS_5_HCO BIT(26) +#define ROUTER_CS_5_CV BIT(31) +#define ROUTER_CS_6 0x06 +#define ROUTER_CS_6_SLPR BIT(0) +#define ROUTER_CS_6_TNS BIT(1) +#define ROUTER_CS_6_HCI BIT(18) +#define ROUTER_CS_6_CR BIT(25) +#define ROUTER_CS_7 0x07 +#define ROUTER_CS_9 0x09 +#define ROUTER_CS_25 0x19 +#define ROUTER_CS_26 0x1a +#define ROUTER_CS_26_STATUS_MASK GENMASK(29, 24) +#define ROUTER_CS_26_STATUS_SHIFT 24 +#define ROUTER_CS_26_ONS BIT(30) +#define ROUTER_CS_26_OV BIT(31) + +/* Router TMU configuration */ +#define TMU_RTR_CS_0 0x00 +#define TMU_RTR_CS_0_TD BIT(27) +#define TMU_RTR_CS_0_UCAP BIT(30) +#define TMU_RTR_CS_1 0x01 +#define TMU_RTR_CS_1_LOCAL_TIME_NS_MASK GENMASK(31, 16) +#define TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT 16 +#define TMU_RTR_CS_2 0x02 +#define TMU_RTR_CS_3 0x03 +#define TMU_RTR_CS_3_LOCAL_TIME_NS_MASK GENMASK(15, 0) +#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK GENMASK(31, 16) +#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT 16 +#define TMU_RTR_CS_22 0x16 +#define TMU_RTR_CS_24 0x18 + enum tb_port_type { TB_TYPE_INACTIVE = 0x000000, TB_TYPE_PORT = 0x000001, @@ -178,7 +222,8 @@ enum tb_port_type { TB_TYPE_DP_HDMI_OUT = 0x0e0102, TB_TYPE_PCIE_DOWN = 0x100101, TB_TYPE_PCIE_UP = 0x100102, - /* TB_TYPE_USB = 0x200000, lower order bits are not known */ + TB_TYPE_USB3_DOWN = 0x200101, + TB_TYPE_USB3_UP = 0x200102, }; /* Present on every port in TB_CF_PORT at address zero. */ @@ -211,37 +256,87 @@ struct tb_regs_port_header { } __packed; -/* DWORD 4 */ -#define TB_PORT_NFC_CREDITS_MASK GENMASK(19, 0) -#define TB_PORT_MAX_CREDITS_SHIFT 20 -#define TB_PORT_MAX_CREDITS_MASK GENMASK(26, 20) -/* DWORD 5 */ -#define TB_PORT_LCA_SHIFT 22 -#define TB_PORT_LCA_MASK GENMASK(28, 22) +/* Basic adapter configuration registers */ +#define ADP_CS_4 0x04 +#define ADP_CS_4_NFC_BUFFERS_MASK GENMASK(9, 0) +#define ADP_CS_4_TOTAL_BUFFERS_MASK GENMASK(29, 20) +#define ADP_CS_4_TOTAL_BUFFERS_SHIFT 20 +#define ADP_CS_4_LCK BIT(31) +#define ADP_CS_5 0x05 +#define ADP_CS_5_LCA_MASK GENMASK(28, 22) +#define ADP_CS_5_LCA_SHIFT 22 + +/* TMU adapter registers */ +#define TMU_ADP_CS_3 0x03 +#define TMU_ADP_CS_3_UDM BIT(29) + +/* Lane adapter registers */ +#define LANE_ADP_CS_0 0x00 +#define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK GENMASK(25, 20) +#define LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT 20 +#define LANE_ADP_CS_1 0x01 +#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4) +#define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4 +#define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1 +#define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3 +#define LANE_ADP_CS_1_LB BIT(15) +#define LANE_ADP_CS_1_CURRENT_SPEED_MASK GENMASK(19, 16) +#define LANE_ADP_CS_1_CURRENT_SPEED_SHIFT 16 +#define LANE_ADP_CS_1_CURRENT_SPEED_GEN2 0x8 +#define LANE_ADP_CS_1_CURRENT_SPEED_GEN3 0x4 +#define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20) +#define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20 + +/* USB4 port registers */ +#define PORT_CS_18 0x12 +#define PORT_CS_18_BE BIT(8) +#define PORT_CS_19 0x13 +#define PORT_CS_19_PC BIT(3) /* Display Port adapter registers */ - -/* DWORD 0 */ -#define TB_DP_VIDEO_HOPID_SHIFT 16 -#define TB_DP_VIDEO_HOPID_MASK GENMASK(26, 16) -#define TB_DP_AUX_EN BIT(30) -#define 
TB_DP_VIDEO_EN BIT(31) -/* DWORD 1 */ -#define TB_DP_AUX_TX_HOPID_MASK GENMASK(10, 0) -#define TB_DP_AUX_RX_HOPID_SHIFT 11 -#define TB_DP_AUX_RX_HOPID_MASK GENMASK(21, 11) -/* DWORD 2 */ -#define TB_DP_HDP BIT(6) -/* DWORD 3 */ -#define TB_DP_HPDC BIT(9) -/* DWORD 4 */ -#define TB_DP_LOCAL_CAP 0x4 -/* DWORD 5 */ -#define TB_DP_REMOTE_CAP 0x5 +#define ADP_DP_CS_0 0x00 +#define ADP_DP_CS_0_VIDEO_HOPID_MASK GENMASK(26, 16) +#define ADP_DP_CS_0_VIDEO_HOPID_SHIFT 16 +#define ADP_DP_CS_0_AE BIT(30) +#define ADP_DP_CS_0_VE BIT(31) +#define ADP_DP_CS_1_AUX_TX_HOPID_MASK GENMASK(10, 0) +#define ADP_DP_CS_1_AUX_RX_HOPID_MASK GENMASK(21, 11) +#define ADP_DP_CS_1_AUX_RX_HOPID_SHIFT 11 +#define ADP_DP_CS_2 0x02 +#define ADP_DP_CS_2_HDP BIT(6) +#define ADP_DP_CS_3 0x03 +#define ADP_DP_CS_3_HDPC BIT(9) +#define DP_LOCAL_CAP 0x04 +#define DP_REMOTE_CAP 0x05 +#define DP_STATUS_CTRL 0x06 +#define DP_STATUS_CTRL_CMHS BIT(25) +#define DP_STATUS_CTRL_UF BIT(26) +#define DP_COMMON_CAP 0x07 +/* + * DP_COMMON_CAP offsets work also for DP_LOCAL_CAP and DP_REMOTE_CAP + * with exception of DPRX done. + */ +#define DP_COMMON_CAP_RATE_MASK GENMASK(11, 8) +#define DP_COMMON_CAP_RATE_SHIFT 8 +#define DP_COMMON_CAP_RATE_RBR 0x0 +#define DP_COMMON_CAP_RATE_HBR 0x1 +#define DP_COMMON_CAP_RATE_HBR2 0x2 +#define DP_COMMON_CAP_RATE_HBR3 0x3 +#define DP_COMMON_CAP_LANES_MASK GENMASK(14, 12) +#define DP_COMMON_CAP_LANES_SHIFT 12 +#define DP_COMMON_CAP_1_LANE 0x0 +#define DP_COMMON_CAP_2_LANES 0x1 +#define DP_COMMON_CAP_4_LANES 0x2 +#define DP_COMMON_CAP_DPRX_DONE BIT(31) /* PCIe adapter registers */ +#define ADP_PCIE_CS_0 0x00 +#define ADP_PCIE_CS_0_PE BIT(31) -#define TB_PCI_EN BIT(31) +/* USB adapter registers */ +#define ADP_USB3_CS_0 0x00 +#define ADP_USB3_CS_0_V BIT(30) +#define ADP_USB3_CS_0_PE BIT(31) /* Hop register from TB_CFG_HOPS. 8 byte per entry. 
*/ struct tb_regs_hop { @@ -278,8 +373,17 @@ struct tb_regs_hop { #define TB_LC_DESC_PORT_SIZE_SHIFT 16 #define TB_LC_DESC_PORT_SIZE_MASK GENMASK(27, 16) #define TB_LC_FUSE 0x03 +#define TB_LC_SNK_ALLOCATION 0x10 +#define TB_LC_SNK_ALLOCATION_SNK0_MASK GENMASK(3, 0) +#define TB_LC_SNK_ALLOCATION_SNK0_CM 0x1 +#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT 4 +#define TB_LC_SNK_ALLOCATION_SNK1_MASK GENMASK(7, 4) +#define TB_LC_SNK_ALLOCATION_SNK1_CM 0x1 /* Link controller registers */ +#define TB_LC_PORT_ATTR 0x8d +#define TB_LC_PORT_ATTR_BE BIT(12) + #define TB_LC_SX_CTRL 0x96 #define TB_LC_SX_CTRL_L1C BIT(16) #define TB_LC_SX_CTRL_L2C BIT(20) diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c new file mode 100644 index 000000000000..039c42a06000 --- /dev/null +++ b/drivers/thunderbolt/tmu.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt Time Management Unit (TMU) support + * + * Copyright (C) 2019, Intel Corporation + * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> + * Rajmohan Mani <rajmohan.mani@intel.com> + */ + +#include <linux/delay.h> + +#include "tb.h" + +static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw) +{ + bool root_switch = !tb_route(sw); + + switch (sw->tmu.rate) { + case TB_SWITCH_TMU_RATE_OFF: + return "off"; + + case TB_SWITCH_TMU_RATE_HIFI: + /* Root switch does not have upstream directionality */ + if (root_switch) + return "HiFi"; + if (sw->tmu.unidirectional) + return "uni-directional, HiFi"; + return "bi-directional, HiFi"; + + case TB_SWITCH_TMU_RATE_NORMAL: + if (root_switch) + return "normal"; + return "uni-directional, normal"; + + default: + return "unknown"; + } +} + +static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw) +{ + int ret; + u32 val; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_0, 1); + if (ret) + return false; + + return !!(val & TMU_RTR_CS_0_UCAP); +} + +static int tb_switch_tmu_rate_read(struct tb_switch *sw) +{ + int ret; + u32 val; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_3, 1); + if (ret) + return ret; + + val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT; + return val; +} + +static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate) +{ + int ret; + u32 val; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_3, 1); + if (ret) + return ret; + + val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK; + val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT; + + return tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_3, 1); +} + +static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask, + u32 value) +{ + u32 data; + int ret; + + ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1); + if (ret) + return ret; + + data &= ~mask; + data |= value; + + return tb_port_write(port, &data, TB_CFG_PORT, + port->cap_tmu + offset, 1); +} + +static int tb_port_tmu_set_unidirectional(struct tb_port *port, + bool unidirectional) +{ + u32 val; + + if (!port->sw->tmu.has_ucap) + return 0; + + val = unidirectional ? 
TMU_ADP_CS_3_UDM : 0; + return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val); +} + +static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port) +{ + return tb_port_tmu_set_unidirectional(port, false); +} + +static bool tb_port_tmu_is_unidirectional(struct tb_port *port) +{ + int ret; + u32 val; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_tmu + TMU_ADP_CS_3, 1); + if (ret) + return false; + + return val & TMU_ADP_CS_3_UDM; +} + +static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set) +{ + int ret; + u32 val; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_0, 1); + if (ret) + return ret; + + if (set) + val |= TMU_RTR_CS_0_TD; + else + val &= ~TMU_RTR_CS_0_TD; + + return tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_0, 1); +} + +/** + * tb_switch_tmu_init() - Initialize switch TMU structures + * @sw: Switch to initialized + * + * This function must be called before other TMU related functions to + * makes the internal structures are filled in correctly. Does not + * change any hardware configuration. + */ +int tb_switch_tmu_init(struct tb_switch *sw) +{ + struct tb_port *port; + int ret; + + if (tb_switch_is_icm(sw)) + return 0; + + ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU); + if (ret > 0) + sw->tmu.cap = ret; + + tb_switch_for_each_port(sw, port) { + int cap; + + cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1); + if (cap > 0) + port->cap_tmu = cap; + } + + ret = tb_switch_tmu_rate_read(sw); + if (ret < 0) + return ret; + + sw->tmu.rate = ret; + + sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw); + if (sw->tmu.has_ucap) { + tb_sw_dbg(sw, "TMU: supports uni-directional mode\n"); + + if (tb_route(sw)) { + struct tb_port *up = tb_upstream_port(sw); + + sw->tmu.unidirectional = + tb_port_tmu_is_unidirectional(up); + } + } else { + sw->tmu.unidirectional = false; + } + + tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw)); + return 0; +} + +/** + * tb_switch_tmu_post_time() - Update switch local time + * @sw: Switch whose time to update + * + * Updates switch local time using time posting procedure. + */ +int tb_switch_tmu_post_time(struct tb_switch *sw) +{ + unsigned int post_local_time_offset, post_time_offset; + struct tb_switch *root_switch = sw->tb->root_switch; + u64 hi, mid, lo, local_time, post_time; + int i, ret, retries = 100; + u32 gm_local_time[3]; + + if (!tb_route(sw)) + return 0; + + if (!tb_switch_is_usb4(sw)) + return 0; + + /* Need to be able to read the grand master time */ + if (!root_switch->tmu.cap) + return 0; + + ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH, + root_switch->tmu.cap + TMU_RTR_CS_1, + ARRAY_SIZE(gm_local_time)); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(gm_local_time); i++) + tb_sw_dbg(root_switch, "local_time[%d]=0x%08x\n", i, + gm_local_time[i]); + + /* Convert to nanoseconds (drop fractional part) */ + hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK; + mid = gm_local_time[1]; + lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >> + TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT; + local_time = hi << 48 | mid << 16 | lo; + + /* Tell the switch that time sync is disrupted for a while */ + ret = tb_switch_tmu_set_time_disruption(sw, true); + if (ret) + return ret; + + post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22; + post_time_offset = sw->tmu.cap + TMU_RTR_CS_24; + + /* + * Write the Grandmaster time to the Post Local Time registers + * of the new switch. 
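+	 *
+	 * As an illustration with made-up register values: if the root
+	 * switch reported gm_local_time = { 0x12340000, 0x56789abc,
+	 * 0x0000def0 }, then lo = 0x1234, mid = 0x56789abc, hi = 0xdef0
+	 * and the 64-bit value written below is 0xdef056789abc1234 ns.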
+ */ + ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH, + post_local_time_offset, 2); + if (ret) + goto out; + + /* + * Have the new switch update its local time (by writing 1 to + * the post_time registers) and wait for the completion of the + * same (post_time register becomes 0). This means the time has + * been converged properly. + */ + post_time = 1; + + ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2); + if (ret) + goto out; + + do { + usleep_range(5, 10); + ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH, + post_time_offset, 2); + if (ret) + goto out; + } while (--retries && post_time); + + if (!retries) { + ret = -ETIMEDOUT; + goto out; + } + + tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time); + +out: + tb_switch_tmu_set_time_disruption(sw, false); + return ret; +} + +/** + * tb_switch_tmu_disable() - Disable TMU of a switch + * @sw: Switch whose TMU to disable + * + * Turns off TMU of @sw if it is enabled. If not enabled does nothing. + */ +int tb_switch_tmu_disable(struct tb_switch *sw) +{ + int ret; + + if (!tb_switch_is_usb4(sw)) + return 0; + + /* Already disabled? */ + if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) + return 0; + + if (sw->tmu.unidirectional) { + struct tb_switch *parent = tb_switch_parent(sw); + struct tb_port *up, *down; + + up = tb_upstream_port(sw); + down = tb_port_at(tb_route(sw), parent); + + /* The switch may be unplugged so ignore any errors */ + tb_port_tmu_unidirectional_disable(up); + ret = tb_port_tmu_unidirectional_disable(down); + if (ret) + return ret; + } + + tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF); + + sw->tmu.unidirectional = false; + sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF; + + tb_sw_dbg(sw, "TMU: disabled\n"); + return 0; +} + +/** + * tb_switch_tmu_enable() - Enable TMU on a switch + * @sw: Switch whose TMU to enable + * + * Enables TMU of a switch to be in bi-directional, HiFi mode. In this mode + * all tunneling should work. 
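+ *
+ * A caller would typically just enable it and warn on failure, e.g.
+ * (sketch, assuming a valid @sw):
+ *
+ *	if (tb_switch_tmu_enable(sw))
+ *		tb_sw_warn(sw, "failed to enable TMU\n");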
+ */ +int tb_switch_tmu_enable(struct tb_switch *sw) +{ + int ret; + + if (!tb_switch_is_usb4(sw)) + return 0; + + if (tb_switch_tmu_is_enabled(sw)) + return 0; + + ret = tb_switch_tmu_set_time_disruption(sw, true); + if (ret) + return ret; + + /* Change mode to bi-directional */ + if (tb_route(sw) && sw->tmu.unidirectional) { + struct tb_switch *parent = tb_switch_parent(sw); + struct tb_port *up, *down; + + up = tb_upstream_port(sw); + down = tb_port_at(tb_route(sw), parent); + + ret = tb_port_tmu_unidirectional_disable(down); + if (ret) + return ret; + + ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI); + if (ret) + return ret; + + ret = tb_port_tmu_unidirectional_disable(up); + if (ret) + return ret; + } else { + ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI); + if (ret) + return ret; + } + + sw->tmu.unidirectional = false; + sw->tmu.rate = TB_SWITCH_TMU_RATE_HIFI; + tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw)); + + return tb_switch_tmu_set_time_disruption(sw, false); +} diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index 31d0234837e4..dbe90bcf4ad4 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -6,6 +6,7 @@ * Copyright (C) 2019, Intel Corporation */ +#include <linux/delay.h> #include <linux/slab.h> #include <linux/list.h> @@ -18,6 +19,12 @@ #define TB_PCI_PATH_DOWN 0 #define TB_PCI_PATH_UP 1 +/* USB3 adapters use always HopID of 8 for both directions */ +#define TB_USB3_HOPID 8 + +#define TB_USB3_PATH_DOWN 0 +#define TB_USB3_PATH_UP 1 + /* DP adapters use HopID 8 for AUX and 9 for Video */ #define TB_DP_AUX_TX_HOPID 8 #define TB_DP_AUX_RX_HOPID 8 @@ -30,7 +37,7 @@ #define TB_DMA_PATH_OUT 0 #define TB_DMA_PATH_IN 1 -static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA" }; +static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" }; #define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) 
\ do { \ @@ -90,6 +97,22 @@ static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate) return 0; } +static int tb_initial_credits(const struct tb_switch *sw) +{ + /* If the path is complete sw is not NULL */ + if (sw) { + /* More credits for faster link */ + switch (sw->link_speed * sw->link_width) { + case 40: + return 32; + case 20: + return 24; + } + } + + return 16; +} + static void tb_pci_init_path(struct tb_path *path) { path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; @@ -101,7 +124,8 @@ static void tb_pci_init_path(struct tb_path *path) path->drop_packages = 0; path->nfc_credits = 0; path->hops[0].initial_credits = 7; - path->hops[1].initial_credits = 16; + path->hops[1].initial_credits = + tb_initial_credits(path->hops[1].in_port->sw); } /** @@ -211,7 +235,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, return NULL; } tb_pci_init_path(path); - tunnel->paths[TB_PCI_PATH_UP] = path; + tunnel->paths[TB_PCI_PATH_DOWN] = path; path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0, "PCIe Up"); @@ -220,16 +244,184 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, return NULL; } tb_pci_init_path(path); - tunnel->paths[TB_PCI_PATH_DOWN] = path; + tunnel->paths[TB_PCI_PATH_UP] = path; return tunnel; } +static bool tb_dp_is_usb4(const struct tb_switch *sw) +{ + /* Titan Ridge DP adapters need the same treatment as USB4 */ + return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw); +} + +static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out) +{ + int timeout = 10; + u32 val; + int ret; + + /* Both ends need to support this */ + if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw)) + return 0; + + ret = tb_port_read(out, &val, TB_CFG_PORT, + out->cap_adap + DP_STATUS_CTRL, 1); + if (ret) + return ret; + + val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS; + + ret = tb_port_write(out, &val, TB_CFG_PORT, + out->cap_adap + DP_STATUS_CTRL, 1); + if (ret) + return ret; + + do { + ret = tb_port_read(out, &val, TB_CFG_PORT, + out->cap_adap + DP_STATUS_CTRL, 1); + if (ret) + return ret; + if (!(val & DP_STATUS_CTRL_CMHS)) + return 0; + usleep_range(10, 100); + } while (timeout--); + + return -ETIMEDOUT; +} + +static inline u32 tb_dp_cap_get_rate(u32 val) +{ + u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT; + + switch (rate) { + case DP_COMMON_CAP_RATE_RBR: + return 1620; + case DP_COMMON_CAP_RATE_HBR: + return 2700; + case DP_COMMON_CAP_RATE_HBR2: + return 5400; + case DP_COMMON_CAP_RATE_HBR3: + return 8100; + default: + return 0; + } +} + +static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate) +{ + val &= ~DP_COMMON_CAP_RATE_MASK; + switch (rate) { + default: + WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate); + /* Fallthrough */ + case 1620: + val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT; + break; + case 2700: + val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT; + break; + case 5400: + val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT; + break; + case 8100: + val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT; + break; + } + return val; +} + +static inline u32 tb_dp_cap_get_lanes(u32 val) +{ + u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT; + + switch (lanes) { + case DP_COMMON_CAP_1_LANE: + return 1; + case DP_COMMON_CAP_2_LANES: + return 2; + case DP_COMMON_CAP_4_LANES: + return 4; + default: + return 0; + } +} + +static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes) +{ + val &= 
~DP_COMMON_CAP_LANES_MASK; + switch (lanes) { + default: + WARN(1, "invalid number of lanes %u passed, defaulting to 1\n", + lanes); + /* Fallthrough */ + case 1: + val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT; + break; + case 2: + val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT; + break; + case 4: + val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT; + break; + } + return val; +} + +static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes) +{ + /* Tunneling removes the DP 8b/10b encoding */ + return rate * lanes * 8 / 10; +} + +static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes, + u32 out_rate, u32 out_lanes, u32 *new_rate, + u32 *new_lanes) +{ + static const u32 dp_bw[][2] = { + /* Mb/s, lanes */ + { 8100, 4 }, /* 25920 Mb/s */ + { 5400, 4 }, /* 17280 Mb/s */ + { 8100, 2 }, /* 12960 Mb/s */ + { 2700, 4 }, /* 8640 Mb/s */ + { 5400, 2 }, /* 8640 Mb/s */ + { 8100, 1 }, /* 6480 Mb/s */ + { 1620, 4 }, /* 5184 Mb/s */ + { 5400, 1 }, /* 4320 Mb/s */ + { 2700, 2 }, /* 4320 Mb/s */ + { 1620, 2 }, /* 2592 Mb/s */ + { 2700, 1 }, /* 2160 Mb/s */ + { 1620, 1 }, /* 1296 Mb/s */ + }; + unsigned int i; + + /* + * Find a combination that can fit into max_bw and does not + * exceed the maximum rate and lanes supported by the DP OUT and + * DP IN adapters. + */ + for (i = 0; i < ARRAY_SIZE(dp_bw); i++) { + if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes) + continue; + + if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes) + continue; + + if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) { + *new_rate = dp_bw[i][0]; + *new_lanes = dp_bw[i][1]; + return 0; + } + } + + return -ENOSR; +} + static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) { + u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw; struct tb_port *out = tunnel->dst_port; struct tb_port *in = tunnel->src_port; - u32 in_dp_cap, out_dp_cap; int ret; /* @@ -239,25 +431,71 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) if (in->sw->generation < 2 || out->sw->generation < 2) return 0; + /* + * Perform connection manager handshake between IN and OUT ports + * before capabilities exchange can take place. + */ + ret = tb_dp_cm_handshake(in, out); + if (ret) + return ret; + /* Read both DP_LOCAL_CAP registers */ ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT, - in->cap_adap + TB_DP_LOCAL_CAP, 1); + in->cap_adap + DP_LOCAL_CAP, 1); if (ret) return ret; ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT, - out->cap_adap + TB_DP_LOCAL_CAP, 1); + out->cap_adap + DP_LOCAL_CAP, 1); if (ret) return ret; /* Write IN local caps to OUT remote caps */ ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT, - out->cap_adap + TB_DP_REMOTE_CAP, 1); + out->cap_adap + DP_REMOTE_CAP, 1); if (ret) return ret; + in_rate = tb_dp_cap_get_rate(in_dp_cap); + in_lanes = tb_dp_cap_get_lanes(in_dp_cap); + tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", + in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes)); + + /* + * If the tunnel bandwidth is limited (max_bw is set) then see + * if we need to reduce bandwidth to fit there. 
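+	 *
+	 * Illustrative numbers: if the DP OUT adapter supports at most
+	 * HBR2 x2 (5400 Mb/s * 2 * 8/10 = 8640 Mb/s of tunneled
+	 * bandwidth), the DP IN adapter supports at least the same, and
+	 * max_bw is 8000 Mb/s, tb_dp_reduce_bandwidth() picks the first
+	 * table entry both adapters support that also fits the limit:
+	 * 5400 Mb/s x1, i.e. 4320 Mb/s.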
+ */ + out_rate = tb_dp_cap_get_rate(out_dp_cap); + out_lanes = tb_dp_cap_get_lanes(out_dp_cap); + bw = tb_dp_bandwidth(out_rate, out_lanes); + tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", + out_rate, out_lanes, bw); + + if (tunnel->max_bw && bw > tunnel->max_bw) { + u32 new_rate, new_lanes, new_bw; + + ret = tb_dp_reduce_bandwidth(tunnel->max_bw, in_rate, in_lanes, + out_rate, out_lanes, &new_rate, + &new_lanes); + if (ret) { + tb_port_info(out, "not enough bandwidth for DP tunnel\n"); + return ret; + } + + new_bw = tb_dp_bandwidth(new_rate, new_lanes); + tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n", + new_rate, new_lanes, new_bw); + + /* + * Set new rate and number of lanes before writing it to + * the IN port remote caps. + */ + out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate); + out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes); + } + return tb_port_write(in, &out_dp_cap, TB_CFG_PORT, - in->cap_adap + TB_DP_REMOTE_CAP, 1); + in->cap_adap + DP_REMOTE_CAP, 1); } static int tb_dp_activate(struct tb_tunnel *tunnel, bool active) @@ -297,6 +535,56 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active) return 0; } +static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel) +{ + struct tb_port *in = tunnel->src_port; + const struct tb_switch *sw = in->sw; + u32 val, rate = 0, lanes = 0; + int ret; + + if (tb_dp_is_usb4(sw)) { + int timeout = 10; + + /* + * Wait for DPRX done. Normally it should be already set + * for active tunnel. + */ + do { + ret = tb_port_read(in, &val, TB_CFG_PORT, + in->cap_adap + DP_COMMON_CAP, 1); + if (ret) + return ret; + + if (val & DP_COMMON_CAP_DPRX_DONE) { + rate = tb_dp_cap_get_rate(val); + lanes = tb_dp_cap_get_lanes(val); + break; + } + msleep(250); + } while (timeout--); + + if (!timeout) + return -ETIMEDOUT; + } else if (sw->generation >= 2) { + /* + * Read from the copied remote cap so that we take into + * account if capabilities were reduced during exchange. + */ + ret = tb_port_read(in, &val, TB_CFG_PORT, + in->cap_adap + DP_REMOTE_CAP, 1); + if (ret) + return ret; + + rate = tb_dp_cap_get_rate(val); + lanes = tb_dp_cap_get_lanes(val); + } else { + /* No bandwidth management for legacy devices */ + return 0; + } + + return tb_dp_bandwidth(rate, lanes); +} + static void tb_dp_init_aux_path(struct tb_path *path) { int i; @@ -324,12 +612,12 @@ static void tb_dp_init_video_path(struct tb_path *path, bool discover) path->weight = 1; if (discover) { - path->nfc_credits = nfc_credits & TB_PORT_NFC_CREDITS_MASK; + path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK; } else { u32 max_credits; - max_credits = (nfc_credits & TB_PORT_MAX_CREDITS_MASK) >> - TB_PORT_MAX_CREDITS_SHIFT; + max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >> + ADP_CS_4_TOTAL_BUFFERS_SHIFT; /* Leave some credits for AUX path */ path->nfc_credits = min(max_credits - 2, 12U); } @@ -361,6 +649,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in) tunnel->init = tb_dp_xchg_caps; tunnel->activate = tb_dp_activate; + tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth; tunnel->src_port = in; path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1, @@ -419,6 +708,7 @@ err_free: * @tb: Pointer to the domain structure * @in: DP in adapter port * @out: DP out adapter port + * @max_bw: Maximum available bandwidth for the DP tunnel (%0 if not limited) * * Allocates a tunnel between @in and @out that is capable of tunneling * Display Port traffic. 
@@ -426,7 +716,7 @@ err_free: * Return: Returns a tb_tunnel on success or NULL on failure. */ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, - struct tb_port *out) + struct tb_port *out, int max_bw) { struct tb_tunnel *tunnel; struct tb_path **paths; @@ -441,8 +731,10 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, tunnel->init = tb_dp_xchg_caps; tunnel->activate = tb_dp_activate; + tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth; tunnel->src_port = in; tunnel->dst_port = out; + tunnel->max_bw = max_bw; paths = tunnel->paths; @@ -478,8 +770,8 @@ static u32 tb_dma_credits(struct tb_port *nhi) { u32 max_credits; - max_credits = (nhi->config.nfc_credits & TB_PORT_MAX_CREDITS_MASK) >> - TB_PORT_MAX_CREDITS_SHIFT; + max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >> + ADP_CS_4_TOTAL_BUFFERS_SHIFT; return min(max_credits, 13U); } @@ -562,6 +854,156 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, return tunnel; } +static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate) +{ + int res; + + res = tb_usb3_port_enable(tunnel->src_port, activate); + if (res) + return res; + + if (tb_port_is_usb3_up(tunnel->dst_port)) + return tb_usb3_port_enable(tunnel->dst_port, activate); + + return 0; +} + +static void tb_usb3_init_path(struct tb_path *path) +{ + path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; + path->egress_shared_buffer = TB_PATH_NONE; + path->ingress_fc_enable = TB_PATH_ALL; + path->ingress_shared_buffer = TB_PATH_NONE; + path->priority = 3; + path->weight = 3; + path->drop_packages = 0; + path->nfc_credits = 0; + path->hops[0].initial_credits = 7; + path->hops[1].initial_credits = + tb_initial_credits(path->hops[1].in_port->sw); +} + +/** + * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels + * @tb: Pointer to the domain structure + * @down: USB3 downstream adapter + * + * If @down adapter is active, follows the tunnel to the USB3 upstream + * adapter and back. Returns the discovered tunnel or %NULL if there was + * no tunnel. + */ +struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down) +{ + struct tb_tunnel *tunnel; + struct tb_path *path; + + if (!tb_usb3_port_is_enabled(down)) + return NULL; + + tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3); + if (!tunnel) + return NULL; + + tunnel->activate = tb_usb3_activate; + tunnel->src_port = down; + + /* + * Discover both paths even if they are not complete. We will + * clean them up by calling tb_tunnel_deactivate() below in that + * case. 
+ */ + path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1, + &tunnel->dst_port, "USB3 Up"); + if (!path) { + /* Just disable the downstream port */ + tb_usb3_port_enable(down, false); + goto err_free; + } + tunnel->paths[TB_USB3_PATH_UP] = path; + tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]); + + path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL, + "USB3 Down"); + if (!path) + goto err_deactivate; + tunnel->paths[TB_USB3_PATH_DOWN] = path; + tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]); + + /* Validate that the tunnel is complete */ + if (!tb_port_is_usb3_up(tunnel->dst_port)) { + tb_port_warn(tunnel->dst_port, + "path does not end on an USB3 adapter, cleaning up\n"); + goto err_deactivate; + } + + if (down != tunnel->src_port) { + tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n"); + goto err_deactivate; + } + + if (!tb_usb3_port_is_enabled(tunnel->dst_port)) { + tb_tunnel_warn(tunnel, + "tunnel is not fully activated, cleaning up\n"); + goto err_deactivate; + } + + tb_tunnel_dbg(tunnel, "discovered\n"); + return tunnel; + +err_deactivate: + tb_tunnel_deactivate(tunnel); +err_free: + tb_tunnel_free(tunnel); + + return NULL; +} + +/** + * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel + * @tb: Pointer to the domain structure + * @up: USB3 upstream adapter port + * @down: USB3 downstream adapter port + * + * Allocate an USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and + * @TB_TYPE_USB3_DOWN. + * + * Return: Returns a tb_tunnel on success or %NULL on failure. + */ +struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up, + struct tb_port *down) +{ + struct tb_tunnel *tunnel; + struct tb_path *path; + + tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3); + if (!tunnel) + return NULL; + + tunnel->activate = tb_usb3_activate; + tunnel->src_port = down; + tunnel->dst_port = up; + + path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0, + "USB3 Down"); + if (!path) { + tb_tunnel_free(tunnel); + return NULL; + } + tb_usb3_init_path(path); + tunnel->paths[TB_USB3_PATH_DOWN] = path; + + path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0, + "USB3 Up"); + if (!path) { + tb_tunnel_free(tunnel); + return NULL; + } + tb_usb3_init_path(path); + tunnel->paths[TB_USB3_PATH_UP] = path; + + return tunnel; +} + /** * tb_tunnel_free() - free a tunnel * @tunnel: Tunnel to be freed @@ -689,3 +1131,62 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel) tb_path_deactivate(tunnel->paths[i]); } } + +/** + * tb_tunnel_switch_on_path() - Does the tunnel go through switch + * @tunnel: Tunnel to check + * @sw: Switch to check + * + * Returns true if @tunnel goes through @sw (direction does not matter), + * false otherwise. + */ +bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel, + const struct tb_switch *sw) +{ + int i; + + for (i = 0; i < tunnel->npaths; i++) { + if (!tunnel->paths[i]) + continue; + if (tb_path_switch_on_path(tunnel->paths[i], sw)) + return true; + } + + return false; +} + +static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel) +{ + int i; + + for (i = 0; i < tunnel->npaths; i++) { + if (!tunnel->paths[i]) + return false; + if (!tunnel->paths[i]->activated) + return false; + } + + return true; +} + +/** + * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel + * @tunnel: Tunnel to check + * + * Returns bandwidth currently consumed by @tunnel and %0 if the @tunnel + * is not active or does consume bandwidth. 
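+ *
+ * A connection manager could use this to estimate how much bandwidth is
+ * already committed, e.g. (sketch, iterating &tcm->tunnel_list as tb.c
+ * does):
+ *
+ *	struct tb_tunnel *tunnel;
+ *	int used = 0, ret;
+ *
+ *	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ *		ret = tb_tunnel_consumed_bandwidth(tunnel);
+ *		if (ret > 0)
+ *			used += ret;
+ *	}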
+ */ +int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel) +{ + if (!tb_tunnel_is_active(tunnel)) + return 0; + + if (tunnel->consumed_bandwidth) { + int ret = tunnel->consumed_bandwidth(tunnel); + + tb_tunnel_dbg(tunnel, "consumed bandwidth %d Mb/s\n", ret); + return ret; + } + + return 0; +} diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h index c68bbcd3a62c..3f5ba93225e7 100644 --- a/drivers/thunderbolt/tunnel.h +++ b/drivers/thunderbolt/tunnel.h @@ -15,6 +15,7 @@ enum tb_tunnel_type { TB_TUNNEL_PCI, TB_TUNNEL_DP, TB_TUNNEL_DMA, + TB_TUNNEL_USB3, }; /** @@ -27,8 +28,11 @@ enum tb_tunnel_type { * @npaths: Number of paths in @paths * @init: Optional tunnel specific initialization * @activate: Optional tunnel specific activation/deactivation + * @consumed_bandwidth: Return how much bandwidth the tunnel consumes * @list: Tunnels are linked using this field * @type: Type of the tunnel + * @max_bw: Maximum bandwidth (Mb/s) available for the tunnel (only for DP). + * Only set if the bandwidth needs to be limited. */ struct tb_tunnel { struct tb *tb; @@ -38,8 +42,10 @@ struct tb_tunnel { size_t npaths; int (*init)(struct tb_tunnel *tunnel); int (*activate)(struct tb_tunnel *tunnel, bool activate); + int (*consumed_bandwidth)(struct tb_tunnel *tunnel); struct list_head list; enum tb_tunnel_type type; + unsigned int max_bw; }; struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down); @@ -47,17 +53,23 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, struct tb_port *down); struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in); struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, - struct tb_port *out); + struct tb_port *out, int max_bw); struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, struct tb_port *dst, int transmit_ring, int transmit_path, int receive_ring, int receive_path); +struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down); +struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up, + struct tb_port *down); void tb_tunnel_free(struct tb_tunnel *tunnel); int tb_tunnel_activate(struct tb_tunnel *tunnel); int tb_tunnel_restart(struct tb_tunnel *tunnel); void tb_tunnel_deactivate(struct tb_tunnel *tunnel); bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel); +bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel, + const struct tb_switch *sw); +int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel); static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel) { @@ -74,5 +86,10 @@ static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel) return tunnel->type == TB_TUNNEL_DMA; } +static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel) +{ + return tunnel->type == TB_TUNNEL_USB3; +} + #endif diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c new file mode 100644 index 000000000000..b341fc60c4ba --- /dev/null +++ b/drivers/thunderbolt/usb4.c @@ -0,0 +1,764 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * USB4 specific functionality + * + * Copyright (C) 2019, Intel Corporation + * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> + * Rajmohan Mani <rajmohan.mani@intel.com> + */ + +#include <linux/delay.h> +#include <linux/ktime.h> + +#include "tb.h" + +#define USB4_DATA_DWORDS 16 +#define USB4_DATA_RETRIES 3 + +enum usb4_switch_op { + USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10, + USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11, + 
USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12, + USB4_SWITCH_OP_NVM_WRITE = 0x20, + USB4_SWITCH_OP_NVM_AUTH = 0x21, + USB4_SWITCH_OP_NVM_READ = 0x22, + USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23, + USB4_SWITCH_OP_DROM_READ = 0x24, + USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25, +}; + +#define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2) +#define USB4_NVM_READ_OFFSET_SHIFT 2 +#define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24) +#define USB4_NVM_READ_LENGTH_SHIFT 24 + +#define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK +#define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT + +#define USB4_DROM_ADDRESS_MASK GENMASK(14, 2) +#define USB4_DROM_ADDRESS_SHIFT 2 +#define USB4_DROM_SIZE_MASK GENMASK(19, 15) +#define USB4_DROM_SIZE_SHIFT 15 + +#define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0) + +typedef int (*read_block_fn)(struct tb_switch *, unsigned int, void *, size_t); +typedef int (*write_block_fn)(struct tb_switch *, const void *, size_t); + +static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit, + u32 value, int timeout_msec) +{ + ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); + + do { + u32 val; + int ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); + if (ret) + return ret; + + if ((val & bit) == value) + return 0; + + usleep_range(50, 100); + } while (ktime_before(ktime_get(), timeout)); + + return -ETIMEDOUT; +} + +static int usb4_switch_op_read_data(struct tb_switch *sw, void *data, + size_t dwords) +{ + if (dwords > USB4_DATA_DWORDS) + return -EINVAL; + + return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords); +} + +static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data, + size_t dwords) +{ + if (dwords > USB4_DATA_DWORDS) + return -EINVAL; + + return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords); +} + +static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata) +{ + return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1); +} + +static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata) +{ + return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1); +} + +static int usb4_switch_do_read_data(struct tb_switch *sw, u16 address, + void *buf, size_t size, read_block_fn read_block) +{ + unsigned int retries = USB4_DATA_RETRIES; + unsigned int offset; + + offset = address & 3; + address = address & ~3; + + do { + size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4); + unsigned int dwaddress, dwords; + u8 data[USB4_DATA_DWORDS * 4]; + int ret; + + dwaddress = address / 4; + dwords = ALIGN(nbytes, 4) / 4; + + ret = read_block(sw, dwaddress, data, dwords); + if (ret) { + if (ret == -ETIMEDOUT) { + if (retries--) + continue; + ret = -EIO; + } + return ret; + } + + memcpy(buf, data + offset, nbytes); + + size -= nbytes; + address += nbytes; + buf += nbytes; + } while (size > 0); + + return 0; +} + +static int usb4_switch_do_write_data(struct tb_switch *sw, u16 address, + const void *buf, size_t size, write_block_fn write_next_block) +{ + unsigned int retries = USB4_DATA_RETRIES; + unsigned int offset; + + offset = address & 3; + address = address & ~3; + + do { + u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4); + u8 data[USB4_DATA_DWORDS * 4]; + int ret; + + memcpy(data + offset, buf, nbytes); + + ret = write_next_block(sw, data, nbytes / 4); + if (ret) { + if (ret == -ETIMEDOUT) { + if (retries--) + continue; + ret = -EIO; + } + return ret; + } + + size -= nbytes; + address += nbytes; + buf += nbytes; + } while (size > 0); + + return 0; +} + +static int 
usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status) +{ + u32 val; + int ret; + + val = opcode | ROUTER_CS_26_OV; + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); + if (ret) + return ret; + + ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500); + if (ret) + return ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); + if (val & ROUTER_CS_26_ONS) + return -EOPNOTSUPP; + + *status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT; + return 0; +} + +/** + * usb4_switch_setup() - Additional setup for USB4 device + * @sw: USB4 router to setup + * + * USB4 routers need additional settings in order to enable all the + * tunneling. This function enables USB and PCIe tunneling if it can be + * enabled (e.g the parent switch also supports them). If USB tunneling + * is not available for some reason (like that there is Thunderbolt 3 + * switch upstream) then the internal xHCI controller is enabled + * instead. + */ +int usb4_switch_setup(struct tb_switch *sw) +{ + struct tb_switch *parent; + bool tbt3, xhci; + u32 val = 0; + int ret; + + if (!tb_route(sw)) + return 0; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1); + if (ret) + return ret; + + xhci = val & ROUTER_CS_6_HCI; + tbt3 = !(val & ROUTER_CS_6_TNS); + + tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n", + tbt3 ? "yes" : "no", xhci ? "yes" : "no"); + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; + + parent = tb_switch_parent(sw); + + if (tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) { + val |= ROUTER_CS_5_UTO; + xhci = false; + } + + /* Only enable PCIe tunneling if the parent router supports it */ + if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) { + val |= ROUTER_CS_5_PTO; + /* + * xHCI can be enabled if PCIe tunneling is supported + * and the parent does not have any USB3 dowstream + * adapters (so we cannot do USB 3.x tunneling). + */ + if (xhci) + val |= ROUTER_CS_5_HCO; + } + + /* TBT3 supported by the CM */ + val |= ROUTER_CS_5_C3S; + /* Tunneling configuration is ready now */ + val |= ROUTER_CS_5_CV; + + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; + + return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR, + ROUTER_CS_6_CR, 50); +} + +/** + * usb4_switch_read_uid() - Read UID from USB4 router + * @sw: USB4 router + * + * Reads 64-bit UID from USB4 router config space. + */ +int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid) +{ + return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2); +} + +static int usb4_switch_drom_read_block(struct tb_switch *sw, + unsigned int dwaddress, void *buf, + size_t dwords) +{ + u8 status = 0; + u32 metadata; + int ret; + + metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK; + metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) & + USB4_DROM_ADDRESS_MASK; + + ret = usb4_switch_op_write_metadata(sw, metadata); + if (ret) + return ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status); + if (ret) + return ret; + + if (status) + return -EIO; + + return usb4_switch_op_read_data(sw, buf, dwords); +} + +/** + * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM + * @sw: USB4 router + * + * Uses USB4 router operations to read router DROM. For devices this + * should always work but for hosts it may return %-EOPNOTSUPP in which + * case the host router does not have DROM. 
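+ *
+ * A minimal sketch of a caller (hypothetical offset and buffer; the DROM
+ * parsing code is expected to call this for USB4 routers):
+ *
+ *	u8 header[32];
+ *	int ret;
+ *
+ *	ret = usb4_switch_drom_read(sw, 0, header, sizeof(header));
+ *	if (ret)
+ *		return ret;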
+ */ +int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, + size_t size) +{ + return usb4_switch_do_read_data(sw, address, buf, size, + usb4_switch_drom_read_block); +} + +static int usb4_set_port_configured(struct tb_port *port, bool configured) +{ + int ret; + u32 val; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + + if (configured) + val |= PORT_CS_19_PC; + else + val &= ~PORT_CS_19_PC; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); +} + +/** + * usb4_switch_configure_link() - Set upstream USB4 link configured + * @sw: USB4 router + * + * Sets the upstream USB4 link to be configured for power management + * purposes. + */ +int usb4_switch_configure_link(struct tb_switch *sw) +{ + struct tb_port *up; + + if (!tb_route(sw)) + return 0; + + up = tb_upstream_port(sw); + return usb4_set_port_configured(up, true); +} + +/** + * usb4_switch_unconfigure_link() - Un-set upstream USB4 link configuration + * @sw: USB4 router + * + * Reverse of usb4_switch_configure_link(). + */ +void usb4_switch_unconfigure_link(struct tb_switch *sw) +{ + struct tb_port *up; + + if (sw->is_unplugged || !tb_route(sw)) + return; + + up = tb_upstream_port(sw); + usb4_set_port_configured(up, false); +} + +/** + * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding + * @sw: USB4 router + * + * Checks whether conditions are met so that lane bonding can be + * established with the upstream router. Call only for device routers. + */ +bool usb4_switch_lane_bonding_possible(struct tb_switch *sw) +{ + struct tb_port *up; + int ret; + u32 val; + + up = tb_upstream_port(sw); + ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1); + if (ret) + return false; + + return !!(val & PORT_CS_18_BE); +} + +/** + * usb4_switch_set_sleep() - Prepare the router to enter sleep + * @sw: USB4 router + * + * Enables wakes and sets sleep bit for the router. Returns when the + * router sleep ready bit has been asserted. + */ +int usb4_switch_set_sleep(struct tb_switch *sw) +{ + int ret; + u32 val; + + /* Set sleep bit and wait for sleep ready to be asserted */ + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; + + val |= ROUTER_CS_5_SLP; + + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; + + return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR, + ROUTER_CS_6_SLPR, 500); +} + +/** + * usb4_switch_nvm_sector_size() - Return router NVM sector size + * @sw: USB4 router + * + * If the router supports NVM operations this function returns the NVM + * sector size in bytes. If NVM operations are not supported returns + * %-EOPNOTSUPP. + */ +int usb4_switch_nvm_sector_size(struct tb_switch *sw) +{ + u32 metadata; + u8 status; + int ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status); + if (ret) + return ret; + + if (status) + return status == 0x2 ? 
-EOPNOTSUPP : -EIO; + + ret = usb4_switch_op_read_metadata(sw, &metadata); + if (ret) + return ret; + + return metadata & USB4_NVM_SECTOR_SIZE_MASK; +} + +static int usb4_switch_nvm_read_block(struct tb_switch *sw, + unsigned int dwaddress, void *buf, size_t dwords) +{ + u8 status = 0; + u32 metadata; + int ret; + + metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) & + USB4_NVM_READ_LENGTH_MASK; + metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) & + USB4_NVM_READ_OFFSET_MASK; + + ret = usb4_switch_op_write_metadata(sw, metadata); + if (ret) + return ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status); + if (ret) + return ret; + + if (status) + return -EIO; + + return usb4_switch_op_read_data(sw, buf, dwords); +} + +/** + * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM + * @sw: USB4 router + * @address: Starting address in bytes + * @buf: Read data is placed here + * @size: How many bytes to read + * + * Reads NVM contents of the router. If NVM is not supported returns + * %-EOPNOTSUPP. + */ +int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, + size_t size) +{ + return usb4_switch_do_read_data(sw, address, buf, size, + usb4_switch_nvm_read_block); +} + +static int usb4_switch_nvm_set_offset(struct tb_switch *sw, + unsigned int address) +{ + u32 metadata, dwaddress; + u8 status = 0; + int ret; + + dwaddress = address / 4; + metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) & + USB4_NVM_SET_OFFSET_MASK; + + ret = usb4_switch_op_write_metadata(sw, metadata); + if (ret) + return ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status); + if (ret) + return ret; + + return status ? -EIO : 0; +} + +static int usb4_switch_nvm_write_next_block(struct tb_switch *sw, + const void *buf, size_t dwords) +{ + u8 status; + int ret; + + ret = usb4_switch_op_write_data(sw, buf, dwords); + if (ret) + return ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status); + if (ret) + return ret; + + return status ? -EIO : 0; +} + +/** + * usb4_switch_nvm_write() - Write to the router NVM + * @sw: USB4 router + * @address: Start address where to write in bytes + * @buf: Pointer to the data to write + * @size: Size of @buf in bytes + * + * Writes @buf to the router NVM using USB4 router operations. If NVM + * write is not supported returns %-EOPNOTSUPP. + */ +int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address, + const void *buf, size_t size) +{ + int ret; + + ret = usb4_switch_nvm_set_offset(sw, address); + if (ret) + return ret; + + return usb4_switch_do_write_data(sw, address, buf, size, + usb4_switch_nvm_write_next_block); +} + +/** + * usb4_switch_nvm_authenticate() - Authenticate new NVM + * @sw: USB4 router + * + * After the new NVM has been written via usb4_switch_nvm_write(), this + * function triggers NVM authentication process. If the authentication + * is successful the router is power cycled and the new NVM starts + * running. In case of failure returns negative errno. 
+ */ +int usb4_switch_nvm_authenticate(struct tb_switch *sw) +{ + u8 status = 0; + int ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status); + if (ret) + return ret; + + switch (status) { + case 0x0: + tb_sw_dbg(sw, "NVM authentication successful\n"); + return 0; + case 0x1: + return -EINVAL; + case 0x2: + return -EAGAIN; + case 0x3: + return -EOPNOTSUPP; + default: + return -EIO; + } +} + +/** + * usb4_switch_query_dp_resource() - Query availability of DP IN resource + * @sw: USB4 router + * @in: DP IN adapter + * + * For DP tunneling this function can be used to query availability of + * DP IN resource. Returns true if the resource is available for DP + * tunneling, false otherwise. + */ +bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) +{ + u8 status; + int ret; + + ret = usb4_switch_op_write_metadata(sw, in->port); + if (ret) + return false; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status); + /* + * If DP resource allocation is not supported assume it is + * always available. + */ + if (ret == -EOPNOTSUPP) + return true; + else if (ret) + return false; + + return !status; +} + +/** + * usb4_switch_alloc_dp_resource() - Allocate DP IN resource + * @sw: USB4 router + * @in: DP IN adapter + * + * Allocates DP IN resource for DP tunneling using USB4 router + * operations. If the resource was allocated returns %0. Otherwise + * returns negative errno, in particular %-EBUSY if the resource is + * already allocated. + */ +int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) +{ + u8 status; + int ret; + + ret = usb4_switch_op_write_metadata(sw, in->port); + if (ret) + return ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status); + if (ret == -EOPNOTSUPP) + return 0; + else if (ret) + return ret; + + return status ? -EBUSY : 0; +} + +/** + * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource + * @sw: USB4 router + * @in: DP IN adapter + * + * Releases the previously allocated DP IN resource. + */ +int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) +{ + u8 status; + int ret; + + ret = usb4_switch_op_write_metadata(sw, in->port); + if (ret) + return ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status); + if (ret == -EOPNOTSUPP) + return 0; + else if (ret) + return ret; + + return status ? -EIO : 0; +} + +static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port) +{ + struct tb_port *p; + int usb4_idx = 0; + + /* Assume port is primary */ + tb_switch_for_each_port(sw, p) { + if (!tb_port_is_null(p)) + continue; + if (tb_is_upstream_port(p)) + continue; + if (!p->link_nr) { + if (p == port) + break; + usb4_idx++; + } + } + + return usb4_idx; +} + +/** + * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter + * @sw: USB4 router + * @port: USB4 port + * + * USB4 routers have direct mapping between USB4 ports and PCIe + * downstream adapters where the PCIe topology is extended. This + * function returns the corresponding downstream PCIe adapter or %NULL + * if no such mapping was possible. 
+ */ +struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, + const struct tb_port *port) +{ + int usb4_idx = usb4_port_idx(sw, port); + struct tb_port *p; + int pcie_idx = 0; + + /* Find PCIe down port matching usb4_port */ + tb_switch_for_each_port(sw, p) { + if (!tb_port_is_pcie_down(p)) + continue; + + if (pcie_idx == usb4_idx && !tb_pci_port_is_enabled(p)) + return p; + + pcie_idx++; + } + + return NULL; +} + +/** + * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter + * @sw: USB4 router + * @port: USB4 port + * + * USB4 routers have direct mapping between USB4 ports and USB 3.x + * downstream adapters where the USB 3.x topology is extended. This + * function returns the corresponding downstream USB 3.x adapter or + * %NULL if no such mapping was possible. + */ +struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw, + const struct tb_port *port) +{ + int usb4_idx = usb4_port_idx(sw, port); + struct tb_port *p; + int usb_idx = 0; + + /* Find USB3 down port matching usb4_port */ + tb_switch_for_each_port(sw, p) { + if (!tb_port_is_usb3_down(p)) + continue; + + if (usb_idx == usb4_idx && !tb_usb3_port_is_enabled(p)) + return p; + + usb_idx++; + } + + return NULL; +} + +/** + * usb4_port_unlock() - Unlock USB4 downstream port + * @port: USB4 port to unlock + * + * Unlocks USB4 downstream port so that the connection manager can + * access the router below this port. + */ +int usb4_port_unlock(struct tb_port *port) +{ + int ret; + u32 val; + + ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1); + if (ret) + return ret; + + val &= ~ADP_CS_4_LCK; + return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1); +} diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c index 5118d46702d5..053f918e00e8 100644 --- a/drivers/thunderbolt/xdomain.c +++ b/drivers/thunderbolt/xdomain.c @@ -636,7 +636,7 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr, * It should be null terminated but anything else is pretty much * allowed. */ - return sprintf(buf, "%*pEp\n", (int)strlen(svc->key), svc->key); + return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key); } static DEVICE_ATTR_RO(key); @@ -1220,7 +1220,13 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent, u64 route, const uuid_t *local_uuid, const uuid_t *remote_uuid) { + struct tb_switch *parent_sw = tb_to_switch(parent); struct tb_xdomain *xd; + struct tb_port *down; + + /* Make sure the downstream domain is accessible */ + down = tb_port_at(route, parent_sw); + tb_port_unlock(down); xd = kzalloc(sizeof(*xd), GFP_KERNEL); if (!xd) @@ -1404,10 +1410,9 @@ struct tb_xdomain_lookup { static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw, const struct tb_xdomain_lookup *lookup) { - int i; + struct tb_port *port; - for (i = 1; i <= sw->config.max_port_number; i++) { - struct tb_port *port = &sw->ports[i]; + tb_switch_for_each_port(sw, port) { struct tb_xdomain *xd; if (port->xdomain) { |
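Editor's note: the NVM helpers added in usb4.c above (usb4_switch_nvm_sector_size(), usb4_switch_nvm_write() and usb4_switch_nvm_authenticate()) are meant to be driven by the connection manager during a firmware upgrade. The following minimal sketch, which is not part of the patch, shows one plausible way to chain them. The wrapper name and the image buffer are hypothetical; only the usb4_switch_nvm_*() calls and their return conventions come from the code above.

/* Illustrative only -- not part of the patch. Assumes the tb.h context. */
static int example_usb4_nvm_upgrade(struct tb_switch *sw, const void *image,
				    size_t image_size)
{
	int ret;

	/*
	 * Querying the sector size doubles as a capability check:
	 * %-EOPNOTSUPP means the router does not support NVM router
	 * operations at all.
	 */
	ret = usb4_switch_nvm_sector_size(sw);
	if (ret < 0)
		return ret;

	/* Write the new image to the NVM starting at offset 0 */
	ret = usb4_switch_nvm_write(sw, 0, image, image_size);
	if (ret)
		return ret;

	/*
	 * Trigger authentication. %0 means the router accepted the
	 * image and power cycles into it; -EINVAL, -EAGAIN and so on
	 * map the status codes returned by the router operation.
	 */
	return usb4_switch_nvm_authenticate(sw);
}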
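Similarly, the DP IN resource helpers are expected to bracket DP tunnel setup and teardown. The short sketch below is illustrative and not part of the patch; the wrapper is hypothetical while the usb4_switch_*_dp_resource() calls and their return conventions come from usb4.c above.

/* Illustrative only -- not part of the patch. */
static int example_claim_dp_in(struct tb_switch *sw, struct tb_port *in)
{
	/*
	 * Check availability first. Note the query returns true also
	 * when the router does not implement the operation, in which
	 * case the resource is assumed to be always available.
	 */
	if (!usb4_switch_query_dp_resource(sw, in))
		return -EBUSY;

	/* May still fail with -EBUSY if the resource was claimed meanwhile */
	return usb4_switch_alloc_dp_resource(sw, in);
}

When the DP tunnel is later torn down, the resource would be handed back with usb4_switch_dealloc_dp_resource().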