diff options
Diffstat (limited to 'drivers')
685 files changed, 7063 insertions, 5185 deletions
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c index 11b113f8e367..ebb626ffb5fa 100644 --- a/drivers/acpi/acpi_watchdog.c +++ b/drivers/acpi/acpi_watchdog.c @@ -74,10 +74,10 @@ void __init acpi_watchdog_init(void)  		res.start = gas->address;  		if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {  			res.flags = IORESOURCE_MEM; -			res.end = res.start + ALIGN(gas->access_width, 4); +			res.end = res.start + ALIGN(gas->access_width, 4) - 1;  		} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {  			res.flags = IORESOURCE_IO; -			res.end = res.start + gas->access_width; +			res.end = res.start + gas->access_width - 1;  		} else {  			pr_warn("Unsupported address space: %u\n",  				gas->space_id); diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 7128488a3a72..f2eb6c37ea0a 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -70,7 +70,6 @@ static async_cookie_t async_cookie;  static bool battery_driver_registered;  static int battery_bix_broken_package;  static int battery_notification_delay_ms; -static int battery_full_discharging;  static unsigned int cache_time = 1000;  module_param(cache_time, uint, 0644);  MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); @@ -215,12 +214,9 @@ static int acpi_battery_get_property(struct power_supply *psy,  		return -ENODEV;  	switch (psp) {  	case POWER_SUPPLY_PROP_STATUS: -		if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) { -			if (battery_full_discharging && battery->rate_now == 0) -				val->intval = POWER_SUPPLY_STATUS_FULL; -			else -				val->intval = POWER_SUPPLY_STATUS_DISCHARGING; -		} else if (battery->state & ACPI_BATTERY_STATE_CHARGING) +		if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) +			val->intval = POWER_SUPPLY_STATUS_DISCHARGING; +		else if (battery->state & ACPI_BATTERY_STATE_CHARGING)  			val->intval = POWER_SUPPLY_STATUS_CHARGING;  		else if (acpi_battery_is_charged(battery))  			val->intval = POWER_SUPPLY_STATUS_FULL; @@ 
-1170,12 +1166,6 @@ battery_notification_delay_quirk(const struct dmi_system_id *d)  	return 0;  } -static int __init battery_full_discharging_quirk(const struct dmi_system_id *d) -{ -	battery_full_discharging = 1; -	return 0; -} -  static const struct dmi_system_id bat_dmi_table[] __initconst = {  	{  		.callback = battery_bix_broken_package_quirk, @@ -1193,38 +1183,6 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {  			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),  		},  	}, -	{ -		.callback = battery_full_discharging_quirk, -		.ident = "ASUS GL502VSK", -		.matches = { -			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), -			DMI_MATCH(DMI_PRODUCT_NAME, "GL502VSK"), -		}, -	}, -	{ -		.callback = battery_full_discharging_quirk, -		.ident = "ASUS UX305LA", -		.matches = { -			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), -			DMI_MATCH(DMI_PRODUCT_NAME, "UX305LA"), -		}, -	}, -	{ -		.callback = battery_full_discharging_quirk, -		.ident = "ASUS UX360UA", -		.matches = { -			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), -			DMI_MATCH(DMI_PRODUCT_NAME, "UX360UA"), -		}, -	}, -	{ -		.callback = battery_full_discharging_quirk, -		.ident = "ASUS UX410UAK", -		.matches = { -			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), -			DMI_MATCH(DMI_PRODUCT_NAME, "UX410UAK"), -		}, -	},  	{},  }; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 676c9788e1c8..0dad0bd9327b 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -660,13 +660,15 @@ struct acpi_device *acpi_companion_match(const struct device *dev)   * acpi_of_match_device - Match device object using the "compatible" property.   * @adev: ACPI device object to match.   * @of_match_table: List of device IDs to match against. 
+ * @of_id: OF ID if matched   *   * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of   * identifiers and a _DSD object with the "compatible" property, use that   * property to match against the given list of identifiers.   */  static bool acpi_of_match_device(struct acpi_device *adev, -				 const struct of_device_id *of_match_table) +				 const struct of_device_id *of_match_table, +				 const struct of_device_id **of_id)  {  	const union acpi_object *of_compatible, *obj;  	int i, nval; @@ -690,8 +692,11 @@ static bool acpi_of_match_device(struct acpi_device *adev,  		const struct of_device_id *id;  		for (id = of_match_table; id->compatible[0]; id++) -			if (!strcasecmp(obj->string.pointer, id->compatible)) +			if (!strcasecmp(obj->string.pointer, id->compatible)) { +				if (of_id) +					*of_id = id;  				return true; +			}  	}  	return false; @@ -762,10 +767,11 @@ static bool __acpi_match_device_cls(const struct acpi_device_id *id,  	return true;  } -static const struct acpi_device_id *__acpi_match_device( -	struct acpi_device *device, -	const struct acpi_device_id *ids, -	const struct of_device_id *of_ids) +static bool __acpi_match_device(struct acpi_device *device, +				const struct acpi_device_id *acpi_ids, +				const struct of_device_id *of_ids, +				const struct acpi_device_id **acpi_id, +				const struct of_device_id **of_id)  {  	const struct acpi_device_id *id;  	struct acpi_hardware_id *hwid; @@ -775,30 +781,32 @@ static const struct acpi_device_id *__acpi_match_device(  	 * driver for it.  	 */  	if (!device || !device->status.present) -		return NULL; +		return false;  	list_for_each_entry(hwid, &device->pnp.ids, list) {  		/* First, check the ACPI/PNP IDs provided by the caller. 
*/ -		for (id = ids; id->id[0] || id->cls; id++) { -			if (id->id[0] && !strcmp((char *) id->id, hwid->id)) -				return id; -			else if (id->cls && __acpi_match_device_cls(id, hwid)) -				return id; +		if (acpi_ids) { +			for (id = acpi_ids; id->id[0] || id->cls; id++) { +				if (id->id[0] && !strcmp((char *)id->id, hwid->id)) +					goto out_acpi_match; +				if (id->cls && __acpi_match_device_cls(id, hwid)) +					goto out_acpi_match; +			}  		}  		/*  		 * Next, check ACPI_DT_NAMESPACE_HID and try to match the  		 * "compatible" property if found. -		 * -		 * The id returned by the below is not valid, but the only -		 * caller passing non-NULL of_ids here is only interested in -		 * whether or not the return value is NULL.  		 */ -		if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id) -		    && acpi_of_match_device(device, of_ids)) -			return id; +		if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id)) +			return acpi_of_match_device(device, of_ids, of_id);  	} -	return NULL; +	return false; + +out_acpi_match: +	if (acpi_id) +		*acpi_id = id; +	return true;  }  /** @@ -815,32 +823,29 @@ static const struct acpi_device_id *__acpi_match_device(  const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,  					       const struct device *dev)  { -	return __acpi_match_device(acpi_companion_match(dev), ids, NULL); +	const struct acpi_device_id *id = NULL; + +	__acpi_match_device(acpi_companion_match(dev), ids, NULL, &id, NULL); +	return id;  }  EXPORT_SYMBOL_GPL(acpi_match_device); -void *acpi_get_match_data(const struct device *dev) +const void *acpi_device_get_match_data(const struct device *dev)  {  	const struct acpi_device_id *match; -	if (!dev->driver) -		return NULL; - -	if (!dev->driver->acpi_match_table) -		return NULL; -  	match = acpi_match_device(dev->driver->acpi_match_table, dev);  	if (!match)  		return NULL; -	return (void *)match->driver_data; +	return (const void *)match->driver_data;  } -EXPORT_SYMBOL_GPL(acpi_get_match_data); 
+EXPORT_SYMBOL_GPL(acpi_device_get_match_data);  int acpi_match_device_ids(struct acpi_device *device,  			  const struct acpi_device_id *ids)  { -	return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT; +	return __acpi_match_device(device, ids, NULL, NULL, NULL) ? 0 : -ENOENT;  }  EXPORT_SYMBOL(acpi_match_device_ids); @@ -849,10 +854,12 @@ bool acpi_driver_match_device(struct device *dev,  {  	if (!drv->acpi_match_table)  		return acpi_of_match_device(ACPI_COMPANION(dev), -					    drv->of_match_table); +					    drv->of_match_table, +					    NULL); -	return !!__acpi_match_device(acpi_companion_match(dev), -				     drv->acpi_match_table, drv->of_match_table); +	return __acpi_match_device(acpi_companion_match(dev), +				   drv->acpi_match_table, drv->of_match_table, +				   NULL, NULL);  }  EXPORT_SYMBOL_GPL(acpi_driver_match_device); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d9f38c645e4a..30a572956557 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1927,6 +1927,9 @@ static int acpi_ec_suspend_noirq(struct device *dev)  	    ec->reference_count >= 1)  		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); +	if (acpi_sleep_no_ec_events()) +		acpi_ec_enter_noirq(ec); +  	return 0;  } @@ -1934,6 +1937,9 @@ static int acpi_ec_resume_noirq(struct device *dev)  {  	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev)); +	if (acpi_sleep_no_ec_events()) +		acpi_ec_leave_noirq(ec); +  	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&  	    ec->reference_count >= 1)  		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index bbe48ad20886..eb09ef55c38a 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -2675,10 +2675,14 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,  	else  		ndr_desc->numa_node = NUMA_NO_NODE; -	if(acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH) +	/* +	 * Persistence domain bits are hierarchical, 
if +	 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then +	 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied. +	 */ +	if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)  		set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags); - -	if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH) +	else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)  		set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);  	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 8ccaae3550d2..85167603b9c9 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -103,25 +103,27 @@ int acpi_map_pxm_to_node(int pxm)   */  int acpi_map_pxm_to_online_node(int pxm)  { -	int node, n, dist, min_dist; +	int node, min_node;  	node = acpi_map_pxm_to_node(pxm);  	if (node == NUMA_NO_NODE)  		node = 0; +	min_node = node;  	if (!node_online(node)) { -		min_dist = INT_MAX; +		int min_dist = INT_MAX, dist, n; +  		for_each_online_node(n) {  			dist = node_distance(node, n);  			if (dist < min_dist) {  				min_dist = dist; -				node = n; +				min_node = n;  			}  		}  	} -	return node; +	return min_node;  }  EXPORT_SYMBOL(acpi_map_pxm_to_online_node); diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 466d1503aba0..5815356ea6ad 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -1271,11 +1271,11 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,  	return 0;  } -static void * +static const void *  acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,  				  const struct device *dev)  { -	return acpi_get_match_data(dev); +	return acpi_device_get_match_data(dev);  }  #define DECLARE_ACPI_FWNODE_OPS(ops) \ diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c index 89e97d21a89c..9d52743080a4 100644 --- a/drivers/acpi/spcr.c +++ b/drivers/acpi/spcr.c @@ -115,6 +115,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool 
enable_console)  			table->serial_port.access_width))) {  		default:  			pr_err("Unexpected SPCR Access Width.  Defaulting to byte size\n"); +			/* fall through */  		case 8:  			iotype = "mmio";  			break; diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 15e3d3c2260d..764b63a5aade 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -1991,8 +1991,14 @@ static void binder_send_failed_reply(struct binder_transaction *t,  					&target_thread->reply_error.work);  				wake_up_interruptible(&target_thread->wait);  			} else { -				WARN(1, "Unexpected reply error: %u\n", -						target_thread->reply_error.cmd); +				/* +				 * Cannot get here for normal operation, but +				 * we can if multiple synchronous transactions +				 * are sent without blocking for responses. +				 * Just ignore the 2nd error in this case. +				 */ +				pr_warn("Unexpected reply error: %u\n", +					target_thread->reply_error.cmd);  			}  			binder_inner_proc_unlock(target_thread->proc);  			binder_thread_dec_tmpref(target_thread); @@ -2193,7 +2199,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,  	int debug_id = buffer->debug_id;  	binder_debug(BINDER_DEBUG_TRANSACTION, -		     "%d buffer release %d, size %zd-%zd, failed at %p\n", +		     "%d buffer release %d, size %zd-%zd, failed at %pK\n",  		     proc->pid, buffer->debug_id,  		     buffer->data_size, buffer->offsets_size, failed_at); @@ -3705,7 +3711,7 @@ static int binder_thread_write(struct binder_proc *proc,  				}  			}  			binder_debug(BINDER_DEBUG_DEAD_BINDER, -				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", +				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",  				     proc->pid, thread->pid, (u64)cookie,  				     death);  			if (death == NULL) { @@ -4376,6 +4382,15 @@ static int binder_thread_release(struct binder_proc *proc,  	binder_inner_proc_unlock(thread->proc); +	/* +	 * This is needed to avoid races between wake_up_poll() above and +	 * and 
ep_remove_waitqueue() called for other reasons (eg the epoll file +	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read +	 * lock, so we can be sure it's done after calling synchronize_rcu(). +	 */ +	if (thread->looper & BINDER_LOOPER_STATE_POLL) +		synchronize_rcu(); +  	if (send_reply)  		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);  	binder_release_work(proc, &thread->todo); @@ -4391,6 +4406,8 @@ static __poll_t binder_poll(struct file *filp,  	bool wait_for_proc_work;  	thread = binder_get_thread(proc); +	if (!thread) +		return POLLERR;  	binder_inner_proc_lock(thread->proc);  	thread->looper |= BINDER_LOOPER_STATE_POLL; @@ -5034,7 +5051,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,  	spin_lock(&t->lock);  	to_proc = t->to_proc;  	seq_printf(m, -		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", +		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",  		   prefix, t->debug_id, t,  		   t->from ? t->from->proc->pid : 0,  		   t->from ? 
t->from->pid : 0, @@ -5058,7 +5075,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,  	}  	if (buffer->target_node)  		seq_printf(m, " node %d", buffer->target_node->debug_id); -	seq_printf(m, " size %zd:%zd data %p\n", +	seq_printf(m, " size %zd:%zd data %pK\n",  		   buffer->data_size, buffer->offsets_size,  		   buffer->data);  } diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 355a95a83a34..1ff17799769d 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -550,7 +550,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {  	  .driver_data = board_ahci_yes_fbs },  	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),  	  .driver_data = board_ahci_yes_fbs }, -	{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), +	{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */ +	  .driver_data = board_ahci_yes_fbs }, +	{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */  	  .driver_data = board_ahci_yes_fbs },  	/* Promise */ diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index a0de7a38430c..7adcf3caabd0 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -665,6 +665,16 @@ int ahci_stop_engine(struct ata_port *ap)  	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)  		return 0; +	/* +	 * Don't try to issue commands but return with ENODEV if the +	 * AHCI controller not available anymore (e.g. due to PCIe hot +	 * unplugging). Otherwise a 500ms delay for each port is added. 
+	 */ +	if (tmp == 0xffffffff) { +		dev_err(ap->host->dev, "AHCI controller unavailable!\n"); +		return -ENODEV; +	} +  	/* setting HBA to idle */  	tmp &= ~PORT_CMD_START;  	writel(tmp, port_mmio + PORT_CMD); diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 341d0ef82cbd..30cc8f1a31e1 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -340,7 +340,7 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,   * 2) regulator for controlling the targets power (optional)   * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node,   *    or for non devicetree enabled platforms a single clock - *	4) phys (optional) + * 4) phys (optional)   *   * RETURNS:   * The allocated ahci_host_priv on success, otherwise an ERR_PTR value diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 3c09122bf038..7431ccd03316 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4530,6 +4530,25 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {  	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },  	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER }, +	/* Crucial BX100 SSD 500GB has broken LPM support */ +	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM }, + +	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */ +	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM | +						ATA_HORKAGE_ZERO_AFTER_TRIM | +						ATA_HORKAGE_NOLPM, }, +	/* 512GB MX100 with newer firmware has only LPM issues */ +	{ "Crucial_CT512MX100*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM | +						ATA_HORKAGE_NOLPM, }, + +	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */ +	{ "Crucial_CT480M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM | +						ATA_HORKAGE_ZERO_AFTER_TRIM | +						ATA_HORKAGE_NOLPM, }, +	{ "Crucial_CT960M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM | +						ATA_HORKAGE_ZERO_AFTER_TRIM | +						ATA_HORKAGE_NOLPM, }, +  
	/* devices that don't properly handle queued TRIM commands */  	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |  						ATA_HORKAGE_ZERO_AFTER_TRIM, }, @@ -4541,7 +4560,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {  						ATA_HORKAGE_ZERO_AFTER_TRIM, },  	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |  						ATA_HORKAGE_ZERO_AFTER_TRIM, }, -	{ "Samsung SSD 8*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM | +	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM | +						ATA_HORKAGE_ZERO_AFTER_TRIM, }, +	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |  						ATA_HORKAGE_ZERO_AFTER_TRIM, },  	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |  						ATA_HORKAGE_ZERO_AFTER_TRIM, }, @@ -5401,8 +5422,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)  	 * We guarantee to LLDs that they will have at least one  	 * non-zero sg if the command is a data command.  	 */ -	if (WARN_ON_ONCE(ata_is_data(prot) && -			 (!qc->sg || !qc->n_elem || !qc->nbytes))) +	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))  		goto sys_err;  	if (ata_is_dma(prot) || (ata_is_pio(prot) && diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 11c3137d7b0a..c016829a38fd 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -815,7 +815,8 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)  	if (ap->pflags & ATA_PFLAG_LOADING)  		ap->pflags &= ~ATA_PFLAG_LOADING; -	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) +	else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) && +		!(ap->flags & ATA_FLAG_SAS_HOST))  		schedule_delayed_work(&ap->hotplug_task, 0);  	if (ap->pflags & ATA_PFLAG_RECOVERED) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 66be961c93a4..89a9d4a2efc8 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3316,6 +3316,12 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)  		goto invalid_fld;  	} +	/* We may not issue NCQ 
commands to devices not supporting NCQ */ +	if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) { +		fp = 1; +		goto invalid_fld; +	} +  	/* sanity check for pio multi commands */  	if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) {  		fp = 1; @@ -4282,7 +4288,7 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,  #ifdef ATA_DEBUG  	struct scsi_device *scsidev = cmd->device; -	DPRINTK("CDB (%u:%d,%d,%d) %9ph\n", +	DPRINTK("CDB (%u:%d,%d,%lld) %9ph\n",  		ap->print_id,  		scsidev->channel, scsidev->id, scsidev->lun,  		cmd->cmnd); @@ -4309,7 +4315,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,  		if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {  			/* relay SCSI command to ATAPI device */  			int len = COMMAND_SIZE(scsi_op); -			if (unlikely(len > scmd->cmd_len || len > dev->cdb_len)) +			if (unlikely(len > scmd->cmd_len || +				     len > dev->cdb_len || +				     scmd->cmd_len > ATAPI_CDB_LEN))  				goto bad_cdb_len;  			xlat_func = atapi_xlat; diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 80ee2f2a50d0..6456e07db72a 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -146,6 +146,7 @@  enum sata_rcar_type {  	RCAR_GEN1_SATA,  	RCAR_GEN2_SATA, +	RCAR_GEN3_SATA,  	RCAR_R8A7790_ES1_SATA,  }; @@ -784,26 +785,11 @@ static void sata_rcar_setup_port(struct ata_host *host)  	ioaddr->command_addr	= ioaddr->cmd_addr + (ATA_REG_CMD << 2);  } -static void sata_rcar_init_controller(struct ata_host *host) +static void sata_rcar_init_module(struct sata_rcar_priv *priv)  { -	struct sata_rcar_priv *priv = host->private_data;  	void __iomem *base = priv->base;  	u32 val; -	/* reset and setup phy */ -	switch (priv->type) { -	case RCAR_GEN1_SATA: -		sata_rcar_gen1_phy_init(priv); -		break; -	case RCAR_GEN2_SATA: -	case RCAR_R8A7790_ES1_SATA: -		sata_rcar_gen2_phy_init(priv); -		break; -	default: -		dev_warn(host->dev, "SATA phy is not initialized\n"); -		break; -	} -  	/* SATA-IP reset state */  	val = 
ioread32(base + ATAPI_CONTROL1_REG);  	val |= ATAPI_CONTROL1_RESET; @@ -824,10 +810,33 @@ static void sata_rcar_init_controller(struct ata_host *host)  	/* ack and mask */  	iowrite32(0, base + SATAINTSTAT_REG);  	iowrite32(0x7ff, base + SATAINTMASK_REG); +  	/* enable interrupts */  	iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG);  } +static void sata_rcar_init_controller(struct ata_host *host) +{ +	struct sata_rcar_priv *priv = host->private_data; + +	/* reset and setup phy */ +	switch (priv->type) { +	case RCAR_GEN1_SATA: +		sata_rcar_gen1_phy_init(priv); +		break; +	case RCAR_GEN2_SATA: +	case RCAR_GEN3_SATA: +	case RCAR_R8A7790_ES1_SATA: +		sata_rcar_gen2_phy_init(priv); +		break; +	default: +		dev_warn(host->dev, "SATA phy is not initialized\n"); +		break; +	} + +	sata_rcar_init_module(priv); +} +  static const struct of_device_id sata_rcar_match[] = {  	{  		/* Deprecated by "renesas,sata-r8a7779" */ @@ -856,7 +865,7 @@ static const struct of_device_id sata_rcar_match[] = {  	},  	{  		.compatible = "renesas,sata-r8a7795", -		.data = (void *)RCAR_GEN2_SATA +		.data = (void *)RCAR_GEN3_SATA  	},  	{  		.compatible = "renesas,rcar-gen2-sata", @@ -864,7 +873,7 @@ static const struct of_device_id sata_rcar_match[] = {  	},  	{  		.compatible = "renesas,rcar-gen3-sata", -		.data = (void *)RCAR_GEN2_SATA +		.data = (void *)RCAR_GEN3_SATA  	},  	{ },  }; @@ -982,11 +991,18 @@ static int sata_rcar_resume(struct device *dev)  	if (ret)  		return ret; -	/* ack and mask */ -	iowrite32(0, base + SATAINTSTAT_REG); -	iowrite32(0x7ff, base + SATAINTMASK_REG); -	/* enable interrupts */ -	iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG); +	if (priv->type == RCAR_GEN3_SATA) { +		sata_rcar_gen2_phy_init(priv); +		sata_rcar_init_module(priv); +	} else { +		/* ack and mask */ +		iowrite32(0, base + SATAINTSTAT_REG); +		iowrite32(0x7ff, base + SATAINTMASK_REG); + +		/* enable interrupts */ +		iowrite32(ATAPI_INT_ENABLE_SATAINT, +			  base + 
ATAPI_INT_ENABLE_REG); +	}  	ata_host_resume(host); diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c index 9180b9bd5821..834509506ef6 100644 --- a/drivers/auxdisplay/img-ascii-lcd.c +++ b/drivers/auxdisplay/img-ascii-lcd.c @@ -97,7 +97,7 @@ static struct img_ascii_lcd_config boston_config = {  static void malta_update(struct img_ascii_lcd_ctx *ctx)  {  	unsigned int i; -	int err; +	int err = 0;  	for (i = 0; i < ctx->cfg->num_chars; i++) {  		err = regmap_write(ctx->regmap, @@ -180,7 +180,7 @@ static int sead3_wait_lcd_idle(struct img_ascii_lcd_ctx *ctx)  static void sead3_update(struct img_ascii_lcd_ctx *ctx)  {  	unsigned int i; -	int err; +	int err = 0;  	for (i = 0; i < ctx->cfg->num_chars; i++) {  		err = sead3_wait_lcd_idle(ctx); @@ -224,7 +224,7 @@ MODULE_DEVICE_TABLE(of, img_ascii_lcd_matches);  /**   * img_ascii_lcd_scroll() - scroll the display by a character - * @arg: really a pointer to the private data structure + * @t: really a pointer to the private data structure   *   * Scroll the current message along the LCD by one character, rearming the   * timer if required. 
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index ea7869c0d7f9..ec5e8800f8ad 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c @@ -1372,7 +1372,7 @@ static void panel_process_inputs(void)  				break;  			input->rise_timer = 0;  			input->state = INPUT_ST_RISING; -			/* no break here, fall through */ +			/* fall through */  		case INPUT_ST_RISING:  			if ((phys_curr & input->mask) != input->value) {  				input->state = INPUT_ST_LOW; @@ -1385,11 +1385,11 @@ static void panel_process_inputs(void)  			}  			input->high_timer = 0;  			input->state = INPUT_ST_HIGH; -			/* no break here, fall through */ +			/* fall through */  		case INPUT_ST_HIGH:  			if (input_state_high(input))  				break; -			/* no break here, fall through */ +			/* fall through */  		case INPUT_ST_FALLING:  			input_state_falling(input);  		} diff --git a/drivers/base/core.c b/drivers/base/core.c index b2261f92f2f1..5847364f25d9 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -310,6 +310,9 @@ static void __device_link_del(struct device_link *link)  	dev_info(link->consumer, "Dropping the link to %s\n",  		 dev_name(link->supplier)); +	if (link->flags & DL_FLAG_PM_RUNTIME) +		pm_runtime_drop_link(link->consumer); +  	list_del(&link->s_node);  	list_del(&link->c_node);  	device_link_free(link); diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index a8ac86e4d79e..6637fc319269 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -321,7 +321,8 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)  		return;  	if (device_may_wakeup(wirq->dev)) { -		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) +		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && +		    !pm_runtime_status_suspended(wirq->dev))  			enable_irq(wirq->irq);  		enable_irq_wake(wirq->irq); @@ -343,7 +344,8 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)  	if (device_may_wakeup(wirq->dev)) {  		disable_irq_wake(wirq->irq); -		if 
(wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) +		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && +		    !pm_runtime_status_suspended(wirq->dev))  			disable_irq_nosync(wirq->irq);  	}  } diff --git a/drivers/base/property.c b/drivers/base/property.c index 302236281d83..8f205f6461ed 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -1410,9 +1410,8 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,  }  EXPORT_SYMBOL(fwnode_graph_parse_endpoint); -void *device_get_match_data(struct device *dev) +const void *device_get_match_data(struct device *dev)  { -	return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, -				  dev); +	return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev);  }  EXPORT_SYMBOL_GPL(device_get_match_data); diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index e5aa62fcf5a8..3aaf6af3ec23 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1758,7 +1758,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)  	if (unit[drive].type->code == FD_NODRIVE)  		return NULL;  	*part = 0; -	return get_disk(unit[drive].gendisk); +	return get_disk_and_module(unit[drive].gendisk);  }  static int __init amiga_floppy_probe(struct platform_device *pdev) diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 8bc3b9fd8dd2..dfb2c2622e5a 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1917,7 +1917,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)  	if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS)  		return NULL;  	*part = 0; -	return get_disk(unit[drive].disk); +	return get_disk_and_module(unit[drive].disk);  }  static int __init atari_floppy_init (void) diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 8028a3a7e7fd..deea78e485da 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -456,7 +456,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)  	
mutex_lock(&brd_devices_mutex);  	brd = brd_init_one(MINOR(dev) / max_part, &new); -	kobj = brd ? get_disk(brd->brd_disk) : NULL; +	kobj = brd ? get_disk_and_module(brd->brd_disk) : NULL;  	mutex_unlock(&brd_devices_mutex);  	if (new) diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index eae484acfbbc..8ec7235fc93b 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4505,7 +4505,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)  	if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type))  		return NULL;  	*part = 0; -	return get_disk(disks[drive]); +	return get_disk_and_module(disks[drive]);  }  static int __init do_floppy_init(void) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index d5fe720cf149..ee62d2d517bf 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -266,7 +266,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)  	struct iov_iter i;  	ssize_t bw; -	iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len); +	iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);  	file_start_write(file);  	bw = vfs_iter_write(file, &i, ppos, 0); @@ -1922,7 +1922,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)  	if (err < 0)  		kobj = NULL;  	else -		kobj = get_disk(lo->lo_disk); +		kobj = get_disk_and_module(lo->lo_disk);  	mutex_unlock(&loop_index_mutex);  	*part = 0; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 5f2a4240a204..86258b00a1d4 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1591,7 +1591,7 @@ again:  			if (new_index < 0) {  				mutex_unlock(&nbd_index_mutex);  				printk(KERN_ERR "nbd: failed to add new device\n"); -				return ret; +				return new_index;  			}  			nbd = idr_find(&nbd_index_idr, new_index);  		} diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 531a0915066b..c61d20c9f3f8 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -1122,7 +1122,7 @@ static 
int pkt_start_recovery(struct packet_data *pkt)  	pkt->sector = new_sector;  	bio_reset(pkt->bio); -	bio_set_set(pkt->bio, pd->bdev); +	bio_set_dev(pkt->bio, pd->bdev);  	bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);  	pkt->bio->bi_iter.bi_sector = new_sector;  	pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE; diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 84434d3ea19b..64e066eba72e 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -799,7 +799,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)  		return NULL;  	*part = 0; -	return get_disk(swd->unit[drive].disk); +	return get_disk_and_module(swd->unit[drive].disk);  }  static int swim_add_floppy(struct swim_priv *swd, enum drive_location location) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index e126e4cac2ca..92ec1bbece51 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -262,6 +262,7 @@ static DEFINE_SPINLOCK(minor_lock);  static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);  static void blkfront_gather_backend_features(struct blkfront_info *info); +static int negotiate_mq(struct blkfront_info *info);  static int get_id_from_freelist(struct blkfront_ring_info *rinfo)  { @@ -1774,11 +1775,18 @@ static int talk_to_blkback(struct xenbus_device *dev,  	unsigned int i, max_page_order;  	unsigned int ring_page_order; +	if (!info) +		return -ENODEV; +  	max_page_order = xenbus_read_unsigned(info->xbdev->otherend,  					      "max-ring-page-order", 0);  	ring_page_order = min(xen_blkif_max_ring_order, max_page_order);  	info->nr_ring_pages = 1 << ring_page_order; +	err = negotiate_mq(info); +	if (err) +		goto destroy_blkring; +  	for (i = 0; i < info->nr_rings; i++) {  		struct blkfront_ring_info *rinfo = &info->rinfo[i]; @@ -1978,11 +1986,6 @@ static int blkfront_probe(struct xenbus_device *dev,  	}  	info->xbdev = dev; -	err = negotiate_mq(info); -	if (err) { -		kfree(info); -		return 
err; -	}  	mutex_init(&info->mutex);  	info->vdevice = vdevice; @@ -2099,10 +2102,6 @@ static int blkfront_resume(struct xenbus_device *dev)  	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); -	err = negotiate_mq(info); -	if (err) -		return err; -  	err = talk_to_blkback(dev, info);  	if (!err)  		blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings); diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 41c95c9b2ab4..8f9130ab5887 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -332,7 +332,7 @@ static const struct block_device_operations z2_fops =  static struct kobject *z2_find(dev_t dev, int *part, void *data)  {  	*part = 0; -	return get_disk(z2ram_gendisk); +	return get_disk_and_module(z2ram_gendisk);  }  static struct request_queue *z2_queue; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 2a55380ad730..366a49c7c08f 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -21,6 +21,7 @@   *   */ +#include <linux/dmi.h>  #include <linux/module.h>  #include <linux/usb.h>  #include <linux/usb/quirks.h> @@ -230,7 +231,6 @@ static const struct usb_device_id blacklist_table[] = {  	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, -	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, @@ -263,6 +263,7 @@ static const struct usb_device_id blacklist_table[] = {  	{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },  	/* QCA ROME chipset */ +	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },  	{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },  	{ USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },  	{ 
USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME }, @@ -379,6 +380,21 @@ static const struct usb_device_id blacklist_table[] = {  	{ }	/* Terminating entry */  }; +/* The Bluetooth USB module build into some devices needs to be reset on resume, + * this is a problem with the platform (likely shutting off all power) not with + * the module itself. So we use a DMI list to match known broken platforms. + */ +static const struct dmi_system_id btusb_needs_reset_resume_table[] = { +	{ +		/* Dell OptiPlex 3060 (QCA ROME device 0cf3:e007) */ +		.matches = { +			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), +			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"), +		}, +	}, +	{} +}; +  #define BTUSB_MAX_ISOC_FRAMES	10  #define BTUSB_INTR_RUNNING	0 @@ -2945,6 +2961,9 @@ static int btusb_probe(struct usb_interface *intf,  	hdev->send   = btusb_send_frame;  	hdev->notify = btusb_notify; +	if (dmi_check_system(btusb_needs_reset_resume_table)) +		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; +  #ifdef CONFIG_PM  	err = btusb_config_oob_wake(hdev);  	if (err) @@ -3031,12 +3050,6 @@ static int btusb_probe(struct usb_interface *intf,  	if (id->driver_info & BTUSB_QCA_ROME) {  		data->setup_on_usb = btusb_setup_qca;  		hdev->set_bdaddr = btusb_set_bdaddr_ath3012; - -		/* QCA Rome devices lose their updated firmware over suspend, -		 * but the USB hub doesn't notice any status change. -		 * explicitly request a device reset on resume. 
-		 */ -		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;  	}  #ifdef CONFIG_BT_HCIBTUSB_RTL diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 0438a64b8185..40b9fb247010 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -244,7 +244,9 @@ static irqreturn_t bcm_host_wake(int irq, void *data)  	bt_dev_dbg(bdev, "Host wake IRQ"); -	pm_request_resume(bdev->dev); +	pm_runtime_get(bdev->dev); +	pm_runtime_mark_last_busy(bdev->dev); +	pm_runtime_put_autosuspend(bdev->dev);  	return IRQ_HANDLED;  } @@ -301,7 +303,7 @@ static const struct bcm_set_sleep_mode default_sleep_params = {  	.usb_auto_sleep = 0,  	.usb_resume_timeout = 0,  	.break_to_host = 0, -	.pulsed_host_wake = 0, +	.pulsed_host_wake = 1,  };  static int bcm_setup_sleep(struct hci_uart *hu) @@ -586,8 +588,11 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)  	} else if (!bcm->rx_skb) {  		/* Delay auto-suspend when receiving completed packet */  		mutex_lock(&bcm_device_lock); -		if (bcm->dev && bcm_device_exists(bcm->dev)) -			pm_request_resume(bcm->dev->dev); +		if (bcm->dev && bcm_device_exists(bcm->dev)) { +			pm_runtime_get(bcm->dev->dev); +			pm_runtime_mark_last_busy(bcm->dev->dev); +			pm_runtime_put_autosuspend(bcm->dev->dev); +		}  		mutex_unlock(&bcm_device_lock);  	} @@ -922,12 +927,13 @@ static int bcm_get_resources(struct bcm_device *dev)  	dev->clk = devm_clk_get(dev->dev, NULL); -	dev->device_wakeup = devm_gpiod_get(dev->dev, "device-wakeup", -					    GPIOD_OUT_LOW); +	dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup", +						     GPIOD_OUT_LOW);  	if (IS_ERR(dev->device_wakeup))  		return PTR_ERR(dev->device_wakeup); -	dev->shutdown = devm_gpiod_get(dev->dev, "shutdown", GPIOD_OUT_LOW); +	dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown", +						GPIOD_OUT_LOW);  	if (IS_ERR(dev->shutdown))  		return PTR_ERR(dev->shutdown); diff --git a/drivers/bus/ti-sysc.c 
b/drivers/bus/ti-sysc.c index 4d46003c46cf..cdaeeea7999c 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -630,7 +630,7 @@ static int sysc_init_dts_quirks(struct sysc *ddata)  	for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) {  		prop = of_get_property(np, sysc_dts_quirks[i].name, &len);  		if (!prop) -			break; +			continue;  		ddata->cfg.quirks |= sysc_dts_quirks[i].mask;  	} diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index d1f5bb534e0e..6e9df558325b 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -162,7 +162,7 @@ static int via_rng_init(struct hwrng *rng)  	/* Enable secondary noise source on CPUs where it is present. */  	/* Nehemiah stepping 8 and higher */ -	if ((c->x86_model == 9) && (c->x86_mask > 7)) +	if ((c->x86_model == 9) && (c->x86_stepping > 7))  		lo |= VIA_NOISESRC2;  	/* Esther */ diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c index 4d1dc8b46877..f95b9c75175b 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.c +++ b/drivers/char/tpm/st33zp24/st33zp24.c @@ -457,7 +457,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,  			    size_t count)  {  	int size = 0; -	int expected; +	u32 expected;  	if (!chip)  		return -EBUSY; @@ -474,7 +474,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,  	}  	expected = be32_to_cpu(*(__be32 *)(buf + 2)); -	if (expected > count) { +	if (expected > count || expected < TPM_HEADER_SIZE) {  		size = -EIO;  		goto out;  	} diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 76df4fbcf089..9e80a953d693 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -1190,6 +1190,10 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)  			break;  		recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len); +		if (recd > num_bytes) { +			total = -EFAULT; +			break; +		}  		
rlength = be32_to_cpu(tpm_cmd.header.out.length);  		if (rlength < offsetof(struct tpm_getrandom_out, rng_data) + diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index c17e75348a99..a700f8f9ead7 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -683,6 +683,10 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,  	if (!rc) {  		data_len = be16_to_cpup(  			(__be16 *) &buf.data[TPM_HEADER_SIZE + 4]); +		if (data_len < MIN_KEY_SIZE ||  data_len > MAX_KEY_SIZE + 1) { +			rc = -EFAULT; +			goto out; +		}  		rlength = be32_to_cpu(((struct tpm2_cmd *)&buf)  					->header.out.length); diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index c1dd39eaaeeb..6116cd05e228 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c @@ -473,7 +473,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)  static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)  {  	int size = 0; -	int expected, status; +	int status; +	u32 expected;  	if (count < TPM_HEADER_SIZE) {  		size = -EIO; @@ -488,7 +489,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)  	}  	expected = be32_to_cpu(*(__be32 *)(buf + 2)); -	if ((size_t) expected > count) { +	if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {  		size = -EIO;  		goto out;  	} diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c index c6428771841f..caa86b19c76d 100644 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c @@ -281,7 +281,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)  	struct device *dev = chip->dev.parent;  	struct i2c_client *client = to_i2c_client(dev);  	s32 rc; -	int expected, status, burst_count, retries, size = 0; +	int status; +	int burst_count; +	int retries; +	int size = 0; +	u32 expected;  	if (count < TPM_HEADER_SIZE) {  		
i2c_nuvoton_ready(chip);    /* return to idle */ @@ -323,7 +327,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)  		 * to machine native  		 */  		expected = be32_to_cpu(*(__be32 *) (buf + 2)); -		if (expected > count) { +		if (expected > count || expected < size) {  			dev_err(dev, "%s() expected > count\n", __func__);  			size = -EIO;  			continue; diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 183a5f54d875..da074e3db19b 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -270,7 +270,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)  {  	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);  	int size = 0; -	int expected, status; +	int status; +	u32 expected;  	if (count < TPM_HEADER_SIZE) {  		size = -EIO; @@ -285,7 +286,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)  	}  	expected = be32_to_cpu(*(__be32 *) (buf + 2)); -	if (expected > count) { +	if (expected > count || expected < TPM_HEADER_SIZE) {  		size = -EIO;  		goto out;  	} diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 44301a3d9963..a07f6451694a 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -449,17 +449,17 @@ struct bcm2835_pll_ana_bits {  static const struct bcm2835_pll_ana_bits bcm2835_ana_default = {  	.mask0 = 0,  	.set0 = 0, -	.mask1 = (u32)~(A2W_PLL_KI_MASK | A2W_PLL_KP_MASK), +	.mask1 = A2W_PLL_KI_MASK | A2W_PLL_KP_MASK,  	.set1 = (2 << A2W_PLL_KI_SHIFT) | (8 << A2W_PLL_KP_SHIFT), -	.mask3 = (u32)~A2W_PLL_KA_MASK, +	.mask3 = A2W_PLL_KA_MASK,  	.set3 = (2 << A2W_PLL_KA_SHIFT),  	.fb_prediv_mask = BIT(14),  };  static const struct bcm2835_pll_ana_bits bcm2835_ana_pllh = { -	.mask0 = (u32)~(A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK), +	.mask0 = A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK,  	.set0 = (2 << A2W_PLLH_KA_SHIFT) | (2 << A2W_PLLH_KI_LOW_SHIFT), -	.mask1 = 
(u32)~(A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK), +	.mask1 = A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK,  	.set1 = (6 << A2W_PLLH_KP_SHIFT),  	.mask3 = 0,  	.set3 = 0, @@ -623,8 +623,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)  		     ~A2W_PLL_CTRL_PWRDN);  	/* Take the PLL out of reset. */ +	spin_lock(&cprman->regs_lock);  	cprman_write(cprman, data->cm_ctrl_reg,  		     cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST); +	spin_unlock(&cprman->regs_lock);  	/* Wait for the PLL to lock. */  	timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS); @@ -701,9 +703,11 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,  	}  	/* Unmask the reference clock from the oscillator. */ +	spin_lock(&cprman->regs_lock);  	cprman_write(cprman, A2W_XOSC_CTRL,  		     cprman_read(cprman, A2W_XOSC_CTRL) |  		     data->reference_enable_mask); +	spin_unlock(&cprman->regs_lock);  	if (do_ana_setup_first)  		bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana); diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c index 9f7f931d6b2f..5eb50c31e455 100644 --- a/drivers/clk/clk-aspeed.c +++ b/drivers/clk/clk-aspeed.c @@ -205,6 +205,18 @@ static const struct aspeed_clk_soc_data ast2400_data = {  	.calc_pll = aspeed_ast2400_calc_pll,  }; +static int aspeed_clk_is_enabled(struct clk_hw *hw) +{ +	struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw); +	u32 clk = BIT(gate->clock_idx); +	u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk; +	u32 reg; + +	regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, ®); + +	return ((reg & clk) == enval) ? 
1 : 0; +} +  static int aspeed_clk_enable(struct clk_hw *hw)  {  	struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw); @@ -215,6 +227,11 @@ static int aspeed_clk_enable(struct clk_hw *hw)  	spin_lock_irqsave(gate->lock, flags); +	if (aspeed_clk_is_enabled(hw)) { +		spin_unlock_irqrestore(gate->lock, flags); +		return 0; +	} +  	if (gate->reset_idx >= 0) {  		/* Put IP in reset */  		regmap_update_bits(gate->map, ASPEED_RESET_CTRL, rst, rst); @@ -255,17 +272,6 @@ static void aspeed_clk_disable(struct clk_hw *hw)  	spin_unlock_irqrestore(gate->lock, flags);  } -static int aspeed_clk_is_enabled(struct clk_hw *hw) -{ -	struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw); -	u32 clk = BIT(gate->clock_idx); -	u32 reg; - -	regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, ®); - -	return (reg & clk) ? 0 : 1; -} -  static const struct clk_ops aspeed_clk_gate_ops = {  	.enable = aspeed_clk_enable,  	.disable = aspeed_clk_disable, diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 0f686a9dac3e..076d4244d672 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1125,8 +1125,10 @@ static int clk_core_round_rate_nolock(struct clk_core *core,  {  	lockdep_assert_held(&prepare_lock); -	if (!core) +	if (!core) { +		req->rate = 0;  		return 0; +	}  	clk_core_init_rate_req(core, req); @@ -2309,8 +2311,11 @@ static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)  	trace_clk_set_phase(core, degrees); -	if (core->ops->set_phase) +	if (core->ops->set_phase) {  		ret = core->ops->set_phase(core->hw, degrees); +		if (!ret) +			core->phase = degrees; +	}  	trace_clk_set_phase_complete(core, degrees); @@ -2968,22 +2973,37 @@ static int __clk_core_init(struct clk_core *core)  	core->rate = core->req_rate = rate;  	/* +	 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks +	 * don't get accidentally disabled when walking the orphan tree and +	 * reparenting clocks +	 */ +	if (core->flags & CLK_IS_CRITICAL) { +		unsigned long flags; + +		
clk_core_prepare(core); + +		flags = clk_enable_lock(); +		clk_core_enable(core); +		clk_enable_unlock(flags); +	} + +	/*  	 * walk the list of orphan clocks and reparent any that newly finds a  	 * parent.  	 */  	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {  		struct clk_core *parent = __clk_init_parent(orphan); -		unsigned long flags;  		/* -		 * we could call __clk_set_parent, but that would result in a -		 * redundant call to the .set_rate op, if it exists +		 * We need to use __clk_set_parent_before() and _after() to +		 * to properly migrate any prepare/enable count of the orphan +		 * clock. This is important for CLK_IS_CRITICAL clocks, which +		 * are enabled during init but might not have a parent yet.  		 */  		if (parent) {  			/* update the clk tree topology */ -			flags = clk_enable_lock(); -			clk_reparent(orphan, parent); -			clk_enable_unlock(flags); +			__clk_set_parent_before(orphan, parent); +			__clk_set_parent_after(orphan, parent, NULL);  			__clk_recalc_accuracies(orphan);  			__clk_recalc_rates(orphan, 0);  		} @@ -3000,16 +3020,6 @@ static int __clk_core_init(struct clk_core *core)  	if (core->ops->init)  		core->ops->init(core->hw); -	if (core->flags & CLK_IS_CRITICAL) { -		unsigned long flags; - -		clk_core_prepare(core); - -		flags = clk_enable_lock(); -		clk_core_enable(core); -		clk_enable_unlock(flags); -	} -  	kref_init(&core->ref);  out:  	clk_pm_runtime_put(core); diff --git a/drivers/clk/hisilicon/clk-hi3660-stub.c b/drivers/clk/hisilicon/clk-hi3660-stub.c index 9b6c72bbddf9..e8b2c43b1bb8 100644 --- a/drivers/clk/hisilicon/clk-hi3660-stub.c +++ b/drivers/clk/hisilicon/clk-hi3660-stub.c @@ -149,6 +149,8 @@ static int hi3660_stub_clk_probe(struct platform_device *pdev)  		return PTR_ERR(stub_clk_chan.mbox);  	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	if (!res) +		return -EINVAL;  	freq_reg = devm_ioremap(dev, res->start, resource_size(res));  	if (!freq_reg)  		return -ENOMEM; diff --git 
a/drivers/clk/imx/clk-imx51-imx53.c b/drivers/clk/imx/clk-imx51-imx53.c index c864992e6983..caa8bd40692c 100644 --- a/drivers/clk/imx/clk-imx51-imx53.c +++ b/drivers/clk/imx/clk-imx51-imx53.c @@ -131,7 +131,17 @@ static const char *ieee1588_sels[] = { "pll3_sw", "pll4_sw", "dummy" /* usbphy2_  static struct clk *clk[IMX5_CLK_END];  static struct clk_onecell_data clk_data; -static struct clk ** const uart_clks[] __initconst = { +static struct clk ** const uart_clks_mx51[] __initconst = { +	&clk[IMX5_CLK_UART1_IPG_GATE], +	&clk[IMX5_CLK_UART1_PER_GATE], +	&clk[IMX5_CLK_UART2_IPG_GATE], +	&clk[IMX5_CLK_UART2_PER_GATE], +	&clk[IMX5_CLK_UART3_IPG_GATE], +	&clk[IMX5_CLK_UART3_PER_GATE], +	NULL +}; + +static struct clk ** const uart_clks_mx50_mx53[] __initconst = {  	&clk[IMX5_CLK_UART1_IPG_GATE],  	&clk[IMX5_CLK_UART1_PER_GATE],  	&clk[IMX5_CLK_UART2_IPG_GATE], @@ -321,8 +331,6 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)  	clk_prepare_enable(clk[IMX5_CLK_TMAX1]);  	clk_prepare_enable(clk[IMX5_CLK_TMAX2]); /* esdhc2, fec */  	clk_prepare_enable(clk[IMX5_CLK_TMAX3]); /* esdhc1, esdhc4 */ - -	imx_register_uart_clocks(uart_clks);  }  static void __init mx50_clocks_init(struct device_node *np) @@ -388,6 +396,8 @@ static void __init mx50_clocks_init(struct device_node *np)  	r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);  	clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r); + +	imx_register_uart_clocks(uart_clks_mx50_mx53);  }  CLK_OF_DECLARE(imx50_ccm, "fsl,imx50-ccm", mx50_clocks_init); @@ -477,6 +487,8 @@ static void __init mx51_clocks_init(struct device_node *np)  	val = readl(MXC_CCM_CLPCR);  	val |= 1 << 23;  	writel(val, MXC_CCM_CLPCR); + +	imx_register_uart_clocks(uart_clks_mx51);  }  CLK_OF_DECLARE(imx51_ccm, "fsl,imx51-ccm", mx51_clocks_init); @@ -606,5 +618,7 @@ static void __init mx53_clocks_init(struct device_node *np)  	r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);  	clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r); 
+ +	imx_register_uart_clocks(uart_clks_mx50_mx53);  }  CLK_OF_DECLARE(imx53_ccm, "fsl,imx53-ccm", mx53_clocks_init); diff --git a/drivers/clk/qcom/apcs-msm8916.c b/drivers/clk/qcom/apcs-msm8916.c index 246957f1a413..b1cc8dbcd327 100644 --- a/drivers/clk/qcom/apcs-msm8916.c +++ b/drivers/clk/qcom/apcs-msm8916.c @@ -49,11 +49,10 @@ static int qcom_apcs_msm8916_clk_probe(struct platform_device *pdev)  	struct clk_regmap_mux_div *a53cc;  	struct regmap *regmap;  	struct clk_init_data init = { }; -	int ret; +	int ret = -ENODEV;  	regmap = dev_get_regmap(parent, NULL); -	if (IS_ERR(regmap)) { -		ret = PTR_ERR(regmap); +	if (!regmap) {  		dev_err(dev, "failed to get regmap: %d\n", ret);  		return ret;  	} diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 72b16ed1012b..3b97f60540ad 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -762,7 +762,7 @@ static struct ccu_mp out_a_clk = {  		.features	= CCU_FEATURE_FIXED_PREDIV,  		.hw.init	= CLK_HW_INIT_PARENTS("out-a",  						      clk_out_parents, -						      &ccu_div_ops, +						      &ccu_mp_ops,  						      0),  	},  }; @@ -783,7 +783,7 @@ static struct ccu_mp out_b_clk = {  		.features	= CCU_FEATURE_FIXED_PREDIV,  		.hw.init	= CLK_HW_INIT_PARENTS("out-b",  						      clk_out_parents, -						      &ccu_div_ops, +						      &ccu_mp_ops,  						      0),  	},  }; @@ -804,7 +804,7 @@ static struct ccu_mp out_c_clk = {  		.features	= CCU_FEATURE_FIXED_PREDIV,  		.hw.init	= CLK_HW_INIT_PARENTS("out-c",  						      clk_out_parents, -						      &ccu_div_ops, +						      &ccu_mp_ops,  						      0),  	},  }; diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c index 612491a26070..12e0a2d19911 100644 --- a/drivers/clk/ti/clk-33xx.c +++ b/drivers/clk/ti/clk-33xx.c @@ -45,7 +45,7 @@ static const struct omap_clkctrl_bit_data am3_gpio4_bit_data[] __initconst = {  static const struct omap_clkctrl_reg_data 
am3_l4_per_clkctrl_regs[] __initconst = {  	{ AM3_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" }, -	{ AM3_LCDC_CLKCTRL, NULL, CLKF_SW_SUP, "lcd_gclk", "lcdc_clkdm" }, +	{ AM3_LCDC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "lcd_gclk", "lcdc_clkdm" },  	{ AM3_USB_OTG_HS_CLKCTRL, NULL, CLKF_SW_SUP, "usbotg_fck", "l3s_clkdm" },  	{ AM3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },  	{ AM3_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_div2_ck", "l3_clkdm" }, diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c index 2b7c2e017665..63c5ddb50187 100644 --- a/drivers/clk/ti/clk-43xx.c +++ b/drivers/clk/ti/clk-43xx.c @@ -187,7 +187,7 @@ static const struct omap_clkctrl_reg_data am4_l4_per_clkctrl_regs[] __initconst  	{ AM4_OCP2SCP0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },  	{ AM4_OCP2SCP1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },  	{ AM4_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_ck", "emif_clkdm" }, -	{ AM4_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "disp_clk", "dss_clkdm" }, +	{ AM4_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "disp_clk", "dss_clkdm" },  	{ AM4_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" },  	{ 0 },  }; diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c index afa0d6bfc5c1..421b05392220 100644 --- a/drivers/clk/ti/clkctrl.c +++ b/drivers/clk/ti/clkctrl.c @@ -537,6 +537,8 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)  		init.parent_names = ®_data->parent;  		init.num_parents = 1;  		init.flags = 0; +		if (reg_data->flags & CLKF_SET_RATE_PARENT) +			init.flags |= CLK_SET_RATE_PARENT;  		init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d",  				      node->parent->name, node->name,  				      reg_data->offset, 0); diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index b3b4ed9b6874..d2e5382821a4 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -386,6 +386,7 @@ 
config ATMEL_PIT  config ATMEL_ST  	bool "Atmel ST timer support" if COMPILE_TEST +	depends on HAS_IOMEM  	select TIMER_OF  	select MFD_SYSCON  	help diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c index 4927355f9cbe..471b428d8034 100644 --- a/drivers/clocksource/arc_timer.c +++ b/drivers/clocksource/arc_timer.c @@ -251,9 +251,14 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)  	int irq_reenable = clockevent_state_periodic(evt);  	/* -	 * Any write to CTRL reg ACks the interrupt, we rewrite the -	 * Count when [N]ot [H]alted bit. -	 * And re-arm it if perioid by [I]nterrupt [E]nable bit +	 * 1. ACK the interrupt +	 *    - For ARC700, any write to CTRL reg ACKs it, so just rewrite +	 *      Count when [N]ot [H]alted bit. +	 *    - For HS3x, it is a bit subtle. On taken count-down interrupt, +	 *      IP bit [3] is set, which needs to be cleared for ACK'ing. +	 *      The write below can only update the other two bits, hence +	 *      explicitly clears IP bit +	 * 2. Re-arm interrupt if periodic by writing to IE bit [0]  	 */  	write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH); diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c index 3ee7e6fea621..846d18daf893 100644 --- a/drivers/clocksource/fsl_ftm_timer.c +++ b/drivers/clocksource/fsl_ftm_timer.c @@ -281,7 +281,7 @@ static int __init __ftm_clk_init(struct device_node *np, char *cnt_name,  static unsigned long __init ftm_clk_init(struct device_node *np)  { -	unsigned long freq; +	long freq;  	freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt");  	if (freq <= 0) diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index a04808a21d4e..986b6796b631 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c @@ -166,7 +166,7 @@ static int __init __gic_clocksource_init(void)  	/* Set clocksource mask. 
*/  	count_width = read_gic_config() & GIC_CONFIG_COUNTBITS; -	count_width >>= __fls(GIC_CONFIG_COUNTBITS); +	count_width >>= __ffs(GIC_CONFIG_COUNTBITS);  	count_width *= 4;  	count_width += 32;  	gic_clocksource.mask = CLOCKSOURCE_MASK(count_width); @@ -205,12 +205,12 @@ static int __init gic_clocksource_of_init(struct device_node *node)  	} else if (of_property_read_u32(node, "clock-frequency",  					&gic_frequency)) {  		pr_err("GIC frequency not specified.\n"); -		return -EINVAL;; +		return -EINVAL;  	}  	gic_timer_irq = irq_of_parse_and_map(node, 0);  	if (!gic_timer_irq) {  		pr_err("GIC timer IRQ not specified.\n"); -		return -EINVAL;; +		return -EINVAL;  	}  	ret = __gic_clocksource_init(); diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 2a3fe83ec337..3b56ea3f52af 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c @@ -334,7 +334,7 @@ static int __init sun5i_timer_init(struct device_node *node)  	timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));  	if (IS_ERR(timer_base)) {  		pr_err("Can't map registers\n"); -		return PTR_ERR(timer_base);; +		return PTR_ERR(timer_base);  	}  	irq = irq_of_parse_and_map(node, 0); diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 3a88e33b0cfe..fb586e09682d 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -44,10 +44,10 @@ config ARM_DT_BL_CPUFREQ  config ARM_SCPI_CPUFREQ  	tristate "SCPI based CPUfreq driver" -	depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI +	depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI  	help -	  This adds the CPUfreq driver support for ARM big.LITTLE platforms -	  using SCPI protocol for CPU power management. +	  This adds the CPUfreq driver support for ARM platforms using SCPI +	  protocol for CPU power management.  	  
This driver uses SCPI Message Protocol driver to interact with the  	  firmware providing the CPU DVFS functionality. diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 3a2ca0f79daf..d0c34df0529c 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -629,7 +629,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)  	if (c->x86_vendor == X86_VENDOR_INTEL) {  		if ((c->x86 == 15) &&  		    (c->x86_model == 6) && -		    (c->x86_mask == 8)) { +		    (c->x86_stepping == 8)) {  			pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");  			return -ENODEV;  		    } diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 942632a27b50..f730b6528c18 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)  		break;  	case 7: -		switch (c->x86_mask) { +		switch (c->x86_stepping) {  		case 0:  			longhaul_version = TYPE_LONGHAUL_V1;  			cpu_model = CPU_SAMUEL2; @@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)  			break;  		case 1 ... 15:  			longhaul_version = TYPE_LONGHAUL_V2; -			if (c->x86_mask < 8) { +			if (c->x86_stepping < 8) {  				cpu_model = CPU_SAMUEL2;  				cpuname = "C3 'Samuel 2' [C5B]";  			} else { @@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)  		numscales = 32;  		memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));  		memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr)); -		switch (c->x86_mask) { +		switch (c->x86_stepping) {  		case 0 ... 
1:  			cpu_model = CPU_NEHEMIAH;  			cpuname = "C3 'Nehemiah A' [C5XLOE]"; diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index fd77812313f3..a25741b1281b 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c @@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)  #endif  	/* Errata workaround */ -	cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask; +	cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping;  	switch (cpuid) {  	case 0x0f07:  	case 0x0f0a: diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 80ac313e6c59..302e9ce793a0 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -131,7 +131,7 @@ static int check_powernow(void)  		return 0;  	} -	if ((c->x86_model == 6) && (c->x86_mask == 0)) { +	if ((c->x86_model == 6) && (c->x86_stepping == 0)) {  		pr_info("K7 660[A0] core detected, enabling errata workarounds\n");  		have_a0 = 1;  	} diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c index 7b596fa38ad2..6bebc1f9f55a 100644 --- a/drivers/cpufreq/s3c24xx-cpufreq.c +++ b/drivers/cpufreq/s3c24xx-cpufreq.c @@ -351,7 +351,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)  static int s3c_cpufreq_init(struct cpufreq_policy *policy)  {  	policy->clk = clk_arm; -	return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency); + +	policy->cpuinfo.transition_latency = cpu_cur.info->latency; + +	if (ftab) +		return cpufreq_table_validate_and_show(policy, ftab); + +	return 0;  }  static int __init s3c_cpufreq_initclks(void) diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index c32a833e1b00..d300a163945f 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -51,15 +51,23 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)  static int  scpi_cpufreq_set_target(struct cpufreq_policy *policy, 
unsigned int index)  { +	unsigned long freq = policy->freq_table[index].frequency;  	struct scpi_data *priv = policy->driver_data; -	u64 rate = policy->freq_table[index].frequency * 1000; +	u64 rate = freq * 1000;  	int ret;  	ret = clk_set_rate(priv->clk, rate); -	if (!ret && (clk_get_rate(priv->clk) != rate)) -		ret = -EIO; -	return ret; +	if (ret) +		return ret; + +	if (clk_get_rate(priv->clk) != rate) +		return -EIO; + +	arch_set_freq_scale(policy->related_cpus, freq, +			    policy->cpuinfo.max_freq); + +	return 0;  }  static int diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c index 41bc5397f4bb..4fa5adf16c70 100644 --- a/drivers/cpufreq/speedstep-centrino.c +++ b/drivers/cpufreq/speedstep-centrino.c @@ -37,7 +37,7 @@ struct cpu_id  {  	__u8	x86;            /* CPU family */  	__u8	x86_model;	/* model */ -	__u8	x86_mask;	/* stepping */ +	__u8	x86_stepping;	/* stepping */  };  enum { @@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,  {  	if ((c->x86 == x->x86) &&  	    (c->x86_model == x->x86_model) && -	    (c->x86_mask == x->x86_mask)) +	    (c->x86_stepping == x->x86_stepping))  		return 1;  	return 0;  } diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c index 8085ec9000d1..e3a9962ee410 100644 --- a/drivers/cpufreq/speedstep-lib.c +++ b/drivers/cpufreq/speedstep-lib.c @@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(void)  		ebx = cpuid_ebx(0x00000001);  		ebx &= 0x000000FF; -		pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); +		pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping); -		switch (c->x86_mask) { +		switch (c->x86_stepping) {  		case 4:  			/*  			 * B-stepping [M-P4-M] @@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(void)  				msr_lo, msr_hi);  		if ((msr_hi & (1<<18)) &&  		    (relaxed_check ? 
1 : (msr_hi & (3<<24)))) { -			if (c->x86_mask == 0x01) { +			if (c->x86_stepping == 0x01) {  				pr_debug("early PIII version\n");  				return SPEEDSTEP_CPU_PIII_C_EARLY;  			} else diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 75d280cb2dc0..e843cf410373 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -228,12 +228,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,  		 * without any error (HW optimizations for later  		 * CAAM eras), then try again.  		 */ +		if (ret) +			break; +  		rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;  		if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) || -		    !(rdsta_val & (1 << sh_idx))) +		    !(rdsta_val & (1 << sh_idx))) {  			ret = -EAGAIN; -		if (ret)  			break; +		} +  		dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);  		/* Clear the contents before recreating the descriptor */  		memset(desc, 0x00, CAAM_CMD_SZ * 7); diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index fcfa5b1eae61..b3afb6cc9d72 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -211,7 +211,7 @@ static int __sev_platform_shutdown_locked(int *error)  {  	int ret; -	ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, 0, error); +	ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);  	if (ret)  		return ret; @@ -271,7 +271,7 @@ static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)  			return rc;  	} -	return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, 0, &argp->error); +	return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);  }  static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) @@ -299,7 +299,7 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)  			return rc;  	} -	return __sev_do_cmd_locked(cmd, 0, &argp->error); +	return __sev_do_cmd_locked(cmd, NULL, &argp->error);  }  static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp) @@ -624,7 
+624,7 @@ EXPORT_SYMBOL_GPL(sev_guest_decommission);  int sev_guest_df_flush(int *error)  { -	return sev_do_cmd(SEV_CMD_DF_FLUSH, 0, error); +	return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);  }  EXPORT_SYMBOL_GPL(sev_guest_df_flush); diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 4b6642a25df5..1c6cbda56afe 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -512,7 +512,7 @@ static int __init padlock_init(void)  	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); -	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) { +	if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {  		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;  		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;  		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n"); diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 188f44b7eb27..5d64c08b7f47 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -1922,15 +1922,21 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)  	uint32_t aes_control;  	unsigned long flags;  	int err; +	u8 *iv;  	aes_control = SSS_AES_KEY_CHANGE_MODE;  	if (mode & FLAGS_AES_DECRYPT)  		aes_control |= SSS_AES_MODE_DECRYPT; -	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) +	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {  		aes_control |= SSS_AES_CHAIN_MODE_CBC; -	else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) +		iv = req->info; +	} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {  		aes_control |= SSS_AES_CHAIN_MODE_CTR; +		iv = req->info; +	} else { +		iv = NULL; /* AES_ECB */ +	}  	if (dev->ctx->keylen == AES_KEYSIZE_192)  		aes_control |= SSS_AES_KEY_SIZE_192; @@ -1961,7 +1967,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)  		goto outdata_error;  	SSS_AES_WRITE(dev, AES_CONTROL, aes_control); -	s5p_set_aes(dev, dev->ctx->aes_key, req->info, 
dev->ctx->keylen); +	s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);  	s5p_set_dma_indata(dev,  dev->sg_src);  	s5p_set_dma_outdata(dev, dev->sg_dst); diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c index 0d01d1624252..63d636424161 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c @@ -28,7 +28,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,  	algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);  	ss = algt->ss; -	spin_lock(&ss->slock); +	spin_lock_bh(&ss->slock);  	writel(mode, ss->base + SS_CTL); @@ -51,6 +51,6 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,  	}  	writel(0, ss->base + SS_CTL); -	spin_unlock(&ss->slock); -	return dlen; +	spin_unlock_bh(&ss->slock); +	return 0;  } diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 9c80e0cb1664..6882fa2f8bad 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1138,6 +1138,10 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,  	struct talitos_private *priv = dev_get_drvdata(dev);  	bool is_sec1 = has_ftr_sec1(priv); +	if (!src) { +		to_talitos_ptr(ptr, 0, 0, is_sec1); +		return 1; +	}  	if (sg_count == 1) {  		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);  		return sg_count; diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 473af694ad1c..ecdc292aa4e4 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -246,12 +246,6 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,  {  	long avail; -	/* -	 * The device driver is allowed to sleep, in order to make the -	 * memory directly accessible. 
-	 */ -	might_sleep(); -  	if (!dax_dev)  		return -EOPNOTSUPP; diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index f652a0e0f5a2..3548caa9e933 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -163,6 +163,7 @@ struct mv_xor_v2_device {  	void __iomem *dma_base;  	void __iomem *glob_base;  	struct clk *clk; +	struct clk *reg_clk;  	struct tasklet_struct irq_tasklet;  	struct list_head free_sw_desc;  	struct dma_device dmadev; @@ -749,13 +750,26 @@ static int mv_xor_v2_probe(struct platform_device *pdev)  	if (ret)  		return ret; +	xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg"); +	if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) { +		if (!IS_ERR(xor_dev->reg_clk)) { +			ret = clk_prepare_enable(xor_dev->reg_clk); +			if (ret) +				return ret; +		} else { +			return PTR_ERR(xor_dev->reg_clk); +		} +	} +  	xor_dev->clk = devm_clk_get(&pdev->dev, NULL); -	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) -		return -EPROBE_DEFER; +	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) { +		ret = EPROBE_DEFER; +		goto disable_reg_clk; +	}  	if (!IS_ERR(xor_dev->clk)) {  		ret = clk_prepare_enable(xor_dev->clk);  		if (ret) -			return ret; +			goto disable_reg_clk;  	}  	ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1, @@ -866,8 +880,9 @@ free_hw_desq:  free_msi_irqs:  	platform_msi_domain_free_irqs(&pdev->dev);  disable_clk: -	if (!IS_ERR(xor_dev->clk)) -		clk_disable_unprepare(xor_dev->clk); +	clk_disable_unprepare(xor_dev->clk); +disable_reg_clk: +	clk_disable_unprepare(xor_dev->reg_clk);  	return ret;  } diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index e3ff162c03fc..d0cacdb0713e 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -917,7 +917,7 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,  	rcar_dmac_chan_configure_desc(chan, desc); -	max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift; +	max_chunk_size = 
RCAR_DMATCR_MASK << desc->xfer_shift;  	/*  	 * Allocate and fill the transfer chunk descriptors. We own the only diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c index 4dbb30cf94ac..b922db90939a 100644 --- a/drivers/dma/stm32-dmamux.c +++ b/drivers/dma/stm32-dmamux.c @@ -118,14 +118,15 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,  	spin_lock_irqsave(&dmamux->lock, flags);  	mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,  					   dmamux->dma_requests); -	set_bit(mux->chan_id, dmamux->dma_inuse); -	spin_unlock_irqrestore(&dmamux->lock, flags);  	if (mux->chan_id == dmamux->dma_requests) { +		spin_unlock_irqrestore(&dmamux->lock, flags);  		dev_err(&pdev->dev, "Run out of free DMA requests\n");  		ret = -ENOMEM; -		goto error; +		goto error_chan_id;  	} +	set_bit(mux->chan_id, dmamux->dma_inuse); +	spin_unlock_irqrestore(&dmamux->lock, flags);  	/* Look for DMA Master */  	for (i = 1, min = 0, max = dmamux->dma_reqs[i]; @@ -173,6 +174,8 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,  error:  	clear_bit(mux->chan_id, dmamux->dma_inuse); + +error_chan_id:  	kfree(mux);  	return ERR_PTR(ret);  } diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 8b16ec595fa7..329cb96f886f 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -3147,7 +3147,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)  	struct amd64_family_type *fam_type = NULL;  	pvt->ext_model  = boot_cpu_data.x86_model >> 4; -	pvt->stepping	= boot_cpu_data.x86_mask; +	pvt->stepping	= boot_cpu_data.x86_stepping;  	pvt->model	= boot_cpu_data.x86_model;  	pvt->fam	= boot_cpu_data.x86; diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index f34430f99fd8..872100215ca0 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = {   * sbridge structs   */ -#define NUM_CHANNELS		4	/* Max 
channels per MC */ +#define NUM_CHANNELS		6	/* Max channels per MC */  #define MAX_DIMMS		3	/* Max DIMMS per channel */  #define KNL_MAX_CHAS		38	/* KNL max num. of Cache Home Agents */  #define KNL_MAX_CHANNELS	6	/* KNL max num. of PCI channels */ diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c index 0a44d43802fe..3ec4c715e240 100644 --- a/drivers/extcon/extcon-axp288.c +++ b/drivers/extcon/extcon-axp288.c @@ -1,7 +1,6 @@  /*   * extcon-axp288.c - X-Power AXP288 PMIC extcon cable detection driver   * - * Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com>   * Copyright (C) 2015 Intel Corporation   * Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com>   * @@ -98,15 +97,13 @@ struct axp288_extcon_info {  	struct device *dev;  	struct regmap *regmap;  	struct regmap_irq_chip_data *regmap_irqc; -	struct delayed_work det_work;  	int irq[EXTCON_IRQ_END];  	struct extcon_dev *edev;  	unsigned int previous_cable; -	bool first_detect_done;  };  /* Power up/down reason string array */ -static char *axp288_pwr_up_down_info[] = { +static const char * const axp288_pwr_up_down_info[] = {  	"Last wake caused by user pressing the power button",  	"Last wake caused by a charger insertion",  	"Last wake caused by a battery insertion", @@ -124,7 +121,7 @@ static char *axp288_pwr_up_down_info[] = {   */  static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)  { -	char **rsi; +	const char * const *rsi;  	unsigned int val, i, clear_mask = 0;  	int ret; @@ -140,25 +137,6 @@ static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)  	regmap_write(info->regmap, AXP288_PS_BOOT_REASON_REG, clear_mask);  } -static void axp288_chrg_detect_complete(struct axp288_extcon_info *info) -{ -	/* -	 * We depend on other drivers to do things like mux the data lines, -	 * enable/disable vbus based on the id-pin, etc. 
Sometimes the BIOS has -	 * not set these things up correctly resulting in the initial charger -	 * cable type detection giving a wrong result and we end up not charging -	 * or charging at only 0.5A. -	 * -	 * So we schedule a second cable type detection after 2 seconds to -	 * give the other drivers time to load and do their thing. -	 */ -	if (!info->first_detect_done) { -		queue_delayed_work(system_wq, &info->det_work, -				   msecs_to_jiffies(2000)); -		info->first_detect_done = true; -	} -} -  static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)  {  	int ret, stat, cfg, pwr_stat; @@ -223,8 +201,6 @@ no_vbus:  		info->previous_cable = cable;  	} -	axp288_chrg_detect_complete(info); -  	return 0;  dev_det_ret: @@ -246,11 +222,8 @@ static irqreturn_t axp288_extcon_isr(int irq, void *data)  	return IRQ_HANDLED;  } -static void axp288_extcon_det_work(struct work_struct *work) +static void axp288_extcon_enable(struct axp288_extcon_info *info)  { -	struct axp288_extcon_info *info = -		container_of(work, struct axp288_extcon_info, det_work.work); -  	regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,  						BC_GLOBAL_RUN, 0);  	/* Enable the charger detection logic */ @@ -272,7 +245,6 @@ static int axp288_extcon_probe(struct platform_device *pdev)  	info->regmap = axp20x->regmap;  	info->regmap_irqc = axp20x->regmap_irqc;  	info->previous_cable = EXTCON_NONE; -	INIT_DELAYED_WORK(&info->det_work, axp288_extcon_det_work);  	platform_set_drvdata(pdev, info); @@ -318,7 +290,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)  	}  	/* Start charger cable type detection */ -	queue_delayed_work(system_wq, &info->det_work, 0); +	axp288_extcon_enable(info);  	return 0;  } diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c index c8691b5a9cb0..191e99f06a9a 100644 --- a/drivers/extcon/extcon-intel-int3496.c +++ b/drivers/extcon/extcon-intel-int3496.c @@ -153,8 +153,9 @@ static int int3496_probe(struct 
platform_device *pdev)  		return ret;  	} -	/* queue initial processing of id-pin */ +	/* process id-pin so that we start with the right status */  	queue_delayed_work(system_wq, &data->work, 0); +	flush_delayed_work(&data->work);  	platform_set_drvdata(pdev, data); diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c index c16600f30611..0bdea60c65dd 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/firmware/dcdbas.c @@ -639,7 +639,7 @@ static void __exit dcdbas_exit(void)  	platform_driver_unregister(&dcdbas_driver);  } -module_init(dcdbas_init); +subsys_initcall_sync(dcdbas_init);  module_exit(dcdbas_exit);  MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")"); diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c index da661bf8cb96..13c1edd37e96 100644 --- a/drivers/firmware/efi/libstub/tpm.c +++ b/drivers/firmware/efi/libstub/tpm.c @@ -68,11 +68,11 @@ void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)  	efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;  	efi_status_t status;  	efi_physical_addr_t log_location, log_last_entry; -	struct linux_efi_tpm_eventlog *log_tbl; +	struct linux_efi_tpm_eventlog *log_tbl = NULL;  	unsigned long first_entry_addr, last_entry_addr;  	size_t log_size, last_entry_size;  	efi_bool_t truncated; -	void *tcg2_protocol; +	void *tcg2_protocol = NULL;  	status = efi_call_early(locate_protocol, &tcg2_guid, NULL,  				&tcg2_protocol); diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index e76de57dd617..ebaea8b1594b 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -14,7 +14,6 @@   * GNU General Public License for more details.   
*/ -#include <linux/clk.h>  #include <linux/err.h>  #include <linux/gpio.h>  #include <linux/init.h> @@ -37,10 +36,9 @@ struct gpio_rcar_priv {  	struct platform_device *pdev;  	struct gpio_chip gpio_chip;  	struct irq_chip irq_chip; -	struct clk *clk;  	unsigned int irq_parent; +	atomic_t wakeup_path;  	bool has_both_edge_trigger; -	bool needs_clk;  };  #define IOINTSEL 0x00	/* General IO/Interrupt Switching Register */ @@ -186,13 +184,10 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)  		}  	} -	if (!p->clk) -		return 0; -  	if (on) -		clk_enable(p->clk); +		atomic_inc(&p->wakeup_path);  	else -		clk_disable(p->clk); +		atomic_dec(&p->wakeup_path);  	return 0;  } @@ -330,17 +325,14 @@ static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,  struct gpio_rcar_info {  	bool has_both_edge_trigger; -	bool needs_clk;  };  static const struct gpio_rcar_info gpio_rcar_info_gen1 = {  	.has_both_edge_trigger = false, -	.needs_clk = false,  };  static const struct gpio_rcar_info gpio_rcar_info_gen2 = {  	.has_both_edge_trigger = true, -	.needs_clk = true,  };  static const struct of_device_id gpio_rcar_of_table[] = { @@ -403,7 +395,6 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)  	ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);  	*npins = ret == 0 ? 
args.args[2] : RCAR_MAX_GPIO_PER_BANK;  	p->has_both_edge_trigger = info->has_both_edge_trigger; -	p->needs_clk = info->needs_clk;  	if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) {  		dev_warn(&p->pdev->dev, @@ -440,16 +431,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)  	platform_set_drvdata(pdev, p); -	p->clk = devm_clk_get(dev, NULL); -	if (IS_ERR(p->clk)) { -		if (p->needs_clk) { -			dev_err(dev, "unable to get clock\n"); -			ret = PTR_ERR(p->clk); -			goto err0; -		} -		p->clk = NULL; -	} -  	pm_runtime_enable(dev);  	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); @@ -531,11 +512,24 @@ static int gpio_rcar_remove(struct platform_device *pdev)  	return 0;  } +static int __maybe_unused gpio_rcar_suspend(struct device *dev) +{ +	struct gpio_rcar_priv *p = dev_get_drvdata(dev); + +	if (atomic_read(&p->wakeup_path)) +		device_set_wakeup_path(dev); + +	return 0; +} + +static SIMPLE_DEV_PM_OPS(gpio_rcar_pm_ops, gpio_rcar_suspend, NULL); +  static struct platform_driver gpio_rcar_device_driver = {  	.probe		= gpio_rcar_probe,  	.remove		= gpio_rcar_remove,  	.driver		= {  		.name	= "gpio_rcar", +		.pm     = &gpio_rcar_pm_ops,  		.of_match_table = of_match_ptr(gpio_rcar_of_table),  	}  }; diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 564bb7a31da4..84e5a9df2344 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -241,6 +241,19 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,  		desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,  						&of_flags); +		/* +		 * -EPROBE_DEFER in our case means that we found a +		 * valid GPIO property, but no controller has been +		 * registered so far. +		 * +		 * This means we don't need to look any further for +		 * alternate name conventions, and we should really +		 * preserve the return code for our user to be able to +		 * retry probing later. 
+		 */ +		if (IS_ERR(desc) && PTR_ERR(desc) == -EPROBE_DEFER) +			return desc; +  		if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))  			break;  	} @@ -250,7 +263,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,  		desc = of_find_spi_gpio(dev, con_id, &of_flags);  	/* Special handling for regulator GPIOs if used */ -	if (IS_ERR(desc)) +	if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)  		desc = of_find_regulator_gpio(dev, con_id, &of_flags);  	if (IS_ERR(desc)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d5a2eefd6c3e..74edba18b159 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1156,7 +1156,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,  /*   * Writeback   */ -#define AMDGPU_MAX_WB 512	/* Reserve at most 512 WB slots for amdgpu-owned rings. */ +#define AMDGPU_MAX_WB 128	/* Reserve at most 128 WB slots for amdgpu-owned rings. */  struct amdgpu_wb {  	struct amdgpu_bo	*wb_obj; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 57afad79f55d..8fa850a070e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,  	size_t size;  	u32 retry = 3; +	if (amdgpu_acpi_pcie_notify_device_ready(adev)) +		return -EINVAL; +  	/* Get the device handle */  	handle = ACPI_HANDLE(&adev->pdev->dev);  	if (!handle) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index e2c3c5ec42d1..c53095b3b0fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -568,6 +568,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {  	/* HG _PR3 doesn't seem to work on this A+A weston board */  	{ 0x1002, 0x6900, 0x1002, 0x0124, 
AMDGPU_PX_QUIRK_FORCE_ATPX },  	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, +	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },  	{ 0, 0, 0, 0, 0 },  }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 8ca3783f2deb..7a073ac5f9c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)  		/* don't do anything if sink is not display port, i.e.,  		 * passive dp->(dvi|hdmi) adaptor  		 */ -		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { -			int saved_dpms = connector->dpms; -			/* Only turn off the display if it's physically disconnected */ -			if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) { -				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); -			} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { -				/* Don't try to start link training before we -				 * have the dpcd */ -				if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) -					return; - -				/* set it to OFF so that drm_helper_connector_dpms() -				 * won't return immediately since the current state -				 * is ON at this point. 
-				 */ -				connector->dpms = DRM_MODE_DPMS_OFF; -				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); -			} -			connector->dpms = saved_dpms; +		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT && +		    amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) && +		    amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { +			/* Don't start link training before we have the DPCD */ +			if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) +				return; + +			/* Turn the connector off and back on immediately, which +			 * will trigger link training +			 */ +			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); +			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);  		}  	}  } @@ -736,9 +729,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)  	enum drm_connector_status ret = connector_status_disconnected;  	int r; -	r = pm_runtime_get_sync(connector->dev->dev); -	if (r < 0) -		return connector_status_disconnected; +	if (!drm_kms_helper_is_poll_worker()) { +		r = pm_runtime_get_sync(connector->dev->dev); +		if (r < 0) +			return connector_status_disconnected; +	}  	if (encoder) {  		struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); @@ -757,8 +752,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)  	/* check acpi lid status ??? 
*/  	amdgpu_connector_update_scratch_regs(connector, ret); -	pm_runtime_mark_last_busy(connector->dev->dev); -	pm_runtime_put_autosuspend(connector->dev->dev); + +	if (!drm_kms_helper_is_poll_worker()) { +		pm_runtime_mark_last_busy(connector->dev->dev); +		pm_runtime_put_autosuspend(connector->dev->dev); +	} +  	return ret;  } @@ -868,9 +867,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)  	enum drm_connector_status ret = connector_status_disconnected;  	int r; -	r = pm_runtime_get_sync(connector->dev->dev); -	if (r < 0) -		return connector_status_disconnected; +	if (!drm_kms_helper_is_poll_worker()) { +		r = pm_runtime_get_sync(connector->dev->dev); +		if (r < 0) +			return connector_status_disconnected; +	}  	encoder = amdgpu_connector_best_single_encoder(connector);  	if (!encoder) @@ -924,8 +925,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)  	amdgpu_connector_update_scratch_regs(connector, ret);  out: -	pm_runtime_mark_last_busy(connector->dev->dev); -	pm_runtime_put_autosuspend(connector->dev->dev); +	if (!drm_kms_helper_is_poll_worker()) { +		pm_runtime_mark_last_busy(connector->dev->dev); +		pm_runtime_put_autosuspend(connector->dev->dev); +	}  	return ret;  } @@ -988,9 +991,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)  	enum drm_connector_status ret = connector_status_disconnected;  	bool dret = false, broken_edid = false; -	r = pm_runtime_get_sync(connector->dev->dev); -	if (r < 0) -		return connector_status_disconnected; +	if (!drm_kms_helper_is_poll_worker()) { +		r = pm_runtime_get_sync(connector->dev->dev); +		if (r < 0) +			return connector_status_disconnected; +	}  	if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {  		ret = connector->status; @@ -1115,8 +1120,10 @@ out:  	amdgpu_connector_update_scratch_regs(connector, ret);  exit: -	pm_runtime_mark_last_busy(connector->dev->dev); -	pm_runtime_put_autosuspend(connector->dev->dev); 
+	if (!drm_kms_helper_is_poll_worker()) { +		pm_runtime_mark_last_busy(connector->dev->dev); +		pm_runtime_put_autosuspend(connector->dev->dev); +	}  	return ret;  } @@ -1359,9 +1366,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)  	struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);  	int r; -	r = pm_runtime_get_sync(connector->dev->dev); -	if (r < 0) -		return connector_status_disconnected; +	if (!drm_kms_helper_is_poll_worker()) { +		r = pm_runtime_get_sync(connector->dev->dev); +		if (r < 0) +			return connector_status_disconnected; +	}  	if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {  		ret = connector->status; @@ -1429,8 +1438,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)  	amdgpu_connector_update_scratch_regs(connector, ret);  out: -	pm_runtime_mark_last_busy(connector->dev->dev); -	pm_runtime_put_autosuspend(connector->dev->dev); +	if (!drm_kms_helper_is_poll_worker()) { +		pm_runtime_mark_last_busy(connector->dev->dev); +		pm_runtime_put_autosuspend(connector->dev->dev); +	}  	return ret;  } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 00a50cc5ec9a..66cb10cdc7c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -492,7 +492,7 @@ static int amdgpu_device_wb_init(struct amdgpu_device *adev)  		memset(&adev->wb.used, 0, sizeof(adev->wb.used));  		/* clear wb memory */ -		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t)); +		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);  	}  	return 0; @@ -530,8 +530,9 @@ int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)   */  void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)  { +	wb >>= 3;  	if (wb < adev->wb.num_wb) -		__clear_bit(wb >> 3, adev->wb.used); +		__clear_bit(wb, adev->wb.used);  }  /** @@ -1455,11 +1456,6 @@ static int 
amdgpu_device_ip_fini(struct amdgpu_device *adev)  	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {  		if (!adev->ip_blocks[i].status.hw)  			continue; -		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { -			amdgpu_free_static_csa(adev); -			amdgpu_device_wb_fini(adev); -			amdgpu_device_vram_scratch_fini(adev); -		}  		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&  			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) { @@ -1486,6 +1482,13 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)  	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {  		if (!adev->ip_blocks[i].status.sw)  			continue; + +		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { +			amdgpu_free_static_csa(adev); +			amdgpu_device_wb_fini(adev); +			amdgpu_device_vram_scratch_fini(adev); +		} +  		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);  		/* XXX handle errors */  		if (r) { @@ -2060,9 +2063,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)  	DRM_INFO("amdgpu: finishing device.\n");  	adev->shutdown = true; -	if (adev->mode_info.mode_config_initialized) -		drm_crtc_force_disable_all(adev->ddev); - +	if (adev->mode_info.mode_config_initialized){ +		if (!amdgpu_device_has_dc_support(adev)) +			drm_crtc_force_disable_all(adev->ddev); +		else +			drm_atomic_helper_shutdown(adev->ddev); +	}  	amdgpu_ib_pool_fini(adev);  	amdgpu_fence_driver_fini(adev);  	amdgpu_fbdev_fini(adev); @@ -2284,14 +2290,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)  				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);  			}  			drm_modeset_unlock_all(dev); -		} else { -			/* -			 * There is no equivalent atomic helper to turn on -			 * display, so we defined our own function for this, -			 * once suspend resume is supported by the atomic -			 * framework this will be reworked -			 */ -			amdgpu_dm_display_resume(adev);  		}  	} @@ -2726,7 +2724,6 @@ int amdgpu_device_gpu_recover(struct 
amdgpu_device *adev,  	if (amdgpu_device_has_dc_support(adev)) {  		if (drm_atomic_helper_resume(adev->ddev, state))  			dev_info(adev->dev, "drm resume failed:%d\n", r); -		amdgpu_dm_display_resume(adev);  	} else {  		drm_helper_resume_force_mode(adev->ddev);  	} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index e48b4ec88c8c..ca6c931dabfa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)  	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);  	if (robj) { -		if (robj->gem_base.import_attach) -			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);  		amdgpu_mn_unregister(robj);  		amdgpu_bo_unref(&robj);  	} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index e14ab34d8262..7c2be32c5aea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -75,7 +75,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,  static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)  {  	struct amdgpu_gtt_mgr *mgr = man->priv; - +	spin_lock(&mgr->lock);  	drm_mm_takedown(&mgr->mm);  	spin_unlock(&mgr->lock);  	kfree(mgr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 56bcd59c3399..36483e0d3c97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -257,7 +257,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)  	r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);  	if (r) {  		adev->irq.installed = false; -		flush_work(&adev->hotplug_work); +		if (!amdgpu_device_has_dc_support(adev)) +			flush_work(&adev->hotplug_work);  		cancel_work_sync(&adev->reset_work);  		return r;  	} @@ -282,7 +283,8 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)  		adev->irq.installed = false;  		
if (adev->irq.msi_enabled)  			pci_disable_msi(adev->pdev); -		flush_work(&adev->hotplug_work); +		if (!amdgpu_device_has_dc_support(adev)) +			flush_work(&adev->hotplug_work);  		cancel_work_sync(&adev->reset_work);  	} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 54f06c959340..2264c5c97009 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -352,6 +352,7 @@ struct amdgpu_mode_info {  	u16 firmware_flags;  	/* pointer to backlight encoder */  	struct amdgpu_encoder *bl_encoder; +	u8 bl_level; /* saved backlight level */  	struct amdgpu_audio	audio; /* audio stuff */  	int			num_crtc; /* number of crtcs */  	int			num_hpd; /* number of hpd pins */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 5c4c3e0d527b..1220322c1680 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -56,6 +56,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)  	amdgpu_bo_kunmap(bo); +	if (bo->gem_base.import_attach) +		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);  	drm_gem_object_release(&bo->gem_base);  	amdgpu_bo_unref(&bo->parent);  	if (!list_empty(&bo->shadow_list)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 13044e66dcaf..561d3312af32 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -481,7 +481,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,  	result = 0;  	if (*pos < 12) { -		early[0] = amdgpu_ring_get_rptr(ring); +		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;  		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;  		early[2] = ring->wptr & ring->buf_mask;  		for (i = *pos / 4; i < 3 && size; i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index b2eae86bf906..5c26a8e806b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -299,12 +299,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)  	cancel_delayed_work_sync(&adev->uvd.idle_work); -	for (i = 0; i < adev->uvd.max_handles; ++i) -		if (atomic_read(&adev->uvd.handles[i])) -			break; +	/* only valid for physical mode */ +	if (adev->asic_type < CHIP_POLARIS10) { +		for (i = 0; i < adev->uvd.max_handles; ++i) +			if (atomic_read(&adev->uvd.handles[i])) +				break; -	if (i == AMDGPU_MAX_UVD_HANDLES) -		return 0; +		if (i == adev->uvd.max_handles) +			return 0; +	}  	size = amdgpu_bo_size(adev->uvd.vcpu_bo);  	ptr = adev->uvd.cpu_addr; diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 2af26d2da127..d702fb8e3427 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -34,7 +34,7 @@  #include <linux/backlight.h>  #include "bif/bif_4_1_d.h" -static u8 +u8  amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)  {  	u8 backlight_level; @@ -48,7 +48,7 @@ amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)  	return backlight_level;  } -static void +void  amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,  					    u8 backlight_level)  { diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h index 2bdec40515ce..f77cbdef679e 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h @@ -25,6 +25,11 @@  #define __ATOMBIOS_ENCODER_H__  u8 +amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev); +void +amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev, +						   u8 backlight_level); +u8  
amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder);  void  amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index f34bc68aadfb..022f303463fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2921,6 +2921,11 @@ static int dce_v10_0_hw_fini(void *handle)  static int dce_v10_0_suspend(void *handle)  { +	struct amdgpu_device *adev = (struct amdgpu_device *)handle; + +	adev->mode_info.bl_level = +		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); +  	return dce_v10_0_hw_fini(handle);  } @@ -2929,6 +2934,9 @@ static int dce_v10_0_resume(void *handle)  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;  	int ret; +	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, +							   adev->mode_info.bl_level); +  	ret = dce_v10_0_hw_init(handle);  	/* turn on the BL */ diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 26378bd6aba4..800a9f36ab4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -3047,6 +3047,11 @@ static int dce_v11_0_hw_fini(void *handle)  static int dce_v11_0_suspend(void *handle)  { +	struct amdgpu_device *adev = (struct amdgpu_device *)handle; + +	adev->mode_info.bl_level = +		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); +  	return dce_v11_0_hw_fini(handle);  } @@ -3055,6 +3060,9 @@ static int dce_v11_0_resume(void *handle)  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;  	int ret; +	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, +							   adev->mode_info.bl_level); +  	ret = dce_v11_0_hw_init(handle);  	/* turn on the BL */ diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index bd2c4f727df6..b8368f69ce1f 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -2787,6 +2787,11 @@ static int dce_v6_0_hw_fini(void *handle)  static int dce_v6_0_suspend(void *handle)  { +	struct amdgpu_device *adev = (struct amdgpu_device *)handle; + +	adev->mode_info.bl_level = +		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); +  	return dce_v6_0_hw_fini(handle);  } @@ -2795,6 +2800,9 @@ static int dce_v6_0_resume(void *handle)  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;  	int ret; +	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, +							   adev->mode_info.bl_level); +  	ret = dce_v6_0_hw_init(handle);  	/* turn on the BL */ @@ -3093,7 +3101,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,  		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;  		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);  		schedule_work(&adev->hotplug_work); -		DRM_INFO("IH: HPD%d\n", hpd + 1); +		DRM_DEBUG("IH: HPD%d\n", hpd + 1);  	}  	return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index c008dc030687..012e0a9ae0ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2819,6 +2819,11 @@ static int dce_v8_0_hw_fini(void *handle)  static int dce_v8_0_suspend(void *handle)  { +	struct amdgpu_device *adev = (struct amdgpu_device *)handle; + +	adev->mode_info.bl_level = +		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); +  	return dce_v8_0_hw_fini(handle);  } @@ -2827,6 +2832,9 @@ static int dce_v8_0_resume(void *handle)  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;  	int ret; +	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, +							   adev->mode_info.bl_level); +  	ret = dce_v8_0_hw_init(handle);  	/* turn on the BL */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index a066c5eda135..a4309698e76c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4384,34 +4384,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)  	case CHIP_KAVERI:  		adev->gfx.config.max_shader_engines = 1;  		adev->gfx.config.max_tile_pipes = 4; -		if ((adev->pdev->device == 0x1304) || -		    (adev->pdev->device == 0x1305) || -		    (adev->pdev->device == 0x130C) || -		    (adev->pdev->device == 0x130F) || -		    (adev->pdev->device == 0x1310) || -		    (adev->pdev->device == 0x1311) || -		    (adev->pdev->device == 0x131C)) { -			adev->gfx.config.max_cu_per_sh = 8; -			adev->gfx.config.max_backends_per_se = 2; -		} else if ((adev->pdev->device == 0x1309) || -			   (adev->pdev->device == 0x130A) || -			   (adev->pdev->device == 0x130D) || -			   (adev->pdev->device == 0x1313) || -			   (adev->pdev->device == 0x131D)) { -			adev->gfx.config.max_cu_per_sh = 6; -			adev->gfx.config.max_backends_per_se = 2; -		} else if ((adev->pdev->device == 0x1306) || -			   (adev->pdev->device == 0x1307) || -			   (adev->pdev->device == 0x130B) || -			   (adev->pdev->device == 0x130E) || -			   (adev->pdev->device == 0x1315) || -			   (adev->pdev->device == 0x131B)) { -			adev->gfx.config.max_cu_per_sh = 4; -			adev->gfx.config.max_backends_per_se = 1; -		} else { -			adev->gfx.config.max_cu_per_sh = 3; -			adev->gfx.config.max_backends_per_se = 1; -		} +		adev->gfx.config.max_cu_per_sh = 8; +		adev->gfx.config.max_backends_per_se = 2;  		adev->gfx.config.max_sh_per_se = 1;  		adev->gfx.config.max_texture_channel_caches = 4;  		adev->gfx.config.max_gprs = 256; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 2719937e09d6..3b7e7af09ead 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -634,7 +634,7 @@ static int gmc_v9_0_late_init(void *handle)  	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)  		BUG_ON(vm_inv_eng[i] > 16); -	if (adev->asic_type == CHIP_VEGA10) { +	if (adev->asic_type == CHIP_VEGA10 && 
!amdgpu_sriov_vf(adev)) {  		r = gmc_v9_0_ecc_available(adev);  		if (r == 1) {  			DRM_INFO("ECC is active.\n"); @@ -682,7 +682,10 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)  	adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);  	if (!adev->mc.vram_width) {  		/* hbm memory channel size */ -		chansize = 128; +		if (adev->flags & AMD_IS_APU) +			chansize = 64; +		else +			chansize = 128;  		tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);  		tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index e92fb372bc99..91cf95a8c39c 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -238,31 +238,27 @@ static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)  static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)  {  	struct amdgpu_device *adev = ring->adev; -	u64 *wptr = NULL; -	uint64_t local_wptr = 0; +	u64 wptr;  	if (ring->use_doorbell) {  		/* XXX check if swapping is necessary on BE */ -		wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]); -		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr); -		*wptr = (*wptr) >> 2; -		DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr); +		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); +		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);  	} else {  		u32 lowbit, highbit;  		int me = (ring == &adev->sdma.instance[0].ring) ? 
0 : 1; -		wptr = &local_wptr;  		lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2;  		highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;  		DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",  				me, highbit, lowbit); -		*wptr = highbit; -		*wptr = (*wptr) << 32; -		*wptr |= lowbit; +		wptr = highbit; +		wptr = wptr << 32; +		wptr |= lowbit;  	} -	return *wptr; +	return wptr >> 2;  }  /** diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 543101d5a5ed..2095173aaabf 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -31,6 +31,7 @@  #include "amdgpu_uvd.h"  #include "amdgpu_vce.h"  #include "atom.h" +#include "amd_pcie.h"  #include "amdgpu_powerplay.h"  #include "sid.h"  #include "si_ih.h" @@ -1461,8 +1462,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)  {  	struct pci_dev *root = adev->pdev->bus->self;  	int bridge_pos, gpu_pos; -	u32 speed_cntl, mask, current_data_rate; -	int ret, i; +	u32 speed_cntl, current_data_rate; +	int i;  	u16 tmp16;  	if (pci_is_root_bus(adev->pdev->bus)) @@ -1474,23 +1475,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)  	if (adev->flags & AMD_IS_APU)  		return; -	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); -	if (ret != 0) -		return; - -	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80))) +	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | +					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))  		return;  	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);  	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>  		LC_CURRENT_DATA_RATE_SHIFT; -	if (mask & DRM_PCIE_SPEED_80) { +	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {  		if (current_data_rate == 2) {  			DRM_INFO("PCIE gen 3 link speeds already enabled\n");  			return;  		}  		DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n"); -	} else if (mask & 
DRM_PCIE_SPEED_50) { +	} else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {  		if (current_data_rate == 1) {  			DRM_INFO("PCIE gen 2 link speeds already enabled\n");  			return; @@ -1506,7 +1504,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)  	if (!gpu_pos)  		return; -	if (mask & DRM_PCIE_SPEED_80) { +	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {  		if (current_data_rate != 2) {  			u16 bridge_cfg, gpu_cfg;  			u16 bridge_cfg2, gpu_cfg2; @@ -1589,9 +1587,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)  	pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);  	tmp16 &= ~0xf; -	if (mask & DRM_PCIE_SPEED_80) +	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)  		tmp16 |= 3; -	else if (mask & DRM_PCIE_SPEED_50) +	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)  		tmp16 |= 2;  	else  		tmp16 |= 1; diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index ce675a7f179a..22f0b7ff3ac9 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -26,6 +26,7 @@  #include "amdgpu_pm.h"  #include "amdgpu_dpm.h"  #include "amdgpu_atombios.h" +#include "amd_pcie.h"  #include "sid.h"  #include "r600_dpm.h"  #include "si_dpm.h" @@ -3331,29 +3332,6 @@ static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,  	}  } -static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev, -					       u32 sys_mask, -					       enum amdgpu_pcie_gen asic_gen, -					       enum amdgpu_pcie_gen default_gen) -{ -	switch (asic_gen) { -	case AMDGPU_PCIE_GEN1: -		return AMDGPU_PCIE_GEN1; -	case AMDGPU_PCIE_GEN2: -		return AMDGPU_PCIE_GEN2; -	case AMDGPU_PCIE_GEN3: -		return AMDGPU_PCIE_GEN3; -	default: -		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3)) -			return AMDGPU_PCIE_GEN3; -		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == 
AMDGPU_PCIE_GEN2)) -			return AMDGPU_PCIE_GEN2; -		else -			return AMDGPU_PCIE_GEN1; -	} -	return AMDGPU_PCIE_GEN1; -} -  static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,  			    u32 *p, u32 *u)  { @@ -5028,10 +5006,11 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,  							      table->ACPIState.levels[0].vddc.index,  							      &table->ACPIState.levels[0].std_vddc);  		} -		table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev, -										    si_pi->sys_pcie_mask, -										    si_pi->boot_pcie_gen, -										    AMDGPU_PCIE_GEN1); +		table->ACPIState.levels[0].gen2PCIE = +			(u8)amdgpu_get_pcie_gen_support(adev, +							si_pi->sys_pcie_mask, +							si_pi->boot_pcie_gen, +							AMDGPU_PCIE_GEN1);  		if (si_pi->vddc_phase_shed_control)  			si_populate_phase_shedding_value(adev, @@ -7168,10 +7147,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,  	pl->vddc = le16_to_cpu(clock_info->si.usVDDC);  	pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);  	pl->flags = le32_to_cpu(clock_info->si.ulFlags); -	pl->pcie_gen = r600_get_pcie_gen_support(adev, -						 si_pi->sys_pcie_mask, -						 si_pi->boot_pcie_gen, -						 clock_info->si.ucPCIEGen); +	pl->pcie_gen = amdgpu_get_pcie_gen_support(adev, +						   si_pi->sys_pcie_mask, +						   si_pi->boot_pcie_gen, +						   clock_info->si.ucPCIEGen);  	/* patch up vddc if necessary */  	ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc, @@ -7326,7 +7305,6 @@ static int si_dpm_init(struct amdgpu_device *adev)  	struct si_power_info *si_pi;  	struct atom_clock_dividers dividers;  	int ret; -	u32 mask;  	si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);  	if (si_pi == NULL) @@ -7336,11 +7314,9 @@ static int si_dpm_init(struct amdgpu_device *adev)  	eg_pi = &ni_pi->eg;  	pi = &eg_pi->rv7xx; -	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); -	if (ret) -		si_pi->sys_pcie_mask = 0; -	else -		si_pi->sys_pcie_mask = mask; +	
si_pi->sys_pcie_mask = +		(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >> +		CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;  	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;  	si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index b2bfedaf57f1..9bab4842cd44 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -1618,7 +1618,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {  	.set_wptr = uvd_v6_0_enc_ring_set_wptr,  	.emit_frame_size =  		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */ -		6 + /* uvd_v6_0_enc_ring_emit_vm_flush */ +		5 + /* uvd_v6_0_enc_ring_emit_vm_flush */  		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */  		1, /* uvd_v6_0_enc_ring_insert_end */  	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 1ce4c98385e3..63c67346d316 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -629,11 +629,13 @@ static int dm_resume(void *handle)  {  	struct amdgpu_device *adev = handle;  	struct amdgpu_display_manager *dm = &adev->dm; +	int ret = 0;  	/* power on hardware */  	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); -	return 0; +	ret = amdgpu_dm_display_resume(adev); +	return ret;  }  int amdgpu_dm_display_resume(struct amdgpu_device *adev) @@ -1035,6 +1037,10 @@ static void handle_hpd_rx_irq(void *param)  			!is_mst_root_connector) {  		/* Downstream Port status changed. 
*/  		if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { + +			if (aconnector->fake_enable) +				aconnector->fake_enable = false; +  			amdgpu_dm_update_connector_after_detect(aconnector); @@ -2010,30 +2016,32 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,  	dst.width = stream->timing.h_addressable;  	dst.height = stream->timing.v_addressable; -	rmx_type = dm_state->scaling; -	if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { -		if (src.width * dst.height < -				src.height * dst.width) { -			/* height needs less upscaling/more downscaling */ -			dst.width = src.width * -					dst.height / src.height; -		} else { -			/* width needs less upscaling/more downscaling */ -			dst.height = src.height * -					dst.width / src.width; +	if (dm_state) { +		rmx_type = dm_state->scaling; +		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { +			if (src.width * dst.height < +					src.height * dst.width) { +				/* height needs less upscaling/more downscaling */ +				dst.width = src.width * +						dst.height / src.height; +			} else { +				/* width needs less upscaling/more downscaling */ +				dst.height = src.height * +						dst.width / src.width; +			} +		} else if (rmx_type == RMX_CENTER) { +			dst = src;  		} -	} else if (rmx_type == RMX_CENTER) { -		dst = src; -	} -	dst.x = (stream->timing.h_addressable - dst.width) / 2; -	dst.y = (stream->timing.v_addressable - dst.height) / 2; +		dst.x = (stream->timing.h_addressable - dst.width) / 2; +		dst.y = (stream->timing.v_addressable - dst.height) / 2; -	if (dm_state->underscan_enable) { -		dst.x += dm_state->underscan_hborder / 2; -		dst.y += dm_state->underscan_vborder / 2; -		dst.width -= dm_state->underscan_hborder; -		dst.height -= dm_state->underscan_vborder; +		if (dm_state->underscan_enable) { +			dst.x += dm_state->underscan_hborder / 2; +			dst.y += dm_state->underscan_vborder / 2; +			dst.width -= dm_state->underscan_hborder; +			dst.height -= dm_state->underscan_vborder; +		}  	}  	
stream->src = src; @@ -2358,12 +2366,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,  	if (aconnector == NULL) {  		DRM_ERROR("aconnector is NULL!\n"); -		goto drm_connector_null; -	} - -	if (dm_state == NULL) { -		DRM_ERROR("dm_state is NULL!\n"); -		goto dm_state_null; +		return stream;  	}  	drm_connector = &aconnector->base; @@ -2375,18 +2378,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,  		 */  		if (aconnector->mst_port) {  			dm_dp_mst_dc_sink_create(drm_connector); -			goto mst_dc_sink_create_done; +			return stream;  		}  		if (create_fake_sink(aconnector)) -			goto stream_create_fail; +			return stream;  	}  	stream = dc_create_stream_for_sink(aconnector->dc_sink);  	if (stream == NULL) {  		DRM_ERROR("Failed to create stream for sink!\n"); -		goto stream_create_fail; +		return stream;  	}  	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { @@ -2412,9 +2415,12 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,  	} else {  		decide_crtc_timing_for_drm_display_mode(  				&mode, preferred_mode, -				dm_state->scaling != RMX_OFF); +				dm_state ? 
(dm_state->scaling != RMX_OFF) : false);  	} +	if (!dm_state) +		drm_mode_set_crtcinfo(&mode, 0); +  	fill_stream_properties_from_drm_display_mode(stream,  			&mode, &aconnector->base);  	update_stream_scaling_settings(&mode, dm_state, stream); @@ -2424,10 +2430,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,  		drm_connector,  		aconnector->dc_sink); -stream_create_fail: -dm_state_null: -drm_connector_null: -mst_dc_sink_create_done: +	update_stream_signal(stream); +  	return stream;  } @@ -2495,6 +2499,27 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)  	return &state->base;  } + +static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) +{ +	enum dc_irq_source irq_source; +	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); +	struct amdgpu_device *adev = crtc->dev->dev_private; + +	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; +	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; +} + +static int dm_enable_vblank(struct drm_crtc *crtc) +{ +	return dm_set_vblank(crtc, true); +} + +static void dm_disable_vblank(struct drm_crtc *crtc) +{ +	dm_set_vblank(crtc, false); +} +  /* Implemented only the options currently availible for the driver */  static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {  	.reset = dm_crtc_reset_state, @@ -2504,6 +2529,8 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {  	.page_flip = drm_atomic_helper_page_flip,  	.atomic_duplicate_state = dm_crtc_duplicate_state,  	.atomic_destroy_state = dm_crtc_destroy_state, +	.enable_vblank = dm_enable_vblank, +	.disable_vblank = dm_disable_vblank,  };  static enum drm_connector_status @@ -2798,7 +2825,7 @@ int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,  		goto fail;  	} -	stream = dc_create_stream_for_sink(dc_sink); +	stream = create_stream_for_sink(aconnector, mode, NULL);  	if (stream == NULL) {  		DRM_ERROR("Failed to create stream for sink!\n");  		goto fail; @@ -3058,6 +3085,9 @@ static int 
dm_plane_atomic_check(struct drm_plane *plane,  	if (!dm_plane_state->dc_state)  		return 0; +	if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state)) +		return -EINVAL; +  	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)  		return 0; @@ -3104,8 +3134,6 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,  	switch (aplane->base.type) {  	case DRM_PLANE_TYPE_PRIMARY: -		aplane->base.format_default = true; -  		res = drm_universal_plane_init(  				dm->adev->ddev,  				&aplane->base, @@ -4630,8 +4658,6 @@ static int dm_update_planes_state(struct dc *dc,  	bool pflip_needed  = !state->allow_modeset;  	int ret = 0; -	if (pflip_needed) -		return ret;  	/* Add new planes */  	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { @@ -4646,6 +4672,8 @@ static int dm_update_planes_state(struct dc *dc,  		/* Remove any changed/removed planes */  		if (!enable) { +			if (pflip_needed) +				continue;  			if (!old_plane_crtc)  				continue; @@ -4677,6 +4705,7 @@ static int dm_update_planes_state(struct dc *dc,  			*lock_and_validation_needed = true;  		} else { /* Add new planes */ +			struct dc_plane_state *dc_new_plane_state;  			if (drm_atomic_plane_disabling(plane->state, new_plane_state))  				continue; @@ -4690,38 +4719,50 @@ static int dm_update_planes_state(struct dc *dc,  			if (!dm_new_crtc_state->stream)  				continue; +			if (pflip_needed) +				continue;  			WARN_ON(dm_new_plane_state->dc_state); -			dm_new_plane_state->dc_state = dc_create_plane_state(dc); - -			DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n", -					plane->base.id, new_plane_crtc->base.id); - -			if (!dm_new_plane_state->dc_state) { +			dc_new_plane_state = dc_create_plane_state(dc); +			if (!dc_new_plane_state) {  				ret = -EINVAL;  				return ret;  			} +			DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n", +					plane->base.id, new_plane_crtc->base.id); +  			ret = fill_plane_attributes(  				
new_plane_crtc->dev->dev_private, -				dm_new_plane_state->dc_state, +				dc_new_plane_state,  				new_plane_state,  				new_crtc_state); -			if (ret) +			if (ret) { +				dc_plane_state_release(dc_new_plane_state);  				return ret; +			} - +			/* +			 * Any atomic check errors that occur after this will +			 * not need a release. The plane state will be attached +			 * to the stream, and therefore part of the atomic +			 * state. It'll be released when the atomic state is +			 * cleaned. +			 */  			if (!dc_add_plane_to_context(  					dc,  					dm_new_crtc_state->stream, -					dm_new_plane_state->dc_state, +					dc_new_plane_state,  					dm_state->context)) { +				dc_plane_state_release(dc_new_plane_state);  				ret = -EINVAL;  				return ret;  			} +			dm_new_plane_state->dc_state = dc_new_plane_state; +  			/* Tell DC to do a full surface update every time there  			 * is a plane change. Inefficient, but works for now.  			 */ @@ -4735,6 +4776,33 @@ static int dm_update_planes_state(struct dc *dc,  	return ret;  } +static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state, +					  struct drm_crtc *crtc) +{ +	struct drm_plane *plane; +	struct drm_crtc_state *crtc_state; + +	WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc)); + +	drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { +		struct drm_plane_state *plane_state = +			drm_atomic_get_plane_state(state, plane); + +		if (IS_ERR(plane_state)) +			return -EDEADLK; + +		crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc); +		if (IS_ERR(crtc_state)) +			return PTR_ERR(crtc_state); + +		if (crtc->primary == plane && crtc_state->active) { +			if (!plane_state->fb) +				return -EINVAL; +		} +	} +	return 0; +} +  static int amdgpu_dm_atomic_check(struct drm_device *dev,  				  struct drm_atomic_state *state)  { @@ -4758,6 +4826,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  		goto fail;  	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 
new_crtc_state, i) { +		ret = dm_atomic_check_plane_state_fb(state, crtc); +		if (ret) +			goto fail; +  		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&  		    !new_crtc_state->color_mgmt_changed)  			continue; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 9bd142f65f9b..e1acc10e35a2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -109,7 +109,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(  		struct cea_sad *sad = &sads[i];  		edid_caps->audio_modes[i].format_code = sad->format; -		edid_caps->audio_modes[i].channel_count = sad->channels; +		edid_caps->audio_modes[i].channel_count = sad->channels + 1;  		edid_caps->audio_modes[i].sample_rate = sad->freq;  		edid_caps->audio_modes[i].sample_size = sad->byte2;  	} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 1874b6cee6af..422055080df4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -683,10 +683,8 @@ static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {  void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)  { -	if (adev->mode_info.num_crtc > 0) -		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc; -	else -		adev->crtc_irq.num_types = 0; + +	adev->crtc_irq.num_types = adev->mode_info.num_crtc;  	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;  	adev->pageflip_irq.num_types = adev->mode_info.num_crtc; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index f3d87f418d2e..93421dad21bd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -189,6 +189,12 @@ void 
dm_dp_mst_dc_sink_create(struct drm_connector *connector)  			.link = aconnector->dc_link,  			.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; +	/* +	 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists +	 */ +	if (!aconnector->port || !aconnector->port->aux.ddc.algo) +		return; +  	edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);  	if (!edid) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 35e84ed031de..12868c769606 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1358,13 +1358,13 @@ enum dc_irq_source dc_interrupt_to_irq_source(  	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);  } -void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable) +bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)  {  	if (dc == NULL) -		return; +		return false; -	dal_irq_service_set(dc->res_pool->irqs, src, enable); +	return dal_irq_service_set(dc->res_pool->irqs, src, enable);  }  void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index a37428271573..be5546181fa8 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1749,8 +1749,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)  			link->link_enc,  			pipe_ctx->clock_source->id,  			display_color_depth, -			pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A, -			pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK, +			pipe_ctx->stream->signal,  			stream->phy_pix_clk);  	if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 61e8c3e02d16..639421a00ab6 100644 --- 
a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -718,7 +718,7 @@ static enum link_training_result perform_channel_equalization_sequence(  	uint32_t retries_ch_eq;  	enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;  	union lane_align_status_updated dpcd_lane_status_updated = {{0}}; -	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};; +	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};  	hw_tr_pattern = get_supported_tp(link); @@ -1465,7 +1465,7 @@ void decide_link_settings(struct dc_stream_state *stream,  	/* MST doesn't perform link training for now  	 * TODO: add MST specific link training routine  	 */ -	if (is_mst_supported(link)) { +	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {  		*link_setting = link->verified_link_cap;  		return;  	} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 95b8dd0e53c6..4d07ffebfd31 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1360,9 +1360,6 @@ bool dc_is_stream_scaling_unchanged(  	return true;  } -/* Maximum TMDS single link pixel clock 165MHz */ -#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000 -  static void update_stream_engine_usage(  		struct resource_context *res_ctx,  		const struct resource_pool *pool, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 261811e0c094..cd5819789d76 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -33,8 +33,7 @@  /*******************************************************************************   * Private functions   ******************************************************************************/ -#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST 297000 -static void update_stream_signal(struct dc_stream_state 
*stream) +void update_stream_signal(struct dc_stream_state *stream)  {  	struct dc_sink *dc_sink = stream->sink; @@ -45,8 +44,9 @@ static void update_stream_signal(struct dc_stream_state *stream)  		stream->signal = dc_sink->sink_signal;  	if (dc_is_dvi_signal(stream->signal)) { -		if (stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST && -			stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK) +		if (stream->ctx->dc->caps.dual_link_dvi && +		    stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK && +		    stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)  			stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;  		else  			stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK; @@ -193,44 +193,20 @@ bool dc_stream_set_cursor_attributes(  	core_dc = stream->ctx->dc;  	res_ctx = &core_dc->current_state->res_ctx; +	stream->cursor_attributes = *attributes;  	for (i = 0; i < MAX_PIPES; i++) {  		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; -		if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp)) +		if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && +		    !pipe_ctx->plane_res.dpp) || !pipe_ctx->plane_res.ipp)  			continue;  		if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)  			continue; -		if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes != NULL) -			pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes( -						pipe_ctx->plane_res.ipp, attributes); - -		if (pipe_ctx->plane_res.hubp != NULL && -				pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes != NULL) -			pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes( -					pipe_ctx->plane_res.hubp, attributes); - -		if (pipe_ctx->plane_res.mi != NULL && -				pipe_ctx->plane_res.mi->funcs->set_cursor_attributes != NULL) -			pipe_ctx->plane_res.mi->funcs->set_cursor_attributes( -					pipe_ctx->plane_res.mi, attributes); - - -		if (pipe_ctx->plane_res.xfm != NULL && -				
pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes != NULL) -			pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes( -				pipe_ctx->plane_res.xfm, attributes); - -		if (pipe_ctx->plane_res.dpp != NULL && -				pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes != NULL) -			pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes( -				pipe_ctx->plane_res.dpp, attributes->color_format); +		core_dc->hwss.set_cursor_attribute(pipe_ctx);  	} - -	stream->cursor_attributes = *attributes; -  	return true;  } @@ -254,55 +230,21 @@ bool dc_stream_set_cursor_position(  	core_dc = stream->ctx->dc;  	res_ctx = &core_dc->current_state->res_ctx; +	stream->cursor_position = *position;  	for (i = 0; i < MAX_PIPES; i++) {  		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; -		struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp; -		struct mem_input *mi = pipe_ctx->plane_res.mi; -		struct hubp *hubp = pipe_ctx->plane_res.hubp; -		struct dpp *dpp = pipe_ctx->plane_res.dpp; -		struct dc_cursor_position pos_cpy = *position; -		struct dc_cursor_mi_param param = { -			.pixel_clk_khz = stream->timing.pix_clk_khz, -			.ref_clk_khz = core_dc->res_pool->ref_clock_inKhz, -			.viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x, -			.viewport_width = pipe_ctx->plane_res.scl_data.viewport.width, -			.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz -		};  		if (pipe_ctx->stream != stream ||  				(!pipe_ctx->plane_res.mi  && !pipe_ctx->plane_res.hubp) ||  				!pipe_ctx->plane_state || -				(!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp)) -			continue; - -		if (pipe_ctx->plane_state->address.type -				== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) -			pos_cpy.enable = false; - -		if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state) -			pos_cpy.enable = false; - - -		if (ipp != NULL && ipp->funcs->ipp_cursor_set_position != NULL) -			ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, ¶m); - -		if (mi != NULL && mi->funcs->set_cursor_position 
!= NULL) -			mi->funcs->set_cursor_position(mi, &pos_cpy, ¶m); - -		if (!hubp) +				(!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) || +				!pipe_ctx->plane_res.ipp)  			continue; -		if (hubp->funcs->set_cursor_position != NULL) -			hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m); - -		if (dpp != NULL && dpp->funcs->set_cursor_position != NULL) -			dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width); - +		core_dc->hwss.set_cursor_position(pipe_ctx);  	} -	stream->cursor_position = *position; -  	return true;  } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index e2e3c9df79ea..d6d56611604e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -62,6 +62,7 @@ struct dc_caps {  	bool dcc_const_color;  	bool dynamic_audio;  	bool is_apu; +	bool dual_link_dvi;  };  struct dc_dcc_surface_param { @@ -672,7 +673,7 @@ enum dc_irq_source dc_interrupt_to_irq_source(  		struct dc *dc,  		uint32_t src_id,  		uint32_t ext_id); -void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable); +bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);  void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);  enum dc_irq_source dc_get_hpd_irq_source_at_index(  		struct dc *dc, uint32_t link_index); diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 01c60f11b2bd..456e4d29eadd 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -237,6 +237,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(   */  struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink); +void update_stream_signal(struct dc_stream_state *stream); +  void dc_stream_retain(struct dc_stream_state *dc_stream);  void dc_stream_release(struct dc_stream_state *dc_stream); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h 
b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index b73db9e78437..f11f17fe08f9 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -236,6 +236,7 @@  	SR(D2VGA_CONTROL), \  	SR(D3VGA_CONTROL), \  	SR(D4VGA_CONTROL), \ +	SR(VGA_TEST_CONTROL), \  	SR(DC_IP_REQUEST_CNTL), \  	BL_REG_LIST() @@ -337,6 +338,7 @@ struct dce_hwseq_registers {  	uint32_t D2VGA_CONTROL;  	uint32_t D3VGA_CONTROL;  	uint32_t D4VGA_CONTROL; +	uint32_t VGA_TEST_CONTROL;  	/* MMHUB registers. read only. temporary hack */  	uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;  	uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; @@ -493,6 +495,12 @@ struct dce_hwseq_registers {  	HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \  	HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \  	HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ +	HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\ +	HWS_SF(, D2VGA_CONTROL, D2VGA_MODE_ENABLE, mask_sh),\ +	HWS_SF(, D3VGA_CONTROL, D3VGA_MODE_ENABLE, mask_sh),\ +	HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\ +	HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\ +	HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\  	HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \  	HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) @@ -583,7 +591,13 @@ struct dce_hwseq_registers {  	type DCFCLK_GATE_DIS; \  	type DCHUBBUB_GLOBAL_TIMER_REFDIV; \  	type DENTIST_DPPCLK_WDIVIDER; \ -	type DENTIST_DISPCLK_WDIVIDER; +	type DENTIST_DISPCLK_WDIVIDER; \ +	type VGA_TEST_ENABLE; \ +	type VGA_TEST_RENDER_START; \ +	type D1VGA_MODE_ENABLE; \ +	type D2VGA_MODE_ENABLE; \ +	type D3VGA_MODE_ENABLE; \ +	type D4VGA_MODE_ENABLE;  struct dce_hwseq_shift {  	HWSEQ_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index a266e3f5e75f..e4741f1a2b01 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -82,13 +82,6 @@  #define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20  #define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40 -/* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */ -#define TMDS_MIN_PIXEL_CLOCK 25000 -/* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */ -#define TMDS_MAX_PIXEL_CLOCK 165000 -/* For current ASICs pixel clock - 600MHz */ -#define MAX_ENCODER_CLOCK 600000 -  enum {  	DP_MST_UPDATE_MAX_RETRY = 50  }; @@ -683,6 +676,7 @@ void dce110_link_encoder_construct(  {  	struct bp_encoder_cap_info bp_cap_info = {0};  	const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs; +	enum bp_result result = BP_RESULT_OK;  	enc110->base.funcs = &dce110_lnk_enc_funcs;  	enc110->base.ctx = init_data->ctx; @@ -757,15 +751,24 @@ void dce110_link_encoder_construct(  		enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;  	} +	/* default to one to mirror Windows behavior */ +	enc110->base.features.flags.bits.HDMI_6GB_EN = 1; + +	result = bp_funcs->get_encoder_cap_info(enc110->base.ctx->dc_bios, +						enc110->base.id, &bp_cap_info); +  	/* Override features with DCE-specific values */ -	if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info( -			enc110->base.ctx->dc_bios, enc110->base.id, -			&bp_cap_info)) { +	if (BP_RESULT_OK == result) {  		enc110->base.features.flags.bits.IS_HBR2_CAPABLE =  				bp_cap_info.DP_HBR2_EN;  		enc110->base.features.flags.bits.IS_HBR3_CAPABLE =  				bp_cap_info.DP_HBR3_EN;  		enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; +	} else { +		dm_logger_write(enc110->base.ctx->logger, LOG_WARNING, +				"%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", +				__func__, +				result);  	}  } @@ -904,8 +907,7 @@ void dce110_link_encoder_enable_tmds_output(  	struct link_encoder *enc,  	enum clock_source_id clock_source,  	enum dc_color_depth color_depth, -	bool hdmi, -	
bool dual_link, +	enum signal_type signal,  	uint32_t pixel_clock)  {  	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); @@ -919,16 +921,12 @@ void dce110_link_encoder_enable_tmds_output(  	cntl.engine_id = enc->preferred_engine;  	cntl.transmitter = enc110->base.transmitter;  	cntl.pll_id = clock_source; -	if (hdmi) { -		cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A; -		cntl.lanes_number = 4; -	} else if (dual_link) { -		cntl.signal = SIGNAL_TYPE_DVI_DUAL_LINK; +	cntl.signal = signal; +	if (cntl.signal == SIGNAL_TYPE_DVI_DUAL_LINK)  		cntl.lanes_number = 8; -	} else { -		cntl.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; +	else  		cntl.lanes_number = 4; -	} +  	cntl.hpd_sel = enc110->base.hpd_source;  	cntl.pixel_clock = pixel_clock; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h index 8ca9afe47a2b..0ec3433d34b6 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h @@ -210,8 +210,7 @@ void dce110_link_encoder_enable_tmds_output(  	struct link_encoder *enc,  	enum clock_source_id clock_source,  	enum dc_color_depth color_depth, -	bool hdmi, -	bool dual_link, +	enum signal_type signal,  	uint32_t pixel_clock);  /* enables DP PHY output */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c index 3931412ab6d3..87093894ea9e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c @@ -128,23 +128,22 @@ static void set_truncation(  		return;  	}  	/* on other format-to do */ -	if (params->flags.TRUNCATE_ENABLED == 0 || -			params->flags.TRUNCATE_DEPTH == 2) +	if (params->flags.TRUNCATE_ENABLED == 0)  		return;  	/*Set truncation depth and Enable truncation*/  	REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,  				FMT_TRUNCATE_EN, 1,  				FMT_TRUNCATE_DEPTH, -				params->flags.TRUNCATE_MODE, +				params->flags.TRUNCATE_DEPTH,  				
FMT_TRUNCATE_MODE, -				params->flags.TRUNCATE_DEPTH); +				params->flags.TRUNCATE_MODE);  }  /**   *	set_spatial_dither   *	1) set spatial dithering mode: pattern of seed - *	2) set spatical dithering depth: 0 for 18bpp or 1 for 24bpp + *	2) set spatial dithering depth: 0 for 18bpp or 1 for 24bpp   *	3) set random seed   *	4) set random mode   *		lfsr is reset every frame or not reset diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 3ea43e2a9450..442dd2d93618 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -852,6 +852,7 @@ static bool construct(  	dc->caps.max_downscale_ratio = 200;  	dc->caps.i2c_speed_in_khz = 40;  	dc->caps.max_cursor_size = 128; +	dc->caps.dual_link_dvi = true;  	for (i = 0; i < pool->base.pipe_count; i++) {  		pool->base.timing_generators[i] = diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 86cdd7b4811f..6f382a3ac90f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -688,15 +688,22 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)  	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;  	struct dc_link *link = pipe_ctx->stream->sink->link; -	/* 1. update AVI info frame (HDMI, DP) -	 * we always need to update info frame -	*/ +  	uint32_t active_total_with_borders;  	uint32_t early_control = 0;  	struct timing_generator *tg = pipe_ctx->stream_res.tg; -	/* TODOFPGA may change to hwss.update_info_frame */ +	/* For MST, there are multiply stream go to only one link. 
+	 * connect DIG back_end to front_end while enable_stream and +	 * disconnect them during disable_stream +	 * BY this, it is logic clean to separate stream and link */ +	link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, +						    pipe_ctx->stream_res.stream_enc->id, true); + +	/* update AVI info frame (HDMI, DP)*/ +	/* TODO: FPGA may change to hwss.update_info_frame */  	dce110_update_info_frame(pipe_ctx); +  	/* enable early control to avoid corruption on DP monitor*/  	active_total_with_borders =  			timing->h_addressable @@ -717,12 +724,8 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)  			pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);  	} -	/* For MST, there are multiply stream go to only one link. -	 * connect DIG back_end to front_end while enable_stream and -	 * disconnect them during disable_stream -	 * BY this, it is logic clean to separate stream and link */ -	link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, -						    pipe_ctx->stream_res.stream_enc->id, true); + +  } @@ -1690,9 +1693,13 @@ static void apply_min_clocks(   *  Check if FBC can be enabled   */  static bool should_enable_fbc(struct dc *dc, -			      struct dc_state *context) +			      struct dc_state *context, +			      uint32_t *pipe_idx)  { -	struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0]; +	uint32_t i; +	struct pipe_ctx *pipe_ctx = NULL; +	struct resource_context *res_ctx = &context->res_ctx; +  	ASSERT(dc->fbc_compressor); @@ -1704,6 +1711,14 @@ static bool should_enable_fbc(struct dc *dc,  	if (context->stream_count != 1)  		return false; +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		if (res_ctx->pipe_ctx[i].stream) { +			pipe_ctx = &res_ctx->pipe_ctx[i]; +			*pipe_idx = i; +			break; +		} +	} +  	/* Only supports eDP */  	if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)  		return false; @@ -1729,11 +1744,14 @@ static bool should_enable_fbc(struct dc *dc,  static void 
enable_fbc(struct dc *dc,  		       struct dc_state *context)  { -	if (should_enable_fbc(dc, context)) { +	uint32_t pipe_idx = 0; + +	if (should_enable_fbc(dc, context, &pipe_idx)) {  		/* Program GRPH COMPRESSED ADDRESS and PITCH */  		struct compr_addr_and_pitch_params params = {0, 0, 0};  		struct compressor *compr = dc->fbc_compressor; -		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0]; +		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx]; +  		params.source_view_width = pipe_ctx->stream->timing.h_addressable;  		params.source_view_height = pipe_ctx->stream->timing.v_addressable; @@ -2915,6 +2933,49 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,  	}  } +void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx) +{ +	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; +	struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp; +	struct mem_input *mi = pipe_ctx->plane_res.mi; +	struct dc_cursor_mi_param param = { +		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz, +		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz, +		.viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x, +		.viewport_width = pipe_ctx->plane_res.scl_data.viewport.width, +		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz +	}; + +	if (pipe_ctx->plane_state->address.type +			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) +		pos_cpy.enable = false; + +	if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state) +		pos_cpy.enable = false; + +	if (ipp->funcs->ipp_cursor_set_position) +		ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, ¶m); +	if (mi->funcs->set_cursor_position) +		mi->funcs->set_cursor_position(mi, &pos_cpy, ¶m); +} + +void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx) +{ +	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes; + +	if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes) +		
pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes( +				pipe_ctx->plane_res.ipp, attributes); + +	if (pipe_ctx->plane_res.mi->funcs->set_cursor_attributes) +		pipe_ctx->plane_res.mi->funcs->set_cursor_attributes( +				pipe_ctx->plane_res.mi, attributes); + +	if (pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes) +		pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes( +				pipe_ctx->plane_res.xfm, attributes); +} +  static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}  static void optimize_shared_resources(struct dc *dc) {} @@ -2957,6 +3018,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {  	.edp_backlight_control = hwss_edp_backlight_control,  	.edp_power_control = hwss_edp_power_control,  	.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, +	.set_cursor_position = dce110_set_cursor_position, +	.set_cursor_attribute = dce110_set_cursor_attribute  };  void dce110_hw_sequencer_construct(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index 7c4779578fb7..00f18c485e1e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -846,6 +846,16 @@ static bool dce110_validate_bandwidth(  	return result;  } +enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state, +				     struct dc_caps *caps) +{ +	if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) || +	    ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height)) +		return DC_FAIL_SURFACE_VALIDATE; + +	return DC_OK; +} +  static bool dce110_validate_surface_sets(  		struct dc_state *context)  { @@ -869,6 +879,13 @@ static bool dce110_validate_surface_sets(  					plane->src_rect.height > 1080))  					return false; +				/* we don't have the logic to support underlay +				 * only yet so block the use case where we get +				 * NV12 plane as top layer +				
 */ +				if (j == 0) +					return false; +  				/* irrespective of plane format,  				 * stream should be RGB encoded  				 */ @@ -1021,6 +1038,7 @@ static const struct resource_funcs dce110_res_pool_funcs = {  	.link_enc_create = dce110_link_encoder_create,  	.validate_guaranteed = dce110_validate_guaranteed,  	.validate_bandwidth = dce110_validate_bandwidth, +	.validate_plane = dce110_validate_plane,  	.acquire_idle_pipe_for_layer = dce110_acquire_underlay,  	.add_stream_to_ctx = dce110_add_stream_to_ctx,  	.validate_global = dce110_validate_global diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 663e0a047a4b..98d9cd0109e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -1103,6 +1103,8 @@ static bool construct(  	dc->caps.max_downscale_ratio = 200;  	dc->caps.i2c_speed_in_khz = 100;  	dc->caps.max_cursor_size = 128; +	dc->caps.dual_link_dvi = true; +  	/*************************************************  	 *  Create resources                             * diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 57cd67359567..5aab01db28ee 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -835,6 +835,8 @@ static bool construct(  	dc->caps.max_downscale_ratio = 200;  	dc->caps.i2c_speed_in_khz = 100;  	dc->caps.max_cursor_size = 128; +	dc->caps.dual_link_dvi = true; +  	dc->debug = debug_defaults;  	/************************************************* diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 8f2bd56f3461..25d7eb1567ae 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -793,6 +793,7 
@@ static bool dce80_construct(  	dc->caps.max_downscale_ratio = 200;  	dc->caps.i2c_speed_in_khz = 40;  	dc->caps.max_cursor_size = 128; +	dc->caps.dual_link_dvi = true;  	/*************************************************  	 *  Create resources                             * diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 82572863acab..dc1e010725c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -238,10 +238,34 @@ static void enable_power_gating_plane(  static void disable_vga(  	struct dce_hwseq *hws)  { +	unsigned int in_vga1_mode = 0; +	unsigned int in_vga2_mode = 0; +	unsigned int in_vga3_mode = 0; +	unsigned int in_vga4_mode = 0; + +	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode); +	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode); +	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode); +	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode); + +	if (in_vga1_mode == 0 && in_vga2_mode == 0 && +			in_vga3_mode == 0 && in_vga4_mode == 0) +		return; +  	REG_WRITE(D1VGA_CONTROL, 0);  	REG_WRITE(D2VGA_CONTROL, 0);  	REG_WRITE(D3VGA_CONTROL, 0);  	REG_WRITE(D4VGA_CONTROL, 0); + +	/* HW Engineer's Notes: +	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and +	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly. +	 * +	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset +	 *  VGA_TEST_ENABLE, to leave it in the same state as before. 
+	 */ +	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1); +	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);  }  static void dpp_pg_control( @@ -1761,6 +1785,11 @@ static void update_dchubp_dpp(  			&pipe_ctx->plane_res.scl_data.viewport_c);  	} +	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { +		dc->hwss.set_cursor_position(pipe_ctx); +		dc->hwss.set_cursor_attribute(pipe_ctx); +	} +  	if (plane_state->update_flags.bits.full_update) {  		/*gamut remap*/  		program_gamut_remap(pipe_ctx); @@ -2296,7 +2325,7 @@ static bool dcn10_dummy_display_power_gating(  	return true;  } -void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) +static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)  {  	struct dc_plane_state *plane_state = pipe_ctx->plane_state;  	struct timing_generator *tg = pipe_ctx->stream_res.tg; @@ -2316,12 +2345,46 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)  	}  } -void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) +static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)  {  	if (hws->ctx->dc->res_pool->hubbub != NULL)  		hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);  } +static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) +{ +	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; +	struct hubp *hubp = pipe_ctx->plane_res.hubp; +	struct dpp *dpp = pipe_ctx->plane_res.dpp; +	struct dc_cursor_mi_param param = { +		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz, +		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz, +		.viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x, +		.viewport_width = pipe_ctx->plane_res.scl_data.viewport.width, +		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz +	}; + +	if (pipe_ctx->plane_state->address.type +			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) +		pos_cpy.enable = false; + +	if (pipe_ctx->top_pipe && pipe_ctx->plane_state != 
pipe_ctx->top_pipe->plane_state) +		pos_cpy.enable = false; + +	hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m); +	dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width); +} + +static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx) +{ +	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes; + +	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes( +			pipe_ctx->plane_res.hubp, attributes); +	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes( +		pipe_ctx->plane_res.dpp, attributes->color_format); +} +  static const struct hw_sequencer_funcs dcn10_funcs = {  	.program_gamut_remap = program_gamut_remap,  	.program_csc_matrix = program_csc_matrix, @@ -2362,6 +2425,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {  	.edp_backlight_control = hwss_edp_backlight_control,  	.edp_power_control = hwss_edp_power_control,  	.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, +	.set_cursor_position = dcn10_set_cursor_position, +	.set_cursor_attribute = dcn10_set_cursor_attribute  }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index 0fd329deacd8..54d8a1386142 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -123,8 +123,7 @@ struct link_encoder_funcs {  	void (*enable_tmds_output)(struct link_encoder *enc,  		enum clock_source_id clock_source,  		enum dc_color_depth color_depth, -		bool hdmi, -		bool dual_link, +		enum signal_type signal,  		uint32_t pixel_clock);  	void (*enable_dp_output)(struct link_encoder *enc,  		const struct dc_link_settings *link_settings, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 4c0aa56f7bae..379c6ecd271a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -198,6 +198,9 @@ 
struct hw_sequencer_funcs {  			bool enable);  	void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up); +	void (*set_cursor_position)(struct pipe_ctx *pipe); +	void (*set_cursor_attribute)(struct pipe_ctx *pipe); +  };  void color_space_to_black_color( diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c index f7e40b292dfb..d3e1923b01a8 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c @@ -217,7 +217,7 @@ bool dce110_vblank_set(  			core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;  	if (enable) { -		if (!tg->funcs->arm_vert_intr(tg, 2)) { +		if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {  			DC_ERROR("Failed to get VBLANK!\n");  			return false;  		} diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c index 57a54a7b89e5..1c079ba37c30 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c @@ -42,8 +42,7 @@ static void virtual_link_encoder_enable_tmds_output(  	struct link_encoder *enc,  	enum clock_source_id clock_source,  	enum dc_color_depth color_depth, -	bool hdmi, -	bool dual_link, +	enum signal_type signal,  	uint32_t pixel_clock) {}  static void virtual_link_encoder_enable_dp_output( diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h index 7a9b43f84a31..36bbad594267 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h @@ -419,11 +419,6 @@ struct bios_event_info {  	bool backlight_changed;  }; -enum { -	HDMI_PIXEL_CLOCK_IN_KHZ_297 = 297000, -	TMDS_PIXEL_CLOCK_IN_KHZ_165 = 165000 -}; -  /*   * DFS-bypass flag   
*/ diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h index b5ebde642207..199c5db67cbc 100644 --- a/drivers/gpu/drm/amd/display/include/signal_types.h +++ b/drivers/gpu/drm/amd/display/include/signal_types.h @@ -26,6 +26,11 @@  #ifndef __DC_SIGNAL_TYPES_H__  #define __DC_SIGNAL_TYPES_H__ +/* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */ +#define TMDS_MIN_PIXEL_CLOCK 25000 +/* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */ +#define TMDS_MAX_PIXEL_CLOCK 165000 +  enum signal_type {  	SIGNAL_TYPE_NONE		= 0L,		/* no signal */  	SIGNAL_TYPE_DVI_SINGLE_LINK	= (1 << 0), diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 4c3223a4d62b..adb6e7b9280c 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -162,7 +162,7 @@ static int pp_hw_init(void *handle)  		if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {  			pr_err("smc start failed\n");  			hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); -			return -EINVAL;; +			return -EINVAL;  		}  		if (ret == PP_DPM_DISABLED)  			goto exit; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 41e42beff213..08e8a793714f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -2756,10 +2756,13 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,  				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); -	disable_mclk_switching = ((1 < info.display_count) || -				  disable_mclk_switching_for_frame_lock || -				  smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) || -				  (mode_info.refresh_rate > 120)); +	if (info.display_count == 0) +		disable_mclk_switching = false; +	else +		disable_mclk_switching = ((1 < info.display_count) || +					  
disable_mclk_switching_for_frame_lock || +					  smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) || +					  (mode_info.refresh_rate > 120));  	sclk = smu7_ps->performance_levels[0].engine_clock;  	mclk = smu7_ps->performance_levels[0].memory_clock; @@ -4534,13 +4537,6 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,  	int tmp_result, result = 0;  	uint32_t sclk_mask = 0, mclk_mask = 0; -	if (hwmgr->chip_id == CHIP_FIJI) { -		if (request->type == AMD_PP_GFX_PROFILE) -			smu7_enable_power_containment(hwmgr); -		else if (request->type == AMD_PP_COMPUTE_PROFILE) -			smu7_disable_power_containment(hwmgr); -	} -  	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)  		return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 2d55dabc77d4..5f9c3efb532f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -3168,10 +3168,13 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,  	disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);  	force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh); -	disable_mclk_switching = (info.display_count > 1) || -				    disable_mclk_switching_for_frame_lock || -				    disable_mclk_switching_for_vr || -				    force_mclk_high; +	if (info.display_count == 0) +		disable_mclk_switching = false; +	else +		disable_mclk_switching = (info.display_count > 1) || +			disable_mclk_switching_for_frame_lock || +			disable_mclk_switching_for_vr || +			force_mclk_high;  	sclk = vega10_ps->performance_levels[0].gfx_clock;  	mclk = vega10_ps->performance_levels[0].mem_clock; diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h index 5f4c2e833a65..d665dd5af5dd 100644 --- a/drivers/gpu/drm/ast/ast_tables.h +++ b/drivers/gpu/drm/ast/ast_tables.h @@ -97,7 +97,7 @@ static const struct ast_vbios_dclk_info dclk_table[] = 
{  	{0x67, 0x22, 0x00},			/* 0E: VCLK157_5	*/  	{0x6A, 0x22, 0x00},			/* 0F: VCLK162		*/  	{0x4d, 0x4c, 0x80},			/* 10: VCLK154		*/ -	{0xa7, 0x78, 0x80},			/* 11: VCLK83.5		*/ +	{0x68, 0x6f, 0x80},			/* 11: VCLK83.5		*/  	{0x28, 0x49, 0x80},			/* 12: VCLK106.5	*/  	{0x37, 0x49, 0x80},			/* 13: VCLK146.25	*/  	{0x1f, 0x45, 0x80},			/* 14: VCLK148.5	*/ @@ -127,7 +127,7 @@ static const struct ast_vbios_dclk_info dclk_table_ast2500[] = {  	{0x67, 0x22, 0x00},			/* 0E: VCLK157_5	*/  	{0x6A, 0x22, 0x00},			/* 0F: VCLK162		*/  	{0x4d, 0x4c, 0x80},			/* 10: VCLK154		*/ -	{0xa7, 0x78, 0x80},			/* 11: VCLK83.5		*/ +	{0x68, 0x6f, 0x80},			/* 11: VCLK83.5		*/  	{0x28, 0x49, 0x80},			/* 12: VCLK106.5	*/  	{0x37, 0x49, 0x80},			/* 13: VCLK146.25	*/  	{0x1f, 0x45, 0x80},			/* 14: VCLK148.5	*/ diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index cd23b1b28259..c91b9b054e3f 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -294,22 +294,7 @@ static void cirrus_crtc_prepare(struct drm_crtc *crtc)  {  } -/* - * This is called after a mode is programmed. It should reverse anything done - * by the prepare function - */ -static void cirrus_crtc_commit(struct drm_crtc *crtc) -{ -} - -/* - * The core can pass us a set of gamma values to program. 
We actually only - * use this for 8-bit mode so can't perform smooth fades on deeper modes, - * but it's a requirement that we provide the function - */ -static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, -				 u16 *blue, uint32_t size, -				 struct drm_modeset_acquire_ctx *ctx) +static void cirrus_crtc_load_lut(struct drm_crtc *crtc)  {  	struct drm_device *dev = crtc->dev;  	struct cirrus_device *cdev = dev->dev_private; @@ -317,7 +302,7 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,  	int i;  	if (!crtc->enabled) -		return 0; +		return;  	r = crtc->gamma_store;  	g = r + crtc->gamma_size; @@ -330,6 +315,27 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,  		WREG8(PALETTE_DATA, *g++ >> 8);  		WREG8(PALETTE_DATA, *b++ >> 8);  	} +} + +/* + * This is called after a mode is programmed. It should reverse anything done + * by the prepare function + */ +static void cirrus_crtc_commit(struct drm_crtc *crtc) +{ +	cirrus_crtc_load_lut(crtc); +} + +/* + * The core can pass us a set of gamma values to program. 
We actually only + * use this for 8-bit mode so can't perform smooth fades on deeper modes, + * but it's a requirement that we provide the function + */ +static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, +				 u16 *blue, uint32_t size, +				 struct drm_modeset_acquire_ctx *ctx) +{ +	cirrus_crtc_load_lut(crtc);  	return 0;  } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index ab4032167094..ae3cbfe9e01c 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1878,6 +1878,8 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,  		new_crtc_state->event->base.completion = &commit->flip_done;  		new_crtc_state->event->base.completion_release = release_crtc_commit;  		drm_crtc_commit_get(commit); + +		commit->abort_completion = true;  	}  	for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) { @@ -3421,8 +3423,21 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);  void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)  {  	if (state->commit) { +		/* +		 * In the event that a non-blocking commit returns +		 * -ERESTARTSYS before the commit_tail work is queued, we will +		 * have an extra reference to the commit object. Release it, if +		 * the event has not been consumed by the worker. +		 * +		 * state->event may be freed, so we can't directly look at +		 * state->event->base.completion. 
+		 */ +		if (state->event && state->commit->abort_completion) +			drm_crtc_commit_put(state->commit); +  		kfree(state->commit->event);  		state->commit->event = NULL; +  		drm_crtc_commit_put(state->commit);  	} diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index ddd537914575..4f751a9d71a3 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -113,6 +113,9 @@ static const struct edid_quirk {  	/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */  	{ "AEO", 0, EDID_QUIRK_FORCE_6BPC }, +	/* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */ +	{ "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC }, +  	/* Belinea 10 15 55 */  	{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },  	{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, @@ -162,6 +165,24 @@ static const struct edid_quirk {  	/* HTC Vive VR Headset */  	{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP }, + +	/* Oculus Rift DK1, DK2, and CV1 VR Headsets */ +	{ "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP }, +	{ "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP }, +	{ "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP }, + +	/* Windows Mixed Reality Headsets */ +	{ "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP }, +	{ "HPN", 0x3515, EDID_QUIRK_NON_DESKTOP }, +	{ "LEN", 0x0408, EDID_QUIRK_NON_DESKTOP }, +	{ "LEN", 0xb800, EDID_QUIRK_NON_DESKTOP }, +	{ "FUJ", 0x1970, EDID_QUIRK_NON_DESKTOP }, +	{ "DEL", 0x7fce, EDID_QUIRK_NON_DESKTOP }, +	{ "SEC", 0x144a, EDID_QUIRK_NON_DESKTOP }, +	{ "AUS", 0xc102, EDID_QUIRK_NON_DESKTOP }, + +	/* Sony PlayStation VR Headset */ +	{ "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP },  };  /* diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 5a13ff29f4f0..2dc5e8bed172 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -121,6 +121,10 @@ int drm_mode_addfb(struct drm_device *dev,  	r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);  	r.handles[0] = or->handle; +	if (r.pixel_format == DRM_FORMAT_XRGB2101010 && +	    
dev->driver->driver_features & DRIVER_PREFER_XBGR_30BPP) +		r.pixel_format = DRM_FORMAT_XBGR2101010; +  	ret = drm_mode_addfb2(dev, &r, file_priv);  	if (ret)  		return ret; @@ -457,6 +461,12 @@ int drm_mode_getfb(struct drm_device *dev,  	if (!fb)  		return -ENOENT; +	/* Multi-planar framebuffers need getfb2. */ +	if (fb->format->num_planes > 1) { +		ret = -EINVAL; +		goto out; +	} +  	r->height = fb->height;  	r->width = fb->width;  	r->depth = fb->format->depth; @@ -480,6 +490,7 @@ int drm_mode_getfb(struct drm_device *dev,  		ret = -ENODEV;  	} +out:  	drm_framebuffer_put(fb);  	return ret; diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 186c4e90cc1c..89eef1bb4ddc 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -836,9 +836,24 @@ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)  	if (!mm->color_adjust)  		return NULL; -	hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack); -	hole_start = __drm_mm_hole_node_start(hole); -	hole_end = hole_start + hole->hole_size; +	/* +	 * The hole found during scanning should ideally be the first element +	 * in the hole_stack list, but due to side-effects in the driver it +	 * may not be. 
+	 */ +	list_for_each_entry(hole, &mm->hole_stack, hole_stack) { +		hole_start = __drm_mm_hole_node_start(hole); +		hole_end = hole_start + hole->hole_size; + +		if (hole_start <= scan->hit_start && +		    hole_end >= scan->hit_end) +			break; +	} + +	/* We should only be called after we found the hole previously */ +	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack); +	if (unlikely(&hole->hole_stack == &mm->hole_stack)) +		return NULL;  	DRM_MM_BUG_ON(hole_start > scan->hit_start);  	DRM_MM_BUG_ON(hole_end < scan->hit_end); diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 555fbe54d6e2..00b8445ba819 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -654,6 +654,26 @@ out:  }  /** + * drm_kms_helper_is_poll_worker - is %current task an output poll worker? + * + * Determine if %current task is an output poll worker.  This can be used + * to select distinct code paths for output polling versus other contexts. + * + * One use case is to avoid a deadlock between the output poll worker and + * the autosuspend worker wherein the latter waits for polling to finish + * upon calling drm_kms_helper_poll_disable(), while the former waits for + * runtime suspend to finish upon calling pm_runtime_get_sync() in a + * connector ->detect hook. 
+ */ +bool drm_kms_helper_is_poll_worker(void) +{ +	struct work_struct *work = current_work(); + +	return work && work->func == output_poll_execute; +} +EXPORT_SYMBOL(drm_kms_helper_is_poll_worker); + +/**   * drm_kms_helper_poll_disable - disable output polling   * @dev: drm_device   * diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 2b8bf2dd6387..f68ef1b3a28c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -286,7 +286,6 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)  	node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);  	if (!node) { -		dev_err(dev, "failed to allocate memory\n");  		ret = -ENOMEM;  		goto err;  	} @@ -926,7 +925,7 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)  	struct drm_device *drm_dev = g2d->subdrv.drm_dev;  	struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;  	struct drm_exynos_pending_g2d_event *e; -	struct timeval now; +	struct timespec64 now;  	if (list_empty(&runqueue_node->event_list))  		return; @@ -934,9 +933,9 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)  	e = list_first_entry(&runqueue_node->event_list,  			     struct drm_exynos_pending_g2d_event, base.link); -	do_gettimeofday(&now); +	ktime_get_ts64(&now);  	e->event.tv_sec = now.tv_sec; -	e->event.tv_usec = now.tv_usec; +	e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;  	e->event.cmdlist_no = cmdlist_no;  	drm_send_event(drm_dev, &e->base); @@ -1358,10 +1357,9 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,  		return -EFAULT;  	runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL); -	if (!runqueue_node) { -		dev_err(dev, "failed to allocate memory\n"); +	if (!runqueue_node)  		return -ENOMEM; -	} +  	run_cmdlist = &runqueue_node->run_cmdlist;  	event_list = &runqueue_node->event_list;  	INIT_LIST_HEAD(run_cmdlist); diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h 
b/drivers/gpu/drm/exynos/exynos_drm_rotator.h deleted file mode 100644 index 71a0b4c0c1e8..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright (c) 2012 Samsung Electronics Co., Ltd. - * - * Authors: - *	YoungJun Cho <yj44.cho@samsung.com> - *	Eunchul Kim <chulspro.kim@samsung.com> - * - * This program is free software; you can redistribute  it and/or modify it - * under  the terms of  the GNU General  Public License as published by the - * Free Software Foundation;  either version 2 of the  License, or (at your - * option) any later version. - */ - -#ifndef	_EXYNOS_DRM_ROTATOR_H_ -#define	_EXYNOS_DRM_ROTATOR_H_ - -/* TODO */ - -#endif diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index a4b75a46f946..abd84cbcf1c2 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1068,10 +1068,13 @@ static void hdmi_audio_config(struct hdmi_context *hdata)  	/* Configuration I2S input ports. 
Configure I2S_PIN_SEL_0~4 */  	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5)  			| HDMI_I2S_SEL_LRCK(6)); -	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(1) -			| HDMI_I2S_SEL_SDATA2(4)); + +	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(3) +			| HDMI_I2S_SEL_SDATA0(4)); +  	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1)  			| HDMI_I2S_SEL_SDATA2(2)); +  	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0));  	/* I2S_CON_1 & 2 */ diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h index 30496134a3d0..d7cbe53c4c01 100644 --- a/drivers/gpu/drm/exynos/regs-fimc.h +++ b/drivers/gpu/drm/exynos/regs-fimc.h @@ -569,7 +569,7 @@  #define EXYNOS_CIIMGEFF_FIN_EMBOSSING		(4 << 26)  #define EXYNOS_CIIMGEFF_FIN_SILHOUETTE		(5 << 26)  #define EXYNOS_CIIMGEFF_FIN_MASK			(7 << 26) -#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK		((0xff < 13) | (0xff < 0)) +#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK		((0xff << 13) | (0xff << 0))  /* Real input DMA size register */  #define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE	(1 << 31) diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h index 04be0f7e8193..4420c203ac85 100644 --- a/drivers/gpu/drm/exynos/regs-hdmi.h +++ b/drivers/gpu/drm/exynos/regs-hdmi.h @@ -464,7 +464,7 @@  /* I2S_PIN_SEL_1 */  #define HDMI_I2S_SEL_SDATA1(x)		(((x) & 0x7) << 4) -#define HDMI_I2S_SEL_SDATA2(x)		((x) & 0x7) +#define HDMI_I2S_SEL_SDATA0(x)		((x) & 0x7)  /* I2S_PIN_SEL_2 */  #define HDMI_I2S_SEL_SDATA3(x)		(((x) & 0x7) << 4) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index c8454ac43fae..db6b94dda5df 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -471,6 +471,7 @@ struct parser_exec_state {  	 * used when ret from 2nd level batch buffer  	 */  	int saved_buf_addr_type; +	bool is_ctx_wa;  	struct cmd_info *info; @@ -1715,6 +1716,11 @@ static 
int perform_bb_shadow(struct parser_exec_state *s)  	bb->accessing = true;  	bb->bb_start_cmd_va = s->ip_va; +	if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa)) +		bb->bb_offset = s->ip_va - s->rb_va; +	else +		bb->bb_offset = 0; +  	/*  	 * ip_va saves the virtual address of the shadow batch buffer, while  	 * ip_gma saves the graphics address of the original batch buffer. @@ -2571,6 +2577,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)  	s.ring_tail = gma_tail;  	s.rb_va = workload->shadow_ring_buffer_va;  	s.workload = workload; +	s.is_ctx_wa = false;  	if ((bypass_scan_mask & (1 << workload->ring_id)) ||  		gma_head == gma_tail) @@ -2624,6 +2631,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)  	s.ring_tail = gma_tail;  	s.rb_va = wa_ctx->indirect_ctx.shadow_va;  	s.workload = workload; +	s.is_ctx_wa = true;  	if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {  		ret = -EINVAL; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 909499b73d03..021f722e2481 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -733,6 +733,25 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,  	return ret == 0 ? count : ret;  } +static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos) +{ +	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); +	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); +	struct intel_gvt *gvt = vgpu->gvt; +	int offset; + +	/* Only allow MMIO GGTT entry access */ +	if (index != PCI_BASE_ADDRESS_0) +		return false; + +	offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) - +		intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0); + +	return (offset >= gvt->device_info.gtt_start_offset && +		offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ? 
+			true : false; +} +  static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,  			size_t count, loff_t *ppos)  { @@ -742,7 +761,21 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,  	while (count) {  		size_t filled; -		if (count >= 4 && !(*ppos % 4)) { +		/* Only support GGTT entry 8 bytes read */ +		if (count >= 8 && !(*ppos % 8) && +			gtt_entry(mdev, ppos)) { +			u64 val; + +			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), +					ppos, false); +			if (ret <= 0) +				goto read_err; + +			if (copy_to_user(buf, &val, sizeof(val))) +				goto read_err; + +			filled = 8; +		} else if (count >= 4 && !(*ppos % 4)) {  			u32 val;  			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), @@ -802,7 +835,21 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,  	while (count) {  		size_t filled; -		if (count >= 4 && !(*ppos % 4)) { +		/* Only support GGTT entry 8 bytes write */ +		if (count >= 8 && !(*ppos % 8) && +			gtt_entry(mdev, ppos)) { +			u64 val; + +			if (copy_from_user(&val, buf, sizeof(val))) +				goto write_err; + +			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), +					ppos, true); +			if (ret <= 0) +				goto write_err; + +			filled = 8; +		} else if (count >= 4 && !(*ppos % 4)) {  			u32 val;  			if (copy_from_user(&val, buf, sizeof(val))) diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 73ad6e90e49d..152df3d0291e 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -118,6 +118,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {  	{RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */  	{RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */  	{RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */ +	{RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */  	{RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */  	{RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */  	{RCS, 
TRNULLDETCT, 0, false}, /* 0x4de8 */ @@ -393,9 +394,11 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,  	 * performace for batch mmio read/write, so we need  	 * handle forcewake mannually.  	 */ +	intel_runtime_pm_get(dev_priv);  	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);  	switch_mmio(pre, next, ring_id);  	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); +	intel_runtime_pm_put(dev_priv);  }  /** diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index b55b3580ca1d..d74d6f05c62c 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -52,6 +52,54 @@ static void set_context_pdp_root_pointer(  		pdp_pair[i].val = pdp[7 - i];  } +/* + * when populating shadow ctx from guest, we should not overrride oa related + * registers, so that they will not be overlapped by guest oa configs. Thus + * made it possible to capture oa data from host for both host and guests. + */ +static void sr_oa_regs(struct intel_vgpu_workload *workload, +		u32 *reg_state, bool save) +{ +	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; +	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset; +	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset; +	int i = 0; +	u32 flex_mmio[] = { +		i915_mmio_reg_offset(EU_PERF_CNTL0), +		i915_mmio_reg_offset(EU_PERF_CNTL1), +		i915_mmio_reg_offset(EU_PERF_CNTL2), +		i915_mmio_reg_offset(EU_PERF_CNTL3), +		i915_mmio_reg_offset(EU_PERF_CNTL4), +		i915_mmio_reg_offset(EU_PERF_CNTL5), +		i915_mmio_reg_offset(EU_PERF_CNTL6), +	}; + +	if (!workload || !reg_state || workload->ring_id != RCS) +		return; + +	if (save) { +		workload->oactxctrl = reg_state[ctx_oactxctrl + 1]; + +		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { +			u32 state_offset = ctx_flexeu0 + i * 2; + +			workload->flex_mmio[i] = reg_state[state_offset + 1]; +		} +	} else { +		reg_state[ctx_oactxctrl] = +			i915_mmio_reg_offset(GEN8_OACTXCONTROL); +		
reg_state[ctx_oactxctrl + 1] = workload->oactxctrl; + +		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { +			u32 state_offset = ctx_flexeu0 + i * 2; +			u32 mmio = flex_mmio[i]; + +			reg_state[state_offset] = mmio; +			reg_state[state_offset + 1] = workload->flex_mmio[i]; +		} +	} +} +  static int populate_shadow_context(struct intel_vgpu_workload *workload)  {  	struct intel_vgpu *vgpu = workload->vgpu; @@ -98,6 +146,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)  	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);  	shadow_ring_context = kmap(page); +	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);  #define COPY_REG(name) \  	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \  		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4) @@ -122,6 +171,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)  			sizeof(*shadow_ring_context),  			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); +	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);  	kunmap(page);  	return 0;  } @@ -376,6 +426,17 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)  			goto err;  		} +		/* For privilge batch buffer and not wa_ctx, the bb_start_cmd_va +		 * is only updated into ring_scan_buffer, not real ring address +		 * allocated in later copy_workload_to_ring_buffer. pls be noted +		 * shadow_ring_buffer_va is now pointed to real ring buffer va +		 * in copy_workload_to_ring_buffer. 
+		 */ + +		if (bb->bb_offset) +			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va +				+ bb->bb_offset; +  		/* relocate shadow batch buffer */  		bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);  		if (gmadr_bytes == 8) @@ -1044,10 +1105,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)  	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES); -	s->workloads = kmem_cache_create("gvt-g_vgpu_workload", -			sizeof(struct intel_vgpu_workload), 0, -			SLAB_HWCACHE_ALIGN, -			NULL); +	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload", +						  sizeof(struct intel_vgpu_workload), 0, +						  SLAB_HWCACHE_ALIGN, +						  offsetof(struct intel_vgpu_workload, rb_tail), +						  sizeof_field(struct intel_vgpu_workload, rb_tail), +						  NULL);  	if (!s->workloads) {  		ret = -ENOMEM; diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index ff175a98b19e..a79a4f60637e 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -110,6 +110,10 @@ struct intel_vgpu_workload {  	/* shadow batch buffer */  	struct list_head shadow_bb;  	struct intel_shadow_wa_ctx wa_ctx; + +	/* oa registers */ +	u32 oactxctrl; +	u32 flex_mmio[7];  };  struct intel_vgpu_shadow_bb { @@ -120,6 +124,7 @@ struct intel_vgpu_shadow_bb {  	u32 *bb_start_cmd_va;  	unsigned int clflush;  	bool accessing; +	unsigned long bb_offset;  };  #define workload_q_head(vgpu, ring_id) \ diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h index 7a2511538f34..736bd2bc5127 100644 --- a/drivers/gpu/drm/i915/gvt/trace.h +++ b/drivers/gpu/drm/i915/gvt/trace.h @@ -333,7 +333,7 @@ TRACE_EVENT(render_mmio,  	TP_PROTO(int old_id, int new_id, char *action, unsigned int reg,  		 unsigned int old_val, unsigned int new_val), -	TP_ARGS(old_id, new_id, action, reg, new_val, old_val), +	TP_ARGS(old_id, new_id, action, reg, old_val, new_val),  	TP_STRUCT__entry(  		__field(int, old_id) diff 
--git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 173d0095e3b2..2f5209de0391 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1433,19 +1433,7 @@ void i915_driver_unload(struct drm_device *dev)  	intel_modeset_cleanup(dev); -	/* -	 * free the memory space allocated for the child device -	 * config parsed from VBT -	 */ -	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) { -		kfree(dev_priv->vbt.child_dev); -		dev_priv->vbt.child_dev = NULL; -		dev_priv->vbt.child_dev_num = 0; -	} -	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode); -	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL; -	kfree(dev_priv->vbt.lfp_lvds_vbt_mode); -	dev_priv->vbt.lfp_lvds_vbt_mode = NULL; +	intel_bios_cleanup(dev_priv);  	vga_switcheroo_unregister_client(pdev);  	vga_client_register(pdev, NULL, NULL, NULL); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a42deebedb0f..d307429a5ae0 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1349,6 +1349,7 @@ struct intel_vbt_data {  		u32 size;  		u8 *data;  		const u8 *sequence[MIPI_SEQ_MAX]; +		u8 *deassert_seq; /* Used by fixup_mipi_sequences() */  	} dsi;  	int crt_ddc_pin; @@ -3657,6 +3658,7 @@ extern void intel_i2c_reset(struct drm_i915_private *dev_priv);  /* intel_bios.c */  void intel_bios_init(struct drm_i915_private *dev_priv); +void intel_bios_cleanup(struct drm_i915_private *dev_priv);  bool intel_bios_is_valid_vbt(const void *buf, size_t size);  bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);  bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index dd89abd2263d..6ff5d655c202 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -434,20 +434,28 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,  			dma_fence_put(shared[i]);  		
kfree(shared); +		/* +		 * If both shared fences and an exclusive fence exist, +		 * then by construction the shared fences must be later +		 * than the exclusive fence. If we successfully wait for +		 * all the shared fences, we know that the exclusive fence +		 * must all be signaled. If all the shared fences are +		 * signaled, we can prune the array and recover the +		 * floating references on the fences/requests. +		 */  		prune_fences = count && timeout >= 0;  	} else {  		excl = reservation_object_get_excl_rcu(resv);  	} -	if (excl && timeout >= 0) { +	if (excl && timeout >= 0)  		timeout = i915_gem_object_wait_fence(excl, flags, timeout,  						     rps_client); -		prune_fences = timeout >= 0; -	}  	dma_fence_put(excl); -	/* Oportunistically prune the fences iff we know they have *all* been +	/* +	 * Opportunistically prune the fences iff we know they have *all* been  	 * signaled and that the reservation object has not been changed (i.e.  	 * no new fences have been added).  	 */ @@ -3205,8 +3213,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)  	 * rolling the global seqno forward (since this would complete requests  	 * for which we haven't set the fence error to EIO yet).  	 
*/ -	for_each_engine(engine, i915, id) +	for_each_engine(engine, i915, id) { +		i915_gem_reset_prepare_engine(engine);  		engine->submit_request = nop_submit_request; +	}  	/*  	 * Make sure no one is running the old callback before we proceed with @@ -3244,6 +3254,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)  		intel_engine_init_global_seqno(engine,  					       intel_engine_last_submit(engine));  		spin_unlock_irqrestore(&engine->timeline->lock, flags); + +		i915_gem_reset_finish_engine(engine);  	}  	set_bit(I915_WEDGED, &i915->gpu_error.flags); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 648e7536ff51..0c963fcf31ff 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -803,7 +803,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,  	case I915_CONTEXT_PARAM_PRIORITY:  		{ -			int priority = args->value; +			s64 priority = args->value;  			if (args->size)  				ret = -EINVAL; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 4401068ff468..3ab1ace2a6bd 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -505,6 +505,8 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)  		list_add_tail(&vma->exec_link, &eb->unbound);  		if (drm_mm_node_allocated(&vma->node))  			err = i915_vma_unbind(vma); +		if (unlikely(err)) +			vma->exec_flags = NULL;  	}  	return err;  } @@ -2410,7 +2412,7 @@ err_request:  	if (out_fence) {  		if (err == 0) {  			fd_install(out_fence_fd, out_fence->file); -			args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */ +			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */  			args->rsvd2 |= (u64)out_fence_fd << 32;  			out_fence_fd = -1;  		} else { diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 
e09d18df8b7f..a3e93d46316a 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -476,8 +476,6 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)  	GEM_BUG_ON(!irqs_disabled());  	lockdep_assert_held(&engine->timeline->lock); -	trace_i915_gem_request_execute(request); -  	/* Transfer from per-context onto the global per-engine timeline */  	timeline = engine->timeline;  	GEM_BUG_ON(timeline == request->timeline); @@ -501,6 +499,8 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)  	list_move_tail(&request->link, &timeline->requests);  	spin_unlock(&request->timeline->lock); +	trace_i915_gem_request_execute(request); +  	wake_up_all(&request->execute);  } diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c index 42ff06fe54a3..792facdb6702 100644 --- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c +++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c @@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)  void  i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)  { -	strncpy(dev_priv->perf.oa.test_config.uuid, +	strlcpy(dev_priv->perf.oa.test_config.uuid,  		"577e8e2c-3fa0-4875-8743-3538d585e3b0", -		UUID_STRING_LEN); +		sizeof(dev_priv->perf.oa.test_config.uuid));  	dev_priv->perf.oa.test_config.id = 1;  	dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c index ff0ac3627cc4..ba9140c87cc0 100644 --- a/drivers/gpu/drm/i915/i915_oa_cnl.c +++ b/drivers/gpu/drm/i915/i915_oa_cnl.c @@ -96,9 +96,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)  void  i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)  { -	strncpy(dev_priv->perf.oa.test_config.uuid, +	strlcpy(dev_priv->perf.oa.test_config.uuid,  		"db41edd4-d8e7-4730-ad11-b9a2d6833503", -		UUID_STRING_LEN); +		
sizeof(dev_priv->perf.oa.test_config.uuid));  	dev_priv->perf.oa.test_config.id = 1;  	dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 0be50e43507d..f8fe5ffcdcff 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1303,9 +1303,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)  	 */  	mutex_lock(&dev_priv->drm.struct_mutex);  	dev_priv->perf.oa.exclusive_stream = NULL; -	mutex_unlock(&dev_priv->drm.struct_mutex); -  	dev_priv->perf.oa.ops.disable_metric_set(dev_priv); +	mutex_unlock(&dev_priv->drm.struct_mutex);  	free_oa_buffer(dev_priv); @@ -1756,22 +1755,13 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr   * Note: it's only the RCS/Render context that has any OA state.   */  static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, -				       const struct i915_oa_config *oa_config, -				       bool interruptible) +				       const struct i915_oa_config *oa_config)  {  	struct i915_gem_context *ctx;  	int ret;  	unsigned int wait_flags = I915_WAIT_LOCKED; -	if (interruptible) { -		ret = i915_mutex_lock_interruptible(&dev_priv->drm); -		if (ret) -			return ret; - -		wait_flags |= I915_WAIT_INTERRUPTIBLE; -	} else { -		mutex_lock(&dev_priv->drm.struct_mutex); -	} +	lockdep_assert_held(&dev_priv->drm.struct_mutex);  	/* Switch away from any user context. */  	ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config); @@ -1819,8 +1809,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,  	}   out: -	mutex_unlock(&dev_priv->drm.struct_mutex); -  	return ret;  } @@ -1863,7 +1851,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,  	 * to make sure all slices/subslices are ON before writing to NOA  	 * registers.  	 
*/ -	ret = gen8_configure_all_contexts(dev_priv, oa_config, true); +	ret = gen8_configure_all_contexts(dev_priv, oa_config);  	if (ret)  		return ret; @@ -1878,7 +1866,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,  static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)  {  	/* Reset all contexts' slices/subslices configurations. */ -	gen8_configure_all_contexts(dev_priv, NULL, false); +	gen8_configure_all_contexts(dev_priv, NULL);  	I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &  				      ~GT_NOA_ENABLE)); @@ -1888,7 +1876,7 @@ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)  static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)  {  	/* Reset all contexts' slices/subslices configurations. */ -	gen8_configure_all_contexts(dev_priv, NULL, false); +	gen8_configure_all_contexts(dev_priv, NULL);  	/* Make sure we disable noa to save power. */  	I915_WRITE(RPM_CONFIG1, @@ -2138,6 +2126,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,  	if (ret)  		goto err_oa_buf_alloc; +	ret = i915_mutex_lock_interruptible(&dev_priv->drm); +	if (ret) +		goto err_lock; +  	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,  						      stream->oa_config);  	if (ret) @@ -2145,23 +2137,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,  	stream->ops = &i915_oa_stream_ops; -	/* Lock device for exclusive_stream access late because -	 * enable_metric_set() might lock as well on gen8+. 
-	 */ -	ret = i915_mutex_lock_interruptible(&dev_priv->drm); -	if (ret) -		goto err_lock; -  	dev_priv->perf.oa.exclusive_stream = stream;  	mutex_unlock(&dev_priv->drm.struct_mutex);  	return 0; -err_lock: +err_enable:  	dev_priv->perf.oa.ops.disable_metric_set(dev_priv); +	mutex_unlock(&dev_priv->drm.struct_mutex); -err_enable: +err_lock:  	free_oa_buffer(dev_priv);  err_oa_buf_alloc: diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 55a8a1e29424..0e9b98c32b62 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -285,26 +285,41 @@ static u64 count_interrupts(struct drm_i915_private *i915)  	return sum;  } -static void i915_pmu_event_destroy(struct perf_event *event) +static void engine_event_destroy(struct perf_event *event)  { -	WARN_ON(event->parent); +	struct drm_i915_private *i915 = +		container_of(event->pmu, typeof(*i915), pmu.base); +	struct intel_engine_cs *engine; + +	engine = intel_engine_lookup_user(i915, +					  engine_event_class(event), +					  engine_event_instance(event)); +	if (WARN_ON_ONCE(!engine)) +		return; + +	if (engine_event_sample(event) == I915_SAMPLE_BUSY && +	    intel_engine_supports_stats(engine)) +		intel_disable_engine_stats(engine);  } -static int engine_event_init(struct perf_event *event) +static void i915_pmu_event_destroy(struct perf_event *event)  { -	struct drm_i915_private *i915 = -		container_of(event->pmu, typeof(*i915), pmu.base); +	WARN_ON(event->parent); -	if (!intel_engine_lookup_user(i915, engine_event_class(event), -				      engine_event_instance(event))) -		return -ENODEV; +	if (is_engine_event(event)) +		engine_event_destroy(event); +} -	switch (engine_event_sample(event)) { +static int +engine_event_status(struct intel_engine_cs *engine, +		    enum drm_i915_pmu_engine_sample sample) +{ +	switch (sample) {  	case I915_SAMPLE_BUSY:  	case I915_SAMPLE_WAIT:  		break;  	case I915_SAMPLE_SEMA: -		if (INTEL_GEN(i915) < 6) +		if 
(INTEL_GEN(engine->i915) < 6)  			return -ENODEV;  		break;  	default: @@ -314,6 +329,30 @@ static int engine_event_init(struct perf_event *event)  	return 0;  } +static int engine_event_init(struct perf_event *event) +{ +	struct drm_i915_private *i915 = +		container_of(event->pmu, typeof(*i915), pmu.base); +	struct intel_engine_cs *engine; +	u8 sample; +	int ret; + +	engine = intel_engine_lookup_user(i915, engine_event_class(event), +					  engine_event_instance(event)); +	if (!engine) +		return -ENODEV; + +	sample = engine_event_sample(event); +	ret = engine_event_status(engine, sample); +	if (ret) +		return ret; + +	if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine)) +		ret = intel_enable_engine_stats(engine); + +	return ret; +} +  static int i915_pmu_event_init(struct perf_event *event)  {  	struct drm_i915_private *i915 = @@ -370,7 +409,94 @@ static int i915_pmu_event_init(struct perf_event *event)  	return 0;  } -static u64 __i915_pmu_event_read(struct perf_event *event) +static u64 __get_rc6(struct drm_i915_private *i915) +{ +	u64 val; + +	val = intel_rc6_residency_ns(i915, +				     IS_VALLEYVIEW(i915) ? +				     VLV_GT_RENDER_RC6 : +				     GEN6_GT_GFX_RC6); + +	if (HAS_RC6p(i915)) +		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p); + +	if (HAS_RC6pp(i915)) +		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp); + +	return val; +} + +static u64 get_rc6(struct drm_i915_private *i915, bool locked) +{ +#if IS_ENABLED(CONFIG_PM) +	unsigned long flags; +	u64 val; + +	if (intel_runtime_pm_get_if_in_use(i915)) { +		val = __get_rc6(i915); +		intel_runtime_pm_put(i915); + +		/* +		 * If we are coming back from being runtime suspended we must +		 * be careful not to report a larger value than returned +		 * previously. 
+		 */ + +		if (!locked) +			spin_lock_irqsave(&i915->pmu.lock, flags); + +		if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { +			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0; +			i915->pmu.sample[__I915_SAMPLE_RC6].cur = val; +		} else { +			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; +		} + +		if (!locked) +			spin_unlock_irqrestore(&i915->pmu.lock, flags); +	} else { +		struct pci_dev *pdev = i915->drm.pdev; +		struct device *kdev = &pdev->dev; +		unsigned long flags2; + +		/* +		 * We are runtime suspended. +		 * +		 * Report the delta from when the device was suspended to now, +		 * on top of the last known real value, as the approximated RC6 +		 * counter value. +		 */ +		if (!locked) +			spin_lock_irqsave(&i915->pmu.lock, flags); + +		spin_lock_irqsave(&kdev->power.lock, flags2); + +		if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) +			i915->pmu.suspended_jiffies_last = +						kdev->power.suspended_jiffies; + +		val = kdev->power.suspended_jiffies - +		      i915->pmu.suspended_jiffies_last; +		val += jiffies - kdev->power.accounting_timestamp; + +		spin_unlock_irqrestore(&kdev->power.lock, flags2); + +		val = jiffies_to_nsecs(val); +		val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; +		i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; + +		if (!locked) +			spin_unlock_irqrestore(&i915->pmu.lock, flags); +	} + +	return val; +#else +	return __get_rc6(i915); +#endif +} + +static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)  {  	struct drm_i915_private *i915 =  		container_of(event->pmu, typeof(*i915), pmu.base); @@ -387,7 +513,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)  		if (WARN_ON_ONCE(!engine)) {  			/* Do nothing */  		} else if (sample == I915_SAMPLE_BUSY && -			   engine->pmu.busy_stats) { +			   intel_engine_supports_stats(engine)) {  			val = ktime_to_ns(intel_engine_get_busy_time(engine));  		} else {  			val = engine->pmu.sample[sample].cur; @@ -408,18 +534,7 
@@ static u64 __i915_pmu_event_read(struct perf_event *event)  			val = count_interrupts(i915);  			break;  		case I915_PMU_RC6_RESIDENCY: -			intel_runtime_pm_get(i915); -			val = intel_rc6_residency_ns(i915, -						     IS_VALLEYVIEW(i915) ? -						     VLV_GT_RENDER_RC6 : -						     GEN6_GT_GFX_RC6); -			if (HAS_RC6p(i915)) -				val += intel_rc6_residency_ns(i915, -							      GEN6_GT_GFX_RC6p); -			if (HAS_RC6pp(i915)) -				val += intel_rc6_residency_ns(i915, -							      GEN6_GT_GFX_RC6pp); -			intel_runtime_pm_put(i915); +			val = get_rc6(i915, locked);  			break;  		}  	} @@ -434,7 +549,7 @@ static void i915_pmu_event_read(struct perf_event *event)  again:  	prev = local64_read(&hwc->prev_count); -	new = __i915_pmu_event_read(event); +	new = __i915_pmu_event_read(event, false);  	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)  		goto again; @@ -442,12 +557,6 @@ again:  	local64_add(new - prev, &event->count);  } -static bool engine_needs_busy_stats(struct intel_engine_cs *engine) -{ -	return intel_engine_supports_stats(engine) && -	       (engine->pmu.enable & BIT(I915_SAMPLE_BUSY)); -} -  static void i915_pmu_enable(struct perf_event *event)  {  	struct drm_i915_private *i915 = @@ -487,21 +596,7 @@ static void i915_pmu_enable(struct perf_event *event)  		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);  		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); -		if (engine->pmu.enable_count[sample]++ == 0) { -			/* -			 * Enable engine busy stats tracking if needed or -			 * alternatively cancel the scheduled disable. -			 * -			 * If the delayed disable was pending, cancel it and -			 * in this case do not enable since it already is. 
-			 */ -			if (engine_needs_busy_stats(engine) && -			    !engine->pmu.busy_stats) { -				engine->pmu.busy_stats = true; -				if (!cancel_delayed_work(&engine->pmu.disable_busy_stats)) -					intel_enable_engine_stats(engine); -			} -		} +		engine->pmu.enable_count[sample]++;  	}  	/* @@ -509,19 +604,11 @@ static void i915_pmu_enable(struct perf_event *event)  	 * for all listeners. Even when the event was already enabled and has  	 * an existing non-zero value.  	 */ -	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event)); +	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true));  	spin_unlock_irqrestore(&i915->pmu.lock, flags);  } -static void __disable_busy_stats(struct work_struct *work) -{ -	struct intel_engine_cs *engine = -	       container_of(work, typeof(*engine), pmu.disable_busy_stats.work); - -	intel_disable_engine_stats(engine); -} -  static void i915_pmu_disable(struct perf_event *event)  {  	struct drm_i915_private *i915 = @@ -545,26 +632,8 @@ static void i915_pmu_disable(struct perf_event *event)  		 * Decrement the reference count and clear the enabled  		 * bitmask when the last listener on an event goes away.  		 */ -		if (--engine->pmu.enable_count[sample] == 0) { +		if (--engine->pmu.enable_count[sample] == 0)  			engine->pmu.enable &= ~BIT(sample); -			if (!engine_needs_busy_stats(engine) && -			    engine->pmu.busy_stats) { -				engine->pmu.busy_stats = false; -				/* -				 * We request a delayed disable to handle the -				 * rapid on/off cycles on events, which can -				 * happen when tools like perf stat start, in a -				 * nicer way. -				 * -				 * In addition, this also helps with busy stats -				 * accuracy with background CPU offline/online -				 * migration events. 
-				 */ -				queue_delayed_work(system_wq, -						   &engine->pmu.disable_busy_stats, -						   round_jiffies_up_relative(HZ)); -			} -		}  	}  	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); @@ -797,8 +866,6 @@ static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)  void i915_pmu_register(struct drm_i915_private *i915)  { -	struct intel_engine_cs *engine; -	enum intel_engine_id id;  	int ret;  	if (INTEL_GEN(i915) <= 2) { @@ -820,10 +887,6 @@ void i915_pmu_register(struct drm_i915_private *i915)  	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);  	i915->pmu.timer.function = i915_sample; -	for_each_engine(engine, i915, id) -		INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats, -				  __disable_busy_stats); -  	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);  	if (ret)  		goto err; @@ -843,9 +906,6 @@ err:  void i915_pmu_unregister(struct drm_i915_private *i915)  { -	struct intel_engine_cs *engine; -	enum intel_engine_id id; -  	if (!i915->pmu.base.event_init)  		return; @@ -853,11 +913,6 @@ void i915_pmu_unregister(struct drm_i915_private *i915)  	hrtimer_cancel(&i915->pmu.timer); -	for_each_engine(engine, i915, id) { -		GEM_BUG_ON(engine->pmu.busy_stats); -		flush_delayed_work(&engine->pmu.disable_busy_stats); -	} -  	i915_pmu_unregister_cpuhp_state(i915);  	perf_pmu_unregister(&i915->pmu.base); diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index 40c154d13565..bb62df15afa4 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -27,6 +27,8 @@  enum {  	__I915_SAMPLE_FREQ_ACT = 0,  	__I915_SAMPLE_FREQ_REQ, +	__I915_SAMPLE_RC6, +	__I915_SAMPLE_RC6_ESTIMATED,  	__I915_NUM_PMU_SAMPLERS  }; @@ -94,6 +96,10 @@ struct i915_pmu {  	 * struct intel_engine_cs.  	 */  	struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS]; +	/** +	 * @suspended_jiffies_last: Cached suspend time from PM core. 
+	 */ +	unsigned long suspended_jiffies_last;  };  #ifdef CONFIG_PERF_EVENTS diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index a2108e35c599..33eb0c5b1d32 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2027,7 +2027,7 @@ enum i915_power_well_id {  #define _CNL_PORT_TX_DW5_LN0_AE		0x162454  #define _CNL_PORT_TX_DW5_LN0_B		0x162654  #define _CNL_PORT_TX_DW5_LN0_C		0x162C54 -#define _CNL_PORT_TX_DW5_LN0_D		0x162ED4 +#define _CNL_PORT_TX_DW5_LN0_D		0x162E54  #define _CNL_PORT_TX_DW5_LN0_F		0x162854  #define CNL_PORT_TX_DW5_GRP(port)	_MMIO_PORT6(port, \  						    _CNL_PORT_TX_DW5_GRP_AE, \ @@ -2058,7 +2058,7 @@ enum i915_power_well_id {  #define _CNL_PORT_TX_DW7_LN0_AE		0x16245C  #define _CNL_PORT_TX_DW7_LN0_B		0x16265C  #define _CNL_PORT_TX_DW7_LN0_C		0x162C5C -#define _CNL_PORT_TX_DW7_LN0_D		0x162EDC +#define _CNL_PORT_TX_DW7_LN0_D		0x162E5C  #define _CNL_PORT_TX_DW7_LN0_F		0x16285C  #define CNL_PORT_TX_DW7_GRP(port)	_MMIO_PORT6(port, \  						    _CNL_PORT_TX_DW7_GRP_AE, \ diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index b33d2158c234..e5e6f6bb2b05 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -304,8 +304,9 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,  {  	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);  	struct intel_rps *rps = &dev_priv->gt_pm.rps; -	u32 val; +	bool boost = false;  	ssize_t ret; +	u32 val;  	ret = kstrtou32(buf, 0, &val);  	if (ret) @@ -317,8 +318,13 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,  		return -EINVAL;  	mutex_lock(&dev_priv->pcu_lock); -	rps->boost_freq = val; +	if (val != rps->boost_freq) { +		rps->boost_freq = val; +		boost = atomic_read(&rps->num_waiters); +	}  	mutex_unlock(&dev_priv->pcu_lock); +	if (boost) +		schedule_work(&rps->work);  	return count;  } diff --git a/drivers/gpu/drm/i915/intel_audio.c 
b/drivers/gpu/drm/i915/intel_audio.c index 522d54fecb53..4a01f62a392d 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -779,11 +779,11 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,  {  	struct intel_encoder *encoder; -	if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map))) -		return NULL; -  	/* MST */  	if (pipe >= 0) { +		if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map))) +			return NULL; +  		encoder = dev_priv->av_enc_map[pipe];  		/*  		 * when bootup, audio driver may not know it is diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index f7f771749e48..b49a2df44430 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -947,6 +947,86 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)  	return 0;  } +/* + * Get len of pre-fixed deassert fragment from a v1 init OTP sequence, + * skip all delay + gpio operands and stop at the first DSI packet op. + */ +static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv) +{ +	const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP]; +	int index, len; + +	if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1)) +		return 0; + +	/* index = 1 to skip sequence byte */ +	for (index = 1; data[index] != MIPI_SEQ_ELEM_END; index += len) { +		switch (data[index]) { +		case MIPI_SEQ_ELEM_SEND_PKT: +			return index == 1 ? 0 : index; +		case MIPI_SEQ_ELEM_DELAY: +			len = 5; /* 1 byte for operand + uint32 */ +			break; +		case MIPI_SEQ_ELEM_GPIO: +			len = 3; /* 1 byte for op, 1 for gpio_nr, 1 for value */ +			break; +		default: +			return 0; +		} +	} + +	return 0; +} + +/* + * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence. + * The deassert must be done before calling intel_dsi_device_ready, so for + * these devices we split the init OTP sequence into a deassert sequence and + * the actual init OTP part. 
+ */ +static void fixup_mipi_sequences(struct drm_i915_private *dev_priv) +{ +	u8 *init_otp; +	int len; + +	/* Limit this to VLV for now. */ +	if (!IS_VALLEYVIEW(dev_priv)) +		return; + +	/* Limit this to v1 vid-mode sequences */ +	if (dev_priv->vbt.dsi.config->is_cmd_mode || +	    dev_priv->vbt.dsi.seq_version != 1) +		return; + +	/* Only do this if there are otp and assert seqs and no deassert seq */ +	if (!dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] || +	    !dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] || +	    dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) +		return; + +	/* The deassert-sequence ends at the first DSI packet */ +	len = get_init_otp_deassert_fragment_len(dev_priv); +	if (!len) +		return; + +	DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n"); + +	/* Copy the fragment, update seq byte and terminate it */ +	init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP]; +	dev_priv->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL); +	if (!dev_priv->vbt.dsi.deassert_seq) +		return; +	dev_priv->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET; +	dev_priv->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END; +	/* Use the copy for deassert */ +	dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] = +		dev_priv->vbt.dsi.deassert_seq; +	/* Replace the last byte of the fragment with init OTP seq byte */ +	init_otp[len - 1] = MIPI_SEQ_INIT_OTP; +	/* And make MIPI_MIPI_SEQ_INIT_OTP point to it */ +	dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1; +} +  static void  parse_mipi_sequence(struct drm_i915_private *dev_priv,  		    const struct bdb_header *bdb) @@ -1016,6 +1096,8 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,  	dev_priv->vbt.dsi.size = seq_size;  	dev_priv->vbt.dsi.seq_version = sequence->version; +	fixup_mipi_sequences(dev_priv); +  	DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");  	return; @@ -1589,6 +1671,29 @@ out:  }  /** + * intel_bios_cleanup - Free any resources 
allocated by intel_bios_init() + * @dev_priv: i915 device instance + */ +void intel_bios_cleanup(struct drm_i915_private *dev_priv) +{ +	kfree(dev_priv->vbt.child_dev); +	dev_priv->vbt.child_dev = NULL; +	dev_priv->vbt.child_dev_num = 0; +	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode); +	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL; +	kfree(dev_priv->vbt.lfp_lvds_vbt_mode); +	dev_priv->vbt.lfp_lvds_vbt_mode = NULL; +	kfree(dev_priv->vbt.dsi.data); +	dev_priv->vbt.dsi.data = NULL; +	kfree(dev_priv->vbt.dsi.pps); +	dev_priv->vbt.dsi.pps = NULL; +	kfree(dev_priv->vbt.dsi.config); +	dev_priv->vbt.dsi.config = NULL; +	kfree(dev_priv->vbt.dsi.deassert_seq); +	dev_priv->vbt.dsi.deassert_seq = NULL; +} + +/**   * intel_bios_is_tv_present - is integrated TV present in VBT   * @dev_priv:	i915 device instance   * diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index bd40fea16b4f..f54ddda9fdad 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -594,29 +594,16 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,  	spin_unlock_irq(&b->rb_lock);  } -static bool signal_valid(const struct drm_i915_gem_request *request) -{ -	return intel_wait_check_request(&request->signaling.wait, request); -} -  static bool signal_complete(const struct drm_i915_gem_request *request)  {  	if (!request)  		return false; -	/* If another process served as the bottom-half it may have already -	 * signalled that this wait is already completed. -	 */ -	if (intel_wait_complete(&request->signaling.wait)) -		return signal_valid(request); - -	/* Carefully check if the request is complete, giving time for the +	/* +	 * Carefully check if the request is complete, giving time for the  	 * seqno to be visible or if the GPU hung.  	 
*/ -	if (__i915_request_irq_complete(request)) -		return true; - -	return false; +	return __i915_request_irq_complete(request);  }  static struct drm_i915_gem_request *to_signaler(struct rb_node *rb) @@ -659,9 +646,13 @@ static int intel_breadcrumbs_signaler(void *arg)  			request = i915_gem_request_get_rcu(request);  		rcu_read_unlock();  		if (signal_complete(request)) { -			local_bh_disable(); -			dma_fence_signal(&request->fence); -			local_bh_enable(); /* kick start the tasklets */ +			if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, +				      &request->fence.flags)) { +				local_bh_disable(); +				dma_fence_signal(&request->fence); +				GEM_BUG_ON(!i915_gem_request_completed(request)); +				local_bh_enable(); /* kick start the tasklets */ +			}  			spin_lock_irq(&b->rb_lock); diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 5dc118f26b51..1704c8897afd 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -1952,6 +1952,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)  	if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)  		min_cdclk = max(2 * 96000, min_cdclk); +	/* +	 * On Valleyview some DSI panels lose (v|h)sync when the clock is lower +	 * than 320000KHz. 
+	 */ +	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) && +	    IS_VALLEYVIEW(dev_priv)) +		min_cdclk = max(320000, min_cdclk); +  	if (min_cdclk > dev_priv->max_cdclk_freq) {  		DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",  			      min_cdclk, dev_priv->max_cdclk_freq); diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index f51645a08dca..6aff9d096e13 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -2175,8 +2175,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,  		intel_prepare_dp_ddi_buffers(encoder, crtc_state);  	intel_ddi_init_dp_buf_reg(encoder); -	if (!is_mst) -		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); +	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);  	intel_dp_start_link_train(intel_dp);  	if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)  		intel_dp_stop_link_train(intel_dp); @@ -2274,14 +2273,12 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,  	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);  	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);  	struct intel_dp *intel_dp = &dig_port->dp; -	bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST);  	/*  	 * Power down sink before disabling the port, otherwise we end  	 * up getting interrupts from the sink on detecting link loss.  	 
*/ -	if (!is_mst) -		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); +	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);  	intel_disable_ddi_buf(encoder); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 35c5299feab6..a29868cd30c7 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -620,19 +620,15 @@ static int  bxt_power_sequencer_idx(struct intel_dp *intel_dp)  {  	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); +	int backlight_controller = dev_priv->vbt.backlight.controller;  	lockdep_assert_held(&dev_priv->pps_mutex);  	/* We should never land here with regular DP ports */  	WARN_ON(!intel_dp_is_edp(intel_dp)); -	/* -	 * TODO: BXT has 2 PPS instances. The correct port->PPS instance -	 * mapping needs to be retrieved from VBT, for now just hard-code to -	 * use instance #0 always. -	 */  	if (!intel_dp->pps_reset) -		return 0; +		return backlight_controller;  	intel_dp->pps_reset = false; @@ -642,7 +638,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)  	 */  	intel_dp_init_panel_power_sequencer_registers(intel_dp, false); -	return 0; +	return backlight_controller;  }  typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index d790bdc227ff..fa960cfd2764 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -1458,7 +1458,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine)  	struct drm_i915_private *dev_priv = engine->i915;  	bool idle = true; -	intel_runtime_pm_get(dev_priv); +	/* If the whole device is asleep, the engine must be idle */ +	if (!intel_runtime_pm_get_if_in_use(dev_priv)) +		return true;  	/* First check that no commands are left in the ring */  	if ((I915_READ_HEAD(engine) & HEAD_ADDR) != @@ -1943,16 +1945,22 @@ intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)   
*/  int intel_enable_engine_stats(struct intel_engine_cs *engine)  { +	struct intel_engine_execlists *execlists = &engine->execlists;  	unsigned long flags; +	int err = 0;  	if (!intel_engine_supports_stats(engine))  		return -ENODEV; +	tasklet_disable(&execlists->tasklet);  	spin_lock_irqsave(&engine->stats.lock, flags); -	if (engine->stats.enabled == ~0) -		goto busy; + +	if (unlikely(engine->stats.enabled == ~0)) { +		err = -EBUSY; +		goto unlock; +	} +  	if (engine->stats.enabled++ == 0) { -		struct intel_engine_execlists *execlists = &engine->execlists;  		const struct execlist_port *port = execlists->port;  		unsigned int num_ports = execlists_num_ports(execlists); @@ -1967,14 +1975,12 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)  		if (engine->stats.active)  			engine->stats.start = engine->stats.enabled_at;  	} -	spin_unlock_irqrestore(&engine->stats.lock, flags); - -	return 0; -busy: +unlock:  	spin_unlock_irqrestore(&engine->stats.lock, flags); +	tasklet_enable(&execlists->tasklet); -	return -EBUSY; +	return err;  }  static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index 348a4f7ffb67..53747318f4a7 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -246,7 +246,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)  	 */  	tmp = I915_READ_CTL(engine);  	if (tmp & RING_WAIT) { -		i915_handle_error(dev_priv, 0, +		i915_handle_error(dev_priv, BIT(engine->id),  				  "Kicking stuck wait on %s",  				  engine->name);  		I915_WRITE_CTL(engine, tmp); @@ -258,7 +258,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)  		default:  			return ENGINE_DEAD;  		case 1: -			i915_handle_error(dev_priv, 0, +			i915_handle_error(dev_priv, ALL_ENGINES,  					  "Kicking stuck semaphore on %s",  					  engine->name);  			I915_WRITE_CTL(engine, tmp); diff --git 
a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 7ece2f061b9e..e0fca035ff78 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -719,6 +719,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)  	struct rb_node *rb;  	unsigned long flags; +	GEM_TRACE("%s\n", engine->name); +  	spin_lock_irqsave(&engine->timeline->lock, flags);  	/* Cancel the requests on the HW and clear the ELSP tracker. */ @@ -765,6 +767,9 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)  	 */  	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); +	/* Mark all CS interrupts as complete */ +	execlists->active = 0; +  	spin_unlock_irqrestore(&engine->timeline->lock, flags);  } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index c5ff203e42d6..a0e7a6c2a57c 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -366,20 +366,6 @@ struct intel_engine_cs {  		 */  #define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)  		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX]; -		/** -		 * @busy_stats: Has enablement of engine stats tracking been -		 * 		requested. -		 */ -		bool busy_stats; -		/** -		 * @disable_busy_stats: Work item for busy stats disabling. -		 * -		 * Same as with @enable_busy_stats action, with the difference -		 * that we delay it in case there are rapid enable-disable -		 * actions, which can happen during tool startup (like perf -		 * stat). 
-		 */ -		struct delayed_work disable_busy_stats;  	} pmu;  	/* diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 9a9961802f5c..e83af0f2be86 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -225,7 +225,11 @@ static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,  				  struct drm_crtc_state *old_crtc_state)  {  	drm_crtc_vblank_on(crtc); +} +static void ipu_crtc_atomic_flush(struct drm_crtc *crtc, +				  struct drm_crtc_state *old_crtc_state) +{  	spin_lock_irq(&crtc->dev->event_lock);  	if (crtc->state->event) {  		WARN_ON(drm_crtc_vblank_get(crtc)); @@ -293,6 +297,7 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {  	.mode_set_nofb = ipu_crtc_mode_set_nofb,  	.atomic_check = ipu_crtc_atomic_check,  	.atomic_begin = ipu_crtc_atomic_begin, +	.atomic_flush = ipu_crtc_atomic_flush,  	.atomic_disable = ipu_crtc_atomic_disable,  	.atomic_enable = ipu_crtc_atomic_enable,  }; diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 57ed56d8623f..d9113faaa62f 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -22,6 +22,7 @@  #include <drm/drm_plane_helper.h>  #include "video/imx-ipu-v3.h" +#include "imx-drm.h"  #include "ipuv3-plane.h"  struct ipu_plane_state { @@ -272,7 +273,7 @@ static void ipu_plane_destroy(struct drm_plane *plane)  	kfree(ipu_plane);  } -void ipu_plane_state_reset(struct drm_plane *plane) +static void ipu_plane_state_reset(struct drm_plane *plane)  {  	struct ipu_plane_state *ipu_state; @@ -292,7 +293,8 @@ void ipu_plane_state_reset(struct drm_plane *plane)  	plane->state = &ipu_state->base;  } -struct drm_plane_state *ipu_plane_duplicate_state(struct drm_plane *plane) +static struct drm_plane_state * +ipu_plane_duplicate_state(struct drm_plane *plane)  {  	struct ipu_plane_state *state; @@ -306,8 +308,8 @@ struct drm_plane_state *ipu_plane_duplicate_state(struct drm_plane *plane)  	return 
&state->base;  } -void ipu_plane_destroy_state(struct drm_plane *plane, -			     struct drm_plane_state *state) +static void ipu_plane_destroy_state(struct drm_plane *plane, +				    struct drm_plane_state *state)  {  	struct ipu_plane_state *ipu_state = to_ipu_plane_state(state); diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index 5155f0179b61..05520202c967 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c @@ -36,6 +36,7 @@  #include "meson_venc.h"  #include "meson_vpp.h"  #include "meson_viu.h" +#include "meson_canvas.h"  #include "meson_registers.h"  /* CRTC definition */ @@ -192,6 +193,11 @@ void meson_crtc_irq(struct meson_drm *priv)  		} else  			meson_vpp_disable_interlace_vscaler_osd1(priv); +		meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, +			   priv->viu.osd1_addr, priv->viu.osd1_stride, +			   priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE, +			   MESON_CANVAS_BLKMODE_LINEAR); +  		/* Enable OSD1 */  		writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,  				    priv->io_base + _REG(VPP_MISC)); diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h index 5e8b392b9d1f..8450d6ac8c9b 100644 --- a/drivers/gpu/drm/meson/meson_drv.h +++ b/drivers/gpu/drm/meson/meson_drv.h @@ -43,6 +43,9 @@ struct meson_drm {  		bool osd1_commit;  		uint32_t osd1_ctrl_stat;  		uint32_t osd1_blk0_cfg[5]; +		uint32_t osd1_addr; +		uint32_t osd1_stride; +		uint32_t osd1_height;  	} viu;  	struct { diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c index d0a6ac8390f3..27bd3503e1e4 100644 --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c @@ -164,10 +164,9 @@ static void meson_plane_atomic_update(struct drm_plane *plane,  	/* Update Canvas with buffer address */  	gem = drm_fb_cma_get_gem_obj(fb, 0); -	meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, -			   gem->paddr, fb->pitches[0], -			   fb->height, 
MESON_CANVAS_WRAP_NONE, -			   MESON_CANVAS_BLKMODE_LINEAR); +	priv->viu.osd1_addr = gem->paddr; +	priv->viu.osd1_stride = fb->pitches[0]; +	priv->viu.osd1_height = fb->height;  	spin_unlock_irqrestore(&priv->drm->event_lock, flags);  } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 3e9bba4d6624..6d8e3a9a6fc0 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -680,7 +680,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)  	} else {  		dev_info(&pdev->dev,  			 "no iommu, fallback to phys contig buffers for scanout\n"); -		aspace = NULL;; +		aspace = NULL;  	}  	pm_runtime_put_sync(&pdev->dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 380f340204e8..debbbf0fd4bd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -134,7 +134,7 @@ nv50_get_intensity(struct backlight_device *bd)  	struct nouveau_encoder *nv_encoder = bl_get_data(bd);  	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);  	struct nvif_object *device = &drm->client.device.object; -	int or = nv_encoder->or; +	int or = ffs(nv_encoder->dcb->or) - 1;  	u32 div = 1025;  	u32 val; @@ -149,7 +149,7 @@ nv50_set_intensity(struct backlight_device *bd)  	struct nouveau_encoder *nv_encoder = bl_get_data(bd);  	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);  	struct nvif_object *device = &drm->client.device.object; -	int or = nv_encoder->or; +	int or = ffs(nv_encoder->dcb->or) - 1;  	u32 div = 1025;  	u32 val = (bd->props.brightness * div) / 100; @@ -170,7 +170,7 @@ nva3_get_intensity(struct backlight_device *bd)  	struct nouveau_encoder *nv_encoder = bl_get_data(bd);  	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);  	struct nvif_object *device = &drm->client.device.object; -	int or = nv_encoder->or; +	int or = 
ffs(nv_encoder->dcb->or) - 1;  	u32 div, val;  	div  = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); @@ -188,7 +188,7 @@ nva3_set_intensity(struct backlight_device *bd)  	struct nouveau_encoder *nv_encoder = bl_get_data(bd);  	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);  	struct nvif_object *device = &drm->client.device.object; -	int or = nv_encoder->or; +	int or = ffs(nv_encoder->dcb->or) - 1;  	u32 div, val;  	div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); @@ -228,7 +228,7 @@ nv50_backlight_init(struct drm_connector *connector)  			return -ENODEV;  	} -	if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) +	if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)))  		return 0;  	if (drm->client.device.info.chipset <= 0xa0 || @@ -268,13 +268,13 @@ nouveau_backlight_init(struct drm_device *dev)  	struct nvif_device *device = &drm->client.device;  	struct drm_connector *connector; +	INIT_LIST_HEAD(&drm->bl_connectors); +  	if (apple_gmux_present()) {  		NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n");  		return 0;  	} -	INIT_LIST_HEAD(&drm->bl_connectors); -  	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {  		if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&  		    connector->connector_type != DRM_MODE_CONNECTOR_eDP) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 69d6e61a01ec..6ed9cb053dfa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -570,9 +570,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)  		nv_connector->edid = NULL;  	} -	ret = pm_runtime_get_sync(connector->dev->dev); -	if (ret < 0 && ret != -EACCES) -		return conn_status; +	/* Outputs are only polled while runtime active, so acquiring a +	 * runtime PM ref here is unnecessary (and would deadlock upon +	 * runtime suspend 
because it waits for polling to finish). +	 */ +	if (!drm_kms_helper_is_poll_worker()) { +		ret = pm_runtime_get_sync(connector->dev->dev); +		if (ret < 0 && ret != -EACCES) +			return conn_status; +	}  	nv_encoder = nouveau_connector_ddc_detect(connector);  	if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) { @@ -647,8 +653,10 @@ detect_analog:   out: -	pm_runtime_mark_last_busy(connector->dev->dev); -	pm_runtime_put_autosuspend(connector->dev->dev); +	if (!drm_kms_helper_is_poll_worker()) { +		pm_runtime_mark_last_busy(connector->dev->dev); +		pm_runtime_put_autosuspend(connector->dev->dev); +	}  	return conn_status;  } diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index dd8d4352ed99..caddce88d2d8 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -4477,6 +4477,7 @@ nv50_display_create(struct drm_device *dev)  	nouveau_display(dev)->fini = nv50_display_fini;  	disp->disp = &nouveau_display(dev)->disp;  	dev->mode_config.funcs = &nv50_disp_func; +	dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;  	if (nouveau_atomic)  		dev->driver->driver_features |= DRIVER_ATOMIC; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c index 93946dcee319..1c12e58f44c2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c @@ -1354,7 +1354,7 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,  		tail = this->addr + this->size;  		if (vmm->func->page_block && next && next->page != p) -			tail = ALIGN_DOWN(addr, vmm->func->page_block); +			tail = ALIGN_DOWN(tail, vmm->func->page_block);  		if (addr <= tail && tail - addr >= size) {  			rb_erase(&this->tree, &vmm->free); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c index bf62303571b3..3695cde669f8 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c @@ -301,7 +301,7 @@ nvkm_therm_attr_set(struct nvkm_therm *therm,  void  nvkm_therm_clkgate_enable(struct nvkm_therm *therm)  { -	if (!therm->func->clkgate_enable || !therm->clkgating_enabled) +	if (!therm || !therm->func->clkgate_enable || !therm->clkgating_enabled)  		return;  	nvkm_debug(&therm->subdev, @@ -312,7 +312,7 @@ nvkm_therm_clkgate_enable(struct nvkm_therm *therm)  void  nvkm_therm_clkgate_fini(struct nvkm_therm *therm, bool suspend)  { -	if (!therm->func->clkgate_fini || !therm->clkgating_enabled) +	if (!therm || !therm->func->clkgate_fini || !therm->clkgating_enabled)  		return;  	nvkm_debug(&therm->subdev, @@ -395,7 +395,7 @@ void  nvkm_therm_clkgate_init(struct nvkm_therm *therm,  			const struct nvkm_therm_clkgate_pack *p)  { -	if (!therm->func->clkgate_init || !therm->clkgating_enabled) +	if (!therm || !therm->func->clkgate_init || !therm->clkgating_enabled)  		return;  	therm->func->clkgate_init(therm, p); diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index d3045a371a55..7c73bc7e2f85 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -3221,35 +3221,8 @@ static void cik_gpu_init(struct radeon_device *rdev)  	case CHIP_KAVERI:  		rdev->config.cik.max_shader_engines = 1;  		rdev->config.cik.max_tile_pipes = 4; -		if ((rdev->pdev->device == 0x1304) || -		    (rdev->pdev->device == 0x1305) || -		    (rdev->pdev->device == 0x130C) || -		    (rdev->pdev->device == 0x130F) || -		    (rdev->pdev->device == 0x1310) || -		    (rdev->pdev->device == 0x1311) || -		    (rdev->pdev->device == 0x131C)) { -			rdev->config.cik.max_cu_per_sh = 8; -			rdev->config.cik.max_backends_per_se = 2; -		} else if ((rdev->pdev->device == 0x1309) || -			   (rdev->pdev->device == 0x130A) || -			   (rdev->pdev->device == 0x130D) || -			   (rdev->pdev->device == 0x1313) || -			   (rdev->pdev->device == 0x131D)) { -			
rdev->config.cik.max_cu_per_sh = 6; -			rdev->config.cik.max_backends_per_se = 2; -		} else if ((rdev->pdev->device == 0x1306) || -			   (rdev->pdev->device == 0x1307) || -			   (rdev->pdev->device == 0x130B) || -			   (rdev->pdev->device == 0x130E) || -			   (rdev->pdev->device == 0x1315) || -			   (rdev->pdev->device == 0x1318) || -			   (rdev->pdev->device == 0x131B)) { -			rdev->config.cik.max_cu_per_sh = 4; -			rdev->config.cik.max_backends_per_se = 1; -		} else { -			rdev->config.cik.max_cu_per_sh = 3; -			rdev->config.cik.max_backends_per_se = 1; -		} +		rdev->config.cik.max_cu_per_sh = 8; +		rdev->config.cik.max_backends_per_se = 2;  		rdev->config.cik.max_sh_per_se = 1;  		rdev->config.cik.max_texture_channel_caches = 4;  		rdev->config.cik.max_gprs = 256; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 5012f5e47a1e..df9469a8fdb1 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -90,25 +90,18 @@ void radeon_connector_hotplug(struct drm_connector *connector)  		/* don't do anything if sink is not display port, i.e.,  		 * passive dp->(dvi|hdmi) adaptor  		 */ -		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { -			int saved_dpms = connector->dpms; -			/* Only turn off the display if it's physically disconnected */ -			if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { -				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); -			} else if (radeon_dp_needs_link_train(radeon_connector)) { -				/* Don't try to start link training before we -				 * have the dpcd */ -				if (!radeon_dp_getdpcd(radeon_connector)) -					return; - -				/* set it to OFF so that drm_helper_connector_dpms() -				 * won't return immediately since the current state -				 * is ON at this point. 
-				 */ -				connector->dpms = DRM_MODE_DPMS_OFF; -				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); -			} -			connector->dpms = saved_dpms; +		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT && +		    radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) && +		    radeon_dp_needs_link_train(radeon_connector)) { +			/* Don't start link training before we have the DPCD */ +			if (!radeon_dp_getdpcd(radeon_connector)) +				return; + +			/* Turn the connector off and back on immediately, which +			 * will trigger link training +			 */ +			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); +			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);  		}  	}  } @@ -899,9 +892,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)  	enum drm_connector_status ret = connector_status_disconnected;  	int r; -	r = pm_runtime_get_sync(connector->dev->dev); -	if (r < 0) -		return connector_status_disconnected; +	if (!drm_kms_helper_is_poll_worker()) { +		r = pm_runtime_get_sync(connector->dev->dev); +		if (r < 0) +			return connector_status_disconnected; +	}  	if (encoder) {  		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); @@ -924,8 +919,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)  	/* check acpi lid status ??? 
*/  	radeon_connector_update_scratch_regs(connector, ret); -	pm_runtime_mark_last_busy(connector->dev->dev); -	pm_runtime_put_autosuspend(connector->dev->dev); + +	if (!drm_kms_helper_is_poll_worker()) { +		pm_runtime_mark_last_busy(connector->dev->dev); +		pm_runtime_put_autosuspend(connector->dev->dev); +	} +  	return ret;  } @@ -1039,9 +1038,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)  	enum drm_connector_status ret = connector_status_disconnected;  	int r; -	r = pm_runtime_get_sync(connector->dev->dev); -	if (r < 0) -		return connector_status_disconnected; +	if (!drm_kms_helper_is_poll_worker()) { +		r = pm_runtime_get_sync(connector->dev->dev); +		if (r < 0) +			return connector_status_disconnected; +	}  	encoder = radeon_best_single_encoder(connector);  	if (!encoder) @@ -1108,8 +1109,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)  	radeon_connector_update_scratch_regs(connector, ret);  out: -	pm_runtime_mark_last_busy(connector->dev->dev); -	pm_runtime_put_autosuspend(connector->dev->dev); +	if (!drm_kms_helper_is_poll_worker()) { +		pm_runtime_mark_last_busy(connector->dev->dev); +		pm_runtime_put_autosuspend(connector->dev->dev); +	}  	return ret;  } @@ -1173,9 +1176,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force)  	if (!radeon_connector->dac_load_detect)  		return ret; -	r = pm_runtime_get_sync(connector->dev->dev); -	if (r < 0) -		return connector_status_disconnected; +	if (!drm_kms_helper_is_poll_worker()) { +		r = pm_runtime_get_sync(connector->dev->dev); +		if (r < 0) +			return connector_status_disconnected; +	}  	encoder = radeon_best_single_encoder(connector);  	if (!encoder) @@ -1187,8 +1192,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force)  	if (ret == connector_status_connected)  		ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);  	radeon_connector_update_scratch_regs(connector, ret); -	
pm_runtime_mark_last_busy(connector->dev->dev); -	pm_runtime_put_autosuspend(connector->dev->dev); + +	if (!drm_kms_helper_is_poll_worker()) { +		pm_runtime_mark_last_busy(connector->dev->dev); +		pm_runtime_put_autosuspend(connector->dev->dev); +	} +  	return ret;  } @@ -1251,9 +1260,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)  	enum drm_connector_status ret = connector_status_disconnected;  	bool dret = false, broken_edid = false; -	r = pm_runtime_get_sync(connector->dev->dev); -	if (r < 0) -		return connector_status_disconnected; +	if (!drm_kms_helper_is_poll_worker()) { +		r = pm_runtime_get_sync(connector->dev->dev); +		if (r < 0) +			return connector_status_disconnected; +	}  	if (radeon_connector->detected_hpd_without_ddc) {  		force = true; @@ -1436,8 +1447,10 @@ out:  	}  exit: -	pm_runtime_mark_last_busy(connector->dev->dev); -	pm_runtime_put_autosuspend(connector->dev->dev); +	if (!drm_kms_helper_is_poll_worker()) { +		pm_runtime_mark_last_busy(connector->dev->dev); +		pm_runtime_put_autosuspend(connector->dev->dev); +	}  	return ret;  } @@ -1688,9 +1701,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)  	if (radeon_dig_connector->is_mst)  		return connector_status_disconnected; -	r = pm_runtime_get_sync(connector->dev->dev); -	if (r < 0) -		return connector_status_disconnected; +	if (!drm_kms_helper_is_poll_worker()) { +		r = pm_runtime_get_sync(connector->dev->dev); +		if (r < 0) +			return connector_status_disconnected; +	}  	if (!force && radeon_check_hpd_status_unchanged(connector)) {  		ret = connector->status; @@ -1777,8 +1792,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)  	}  out: -	pm_runtime_mark_last_busy(connector->dev->dev); -	pm_runtime_put_autosuspend(connector->dev->dev); +	if (!drm_kms_helper_is_poll_worker()) { +		pm_runtime_mark_last_busy(connector->dev->dev); +		pm_runtime_put_autosuspend(connector->dev->dev); +	}  	return ret;  } diff --git 
a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 8d3e3d2e0090..7828a5e10629 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1365,6 +1365,10 @@ int radeon_device_init(struct radeon_device *rdev,  	if ((rdev->flags & RADEON_IS_PCI) &&  	    (rdev->family <= CHIP_RS740))  		rdev->need_dma32 = true; +#ifdef CONFIG_PPC64 +	if (rdev->family == CHIP_CEDAR) +		rdev->need_dma32 = true; +#endif  	dma_bits = rdev->need_dma32 ? 32 : 40;  	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index a9962ffba720..27d8e7dd2d06 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -34,8 +34,6 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)  	struct radeon_bo *robj = gem_to_radeon_bo(gobj);  	if (robj) { -		if (robj->gem_base.import_attach) -			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);  		radeon_mn_unregister(robj);  		radeon_bo_unref(&robj);  	} diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 15404af9d740..31f5ad605e59 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -82,6 +82,8 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)  	mutex_unlock(&bo->rdev->gem.mutex);  	radeon_bo_clear_surface_reg(bo);  	WARN_ON_ONCE(!list_empty(&bo->va)); +	if (bo->gem_base.import_attach) +		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);  	drm_gem_object_release(&bo->gem_base);  	kfree(bo);  } diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 326ad068c15a..4b6542538ff9 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -47,7 +47,6 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev);  static bool radeon_pm_debug_check_in_vbl(struct radeon_device 
*rdev, bool finish);  static void radeon_pm_update_profile(struct radeon_device *rdev);  static void radeon_pm_set_clocks(struct radeon_device *rdev); -static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev);  int radeon_pm_get_type_index(struct radeon_device *rdev,  			     enum radeon_pm_state_type ps_type, @@ -80,8 +79,6 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)  				radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);  		}  		mutex_unlock(&rdev->pm.mutex); -		/* allow new DPM state to be picked */ -		radeon_pm_compute_clocks_dpm(rdev);  	} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {  		if (rdev->pm.profile == PM_PROFILE_AUTO) {  			mutex_lock(&rdev->pm.mutex); @@ -885,8 +882,7 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,  		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;  	/* balanced states don't exist at the moment */  	if (dpm_state == POWER_STATE_TYPE_BALANCED) -		dpm_state = rdev->pm.dpm.ac_power ? 
-			POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY; +		dpm_state = POWER_STATE_TYPE_PERFORMANCE;  restart_search:  	/* Pick the best power state based on current conditions */ diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 2c18996d59c5..0d95888ccc3e 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -461,7 +461,7 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo  {  	struct drm_sched_job *s_job;  	struct drm_sched_entity *entity, *tmp; -	int i;; +	int i;  	spin_lock(&sched->job_list_lock);  	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) { diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c index 5decae0069d0..78cbc3145e44 100644 --- a/drivers/gpu/drm/sun4i/sun4i_crtc.c +++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c @@ -93,6 +93,8 @@ static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,  	DRM_DEBUG_DRIVER("Disabling the CRTC\n"); +	drm_crtc_vblank_off(crtc); +  	sun4i_tcon_set_status(scrtc->tcon, encoder, false);  	if (crtc->state->event && !crtc->state->active) { @@ -113,6 +115,8 @@ static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc,  	DRM_DEBUG_DRIVER("Enabling the CRTC\n");  	sun4i_tcon_set_status(scrtc->tcon, encoder, true); + +	drm_crtc_vblank_on(crtc);  }  static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c index 023f39bda633..e36004fbe453 100644 --- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c +++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c @@ -132,10 +132,13 @@ static int sun4i_dclk_get_phase(struct clk_hw *hw)  static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees)  {  	struct sun4i_dclk *dclk = hw_to_dclk(hw); +	u32 val = degrees / 120; + +	val <<= 28;  	regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG,  			   GENMASK(29, 28), -			   
degrees / 120); +			   val);  	return 0;  } diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 4570da0227b4..d9a71f361b14 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -111,7 +111,7 @@ static int sun4i_drv_bind(struct device *dev)  	/* drm_vblank_init calls kcalloc, which can fail */  	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);  	if (ret) -		goto free_mem_region; +		goto cleanup_mode_config;  	drm->irq_enabled = true; @@ -139,7 +139,6 @@ finish_poll:  	sun4i_framebuffer_free(drm);  cleanup_mode_config:  	drm_mode_config_cleanup(drm); -free_mem_region:  	of_reserved_mem_device_release(dev);  free_drm:  	drm_dev_unref(drm); diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index 500b6fb3e028..fa4bcd092eaf 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -538,7 +538,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,  					     &sun4i_hdmi_regmap_config);  	if (IS_ERR(hdmi->regmap)) {  		dev_err(dev, "Couldn't create HDMI encoder regmap\n"); -		return PTR_ERR(hdmi->regmap); +		ret = PTR_ERR(hdmi->regmap); +		goto err_disable_mod_clk;  	}  	ret = sun4i_tmds_create(hdmi); @@ -551,7 +552,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,  		hdmi->ddc_parent_clk = devm_clk_get(dev, "ddc");  		if (IS_ERR(hdmi->ddc_parent_clk)) {  			dev_err(dev, "Couldn't get the HDMI DDC clock\n"); -			return PTR_ERR(hdmi->ddc_parent_clk); +			ret = PTR_ERR(hdmi->ddc_parent_clk); +			goto err_disable_mod_clk;  		}  	} else {  		hdmi->ddc_parent_clk = hdmi->tmds_clk; diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index 832f8f9bc47f..b8da5a50a61d 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -92,6 +92,8 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,  	
DRM_DEBUG_DRIVER("Vertical parameters OK\n"); +	tcon->dclk_min_div = 6; +	tcon->dclk_max_div = 127;  	rounded_rate = clk_round_rate(tcon->dclk, rate);  	if (rounded_rate < rate)  		return MODE_CLOCK_LOW; diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 3c15cf24b503..a818ca491605 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -101,10 +101,13 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,  		return;  	} -	if (enabled) +	if (enabled) {  		clk_prepare_enable(clk); -	else +		clk_rate_exclusive_get(clk); +	} else { +		clk_rate_exclusive_put(clk);  		clk_disable_unprepare(clk); +	}  }  static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon, @@ -335,6 +338,9 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,  	regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,  			   SUN4I_TCON_GCTL_IOMAP_MASK,  			   SUN4I_TCON_GCTL_IOMAP_TCON0); + +	/* Enable the output on the pins */ +	regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0xe0000000);  }  static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, @@ -870,52 +876,56 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,  		return ret;  	} -	/* -	 * This can only be made optional since we've had DT nodes -	 * without the LVDS reset properties. -	 * -	 * If the property is missing, just disable LVDS, and print a -	 * warning. -	 */ -	tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds"); -	if (IS_ERR(tcon->lvds_rst)) { -		dev_err(dev, "Couldn't get our reset line\n"); -		return PTR_ERR(tcon->lvds_rst); -	} else if (tcon->lvds_rst) { -		has_lvds_rst = true; -		reset_control_reset(tcon->lvds_rst); -	} else { -		has_lvds_rst = false; -	} +	if (tcon->quirks->supports_lvds) { +		/* +		 * This can only be made optional since we've had DT +		 * nodes without the LVDS reset properties. 
+		 * +		 * If the property is missing, just disable LVDS, and +		 * print a warning. +		 */ +		tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds"); +		if (IS_ERR(tcon->lvds_rst)) { +			dev_err(dev, "Couldn't get our reset line\n"); +			return PTR_ERR(tcon->lvds_rst); +		} else if (tcon->lvds_rst) { +			has_lvds_rst = true; +			reset_control_reset(tcon->lvds_rst); +		} else { +			has_lvds_rst = false; +		} -	/* -	 * This can only be made optional since we've had DT nodes -	 * without the LVDS reset properties. -	 * -	 * If the property is missing, just disable LVDS, and print a -	 * warning. -	 */ -	if (tcon->quirks->has_lvds_alt) { -		tcon->lvds_pll = devm_clk_get(dev, "lvds-alt"); -		if (IS_ERR(tcon->lvds_pll)) { -			if (PTR_ERR(tcon->lvds_pll) == -ENOENT) { -				has_lvds_alt = false; +		/* +		 * This can only be made optional since we've had DT +		 * nodes without the LVDS reset properties. +		 * +		 * If the property is missing, just disable LVDS, and +		 * print a warning. 
+		 */ +		if (tcon->quirks->has_lvds_alt) { +			tcon->lvds_pll = devm_clk_get(dev, "lvds-alt"); +			if (IS_ERR(tcon->lvds_pll)) { +				if (PTR_ERR(tcon->lvds_pll) == -ENOENT) { +					has_lvds_alt = false; +				} else { +					dev_err(dev, "Couldn't get the LVDS PLL\n"); +					return PTR_ERR(tcon->lvds_pll); +				}  			} else { -				dev_err(dev, "Couldn't get the LVDS PLL\n"); -				return PTR_ERR(tcon->lvds_pll); +				has_lvds_alt = true;  			} -		} else { -			has_lvds_alt = true;  		} -	} -	if (!has_lvds_rst || (tcon->quirks->has_lvds_alt && !has_lvds_alt)) { -		dev_warn(dev, -			 "Missing LVDS properties, Please upgrade your DT\n"); -		dev_warn(dev, "LVDS output disabled\n"); -		can_lvds = false; +		if (!has_lvds_rst || +		    (tcon->quirks->has_lvds_alt && !has_lvds_alt)) { +			dev_warn(dev, "Missing LVDS properties, Please upgrade your DT\n"); +			dev_warn(dev, "LVDS output disabled\n"); +			can_lvds = false; +		} else { +			can_lvds = true; +		}  	} else { -		can_lvds = true; +		can_lvds = false;  	}  	ret = sun4i_tcon_init_clocks(dev, tcon); @@ -1134,7 +1144,7 @@ static const struct sun4i_tcon_quirks sun8i_a33_quirks = {  };  static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = { -	/* nothing is supported */ +	.supports_lvds		= true,  };  static const struct sun4i_tcon_quirks sun8i_v3s_quirks = { diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h index b761c7b823c5..278700c7bf9f 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.h +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h @@ -175,6 +175,7 @@ struct sun4i_tcon_quirks {  	bool	has_channel_1;	/* a33 does not have channel 1 */  	bool	has_lvds_alt;	/* Does the LVDS clock have a parent other than the TCON clock? */  	bool	needs_de_be_mux; /* sun6i needs mux to select backend */ +	bool	supports_lvds;   /* Does the TCON support an LVDS output? 
*/  	/* callback to handle tcon muxing options */  	int	(*set_mux)(struct sun4i_tcon *, const struct drm_encoder *); diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index b8403ed48285..fbffe1948b3b 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1903,8 +1903,12 @@ cleanup:  	if (!IS_ERR(primary))  		drm_plane_cleanup(primary); -	if (group && tegra->domain) { -		iommu_detach_group(tegra->domain, group); +	if (group && dc->domain) { +		if (group == tegra->group) { +			iommu_detach_group(dc->domain, group); +			tegra->group = NULL; +		} +  		dc->domain = NULL;  	} @@ -1913,8 +1917,10 @@ cleanup:  static int tegra_dc_exit(struct host1x_client *client)  { +	struct drm_device *drm = dev_get_drvdata(client->parent);  	struct iommu_group *group = iommu_group_get(client->dev);  	struct tegra_dc *dc = host1x_client_to_dc(client); +	struct tegra_drm *tegra = drm->dev_private;  	int err;  	devm_free_irq(dc->dev, dc->irq, dc); @@ -1926,7 +1932,11 @@ static int tegra_dc_exit(struct host1x_client *client)  	}  	if (group && dc->domain) { -		iommu_detach_group(dc->domain, group); +		if (group == tegra->group) { +			iommu_detach_group(dc->domain, group); +			tegra->group = NULL; +		} +  		dc->domain = NULL;  	} diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index d50bddb2e447..7fcf4a242840 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -250,6 +250,7 @@ static void tegra_drm_unload(struct drm_device *drm)  	drm_kms_helper_poll_fini(drm);  	tegra_drm_fb_exit(drm); +	drm_atomic_helper_shutdown(drm);  	drm_mode_config_cleanup(drm);  	err = host1x_device_exit(device); diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index 4d2ed966f9e3..87c5d89bc9ba 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -1072,7 +1072,6 @@ static int tegra_dsi_exit(struct host1x_client *client)  	struct tegra_dsi *dsi = host1x_client_to_dsi(client);  	
tegra_output_exit(&dsi->output); -	regulator_disable(dsi->vdd);  	return 0;  } diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c index 36a06a993698..94dac79ac3c9 100644 --- a/drivers/gpu/drm/tegra/plane.c +++ b/drivers/gpu/drm/tegra/plane.c @@ -297,6 +297,10 @@ int tegra_plane_format_get_alpha(unsigned int opaque, unsigned int *alpha)  	case WIN_COLOR_DEPTH_B8G8R8X8:  		*alpha = WIN_COLOR_DEPTH_B8G8R8A8;  		return 0; + +	case WIN_COLOR_DEPTH_B5G6R5: +		*alpha = opaque; +		return 0;  	}  	return -EINVAL; @@ -330,9 +334,6 @@ void tegra_plane_check_dependent(struct tegra_plane *tegra,  	unsigned int zpos[2];  	unsigned int i; -	for (i = 0; i < 3; i++) -		state->dependent[i] = false; -  	for (i = 0; i < 2; i++)  		zpos[i] = 0; @@ -346,6 +347,8 @@ void tegra_plane_check_dependent(struct tegra_plane *tegra,  		index = tegra_plane_get_overlap_index(tegra, p); +		state->dependent[index] = false; +  		/*  		 * If any of the other planes is on top of this plane and uses  		 * a format with an alpha component, mark this plane as being diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index b5b335c9b2bb..2ebdc6d5a76e 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -159,10 +159,15 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)  {  	unsigned long start = vma->vm_start;  	unsigned long size = vma->vm_end - vma->vm_start; -	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; +	unsigned long offset;  	unsigned long page, pos; -	if (offset + size > info->fix.smem_len) +	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) +		return -EINVAL; + +	offset = vma->vm_pgoff << PAGE_SHIFT; + +	if (offset > info->fix.smem_len || size > info->fix.smem_len - offset)  		return -EINVAL;  	pos = (unsigned long)info->fix.smem_start + offset; diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 5720a0d4ac0a..677ac16c8a6d 100644 --- 
a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -197,6 +197,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,  	case VIRTGPU_PARAM_3D_FEATURES:  		value = vgdev->has_virgl_3d == true ? 1 : 0;  		break; +	case VIRTGPU_PARAM_CAPSET_QUERY_FIX: +		value = 1; +		break;  	default:  		return -EINVAL;  	} @@ -472,7 +475,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,  {  	struct virtio_gpu_device *vgdev = dev->dev_private;  	struct drm_virtgpu_get_caps *args = data; -	int size; +	unsigned size, host_caps_size;  	int i;  	int found_valid = -1;  	int ret; @@ -481,6 +484,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,  	if (vgdev->num_capsets == 0)  		return -ENOSYS; +	/* don't allow userspace to pass 0 */ +	if (args->size == 0) +		return -EINVAL; +  	spin_lock(&vgdev->display_info_lock);  	for (i = 0; i < vgdev->num_capsets; i++) {  		if (vgdev->capsets[i].id == args->cap_set_id) { @@ -496,11 +503,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,  		return -EINVAL;  	} -	size = vgdev->capsets[found_valid].max_size; -	if (args->size > size) { -		spin_unlock(&vgdev->display_info_lock); -		return -EINVAL; -	} +	host_caps_size = vgdev->capsets[found_valid].max_size; +	/* only copy to user the minimum of the host caps size or the guest caps size */ +	size = min(args->size, host_caps_size);  	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {  		if (cache_ent->id == args->cap_set_id && diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 184340d486c3..86d25f18aa99 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1337,6 +1337,19 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv)   */  void vmw_svga_disable(struct vmw_private *dev_priv)  { +	/* +	 * Disabling SVGA will turn off device modesetting capabilities, so +	 * notify KMS about that so that it doesn't 
cache atomic state that +	 * isn't valid anymore, for example crtcs turned on. +	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex), +	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll +	 * end up with lock order reversal. Thus, a master may actually perform +	 * a new modeset just after we call vmw_kms_lost_device() and race with +	 * vmw_svga_disable(), but that should at worst cause atomic KMS state +	 * to be inconsistent with the device, causing modesetting problems. +	 * +	 */ +	vmw_kms_lost_device(dev_priv->dev);  	ttm_write_lock(&dev_priv->reservation_sem, false);  	spin_lock(&dev_priv->svga_lock);  	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index d08753e8fd94..9116fe8baebc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -938,6 +938,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,  int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,  				struct drm_file *file_priv);  void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv); +void vmw_kms_lost_device(struct drm_device *dev);  int vmw_dumb_create(struct drm_file *file_priv,  		    struct drm_device *dev, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index ead61015cd79..3c824fd7cbf3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -31,7 +31,6 @@  #include <drm/drm_atomic_helper.h>  #include <drm/drm_rect.h> -  /* Might need a hrtimer here? */  #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) @@ -2517,9 +2516,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,   * Helper to be used if an error forces the caller to undo the actions of   * vmw_kms_helper_resource_prepare.   
*/ -void vmw_kms_helper_resource_revert(struct vmw_resource *res) +void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)  { -	vmw_kms_helper_buffer_revert(res->backup); +	struct vmw_resource *res = ctx->res; + +	vmw_kms_helper_buffer_revert(ctx->buf); +	vmw_dmabuf_unreference(&ctx->buf);  	vmw_resource_unreserve(res, false, NULL, 0);  	mutex_unlock(&res->dev_priv->cmdbuf_mutex);  } @@ -2536,10 +2538,14 @@ void vmw_kms_helper_resource_revert(struct vmw_resource *res)   * interrupted by a signal.   */  int vmw_kms_helper_resource_prepare(struct vmw_resource *res, -				    bool interruptible) +				    bool interruptible, +				    struct vmw_validation_ctx *ctx)  {  	int ret = 0; +	ctx->buf = NULL; +	ctx->res = res; +  	if (interruptible)  		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);  	else @@ -2558,6 +2564,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,  						    res->dev_priv->has_mob);  		if (ret)  			goto out_unreserve; + +		ctx->buf = vmw_dmabuf_reference(res->backup);  	}  	ret = vmw_resource_validate(res);  	if (ret) @@ -2565,7 +2573,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,  	return 0;  out_revert: -	vmw_kms_helper_buffer_revert(res->backup); +	vmw_kms_helper_buffer_revert(ctx->buf);  out_unreserve:  	vmw_resource_unreserve(res, false, NULL, 0);  out_unlock: @@ -2581,11 +2589,13 @@ out_unlock:   * @out_fence: Optional pointer to a fence pointer. If non-NULL, a   * ref-counted fence pointer is returned here.   
*/ -void vmw_kms_helper_resource_finish(struct vmw_resource *res, -			     struct vmw_fence_obj **out_fence) +void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, +				    struct vmw_fence_obj **out_fence)  { -	if (res->backup || out_fence) -		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup, +	struct vmw_resource *res = ctx->res; + +	if (ctx->buf || out_fence) +		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,  					     out_fence, NULL);  	vmw_resource_unreserve(res, false, NULL, 0); @@ -2851,3 +2861,14 @@ int vmw_kms_set_config(struct drm_mode_set *set,  	return drm_atomic_helper_set_config(set, ctx);  } + + +/** + * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost + * + * @dev: Pointer to the drm device + */ +void vmw_kms_lost_device(struct drm_device *dev) +{ +	drm_atomic_helper_shutdown(dev); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index cd9da2dd79af..3d2ca280eaa7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -240,6 +240,11 @@ struct vmw_display_unit {  	int set_gui_y;  }; +struct vmw_validation_ctx { +	struct vmw_resource *res; +	struct vmw_dma_buffer *buf; +}; +  #define vmw_crtc_to_du(x) \  	container_of(x, struct vmw_display_unit, crtc)  #define vmw_connector_to_du(x) \ @@ -296,9 +301,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,  				  struct drm_vmw_fence_rep __user *  				  user_fence_rep);  int vmw_kms_helper_resource_prepare(struct vmw_resource *res, -				    bool interruptible); -void vmw_kms_helper_resource_revert(struct vmw_resource *res); -void vmw_kms_helper_resource_finish(struct vmw_resource *res, +				    bool interruptible, +				    struct vmw_validation_ctx *ctx); +void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx); +void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,  				    struct vmw_fence_obj **out_fence);  int 
vmw_kms_readback(struct vmw_private *dev_priv,  		     struct drm_file *file_priv, @@ -439,5 +445,4 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,  int vmw_kms_set_config(struct drm_mode_set *set,  		       struct drm_modeset_acquire_ctx *ctx); -  #endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 63a4cd794b73..3ec9eae831b8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -909,12 +909,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,  	struct vmw_framebuffer_surface *vfbs =  		container_of(framebuffer, typeof(*vfbs), base);  	struct vmw_kms_sou_surface_dirty sdirty; +	struct vmw_validation_ctx ctx;  	int ret;  	if (!srf)  		srf = &vfbs->surface->res; -	ret = vmw_kms_helper_resource_prepare(srf, true); +	ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);  	if (ret)  		return ret; @@ -933,7 +934,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,  	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,  				   dest_x, dest_y, num_clips, inc,  				   &sdirty.base); -	vmw_kms_helper_resource_finish(srf, out_fence); +	vmw_kms_helper_resource_finish(&ctx, out_fence);  	return ret;  } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index b68d74888ab1..6b969e5dea2a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -980,12 +980,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,  	struct vmw_framebuffer_surface *vfbs =  		container_of(framebuffer, typeof(*vfbs), base);  	struct vmw_stdu_dirty sdirty; +	struct vmw_validation_ctx ctx;  	int ret;  	if (!srf)  		srf = &vfbs->surface->res; -	ret = vmw_kms_helper_resource_prepare(srf, true); +	ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);  	if (ret)  		return ret; @@ -1008,7 +1009,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,  				   dest_x, 
dest_y, num_clips, inc,  				   &sdirty.base);  out_finish: -	vmw_kms_helper_resource_finish(srf, out_fence); +	vmw_kms_helper_resource_finish(&ctx, out_fence);  	return ret;  } diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 658fa2d3e40c..48685cddbad1 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -1089,7 +1089,7 @@ static void ipu_irq_handler(struct irq_desc *desc)  {  	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);  	struct irq_chip *chip = irq_desc_get_chip(desc); -	const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14}; +	static const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};  	chained_irq_enter(chip, desc); @@ -1102,7 +1102,7 @@ static void ipu_err_irq_handler(struct irq_desc *desc)  {  	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);  	struct irq_chip *chip = irq_desc_get_chip(desc); -	const int int_reg[] = { 4, 5, 8, 9}; +	static const int int_reg[] = { 4, 5, 8, 9};  	chained_irq_enter(chip, desc); diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c index bb9c087e6c0d..9f2d9ec42add 100644 --- a/drivers/gpu/ipu-v3/ipu-cpmem.c +++ b/drivers/gpu/ipu-v3/ipu-cpmem.c @@ -788,12 +788,14 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)  	case V4L2_PIX_FMT_SGBRG8:  	case V4L2_PIX_FMT_SGRBG8:  	case V4L2_PIX_FMT_SRGGB8: +	case V4L2_PIX_FMT_GREY:  		offset = image->rect.left + image->rect.top * pix->bytesperline;  		break;  	case V4L2_PIX_FMT_SBGGR16:  	case V4L2_PIX_FMT_SGBRG16:  	case V4L2_PIX_FMT_SGRBG16:  	case V4L2_PIX_FMT_SRGGB16: +	case V4L2_PIX_FMT_Y16:  		offset = image->rect.left * 2 +  			 image->rect.top * pix->bytesperline;  		break; diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c index 24e12b87a0cb..caa05b0702e1 100644 --- a/drivers/gpu/ipu-v3/ipu-csi.c +++ b/drivers/gpu/ipu-v3/ipu-csi.c @@ -288,6 +288,7 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 
mbus_code)  	case MEDIA_BUS_FMT_SGBRG10_1X10:  	case MEDIA_BUS_FMT_SGRBG10_1X10:  	case MEDIA_BUS_FMT_SRGGB10_1X10: +	case MEDIA_BUS_FMT_Y10_1X10:  		cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;  		cfg->mipi_dt = MIPI_DT_RAW10;  		cfg->data_width = IPU_CSI_DATA_WIDTH_10; @@ -296,6 +297,7 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)  	case MEDIA_BUS_FMT_SGBRG12_1X12:  	case MEDIA_BUS_FMT_SGRBG12_1X12:  	case MEDIA_BUS_FMT_SRGGB12_1X12: +	case MEDIA_BUS_FMT_Y12_1X12:  		cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;  		cfg->mipi_dt = MIPI_DT_RAW12;  		cfg->data_width = IPU_CSI_DATA_WIDTH_12; diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index f1cec3d70498..0f70e8847540 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c @@ -129,11 +129,14 @@ ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index)  		if (pre_node == pre->dev->of_node) {  			mutex_unlock(&ipu_pre_list_mutex);  			device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE); +			of_node_put(pre_node);  			return pre;  		}  	}  	mutex_unlock(&ipu_pre_list_mutex); +	of_node_put(pre_node); +  	return NULL;  } diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c index 067365c733c6..83f9dd934a5d 100644 --- a/drivers/gpu/ipu-v3/ipu-prg.c +++ b/drivers/gpu/ipu-v3/ipu-prg.c @@ -102,11 +102,14 @@ ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id)  			mutex_unlock(&ipu_prg_list_mutex);  			device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE);  			prg->id = ipu_id; +			of_node_put(prg_node);  			return prg;  		}  	}  	mutex_unlock(&ipu_prg_list_mutex); +	of_node_put(prg_node); +  	return NULL;  } @@ -247,10 +250,14 @@ void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan)  {  	int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);  	struct ipu_prg *prg = ipu_chan->ipu->prg_priv; -	struct ipu_prg_channel *chan = &prg->chan[prg_chan]; +	struct ipu_prg_channel *chan;  	u32 
val; -	if (!chan->enabled || prg_chan < 0) +	if (prg_chan < 0) +		return; + +	chan = &prg->chan[prg_chan]; +	if (!chan->enabled)  		return;  	pm_runtime_get_sync(prg->dev); @@ -277,13 +284,15 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,  {  	int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);  	struct ipu_prg *prg = ipu_chan->ipu->prg_priv; -	struct ipu_prg_channel *chan = &prg->chan[prg_chan]; +	struct ipu_prg_channel *chan;  	u32 val;  	int ret;  	if (prg_chan < 0)  		return prg_chan; +	chan = &prg->chan[prg_chan]; +  	if (chan->enabled) {  		ipu_pre_update(prg->pres[chan->used_pre], *eba);  		return 0; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 43ddcdfbd0da..9454ac134ce2 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -645,6 +645,9 @@  #define USB_DEVICE_ID_LD_MICROCASSYTIME		0x1033  #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE	0x1035  #define USB_DEVICE_ID_LD_MICROCASSYPH		0x1038 +#define USB_DEVICE_ID_LD_POWERANALYSERCASSY	0x1040 +#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY	0x1042 +#define USB_DEVICE_ID_LD_MACHINETESTCASSY	0x1043  #define USB_DEVICE_ID_LD_JWM		0x1080  #define USB_DEVICE_ID_LD_DMMP		0x1081  #define USB_DEVICE_ID_LD_UMIP		0x1090 diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 5f6035a5ce36..e92b77fa574a 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -809,6 +809,9 @@ static const struct hid_device_id hid_ignore_list[] = {  	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) }, +	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) 
},  	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },  	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 50e071444a5c..8699bb969e7e 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -417,13 +417,24 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,  }  EXPORT_SYMBOL_GPL(__hv_pkt_iter_next); +/* How many bytes were read in this iterator cycle */ +static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi, +					u32 start_read_index) +{ +	if (rbi->priv_read_index >= start_read_index) +		return rbi->priv_read_index - start_read_index; +	else +		return rbi->ring_datasize - start_read_index + +			rbi->priv_read_index; +} +  /*   * Update host ring buffer after iterating over packets.   */  void hv_pkt_iter_close(struct vmbus_channel *channel)  {  	struct hv_ring_buffer_info *rbi = &channel->inbound; -	u32 orig_write_sz = hv_get_bytes_to_write(rbi); +	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;  	/*  	 * Make sure all reads are done before we update the read index since @@ -431,8 +442,12 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)  	 * is updated.  	 */  	virt_rmb(); +	start_read_index = rbi->ring_buffer->read_index;  	rbi->ring_buffer->read_index = rbi->priv_read_index; +	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz) +		return; +  	/*  	 * Issue a full memory barrier before making the signaling decision.  	 
* Here is the reason for having this barrier: @@ -446,26 +461,29 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)  	 */  	virt_mb(); -	/* If host has disabled notifications then skip */ -	if (rbi->ring_buffer->interrupt_mask) +	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); +	if (!pending_sz)  		return; -	if (rbi->ring_buffer->feature_bits.feat_pending_send_sz) { -		u32 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); +	/* +	 * Ensure the read of write_index in hv_get_bytes_to_write() +	 * happens after the read of pending_send_sz. +	 */ +	virt_rmb(); +	curr_write_sz = hv_get_bytes_to_write(rbi); +	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index); -		/* -		 * If there was space before we began iteration, -		 * then host was not blocked. Also handles case where -		 * pending_sz is zero then host has nothing pending -		 * and does not need to be signaled. -		 */ -		if (orig_write_sz > pending_sz) -			return; +	/* +	 * If there was space before we began iteration, +	 * then host was not blocked. +	 */ -		/* If pending write will not fit, don't give false hope. */ -		if (hv_get_bytes_to_write(rbi) < pending_sz) -			return; -	} +	if (curr_write_sz - bytes_read > pending_sz) +		return; + +	/* If pending write will not fit, don't give false hope. 
*/ +	if (curr_write_sz <= pending_sz) +		return;  	vmbus_setevent(channel);  } diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 4bdbf77f7197..72c338eb5fae 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -269,13 +269,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)  	for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {  		const struct tjmax_model *tm = &tjmax_model_table[i];  		if (c->x86_model == tm->model && -		    (tm->mask == ANY || c->x86_mask == tm->mask)) +		    (tm->mask == ANY || c->x86_stepping == tm->mask))  			return tm->tjmax;  	}  	/* Early chips have no MSR for TjMax */ -	if (c->x86_model == 0xf && c->x86_mask < 4) +	if (c->x86_model == 0xf && c->x86_stepping < 4)  		usemsr_ee = 0;  	if (c->x86_model > 0xe && usemsr_ee) { @@ -426,7 +426,7 @@ static int chk_ucode_version(unsigned int cpu)  	 * Readings might stop update when processor visited too deep sleep,  	 * fixed for stepping D0 (6EC).  	 */ -	if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) { +	if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) {  		pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");  		return -ENODEV;  	} diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c index ef91b8a67549..84e91286fc4f 100644 --- a/drivers/hwmon/hwmon-vid.c +++ b/drivers/hwmon/hwmon-vid.c @@ -293,7 +293,7 @@ u8 vid_which_vrm(void)  	if (c->x86 < 6)		/* Any CPU with family lower than 6 */  		return 0;	/* doesn't have VID */ -	vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor); +	vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor);  	if (vrm_ret == 134)  		vrm_ret = get_via_model_d_vrm();  	if (vrm_ret == 0) diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 06b4e1c78bd8..051a72eecb24 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -129,7 +129,10 @@ static ssize_t 
temp1_input_show(struct device *dev,  	data->read_tempreg(data->pdev, ®val);  	temp = (regval >> 21) * 125; -	temp -= data->temp_offset; +	if (temp > data->temp_offset) +		temp -= data->temp_offset; +	else +		temp = 0;  	return sprintf(buf, "%u\n", temp);  } @@ -227,7 +230,7 @@ static bool has_erratum_319(struct pci_dev *pdev)  	 * and AM3 formats, but that's the best we can do.  	 */  	return boot_cpu_data.x86_model < 4 || -	       (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2); +	       (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);  }  static int k10temp_probe(struct pci_dev *pdev, diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c index 5a632bcf869b..e59f9113fb93 100644 --- a/drivers/hwmon/k8temp.c +++ b/drivers/hwmon/k8temp.c @@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *pdev,  		return -ENOMEM;  	model = boot_cpu_data.x86_model; -	stepping = boot_cpu_data.x86_mask; +	stepping = boot_cpu_data.x86_stepping;  	/* feature available since SH-C0, exclude older revisions */  	if ((model == 4 && stepping == 0) || diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index a9805c7cb305..e2954fb86d65 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -123,8 +123,10 @@ config I2C_I801  	    Wildcat Point (PCH)  	    Wildcat Point-LP (PCH)  	    BayTrail (SOC) +	    Braswell (SOC)  	    Sunrise Point-H (PCH)  	    Sunrise Point-LP (PCH) +	    Kaby Lake-H (PCH)  	    DNV (SOC)  	    Broxton (SOC)  	    Lewisburg (PCH) diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index cd07a69e2e93..44deae78913e 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -50,6 +50,9 @@  #define BCM2835_I2C_S_CLKT	BIT(9)  #define BCM2835_I2C_S_LEN	BIT(10) /* Fake bit for SW error reporting */ +#define BCM2835_I2C_FEDL_SHIFT	16 +#define BCM2835_I2C_REDL_SHIFT	0 +  #define BCM2835_I2C_CDIV_MIN	0x0002  #define BCM2835_I2C_CDIV_MAX	
0xFFFE @@ -81,7 +84,7 @@ static inline u32 bcm2835_i2c_readl(struct bcm2835_i2c_dev *i2c_dev, u32 reg)  static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev)  { -	u32 divider; +	u32 divider, redl, fedl;  	divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk),  			       i2c_dev->bus_clk_rate); @@ -100,6 +103,22 @@ static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev)  	bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider); +	/* +	 * Number of core clocks to wait after falling edge before +	 * outputting the next data bit.  Note that both FEDL and REDL +	 * can't be greater than CDIV/2. +	 */ +	fedl = max(divider / 16, 1u); + +	/* +	 * Number of core clocks to wait after rising edge before +	 * sampling the next incoming data bit. +	 */ +	redl = max(divider / 4, 1u); + +	bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DEL, +			   (fedl << BCM2835_I2C_FEDL_SHIFT) | +			   (redl << BCM2835_I2C_REDL_SHIFT));  	return 0;  } diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index ae691884d071..05732531829f 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -209,7 +209,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)  	i2c_dw_disable_int(dev);  	/* Enable the adapter */ -	__i2c_dw_enable(dev, true); +	__i2c_dw_enable_and_wait(dev, true);  	/* Clear and enable interrupts */  	dw_readl(dev, DW_IC_CLR_INTR); @@ -644,7 +644,7 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)  	gpio = devm_gpiod_get(dev->dev, "scl", GPIOD_OUT_HIGH);  	if (IS_ERR(gpio)) {  		r = PTR_ERR(gpio); -		if (r == -ENOENT) +		if (r == -ENOENT || r == -ENOSYS)  			return 0;  		return r;  	} diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 8eac00efadc1..692b34125866 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -58,6 +58,7 @@   * Wildcat Point (PCH)		0x8ca2	32	hard	yes	yes	yes   * Wildcat 
Point-LP (PCH)	0x9ca2	32	hard	yes	yes	yes   * BayTrail (SOC)		0x0f12	32	hard	yes	yes	yes + * Braswell (SOC)		0x2292	32	hard	yes	yes	yes   * Sunrise Point-H (PCH) 	0xa123  32	hard	yes	yes	yes   * Sunrise Point-LP (PCH)	0x9d23	32	hard	yes	yes	yes   * DNV (SOC)			0x19df	32	hard	yes	yes	yes diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c index 1d8775799056..d9607905dc2f 100644 --- a/drivers/i2c/busses/i2c-octeon-core.c +++ b/drivers/i2c/busses/i2c-octeon-core.c @@ -233,6 +233,7 @@ static int octeon_i2c_check_status(struct octeon_i2c *i2c, int final_read)  		return -EOPNOTSUPP;  	case STAT_TXDATA_NAK: +	case STAT_BUS_ERROR:  		return -EIO;  	case STAT_TXADDR_NAK:  	case STAT_RXADDR_NAK: diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h index a7ef19855bb8..9bb9f64fdda0 100644 --- a/drivers/i2c/busses/i2c-octeon-core.h +++ b/drivers/i2c/busses/i2c-octeon-core.h @@ -43,7 +43,7 @@  #define TWSI_CTL_AAK		0x04	/* Assert ACK */  /* Status values */ -#define STAT_ERROR		0x00 +#define STAT_BUS_ERROR		0x00  #define STAT_START		0x08  #define STAT_REP_START		0x10  #define STAT_TXADDR_ACK		0x18 diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c index 2fd8b6d00391..87197ece0f90 100644 --- a/drivers/i2c/busses/i2c-sirf.c +++ b/drivers/i2c/busses/i2c-sirf.c @@ -341,7 +341,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)  	platform_set_drvdata(pdev, adap);  	init_completion(&siic->done); -	/* Controller Initalisation */ +	/* Controller initialisation */  	writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL);  	while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET) @@ -369,7 +369,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)  	 * but they start to affect the speed when clock is set to faster  	 * frequencies.  	 
* Through the actual tests, use the different user_div value(which -	 * in the divider formular 'Fio / (Fi2c * user_div)') to adapt +	 * in the divider formula 'Fio / (Fi2c * user_div)') to adapt  	 * the different ranges of i2c bus clock frequency, to make the SCL  	 * more accurate.  	 */ diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 17fd55af4d92..caa20eb5f26b 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -928,7 +928,7 @@ static int exact_lock(dev_t dev, void *data)  {  	struct gendisk *p = data; -	if (!get_disk(p)) +	if (!get_disk_and_module(p))  		return -1;  	return 0;  } diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 6fe995cf16a6..3e6fd5a8ac5b 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -920,6 +920,8 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {  int st_accel_common_probe(struct iio_dev *indio_dev)  {  	struct st_sensor_data *adata = iio_priv(indio_dev); +	struct st_sensors_platform_data *pdata = +		(struct st_sensors_platform_data *)adata->dev->platform_data;  	int irq = adata->get_irq_data_ready(indio_dev);  	int err; @@ -946,7 +948,10 @@ int st_accel_common_probe(struct iio_dev *indio_dev)  					&adata->sensor_settings->fs.fs_avl[0];  	adata->odr = adata->sensor_settings->odr.odr_avl[0].hz; -	err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data); +	if (!pdata) +		pdata = (struct st_sensors_platform_data *)&default_accel_pdata; + +	err = st_sensors_init_sensor(indio_dev, pdata);  	if (err < 0)  		goto st_accel_power_off; diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c index 327a49ba1991..9515ca165dfd 100644 --- a/drivers/iio/adc/aspeed_adc.c +++ b/drivers/iio/adc/aspeed_adc.c @@ -243,7 +243,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)  					 ASPEED_ADC_INIT_POLLING_TIME,  					 ASPEED_ADC_INIT_TIMEOUT);  		if (ret) -			goto scaler_error; +			goto 
poll_timeout_error;  	}  	/* Start all channels in normal mode. */ @@ -274,9 +274,10 @@ iio_register_error:  	writel(ASPEED_OPERATION_MODE_POWER_DOWN,  		data->base + ASPEED_REG_ENGINE_CONTROL);  	clk_disable_unprepare(data->clk_scaler->clk); -reset_error: -	reset_control_assert(data->rst);  clk_enable_error: +poll_timeout_error: +	reset_control_assert(data->rst); +reset_error:  	clk_hw_unregister_divider(data->clk_scaler);  scaler_error:  	clk_hw_unregister_divider(data->clk_prescaler); diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c index 29fa7736d80c..ede955d9b2a4 100644 --- a/drivers/iio/adc/meson_saradc.c +++ b/drivers/iio/adc/meson_saradc.c @@ -462,8 +462,10 @@ static int meson_sar_adc_lock(struct iio_dev *indio_dev)  			regmap_read(priv->regmap, MESON_SAR_ADC_DELAY, &val);  		} while (val & MESON_SAR_ADC_DELAY_BL30_BUSY && timeout--); -		if (timeout < 0) +		if (timeout < 0) { +			mutex_unlock(&indio_dev->mlock);  			return -ETIMEDOUT; +		}  	}  	return 0; diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 7f5def465340..9a2583caedaa 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -722,8 +722,6 @@ static int stm32h7_adc_enable(struct stm32_adc *adc)  	int ret;  	u32 val; -	/* Clear ADRDY by writing one, then enable ADC */ -	stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY);  	stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN);  	/* Poll for ADRDY to be set (after adc startup time) */ @@ -731,8 +729,11 @@ static int stm32h7_adc_enable(struct stm32_adc *adc)  					   val & STM32H7_ADRDY,  					   100, STM32_ADC_TIMEOUT_US);  	if (ret) { -		stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN); +		stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADDIS);  		dev_err(&indio_dev->dev, "Failed to enable ADC\n"); +	} else { +		/* Clear ADRDY by writing one */ +		stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY);  	}  	return ret; diff --git 
a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c index daa026d6a94f..01422d11753c 100644 --- a/drivers/iio/adc/stm32-dfsdm-adc.c +++ b/drivers/iio/adc/stm32-dfsdm-adc.c @@ -54,7 +54,6 @@ struct stm32_dfsdm_adc {  	struct stm32_dfsdm *dfsdm;  	const struct stm32_dfsdm_dev_data *dev_data;  	unsigned int fl_id; -	unsigned int ch_id;  	/* ADC specific */  	unsigned int oversamp; @@ -384,7 +383,7 @@ static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,  {  	struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);  	struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id]; -	struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id]; +	struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel];  	unsigned int sample_freq = adc->sample_freq;  	unsigned int spi_freq;  	int ret; @@ -419,18 +418,20 @@ static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,  	return len;  } -static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc, bool dma) +static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc, +				  const struct iio_chan_spec *chan, +				  bool dma)  {  	struct regmap *regmap = adc->dfsdm->regmap;  	int ret;  	unsigned int dma_en = 0, cont_en = 0; -	ret = stm32_dfsdm_start_channel(adc->dfsdm, adc->ch_id); +	ret = stm32_dfsdm_start_channel(adc->dfsdm, chan->channel);  	if (ret < 0)  		return ret;  	ret = stm32_dfsdm_filter_configure(adc->dfsdm, adc->fl_id, -					   adc->ch_id); +					   chan->channel);  	if (ret < 0)  		goto stop_channels; @@ -464,12 +465,13 @@ stop_channels:  	regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),  			   DFSDM_CR1_RCONT_MASK, 0); -	stm32_dfsdm_stop_channel(adc->dfsdm, adc->fl_id); +	stm32_dfsdm_stop_channel(adc->dfsdm, chan->channel);  	return ret;  } -static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc) +static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc, +				  const struct iio_chan_spec *chan)  {  	struct regmap *regmap = 
adc->dfsdm->regmap; @@ -482,7 +484,7 @@ static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc)  	regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),  			   DFSDM_CR1_RCONT_MASK, 0); -	stm32_dfsdm_stop_channel(adc->dfsdm, adc->ch_id); +	stm32_dfsdm_stop_channel(adc->dfsdm, chan->channel);  }  static int stm32_dfsdm_set_watermark(struct iio_dev *indio_dev, @@ -609,6 +611,7 @@ static int stm32_dfsdm_adc_dma_start(struct iio_dev *indio_dev)  static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)  {  	struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); +	const struct iio_chan_spec *chan = &indio_dev->channels[0];  	int ret;  	/* Reset adc buffer index */ @@ -618,7 +621,7 @@ static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)  	if (ret < 0)  		return ret; -	ret = stm32_dfsdm_start_conv(adc, true); +	ret = stm32_dfsdm_start_conv(adc, chan, true);  	if (ret) {  		dev_err(&indio_dev->dev, "Can't start conversion\n");  		goto stop_dfsdm; @@ -635,7 +638,7 @@ static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)  	return 0;  err_stop_conv: -	stm32_dfsdm_stop_conv(adc); +	stm32_dfsdm_stop_conv(adc, chan);  stop_dfsdm:  	stm32_dfsdm_stop_dfsdm(adc->dfsdm); @@ -645,11 +648,12 @@ stop_dfsdm:  static int stm32_dfsdm_predisable(struct iio_dev *indio_dev)  {  	struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); +	const struct iio_chan_spec *chan = &indio_dev->channels[0];  	if (adc->dma_chan)  		dmaengine_terminate_all(adc->dma_chan); -	stm32_dfsdm_stop_conv(adc); +	stm32_dfsdm_stop_conv(adc, chan);  	stm32_dfsdm_stop_dfsdm(adc->dfsdm); @@ -730,7 +734,7 @@ static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,  	if (ret < 0)  		goto stop_dfsdm; -	ret = stm32_dfsdm_start_conv(adc, false); +	ret = stm32_dfsdm_start_conv(adc, chan, false);  	if (ret < 0) {  		regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),  				   DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0)); @@ -751,7 +755,7 @@ static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,  	
else  		ret = IIO_VAL_INT; -	stm32_dfsdm_stop_conv(adc); +	stm32_dfsdm_stop_conv(adc, chan);  stop_dfsdm:  	stm32_dfsdm_stop_dfsdm(adc->dfsdm); @@ -765,7 +769,7 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,  {  	struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);  	struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id]; -	struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id]; +	struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel];  	unsigned int spi_freq = adc->spi_freq;  	int ret = -EINVAL; @@ -972,7 +976,6 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,  	}  	ch->scan_type.realbits = 24;  	ch->scan_type.storagebits = 32; -	adc->ch_id = ch->channel;  	return stm32_dfsdm_chan_configure(adc->dfsdm,  					  &adc->dfsdm->ch_list[ch->channel]); @@ -1001,7 +1004,7 @@ static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)  	}  	ch->info_mask_separate = BIT(IIO_CHAN_INFO_SAMP_FREQ); -	d_ch = &adc->dfsdm->ch_list[adc->ch_id]; +	d_ch = &adc->dfsdm->ch_list[ch->channel];  	if (d_ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)  		adc->spi_freq = adc->dfsdm->spi_master_freq; @@ -1042,8 +1045,8 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)  		return -ENOMEM;  	for (chan_idx = 0; chan_idx < num_ch; chan_idx++) { -		ch->scan_index = chan_idx; -		ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch); +		ch[chan_idx].scan_index = chan_idx; +		ret = stm32_dfsdm_adc_chan_init_one(indio_dev, &ch[chan_idx]);  		if (ret < 0) {  			dev_err(&indio_dev->dev, "Channels init failed\n");  			return ret; diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c index 6290332cfd3f..e50efdcc41ff 100644 --- a/drivers/iio/adc/stm32-dfsdm-core.c +++ b/drivers/iio/adc/stm32-dfsdm-core.c @@ -83,7 +83,7 @@ int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)  {  	struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);  	struct device *dev = &priv->pdev->dev; -	
unsigned int clk_div = priv->spi_clk_out_div; +	unsigned int clk_div = priv->spi_clk_out_div, clk_src;  	int ret;  	if (atomic_inc_return(&priv->n_active_ch) == 1) { @@ -100,6 +100,14 @@ int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)  			}  		} +		/* select clock source, e.g. 0 for "dfsdm" or 1 for "audio" */ +		clk_src = priv->aclk ? 1 : 0; +		ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0), +					 DFSDM_CHCFGR1_CKOUTSRC_MASK, +					 DFSDM_CHCFGR1_CKOUTSRC(clk_src)); +		if (ret < 0) +			goto disable_aclk; +  		/* Output the SPI CLKOUT (if clk_div == 0 clock if OFF) */  		ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),  					 DFSDM_CHCFGR1_CKOUTDIV_MASK, @@ -274,7 +282,7 @@ static int stm32_dfsdm_probe(struct platform_device *pdev)  	dfsdm->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dfsdm",  						  dfsdm->base, -						  &stm32h7_dfsdm_regmap_cfg); +						  dev_data->regmap_cfg);  	if (IS_ERR(dfsdm->regmap)) {  		ret = PTR_ERR(dfsdm->regmap);  		dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n", diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c index fbe2431f5b81..1ea9f5513b02 100644 --- a/drivers/iio/chemical/ccs811.c +++ b/drivers/iio/chemical/ccs811.c @@ -133,6 +133,9 @@ static int ccs811_start_sensor_application(struct i2c_client *client)  	if (ret < 0)  		return ret; +	if ((ret & CCS811_STATUS_FW_MODE_APPLICATION)) +		return 0; +  	if ((ret & CCS811_STATUS_APP_VALID_MASK) !=  	    CCS811_STATUS_APP_VALID_LOADED)  		return -EIO; diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c index 0dd5a381be64..457372f36791 100644 --- a/drivers/iio/imu/adis_trigger.c +++ b/drivers/iio/imu/adis_trigger.c @@ -46,6 +46,10 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)  	if (adis->trig == NULL)  		return -ENOMEM; +	adis->trig->dev.parent = &adis->spi->dev; +	adis->trig->ops = &adis_trigger_ops; +	iio_trigger_set_drvdata(adis->trig, adis); +  	ret = 
request_irq(adis->spi->irq,  			  &iio_trigger_generic_data_rdy_poll,  			  IRQF_TRIGGER_RISING, @@ -54,9 +58,6 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)  	if (ret)  		goto error_free_trig; -	adis->trig->dev.parent = &adis->spi->dev; -	adis->trig->ops = &adis_trigger_ops; -	iio_trigger_set_drvdata(adis->trig, adis);  	ret = iio_trigger_register(adis->trig);  	indio_dev->trig = iio_trigger_get(adis->trig); diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 79abf70a126d..cd5bfe39591b 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -175,7 +175,7 @@ __poll_t iio_buffer_poll(struct file *filp,  	struct iio_dev *indio_dev = filp->private_data;  	struct iio_buffer *rb = indio_dev->buffer; -	if (!indio_dev->info) +	if (!indio_dev->info || rb == NULL)  		return 0;  	poll_wait(filp, &rb->pollq, wait); diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index 349e5c713c03..4ddb6cf7d401 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -640,7 +640,7 @@ int st_press_common_probe(struct iio_dev *indio_dev)  		       press_data->sensor_settings->drdy_irq.int2.addr))  		pdata =	(struct st_sensors_platform_data *)&default_press_pdata; -	err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data); +	err = st_sensors_init_sensor(indio_dev, pdata);  	if (err < 0)  		goto st_press_power_off; diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig index fcb1c4ba5e41..f726f9427602 100644 --- a/drivers/iio/proximity/Kconfig +++ b/drivers/iio/proximity/Kconfig @@ -68,6 +68,8 @@ config SX9500  config SRF08  	tristate "Devantech SRF02/SRF08/SRF10 ultrasonic ranger sensor" +	select IIO_BUFFER +	select IIO_TRIGGERED_BUFFER  	depends on I2C  	help  	  Say Y here to build a driver for Devantech SRF02/SRF08/SRF10 diff --git a/drivers/infiniband/core/addr.c 
b/drivers/infiniband/core/addr.c index a5b4cf030c11..9183d148d644 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -550,18 +550,13 @@ static int addr_resolve(struct sockaddr *src_in,  		dst_release(dst);  	} -	if (ndev->flags & IFF_LOOPBACK) { -		ret = rdma_translate_ip(dst_in, addr); -		/* -		 * Put the loopback device and get the translated -		 * device instead. -		 */ +	if (ndev) { +		if (ndev->flags & IFF_LOOPBACK) +			ret = rdma_translate_ip(dst_in, addr); +		else +			addr->bound_dev_if = ndev->ifindex;  		dev_put(ndev); -		ndev = dev_get_by_index(addr->net, addr->bound_dev_if); -	} else { -		addr->bound_dev_if = ndev->ifindex;  	} -	dev_put(ndev);  	return ret;  } diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index e66963ca58bd..a5367c5efbe7 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -3069,7 +3069,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,  			continue;  		/* different dest port -> unique */ -		if (!cma_any_port(cur_daddr) && +		if (!cma_any_port(daddr) && +		    !cma_any_port(cur_daddr) &&  		    (dport != cur_dport))  			continue; @@ -3080,7 +3081,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,  			continue;  		/* different dst address -> unique */ -		if (!cma_any_addr(cur_daddr) && +		if (!cma_any_addr(daddr) && +		    !cma_any_addr(cur_daddr) &&  		    cma_addr_cmp(daddr, cur_daddr))  			continue; @@ -3378,13 +3380,13 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)  		}  #endif  	} +	daddr = cma_dst_addr(id_priv); +	daddr->sa_family = addr->sa_family; +  	ret = cma_get_port(id_priv);  	if (ret)  		goto err2; -	daddr = cma_dst_addr(id_priv); -	daddr->sa_family = addr->sa_family; -  	return 0;  err2:  	if (id_priv->cma_dev) @@ -4173,6 +4175,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,  	struct cma_multicast *mc;  	int ret; +	if (!id->device) +		return -EINVAL; 
+  	id_priv = container_of(id, struct rdma_id_private, id);  	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&  	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index c4560d84dfae..25bb178f6074 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -305,16 +305,21 @@ void nldev_exit(void);  static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,  					  struct ib_pd *pd,  					  struct ib_qp_init_attr *attr, -					  struct ib_udata *udata) +					  struct ib_udata *udata, +					  struct ib_uobject *uobj)  {  	struct ib_qp *qp; +	if (!dev->create_qp) +		return ERR_PTR(-EOPNOTSUPP); +  	qp = dev->create_qp(pd, attr, udata);  	if (IS_ERR(qp))  		return qp;  	qp->device = dev;  	qp->pd = pd; +	qp->uobject = uobj;  	/*  	 * We don't track XRC QPs for now, because they don't have PD  	 * and more importantly they are created internaly by driver, diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index bc79ca8215d7..af5ad6a56ae4 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c @@ -17,6 +17,7 @@  /* # of WCs to poll for with a single call to ib_poll_cq */  #define IB_POLL_BATCH			16 +#define IB_POLL_BATCH_DIRECT		8  /* # of WCs to iterate over before yielding */  #define IB_POLL_BUDGET_IRQ		256 @@ -25,18 +26,18 @@  #define IB_POLL_FLAGS \  	(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) -static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc) +static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs, +			   int batch)  {  	int i, n, completed = 0; -	struct ib_wc *wcs = poll_wc ? : cq->wc;  	/*  	 * budget might be (-1) if the caller does not  	 * want to bound this call, thus we need unsigned  	 * minimum here.  	 
*/ -	while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH, -			budget - completed), wcs)) > 0) { +	while ((n = ib_poll_cq(cq, min_t(u32, batch, +					 budget - completed), wcs)) > 0) {  		for (i = 0; i < n; i++) {  			struct ib_wc *wc = &wcs[i]; @@ -48,8 +49,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)  		completed += n; -		if (n != IB_POLL_BATCH || -		    (budget != -1 && completed >= budget)) +		if (n != batch || (budget != -1 && completed >= budget))  			break;  	} @@ -72,9 +72,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)   */  int ib_process_cq_direct(struct ib_cq *cq, int budget)  { -	struct ib_wc wcs[IB_POLL_BATCH]; +	struct ib_wc wcs[IB_POLL_BATCH_DIRECT]; -	return __ib_process_cq(cq, budget, wcs); +	return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);  }  EXPORT_SYMBOL(ib_process_cq_direct); @@ -88,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)  	struct ib_cq *cq = container_of(iop, struct ib_cq, iop);  	int completed; -	completed = __ib_process_cq(cq, budget, NULL); +	completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);  	if (completed < budget) {  		irq_poll_complete(&cq->iop);  		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) @@ -108,7 +108,8 @@ static void ib_cq_poll_work(struct work_struct *work)  	struct ib_cq *cq = container_of(work, struct ib_cq, work);  	int completed; -	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL); +	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc, +				    IB_POLL_BATCH);  	if (completed >= IB_POLL_BUDGET_WORKQUEUE ||  	    ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)  		queue_work(ib_comp_wq, &cq->work); diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index e8010e73a1cf..bb065c9449be 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -536,14 +536,14 @@ int ib_register_device(struct ib_device *device,  	ret = 
device->query_device(device, &device->attrs, &uhw);  	if (ret) {  		pr_warn("Couldn't query the device attributes\n"); -		goto cache_cleanup; +		goto cg_cleanup;  	}  	ret = ib_device_register_sysfs(device, port_callback);  	if (ret) {  		pr_warn("Couldn't register device %s with driver model\n",  			device->name); -		goto cache_cleanup; +		goto cg_cleanup;  	}  	device->reg_state = IB_DEV_REGISTERED; @@ -559,6 +559,8 @@ int ib_register_device(struct ib_device *device,  	mutex_unlock(&device_mutex);  	return 0; +cg_cleanup: +	ib_device_unregister_rdmacg(device);  cache_cleanup:  	ib_cache_cleanup_one(device);  	ib_cache_release_one(device); diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index 85b5ee4defa4..d8eead5d106d 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -141,7 +141,12 @@ static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,  	 */  	uobj->context = context;  	uobj->type = type; -	atomic_set(&uobj->usecnt, 0); +	/* +	 * Allocated objects start out as write locked to deny any other +	 * syscalls from accessing them until they are committed. See +	 * rdma_alloc_commit_uobject +	 */ +	atomic_set(&uobj->usecnt, -1);  	kref_init(&uobj->ref);  	return uobj; @@ -196,7 +201,15 @@ static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *t  		goto free;  	} -	uverbs_uobject_get(uobj); +	/* +	 * The idr_find is guaranteed to return a pointer to something that +	 * isn't freed yet, or NULL, as the free after idr_remove goes through +	 * kfree_rcu(). However the object may still have been released and +	 * kfree() could be called at any time. 
+	 */ +	if (!kref_get_unless_zero(&uobj->ref)) +		uobj = ERR_PTR(-ENOENT); +  free:  	rcu_read_unlock();  	return uobj; @@ -399,13 +412,13 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,  	return ret;  } -static void lockdep_check(struct ib_uobject *uobj, bool exclusive) +static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)  {  #ifdef CONFIG_LOCKDEP  	if (exclusive) -		WARN_ON(atomic_read(&uobj->usecnt) > 0); +		WARN_ON(atomic_read(&uobj->usecnt) != -1);  	else -		WARN_ON(atomic_read(&uobj->usecnt) == -1); +		WARN_ON(atomic_read(&uobj->usecnt) <= 0);  #endif  } @@ -444,7 +457,7 @@ int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)  		WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");  		return 0;  	} -	lockdep_check(uobj, true); +	assert_uverbs_usecnt(uobj, true);  	ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);  	up_read(&ucontext->cleanup_rwsem); @@ -474,16 +487,17 @@ int rdma_explicit_destroy(struct ib_uobject *uobject)  		WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");  		return 0;  	} -	lockdep_check(uobject, true); +	assert_uverbs_usecnt(uobject, true);  	ret = uobject->type->type_class->remove_commit(uobject,  						       RDMA_REMOVE_DESTROY);  	if (ret) -		return ret; +		goto out;  	uobject->type = &null_obj_type; +out:  	up_read(&ucontext->cleanup_rwsem); -	return 0; +	return ret;  }  static void alloc_commit_idr_uobject(struct ib_uobject *uobj) @@ -527,6 +541,10 @@ int rdma_alloc_commit_uobject(struct ib_uobject *uobj)  		return ret;  	} +	/* matches atomic_set(-1) in alloc_uobj */ +	assert_uverbs_usecnt(uobj, true); +	atomic_set(&uobj->usecnt, 0); +  	uobj->type->type_class->alloc_commit(uobj);  	up_read(&uobj->context->cleanup_rwsem); @@ -561,7 +579,7 @@ static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)  void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)  { -	lockdep_check(uobj, 
exclusive); +	assert_uverbs_usecnt(uobj, exclusive);  	uobj->type->type_class->lookup_put(uobj, exclusive);  	/*  	 * In order to unlock an object, either decrease its usecnt for diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c index 857637bf46da..3dbc4e4cca41 100644 --- a/drivers/infiniband/core/restrack.c +++ b/drivers/infiniband/core/restrack.c @@ -7,7 +7,6 @@  #include <rdma/restrack.h>  #include <linux/mutex.h>  #include <linux/sched/task.h> -#include <linux/uaccess.h>  #include <linux/pid_namespace.h>  void rdma_restrack_init(struct rdma_restrack_root *res) @@ -63,7 +62,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)  {  	enum rdma_restrack_type type = res->type;  	struct ib_device *dev; -	struct ib_xrcd *xrcd;  	struct ib_pd *pd;  	struct ib_cq *cq;  	struct ib_qp *qp; @@ -81,10 +79,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)  		qp = container_of(res, struct ib_qp, res);  		dev = qp->device;  		break; -	case RDMA_RESTRACK_XRCD: -		xrcd = container_of(res, struct ib_xrcd, res); -		dev = xrcd->device; -		break;  	default:  		WARN_ONCE(true, "Wrong resource tracking type %u\n", type);  		return NULL; @@ -93,6 +87,21 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)  	return dev;  } +static bool res_is_user(struct rdma_restrack_entry *res) +{ +	switch (res->type) { +	case RDMA_RESTRACK_PD: +		return container_of(res, struct ib_pd, res)->uobject; +	case RDMA_RESTRACK_CQ: +		return container_of(res, struct ib_cq, res)->uobject; +	case RDMA_RESTRACK_QP: +		return container_of(res, struct ib_qp, res)->uobject; +	default: +		WARN_ONCE(true, "Wrong resource tracking type %u\n", res->type); +		return false; +	} +} +  void rdma_restrack_add(struct rdma_restrack_entry *res)  {  	struct ib_device *dev = res_to_dev(res); @@ -100,7 +109,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)  	if (!dev)  		return; -	if (!uaccess_kernel()) { +	if 
(res_is_user(res)) {  		get_task_struct(current);  		res->task = current;  		res->kern_name = NULL; diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 8cf15d4a8ac4..9f029a1ca5ea 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -1291,10 +1291,9 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,  		resolved_dev = dev_get_by_index(dev_addr.net,  						dev_addr.bound_dev_if); -		if (resolved_dev->flags & IFF_LOOPBACK) { -			dev_put(resolved_dev); -			resolved_dev = idev; -			dev_hold(resolved_dev); +		if (!resolved_dev) { +			dev_put(idev); +			return -ENODEV;  		}  		ndev = ib_get_ndev_from_path(rec);  		rcu_read_lock(); diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index f015f1bf88c9..e5a1e7d81326 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -132,7 +132,7 @@ static inline struct ucma_context *_ucma_find_context(int id,  	ctx = idr_find(&ctx_idr, id);  	if (!ctx)  		ctx = ERR_PTR(-ENOENT); -	else if (ctx->file != file) +	else if (ctx->file != file || !ctx->cm_id)  		ctx = ERR_PTR(-EINVAL);  	return ctx;  } @@ -456,6 +456,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,  	struct rdma_ucm_create_id cmd;  	struct rdma_ucm_create_id_resp resp;  	struct ucma_context *ctx; +	struct rdma_cm_id *cm_id;  	enum ib_qp_type qp_type;  	int ret; @@ -476,10 +477,10 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,  		return -ENOMEM;  	ctx->uid = cmd.uid; -	ctx->cm_id = rdma_create_id(current->nsproxy->net_ns, -				    ucma_event_handler, ctx, cmd.ps, qp_type); -	if (IS_ERR(ctx->cm_id)) { -		ret = PTR_ERR(ctx->cm_id); +	cm_id = rdma_create_id(current->nsproxy->net_ns, +			       ucma_event_handler, ctx, cmd.ps, qp_type); +	if (IS_ERR(cm_id)) { +		ret = PTR_ERR(cm_id);  		goto err1;  	} @@ -489,14 +490,19 @@ static ssize_t ucma_create_id(struct 
ucma_file *file, const char __user *inbuf,  		ret = -EFAULT;  		goto err2;  	} + +	ctx->cm_id = cm_id;  	return 0;  err2: -	rdma_destroy_id(ctx->cm_id); +	rdma_destroy_id(cm_id);  err1:  	mutex_lock(&mut);  	idr_remove(&ctx_idr, ctx->id);  	mutex_unlock(&mut); +	mutex_lock(&file->mut); +	list_del(&ctx->list); +	mutex_unlock(&file->mut);  	kfree(ctx);  	return ret;  } @@ -664,19 +670,23 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,  			       int in_len, int out_len)  {  	struct rdma_ucm_resolve_ip cmd; +	struct sockaddr *src, *dst;  	struct ucma_context *ctx;  	int ret;  	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))  		return -EFAULT; +	src = (struct sockaddr *) &cmd.src_addr; +	dst = (struct sockaddr *) &cmd.dst_addr; +	if (!rdma_addr_size(src) || !rdma_addr_size(dst)) +		return -EINVAL; +  	ctx = ucma_get_ctx(file, cmd.id);  	if (IS_ERR(ctx))  		return PTR_ERR(ctx); -	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, -				(struct sockaddr *) &cmd.dst_addr, -				cmd.timeout_ms); +	ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);  	ucma_put_ctx(ctx);  	return ret;  } @@ -1149,6 +1159,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,  	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))  		return -EFAULT; +	if (cmd.qp_state > IB_QPS_ERR) +		return -EINVAL; +  	ctx = ucma_get_ctx(file, cmd.id);  	if (IS_ERR(ctx))  		return PTR_ERR(ctx); @@ -1294,6 +1307,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,  	if (IS_ERR(ctx))  		return PTR_ERR(ctx); +	if (unlikely(cmd.optval > KMALLOC_MAX_SIZE)) +		return -EINVAL; +  	optval = memdup_user((void __user *) (unsigned long) cmd.optval,  			     cmd.optlen);  	if (IS_ERR(optval)) { @@ -1343,7 +1359,7 @@ static ssize_t ucma_process_join(struct ucma_file *file,  		return -ENOSPC;  	addr = (struct sockaddr *) &cmd->addr; -	if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr))) +	if (cmd->addr_size != rdma_addr_size(addr))  		return 
-EINVAL;  	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER) @@ -1411,6 +1427,9 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,  	join_cmd.uid = cmd.uid;  	join_cmd.id = cmd.id;  	join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr); +	if (!join_cmd.addr_size) +		return -EINVAL; +  	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;  	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); @@ -1426,6 +1445,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,  	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))  		return -EFAULT; +	if (!rdma_addr_size((struct sockaddr *)&cmd.addr)) +		return -EINVAL; +  	return ucma_process_join(file, &cmd, out_len);  } diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 256934d1f64f..a148de35df8d 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -562,9 +562,10 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,  	if (f.file)  		fdput(f); +	mutex_unlock(&file->device->xrcd_tree_mutex); +  	uobj_alloc_commit(&obj->uobject); -	mutex_unlock(&file->device->xrcd_tree_mutex);  	return in_len;  err_copy: @@ -603,10 +604,8 @@ ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,  	uobj  = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,  			       file->ucontext); -	if (IS_ERR(uobj)) { -		mutex_unlock(&file->device->xrcd_tree_mutex); +	if (IS_ERR(uobj))  		return PTR_ERR(uobj); -	}  	ret = uobj_remove_commit(uobj);  	return ret ?: in_len; @@ -979,6 +978,9 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,  	struct ib_uverbs_ex_create_cq_resp resp;  	struct ib_cq_init_attr attr = {}; +	if (!ib_dev->create_cq) +		return ERR_PTR(-EOPNOTSUPP); +  	if (cmd->comp_vector >= file->device->num_comp_vectors)  		return ERR_PTR(-EINVAL); @@ -1030,14 +1032,14 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,  	resp.response_length = offsetof(typeof(resp), 
response_length) +  		sizeof(resp.response_length); +	cq->res.type = RDMA_RESTRACK_CQ; +	rdma_restrack_add(&cq->res); +  	ret = cb(file, obj, &resp, ucore, context);  	if (ret)  		goto err_cb;  	uobj_alloc_commit(&obj->uobject); -	cq->res.type = RDMA_RESTRACK_CQ; -	rdma_restrack_add(&cq->res); -  	return obj;  err_cb: @@ -1518,7 +1520,8 @@ static int create_qp(struct ib_uverbs_file *file,  	if (cmd->qp_type == IB_QPT_XRC_TGT)  		qp = ib_create_qp(pd, &attr);  	else -		qp = _ib_create_qp(device, pd, &attr, uhw); +		qp = _ib_create_qp(device, pd, &attr, uhw, +				   &obj->uevent.uobject);  	if (IS_ERR(qp)) {  		ret = PTR_ERR(qp); @@ -1550,8 +1553,10 @@ static int create_qp(struct ib_uverbs_file *file,  			atomic_inc(&attr.srq->usecnt);  		if (ind_tbl)  			atomic_inc(&ind_tbl->usecnt); +	} else { +		/* It is done in _ib_create_qp for other QP types */ +		qp->uobject = &obj->uevent.uobject;  	} -	qp->uobject = &obj->uevent.uobject;  	obj->uevent.uobject.object = qp; @@ -1971,8 +1976,15 @@ static int modify_qp(struct ib_uverbs_file *file,  		goto release_qp;  	} +	if ((cmd->base.attr_mask & IB_QP_AV) && +	    !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) { +		ret = -EINVAL; +		goto release_qp; +	} +  	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) && -	    !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) { +	    (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) || +	    !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {  		ret = -EINVAL;  		goto release_qp;  	} @@ -2941,6 +2953,11 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,  		wq_init_attr.create_flags = cmd.create_flags;  	obj->uevent.events_reported = 0;  	INIT_LIST_HEAD(&obj->uevent.event_list); + +	if (!pd->device->create_wq) { +		err = -EOPNOTSUPP; +		goto err_put_cq; +	}  	wq = pd->device->create_wq(pd, &wq_init_attr, uhw);  	if (IS_ERR(wq)) {  		err = PTR_ERR(wq); @@ -3084,7 +3101,12 @@ int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,  		wq_attr.flags = 
cmd.flags;  		wq_attr.flags_mask = cmd.flags_mask;  	} +	if (!wq->device->modify_wq) { +		ret = -EOPNOTSUPP; +		goto out; +	}  	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw); +out:  	uobj_put_obj_read(wq);  	return ret;  } @@ -3181,6 +3203,11 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,  	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;  	init_attr.ind_tbl = wqs; + +	if (!ib_dev->create_rwq_ind_table) { +		err = -EOPNOTSUPP; +		goto err_uobj; +	}  	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);  	if (IS_ERR(rwq_ind_tbl)) { @@ -3770,6 +3797,9 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,  	struct ib_device_attr attr = {0};  	int err; +	if (!ib_dev->query_device) +		return -EOPNOTSUPP; +  	if (ucore->inlen < sizeof(cmd))  		return -EINVAL; diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index d96dc1d17be1..339b85145044 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c @@ -59,6 +59,9 @@ static int uverbs_process_attr(struct ib_device *ibdev,  			return 0;  	} +	if (test_bit(attr_id, attr_bundle_h->valid_bitmap)) +		return -EINVAL; +  	spec = &attr_spec_bucket->attrs[attr_id];  	e = &elements[attr_id];  	e->uattr = uattr_ptr; diff --git a/drivers/infiniband/core/uverbs_ioctl_merge.c b/drivers/infiniband/core/uverbs_ioctl_merge.c index 062485f9300d..62e1eb1d2a28 100644 --- a/drivers/infiniband/core/uverbs_ioctl_merge.c +++ b/drivers/infiniband/core/uverbs_ioctl_merge.c @@ -114,6 +114,7 @@ static size_t get_elements_above_id(const void **iters,  	short min = SHRT_MAX;  	const void *elem;  	int i, j, last_stored = -1; +	unsigned int equal_min = 0;  	for_each_element(elem, i, j, elements, num_elements, num_offset,  			 data_offset) { @@ -136,6 +137,10 @@ static size_t get_elements_above_id(const void **iters,  		 */  		iters[last_stored == i ? 
num_iters - 1 : num_iters++] = elem;  		last_stored = i; +		if (min == GET_ID(id)) +			equal_min++; +		else +			equal_min = 1;  		min = GET_ID(id);  	} @@ -146,15 +151,10 @@ static size_t get_elements_above_id(const void **iters,  	 * Therefore, we need to clean the beginning of the array to make sure  	 * all ids of final elements are equal to min.  	 */ -	for (i = num_iters - 1; i >= 0 && -	     GET_ID(*(u16 *)(iters[i] + id_offset)) == min; i--) -		; - -	num_iters -= i + 1; -	memmove(iters, iters + i + 1, sizeof(*iters) * num_iters); +	memmove(iters, iters + num_iters - equal_min, sizeof(*iters) * equal_min);  	*min_id = min; -	return num_iters; +	return equal_min;  }  #define find_max_element_entry_id(num_elements, elements, num_objects_fld, \ @@ -322,7 +322,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me  		hash = kzalloc(sizeof(*hash) +  			       ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1),  				     sizeof(long)) + -			       BITS_TO_LONGS(attr_max_bucket) * sizeof(long), +			       BITS_TO_LONGS(attr_max_bucket + 1) * sizeof(long),  			       GFP_KERNEL);  		if (!hash) {  			res = -ENOMEM; @@ -509,7 +509,7 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_  			 * first handler which != NULL. This also defines the  			 * set of flags used for this handler.  			 
*/ -			for (i = num_object_defs - 1; +			for (i = num_method_defs - 1;  			     i >= 0 && !method_defs[i]->handler; i--)  				;  			hash->methods[min_id++] = method; diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 395a3b091229..b1ca223aa380 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -650,12 +650,21 @@ static int verify_command_mask(struct ib_device *ib_dev, __u32 command)  	return -1;  } +static bool verify_command_idx(u32 command, bool extended) +{ +	if (extended) +		return command < ARRAY_SIZE(uverbs_ex_cmd_table); + +	return command < ARRAY_SIZE(uverbs_cmd_table); +} +  static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,  			     size_t count, loff_t *pos)  {  	struct ib_uverbs_file *file = filp->private_data;  	struct ib_device *ib_dev;  	struct ib_uverbs_cmd_hdr hdr; +	bool extended_command;  	__u32 command;  	__u32 flags;  	int srcu_key; @@ -688,6 +697,15 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,  	}  	command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK; +	flags = (hdr.command & +		 IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT; + +	extended_command = flags & IB_USER_VERBS_CMD_FLAG_EXTENDED; +	if (!verify_command_idx(command, extended_command)) { +		ret = -EINVAL; +		goto out; +	} +  	if (verify_command_mask(ib_dev, command)) {  		ret = -EOPNOTSUPP;  		goto out; @@ -699,12 +717,8 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,  		goto out;  	} -	flags = (hdr.command & -		 IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT; -  	if (!flags) { -		if (command >= ARRAY_SIZE(uverbs_cmd_table) || -		    !uverbs_cmd_table[command]) { +		if (!uverbs_cmd_table[command]) {  			ret = -EINVAL;  			goto out;  		} @@ -725,8 +739,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,  		struct ib_udata uhw;  		size_t written_count = 
count; -		if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) || -		    !uverbs_ex_cmd_table[command]) { +		if (!uverbs_ex_cmd_table[command]) {  			ret = -ENOSYS;  			goto out;  		} @@ -942,6 +955,7 @@ static const struct file_operations uverbs_fops = {  	.llseek	 = no_llseek,  #if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS)  	.unlocked_ioctl = ib_uverbs_ioctl, +	.compat_ioctl = ib_uverbs_ioctl,  #endif  }; @@ -954,6 +968,7 @@ static const struct file_operations uverbs_mmap_fops = {  	.llseek	 = no_llseek,  #if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS)  	.unlocked_ioctl = ib_uverbs_ioctl, +	.compat_ioctl = ib_uverbs_ioctl,  #endif  }; diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c index cab0ac3556eb..df1360e6774f 100644 --- a/drivers/infiniband/core/uverbs_std_types.c +++ b/drivers/infiniband/core/uverbs_std_types.c @@ -234,15 +234,18 @@ static void create_udata(struct uverbs_attr_bundle *ctx,  		uverbs_attr_get(ctx, UVERBS_UHW_OUT);  	if (!IS_ERR(uhw_in)) { -		udata->inbuf = uhw_in->ptr_attr.ptr;  		udata->inlen = uhw_in->ptr_attr.len; +		if (uverbs_attr_ptr_is_inline(uhw_in)) +			udata->inbuf = &uhw_in->uattr->data; +		else +			udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data);  	} else {  		udata->inbuf = NULL;  		udata->inlen = 0;  	}  	if (!IS_ERR(uhw_out)) { -		udata->outbuf = uhw_out->ptr_attr.ptr; +		udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data);  		udata->outlen = uhw_out->ptr_attr.len;  	} else {  		udata->outbuf = NULL; @@ -323,7 +326,8 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev,  	cq->res.type = RDMA_RESTRACK_CQ;  	rdma_restrack_add(&cq->res); -	ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe); +	ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe, +			     sizeof(cq->cqe));  	if (ret)  		goto err_cq; @@ -375,7 +379,7 @@ static int uverbs_destroy_cq_handler(struct ib_device *ib_dev,  	resp.comp_events_reported  = obj->comp_events_reported;  	
resp.async_events_reported = obj->async_events_reported; -	return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp); +	return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp, sizeof(resp));  }  static DECLARE_UVERBS_METHOD( diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 16ebc6372c31..93025d2009b8 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -887,7 +887,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,  	if (qp_init_attr->cap.max_rdma_ctxs)  		rdma_rw_init_qp(device, qp_init_attr); -	qp = _ib_create_qp(device, pd, qp_init_attr, NULL); +	qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL);  	if (IS_ERR(qp))  		return qp; @@ -898,7 +898,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,  	}  	qp->real_qp    = qp; -	qp->uobject    = NULL;  	qp->qp_type    = qp_init_attr->qp_type;  	qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl; diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index ca32057e886f..96f76896488d 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -57,8 +57,8 @@  #define BNXT_RE_PAGE_SIZE_8M		BIT(BNXT_RE_PAGE_SHIFT_8M)  #define BNXT_RE_PAGE_SIZE_1G		BIT(BNXT_RE_PAGE_SHIFT_1G) -#define BNXT_RE_MAX_MR_SIZE_LOW		BIT(BNXT_RE_PAGE_SHIFT_1G) -#define BNXT_RE_MAX_MR_SIZE_HIGH	BIT(39) +#define BNXT_RE_MAX_MR_SIZE_LOW		BIT_ULL(BNXT_RE_PAGE_SHIFT_1G) +#define BNXT_RE_MAX_MR_SIZE_HIGH	BIT_ULL(39)  #define BNXT_RE_MAX_MR_SIZE		BNXT_RE_MAX_MR_SIZE_HIGH  #define BNXT_RE_MAX_QPC_COUNT		(64 * 1024) @@ -120,7 +120,6 @@ struct bnxt_re_dev {  #define BNXT_RE_FLAG_HAVE_L2_REF		3  #define BNXT_RE_FLAG_RCFW_CHANNEL_EN		4  #define BNXT_RE_FLAG_QOS_WORK_REG		5 -#define BNXT_RE_FLAG_TASK_IN_PROG		6  #define BNXT_RE_FLAG_ISSUE_ROCE_STATS          29  	struct net_device		*netdev;  	unsigned int			version, major, minor; @@ -158,6 +157,7 @@ struct bnxt_re_dev {  	atomic_t			srq_count;  	atomic_t			mr_count;  	atomic_t		
	mw_count; +	atomic_t			sched_count;  	/* Max of 2 lossless traffic class supported per port */  	u16				cosq[2]; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index ae9e9ff54826..8301d7e5fa8c 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -174,10 +174,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,  	ib_attr->max_pd = dev_attr->max_pd;  	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;  	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom; -	if (dev_attr->is_atomic) { -		ib_attr->atomic_cap = IB_ATOMIC_HCA; -		ib_attr->masked_atomic_cap = IB_ATOMIC_HCA; -	} +	ib_attr->atomic_cap = IB_ATOMIC_NONE; +	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;  	ib_attr->max_ee_rd_atom = 0;  	ib_attr->max_res_rd_atom = 0; @@ -787,20 +785,51 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)  	return 0;  } +unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp) +	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock) +{ +	unsigned long flags; + +	spin_lock_irqsave(&qp->scq->cq_lock, flags); +	if (qp->rcq != qp->scq) +		spin_lock(&qp->rcq->cq_lock); +	else +		__acquire(&qp->rcq->cq_lock); + +	return flags; +} + +void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, +			unsigned long flags) +	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock) +{ +	if (qp->rcq != qp->scq) +		spin_unlock(&qp->rcq->cq_lock); +	else +		__release(&qp->rcq->cq_lock); +	spin_unlock_irqrestore(&qp->scq->cq_lock, flags); +} +  /* Queue Pairs */  int bnxt_re_destroy_qp(struct ib_qp *ib_qp)  {  	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);  	struct bnxt_re_dev *rdev = qp->rdev;  	int rc; +	unsigned int flags;  	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp); -	bnxt_qplib_del_flush_qp(&qp->qplib_qp);  	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);  	if (rc) {  		dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");  		return 
rc;  	} + +	flags = bnxt_re_lock_cqs(qp); +	bnxt_qplib_clean_qp(&qp->qplib_qp); +	bnxt_re_unlock_cqs(qp, flags); +	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp); +  	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {  		rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,  					   &rdev->sqp_ah->qplib_ah); @@ -810,7 +839,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)  			return rc;  		} -		bnxt_qplib_del_flush_qp(&qp->qplib_qp); +		bnxt_qplib_clean_qp(&qp->qplib_qp);  		rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,  					   &rdev->qp1_sqp->qplib_qp);  		if (rc) { @@ -1069,6 +1098,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,  			goto fail;  		}  		qp->qplib_qp.scq = &cq->qplib_cq; +		qp->scq = cq;  	}  	if (qp_init_attr->recv_cq) { @@ -1080,6 +1110,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,  			goto fail;  		}  		qp->qplib_qp.rcq = &cq->qplib_cq; +		qp->rcq = cq;  	}  	if (qp_init_attr->srq) { @@ -1185,7 +1216,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,  		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);  		if (rc) {  			dev_err(rdev_to_dev(rdev), "Failed to create HW QP"); -			goto fail; +			goto free_umem;  		}  	} @@ -1213,6 +1244,13 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,  	return &qp->ib_qp;  qp_destroy:  	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); +free_umem: +	if (udata) { +		if (qp->rumem) +			ib_umem_release(qp->rumem); +		if (qp->sumem) +			ib_umem_release(qp->sumem); +	}  fail:  	kfree(qp);  	return ERR_PTR(rc); @@ -1568,6 +1606,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,  	int status;  	union ib_gid sgid;  	struct ib_gid_attr sgid_attr; +	unsigned int flags;  	u8 nw_type;  	qp->qplib_qp.modify_flags = 0; @@ -1596,14 +1635,18 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,  			dev_dbg(rdev_to_dev(rdev),  				"Move QP = %p to flush list\n",  				qp); +			flags = bnxt_re_lock_cqs(qp);  			
bnxt_qplib_add_flush_qp(&qp->qplib_qp); +			bnxt_re_unlock_cqs(qp, flags);  		}  		if (!qp->sumem &&  		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {  			dev_dbg(rdev_to_dev(rdev),  				"Move QP = %p out of flush list\n",  				qp); -			bnxt_qplib_del_flush_qp(&qp->qplib_qp); +			flags = bnxt_re_lock_cqs(qp); +			bnxt_qplib_clean_qp(&qp->qplib_qp); +			bnxt_re_unlock_cqs(qp, flags);  		}  	}  	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { @@ -2189,10 +2232,13 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,  	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;  	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; +	/* Need unconditional fence for local invalidate +	 * opcode to work as expected. +	 */ +	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; +  	if (wr->send_flags & IB_SEND_SIGNALED)  		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; -	if (wr->send_flags & IB_SEND_FENCE) -		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;  	if (wr->send_flags & IB_SEND_SOLICITED)  		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; @@ -2213,8 +2259,12 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,  	wqe->frmr.levels = qplib_frpl->hwq.level + 1;  	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; -	if (wr->wr.send_flags & IB_SEND_FENCE) -		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; +	/* Need unconditional fence for reg_mr +	 * opcode to function as expected. 
+	 */ + +	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; +  	if (wr->wr.send_flags & IB_SEND_SIGNALED)  		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; @@ -3548,7 +3598,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,  	int umem_pgs, page_shift, rc;  	if (length > BNXT_RE_MAX_MR_SIZE) { -		dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n", +		dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n",  			length, BNXT_RE_MAX_MR_SIZE);  		return ERR_PTR(-ENOMEM);  	} diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index 423ebe012f95..e62b7c2c7da6 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -89,6 +89,8 @@ struct bnxt_re_qp {  	/* QP1 */  	u32			send_psn;  	struct ib_ud_header	qp1_hdr; +	struct bnxt_re_cq	*scq; +	struct bnxt_re_cq	*rcq;  };  struct bnxt_re_cq { @@ -220,4 +222,7 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,  					   struct ib_udata *udata);  int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);  int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); + +unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp); +void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);  #endif /* __BNXT_RE_IB_VERBS_H__ */ diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 508d00a5a106..f6e361750466 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -656,7 +656,6 @@ static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)  	mutex_unlock(&bnxt_re_dev_lock);  	synchronize_rcu(); -	flush_workqueue(bnxt_re_wq);  	ib_dealloc_device(&rdev->ibdev);  	/* rdev is gone */ @@ -731,6 +730,13 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,  					 struct bnxt_re_qp *qp)  {  	struct ib_event event; +	unsigned int flags; + +	if (qp->qplib_qp.state == 
CMDQ_MODIFY_QP_NEW_STATE_ERR) { +		flags = bnxt_re_lock_cqs(qp); +		bnxt_qplib_add_flush_qp(&qp->qplib_qp); +		bnxt_re_unlock_cqs(qp, flags); +	}  	memset(&event, 0, sizeof(event));  	if (qp->qplib_qp.srq) { @@ -1417,9 +1423,12 @@ static void bnxt_re_task(struct work_struct *work)  	switch (re_work->event) {  	case NETDEV_REGISTER:  		rc = bnxt_re_ib_reg(rdev); -		if (rc) +		if (rc) {  			dev_err(rdev_to_dev(rdev),  				"Failed to register with IB: %#x", rc); +			bnxt_re_remove_one(rdev); +			bnxt_re_dev_unreg(rdev); +		}  		break;  	case NETDEV_UP:  		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, @@ -1441,7 +1450,7 @@ static void bnxt_re_task(struct work_struct *work)  		break;  	}  	smp_mb__before_atomic(); -	clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); +	atomic_dec(&rdev->sched_count);  	kfree(re_work);  } @@ -1503,7 +1512,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,  		/* netdev notifier will call NETDEV_UNREGISTER again later since  		 * we are still holding the reference to the netdev  		 */ -		if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags)) +		if (atomic_read(&rdev->sched_count) > 0)  			goto exit;  		bnxt_re_ib_unreg(rdev, false);  		bnxt_re_remove_one(rdev); @@ -1523,7 +1532,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,  			re_work->vlan_dev = (real_dev == netdev ?  					     
NULL : netdev);  			INIT_WORK(&re_work->work, bnxt_re_task); -			set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); +			atomic_inc(&rdev->sched_count);  			queue_work(bnxt_re_wq, &re_work->work);  		}  	} @@ -1578,6 +1587,11 @@ static void __exit bnxt_re_mod_exit(void)  	*/  	list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {  		dev_info(rdev_to_dev(rdev), "Unregistering Device"); +		/* +		 * Flush out any scheduled tasks before destroying the +		 * resources +		 */ +		flush_workqueue(bnxt_re_wq);  		bnxt_re_dev_stop(rdev);  		bnxt_re_ib_unreg(rdev, true);  		bnxt_re_remove_one(rdev); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 1b0e94697fe3..3a78faba8d91 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -88,75 +88,35 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)  	}  } -void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp, -				 unsigned long *flags) -	__acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock) +static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp, +				       unsigned long *flags) +	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)  { -	spin_lock_irqsave(&qp->scq->hwq.lock, *flags); +	spin_lock_irqsave(&qp->scq->flush_lock, *flags);  	if (qp->scq == qp->rcq) -		__acquire(&qp->rcq->hwq.lock); +		__acquire(&qp->rcq->flush_lock);  	else -		spin_lock(&qp->rcq->hwq.lock); +		spin_lock(&qp->rcq->flush_lock);  } -void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp, -				 unsigned long *flags) -	__releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock) +static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp, +				       unsigned long *flags) +	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)  {  	if (qp->scq == qp->rcq) -		__release(&qp->rcq->hwq.lock); +		__release(&qp->rcq->flush_lock);  	else -		
spin_unlock(&qp->rcq->hwq.lock); -	spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags); -} - -static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp, -						      struct bnxt_qplib_cq *cq) -{ -	struct bnxt_qplib_cq *buddy_cq = NULL; - -	if (qp->scq == qp->rcq) -		buddy_cq = NULL; -	else if (qp->scq == cq) -		buddy_cq = qp->rcq; -	else -		buddy_cq = qp->scq; -	return buddy_cq; -} - -static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp, -				     struct bnxt_qplib_cq *cq) -	__acquires(&buddy_cq->hwq.lock) -{ -	struct bnxt_qplib_cq *buddy_cq = NULL; - -	buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq); -	if (!buddy_cq) -		__acquire(&cq->hwq.lock); -	else -		spin_lock(&buddy_cq->hwq.lock); -} - -static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp, -				       struct bnxt_qplib_cq *cq) -	__releases(&buddy_cq->hwq.lock) -{ -	struct bnxt_qplib_cq *buddy_cq = NULL; - -	buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq); -	if (!buddy_cq) -		__release(&cq->hwq.lock); -	else -		spin_unlock(&buddy_cq->hwq.lock); +		spin_unlock(&qp->rcq->flush_lock); +	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);  }  void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)  {  	unsigned long flags; -	bnxt_qplib_acquire_cq_locks(qp, &flags); +	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);  	__bnxt_qplib_add_flush_qp(qp); -	bnxt_qplib_release_cq_locks(qp, &flags); +	bnxt_qplib_release_cq_flush_locks(qp, &flags);  }  static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) @@ -173,11 +133,11 @@ static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)  	}  } -void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) +void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)  {  	unsigned long flags; -	bnxt_qplib_acquire_cq_locks(qp, &flags); +	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);  	__clean_cq(qp->scq, (u64)(unsigned long)qp);  	qp->sq.hwq.prod = 0;  	qp->sq.hwq.cons = 0; @@ -186,7 +146,7 @@ void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)  
	qp->rq.hwq.cons = 0;  	__bnxt_qplib_del_flush_qp(qp); -	bnxt_qplib_release_cq_locks(qp, &flags); +	bnxt_qplib_release_cq_flush_locks(qp, &flags);  }  static void bnxt_qpn_cqn_sched_task(struct work_struct *work) @@ -283,7 +243,7 @@ static void bnxt_qplib_service_nq(unsigned long data)  	u32 sw_cons, raw_cons;  	u16 type;  	int budget = nq->budget; -	u64 q_handle; +	uintptr_t q_handle;  	/* Service the NQ until empty */  	raw_cons = hwq->cons; @@ -566,7 +526,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,  	/* Configure the request */  	req.dpi = cpu_to_le32(srq->dpi->dpi); -	req.srq_handle = cpu_to_le64(srq); +	req.srq_handle = cpu_to_le64((uintptr_t)srq);  	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);  	pbl = &srq->hwq.pbl[PBL_LVL_0]; @@ -1419,7 +1379,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,  	struct bnxt_qplib_rcfw *rcfw = res->rcfw;  	struct cmdq_destroy_qp req;  	struct creq_destroy_qp_resp resp; -	unsigned long flags;  	u16 cmd_flags = 0;  	int rc; @@ -1437,19 +1396,12 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,  		return rc;  	} -	/* Must walk the associated CQs to nullified the QP ptr */ -	spin_lock_irqsave(&qp->scq->hwq.lock, flags); - -	__clean_cq(qp->scq, (u64)(unsigned long)qp); - -	if (qp->rcq && qp->rcq != qp->scq) { -		spin_lock(&qp->rcq->hwq.lock); -		__clean_cq(qp->rcq, (u64)(unsigned long)qp); -		spin_unlock(&qp->rcq->hwq.lock); -	} - -	spin_unlock_irqrestore(&qp->scq->hwq.lock, flags); +	return 0; +} +void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res, +			    struct bnxt_qplib_qp *qp) +{  	bnxt_qplib_free_qp_hdr_buf(res, qp);  	bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);  	kfree(qp->sq.swq); @@ -1462,7 +1414,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,  	if (qp->orrq.max_elements)  		bnxt_qplib_free_hwq(res->pdev, &qp->orrq); -	return 0;  }  void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp, @@ -2116,9 +2067,6 @@ void bnxt_qplib_mark_qp_error(void *qp_handle)  	/* 
Must block new posting of SQ and RQ */  	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;  	bnxt_qplib_cancel_phantom_processing(qp); - -	/* Add qp to flush list of the CQ */ -	__bnxt_qplib_add_flush_qp(qp);  }  /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive) @@ -2294,9 +2242,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,  				sw_sq_cons, cqe->wr_id, cqe->status);  			cqe++;  			(*budget)--; -			bnxt_qplib_lock_buddy_cq(qp, cq);  			bnxt_qplib_mark_qp_error(qp); -			bnxt_qplib_unlock_buddy_cq(qp, cq); +			/* Add qp to flush list of the CQ */ +			bnxt_qplib_add_flush_qp(qp);  		} else {  			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {  				/* Before we complete, do WA 9060 */ @@ -2412,9 +2360,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,  		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {  			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;  			/* Add qp to flush list of the CQ */ -			bnxt_qplib_lock_buddy_cq(qp, cq); -			__bnxt_qplib_add_flush_qp(qp); -			bnxt_qplib_unlock_buddy_cq(qp, cq); +			bnxt_qplib_add_flush_qp(qp);  		}  	} @@ -2498,9 +2444,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,  		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {  			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;  			/* Add qp to flush list of the CQ */ -			bnxt_qplib_lock_buddy_cq(qp, cq); -			__bnxt_qplib_add_flush_qp(qp); -			bnxt_qplib_unlock_buddy_cq(qp, cq); +			bnxt_qplib_add_flush_qp(qp);  		}  	}  done: @@ -2510,11 +2454,9 @@ done:  bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)  {  	struct cq_base *hw_cqe, **hw_cqe_ptr; -	unsigned long flags;  	u32 sw_cons, raw_cons;  	bool rc = true; -	spin_lock_irqsave(&cq->hwq.lock, flags);  	raw_cons = cq->hwq.cons;  	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);  	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; @@ -2522,7 +2464,6 @@ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)  	 /* Check for Valid bit. 
If the CQE is valid, return false */  	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements); -	spin_unlock_irqrestore(&cq->hwq.lock, flags);  	return rc;  } @@ -2611,9 +2552,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,  		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {  			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;  			/* Add qp to flush list of the CQ */ -			bnxt_qplib_lock_buddy_cq(qp, cq); -			__bnxt_qplib_add_flush_qp(qp); -			bnxt_qplib_unlock_buddy_cq(qp, cq); +			bnxt_qplib_add_flush_qp(qp);  		}  	} @@ -2728,9 +2667,7 @@ do_rq:  	 */  	/* Add qp to flush list of the CQ */ -	bnxt_qplib_lock_buddy_cq(qp, cq); -	__bnxt_qplib_add_flush_qp(qp); -	bnxt_qplib_unlock_buddy_cq(qp, cq); +	bnxt_qplib_add_flush_qp(qp);  done:  	return rc;  } @@ -2759,7 +2696,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,  	u32 budget = num_cqes;  	unsigned long flags; -	spin_lock_irqsave(&cq->hwq.lock, flags); +	spin_lock_irqsave(&cq->flush_lock, flags);  	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {  		dev_dbg(&cq->hwq.pdev->dev,  			"QPLIB: FP: Flushing SQ QP= %p", @@ -2773,7 +2710,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,  			qp);  		__flush_rq(&qp->rq, qp, &cqe, &budget);  	} -	spin_unlock_irqrestore(&cq->hwq.lock, flags); +	spin_unlock_irqrestore(&cq->flush_lock, flags);  	return num_cqes - budget;  } @@ -2782,11 +2719,9 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,  		       int num_cqes, struct bnxt_qplib_qp **lib_qp)  {  	struct cq_base *hw_cqe, **hw_cqe_ptr; -	unsigned long flags;  	u32 sw_cons, raw_cons;  	int budget, rc = 0; -	spin_lock_irqsave(&cq->hwq.lock, flags);  	raw_cons = cq->hwq.cons;  	budget = num_cqes; @@ -2862,20 +2797,15 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,  		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);  	}  exit: -	spin_unlock_irqrestore(&cq->hwq.lock, flags);  	return num_cqes - budget;  }  void 
bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)  { -	unsigned long flags; - -	spin_lock_irqsave(&cq->hwq.lock, flags);  	if (arm_type)  		bnxt_qplib_arm_cq(cq, arm_type);  	/* Using cq->arm_state variable to track whether to issue cq handler */  	atomic_set(&cq->arm_state, 1); -	spin_unlock_irqrestore(&cq->hwq.lock, flags);  }  void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index 211b27a8f9e2..ade9f13c0fd1 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -389,6 +389,18 @@ struct bnxt_qplib_cq {  	struct list_head		sqf_head, rqf_head;  	atomic_t			arm_state;  	spinlock_t			compl_lock; /* synch CQ handlers */ +/* Locking Notes: + * QP can move to error state from modify_qp, async error event or error + * CQE as part of poll_cq. When QP is moved to error state, it gets added + * to two flush lists, one each for SQ and RQ. + * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq + * flush_locks should be acquired when QP is moved to error. The control path + * operations(modify_qp and async error events) are synchronized with poll_cq + * using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ. + * The qplib_cq->flush_lock is required to synchronize two instances of poll_cq + * of the same QP while manipulating the flush list. 
+ */ +	spinlock_t			flush_lock; /* QP flush management */  };  #define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE	sizeof(struct xrrq_irrq) @@ -478,6 +490,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);  int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);  int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);  int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); +void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp); +void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res, +			    struct bnxt_qplib_qp *qp);  void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,  				struct bnxt_qplib_sge *sge);  void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp, @@ -500,7 +515,6 @@ void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);  void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);  int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);  void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp); -void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp);  void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,  				 unsigned long *flags);  void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 8329ec6a7946..80027a494730 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -305,9 +305,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,  			err_event->res_err_state_reason);  		if (!qp)  			break; -		bnxt_qplib_acquire_cq_locks(qp, &flags);  		bnxt_qplib_mark_qp_error(qp); -		bnxt_qplib_release_cq_locks(qp, &flags); +		rcfw->aeq_handler(rcfw, qp_event, qp);  		break;  	default:  		/* Command Response */ @@ -460,7 +459,11 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,  	int rc;  	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); - +	/* Supply 
(log-base-2-of-host-page-size - base-page-shift) +	 * to bono to adjust the doorbell page sizes. +	 */ +	req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT - +					   RCFW_DBR_BASE_PAGE_SHIFT);  	/*  	 * VFs need not setup the HW context area, PF  	 * shall setup this area for VF. Skipping the diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 6bee6e3636ea..c7cce2e4185e 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -49,6 +49,7 @@  #define RCFW_COMM_SIZE			0x104  #define RCFW_DBR_PCI_BAR_REGION		2 +#define RCFW_DBR_BASE_PAGE_SHIFT	12  #define RCFW_CMD_PREP(req, CMD, cmd_flags)				\  	do {								\ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index c015c1861351..ee98e5efef84 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -52,18 +52,6 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,  /* Device */ -static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw) -{ -	int rc; -	u16 pcie_ctl2; - -	rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, -				       &pcie_ctl2); -	if (rc) -		return false; -	return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ); -} -  static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,  				     char *fw_ver)  { @@ -151,7 +139,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,  	attr->max_pkey = le32_to_cpu(sb->max_pkeys);  	attr->max_inline_data = le32_to_cpu(sb->max_inline_data); -	attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE; +	attr->l2_db_size = (sb->l2_db_space_size + 1) * +			    (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);  	attr->max_sgid = le32_to_cpu(sb->max_gid);  	bnxt_qplib_query_version(rcfw, attr->fw_ver); @@ -165,7 +154,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,  		attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);  	} -	
attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw); +	attr->is_atomic = 0;  bail:  	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);  	return rc; diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h index 2d7ea096a247..3e5a4f760d0e 100644 --- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h +++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h @@ -1761,7 +1761,30 @@ struct cmdq_initialize_fw {  	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M		   (0x3UL << 4)  	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M		   (0x4UL << 4)  	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G		   (0x5UL << 4) -	__le16 reserved16; +	/* This value is (log-base-2-of-DBR-page-size - 12). +	 * 0 for 4KB. HW supported values are enumerated below. +	 */ +	__le16  log2_dbr_pg_size; +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK	0xfUL +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT		0 +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K	0x0UL +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K	0x1UL +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K	0x2UL +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K	0x3UL +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K	0x4UL +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K	0x5UL +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K	0x6UL +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K	0x7UL +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M	0x8UL +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M	0x9UL +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M	0xaUL +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M	0xbUL +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M	0xcUL +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M	0xdUL +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M	0xeUL +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M	0xfUL +	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST		\ +			CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M  	__le64 qpc_page_dir;  	__le64 mrw_page_dir;  	__le64 srq_page_dir; 
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 9a566ee3ceff..82adc0d1d30e 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -601,6 +601,7 @@ static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct  	wc->dlid_path_bits = 0;  	if (is_eth) { +		wc->slid = 0;  		wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);  		memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);  		memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2); @@ -851,7 +852,6 @@ repoll:  			}  		} -		wc->slid	   = be16_to_cpu(cqe->rlid);  		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);  		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;  		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f; @@ -860,6 +860,7 @@ repoll:  		wc->wc_flags	  |= mlx4_ib_ipoib_csum_ok(cqe->status,  					cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;  		if (is_eth) { +			wc->slid = 0;  			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;  			if (be32_to_cpu(cqe->vlan_my_qpn) &  					MLX4_CQE_CVLAN_PRESENT_MASK) { @@ -871,6 +872,7 @@ repoll:  			memcpy(wc->smac, cqe->smac, ETH_ALEN);  			wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);  		} else { +			wc->slid = be16_to_cpu(cqe->rlid);  			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;  			wc->vlan_id = 0xffff;  		} diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 8d2ee9322f2e..5a0e4fc4785a 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -219,8 +219,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,  			gid_tbl[i].version = 2;  			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))  				gid_tbl[i].type = 1; -			else -				memset(&gid_tbl[i].gid, 0, 12);  		}  	} @@ -366,8 +364,13 @@ static int mlx4_ib_del_gid(struct ib_device *device,  		if (!gids) {  			ret = -ENOMEM;  		} else { -			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) -				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union 
ib_gid)); +			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) { +				memcpy(&gids[i].gid, +				       &port_gid_table->gids[i].gid, +				       sizeof(union ib_gid)); +				gids[i].gid_type = +				    port_gid_table->gids[i].gid_type; +			}  		}  	}  	spin_unlock_bh(&iboe->lock); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 5b974fb97611..15457c9569a7 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -226,7 +226,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,  		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);  		break;  	} -	wc->slid	   = be16_to_cpu(cqe->slid);  	wc->src_qp	   = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;  	wc->dlid_path_bits = cqe->ml_path;  	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; @@ -241,10 +240,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,  	}  	if (ll != IB_LINK_LAYER_ETHERNET) { +		wc->slid = be16_to_cpu(cqe->slid);  		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;  		return;  	} +	wc->slid = 0;  	vlan_present = cqe->l4_l3_hdr_type & 0x1;  	roce_packet_type   = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;  	if (vlan_present) { @@ -1177,7 +1178,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,  	if (ucmd.reserved0 || ucmd.reserved1)  		return -EINVAL; -	umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, +	/* check multiplication overflow */ +	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1) +		return -EINVAL; + +	umem = ib_umem_get(context, ucmd.buf_addr, +			   (size_t)ucmd.cqe_size * entries,  			   IB_ACCESS_LOCAL_WRITE, 1);  	if (IS_ERR(umem)) {  		err = PTR_ERR(umem); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 4236c8086820..da091de4e69d 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -245,12 +245,16 @@ struct mlx5_core_dev 
*mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,  	struct mlx5_ib_multiport_info *mpi;  	struct mlx5_ib_port *port; +	if (!mlx5_core_mp_enabled(ibdev->mdev) || +	    ll != IB_LINK_LAYER_ETHERNET) { +		if (native_port_num) +			*native_port_num = ib_port_num; +		return ibdev->mdev; +	} +  	if (native_port_num)  		*native_port_num = 1; -	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET) -		return ibdev->mdev; -  	port = &ibdev->port[ib_port_num - 1];  	if (!port)  		return NULL; @@ -3263,7 +3267,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)  	struct mlx5_ib_dev *ibdev;  	struct ib_event ibev;  	bool fatal = false; -	u8 port = 0; +	u8 port = (u8)work->param;  	if (mlx5_core_is_mp_slave(work->dev)) {  		ibdev = mlx5_ib_get_ibdev_from_mpi(work->context); @@ -3283,8 +3287,6 @@ static void mlx5_ib_handle_event(struct work_struct *_work)  	case MLX5_DEV_EVENT_PORT_UP:  	case MLX5_DEV_EVENT_PORT_DOWN:  	case MLX5_DEV_EVENT_PORT_INITIALIZED: -		port = (u8)work->param; -  		/* In RoCE, port up/down events are handled in  		 * mlx5_netdev_event().  		 
*/ @@ -3298,24 +3300,19 @@ static void mlx5_ib_handle_event(struct work_struct *_work)  	case MLX5_DEV_EVENT_LID_CHANGE:  		ibev.event = IB_EVENT_LID_CHANGE; -		port = (u8)work->param;  		break;  	case MLX5_DEV_EVENT_PKEY_CHANGE:  		ibev.event = IB_EVENT_PKEY_CHANGE; -		port = (u8)work->param; -  		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);  		break;  	case MLX5_DEV_EVENT_GUID_CHANGE:  		ibev.event = IB_EVENT_GID_CHANGE; -		port = (u8)work->param;  		break;  	case MLX5_DEV_EVENT_CLIENT_REREG:  		ibev.event = IB_EVENT_CLIENT_REREGISTER; -		port = (u8)work->param;  		break;  	case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:  		schedule_work(&ibdev->delay_drop.delay_drop_work); @@ -3327,7 +3324,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)  	ibev.device	      = &ibdev->ib_dev;  	ibev.element.port_num = port; -	if (port < 1 || port > ibdev->num_ports) { +	if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {  		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);  		goto out;  	} @@ -4863,19 +4860,19 @@ static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)  	return ib_register_device(&dev->ib_dev, NULL);  } -static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev) +static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)  { -	ib_unregister_device(&dev->ib_dev); +	destroy_umrc_res(dev);  } -static int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev) +static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)  { -	return create_umr_res(dev); +	ib_unregister_device(&dev->ib_dev);  } -static void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev) +static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)  { -	destroy_umrc_res(dev); +	return create_umr_res(dev);  }  static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev) @@ -4985,12 +4982,15 @@ static const struct mlx5_ib_profile pf_profile = {  	STAGE_CREATE(MLX5_IB_STAGE_BFREG,  		     mlx5_ib_stage_bfrag_init,  		     
mlx5_ib_stage_bfrag_cleanup), +	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR, +		     NULL, +		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),  	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,  		     mlx5_ib_stage_ib_reg_init,  		     mlx5_ib_stage_ib_reg_cleanup), -	STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES, -		     mlx5_ib_stage_umr_res_init, -		     mlx5_ib_stage_umr_res_cleanup), +	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, +		     mlx5_ib_stage_post_ib_reg_umr_init, +		     NULL),  	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,  		     mlx5_ib_stage_delay_drop_init,  		     mlx5_ib_stage_delay_drop_cleanup), diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 139385129973..a5272499b600 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -739,8 +739,9 @@ enum mlx5_ib_stages {  	MLX5_IB_STAGE_CONG_DEBUGFS,  	MLX5_IB_STAGE_UAR,  	MLX5_IB_STAGE_BFREG, +	MLX5_IB_STAGE_PRE_IB_REG_UMR,  	MLX5_IB_STAGE_IB_REG, -	MLX5_IB_STAGE_UMR_RESOURCES, +	MLX5_IB_STAGE_POST_IB_REG_UMR,  	MLX5_IB_STAGE_DELAY_DROP,  	MLX5_IB_STAGE_CLASS_ATTR,  	MLX5_IB_STAGE_MAX, diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 556e015678de..c51c602f06d6 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -838,7 +838,8 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,  	*umem = ib_umem_get(pd->uobject->context, start, length,  			    access_flags, 0);  	err = PTR_ERR_OR_ZERO(*umem); -	if (err < 0) { +	if (err) { +		*umem = NULL;  		mlx5_ib_err(dev, "umem get failed (%d)\n", err);  		return err;  	} @@ -1415,6 +1416,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,  		if (err) {  			mlx5_ib_warn(dev, "Failed to rereg UMR\n");  			ib_umem_release(mr->umem); +			mr->umem = NULL;  			clean_mr(dev, mr);  			return err;  		} @@ -1498,14 +1500,11 @@ static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)  		u32 key = 
mr->mmkey.key;  		err = destroy_mkey(dev, mr); -		kfree(mr);  		if (err) {  			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",  				     key, err);  			return err;  		} -	} else { -		mlx5_mr_cache_free(dev, mr);  	}  	return 0; @@ -1548,6 +1547,11 @@ static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)  		atomic_sub(npages, &dev->mdev->priv.reg_pages);  	} +	if (!mr->allocated_from_cache) +		kfree(mr); +	else +		mlx5_mr_cache_free(dev, mr); +  	return 0;  } @@ -1816,7 +1820,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,  	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;  	mr->ibmr.length = 0; -	mr->ndescs = sg_nents;  	for_each_sg(sgl, sg, sg_nents, i) {  		if (unlikely(i >= mr->max_descs)) @@ -1828,6 +1831,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,  		sg_offset = 0;  	} +	mr->ndescs = i;  	if (sg_offset_p)  		*sg_offset_p = sg_offset; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 39d24bf694a8..a2e1aa86e133 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1161,7 +1161,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,  	ib_umem_release(sq->ubuffer.umem);  } -static int get_rq_pas_size(void *qpc) +static size_t get_rq_pas_size(void *qpc)  {  	u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;  	u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride); @@ -1177,7 +1177,8 @@ static int get_rq_pas_size(void *qpc)  }  static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, -				   struct mlx5_ib_rq *rq, void *qpin) +				   struct mlx5_ib_rq *rq, void *qpin, +				   size_t qpinlen)  {  	struct mlx5_ib_qp *mqp = rq->base.container_mibqp;  	__be64 *pas; @@ -1186,9 +1187,12 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,  	void *rqc;  	void *wq;  	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc); -	int inlen; +	size_t rq_pas_size = get_rq_pas_size(qpc); +	size_t inlen;  	int err; -	u32 rq_pas_size = get_rq_pas_size(qpc); + +	if 
(qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas)) +		return -EINVAL;  	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;  	in = kvzalloc(inlen, GFP_KERNEL); @@ -1277,7 +1281,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,  }  static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, -				u32 *in, +				u32 *in, size_t inlen,  				struct ib_pd *pd)  {  	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; @@ -1309,7 +1313,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,  			rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;  		if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)  			rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING; -		err = create_raw_packet_qp_rq(dev, rq, in); +		err = create_raw_packet_qp_rq(dev, rq, in, inlen);  		if (err)  			goto err_destroy_sq; @@ -1584,6 +1588,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,  	u32 uidx = MLX5_IB_DEFAULT_UIDX;  	struct mlx5_ib_create_qp ucmd;  	struct mlx5_ib_qp_base *base; +	int mlx5_st;  	void *qpc;  	u32 *in;  	int err; @@ -1592,6 +1597,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,  	spin_lock_init(&qp->sq.lock);  	spin_lock_init(&qp->rq.lock); +	mlx5_st = to_mlx5_st(init_attr->qp_type); +	if (mlx5_st < 0) +		return -EINVAL; +  	if (init_attr->rwq_ind_tbl) {  		if (!udata)  			return -ENOSYS; @@ -1753,7 +1762,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,  	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); -	MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type)); +	MLX5_SET(qpc, qpc, st, mlx5_st);  	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);  	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) @@ -1867,11 +1876,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,  		}  	} +	if (inlen < 0) { +		err = -EINVAL; +		goto err; +	} +  	if (init_attr->qp_type == IB_QPT_RAW_PACKET ||  	    qp->flags & MLX5_IB_QP_UNDERLAY) {  
		qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;  		raw_packet_qp_copy_info(qp, &qp->raw_packet_qp); -		err = create_raw_packet_qp(dev, qp, in, pd); +		err = create_raw_packet_qp(dev, qp, in, inlen, pd);  	} else {  		err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);  	} @@ -3095,8 +3109,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,  		goto out;  	if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || -	    !optab[mlx5_cur][mlx5_new]) +	    !optab[mlx5_cur][mlx5_new]) { +		err = -EINVAL;  		goto out; +	}  	op = optab[mlx5_cur][mlx5_new];  	optpar = ib_mask_to_mlx5_opt(attr_mask); diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 6d5fadad9090..3c7522d025f2 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -241,8 +241,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,  {  	struct mlx5_ib_dev *dev = to_mdev(pd->device);  	struct mlx5_ib_srq *srq; -	int desc_size; -	int buf_size; +	size_t desc_size; +	size_t buf_size;  	int err;  	struct mlx5_srq_attr in = {0};  	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); @@ -266,15 +266,18 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,  	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +  		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); +	if (desc_size == 0 || srq->msrq.max_gs > desc_size) +		return ERR_PTR(-EINVAL);  	desc_size = roundup_pow_of_two(desc_size); -	desc_size = max_t(int, 32, desc_size); +	desc_size = max_t(size_t, 32, desc_size); +	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) +		return ERR_PTR(-EINVAL);  	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /  		sizeof(struct mlx5_wqe_data_seg);  	srq->msrq.wqe_shift = ilog2(desc_size);  	buf_size = srq->msrq.max * desc_size; -	mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n", -		    desc_size, 
init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, -		    srq->msrq.max_avail_gather); +	if (buf_size < desc_size) +		return ERR_PTR(-EINVAL);  	in.type = init_attr->srq_type;  	if (pd->uobject) diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c index 478b7317b80a..26dc374787f7 100644 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c @@ -458,8 +458,7 @@ qedr_addr6_resolve(struct qedr_dev *dev,  		}  		return -EINVAL;  	} -	neigh = dst_neigh_lookup(dst, &dst_in); - +	neigh = dst_neigh_lookup(dst, &fl6.daddr);  	if (neigh) {  		rcu_read_lock();  		if (neigh->nud_state & NUD_VALID) { @@ -494,10 +493,14 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)  	qp = idr_find(&dev->qpidr, conn_param->qpn); -	laddr = (struct sockaddr_in *)&cm_id->local_addr; -	raddr = (struct sockaddr_in *)&cm_id->remote_addr; -	laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr; -	raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr; +	laddr = (struct sockaddr_in *)&cm_id->m_local_addr; +	raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; +	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; +	raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; + +	DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n", +		 ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port), +		 ntohs(raddr->sin_port));  	DP_DEBUG(dev, QEDR_MSG_IWARP,  		 "Connect source address: %pISpc, remote address: %pISpc\n", @@ -599,8 +602,8 @@ int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)  	int rc;  	int i; -	laddr = (struct sockaddr_in *)&cm_id->local_addr; -	laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr; +	laddr = (struct sockaddr_in *)&cm_id->m_local_addr; +	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;  	DP_DEBUG(dev, QEDR_MSG_IWARP,  		 "Create Listener address: %pISpc\n", &cm_id->local_addr); diff --git a/drivers/infiniband/hw/qedr/verbs.c 
b/drivers/infiniband/hw/qedr/verbs.c index 53f00dbf313f..875b17272d65 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3034,6 +3034,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,  	switch (wr->opcode) {  	case IB_WR_SEND_WITH_IMM: +		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) { +			rc = -EINVAL; +			*bad_wr = wr; +			break; +		}  		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;  		swqe = (struct rdma_sq_send_wqe_1st *)wqe;  		swqe->wqe_size = 2; @@ -3075,6 +3080,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,  		break;  	case IB_WR_RDMA_WRITE_WITH_IMM: +		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) { +			rc = -EINVAL; +			*bad_wr = wr; +			break; +		}  		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;  		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; @@ -3724,7 +3734,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)  {  	struct qedr_dev *dev = get_qedr_dev(ibcq->device);  	struct qedr_cq *cq = get_qedr_cq(ibcq); -	union rdma_cqe *cqe = cq->latest_cqe; +	union rdma_cqe *cqe;  	u32 old_cons, new_cons;  	unsigned long flags;  	int update = 0; @@ -3741,6 +3751,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)  		return qedr_gsi_poll_cq(ibcq, num_entries, wc);  	spin_lock_irqsave(&cq->cq_lock, flags); +	cqe = cq->latest_cqe;  	old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);  	while (num_entries && is_valid_cqe(cq, cqe)) {  		struct qedr_qp *qp; diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c index faa9478c14a6..f95b97646c25 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c @@ -114,6 +114,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,  	union pvrdma_cmd_resp rsp;  	struct pvrdma_cmd_create_cq *cmd = &req.create_cq;  	struct pvrdma_cmd_create_cq_resp *resp = 
&rsp.create_cq_resp; +	struct pvrdma_create_cq_resp cq_resp = {0};  	struct pvrdma_create_cq ucmd;  	BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64); @@ -197,6 +198,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,  	cq->ibcq.cqe = resp->cqe;  	cq->cq_handle = resp->cq_handle; +	cq_resp.cqn = resp->cq_handle;  	spin_lock_irqsave(&dev->cq_tbl_lock, flags);  	dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;  	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags); @@ -205,7 +207,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,  		cq->uar = &(to_vucontext(context)->uar);  		/* Copy udata back. */ -		if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) { +		if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {  			dev_warn(&dev->pdev->dev,  				 "failed to copy back udata\n");  			pvrdma_destroy_cq(&cq->ibcq); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c index 5acebb1ef631..af235967a9c2 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c @@ -113,6 +113,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,  	union pvrdma_cmd_resp rsp;  	struct pvrdma_cmd_create_srq *cmd = &req.create_srq;  	struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp; +	struct pvrdma_create_srq_resp srq_resp = {0};  	struct pvrdma_create_srq ucmd;  	unsigned long flags;  	int ret; @@ -204,12 +205,13 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,  	}  	srq->srq_handle = resp->srqn; +	srq_resp.srqn = resp->srqn;  	spin_lock_irqsave(&dev->srq_tbl_lock, flags);  	dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;  	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);  	/* Copy udata back. 
*/ -	if (ib_copy_to_udata(udata, &srq->srq_handle, sizeof(__u32))) { +	if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {  		dev_warn(&dev->pdev->dev, "failed to copy back udata\n");  		pvrdma_destroy_srq(&srq->ibsrq);  		return ERR_PTR(-EINVAL); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c index 16b96616ef7e..a51463cd2f37 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c @@ -447,6 +447,7 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,  	union pvrdma_cmd_resp rsp;  	struct pvrdma_cmd_create_pd *cmd = &req.create_pd;  	struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp; +	struct pvrdma_alloc_pd_resp pd_resp = {0};  	int ret;  	void *ptr; @@ -475,9 +476,10 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,  	pd->privileged = !context;  	pd->pd_handle = resp->pd_handle;  	pd->pdn = resp->pd_handle; +	pd_resp.pdn = resp->pd_handle;  	if (context) { -		if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) { +		if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {  			dev_warn(&dev->pdev->dev,  				 "failed to copy back protection domain\n");  			pvrdma_dealloc_pd(&pd->ibpd); diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index 1b2e5362a3ff..cc429b567d0a 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c @@ -489,11 +489,13 @@ static int rvt_check_refs(struct rvt_mregion *mr, const char *t)  	unsigned long timeout;  	struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device); -	if (percpu_ref_is_zero(&mr->refcount)) -		return 0; -	/* avoid dma mr */ -	if (mr->lkey) +	if (mr->lkey) { +		/* avoid dma mr */  		rvt_dereg_clean_qps(mr); +		/* @mr was indexed on rcu protected @lkey_table */ +		synchronize_rcu(); +	} +  	timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ);  	if (!timeout) {  		rvt_pr_err(rdi, diff --git 
a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c index 11f74cbe6660..ea302b054601 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c @@ -281,8 +281,6 @@ void ipoib_delete_debug_files(struct net_device *dev)  {  	struct ipoib_dev_priv *priv = ipoib_priv(dev); -	WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n"); -	WARN_ONCE(!priv->path_dentry, "null path debug file\n");  	debugfs_remove(priv->mcg_dentry);  	debugfs_remove(priv->path_dentry);  	priv->mcg_dentry = priv->path_dentry = NULL; diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index 1f316d66e6f7..41614c185918 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c @@ -218,8 +218,10 @@ static void matrix_keypad_stop(struct input_dev *dev)  {  	struct matrix_keypad *keypad = input_get_drvdata(dev); +	spin_lock_irq(&keypad->lock);  	keypad->stopped = true; -	mb(); +	spin_unlock_irq(&keypad->lock); +  	flush_work(&keypad->work.work);  	/*  	 * matrix_keypad_scan() will leave IRQs enabled; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 3d2e23a0ae39..a246fc686bb7 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -173,7 +173,6 @@ static const char * const smbus_pnp_ids[] = {  	"LEN0046", /* X250 */  	"LEN004a", /* W541 */  	"LEN200f", /* T450s */ -	"LEN2018", /* T460p */  	NULL  }; diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index db4f6bb502e3..a5ab774da4cc 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c @@ -1,11 +1,8 @@ -/* - * Copyright (C) 2012 Samsung Electronics Co.Ltd - * Author: Joonyoung Shim <jy0922.shim@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the 
Free Software Foundation. - */ +// SPDX-License-Identifier: GPL-2.0 +// Melfas MMS114/MMS152 touchscreen device driver +// +// Copyright (c) 2012 Samsung Electronics Co., Ltd. +// Author: Joonyoung Shim <jy0922.shim@samsung.com>  #include <linux/module.h>  #include <linux/delay.h> @@ -624,4 +621,4 @@ module_i2c_driver(mms114_driver);  /* Module information */  MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");  MODULE_DESCRIPTION("MELFAS mms114 Touchscreen driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 35a408d0ae4f..99bc9bd64b9e 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -205,7 +205,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d  			 * for example, an "address" value of 0x12345f000 will  			 * flush from 0x123440000 to 0x12347ffff (256KiB). */  			unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT); -			unsigned long mask = __rounddown_pow_of_two(address ^ last);; +			unsigned long mask = __rounddown_pow_of_two(address ^ last);  			desc.high = QI_DEV_EIOTLB_ADDR((address & ~mask) | (mask - 1)) | QI_DEV_EIOTLB_SIZE;  		} else { diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c index 55cfb986225b..faf734ff4cf3 100644 --- a/drivers/irqchip/irq-bcm7038-l1.c +++ b/drivers/irqchip/irq-bcm7038-l1.c @@ -339,9 +339,6 @@ int __init bcm7038_l1_of_init(struct device_node *dn,  		goto out_unmap;  	} -	pr_info("registered BCM7038 L1 intc (mem: 0x%p, IRQs: %d)\n", -		intc->cpus[0]->map_base, IRQS_PER_WORD * intc->n_words); -  	return 0;  out_unmap: diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c index 983640eba418..8968e5e93fcb 100644 --- a/drivers/irqchip/irq-bcm7120-l2.c +++ b/drivers/irqchip/irq-bcm7120-l2.c @@ -318,9 +318,6 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,  		}  	} -	pr_info("registered %s 
intc (mem: 0x%p, parent IRQ(s): %d)\n", -			intc_name, data->map_base[0], data->num_parent_irqs); -  	return 0;  out_free_domain: diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index 691d20eb0bec..0e65f609352e 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -262,9 +262,6 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,  		ct->chip.irq_set_wake = irq_gc_set_wake;  	} -	pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n", -			base, parent_irq); -  	return 0;  out_free_domain: diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index 993a8426a453..1ff38aff9f29 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -94,7 +94,7 @@ static struct irq_chip gicv2m_msi_irq_chip = {  static struct msi_domain_info gicv2m_msi_domain_info = {  	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | -		   MSI_FLAG_PCI_MSIX), +		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),  	.chip	= &gicv2m_msi_irq_chip,  }; @@ -155,18 +155,12 @@ static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,  	return 0;  } -static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq) +static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq, +			       int nr_irqs)  { -	int pos; - -	pos = hwirq - v2m->spi_start; -	if (pos < 0 || pos >= v2m->nr_spis) { -		pr_err("Failed to teardown msi. 
Invalid hwirq %d\n", hwirq); -		return; -	} -  	spin_lock(&v2m_lock); -	__clear_bit(pos, v2m->bm); +	bitmap_release_region(v2m->bm, hwirq - v2m->spi_start, +			      get_count_order(nr_irqs));  	spin_unlock(&v2m_lock);  } @@ -174,13 +168,13 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,  				   unsigned int nr_irqs, void *args)  {  	struct v2m_data *v2m = NULL, *tmp; -	int hwirq, offset, err = 0; +	int hwirq, offset, i, err = 0;  	spin_lock(&v2m_lock);  	list_for_each_entry(tmp, &v2m_nodes, entry) { -		offset = find_first_zero_bit(tmp->bm, tmp->nr_spis); -		if (offset < tmp->nr_spis) { -			__set_bit(offset, tmp->bm); +		offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis, +						 get_count_order(nr_irqs)); +		if (offset >= 0) {  			v2m = tmp;  			break;  		} @@ -192,16 +186,21 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,  	hwirq = v2m->spi_start + offset; -	err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq); -	if (err) { -		gicv2m_unalloc_msi(v2m, hwirq); -		return err; -	} +	for (i = 0; i < nr_irqs; i++) { +		err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i); +		if (err) +			goto fail; -	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, -				      &gicv2m_irq_chip, v2m); +		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, +					      &gicv2m_irq_chip, v2m); +	}  	return 0; + +fail: +	irq_domain_free_irqs_parent(domain, virq, nr_irqs); +	gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs)); +	return err;  }  static void gicv2m_irq_domain_free(struct irq_domain *domain, @@ -210,8 +209,7 @@ static void gicv2m_irq_domain_free(struct irq_domain *domain,  	struct irq_data *d = irq_domain_get_irq_data(domain, virq);  	struct v2m_data *v2m = irq_data_get_irq_chip_data(d); -	BUG_ON(nr_irqs != 1); -	gicv2m_unalloc_msi(v2m, d->hwirq); +	gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs);  	irq_domain_free_irqs_parent(domain, virq, nr_irqs);  } diff --git 
a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c index 14a8c0a7e095..25a98de5cfb2 100644 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c @@ -132,6 +132,8 @@ static int __init its_pci_of_msi_init(void)  	for (np = of_find_matching_node(NULL, its_device_id); np;  	     np = of_find_matching_node(np, its_device_id)) { +		if (!of_device_is_available(np)) +			continue;  		if (!of_property_read_bool(np, "msi-controller"))  			continue; diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c index 833a90fe33ae..8881a053c173 100644 --- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c @@ -154,6 +154,8 @@ static void __init its_pmsi_of_init(void)  	for (np = of_find_matching_node(NULL, its_device_id); np;  	     np = of_find_matching_node(np, its_device_id)) { +		if (!of_device_is_available(np)) +			continue;  		if (!of_property_read_bool(np, "msi-controller"))  			continue; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 06f025fd5726..2cbb19cddbf8 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1412,7 +1412,7 @@ static struct irq_chip its_irq_chip = {   * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.   */  #define IRQS_PER_CHUNK_SHIFT	5 -#define IRQS_PER_CHUNK		(1 << IRQS_PER_CHUNK_SHIFT) +#define IRQS_PER_CHUNK		(1UL << IRQS_PER_CHUNK_SHIFT)  #define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */  static unsigned long *lpi_bitmap; @@ -2119,11 +2119,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,  	dev = kzalloc(sizeof(*dev), GFP_KERNEL);  	/* -	 * At least one bit of EventID is being used, hence a minimum -	 * of two entries. No, the architecture doesn't let you -	 * express an ITT with a single entry. 
+	 * We allocate at least one chunk worth of LPIs per device, +	 * and thus that many ITEs. The device may require less though.  	 */ -	nr_ites = max(2UL, roundup_pow_of_two(nvecs));  	sz = nr_ites * its->ite_size;  	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;  	itt = kzalloc(sz, GFP_KERNEL); @@ -2495,7 +2494,7 @@ static int its_vpe_set_affinity(struct irq_data *d,  static void its_vpe_schedule(struct its_vpe *vpe)  { -	void * __iomem vlpi_base = gic_data_rdist_vlpi_base(); +	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();  	u64 val;  	/* Schedule the VPE */ @@ -2527,7 +2526,7 @@ static void its_vpe_schedule(struct its_vpe *vpe)  static void its_vpe_deschedule(struct its_vpe *vpe)  { -	void * __iomem vlpi_base = gic_data_rdist_vlpi_base(); +	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();  	u32 count = 1000000;	/* 1s! */  	bool clean;  	u64 val; @@ -3314,6 +3313,8 @@ static int __init its_of_probe(struct device_node *node)  	for (np = of_find_matching_node(node, its_device_id); np;  	     np = of_find_matching_node(np, its_device_id)) { +		if (!of_device_is_available(np)) +			continue;  		if (!of_property_read_bool(np, "msi-controller")) {  			pr_warn("%pOF: no msi-controller property, ITS ignored\n",  				np); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index a57c0fbbd34a..d99cc07903ec 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -673,7 +673,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)  	       MPIDR_TO_SGI_RS(cluster_id)		|  	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT); -	pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); +	pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);  	gic_write_sgi1r(val);  } @@ -688,7 +688,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)  	 * Ensure that stores to Normal memory are visible to the  	 * other 
CPUs before issuing the IPI.  	 */ -	smp_wmb(); +	wmb();  	for_each_cpu(cpu, mask) {  		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c index 675eda5ff2b8..4760307ab43f 100644 --- a/drivers/irqchip/irq-imx-gpcv2.c +++ b/drivers/irqchip/irq-imx-gpcv2.c @@ -28,20 +28,6 @@ struct gpcv2_irqchip_data {  static struct gpcv2_irqchip_data *imx_gpcv2_instance; -/* - * Interface for the low level wakeup code. - */ -u32 imx_gpcv2_get_wakeup_source(u32 **sources) -{ -	if (!imx_gpcv2_instance) -		return 0; - -	if (sources) -		*sources = imx_gpcv2_instance->wakeup_sources; - -	return IMR_NUM; -} -  static int gpcv2_wakeup_source_save(void)  {  	struct gpcv2_irqchip_data *cd; diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index ef92a4d2038e..d32268cc1174 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -424,8 +424,6 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,  	spin_lock_irqsave(&gic_lock, flags);  	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);  	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); -	gic_clear_pcpu_masks(intr); -	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));  	irq_data_update_effective_affinity(data, cpumask_of(cpu));  	spin_unlock_irqrestore(&gic_lock, flags); diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c index 62f541f968f6..07074820a167 100644 --- a/drivers/macintosh/macio_asic.c +++ b/drivers/macintosh/macio_asic.c @@ -375,6 +375,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,  	dev->ofdev.dev.of_node = np;  	dev->ofdev.archdata.dma_mask = 0xffffffffUL;  	dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask; +	dev->ofdev.dev.coherent_dma_mask = dev->ofdev.archdata.dma_mask;  	dev->ofdev.dev.parent = parent;  	dev->ofdev.dev.bus = &macio_bus_type;  	dev->ofdev.dev.release = macio_release_dev; 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 1a46b41dac70..6422846b546e 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -659,11 +659,11 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)  static void search_free(struct closure *cl)  {  	struct search *s = container_of(cl, struct search, cl); -	bio_complete(s);  	if (s->iop.bio)  		bio_put(s->iop.bio); +	bio_complete(s);  	closure_debug_destroy(cl);  	mempool_free(s, s->d->c->search);  } diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 312895788036..f2273143b3cb 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -963,6 +963,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,  	uint32_t rtime = cpu_to_le32(get_seconds());  	struct uuid_entry *u;  	char buf[BDEVNAME_SIZE]; +	struct cached_dev *exist_dc, *t;  	bdevname(dc->bdev, buf); @@ -987,6 +988,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,  		return -EINVAL;  	} +	/* Check whether already attached */ +	list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { +		if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) { +			pr_err("Tried to attach %s but duplicate UUID already attached", +				buf); + +			return -EINVAL; +		} +	} +  	u = uuid_find(c, dc->sb.uuid);  	if (u && @@ -1204,7 +1215,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,  	return;  err: -	pr_notice("error opening %s: %s", bdevname(bdev, name), err); +	pr_notice("error %s: %s", bdevname(bdev, name), err);  	bcache_device_stop(&dc->disk);  } @@ -1274,7 +1285,7 @@ static int flash_devs_run(struct cache_set *c)  	struct uuid_entry *u;  	for (u = c->uuids; -	     u < c->uuids + c->devices_max_used && !ret; +	     u < c->uuids + c->nr_uuids && !ret;  	     u++)  		if (UUID_FLASH_ONLY(u))  			ret = flash_dev_run(c, u); @@ -1883,6 +1894,8 @@ static int register_cache(struct cache_sb *sb, struct page 
*sb_page,  	const char *err = NULL; /* must be set for any error case */  	int ret = 0; +	bdevname(bdev, name); +  	memcpy(&ca->sb, sb, sizeof(struct cache_sb));  	ca->bdev = bdev;  	ca->bdev->bd_holder = ca; @@ -1891,11 +1904,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,  	bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;  	get_page(sb_page); -	if (blk_queue_discard(bdev_get_queue(ca->bdev))) +	if (blk_queue_discard(bdev_get_queue(bdev)))  		ca->discard = CACHE_DISCARD(&ca->sb);  	ret = cache_alloc(ca);  	if (ret != 0) { +		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);  		if (ret == -ENOMEM)  			err = "cache_alloc(): -ENOMEM";  		else @@ -1918,14 +1932,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,  		goto out;  	} -	pr_info("registered cache device %s", bdevname(bdev, name)); +	pr_info("registered cache device %s", name);  out:  	kobject_put(&ca->kobj);  err:  	if (err) -		pr_notice("error opening %s: %s", bdevname(bdev, name), err); +		pr_notice("error %s: %s", name, err);  	return ret;  } @@ -2014,6 +2028,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,  	if (err)  		goto err_close; +	err = "failed to register device";  	if (SB_IS_BDEV(sb)) {  		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);  		if (!dc) @@ -2028,7 +2043,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,  			goto err_close;  		if (register_cache(sb, sb_page, bdev, ca) != 0) -			goto err_close; +			goto err;  	}  out:  	if (sb_page) @@ -2041,7 +2056,7 @@ out:  err_close:  	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);  err: -	pr_info("error opening %s: %s", path, err); +	pr_info("error %s: %s", path, err);  	ret = -EINVAL;  	goto out;  } diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 414c9af54ded..aa2032fa80d4 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -386,9 +386,6 @@ static void __cache_size_refresh(void) 
 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,  			       enum data_mode *data_mode)  { -	unsigned noio_flag; -	void *ptr; -  	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {  		*data_mode = DATA_MODE_SLAB;  		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); @@ -412,16 +409,15 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,  	 * all allocations done by this process (including pagetables) are done  	 * as if GFP_NOIO was specified.  	 */ +	if (gfp_mask & __GFP_NORETRY) { +		unsigned noio_flag = memalloc_noio_save(); +		void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); -	if (gfp_mask & __GFP_NORETRY) -		noio_flag = memalloc_noio_save(); - -	ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); - -	if (gfp_mask & __GFP_NORETRY)  		memalloc_noio_restore(noio_flag); +		return ptr; +	} -	return ptr; +	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);  }  /* diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 7d3e572072f5..a05a560d3cba 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -211,29 +211,27 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)  		else  			m->queue_mode = DM_TYPE_REQUEST_BASED; -	} else if (m->queue_mode == DM_TYPE_BIO_BASED || -		   m->queue_mode == DM_TYPE_NVME_BIO_BASED) { +	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {  		INIT_WORK(&m->process_queued_bios, process_queued_bios); - -		if (m->queue_mode == DM_TYPE_BIO_BASED) { -			/* -			 * bio-based doesn't support any direct scsi_dh management; -			 * it just discovers if a scsi_dh is attached. 
-			 */ -			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags); -		} -	} - -	if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) { -		set_bit(MPATHF_QUEUE_IO, &m->flags); -		atomic_set(&m->pg_init_in_progress, 0); -		atomic_set(&m->pg_init_count, 0); -		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; -		init_waitqueue_head(&m->pg_init_wait); +		/* +		 * bio-based doesn't support any direct scsi_dh management; +		 * it just discovers if a scsi_dh is attached. +		 */ +		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);  	}  	dm_table_set_type(ti->table, m->queue_mode); +	/* +	 * Init fields that are only used when a scsi_dh is attached +	 * - must do this unconditionally (really doesn't hurt non-SCSI uses) +	 */ +	set_bit(MPATHF_QUEUE_IO, &m->flags); +	atomic_set(&m->pg_init_in_progress, 0); +	atomic_set(&m->pg_init_count, 0); +	m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; +	init_waitqueue_head(&m->pg_init_wait); +  	return 0;  } @@ -337,9 +335,6 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg)  {  	m->current_pg = pg; -	if (m->queue_mode == DM_TYPE_NVME_BIO_BASED) -		return; -  	/* Must we initialise the PG first, and queue I/O till it's ready? 
*/  	if (m->hw_handler_name) {  		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); @@ -385,8 +380,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)  	unsigned bypassed = 1;  	if (!atomic_read(&m->nr_valid_paths)) { -		if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) -			clear_bit(MPATHF_QUEUE_IO, &m->flags); +		clear_bit(MPATHF_QUEUE_IO, &m->flags);  		goto failed;  	} @@ -599,7 +593,7 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)  	return pgpath;  } -static struct pgpath *__map_bio_nvme(struct multipath *m, struct bio *bio) +static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio)  {  	struct pgpath *pgpath;  	unsigned long flags; @@ -634,8 +628,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio,  {  	struct pgpath *pgpath; -	if (m->queue_mode == DM_TYPE_NVME_BIO_BASED) -		pgpath = __map_bio_nvme(m, bio); +	if (!m->hw_handler_name) +		pgpath = __map_bio_fast(m, bio);  	else  		pgpath = __map_bio(m, bio); @@ -675,8 +669,7 @@ static void process_queued_io_list(struct multipath *m)  {  	if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)  		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table)); -	else if (m->queue_mode == DM_TYPE_BIO_BASED || -		 m->queue_mode == DM_TYPE_NVME_BIO_BASED) +	else if (m->queue_mode == DM_TYPE_BIO_BASED)  		queue_work(kmultipathd, &m->process_queued_bios);  } @@ -811,15 +804,14 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,  	return 0;  } -static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, char **error) +static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, +			 const char *attached_handler_name, char **error)  {  	struct request_queue *q = bdev_get_queue(bdev); -	const char *attached_handler_name;  	int r;  	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {  retain: -		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);  		if (attached_handler_name) 
{  			/*  			 * Clear any hw_handler_params associated with a @@ -873,6 +865,8 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps  	int r;  	struct pgpath *p;  	struct multipath *m = ti->private; +	struct request_queue *q; +	const char *attached_handler_name;  	/* we need at least a path arg */  	if (as->argc < 1) { @@ -891,9 +885,11 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps  		goto bad;  	} -	if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) { +	q = bdev_get_queue(p->path.dev->bdev); +	attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL); +	if (attached_handler_name) {  		INIT_DELAYED_WORK(&p->activate_path, activate_path_work); -		r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error); +		r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error);  		if (r) {  			dm_put_device(ti, p->path.dev);  			goto bad; @@ -1001,8 +997,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)  	if (!hw_argc)  		return 0; -	if (m->queue_mode == DM_TYPE_BIO_BASED || -	    m->queue_mode == DM_TYPE_NVME_BIO_BASED) { +	if (m->queue_mode == DM_TYPE_BIO_BASED) {  		dm_consume_args(as, hw_argc);  		DMERR("bio-based multipath doesn't allow hardware handler args");  		return 0; @@ -1091,8 +1086,6 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)  			if (!strcasecmp(queue_mode_name, "bio"))  				m->queue_mode = DM_TYPE_BIO_BASED; -			else if (!strcasecmp(queue_mode_name, "nvme")) -				m->queue_mode = DM_TYPE_NVME_BIO_BASED;  			else if (!strcasecmp(queue_mode_name, "rq"))  				m->queue_mode = DM_TYPE_REQUEST_BASED;  			else if (!strcasecmp(queue_mode_name, "mq")) @@ -1193,7 +1186,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)  	ti->num_discard_bios = 1;  	ti->num_write_same_bios = 1;  	ti->num_write_zeroes_bios = 1; -	if (m->queue_mode == DM_TYPE_BIO_BASED || m->queue_mode == DM_TYPE_NVME_BIO_BASED) +	if 
(m->queue_mode == DM_TYPE_BIO_BASED)  		ti->per_io_data_size = multipath_per_bio_data_size();  	else  		ti->per_io_data_size = sizeof(struct dm_mpath_io); @@ -1730,9 +1723,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type,  			case DM_TYPE_BIO_BASED:  				DMEMIT("queue_mode bio ");  				break; -			case DM_TYPE_NVME_BIO_BASED: -				DMEMIT("queue_mode nvme "); -				break;  			case DM_TYPE_MQ_REQUEST_BASED:  				DMEMIT("queue_mode mq ");  				break; @@ -2030,8 +2020,9 @@ static int multipath_busy(struct dm_target *ti)   *---------------------------------------------------------------*/  static struct target_type multipath_target = {  	.name = "multipath", -	.version = {1, 12, 0}, -	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE, +	.version = {1, 13, 0}, +	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE | +		    DM_TARGET_PASSES_INTEGRITY,  	.module = THIS_MODULE,  	.ctr = multipath_ctr,  	.dtr = multipath_dtr, diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 7ef469e902c6..c1d1034ff7b7 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3408,9 +3408,10 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,  		set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);  	} else { -		if (test_bit(MD_RECOVERY_NEEDED, &recovery) || -		    test_bit(MD_RECOVERY_RESHAPE, &recovery) || -		    test_bit(MD_RECOVERY_RUNNING, &recovery)) +		if (!test_bit(MD_RECOVERY_INTR, &recovery) && +		    (test_bit(MD_RECOVERY_NEEDED, &recovery) || +		     test_bit(MD_RECOVERY_RESHAPE, &recovery) || +		     test_bit(MD_RECOVERY_RUNNING, &recovery)))  			r = mddev->curr_resync_completed;  		else  			r = mddev->recovery_cp; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 5fe7ec356c33..7eb3e2a3c07d 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -942,17 +942,12 @@ static int dm_table_determine_type(struct dm_table *t)  	if (t->type != DM_TYPE_NONE) {  		/* target already set the 
table's type */ -		if (t->type == DM_TYPE_BIO_BASED) -			return 0; -		else if (t->type == DM_TYPE_NVME_BIO_BASED) { -			if (!dm_table_does_not_support_partial_completion(t)) { -				DMERR("nvme bio-based is only possible with devices" -				      " that don't support partial completion"); -				return -EINVAL; -			} -			/* Fallthru, also verify all devices are blk-mq */ +		if (t->type == DM_TYPE_BIO_BASED) { +			/* possibly upgrade to a variant of bio-based */ +			goto verify_bio_based;  		}  		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED); +		BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);  		goto verify_rq_based;  	} @@ -985,6 +980,7 @@ static int dm_table_determine_type(struct dm_table *t)  	}  	if (bio_based) { +verify_bio_based:  		/* We must use this table as bio-based */  		t->type = DM_TYPE_BIO_BASED;  		if (dm_table_supports_dax(t) || @@ -1755,7 +1751,7 @@ static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev  	char b[BDEVNAME_SIZE];  	/* For now, NVMe devices are the only devices of this class */ -	return (strncmp(bdevname(dev->bdev, b), "nvme", 3) == 0); +	return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);  }  static bool dm_table_does_not_support_partial_completion(struct dm_table *t) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index d6de00f367ef..45328d8b2859 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -458,9 +458,11 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)  	return dm_get_geometry(md, geo);  } -static int dm_grab_bdev_for_ioctl(struct mapped_device *md, -				  struct block_device **bdev, -				  fmode_t *mode) +static char *_dm_claim_ptr = "I belong to device-mapper"; + +static int dm_get_bdev_for_ioctl(struct mapped_device *md, +				 struct block_device **bdev, +				 fmode_t *mode)  {  	struct dm_target *tgt;  	struct dm_table *map; @@ -490,6 +492,10 @@ retry:  		goto out;  	bdgrab(*bdev); +	r = blkdev_get(*bdev, *mode, _dm_claim_ptr); +	if (r < 0) +		goto out; +  	
dm_put_live_table(md, srcu_idx);  	return r; @@ -508,7 +514,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,  	struct mapped_device *md = bdev->bd_disk->private_data;  	int r; -	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); +	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);  	if (r < 0)  		return r; @@ -528,7 +534,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,  	r =  __blkdev_driver_ioctl(bdev, mode, cmd, arg);  out: -	bdput(bdev); +	blkdev_put(bdev, mode);  	return r;  } @@ -708,14 +714,13 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)  static int open_table_device(struct table_device *td, dev_t dev,  			     struct mapped_device *md)  { -	static char *_claim_ptr = "I belong to device-mapper";  	struct block_device *bdev;  	int r;  	BUG_ON(td->dm_dev.bdev); -	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr); +	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);  	if (IS_ERR(bdev))  		return PTR_ERR(bdev); @@ -903,7 +908,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error)  			queue_io(md, bio);  		} else {  			/* done with normal IO or empty flush */ -			bio->bi_status = io_error; +			if (io_error) +				bio->bi_status = io_error;  			bio_endio(bio);  		}  	} @@ -3010,7 +3016,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,  	fmode_t mode;  	int r; -	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); +	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);  	if (r < 0)  		return r; @@ -3020,7 +3026,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,  	else  		r = -EOPNOTSUPP; -	bdput(bdev); +	blkdev_put(bdev, mode);  	return r;  } @@ -3031,7 +3037,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)  	fmode_t mode;  	int r; -	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); +	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);  	if (r < 0)  		return r; @@ 
-3041,7 +3047,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)  	else  		r = -EOPNOTSUPP; -	bdput(bdev); +	blkdev_put(bdev, mode);  	return r;  } @@ -3053,7 +3059,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,  	fmode_t mode;  	int r; -	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); +	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);  	if (r < 0)  		return r; @@ -3063,7 +3069,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,  	else  		r = -EOPNOTSUPP; -	bdput(bdev); +	blkdev_put(bdev, mode);  	return r;  } @@ -3074,7 +3080,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)  	fmode_t mode;  	int r; -	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); +	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);  	if (r < 0)  		return r; @@ -3084,7 +3090,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)  	else  		r = -EOPNOTSUPP; -	bdput(bdev); +	blkdev_put(bdev, mode);  	return r;  } diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c index e40065bdbfc8..0a7e99d62c69 100644 --- a/drivers/md/md-multipath.c +++ b/drivers/md/md-multipath.c @@ -157,7 +157,7 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev)  		seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");  	}  	rcu_read_unlock(); -	seq_printf (seq, "]"); +	seq_putc(seq, ']');  }  static int multipath_congested(struct mddev *mddev, int bits) diff --git a/drivers/md/md.c b/drivers/md/md.c index bc67ab6844f0..254e44e44668 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -801,6 +801,9 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,  	struct bio *bio;  	int ff = 0; +	if (!page) +		return; +  	if (test_bit(Faulty, &rdev->flags))  		return; @@ -5452,6 +5455,7 @@ int md_run(struct mddev *mddev)  	 * the only valid external interface is through the md  	 * device.  	 
*/ +	mddev->has_superblocks = false;  	rdev_for_each(rdev, mddev) {  		if (test_bit(Faulty, &rdev->flags))  			continue; @@ -5465,6 +5469,9 @@ int md_run(struct mddev *mddev)  				set_disk_ro(mddev->gendisk, 1);  		} +		if (rdev->sb_page) +			mddev->has_superblocks = true; +  		/* perform some consistency tests on the device.  		 * We don't want the data to overlap the metadata,  		 * Internal Bitmap issues have been handled elsewhere. @@ -5497,8 +5504,10 @@ int md_run(struct mddev *mddev)  	}  	if (mddev->sync_set == NULL) {  		mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); -		if (!mddev->sync_set) -			return -ENOMEM; +		if (!mddev->sync_set) { +			err = -ENOMEM; +			goto abort; +		}  	}  	spin_lock(&pers_lock); @@ -5511,7 +5520,8 @@ int md_run(struct mddev *mddev)  		else  			pr_warn("md: personality for level %s is not loaded!\n",  				mddev->clevel); -		return -EINVAL; +		err = -EINVAL; +		goto abort;  	}  	spin_unlock(&pers_lock);  	if (mddev->level != pers->level) { @@ -5524,7 +5534,8 @@ int md_run(struct mddev *mddev)  	    pers->start_reshape == NULL) {  		/* This personality cannot handle reshaping... 
*/  		module_put(pers->owner); -		return -EINVAL; +		err = -EINVAL; +		goto abort;  	}  	if (pers->sync_request) { @@ -5593,7 +5604,7 @@ int md_run(struct mddev *mddev)  		mddev->private = NULL;  		module_put(pers->owner);  		bitmap_destroy(mddev); -		return err; +		goto abort;  	}  	if (mddev->queue) {  		bool nonrot = true; @@ -5655,6 +5666,18 @@ int md_run(struct mddev *mddev)  	sysfs_notify_dirent_safe(mddev->sysfs_action);  	sysfs_notify(&mddev->kobj, NULL, "degraded");  	return 0; + +abort: +	if (mddev->bio_set) { +		bioset_free(mddev->bio_set); +		mddev->bio_set = NULL; +	} +	if (mddev->sync_set) { +		bioset_free(mddev->sync_set); +		mddev->sync_set = NULL; +	} + +	return err;  }  EXPORT_SYMBOL_GPL(md_run); @@ -8049,6 +8072,7 @@ EXPORT_SYMBOL(md_done_sync);  bool md_write_start(struct mddev *mddev, struct bio *bi)  {  	int did_change = 0; +  	if (bio_data_dir(bi) != WRITE)  		return true; @@ -8081,6 +8105,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)  	rcu_read_unlock();  	if (did_change)  		sysfs_notify_dirent_safe(mddev->sysfs_state); +	if (!mddev->has_superblocks) +		return true;  	wait_event(mddev->sb_wait,  		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||  		   mddev->suspended); @@ -8543,6 +8569,19 @@ void md_do_sync(struct md_thread *thread)  	set_mask_bits(&mddev->sb_flags, 0,  		      BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); +	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && +			!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && +			mddev->delta_disks > 0 && +			mddev->pers->finish_reshape && +			mddev->pers->size && +			mddev->queue) { +		mddev_lock_nointr(mddev); +		md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); +		mddev_unlock(mddev); +		set_capacity(mddev->gendisk, mddev->array_sectors); +		revalidate_disk(mddev->gendisk); +	} +  	spin_lock(&mddev->lock);  	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {  		/* We completed so min/max setting can be forgotten if used. 
*/ @@ -8569,6 +8608,10 @@ static int remove_and_add_spares(struct mddev *mddev,  	int removed = 0;  	bool remove_some = false; +	if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) +		/* Mustn't remove devices when resync thread is running */ +		return 0; +  	rdev_for_each(rdev, mddev) {  		if ((this == NULL || rdev == this) &&  		    rdev->raid_disk >= 0 && diff --git a/drivers/md/md.h b/drivers/md/md.h index 58cd20a5e85e..fbc925cce810 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -468,6 +468,8 @@ struct mddev {  	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);  	struct md_cluster_info		*cluster_info;  	unsigned int			good_device_nr;	/* good device num within cluster raid */ + +	bool	has_superblocks:1;  };  enum recovery_flags { diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index b2eae332e1a2..fe872dc6712e 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1108,7 +1108,7 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,  	bio_copy_data(behind_bio, bio);  skip_copy: -	r1_bio->behind_master_bio = behind_bio;; +	r1_bio->behind_master_bio = behind_bio;  	set_bit(R1BIO_BehindIO, &r1_bio->state);  	return; @@ -1809,6 +1809,17 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)  			struct md_rdev *repl =  				conf->mirrors[conf->raid_disks + number].rdev;  			freeze_array(conf, 0); +			if (atomic_read(&repl->nr_pending)) { +				/* It means that some queued IO of retry_list +				 * hold repl. Thus, we cannot set replacement +				 * as NULL, avoiding rdev NULL pointer +				 * dereference in sync_request_write and +				 * handle_write_finished. 
+				err = -EBUSY; +				unfreeze_array(conf); +				goto abort; +			}  			clear_bit(Replacement, &repl->flags);  			p->rdev = repl;  			conf->mirrors[conf->raid_disks + number].rdev = NULL; diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index c7294e7557e0..eb84bc68e2fd 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -26,6 +26,18 @@  #define BARRIER_BUCKETS_NR_BITS		(PAGE_SHIFT - ilog2(sizeof(atomic_t)))  #define BARRIER_BUCKETS_NR		(1<<BARRIER_BUCKETS_NR_BITS) +/* Note: raid1_info.rdev can be set to NULL asynchronously by raid1_remove_disk. + * There are three safe ways to access raid1_info.rdev. + * 1/ when holding mddev->reconfig_mutex + * 2/ when resync/recovery is known to be happening - i.e. in code that is + *    called as part of performing resync/recovery. + * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer + *    and if it is non-NULL, increment rdev->nr_pending before dropping the + *    RCU lock. + * When .rdev is set to NULL, the nr_pending count is checked again and if it has + * been incremented, the pointer is put back in .rdev. 
+ */ +  struct raid1_info {  	struct md_rdev	*rdev;  	sector_t	head_position; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 99c9207899a7..c5e6c60fc0d4 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -141,7 +141,7 @@ static void r10bio_pool_free(void *r10_bio, void *data)  #define RESYNC_WINDOW (1024*1024)  /* maximum number of concurrent requests, memory permitting */  #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE) -#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW) +#define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW)  #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)  /* @@ -2655,7 +2655,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)  		for (m = 0; m < conf->copies; m++) {  			int dev = r10_bio->devs[m].devnum;  			rdev = conf->mirrors[dev].rdev; -			if (r10_bio->devs[m].bio == NULL) +			if (r10_bio->devs[m].bio == NULL || +				r10_bio->devs[m].bio->bi_end_io == NULL)  				continue;  			if (!r10_bio->devs[m].bio->bi_status) {  				rdev_clear_badblocks( @@ -2670,7 +2671,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)  					md_error(conf->mddev, rdev);  			}  			rdev = conf->mirrors[dev].replacement; -			if (r10_bio->devs[m].repl_bio == NULL) +			if (r10_bio->devs[m].repl_bio == NULL || +				r10_bio->devs[m].repl_bio->bi_end_io == NULL)  				continue;  			if (!r10_bio->devs[m].repl_bio->bi_status) { @@ -3782,7 +3784,7 @@ static int raid10_run(struct mddev *mddev)  		if (fc > 1 || fo > 0) {  			pr_err("only near layout is supported by clustered"  				" raid10\n"); -			goto out; +			goto out_free_conf;  		}  	} @@ -4830,17 +4832,11 @@ static void raid10_finish_reshape(struct mddev *mddev)  		return;  	if (mddev->delta_disks > 0) { -		sector_t size = raid10_size(mddev, 0, 0); -		md_set_array_sectors(mddev, size);  		if (mddev->recovery_cp > mddev->resync_max_sectors) {  			mddev->recovery_cp = mddev->resync_max_sectors;  			
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);  		} -		mddev->resync_max_sectors = size; -		if (mddev->queue) { -			set_capacity(mddev->gendisk, mddev->array_sectors); -			revalidate_disk(mddev->gendisk); -		} +		mddev->resync_max_sectors = mddev->array_sectors;  	} else {  		int d;  		rcu_read_lock(); diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index db2ac22ac1b4..e2e8840de9bf 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h @@ -2,6 +2,19 @@  #ifndef _RAID10_H  #define _RAID10_H +/* Note: raid10_info.rdev can be set to NULL asynchronously by + * raid10_remove_disk. + * There are three safe ways to access raid10_info.rdev. + * 1/ when holding mddev->reconfig_mutex + * 2/ when resync/recovery/reshape is known to be happening - i.e. in code + *    that is called as part of performing resync/recovery/reshape. + * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer + *    and if it is non-NULL, increment rdev->nr_pending before dropping the + *    RCU lock. + * When .rdev is set to NULL, the nr_pending count checked again and if it has + * been incremented, the pointer is put back in .rdev. 
+ */ +  struct raid10_info {  	struct md_rdev	*rdev, *replacement;  	sector_t	head_position; diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h index 0c76bcedfc1c..a001808a2b77 100644 --- a/drivers/md/raid5-log.h +++ b/drivers/md/raid5-log.h @@ -44,6 +44,7 @@ extern void ppl_write_stripe_run(struct r5conf *conf);  extern void ppl_stripe_write_finished(struct stripe_head *sh);  extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);  extern void ppl_quiesce(struct r5conf *conf, int quiesce); +extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);  static inline bool raid5_has_ppl(struct r5conf *conf)  { @@ -104,7 +105,7 @@ static inline int log_handle_flush_request(struct r5conf *conf, struct bio *bio)  	if (conf->log)  		ret = r5l_handle_flush_request(conf->log, bio);  	else if (raid5_has_ppl(conf)) -		ret = 0; +		ret = ppl_handle_flush_request(conf->log, bio);  	return ret;  } diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 2764c2290062..42890a08375b 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -693,6 +693,16 @@ void ppl_quiesce(struct r5conf *conf, int quiesce)  	}  } +int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio) +{ +	if (bio->bi_iter.bi_size == 0) { +		bio_endio(bio); +		return 0; +	} +	bio->bi_opf &= ~REQ_PREFLUSH; +	return -EAGAIN; +} +  void ppl_stripe_write_finished(struct stripe_head *sh)  {  	struct ppl_io_unit *io; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 50d01144b805..b5d2601483e3 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2196,15 +2196,16 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)  static int grow_stripes(struct r5conf *conf, int num)  {  	struct kmem_cache *sc; +	size_t namelen = sizeof(conf->cache_name[0]);  	int devs = max(conf->raid_disks, conf->previous_raid_disks);  	if (conf->mddev->gendisk) -		sprintf(conf->cache_name[0], +		snprintf(conf->cache_name[0], namelen,  			
"raid%d-%s", conf->level, mdname(conf->mddev));  	else -		sprintf(conf->cache_name[0], +		snprintf(conf->cache_name[0], namelen,  			"raid%d-%p", conf->level, conf->mddev); -	sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); +	snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);  	conf->active_name = 0;  	sc = kmem_cache_create(conf->cache_name[conf->active_name], @@ -6764,9 +6765,7 @@ static void free_conf(struct r5conf *conf)  	log_exit(conf); -	if (conf->shrinker.nr_deferred) -		unregister_shrinker(&conf->shrinker); - +	unregister_shrinker(&conf->shrinker);  	free_thread_groups(conf);  	shrink_stripes(conf);  	raid5_free_percpu(conf); @@ -8001,13 +8000,7 @@ static void raid5_finish_reshape(struct mddev *mddev)  	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { -		if (mddev->delta_disks > 0) { -			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); -			if (mddev->queue) { -				set_capacity(mddev->gendisk, mddev->array_sectors); -				revalidate_disk(mddev->gendisk); -			} -		} else { +		if (mddev->delta_disks <= 0) {  			int d;  			spin_lock_irq(&conf->device_lock);  			mddev->degraded = raid5_calc_degraded(conf); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 2e6123825095..3f8da26032ac 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -450,6 +450,18 @@ enum {   * HANDLE gets cleared if stripe_handle leaves nothing locked.   */ +/* Note: disk_info.rdev can be set to NULL asynchronously by raid5_remove_disk. + * There are three safe ways to access disk_info.rdev. + * 1/ when holding mddev->reconfig_mutex + * 2/ when resync/recovery/reshape is known to be happening - i.e. in code that + *    is called as part of performing resync/recovery/reshape. + * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer + *    and if it is non-NULL, increment rdev->nr_pending before dropping the RCU + *    lock. 
+ * When .rdev is set to NULL, the nr_pending count checked again and if + * it has been incremented, the pointer is put back in .rdev. + */ +  struct disk_info {  	struct md_rdev	*rdev, *replacement;  	struct page	*extra_page; /* extra page to use in prexor */ diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index 145e12bfb819..86c1a190d946 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig @@ -147,9 +147,11 @@ config DVB_CORE  config DVB_MMAP  	bool "Enable DVB memory-mapped API (EXPERIMENTAL)"  	depends on DVB_CORE +	depends on VIDEO_V4L2=y || VIDEO_V4L2=DVB_CORE +	select VIDEOBUF2_VMALLOC  	default n  	help -	  This option enables DVB experimental memory-mapped API, with +	  This option enables DVB experimental memory-mapped API, which  	  reduces the number of context switches to read DVB buffers, as  	  the buffers can use mmap() syscalls. diff --git a/drivers/media/common/videobuf2/Kconfig b/drivers/media/common/videobuf2/Kconfig index 5df05250de94..17c32ea58395 100644 --- a/drivers/media/common/videobuf2/Kconfig +++ b/drivers/media/common/videobuf2/Kconfig @@ -3,6 +3,9 @@ config VIDEOBUF2_CORE  	select DMA_SHARED_BUFFER  	tristate +config VIDEOBUF2_V4L2 +	tristate +  config VIDEOBUF2_MEMOPS  	tristate  	select FRAME_VECTOR diff --git a/drivers/media/common/videobuf2/Makefile b/drivers/media/common/videobuf2/Makefile index 19de5ccda20b..77bebe8b202f 100644 --- a/drivers/media/common/videobuf2/Makefile +++ b/drivers/media/common/videobuf2/Makefile @@ -1,5 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 +videobuf2-common-objs := videobuf2-core.o -obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-core.o videobuf2-v4l2.o +ifeq ($(CONFIG_TRACEPOINTS),y) +  videobuf2-common-objs += vb2-trace.o +endif + +obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-common.o +obj-$(CONFIG_VIDEOBUF2_V4L2) += videobuf2-v4l2.o  obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o  obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o  obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += 
videobuf2-dma-contig.o diff --git a/drivers/media/v4l2-core/vb2-trace.c b/drivers/media/common/videobuf2/vb2-trace.c index 4c0f39d271f0..4c0f39d271f0 100644 --- a/drivers/media/v4l2-core/vb2-trace.c +++ b/drivers/media/common/videobuf2/vb2-trace.c diff --git a/drivers/media/dvb-core/Makefile b/drivers/media/dvb-core/Makefile index 3a105d82019a..62b028ded9f7 100644 --- a/drivers/media/dvb-core/Makefile +++ b/drivers/media/dvb-core/Makefile @@ -4,7 +4,7 @@  #  dvb-net-$(CONFIG_DVB_NET) := dvb_net.o -dvb-vb2-$(CONFIG_DVB_MMSP) := dvb_vb2.o +dvb-vb2-$(CONFIG_DVB_MMAP) := dvb_vb2.o  dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o			\  		 dvb_ca_en50221.o dvb_frontend.o		\ diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c index 6d53af00190e..61a750fae465 100644 --- a/drivers/media/dvb-core/dmxdev.c +++ b/drivers/media/dvb-core/dmxdev.c @@ -128,11 +128,7 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)  	struct dvb_device *dvbdev = file->private_data;  	struct dmxdev *dmxdev = dvbdev->priv;  	struct dmx_frontend *front; -#ifndef DVB_MMAP  	bool need_ringbuffer = false; -#else -	const bool need_ringbuffer = true; -#endif  	dprintk("%s\n", __func__); @@ -144,17 +140,31 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)  		return -ENODEV;  	} -#ifndef DVB_MMAP +	dmxdev->may_do_mmap = 0; + +	/* +	 * The logic here is a little tricky due to the ifdef. +	 * +	 * The ringbuffer is used for both read and mmap. +	 * +	 * It is not needed, however, on two situations: +	 *	- Write devices (access with O_WRONLY); +	 *	- For duplex device nodes, opened with O_RDWR. 
+	 */ +  	if ((file->f_flags & O_ACCMODE) == O_RDONLY)  		need_ringbuffer = true; -#else -	if ((file->f_flags & O_ACCMODE) == O_RDWR) { +	else if ((file->f_flags & O_ACCMODE) == O_RDWR) {  		if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) { +#ifdef CONFIG_DVB_MMAP +			dmxdev->may_do_mmap = 1; +			need_ringbuffer = true; +#else  			mutex_unlock(&dmxdev->mutex);  			return -EOPNOTSUPP; +#endif  		}  	} -#endif  	if (need_ringbuffer) {  		void *mem; @@ -169,8 +179,9 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)  			return -ENOMEM;  		}  		dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE); -		dvb_vb2_init(&dmxdev->dvr_vb2_ctx, "dvr", -			     file->f_flags & O_NONBLOCK); +		if (dmxdev->may_do_mmap) +			dvb_vb2_init(&dmxdev->dvr_vb2_ctx, "dvr", +				     file->f_flags & O_NONBLOCK);  		dvbdev->readers--;  	} @@ -200,11 +211,6 @@ static int dvb_dvr_release(struct inode *inode, struct file *file)  {  	struct dvb_device *dvbdev = file->private_data;  	struct dmxdev *dmxdev = dvbdev->priv; -#ifndef DVB_MMAP -	bool need_ringbuffer = false; -#else -	const bool need_ringbuffer = true; -#endif  	mutex_lock(&dmxdev->mutex); @@ -213,15 +219,14 @@ static int dvb_dvr_release(struct inode *inode, struct file *file)  		dmxdev->demux->connect_frontend(dmxdev->demux,  						dmxdev->dvr_orig_fe);  	} -#ifndef DVB_MMAP -	if ((file->f_flags & O_ACCMODE) == O_RDONLY) -		need_ringbuffer = true; -#endif -	if (need_ringbuffer) { -		if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx)) -			dvb_vb2_stream_off(&dmxdev->dvr_vb2_ctx); -		dvb_vb2_release(&dmxdev->dvr_vb2_ctx); +	if (((file->f_flags & O_ACCMODE) == O_RDONLY) || +	    dmxdev->may_do_mmap) { +		if (dmxdev->may_do_mmap) { +			if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx)) +				dvb_vb2_stream_off(&dmxdev->dvr_vb2_ctx); +			dvb_vb2_release(&dmxdev->dvr_vb2_ctx); +		}  		dvbdev->readers++;  		if (dmxdev->dvr_buffer.data) {  			void *mem = dmxdev->dvr_buffer.data; @@ -380,7 +385,8 @@ static void 
dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)  static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,  				       const u8 *buffer2, size_t buffer2_len, -				       struct dmx_section_filter *filter) +				       struct dmx_section_filter *filter, +				       u32 *buffer_flags)  {  	struct dmxdev_filter *dmxdevfilter = filter->priv;  	int ret; @@ -399,10 +405,12 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,  	dprintk("section callback %*ph\n", 6, buffer1);  	if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) {  		ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx, -					  buffer1, buffer1_len); +					  buffer1, buffer1_len, +					  buffer_flags);  		if (ret == buffer1_len)  			ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx, -						  buffer2, buffer2_len); +						  buffer2, buffer2_len, +						  buffer_flags);  	} else {  		ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer,  					      buffer1, buffer1_len); @@ -422,11 +430,12 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,  static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,  				  const u8 *buffer2, size_t buffer2_len, -				  struct dmx_ts_feed *feed) +				  struct dmx_ts_feed *feed, +				  u32 *buffer_flags)  {  	struct dmxdev_filter *dmxdevfilter = feed->priv;  	struct dvb_ringbuffer *buffer; -#ifdef DVB_MMAP +#ifdef CONFIG_DVB_MMAP  	struct dvb_vb2_ctx *ctx;  #endif  	int ret; @@ -440,20 +449,22 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,  	if (dmxdevfilter->params.pes.output == DMX_OUT_TAP ||  	    dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) {  		buffer = &dmxdevfilter->buffer; -#ifdef DVB_MMAP +#ifdef CONFIG_DVB_MMAP  		ctx = &dmxdevfilter->vb2_ctx;  #endif  	} else {  		buffer = &dmxdevfilter->dev->dvr_buffer; -#ifdef DVB_MMAP +#ifdef CONFIG_DVB_MMAP  		ctx = &dmxdevfilter->dev->dvr_vb2_ctx;  #endif  	}  	if (dvb_vb2_is_streaming(ctx)) { -		
ret = dvb_vb2_fill_buffer(ctx, buffer1, buffer1_len); +		ret = dvb_vb2_fill_buffer(ctx, buffer1, buffer1_len, +					  buffer_flags);  		if (ret == buffer1_len) -			ret = dvb_vb2_fill_buffer(ctx, buffer2, buffer2_len); +			ret = dvb_vb2_fill_buffer(ctx, buffer2, buffer2_len, +						  buffer_flags);  	} else {  		if (buffer->error) {  			spin_unlock(&dmxdevfilter->dev->lock); @@ -802,6 +813,12 @@ static int dvb_demux_open(struct inode *inode, struct file *file)  	mutex_init(&dmxdevfilter->mutex);  	file->private_data = dmxdevfilter; +#ifdef CONFIG_DVB_MMAP +	dmxdev->may_do_mmap = 1; +#else +	dmxdev->may_do_mmap = 0; +#endif +  	dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);  	dvb_vb2_init(&dmxdevfilter->vb2_ctx, "demux_filter",  		     file->f_flags & O_NONBLOCK); @@ -1111,7 +1128,7 @@ static int dvb_demux_do_ioctl(struct file *file,  		mutex_unlock(&dmxdevfilter->mutex);  		break; -#ifdef DVB_MMAP +#ifdef CONFIG_DVB_MMAP  	case DMX_REQBUFS:  		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {  			mutex_unlock(&dmxdev->mutex); @@ -1160,7 +1177,7 @@ static int dvb_demux_do_ioctl(struct file *file,  		break;  #endif  	default: -		ret = -EINVAL; +		ret = -ENOTTY;  		break;  	}  	mutex_unlock(&dmxdev->mutex); @@ -1199,13 +1216,16 @@ static __poll_t dvb_demux_poll(struct file *file, poll_table *wait)  	return mask;  } -#ifdef DVB_MMAP +#ifdef CONFIG_DVB_MMAP  static int dvb_demux_mmap(struct file *file, struct vm_area_struct *vma)  {  	struct dmxdev_filter *dmxdevfilter = file->private_data;  	struct dmxdev *dmxdev = dmxdevfilter->dev;  	int ret; +	if (!dmxdev->may_do_mmap) +		return -ENOTTY; +  	if (mutex_lock_interruptible(&dmxdev->mutex))  		return -ERESTARTSYS; @@ -1249,7 +1269,7 @@ static const struct file_operations dvb_demux_fops = {  	.release = dvb_demux_release,  	.poll = dvb_demux_poll,  	.llseek = default_llseek, -#ifdef DVB_MMAP +#ifdef CONFIG_DVB_MMAP  	.mmap = dvb_demux_mmap,  #endif  }; @@ -1280,7 +1300,7 @@ static int dvb_dvr_do_ioctl(struct 
file *file,  		ret = dvb_dvr_set_buffer_size(dmxdev, arg);  		break; -#ifdef DVB_MMAP +#ifdef CONFIG_DVB_MMAP  	case DMX_REQBUFS:  		ret = dvb_vb2_reqbufs(&dmxdev->dvr_vb2_ctx, parg);  		break; @@ -1304,7 +1324,7 @@ static int dvb_dvr_do_ioctl(struct file *file,  		break;  #endif  	default: -		ret = -EINVAL; +		ret = -ENOTTY;  		break;  	}  	mutex_unlock(&dmxdev->mutex); @@ -1322,11 +1342,6 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)  	struct dvb_device *dvbdev = file->private_data;  	struct dmxdev *dmxdev = dvbdev->priv;  	__poll_t mask = 0; -#ifndef DVB_MMAP -	bool need_ringbuffer = false; -#else -	const bool need_ringbuffer = true; -#endif  	dprintk("%s\n", __func__); @@ -1337,11 +1352,8 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)  	poll_wait(file, &dmxdev->dvr_buffer.queue, wait); -#ifndef DVB_MMAP -	if ((file->f_flags & O_ACCMODE) == O_RDONLY) -		need_ringbuffer = true; -#endif -	if (need_ringbuffer) { +	if (((file->f_flags & O_ACCMODE) == O_RDONLY) || +	    dmxdev->may_do_mmap) {  		if (dmxdev->dvr_buffer.error)  			mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR); @@ -1353,13 +1365,16 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)  	return mask;  } -#ifdef DVB_MMAP +#ifdef CONFIG_DVB_MMAP  static int dvb_dvr_mmap(struct file *file, struct vm_area_struct *vma)  {  	struct dvb_device *dvbdev = file->private_data;  	struct dmxdev *dmxdev = dvbdev->priv;  	int ret; +	if (!dmxdev->may_do_mmap) +		return -ENOTTY; +  	if (dmxdev->exit)  		return -ENODEV; @@ -1381,7 +1396,7 @@ static const struct file_operations dvb_dvr_fops = {  	.release = dvb_dvr_release,  	.poll = dvb_dvr_poll,  	.llseek = default_llseek, -#ifdef DVB_MMAP +#ifdef CONFIG_DVB_MMAP  	.mmap = dvb_dvr_mmap,  #endif  }; diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c index 210eed0269b0..f45091246bdc 100644 --- a/drivers/media/dvb-core/dvb_demux.c +++ b/drivers/media/dvb-core/dvb_demux.c @@ 
-55,6 +55,17 @@ MODULE_PARM_DESC(dvb_demux_feed_err_pkts,  		dprintk(x);				\  } while (0) +#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG +#  define dprintk_sect_loss(x...) dprintk(x) +#else +#  define dprintk_sect_loss(x...) +#endif + +#define set_buf_flags(__feed, __flag)			\ +	do {						\ +		(__feed)->buffer_flags |= (__flag);	\ +	} while (0) +  /******************************************************************************   * static inlined helper functions   ******************************************************************************/ @@ -104,31 +115,30 @@ static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed,  {  	int count = payload(buf);  	int p; -#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG  	int ccok;  	u8 cc; -#endif  	if (count == 0)  		return -1;  	p = 188 - count; -#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG  	cc = buf[3] & 0x0f;  	ccok = ((feed->cc + 1) & 0x0f) == cc;  	feed->cc = cc; -	if (!ccok) -		dprintk("missed packet: %d instead of %d!\n", -			cc, (feed->cc + 1) & 0x0f); -#endif +	if (!ccok) { +		set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); +		dprintk_sect_loss("missed packet: %d instead of %d!\n", +				  cc, (feed->cc + 1) & 0x0f); +	}  	if (buf[1] & 0x40)	// PUSI ?  		
feed->peslen = 0xfffa;  	feed->peslen += count; -	return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts); +	return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts, +			   &feed->buffer_flags);  }  static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed, @@ -150,7 +160,7 @@ static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed,  		return 0;  	return feed->cb.sec(feed->feed.sec.secbuf, feed->feed.sec.seclen, -			    NULL, 0, &f->filter); +			    NULL, 0, &f->filter, &feed->buffer_flags);  }  static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed) @@ -169,8 +179,10 @@ static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed)  	if (sec->check_crc) {  		section_syntax_indicator = ((sec->secbuf[1] & 0x80) != 0);  		if (section_syntax_indicator && -		    demux->check_crc32(feed, sec->secbuf, sec->seclen)) +		    demux->check_crc32(feed, sec->secbuf, sec->seclen)) { +			set_buf_flags(feed, DMX_BUFFER_FLAG_HAD_CRC32_DISCARD);  			return -1; +		}  	}  	do { @@ -187,7 +199,6 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed)  {  	struct dmx_section_feed *sec = &feed->feed.sec; -#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG  	if (sec->secbufp < sec->tsfeedp) {  		int n = sec->tsfeedp - sec->secbufp; @@ -197,12 +208,13 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed)  		 * but just first and last.  		 
*/  		if (sec->secbuf[0] != 0xff || sec->secbuf[n - 1] != 0xff) { -			dprintk("section ts padding loss: %d/%d\n", -			       n, sec->tsfeedp); -			dprintk("pad data: %*ph\n", n, sec->secbuf); +			set_buf_flags(feed, +				      DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); +			dprintk_sect_loss("section ts padding loss: %d/%d\n", +					  n, sec->tsfeedp); +			dprintk_sect_loss("pad data: %*ph\n", n, sec->secbuf);  		}  	} -#endif  	sec->tsfeedp = sec->secbufp = sec->seclen = 0;  	sec->secbuf = sec->secbuf_base; @@ -237,11 +249,10 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed,  		return 0;  	if (sec->tsfeedp + len > DMX_MAX_SECFEED_SIZE) { -#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG -		dprintk("section buffer full loss: %d/%d\n", -			sec->tsfeedp + len - DMX_MAX_SECFEED_SIZE, -			DMX_MAX_SECFEED_SIZE); -#endif +		set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); +		dprintk_sect_loss("section buffer full loss: %d/%d\n", +				  sec->tsfeedp + len - DMX_MAX_SECFEED_SIZE, +				  DMX_MAX_SECFEED_SIZE);  		len = DMX_MAX_SECFEED_SIZE - sec->tsfeedp;  	} @@ -269,12 +280,13 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed,  		sec->seclen = seclen;  		sec->crc_val = ~0;  		/* dump [secbuf .. 
secbuf+seclen) */ -		if (feed->pusi_seen) +		if (feed->pusi_seen) {  			dvb_dmx_swfilter_section_feed(feed); -#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG -		else -			dprintk("pusi not seen, discarding section data\n"); -#endif +		} else { +			set_buf_flags(feed, +				      DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); +			dprintk_sect_loss("pusi not seen, discarding section data\n"); +		}  		sec->secbufp += seclen;	/* secbufp and secbuf moving together is */  		sec->secbuf += seclen;	/* redundant but saves pointer arithmetic */  	} @@ -307,18 +319,22 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,  	}  	if (!ccok || dc_i) { -#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG -		if (dc_i) -			dprintk("%d frame with disconnect indicator\n", +		if (dc_i) { +			set_buf_flags(feed, +				      DMX_BUFFER_FLAG_DISCONTINUITY_INDICATOR); +			dprintk_sect_loss("%d frame with disconnect indicator\n",  				cc); -		else -			dprintk("discontinuity: %d instead of %d. %d bytes lost\n", +		} else { +			set_buf_flags(feed, +				      DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); +			dprintk_sect_loss("discontinuity: %d instead of %d. %d bytes lost\n",  				cc, (feed->cc + 1) & 0x0f, count + 4); +		}  		/* -		 * those bytes under sume circumstances will again be reported +		 * those bytes under some circumstances will again be reported  		 * in the following dvb_dmx_swfilter_section_new  		 */ -#endif +  		/*  		 * Discontinuity detected. Reset pusi_seen to  		 * stop feeding of suspicious data until next PUSI=1 arrives @@ -326,6 +342,7 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,  		 * FIXME: does it make sense if the MPEG-TS is the one  		 *	reporting discontinuity?  		 
*/ +  		feed->pusi_seen = false;  		dvb_dmx_swfilter_section_new(feed);  	} @@ -345,11 +362,11 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,  			dvb_dmx_swfilter_section_new(feed);  			dvb_dmx_swfilter_section_copy_dump(feed, after,  							   after_len); +		} else if (count > 0) { +			set_buf_flags(feed, +				      DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); +			dprintk_sect_loss("PUSI=1 but %d bytes lost\n", count);  		} -#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG -		else if (count > 0) -			dprintk("PUSI=1 but %d bytes lost\n", count); -#endif  	} else {  		/* PUSI=0 (is not set), no section boundary */  		dvb_dmx_swfilter_section_copy_dump(feed, &buf[p], count); @@ -369,7 +386,8 @@ static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed,  			if (feed->ts_type & TS_PAYLOAD_ONLY)  				dvb_dmx_swfilter_payload(feed, buf);  			else -				feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts); +				feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts, +					    &feed->buffer_flags);  		}  		/* Used only on full-featured devices */  		if (feed->ts_type & TS_DECODER) @@ -430,6 +448,11 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)  	}  	if (buf[1] & 0x80) { +		list_for_each_entry(feed, &demux->feed_list, list_head) { +			if ((feed->pid != pid) && (feed->pid != 0x2000)) +				continue; +			set_buf_flags(feed, DMX_BUFFER_FLAG_TEI); +		}  		dprintk_tscheck("TEI detected. 
PID=0x%x data1=0x%x\n",  				pid, buf[1]);  		/* data in this packet can't be trusted - drop it unless @@ -445,6 +468,13 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)  						(demux->cnt_storage[pid] + 1) & 0xf;  				if ((buf[3] & 0xf) != demux->cnt_storage[pid]) { +					list_for_each_entry(feed, &demux->feed_list, list_head) { +						if ((feed->pid != pid) && (feed->pid != 0x2000)) +							continue; +						set_buf_flags(feed, +							      DMX_BUFFER_PKT_COUNTER_MISMATCH); +					} +  					dprintk_tscheck("TS packet counter mismatch. PID=0x%x expected 0x%x got 0x%x\n",  							pid, demux->cnt_storage[pid],  							buf[3] & 0xf); @@ -466,7 +496,8 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)  		if (feed->pid == pid)  			dvb_dmx_swfilter_packet_type(feed, buf);  		else if (feed->pid == 0x2000) -			feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts); +			feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts, +				    &feed->buffer_flags);  	}  } @@ -585,7 +616,8 @@ void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count)  	spin_lock_irqsave(&demux->lock, flags); -	demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts); +	demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts, +			   &demux->feed->buffer_flags);  	spin_unlock_irqrestore(&demux->lock, flags);  } @@ -785,6 +817,7 @@ static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,  	feed->demux = demux;  	feed->pid = 0xffff;  	feed->peslen = 0xfffa; +	feed->buffer_flags = 0;  	(*ts_feed) = &feed->feed.ts;  	(*ts_feed)->parent = dmx; @@ -1042,6 +1075,7 @@ static int dvbdmx_allocate_section_feed(struct dmx_demux *demux,  	dvbdmxfeed->cb.sec = callback;  	dvbdmxfeed->demux = dvbdmx;  	dvbdmxfeed->pid = 0xffff; +	dvbdmxfeed->buffer_flags = 0;  	dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base;  	dvbdmxfeed->feed.sec.secbufp = dvbdmxfeed->feed.sec.seclen = 0;  	dvbdmxfeed->feed.sec.tsfeedp = 0; diff --git 
a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c index b6c7eec863b9..ba39f9942e1d 100644 --- a/drivers/media/dvb-core/dvb_net.c +++ b/drivers/media/dvb-core/dvb_net.c @@ -883,7 +883,8 @@ static void dvb_net_ule(struct net_device *dev, const u8 *buf, size_t buf_len)  static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len,  			       const u8 *buffer2, size_t buffer2_len, -			       struct dmx_ts_feed *feed) +			       struct dmx_ts_feed *feed, +			       u32 *buffer_flags)  {  	struct net_device *dev = feed->priv; @@ -992,7 +993,7 @@ static void dvb_net_sec(struct net_device *dev,  static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len,  		 const u8 *buffer2, size_t buffer2_len, -		 struct dmx_section_filter *filter) +		 struct dmx_section_filter *filter, u32 *buffer_flags)  {  	struct net_device *dev = filter->priv; diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c index 763145d74e83..b811adf88afa 100644 --- a/drivers/media/dvb-core/dvb_vb2.c +++ b/drivers/media/dvb-core/dvb_vb2.c @@ -256,7 +256,8 @@ int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx)  }  int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx, -			const unsigned char *src, int len) +			const unsigned char *src, int len, +			enum dmx_buffer_flags *buffer_flags)  {  	unsigned long flags = 0;  	void *vbuf = NULL; @@ -264,15 +265,17 @@ int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx,  	unsigned char *psrc = (unsigned char *)src;  	int ll = 0; -	dprintk(3, "[%s] %d bytes are rcvd\n", ctx->name, len); -	if (!src) { -		dprintk(3, "[%s]:NULL pointer src\n", ctx->name); -		/**normal case: This func is called twice from demux driver -		 * once with valid src pointer, second time with NULL pointer -		 */ +	/* +	 * normal case: This func is called twice from demux driver +	 * one with valid src pointer, second time with NULL pointer +	 */ +	if (!src || !len)  		return 0; -	}  	spin_lock_irqsave(&ctx->slock, flags); +	if (buffer_flags && 
*buffer_flags) { +		ctx->flags |= *buffer_flags; +		*buffer_flags = 0; +	}  	while (todo) {  		if (!ctx->buf) {  			if (list_empty(&ctx->dvb_q)) { @@ -395,6 +398,7 @@ int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)  int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)  { +	unsigned long flags;  	int ret;  	ret = vb2_core_dqbuf(&ctx->vb_q, &b->index, b, ctx->nonblocking); @@ -402,7 +406,16 @@ int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)  		dprintk(1, "[%s] errno=%d\n", ctx->name, ret);  		return ret;  	} -	dprintk(5, "[%s] index=%d\n", ctx->name, b->index); + +	spin_lock_irqsave(&ctx->slock, flags); +	b->count = ctx->count++; +	b->flags = ctx->flags; +	ctx->flags = 0; +	spin_unlock_irqrestore(&ctx->slock, flags); + +	dprintk(5, "[%s] index=%d, count=%d, flags=%d\n", +		ctx->name, b->index, ctx->count, b->flags); +  	return 0;  } diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c index 50bce68ffd66..65d157fe76d1 100644 --- a/drivers/media/dvb-frontends/m88ds3103.c +++ b/drivers/media/dvb-frontends/m88ds3103.c @@ -1262,11 +1262,12 @@ static int m88ds3103_select(struct i2c_mux_core *muxc, u32 chan)   * New users must use I2C client binding directly!   */  struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg, -		struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter) +				      struct i2c_adapter *i2c, +				      struct i2c_adapter **tuner_i2c_adapter)  {  	struct i2c_client *client;  	struct i2c_board_info board_info; -	struct m88ds3103_platform_data pdata; +	struct m88ds3103_platform_data pdata = {};  	pdata.clk = cfg->clock;  	pdata.i2c_wr_max = cfg->i2c_wr_max; @@ -1409,6 +1410,8 @@ static int m88ds3103_probe(struct i2c_client *client,  	case M88DS3103_CHIP_ID:  		break;  	default: +		ret = -ENODEV; +		dev_err(&client->dev, "Unknown device. 
Chip_id=%02x\n", dev->chip_id);  		goto err_kfree;  	} diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index 3c1851984b90..2476d812f669 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c @@ -505,80 +505,77 @@ static struct i2c_vbi_ram_value vbi_ram_default[] =  	/* FIXME: Current api doesn't handle all VBI types, those not  	   yet supported are placed under #if 0 */  #if 0 -	{0x010, /* Teletext, SECAM, WST System A */ +	[0] = {0x010, /* Teletext, SECAM, WST System A */  		{V4L2_SLICED_TELETEXT_SECAM,6,23,1},  		{ 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x26,  		  0xe6, 0xb4, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00 }  	},  #endif -	{0x030, /* Teletext, PAL, WST System B */ +	[1] = {0x030, /* Teletext, PAL, WST System B */  		{V4L2_SLICED_TELETEXT_B,6,22,1},  		{ 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x2b,  		  0xa6, 0x72, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00 }  	},  #if 0 -	{0x050, /* Teletext, PAL, WST System C */ +	[2] = {0x050, /* Teletext, PAL, WST System C */  		{V4L2_SLICED_TELETEXT_PAL_C,6,22,1},  		{ 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22,  		  0xa6, 0x98, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 }  	}, -	{0x070, /* Teletext, NTSC, WST System B */ +	[3] = {0x070, /* Teletext, NTSC, WST System B */  		{V4L2_SLICED_TELETEXT_NTSC_B,10,21,1},  		{ 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x23,  		  0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 }  	}, -	{0x090, /* Tetetext, NTSC NABTS System C */ +	[4] = {0x090, /* Tetetext, NTSC NABTS System C */  		{V4L2_SLICED_TELETEXT_NTSC_C,10,21,1},  		{ 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22,  		  0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x15, 0x00 }  	}, -	{0x0b0, /* Teletext, NTSC-J, NABTS System D */ +	[5] = {0x0b0, /* Teletext, NTSC-J, NABTS System D */  		{V4L2_SLICED_TELETEXT_NTSC_D,10,21,1},  		{ 0xaa, 0xaa, 0xff, 0xff, 0xa7, 0x2e, 0x20, 0x23,  		  0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 }  	}, -	{0x0d0, /* Closed Caption, PAL/SECAM */ +	[6] = {0x0d0, /* 
Closed Caption, PAL/SECAM */  		{V4L2_SLICED_CAPTION_625,22,22,1},  		{ 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02,  		  0xa6, 0x7b, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 }  	},  #endif -	{0x0f0, /* Closed Caption, NTSC */ +	[7] = {0x0f0, /* Closed Caption, NTSC */  		{V4L2_SLICED_CAPTION_525,21,21,1},  		{ 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02,  		  0x69, 0x8c, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 }  	}, -	{0x110, /* Wide Screen Signal, PAL/SECAM */ +	[8] = {0x110, /* Wide Screen Signal, PAL/SECAM */  		{V4L2_SLICED_WSS_625,23,23,1},  		{ 0x5b, 0x55, 0xc5, 0xff, 0x00, 0x71, 0x6e, 0x42,  		  0xa6, 0xcd, 0x0f, 0x00, 0x00, 0x00, 0x3a, 0x00 }  	},  #if 0 -	{0x130, /* Wide Screen Signal, NTSC C */ +	[9] = {0x130, /* Wide Screen Signal, NTSC C */  		{V4L2_SLICED_WSS_525,20,20,1},  		{ 0x38, 0x00, 0x3f, 0x00, 0x00, 0x71, 0x6e, 0x43,  		  0x69, 0x7c, 0x08, 0x00, 0x00, 0x00, 0x39, 0x00 }  	}, -	{0x150, /* Vertical Interval Timecode (VITC), PAL/SECAM */ +	[10] = {0x150, /* Vertical Interval Timecode (VITC), PAL/SECAM */  		{V4l2_SLICED_VITC_625,6,22,0},  		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49,  		  0xa6, 0x85, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 }  	}, -	{0x170, /* Vertical Interval Timecode (VITC), NTSC */ +	[11] = {0x170, /* Vertical Interval Timecode (VITC), NTSC */  		{V4l2_SLICED_VITC_525,10,20,0},  		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49,  		  0x69, 0x94, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 }  	},  #endif -	{0x190, /* Video Program System (VPS), PAL */ +	[12] = {0x190, /* Video Program System (VPS), PAL */  		{V4L2_SLICED_VPS,16,16,0},  		{ 0xaa, 0xaa, 0xff, 0xff, 0xba, 0xce, 0x2b, 0x0d,  		  0xa6, 0xda, 0x0b, 0x00, 0x00, 0x00, 0x60, 0x00 }  	},  	/* 0x1d0 User programmable */ - -	/* End of struct */ -	{ (u16)-1 }  };  static int tvp5150_write_inittab(struct v4l2_subdev *sd, @@ -591,10 +588,10 @@ static int tvp5150_write_inittab(struct v4l2_subdev *sd,  	return 0;  } -static int tvp5150_vdp_init(struct v4l2_subdev *sd, -				const struct 
i2c_vbi_ram_value *regs) +static int tvp5150_vdp_init(struct v4l2_subdev *sd)  {  	unsigned int i; +	int j;  	/* Disable Full Field */  	tvp5150_write(sd, TVP5150_FULL_FIELD_ENA, 0); @@ -604,14 +601,17 @@ static int tvp5150_vdp_init(struct v4l2_subdev *sd,  		tvp5150_write(sd, i, 0xff);  	/* Load Ram Table */ -	while (regs->reg != (u16)-1) { +	for (j = 0; j < ARRAY_SIZE(vbi_ram_default); j++) { +		const struct i2c_vbi_ram_value *regs = &vbi_ram_default[j]; + +		if (!regs->type.vbi_type) +			continue; +  		tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_HIGH, regs->reg >> 8);  		tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_LOW, regs->reg);  		for (i = 0; i < 16; i++)  			tvp5150_write(sd, TVP5150_VDP_CONF_RAM_DATA, regs->values[i]); - -		regs++;  	}  	return 0;  } @@ -620,19 +620,23 @@ static int tvp5150_vdp_init(struct v4l2_subdev *sd,  static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd,  				struct v4l2_sliced_vbi_cap *cap)  { -	const struct i2c_vbi_ram_value *regs = vbi_ram_default; -	int line; +	int line, i;  	dev_dbg_lvl(sd->dev, 1, debug, "g_sliced_vbi_cap\n");  	memset(cap, 0, sizeof *cap); -	while (regs->reg != (u16)-1 ) { -		for (line=regs->type.ini_line;line<=regs->type.end_line;line++) { +	for (i = 0; i < ARRAY_SIZE(vbi_ram_default); i++) { +		const struct i2c_vbi_ram_value *regs = &vbi_ram_default[i]; + +		if (!regs->type.vbi_type) +			continue; + +		for (line = regs->type.ini_line; +		     line <= regs->type.end_line; +		     line++) {  			cap->service_lines[0][line] |= regs->type.vbi_type;  		}  		cap->service_set |= regs->type.vbi_type; - -		regs++;  	}  	return 0;  } @@ -651,14 +655,13 @@ static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd,   *	MSB = field2   */  static int tvp5150_set_vbi(struct v4l2_subdev *sd, -			const struct i2c_vbi_ram_value *regs,  			unsigned int type,u8 flags, int line,  			const int fields)  {  	struct tvp5150 *decoder = to_tvp5150(sd);  	v4l2_std_id std = decoder->norm;  	u8 reg; -	int pos = 0; +	int i, pos = 0;  	if 
(std == V4L2_STD_ALL) {  		dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n"); @@ -671,19 +674,19 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd,  	if (line < 6 || line > 27)  		return 0; -	while (regs->reg != (u16)-1) { +	for (i = 0; i < ARRAY_SIZE(vbi_ram_default); i++) { +		const struct i2c_vbi_ram_value *regs =  &vbi_ram_default[i]; + +		if (!regs->type.vbi_type) +			continue; +  		if ((type & regs->type.vbi_type) &&  		    (line >= regs->type.ini_line) &&  		    (line <= regs->type.end_line))  			break; - -		regs++;  		pos++;  	} -	if (regs->reg == (u16)-1) -		return 0; -  	type = pos | (flags & 0xf0);  	reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI; @@ -696,8 +699,7 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd,  	return type;  } -static int tvp5150_get_vbi(struct v4l2_subdev *sd, -			const struct i2c_vbi_ram_value *regs, int line) +static int tvp5150_get_vbi(struct v4l2_subdev *sd, int line)  {  	struct tvp5150 *decoder = to_tvp5150(sd);  	v4l2_std_id std = decoder->norm; @@ -726,8 +728,8 @@ static int tvp5150_get_vbi(struct v4l2_subdev *sd,  			return 0;  		}  		pos = ret & 0x0f; -		if (pos < 0x0f) -			type |= regs[pos].type.vbi_type; +		if (pos < ARRAY_SIZE(vbi_ram_default)) +			type |= vbi_ram_default[pos].type.vbi_type;  	}  	return type; @@ -788,7 +790,7 @@ static int tvp5150_reset(struct v4l2_subdev *sd, u32 val)  	tvp5150_write_inittab(sd, tvp5150_init_default);  	/* Initializes VDP registers */ -	tvp5150_vdp_init(sd, vbi_ram_default); +	tvp5150_vdp_init(sd);  	/* Selects decoder input */  	tvp5150_selmux(sd); @@ -1121,8 +1123,8 @@ static int tvp5150_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f  		for (i = 0; i <= 23; i++) {  			svbi->service_lines[1][i] = 0;  			svbi->service_lines[0][i] = -				tvp5150_set_vbi(sd, vbi_ram_default, -				       svbi->service_lines[0][i], 0xf0, i, 3); +				tvp5150_set_vbi(sd, svbi->service_lines[0][i], +						0xf0, i, 3);  		}  		/* Enables FIFO */  		
tvp5150_write(sd, TVP5150_FIFO_OUT_CTRL, 1); @@ -1148,7 +1150,7 @@ static int tvp5150_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f  	for (i = 0; i <= 23; i++) {  		svbi->service_lines[0][i] = -			tvp5150_get_vbi(sd, vbi_ram_default, i); +			tvp5150_get_vbi(sd, i);  		mask |= svbi->service_lines[0][i];  	}  	svbi->service_set = mask; diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c index dc8e577b2f74..d6816effb878 100644 --- a/drivers/media/pci/ttpci/av7110.c +++ b/drivers/media/pci/ttpci/av7110.c @@ -324,14 +324,15 @@ static int DvbDmxFilterCallback(u8 *buffer1, size_t buffer1_len,  		}  		return dvbdmxfilter->feed->cb.sec(buffer1, buffer1_len,  						  buffer2, buffer2_len, -						  &dvbdmxfilter->filter); +						  &dvbdmxfilter->filter, NULL);  	case DMX_TYPE_TS:  		if (!(dvbdmxfilter->feed->ts_type & TS_PACKET))  			return 0;  		if (dvbdmxfilter->feed->ts_type & TS_PAYLOAD_ONLY)  			return dvbdmxfilter->feed->cb.ts(buffer1, buffer1_len,  							 buffer2, buffer2_len, -							 &dvbdmxfilter->feed->feed.ts); +							 &dvbdmxfilter->feed->feed.ts, +							 NULL);  		else  			av7110_p2t_write(buffer1, buffer1_len,  					 dvbdmxfilter->feed->pid, diff --git a/drivers/media/pci/ttpci/av7110_av.c b/drivers/media/pci/ttpci/av7110_av.c index 4daba76ec240..ef1bc17cdc4d 100644 --- a/drivers/media/pci/ttpci/av7110_av.c +++ b/drivers/media/pci/ttpci/av7110_av.c @@ -99,7 +99,7 @@ int av7110_record_cb(struct dvb_filter_pes2ts *p2t, u8 *buf, size_t len)  		buf[4] = buf[5] = 0;  	if (dvbdmxfeed->ts_type & TS_PAYLOAD_ONLY)  		return dvbdmxfeed->cb.ts(buf, len, NULL, 0, -					 &dvbdmxfeed->feed.ts); +					 &dvbdmxfeed->feed.ts, NULL);  	else  		return dvb_filter_pes2ts(p2t, buf, len, 1);  } @@ -109,7 +109,7 @@ static int dvb_filter_pes2ts_cb(void *priv, unsigned char *data)  	struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) priv;  	dvbdmxfeed->cb.ts(data, 188, NULL, 0, -			  &dvbdmxfeed->feed.ts); +			  
&dvbdmxfeed->feed.ts, NULL);  	return 0;  } @@ -814,7 +814,7 @@ static void p_to_t(u8 const *buf, long int length, u16 pid, u8 *counter,  			memcpy(obuf + l, buf + c, TS_SIZE - l);  			c = length;  		} -		feed->cb.ts(obuf, 188, NULL, 0, &feed->feed.ts); +		feed->cb.ts(obuf, 188, NULL, 0, &feed->feed.ts, NULL);  		pes_start = 0;  	}  } diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c index 92f93a880015..aba488cd0e64 100644 --- a/drivers/media/platform/tegra-cec/tegra_cec.c +++ b/drivers/media/platform/tegra-cec/tegra_cec.c @@ -172,16 +172,13 @@ static irqreturn_t tegra_cec_irq_handler(int irq, void *data)  		}  	} -	if (status & (TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN | -		      TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED | -		      TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED | -		      TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED)) { +	if (status & TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED) {  		cec_write(cec, TEGRA_CEC_INT_STAT, -			  (TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN | -			   TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED | -			   TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED | -			   TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED)); -	} else if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) { +			  TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED); +		cec->rx_done = false; +		cec->rx_buf_cnt = 0; +	} +	if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) {  		u32 v;  		cec_write(cec, TEGRA_CEC_INT_STAT, @@ -255,7 +252,7 @@ static int tegra_cec_adap_enable(struct cec_adapter *adap, bool enable)  		  TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED |  		  TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED |  		  TEGRA_CEC_INT_MASK_RX_REGISTER_FULL | -		  TEGRA_CEC_INT_MASK_RX_REGISTER_OVERRUN); +		  TEGRA_CEC_INT_MASK_RX_START_BIT_DETECTED);  	cec_write(cec, TEGRA_CEC_HW_CONTROL, TEGRA_CEC_HWCTRL_TX_RX_MODE);  	return 0; diff --git a/drivers/media/usb/au0828/Kconfig b/drivers/media/usb/au0828/Kconfig index 70521e0b4c53..bfaa806633df 100644 --- 
a/drivers/media/usb/au0828/Kconfig +++ b/drivers/media/usb/au0828/Kconfig @@ -1,7 +1,7 @@  config VIDEO_AU0828  	tristate "Auvitek AU0828 support" -	depends on I2C && INPUT && DVB_CORE && USB +	depends on I2C && INPUT && DVB_CORE && USB && VIDEO_V4L2  	select I2C_ALGOBIT  	select VIDEO_TVEEPROM  	select VIDEOBUF2_VMALLOC diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c index a8900f5571f7..44ca66cb9b8f 100644 --- a/drivers/media/usb/ttusb-dec/ttusb_dec.c +++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c @@ -428,7 +428,7 @@ static int ttusb_dec_audio_pes2ts_cb(void *priv, unsigned char *data)  	struct ttusb_dec *dec = priv;  	dec->audio_filter->feed->cb.ts(data, 188, NULL, 0, -				       &dec->audio_filter->feed->feed.ts); +				       &dec->audio_filter->feed->feed.ts, NULL);  	return 0;  } @@ -438,7 +438,7 @@ static int ttusb_dec_video_pes2ts_cb(void *priv, unsigned char *data)  	struct ttusb_dec *dec = priv;  	dec->video_filter->feed->cb.ts(data, 188, NULL, 0, -				       &dec->video_filter->feed->feed.ts); +				       &dec->video_filter->feed->feed.ts, NULL);  	return 0;  } @@ -490,7 +490,7 @@ static void ttusb_dec_process_pva(struct ttusb_dec *dec, u8 *pva, int length)  		if (output_pva) {  			dec->video_filter->feed->cb.ts(pva, length, NULL, 0, -				&dec->video_filter->feed->feed.ts); +				&dec->video_filter->feed->feed.ts, NULL);  			return;  		} @@ -551,7 +551,7 @@ static void ttusb_dec_process_pva(struct ttusb_dec *dec, u8 *pva, int length)  	case 0x02:		/* MainAudioStream */  		if (output_pva) {  			dec->audio_filter->feed->cb.ts(pva, length, NULL, 0, -				&dec->audio_filter->feed->feed.ts); +				&dec->audio_filter->feed->feed.ts, NULL);  			return;  		} @@ -589,7 +589,7 @@ static void ttusb_dec_process_filter(struct ttusb_dec *dec, u8 *packet,  	if (filter)  		filter->feed->cb.sec(&packet[2], length - 2, NULL, 0, -				     &filter->filter); +				     &filter->filter, NULL);  }  static void 
ttusb_dec_process_packet(struct ttusb_dec *dec) diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig index bf52fbd07aed..8e37e7c5e0f7 100644 --- a/drivers/media/v4l2-core/Kconfig +++ b/drivers/media/v4l2-core/Kconfig @@ -7,6 +7,7 @@ config VIDEO_V4L2  	tristate  	depends on (I2C || I2C=n) && VIDEO_DEV  	select RATIONAL +	select VIDEOBUF2_V4L2 if VIDEOBUF2_CORE  	default (I2C || I2C=n) && VIDEO_DEV  config VIDEO_ADV_DEBUG diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile index 80de2cb9c476..7df54582e956 100644 --- a/drivers/media/v4l2-core/Makefile +++ b/drivers/media/v4l2-core/Makefile @@ -13,7 +13,7 @@ ifeq ($(CONFIG_COMPAT),y)  endif  obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o  ifeq ($(CONFIG_TRACEPOINTS),y) -  videodev-objs += vb2-trace.o v4l2-trace.o +  videodev-objs += v4l2-trace.o  endif  videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o @@ -35,4 +35,3 @@ obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o  ccflags-y += -I$(srctree)/drivers/media/dvb-frontends  ccflags-y += -I$(srctree)/drivers/media/tuners - diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c index 0a7bdbed3a6f..e9c1485c32b9 100644 --- a/drivers/memory/brcmstb_dpfe.c +++ b/drivers/memory/brcmstb_dpfe.c @@ -45,8 +45,16 @@  #define REG_TO_DCPU_MBOX	0x10  #define REG_TO_HOST_MBOX	0x14 +/* Macros to process offsets returned by the DCPU */ +#define DRAM_MSG_ADDR_OFFSET	0x0 +#define DRAM_MSG_TYPE_OFFSET	0x1c +#define DRAM_MSG_ADDR_MASK	((1UL << DRAM_MSG_TYPE_OFFSET) - 1) +#define DRAM_MSG_TYPE_MASK	((1UL << \ +				 (BITS_PER_LONG - DRAM_MSG_TYPE_OFFSET)) - 1) +  /* Message RAM */ -#define DCPU_MSG_RAM(x)		(0x100 + (x) * sizeof(u32)) +#define DCPU_MSG_RAM_START	0x100 +#define DCPU_MSG_RAM(x)		(DCPU_MSG_RAM_START + (x) * sizeof(u32))  /* DRAM Info Offsets & Masks */  #define DRAM_INFO_INTERVAL	0x0 @@ -255,6 +263,40 @@ static unsigned int get_msg_chksum(const u32 msg[])  	return sum;  } +static void __iomem *get_msg_ptr(struct 
private_data *priv, u32 response, +				 char *buf, ssize_t *size) +{ +	unsigned int msg_type; +	unsigned int offset; +	void __iomem *ptr = NULL; + +	msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK; +	offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK; + +	/* +	 * msg_type == 1: the offset is relative to the message RAM +	 * msg_type == 0: the offset is relative to the data RAM (this is the +	 *                previous way of passing data) +	 * msg_type is anything else: there's critical hardware problem +	 */ +	switch (msg_type) { +	case 1: +		ptr = priv->regs + DCPU_MSG_RAM_START + offset; +		break; +	case 0: +		ptr = priv->dmem + offset; +		break; +	default: +		dev_emerg(priv->dev, "invalid message reply from DCPU: %#x\n", +			response); +		if (buf && size) +			*size = sprintf(buf, +				"FATAL: communication error with DCPU\n"); +	} + +	return ptr; +} +  static int __send_command(struct private_data *priv, unsigned int cmd,  			  u32 result[])  { @@ -507,7 +549,7 @@ static ssize_t show_info(struct device *dev, struct device_attribute *devattr,  {  	u32 response[MSG_FIELD_MAX];  	unsigned int info; -	int ret; +	ssize_t ret;  	ret = generic_show(DPFE_CMD_GET_INFO, response, dev, buf);  	if (ret) @@ -528,18 +570,19 @@ static ssize_t show_refresh(struct device *dev,  	u32 response[MSG_FIELD_MAX];  	void __iomem *info;  	struct private_data *priv; -	unsigned int offset;  	u8 refresh, sr_abort, ppre, thermal_offs, tuf;  	u32 mr4; -	int ret; +	ssize_t ret;  	ret = generic_show(DPFE_CMD_GET_REFRESH, response, dev, buf);  	if (ret)  		return ret;  	priv = dev_get_drvdata(dev); -	offset = response[MSG_ARG0]; -	info = priv->dmem + offset; + +	info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret); +	if (!info) +		return ret;  	mr4 = readl_relaxed(info + DRAM_INFO_MR4) & DRAM_INFO_MR4_MASK; @@ -561,7 +604,6 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,  	u32 response[MSG_FIELD_MAX];  	struct private_data 
*priv;  	void __iomem *info; -	unsigned int offset;  	unsigned long val;  	int ret; @@ -574,8 +616,10 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,  	if (ret)  		return ret; -	offset = response[MSG_ARG0]; -	info = priv->dmem + offset; +	info = get_msg_ptr(priv, response[MSG_ARG0], NULL, NULL); +	if (!info) +		return -EIO; +  	writel_relaxed(val, info + DRAM_INFO_INTERVAL);  	return count; @@ -587,23 +631,25 @@ static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr,  	u32 response[MSG_FIELD_MAX];  	struct private_data *priv;  	void __iomem *info; -	unsigned int offset; -	int ret; +	ssize_t ret;  	ret = generic_show(DPFE_CMD_GET_VENDOR, response, dev, buf);  	if (ret)  		return ret; -	offset = response[MSG_ARG0];  	priv = dev_get_drvdata(dev); -	info = priv->dmem + offset; + +	info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret); +	if (!info) +		return ret;  	return sprintf(buf, "%#x %#x %#x %#x %#x\n",  		       readl_relaxed(info + DRAM_VENDOR_MR5) & DRAM_VENDOR_MASK,  		       readl_relaxed(info + DRAM_VENDOR_MR6) & DRAM_VENDOR_MASK,  		       readl_relaxed(info + DRAM_VENDOR_MR7) & DRAM_VENDOR_MASK,  		       readl_relaxed(info + DRAM_VENDOR_MR8) & DRAM_VENDOR_MASK, -		       readl_relaxed(info + DRAM_VENDOR_ERROR)); +		       readl_relaxed(info + DRAM_VENDOR_ERROR) & +				     DRAM_VENDOR_MASK);  }  static int brcmstb_dpfe_resume(struct platform_device *pdev) diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index 8d12017b9893..4470630dd545 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -2687,6 +2687,8 @@ mptctl_hp_targetinfo(unsigned long arg)  				__FILE__, __LINE__, iocnum);  		return -ENODEV;  	} +	if (karg.hdr.id >= MPT_MAX_FC_DEVICES) +		return -EINVAL;  	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",  	    ioc->name)); diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 
3e5eabdae8d9..772d02922529 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -548,12 +548,6 @@ int mei_cldev_disable(struct mei_cl_device *cldev)  		goto out;  	} -	if (bus->dev_state == MEI_DEV_POWER_DOWN) { -		dev_dbg(bus->dev, "Device is powering down, don't bother with disconnection\n"); -		err = 0; -		goto out; -	} -  	err = mei_cl_disconnect(cl);  	if (err < 0)  		dev_err(bus->dev, "Could not disconnect from the ME client\n"); diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index be64969d986a..7e60c1817c31 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -945,6 +945,12 @@ int mei_cl_disconnect(struct mei_cl *cl)  		return 0;  	} +	if (dev->dev_state == MEI_DEV_POWER_DOWN) { +		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n"); +		mei_cl_set_disconnected(cl); +		return 0; +	} +  	rets = pm_runtime_get(dev->dev);  	if (rets < 0 && rets != -EINPROGRESS) {  		pm_runtime_put_noidle(dev->dev); diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 0ccccbaf530d..e4b10b2d1a08 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -132,6 +132,11 @@  #define MEI_DEV_ID_KBP        0xA2BA  /* Kaby Point */  #define MEI_DEV_ID_KBP_2      0xA2BB  /* Kaby Point 2 */ +#define MEI_DEV_ID_CNP_LP     0x9DE0  /* Cannon Point LP */ +#define MEI_DEV_ID_CNP_LP_4   0x9DE4  /* Cannon Point LP 4 (iTouch) */ +#define MEI_DEV_ID_CNP_H      0xA360  /* Cannon Point H */ +#define MEI_DEV_ID_CNP_H_4    0xA364  /* Cannon Point H 4 (iTouch) */ +  /*   * MEI HW Section   */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 4a0ccda4d04b..ea4e152270a3 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -98,6 +98,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = {  	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},  	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, +	
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH8_CFG)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, +  	/* required last entry */  	{0, }  }; diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c index d9aa407db06a..038509e5d031 100644 --- a/drivers/misc/ocxl/file.c +++ b/drivers/misc/ocxl/file.c @@ -102,10 +102,32 @@ static long afu_ioctl_attach(struct ocxl_context *ctx,  	return rc;  } +static long afu_ioctl_get_metadata(struct ocxl_context *ctx, +		struct ocxl_ioctl_metadata __user *uarg) +{ +	struct ocxl_ioctl_metadata arg; + +	memset(&arg, 0, sizeof(arg)); + +	arg.version = 0; + +	arg.afu_version_major = ctx->afu->config.version_major; +	arg.afu_version_minor = ctx->afu->config.version_minor; +	arg.pasid = ctx->pasid; +	arg.pp_mmio_size = ctx->afu->config.pp_mmio_stride; +	arg.global_mmio_size = ctx->afu->config.global_mmio_size; + +	if (copy_to_user(uarg, &arg, sizeof(arg))) +		return -EFAULT; + +	return 0; +} +  #define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" :			\  			x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" :	\  			x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" :		\  			x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" :	\ +			x == OCXL_IOCTL_GET_METADATA ? 
"GET_METADATA" :	\  			"UNKNOWN")  static long afu_ioctl(struct file *file, unsigned int cmd, @@ -133,8 +155,10 @@ static long afu_ioctl(struct file *file, unsigned int cmd,  		if (!rc) {  			rc = copy_to_user((u64 __user *) args, &irq_offset,  					sizeof(irq_offset)); -			if (rc) +			if (rc) {  				ocxl_afu_irq_free(ctx, irq_offset); +				return -EFAULT; +			}  		}  		break; @@ -157,6 +181,11 @@ static long afu_ioctl(struct file *file, unsigned int cmd,  					irq_fd.eventfd);  		break; +	case OCXL_IOCTL_GET_METADATA: +		rc = afu_ioctl_get_metadata(ctx, +				(struct ocxl_ioctl_metadata __user *) args); +		break; +  	default:  		rc = -EINVAL;  	} @@ -277,7 +306,7 @@ static ssize_t afu_read(struct file *file, char __user *buf, size_t count,  	struct ocxl_context *ctx = file->private_data;  	struct ocxl_kernel_event_header header;  	ssize_t rc; -	size_t used = 0; +	ssize_t used = 0;  	DEFINE_WAIT(event_wait);  	memset(&header, 0, sizeof(header)); @@ -329,7 +358,7 @@ static ssize_t afu_read(struct file *file, char __user *buf, size_t count,  	used += sizeof(header); -	rc = (ssize_t) used; +	rc = used;  	return rc;  } diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 20135a5de748..2cfb963d9f37 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -72,6 +72,7 @@ MODULE_ALIAS("mmc:block");  #define MMC_BLK_TIMEOUT_MS  (10 * 1000)  #define MMC_SANITIZE_REQ_TIMEOUT 240000  #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) +#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)  #define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \  				  (rq_data_dir(req) == WRITE)) @@ -587,6 +588,24 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,  	}  	/* +	 * Make sure the cache of the PARTITION_CONFIG register and +	 * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write +	 * changed it successfully. 
+	 */ +	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) && +	    (cmd.opcode == MMC_SWITCH)) { +		struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); +		u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg); + +		/* +		 * Update cache so the next mmc_blk_part_switch call operates +		 * on up-to-date data. +		 */ +		card->ext_csd.part_config = value; +		main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK; +	} + +	/*  	 * According to the SD specs, some commands require a delay after  	 * issuing the command.  	 */ diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h index 79a5b985ccf5..9c821eedd156 100644 --- a/drivers/mmc/core/card.h +++ b/drivers/mmc/core/card.h @@ -82,6 +82,7 @@ struct mmc_fixup {  #define CID_MANFID_APACER       0x27  #define CID_MANFID_KINGSTON     0x70  #define CID_MANFID_HYNIX	0x90 +#define CID_MANFID_NUMONYX	0xFE  #define END_FIXUP { NULL } diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 908e4db03535..42d6aa89a48a 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -848,7 +848,6 @@ int mmc_interrupt_hpi(struct mmc_card *card)  		return 1;  	} -	mmc_claim_host(card->host);  	err = mmc_send_status(card, &status);  	if (err) {  		pr_err("%s: Get card status fail\n", mmc_hostname(card->host)); @@ -890,7 +889,6 @@ int mmc_interrupt_hpi(struct mmc_card *card)  	} while (!err);  out: -	mmc_release_host(card->host);  	return err;  } @@ -932,9 +930,7 @@ static int mmc_read_bkops_status(struct mmc_card *card)  	int err;  	u8 *ext_csd; -	mmc_claim_host(card->host);  	err = mmc_get_ext_csd(card, &ext_csd); -	mmc_release_host(card->host);  	if (err)  		return err; diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h index 75d317623852..5153577754f0 100644 --- a/drivers/mmc/core/quirks.h +++ b/drivers/mmc/core/quirks.h @@ -109,6 +109,12 @@ static const struct mmc_fixup mmc_ext_csd_fixups[] = {  	 */  	MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX,  			
      0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5), +	/* +	 * Certain Micron (Numonyx) eMMC 4.5 cards might get broken when HPI +	 * feature is used so disable the HPI feature for such buggy cards. +	 */ +	MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_NUMONYX, +			      0x014e, add_quirk, MMC_QUIRK_BROKEN_HPI, 6),  	END_FIXUP  }; diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 229dc18f0581..768972af8b85 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1265,7 +1265,8 @@ static int bcm2835_add_host(struct bcm2835_host *host)  	char pio_limit_string[20];  	int ret; -	mmc->f_max = host->max_clk; +	if (!mmc->f_max || mmc->f_max > host->max_clk) +		mmc->f_max = host->max_clk;  	mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV;  	mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000); diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c index 35026795be28..a84aa3f1ae85 100644 --- a/drivers/mmc/host/dw_mmc-exynos.c +++ b/drivers/mmc/host/dw_mmc-exynos.c @@ -165,9 +165,15 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)  static int dw_mci_exynos_runtime_resume(struct device *dev)  {  	struct dw_mci *host = dev_get_drvdata(dev); +	int ret; + +	ret = dw_mci_runtime_resume(dev); +	if (ret) +		return ret;  	dw_mci_exynos_config_smu(host); -	return dw_mci_runtime_resume(dev); + +	return ret;  }  /** @@ -487,6 +493,7 @@ static unsigned long exynos_dwmmc_caps[4] = {  static const struct dw_mci_drv_data exynos_drv_data = {  	.caps			= exynos_dwmmc_caps, +	.num_caps		= ARRAY_SIZE(exynos_dwmmc_caps),  	.init			= dw_mci_exynos_priv_init,  	.set_ios		= dw_mci_exynos_set_ios,  	.parse_dt		= dw_mci_exynos_parse_dt, diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c index 73fd75c3c824..89cdb3d533bb 100644 --- a/drivers/mmc/host/dw_mmc-k3.c +++ b/drivers/mmc/host/dw_mmc-k3.c @@ -135,6 +135,9 @@ static int dw_mci_hi6220_parse_dt(struct dw_mci *host)  	if 
(priv->ctrl_id < 0)  		priv->ctrl_id = 0; +	if (priv->ctrl_id >= TIMING_MODE) +		return -EINVAL; +  	host->priv = priv;  	return 0;  } @@ -207,6 +210,7 @@ static int dw_mci_hi6220_execute_tuning(struct dw_mci_slot *slot, u32 opcode)  static const struct dw_mci_drv_data hi6220_data = {  	.caps			= dw_mci_hi6220_caps, +	.num_caps		= ARRAY_SIZE(dw_mci_hi6220_caps),  	.switch_voltage		= dw_mci_hi6220_switch_voltage,  	.set_ios		= dw_mci_hi6220_set_ios,  	.parse_dt		= dw_mci_hi6220_parse_dt, diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c index a3f1c2b30145..339295212935 100644 --- a/drivers/mmc/host/dw_mmc-rockchip.c +++ b/drivers/mmc/host/dw_mmc-rockchip.c @@ -319,6 +319,7 @@ static const struct dw_mci_drv_data rk2928_drv_data = {  static const struct dw_mci_drv_data rk3288_drv_data = {  	.caps			= dw_mci_rk3288_dwmmc_caps, +	.num_caps		= ARRAY_SIZE(dw_mci_rk3288_dwmmc_caps),  	.set_ios		= dw_mci_rk3288_set_ios,  	.execute_tuning		= dw_mci_rk3288_execute_tuning,  	.parse_dt		= dw_mci_rk3288_parse_dt, diff --git a/drivers/mmc/host/dw_mmc-zx.c b/drivers/mmc/host/dw_mmc-zx.c index d38e94ae2b85..c06b5393312f 100644 --- a/drivers/mmc/host/dw_mmc-zx.c +++ b/drivers/mmc/host/dw_mmc-zx.c @@ -195,6 +195,7 @@ static unsigned long zx_dwmmc_caps[3] = {  static const struct dw_mci_drv_data zx_drv_data = {  	.caps			= zx_dwmmc_caps, +	.num_caps		= ARRAY_SIZE(zx_dwmmc_caps),  	.execute_tuning		= dw_mci_zx_execute_tuning,  	.prepare_hs400_tuning	= dw_mci_zx_prepare_hs400_tuning,  	.parse_dt               = dw_mci_zx_parse_dt, diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 0aa39975f33b..06d47414d0c1 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -165,6 +165,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)  {  	struct dw_mci *host = s->private; +	pm_runtime_get_sync(host->dev); +  	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));  	seq_printf(s, "RINTSTS:\t0x%08x\n", 
mci_readl(host, RINTSTS));  	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD)); @@ -172,6 +174,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)  	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));  	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); +	pm_runtime_put_autosuspend(host->dev); +  	return 0;  } @@ -409,7 +413,9 @@ static inline void dw_mci_set_cto(struct dw_mci *host)  	cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;  	if (cto_div == 0)  		cto_div = 1; -	cto_ms = DIV_ROUND_UP(MSEC_PER_SEC * cto_clks * cto_div, host->bus_hz); + +	cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div, +				  host->bus_hz);  	/* add a bit spare time */  	cto_ms += 10; @@ -558,6 +564,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)  					(sizeof(struct idmac_desc_64addr) *  							(i + 1))) >> 32;  			/* Initialize reserved and buffer size fields to "0" */ +			p->des0 = 0;  			p->des1 = 0;  			p->des2 = 0;  			p->des3 = 0; @@ -580,6 +587,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)  		     i++, p++) {  			p->des3 = cpu_to_le32(host->sg_dma +  					(sizeof(struct idmac_desc) * (i + 1))); +			p->des0 = 0;  			p->des1 = 0;  		} @@ -1795,8 +1803,8 @@ static bool dw_mci_reset(struct dw_mci *host)  	}  	if (host->use_dma == TRANS_MODE_IDMAC) -		/* It is also recommended that we reset and reprogram idmac */ -		dw_mci_idmac_reset(host); +		/* It is also required that we reinit idmac */ +		dw_mci_idmac_init(host);  	ret = true; @@ -1944,8 +1952,9 @@ static void dw_mci_set_drto(struct dw_mci *host)  	drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;  	if (drto_div == 0)  		drto_div = 1; -	drto_ms = DIV_ROUND_UP(MSEC_PER_SEC * drto_clks * drto_div, -			       host->bus_hz); + +	drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div, +				   host->bus_hz);  	/* add a bit spare time */  	drto_ms += 10; @@ -2778,12 +2787,57 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)  	return IRQ_HANDLED;  } 
+static int dw_mci_init_slot_caps(struct dw_mci_slot *slot) +{ +	struct dw_mci *host = slot->host; +	const struct dw_mci_drv_data *drv_data = host->drv_data; +	struct mmc_host *mmc = slot->mmc; +	int ctrl_id; + +	if (host->pdata->caps) +		mmc->caps = host->pdata->caps; + +	/* +	 * Support MMC_CAP_ERASE by default. +	 * It needs to use trim/discard/erase commands. +	 */ +	mmc->caps |= MMC_CAP_ERASE; + +	if (host->pdata->pm_caps) +		mmc->pm_caps = host->pdata->pm_caps; + +	if (host->dev->of_node) { +		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); +		if (ctrl_id < 0) +			ctrl_id = 0; +	} else { +		ctrl_id = to_platform_device(host->dev)->id; +	} + +	if (drv_data && drv_data->caps) { +		if (ctrl_id >= drv_data->num_caps) { +			dev_err(host->dev, "invalid controller id %d\n", +				ctrl_id); +			return -EINVAL; +		} +		mmc->caps |= drv_data->caps[ctrl_id]; +	} + +	if (host->pdata->caps2) +		mmc->caps2 = host->pdata->caps2; + +	/* Process SDIO IRQs through the sdio_irq_work. */ +	if (mmc->caps & MMC_CAP_SDIO_IRQ) +		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; + +	return 0; +} +  static int dw_mci_init_slot(struct dw_mci *host)  {  	struct mmc_host *mmc;  	struct dw_mci_slot *slot; -	const struct dw_mci_drv_data *drv_data = host->drv_data; -	int ctrl_id, ret; +	int ret;  	u32 freq[2];  	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); @@ -2817,38 +2871,13 @@ static int dw_mci_init_slot(struct dw_mci *host)  	if (!mmc->ocr_avail)  		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; -	if (host->pdata->caps) -		mmc->caps = host->pdata->caps; - -	/* -	 * Support MMC_CAP_ERASE by default. -	 * It needs to use trim/discard/erase commands. 
-	 */ -	mmc->caps |= MMC_CAP_ERASE; - -	if (host->pdata->pm_caps) -		mmc->pm_caps = host->pdata->pm_caps; - -	if (host->dev->of_node) { -		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); -		if (ctrl_id < 0) -			ctrl_id = 0; -	} else { -		ctrl_id = to_platform_device(host->dev)->id; -	} -	if (drv_data && drv_data->caps) -		mmc->caps |= drv_data->caps[ctrl_id]; - -	if (host->pdata->caps2) -		mmc->caps2 = host->pdata->caps2; -  	ret = mmc_of_parse(mmc);  	if (ret)  		goto err_host_allocated; -	/* Process SDIO IRQs through the sdio_irq_work. */ -	if (mmc->caps & MMC_CAP_SDIO_IRQ) -		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; +	ret = dw_mci_init_slot_caps(slot); +	if (ret) +		goto err_host_allocated;  	/* Useful defaults if platform data is unset. */  	if (host->use_dma == TRANS_MODE_IDMAC) { diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index e3124f06a47e..1424bd490dd1 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h @@ -543,6 +543,7 @@ struct dw_mci_slot {  /**   * dw_mci driver data - dw-mshc implementation specific driver data.   * @caps: mmc subsystem specified capabilities of the controller(s). + * @num_caps: number of capabilities specified by @caps.   * @init: early implementation specific initialization.   * @set_ios: handle bus specific extensions.   * @parse_dt: parse implementation specific device tree properties. 
@@ -554,6 +555,7 @@ struct dw_mci_slot {   */  struct dw_mci_drv_data {  	unsigned long	*caps; +	u32		num_caps;  	int		(*init)(struct dw_mci *host);  	void		(*set_ios)(struct dw_mci *host, struct mmc_ios *ios);  	int		(*parse_dt)(struct dw_mci *host); diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index 22438ebfe4e6..4f972b879fe6 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -717,22 +717,6 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,  static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)  {  	struct meson_host *host = mmc_priv(mmc); -	int ret; - -	/* -	 * If this is the initial tuning, try to get a sane Rx starting -	 * phase before doing the actual tuning. -	 */ -	if (!mmc->doing_retune) { -		ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); - -		if (ret) -			return ret; -	} - -	ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk); -	if (ret) -		return ret;  	return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);  } @@ -763,9 +747,8 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  		if (!IS_ERR(mmc->supply.vmmc))  			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); -		/* Reset phases */ +		/* Reset rx phase */  		clk_set_phase(host->rx_clk, 0); -		clk_set_phase(host->tx_clk, 270);  		break; diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 4065da58789d..32321bd596d8 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -680,7 +680,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)  	host->hw_name	= "ACPI";  	host->ops	= &sdhci_acpi_ops_dflt;  	host->irq	= platform_get_irq(pdev, 0); -	if (host->irq <= 0) { +	if (host->irq < 0) {  		err = -EINVAL;  		goto err_free;  	} diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 6d1a983e6227..82c4f05f91d8 100644 --- 
a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -654,9 +654,36 @@ static void byt_read_dsm(struct sdhci_pci_slot *slot)  	slot->chip->rpm_retune = intel_host->d3_retune;  } -static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) +static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ +	int err = sdhci_execute_tuning(mmc, opcode); +	struct sdhci_host *host = mmc_priv(mmc); + +	if (err) +		return err; + +	/* +	 * Tuning can leave the IP in an active state (Buffer Read Enable bit +	 * set) which prevents the entry to low power states (i.e. S0i3). Data +	 * reset will clear it. +	 */ +	sdhci_reset(host, SDHCI_RESET_DATA); + +	return 0; +} + +static void byt_probe_slot(struct sdhci_pci_slot *slot)  { +	struct mmc_host_ops *ops = &slot->host->mmc_host_ops; +  	byt_read_dsm(slot); + +	ops->execute_tuning = intel_execute_tuning; +} + +static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) +{ +	byt_probe_slot(slot);  	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |  				 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |  				 MMC_CAP_CMD_DURING_TFR | @@ -779,7 +806,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)  {  	int err; -	byt_read_dsm(slot); +	byt_probe_slot(slot);  	err = ni_set_max_freq(slot);  	if (err) @@ -792,7 +819,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)  static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)  { -	byt_read_dsm(slot); +	byt_probe_slot(slot);  	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |  				 MMC_CAP_WAIT_WHILE_BUSY;  	return 0; @@ -800,7 +827,7 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)  static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)  { -	byt_read_dsm(slot); +	byt_probe_slot(slot);  	slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |  				 MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;  	slot->cd_idx = 0; diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 
de8c902059b8..7d80a8bb96fe 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -479,7 +479,7 @@ static int shrink_ecclayout(struct mtd_info *mtd,  	for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {  		u32 eccpos; -		ret = mtd_ooblayout_ecc(mtd, section, &oobregion); +		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);  		if (ret < 0) {  			if (ret != -ERANGE)  				return ret; @@ -526,7 +526,7 @@ static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)  	for (i = 0; i < ARRAY_SIZE(to->eccpos);) {  		u32 eccpos; -		ret = mtd_ooblayout_ecc(mtd, section, &oobregion); +		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);  		if (ret < 0) {  			if (ret != -ERANGE)  				return ret; diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index e6b8c59f2c0d..736ac887303c 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -328,7 +328,7 @@ config MTD_NAND_MARVELL  	tristate "NAND controller support on Marvell boards"  	depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \  		   COMPILE_TEST -	depends on HAS_IOMEM +	depends on HAS_IOMEM && HAS_DMA  	help  	  This enables the NAND flash controller driver for Marvell boards,  	  including: diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c index 4872a7ba6503..5a9c2f0020c2 100644 --- a/drivers/mtd/nand/fsl_ifc_nand.c +++ b/drivers/mtd/nand/fsl_ifc_nand.c @@ -173,14 +173,9 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)  /* returns nonzero if entire page is blank */  static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl, -			  u32 *eccstat, unsigned int bufnum) +			  u32 eccstat, unsigned int bufnum)  { -	u32 reg = eccstat[bufnum / 4]; -	int errors; - -	errors = (reg >> ((3 - bufnum % 4) * 8)) & 15; - -	return errors; +	return  (eccstat >> ((3 - bufnum % 4) * 8)) & 15;  }  /* @@ -193,7 +188,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)  	struct fsl_ifc_ctrl *ctrl = 
priv->ctrl;  	struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;  	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; -	u32 eccstat[4]; +	u32 eccstat;  	int i;  	/* set the chip select for NAND Transaction */ @@ -228,19 +223,17 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)  	if (nctrl->eccread) {  		int errors;  		int bufnum = nctrl->page & priv->bufnum_mask; -		int sector = bufnum * chip->ecc.steps; -		int sector_end = sector + chip->ecc.steps - 1; +		int sector_start = bufnum * chip->ecc.steps; +		int sector_end = sector_start + chip->ecc.steps - 1;  		__be32 *eccstat_regs; -		if (ctrl->version >= FSL_IFC_VERSION_2_0_0) -			eccstat_regs = ifc->ifc_nand.v2_nand_eccstat; -		else -			eccstat_regs = ifc->ifc_nand.v1_nand_eccstat; +		eccstat_regs = ifc->ifc_nand.nand_eccstat; +		eccstat = ifc_in32(&eccstat_regs[sector_start / 4]); -		for (i = sector / 4; i <= sector_end / 4; i++) -			eccstat[i] = ifc_in32(&eccstat_regs[i]); +		for (i = sector_start; i <= sector_end; i++) { +			if (i != sector_start && !(i % 4)) +				eccstat = ifc_in32(&eccstat_regs[i / 4]); -		for (i = sector; i <= sector_end; i++) {  			errors = check_read_ecc(mtd, ctrl, eccstat, i);  			if (errors == 15) { @@ -626,6 +619,7 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)  	struct fsl_ifc_ctrl *ctrl = priv->ctrl;  	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;  	u32 nand_fsr; +	int status;  	/* Use READ_STATUS command, but wait for the device to be ready */  	ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | @@ -640,12 +634,12 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)  	fsl_ifc_run_command(mtd);  	nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr); - +	status = nand_fsr >> 24;  	/*  	 * The chip always seems to report that it is  	 * write-protected, even when it is not.  	 
*/ -	return nand_fsr | NAND_STATUS_WP; +	return status | NAND_STATUS_WP;  }  /* diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c index 80d31a58e558..f367144f3c6f 100644 --- a/drivers/mtd/nand/vf610_nfc.c +++ b/drivers/mtd/nand/vf610_nfc.c @@ -752,10 +752,8 @@ static int vf610_nfc_probe(struct platform_device *pdev)  		if (mtd->oobsize > 64)  			mtd->oobsize = 64; -		/* -		 * mtd->ecclayout is not specified here because we're using the -		 * default large page ECC layout defined in NAND core. -		 */ +		/* Use default large page ECC layout defined in NAND core */ +		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);  		if (chip->ecc.strength == 32) {  			nfc->ecc_mode = ECC_60_BYTE;  			chip->ecc.bytes = 60; diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index 1e37313054f3..6da69af103e6 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const struct net_device *dev,  	return 0;  } -static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) +static void cc770_tx(struct net_device *dev, int mo)  {  	struct cc770_priv *priv = netdev_priv(dev); -	struct net_device_stats *stats = &dev->stats; -	struct can_frame *cf = (struct can_frame *)skb->data; -	unsigned int mo = obj2msgobj(CC770_OBJ_TX); +	struct can_frame *cf = (struct can_frame *)priv->tx_skb->data;  	u8 dlc, rtr;  	u32 id;  	int i; -	if (can_dropped_invalid_skb(dev, skb)) -		return NETDEV_TX_OK; - -	if ((cc770_read_reg(priv, -			    msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) { -		netdev_err(dev, "TX register is still occupied!\n"); -		return NETDEV_TX_BUSY; -	} - -	netif_stop_queue(dev); -  	dlc = cf->can_dlc;  	id = cf->can_id; -	if (cf->can_id & CAN_RTR_FLAG) -		rtr = 0; -	else -		rtr = MSGCFG_DIR; +	rtr = cf->can_id & CAN_RTR_FLAG ? 
0 : MSGCFG_DIR; + +	cc770_write_reg(priv, msgobj[mo].ctrl0, +			MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);  	cc770_write_reg(priv, msgobj[mo].ctrl1,  			RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES); -	cc770_write_reg(priv, msgobj[mo].ctrl0, -			MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES); +  	if (id & CAN_EFF_FLAG) {  		id &= CAN_EFF_MASK;  		cc770_write_reg(priv, msgobj[mo].config, @@ -439,22 +425,30 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)  	for (i = 0; i < dlc; i++)  		cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]); -	/* Store echo skb before starting the transfer */ -	can_put_echo_skb(skb, dev, 0); -  	cc770_write_reg(priv, msgobj[mo].ctrl1, -			RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); +			RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); +	cc770_write_reg(priv, msgobj[mo].ctrl0, +			MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC); +} -	stats->tx_bytes += dlc; +static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ +	struct cc770_priv *priv = netdev_priv(dev); +	unsigned int mo = obj2msgobj(CC770_OBJ_TX); +	if (can_dropped_invalid_skb(dev, skb)) +		return NETDEV_TX_OK; -	/* -	 * HM: We had some cases of repeated IRQs so make sure the -	 * INT is acknowledged I know it's already further up, but -	 * doing again fixed the issue -	 */ -	cc770_write_reg(priv, msgobj[mo].ctrl0, -			MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); +	netif_stop_queue(dev); + +	if ((cc770_read_reg(priv, +			    msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) { +		netdev_err(dev, "TX register is still occupied!\n"); +		return NETDEV_TX_BUSY; +	} + +	priv->tx_skb = skb; +	cc770_tx(dev, mo);  	return NETDEV_TX_OK;  } @@ -680,19 +674,46 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)  	struct cc770_priv *priv = netdev_priv(dev);  	struct net_device_stats *stats = &dev->stats;  	unsigned int mo = obj2msgobj(o); +	struct can_frame *cf; +	u8 ctrl1; + +	ctrl1 = 
cc770_read_reg(priv, msgobj[mo].ctrl1); -	/* Nothing more to send, switch off interrupts */  	cc770_write_reg(priv, msgobj[mo].ctrl0,  			MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); -	/* -	 * We had some cases of repeated IRQ so make sure the -	 * INT is acknowledged +	cc770_write_reg(priv, msgobj[mo].ctrl1, +			RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES); + +	if (unlikely(!priv->tx_skb)) { +		netdev_err(dev, "missing tx skb in tx interrupt\n"); +		return; +	} + +	if (unlikely(ctrl1 & MSGLST_SET)) { +		stats->rx_over_errors++; +		stats->rx_errors++; +	} + +	/* When the CC770 is sending an RTR message and it receives a regular +	 * message that matches the id of the RTR message, it will overwrite the +	 * outgoing message in the TX register. When this happens we must +	 * process the received message and try to transmit the outgoing skb +	 * again.  	 */ -	cc770_write_reg(priv, msgobj[mo].ctrl0, -			MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); +	if (unlikely(ctrl1 & NEWDAT_SET)) { +		cc770_rx(dev, mo, ctrl1); +		cc770_tx(dev, mo); +		return; +	} +	cf = (struct can_frame *)priv->tx_skb->data; +	stats->tx_bytes += cf->can_dlc;  	stats->tx_packets++; + +	can_put_echo_skb(priv->tx_skb, dev, 0);  	can_get_echo_skb(dev, 0); +	priv->tx_skb = NULL; +  	netif_wake_queue(dev);  } @@ -804,6 +825,7 @@ struct net_device *alloc_cc770dev(int sizeof_priv)  	priv->can.do_set_bittiming = cc770_set_bittiming;  	priv->can.do_set_mode = cc770_set_mode;  	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; +	priv->tx_skb = NULL;  	memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags)); diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h index a1739db98d91..95752e1d1283 100644 --- a/drivers/net/can/cc770/cc770.h +++ b/drivers/net/can/cc770/cc770.h @@ -193,6 +193,8 @@ struct cc770_priv {  	u8 cpu_interface;	/* CPU interface register */  	u8 clkout;		/* Clock out register */  	u8 bus_config;		/* Bus conffiguration register */ + +	struct 
sk_buff *tx_skb;  };  struct net_device *alloc_cc770dev(int sizeof_priv); diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 2772d05ff11c..fedd927ba6ed 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -30,6 +30,7 @@  #define IFI_CANFD_STCMD_ERROR_ACTIVE		BIT(2)  #define IFI_CANFD_STCMD_ERROR_PASSIVE		BIT(3)  #define IFI_CANFD_STCMD_BUSOFF			BIT(4) +#define IFI_CANFD_STCMD_ERROR_WARNING		BIT(5)  #define IFI_CANFD_STCMD_BUSMONITOR		BIT(16)  #define IFI_CANFD_STCMD_LOOPBACK		BIT(18)  #define IFI_CANFD_STCMD_DISABLE_CANFD		BIT(24) @@ -52,7 +53,10 @@  #define IFI_CANFD_TXSTCMD_OVERFLOW		BIT(13)  #define IFI_CANFD_INTERRUPT			0xc +#define IFI_CANFD_INTERRUPT_ERROR_BUSOFF	BIT(0)  #define IFI_CANFD_INTERRUPT_ERROR_WARNING	BIT(1) +#define IFI_CANFD_INTERRUPT_ERROR_STATE_CHG	BIT(2) +#define IFI_CANFD_INTERRUPT_ERROR_REC_TEC_INC	BIT(3)  #define IFI_CANFD_INTERRUPT_ERROR_COUNTER	BIT(10)  #define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY	BIT(16)  #define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE	BIT(22) @@ -61,6 +65,10 @@  #define IFI_CANFD_INTERRUPT_SET_IRQ		((u32)BIT(31))  #define IFI_CANFD_IRQMASK			0x10 +#define IFI_CANFD_IRQMASK_ERROR_BUSOFF		BIT(0) +#define IFI_CANFD_IRQMASK_ERROR_WARNING		BIT(1) +#define IFI_CANFD_IRQMASK_ERROR_STATE_CHG	BIT(2) +#define IFI_CANFD_IRQMASK_ERROR_REC_TEC_INC	BIT(3)  #define IFI_CANFD_IRQMASK_SET_ERR		BIT(7)  #define IFI_CANFD_IRQMASK_SET_TS		BIT(15)  #define IFI_CANFD_IRQMASK_TXFIFO_EMPTY		BIT(16) @@ -136,6 +144,8 @@  #define IFI_CANFD_SYSCLOCK			0x50  #define IFI_CANFD_VER				0x54 +#define IFI_CANFD_VER_REV_MASK			0xff +#define IFI_CANFD_VER_REV_MIN_SUPPORTED		0x15  #define IFI_CANFD_IP_ID				0x58  #define IFI_CANFD_IP_ID_VALUE			0xD073CAFD @@ -220,7 +230,10 @@ static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable)  	if (enable) {  		enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY | -			IFI_CANFD_IRQMASK_RXFIFO_NEMPTY; +			IFI_CANFD_IRQMASK_RXFIFO_NEMPTY | 
+			IFI_CANFD_IRQMASK_ERROR_STATE_CHG | +			IFI_CANFD_IRQMASK_ERROR_WARNING | +			IFI_CANFD_IRQMASK_ERROR_BUSOFF;  		if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)  			enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER;  	} @@ -361,12 +374,13 @@ static int ifi_canfd_handle_lost_msg(struct net_device *ndev)  	return 1;  } -static int ifi_canfd_handle_lec_err(struct net_device *ndev, const u32 errctr) +static int ifi_canfd_handle_lec_err(struct net_device *ndev)  {  	struct ifi_canfd_priv *priv = netdev_priv(ndev);  	struct net_device_stats *stats = &ndev->stats;  	struct can_frame *cf;  	struct sk_buff *skb; +	u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);  	const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST |  			    IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST |  			    IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST | @@ -449,6 +463,11 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,  	switch (new_state) {  	case CAN_STATE_ERROR_ACTIVE: +		/* error active state */ +		priv->can.can_stats.error_warning++; +		priv->can.state = CAN_STATE_ERROR_ACTIVE; +		break; +	case CAN_STATE_ERROR_WARNING:  		/* error warning state */  		priv->can.can_stats.error_warning++;  		priv->can.state = CAN_STATE_ERROR_WARNING; @@ -477,7 +496,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,  	ifi_canfd_get_berr_counter(ndev, &bec);  	switch (new_state) { -	case CAN_STATE_ERROR_ACTIVE: +	case CAN_STATE_ERROR_WARNING:  		/* error warning state */  		cf->can_id |= CAN_ERR_CRTL;  		cf->data[1] = (bec.txerr > bec.rxerr) ? 
@@ -510,22 +529,21 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,  	return 1;  } -static int ifi_canfd_handle_state_errors(struct net_device *ndev, u32 stcmd) +static int ifi_canfd_handle_state_errors(struct net_device *ndev)  {  	struct ifi_canfd_priv *priv = netdev_priv(ndev); +	u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);  	int work_done = 0; -	u32 isr; -	/* -	 * The ErrWarn condition is a little special, since the bit is -	 * located in the INTERRUPT register instead of STCMD register. -	 */ -	isr = readl(priv->base + IFI_CANFD_INTERRUPT); -	if ((isr & IFI_CANFD_INTERRUPT_ERROR_WARNING) && +	if ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) && +	    (priv->can.state != CAN_STATE_ERROR_ACTIVE)) { +		netdev_dbg(ndev, "Error, entered active state\n"); +		work_done += ifi_canfd_handle_state_change(ndev, +						CAN_STATE_ERROR_ACTIVE); +	} + +	if ((stcmd & IFI_CANFD_STCMD_ERROR_WARNING) &&  	    (priv->can.state != CAN_STATE_ERROR_WARNING)) { -		/* Clear the interrupt */ -		writel(IFI_CANFD_INTERRUPT_ERROR_WARNING, -		       priv->base + IFI_CANFD_INTERRUPT);  		netdev_dbg(ndev, "Error, entered warning state\n");  		work_done += ifi_canfd_handle_state_change(ndev,  						CAN_STATE_ERROR_WARNING); @@ -552,18 +570,11 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)  {  	struct net_device *ndev = napi->dev;  	struct ifi_canfd_priv *priv = netdev_priv(ndev); -	const u32 stcmd_state_mask = IFI_CANFD_STCMD_ERROR_PASSIVE | -				     IFI_CANFD_STCMD_BUSOFF; -	int work_done = 0; - -	u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);  	u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD); -	u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR); +	int work_done = 0;  	/* Handle bus state changes */ -	if ((stcmd & stcmd_state_mask) || -	    ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) == 0)) -		work_done += ifi_canfd_handle_state_errors(ndev, stcmd); +	work_done += ifi_canfd_handle_state_errors(ndev);  	/* Handle lost messages on RX */  	if (rxstcmd & 
IFI_CANFD_RXSTCMD_OVERFLOW) @@ -571,7 +582,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)  	/* Handle lec errors on the bus */  	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) -		work_done += ifi_canfd_handle_lec_err(ndev, errctr); +		work_done += ifi_canfd_handle_lec_err(ndev);  	/* Handle normal messages on RX */  	if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY)) @@ -592,12 +603,13 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)  	struct net_device_stats *stats = &ndev->stats;  	const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY |  				IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER | +				IFI_CANFD_INTERRUPT_ERROR_COUNTER | +				IFI_CANFD_INTERRUPT_ERROR_STATE_CHG |  				IFI_CANFD_INTERRUPT_ERROR_WARNING | -				IFI_CANFD_INTERRUPT_ERROR_COUNTER; +				IFI_CANFD_INTERRUPT_ERROR_BUSOFF;  	const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY |  				IFI_CANFD_INTERRUPT_TXFIFO_REMOVE; -	const u32 clr_irq_mask = ~((u32)(IFI_CANFD_INTERRUPT_SET_IRQ | -					 IFI_CANFD_INTERRUPT_ERROR_WARNING)); +	const u32 clr_irq_mask = ~((u32)IFI_CANFD_INTERRUPT_SET_IRQ);  	u32 isr;  	isr = readl(priv->base + IFI_CANFD_INTERRUPT); @@ -933,7 +945,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)  	struct resource *res;  	void __iomem *addr;  	int irq, ret; -	u32 id; +	u32 id, rev;  	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);  	addr = devm_ioremap_resource(dev, res); @@ -947,6 +959,13 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)  		return -EINVAL;  	} +	rev = readl(addr + IFI_CANFD_VER) & IFI_CANFD_VER_REV_MASK; +	if (rev < IFI_CANFD_VER_REV_MIN_SUPPORTED) { +		dev_err(dev, "This block is too old (rev %i), minimum supported is rev %i\n", +			rev, IFI_CANFD_VER_REV_MIN_SUPPORTED); +		return -EINVAL; +	} +  	ndev = alloc_candev(sizeof(*priv), 1);  	if (!ndev)  		return -ENOMEM; diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 2594f7779c6f..b397a33f3d32 100644 --- 
a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -26,6 +26,7 @@  #include <linux/pm_runtime.h>  #include <linux/iopoll.h>  #include <linux/can/dev.h> +#include <linux/pinctrl/consumer.h>  /* napi related */  #define M_CAN_NAPI_WEIGHT	64 @@ -253,7 +254,7 @@ enum m_can_mram_cfg {  /* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */  #define RXFC_FWM_SHIFT	24 -#define RXFC_FWM_MASK	(0x7f < RXFC_FWM_SHIFT) +#define RXFC_FWM_MASK	(0x7f << RXFC_FWM_SHIFT)  #define RXFC_FS_SHIFT	16  #define RXFC_FS_MASK	(0x7f << RXFC_FS_SHIFT) @@ -1700,6 +1701,8 @@ static __maybe_unused int m_can_suspend(struct device *dev)  		m_can_clk_stop(priv);  	} +	pinctrl_pm_select_sleep_state(dev); +  	priv->can.state = CAN_STATE_SLEEPING;  	return 0; @@ -1710,6 +1713,8 @@ static __maybe_unused int m_can_resume(struct device *dev)  	struct net_device *ndev = dev_get_drvdata(dev);  	struct m_can_priv *priv = netdev_priv(ndev); +	pinctrl_pm_select_default_state(dev); +  	m_can_init_ram(priv);  	priv->can.state = CAN_STATE_ERROR_ACTIVE; diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index 55513411a82e..ed8561d4a90f 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -262,7 +262,6 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,  		spin_lock_irqsave(&priv->echo_lock, flags);  		can_get_echo_skb(priv->ndev, msg->client); -		spin_unlock_irqrestore(&priv->echo_lock, flags);  		/* count bytes of the echo instead of skb */  		stats->tx_bytes += cf_len; @@ -271,6 +270,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,  		/* restart tx queue (a slot is free) */  		netif_wake_queue(priv->ndev); +		spin_unlock_irqrestore(&priv->echo_lock, flags);  		return 0;  	} @@ -333,7 +333,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,  	/* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */  	if (pucan_status_is_rx_barrier(msg)) { -		unsigned long 
flags;  		if (priv->enable_tx_path) {  			int err = priv->enable_tx_path(priv); @@ -342,16 +341,8 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,  				return err;  		} -		/* restart network queue only if echo skb array is free */ -		spin_lock_irqsave(&priv->echo_lock, flags); - -		if (!priv->can.echo_skb[priv->echo_idx]) { -			spin_unlock_irqrestore(&priv->echo_lock, flags); - -			netif_wake_queue(ndev); -		} else { -			spin_unlock_irqrestore(&priv->echo_lock, flags); -		} +		/* start network queue (echo_skb array is empty) */ +		netif_start_queue(ndev);  		return 0;  	} @@ -726,11 +717,6 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,  	 */  	should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]); -	spin_unlock_irqrestore(&priv->echo_lock, flags); - -	/* write the skb on the interface */ -	priv->write_tx_msg(priv, msg); -  	/* stop network tx queue if not enough room to save one more msg too */  	if (priv->can.ctrlmode & CAN_CTRLMODE_FD)  		should_stop_tx_queue |= (room_left < @@ -742,6 +728,11 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,  	if (should_stop_tx_queue)  		netif_stop_queue(ndev); +	spin_unlock_irqrestore(&priv->echo_lock, flags); + +	/* write the skb on the interface */ +	priv->write_tx_msg(priv, msg); +  	return NETDEV_TX_OK;  } diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index 788c3464a3b0..3c51a884db87 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -349,8 +349,12 @@ static irqreturn_t pciefd_irq_handler(int irq, void *arg)  		priv->tx_pages_free++;  		spin_unlock_irqrestore(&priv->tx_lock, flags); -		/* wake producer up */ -		netif_wake_queue(priv->ucan.ndev); +		/* wake producer up (only if enough room in echo_skb array) */ +		spin_lock_irqsave(&priv->ucan.echo_lock, flags); +		if (!priv->ucan.can.echo_skb[priv->ucan.echo_idx]) +			
netif_wake_queue(priv->ucan.ndev); + +		spin_unlock_irqrestore(&priv->ucan.echo_lock, flags);  	}  	/* re-enable Rx DMA transfer for this CAN */ diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index d040aeb45172..15c2a831edf1 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -1,7 +1,10 @@  # SPDX-License-Identifier: GPL-2.0  obj-$(CONFIG_NET_DSA_BCM_SF2)	+= bcm-sf2.o  bcm-sf2-objs			:= bcm_sf2.o bcm_sf2_cfp.o -obj-$(CONFIG_NET_DSA_LOOP)	+= dsa_loop.o dsa_loop_bdinfo.o +obj-$(CONFIG_NET_DSA_LOOP)	+= dsa_loop.o +ifdef CONFIG_NET_DSA_LOOP +obj-$(CONFIG_FIXED_PHY)		+= dsa_loop_bdinfo.o +endif  obj-$(CONFIG_NET_DSA_MT7530)	+= mt7530.o  obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o  obj-$(CONFIG_NET_DSA_QCA8K)	+= qca8k.o diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index db830a1141d9..63e02a54d537 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -814,8 +814,8 @@ void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)  	unsigned int i;  	for (i = 0; i < mib_size; i++) -		memcpy(data + i * ETH_GSTRING_LEN, -		       mibs[i].name, ETH_GSTRING_LEN); +		strlcpy(data + i * ETH_GSTRING_LEN, +			mibs[i].name, ETH_GSTRING_LEN);  }  EXPORT_SYMBOL(b53_get_strings); diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index 29c3075bfb05..fdc673484add 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig @@ -3,7 +3,7 @@  #  config NET_VENDOR_8390 -	bool "National Semi-conductor 8390 devices" +	bool "National Semiconductor 8390 devices"  	default y  	depends on NET_VENDOR_NATSEMI  	---help--- diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index 3e5833cf1fab..eb23f9ba1a9a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -426,6 +426,8 @@ static int xgbe_pci_resume(struct pci_dev *pdev)  	struct 
net_device *netdev = pdata->netdev;  	int ret = 0; +	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff); +  	pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;  	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 0b49f1aeebd3..fc7383106946 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h @@ -36,6 +36,8 @@  #define AQ_CFG_TX_FRAME_MAX  (16U * 1024U)  #define AQ_CFG_RX_FRAME_MAX  (4U * 1024U) +#define AQ_CFG_TX_CLEAN_BUDGET 256U +  /* LRO */  #define AQ_CFG_IS_LRO_DEF           1U diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index ebbaf63eaf47..c96a92118b8b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -247,6 +247,8 @@ void aq_nic_ndev_init(struct aq_nic_s *self)  	self->ndev->hw_features |= aq_hw_caps->hw_features;  	self->ndev->features = aq_hw_caps->hw_features;  	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; +	self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; +  	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;  	self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN; @@ -937,3 +939,23 @@ err_exit:  out:  	return err;  } + +void aq_nic_shutdown(struct aq_nic_s *self) +{ +	int err = 0; + +	if (!self->ndev) +		return; + +	rtnl_lock(); + +	netif_device_detach(self->ndev); + +	err = aq_nic_stop(self); +	if (err < 0) +		goto err_exit; +	aq_nic_deinit(self); + +err_exit: +	rtnl_unlock(); +}
\ No newline at end of file diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index d16b0f1a95aa..219b550d1665 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -118,5 +118,6 @@ struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);  u32 aq_nic_get_fw_version(struct aq_nic_s *self);  int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);  int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self); +void aq_nic_shutdown(struct aq_nic_s *self);  #endif /* AQ_NIC_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 22889fc158f2..ecc6306f940f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -226,6 +226,10 @@ static int aq_pci_probe(struct pci_dev *pdev,  		goto err_ioremap;  	self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL); +	if (!self->aq_hw) { +		err = -ENOMEM; +		goto err_ioremap; +	}  	self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);  	for (bar = 0; bar < 4; ++bar) { @@ -235,19 +239,19 @@ static int aq_pci_probe(struct pci_dev *pdev,  			mmio_pa = pci_resource_start(pdev, bar);  			if (mmio_pa == 0U) {  				err = -EIO; -				goto err_ioremap; +				goto err_free_aq_hw;  			}  			reg_sz = pci_resource_len(pdev, bar);  			if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {  				err = -EIO; -				goto err_ioremap; +				goto err_free_aq_hw;  			}  			self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);  			if (!self->aq_hw->mmio) {  				err = -EIO; -				goto err_ioremap; +				goto err_free_aq_hw;  			}  			break;  		} @@ -255,7 +259,7 @@ static int aq_pci_probe(struct pci_dev *pdev,  	if (bar == 4) {  		err = -EIO; -		goto err_ioremap; +		goto err_free_aq_hw;  	}  	numvecs = min((u8)AQ_CFG_VECS_DEF, @@ -290,6 +294,8 @@ err_register:  	
aq_pci_free_irq_vectors(self);  err_hwinit:  	iounmap(self->aq_hw->mmio); +err_free_aq_hw: +	kfree(self->aq_hw);  err_ioremap:  	free_netdev(ndev);  err_pci_func: @@ -317,6 +323,20 @@ static void aq_pci_remove(struct pci_dev *pdev)  	pci_disable_device(pdev);  } +static void aq_pci_shutdown(struct pci_dev *pdev) +{ +	struct aq_nic_s *self = pci_get_drvdata(pdev); + +	aq_nic_shutdown(self); + +	pci_disable_device(pdev); + +	if (system_state == SYSTEM_POWER_OFF) { +		pci_wake_from_d3(pdev, false); +		pci_set_power_state(pdev, PCI_D3hot); +	} +} +  static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)  {  	struct aq_nic_s *self = pci_get_drvdata(pdev); @@ -339,6 +359,7 @@ static struct pci_driver aq_pci_ops = {  	.remove = aq_pci_remove,  	.suspend = aq_pci_suspend,  	.resume = aq_pci_resume, +	.shutdown = aq_pci_shutdown,  };  module_pci_driver(aq_pci_ops); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 0be6a11370bb..b5f1f62e8e25 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -136,11 +136,12 @@ void aq_ring_queue_stop(struct aq_ring_s *ring)  		netif_stop_subqueue(ndev, ring->idx);  } -void aq_ring_tx_clean(struct aq_ring_s *self) +bool aq_ring_tx_clean(struct aq_ring_s *self)  {  	struct device *dev = aq_nic_get_dev(self->aq_nic); +	unsigned int budget = AQ_CFG_TX_CLEAN_BUDGET; -	for (; self->sw_head != self->hw_head; +	for (; self->sw_head != self->hw_head && budget--;  		self->sw_head = aq_ring_next_dx(self, self->sw_head)) {  		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; @@ -167,6 +168,8 @@ void aq_ring_tx_clean(struct aq_ring_s *self)  		buff->pa = 0U;  		buff->eop_index = 0xffffU;  	} + +	return !!budget;  }  #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h 
b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 965fae0fb6e0..ac1329f4051d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -153,7 +153,7 @@ void aq_ring_free(struct aq_ring_s *self);  void aq_ring_update_queue_state(struct aq_ring_s *ring);  void aq_ring_queue_wake(struct aq_ring_s *ring);  void aq_ring_queue_stop(struct aq_ring_s *ring); -void aq_ring_tx_clean(struct aq_ring_s *self); +bool aq_ring_tx_clean(struct aq_ring_s *self);  int aq_ring_rx_clean(struct aq_ring_s *self,  		     struct napi_struct *napi,  		     int *work_done, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index f890b8a5a862..d335c334fa56 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -35,12 +35,12 @@ struct aq_vec_s {  static int aq_vec_poll(struct napi_struct *napi, int budget)  {  	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi); +	unsigned int sw_tail_old = 0U;  	struct aq_ring_s *ring = NULL; +	bool was_tx_cleaned = true; +	unsigned int i = 0U;  	int work_done = 0;  	int err = 0; -	unsigned int i = 0U; -	unsigned int sw_tail_old = 0U; -	bool was_tx_cleaned = false;  	if (!self) {  		err = -EINVAL; @@ -57,9 +57,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)  			if (ring[AQ_VEC_TX_ID].sw_head !=  			    ring[AQ_VEC_TX_ID].hw_head) { -				aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); +				was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);  				aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]); -				was_tx_cleaned = true;  			}  			err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw, @@ -90,7 +89,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)  			}  		} -		if (was_tx_cleaned) +		if (!was_tx_cleaned)  			work_done = budget;  		if (work_done < budget) { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c 
b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 967f0fd07fcf..d3b847ec7465 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -21,6 +21,10 @@  #define HW_ATL_UCP_0X370_REG    0x0370U +#define HW_ATL_MIF_CMD          0x0200U +#define HW_ATL_MIF_ADDR         0x0208U +#define HW_ATL_MIF_VAL          0x020CU +  #define HW_ATL_FW_SM_RAM        0x2U  #define HW_ATL_MPI_FW_VERSION	0x18  #define HW_ATL_MPI_CONTROL_ADR  0x0368U @@ -79,16 +83,15 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)  static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)  { +	u32 gsr, val;  	int k = 0; -	u32 gsr;  	aq_hw_write_reg(self, 0x404, 0x40e1);  	AQ_HW_SLEEP(50);  	/* Cleanup SPI */ -	aq_hw_write_reg(self, 0x534, 0xA0); -	aq_hw_write_reg(self, 0x100, 0x9F); -	aq_hw_write_reg(self, 0x100, 0x809F); +	val = aq_hw_read_reg(self, 0x53C); +	aq_hw_write_reg(self, 0x53C, val | 0x10);  	gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);  	aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000); @@ -97,7 +100,14 @@ static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)  	aq_hw_write_reg(self, 0x404, 0x80e0);  	aq_hw_write_reg(self, 0x32a8, 0x0);  	aq_hw_write_reg(self, 0x520, 0x1); + +	/* Reset SPI again because of possible interrupted SPI burst */ +	val = aq_hw_read_reg(self, 0x53C); +	aq_hw_write_reg(self, 0x53C, val | 0x10);  	AQ_HW_SLEEP(10); +	/* Clear SPI reset state */ +	aq_hw_write_reg(self, 0x53C, val & ~0x10); +  	aq_hw_write_reg(self, 0x404, 0x180e0);  	for (k = 0; k < 1000; k++) { @@ -141,13 +151,15 @@ static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)  		aq_pr_err("FW kickstart failed\n");  		return -EIO;  	} +	/* Old FW requires fixed delay after init */ +	AQ_HW_SLEEP(15);  	return 0;  }  static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)  { -	u32 gsr, rbl_status; +	u32 gsr, val, rbl_status;  
	int k;  	aq_hw_write_reg(self, 0x404, 0x40e1); @@ -157,6 +169,10 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)  	/* Alter RBL status */  	aq_hw_write_reg(self, 0x388, 0xDEAD); +	/* Cleanup SPI */ +	val = aq_hw_read_reg(self, 0x53C); +	aq_hw_write_reg(self, 0x53C, val | 0x10); +  	/* Global software reset*/  	hw_atl_rx_rx_reg_res_dis_set(self, 0U);  	hw_atl_tx_tx_reg_res_dis_set(self, 0U); @@ -204,6 +220,8 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)  		aq_pr_err("FW kickstart failed\n");  		return -EIO;  	} +	/* Old FW requires fixed delay after init */ +	AQ_HW_SLEEP(15);  	return 0;  } @@ -255,18 +273,22 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,  		}  	} -	aq_hw_write_reg(self, 0x00000208U, a); - -	for (++cnt; --cnt;) { -		u32 i = 0U; +	aq_hw_write_reg(self, HW_ATL_MIF_ADDR, a); -		aq_hw_write_reg(self, 0x00000200U, 0x00008000U); +	for (++cnt; --cnt && !err;) { +		aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U); -		for (i = 1024U; -			(0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { -		} +		if (IS_CHIP_FEATURE(REVISION_B1)) +			AQ_HW_WAIT_FOR(a != aq_hw_read_reg(self, +							   HW_ATL_MIF_ADDR), +				       1, 1000U); +		else +			AQ_HW_WAIT_FOR(!(0x100 & aq_hw_read_reg(self, +							   HW_ATL_MIF_CMD)), +				       1, 1000U); -		*(p++) = aq_hw_read_reg(self, 0x0000020CU); +		*(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL); +		a += 4;  	}  	hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); @@ -662,14 +684,18 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)  	u32 val = hw_atl_reg_glb_mif_id_get(self);  	u32 mif_rev = val & 0xFFU; -	if ((3U & mif_rev) == 1U) { -		chip_features |= -			HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 | +	if ((0xFU & mif_rev) == 1U) { +		chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |  			HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |  			HAL_ATLANTIC_UTILS_CHIP_MIPS; -	} else if ((3U & mif_rev) == 2U) { -		chip_features |= -			
HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 | +	} else if ((0xFU & mif_rev) == 2U) { +		chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 | +			HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | +			HAL_ATLANTIC_UTILS_CHIP_MIPS | +			HAL_ATLANTIC_UTILS_CHIP_TPO2 | +			HAL_ATLANTIC_UTILS_CHIP_RPF2; +	} else if ((0xFU & mif_rev) == 0xAU) { +		chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 |  			HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |  			HAL_ATLANTIC_UTILS_CHIP_MIPS |  			HAL_ATLANTIC_UTILS_CHIP_TPO2 | diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index 2c690947910a..cd8f18f39c61 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -161,6 +161,7 @@ struct __packed hw_aq_atl_utils_mbox {  #define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ       0x00000010U  #define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0  0x01000000U  #define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0  0x02000000U +#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B1  0x04000000U  #define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \  	self->chip_features) diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h index 5265b937677b..a445de6837a6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/ver.h +++ b/drivers/net/ethernet/aquantia/atlantic/ver.h @@ -13,7 +13,7 @@  #define NIC_MAJOR_DRIVER_VERSION           2  #define NIC_MINOR_DRIVER_VERSION           0  #define NIC_BUILD_DRIVER_VERSION           2 -#define NIC_REVISION_DRIVER_VERSION        0 +#define NIC_REVISION_DRIVER_VERSION        1  #define AQ_CFG_DRV_VERSION_SUFFIX "-kern" diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index 16f9bee992fe..0f6576802607 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -169,8 +169,10 @@ static int emac_rockchip_probe(struct 
platform_device *pdev)  	/* Optional regulator for PHY */  	priv->regulator = devm_regulator_get_optional(dev, "phy");  	if (IS_ERR(priv->regulator)) { -		if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) -			return -EPROBE_DEFER; +		if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) { +			err = -EPROBE_DEFER; +			goto out_clk_disable; +		}  		dev_err(dev, "no regulator found\n");  		priv->regulator = NULL;  	} diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index f15a8fc6dfc9..3fc549b88c43 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -855,10 +855,12 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,  static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,  					     struct bcm_sysport_tx_ring *ring)  { -	unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;  	unsigned int pkts_compl = 0, bytes_compl = 0;  	struct net_device *ndev = priv->netdev; +	unsigned int txbds_processed = 0;  	struct bcm_sysport_cb *cb; +	unsigned int txbds_ready; +	unsigned int c_index;  	u32 hw_ind;  	/* Clear status before servicing to reduce spurious interrupts */ @@ -871,29 +873,23 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,  	/* Compute how many descriptors have been processed since last call */  	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));  	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; -	ring->p_index = (hw_ind & RING_PROD_INDEX_MASK); - -	last_c_index = ring->c_index; -	num_tx_cbs = ring->size; - -	c_index &= (num_tx_cbs - 1); - -	if (c_index >= last_c_index) -		last_tx_cn = c_index - last_c_index; -	else -		last_tx_cn = num_tx_cbs - last_c_index + c_index; +	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;  	netif_dbg(priv, tx_done, ndev, -		  "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", -		  ring->index, c_index, 
last_tx_cn, last_c_index); +		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", +		  ring->index, ring->c_index, c_index, txbds_ready); -	while (last_tx_cn-- > 0) { -		cb = ring->cbs + last_c_index; +	while (txbds_processed < txbds_ready) { +		cb = &ring->cbs[ring->clean_index];  		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);  		ring->desc_count++; -		last_c_index++; -		last_c_index &= (num_tx_cbs - 1); +		txbds_processed++; + +		if (likely(ring->clean_index < ring->size - 1)) +			ring->clean_index++; +		else +			ring->clean_index = 0;  	}  	u64_stats_update_begin(&priv->syncp); @@ -1394,6 +1390,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,  	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);  	ring->index = index;  	ring->size = size; +	ring->clean_index = 0;  	ring->alloc_size = ring->size;  	ring->desc_cpu = p;  	ring->desc_count = ring->size; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index f5a984c1c986..19c91c76e327 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -706,7 +706,7 @@ struct bcm_sysport_tx_ring {  	unsigned int	desc_count;	/* Number of descriptors */  	unsigned int	curr_desc;	/* Current descriptor */  	unsigned int	c_index;	/* Last consumer index */ -	unsigned int	p_index;	/* Current producer index */ +	unsigned int	clean_index;	/* Current clean index */  	struct bcm_sysport_cb *cbs;	/* Transmit control blocks */  	struct dma_desc	*desc_cpu;	/* CPU view of the descriptor */  	struct bcm_sysport_priv *priv;	/* private context backpointer */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 74fc9af4aadb..b8388e93520a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -13913,7 +13913,7 @@ static void bnx2x_register_phc(struct 
bnx2x *bp)  	bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);  	if (IS_ERR(bp->ptp_clock)) {  		bp->ptp_clock = NULL; -		BNX2X_ERR("PTP clock registeration failed\n"); +		BNX2X_ERR("PTP clock registration failed\n");  	}  } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1500243b9886..c7e5e6f09647 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1439,7 +1439,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,  	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {  		u16 vlan_proto = tpa_info->metadata >>  			RX_CMP_FLAGS2_METADATA_TPID_SFT; -		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK; +		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;  		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);  	} @@ -1623,7 +1623,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,  	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&  	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {  		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); -		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK; +		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;  		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;  		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); @@ -3847,6 +3847,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)  	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];  	struct hwrm_vnic_tpa_cfg_input req = {0}; +	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) +		return 0; +  	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);  	if (tpa_flags) { @@ -4558,18 +4561,17 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)  	return rc;  } -static int -bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, -			   int ring_grps, int cp_rings, int vnics) 
+static void +__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, +			     int tx_rings, int rx_rings, int ring_grps, +			     int cp_rings, int vnics)  { -	struct hwrm_func_cfg_input req = {0};  	u32 enables = 0; -	int rc; -	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); -	req.fid = cpu_to_le16(0xffff); +	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1); +	req->fid = cpu_to_le16(0xffff);  	enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; -	req.num_tx_rings = cpu_to_le16(tx_rings); +	req->num_tx_rings = cpu_to_le16(tx_rings);  	if (bp->flags & BNXT_FLAG_NEW_RM) {  		enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;  		enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | @@ -4578,16 +4580,53 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,  			   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;  		enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; -		req.num_rx_rings = cpu_to_le16(rx_rings); -		req.num_hw_ring_grps = cpu_to_le16(ring_grps); -		req.num_cmpl_rings = cpu_to_le16(cp_rings); -		req.num_stat_ctxs = req.num_cmpl_rings; -		req.num_vnics = cpu_to_le16(vnics); +		req->num_rx_rings = cpu_to_le16(rx_rings); +		req->num_hw_ring_grps = cpu_to_le16(ring_grps); +		req->num_cmpl_rings = cpu_to_le16(cp_rings); +		req->num_stat_ctxs = req->num_cmpl_rings; +		req->num_vnics = cpu_to_le16(vnics);  	} -	if (!enables) +	req->enables = cpu_to_le32(enables); +} + +static void +__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, +			     struct hwrm_func_vf_cfg_input *req, int tx_rings, +			     int rx_rings, int ring_grps, int cp_rings, +			     int vnics) +{ +	u32 enables = 0; + +	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1); +	enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; +	enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; +	enables |= cp_rings ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | +			      FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; +	enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; +	enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; + +	req->num_tx_rings = cpu_to_le16(tx_rings); +	req->num_rx_rings = cpu_to_le16(rx_rings); +	req->num_hw_ring_grps = cpu_to_le16(ring_grps); +	req->num_cmpl_rings = cpu_to_le16(cp_rings); +	req->num_stat_ctxs = req->num_cmpl_rings; +	req->num_vnics = cpu_to_le16(vnics); + +	req->enables = cpu_to_le32(enables); +} + +static int +bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, +			   int ring_grps, int cp_rings, int vnics) +{ +	struct hwrm_func_cfg_input req = {0}; +	int rc; + +	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, +				     cp_rings, vnics); +	if (!req.enables)  		return 0; -	req.enables = cpu_to_le32(enables);  	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);  	if (rc)  		return -ENOMEM; @@ -4604,7 +4643,6 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,  			   int ring_grps, int cp_rings, int vnics)  {  	struct hwrm_func_vf_cfg_input req = {0}; -	u32 enables = 0;  	int rc;  	if (!(bp->flags & BNXT_FLAG_NEW_RM)) { @@ -4612,22 +4650,8 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,  		return 0;  	} -	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); -	enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; -	enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; -	enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | -			      FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; -	enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; -	enables |= vnics ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; - -	req.num_tx_rings = cpu_to_le16(tx_rings); -	req.num_rx_rings = cpu_to_le16(rx_rings); -	req.num_hw_ring_grps = cpu_to_le16(ring_grps); -	req.num_cmpl_rings = cpu_to_le16(cp_rings); -	req.num_stat_ctxs = req.num_cmpl_rings; -	req.num_vnics = cpu_to_le16(vnics); - -	req.enables = cpu_to_le32(enables); +	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, +				     cp_rings, vnics);  	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);  	if (rc)  		return -ENOMEM; @@ -4743,39 +4767,25 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)  }  static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, -				    int ring_grps, int cp_rings) +				    int ring_grps, int cp_rings, int vnics)  {  	struct hwrm_func_vf_cfg_input req = {0}; -	u32 flags, enables; +	u32 flags;  	int rc;  	if (!(bp->flags & BNXT_FLAG_NEW_RM))  		return 0; -	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); +	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, +				     cp_rings, vnics);  	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |  		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |  		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |  		FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |  		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |  		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; -	enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS | -		  FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | -		  FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | -		  FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | -		  FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS | -		  FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS;  	req.flags = cpu_to_le32(flags); -	req.enables = cpu_to_le32(enables); -	req.num_tx_rings = cpu_to_le16(tx_rings); -	req.num_rx_rings = cpu_to_le16(rx_rings); -	req.num_cmpl_rings = cpu_to_le16(cp_rings); -	req.num_hw_ring_grps = cpu_to_le16(ring_grps); -	req.num_stat_ctxs = cpu_to_le16(cp_rings); -	req.num_vnics = cpu_to_le16(1); -	if (bp->flags & 
BNXT_FLAG_RFS) -		req.num_vnics = cpu_to_le16(rx_rings + 1);  	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);  	if (rc)  		return -ENOMEM; @@ -4783,38 +4793,23 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,  }  static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, -				    int ring_grps, int cp_rings) +				    int ring_grps, int cp_rings, int vnics)  {  	struct hwrm_func_cfg_input req = {0}; -	u32 flags, enables; +	u32 flags;  	int rc; -	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); -	req.fid = cpu_to_le16(0xffff); +	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, +				     cp_rings, vnics);  	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; -	enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS; -	req.num_tx_rings = cpu_to_le16(tx_rings); -	if (bp->flags & BNXT_FLAG_NEW_RM) { +	if (bp->flags & BNXT_FLAG_NEW_RM)  		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |  			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |  			 FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |  			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |  			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; -		enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | -			   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | -			   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | -			   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | -			   FUNC_CFG_REQ_ENABLES_NUM_VNICS; -		req.num_rx_rings = cpu_to_le16(rx_rings); -		req.num_cmpl_rings = cpu_to_le16(cp_rings); -		req.num_hw_ring_grps = cpu_to_le16(ring_grps); -		req.num_stat_ctxs = cpu_to_le16(cp_rings); -		req.num_vnics = cpu_to_le16(1); -		if (bp->flags & BNXT_FLAG_RFS) -			req.num_vnics = cpu_to_le16(rx_rings + 1); -	} +  	req.flags = cpu_to_le32(flags); -	req.enables = cpu_to_le32(enables);  	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);  	if (rc)  		return -ENOMEM; @@ -4822,17 +4817,17 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,  }  static int 
bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, -				 int ring_grps, int cp_rings) +				 int ring_grps, int cp_rings, int vnics)  {  	if (bp->hwrm_spec_code < 0x10801)  		return 0;  	if (BNXT_PF(bp))  		return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, -						ring_grps, cp_rings); +						ring_grps, cp_rings, vnics);  	return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, -					cp_rings); +					cp_rings, vnics);  }  static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, @@ -5865,7 +5860,6 @@ static int bnxt_init_msix(struct bnxt *bp)  		if (rc)  			goto msix_setup_exit; -		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;  		bp->cp_nr_rings = (min == 1) ?  				  max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :  				  bp->tx_nr_rings + bp->rx_nr_rings; @@ -5897,7 +5891,6 @@ static int bnxt_init_inta(struct bnxt *bp)  	bp->rx_nr_rings = 1;  	bp->tx_nr_rings = 1;  	bp->cp_nr_rings = 1; -	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;  	bp->flags |= BNXT_FLAG_SHARED_RINGS;  	bp->irq_tbl[0].vector = bp->pdev->irq;  	return 0; @@ -7531,7 +7524,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,  	int max_rx, max_tx, tx_sets = 1;  	int tx_rings_needed;  	int rx_rings = rx; -	int cp, rc; +	int cp, vnics, rc;  	if (tcs)  		tx_sets = tcs; @@ -7547,10 +7540,15 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,  	if (max_tx < tx_rings_needed)  		return -ENOMEM; +	vnics = 1; +	if (bp->flags & BNXT_FLAG_RFS) +		vnics += rx_rings; +  	if (bp->flags & BNXT_FLAG_AGG_RINGS)  		rx_rings <<= 1;  	cp = sh ? 
max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; -	return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp); +	return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, +				     vnics);  }  static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) @@ -8437,13 +8435,20 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)  		return 0;  	bnxt_hwrm_func_qcaps(bp); -	__bnxt_close_nic(bp, true, false); + +	if (netif_running(bp->dev)) +		__bnxt_close_nic(bp, true, false); +  	bnxt_clear_int_mode(bp);  	rc = bnxt_init_int_mode(bp); -	if (rc) -		dev_close(bp->dev); -	else -		rc = bnxt_open_nic(bp, true, false); + +	if (netif_running(bp->dev)) { +		if (rc) +			dev_close(bp->dev); +		else +			rc = bnxt_open_nic(bp, true, false); +	} +  	return rc;  } @@ -8664,6 +8669,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)  	if (rc)  		goto init_err_pci_clean; +	/* No TC has been set yet and rings may have been trimmed due to +	 * limited MSIX, so we re-initialize the TX rings per TC. 
+	 */ +	bp->tx_nr_rings_per_tc = bp->tx_nr_rings; +  	bnxt_get_wol_settings(bp);  	if (bp->flags & BNXT_FLAG_WOL_CAP)  		device_set_wakeup_enable(&pdev->dev, bp->wol); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 1989c470172c..5e3d62189cab 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -189,6 +189,7 @@ struct rx_cmp_ext {  	#define RX_CMP_FLAGS2_T_L4_CS_CALC			(0x1 << 3)  	#define RX_CMP_FLAGS2_META_FORMAT_VLAN			(0x1 << 4)  	__le32 rx_cmp_meta_data; +	#define RX_CMP_FLAGS2_METADATA_TCI_MASK			0xffff  	#define RX_CMP_FLAGS2_METADATA_VID_MASK			0xfff  	#define RX_CMP_FLAGS2_METADATA_TPID_MASK		0xffff0000  	 #define RX_CMP_FLAGS2_METADATA_TPID_SFT		 16 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index fbe6e208e17b..65c2cee35766 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -349,6 +349,9 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)  	if (rc)  		netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",  			    __func__, flow_handle, rc); + +	if (rc) +		rc = -EIO;  	return rc;  } @@ -484,13 +487,15 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,  	req.action_flags = cpu_to_le16(action_flags);  	mutex_lock(&bp->hwrm_cmd_lock); -  	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);  	if (!rc)  		*flow_handle = resp->flow_handle; -  	mutex_unlock(&bp->hwrm_cmd_lock); +	if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) +		rc = -ENOSPC; +	else if (rc) +		rc = -EIO;  	return rc;  } @@ -561,6 +566,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,  		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);  	mutex_unlock(&bp->hwrm_cmd_lock); +	if (rc) +		rc = -EIO;  	return rc;  } @@ -576,6 +583,9 @@ static int hwrm_cfa_decap_filter_free(struct bnxt 
*bp,  	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);  	if (rc)  		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + +	if (rc) +		rc = -EIO;  	return rc;  } @@ -624,6 +634,8 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,  		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);  	mutex_unlock(&bp->hwrm_cmd_lock); +	if (rc) +		rc = -EIO;  	return rc;  } @@ -639,6 +651,9 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp,  	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);  	if (rc)  		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + +	if (rc) +		rc = -EIO;  	return rc;  } @@ -1269,11 +1284,8 @@ static int bnxt_tc_del_flow(struct bnxt *bp,  	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,  					   &tc_flow_cmd->cookie,  					   tc_info->flow_ht_params); -	if (!flow_node) { -		netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx", -			    tc_flow_cmd->cookie); +	if (!flow_node)  		return -EINVAL; -	}  	return __bnxt_tc_del_flow(bp, flow_node);  } @@ -1290,11 +1302,8 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,  	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,  					   &tc_flow_cmd->cookie,  					   tc_info->flow_ht_params); -	if (!flow_node) { -		netdev_info(bp->dev, "Error: no flow_node for cookie %lx", -			    tc_flow_cmd->cookie); +	if (!flow_node)  		return -1; -	}  	flow = &flow_node->flow;  	curr_stats = &flow->stats; @@ -1344,8 +1353,10 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,  	} else {  		netdev_info(bp->dev, "error rc=%d", rc);  	} -  	mutex_unlock(&bp->hwrm_cmd_lock); + +	if (rc) +		rc = -EIO;  	return rc;  } diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index a77ee2f8fb8d..f2593978ae75 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -922,8 +922,8 @@ static int tg3_ape_send_event(struct tg3 *tp, u32 event)  	if (!(apedata & APE_FW_STATUS_READY))  		
return -EAGAIN; -	/* Wait for up to 1 millisecond for APE to service previous event. */ -	err = tg3_ape_event_lock(tp, 1000); +	/* Wait for up to 20 millisecond for APE to service previous event. */ +	err = tg3_ape_event_lock(tp, 20000);  	if (err)  		return err; @@ -946,6 +946,7 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)  	switch (kind) {  	case RESET_KIND_INIT: +		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);  		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,  				APE_HOST_SEG_SIG_MAGIC);  		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, @@ -962,13 +963,6 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)  		event = APE_EVENT_STATUS_STATE_START;  		break;  	case RESET_KIND_SHUTDOWN: -		/* With the interface we are currently using, -		 * APE does not track driver state.  Wiping -		 * out the HOST SEGMENT SIGNATURE forces -		 * the APE to assume OS absent status. -		 */ -		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); -  		if (device_may_wakeup(&tp->pdev->dev) &&  		    tg3_flag(tp, WOL_ENABLE)) {  			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, @@ -990,6 +984,18 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)  	tg3_ape_send_event(tp, event);  } +static void tg3_send_ape_heartbeat(struct tg3 *tp, +				   unsigned long interval) +{ +	/* Check if hb interval has exceeded */ +	if (!tg3_flag(tp, ENABLE_APE) || +	    time_before(jiffies, tp->ape_hb_jiffies + interval)) +		return; + +	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++); +	tp->ape_hb_jiffies = jiffies; +} +  static void tg3_disable_ints(struct tg3 *tp)  {  	int i; @@ -7262,6 +7268,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)  		}  	} +	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);  	return work_done;  tx_recovery: @@ -7344,6 +7351,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)  		}  	} +	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);  	return work_done;  tx_recovery: 
@@ -10732,7 +10740,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)  	if (tg3_flag(tp, ENABLE_APE))  		/* Write our heartbeat update interval to APE. */  		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, -				APE_HOST_HEARTBEAT_INT_DISABLE); +				APE_HOST_HEARTBEAT_INT_5SEC);  	tg3_write_sig_post_reset(tp, RESET_KIND_INIT); @@ -11077,6 +11085,9 @@ static void tg3_timer(struct timer_list *t)  		tp->asf_counter = tp->asf_multiplier;  	} +	/* Update the APE heartbeat every 5 seconds.*/ +	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL); +  	spin_unlock(&tp->lock);  restart_timer: @@ -16653,6 +16664,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)  				       pci_state_reg);  		tg3_ape_lock_init(tp); +		tp->ape_hb_interval = +			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);  	}  	/* Set up tp->grc_local_ctrl before calling diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 47f51cc0566d..1d61aa3efda1 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -2508,6 +2508,7 @@  #define TG3_APE_LOCK_PHY3		5  #define TG3_APE_LOCK_GPIO		7 +#define TG3_APE_HB_INTERVAL             (tp->ape_hb_interval)  #define TG3_EEPROM_SB_F1R2_MBA_OFF	0x10 @@ -3423,6 +3424,10 @@ struct tg3 {  	struct device			*hwmon_dev;  	bool				link_up;  	bool				pcierr_recovery; + +	u32                             ape_hb; +	unsigned long                   ape_hb_interval; +	unsigned long                   ape_hb_jiffies;  };  /* Accessor macros for chip and asic attributes diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c index c87c9c684a33..d59497a7bdce 100644 --- a/drivers/net/ethernet/cavium/common/cavium_ptp.c +++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c @@ -75,6 +75,8 @@ EXPORT_SYMBOL(cavium_ptp_get);  void cavium_ptp_put(struct cavium_ptp *ptp)  { +	if (!ptp) +		return;  	pci_dev_put(ptp->pdev);  }  
EXPORT_SYMBOL(cavium_ptp_put); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index b68cde9f17d2..7d9c5ffbd041 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -67,11 +67,6 @@ module_param(cpi_alg, int, S_IRUGO);  MODULE_PARM_DESC(cpi_alg,  		 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); -struct nicvf_xdp_tx { -	u64 dma_addr; -	u8  qidx; -}; -  static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)  {  	if (nic->sqs_mode) @@ -507,29 +502,14 @@ static int nicvf_init_resources(struct nicvf *nic)  	return 0;  } -static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr) -{ -	/* Check if it's a recycled page, if not unmap the DMA mapping. -	 * Recycled page holds an extra reference. -	 */ -	if (page_ref_count(page) == 1) { -		dma_addr &= PAGE_MASK; -		dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, -				     RCV_FRAG_LEN + XDP_HEADROOM, -				     DMA_FROM_DEVICE, -				     DMA_ATTR_SKIP_CPU_SYNC); -	} -} -  static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,  				struct cqe_rx_t *cqe_rx, struct snd_queue *sq,  				struct rcv_queue *rq, struct sk_buff **skb)  {  	struct xdp_buff xdp;  	struct page *page; -	struct nicvf_xdp_tx *xdp_tx = NULL;  	u32 action; -	u16 len, err, offset = 0; +	u16 len, offset = 0;  	u64 dma_addr, cpu_addr;  	void *orig_data; @@ -543,7 +523,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,  	cpu_addr = (u64)phys_to_virt(cpu_addr);  	page = virt_to_page((void *)cpu_addr); -	xdp.data_hard_start = page_address(page) + RCV_BUF_HEADROOM; +	xdp.data_hard_start = page_address(page);  	xdp.data = (void *)cpu_addr;  	xdp_set_data_meta_invalid(&xdp);  	xdp.data_end = xdp.data + len; @@ -563,7 +543,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,  	switch (action) {  	case XDP_PASS: -		
nicvf_unmap_page(nic, page, dma_addr); +		/* Check if it's a recycled page, if not +		 * unmap the DMA mapping. +		 * +		 * Recycled page holds an extra reference. +		 */ +		if (page_ref_count(page) == 1) { +			dma_addr &= PAGE_MASK; +			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, +					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM, +					     DMA_FROM_DEVICE, +					     DMA_ATTR_SKIP_CPU_SYNC); +		}  		/* Build SKB and pass on packet to network stack */  		*skb = build_skb(xdp.data, @@ -576,20 +567,6 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,  	case XDP_TX:  		nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);  		return true; -	case XDP_REDIRECT: -		/* Save DMA address for use while transmitting */ -		xdp_tx = (struct nicvf_xdp_tx *)page_address(page); -		xdp_tx->dma_addr = dma_addr; -		xdp_tx->qidx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx); - -		err = xdp_do_redirect(nic->pnicvf->netdev, &xdp, prog); -		if (!err) -			return true; - -		/* Free the page on error */ -		nicvf_unmap_page(nic, page, dma_addr); -		put_page(page); -		break;  	default:  		bpf_warn_invalid_xdp_action(action);  		/* fall through */ @@ -597,7 +574,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,  		trace_xdp_exception(nic->netdev, prog, action);  		/* fall through */  	case XDP_DROP: -		nicvf_unmap_page(nic, page, dma_addr); +		/* Check if it's a recycled page, if not +		 * unmap the DMA mapping. +		 * +		 * Recycled page holds an extra reference. 
+		 */ +		if (page_ref_count(page) == 1) { +			dma_addr &= PAGE_MASK; +			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, +					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM, +					     DMA_FROM_DEVICE, +					     DMA_ATTR_SKIP_CPU_SYNC); +		}  		put_page(page);  		return true;  	} @@ -1864,50 +1852,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)  	}  } -static int nicvf_xdp_xmit(struct net_device *netdev, struct xdp_buff *xdp) -{ -	struct nicvf *nic = netdev_priv(netdev); -	struct nicvf *snic = nic; -	struct nicvf_xdp_tx *xdp_tx; -	struct snd_queue *sq; -	struct page *page; -	int err, qidx; - -	if (!netif_running(netdev) || !nic->xdp_prog) -		return -EINVAL; - -	page = virt_to_page(xdp->data); -	xdp_tx = (struct nicvf_xdp_tx *)page_address(page); -	qidx = xdp_tx->qidx; - -	if (xdp_tx->qidx >= nic->xdp_tx_queues) -		return -EINVAL; - -	/* Get secondary Qset's info */ -	if (xdp_tx->qidx >= MAX_SND_QUEUES_PER_QS) { -		qidx = xdp_tx->qidx / MAX_SND_QUEUES_PER_QS; -		snic = (struct nicvf *)nic->snicvf[qidx - 1]; -		if (!snic) -			return -EINVAL; -		qidx = xdp_tx->qidx % MAX_SND_QUEUES_PER_QS; -	} - -	sq = &snic->qs->sq[qidx]; -	err = nicvf_xdp_sq_append_pkt(snic, sq, (u64)xdp->data, -				      xdp_tx->dma_addr, -				      xdp->data_end - xdp->data); -	if (err) -		return -ENOMEM; - -	nicvf_xdp_sq_doorbell(snic, sq, qidx); -	return 0; -} - -static void nicvf_xdp_flush(struct net_device *dev) -{ -	return; -} -  static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)  {  	struct hwtstamp_config config; @@ -1986,8 +1930,6 @@ static const struct net_device_ops nicvf_netdev_ops = {  	.ndo_fix_features       = nicvf_fix_features,  	.ndo_set_features       = nicvf_set_features,  	.ndo_bpf		= nicvf_xdp, -	.ndo_xdp_xmit		= nicvf_xdp_xmit, -	.ndo_xdp_flush          = nicvf_xdp_flush,  	.ndo_do_ioctl           = nicvf_ioctl,  }; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c 
b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 3eae9ff9b53a..d42704d07484 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -204,7 +204,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,  	/* Reserve space for header modifications by BPF program */  	if (rbdr->is_xdp) -		buf_len += XDP_HEADROOM; +		buf_len += XDP_PACKET_HEADROOM;  	/* Check if it's recycled */  	if (pgcache) @@ -224,9 +224,8 @@ ret:  			nic->rb_page = NULL;  			return -ENOMEM;  		} -  		if (pgcache) -			pgcache->dma_addr = *rbuf + XDP_HEADROOM; +			pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;  		nic->rb_page_offset += buf_len;  	} @@ -1244,7 +1243,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,  	int qentry;  	if (subdesc_cnt > sq->xdp_free_cnt) -		return -1; +		return 0;  	qentry = nicvf_get_sq_desc(sq, subdesc_cnt); @@ -1255,7 +1254,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,  	sq->xdp_desc_cnt += subdesc_cnt; -	return 0; +	return 1;  }  /* Calculate no of SQ subdescriptors needed to transmit all @@ -1656,7 +1655,7 @@ static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,  		if (page_ref_count(page) != 1)  			return; -		len += XDP_HEADROOM; +		len += XDP_PACKET_HEADROOM;  		/* Receive buffers in XDP mode are mapped from page start */  		dma_addr &= PAGE_MASK;  	} diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index ce1eed7a6d63..5e9a03cf1b4d 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -11,7 +11,6 @@  #include <linux/netdevice.h>  #include <linux/iommu.h> -#include <linux/bpf.h>  #include <net/xdp.h>  #include "q_struct.h" @@ -94,9 +93,6 @@  #define RCV_FRAG_LEN	 (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \  			 SKB_DATA_ALIGN(sizeof(struct 
skb_shared_info))) -#define RCV_BUF_HEADROOM	128 /* To store dma address for XDP redirect */ -#define XDP_HEADROOM		(XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM) -  #define MAX_CQES_FOR_TX		((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \  				 MAX_CQE_PER_PKT_XMIT) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 557fd8bfd54e..00a1d2d13169 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -472,7 +472,7 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,  	if (is_t6(padap->params.chip)) {  		size = padap->params.cim_la_size / 10 + 1; -		size *= 11 * sizeof(u32); +		size *= 10 * sizeof(u32);  	} else {  		size = padap->params.cim_la_size / 8;  		size *= 8 * sizeof(u32); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 30485f9a598f..143686c60234 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -102,7 +102,7 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)  	case CUDBG_CIM_LA:  		if (is_t6(adap->params.chip)) {  			len = adap->params.cim_la_size / 10 + 1; -			len *= 11 * sizeof(u32); +			len *= 10 * sizeof(u32);  		} else {  			len = adap->params.cim_la_size / 8;  			len *= 8 * sizeof(u32); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 56bc626ef006..61022b5f6743 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4970,7 +4970,6 @@ static void cxgb4_mgmt_setup(struct net_device *dev)  	/* Initialize the device structure. 
*/  	dev->netdev_ops = &cxgb4_mgmt_netdev_ops;  	dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; -	dev->needs_free_netdev = true;  }  static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) @@ -4982,9 +4981,10 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)  	pcie_fw = readl(adap->regs + PCIE_FW_A);  	/* Check if cxgb4 is the MASTER and fw is initialized */ -	if (!(pcie_fw & PCIE_FW_INIT_F) || +	if (num_vfs && +	    (!(pcie_fw & PCIE_FW_INIT_F) ||  	    !(pcie_fw & PCIE_FW_MASTER_VLD_F) || -	    PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF) { +	    PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF)) {  		dev_warn(&pdev->dev,  			 "cxgb4 driver needs to be MASTER to support SRIOV\n");  		return -EOPNOTSUPP; @@ -5180,6 +5180,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)  	adapter->name = pci_name(pdev);  	adapter->mbox = func;  	adapter->pf = func; +	adapter->params.chip = chip; +	adapter->adap_idx = adap_idx;  	adapter->msg_enable = DFLT_MSG_ENABLE;  	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +  				    (sizeof(struct mbox_cmd) * @@ -5599,24 +5601,24 @@ static void remove_one(struct pci_dev *pdev)  #if IS_ENABLED(CONFIG_IPV6)  		t4_cleanup_clip_tbl(adapter);  #endif -		iounmap(adapter->regs);  		if (!is_t4(adapter->params.chip))  			iounmap(adapter->bar2); -		pci_disable_pcie_error_reporting(pdev); -		if ((adapter->flags & DEV_ENABLED)) { -			pci_disable_device(pdev); -			adapter->flags &= ~DEV_ENABLED; -		} -		pci_release_regions(pdev); -		kfree(adapter->mbox_log); -		synchronize_rcu(); -		kfree(adapter);  	}  #ifdef CONFIG_PCI_IOV  	else {  		cxgb4_iov_configure(adapter->pdev, 0);  	}  #endif +	iounmap(adapter->regs); +	pci_disable_pcie_error_reporting(pdev); +	if ((adapter->flags & DEV_ENABLED)) { +		pci_disable_device(pdev); +		adapter->flags &= ~DEV_ENABLED; +	} +	pci_release_regions(pdev); +	kfree(adapter->mbox_log); +	synchronize_rcu(); +	kfree(adapter);  }  /* "Shutdown" quiesces the 
device, stopping Ingress Packet and Interrupt diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 047609ef0515..920bccd6bc40 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2637,7 +2637,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)  }  #define EEPROM_STAT_ADDR   0x7bfc -#define VPD_SIZE           0x800  #define VPD_BASE           0x400  #define VPD_BASE_OLD       0  #define VPD_LEN            1024 @@ -2704,15 +2703,6 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)  	if (!vpd)  		return -ENOMEM; -	/* We have two VPD data structures stored in the adapter VPD area. -	 * By default, Linux calculates the size of the VPD area by traversing -	 * the first VPD area at offset 0x0, so we need to tell the OS what -	 * our real VPD size is. -	 */ -	ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE); -	if (ret < 0) -		goto out; -  	/* Card information normally starts at VPD_BASE but early cards had  	 * it at 0.  	 
*/ diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 5eb999af2c40..bd3f6e4d1341 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -540,6 +540,7 @@ static int gmac_setup_txqs(struct net_device *netdev)  	if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {  		dev_warn(geth->dev, "TX queue base it not aligned\n"); +		kfree(skb_tab);  		return -ENOMEM;  	} diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 7caa8da48421..e4ec32a9ca15 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2008,7 +2008,6 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,  	}  	if (unlikely(err < 0)) { -		percpu_stats->tx_errors++;  		percpu_stats->tx_fifo_errors++;  		return err;  	} @@ -2278,7 +2277,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,  	vaddr = phys_to_virt(addr);  	prefetch(vaddr + qm_fd_get_offset(fd)); -	fd_format = qm_fd_get_format(fd);  	/* The only FD types that we may receive are contig and S/G */  	WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg)); @@ -2311,8 +2309,10 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,  	skb_len = skb->len; -	if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) +	if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) { +		percpu_stats->rx_dropped++;  		return qman_cb_dqrr_consume; +	}  	percpu_stats->rx_packets++;  	percpu_stats->rx_bytes += skb_len; @@ -2860,7 +2860,7 @@ static int dpaa_remove(struct platform_device *pdev)  	struct device *dev;  	int err; -	dev = &pdev->dev; +	dev = pdev->dev.parent;  	net_dev = dev_get_drvdata(dev);  	priv = netdev_priv(net_dev); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 7a7f3a42b2aa..d4604bc8eb5b 100644 --- a/drivers/net/ethernet/freescale/fec_main.c 
+++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3600,6 +3600,8 @@ fec_drv_remove(struct platform_device *pdev)  	fec_enet_mii_remove(fep);  	if (fep->reg_phy)  		regulator_disable(fep->reg_phy); +	pm_runtime_put(&pdev->dev); +	pm_runtime_disable(&pdev->dev);  	if (of_phy_is_fixed_link(np))  		of_phy_deregister_fixed_link(np);  	of_node_put(fep->phy_node); diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index ea43b4974149..7af31ddd093f 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -1100,7 +1100,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)  	set_bucket(dtsec->regs, bucket, true);  	/* Create element to be added to the driver hash table */ -	hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL); +	hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);  	if (!hash_entry)  		return -ENOMEM;  	hash_entry->addr = addr; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 3bdeb295514b..f27f9bae1a4a 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2934,29 +2934,17 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,  {  	int size = lstatus & BD_LENGTH_MASK;  	struct page *page = rxb->page; -	bool last = !!(lstatus & BD_LFLAG(RXBD_LAST)); - -	/* Remove the FCS from the packet length */ -	if (last) -		size -= ETH_FCS_LEN;  	if (likely(first)) {  		skb_put(skb, size);  	} else {  		/* the last fragments' length contains the full frame length */ -		if (last) +		if (lstatus & BD_LFLAG(RXBD_LAST))  			size -= skb->len; -		/* Add the last fragment if it contains something other than -		 * the FCS, otherwise drop it and trim off any part of the FCS -		 * that was already received. 
-		 */ -		if (size > 0) -			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, -					rxb->page_offset + RXBUF_ALIGNMENT, -					size, GFAR_RXB_TRUESIZE); -		else if (size < 0) -			pskb_trim(skb, skb->len + size); +		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, +				rxb->page_offset + RXBUF_ALIGNMENT, +				size, GFAR_RXB_TRUESIZE);  	}  	/* try reuse page */ @@ -3069,12 +3057,12 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)  	if (priv->padding)  		skb_pull(skb, priv->padding); +	/* Trim off the FCS */ +	pskb_trim(skb, skb->len - ETH_FCS_LEN); +  	if (ndev->features & NETIF_F_RXCSUM)  		gfar_rx_checksum(skb, fcb); -	/* Tell the skb what kind of packet this is */ -	skb->protocol = eth_type_trans(skb, ndev); -  	/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.  	 * Even if vlan rx accel is disabled, on some chips  	 * RXFCB_VLN is pseudo randomly set. @@ -3145,13 +3133,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)  			continue;  		} +		gfar_process_frame(ndev, skb); +  		/* Increment the number of packets */  		total_pkts++;  		total_bytes += skb->len;  		skb_record_rx_queue(skb, rx_queue->qindex); -		gfar_process_frame(ndev, skb); +		skb->protocol = eth_type_trans(skb, ndev);  		/* Send the packet up the stack */  		napi_gro_receive(&rx_queue->grp->napi_rx, skb); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 86944bc3b273..74bd260ca02a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -666,7 +666,7 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data)  static int hns_gmac_get_sset_count(int stringset)  { -	if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) +	if (stringset == ETH_SS_STATS)  		return ARRAY_SIZE(g_gmac_stats_string);  	return 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c 
b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index b62816c1574e..93e71e27401b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -422,7 +422,7 @@ void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb)  int hns_ppe_get_sset_count(int stringset)  { -	if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) +	if (stringset == ETH_SS_STATS)  		return ETH_PPE_STATIC_NUM;  	return 0;  } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 6f3570cfb501..e2e28532e4dc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -876,7 +876,7 @@ void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)   */  int hns_rcb_get_ring_sset_count(int stringset)  { -	if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) +	if (stringset == ETH_SS_STATS)  		return HNS_RING_STATIC_REG_NUM;  	return 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 7ea7f8a4aa2a..2e14a3ae1d8b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -993,8 +993,10 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)  			cnt--;  		return cnt; -	} else { +	} else if (stringset == ETH_SS_STATS) {  		return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset)); +	} else { +		return -EOPNOTSUPP;  	}  } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 27447260215d..1b3cc8bb0705 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -791,6 +791,18 @@ static int ibmvnic_login(struct net_device *netdev)  	return 0;  } +static void release_login_buffer(struct ibmvnic_adapter *adapter) +{ +	kfree(adapter->login_buf); +	adapter->login_buf = NULL; +} + +static void 
release_login_rsp_buffer(struct ibmvnic_adapter *adapter) +{ +	kfree(adapter->login_rsp_buf); +	adapter->login_rsp_buf = NULL; +} +  static void release_resources(struct ibmvnic_adapter *adapter)  {  	int i; @@ -813,6 +825,10 @@ static void release_resources(struct ibmvnic_adapter *adapter)  			}  		}  	} +	kfree(adapter->napi); +	adapter->napi = NULL; + +	release_login_rsp_buffer(adapter);  }  static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) @@ -1057,6 +1073,35 @@ static int ibmvnic_open(struct net_device *netdev)  	return rc;  } +static void clean_rx_pools(struct ibmvnic_adapter *adapter) +{ +	struct ibmvnic_rx_pool *rx_pool; +	u64 rx_entries; +	int rx_scrqs; +	int i, j; + +	if (!adapter->rx_pool) +		return; + +	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); +	rx_entries = adapter->req_rx_add_entries_per_subcrq; + +	/* Free any remaining skbs in the rx buffer pools */ +	for (i = 0; i < rx_scrqs; i++) { +		rx_pool = &adapter->rx_pool[i]; +		if (!rx_pool) +			continue; + +		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); +		for (j = 0; j < rx_entries; j++) { +			if (rx_pool->rx_buff[j].skb) { +				dev_kfree_skb_any(rx_pool->rx_buff[j].skb); +				rx_pool->rx_buff[j].skb = NULL; +			} +		} +	} +} +  static void clean_tx_pools(struct ibmvnic_adapter *adapter)  {  	struct ibmvnic_tx_pool *tx_pool; @@ -1134,7 +1179,7 @@ static int __ibmvnic_close(struct net_device *netdev)  			}  		}  	} - +	clean_rx_pools(adapter);  	clean_tx_pools(adapter);  	adapter->state = VNIC_CLOSED;  	return rc; @@ -1670,8 +1715,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,  		return 0;  	} -	netif_carrier_on(netdev); -  	/* kick napi */  	for (i = 0; i < adapter->req_rx_queues; i++)  		napi_schedule(&adapter->napi[i]); @@ -1679,6 +1722,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,  	if (adapter->reset_reason != VNIC_RESET_FAILOVER)  		netdev_notify_peers(netdev); +	netif_carrier_on(netdev); +  	return 0;  } @@ 
-1853,6 +1898,12 @@ restart_poll:  				   be16_to_cpu(next->rx_comp.rc));  			/* free the entry */  			next->rx_comp.first = 0; +			dev_kfree_skb_any(rx_buff->skb); +			remove_buff_from_pool(adapter, rx_buff); +			continue; +		} else if (!rx_buff->skb) { +			/* free the entry */ +			next->rx_comp.first = 0;  			remove_buff_from_pool(adapter, rx_buff);  			continue;  		} @@ -3013,6 +3064,7 @@ static void send_login(struct ibmvnic_adapter *adapter)  	struct vnic_login_client_data *vlcd;  	int i; +	release_login_rsp_buffer(adapter);  	client_data_len = vnic_client_data_len(adapter);  	buffer_size = @@ -3738,6 +3790,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,  		ibmvnic_remove(adapter->vdev);  		return -EIO;  	} +	release_login_buffer(adapter);  	complete(&adapter->init_done);  	return 0; diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index afb7ebe20b24..824fd44e25f0 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -400,6 +400,10 @@  #define E1000_ICR_RXDMT0        0x00000010 /* Rx desc min. 
threshold (0) */  #define E1000_ICR_RXO           0x00000040 /* Receiver Overrun */  #define E1000_ICR_RXT0          0x00000080 /* Rx timer intr (ring 0) */ +#define E1000_ICR_MDAC          0x00000200 /* MDIO Access Complete */ +#define E1000_ICR_SRPD          0x00010000 /* Small Receive Packet Detected */ +#define E1000_ICR_ACK           0x00020000 /* Receive ACK Frame Detected */ +#define E1000_ICR_MNG           0x00040000 /* Manageability Event Detected */  #define E1000_ICR_ECCER         0x00400000 /* Uncorrectable ECC Error */  /* If this bit asserted, the driver should claim the interrupt */  #define E1000_ICR_INT_ASSERTED	0x80000000 @@ -407,7 +411,7 @@  #define E1000_ICR_RXQ1          0x00200000 /* Rx Queue 1 Interrupt */  #define E1000_ICR_TXQ0          0x00400000 /* Tx Queue 0 Interrupt */  #define E1000_ICR_TXQ1          0x00800000 /* Tx Queue 1 Interrupt */ -#define E1000_ICR_OTHER         0x01000000 /* Other Interrupts */ +#define E1000_ICR_OTHER         0x01000000 /* Other Interrupt */  /* PBA ECC Register */  #define E1000_PBA_ECC_COUNTER_MASK  0xFFF00000 /* ECC counter mask */ @@ -431,12 +435,27 @@  	E1000_IMS_RXSEQ  |    \  	E1000_IMS_LSC) +/* These are all of the events related to the OTHER interrupt. + */ +#define IMS_OTHER_MASK ( \ +	E1000_IMS_LSC  | \ +	E1000_IMS_RXO  | \ +	E1000_IMS_MDAC | \ +	E1000_IMS_SRPD | \ +	E1000_IMS_ACK  | \ +	E1000_IMS_MNG) +  /* Interrupt Mask Set */  #define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */  #define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */  #define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */  #define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. 
threshold */ +#define E1000_IMS_RXO       E1000_ICR_RXO       /* Receiver Overrun */  #define E1000_IMS_RXT0      E1000_ICR_RXT0      /* Rx timer intr */ +#define E1000_IMS_MDAC      E1000_ICR_MDAC      /* MDIO Access Complete */ +#define E1000_IMS_SRPD      E1000_ICR_SRPD      /* Small Receive Packet */ +#define E1000_IMS_ACK       E1000_ICR_ACK       /* Receive ACK Frame Detected */ +#define E1000_IMS_MNG       E1000_ICR_MNG       /* Manageability Event */  #define E1000_IMS_ECCER     E1000_ICR_ECCER     /* Uncorrectable ECC Error */  #define E1000_IMS_RXQ0      E1000_ICR_RXQ0      /* Rx Queue 0 Interrupt */  #define E1000_IMS_RXQ1      E1000_ICR_RXQ1      /* Rx Queue 1 Interrupt */ diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 31277d3bb7dc..1dddfb7b2de6 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1367,9 +1367,6 @@ out:   *  Checks to see of the link status of the hardware has changed.  If a   *  change in link status has been detected, then we read the PHY registers   *  to get the current speed/duplex if link exists. - * - *  Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link - *  up).   **/  static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)  { @@ -1385,7 +1382,8 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)  	 * Change or Rx Sequence Error interrupt.  	 */  	if (!mac->get_link_status) -		return 1; +		return 0; +	mac->get_link_status = false;  	/* First we want to see if the MII Status Register reports  	 * link.  
If so, then we want to get the current speed/duplex @@ -1393,12 +1391,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)  	 */  	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);  	if (ret_val) -		return ret_val; +		goto out;  	if (hw->mac.type == e1000_pchlan) {  		ret_val = e1000_k1_gig_workaround_hv(hw, link);  		if (ret_val) -			return ret_val; +			goto out;  	}  	/* When connected at 10Mbps half-duplex, some parts are excessively @@ -1431,7 +1429,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)  		ret_val = hw->phy.ops.acquire(hw);  		if (ret_val) -			return ret_val; +			goto out;  		if (hw->mac.type == e1000_pch2lan)  			emi_addr = I82579_RX_CONFIG; @@ -1453,7 +1451,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)  		hw->phy.ops.release(hw);  		if (ret_val) -			return ret_val; +			goto out;  		if (hw->mac.type >= e1000_pch_spt) {  			u16 data; @@ -1462,14 +1460,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)  			if (speed == SPEED_1000) {  				ret_val = hw->phy.ops.acquire(hw);  				if (ret_val) -					return ret_val; +					goto out;  				ret_val = e1e_rphy_locked(hw,  							  PHY_REG(776, 20),  							  &data);  				if (ret_val) {  					hw->phy.ops.release(hw); -					return ret_val; +					goto out;  				}  				ptr_gap = (data & (0x3FF << 2)) >> 2; @@ -1483,18 +1481,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)  				}  				hw->phy.ops.release(hw);  				if (ret_val) -					return ret_val; +					goto out;  			} else {  				ret_val = hw->phy.ops.acquire(hw);  				if (ret_val) -					return ret_val; +					goto out;  				ret_val = e1e_wphy_locked(hw,  							  PHY_REG(776, 20),  							  0xC023);  				hw->phy.ops.release(hw);  				if (ret_val) -					return ret_val; +					goto out;  			}  		} @@ -1521,7 +1519,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)  	    (hw->adapter->pdev->device == 
E1000_DEV_ID_PCH_I218_V3)) {  		ret_val = e1000_k1_workaround_lpt_lp(hw, link);  		if (ret_val) -			return ret_val; +			goto out;  	}  	if (hw->mac.type >= e1000_pch_lpt) {  		/* Set platform power management values for @@ -1529,7 +1527,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)  		 */  		ret_val = e1000_platform_pm_pch_lpt(hw, link);  		if (ret_val) -			return ret_val; +			goto out;  	}  	/* Clear link partner's EEE ability */ @@ -1552,9 +1550,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)  	}  	if (!link) -		return 0;	/* No link detected */ - -	mac->get_link_status = false; +		goto out;  	switch (hw->mac.type) {  	case e1000_pch2lan: @@ -1616,12 +1612,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)  	 * different link partner.  	 */  	ret_val = e1000e_config_fc_after_link_up(hw); -	if (ret_val) { +	if (ret_val)  		e_dbg("Error configuring flow control\n"); -		return ret_val; -	} -	return 1; +	return ret_val; + +out: +	mac->get_link_status = true; +	return ret_val;  }  static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index f457c5703d0c..5bdc3a2d4fd7 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -410,9 +410,6 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)   *  Checks to see of the link status of the hardware has changed.  If a   *  change in link status has been detected, then we read the PHY registers   *  to get the current speed/duplex if link exists. - * - *  Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link - *  up).   **/  s32 e1000e_check_for_copper_link(struct e1000_hw *hw)  { @@ -426,20 +423,16 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)  	 * Change or Rx Sequence Error interrupt.  	 
*/  	if (!mac->get_link_status) -		return 1; +		return 0; +	mac->get_link_status = false;  	/* First we want to see if the MII Status Register reports  	 * link.  If so, then we want to get the current speed/duplex  	 * of the PHY.  	 */  	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); -	if (ret_val) -		return ret_val; - -	if (!link) -		return 0;	/* No link detected */ - -	mac->get_link_status = false; +	if (ret_val || !link) +		goto out;  	/* Check if there was DownShift, must be checked  	 * immediately after link-up @@ -464,12 +457,14 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)  	 * different link partner.  	 */  	ret_val = e1000e_config_fc_after_link_up(hw); -	if (ret_val) { +	if (ret_val)  		e_dbg("Error configuring flow control\n"); -		return ret_val; -	} -	return 1; +	return ret_val; + +out: +	mac->get_link_status = true; +	return ret_val;  }  /** diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 1298b69f990b..dc853b0863af 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1914,30 +1914,20 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)  	struct net_device *netdev = data;  	struct e1000_adapter *adapter = netdev_priv(netdev);  	struct e1000_hw *hw = &adapter->hw; -	u32 icr; -	bool enable = true; - -	icr = er32(ICR); -	if (icr & E1000_ICR_RXO) { -		ew32(ICR, E1000_ICR_RXO); -		enable = false; -		/* napi poll will re-enable Other, make sure it runs */ -		if (napi_schedule_prep(&adapter->napi)) { -			adapter->total_rx_bytes = 0; -			adapter->total_rx_packets = 0; -			__napi_schedule(&adapter->napi); -		} -	} +	u32 icr = er32(ICR); + +	if (icr & adapter->eiac_mask) +		ew32(ICS, (icr & adapter->eiac_mask)); +  	if (icr & E1000_ICR_LSC) { -		ew32(ICR, E1000_ICR_LSC);  		hw->mac.get_link_status = true;  		/* guard against interrupt when we're going down */  		if (!test_bit(__E1000_DOWN, 
&adapter->state))  			mod_timer(&adapter->watchdog_timer, jiffies + 1);  	} -	if (enable && !test_bit(__E1000_DOWN, &adapter->state)) -		ew32(IMS, E1000_IMS_OTHER); +	if (!test_bit(__E1000_DOWN, &adapter->state)) +		ew32(IMS, E1000_IMS_OTHER | IMS_OTHER_MASK);  	return IRQ_HANDLED;  } @@ -2040,7 +2030,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)  		       hw->hw_addr + E1000_EITR_82574(vector));  	else  		writel(1, hw->hw_addr + E1000_EITR_82574(vector)); -	adapter->eiac_mask |= E1000_IMS_OTHER;  	/* Cause Tx interrupts on every write back */  	ivar |= BIT(31); @@ -2265,7 +2254,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)  	if (adapter->msix_entries) {  		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); -		ew32(IMS, adapter->eiac_mask | E1000_IMS_LSC); +		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | +		     IMS_OTHER_MASK);  	} else if (hw->mac.type >= e1000_pch_lpt) {  		ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);  	} else { @@ -2333,8 +2323,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,  {  	struct pci_dev *pdev = adapter->pdev; -	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, -					GFP_KERNEL); +	ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma, +					 GFP_KERNEL);  	if (!ring->desc)  		return -ENOMEM; @@ -2707,8 +2697,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight)  		napi_complete_done(napi, work_done);  		if (!test_bit(__E1000_DOWN, &adapter->state)) {  			if (adapter->msix_entries) -				ew32(IMS, adapter->rx_ring->ims_val | -				     E1000_IMS_OTHER); +				ew32(IMS, adapter->rx_ring->ims_val);  			else  				e1000_irq_enable(adapter);  		} @@ -5101,7 +5090,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)  	case e1000_media_type_copper:  		if (hw->mac.get_link_status) {  			ret_val = hw->mac.ops.check_for_link(hw); -			link_active = ret_val > 0; +			link_active = !hw->mac.get_link_status;  		} else {  			
link_active = true;  		} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0da5aa2c8aba..9fc063af233c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1888,6 +1888,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,  				     ixgbe_rx_pg_size(rx_ring),  				     DMA_FROM_DEVICE,  				     IXGBE_RX_DMA_ATTR); +	} else if (ring_uses_build_skb(rx_ring)) { +		unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; + +		dma_sync_single_range_for_cpu(rx_ring->dev, +					      IXGBE_CB(skb)->dma, +					      offset, +					      skb_headlen(skb), +					      DMA_FROM_DEVICE);  	} else {  		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index a1d7b88cf083..5a1668cdb461 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -7137,6 +7137,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev)  	int id = port->id;  	bool allmulti = dev->flags & IFF_ALLMULTI; +retry:  	mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);  	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);  	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti); @@ -7144,9 +7145,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev)  	/* Remove all port->id's mcast enries */  	mvpp2_prs_mcast_del_all(priv, id); -	if (allmulti && !netdev_mc_empty(dev)) { -		netdev_for_each_mc_addr(ha, dev) -			mvpp2_prs_mac_da_accept(priv, id, ha->addr, true); +	if (!allmulti) { +		netdev_for_each_mc_addr(ha, dev) { +			if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) { +				allmulti = true; +				goto retry; +			} +		}  	}  } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c index 
0be4575b58a2..fd509160c8f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c @@ -96,10 +96,10 @@ static void print_lyr_2_4_hdrs(struct trace_seq *p,  					  "%pI4");  		} else if (ethertype.v == ETH_P_IPV6) {  			static const struct in6_addr full_ones = { -				.in6_u.u6_addr32 = {htonl(0xffffffff), -						    htonl(0xffffffff), -						    htonl(0xffffffff), -						    htonl(0xffffffff)}, +				.in6_u.u6_addr32 = {__constant_htonl(0xffffffff), +						    __constant_htonl(0xffffffff), +						    __constant_htonl(0xffffffff), +						    __constant_htonl(0xffffffff)},  			};  			DECLARE_MASK_VAL(struct in6_addr, src_ipv6);  			DECLARE_MASK_VAL(struct in6_addr, dst_ipv6); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 47bab842c5ee..da94c8cba5ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1768,13 +1768,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,  	param->wq.linear = 1;  } -static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) +static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev, +				      struct mlx5e_rq_param *param)  {  	void *rqc = param->rqc;  	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);  	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);  	MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe))); + +	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);  }  static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, @@ -2634,6 +2637,9 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,  			       struct mlx5e_cq *cq,  			       struct mlx5e_cq_param *param)  { +	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); +	param->wq.db_numa_node  = dev_to_node(&mdev->pdev->dev); +  	return mlx5e_alloc_cq_common(mdev, param, cq);  } @@ -2645,7 +2651,7 @@ 
static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,  	struct mlx5e_cq *cq = &drop_rq->cq;  	int err; -	mlx5e_build_drop_rq_param(&rq_param); +	mlx5e_build_drop_rq_param(mdev, &rq_param);  	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);  	if (err) @@ -2994,8 +3000,8 @@ static int mlx5e_setup_tc_block(struct net_device *dev,  }  #endif -int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, -		   void *type_data) +static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, +			  void *type_data)  {  	switch (type) {  #ifdef CONFIG_MLX5_ESWITCH diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 0d4bb0688faa..e5c3ab46a24a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -36,6 +36,7 @@  #include <linux/tcp.h>  #include <linux/bpf_trace.h>  #include <net/busy_poll.h> +#include <net/ip6_checksum.h>  #include "en.h"  #include "en_tc.h"  #include "eswitch.h" @@ -546,20 +547,33 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)  	return true;  } +static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp) +{ +	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); +	u8 tcp_ack     = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) || +			 (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA); + +	tcp->check                      = 0; +	tcp->psh                        = get_cqe_lro_tcppsh(cqe); + +	if (tcp_ack) { +		tcp->ack                = 1; +		tcp->ack_seq            = cqe->lro_ack_seq_num; +		tcp->window             = cqe->lro_tcp_win; +	} +} +  static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,  				 u32 cqe_bcnt)  {  	struct ethhdr	*eth = (struct ethhdr *)(skb->data);  	struct tcphdr	*tcp;  	int network_depth = 0; +	__wsum check;  	__be16 proto;  	u16 tot_len;  	void *ip_p; -	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); -	u8 tcp_ack = (l4_hdr_type == 
CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) || -		(l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA); -  	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);  	tot_len = cqe_bcnt - network_depth; @@ -576,23 +590,30 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,  		ipv4->check             = 0;  		ipv4->check             = ip_fast_csum((unsigned char *)ipv4,  						       ipv4->ihl); + +		mlx5e_lro_update_tcp_hdr(cqe, tcp); +		check = csum_partial(tcp, tcp->doff * 4, +				     csum_unfold((__force __sum16)cqe->check_sum)); +		/* Almost done, don't forget the pseudo header */ +		tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr, +					       tot_len - sizeof(struct iphdr), +					       IPPROTO_TCP, check);  	} else { +		u16 payload_len = tot_len - sizeof(struct ipv6hdr);  		struct ipv6hdr *ipv6 = ip_p;  		tcp = ip_p + sizeof(struct ipv6hdr);  		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;  		ipv6->hop_limit         = cqe->lro_min_ttl; -		ipv6->payload_len       = cpu_to_be16(tot_len - -						      sizeof(struct ipv6hdr)); -	} - -	tcp->psh = get_cqe_lro_tcppsh(cqe); - -	if (tcp_ack) { -		tcp->ack                = 1; -		tcp->ack_seq            = cqe->lro_ack_seq_num; -		tcp->window             = cqe->lro_tcp_win; +		ipv6->payload_len       = cpu_to_be16(payload_len); + +		mlx5e_lro_update_tcp_hdr(cqe, tcp); +		check = csum_partial(tcp, tcp->doff * 4, +				     csum_unfold((__force __sum16)cqe->check_sum)); +		/* Almost done, don't forget the pseudo header */ +		tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len, +					     IPPROTO_TCP, check);  	}  } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 5a4608281f38..707976482c09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -216,7 +216,8 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,  	if 
(iph->protocol != IPPROTO_UDP)  		goto out; -	udph = udp_hdr(skb); +	/* Don't assume skb_transport_header() was set */ +	udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl);  	if (udph->dest != htons(9))  		goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index fd98b0dc610f..fa86a1466718 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2529,7 +2529,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,  			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {  				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;  			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { -				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q)) +				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || +				    tcf_vlan_push_prio(a))  					return -EOPNOTSUPP;  				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 569b42a01026..11b4f1089d1c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -176,7 +176,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,  	default:  		hlen = mlx5e_skb_l2_header_offset(skb);  	} -	return min_t(u16, hlen, skb->len); +	return min_t(u16, hlen, skb_headlen(skb));  }  static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 5ecf2cddc16d..c2b1d7d351fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1529,6 +1529,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,  	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); +	/* Create steering drop counters for ingress and 
egress ACLs */ +	if (vport_num && esw->mode == SRIOV_LEGACY) +		esw_vport_create_drop_counters(vport); +  	/* Restore old vport configuration */  	esw_apply_vport_conf(esw, vport); @@ -1545,10 +1549,6 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,  	if (!vport_num)  		vport->info.trusted = true; -	/* create steering drop counters for ingress and egress ACLs */ -	if (vport_num && esw->mode == SRIOV_LEGACY) -		esw_vport_create_drop_counters(vport); -  	esw_vport_change_handle_locked(vport);  	esw->enabled_vports++; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index c025c98700e4..31fc2cfac3b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1429,7 +1429,8 @@ static bool check_conflicting_actions(u32 action1, u32 action2)  	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP  |  			     MLX5_FLOW_CONTEXT_ACTION_ENCAP | -			     MLX5_FLOW_CONTEXT_ACTION_DECAP)) +			     MLX5_FLOW_CONTEXT_ACTION_DECAP | +			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR))  		return true;  	return false; @@ -1758,8 +1759,11 @@ search_again_locked:  	/* Collect all fgs which has a matching match_criteria */  	err = build_match_list(&match_head, ft, spec); -	if (err) +	if (err) { +		if (take_write) +			up_write_ref_node(&ft->node);  		return ERR_PTR(err); +	}  	if (!take_write)  		up_read_ref_node(&ft->node); @@ -1768,8 +1772,11 @@ search_again_locked:  				      dest_num, version);  	free_match_list(&match_head);  	if (!IS_ERR(rule) || -	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) +	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) { +		if (take_write) +			up_write_ref_node(&ft->node);  		return rule; +	}  	if (!take_write) {  		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 
21d29f7936f6..d39b0b7011b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -124,7 +124,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)  		trigger_cmd_completions(dev);  	} -	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0); +	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);  	mlx5_core_err(dev, "end\n");  unlock: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index e159243e0fcf..857035583ccd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -34,6 +34,7 @@  #include <linux/highmem.h>  #include <rdma/mlx5-abi.h>  #include "en.h" +#include "clock.h"  enum {  	MLX5_CYCLES_SHIFT	= 23 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 2ef641c91c26..ae391e4b7070 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -551,7 +551,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)  		MLX5_SET(cmd_hca_cap,  			 set_hca_cap,  			 cache_line_128byte, -			 cache_line_size() == 128 ? 1 : 0); +			 cache_line_size() >= 128 ? 
1 : 0);  	if (MLX5_CAP_GEN_MAX(dev, dct))  		MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index b698fb481b2e..996dc099cd58 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -443,6 +443,17 @@ int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)  }  EXPORT_SYMBOL(mlxsw_afa_block_jump); +int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block) +{ +	if (block->finished) +		return -EINVAL; +	mlxsw_afa_set_goto_set(block->cur_set, +			       MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0); +	block->finished = true; +	return 0; +} +EXPORT_SYMBOL(mlxsw_afa_block_terminate); +  static struct mlxsw_afa_fwd_entry *  mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port)  { diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index 43132293475c..b91f2b0829b0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -65,6 +65,7 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);  u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block);  int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);  int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); +int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block);  int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);  int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);  int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index f6963b0b4a55..122506daa586 100644 --- 
a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -107,20 +107,20 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {  	MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),  	MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),  	MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9), -	MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8), -	MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2), -	MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6), -	MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), -	MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32), -	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), -	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8), -	MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8), -	MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8),  	MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16),  	MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16), +	MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), +	MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), +	MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), +	MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32), +	MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32), +	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8), +	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8), +	MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8), +	MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8),  }; -#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38 +#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40  struct mlxsw_afk_element_inst { /* element instance in actual block */  	const struct mlxsw_afk_element_info *info; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 3dcc58d61506..bf400c75fcc8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -655,13 +655,17 @@ static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)  }  static struct 
mlxsw_sp_span_inspected_port * -mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port, -				    struct mlxsw_sp_span_entry *span_entry) +mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry, +				    enum mlxsw_sp_span_type type, +				    struct mlxsw_sp_port *port, +				    bool bind)  {  	struct mlxsw_sp_span_inspected_port *p;  	list_for_each_entry(p, &span_entry->bound_ports_list, list) -		if (port->local_port == p->local_port) +		if (type == p->type && +		    port->local_port == p->local_port && +		    bind == p->bound)  			return p;  	return NULL;  } @@ -691,8 +695,22 @@ mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,  	struct mlxsw_sp_span_inspected_port *inspected_port;  	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;  	char sbib_pl[MLXSW_REG_SBIB_LEN]; +	int i;  	int err; +	/* A given (source port, direction) can only be bound to one analyzer, +	 * so if a binding is requested, check for conflicts. +	 */ +	if (bind) +		for (i = 0; i < mlxsw_sp->span.entries_count; i++) { +			struct mlxsw_sp_span_entry *curr = +				&mlxsw_sp->span.entries[i]; + +			if (mlxsw_sp_span_entry_bound_port_find(curr, type, +								port, bind)) +				return -EEXIST; +		} +  	/* if it is an egress SPAN, bind a shared buffer to it */  	if (type == MLXSW_SP_SPAN_EGRESS) {  		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, @@ -720,6 +738,7 @@ mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,  	}  	inspected_port->local_port = port->local_port;  	inspected_port->type = type; +	inspected_port->bound = bind;  	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);  	return 0; @@ -746,7 +765,8 @@ mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,  	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;  	char sbib_pl[MLXSW_REG_SBIB_LEN]; -	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry); +	inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type, +							     port, bind);  	if 
(!inspected_port)  		return; @@ -1459,6 +1479,7 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)  	}  	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; +	mlxsw_sp_port_vlan->ref_count = 1;  	mlxsw_sp_port_vlan->vid = vid;  	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); @@ -1486,8 +1507,10 @@ mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)  	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;  	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); -	if (mlxsw_sp_port_vlan) +	if (mlxsw_sp_port_vlan) { +		mlxsw_sp_port_vlan->ref_count++;  		return mlxsw_sp_port_vlan; +	}  	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);  } @@ -1496,6 +1519,9 @@ void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)  {  	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; +	if (--mlxsw_sp_port_vlan->ref_count != 0) +		return; +  	if (mlxsw_sp_port_vlan->bridge_port)  		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);  	else if (fid) @@ -4207,13 +4233,12 @@ static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {  	.size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,  }; -static struct devlink_resource_size_params mlxsw_sp_kvd_size_params; -static struct devlink_resource_size_params mlxsw_sp_linear_size_params; -static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params; -static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params; -  static void -mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core) +mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, +				      struct devlink_resource_size_params *kvd_size_params, +				      struct devlink_resource_size_params *linear_size_params, +				      struct devlink_resource_size_params *hash_double_size_params, +				      struct devlink_resource_size_params *hash_single_size_params)  {  	u32 single_size_min = 
MLXSW_CORE_RES_GET(mlxsw_core,  						 KVD_SINGLE_MIN_SIZE); @@ -4222,37 +4247,35 @@ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)  	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);  	u32 linear_size_min = 0; -	/* KVD top resource */ -	mlxsw_sp_kvd_size_params.size_min = kvd_size; -	mlxsw_sp_kvd_size_params.size_max = kvd_size; -	mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; -	mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; - -	/* Linear part init */ -	mlxsw_sp_linear_size_params.size_min = linear_size_min; -	mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min - -					       double_size_min; -	mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; -	mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; - -	/* Hash double part init */ -	mlxsw_sp_hash_double_size_params.size_min = double_size_min; -	mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min - -						    linear_size_min; -	mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; -	mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; - -	/* Hash single part init */ -	mlxsw_sp_hash_single_size_params.size_min = single_size_min; -	mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min - -						    linear_size_min; -	mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; -	mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; +	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, +					  MLXSW_SP_KVD_GRANULARITY, +					  DEVLINK_RESOURCE_UNIT_ENTRY); +	devlink_resource_size_params_init(linear_size_params, linear_size_min, +					  kvd_size - single_size_min - +					  double_size_min, +					  MLXSW_SP_KVD_GRANULARITY, +					  DEVLINK_RESOURCE_UNIT_ENTRY); +	devlink_resource_size_params_init(hash_double_size_params, +					  double_size_min, +					  kvd_size - single_size_min - +				
	  linear_size_min, +					  MLXSW_SP_KVD_GRANULARITY, +					  DEVLINK_RESOURCE_UNIT_ENTRY); +	devlink_resource_size_params_init(hash_single_size_params, +					  single_size_min, +					  kvd_size - double_size_min - +					  linear_size_min, +					  MLXSW_SP_KVD_GRANULARITY, +					  DEVLINK_RESOURCE_UNIT_ENTRY);  }  static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)  {  	struct devlink *devlink = priv_to_devlink(mlxsw_core); +	struct devlink_resource_size_params hash_single_size_params; +	struct devlink_resource_size_params hash_double_size_params; +	struct devlink_resource_size_params linear_size_params; +	struct devlink_resource_size_params kvd_size_params;  	u32 kvd_size, single_size, double_size, linear_size;  	const struct mlxsw_config_profile *profile;  	int err; @@ -4261,13 +4284,17 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)  	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))  		return -EIO; -	mlxsw_sp_resource_size_params_prepare(mlxsw_core); +	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, +					      &linear_size_params, +					      &hash_double_size_params, +					      &hash_single_size_params); +  	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);  	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,  					true, kvd_size,  					MLXSW_SP_RESOURCE_KVD,  					DEVLINK_RESOURCE_ID_PARENT_TOP, -					&mlxsw_sp_kvd_size_params, +					&kvd_size_params,  					&mlxsw_sp_resource_kvd_ops);  	if (err)  		return err; @@ -4277,7 +4304,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)  					false, linear_size,  					MLXSW_SP_RESOURCE_KVD_LINEAR,  					MLXSW_SP_RESOURCE_KVD, -					&mlxsw_sp_linear_size_params, +					&linear_size_params,  					&mlxsw_sp_resource_kvd_linear_ops);  	if (err)  		return err; @@ -4291,7 +4318,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)  					false, double_size,  					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,  
					MLXSW_SP_RESOURCE_KVD, -					&mlxsw_sp_hash_double_size_params, +					&hash_double_size_params,  					&mlxsw_sp_resource_kvd_hash_double_ops);  	if (err)  		return err; @@ -4301,7 +4328,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)  					false, single_size,  					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,  					MLXSW_SP_RESOURCE_KVD, -					&mlxsw_sp_hash_single_size_params, +					&hash_single_size_params,  					&mlxsw_sp_resource_kvd_hash_single_ops);  	if (err)  		return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index bdd8f94a452c..92064db2ae44 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -120,6 +120,9 @@ struct mlxsw_sp_span_inspected_port {  	struct list_head list;  	enum mlxsw_sp_span_type type;  	u8 local_port; + +	/* Whether this is a directly bound mirror (port-to-port) or an ACL. */ +	bool bound;  };  struct mlxsw_sp_span_entry { @@ -211,6 +214,7 @@ struct mlxsw_sp_port_vlan {  	struct list_head list;  	struct mlxsw_sp_port *mlxsw_sp_port;  	struct mlxsw_sp_fid *fid; +	unsigned int ref_count;  	u16 vid;  	struct mlxsw_sp_bridge_port *bridge_port;  	struct list_head bridge_vlan_node; @@ -552,6 +556,7 @@ void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,  int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);  int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,  				u16 group_id); +int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei);  int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);  int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);  int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 0897a5435cc2..92d90ed7207e 100644 --- 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -528,6 +528,11 @@ int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,  	return mlxsw_afa_block_jump(rulei->act_block, group_id);  } +int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei) +{ +	return mlxsw_afa_block_terminate(rulei->act_block); +} +  int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)  {  	return mlxsw_afa_block_append_drop(rulei->act_block); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 93728c694e6d..0a9adc5962fb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -385,13 +385,13 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {  static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {  	MLXSW_SP_CPU_PORT_SB_CM, +	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), +	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), +	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), +	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), +	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),  	MLXSW_SP_CPU_PORT_SB_CM, -	MLXSW_SP_CPU_PORT_SB_CM, -	MLXSW_SP_CPU_PORT_SB_CM, -	MLXSW_SP_CPU_PORT_SB_CM, -	MLXSW_SP_CPU_PORT_SB_CM, -	MLXSW_SP_CPU_PORT_SB_CM, -	MLXSW_SP_SB_CM(10000, 0, 0), +	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),  	MLXSW_SP_CPU_PORT_SB_CM,  	MLXSW_SP_CPU_PORT_SB_CM,  	MLXSW_SP_CPU_PORT_SB_CM, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index bbd238e50f05..54262af4e98f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -112,11 +112,11 @@ static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {  	[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP]	= 1,  	
[MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL]			= 1,  	[MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST]			= 1, +	[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6]	= 1,  };  static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {  	[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4]	= 1, -	[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6]	= 1,  };  static const int *mlxsw_sp_packet_type_sfgc_types[] = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 6ce00e28d4ea..89dbf569dff5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -65,7 +65,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,  	tcf_exts_to_list(exts, &actions);  	list_for_each_entry(a, &actions, list) {  		if (is_tcf_gact_ok(a)) { -			err = mlxsw_sp_acl_rulei_act_continue(rulei); +			err = mlxsw_sp_acl_rulei_act_terminate(rulei);  			if (err)  				return err;  		} else if (is_tcf_gact_shot(a)) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index f0b25baba09a..f7948e983637 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -788,6 +788,9 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,  					      u32 tb_id,  					      struct netlink_ext_ack *extack)  { +	struct mlxsw_sp_mr_table *mr4_table; +	struct mlxsw_sp_fib *fib4; +	struct mlxsw_sp_fib *fib6;  	struct mlxsw_sp_vr *vr;  	int err; @@ -796,29 +799,30 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,  		NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");  		return ERR_PTR(-EBUSY);  	} -	vr->fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); -	if (IS_ERR(vr->fib4)) -		return ERR_CAST(vr->fib4); -	vr->fib6 = 
mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); -	if (IS_ERR(vr->fib6)) { -		err = PTR_ERR(vr->fib6); +	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); +	if (IS_ERR(fib4)) +		return ERR_CAST(fib4); +	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); +	if (IS_ERR(fib6)) { +		err = PTR_ERR(fib6);  		goto err_fib6_create;  	} -	vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id, -						 MLXSW_SP_L3_PROTO_IPV4); -	if (IS_ERR(vr->mr4_table)) { -		err = PTR_ERR(vr->mr4_table); +	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id, +					     MLXSW_SP_L3_PROTO_IPV4); +	if (IS_ERR(mr4_table)) { +		err = PTR_ERR(mr4_table);  		goto err_mr_table_create;  	} +	vr->fib4 = fib4; +	vr->fib6 = fib6; +	vr->mr4_table = mr4_table;  	vr->tb_id = tb_id;  	return vr;  err_mr_table_create: -	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6); -	vr->fib6 = NULL; +	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);  err_fib6_create: -	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4); -	vr->fib4 = NULL; +	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);  	return ERR_PTR(err);  } @@ -3790,6 +3794,9 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)  	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;  	int i; +	if (!list_is_singular(&nh_grp->fib_list)) +		return; +  	for (i = 0; i < nh_grp->count; i++) {  		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 593ad31be749..161bcdc012f0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1203,6 +1203,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,  				     bool dynamic)  {  	char *sfd_pl; +	u8 num_rec;  	int err;  	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); @@ -1212,9 +1213,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp 
*mlxsw_sp, u8 local_port,  	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);  	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),  			      mac, fid, action, local_port); +	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);  	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); -	kfree(sfd_pl); +	if (err) +		goto out; + +	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) +		err = -EBUSY; +out: +	kfree(sfd_pl);  	return err;  } @@ -1239,6 +1247,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,  				       bool adding, bool dynamic)  {  	char *sfd_pl; +	u8 num_rec;  	int err;  	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); @@ -1249,9 +1258,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,  	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),  				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,  				  lag_vid, lag_id); +	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);  	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); -	kfree(sfd_pl); +	if (err) +		goto out; + +	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) +		err = -EBUSY; +out: +	kfree(sfd_pl);  	return err;  } @@ -1296,6 +1312,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,  				u16 fid, u16 mid_idx, bool adding)  {  	char *sfd_pl; +	u8 num_rec;  	int err;  	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); @@ -1305,7 +1322,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,  	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);  	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,  			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx); +	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);  	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); +	if (err) +		goto out; + +	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) +		err = -EBUSY; + +out:  	kfree(sfd_pl);  	return err;  } diff --git a/drivers/net/ethernet/natsemi/Kconfig 
b/drivers/net/ethernet/natsemi/Kconfig index a10ef50e4f12..017fb2322589 100644 --- a/drivers/net/ethernet/natsemi/Kconfig +++ b/drivers/net/ethernet/natsemi/Kconfig @@ -1,16 +1,16 @@  # -# National Semi-conductor device configuration +# National Semiconductor device configuration  #  config NET_VENDOR_NATSEMI -	bool "National Semi-conductor devices" +	bool "National Semiconductor devices"  	default y  	---help---  	  If you have a network (Ethernet) card belonging to this class, say Y.  	  Note that the answer to this question doesn't directly affect the  	  kernel: saying N will just cause the configurator to skip all -	  the questions about National Semi-conductor devices. If you say Y, +	  the questions about National Semiconductor devices. If you say Y,  	  you will be asked for your specific card in the following questions.  if NET_VENDOR_NATSEMI diff --git a/drivers/net/ethernet/natsemi/Makefile b/drivers/net/ethernet/natsemi/Makefile index cc664977596e..a759aa09ef59 100644 --- a/drivers/net/ethernet/natsemi/Makefile +++ b/drivers/net/ethernet/natsemi/Makefile @@ -1,6 +1,6 @@  # SPDX-License-Identifier: GPL-2.0  # -# Makefile for the National Semi-conductor Sonic devices. +# Makefile for the National Semiconductor Sonic devices.  
#  obj-$(CONFIG_MACSONIC) += macsonic.o diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 6f546e869d8d..00f41c145d4d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -2480,7 +2480,10 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)  	if (rc)  		return rc; -	/* Free Task CXT */ +	/* Free Task CXT ( Intentionally RoCE as task-id is shared between +	 * RoCE and iWARP ) +	 */ +	proto = PROTOCOLID_ROCE;  	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,  				    qed_cxt_get_proto_tid_count(p_hwfn, proto));  	if (rc) diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index ca4a81dc1ace..d5d02be72947 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1703,6 +1703,13 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,  	iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);  	if (eth_type == ETH_P_IP) { +		if (iph->protocol != IPPROTO_TCP) { +			DP_NOTICE(p_hwfn, +				  "Unexpected ip protocol on ll2 %x\n", +				  iph->protocol); +			return -EINVAL; +		} +  		cm_info->local_ip[0] = ntohl(iph->daddr);  		cm_info->remote_ip[0] = ntohl(iph->saddr);  		cm_info->ip_version = TCP_IPV4; @@ -1711,6 +1718,14 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,  		*payload_len = ntohs(iph->tot_len) - ip_hlen;  	} else if (eth_type == ETH_P_IPV6) {  		ip6h = (struct ipv6hdr *)iph; + +		if (ip6h->nexthdr != IPPROTO_TCP) { +			DP_NOTICE(p_hwfn, +				  "Unexpected ip protocol on ll2 %x\n", +				  iph->protocol); +			return -EINVAL; +		} +  		for (i = 0; i < 4; i++) {  			cm_info->local_ip[i] =  			    ntohl(ip6h->daddr.in6_u.u6_addr32[i]); @@ -1928,8 +1943,8 @@ qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,  		/* Missing lower byte is now available */  		mpa_len = fpdu->fpdu_length | *mpa_data;  		fpdu->fpdu_length = 
QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); -		fpdu->mpa_frag_len = fpdu->fpdu_length;  		/* one byte of hdr */ +		fpdu->mpa_frag_len = 1;  		fpdu->incomplete_bytes = fpdu->fpdu_length - 1;  		DP_VERBOSE(p_hwfn,  			   QED_MSG_RDMA, diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 5d040b873137..a411f9c702a1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -379,6 +379,7 @@ static void qed_rdma_free(struct qed_hwfn *p_hwfn)  	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");  	qed_rdma_free_reserved_lkey(p_hwfn); +	qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto);  	qed_rdma_resc_free(p_hwfn);  } diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 2db70eabddfe..a01e7d6e5442 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -288,7 +288,7 @@ int __init qede_init(void)  	}  	/* Must register notifier before pci ops, since we might miss -	 * interface rename after pci probe and netdev registeration. +	 * interface rename after pci probe and netdev registration.  	 */  	ret = register_netdevice_notifier(&qede_netdev_notifier);  	if (ret) { @@ -988,7 +988,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,  	if (rc)  		goto err3; -	/* Prepare the lock prior to the registeration of the netdev, +	/* Prepare the lock prior to the registration of the netdev,  	 * as once it's registered we might reach flows requiring it  	 * [it's even possible to reach a flow needing it directly  	 * from there, although it's unlikely]. 
@@ -2067,8 +2067,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,  	link_params.link_up = true;  	edev->ops->common->set_link(edev->cdev, &link_params); -	qede_rdma_dev_event_open(edev); -  	edev->state = QEDE_STATE_OPEN;  	DP_INFO(edev, "Ending successfully qede load\n"); @@ -2169,12 +2167,14 @@ static void qede_link_update(void *dev, struct qed_link_output *link)  			DP_NOTICE(edev, "Link is up\n");  			netif_tx_start_all_queues(edev->ndev);  			netif_carrier_on(edev->ndev); +			qede_rdma_dev_event_open(edev);  		}  	} else {  		if (netif_carrier_ok(edev->ndev)) {  			DP_NOTICE(edev, "Link is down\n");  			netif_tx_disable(edev->ndev);  			netif_carrier_off(edev->ndev); +			qede_rdma_dev_event_close(edev);  		}  	}  } diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 9b2280badaf7..02adb513f475 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -485,7 +485,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)  	ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);  	if (IS_ERR(ptp->clock)) {  		rc = -EINVAL; -		DP_ERR(edev, "PTP clock registeration failed\n"); +		DP_ERR(edev, "PTP clock registration failed\n");  		goto err2;  	} diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 9cbb27263742..d5a32b7c7dc5 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -1194,9 +1194,9 @@ void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q)  	while (tx_q->tpd.consume_idx != hw_consume_idx) {  		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx);  		if (tpbuf->dma_addr) { -			dma_unmap_single(adpt->netdev->dev.parent, -					 tpbuf->dma_addr, tpbuf->length, -					 DMA_TO_DEVICE); +			dma_unmap_page(adpt->netdev->dev.parent, +				       tpbuf->dma_addr, tpbuf->length, 
+				       DMA_TO_DEVICE);  			tpbuf->dma_addr = 0;  		} @@ -1353,9 +1353,11 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,  		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);  		tpbuf->length = mapped_len; -		tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, -						 skb->data, tpbuf->length, -						 DMA_TO_DEVICE); +		tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent, +					       virt_to_page(skb->data), +					       offset_in_page(skb->data), +					       tpbuf->length, +					       DMA_TO_DEVICE);  		ret = dma_mapping_error(adpt->netdev->dev.parent,  					tpbuf->dma_addr);  		if (ret) @@ -1371,9 +1373,12 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,  	if (mapped_len < len) {  		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);  		tpbuf->length = len - mapped_len; -		tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, -						 skb->data + mapped_len, -						 tpbuf->length, DMA_TO_DEVICE); +		tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent, +					       virt_to_page(skb->data + +							    mapped_len), +					       offset_in_page(skb->data + +							      mapped_len), +					       tpbuf->length, DMA_TO_DEVICE);  		ret = dma_mapping_error(adpt->netdev->dev.parent,  					tpbuf->dma_addr);  		if (ret) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 7e7704daf5f1..c4949183eef3 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -43,12 +43,6 @@  /* Local Definitions and Declarations */ -struct rmnet_walk_data { -	struct net_device *real_dev; -	struct list_head *head; -	struct rmnet_port *port; -}; -  static int rmnet_is_real_dev_registered(const struct net_device *real_dev)  {  	return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler; @@ -112,17 +106,14 @@ static int rmnet_register_real_device(struct net_device *real_dev)  static void 
rmnet_unregister_bridge(struct net_device *dev,  				    struct rmnet_port *port)  { -	struct net_device *rmnet_dev, *bridge_dev;  	struct rmnet_port *bridge_port; +	struct net_device *bridge_dev;  	if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)  		return;  	/* bridge slave handling */  	if (!port->nr_rmnet_devs) { -		rmnet_dev = netdev_master_upper_dev_get_rcu(dev); -		netdev_upper_dev_unlink(dev, rmnet_dev); -  		bridge_dev = port->bridge_ep;  		bridge_port = rmnet_get_port_rtnl(bridge_dev); @@ -132,9 +123,6 @@ static void rmnet_unregister_bridge(struct net_device *dev,  		bridge_dev = port->bridge_ep;  		bridge_port = rmnet_get_port_rtnl(bridge_dev); -		rmnet_dev = netdev_master_upper_dev_get_rcu(bridge_dev); -		netdev_upper_dev_unlink(bridge_dev, rmnet_dev); -  		rmnet_unregister_real_device(bridge_dev, bridge_port);  	}  } @@ -173,10 +161,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,  	if (err)  		goto err1; -	err = netdev_master_upper_dev_link(dev, real_dev, NULL, NULL, extack); -	if (err) -		goto err2; -  	port->rmnet_mode = mode;  	hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); @@ -193,8 +177,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,  	return 0; -err2: -	rmnet_vnd_dellink(mux_id, port, ep);  err1:  	rmnet_unregister_real_device(real_dev, port);  err0: @@ -204,14 +186,13 @@ err0:  static void rmnet_dellink(struct net_device *dev, struct list_head *head)  { +	struct rmnet_priv *priv = netdev_priv(dev);  	struct net_device *real_dev;  	struct rmnet_endpoint *ep;  	struct rmnet_port *port;  	u8 mux_id; -	rcu_read_lock(); -	real_dev = netdev_master_upper_dev_get_rcu(dev); -	rcu_read_unlock(); +	real_dev = priv->real_dev;  	if (!real_dev || !rmnet_is_real_dev_registered(real_dev))  		return; @@ -219,7 +200,6 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)  	port = rmnet_get_port_rtnl(real_dev);  	mux_id = rmnet_vnd_get_mux(dev); -	netdev_upper_dev_unlink(dev, 
real_dev);  	ep = rmnet_get_endpoint(port, mux_id);  	if (ep) { @@ -233,30 +213,13 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)  	unregister_netdevice_queue(dev, head);  } -static int rmnet_dev_walk_unreg(struct net_device *rmnet_dev, void *data) -{ -	struct rmnet_walk_data *d = data; -	struct rmnet_endpoint *ep; -	u8 mux_id; - -	mux_id = rmnet_vnd_get_mux(rmnet_dev); -	ep = rmnet_get_endpoint(d->port, mux_id); -	if (ep) { -		hlist_del_init_rcu(&ep->hlnode); -		rmnet_vnd_dellink(mux_id, d->port, ep); -		kfree(ep); -	} -	netdev_upper_dev_unlink(rmnet_dev, d->real_dev); -	unregister_netdevice_queue(rmnet_dev, d->head); - -	return 0; -} -  static void rmnet_force_unassociate_device(struct net_device *dev)  {  	struct net_device *real_dev = dev; -	struct rmnet_walk_data d; +	struct hlist_node *tmp_ep; +	struct rmnet_endpoint *ep;  	struct rmnet_port *port; +	unsigned long bkt_ep;  	LIST_HEAD(list);  	if (!rmnet_is_real_dev_registered(real_dev)) @@ -264,16 +227,19 @@ static void rmnet_force_unassociate_device(struct net_device *dev)  	ASSERT_RTNL(); -	d.real_dev = real_dev; -	d.head = &list; -  	port = rmnet_get_port_rtnl(dev); -	d.port = port;  	rcu_read_lock();  	rmnet_unregister_bridge(dev, port); -	netdev_walk_all_lower_dev_rcu(real_dev, rmnet_dev_walk_unreg, &d); +	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { +		unregister_netdevice_queue(ep->egress_dev, &list); +		rmnet_vnd_dellink(ep->mux_id, port, ep); + +		hlist_del_init_rcu(&ep->hlnode); +		kfree(ep); +	} +  	rcu_read_unlock();  	unregister_netdevice_many(&list); @@ -422,11 +388,6 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,  	if (err)  		return -EBUSY; -	err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL, -					   extack); -	if (err) -		return -EINVAL; -  	slave_port = rmnet_get_port(slave_dev);  	slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;  	slave_port->bridge_ep = real_dev; @@ -449,7 +410,6 @@ int rmnet_del_bridge(struct 
net_device *rmnet_dev,  	port->rmnet_mode = RMNET_EPMODE_VND;  	port->bridge_ep = NULL; -	netdev_upper_dev_unlink(slave_dev, rmnet_dev);  	slave_port = rmnet_get_port(slave_dev);  	rmnet_unregister_real_device(slave_dev, slave_port); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c index 6bc328fb88e1..b0dbca070c00 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c @@ -38,6 +38,11 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,  	}  	ep = rmnet_get_endpoint(port, mux_id); +	if (!ep) { +		kfree_skb(skb); +		return RX_HANDLER_CONSUMED; +	} +  	vnd = ep->egress_dev;  	ip_family = cmd->flow_control.ip_family; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 570a227acdd8..346d310914df 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -121,7 +121,7 @@ static void rmnet_get_stats64(struct net_device *dev,  	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));  	for_each_possible_cpu(cpu) { -		pcpu_ptr = this_cpu_ptr(priv->pcpu_stats); +		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);  		do {  			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index c87f57ca4437..a95fbd5510d9 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2255,9 +2255,6 @@ static int ravb_wol_setup(struct net_device *ndev)  	/* Enable MagicPacket */  	ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); -	/* Increased clock usage so device won't be suspended */ -	clk_enable(priv->clk); -  	return enable_irq_wake(priv->emac_irq);  } @@ -2276,9 +2273,6 @@ static int ravb_wol_restore(struct net_device *ndev)  	if (ret < 0)  		return ret; -	/* Restore 
clock usage count */ -	clk_disable(priv->clk); -  	return disable_irq_wake(priv->emac_irq);  } diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index a197e11f3a56..14c839bb09e7 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -40,7 +40,6 @@  #include <linux/slab.h>  #include <linux/ethtool.h>  #include <linux/if_vlan.h> -#include <linux/clk.h>  #include <linux/sh_eth.h>  #include <linux/of_mdio.h> @@ -440,6 +439,17 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,  		     enum_index);  } +static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, +			     int enum_index) +{ +	iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]); +} + +static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) +{ +	return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]); +} +  static bool sh_eth_is_gether(struct sh_eth_private *mdp)  {  	return mdp->reg_offset == sh_eth_offset_gigabit; @@ -2304,7 +2314,7 @@ static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)  	wol->supported = 0;  	wol->wolopts = 0; -	if (mdp->cd->magic && mdp->clk) { +	if (mdp->cd->magic) {  		wol->supported = WAKE_MAGIC;  		wol->wolopts = mdp->wol_enabled ? 
WAKE_MAGIC : 0;  	} @@ -2314,7 +2324,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)  {  	struct sh_eth_private *mdp = netdev_priv(ndev); -	if (!mdp->cd->magic || !mdp->clk || wol->wolopts & ~WAKE_MAGIC) +	if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC)  		return -EOPNOTSUPP;  	mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); @@ -3153,11 +3163,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)  		goto out_release;  	} -	/* Get clock, if not found that's OK but Wake-On-Lan is unavailable */ -	mdp->clk = devm_clk_get(&pdev->dev, NULL); -	if (IS_ERR(mdp->clk)) -		mdp->clk = NULL; -  	ndev->base_addr = res->start;  	spin_lock_init(&mdp->lock); @@ -3278,7 +3283,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)  	if (ret)  		goto out_napi_del; -	if (mdp->cd->magic && mdp->clk) +	if (mdp->cd->magic)  		device_set_wakeup_capable(&pdev->dev, 1);  	/* print device information */ @@ -3331,9 +3336,6 @@ static int sh_eth_wol_setup(struct net_device *ndev)  	/* Enable MagicPacket */  	sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); -	/* Increased clock usage so device won't be suspended */ -	clk_enable(mdp->clk); -  	return enable_irq_wake(ndev->irq);  } @@ -3359,9 +3361,6 @@ static int sh_eth_wol_restore(struct net_device *ndev)  	if (ret < 0)  		return ret; -	/* Restore clock usage count */ -	clk_disable(mdp->clk); -  	return disable_irq_wake(ndev->irq);  } diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index a6753ccba711..e5fe70134690 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -567,15 +567,4 @@ static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,  	return mdp->tsu_addr + mdp->reg_offset[enum_index];  } -static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, -				    int enum_index) -{ -	iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]); -} - -static inline u32 
sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) -{ -	return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]); -} -  #endif	/* #ifndef __SH_ETH_H__ */ diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig index 63aca9f847e1..4c2f612e4414 100644 --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig @@ -20,7 +20,7 @@ if NET_VENDOR_SMSC  config SMC9194  	tristate "SMC 9194 support" -	depends on (ISA || MAC && BROKEN) +	depends on ISA  	select CRC32  	---help---  	  This is support for the SMC9xxx based Ethernet cards. Choose this diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 012fb66eed8d..f0afb88d7bc2 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2335,14 +2335,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev)  	pdata = netdev_priv(dev);  	BUG_ON(!pdata);  	BUG_ON(!pdata->ioaddr); -	WARN_ON(dev->phydev);  	SMSC_TRACE(pdata, ifdown, "Stopping driver"); +	unregister_netdev(dev); +  	mdiobus_unregister(pdata->mii_bus);  	mdiobus_free(pdata->mii_bus); -	unregister_netdev(dev);  	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,  					   "smsc911x-memory");  	if (!res) diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 111e7ca9df56..f5c5984afefb 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1295,7 +1295,7 @@ static int ave_open(struct net_device *ndev)  	val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);  	writel(val, priv->base + AVE_IIRQC); -	val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX; +	val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX | AVE_GI_RXDROP;  	ave_irq_restore(ndev, val);  	napi_enable(&priv->napi_rx); diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 63d3d6b215f3..a94f50442613 100644 --- 
a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -312,7 +312,7 @@ static struct vnet *vnet_new(const u64 *local_mac,  	dev->ethtool_ops = &vnet_ethtool_ops;  	dev->watchdog_timeo = VNET_TX_TIMEOUT; -	dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | +	dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_ALL_TSO |  			   NETIF_F_HW_CSUM | NETIF_F_SG;  	dev->features = dev->hw_features; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 1b1b78fdc138..b2b30c9df037 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1014,7 +1014,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,  		/* set speed_in input in case RMII mode is used in 100Mbps */  		if (phy->speed == 100)  			mac_control |= BIT(15); -		else if (phy->speed == 10) +		/* in band mode only works in 10Mbps RGMII mode */ +		else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))  			mac_control |= BIT(18); /* In Band mode */  		if (priv->rx_pause) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 0db3bd1ea06f..32861036c3fc 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -173,6 +173,7 @@ struct rndis_device {  	struct list_head req_list;  	struct work_struct mcast_work; +	u32 filter;  	bool link_state;        /* 0 - link up, 1 - link down */ @@ -211,7 +212,6 @@ void netvsc_channel_cb(void *context);  int netvsc_poll(struct napi_struct *napi, int budget);  void rndis_set_subchannel(struct work_struct *w); -bool rndis_filter_opened(const struct netvsc_device *nvdev);  int rndis_filter_open(struct netvsc_device *nvdev);  int rndis_filter_close(struct netvsc_device *nvdev);  struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 17e529af79dc..7472172823f3 100644 --- a/drivers/net/hyperv/netvsc.c +++ 
b/drivers/net/hyperv/netvsc.c @@ -90,6 +90,11 @@ static void free_netvsc_device(struct rcu_head *head)  		= container_of(head, struct netvsc_device, rcu);  	int i; +	kfree(nvdev->extension); +	vfree(nvdev->recv_buf); +	vfree(nvdev->send_buf); +	kfree(nvdev->send_section_map); +  	for (i = 0; i < VRSS_CHANNEL_MAX; i++)  		vfree(nvdev->chan_table[i].mrc.slots); @@ -211,12 +216,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device,  		net_device->recv_buf_gpadl_handle = 0;  	} -	if (net_device->recv_buf) { -		/* Free up the receive buffer */ -		vfree(net_device->recv_buf); -		net_device->recv_buf = NULL; -	} -  	if (net_device->send_buf_gpadl_handle) {  		ret = vmbus_teardown_gpadl(device->channel,  					   net_device->send_buf_gpadl_handle); @@ -231,12 +230,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device,  		}  		net_device->send_buf_gpadl_handle = 0;  	} -	if (net_device->send_buf) { -		/* Free up the send buffer */ -		vfree(net_device->send_buf); -		net_device->send_buf = NULL; -	} -	kfree(net_device->send_section_map);  }  int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) @@ -562,26 +555,29 @@ void netvsc_device_remove(struct hv_device *device)  		= rtnl_dereference(net_device_ctx->nvdev);  	int i; -	cancel_work_sync(&net_device->subchan_work); -  	netvsc_revoke_buf(device, net_device);  	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); +	/* And disassociate NAPI context from device */ +	for (i = 0; i < net_device->num_chn; i++) +		netif_napi_del(&net_device->chan_table[i].napi); +  	/*  	 * At this point, no one should be accessing net_device  	 * except in here  	 */  	netdev_dbg(ndev, "net device safe to remove\n"); +	/* older versions require that buffer be revoked before close */ +	if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_4) +		netvsc_teardown_gpadl(device, net_device); +  	/* Now, we can close the channel safely */  	vmbus_close(device->channel); -	netvsc_teardown_gpadl(device, net_device); - -	
/* And dissassociate NAPI context from device */ -	for (i = 0; i < net_device->num_chn; i++) -		netif_napi_del(&net_device->chan_table[i].napi); +	if (net_device->nvsp_version >= NVSP_PROTOCOL_VERSION_4) +		netvsc_teardown_gpadl(device, net_device);  	/* Release all resources */  	free_netvsc_device_rcu(net_device); @@ -645,14 +641,18 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,  	queue_sends =  		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); -	if (net_device->destroy && queue_sends == 0) -		wake_up(&net_device->wait_drain); +	if (unlikely(net_device->destroy)) { +		if (queue_sends == 0) +			wake_up(&net_device->wait_drain); +	} else { +		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); -	if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && -	    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || -	     queue_sends < 1)) { -		netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx)); -		ndev_ctx->eth_stats.wake_queue++; +		if (netif_tx_queue_stopped(txq) && +		    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || +		     queue_sends < 1)) { +			netif_tx_wake_queue(txq); +			ndev_ctx->eth_stats.wake_queue++; +		}  	}  } @@ -852,13 +852,6 @@ int netvsc_send(struct net_device *ndev,  	if (unlikely(!net_device || net_device->destroy))  		return -ENODEV; -	/* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get -	 * here before the negotiation with the host is finished and -	 * send_section_map may not be allocated yet. -	 */ -	if (unlikely(!net_device->send_section_map)) -		return -EAGAIN; -  	nvchan = &net_device->chan_table[packet->q_idx];  	packet->send_buf_index = NETVSC_INVALID_INDEX;  	packet->cp_partial = false; @@ -866,10 +859,8 @@ int netvsc_send(struct net_device *ndev,  	/* Send control message directly without accessing msd (Multi-Send  	 * Data) field which may be changed during data packet processing.  	 
*/ -	if (!skb) { -		cur_send = packet; -		goto send_now; -	} +	if (!skb) +		return netvsc_send_pkt(device, packet, net_device, pb, skb);  	/* batch packets in send buffer if possible */  	msdp = &nvchan->msd; @@ -953,7 +944,6 @@ int netvsc_send(struct net_device *ndev,  		}  	} -send_now:  	if (cur_send)  		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb); @@ -1217,9 +1207,10 @@ int netvsc_poll(struct napi_struct *napi, int budget)  	if (send_recv_completions(ndev, net_device, nvchan) == 0 &&  	    work_done < budget &&  	    napi_complete_done(napi, work_done) && -	    hv_end_read(&channel->inbound)) { +	    hv_end_read(&channel->inbound) && +	    napi_schedule_prep(napi)) {  		hv_begin_read(&channel->inbound); -		napi_reschedule(napi); +		__napi_schedule(napi);  	}  	/* Driver may overshoot since multiple packets per descriptor */ @@ -1242,7 +1233,7 @@ void netvsc_channel_cb(void *context)  		/* disable interupts from host */  		hv_begin_read(rbi); -		__napi_schedule(&nvchan->napi); +		__napi_schedule_irqoff(&nvchan->napi);  	}  } @@ -1296,7 +1287,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,  			 netvsc_channel_cb, net_device->chan_table);  	if (ret != 0) { -		netif_napi_del(&net_device->chan_table[0].napi);  		netdev_err(ndev, "unable to open channel: %d\n", ret);  		goto cleanup;  	} @@ -1306,11 +1296,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,  	napi_enable(&net_device->chan_table[0].napi); -	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is -	 * populated. -	 */ -	rcu_assign_pointer(net_device_ctx->nvdev, net_device); -  	/* Connect with the NetVsp */  	ret = netvsc_connect_vsp(device, net_device, device_info);  	if (ret != 0) { @@ -1319,6 +1304,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,  		goto close;  	} +	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is +	 * populated. 
+	 */ +	rcu_assign_pointer(net_device_ctx->nvdev, net_device); +  	return net_device;  close: @@ -1329,6 +1319,7 @@ close:  	vmbus_close(device->channel);  cleanup: +	netif_napi_del(&net_device->chan_table[0].napi);  	free_netvsc_device(&net_device->rcu);  	return ERR_PTR(ret); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c5584c2d440e..f28c85d212ce 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -46,7 +46,10 @@  #include "hyperv_net.h" -#define RING_SIZE_MIN		64 +#define RING_SIZE_MIN	64 +#define RETRY_US_LO	5000 +#define RETRY_US_HI	10000 +#define RETRY_MAX	2000	/* >10 sec */  #define LINKCHANGE_INT (2 * HZ)  #define VF_TAKEOVER_INT (HZ / 10) @@ -66,12 +69,43 @@ static int debug = -1;  module_param(debug, int, S_IRUGO);  MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); -static void netvsc_set_multicast_list(struct net_device *net) +static void netvsc_change_rx_flags(struct net_device *net, int change)  { -	struct net_device_context *net_device_ctx = netdev_priv(net); -	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); +	struct net_device_context *ndev_ctx = netdev_priv(net); +	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); +	int inc; + +	if (!vf_netdev) +		return; + +	if (change & IFF_PROMISC) { +		inc = (net->flags & IFF_PROMISC) ? 1 : -1; +		dev_set_promiscuity(vf_netdev, inc); +	} + +	if (change & IFF_ALLMULTI) { +		inc = (net->flags & IFF_ALLMULTI) ? 
1 : -1; +		dev_set_allmulti(vf_netdev, inc); +	} +} -	rndis_filter_update(nvdev); +static void netvsc_set_rx_mode(struct net_device *net) +{ +	struct net_device_context *ndev_ctx = netdev_priv(net); +	struct net_device *vf_netdev; +	struct netvsc_device *nvdev; + +	rcu_read_lock(); +	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev); +	if (vf_netdev) { +		dev_uc_sync(vf_netdev, net); +		dev_mc_sync(vf_netdev, net); +	} + +	nvdev = rcu_dereference(ndev_ctx->nvdev); +	if (nvdev) +		rndis_filter_update(nvdev); +	rcu_read_unlock();  }  static int netvsc_open(struct net_device *net) @@ -91,10 +125,7 @@ static int netvsc_open(struct net_device *net)  		return ret;  	} -	netif_tx_wake_all_queues(net); -  	rdev = nvdev->extension; -  	if (!rdev->link_state)  		netif_carrier_on(net); @@ -112,36 +143,25 @@ static int netvsc_open(struct net_device *net)  	return 0;  } -static int netvsc_close(struct net_device *net) +static int netvsc_wait_until_empty(struct netvsc_device *nvdev)  { -	struct net_device_context *net_device_ctx = netdev_priv(net); -	struct net_device *vf_netdev -		= rtnl_dereference(net_device_ctx->vf_netdev); -	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); -	int ret = 0; -	u32 aread, i, msec = 10, retry = 0, retry_max = 20; -	struct vmbus_channel *chn; - -	netif_tx_disable(net); - -	/* No need to close rndis filter if it is removed already */ -	if (!nvdev) -		goto out; - -	ret = rndis_filter_close(nvdev); -	if (ret != 0) { -		netdev_err(net, "unable to close device (ret %d).\n", ret); -		return ret; -	} +	unsigned int retry = 0; +	int i;  	/* Ensure pending bytes in ring are read */ -	while (true) { -		aread = 0; +	for (;;) { +		u32 aread = 0; +  		for (i = 0; i < nvdev->num_chn; i++) { -			chn = nvdev->chan_table[i].channel; +			struct vmbus_channel *chn +				= nvdev->chan_table[i].channel; +  			if (!chn)  				continue; +			/* make sure receive not running now */ +			napi_synchronize(&nvdev->chan_table[i].napi); +  			aread = 
hv_get_bytes_to_read(&chn->inbound);  			if (aread)  				break; @@ -151,22 +171,40 @@ static int netvsc_close(struct net_device *net)  				break;  		} -		retry++; -		if (retry > retry_max || aread == 0) -			break; +		if (aread == 0) +			return 0; -		msleep(msec); +		if (++retry > RETRY_MAX) +			return -ETIMEDOUT; -		if (msec < 1000) -			msec *= 2; +		usleep_range(RETRY_US_LO, RETRY_US_HI);  	} +} -	if (aread) { -		netdev_err(net, "Ring buffer not empty after closing rndis\n"); -		ret = -ETIMEDOUT; +static int netvsc_close(struct net_device *net) +{ +	struct net_device_context *net_device_ctx = netdev_priv(net); +	struct net_device *vf_netdev +		= rtnl_dereference(net_device_ctx->vf_netdev); +	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); +	int ret; + +	netif_tx_disable(net); + +	/* No need to close rndis filter if it is removed already */ +	if (!nvdev) +		return 0; + +	ret = rndis_filter_close(nvdev); +	if (ret != 0) { +		netdev_err(net, "unable to close device (ret %d).\n", ret); +		return ret;  	} -out: +	ret = netvsc_wait_until_empty(nvdev); +	if (ret) +		netdev_err(net, "Ring buffer not empty after closing rndis\n"); +  	if (vf_netdev)  		dev_close(vf_netdev); @@ -299,8 +337,19 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,  	rcu_read_lock();  	vf_netdev = rcu_dereference(ndc->vf_netdev);  	if (vf_netdev) { -		txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; -		qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; +		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops; + +		if (vf_ops->ndo_select_queue) +			txq = vf_ops->ndo_select_queue(vf_netdev, skb, +						       accel_priv, fallback); +		else +			txq = fallback(vf_netdev, skb); + +		/* Record the queue selected by VF so that it can be +		 * used for common case where VF has more queues than +		 * the synthetic device. 
+		 */ +		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;  	} else {  		txq = netvsc_pick_tx(ndev, skb);  	} @@ -804,16 +853,81 @@ static void netvsc_get_channels(struct net_device *net,  	}  } +static int netvsc_detach(struct net_device *ndev, +			 struct netvsc_device *nvdev) +{ +	struct net_device_context *ndev_ctx = netdev_priv(ndev); +	struct hv_device *hdev = ndev_ctx->device_ctx; +	int ret; + +	/* Don't try continuing to try and setup sub channels */ +	if (cancel_work_sync(&nvdev->subchan_work)) +		nvdev->num_chn = 1; + +	/* If device was up (receiving) then shutdown */ +	if (netif_running(ndev)) { +		netif_tx_disable(ndev); + +		ret = rndis_filter_close(nvdev); +		if (ret) { +			netdev_err(ndev, +				   "unable to close device (ret %d).\n", ret); +			return ret; +		} + +		ret = netvsc_wait_until_empty(nvdev); +		if (ret) { +			netdev_err(ndev, +				   "Ring buffer not empty after closing rndis\n"); +			return ret; +		} +	} + +	netif_device_detach(ndev); + +	rndis_filter_device_remove(hdev, nvdev); + +	return 0; +} + +static int netvsc_attach(struct net_device *ndev, +			 struct netvsc_device_info *dev_info) +{ +	struct net_device_context *ndev_ctx = netdev_priv(ndev); +	struct hv_device *hdev = ndev_ctx->device_ctx; +	struct netvsc_device *nvdev; +	struct rndis_device *rdev; +	int ret; + +	nvdev = rndis_filter_device_add(hdev, dev_info); +	if (IS_ERR(nvdev)) +		return PTR_ERR(nvdev); + +	/* Note: enable and attach happen when sub-channels setup */ + +	netif_carrier_off(ndev); + +	if (netif_running(ndev)) { +		ret = rndis_filter_open(nvdev); +		if (ret) +			return ret; + +		rdev = nvdev->extension; +		if (!rdev->link_state) +			netif_carrier_on(ndev); +	} + +	return 0; +} +  static int netvsc_set_channels(struct net_device *net,  			       struct ethtool_channels *channels)  {  	struct net_device_context *net_device_ctx = netdev_priv(net); -	struct hv_device *dev = net_device_ctx->device_ctx;  	struct netvsc_device *nvdev = 
rtnl_dereference(net_device_ctx->nvdev);  	unsigned int orig, count = channels->combined_count;  	struct netvsc_device_info device_info; -	bool was_opened; -	int ret = 0; +	int ret;  	/* We do not support separate count for rx, tx, or other */  	if (count == 0 || @@ -830,9 +944,6 @@ static int netvsc_set_channels(struct net_device *net,  		return -EINVAL;  	orig = nvdev->num_chn; -	was_opened = rndis_filter_opened(nvdev); -	if (was_opened) -		rndis_filter_close(nvdev);  	memset(&device_info, 0, sizeof(device_info));  	device_info.num_chn = count; @@ -841,28 +952,17 @@ static int netvsc_set_channels(struct net_device *net,  	device_info.recv_sections = nvdev->recv_section_cnt;  	device_info.recv_section_size = nvdev->recv_section_size; -	rndis_filter_device_remove(dev, nvdev); +	ret = netvsc_detach(net, nvdev); +	if (ret) +		return ret; -	nvdev = rndis_filter_device_add(dev, &device_info); -	if (IS_ERR(nvdev)) { -		ret = PTR_ERR(nvdev); +	ret = netvsc_attach(net, &device_info); +	if (ret) {  		device_info.num_chn = orig; -		nvdev = rndis_filter_device_add(dev, &device_info); - -		if (IS_ERR(nvdev)) { -			netdev_err(net, "restoring channel setting failed: %ld\n", -				   PTR_ERR(nvdev)); -			return ret; -		} +		if (netvsc_attach(net, &device_info)) +			netdev_err(net, "restoring channel setting failed\n");  	} -	if (was_opened) -		rndis_filter_open(nvdev); - -	/* We may have missed link change notifications */ -	net_device_ctx->last_reconfig = 0; -	schedule_delayed_work(&net_device_ctx->dwork, 0); -  	return ret;  } @@ -928,10 +1028,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)  	struct net_device_context *ndevctx = netdev_priv(ndev);  	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);  	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); -	struct hv_device *hdev = ndevctx->device_ctx;  	int orig_mtu = ndev->mtu;  	struct netvsc_device_info device_info; -	bool was_opened;  	int ret = 0;  	if (!nvdev || 
nvdev->destroy) @@ -944,11 +1042,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)  			return ret;  	} -	netif_device_detach(ndev); -	was_opened = rndis_filter_opened(nvdev); -	if (was_opened) -		rndis_filter_close(nvdev); -  	memset(&device_info, 0, sizeof(device_info));  	device_info.num_chn = nvdev->num_chn;  	device_info.send_sections = nvdev->send_section_cnt; @@ -956,35 +1049,27 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)  	device_info.recv_sections = nvdev->recv_section_cnt;  	device_info.recv_section_size = nvdev->recv_section_size; -	rndis_filter_device_remove(hdev, nvdev); +	ret = netvsc_detach(ndev, nvdev); +	if (ret) +		goto rollback_vf;  	ndev->mtu = mtu; -	nvdev = rndis_filter_device_add(hdev, &device_info); -	if (IS_ERR(nvdev)) { -		ret = PTR_ERR(nvdev); - -		/* Attempt rollback to original MTU */ -		ndev->mtu = orig_mtu; -		nvdev = rndis_filter_device_add(hdev, &device_info); - -		if (vf_netdev) -			dev_set_mtu(vf_netdev, orig_mtu); - -		if (IS_ERR(nvdev)) { -			netdev_err(ndev, "restoring mtu failed: %ld\n", -				   PTR_ERR(nvdev)); -			return ret; -		} -	} +	ret = netvsc_attach(ndev, &device_info); +	if (ret) +		goto rollback; -	if (was_opened) -		rndis_filter_open(nvdev); +	return 0; -	netif_device_attach(ndev); +rollback: +	/* Attempt rollback to original MTU */ +	ndev->mtu = orig_mtu; -	/* We may have missed link change notifications */ -	schedule_delayed_work(&ndevctx->dwork, 0); +	if (netvsc_attach(ndev, &device_info)) +		netdev_err(ndev, "restoring mtu failed\n"); +rollback_vf: +	if (vf_netdev) +		dev_set_mtu(vf_netdev, orig_mtu);  	return ret;  } @@ -1490,11 +1575,9 @@ static int netvsc_set_ringparam(struct net_device *ndev,  {  	struct net_device_context *ndevctx = netdev_priv(ndev);  	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); -	struct hv_device *hdev = ndevctx->device_ctx;  	struct netvsc_device_info device_info;  	struct ethtool_ringparam orig;  	u32 new_tx, new_rx; -	bool 
was_opened;  	int ret = 0;  	if (!nvdev || nvdev->destroy) @@ -1519,34 +1602,18 @@ static int netvsc_set_ringparam(struct net_device *ndev,  	device_info.recv_sections = new_rx;  	device_info.recv_section_size = nvdev->recv_section_size; -	netif_device_detach(ndev); -	was_opened = rndis_filter_opened(nvdev); -	if (was_opened) -		rndis_filter_close(nvdev); - -	rndis_filter_device_remove(hdev, nvdev); - -	nvdev = rndis_filter_device_add(hdev, &device_info); -	if (IS_ERR(nvdev)) { -		ret = PTR_ERR(nvdev); +	ret = netvsc_detach(ndev, nvdev); +	if (ret) +		return ret; +	ret = netvsc_attach(ndev, &device_info); +	if (ret) {  		device_info.send_sections = orig.tx_pending;  		device_info.recv_sections = orig.rx_pending; -		nvdev = rndis_filter_device_add(hdev, &device_info); -		if (IS_ERR(nvdev)) { -			netdev_err(ndev, "restoring ringparam failed: %ld\n", -				   PTR_ERR(nvdev)); -			return ret; -		} -	} -	if (was_opened) -		rndis_filter_open(nvdev); -	netif_device_attach(ndev); - -	/* We may have missed link change notifications */ -	ndevctx->last_reconfig = 0; -	schedule_delayed_work(&ndevctx->dwork, 0); +		if (netvsc_attach(ndev, &device_info)) +			netdev_err(ndev, "restoring ringparam failed"); +	}  	return ret;  } @@ -1576,7 +1643,8 @@ static const struct net_device_ops device_ops = {  	.ndo_open =			netvsc_open,  	.ndo_stop =			netvsc_close,  	.ndo_start_xmit =		netvsc_start_xmit, -	.ndo_set_rx_mode =		netvsc_set_multicast_list, +	.ndo_change_rx_flags =		netvsc_change_rx_flags, +	.ndo_set_rx_mode =		netvsc_set_rx_mode,  	.ndo_change_mtu =		netvsc_change_mtu,  	.ndo_validate_addr =		eth_validate_addr,  	.ndo_set_mac_address =		netvsc_set_mac_addr, @@ -1807,6 +1875,15 @@ static void __netvsc_vf_setup(struct net_device *ndev,  		netdev_warn(vf_netdev,  			    "unable to change mtu to %u\n", ndev->mtu); +	/* set multicast etc flags on VF */ +	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE); + +	/* sync address list from ndev to VF */ +	netif_addr_lock_bh(ndev); +	
dev_uc_sync(vf_netdev, ndev); +	dev_mc_sync(vf_netdev, ndev); +	netif_addr_unlock_bh(ndev); +  	if (netif_running(ndev)) {  		ret = dev_open(vf_netdev);  		if (ret) @@ -2021,8 +2098,8 @@ no_net:  static int netvsc_remove(struct hv_device *dev)  {  	struct net_device_context *ndev_ctx; -	struct net_device *vf_netdev; -	struct net_device *net; +	struct net_device *vf_netdev, *net; +	struct netvsc_device *nvdev;  	net = hv_get_drvdata(dev);  	if (net == NULL) { @@ -2032,10 +2109,14 @@ static int netvsc_remove(struct hv_device *dev)  	ndev_ctx = netdev_priv(net); -	netif_device_detach(net); -  	cancel_delayed_work_sync(&ndev_ctx->dwork); +	rcu_read_lock(); +	nvdev = rcu_dereference(ndev_ctx->nvdev); + +	if  (nvdev) +		cancel_work_sync(&nvdev->subchan_work); +  	/*  	 * Call to the vsc driver to let it know that the device is being  	 * removed. Also blocks mtu and channel changes. @@ -2045,11 +2126,13 @@ static int netvsc_remove(struct hv_device *dev)  	if (vf_netdev)  		netvsc_unregister_vf(vf_netdev); +	if (nvdev) +		rndis_filter_device_remove(dev, nvdev); +  	unregister_netdevice(net); -	rndis_filter_device_remove(dev, -				   rtnl_dereference(ndev_ctx->nvdev));  	rtnl_unlock(); +	rcu_read_unlock();  	hv_set_drvdata(dev, NULL); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index c3ca191fea7f..a6ec41c399d6 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -264,13 +264,23 @@ static void rndis_set_link_state(struct rndis_device *rdev,  	}  } -static void rndis_filter_receive_response(struct rndis_device *dev, -				       struct rndis_message *resp) +static void rndis_filter_receive_response(struct net_device *ndev, +					  struct netvsc_device *nvdev, +					  const struct rndis_message *resp)  { +	struct rndis_device *dev = nvdev->extension;  	struct rndis_request *request = NULL;  	bool found = false;  	unsigned long flags; -	struct net_device *ndev = dev->ndev; + +	/* This should never 
happen, it means control message +	 * response received after device removed. +	 */ +	if (dev->state == RNDIS_DEV_UNINITIALIZED) { +		netdev_err(ndev, +			   "got rndis message uninitialized\n"); +		return; +	}  	spin_lock_irqsave(&dev->request_lock, flags);  	list_for_each_entry(request, &dev->req_list, list_ent) { @@ -352,7 +362,6 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)  static int rndis_filter_receive_data(struct net_device *ndev,  				     struct netvsc_device *nvdev, -				     struct rndis_device *dev,  				     struct rndis_message *msg,  				     struct vmbus_channel *channel,  				     void *data, u32 data_buflen) @@ -372,7 +381,7 @@ static int rndis_filter_receive_data(struct net_device *ndev,  	 * should be the data packet size plus the trailer padding size  	 */  	if (unlikely(data_buflen < rndis_pkt->data_len)) { -		netdev_err(dev->ndev, "rndis message buffer " +		netdev_err(ndev, "rndis message buffer "  			   "overflow detected (got %u, min %u)"  			   "...dropping this message!\n",  			   data_buflen, rndis_pkt->data_len); @@ -400,35 +409,20 @@ int rndis_filter_receive(struct net_device *ndev,  			 void *data, u32 buflen)  {  	struct net_device_context *net_device_ctx = netdev_priv(ndev); -	struct rndis_device *rndis_dev = net_dev->extension;  	struct rndis_message *rndis_msg = data; -	/* Make sure the rndis device state is initialized */ -	if (unlikely(!rndis_dev)) { -		netif_dbg(net_device_ctx, rx_err, ndev, -			  "got rndis message but no rndis device!\n"); -		return NVSP_STAT_FAIL; -	} - -	if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) { -		netif_dbg(net_device_ctx, rx_err, ndev, -			  "got rndis message uninitialized\n"); -		return NVSP_STAT_FAIL; -	} -  	if (netif_msg_rx_status(net_device_ctx))  		dump_rndis_message(ndev, rndis_msg);  	switch (rndis_msg->ndis_msg_type) {  	case RNDIS_MSG_PACKET: -		return rndis_filter_receive_data(ndev, net_dev, -						 rndis_dev, rndis_msg, +		return 
rndis_filter_receive_data(ndev, net_dev, rndis_msg,  						 channel, data, buflen);  	case RNDIS_MSG_INIT_C:  	case RNDIS_MSG_QUERY_C:  	case RNDIS_MSG_SET_C:  		/* completion msgs */ -		rndis_filter_receive_response(rndis_dev, rndis_msg); +		rndis_filter_receive_response(ndev, net_dev, rndis_msg);  		break;  	case RNDIS_MSG_INDICATE: @@ -825,13 +819,15 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,  	struct rndis_set_request *set;  	int ret; +	if (dev->filter == new_filter) +		return 0; +  	request = get_rndis_request(dev, RNDIS_MSG_SET,  			RNDIS_MESSAGE_SIZE(struct rndis_set_request) +  			sizeof(u32));  	if (!request)  		return -ENOMEM; -  	/* Setup the rndis set */  	set = &request->request_msg.msg.set_req;  	set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER; @@ -842,8 +838,10 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,  	       &new_filter, sizeof(u32));  	ret = rndis_filter_send_request(dev, request); -	if (ret == 0) +	if (ret == 0) {  		wait_for_completion(&request->wait_event); +		dev->filter = new_filter; +	}  	put_rndis_request(dev, request); @@ -854,15 +852,19 @@ static void rndis_set_multicast(struct work_struct *w)  {  	struct rndis_device *rdev  		= container_of(w, struct rndis_device, mcast_work); +	u32 filter = NDIS_PACKET_TYPE_DIRECTED; +	unsigned int flags = rdev->ndev->flags; -	if (rdev->ndev->flags & IFF_PROMISC) -		rndis_filter_set_packet_filter(rdev, -					       NDIS_PACKET_TYPE_PROMISCUOUS); -	else -		rndis_filter_set_packet_filter(rdev, -					       NDIS_PACKET_TYPE_BROADCAST | -					       NDIS_PACKET_TYPE_ALL_MULTICAST | -					       NDIS_PACKET_TYPE_DIRECTED); +	if (flags & IFF_PROMISC) { +		filter = NDIS_PACKET_TYPE_PROMISCUOUS; +	} else { +		if (flags & IFF_ALLMULTI) +			filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; +		if (flags & IFF_BROADCAST) +			filter |= NDIS_PACKET_TYPE_BROADCAST; +	} + +	rndis_filter_set_packet_filter(rdev, filter);  }  void rndis_filter_update(struct netvsc_device 
*nvdev) @@ -1116,6 +1118,7 @@ void rndis_set_subchannel(struct work_struct *w)  	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)  		ndev_ctx->tx_table[i] = i % nvdev->num_chn; +	netif_device_attach(ndev);  	rtnl_unlock();  	return; @@ -1126,6 +1129,8 @@ failed:  	nvdev->max_chn = 1;  	nvdev->num_chn = 1; + +	netif_device_attach(ndev);  unlock:  	rtnl_unlock();  } @@ -1328,6 +1333,10 @@ out:  		net_device->num_chn = 1;  	} +	/* No sub channels, device is ready */ +	if (net_device->num_chn == 1) +		netif_device_attach(net); +  	return net_device;  err_dev_remv: @@ -1346,7 +1355,6 @@ void rndis_filter_device_remove(struct hv_device *dev,  	net_dev->extension = NULL;  	netvsc_device_remove(dev); -	kfree(rndis_dev);  }  int rndis_filter_open(struct netvsc_device *nvdev) @@ -1364,10 +1372,3 @@ int rndis_filter_close(struct netvsc_device *nvdev)  	return rndis_filter_close_device(nvdev->extension);  } - -bool rndis_filter_opened(const struct netvsc_device *nvdev) -{ -	const struct rndis_device *dev = nvdev->extension; - -	return dev->state == RNDIS_DEV_DATAINITIALIZED; -} diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 7de88b33d5b9..9cbb0c8a896a 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,  	err = netdev_upper_dev_link(real_dev, dev, extack);  	if (err < 0) -		goto unregister; +		goto put_dev;  	/* need to be already registered so that ->init has run and  	 * the MAC addr is set @@ -3316,7 +3316,8 @@ del_dev:  	macsec_del_dev(macsec);  unlink:  	netdev_upper_dev_unlink(real_dev, dev); -unregister: +put_dev: +	dev_put(real_dev);  	unregister_netdevice(dev);  	return err;  } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index a0f2be81d52e..725f4b4afc6d 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1036,7 +1036,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,  	lowerdev_features &= (features | 
~NETIF_F_LRO);  	features = netdev_increment_features(lowerdev_features, features, mask);  	features |= ALWAYS_ON_FEATURES; -	features &= ~NETIF_F_NETNS_LOCAL; +	features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES);  	return features;  } @@ -1451,7 +1451,7 @@ destroy_macvlan_port:  	/* the macvlan port may be freed by macvlan_uninit when fail to register.  	 * so we destroy the macvlan port only when it's valid.  	 */ -	if (create && macvlan_port_get_rtnl(dev)) +	if (create && macvlan_port_get_rtnl(lowerdev))  		macvlan_port_destroy(port->dev);  	return err;  } diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index 171010eb4d9c..5ad130c3da43 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -341,8 +341,8 @@ void bcm_phy_get_strings(struct phy_device *phydev, u8 *data)  	unsigned int i;  	for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++) -		memcpy(data + i * ETH_GSTRING_LEN, -		       bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN); +		strlcpy(data + i * ETH_GSTRING_LEN, +			bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN);  }  EXPORT_SYMBOL_GPL(bcm_phy_get_strings); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 22d9bc9c33a4..0e0978d8a0eb 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1452,8 +1452,8 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data)  	int i;  	for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) { -		memcpy(data + i * ETH_GSTRING_LEN, -		       marvell_hw_stats[i].string, ETH_GSTRING_LEN); +		strlcpy(data + i * ETH_GSTRING_LEN, +			marvell_hw_stats[i].string, ETH_GSTRING_LEN);  	}  } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 0f45310300f6..f41b224a9cdb 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -635,25 +635,6 @@ static int ksz8873mll_config_aneg(struct phy_device *phydev)  	return 0;  } -/* This routine returns -1 as an indication to the caller that the - * Micrel 
ksz9021 10/100/1000 PHY does not support standard IEEE - * MMD extended PHY registers. - */ -static int -ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum) -{ -	return -1; -} - -/* This routine does nothing since the Micrel ksz9021 does not support - * standard IEEE MMD extended PHY registers. - */ -static int -ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum, u16 val) -{ -	return -1; -} -  static int kszphy_get_sset_count(struct phy_device *phydev)  {  	return ARRAY_SIZE(kszphy_hw_stats); @@ -664,8 +645,8 @@ static void kszphy_get_strings(struct phy_device *phydev, u8 *data)  	int i;  	for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) { -		memcpy(data + i * ETH_GSTRING_LEN, -		       kszphy_hw_stats[i].string, ETH_GSTRING_LEN); +		strlcpy(data + i * ETH_GSTRING_LEN, +			kszphy_hw_stats[i].string, ETH_GSTRING_LEN);  	}  } @@ -946,8 +927,8 @@ static struct phy_driver ksphy_driver[] = {  	.get_stats	= kszphy_get_stats,  	.suspend	= genphy_suspend,  	.resume		= genphy_resume, -	.read_mmd	= ksz9021_rd_mmd_phyreg, -	.write_mmd	= ksz9021_wr_mmd_phyreg, +	.read_mmd	= genphy_read_mmd_unsupported, +	.write_mmd	= genphy_write_mmd_unsupported,  }, {  	.phy_id		= PHY_ID_KSZ9031,  	.phy_id_mask	= MICREL_PHY_ID_MASK, diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index e3e29c2b028b..9aabfa1a455a 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -618,6 +618,77 @@ static void phy_error(struct phy_device *phydev)  }  /** + * phy_disable_interrupts - Disable the PHY interrupts from the PHY side + * @phydev: target phy_device struct + */ +static int phy_disable_interrupts(struct phy_device *phydev) +{ +	int err; + +	/* Disable PHY interrupts */ +	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); +	if (err) +		goto phy_err; + +	/* Clear the interrupt */ +	err = phy_clear_interrupt(phydev); +	if (err) +		goto phy_err; + +	return 0; + +phy_err: +	phy_error(phydev); + +	return err; +} + +/** + * phy_change - 
Called by the phy_interrupt to handle PHY changes + * @phydev: phy_device struct that interrupted + */ +static irqreturn_t phy_change(struct phy_device *phydev) +{ +	if (phy_interrupt_is_valid(phydev)) { +		if (phydev->drv->did_interrupt && +		    !phydev->drv->did_interrupt(phydev)) +			return IRQ_NONE; + +		if (phydev->state == PHY_HALTED) +			if (phy_disable_interrupts(phydev)) +				goto phy_err; +	} + +	mutex_lock(&phydev->lock); +	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) +		phydev->state = PHY_CHANGELINK; +	mutex_unlock(&phydev->lock); + +	/* reschedule state queue work to run as soon as possible */ +	phy_trigger_machine(phydev, true); + +	if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev)) +		goto phy_err; +	return IRQ_HANDLED; + +phy_err: +	phy_error(phydev); +	return IRQ_NONE; +} + +/** + * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes + * @work: work_struct that describes the work to be done + */ +void phy_change_work(struct work_struct *work) +{ +	struct phy_device *phydev = +		container_of(work, struct phy_device, phy_queue); + +	phy_change(phydev); +} + +/**   * phy_interrupt - PHY interrupt handler   * @irq: interrupt line   * @phy_dat: phy_device pointer @@ -632,9 +703,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)  	if (PHY_HALTED == phydev->state)  		return IRQ_NONE;		/* It can't be ours.  
*/ -	phy_change(phydev); - -	return IRQ_HANDLED; +	return phy_change(phydev);  }  /** @@ -652,32 +721,6 @@ static int phy_enable_interrupts(struct phy_device *phydev)  }  /** - * phy_disable_interrupts - Disable the PHY interrupts from the PHY side - * @phydev: target phy_device struct - */ -static int phy_disable_interrupts(struct phy_device *phydev) -{ -	int err; - -	/* Disable PHY interrupts */ -	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); -	if (err) -		goto phy_err; - -	/* Clear the interrupt */ -	err = phy_clear_interrupt(phydev); -	if (err) -		goto phy_err; - -	return 0; - -phy_err: -	phy_error(phydev); - -	return err; -} - -/**   * phy_start_interrupts - request and enable interrupts for a PHY device   * @phydev: target phy_device struct   * @@ -720,50 +763,6 @@ int phy_stop_interrupts(struct phy_device *phydev)  EXPORT_SYMBOL(phy_stop_interrupts);  /** - * phy_change - Called by the phy_interrupt to handle PHY changes - * @phydev: phy_device struct that interrupted - */ -void phy_change(struct phy_device *phydev) -{ -	if (phy_interrupt_is_valid(phydev)) { -		if (phydev->drv->did_interrupt && -		    !phydev->drv->did_interrupt(phydev)) -			return; - -		if (phydev->state == PHY_HALTED) -			if (phy_disable_interrupts(phydev)) -				goto phy_err; -	} - -	mutex_lock(&phydev->lock); -	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) -		phydev->state = PHY_CHANGELINK; -	mutex_unlock(&phydev->lock); - -	/* reschedule state queue work to run as soon as possible */ -	phy_trigger_machine(phydev, true); - -	if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev)) -		goto phy_err; -	return; - -phy_err: -	phy_error(phydev); -} - -/** - * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes - * @work: work_struct that describes the work to be done - */ -void phy_change_work(struct work_struct *work) -{ -	struct phy_device *phydev = -		container_of(work, struct phy_device, phy_queue); - -	
phy_change(phydev); -} - -/**   * phy_stop - Bring down the PHY link, and stop checking the status   * @phydev: target phy_device struct   */ @@ -819,7 +818,7 @@ void phy_start(struct phy_device *phydev)  		break;  	case PHY_HALTED:  		/* if phy was suspended, bring the physical link up again */ -		phy_resume(phydev); +		__phy_resume(phydev);  		/* make sure interrupts are re-enabled for the PHY */  		if (phy_interrupt_is_valid(phydev)) { diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index b13eed21c87d..74664a6c0cdc 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -135,9 +135,7 @@ static int mdio_bus_phy_resume(struct device *dev)  	if (!mdio_bus_phy_may_suspend(phydev))  		goto no_resume; -	mutex_lock(&phydev->lock);  	ret = phy_resume(phydev); -	mutex_unlock(&phydev->lock);  	if (ret < 0)  		return ret; @@ -1014,10 +1012,17 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,  	err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj,  				"attached_dev");  	if (!err) { -		err = sysfs_create_link(&dev->dev.kobj, &phydev->mdio.dev.kobj, -					"phydev"); -		if (err) -			goto error; +		err = sysfs_create_link_nowarn(&dev->dev.kobj, +					       &phydev->mdio.dev.kobj, +					       "phydev"); +		if (err) { +			dev_err(&dev->dev, "could not add device link to %s err %d\n", +				kobject_name(&phydev->mdio.dev.kobj), +				err); +			/* non-fatal - some net drivers can use one netdevice +			 * with more then one phy +			 */ +		}  		phydev->sysfs_links = true;  	} @@ -1041,9 +1046,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,  	if (err)  		goto error; -	mutex_lock(&phydev->lock);  	phy_resume(phydev); -	mutex_unlock(&phydev->lock);  	phy_led_triggers_register(phydev);  	return err; @@ -1172,7 +1175,7 @@ int phy_suspend(struct phy_device *phydev)  }  EXPORT_SYMBOL(phy_suspend); -int phy_resume(struct phy_device *phydev) +int __phy_resume(struct phy_device 
*phydev)  {  	struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);  	int ret = 0; @@ -1189,6 +1192,18 @@ int phy_resume(struct phy_device *phydev)  	return ret;  } +EXPORT_SYMBOL(__phy_resume); + +int phy_resume(struct phy_device *phydev) +{ +	int ret; + +	mutex_lock(&phydev->lock); +	ret = __phy_resume(phydev); +	mutex_unlock(&phydev->lock); + +	return ret; +}  EXPORT_SYMBOL(phy_resume);  int phy_loopback(struct phy_device *phydev, bool enable) @@ -1382,7 +1397,7 @@ int genphy_setup_forced(struct phy_device *phydev)  		ctl |= BMCR_FULLDPLX;  	return phy_modify(phydev, MII_BMCR, -			  BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN, ctl); +			  ~(BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN), ctl);  }  EXPORT_SYMBOL(genphy_setup_forced); @@ -1658,6 +1673,23 @@ int genphy_config_init(struct phy_device *phydev)  }  EXPORT_SYMBOL(genphy_config_init); +/* This is used for the phy device which doesn't support the MMD extended + * register access, but it does have side effect when we are trying to access + * the MMD register via indirect method. 
+ */ +int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, u16 regnum) +{ +	return -EOPNOTSUPP; +} +EXPORT_SYMBOL(genphy_read_mmd_unsupported); + +int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum, +				 u16 regnum, u16 val) +{ +	return -EOPNOTSUPP; +} +EXPORT_SYMBOL(genphy_write_mmd_unsupported); +  int genphy_suspend(struct phy_device *phydev)  {  	return phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN); diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index ee3ca4a2f12b..9f48ecf9c627 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -172,6 +172,8 @@ static struct phy_driver realtek_drvs[] = {  		.flags		= PHY_HAS_INTERRUPT,  		.ack_interrupt	= &rtl821x_ack_interrupt,  		.config_intr	= &rtl8211b_config_intr, +		.read_mmd	= &genphy_read_mmd_unsupported, +		.write_mmd	= &genphy_write_mmd_unsupported,  	}, {  		.phy_id		= 0x001cc914,  		.name		= "RTL8211DN Gigabit Ethernet", diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 255a5def56e9..da1937832c99 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -257,7 +257,7 @@ struct ppp_net {  /* Prototypes. 
*/  static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,  			struct file *file, unsigned int cmd, unsigned long arg); -static void ppp_xmit_process(struct ppp *ppp); +static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);  static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);  static void ppp_push(struct ppp *ppp);  static void ppp_channel_push(struct channel *pch); @@ -513,13 +513,12 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,  		goto out;  	} -	skb_queue_tail(&pf->xq, skb); -  	switch (pf->kind) {  	case INTERFACE: -		ppp_xmit_process(PF_TO_PPP(pf)); +		ppp_xmit_process(PF_TO_PPP(pf), skb);  		break;  	case CHANNEL: +		skb_queue_tail(&pf->xq, skb);  		ppp_channel_push(PF_TO_CHANNEL(pf));  		break;  	} @@ -1267,8 +1266,8 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)  	put_unaligned_be16(proto, pp);  	skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev))); -	skb_queue_tail(&ppp->file.xq, skb); -	ppp_xmit_process(ppp); +	ppp_xmit_process(ppp, skb); +  	return NETDEV_TX_OK;   outf: @@ -1420,13 +1419,14 @@ static void ppp_setup(struct net_device *dev)   */  /* Called to do any work queued up on the transmit side that can now be done */ -static void __ppp_xmit_process(struct ppp *ppp) +static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)  { -	struct sk_buff *skb; -  	ppp_xmit_lock(ppp);  	if (!ppp->closing) {  		ppp_push(ppp); + +		if (skb) +			skb_queue_tail(&ppp->file.xq, skb);  		while (!ppp->xmit_pending &&  		       (skb = skb_dequeue(&ppp->file.xq)))  			ppp_send_frame(ppp, skb); @@ -1440,7 +1440,7 @@ static void __ppp_xmit_process(struct ppp *ppp)  	ppp_xmit_unlock(ppp);  } -static void ppp_xmit_process(struct ppp *ppp) +static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)  {  	local_bh_disable(); @@ -1448,7 +1448,7 @@ static void ppp_xmit_process(struct ppp *ppp)  		goto err;  	(*this_cpu_ptr(ppp->xmit_recursion))++; -	__ppp_xmit_process(ppp); 
+	__ppp_xmit_process(ppp, skb);  	(*this_cpu_ptr(ppp->xmit_recursion))--;  	local_bh_enable(); @@ -1458,6 +1458,8 @@ static void ppp_xmit_process(struct ppp *ppp)  err:  	local_bh_enable(); +	kfree_skb(skb); +  	if (net_ratelimit())  		netdev_err(ppp->dev, "recursion detected\n");  } @@ -1942,7 +1944,7 @@ static void __ppp_channel_push(struct channel *pch)  	if (skb_queue_empty(&pch->file.xq)) {  		ppp = pch->ppp;  		if (ppp) -			__ppp_xmit_process(ppp); +			__ppp_xmit_process(ppp, NULL);  	}  } @@ -3161,6 +3163,15 @@ ppp_connect_channel(struct channel *pch, int unit)  		goto outl;  	ppp_lock(ppp); +	spin_lock_bh(&pch->downl); +	if (!pch->chan) { +		/* Don't connect unregistered channels */ +		spin_unlock_bh(&pch->downl); +		ppp_unlock(ppp); +		ret = -ENOTCONN; +		goto outl; +	} +	spin_unlock_bh(&pch->downl);  	if (pch->file.hdrlen > ppp->file.hdrlen)  		ppp->file.hdrlen = pch->file.hdrlen;  	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */ diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index a468439969df..56c701b73c12 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2395,7 +2395,7 @@ send_done:  	if (!nlh) {  		err = __send_and_alloc_skb(&skb, team, portid, send_func);  		if (err) -			goto errout; +			return err;  		goto send_done;  	} @@ -2681,7 +2681,7 @@ send_done:  	if (!nlh) {  		err = __send_and_alloc_skb(&skb, team, portid, send_func);  		if (err) -			goto errout; +			return err;  		goto send_done;  	} diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index ca5e375de27c..e0d6760f3219 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -166,6 +166,8 @@ struct tbnet_ring {   * @connected_work: Worker that finalizes the ThunderboltIP connection   *		    setup and enables DMA paths for high speed data   *		    transfers + * @disconnect_work: Worker that handles tearing down the ThunderboltIP + *		     connection   * @rx_hdr: Copy of the currently processed Rx frame. 
Used when a   *	    network packet consists of multiple Thunderbolt frames.   *	    In host byte order. @@ -190,6 +192,7 @@ struct tbnet {  	int login_retries;  	struct delayed_work login_work;  	struct work_struct connected_work; +	struct work_struct disconnect_work;  	struct thunderbolt_ip_frame_header rx_hdr;  	struct tbnet_ring rx_ring;  	atomic_t frame_id; @@ -445,7 +448,7 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data)  	case TBIP_LOGOUT:  		ret = tbnet_logout_response(net, route, sequence, command_id);  		if (!ret) -			tbnet_tear_down(net, false); +			queue_work(system_long_wq, &net->disconnect_work);  		break;  	default: @@ -659,6 +662,13 @@ static void tbnet_login_work(struct work_struct *work)  	}  } +static void tbnet_disconnect_work(struct work_struct *work) +{ +	struct tbnet *net = container_of(work, typeof(*net), disconnect_work); + +	tbnet_tear_down(net, false); +} +  static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,  			      const struct thunderbolt_ip_frame_header *hdr)  { @@ -881,6 +891,7 @@ static int tbnet_stop(struct net_device *dev)  	napi_disable(&net->napi); +	cancel_work_sync(&net->disconnect_work);  	tbnet_tear_down(net, true);  	tb_ring_free(net->rx_ring.ring); @@ -1195,6 +1206,7 @@ static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)  	net = netdev_priv(dev);  	INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);  	INIT_WORK(&net->connected_work, tbnet_connected_work); +	INIT_WORK(&net->disconnect_work, tbnet_disconnect_work);  	mutex_init(&net->connection_lock);  	atomic_set(&net->command_id, 0);  	atomic_set(&net->frame_id, 0); @@ -1270,10 +1282,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev)  	stop_login(net);  	if (netif_running(net->dev)) {  		netif_device_detach(net->dev); -		tb_ring_stop(net->rx_ring.ring); -		tb_ring_stop(net->tx_ring.ring); -		tbnet_free_buffers(&net->rx_ring); -		tbnet_free_buffers(&net->tx_ring); +		
tbnet_tear_down(net, true);  	}  	return 0; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 81e6cc951e7f..28cfa642e39a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -181,7 +181,6 @@ struct tun_file {  	struct tun_struct *detached;  	struct ptr_ring tx_ring;  	struct xdp_rxq_info xdp_rxq; -	int xdp_pending_pkts;  };  struct tun_flow_entry { @@ -656,7 +655,7 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)  	return tun;  } -static void tun_ptr_free(void *ptr) +void tun_ptr_free(void *ptr)  {  	if (!ptr)  		return; @@ -668,6 +667,7 @@ static void tun_ptr_free(void *ptr)  		__skb_array_destroy_skb(ptr);  	}  } +EXPORT_SYMBOL_GPL(tun_ptr_free);  static void tun_queue_purge(struct tun_file *tfile)  { @@ -1489,27 +1489,23 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,  	skb->truesize += skb->data_len;  	for (i = 1; i < it->nr_segs; i++) { +		struct page_frag *pfrag = ¤t->task_frag;  		size_t fragsz = it->iov[i].iov_len; -		unsigned long offset; -		struct page *page; -		void *data;  		if (fragsz == 0 || fragsz > PAGE_SIZE) {  			err = -EINVAL;  			goto free;  		} -		local_bh_disable(); -		data = napi_alloc_frag(fragsz); -		local_bh_enable(); -		if (!data) { +		if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {  			err = -ENOMEM;  			goto free;  		} -		page = virt_to_head_page(data); -		offset = data - page_address(page); -		skb_fill_page_desc(skb, i - 1, page, offset, fragsz); +		skb_fill_page_desc(skb, i - 1, pfrag->page, +				   pfrag->offset, fragsz); +		page_ref_inc(pfrag->page); +		pfrag->offset += fragsz;  	}  	return skb; @@ -1647,6 +1643,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,  	else  		*skb_xdp = 0; +	preempt_disable();  	rcu_read_lock();  	xdp_prog = rcu_dereference(tun->xdp_prog);  	if (xdp_prog && !*skb_xdp) { @@ -1666,11 +1663,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,  		case XDP_REDIRECT:  			get_page(alloc_frag->page);  			
alloc_frag->offset += buflen; -			++tfile->xdp_pending_pkts;  			err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); +			xdp_do_flush_map();  			if (err)  				goto err_redirect;  			rcu_read_unlock(); +			preempt_enable();  			return NULL;  		case XDP_TX:  			xdp_xmit = true; @@ -1692,6 +1690,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,  	skb = build_skb(buf, buflen);  	if (!skb) {  		rcu_read_unlock(); +		preempt_enable();  		return ERR_PTR(-ENOMEM);  	} @@ -1704,10 +1703,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,  		skb->dev = tun->dev;  		generic_xdp_tx(skb, xdp_prog);  		rcu_read_unlock(); +		preempt_enable();  		return NULL;  	}  	rcu_read_unlock(); +	preempt_enable();  	return skb; @@ -1715,6 +1716,7 @@ err_redirect:  	put_page(alloc_frag->page);  err_xdp:  	rcu_read_unlock(); +	preempt_enable();  	this_cpu_inc(tun->pcpu_stats->rx_dropped);  	return NULL;  } @@ -1988,11 +1990,6 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)  	result = tun_get_user(tun, tfile, NULL, from,  			      file->f_flags & O_NONBLOCK, false); -	if (tfile->xdp_pending_pkts) { -		tfile->xdp_pending_pkts = 0; -		xdp_do_flush_map(); -	} -  	tun_put(tun);  	return result;  } @@ -2329,13 +2326,6 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)  	ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,  			   m->msg_flags & MSG_DONTWAIT,  			   m->msg_flags & MSG_MORE); - -	if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT || -	    !(m->msg_flags & MSG_MORE)) { -		tfile->xdp_pending_pkts = 0; -		xdp_do_flush_map(); -	} -  	tun_put(tun);  	return ret;  } @@ -3167,7 +3157,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)  	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);  	memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); -	tfile->xdp_pending_pkts = 0;  	return 0;  } diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 05dca3e5c93d..fff4b13eece2 
100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -896,6 +896,12 @@ static const struct usb_device_id	products[] = {  				      USB_CDC_PROTO_NONE),  	.driver_info = (unsigned long)&wwan_info,  }, { +	/* Cinterion PLS8 modem by GEMALTO */ +	USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0061, USB_CLASS_COMM, +				      USB_CDC_SUBCLASS_ETHERNET, +				      USB_CDC_PROTO_NONE), +	.driver_info = (unsigned long)&wwan_info, +}, {  	USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,  			USB_CDC_PROTO_NONE),  	.driver_info = (unsigned long) &cdc_info, diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 958b2e8b90f6..86f7196f9d91 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1794,7 +1794,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)  		tx_data += len;  		agg->skb_len += len; -		agg->skb_num++; +		agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1;  		dev_kfree_skb_any(skb); diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index d0a113743195..7a6a1fe79309 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -954,10 +954,11 @@ static int smsc75xx_set_features(struct net_device *netdev,  	/* it's racing here! 
*/  	ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); -	if (ret < 0) +	if (ret < 0) {  		netdev_warn(dev->net, "Error writing RFE_CTL\n"); - -	return ret; +		return ret; +	} +	return 0;  }  static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm) diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 8a22ff67b026..d9eea8cfe6cb 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -315,6 +315,7 @@ static void __usbnet_status_stop_force(struct usbnet *dev)  void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)  {  	struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); +	unsigned long flags;  	int	status;  	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { @@ -326,10 +327,10 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)  	if (skb->protocol == 0)  		skb->protocol = eth_type_trans (skb, dev->net); -	u64_stats_update_begin(&stats64->syncp); +	flags = u64_stats_update_begin_irqsave(&stats64->syncp);  	stats64->rx_packets++;  	stats64->rx_bytes += skb->len; -	u64_stats_update_end(&stats64->syncp); +	u64_stats_update_end_irqrestore(&stats64->syncp, flags);  	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",  		  skb->len + sizeof (struct ethhdr), skb->protocol); @@ -1248,11 +1249,12 @@ static void tx_complete (struct urb *urb)  	if (urb->status == 0) {  		struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); +		unsigned long flags; -		u64_stats_update_begin(&stats64->syncp); +		flags = u64_stats_update_begin_irqsave(&stats64->syncp);  		stats64->tx_packets += entry->packets;  		stats64->tx_bytes += entry->length; -		u64_stats_update_end(&stats64->syncp); +		u64_stats_update_end_irqrestore(&stats64->syncp, flags);  	} else {  		dev->net->stats.tx_errors++; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 626c27352ae2..23374603e4d9 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -443,12 +443,8 @@ static bool 
__virtnet_xdp_xmit(struct virtnet_info *vi,  	sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);  	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC); -	if (unlikely(err)) { -		struct page *page = virt_to_head_page(xdp->data); - -		put_page(page); -		return false; -	} +	if (unlikely(err)) +		return false; /* Caller handle free/refcnt */  	return true;  } @@ -456,8 +452,18 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi,  static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)  {  	struct virtnet_info *vi = netdev_priv(dev); -	bool sent = __virtnet_xdp_xmit(vi, xdp); +	struct receive_queue *rq = vi->rq; +	struct bpf_prog *xdp_prog; +	bool sent; + +	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this +	 * indicate XDP resources have been successfully allocated. +	 */ +	xdp_prog = rcu_dereference(rq->xdp_prog); +	if (!xdp_prog) +		return -ENXIO; +	sent = __virtnet_xdp_xmit(vi, xdp);  	if (!sent)  		return -ENOSPC;  	return 0; @@ -498,6 +504,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,  	page_off += *len;  	while (--*num_buf) { +		int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));  		unsigned int buflen;  		void *buf;  		int off; @@ -512,7 +519,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,  		/* guard against a misconfigured or uncooperative backend that  		 * is sending packet larger than the MTU.  		 
*/ -		if ((page_off + buflen) > PAGE_SIZE) { +		if ((page_off + buflen + tailroom) > PAGE_SIZE) {  			put_page(p);  			goto err_buf;  		} @@ -546,8 +553,11 @@ static struct sk_buff *receive_small(struct net_device *dev,  	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +  			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));  	struct page *page = virt_to_head_page(buf); -	unsigned int delta = 0, err; +	unsigned int delta = 0;  	struct page *xdp_page; +	bool sent; +	int err; +  	len -= vi->hdr_len;  	rcu_read_lock(); @@ -558,7 +568,7 @@ static struct sk_buff *receive_small(struct net_device *dev,  		void *orig_data;  		u32 act; -		if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags)) +		if (unlikely(hdr->hdr.gso_type))  			goto err_xdp;  		if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { @@ -596,16 +606,19 @@ static struct sk_buff *receive_small(struct net_device *dev,  			delta = orig_data - xdp.data;  			break;  		case XDP_TX: -			if (unlikely(!__virtnet_xdp_xmit(vi, &xdp))) +			sent = __virtnet_xdp_xmit(vi, &xdp); +			if (unlikely(!sent)) {  				trace_xdp_exception(vi->dev, xdp_prog, act); -			else -				*xdp_xmit = true; +				goto err_xdp; +			} +			*xdp_xmit = true;  			rcu_read_unlock();  			goto xdp_xmit;  		case XDP_REDIRECT:  			err = xdp_do_redirect(dev, &xdp, xdp_prog); -			if (!err) -				*xdp_xmit = true; +			if (err) +				goto err_xdp; +			*xdp_xmit = true;  			rcu_read_unlock();  			goto xdp_xmit;  		default: @@ -677,6 +690,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,  	struct bpf_prog *xdp_prog;  	unsigned int truesize;  	unsigned int headroom = mergeable_ctx_to_headroom(ctx); +	bool sent;  	int err;  	head_skb = NULL; @@ -689,7 +703,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,  		void *data;  		u32 act; -		/* This happens when rx buffer size is underestimated */ +		/* This happens when rx buffer size is underestimated +		 * or headroom is not enough because of the buffer +		 * 
was refilled before XDP is set. This should only +		 * happen for the first several packets, so we don't +		 * care much about its performance. +		 */  		if (unlikely(num_buf > 1 ||  			     headroom < virtnet_get_headroom(vi))) {  			/* linearize data for XDP */ @@ -724,9 +743,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,  		act = bpf_prog_run_xdp(xdp_prog, &xdp); -		if (act != XDP_PASS) -			ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); -  		switch (act) {  		case XDP_PASS:  			/* recalculate offset to account for any header @@ -746,18 +762,28 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,  			}  			break;  		case XDP_TX: -			if (unlikely(!__virtnet_xdp_xmit(vi, &xdp))) +			sent = __virtnet_xdp_xmit(vi, &xdp); +			if (unlikely(!sent)) {  				trace_xdp_exception(vi->dev, xdp_prog, act); -			else -				*xdp_xmit = true; +				if (unlikely(xdp_page != page)) +					put_page(xdp_page); +				goto err_xdp; +			} +			*xdp_xmit = true;  			if (unlikely(xdp_page != page))  				goto err_xdp;  			rcu_read_unlock();  			goto xdp_xmit;  		case XDP_REDIRECT:  			err = xdp_do_redirect(dev, &xdp, xdp_prog); -			if (!err) -				*xdp_xmit = true; +			if (err) { +				if (unlikely(xdp_page != page)) +					put_page(xdp_page); +				goto err_xdp; +			} +			*xdp_xmit = true; +			if (unlikely(xdp_page != page)) +				goto err_xdp;  			rcu_read_unlock();  			goto xdp_xmit;  		default: @@ -1003,13 +1029,18 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,  }  static unsigned int get_mergeable_buf_len(struct receive_queue *rq, -					  struct ewma_pkt_len *avg_pkt_len) +					  struct ewma_pkt_len *avg_pkt_len, +					  unsigned int room)  {  	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);  	unsigned int len; -	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), +	if (room) +		return PAGE_SIZE - room; + +	len = hdr_len +	clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),  				
rq->min_buf_len, PAGE_SIZE - hdr_len); +  	return ALIGN(len, L1_CACHE_BYTES);  } @@ -1018,21 +1049,27 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,  {  	struct page_frag *alloc_frag = &rq->alloc_frag;  	unsigned int headroom = virtnet_get_headroom(vi); +	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; +	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);  	char *buf;  	void *ctx;  	int err;  	unsigned int len, hole; -	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len); -	if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp))) +	/* Extra tailroom is needed to satisfy XDP's assumption. This +	 * means rx frags coalescing won't work, but consider we've +	 * disabled GSO for XDP, it won't be a big issue. +	 */ +	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); +	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))  		return -ENOMEM;  	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;  	buf += headroom; /* advance address leaving hole at front of pkt */  	get_page(alloc_frag->page); -	alloc_frag->offset += len + headroom; +	alloc_frag->offset += len + room;  	hole = alloc_frag->size - alloc_frag->offset; -	if (hole < len + headroom) { +	if (hole < len + room) {  		/* To avoid internal fragmentation, if there is very likely not  		 * enough space for another buffer, add the remaining space to  		 * the current buffer. @@ -2175,8 +2212,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,  	}  	/* Make sure NAPI is not using any XDP TX queues for RX. 
*/ -	for (i = 0; i < vi->max_queue_pairs; i++) -		napi_disable(&vi->rq[i].napi); +	if (netif_running(dev)) +		for (i = 0; i < vi->max_queue_pairs; i++) +			napi_disable(&vi->rq[i].napi);  	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);  	err = _virtnet_set_queues(vi, curr_qp + xdp_qp); @@ -2195,7 +2233,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,  		}  		if (old_prog)  			bpf_prog_put(old_prog); -		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); +		if (netif_running(dev)) +			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);  	}  	return 0; @@ -2566,12 +2605,15 @@ static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,  {  	struct virtnet_info *vi = netdev_priv(queue->dev);  	unsigned int queue_index = get_netdev_rx_queue_index(queue); +	unsigned int headroom = virtnet_get_headroom(vi); +	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;  	struct ewma_pkt_len *avg;  	BUG_ON(queue_index >= vi->max_queue_pairs);  	avg = &vi->rq[queue_index].mrg_avg_pkt_len;  	return sprintf(buf, "%u\n", -		       get_mergeable_buf_len(&vi->rq[queue_index], avg)); +		       get_mergeable_buf_len(&vi->rq[queue_index], avg, +				       SKB_DATA_ALIGN(headroom + tailroom)));  }  static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 8b39c160743d..e04937f44f33 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -977,6 +977,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,  {  	int ret;  	u32 count; +	int num_pkts; +	int tx_num_deferred;  	unsigned long flags;  	struct vmxnet3_tx_ctx ctx;  	union Vmxnet3_GenericDesc *gdesc; @@ -1075,12 +1077,12 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,  #else  	gdesc = ctx.sop_txd;  #endif +	tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);  	if (ctx.mss) {  		gdesc->txd.hlen 
= ctx.eth_ip_hdr_size + ctx.l4_hdr_size;  		gdesc->txd.om = VMXNET3_OM_TSO;  		gdesc->txd.msscof = ctx.mss; -		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len - -			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss); +		num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;  	} else {  		if (skb->ip_summed == CHECKSUM_PARTIAL) {  			gdesc->txd.hlen = ctx.eth_ip_hdr_size; @@ -1091,8 +1093,10 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,  			gdesc->txd.om = 0;  			gdesc->txd.msscof = 0;  		} -		le32_add_cpu(&tq->shared->txNumDeferred, 1); +		num_pkts = 1;  	} +	le32_add_cpu(&tq->shared->txNumDeferred, num_pkts); +	tx_num_deferred += num_pkts;  	if (skb_vlan_tag_present(skb)) {  		gdesc->txd.ti = 1; @@ -1118,8 +1122,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,  	spin_unlock_irqrestore(&tq->tx_lock, flags); -	if (le32_to_cpu(tq->shared->txNumDeferred) >= -					le32_to_cpu(tq->shared->txThreshold)) { +	if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {  		tq->shared->txNumDeferred = 0;  		VMXNET3_WRITE_BAR0_REG(adapter,  				       VMXNET3_REG_TXPROD + tq->qid * 8, @@ -1470,7 +1473,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,  			vmxnet3_rx_csum(adapter, skb,  					(union Vmxnet3_GenericDesc *)rcd);  			skb->protocol = eth_type_trans(skb, adapter->netdev); -			if (!rcd->tcp || !adapter->lro) +			if (!rcd->tcp || +			    !(adapter->netdev->features & NETIF_F_LRO))  				goto not_lro;  			if (segCnt != 0 && mss != 0) { diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 5ba222920e80..59ec34052a65 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -69,10 +69,10 @@  /*   * Version numbers   */ -#define VMXNET3_DRIVER_VERSION_STRING   "1.4.11.0-k" +#define VMXNET3_DRIVER_VERSION_STRING   "1.4.13.0-k"  /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM      
0x01040b00 +#define VMXNET3_DRIVER_VERSION_NUM      0x01040d00  #if defined(CONFIG_PCI_MSI)  	/* RSS only makes sense if MSI-X is supported. */ @@ -342,9 +342,6 @@ struct vmxnet3_adapter {  	u8			__iomem *hw_addr1; /* for BAR 1 */  	u8                              version; -	bool				rxcsum; -	bool				lro; -  #ifdef VMXNET3_RSS  	struct UPT1_RSSConf		*rss_conf;  	bool				rss; diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index afeca6bcdade..ab8b3cbbb205 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -574,7 +574,10 @@ static void ppp_timer(struct timer_list *t)  			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,  				     0, NULL);  			proto->restart_counter--; -		} else +		} else if (netif_carrier_ok(proto->dev)) +			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, +				     0, NULL); +		else  			ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,  				     0, NULL);  		break; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index e89e5ef2c2a4..f246e9ed4a81 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -729,6 +729,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,  	ieee80211_hw_set(hw, SPECTRUM_MGMT);  	ieee80211_hw_set(hw, SIGNAL_DBM);  	ieee80211_hw_set(hw, AMPDU_AGGREGATION); +	ieee80211_hw_set(hw, DOESNT_SUPPORT_QOS_NDP);  	if (ath9k_ps_enable)  		ieee80211_hw_set(hw, SUPPORTS_PS); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index df8a1ecb9924..232dcbb83311 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -181,6 +181,7 @@ enum brcmf_netif_stop_reason {   * @netif_stop_lock: spinlock for update netif_stop from multiple sources.   * @pend_8021x_cnt: tracks outstanding number of 802.1x frames.   
* @pend_8021x_wait: used for signalling change in count. + * @fwil_fwerr: flag indicating fwil layer should return firmware error codes.   */  struct brcmf_if {  	struct brcmf_pub *drvr; @@ -198,6 +199,7 @@ struct brcmf_if {  	wait_queue_head_t pend_8021x_wait;  	struct in6_addr ipv6_addr_tbl[NDOL_MAX_ENTRIES];  	u8 ipv6addr_idx; +	bool fwil_fwerr;  };  int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 47de35a33853..bede7b7fd996 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -104,6 +104,9 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,  	u32 data;  	int err; +	/* we need to know firmware error */ +	ifp->fwil_fwerr = true; +  	err = brcmf_fil_iovar_int_get(ifp, name, &data);  	if (err == 0) {  		brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]); @@ -112,6 +115,8 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,  		brcmf_dbg(TRACE, "%s feature check failed: %d\n",  			  brcmf_feat_names[id], err);  	} + +	ifp->fwil_fwerr = false;  }  static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp, @@ -120,6 +125,9 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,  {  	int err; +	/* we need to know firmware error */ +	ifp->fwil_fwerr = true; +  	err = brcmf_fil_iovar_data_set(ifp, name, data, len);  	if (err != -BRCMF_FW_UNSUPPORTED) {  		brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]); @@ -128,6 +136,8 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,  		brcmf_dbg(TRACE, "%s feature check failed: %d\n",  			  brcmf_feat_names[id], err);  	} + +	ifp->fwil_fwerr = false;  }  #define MAX_CAPS_BUFFER_SIZE	512 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c index 
f2cfdd3b2bf1..fc5751116d99 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c @@ -131,6 +131,9 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)  			  brcmf_fil_get_errstr((u32)(-fwerr)), fwerr);  		err = -EBADE;  	} +	if (ifp->fwil_fwerr) +		return fwerr; +  	return err;  } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index 2ee54133efa1..82064e909784 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -462,25 +462,23 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)   * @dev_addr: optional device address.   *   * P2P needs mac addresses for P2P device and interface. If no device - * address it specified, these are derived from the primary net device, ie. - * the permanent ethernet address of the device. + * address it specified, these are derived from a random ethernet + * address.   */  static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)  { -	struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; -	bool local_admin = false; +	bool random_addr = false; -	if (!dev_addr || is_zero_ether_addr(dev_addr)) { -		dev_addr = pri_ifp->mac_addr; -		local_admin = true; -	} +	if (!dev_addr || is_zero_ether_addr(dev_addr)) +		random_addr = true; -	/* Generate the P2P Device Address.  This consists of the device's -	 * primary MAC address with the locally administered bit set. +	/* Generate the P2P Device Address obtaining a random ethernet +	 * address with the locally administered bit set.  	 */ -	memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); -	if (local_admin) -		p2p->dev_addr[0] |= 0x02; +	if (random_addr) +		eth_random_addr(p2p->dev_addr); +	else +		memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);  	/* Generate the P2P Interface Address.  
If the discovery and connection  	 * BSSCFGs need to simultaneously co-exist, then this address must be diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index c5f2ddf9b0fe..e5a2fc738ac3 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -91,7 +91,6 @@ config IWLWIFI_BCAST_FILTERING  config IWLWIFI_PCIE_RTPM         bool "Enable runtime power management mode for PCIe devices"         depends on IWLMVM && PM && EXPERT -       default false         help           Say Y here to enable runtime power management for PCIe           devices.  If enabled, the device will go into low power mode diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h index 3721a3ed358b..f824bebceb06 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h @@ -211,7 +211,7 @@ enum {   * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end   * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.   * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use. 
- * @T2_V2_START_IMMEDIATELY: start time event immediately + * @TE_V2_START_IMMEDIATELY: start time event immediately   * @TE_V2_DEP_OTHER: depends on another time event   * @TE_V2_DEP_TSF: depends on a specific time   * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC @@ -230,7 +230,7 @@ enum iwl_time_event_policy {  	TE_V2_NOTIF_HOST_FRAG_END = BIT(5),  	TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),  	TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), -	T2_V2_START_IMMEDIATELY = BIT(11), +	TE_V2_START_IMMEDIATELY = BIT(11),  	/* placement characteristics */  	TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS), diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 67aefc8fc9ac..7bd704a3e640 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -8,6 +8,7 @@   * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.   * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH   * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018        Intel Corporation   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@   * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.   * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH   * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018        Intel Corporation   * All rights reserved.   
*   * Redistribution and use in source and binary forms, with or without @@ -942,7 +944,6 @@ dump_trans_data:  out:  	iwl_fw_free_dump_desc(fwrt); -	fwrt->dump.trig = NULL;  	clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);  	IWL_DEBUG_INFO(fwrt, "WRT dump done\n");  } @@ -1112,6 +1113,14 @@ void iwl_fw_error_dump_wk(struct work_struct *work)  	    fwrt->ops->dump_start(fwrt->ops_ctx))  		return; +	if (fwrt->ops && fwrt->ops->fw_running && +	    !fwrt->ops->fw_running(fwrt->ops_ctx)) { +		IWL_ERR(fwrt, "Firmware not running - cannot dump error\n"); +		iwl_fw_free_dump_desc(fwrt); +		clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); +		goto out; +	} +  	if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {  		/* stop recording */  		iwl_fw_dbg_stop_recording(fwrt); @@ -1145,7 +1154,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)  			iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl);  		}  	} - +out:  	if (fwrt->ops && fwrt->ops->dump_end)  		fwrt->ops->dump_end(fwrt->ops_ctx);  } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 223fb77a3aa9..72259bff9922 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -8,6 +8,7 @@   * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.   * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH   * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018        Intel Corporation   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@   * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.   * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH   * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018        Intel Corporation   * All rights reserved.   
*   * Redistribution and use in source and binary forms, with or without @@ -91,6 +93,7 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)  	if (fwrt->dump.desc != &iwl_dump_desc_assert)  		kfree(fwrt->dump.desc);  	fwrt->dump.desc = NULL; +	fwrt->dump.trig = NULL;  }  void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h index e57ff92a68ae..3da468d2cc92 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h @@ -75,6 +75,20 @@ static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt)  	cancel_delayed_work_sync(&fwrt->timestamp.wk);  } +static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) +{ +	cancel_delayed_work_sync(&fwrt->timestamp.wk); +} + +static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) +{ +	if (!fwrt->timestamp.delay) +		return; + +	schedule_delayed_work(&fwrt->timestamp.wk, +			      round_jiffies_relative(fwrt->timestamp.delay)); +} +  #else  static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,  					  struct dentry *dbgfs_dir) @@ -84,4 +98,8 @@ static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,  static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {} +static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {} + +static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {} +  #endif /* CONFIG_IWLWIFI_DEBUGFS */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index c39fe84bb4c4..2efac307909e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -77,8 +77,14 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,  }  IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); -void iwl_fw_runtime_exit(struct 
iwl_fw_runtime *fwrt) +void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt)  { -	iwl_fw_cancel_timestamp(fwrt); +	iwl_fw_suspend_timestamp(fwrt);  } -IWL_EXPORT_SYMBOL(iwl_fw_runtime_exit); +IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend); + +void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt) +{ +	iwl_fw_resume_timestamp(fwrt); +} +IWL_EXPORT_SYMBOL(iwl_fw_runtime_resume); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index e25c049f980f..3fb940ebd74a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -6,6 +6,7 @@   * GPL LICENSE SUMMARY   *   * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of version 2 of the GNU General Public License as @@ -26,6 +27,7 @@   * BSD LICENSE   *   * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation   * All rights reserved.   
*   * Redistribution and use in source and binary forms, with or without @@ -68,6 +70,7 @@  struct iwl_fw_runtime_ops {  	int (*dump_start)(void *ctx);  	void (*dump_end)(void *ctx); +	bool (*fw_running)(void *ctx);  };  #define MAX_NUM_LMAC 2 @@ -150,6 +153,10 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,  void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt); +void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt); + +void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt); +  static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt,  					    enum iwl_ucode_type cur_fw_img)  { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 0e6cf39285f4..2efe9b099556 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1098,6 +1098,8 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)  	/* make sure the d0i3 exit work is not pending */  	flush_work(&mvm->d0i3_exit_work); +	iwl_fw_runtime_suspend(&mvm->fwrt); +  	ret = iwl_trans_suspend(trans);  	if (ret)  		return ret; @@ -2012,6 +2014,8 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)  	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; +	iwl_fw_runtime_resume(&mvm->fwrt); +  	return ret;  } @@ -2038,6 +2042,8 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)  	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; +	iwl_fw_runtime_suspend(&mvm->fwrt); +  	/* start pseudo D3 */  	rtnl_lock();  	err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true); @@ -2098,6 +2104,8 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)  	__iwl_mvm_resume(mvm, true);  	rtnl_unlock(); +	iwl_fw_runtime_resume(&mvm->fwrt); +  	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;  	iwl_abort_notification_waits(&mvm->notif_wait); diff --git 
a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index a7892c1254a2..9c436d8d001d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -8,6 +8,7 @@   * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.   * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH   * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018        Intel Corporation   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@   * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.   * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH   * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018        Intel Corporation   * All rights reserved.   *   * Redistribution and use in source and binary forms, with or without @@ -1281,9 +1283,6 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,  {  	int ret; -	if (!iwl_mvm_firmware_running(mvm)) -		return -EIO; -  	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);  	if (ret)  		return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 2f22e14e00fe..8ba16fc24e3a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -438,7 +438,8 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)  	}  	/* Allocate the CAB queue for softAP and GO interfaces */ -	if (vif->type == NL80211_IFTYPE_AP) { +	if (vif->type == NL80211_IFTYPE_AP || +	    vif->type == NL80211_IFTYPE_ADHOC) {  		/*  		 * For TVQM this will be overwritten later with the FW assigned  		 * queue value (when queue is enabled). 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 8aed40a8bc38..ebf511150f4d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -8,6 +8,7 @@   * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.   * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH   * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018        Intel Corporation   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of version 2 of the GNU General Public License as @@ -2106,15 +2107,40 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,  	if (ret)  		goto out_remove; -	ret = iwl_mvm_add_mcast_sta(mvm, vif); -	if (ret) -		goto out_unbind; - -	/* Send the bcast station. At this stage the TBTT and DTIM time events -	 * are added and applied to the scheduler */ -	ret = iwl_mvm_send_add_bcast_sta(mvm, vif); -	if (ret) -		goto out_rm_mcast; +	/* +	 * This is not very nice, but the simplest: +	 * For older FWs adding the mcast sta before the bcast station may +	 * cause assert 0x2b00. +	 * This is fixed in later FW so make the order of removal depend on +	 * the TLV +	 */ +	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { +		ret = iwl_mvm_add_mcast_sta(mvm, vif); +		if (ret) +			goto out_unbind; +		/* +		 * Send the bcast station. At this stage the TBTT and DTIM time +		 * events are added and applied to the scheduler +		 */ +		ret = iwl_mvm_send_add_bcast_sta(mvm, vif); +		if (ret) { +			iwl_mvm_rm_mcast_sta(mvm, vif); +			goto out_unbind; +		} +	} else { +		/* +		 * Send the bcast station. 
At this stage the TBTT and DTIM time +		 * events are added and applied to the scheduler +		 */ +		iwl_mvm_send_add_bcast_sta(mvm, vif); +		if (ret) +			goto out_unbind; +		iwl_mvm_add_mcast_sta(mvm, vif); +		if (ret) { +			iwl_mvm_send_rm_bcast_sta(mvm, vif); +			goto out_unbind; +		} +	}  	/* must be set before quota calculations */  	mvmvif->ap_ibss_active = true; @@ -2144,7 +2170,6 @@ out_quota_failed:  	iwl_mvm_power_update_mac(mvm);  	mvmvif->ap_ibss_active = false;  	iwl_mvm_send_rm_bcast_sta(mvm, vif); -out_rm_mcast:  	iwl_mvm_rm_mcast_sta(mvm, vif);  out_unbind:  	iwl_mvm_binding_remove_vif(mvm, vif); @@ -2682,6 +2707,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,  		/* enable beacon filtering */  		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); + +		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, +				     false); +  		ret = 0;  	} else if (old_state == IEEE80211_STA_AUTHORIZED &&  		   new_state == IEEE80211_STA_ASSOC) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 2d28e0804218..89ff02d7c876 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -90,6 +90,7 @@  #include "fw/runtime.h"  #include "fw/dbg.h"  #include "fw/acpi.h" +#include "fw/debugfs.h"  #define IWL_MVM_MAX_ADDRESSES		5  /* RSSI offset for WkP */ @@ -1783,6 +1784,7 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)  static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)  { +	iwl_fw_cancel_timestamp(&mvm->fwrt);  	iwl_free_fw_paging(&mvm->fwrt);  	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);  	iwl_fw_dump_conf_clear(&mvm->fwrt); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 5d525a0023dc..ab7fb5aad984 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -8,6 +8,7 @@   * 
Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.   * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH   * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018        Intel Corporation   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@   * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.   * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH   * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018        Intel Corporation   * All rights reserved.   *   * Redistribution and use in source and binary forms, with or without @@ -552,9 +554,15 @@ static void iwl_mvm_fwrt_dump_end(void *ctx)  	iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);  } +static bool iwl_mvm_fwrt_fw_running(void *ctx) +{ +	return iwl_mvm_firmware_running(ctx); +} +  static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {  	.dump_start = iwl_mvm_fwrt_dump_start,  	.dump_end = iwl_mvm_fwrt_dump_end, +	.fw_running = iwl_mvm_fwrt_fw_running,  };  static struct iwl_op_mode * @@ -802,7 +810,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,  	iwl_mvm_leds_exit(mvm);  	iwl_mvm_thermal_exit(mvm);   out_free: -	iwl_fw_runtime_exit(&mvm->fwrt);  	iwl_fw_flush_dump(&mvm->fwrt);  	if (iwlmvm_mod_params.init_dbg) @@ -843,7 +850,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)  #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)  	kfree(mvm->d3_resume_sram);  #endif -	iwl_fw_runtime_exit(&mvm->fwrt);  	iwl_trans_op_mode_leave(mvm->trans);  	iwl_phy_db_free(mvm->phy_db); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 60abb0084ee5..47f4c7a1d80d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -2684,7 +2684,8 @@ static void 
rs_get_initial_rate(struct iwl_mvm *mvm,  				struct ieee80211_sta *sta,  				struct iwl_lq_sta *lq_sta,  				enum nl80211_band band, -				struct rs_rate *rate) +				struct rs_rate *rate, +				bool init)  {  	int i, nentries;  	unsigned long active_rate; @@ -2738,14 +2739,25 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,  	 */  	if (sta->vht_cap.vht_supported &&  	    best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { -		switch (sta->bandwidth) { -		case IEEE80211_STA_RX_BW_160: -		case IEEE80211_STA_RX_BW_80: -		case IEEE80211_STA_RX_BW_40: +		/* +		 * In AP mode, when a new station associates, rs is initialized +		 * immediately upon association completion, before the phy +		 * context is updated with the association parameters, so the +		 * sta bandwidth might be wider than the phy context allows. +		 * To avoid this issue, always initialize rs with 20mhz +		 * bandwidth rate, and after authorization, when the phy context +		 * is already up-to-date, re-init rs with the correct bw. +		 */ +		u32 bw = init ? 
RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta); + +		switch (bw) { +		case RATE_MCS_CHAN_WIDTH_40: +		case RATE_MCS_CHAN_WIDTH_80: +		case RATE_MCS_CHAN_WIDTH_160:  			initial_rates = rs_optimal_rates_vht;  			nentries = ARRAY_SIZE(rs_optimal_rates_vht);  			break; -		case IEEE80211_STA_RX_BW_20: +		case RATE_MCS_CHAN_WIDTH_20:  			initial_rates = rs_optimal_rates_vht_20mhz;  			nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz);  			break; @@ -2756,7 +2768,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,  		active_rate = lq_sta->active_siso_rate;  		rate->type = LQ_VHT_SISO; -		rate->bw = rs_bw_from_sta_bw(sta); +		rate->bw = bw;  	} else if (sta->ht_cap.ht_supported &&  		   best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {  		initial_rates = rs_optimal_rates_ht; @@ -2839,7 +2851,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,  	tbl = &(lq_sta->lq_info[active_tbl]);  	rate = &tbl->rate; -	rs_get_initial_rate(mvm, sta, lq_sta, band, rate); +	rs_get_initial_rate(mvm, sta, lq_sta, band, rate, init);  	rs_init_optimal_rate(mvm, sta, lq_sta);  	WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index a3f7c1bf3cc8..580de5851fc7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -71,6 +71,7 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,  	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;  	struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);  	struct iwl_mvm_key_pn *ptk_pn; +	int res;  	u8 tid, keyidx;  	u8 pn[IEEE80211_CCMP_PN_LEN];  	u8 *extiv; @@ -127,12 +128,13 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,  	pn[4] = extiv[1];  	pn[5] = extiv[0]; -	if (memcmp(pn, ptk_pn->q[queue].pn[tid], -		   IEEE80211_CCMP_PN_LEN) <= 0) +	res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN); +	if (res < 0) 
+		return -1; +	if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))  		return -1; -	if (!(stats->flag & RX_FLAG_AMSDU_MORE)) -		memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN); +	memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);  	stats->flag |= RX_FLAG_PN_VALIDATED;  	return 0; @@ -314,28 +316,21 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,  }  /* - * returns true if a packet outside BA session is a duplicate and - * should be dropped + * returns true if a packet is a duplicate and should be dropped. + * Updates AMSDU PN tracking info   */ -static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue, -				  struct ieee80211_rx_status *rx_status, -				  struct ieee80211_hdr *hdr, -				  struct iwl_rx_mpdu_desc *desc) +static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, +			   struct ieee80211_rx_status *rx_status, +			   struct ieee80211_hdr *hdr, +			   struct iwl_rx_mpdu_desc *desc)  {  	struct iwl_mvm_sta *mvm_sta;  	struct iwl_mvm_rxq_dup_data *dup_data; -	u8 baid, tid, sub_frame_idx; +	u8 tid, sub_frame_idx;  	if (WARN_ON(IS_ERR_OR_NULL(sta)))  		return false; -	baid = (le32_to_cpu(desc->reorder_data) & -		IWL_RX_MPDU_REORDER_BAID_MASK) >> -		IWL_RX_MPDU_REORDER_BAID_SHIFT; - -	if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) -		return false; -  	mvm_sta = iwl_mvm_sta_from_mac80211(sta);  	dup_data = &mvm_sta->dup_data[queue]; @@ -365,6 +360,12 @@ static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,  		     dup_data->last_sub_frame[tid] >= sub_frame_idx))  		return true; +	/* Allow same PN as the first subframe for following sub frames */ +	if (dup_data->last_seq[tid] == hdr->seq_ctrl && +	    sub_frame_idx > dup_data->last_sub_frame[tid] && +	    desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) +		rx_status->flag |= RX_FLAG_ALLOW_SAME_PN; +  	dup_data->last_seq[tid] = hdr->seq_ctrl;  	dup_data->last_sub_frame[tid] = sub_frame_idx; @@ -971,7 +972,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm 
*mvm, struct napi_struct *napi,  		if (ieee80211_is_data(hdr->frame_control))  			iwl_mvm_rx_csum(sta, skb, desc); -		if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) { +		if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {  			kfree_skb(skb);  			goto out;  		} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 6b2674e02606..630e23cb0ffb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -2039,7 +2039,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)  	struct iwl_trans_txq_scd_cfg cfg = {  		.fifo = IWL_MVM_TX_FIFO_MCAST,  		.sta_id = msta->sta_id, -		.tid = IWL_MAX_TID_COUNT, +		.tid = 0,  		.aggregate = false,  		.frame_limit = IWL_FRAME_LIMIT,  	}; @@ -2053,6 +2053,17 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)  		return -ENOTSUPP;  	/* +	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be +	 * invalid, so make sure we use the queue we want. +	 * Note that this is done here as we want to avoid making DQA +	 * changes in mac80211 layer. +	 */ +	if (vif->type == NL80211_IFTYPE_ADHOC) { +		vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; +		mvmvif->cab_queue = vif->cab_queue; +	} + +	/*  	 * While in previous FWs we had to exclude cab queue from TFD queue  	 * mask, now it is needed as any other queue.  	 */ @@ -2079,24 +2090,13 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)  	if (iwl_mvm_has_new_tx_api(mvm)) {  		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,  						    msta->sta_id, -						    IWL_MAX_TID_COUNT, +						    0,  						    timeout);  		mvmvif->cab_queue = queue;  	} else if (!fw_has_api(&mvm->fw->ucode_capa, -			       IWL_UCODE_TLV_API_STA_TYPE)) { -		/* -		 * In IBSS, ieee80211_check_queues() sets the cab_queue to be -		 * invalid, so make sure we use the queue we want. 
-		 * Note that this is done here as we want to avoid making DQA -		 * changes in mac80211 layer. -		 */ -		if (vif->type == NL80211_IFTYPE_ADHOC) { -			vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; -			mvmvif->cab_queue = vif->cab_queue; -		} +			       IWL_UCODE_TLV_API_STA_TYPE))  		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,  				   &cfg, timeout); -	}  	return 0;  } @@ -2115,7 +2115,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)  	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);  	iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue, -			    IWL_MAX_TID_COUNT, 0); +			    0, 0);  	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);  	if (ret) @@ -3170,8 +3170,9 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,  	int ret, size;  	u32 status; +	/* This is a valid situation for GTK removal */  	if (sta_id == IWL_MVM_INVALID_STA) -		return -EINVAL; +		return 0;  	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &  				 STA_KEY_FLG_KEYID_MSK); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 200ab50ec86b..acb217e666db 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -616,7 +616,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,  	time_cmd.repeat = 1;  	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |  				      TE_V2_NOTIF_HOST_EVENT_END | -				      T2_V2_START_IMMEDIATELY); +				      TE_V2_START_IMMEDIATELY);  	if (!wait_for_notif) {  		iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); @@ -803,7 +803,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,  	time_cmd.repeat = 1;  	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |  				      TE_V2_NOTIF_HOST_EVENT_END | -				      T2_V2_START_IMMEDIATELY); +				      TE_V2_START_IMMEDIATELY);  	return 
iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);  } @@ -913,6 +913,8 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,  	time_cmd.interval = cpu_to_le32(1);  	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |  				      TE_V2_ABSENCE); +	if (!apply_time) +		time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);  	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);  } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index dda77b327c98..af6dfceab6b8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -419,11 +419,11 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,  {  	struct ieee80211_key_conf *keyconf = info->control.hw_key;  	u8 *crypto_hdr = skb_frag->data + hdrlen; +	enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;  	u64 pn;  	switch (keyconf->cipher) {  	case WLAN_CIPHER_SUITE_CCMP: -	case WLAN_CIPHER_SUITE_CCMP_256:  		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);  		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);  		break; @@ -447,13 +447,16 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,  		break;  	case WLAN_CIPHER_SUITE_GCMP:  	case WLAN_CIPHER_SUITE_GCMP_256: +		type = TX_CMD_SEC_GCMP; +		/* Fall through */ +	case WLAN_CIPHER_SUITE_CCMP_256:  		/* TODO: Taking the key from the table might introduce a race  		 * when PTK rekeying is done, having an old packets with a PN  		 * based on the old key but the message encrypted with a new  		 * one.  		 * Need to handle this.  		 
*/ -		tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE; +		tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;  		tx_cmd->key[0] = keyconf->hw_key_idx;  		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);  		break; @@ -645,7 +648,11 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)  		if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||  		    info.control.vif->type == NL80211_IFTYPE_AP ||  		    info.control.vif->type == NL80211_IFTYPE_ADHOC) { -			sta_id = mvmvif->bcast_sta.sta_id; +			if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE) +				sta_id = mvmvif->bcast_sta.sta_id; +			else +				sta_id = mvmvif->mcast_sta.sta_id; +  			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,  							   hdr->frame_control);  			if (queue < 0) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 6d0a907d5ba5..fabae0f60683 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -147,7 +147,7 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,  	/* Sanity check on number of chunks */  	num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd); -	if (num_tbs >= trans_pcie->max_tbs) { +	if (num_tbs > trans_pcie->max_tbs) {  		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);  		return;  	} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 3f85713c41dc..1a566287993d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -378,7 +378,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,  	/* Sanity check on number of chunks */  	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); -	if (num_tbs >= trans_pcie->max_tbs) { +	if (num_tbs > trans_pcie->max_tbs) {  		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);  		/* @todo issue fatal error, it is quite serious situation */  		return; diff --git 
a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 1cf22e62e3dd..35b21f8152bb 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2727,6 +2727,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,  	mutex_init(&data->mutex);  	data->netgroup = hwsim_net_get_netgroup(net); +	data->wmediumd = hwsim_net_get_wmediumd(net);  	/* Enable frame retransmissions for lossy channels */  	hw->max_rates = 4; @@ -3516,7 +3517,7 @@ static int __init init_mac80211_hwsim(void)  	spin_lock_init(&hwsim_radio_lock); -	hwsim_wq = alloc_workqueue("hwsim_wq",WQ_MEM_RECLAIM,0); +	hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0);  	if (!hwsim_wq)  		return -ENOMEM;  	rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index f9ccd13c79f9..e7bbbc95cdb1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -1125,7 +1125,8 @@ static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw)  	/* Configuration Space offset 0x70f BIT7 is used to control L0S */  	tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f); -	_rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7)); +	_rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7) | +			     ASPM_L1_LATENCY << 3);  	/* Configuration Space offset 0x719 Bit3 is for L1  	 * BIT4 is for clock request diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8328d395e332..3127bc8633ca 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -2005,7 +2005,10 @@ static void netback_changed(struct xenbus_device *dev,  	case XenbusStateInitialised:  	case XenbusStateReconfiguring:  	case XenbusStateReconfigured: +		break; +  	case XenbusStateUnknown: +		wake_up_all(&module_unload_q);  		break;  	case XenbusStateInitWait: @@ -2136,7 +2139,9 @@ static int 
xennet_remove(struct xenbus_device *dev)  		xenbus_switch_state(dev, XenbusStateClosing);  		wait_event(module_unload_q,  			   xenbus_read_driver_state(dev->otherend) == -			   XenbusStateClosing); +			   XenbusStateClosing || +			   xenbus_read_driver_state(dev->otherend) == +			   XenbusStateUnknown);  		xenbus_switch_state(dev, XenbusStateClosed);  		wait_event(module_unload_q, diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 345acca576b3..1bd7b3734751 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c @@ -278,8 +278,6 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)  	disk->queue		= q;  	disk->flags		= GENHD_FL_EXT_DEVT;  	nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name); -	set_capacity(disk, 0); -	device_add_disk(dev, disk);  	if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk))  		return -ENOMEM; @@ -292,6 +290,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)  	}  	set_capacity(disk, available_disk_size >> SECTOR_SHIFT); +	device_add_disk(dev, disk);  	revalidate_disk(disk);  	return 0;  } diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 2ef544f10ec8..4b95ac513de2 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1545,8 +1545,6 @@ static int btt_blk_init(struct btt *btt)  	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);  	btt->btt_queue->queuedata = btt; -	set_capacity(btt->btt_disk, 0); -	device_add_disk(&btt->nd_btt->dev, btt->btt_disk);  	if (btt_meta_size(btt)) {  		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt)); @@ -1558,6 +1556,7 @@ static int btt_blk_init(struct btt *btt)  		}  	}  	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); +	device_add_disk(&btt->nd_btt->dev, btt->btt_disk);  	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;  	revalidate_disk(btt->btt_disk); diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index f5c4e8c6e29d..2f4d18752c97 100644 --- 
a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -304,7 +304,7 @@ static const struct attribute_group *nd_pfn_attribute_groups[] = {  struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,  		struct nd_namespace_common *ndns)  { -	struct device *dev = &nd_pfn->dev; +	struct device *dev;  	if (!nd_pfn)  		return NULL; diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 10041ac4032c..06f8dcc52ca6 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -335,8 +335,7 @@ static int pmem_attach_disk(struct device *dev,  		dev_warn(dev, "unable to guarantee persistence of writes\n");  		fua = 0;  	} -	wbc = nvdimm_has_cache(nd_region) && -		!test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags); +	wbc = nvdimm_has_cache(nd_region);  	if (!devm_request_mem_region(dev, res->start, resource_size(res),  				dev_name(&ndns->dev))) { diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index e6d01911e092..1593e1806b16 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -532,11 +532,13 @@ static ssize_t persistence_domain_show(struct device *dev,  		struct device_attribute *attr, char *buf)  {  	struct nd_region *nd_region = to_nd_region(dev); -	unsigned long flags = nd_region->flags; -	return sprintf(buf, "%s%s\n", -			flags & BIT(ND_REGION_PERSIST_CACHE) ? "cpu_cache " : "", -			flags & BIT(ND_REGION_PERSIST_MEMCTRL) ? 
"memory_controller " : ""); +	if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags)) +		return sprintf(buf, "cpu_cache\n"); +	else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags)) +		return sprintf(buf, "memory_controller\n"); +	else +		return sprintf(buf, "\n");  }  static DEVICE_ATTR_RO(persistence_domain); @@ -593,6 +595,13 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)  			return 0;  	} +	if (a == &dev_attr_persistence_domain.attr) { +		if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE) +					| BIT(ND_REGION_PERSIST_MEMCTRL))) == 0) +			return 0; +		return a->mode; +	} +  	if (a != &dev_attr_set_cookie.attr  			&& a != &dev_attr_available_size.attr)  		return a->mode; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f431c32774f3..7aeca5db7916 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -120,8 +120,12 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)  	int ret;  	ret = nvme_reset_ctrl(ctrl); -	if (!ret) +	if (!ret) {  		flush_work(&ctrl->reset_work); +		if (ctrl->state != NVME_CTRL_LIVE) +			ret = -ENETRESET; +	} +  	return ret;  }  EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync); @@ -265,7 +269,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,  	switch (new_state) {  	case NVME_CTRL_ADMIN_ONLY:  		switch (old_state) { -		case NVME_CTRL_RECONNECTING: +		case NVME_CTRL_CONNECTING:  			changed = true;  			/* FALLTHRU */  		default: @@ -276,7 +280,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,  		switch (old_state) {  		case NVME_CTRL_NEW:  		case NVME_CTRL_RESETTING: -		case NVME_CTRL_RECONNECTING: +		case NVME_CTRL_CONNECTING:  			changed = true;  			/* FALLTHRU */  		default: @@ -294,9 +298,9 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,  			break;  		}  		break; -	case NVME_CTRL_RECONNECTING: +	case NVME_CTRL_CONNECTING:  		switch (old_state) { -		case NVME_CTRL_LIVE: +		case NVME_CTRL_NEW:  		case NVME_CTRL_RESETTING:  			changed = true;  	
		/* FALLTHRU */ @@ -309,7 +313,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,  		case NVME_CTRL_LIVE:  		case NVME_CTRL_ADMIN_ONLY:  		case NVME_CTRL_RESETTING: -		case NVME_CTRL_RECONNECTING: +		case NVME_CTRL_CONNECTING:  			changed = true;  			/* FALLTHRU */  		default: @@ -518,9 +522,11 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,  		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);  		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; -		range[n].cattr = cpu_to_le32(0); -		range[n].nlb = cpu_to_le32(nlb); -		range[n].slba = cpu_to_le64(slba); +		if (n < segments) { +			range[n].cattr = cpu_to_le32(0); +			range[n].nlb = cpu_to_le32(nlb); +			range[n].slba = cpu_to_le64(slba); +		}  		n++;  	} @@ -794,13 +800,9 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)  static int nvme_keep_alive(struct nvme_ctrl *ctrl)  { -	struct nvme_command c;  	struct request *rq; -	memset(&c, 0, sizeof(c)); -	c.common.opcode = nvme_admin_keep_alive; - -	rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED, +	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,  			NVME_QID_ANY);  	if (IS_ERR(rq))  		return PTR_ERR(rq); @@ -832,6 +834,8 @@ void nvme_start_keep_alive(struct nvme_ctrl *ctrl)  		return;  	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); +	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); +	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;  	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);  }  EXPORT_SYMBOL_GPL(nvme_start_keep_alive); @@ -1117,14 +1121,19 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,  static void nvme_update_formats(struct nvme_ctrl *ctrl)  { -	struct nvme_ns *ns; +	struct nvme_ns *ns, *next; +	LIST_HEAD(rm_list);  	mutex_lock(&ctrl->namespaces_mutex);  	list_for_each_entry(ns, &ctrl->namespaces, list) { -		if (ns->disk && nvme_revalidate_disk(ns->disk)) -			nvme_ns_remove(ns); +		if (ns->disk && 
nvme_revalidate_disk(ns->disk)) { +			list_move_tail(&ns->list, &rm_list); +		}  	}  	mutex_unlock(&ctrl->namespaces_mutex); + +	list_for_each_entry_safe(ns, next, &rm_list, list) +		nvme_ns_remove(ns);  }  static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) @@ -2687,7 +2696,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,  		[NVME_CTRL_LIVE]	= "live",  		[NVME_CTRL_ADMIN_ONLY]	= "only-admin",  		[NVME_CTRL_RESETTING]	= "resetting", -		[NVME_CTRL_RECONNECTING]= "reconnecting", +		[NVME_CTRL_CONNECTING]	= "connecting",  		[NVME_CTRL_DELETING]	= "deleting",  		[NVME_CTRL_DEAD]	= "dead",  	}; @@ -2835,7 +2844,7 @@ out:  }  static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, -		struct nvme_id_ns *id, bool *new) +		struct nvme_id_ns *id)  {  	struct nvme_ctrl *ctrl = ns->ctrl;  	bool is_shared = id->nmic & (1 << 0); @@ -2851,8 +2860,6 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,  			ret = PTR_ERR(head);  			goto out_unlock;  		} - -		*new = true;  	} else {  		struct nvme_ns_ids ids; @@ -2864,8 +2871,6 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,  			ret = -EINVAL;  			goto out_unlock;  		} - -		*new = false;  	}  	list_add_tail(&ns->siblings, &head->list); @@ -2936,7 +2941,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)  	struct nvme_id_ns *id;  	char disk_name[DISK_NAME_LEN];  	int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT; -	bool new = true;  	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);  	if (!ns) @@ -2962,7 +2966,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)  	if (id->ncap == 0)  		goto out_free_id; -	if (nvme_init_ns_head(ns, nsid, id, &new)) +	if (nvme_init_ns_head(ns, nsid, id))  		goto out_free_id;  	nvme_setup_streams_ns(ctrl, ns); @@ -3028,9 +3032,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)  		pr_warn("%s: failed to register lightnvm sysfs group for identification\n",  			
ns->disk->disk_name); -	if (new) -		nvme_mpath_add_disk(ns->head); -	nvme_mpath_add_disk_links(ns); +	nvme_mpath_add_disk(ns->head);  	return;   out_unlink_ns:  	mutex_lock(&ctrl->subsys->lock); @@ -3050,7 +3052,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)  		return;  	if (ns->disk && ns->disk->flags & GENHD_FL_UP) { -		nvme_mpath_remove_disk_links(ns);  		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,  					&nvme_ns_id_attr_group);  		if (ns->ndev) diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 5dd4ceefed8f..8f0f34d06d46 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -493,7 +493,7 @@ EXPORT_SYMBOL_GPL(nvmf_should_reconnect);   */  int nvmf_register_transport(struct nvmf_transport_ops *ops)  { -	if (!ops->create_ctrl || !ops->module) +	if (!ops->create_ctrl)  		return -EINVAL;  	down_write(&nvmf_transports_rwsem); @@ -650,6 +650,11 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,  				ret = -EINVAL;  				goto out;  			} +			if (opts->discovery_nqn) { +				pr_debug("Ignoring nr_io_queues value for discovery controller\n"); +				break; +			} +  			opts->nr_io_queues = min_t(unsigned int,  					num_online_cpus(), token);  			break; diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index 25b19f722f5b..a3145d90c1d2 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -171,13 +171,14 @@ static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,  	    cmd->common.opcode != nvme_fabrics_command ||  	    cmd->fabrics.fctype != nvme_fabrics_type_connect) {  		/* -		 * Reconnecting state means transport disruption, which can take -		 * a long time and even might fail permanently, fail fast to -		 * give upper layers a chance to failover. 
+		 * Connecting state means transport disruption or initial +		 * establishment, which can take a long time and even might +		 * fail permanently, fail fast to give upper layers a chance +		 * to failover.  		 * Deleting state means that the ctrl will never accept commands  		 * again, fail it permanently.  		 */ -		if (ctrl->state == NVME_CTRL_RECONNECTING || +		if (ctrl->state == NVME_CTRL_CONNECTING ||  		    ctrl->state == NVME_CTRL_DELETING) {  			nvme_req(rq)->status = NVME_SC_ABORT_REQ;  			return BLK_STS_IOERR; diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index b856d7c919d2..1dc1387b7134 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -55,9 +55,7 @@ struct nvme_fc_queue {  enum nvme_fcop_flags {  	FCOP_FLAGS_TERMIO	= (1 << 0), -	FCOP_FLAGS_RELEASED	= (1 << 1), -	FCOP_FLAGS_COMPLETE	= (1 << 2), -	FCOP_FLAGS_AEN		= (1 << 3), +	FCOP_FLAGS_AEN		= (1 << 1),  };  struct nvmefc_ls_req_op { @@ -532,7 +530,7 @@ nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)  {  	switch (ctrl->ctrl.state) {  	case NVME_CTRL_NEW: -	case NVME_CTRL_RECONNECTING: +	case NVME_CTRL_CONNECTING:  		/*  		 * As all reconnects were suppressed, schedule a  		 * connect. @@ -777,7 +775,7 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)  		}  		break; -	case NVME_CTRL_RECONNECTING: +	case NVME_CTRL_CONNECTING:  		/*  		 * The association has already been terminated and the  		 * controller is attempting reconnects.  
No need to do anything @@ -1208,7 +1206,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,  				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));  	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); -	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize); +	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);  	/* Linux supports only Dynamic controllers */  	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);  	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); @@ -1323,7 +1321,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,  				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));  	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);  	conn_rqst->connect_cmd.qid  = cpu_to_be16(queue->qnum); -	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize); +	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);  	lsop->queue = queue;  	lsreq->rqstaddr = conn_rqst; @@ -1470,7 +1468,6 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)  /* *********************** NVME Ctrl Routines **************************** */ -static void __nvme_fc_final_op_cleanup(struct request *rq);  static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);  static int @@ -1512,13 +1509,19 @@ nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,  static int  __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)  { -	int state; +	unsigned long flags; +	int opstate; + +	spin_lock_irqsave(&ctrl->lock, flags); +	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED); +	if (opstate != FCPOP_STATE_ACTIVE) +		atomic_set(&op->state, opstate); +	else if (ctrl->flags & FCCTRL_TERMIO) +		ctrl->iocnt++; +	spin_unlock_irqrestore(&ctrl->lock, flags); -	state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED); -	if (state != FCPOP_STATE_ACTIVE) { -		atomic_set(&op->state, state); +	if (opstate != FCPOP_STATE_ACTIVE)  		return -ECANCELED; -	}  	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,  					
&ctrl->rport->remoteport, @@ -1532,60 +1535,26 @@ static void  nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)  {  	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; -	unsigned long flags; -	int i, ret; - -	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { -		if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE) -			continue; - -		spin_lock_irqsave(&ctrl->lock, flags); -		if (ctrl->flags & FCCTRL_TERMIO) { -			ctrl->iocnt++; -			aen_op->flags |= FCOP_FLAGS_TERMIO; -		} -		spin_unlock_irqrestore(&ctrl->lock, flags); - -		ret = __nvme_fc_abort_op(ctrl, aen_op); -		if (ret) { -			/* -			 * if __nvme_fc_abort_op failed the io wasn't -			 * active. Thus this call path is running in -			 * parallel to the io complete. Treat as non-error. -			 */ +	int i; -			/* back out the flags/counters */ -			spin_lock_irqsave(&ctrl->lock, flags); -			if (ctrl->flags & FCCTRL_TERMIO) -				ctrl->iocnt--; -			aen_op->flags &= ~FCOP_FLAGS_TERMIO; -			spin_unlock_irqrestore(&ctrl->lock, flags); -			return; -		} -	} +	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) +		__nvme_fc_abort_op(ctrl, aen_op);  } -static inline int +static inline void  __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl, -		struct nvme_fc_fcp_op *op) +		struct nvme_fc_fcp_op *op, int opstate)  {  	unsigned long flags; -	bool complete_rq = false; -	spin_lock_irqsave(&ctrl->lock, flags); -	if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) { +	if (opstate == FCPOP_STATE_ABORTED) { +		spin_lock_irqsave(&ctrl->lock, flags);  		if (ctrl->flags & FCCTRL_TERMIO) {  			if (!--ctrl->iocnt)  				wake_up(&ctrl->ioabort_wait);  		} +		spin_unlock_irqrestore(&ctrl->lock, flags);  	} -	if (op->flags & FCOP_FLAGS_RELEASED) -		complete_rq = true; -	else -		op->flags |= FCOP_FLAGS_COMPLETE; -	spin_unlock_irqrestore(&ctrl->lock, flags); - -	return complete_rq;  }  static void @@ -1601,6 +1570,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)  	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);  	union nvme_result result;  	
bool terminate_assoc = true; +	int opstate;  	/*  	 * WARNING: @@ -1639,11 +1609,12 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)  	 * association to be terminated.  	 */ +	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); +  	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,  				sizeof(op->rsp_iu), DMA_FROM_DEVICE); -	if (atomic_read(&op->state) == FCPOP_STATE_ABORTED || -			op->flags & FCOP_FLAGS_TERMIO) +	if (opstate == FCPOP_STATE_ABORTED)  		status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);  	else if (freq->status)  		status = cpu_to_le16(NVME_SC_INTERNAL << 1); @@ -1708,7 +1679,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)  done:  	if (op->flags & FCOP_FLAGS_AEN) {  		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); -		__nvme_fc_fcpop_chk_teardowns(ctrl, op); +		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);  		atomic_set(&op->state, FCPOP_STATE_IDLE);  		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */  		nvme_fc_ctrl_put(ctrl); @@ -1722,13 +1693,11 @@ done:  	if (status &&  	    (blk_queue_dying(rq->q) ||  	     ctrl->ctrl.state == NVME_CTRL_NEW || -	     ctrl->ctrl.state == NVME_CTRL_RECONNECTING)) +	     ctrl->ctrl.state == NVME_CTRL_CONNECTING))  		status |= cpu_to_le16(NVME_SC_DNR << 1); -	if (__nvme_fc_fcpop_chk_teardowns(ctrl, op)) -		__nvme_fc_final_op_cleanup(rq); -	else -		nvme_end_request(rq, status, result); +	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); +	nvme_end_request(rq, status, result);  check_error:  	if (terminate_assoc) @@ -2415,46 +2384,16 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg)  }  static void -__nvme_fc_final_op_cleanup(struct request *rq) +nvme_fc_complete_rq(struct request *rq)  {  	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);  	struct nvme_fc_ctrl *ctrl = op->ctrl;  	atomic_set(&op->state, FCPOP_STATE_IDLE); -	op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED | -			FCOP_FLAGS_COMPLETE);  	nvme_fc_unmap_data(ctrl, rq, op);  	nvme_complete_rq(rq);  	
nvme_fc_ctrl_put(ctrl); - -} - -static void -nvme_fc_complete_rq(struct request *rq) -{ -	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); -	struct nvme_fc_ctrl *ctrl = op->ctrl; -	unsigned long flags; -	bool completed = false; - -	/* -	 * the core layer, on controller resets after calling -	 * nvme_shutdown_ctrl(), calls complete_rq without our -	 * calling blk_mq_complete_request(), thus there may still -	 * be live i/o outstanding with the LLDD. Means transport has -	 * to track complete calls vs fcpio_done calls to know what -	 * path to take on completes and dones. -	 */ -	spin_lock_irqsave(&ctrl->lock, flags); -	if (op->flags & FCOP_FLAGS_COMPLETE) -		completed = true; -	else -		op->flags |= FCOP_FLAGS_RELEASED; -	spin_unlock_irqrestore(&ctrl->lock, flags); - -	if (completed) -		__nvme_fc_final_op_cleanup(rq);  }  /* @@ -2476,35 +2415,11 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)  	struct nvme_ctrl *nctrl = data;  	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);  	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req); -	unsigned long flags; -	int status;  	if (!blk_mq_request_started(req))  		return; -	spin_lock_irqsave(&ctrl->lock, flags); -	if (ctrl->flags & FCCTRL_TERMIO) { -		ctrl->iocnt++; -		op->flags |= FCOP_FLAGS_TERMIO; -	} -	spin_unlock_irqrestore(&ctrl->lock, flags); - -	status = __nvme_fc_abort_op(ctrl, op); -	if (status) { -		/* -		 * if __nvme_fc_abort_op failed the io wasn't -		 * active. Thus this call path is running in -		 * parallel to the io complete. Treat as non-error. 
-		 */ - -		/* back out the flags/counters */ -		spin_lock_irqsave(&ctrl->lock, flags); -		if (ctrl->flags & FCCTRL_TERMIO) -			ctrl->iocnt--; -		op->flags &= ~FCOP_FLAGS_TERMIO; -		spin_unlock_irqrestore(&ctrl->lock, flags); -		return; -	} +	__nvme_fc_abort_op(ctrl, op);  } @@ -2566,11 +2481,11 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)  		goto out_free_tag_set;  	} -	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size); +	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);  	if (ret)  		goto out_cleanup_blk_queue; -	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size); +	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);  	if (ret)  		goto out_delete_hw_queues; @@ -2617,11 +2532,11 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)  	if (ret)  		goto out_free_io_queues; -	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size); +	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);  	if (ret)  		goto out_free_io_queues; -	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size); +	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);  	if (ret)  		goto out_delete_hw_queues; @@ -2717,13 +2632,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)  	nvme_fc_init_queue(ctrl, 0);  	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, -				NVME_AQ_BLK_MQ_DEPTH); +				NVME_AQ_DEPTH);  	if (ret)  		goto out_free_queue;  	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], -				NVME_AQ_BLK_MQ_DEPTH, -				(NVME_AQ_BLK_MQ_DEPTH / 4)); +				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));  	if (ret)  		goto out_delete_hw_queue; @@ -2751,7 +2665,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)  	}  	ctrl->ctrl.sqsize = -		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize); +		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);  	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);  	if (ret) @@ -2784,6 +2698,14 @@ 
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)  		opts->queue_size = ctrl->ctrl.maxcmd;  	} +	if (opts->queue_size > ctrl->ctrl.sqsize + 1) { +		/* warn if sqsize is lower than queue_size */ +		dev_warn(ctrl->ctrl.device, +			"queue_size %zu > ctrl sqsize %u, clamping down\n", +			opts->queue_size, ctrl->ctrl.sqsize + 1); +		opts->queue_size = ctrl->ctrl.sqsize + 1; +	} +  	ret = nvme_fc_init_aen_ops(ctrl);  	if (ret)  		goto out_term_aen_ops; @@ -2943,7 +2865,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)  	unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;  	bool recon = true; -	if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) +	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)  		return;  	if (portptr->port_state == FC_OBJSTATE_ONLINE) @@ -2991,10 +2913,10 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)  	/* will block will waiting for io to terminate */  	nvme_fc_delete_association(ctrl); -	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) { +	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {  		dev_err(ctrl->ctrl.device,  			"NVME-FC{%d}: error_recovery: Couldn't change state " -			"to RECONNECTING\n", ctrl->cnum); +			"to CONNECTING\n", ctrl->cnum);  		return;  	} @@ -3195,7 +3117,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,  	 * transport errors (frame drop, LS failure) inherently must kill  	 * the association. The transport is coded so that any command used  	 * to create the association (prior to a LIVE state transition -	 * while NEW or RECONNECTING) will fail if it completes in error or +	 * while NEW or CONNECTING) will fail if it completes in error or  	 * times out.  	 
*  	 * As such: as the connect request was mostly likely due to a diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 3b211d9e58b8..060f69e03427 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -198,30 +198,16 @@ void nvme_mpath_add_disk(struct nvme_ns_head *head)  {  	if (!head->disk)  		return; -	device_add_disk(&head->subsys->dev, head->disk); -	if (sysfs_create_group(&disk_to_dev(head->disk)->kobj, -			&nvme_ns_id_attr_group)) -		pr_warn("%s: failed to create sysfs group for identification\n", -			head->disk->disk_name); -} - -void nvme_mpath_add_disk_links(struct nvme_ns *ns) -{ -	struct kobject *slave_disk_kobj, *holder_disk_kobj; - -	if (!ns->head->disk) -		return; - -	slave_disk_kobj = &disk_to_dev(ns->disk)->kobj; -	if (sysfs_create_link(ns->head->disk->slave_dir, slave_disk_kobj, -			kobject_name(slave_disk_kobj))) -		return; -	holder_disk_kobj = &disk_to_dev(ns->head->disk)->kobj; -	if (sysfs_create_link(ns->disk->part0.holder_dir, holder_disk_kobj, -			kobject_name(holder_disk_kobj))) -		sysfs_remove_link(ns->head->disk->slave_dir, -			kobject_name(slave_disk_kobj)); +	mutex_lock(&head->subsys->lock); +	if (!(head->disk->flags & GENHD_FL_UP)) { +		device_add_disk(&head->subsys->dev, head->disk); +		if (sysfs_create_group(&disk_to_dev(head->disk)->kobj, +				&nvme_ns_id_attr_group)) +			pr_warn("%s: failed to create sysfs group for identification\n", +				head->disk->disk_name); +	} +	mutex_unlock(&head->subsys->lock);  }  void nvme_mpath_remove_disk(struct nvme_ns_head *head) @@ -238,14 +224,3 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)  	blk_cleanup_queue(head->disk->queue);  	put_disk(head->disk);  } - -void nvme_mpath_remove_disk_links(struct nvme_ns *ns) -{ -	if (!ns->head->disk) -		return; - -	sysfs_remove_link(ns->disk->part0.holder_dir, -			kobject_name(&disk_to_dev(ns->head->disk)->kobj)); -	sysfs_remove_link(ns->head->disk->slave_dir, -			
kobject_name(&disk_to_dev(ns->disk)->kobj)); -} diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 8e4550fa08f8..d733b14ede9d 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -123,7 +123,7 @@ enum nvme_ctrl_state {  	NVME_CTRL_LIVE,  	NVME_CTRL_ADMIN_ONLY,    /* Only admin queue live */  	NVME_CTRL_RESETTING, -	NVME_CTRL_RECONNECTING, +	NVME_CTRL_CONNECTING,  	NVME_CTRL_DELETING,  	NVME_CTRL_DEAD,  }; @@ -183,6 +183,7 @@ struct nvme_ctrl {  	struct work_struct scan_work;  	struct work_struct async_event_work;  	struct delayed_work ka_work; +	struct nvme_command ka_cmd;  	struct work_struct fw_act_work;  	/* Power saving configuration */ @@ -409,9 +410,7 @@ bool nvme_req_needs_failover(struct request *req, blk_status_t error);  void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);  int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);  void nvme_mpath_add_disk(struct nvme_ns_head *head); -void nvme_mpath_add_disk_links(struct nvme_ns *ns);  void nvme_mpath_remove_disk(struct nvme_ns_head *head); -void nvme_mpath_remove_disk_links(struct nvme_ns *ns);  static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)  { @@ -453,12 +452,6 @@ static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)  static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)  {  } -static inline void nvme_mpath_add_disk_links(struct nvme_ns *ns) -{ -} -static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns) -{ -}  static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)  {  } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 6fe7af00a1f4..b6f43b738f03 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1141,7 +1141,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)  	/* If there is a reset/reinit ongoing, we shouldn't reset again. 
*/  	switch (dev->ctrl.state) {  	case NVME_CTRL_RESETTING: -	case NVME_CTRL_RECONNECTING: +	case NVME_CTRL_CONNECTING:  		return false;  	default:  		break; @@ -1153,12 +1153,6 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)  	if (!(csts & NVME_CSTS_CFS) && !nssro)  		return false; -	/* If PCI error recovery process is happening, we cannot reset or -	 * the recovery mechanism will surely fail. -	 */ -	if (pci_channel_offline(to_pci_dev(dev->dev))) -		return false; -  	return true;  } @@ -1189,6 +1183,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)  	struct nvme_command cmd;  	u32 csts = readl(dev->bar + NVME_REG_CSTS); +	/* If PCI error recovery process is happening, we cannot reset or +	 * the recovery mechanism will surely fail. +	 */ +	mb(); +	if (pci_channel_offline(to_pci_dev(dev->dev))) +		return BLK_EH_RESET_TIMER; +  	/*  	 * Reset immediately if the controller is failed  	 */ @@ -1215,13 +1216,17 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)  	 * cancellation error. All outstanding requests are completed on  	 * shutdown, so we return BLK_EH_HANDLED.  	 
*/ -	if (dev->ctrl.state == NVME_CTRL_RESETTING) { +	switch (dev->ctrl.state) { +	case NVME_CTRL_CONNECTING: +	case NVME_CTRL_RESETTING:  		dev_warn(dev->ctrl.device,  			 "I/O %d QID %d timeout, disable controller\n",  			 req->tag, nvmeq->qid);  		nvme_dev_disable(dev, false);  		nvme_req(req)->flags |= NVME_REQ_CANCELLED;  		return BLK_EH_HANDLED; +	default: +		break;  	}  	/* @@ -1364,18 +1369,14 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,  static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,  				int qid, int depth)  { -	if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { -		unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth), -						      dev->ctrl.page_size); -		nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset; -		nvmeq->sq_cmds_io = dev->cmb + offset; -	} else { -		nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), -					&nvmeq->sq_dma_addr, GFP_KERNEL); -		if (!nvmeq->sq_cmds) -			return -ENOMEM; -	} +	/* CMB SQEs will be mapped before creation */ +	if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) +		return 0; +	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), +					    &nvmeq->sq_dma_addr, GFP_KERNEL); +	if (!nvmeq->sq_cmds) +		return -ENOMEM;  	return 0;  } @@ -1449,10 +1450,17 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)  	struct nvme_dev *dev = nvmeq->dev;  	int result; +	if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { +		unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth), +						      dev->ctrl.page_size); +		nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset; +		nvmeq->sq_cmds_io = dev->cmb + offset; +	} +  	nvmeq->cq_vector = qid - 1;  	result = adapter_alloc_cq(dev, qid, nvmeq);  	if (result < 0) -		return result; +		goto release_vector;  	result = adapter_alloc_sq(dev, qid, nvmeq);  	if (result < 0) @@ -1466,9 +1474,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)  	
return result;   release_sq: +	dev->online_queues--;  	adapter_delete_sq(dev, qid);   release_cq:  	adapter_delete_cq(dev, qid); + release_vector: +	nvmeq->cq_vector = -1;  	return result;  } @@ -1903,7 +1914,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)  	int result, nr_io_queues;  	unsigned long size; -	nr_io_queues = num_present_cpus(); +	nr_io_queues = num_possible_cpus();  	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);  	if (result < 0)  		return result; @@ -2288,12 +2299,12 @@ static void nvme_reset_work(struct work_struct *work)  		nvme_dev_disable(dev, false);  	/* -	 * Introduce RECONNECTING state from nvme-fc/rdma transports to mark the +	 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the  	 * initializing procedure here.  	 */ -	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RECONNECTING)) { +	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {  		dev_warn(dev->ctrl.device, -			"failed to mark controller RECONNECTING\n"); +			"failed to mark controller CONNECTING\n");  		goto out;  	} diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 2bc059f7d73c..4d84a73ee12d 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -887,7 +887,7 @@ free_ctrl:  static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)  {  	/* If we are resetting/deleting then do nothing */ -	if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) { +	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {  		WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||  			ctrl->ctrl.state == NVME_CTRL_LIVE);  		return; @@ -973,7 +973,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)  	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);  	nvme_start_queues(&ctrl->ctrl); -	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) { +	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {  		/* state change failure should never happen */  		WARN_ON_ONCE(1);  		return; @@ 
-1051,7 +1051,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,  	struct nvme_rdma_device *dev = queue->device;  	struct ib_device *ibdev = dev->dev; -	if (!blk_rq_bytes(rq)) +	if (!blk_rq_payload_bytes(rq))  		return;  	if (req->mr) { @@ -1166,7 +1166,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,  	c->common.flags |= NVME_CMD_SGL_METABUF; -	if (!blk_rq_bytes(rq)) +	if (!blk_rq_payload_bytes(rq))  		return nvme_rdma_set_sg_null(c);  	req->sg_table.sgl = req->first_sgl; @@ -1756,7 +1756,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)  	nvme_stop_ctrl(&ctrl->ctrl);  	nvme_rdma_shutdown_ctrl(ctrl, false); -	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) { +	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {  		/* state change failure should never happen */  		WARN_ON_ONCE(1);  		return; @@ -1784,11 +1784,8 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)  	return;  out_fail: -	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); -	nvme_remove_namespaces(&ctrl->ctrl); -	nvme_rdma_shutdown_ctrl(ctrl, true); -	nvme_uninit_ctrl(&ctrl->ctrl); -	nvme_put_ctrl(&ctrl->ctrl); +	++ctrl->ctrl.nr_reconnects; +	nvme_rdma_reconnect_or_remove(ctrl);  }  static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { @@ -1942,6 +1939,9 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,  	if (!ctrl->queues)  		goto out_uninit_ctrl; +	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING); +	WARN_ON_ONCE(!changed); +  	ret = nvme_rdma_configure_admin_queue(ctrl, true);  	if (ret)  		goto out_kfree_queues; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 0bd737117a80..a78029e4e5f4 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -520,9 +520,12 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,  		goto fail;  	} -	/* either variant of SGLs is fine, as we don't support 
metadata */ -	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF && -		     (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) { +	/* +	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that +	 * contains an address of a single contiguous physical buffer that is +	 * byte aligned. +	 */ +	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {  		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;  		goto fail;  	} diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c index 0a4372a016f2..28bbdff4a88b 100644 --- a/drivers/nvme/target/io-cmd.c +++ b/drivers/nvme/target/io-cmd.c @@ -105,10 +105,13 @@ static void nvmet_execute_flush(struct nvmet_req *req)  static u16 nvmet_discard_range(struct nvmet_ns *ns,  		struct nvme_dsm_range *range, struct bio **bio)  { -	if (__blkdev_issue_discard(ns->bdev, +	int ret; + +	ret = __blkdev_issue_discard(ns->bdev,  			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),  			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9), -			GFP_KERNEL, 0, bio)) +			GFP_KERNEL, 0, bio); +	if (ret && ret != -EOPNOTSUPP)  		return NVME_SC_INTERNAL | NVME_SC_DNR;  	return 0;  } diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 7991ec3a17db..861d1509b22b 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -184,7 +184,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,  		return BLK_STS_OK;  	} -	if (blk_rq_bytes(req)) { +	if (blk_rq_payload_bytes(req)) {  		iod->sg_table.sgl = iod->first_sgl;  		if (sg_alloc_table_chained(&iod->sg_table,  				blk_rq_nr_phys_segments(req), @@ -193,7 +193,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,  		iod->req.sg = iod->sg_table.sgl;  		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl); -		iod->req.transfer_len = blk_rq_bytes(req); +		iod->req.transfer_len = blk_rq_payload_bytes(req);  	}  	blk_mq_start_request(req); diff --git a/drivers/of/property.c 
b/drivers/of/property.c index 36ed84e26d9c..f46828e3b082 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -977,11 +977,11 @@ static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,  	return 0;  } -static void * +static const void *  of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,  				const struct device *dev)  { -	return (void *)of_device_get_match_data(dev); +	return of_device_get_match_data(dev);  }  const struct fwnode_operations of_fwnode_ops = { diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c index 2d87bc1adf38..0c0910709435 100644 --- a/drivers/opp/cpu.c +++ b/drivers/opp/cpu.c @@ -55,7 +55,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,  	if (max_opps <= 0)  		return max_opps ? max_opps : -ENODATA; -	freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC); +	freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL);  	if (!freq_table)  		return -ENOMEM; diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c index 8de2d5c69b1d..dc9303abda42 100644 --- a/drivers/pci/dwc/pcie-designware-host.c +++ b/drivers/pci/dwc/pcie-designware-host.c @@ -613,7 +613,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)  	/* setup bus numbers */  	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);  	val &= 0xff000000; -	val |= 0x00010100; +	val |= 0x00ff0100;  	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);  	/* setup command register */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index fc734014206f..46d47bd6ca1f 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3419,22 +3419,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,  static void quirk_chelsio_extend_vpd(struct pci_dev *dev)  { -	pci_set_vpd_size(dev, 8192); -} - -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd); 
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd); +	int chip = (dev->device & 0xf000) >> 12; +	int func = (dev->device & 0x0f00) >>  8; +	int prod = (dev->device & 0x00ff) >>  0; + +	/* +	 * If this is a T3-based adapter, there's a 1KB VPD area at offset +	 * 0xc00 which contains the preferred VPD values.  If this is a T4 or +	 * later based adapter, the special VPD is at offset 0x400 for the +	 * Physical Functions (the SR-IOV Virtual Functions have no VPD +	 * Capabilities).  The PCI VPD Access core routines will normally +	 * compute the size of the VPD by parsing the VPD Data Structure at +	 * offset 0x000.  This will result in silent failures when attempting +	 * to accesses these other VPD areas which are beyond those computed +	 * limits. 
+	 */ +	if (chip == 0x0 && prod >= 0x20) +		pci_set_vpd_size(dev, 8192); +	else if (chip >= 0x4 && func < 0x8) +		pci_set_vpd_size(dev, 2048); +} + +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, +			quirk_chelsio_extend_vpd);  #ifdef CONFIG_ACPI  /* @@ -3901,6 +3908,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,  			 quirk_dma_func1_alias);  DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,  			 quirk_dma_func1_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645, +			 quirk_dma_func1_alias);  /* https://bugs.gentoo.org/show_bug.cgi?id=497630 */  DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,  			 PCI_DEVICE_ID_JMICRON_JMB388_ESD, diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 369d48d6c6f1..365447240d95 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -401,6 +401,10 @@ void pci_release_resource(struct pci_dev *dev, int resno)  	struct resource *res = dev->resource + resno;  	pci_info(dev, "BAR %d: releasing %pR\n", resno, res); + +	if (!res->parent) +		return; +  	release_resource(res);  	res->end = resource_size(res) - 1;  	res->start = 0; diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 7bc5eee96b31..f63db346c219 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -17,7 +17,6 @@  #include <linux/export.h>  #include <linux/kernel.h>  #include <linux/perf/arm_pmu.h> -#include <linux/platform_device.h>  #include <linux/slab.h>  #include <linux/sched/clock.h>  #include <linux/spinlock.h> @@ -26,6 +25,9 @@  #include <asm/irq_regs.h> +static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu); +static DEFINE_PER_CPU(int, cpu_irq); +  static int  armpmu_map_cache_event(const unsigned (*cache_map)  				      [PERF_COUNT_HW_CACHE_MAX] @@ -320,17 +322,9 @@ validate_group(struct perf_event *event)  	return 0;  } -static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu) -{ -	struct platform_device *pdev = armpmu->plat_device; - -	return pdev ? 
dev_get_platdata(&pdev->dev) : NULL; -} -  static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)  {  	struct arm_pmu *armpmu; -	struct arm_pmu_platdata *plat;  	int ret;  	u64 start_clock, finish_clock; @@ -341,14 +335,11 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)  	 * dereference.  	 */  	armpmu = *(void **)dev; - -	plat = armpmu_get_platdata(armpmu); +	if (WARN_ON_ONCE(!armpmu)) +		return IRQ_NONE;  	start_clock = sched_clock(); -	if (plat && plat->handle_irq) -		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq); -	else -		ret = armpmu->handle_irq(irq, armpmu); +	ret = armpmu->handle_irq(irq, armpmu);  	finish_clock = sched_clock();  	perf_sample_event_took(finish_clock - start_clock); @@ -531,54 +522,41 @@ int perf_num_counters(void)  }  EXPORT_SYMBOL_GPL(perf_num_counters); -void armpmu_free_irq(struct arm_pmu *armpmu, int cpu) +static int armpmu_count_irq_users(const int irq)  { -	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events; -	int irq = per_cpu(hw_events->irq, cpu); +	int cpu, count = 0; -	if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs)) -		return; - -	if (irq_is_percpu_devid(irq)) { -		free_percpu_irq(irq, &hw_events->percpu_pmu); -		cpumask_clear(&armpmu->active_irqs); -		return; +	for_each_possible_cpu(cpu) { +		if (per_cpu(cpu_irq, cpu) == irq) +			count++;  	} -	free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu)); +	return count;  } -void armpmu_free_irqs(struct arm_pmu *armpmu) +void armpmu_free_irq(int irq, int cpu)  { -	int cpu; +	if (per_cpu(cpu_irq, cpu) == 0) +		return; +	if (WARN_ON(irq != per_cpu(cpu_irq, cpu))) +		return; + +	if (!irq_is_percpu_devid(irq)) +		free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu)); +	else if (armpmu_count_irq_users(irq) == 1) +		free_percpu_irq(irq, &cpu_armpmu); -	for_each_cpu(cpu, &armpmu->supported_cpus) -		armpmu_free_irq(armpmu, cpu); +	per_cpu(cpu_irq, cpu) = 0;  } -int armpmu_request_irq(struct arm_pmu *armpmu, int cpu) +int armpmu_request_irq(int irq, int 
cpu)  {  	int err = 0; -	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;  	const irq_handler_t handler = armpmu_dispatch_irq; -	int irq = per_cpu(hw_events->irq, cpu);  	if (!irq)  		return 0; -	if (irq_is_percpu_devid(irq) && cpumask_empty(&armpmu->active_irqs)) { -		err = request_percpu_irq(irq, handler, "arm-pmu", -					 &hw_events->percpu_pmu); -	} else if (irq_is_percpu_devid(irq)) { -		int other_cpu = cpumask_first(&armpmu->active_irqs); -		int other_irq = per_cpu(hw_events->irq, other_cpu); - -		if (irq != other_irq) { -			pr_warn("mismatched PPIs detected.\n"); -			err = -EINVAL; -			goto err_out; -		} -	} else { -		struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu); +	if (!irq_is_percpu_devid(irq)) {  		unsigned long irq_flags;  		err = irq_force_affinity(irq, cpumask_of(cpu)); @@ -589,22 +567,22 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)  			goto err_out;  		} -		if (platdata && platdata->irq_flags) { -			irq_flags = platdata->irq_flags; -		} else { -			irq_flags = IRQF_PERCPU | -				    IRQF_NOBALANCING | -				    IRQF_NO_THREAD; -		} +		irq_flags = IRQF_PERCPU | +			    IRQF_NOBALANCING | +			    IRQF_NO_THREAD; +		irq_set_status_flags(irq, IRQ_NOAUTOEN);  		err = request_irq(irq, handler, irq_flags, "arm-pmu", -				  per_cpu_ptr(&hw_events->percpu_pmu, cpu)); +				  per_cpu_ptr(&cpu_armpmu, cpu)); +	} else if (armpmu_count_irq_users(irq) == 0) { +		err = request_percpu_irq(irq, handler, "arm-pmu", +					 &cpu_armpmu);  	}  	if (err)  		goto err_out; -	cpumask_set_cpu(cpu, &armpmu->active_irqs); +	per_cpu(cpu_irq, cpu) = irq;  	return 0;  err_out: @@ -612,19 +590,6 @@ err_out:  	return err;  } -int armpmu_request_irqs(struct arm_pmu *armpmu) -{ -	int cpu, err; - -	for_each_cpu(cpu, &armpmu->supported_cpus) { -		err = armpmu_request_irq(armpmu, cpu); -		if (err) -			break; -	} - -	return err; -} -  static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)  {  	struct pmu_hw_events __percpu *hw_events = 
pmu->hw_events; @@ -647,12 +612,14 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)  	if (pmu->reset)  		pmu->reset(pmu); +	per_cpu(cpu_armpmu, cpu) = pmu; +  	irq = armpmu_get_cpu_irq(pmu, cpu);  	if (irq) { -		if (irq_is_percpu_devid(irq)) { +		if (irq_is_percpu_devid(irq))  			enable_percpu_irq(irq, IRQ_TYPE_NONE); -			return 0; -		} +		else +			enable_irq(irq);  	}  	return 0; @@ -667,8 +634,14 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)  		return 0;  	irq = armpmu_get_cpu_irq(pmu, cpu); -	if (irq && irq_is_percpu_devid(irq)) -		disable_percpu_irq(irq); +	if (irq) { +		if (irq_is_percpu_devid(irq)) +			disable_percpu_irq(irq); +		else +			disable_irq_nosync(irq); +	} + +	per_cpu(cpu_armpmu, cpu) = NULL;  	return 0;  } @@ -800,18 +773,18 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)  					    &cpu_pmu->node);  } -struct arm_pmu *armpmu_alloc(void) +static struct arm_pmu *__armpmu_alloc(gfp_t flags)  {  	struct arm_pmu *pmu;  	int cpu; -	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL); +	pmu = kzalloc(sizeof(*pmu), flags);  	if (!pmu) {  		pr_info("failed to allocate PMU device!\n");  		goto out;  	} -	pmu->hw_events = alloc_percpu(struct pmu_hw_events); +	pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);  	if (!pmu->hw_events) {  		pr_info("failed to allocate per-cpu PMU data.\n");  		goto out_free_pmu; @@ -857,6 +830,17 @@ out:  	return NULL;  } +struct arm_pmu *armpmu_alloc(void) +{ +	return __armpmu_alloc(GFP_KERNEL); +} + +struct arm_pmu *armpmu_alloc_atomic(void) +{ +	return __armpmu_alloc(GFP_ATOMIC); +} + +  void armpmu_free(struct arm_pmu *pmu)  {  	free_percpu(pmu->hw_events); diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c index 705f1a390e31..0f197516d708 100644 --- a/drivers/perf/arm_pmu_acpi.c +++ b/drivers/perf/arm_pmu_acpi.c @@ -11,6 +11,8 @@  #include <linux/acpi.h>  #include <linux/cpumask.h>  #include <linux/init.h> +#include <linux/irq.h> 
+#include <linux/irqdesc.h>  #include <linux/percpu.h>  #include <linux/perf/arm_pmu.h> @@ -87,7 +89,13 @@ static int arm_pmu_acpi_parse_irqs(void)  			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);  		} +		/* +		 * Log and request the IRQ so the core arm_pmu code can manage +		 * it. We'll have to sanity-check IRQs later when we associate +		 * them with their PMUs. +		 */  		per_cpu(pmu_irqs, cpu) = irq; +		armpmu_request_irq(irq, cpu);  	}  	return 0; @@ -127,7 +135,7 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)  		return pmu;  	} -	pmu = armpmu_alloc(); +	pmu = armpmu_alloc_atomic();  	if (!pmu) {  		pr_warn("Unable to allocate PMU for CPU%d\n",  			smp_processor_id()); @@ -140,6 +148,35 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)  }  /* + * Check whether the new IRQ is compatible with those already associated with + * the PMU (e.g. we don't have mismatched PPIs). + */ +static bool pmu_irq_matches(struct arm_pmu *pmu, int irq) +{ +	struct pmu_hw_events __percpu *hw_events = pmu->hw_events; +	int cpu; + +	if (!irq) +		return true; + +	for_each_cpu(cpu, &pmu->supported_cpus) { +		int other_irq = per_cpu(hw_events->irq, cpu); +		if (!other_irq) +			continue; + +		if (irq == other_irq) +			continue; +		if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq)) +			continue; + +		pr_warn("mismatched PPIs detected\n"); +		return false; +	} + +	return true; +} + +/*   * This must run before the common arm_pmu hotplug logic, so that we can   * associate a CPU and its interrupt before the common code tries to manage the   * affinity and so on. @@ -164,19 +201,14 @@ static int arm_pmu_acpi_cpu_starting(unsigned int cpu)  	if (!pmu)  		return -ENOMEM; -	cpumask_set_cpu(cpu, &pmu->supported_cpus); -  	per_cpu(probed_pmus, cpu) = pmu; -	/* -	 * Log and request the IRQ so the core arm_pmu code can manage it.  In -	 * some situations (e.g. mismatched PPIs), we may fail to request the -	 * IRQ. 
However, it may be too late for us to do anything about it. -	 * The common ARM PMU code will log a warning in this case. -	 */ -	hw_events = pmu->hw_events; -	per_cpu(hw_events->irq, cpu) = irq; -	armpmu_request_irq(pmu, cpu); +	if (pmu_irq_matches(pmu, irq)) { +		hw_events = pmu->hw_events; +		per_cpu(hw_events->irq, cpu) = irq; +	} + +	cpumask_set_cpu(cpu, &pmu->supported_cpus);  	/*  	 * Ideally, we'd probe the PMU here when we find the first matching @@ -247,11 +279,6 @@ static int arm_pmu_acpi_init(void)  	if (acpi_disabled)  		return 0; -	/* -	 * We can't request IRQs yet, since we don't know the cookie value -	 * until we know which CPUs share the same logical PMU. We'll handle -	 * that in arm_pmu_acpi_cpu_starting(). -	 */  	ret = arm_pmu_acpi_parse_irqs();  	if (ret)  		return ret; diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c index 46501cc79fd7..7729eda5909d 100644 --- a/drivers/perf/arm_pmu_platform.c +++ b/drivers/perf/arm_pmu_platform.c @@ -127,13 +127,6 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)  			pdev->dev.of_node);  	} -	/* -	 * Some platforms have all PMU IRQs OR'd into a single IRQ, with a -	 * special platdata function that attempts to demux them. 
-	 */ -	if (dev_get_platdata(&pdev->dev)) -		cpumask_setall(&pmu->supported_cpus); -  	for (i = 0; i < num_irqs; i++) {  		int cpu, irq; @@ -164,6 +157,36 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)  	return 0;  } +static int armpmu_request_irqs(struct arm_pmu *armpmu) +{ +	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events; +	int cpu, err; + +	for_each_cpu(cpu, &armpmu->supported_cpus) { +		int irq = per_cpu(hw_events->irq, cpu); +		if (!irq) +			continue; + +		err = armpmu_request_irq(irq, cpu); +		if (err) +			break; +	} + +	return err; +} + +static void armpmu_free_irqs(struct arm_pmu *armpmu) +{ +	int cpu; +	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events; + +	for_each_cpu(cpu, &armpmu->supported_cpus) { +		int irq = per_cpu(hw_events->irq, cpu); + +		armpmu_free_irq(irq, cpu); +	} +} +  int arm_pmu_device_probe(struct platform_device *pdev,  			 const struct of_device_id *of_table,  			 const struct pmu_probe_info *probe_table) diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c index c5ff4525edef..c5493ea51282 100644 --- a/drivers/phy/qualcomm/phy-qcom-ufs.c +++ b/drivers/phy/qualcomm/phy-qcom-ufs.c @@ -675,3 +675,8 @@ int ufs_qcom_phy_power_off(struct phy *generic_phy)  	return 0;  }  EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off); + +MODULE_AUTHOR("Yaniv Gardi <ygardi@codeaurora.org>"); +MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>"); +MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c index 1fda9d6c7ea3..4b91ff74779b 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-axg.c +++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c @@ -716,7 +716,7 @@ static const char * const uart_b_groups[] = {  	"uart_tx_b_x", "uart_rx_b_x", "uart_cts_b_x", "uart_rts_b_x",  }; -static const char * const uart_ao_b_gpioz_groups[] = { +static const char * const 
uart_ao_b_z_groups[] = {  	"uart_ao_tx_b_z", "uart_ao_rx_b_z",  	"uart_ao_cts_b_z", "uart_ao_rts_b_z",  }; @@ -855,7 +855,7 @@ static struct meson_pmx_func meson_axg_periphs_functions[] = {  	FUNCTION(nand),  	FUNCTION(uart_a),  	FUNCTION(uart_b), -	FUNCTION(uart_ao_b_gpioz), +	FUNCTION(uart_ao_b_z),  	FUNCTION(i2c0),  	FUNCTION(i2c1),  	FUNCTION(i2c2), diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c index c32399faff57..90c274490181 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c @@ -124,7 +124,7 @@ static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = {  	EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gph3", 0x0c),  }; -const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = {  	{  		/* pin-controller instance 0 data */  		.pin_banks	= s5pv210_pin_bank, @@ -137,6 +137,11 @@ const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = {  	},  }; +const struct samsung_pinctrl_of_match_data s5pv210_of_data __initconst = { +	.ctrl		= s5pv210_pin_ctrl, +	.num_ctrl	= ARRAY_SIZE(s5pv210_pin_ctrl), +}; +  /* Pad retention control code for accessing PMU regmap */  static atomic_t exynos_shared_retention_refcnt; @@ -199,7 +204,7 @@ static const struct samsung_retention_data exynos3250_retention_data __initconst   * Samsung pinctrl driver data for Exynos3250 SoC. Exynos3250 SoC includes   * two gpio/pin-mux/pinconfig controllers.   
*/ -const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = {  	{  		/* pin-controller instance 0 data */  		.pin_banks	= exynos3250_pin_banks0, @@ -220,6 +225,11 @@ const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = {  	},  }; +const struct samsung_pinctrl_of_match_data exynos3250_of_data __initconst = { +	.ctrl		= exynos3250_pin_ctrl, +	.num_ctrl	= ARRAY_SIZE(exynos3250_pin_ctrl), +}; +  /* pin banks of exynos4210 pin-controller 0 */  static const struct samsung_pin_bank_data exynos4210_pin_banks0[] __initconst = {  	EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), @@ -303,7 +313,7 @@ static const struct samsung_retention_data exynos4_audio_retention_data __initco   * Samsung pinctrl driver data for Exynos4210 SoC. Exynos4210 SoC includes   * three gpio/pin-mux/pinconfig controllers.   */ -const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = {  	{  		/* pin-controller instance 0 data */  		.pin_banks	= exynos4210_pin_banks0, @@ -329,6 +339,11 @@ const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = {  	},  }; +const struct samsung_pinctrl_of_match_data exynos4210_of_data __initconst = { +	.ctrl		= exynos4210_pin_ctrl, +	.num_ctrl	= ARRAY_SIZE(exynos4210_pin_ctrl), +}; +  /* pin banks of exynos4x12 pin-controller 0 */  static const struct samsung_pin_bank_data exynos4x12_pin_banks0[] __initconst = {  	EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), @@ -391,7 +406,7 @@ static const struct samsung_pin_bank_data exynos4x12_pin_banks3[] __initconst =   * Samsung pinctrl driver data for Exynos4x12 SoC. Exynos4x12 SoC includes   * four gpio/pin-mux/pinconfig controllers.   
*/ -const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = {  	{  		/* pin-controller instance 0 data */  		.pin_banks	= exynos4x12_pin_banks0, @@ -427,6 +442,11 @@ const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = {  	},  }; +const struct samsung_pinctrl_of_match_data exynos4x12_of_data __initconst = { +	.ctrl		= exynos4x12_pin_ctrl, +	.num_ctrl	= ARRAY_SIZE(exynos4x12_pin_ctrl), +}; +  /* pin banks of exynos5250 pin-controller 0 */  static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst = {  	EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), @@ -487,7 +507,7 @@ static const struct samsung_pin_bank_data exynos5250_pin_banks3[] __initconst =   * Samsung pinctrl driver data for Exynos5250 SoC. Exynos5250 SoC includes   * four gpio/pin-mux/pinconfig controllers.   */ -const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = {  	{  		/* pin-controller instance 0 data */  		.pin_banks	= exynos5250_pin_banks0, @@ -523,6 +543,11 @@ const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = {  	},  }; +const struct samsung_pinctrl_of_match_data exynos5250_of_data __initconst = { +	.ctrl		= exynos5250_pin_ctrl, +	.num_ctrl	= ARRAY_SIZE(exynos5250_pin_ctrl), +}; +  /* pin banks of exynos5260 pin-controller 0 */  static const struct samsung_pin_bank_data exynos5260_pin_banks0[] __initconst = {  	EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00), @@ -567,7 +592,7 @@ static const struct samsung_pin_bank_data exynos5260_pin_banks2[] __initconst =   * Samsung pinctrl driver data for Exynos5260 SoC. Exynos5260 SoC includes   * three gpio/pin-mux/pinconfig controllers.   
*/ -const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = {  	{  		/* pin-controller instance 0 data */  		.pin_banks	= exynos5260_pin_banks0, @@ -587,6 +612,11 @@ const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = {  	},  }; +const struct samsung_pinctrl_of_match_data exynos5260_of_data __initconst = { +	.ctrl		= exynos5260_pin_ctrl, +	.num_ctrl	= ARRAY_SIZE(exynos5260_pin_ctrl), +}; +  /* pin banks of exynos5410 pin-controller 0 */  static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = {  	EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), @@ -657,7 +687,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks3[] __initconst =   * Samsung pinctrl driver data for Exynos5410 SoC. Exynos5410 SoC includes   * four gpio/pin-mux/pinconfig controllers.   */ -const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = {  	{  		/* pin-controller instance 0 data */  		.pin_banks	= exynos5410_pin_banks0, @@ -690,6 +720,11 @@ const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = {  	},  }; +const struct samsung_pinctrl_of_match_data exynos5410_of_data __initconst = { +	.ctrl		= exynos5410_pin_ctrl, +	.num_ctrl	= ARRAY_SIZE(exynos5410_pin_ctrl), +}; +  /* pin banks of exynos5420 pin-controller 0 */  static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst = {  	EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00), @@ -774,7 +809,7 @@ static const struct samsung_retention_data exynos5420_retention_data __initconst   * Samsung pinctrl driver data for Exynos5420 SoC. Exynos5420 SoC includes   * four gpio/pin-mux/pinconfig controllers.   
*/ -const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {  	{  		/* pin-controller instance 0 data */  		.pin_banks	= exynos5420_pin_banks0, @@ -808,3 +843,8 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {  		.retention_data	= &exynos4_audio_retention_data,  	},  }; + +const struct samsung_pinctrl_of_match_data exynos5420_of_data __initconst = { +	.ctrl		= exynos5420_pin_ctrl, +	.num_ctrl	= ARRAY_SIZE(exynos5420_pin_ctrl), +}; diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c index fc8f7833bec0..71c9d1d9f345 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c @@ -175,7 +175,7 @@ static const struct samsung_retention_data exynos5433_fsys_retention_data __init   * Samsung pinctrl driver data for Exynos5433 SoC. Exynos5433 SoC includes   * ten gpio/pin-mux/pinconfig controllers.   
*/ -const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = {  	{  		/* pin-controller instance 0 data */  		.pin_banks	= exynos5433_pin_banks0, @@ -260,6 +260,11 @@ const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = {  	},  }; +const struct samsung_pinctrl_of_match_data exynos5433_of_data __initconst = { +	.ctrl		= exynos5433_pin_ctrl, +	.num_ctrl	= ARRAY_SIZE(exynos5433_pin_ctrl), +}; +  /* pin banks of exynos7 pin-controller - ALIVE */  static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = {  	EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), @@ -339,7 +344,7 @@ static const struct samsung_pin_bank_data exynos7_pin_banks9[] __initconst = {  	EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),  }; -const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = {  	{  		/* pin-controller instance 0 Alive data */  		.pin_banks	= exynos7_pin_banks0, @@ -392,3 +397,8 @@ const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = {  		.eint_gpio_init = exynos_eint_gpio_init,  	},  }; + +const struct samsung_pinctrl_of_match_data exynos7_of_data __initconst = { +	.ctrl		= exynos7_pin_ctrl, +	.num_ctrl	= ARRAY_SIZE(exynos7_pin_ctrl), +}; diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c index 10187cb0e9b9..7e824e4d20f4 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c @@ -565,7 +565,7 @@ static const struct samsung_pin_bank_data s3c2412_pin_banks[] __initconst = {  	PIN_BANK_2BIT(13, 0x080, "gpj"),  }; -const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = {  	{  		.pin_banks	= s3c2412_pin_banks,  		.nr_banks	= ARRAY_SIZE(s3c2412_pin_banks), @@ -573,6 +573,11 @@ const struct samsung_pin_ctrl 
s3c2412_pin_ctrl[] __initconst = {  	},  }; +const struct samsung_pinctrl_of_match_data s3c2412_of_data __initconst = { +	.ctrl		= s3c2412_pin_ctrl, +	.num_ctrl	= ARRAY_SIZE(s3c2412_pin_ctrl), +}; +  static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = {  	PIN_BANK_A(27, 0x000, "gpa"),  	PIN_BANK_2BIT(11, 0x010, "gpb"), @@ -587,7 +592,7 @@ static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = {  	PIN_BANK_2BIT(2, 0x100, "gpm"),  }; -const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = {  	{  		.pin_banks	= s3c2416_pin_banks,  		.nr_banks	= ARRAY_SIZE(s3c2416_pin_banks), @@ -595,6 +600,11 @@ const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = {  	},  }; +const struct samsung_pinctrl_of_match_data s3c2416_of_data __initconst = { +	.ctrl		= s3c2416_pin_ctrl, +	.num_ctrl	= ARRAY_SIZE(s3c2416_pin_ctrl), +}; +  static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = {  	PIN_BANK_A(25, 0x000, "gpa"),  	PIN_BANK_2BIT(11, 0x010, "gpb"), @@ -607,7 +617,7 @@ static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = {  	PIN_BANK_2BIT(13, 0x0d0, "gpj"),  }; -const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = {  	{  		.pin_banks	= s3c2440_pin_banks,  		.nr_banks	= ARRAY_SIZE(s3c2440_pin_banks), @@ -615,6 +625,11 @@ const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = {  	},  }; +const struct samsung_pinctrl_of_match_data s3c2440_of_data __initconst = { +	.ctrl		= s3c2440_pin_ctrl, +	.num_ctrl	= ARRAY_SIZE(s3c2440_pin_ctrl), +}; +  static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = {  	PIN_BANK_A(28, 0x000, "gpa"),  	PIN_BANK_2BIT(11, 0x010, "gpb"), @@ -630,10 +645,15 @@ static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = {  	PIN_BANK_2BIT(2, 0x100, "gpm"),  }; 
-const struct samsung_pin_ctrl s3c2450_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl s3c2450_pin_ctrl[] __initconst = {  	{  		.pin_banks	= s3c2450_pin_banks,  		.nr_banks	= ARRAY_SIZE(s3c2450_pin_banks),  		.eint_wkup_init = s3c24xx_eint_init,  	},  }; + +const struct samsung_pinctrl_of_match_data s3c2450_of_data __initconst = { +	.ctrl		= s3c2450_pin_ctrl, +	.num_ctrl	= ARRAY_SIZE(s3c2450_pin_ctrl), +}; diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c index 679628ac4b31..288e6567ceb1 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c @@ -789,7 +789,7 @@ static const struct samsung_pin_bank_data s3c64xx_pin_banks0[] __initconst = {   * Samsung pinctrl driver data for S3C64xx SoC. S3C64xx SoC includes   * one gpio/pin-mux/pinconfig controller.   */ -const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = {  	{  		/* pin-controller instance 1 data */  		.pin_banks	= s3c64xx_pin_banks0, @@ -798,3 +798,8 @@ const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = {  		.eint_wkup_init = s3c64xx_eint_eint0_init,  	},  }; + +const struct samsung_pinctrl_of_match_data s3c64xx_of_data __initconst = { +	.ctrl		= s3c64xx_pin_ctrl, +	.num_ctrl	= ARRAY_SIZE(s3c64xx_pin_ctrl), +}; diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index da58e4554137..336e88d7bdb9 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -942,12 +942,33 @@ static int samsung_gpiolib_register(struct platform_device *pdev,  	return 0;  } +static const struct samsung_pin_ctrl * +samsung_pinctrl_get_soc_data_for_of_alias(struct platform_device *pdev) +{ +	struct device_node *node = pdev->dev.of_node; +	const struct samsung_pinctrl_of_match_data *of_data; +	int id; + +	id = of_alias_get_id(node, 
"pinctrl"); +	if (id < 0) { +		dev_err(&pdev->dev, "failed to get alias id\n"); +		return NULL; +	} + +	of_data = of_device_get_match_data(&pdev->dev); +	if (id >= of_data->num_ctrl) { +		dev_err(&pdev->dev, "invalid alias id %d\n", id); +		return NULL; +	} + +	return &(of_data->ctrl[id]); +} +  /* retrieve the soc specific data */  static const struct samsung_pin_ctrl *  samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,  			     struct platform_device *pdev)  { -	int id;  	struct device_node *node = pdev->dev.of_node;  	struct device_node *np;  	const struct samsung_pin_bank_data *bdata; @@ -957,13 +978,9 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,  	void __iomem *virt_base[SAMSUNG_PINCTRL_NUM_RESOURCES];  	unsigned int i; -	id = of_alias_get_id(node, "pinctrl"); -	if (id < 0) { -		dev_err(&pdev->dev, "failed to get alias id\n"); +	ctrl = samsung_pinctrl_get_soc_data_for_of_alias(pdev); +	if (!ctrl)  		return ERR_PTR(-ENOENT); -	} -	ctrl = of_device_get_match_data(&pdev->dev); -	ctrl += id;  	d->suspend = ctrl->suspend;  	d->resume = ctrl->resume; @@ -1188,41 +1205,41 @@ static int __maybe_unused samsung_pinctrl_resume(struct device *dev)  static const struct of_device_id samsung_pinctrl_dt_match[] = {  #ifdef CONFIG_PINCTRL_EXYNOS_ARM  	{ .compatible = "samsung,exynos3250-pinctrl", -		.data = exynos3250_pin_ctrl }, +		.data = &exynos3250_of_data },  	{ .compatible = "samsung,exynos4210-pinctrl", -		.data = exynos4210_pin_ctrl }, +		.data = &exynos4210_of_data },  	{ .compatible = "samsung,exynos4x12-pinctrl", -		.data = exynos4x12_pin_ctrl }, +		.data = &exynos4x12_of_data },  	{ .compatible = "samsung,exynos5250-pinctrl", -		.data = exynos5250_pin_ctrl }, +		.data = &exynos5250_of_data },  	{ .compatible = "samsung,exynos5260-pinctrl", -		.data = exynos5260_pin_ctrl }, +		.data = &exynos5260_of_data },  	{ .compatible = "samsung,exynos5410-pinctrl", -		.data = exynos5410_pin_ctrl }, +		.data = &exynos5410_of_data },  	{ 
.compatible = "samsung,exynos5420-pinctrl", -		.data = exynos5420_pin_ctrl }, +		.data = &exynos5420_of_data },  	{ .compatible = "samsung,s5pv210-pinctrl", -		.data = s5pv210_pin_ctrl }, +		.data = &s5pv210_of_data },  #endif  #ifdef CONFIG_PINCTRL_EXYNOS_ARM64  	{ .compatible = "samsung,exynos5433-pinctrl", -		.data = exynos5433_pin_ctrl }, +		.data = &exynos5433_of_data },  	{ .compatible = "samsung,exynos7-pinctrl", -		.data = exynos7_pin_ctrl }, +		.data = &exynos7_of_data },  #endif  #ifdef CONFIG_PINCTRL_S3C64XX  	{ .compatible = "samsung,s3c64xx-pinctrl", -		.data = s3c64xx_pin_ctrl }, +		.data = &s3c64xx_of_data },  #endif  #ifdef CONFIG_PINCTRL_S3C24XX  	{ .compatible = "samsung,s3c2412-pinctrl", -		.data = s3c2412_pin_ctrl }, +		.data = &s3c2412_of_data },  	{ .compatible = "samsung,s3c2416-pinctrl", -		.data = s3c2416_pin_ctrl }, +		.data = &s3c2416_of_data },  	{ .compatible = "samsung,s3c2440-pinctrl", -		.data = s3c2440_pin_ctrl }, +		.data = &s3c2440_of_data },  	{ .compatible = "samsung,s3c2450-pinctrl", -		.data = s3c2450_pin_ctrl }, +		.data = &s3c2450_of_data },  #endif  	{},  }; diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h index e204f609823b..f0cda9424dfe 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.h +++ b/drivers/pinctrl/samsung/pinctrl-samsung.h @@ -282,6 +282,16 @@ struct samsung_pinctrl_drv_data {  };  /** + * struct samsung_pinctrl_of_match_data: OF match device specific configuration data. + * @ctrl: array of pin controller data. + * @num_ctrl: size of array @ctrl. + */ +struct samsung_pinctrl_of_match_data { +	const struct samsung_pin_ctrl	*ctrl; +	unsigned int			num_ctrl; +}; + +/**   * struct samsung_pin_group: represent group of pins of a pinmux function.   * @name: name of the pin group, used to lookup the group.   * @pins: the pins included in this group. 
@@ -309,20 +319,20 @@ struct samsung_pmx_func {  };  /* list of all exported SoC specific data */ -extern const struct samsung_pin_ctrl exynos3250_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos4210_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos4x12_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos5250_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos5260_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos5410_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos5420_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos5433_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos7_pin_ctrl[]; -extern const struct samsung_pin_ctrl s3c64xx_pin_ctrl[]; -extern const struct samsung_pin_ctrl s3c2412_pin_ctrl[]; -extern const struct samsung_pin_ctrl s3c2416_pin_ctrl[]; -extern const struct samsung_pin_ctrl s3c2440_pin_ctrl[]; -extern const struct samsung_pin_ctrl s3c2450_pin_ctrl[]; -extern const struct samsung_pin_ctrl s5pv210_pin_ctrl[]; +extern const struct samsung_pinctrl_of_match_data exynos3250_of_data; +extern const struct samsung_pinctrl_of_match_data exynos4210_of_data; +extern const struct samsung_pinctrl_of_match_data exynos4x12_of_data; +extern const struct samsung_pinctrl_of_match_data exynos5250_of_data; +extern const struct samsung_pinctrl_of_match_data exynos5260_of_data; +extern const struct samsung_pinctrl_of_match_data exynos5410_of_data; +extern const struct samsung_pinctrl_of_match_data exynos5420_of_data; +extern const struct samsung_pinctrl_of_match_data exynos5433_of_data; +extern const struct samsung_pinctrl_of_match_data exynos7_of_data; +extern const struct samsung_pinctrl_of_match_data s3c64xx_of_data; +extern const struct samsung_pinctrl_of_match_data s3c2412_of_data; +extern const struct samsung_pinctrl_of_match_data s3c2416_of_data; +extern const struct samsung_pinctrl_of_match_data s3c2440_of_data; +extern const struct samsung_pinctrl_of_match_data s3c2450_of_data; +extern const struct 
samsung_pinctrl_of_match_data s5pv210_of_data;  #endif /* __PINCTRL_SAMSUNG_H */ diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c index 18aeee592fdc..35951e7b89d2 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c @@ -1538,7 +1538,6 @@ static const struct sh_pfc_pin pinmux_pins[] = {  	SH_PFC_PIN_NAMED_CFG('B', 18, AVB_TD1, CFG_FLAGS),  	SH_PFC_PIN_NAMED_CFG('B', 19, AVB_RXC, CFG_FLAGS),  	SH_PFC_PIN_NAMED_CFG('C',  1, PRESETOUT#, CFG_FLAGS), -	SH_PFC_PIN_NAMED_CFG('F',  1, CLKOUT, CFG_FLAGS),  	SH_PFC_PIN_NAMED_CFG('H', 37, MLB_REF, CFG_FLAGS),  	SH_PFC_PIN_NAMED_CFG('V',  3, QSPI1_SPCLK, CFG_FLAGS),  	SH_PFC_PIN_NAMED_CFG('V',  5, QSPI1_SSL, CFG_FLAGS), diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c index 6dec6ab13300..d8599736a41a 100644 --- a/drivers/platform/chrome/chromeos_laptop.c +++ b/drivers/platform/chrome/chromeos_laptop.c @@ -423,7 +423,7 @@ static int chromeos_laptop_probe(struct platform_device *pdev)  	return ret;  } -static const struct chromeos_laptop samsung_series_5_550 = { +static struct chromeos_laptop samsung_series_5_550 = {  	.i2c_peripherals = {  		/* Touchpad. */  		{ .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, @@ -432,14 +432,14 @@ static const struct chromeos_laptop samsung_series_5_550 = {  	},  }; -static const struct chromeos_laptop samsung_series_5 = { +static struct chromeos_laptop samsung_series_5 = {  	.i2c_peripherals = {  		/* Light Sensor. */  		{ .add = setup_tsl2583_als, I2C_ADAPTER_SMBUS },  	},  }; -static const struct chromeos_laptop chromebook_pixel = { +static struct chromeos_laptop chromebook_pixel = {  	.i2c_peripherals = {  		/* Touch Screen. 
*/  		{ .add = setup_atmel_1664s_ts, I2C_ADAPTER_PANEL }, @@ -450,14 +450,14 @@ static const struct chromeos_laptop chromebook_pixel = {  	},  }; -static const struct chromeos_laptop hp_chromebook_14 = { +static struct chromeos_laptop hp_chromebook_14 = {  	.i2c_peripherals = {  		/* Touchpad. */  		{ .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },  	},  }; -static const struct chromeos_laptop dell_chromebook_11 = { +static struct chromeos_laptop dell_chromebook_11 = {  	.i2c_peripherals = {  		/* Touchpad. */  		{ .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, @@ -466,28 +466,28 @@ static const struct chromeos_laptop dell_chromebook_11 = {  	},  }; -static const struct chromeos_laptop toshiba_cb35 = { +static struct chromeos_laptop toshiba_cb35 = {  	.i2c_peripherals = {  		/* Touchpad. */  		{ .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },  	},  }; -static const struct chromeos_laptop acer_c7_chromebook = { +static struct chromeos_laptop acer_c7_chromebook = {  	.i2c_peripherals = {  		/* Touchpad. */  		{ .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },  	},  }; -static const struct chromeos_laptop acer_ac700 = { +static struct chromeos_laptop acer_ac700 = {  	.i2c_peripherals = {  		/* Light Sensor. */  		{ .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },  	},  }; -static const struct chromeos_laptop acer_c720 = { +static struct chromeos_laptop acer_c720 = {  	.i2c_peripherals = {  		/* Touchscreen. */  		{ .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 }, @@ -500,14 +500,14 @@ static const struct chromeos_laptop acer_c720 = {  	},  }; -static const struct chromeos_laptop hp_pavilion_14_chromebook = { +static struct chromeos_laptop hp_pavilion_14_chromebook = {  	.i2c_peripherals = {  		/* Touchpad. */  		{ .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },  	},  }; -static const struct chromeos_laptop cr48 = { +static struct chromeos_laptop cr48 = {  	.i2c_peripherals = {  		/* Light Sensor. 
*/  		{ .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS }, diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 9a8f96465cdc..51ebc5a6053f 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -105,31 +105,45 @@ config ASUS_LAPTOP  	  If you have an ACPI-compatible ASUS laptop, say Y or M here. +# +# The DELL_SMBIOS driver depends on ACPI_WMI and/or DCDBAS if those +# backends are selected. The "depends" line prevents a configuration +# where DELL_SMBIOS=y while either of those dependencies =m. +#  config DELL_SMBIOS -	tristate +	tristate "Dell SMBIOS driver" +	depends on DCDBAS || DCDBAS=n +	depends on ACPI_WMI || ACPI_WMI=n +	---help--- +	This provides support for the Dell SMBIOS calling interface. +	If you have a Dell computer you should enable this option. + +	Be sure to select at least one backend for it to work properly.  config DELL_SMBIOS_WMI -	tristate "Dell SMBIOS calling interface (WMI implementation)" +	bool "Dell SMBIOS driver WMI backend" +	default y  	depends on ACPI_WMI  	select DELL_WMI_DESCRIPTOR -	select DELL_SMBIOS +	depends on DELL_SMBIOS  	---help---  	This provides an implementation for the Dell SMBIOS calling interface  	communicated over ACPI-WMI. -	If you have a Dell computer from >2007 you should say Y or M here. +	If you have a Dell computer from >2007 you should say Y here.  	If you aren't sure and this module doesn't work for your computer  	it just won't load.  config DELL_SMBIOS_SMM -	tristate "Dell SMBIOS calling interface (SMM implementation)" +	bool "Dell SMBIOS driver SMM backend" +	default y  	depends on DCDBAS -	select DELL_SMBIOS +	depends on DELL_SMBIOS  	---help---  	This provides an implementation for the Dell SMBIOS calling interface  	communicated over SMI/SMM. -	If you have a Dell computer from <=2017 you should say Y or M here. +	If you have a Dell computer from <=2017 you should say Y here.  	
If you aren't sure and this module doesn't work for your computer  	it just won't load. diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index c388608ad2a3..2ba6cb795338 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -13,8 +13,9 @@ obj-$(CONFIG_MSI_LAPTOP)	+= msi-laptop.o  obj-$(CONFIG_ACPI_CMPC)		+= classmate-laptop.o  obj-$(CONFIG_COMPAL_LAPTOP)	+= compal-laptop.o  obj-$(CONFIG_DELL_SMBIOS)	+= dell-smbios.o -obj-$(CONFIG_DELL_SMBIOS_WMI)	+= dell-smbios-wmi.o -obj-$(CONFIG_DELL_SMBIOS_SMM)	+= dell-smbios-smm.o +dell-smbios-objs		:= dell-smbios-base.o +dell-smbios-$(CONFIG_DELL_SMBIOS_WMI)	+= dell-smbios-wmi.o +dell-smbios-$(CONFIG_DELL_SMBIOS_SMM)	+= dell-smbios-smm.o  obj-$(CONFIG_DELL_LAPTOP)	+= dell-laptop.o  obj-$(CONFIG_DELL_WMI)		+= dell-wmi.o  obj-$(CONFIG_DELL_WMI_DESCRIPTOR)	+= dell-wmi-descriptor.o diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 2a68f59d2228..c52c6723374b 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -127,24 +127,6 @@ static const struct dmi_system_id dell_device_table[] __initconst = {  		},  	},  	{ -		.matches = { -			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -			DMI_MATCH(DMI_CHASSIS_TYPE, "30"), /*Tablet*/ -		}, -	}, -	{ -		.matches = { -			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -			DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /*Convertible*/ -		}, -	}, -	{ -		.matches = { -			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -			DMI_MATCH(DMI_CHASSIS_TYPE, "32"), /*Detachable*/ -		}, -	}, -	{  		.ident = "Dell Computer Corporation",  		.matches = {  			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), @@ -1279,7 +1261,7 @@ static int kbd_get_state(struct kbd_state *state)  	struct calling_interface_buffer buffer;  	int ret; -	dell_fill_request(&buffer, 0, 0, 0, 0); +	dell_fill_request(&buffer, 0x1, 0, 0, 0);  	ret = dell_send_request(&buffer,  				CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);  	if 
(ret) diff --git a/drivers/platform/x86/dell-smbios.c b/drivers/platform/x86/dell-smbios-base.c index 8541cde4cb7d..2485c80a9fdd 100644 --- a/drivers/platform/x86/dell-smbios.c +++ b/drivers/platform/x86/dell-smbios-base.c @@ -36,7 +36,7 @@ static DEFINE_MUTEX(smbios_mutex);  struct smbios_device {  	struct list_head list;  	struct device *device; -	int (*call_fn)(struct calling_interface_buffer *); +	int (*call_fn)(struct calling_interface_buffer *arg);  };  struct smbios_call { @@ -352,8 +352,10 @@ static void __init parse_da_table(const struct dmi_header *dm)  	struct calling_interface_structure *table =  		container_of(dm, struct calling_interface_structure, header); -	/* 4 bytes of table header, plus 7 bytes of Dell header, plus at least -	   6 bytes of entry */ +	/* +	 * 4 bytes of table header, plus 7 bytes of Dell header +	 * plus at least 6 bytes of entry +	 */  	if (dm->length < 17)  		return; @@ -554,7 +556,7 @@ static void free_group(struct platform_device *pdev)  static int __init dell_smbios_init(void)  {  	const struct dmi_device *valid; -	int ret; +	int ret, wmi, smm;  	valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL);  	if (!valid) { @@ -589,8 +591,24 @@ static int __init dell_smbios_init(void)  	if (ret)  		goto fail_create_group; +	/* register backends */ +	wmi = init_dell_smbios_wmi(); +	if (wmi) +		pr_debug("Failed to initialize WMI backend: %d\n", wmi); +	smm = init_dell_smbios_smm(); +	if (smm) +		pr_debug("Failed to initialize SMM backend: %d\n", smm); +	if (wmi && smm) { +		pr_err("No SMBIOS backends available (wmi: %d, smm: %d)\n", +			wmi, smm); +		goto fail_sysfs; +	} +  	return 0; +fail_sysfs: +	free_group(platform_device); +  fail_create_group:  	platform_device_del(platform_device); @@ -607,6 +625,8 @@ fail_platform_driver:  static void __exit dell_smbios_exit(void)  { +	exit_dell_smbios_wmi(); +	exit_dell_smbios_smm();  	mutex_lock(&smbios_mutex);  	if (platform_device) {  		free_group(platform_device); @@ 
-617,11 +637,12 @@ static void __exit dell_smbios_exit(void)  	mutex_unlock(&smbios_mutex);  } -subsys_initcall(dell_smbios_init); +module_init(dell_smbios_init);  module_exit(dell_smbios_exit);  MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");  MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");  MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>"); +MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");  MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS");  MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/dell-smbios-smm.c b/drivers/platform/x86/dell-smbios-smm.c index 89f65c4651a0..e9e9da556318 100644 --- a/drivers/platform/x86/dell-smbios-smm.c +++ b/drivers/platform/x86/dell-smbios-smm.c @@ -58,7 +58,7 @@ static const struct dmi_system_id dell_device_table[] __initconst = {  };  MODULE_DEVICE_TABLE(dmi, dell_device_table); -static void __init parse_da_table(const struct dmi_header *dm) +static void parse_da_table(const struct dmi_header *dm)  {  	struct calling_interface_structure *table =  		container_of(dm, struct calling_interface_structure, header); @@ -73,7 +73,7 @@ static void __init parse_da_table(const struct dmi_header *dm)  	da_command_code = table->cmdIOCode;  } -static void __init find_cmd_address(const struct dmi_header *dm, void *dummy) +static void find_cmd_address(const struct dmi_header *dm, void *dummy)  {  	switch (dm->type) {  	case 0xda: /* Calling interface */ @@ -128,7 +128,7 @@ static bool test_wsmt_enabled(void)  	return false;  } -static int __init dell_smbios_smm_init(void) +int init_dell_smbios_smm(void)  {  	int ret;  	/* @@ -176,7 +176,7 @@ fail_platform_device_alloc:  	return ret;  } -static void __exit dell_smbios_smm_exit(void) +void exit_dell_smbios_smm(void)  {  	if (platform_device) {  		dell_smbios_unregister_device(&platform_device->dev); @@ -184,13 +184,3 @@ static void __exit dell_smbios_smm_exit(void)  		free_page((unsigned long)buffer);  	}  } - 
-subsys_initcall(dell_smbios_smm_init); -module_exit(dell_smbios_smm_exit); - -MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); -MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>"); -MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>"); -MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>"); -MODULE_DESCRIPTION("Dell SMBIOS communications over SMI"); -MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c index 609557aa5868..fbefedb1c172 100644 --- a/drivers/platform/x86/dell-smbios-wmi.c +++ b/drivers/platform/x86/dell-smbios-wmi.c @@ -228,7 +228,7 @@ static const struct wmi_device_id dell_smbios_wmi_id_table[] = {  	{ },  }; -static void __init parse_b1_table(const struct dmi_header *dm) +static void parse_b1_table(const struct dmi_header *dm)  {  	struct misc_bios_flags_structure *flags =  	container_of(dm, struct misc_bios_flags_structure, header); @@ -242,7 +242,7 @@ static void __init parse_b1_table(const struct dmi_header *dm)  		wmi_supported = 1;  } -static void __init find_b1(const struct dmi_header *dm, void *dummy) +static void find_b1(const struct dmi_header *dm, void *dummy)  {  	switch (dm->type) {  	case 0xb1: /* misc bios flags */ @@ -261,7 +261,7 @@ static struct wmi_driver dell_smbios_wmi_driver = {  	.filter_callback = dell_smbios_wmi_filter,  }; -static int __init init_dell_smbios_wmi(void) +int init_dell_smbios_wmi(void)  {  	dmi_walk(find_b1, NULL); @@ -271,15 +271,9 @@ static int __init init_dell_smbios_wmi(void)  	return wmi_driver_register(&dell_smbios_wmi_driver);  } -static void __exit exit_dell_smbios_wmi(void) +void exit_dell_smbios_wmi(void)  {  	wmi_driver_unregister(&dell_smbios_wmi_driver);  } -module_init(init_dell_smbios_wmi); -module_exit(exit_dell_smbios_wmi); -  MODULE_ALIAS("wmi:" DELL_WMI_SMBIOS_GUID); -MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>"); -MODULE_DESCRIPTION("Dell SMBIOS communications over WMI"); 
-MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/dell-smbios.h b/drivers/platform/x86/dell-smbios.h index 138d478d9adc..d8adaf959740 100644 --- a/drivers/platform/x86/dell-smbios.h +++ b/drivers/platform/x86/dell-smbios.h @@ -75,4 +75,29 @@ int dell_laptop_register_notifier(struct notifier_block *nb);  int dell_laptop_unregister_notifier(struct notifier_block *nb);  void dell_laptop_call_notifier(unsigned long action, void *data); -#endif +/* for the supported backends */ +#ifdef CONFIG_DELL_SMBIOS_WMI +int init_dell_smbios_wmi(void); +void exit_dell_smbios_wmi(void); +#else /* CONFIG_DELL_SMBIOS_WMI */ +static inline int init_dell_smbios_wmi(void) +{ +	return -ENODEV; +} +static inline void exit_dell_smbios_wmi(void) +{} +#endif /* CONFIG_DELL_SMBIOS_WMI */ + +#ifdef CONFIG_DELL_SMBIOS_SMM +int init_dell_smbios_smm(void); +void exit_dell_smbios_smm(void); +#else /* CONFIG_DELL_SMBIOS_SMM */ +static inline int init_dell_smbios_smm(void) +{ +	return -ENODEV; +} +static inline void exit_dell_smbios_smm(void) +{} +#endif /* CONFIG_DELL_SMBIOS_SMM */ + +#endif /* _DELL_SMBIOS_H_ */ diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index 2c9927430d85..8d102195a392 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c @@ -714,7 +714,7 @@ static int __init dell_wmi_init(void)  	return wmi_driver_register(&dell_wmi_driver);  } -module_init(dell_wmi_init); +late_initcall(dell_wmi_init);  static void __exit dell_wmi_exit(void)  { diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 5b6f18b18801..535199c9e6bc 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -113,7 +113,7 @@ MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");  /*   * ACPI Helpers   */ -#define IDEAPAD_EC_TIMEOUT (100) /* in ms */ +#define IDEAPAD_EC_TIMEOUT (200) /* in ms */  static int read_method_int(acpi_handle handle, const char *method, int 
*val)  { diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index d1a01311c1a2..5e3df194723e 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -376,6 +376,7 @@ static int intel_hid_remove(struct platform_device *device)  {  	acpi_handle handle = ACPI_HANDLE(&device->dev); +	device_init_wakeup(&device->dev, false);  	acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);  	intel_hid_set_enable(&device->dev, false);  	intel_button_array_enable(&device->dev, false); diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index b703d6f5b099..c13780b8dabb 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c @@ -7,6 +7,7 @@   */  #include <linux/acpi.h> +#include <linux/dmi.h>  #include <linux/input.h>  #include <linux/input/sparse-keymap.h>  #include <linux/kernel.h> @@ -97,9 +98,35 @@ out_unknown:  	dev_dbg(&device->dev, "unknown event index 0x%x\n", event);  } -static int intel_vbtn_probe(struct platform_device *device) +static void detect_tablet_mode(struct platform_device *device)  { +	const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); +	struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev); +	acpi_handle handle = ACPI_HANDLE(&device->dev);  	struct acpi_buffer vgbs_output = { ACPI_ALLOCATE_BUFFER, NULL }; +	union acpi_object *obj; +	acpi_status status; +	int m; + +	if (!(chassis_type && strcmp(chassis_type, "31") == 0)) +		goto out; + +	status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output); +	if (ACPI_FAILURE(status)) +		goto out; + +	obj = vgbs_output.pointer; +	if (!(obj && obj->type == ACPI_TYPE_INTEGER)) +		goto out; + +	m = !(obj->integer.value & TABLET_MODE_FLAG); +	input_report_switch(priv->input_dev, SW_TABLET_MODE, m); +out: +	kfree(vgbs_output.pointer); +} + +static int intel_vbtn_probe(struct platform_device *device) +{  	acpi_handle handle = ACPI_HANDLE(&device->dev);  	struct 
intel_vbtn_priv *priv;  	acpi_status status; @@ -122,22 +149,7 @@ static int intel_vbtn_probe(struct platform_device *device)  		return err;  	} -	/* -	 * VGBS being present and returning something means we have -	 * a tablet mode switch. -	 */ -	status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output); -	if (ACPI_SUCCESS(status)) { -		union acpi_object *obj = vgbs_output.pointer; - -		if (obj && obj->type == ACPI_TYPE_INTEGER) { -			int m = !(obj->integer.value & TABLET_MODE_FLAG); - -			input_report_switch(priv->input_dev, SW_TABLET_MODE, m); -		} -	} - -	kfree(vgbs_output.pointer); +	detect_tablet_mode(device);  	status = acpi_install_notify_handler(handle,  					     ACPI_DEVICE_NOTIFY, @@ -154,6 +166,7 @@ static int intel_vbtn_remove(struct platform_device *device)  {  	acpi_handle handle = ACPI_HANDLE(&device->dev); +	device_init_wakeup(&device->dev, false);  	acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);  	/* diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index daa68acbc900..8796211ef24a 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -933,7 +933,7 @@ static int wmi_dev_probe(struct device *dev)  			goto probe_failure;  		} -		buf = kmalloc(strlen(wdriver->driver.name) + 4, GFP_KERNEL); +		buf = kmalloc(strlen(wdriver->driver.name) + 5, GFP_KERNEL);  		if (!buf) {  			ret = -ENOMEM;  			goto probe_string_failure; @@ -945,7 +945,7 @@ static int wmi_dev_probe(struct device *dev)  		wblock->char_dev.mode = 0444;  		ret = misc_register(&wblock->char_dev);  		if (ret) { -			dev_warn(dev, "failed to register char dev: %d", ret); +			dev_warn(dev, "failed to register char dev: %d\n", ret);  			ret = -ENOMEM;  			goto probe_misc_failure;  		} @@ -1048,7 +1048,7 @@ static int wmi_create_device(struct device *wmi_bus_dev,  	if (result) {  		dev_warn(wmi_bus_dev, -			 "%s data block query control method not found", +			 "%s data block query control method not found\n",  			 method);  		
return result;  	} @@ -1198,7 +1198,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)  		retval = device_add(&wblock->dev.dev);  		if (retval) { -			dev_err(wmi_bus_dev, "failed to register %pULL\n", +			dev_err(wmi_bus_dev, "failed to register %pUL\n",  				wblock->gblock.guid);  			if (debug_event)  				wmi_method_enable(wblock, 0); diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index dd4708c58480..1fc0c0811da4 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -4310,7 +4310,7 @@ static int _regulator_resume_early(struct device *dev, void *data)  	rstate = regulator_get_suspend_state(rdev, *state);  	if (rstate == NULL) -		return -EINVAL; +		return 0;  	mutex_lock(&rdev->mutex); diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c index 72c8b3e1022b..e0a9c445ed67 100644 --- a/drivers/regulator/stm32-vrefbuf.c +++ b/drivers/regulator/stm32-vrefbuf.c @@ -51,7 +51,7 @@ static int stm32_vrefbuf_enable(struct regulator_dev *rdev)  	 * arbitrary timeout.  	 
*/  	ret = readl_poll_timeout(priv->base + STM32_VREFBUF_CSR, val, -				 !(val & STM32_VRR), 650, 10000); +				 val & STM32_VRR, 650, 10000);  	if (ret) {  		dev_err(&rdev->dev, "stm32 vrefbuf timed out!\n");  		val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index a7c15f0085e2..ecef8e73d40b 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -2581,8 +2581,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)  	case DASD_CQR_QUEUED:  		/* request was not started - just set to cleared */  		cqr->status = DASD_CQR_CLEARED; -		if (cqr->callback_data == DASD_SLEEPON_START_TAG) -			cqr->callback_data = DASD_SLEEPON_END_TAG;  		break;  	case DASD_CQR_IN_IO:  		/* request in IO - terminate IO and release again */ @@ -3902,9 +3900,12 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device)  		wait_event(dasd_flush_wq,  			   (cqr->status != DASD_CQR_CLEAR_PENDING)); -		/* mark sleepon requests as ended */ -		if (cqr->callback_data == DASD_SLEEPON_START_TAG) -			cqr->callback_data = DASD_SLEEPON_END_TAG; +		/* +		 * requeue requests to blocklayer will only work +		 * for block device requests +		 */ +		if (_dasd_requeue_request(cqr)) +			continue;  		/* remove requests from device and block queue */  		list_del_init(&cqr->devlist); @@ -3917,13 +3918,6 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device)  			cqr = refers;  		} -		/* -		 * requeue requests to blocklayer will only work -		 * for block device requests -		 */ -		if (_dasd_requeue_request(cqr)) -			continue; -  		if (cqr->block)  			list_del_init(&cqr->blocklist);  		cqr->block->base->discipline->free_cp( @@ -3940,8 +3934,7 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device)  		list_splice_tail(&requeue_queue, &device->ccw_queue);  		spin_unlock_irq(get_ccwdev_lock(device->cdev));  	} -	/* wake up generic waitqueue for eventually ended sleepon requests */ -	
wake_up(&generic_waitq); +	dasd_schedule_device_bh(device);  	return rc;  } diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 1319122e9d12..9169af7dbb43 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -795,6 +795,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)  	ccw_device_set_timeout(cdev, 0);  	cdev->private->iretry = 255; +	cdev->private->async_kill_io_rc = -ETIMEDOUT;  	ret = ccw_device_cancel_halt_clear(cdev);  	if (ret == -EBUSY) {  		ccw_device_set_timeout(cdev, 3*HZ); @@ -871,7 +872,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)  	/* OK, i/o is dead now. Call interrupt handler. */  	if (cdev->handler)  		cdev->handler(cdev, cdev->private->intparm, -			      ERR_PTR(-EIO)); +			      ERR_PTR(cdev->private->async_kill_io_rc));  }  static void @@ -888,14 +889,16 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)  	ccw_device_online_verify(cdev, 0);  	if (cdev->handler)  		cdev->handler(cdev, cdev->private->intparm, -			      ERR_PTR(-EIO)); +			      ERR_PTR(cdev->private->async_kill_io_rc));  }  void ccw_device_kill_io(struct ccw_device *cdev)  {  	int ret; +	ccw_device_set_timeout(cdev, 0);  	cdev->private->iretry = 255; +	cdev->private->async_kill_io_rc = -EIO;  	ret = ccw_device_cancel_halt_clear(cdev);  	if (ret == -EBUSY) {  		ccw_device_set_timeout(cdev, 3*HZ); diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 1caf6a398760..75ce12a24dc2 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -159,7 +159,7 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)  }  /** - * ccw_device_start_key() - start a s390 channel program with key + * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key   * @cdev: target ccw device   * @cpa: logical start address of channel program   * @intparm: user 
specific interruption parameter; will be presented back to @@ -170,10 +170,15 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)   * @key: storage key to be used for the I/O   * @flags: additional flags; defines the action to be performed for I/O   *	   processing. + * @expires: timeout value in jiffies   *   * Start a S/390 channel program. When the interrupt arrives, the   * IRQ handler is called, either immediately, delayed (dev-end missing,   * or sense required) or never (no IRQ handler registered). + * This function notifies the device driver if the channel program has not + * completed during the time specified by @expires. If a timeout occurs, the + * channel program is terminated via xsch, hsch or csch, and the device's + * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).   * Returns:   *  %0, if the operation was successful;   *  -%EBUSY, if the device is busy, or status pending; @@ -182,9 +187,9 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)   * Context:   *  Interrupts disabled, ccw device lock held   */ -int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, -			 unsigned long intparm, __u8 lpm, __u8 key, -			 unsigned long flags) +int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, +				 unsigned long intparm, __u8 lpm, __u8 key, +				 unsigned long flags, int expires)  {  	struct subchannel *sch;  	int ret; @@ -224,6 +229,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,  	switch (ret) {  	case 0:  		cdev->private->intparm = intparm; +		if (expires) +			ccw_device_set_timeout(cdev, expires);  		break;  	case -EACCES:  	case -ENODEV: @@ -234,7 +241,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,  }  /** - * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key + * ccw_device_start_key() - start a s390 channel program with key   * @cdev: target ccw device   * @cpa: 
logical start address of channel program   * @intparm: user specific interruption parameter; will be presented back to @@ -245,15 +252,10 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,   * @key: storage key to be used for the I/O   * @flags: additional flags; defines the action to be performed for I/O   *	   processing. - * @expires: timeout value in jiffies   *   * Start a S/390 channel program. When the interrupt arrives, the   * IRQ handler is called, either immediately, delayed (dev-end missing,   * or sense required) or never (no IRQ handler registered). - * This function notifies the device driver if the channel program has not - * completed during the time specified by @expires. If a timeout occurs, the - * channel program is terminated via xsch, hsch or csch, and the device's - * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).   * Returns:   *  %0, if the operation was successful;   *  -%EBUSY, if the device is busy, or status pending; @@ -262,19 +264,12 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,   * Context:   *  Interrupts disabled, ccw device lock held   */ -int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, -				 unsigned long intparm, __u8 lpm, __u8 key, -				 unsigned long flags, int expires) +int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, +			 unsigned long intparm, __u8 lpm, __u8 key, +			 unsigned long flags)  { -	int ret; - -	if (!cdev) -		return -ENODEV; -	ccw_device_set_timeout(cdev, expires); -	ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags); -	if (ret != 0) -		ccw_device_set_timeout(cdev, 0); -	return ret; +	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key, +					    flags, 0);  }  /** @@ -489,18 +484,20 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)  EXPORT_SYMBOL(ccw_device_get_id);  /** - * ccw_device_tm_start_key() - perform start function + * 
ccw_device_tm_start_timeout_key() - perform start function   * @cdev: ccw device on which to perform the start function   * @tcw: transport-command word to be started   * @intparm: user defined parameter to be passed to the interrupt handler   * @lpm: mask of paths to use   * @key: storage key to use for storage access + * @expires: time span in jiffies after which to abort request   *   * Start the tcw on the given ccw device. Return zero on success, non-zero   * otherwise.   */ -int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, -			    unsigned long intparm, u8 lpm, u8 key) +int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, +				    unsigned long intparm, u8 lpm, u8 key, +				    int expires)  {  	struct subchannel *sch;  	int rc; @@ -527,37 +524,32 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,  			return -EACCES;  	}  	rc = cio_tm_start_key(sch, tcw, lpm, key); -	if (rc == 0) +	if (rc == 0) {  		cdev->private->intparm = intparm; +		if (expires) +			ccw_device_set_timeout(cdev, expires); +	}  	return rc;  } -EXPORT_SYMBOL(ccw_device_tm_start_key); +EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);  /** - * ccw_device_tm_start_timeout_key() - perform start function + * ccw_device_tm_start_key() - perform start function   * @cdev: ccw device on which to perform the start function   * @tcw: transport-command word to be started   * @intparm: user defined parameter to be passed to the interrupt handler   * @lpm: mask of paths to use   * @key: storage key to use for storage access - * @expires: time span in jiffies after which to abort request   *   * Start the tcw on the given ccw device. Return zero on success, non-zero   * otherwise.   
*/ -int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, -				    unsigned long intparm, u8 lpm, u8 key, -				    int expires) +int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, +			    unsigned long intparm, u8 lpm, u8 key)  { -	int ret; - -	ccw_device_set_timeout(cdev, expires); -	ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key); -	if (ret != 0) -		ccw_device_set_timeout(cdev, 0); -	return ret; +	return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0);  } -EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); +EXPORT_SYMBOL(ccw_device_tm_start_key);  /**   * ccw_device_tm_start() - perform start function diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index af571d8d6925..90e4e3a7841b 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h @@ -157,6 +157,7 @@ struct ccw_device_private {  	unsigned long intparm;	/* user interruption parameter */  	struct qdio_irq *qdio_data;  	struct irb irb;		/* device status */ +	int async_kill_io_rc;  	struct senseid senseid;	/* SenseID info */  	struct pgid pgid[8];	/* path group IDs per chpid*/  	struct ccw1 iccws[2];	/* ccws for SNID/SID/SPGID commands */ diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index ca72f3311004..3653bea38470 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -527,8 +527,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)  	    queue == card->qdio.no_in_queues - 1;  } - -static int qeth_issue_next_read(struct qeth_card *card) +static int __qeth_issue_next_read(struct qeth_card *card)  {  	int rc;  	struct qeth_cmd_buffer *iob; @@ -559,6 +558,17 @@ static int qeth_issue_next_read(struct qeth_card *card)  	return rc;  } +static int qeth_issue_next_read(struct qeth_card *card) +{ +	int ret; + +	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card))); +	ret = __qeth_issue_next_read(card); +	
spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card))); + +	return ret; +} +  static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)  {  	struct qeth_reply *reply; @@ -960,7 +970,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)  	spin_lock_irqsave(&card->thread_mask_lock, flags);  	card->thread_running_mask &= ~thread;  	spin_unlock_irqrestore(&card->thread_mask_lock, flags); -	wake_up(&card->wait_q); +	wake_up_all(&card->wait_q);  }  EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit); @@ -1164,6 +1174,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,  		}  		rc = qeth_get_problem(cdev, irb);  		if (rc) { +			card->read_or_write_problem = 1;  			qeth_clear_ipacmd_list(card);  			qeth_schedule_recovery(card);  			goto out; @@ -1182,7 +1193,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,  		return;  	if (channel == &card->read &&  	    channel->state == CH_STATE_UP) -		qeth_issue_next_read(card); +		__qeth_issue_next_read(card);  	iob = channel->iob;  	index = channel->buf_no; @@ -2134,24 +2145,25 @@ int qeth_send_control_data(struct qeth_card *card, int len,  	}  	reply->callback = reply_cb;  	reply->param = reply_param; -	if (card->state == CARD_STATE_DOWN) -		reply->seqno = QETH_IDX_COMMAND_SEQNO; -	else -		reply->seqno = card->seqno.ipa++; +  	init_waitqueue_head(&reply->wait_q); -	spin_lock_irqsave(&card->lock, flags); -	list_add_tail(&reply->list, &card->cmd_waiter_list); -	spin_unlock_irqrestore(&card->lock, flags);  	while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; -	qeth_prepare_control_data(card, len, iob);  	if (IS_IPA(iob->data)) {  		cmd = __ipa_cmd(iob); +		cmd->hdr.seqno = card->seqno.ipa++; +		reply->seqno = cmd->hdr.seqno;  		event_timeout = QETH_IPA_TIMEOUT;  	} else { +		reply->seqno = QETH_IDX_COMMAND_SEQNO;  		event_timeout = QETH_TIMEOUT;  	} +	qeth_prepare_control_data(card, len, iob); + +	spin_lock_irqsave(&card->lock, flags); +	
list_add_tail(&reply->list, &card->cmd_waiter_list); +	spin_unlock_irqrestore(&card->lock, flags);  	timeout = jiffies + event_timeout; @@ -2933,7 +2945,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,  	memset(cmd, 0, sizeof(struct qeth_ipa_cmd));  	cmd->hdr.command = command;  	cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; -	cmd->hdr.seqno = card->seqno.ipa; +	/* cmd->hdr.seqno is set by qeth_send_control_data() */  	cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);  	cmd->hdr.rel_adapter_no = (__u8) card->info.portno;  	if (card->options.layer2) @@ -3898,10 +3910,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);  int qeth_get_elements_no(struct qeth_card *card,  		     struct sk_buff *skb, int extra_elems, int data_offset)  { -	int elements = qeth_get_elements_for_range( -				(addr_t)skb->data + data_offset, -				(addr_t)skb->data + skb_headlen(skb)) + -			qeth_get_elements_for_frags(skb); +	addr_t end = (addr_t)skb->data + skb_headlen(skb); +	int elements = qeth_get_elements_for_frags(skb); +	addr_t start = (addr_t)skb->data + data_offset; + +	if (start != end) +		elements += qeth_get_elements_for_range(start, end);  	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {  		QETH_DBF_MESSAGE(2, "Invalid size of IP packet " @@ -5084,8 +5098,6 @@ static void qeth_core_free_card(struct qeth_card *card)  	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));  	qeth_clean_channel(&card->read);  	qeth_clean_channel(&card->write); -	if (card->dev) -		free_netdev(card->dev);  	qeth_free_qdio_buffers(card);  	unregister_service_level(&card->qeth_service_level);  	kfree(card); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 7f236440483f..5ef4c978ad19 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -915,8 +915,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)  		qeth_l2_set_offline(cgdev);  	if (card->dev) { -		
netif_napi_del(&card->napi);  		unregister_netdev(card->dev); +		free_netdev(card->dev);  		card->dev = NULL;  	}  	return; diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index bdd45f4dcace..498fe9af2cdb 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h @@ -40,8 +40,40 @@ struct qeth_ipaddr {  			unsigned int pfxlen;  		} a6;  	} u; -  }; + +static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1, +					 struct qeth_ipaddr *a2) +{ +	if (a1->proto != a2->proto) +		return false; +	if (a1->proto == QETH_PROT_IPV6) +		return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr); +	return a1->u.a4.addr == a2->u.a4.addr; +} + +static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1, +					  struct qeth_ipaddr *a2) +{ +	/* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(), +	 * so 'proto' and 'addr' match for sure. +	 * +	 * For ucast: +	 * -	'mac' is always 0. +	 * -	'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching +	 *	values are required to avoid mixups in takeover eligibility. +	 * +	 * For mcast, +	 * -	'mac' is mapped from the IP, and thus always matches. +	 * -	'mask'/'pfxlen' is always 0. 
+	 */ +	if (a1->type != a2->type) +		return false; +	if (a1->proto == QETH_PROT_IPV6) +		return a1->u.a6.pfxlen == a2->u.a6.pfxlen; +	return a1->u.a4.mask == a2->u.a4.mask; +} +  static inline  u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)  {  	u64  ret = 0; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index b0c888e86cd4..b6b12220da71 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -67,6 +67,24 @@ void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,  		qeth_l3_ipaddr6_to_string(addr, buf);  } +static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card, +						   struct qeth_ipaddr *query) +{ +	u64 key = qeth_l3_ipaddr_hash(query); +	struct qeth_ipaddr *addr; + +	if (query->is_multicast) { +		hash_for_each_possible(card->ip_mc_htable, addr, hnode, key) +			if (qeth_l3_addr_match_ip(addr, query)) +				return addr; +	} else { +		hash_for_each_possible(card->ip_htable,  addr, hnode, key) +			if (qeth_l3_addr_match_ip(addr, query)) +				return addr; +	} +	return NULL; +} +  static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)  {  	int i, j; @@ -120,34 +138,6 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,  	return rc;  } -inline int -qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2) -{ -	return addr1->proto == addr2->proto && -	       !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) && -	       ether_addr_equal_64bits(addr1->mac, addr2->mac); -} - -static struct qeth_ipaddr * -qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) -{ -	struct qeth_ipaddr *addr; - -	if (tmp_addr->is_multicast) { -		hash_for_each_possible(card->ip_mc_htable,  addr, -				hnode, qeth_l3_ipaddr_hash(tmp_addr)) -			if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr)) -				return addr; -	} else { -		hash_for_each_possible(card->ip_htable,  addr, -				hnode, qeth_l3_ipaddr_hash(tmp_addr)) -			if 
(qeth_l3_ipaddrs_is_equal(tmp_addr, addr)) -				return addr; -	} - -	return NULL; -} -  int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)  {  	int rc = 0; @@ -162,23 +152,18 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)  		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);  	} -	addr = qeth_l3_ip_from_hash(card, tmp_addr); -	if (!addr) +	addr = qeth_l3_find_addr_by_ip(card, tmp_addr); +	if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))  		return -ENOENT;  	addr->ref_counter--; -	if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL || -				      addr->type == QETH_IP_TYPE_RXIP)) +	if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)  		return rc;  	if (addr->in_progress)  		return -EINPROGRESS; -	if (!qeth_card_hw_is_reachable(card)) { -		addr->disp_flag = QETH_DISP_ADDR_DELETE; -		return 0; -	} - -	rc = qeth_l3_deregister_addr_entry(card, addr); +	if (qeth_card_hw_is_reachable(card)) +		rc = qeth_l3_deregister_addr_entry(card, addr);  	hash_del(&addr->hnode);  	kfree(addr); @@ -190,6 +175,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)  {  	int rc = 0;  	struct qeth_ipaddr *addr; +	char buf[40];  	QETH_CARD_TEXT(card, 4, "addip"); @@ -200,8 +186,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)  		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);  	} -	addr = qeth_l3_ip_from_hash(card, tmp_addr); -	if (!addr) { +	addr = qeth_l3_find_addr_by_ip(card, tmp_addr); +	if (addr) { +		if (tmp_addr->type != QETH_IP_TYPE_NORMAL) +			return -EADDRINUSE; +		if (qeth_l3_addr_match_all(addr, tmp_addr)) { +			addr->ref_counter++; +			return 0; +		} +		qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u, +					 buf); +		dev_warn(&card->gdev->dev, +			 "Registering IP address %s failed\n", buf); +		return -EADDRINUSE; +	} else {  		addr = qeth_l3_get_addr_buffer(tmp_addr->proto);  		if (!addr)  			return 
-ENOMEM; @@ -241,19 +239,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)  				(rc == IPA_RC_LAN_OFFLINE)) {  			addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;  			if (addr->ref_counter < 1) { -				qeth_l3_delete_ip(card, addr); +				qeth_l3_deregister_addr_entry(card, addr); +				hash_del(&addr->hnode);  				kfree(addr);  			}  		} else {  			hash_del(&addr->hnode);  			kfree(addr);  		} -	} else { -		if (addr->type == QETH_IP_TYPE_NORMAL || -		    addr->type == QETH_IP_TYPE_RXIP) -			addr->ref_counter++;  	} -  	return rc;  } @@ -321,11 +315,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card)  	spin_lock_bh(&card->ip_lock);  	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { -		if (addr->disp_flag == QETH_DISP_ADDR_DELETE) { -			qeth_l3_deregister_addr_entry(card, addr); -			hash_del(&addr->hnode); -			kfree(addr); -		} else if (addr->disp_flag == QETH_DISP_ADDR_ADD) { +		if (addr->disp_flag == QETH_DISP_ADDR_ADD) {  			if (addr->proto == QETH_PROT_IPV4) {  				addr->in_progress = 1;  				spin_unlock_bh(&card->ip_lock); @@ -643,12 +633,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,  		return -ENOMEM;  	spin_lock_bh(&card->ip_lock); - -	if (qeth_l3_ip_from_hash(card, ipaddr)) -		rc = -EEXIST; -	else -		rc = qeth_l3_add_ip(card, ipaddr); - +	rc = qeth_l3_add_ip(card, ipaddr);  	spin_unlock_bh(&card->ip_lock);  	kfree(ipaddr); @@ -713,12 +698,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,  		return -ENOMEM;  	spin_lock_bh(&card->ip_lock); - -	if (qeth_l3_ip_from_hash(card, ipaddr)) -		rc = -EEXIST; -	else -		rc = qeth_l3_add_ip(card, ipaddr); - +	rc = qeth_l3_add_ip(card, ipaddr);  	spin_unlock_bh(&card->ip_lock);  	kfree(ipaddr); @@ -1239,8 +1219,9 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)  		tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);  		tmp->is_multicast = 1; -		ipm = qeth_l3_ip_from_hash(card, tmp); +		ipm = 
qeth_l3_find_addr_by_ip(card, tmp);  		if (ipm) { +			/* for mcast, by-IP match means full match */  			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;  		} else {  			ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); @@ -1319,8 +1300,9 @@ static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,  		       sizeof(struct in6_addr));  		tmp->is_multicast = 1; -		ipm = qeth_l3_ip_from_hash(card, tmp); +		ipm = qeth_l3_find_addr_by_ip(card, tmp);  		if (ipm) { +			/* for mcast, by-IP match means full match */  			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;  			continue;  		} @@ -2450,11 +2432,12 @@ static void qeth_tso_fill_header(struct qeth_card *card,  static int qeth_l3_get_elements_no_tso(struct qeth_card *card,  			struct sk_buff *skb, int extra_elems)  { -	addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb); -	int elements = qeth_get_elements_for_range( -				tcpdptr, -				(addr_t)skb->data + skb_headlen(skb)) + -				qeth_get_elements_for_frags(skb); +	addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb); +	addr_t end = (addr_t)skb->data + skb_headlen(skb); +	int elements = qeth_get_elements_for_frags(skb); + +	if (start != end) +		elements += qeth_get_elements_for_range(start, end);  	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {  		QETH_DBF_MESSAGE(2, @@ -2882,8 +2865,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)  		qeth_l3_set_offline(cgdev);  	if (card->dev) { -		netif_napi_del(&card->napi);  		unregister_netdev(card->dev); +		free_netdev(card->dev);  		card->dev = NULL;  	} diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index ba2e0856d22c..8f5c1d7f751a 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -1297,6 +1297,9 @@ static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)  		vcdev->device_lost = true;  		rc = NOTIFY_DONE;  		break; +	case CIO_OPER: +		rc = NOTIFY_OK; +		break;  	default:  		rc = NOTIFY_DONE;  		break; @@ -1309,6 
+1312,27 @@ static struct ccw_device_id virtio_ids[] = {  	{},  }; +#ifdef CONFIG_PM_SLEEP +static int virtio_ccw_freeze(struct ccw_device *cdev) +{ +	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); + +	return virtio_device_freeze(&vcdev->vdev); +} + +static int virtio_ccw_restore(struct ccw_device *cdev) +{ +	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); +	int ret; + +	ret = virtio_ccw_set_transport_rev(vcdev); +	if (ret) +		return ret; + +	return virtio_device_restore(&vcdev->vdev); +} +#endif +  static struct ccw_driver virtio_ccw_driver = {  	.driver = {  		.owner = THIS_MODULE, @@ -1321,6 +1345,11 @@ static struct ccw_driver virtio_ccw_driver = {  	.set_online = virtio_ccw_online,  	.notify = virtio_ccw_cio_notify,  	.int_class = IRQIO_VIR, +#ifdef CONFIG_PM_SLEEP +	.freeze = virtio_ccw_freeze, +	.thaw = virtio_ccw_restore, +	.restore = virtio_ccw_restore, +#endif  };  static int __init pure_hex(char **cp, unsigned int *val, int min_digit, diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index fcfd28d2884c..de1b3fce936d 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -185,7 +185,6 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \  CFLAGS_ncr53c8xx.o	:= $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)  zalon7xx-objs	:= zalon.o ncr53c8xx.o  NCR_Q720_mod-objs	:= NCR_Q720.o ncr53c8xx.o -oktagon_esp_mod-objs	:= oktagon_esp.o oktagon_io.o  # Files generated that shall be removed upon make clean  clean-files :=	53c700_d.h 53c700_u.h diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index b3b931ab77eb..2664ea0df35f 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1693,8 +1693,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)  	 *	Map in the registers from the adapter.  	 
*/  	aac->base_size = AAC_MIN_FOOTPRINT_SIZE; -	if ((*aac_drivers[index].init)(aac)) +	if ((*aac_drivers[index].init)(aac)) { +		error = -ENODEV;  		goto out_unmap; +	}  	if (aac->sync_mode) {  		if (aac_sync_mode) diff --git a/drivers/scsi/aic7xxx/aiclib.c b/drivers/scsi/aic7xxx/aiclib.c deleted file mode 100644 index 828ae3d9a510..000000000000 --- a/drivers/scsi/aic7xxx/aiclib.c +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Implementation of Utility functions for all SCSI device types. - * - * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. - * Copyright (c) 1997, 1998 Kenneth D. Merry. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - *    notice, this list of conditions, and the following disclaimer, - *    without modification, immediately at the beginning of the file. - * 2. The name of the author may not be used to endorse or promote products - *    derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * $FreeBSD: src/sys/cam/scsi/scsi_all.c,v 1.38 2002/09/23 04:56:35 mjacob Exp $ - * $Id$ - */ - -#include "aiclib.h" - diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 8e2f767147cb..5a645b8b9af1 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1889,6 +1889,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,  		/* we will not receive ABTS response for this IO */  		BNX2FC_IO_DBG(io_req, "Timer context finished processing "  			   "this scsi cmd\n"); +		return;  	}  	/* Cancel the timeout_work, as we received IO completion */ diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c index be5ee2d37815..7dbbbb81a1e7 100644 --- a/drivers/scsi/csiostor/csio_lnode.c +++ b/drivers/scsi/csiostor/csio_lnode.c @@ -114,7 +114,7 @@ static enum csio_ln_ev fwevt_to_lnevt[] = {  static struct csio_lnode *  csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)  { -	struct csio_lnode *ln = hw->rln; +	struct csio_lnode *ln;  	struct list_head *tmp;  	/* Match siblings lnode with portid */ diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 022e421c2185..4b44325d1a82 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -876,6 +876,11 @@ static void alua_rtpg_work(struct work_struct *work)  /**   * alua_rtpg_queue() - cause RTPG to be submitted asynchronously + * @pg: ALUA port group associated with @sdev. + * @sdev: SCSI device for which to submit an RTPG. + * @qdata: Information about the callback to invoke after the RTPG. + * @force: Whether or not to submit an RTPG if a work item that will submit an + *         RTPG already has been scheduled.   *   * Returns true if and only if alua_rtpg_work() will be called asynchronously.   * That function is responsible for calling @qdata->fn(). 
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 57bf43e34863..dd9464920456 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -328,8 +328,6 @@ static void scsi_host_dev_release(struct device *dev)  	if (shost->work_q)  		destroy_workqueue(shost->work_q); -	destroy_rcu_head(&shost->rcu); -  	if (shost->shost_state == SHOST_CREATED) {  		/*  		 * Free the shost_dev device name here if scsi_host_alloc() @@ -404,7 +402,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)  	INIT_LIST_HEAD(&shost->starved_list);  	init_waitqueue_head(&shost->host_wait);  	mutex_init(&shost->scan_mutex); -	init_rcu_head(&shost->rcu);  	index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);  	if (index < 0) diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 9a0696f68f37..b81a53c4a9a8 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -367,7 +367,7 @@ enum ibmvfc_fcp_rsp_info_codes {  };  struct ibmvfc_fcp_rsp_info { -	__be16 reserved; +	u8 reserved[3];  	u8 rsp_code;  	u8 reserved2[4];  }__attribute__((packed, aligned (2))); diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 6de9681ace82..ceab5e5c41c2 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -223,6 +223,7 @@ out_done:  static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)  {  	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host); +	struct domain_device *dev = cmd_to_domain_dev(cmd);  	struct sas_task *task = TO_SAS_TASK(cmd);  	/* At this point, we only get called following an actual abort @@ -231,6 +232,14 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)  	 */  	sas_end_task(cmd, task); +	if (dev_is_sata(dev)) { +		/* defer commands to libata so that libata EH can +		 * handle ata qcs correctly +		 */ +		list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q); +		return; +	} +  	/* now finish the command 
and move it on to the error  	 * handler done list, this also takes it off the  	 * error handler pending list. @@ -238,22 +247,6 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)  	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);  } -static void sas_eh_defer_cmd(struct scsi_cmnd *cmd) -{ -	struct domain_device *dev = cmd_to_domain_dev(cmd); -	struct sas_ha_struct *ha = dev->port->ha; -	struct sas_task *task = TO_SAS_TASK(cmd); - -	if (!dev_is_sata(dev)) { -		sas_eh_finish_cmd(cmd); -		return; -	} - -	/* report the timeout to libata */ -	sas_end_task(cmd, task); -	list_move_tail(&cmd->eh_entry, &ha->eh_ata_q); -} -  static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)  {  	struct scsi_cmnd *cmd, *n; @@ -261,7 +254,7 @@ static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd  	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {  		if (cmd->device->sdev_target == my_cmd->device->sdev_target &&  		    cmd->device->lun == my_cmd->device->lun) -			sas_eh_defer_cmd(cmd); +			sas_eh_finish_cmd(cmd);  	}  } @@ -631,12 +624,12 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *  		case TASK_IS_DONE:  			SAS_DPRINTK("%s: task 0x%p is done\n", __func__,  				    task); -			sas_eh_defer_cmd(cmd); +			sas_eh_finish_cmd(cmd);  			continue;  		case TASK_IS_ABORTED:  			SAS_DPRINTK("%s: task 0x%p is aborted\n",  				    __func__, task); -			sas_eh_defer_cmd(cmd); +			sas_eh_finish_cmd(cmd);  			continue;  		case TASK_IS_AT_LU:  			SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); @@ -647,7 +640,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *  					    "recovered\n",  					    SAS_ADDR(task->dev),  					    cmd->device->lun); -				sas_eh_defer_cmd(cmd); +				sas_eh_finish_cmd(cmd);  				sas_scsi_clear_queue_lu(work_q, cmd);  				goto Again;  			} diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c 
b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 073ced07e662..dc8e850fbfd2 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -216,36 +216,30 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance,  /**   * megasas_fire_cmd_fusion -	Sends command to the FW   * @instance:			Adapter soft state - * @req_desc:			32bit or 64bit Request descriptor + * @req_desc:			64bit Request descriptor   * - * Perform PCI Write. Ventura supports 32 bit Descriptor. - * Prior to Ventura (12G) MR controller supports 64 bit Descriptor. + * Perform PCI Write.   */  static void  megasas_fire_cmd_fusion(struct megasas_instance *instance,  		union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)  { -	if (instance->adapter_type == VENTURA_SERIES) -		writel(le32_to_cpu(req_desc->u.low), -			&instance->reg_set->inbound_single_queue_port); -	else {  #if defined(writeq) && defined(CONFIG_64BIT) -		u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | -				le32_to_cpu(req_desc->u.low)); +	u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | +		le32_to_cpu(req_desc->u.low)); -		writeq(req_data, &instance->reg_set->inbound_low_queue_port); +	writeq(req_data, &instance->reg_set->inbound_low_queue_port);  #else -		unsigned long flags; -		spin_lock_irqsave(&instance->hba_lock, flags); -		writel(le32_to_cpu(req_desc->u.low), -			&instance->reg_set->inbound_low_queue_port); -		writel(le32_to_cpu(req_desc->u.high), -			&instance->reg_set->inbound_high_queue_port); -		mmiowb(); -		spin_unlock_irqrestore(&instance->hba_lock, flags); +	unsigned long flags; +	spin_lock_irqsave(&instance->hba_lock, flags); +	writel(le32_to_cpu(req_desc->u.low), +		&instance->reg_set->inbound_low_queue_port); +	writel(le32_to_cpu(req_desc->u.high), +		&instance->reg_set->inbound_high_queue_port); +	mmiowb(); +	spin_unlock_irqrestore(&instance->hba_lock, flags);  #endif -	}  }  /** @@ -982,7 +976,6 @@ megasas_ioc_init_fusion(struct 
megasas_instance *instance)  	const char *sys_info;  	MFI_CAPABILITIES *drv_ops;  	u32 scratch_pad_2; -	unsigned long flags;  	ktime_t time;  	bool cur_fw_64bit_dma_capable; @@ -1121,14 +1114,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)  			break;  	} -	/* For Ventura also IOC INIT required 64 bit Descriptor write. */ -	spin_lock_irqsave(&instance->hba_lock, flags); -	writel(le32_to_cpu(req_desc.u.low), -	       &instance->reg_set->inbound_low_queue_port); -	writel(le32_to_cpu(req_desc.u.high), -	       &instance->reg_set->inbound_high_queue_port); -	mmiowb(); -	spin_unlock_irqrestore(&instance->hba_lock, flags); +	megasas_fire_cmd_fusion(instance, &req_desc);  	wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 13d6e4ec3022..0aafbfd1b746 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -2410,8 +2410,11 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)  				continue;  			} -			for_each_cpu(cpu, mask) +			for_each_cpu_and(cpu, mask, cpu_online_mask) { +				if (cpu >= ioc->cpu_msix_table_sz) +					break;  				ioc->cpu_msix_table[cpu] = reply_q->msix_index; +			}  		}  		return;  	} @@ -6294,14 +6297,14 @@ _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)  }  /** - * _wait_for_commands_to_complete - reset controller + * mpt3sas_wait_for_commands_to_complete - reset controller   * @ioc: Pointer to MPT_ADAPTER structure   *   * This function is waiting 10s for all pending commands to complete   * prior to putting controller in reset.   
*/ -static void -_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) +void +mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)  {  	u32 ioc_state; @@ -6374,7 +6377,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,  			is_fault = 1;  	}  	_base_reset_handler(ioc, MPT3_IOC_PRE_RESET); -	_wait_for_commands_to_complete(ioc); +	mpt3sas_wait_for_commands_to_complete(ioc);  	_base_mask_interrupts(ioc);  	r = _base_make_ioc_ready(ioc, type);  	if (r) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 789bc421424b..99ccf83b8c51 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -1433,6 +1433,9 @@ void mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,  int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc); +void +mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc); +  /* scsih shared API */  struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 74fca184dba9..a1cb0236c550 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -2835,7 +2835,8 @@ scsih_abort(struct scsi_cmnd *scmd)  	_scsih_tm_display_info(ioc, scmd);  	sas_device_priv_data = scmd->device->hostdata; -	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { +	if (!sas_device_priv_data || !sas_device_priv_data->sas_target || +	    ioc->remove_host) {  		sdev_printk(KERN_INFO, scmd->device,  			"device been deleted! 
scmd(%p)\n", scmd);  		scmd->result = DID_NO_CONNECT << 16; @@ -2898,7 +2899,8 @@ scsih_dev_reset(struct scsi_cmnd *scmd)  	_scsih_tm_display_info(ioc, scmd);  	sas_device_priv_data = scmd->device->hostdata; -	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { +	if (!sas_device_priv_data || !sas_device_priv_data->sas_target || +	    ioc->remove_host) {  		sdev_printk(KERN_INFO, scmd->device,  			"device been deleted! scmd(%p)\n", scmd);  		scmd->result = DID_NO_CONNECT << 16; @@ -2961,7 +2963,8 @@ scsih_target_reset(struct scsi_cmnd *scmd)  	_scsih_tm_display_info(ioc, scmd);  	sas_device_priv_data = scmd->device->hostdata; -	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { +	if (!sas_device_priv_data || !sas_device_priv_data->sas_target || +	    ioc->remove_host) {  		starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",  			scmd);  		scmd->result = DID_NO_CONNECT << 16; @@ -3019,7 +3022,7 @@ scsih_host_reset(struct scsi_cmnd *scmd)  	    ioc->name, scmd);  	scsi_print_command(scmd); -	if (ioc->is_driver_loading) { +	if (ioc->is_driver_loading || ioc->remove_host) {  		pr_info(MPT3SAS_FMT "Blocking the host reset\n",  		    ioc->name);  		r = FAILED; @@ -4453,7 +4456,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)  		st = scsi_cmd_priv(scmd);  		mpt3sas_base_clear_st(ioc, st);  		scsi_dma_unmap(scmd); -		if (ioc->pci_error_recovery) +		if (ioc->pci_error_recovery || ioc->remove_host)  			scmd->result = DID_NO_CONNECT << 16;  		else  			scmd->result = DID_RESET << 16; @@ -9739,6 +9742,10 @@ static void scsih_remove(struct pci_dev *pdev)  	unsigned long flags;  	ioc->remove_host = 1; + +	mpt3sas_wait_for_commands_to_complete(ioc); +	_scsih_flush_running_cmds(ioc); +  	_scsih_fw_event_cleanup_queue(ioc);  	spin_lock_irqsave(&ioc->fw_event_lock, flags); @@ -9815,6 +9822,10 @@ scsih_shutdown(struct pci_dev *pdev)  	unsigned long flags;  	ioc->remove_host = 1; + +	mpt3sas_wait_for_commands_to_complete(ioc); +	
_scsih_flush_running_cmds(ioc); +  	_scsih_fw_event_cleanup_queue(ioc);  	spin_lock_irqsave(&ioc->fw_event_lock, flags); @@ -10547,7 +10558,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)  	snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),  	    "fw_event_%s%d", ioc->driver_name, ioc->id);  	ioc->firmware_event_thread = alloc_ordered_workqueue( -	    ioc->firmware_event_name, WQ_MEM_RECLAIM); +	    ioc->firmware_event_name, 0);  	if (!ioc->firmware_event_thread) {  		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",  		    ioc->name, __FILE__, __LINE__, __func__); diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 667d7697ba01..d09afe1b567d 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -762,6 +762,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,  	iscsi_cid = cqe->conn_id;  	qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; +	if (!qedi_conn) { +		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, +			  "icid not found 0x%x\n", cqe->conn_id); +		return; +	}  	/* Based on this itt get the corresponding qedi_cmd */  	spin_lock_bh(&qedi_conn->tmf_work_lock); diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 029e2e69b29f..f57a94b4f0d9 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1724,7 +1724,6 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)  {  	struct qedi_ctx *qedi = data;  	struct nvm_iscsi_initiator *initiator; -	char *str = buf;  	int rc = 1;  	u32 ipv6_en, dhcp_en, ip_len;  	struct nvm_iscsi_block *block; @@ -1758,32 +1757,32 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)  	switch (type) {  	case ISCSI_BOOT_ETH_IP_ADDR: -		rc = snprintf(str, ip_len, fmt, ip); +		rc = snprintf(buf, ip_len, fmt, ip);  		break;  	case ISCSI_BOOT_ETH_SUBNET_MASK: -		rc = snprintf(str, ip_len, fmt, sub); +		rc = snprintf(buf, ip_len, fmt, sub);  		break;  
	case ISCSI_BOOT_ETH_GATEWAY: -		rc = snprintf(str, ip_len, fmt, gw); +		rc = snprintf(buf, ip_len, fmt, gw);  		break;  	case ISCSI_BOOT_ETH_FLAGS: -		rc = snprintf(str, 3, "%hhd\n", +		rc = snprintf(buf, 3, "%hhd\n",  			      SYSFS_FLAG_FW_SEL_BOOT);  		break;  	case ISCSI_BOOT_ETH_INDEX: -		rc = snprintf(str, 3, "0\n"); +		rc = snprintf(buf, 3, "0\n");  		break;  	case ISCSI_BOOT_ETH_MAC: -		rc = sysfs_format_mac(str, qedi->mac, ETH_ALEN); +		rc = sysfs_format_mac(buf, qedi->mac, ETH_ALEN);  		break;  	case ISCSI_BOOT_ETH_VLAN: -		rc = snprintf(str, 12, "%d\n", +		rc = snprintf(buf, 12, "%d\n",  			      GET_FIELD2(initiator->generic_cont0,  					 NVM_ISCSI_CFG_INITIATOR_VLAN));  		break;  	case ISCSI_BOOT_ETH_ORIGIN:  		if (dhcp_en) -			rc = snprintf(str, 3, "3\n"); +			rc = snprintf(buf, 3, "3\n");  		break;  	default:  		rc = 0; @@ -1819,7 +1818,6 @@ static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)  {  	struct qedi_ctx *qedi = data;  	struct nvm_iscsi_initiator *initiator; -	char *str = buf;  	int rc;  	struct nvm_iscsi_block *block; @@ -1831,8 +1829,8 @@ static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)  	switch (type) {  	case ISCSI_BOOT_INI_INITIATOR_NAME: -		rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n", -			      initiator->initiator_name.byte); +		rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, +			     initiator->initiator_name.byte);  		break;  	default:  		rc = 0; @@ -1860,7 +1858,6 @@ static ssize_t  qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,  			char *buf, enum qedi_nvm_tgts idx)  { -	char *str = buf;  	int rc = 1;  	u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len;  	struct nvm_iscsi_block *block; @@ -1899,48 +1896,48 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,  	switch (type) {  	case ISCSI_BOOT_TGT_NAME: -		rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n", -			      block->target[idx].target_name.byte); +		rc = sprintf(buf, 
"%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, +			     block->target[idx].target_name.byte);  		break;  	case ISCSI_BOOT_TGT_IP_ADDR:  		if (ipv6_en) -			rc = snprintf(str, ip_len, "%pI6\n", +			rc = snprintf(buf, ip_len, "%pI6\n",  				      block->target[idx].ipv6_addr.byte);  		else -			rc = snprintf(str, ip_len, "%pI4\n", +			rc = snprintf(buf, ip_len, "%pI4\n",  				      block->target[idx].ipv4_addr.byte);  		break;  	case ISCSI_BOOT_TGT_PORT: -		rc = snprintf(str, 12, "%d\n", +		rc = snprintf(buf, 12, "%d\n",  			      GET_FIELD2(block->target[idx].generic_cont0,  					 NVM_ISCSI_CFG_TARGET_TCP_PORT));  		break;  	case ISCSI_BOOT_TGT_LUN: -		rc = snprintf(str, 22, "%.*d\n", +		rc = snprintf(buf, 22, "%.*d\n",  			      block->target[idx].lun.value[1],  			      block->target[idx].lun.value[0]);  		break;  	case ISCSI_BOOT_TGT_CHAP_NAME: -		rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n", -			      chap_name); +		rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, +			     chap_name);  		break;  	case ISCSI_BOOT_TGT_CHAP_SECRET: -		rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n", -			      chap_secret); +		rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, +			     chap_secret);  		break;  	case ISCSI_BOOT_TGT_REV_CHAP_NAME: -		rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n", -			      mchap_name); +		rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, +			     mchap_name);  		break;  	case ISCSI_BOOT_TGT_REV_CHAP_SECRET: -		rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n", -			      mchap_secret); +		rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, +			     mchap_secret);  		break;  	case ISCSI_BOOT_TGT_FLAGS: -		rc = snprintf(str, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT); +		rc = snprintf(buf, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);  		break;  	case ISCSI_BOOT_TGT_NIC_ASSOC: -		rc = snprintf(str, 3, "0\n"); +		rc = snprintf(buf, 3, "0\n");  		break;  	default:  		rc = 0; diff --git 
a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index be7d6824581a..c9689f97c307 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -261,9 +261,9 @@  struct name_list_extended {  	struct get_name_list_extended *l;  	dma_addr_t		ldma; -	struct list_head 	fcports;	/* protect by sess_list */ +	struct list_head	fcports; +	spinlock_t		fcports_lock;  	u32			size; -	u8			sent;  };  /*   * Timeout timer counts in seconds @@ -2217,6 +2217,7 @@ typedef struct {  /* FCP-4 types */  #define FC4_TYPE_FCP_SCSI	0x08 +#define FC4_TYPE_NVME		0x28  #define FC4_TYPE_OTHER		0x0  #define FC4_TYPE_UNKNOWN	0xff diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 5bf9a59432f6..403fa096f8c8 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -3179,6 +3179,7 @@ done_free_sp:  	sp->free(sp);  	fcport->flags &= ~FCF_ASYNC_SENT;  done: +	fcport->flags &= ~FCF_ASYNC_ACTIVE;  	return rval;  } @@ -3370,6 +3371,7 @@ done_free_sp:  	sp->free(sp);  	fcport->flags &= ~FCF_ASYNC_SENT;  done: +	fcport->flags &= ~FCF_ASYNC_ACTIVE;  	return rval;  } @@ -3971,6 +3973,9 @@ out:  	spin_lock_irqsave(&vha->work_lock, flags);  	vha->scan.scan_flags &= ~SF_SCANNING;  	spin_unlock_irqrestore(&vha->work_lock, flags); + +	if ((fc4type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled) +		qla24xx_async_gpnft(vha, FC4_TYPE_NVME);  }  static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res) diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index aececf664654..8d7fab3cd01d 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -59,8 +59,6 @@ qla2x00_sp_timeout(struct timer_list *t)  	req->outstanding_cmds[sp->handle] = NULL;  	iocb = &sp->u.iocb_cmd;  	iocb->timeout(sp); -	if (sp->type != SRB_ELS_DCMD) -		sp->free(sp);  	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);  } @@ -102,7 +100,6 @@ qla2x00_async_iocb_timeout(void *data)  	srb_t 
*sp = data;  	fc_port_t *fcport = sp->fcport;  	struct srb_iocb *lio = &sp->u.iocb_cmd; -	struct event_arg ea;  	if (fcport) {  		ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, @@ -117,25 +114,13 @@ qla2x00_async_iocb_timeout(void *data)  	switch (sp->type) {  	case SRB_LOGIN_CMD: -		if (!fcport) -			break;  		/* Retry as needed. */  		lio->u.logio.data[0] = MBS_COMMAND_ERROR;  		lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?  			QLA_LOGIO_LOGIN_RETRIED : 0; -		memset(&ea, 0, sizeof(ea)); -		ea.event = FCME_PLOGI_DONE; -		ea.fcport = sp->fcport; -		ea.data[0] = lio->u.logio.data[0]; -		ea.data[1] = lio->u.logio.data[1]; -		ea.sp = sp; -		qla24xx_handle_plogi_done_event(fcport->vha, &ea); +		sp->done(sp, QLA_FUNCTION_TIMEOUT);  		break;  	case SRB_LOGOUT_CMD: -		if (!fcport) -			break; -		qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT); -		break;  	case SRB_CT_PTHRU_CMD:  	case SRB_MB_IOCB:  	case SRB_NACK_PLOGI: @@ -228,6 +213,7 @@ done_free_sp:  	sp->free(sp);  	fcport->flags &= ~FCF_ASYNC_SENT;  done: +	fcport->flags &= ~FCF_ASYNC_ACTIVE;  	return rval;  } @@ -235,12 +221,10 @@ static void  qla2x00_async_logout_sp_done(void *ptr, int res)  {  	srb_t *sp = ptr; -	struct srb_iocb *lio = &sp->u.iocb_cmd;  	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); -	if (!test_bit(UNLOADING, &sp->vha->dpc_flags)) -		qla2x00_post_async_logout_done_work(sp->vha, sp->fcport, -		    lio->u.logio.data); +	sp->fcport->login_gen++; +	qlt_logo_completion_handler(sp->fcport, res);  	sp->free(sp);  } @@ -280,7 +264,7 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)  done_free_sp:  	sp->free(sp);  done: -	fcport->flags &= ~FCF_ASYNC_SENT; +	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);  	return rval;  } @@ -288,6 +272,7 @@ void  qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,      uint16_t *data)  { +	fcport->flags &= ~FCF_ASYNC_ACTIVE;  	/* Don't re-login in target mode */  	if (!fcport->tgt_session)  		
qla2x00_mark_device_lost(vha, fcport, 1, 0); @@ -301,6 +286,7 @@ qla2x00_async_prlo_sp_done(void *s, int res)  	struct srb_iocb *lio = &sp->u.iocb_cmd;  	struct scsi_qla_host *vha = sp->vha; +	sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;  	if (!test_bit(UNLOADING, &vha->dpc_flags))  		qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,  		    lio->u.logio.data); @@ -339,6 +325,7 @@ qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)  done_free_sp:  	sp->free(sp);  done: +	fcport->flags &= ~FCF_ASYNC_ACTIVE;  	return rval;  } @@ -392,6 +379,8 @@ qla2x00_async_adisc_sp_done(void *ptr, int res)  	    "Async done-%s res %x %8phC\n",  	    sp->name, res, sp->fcport->port_name); +	sp->fcport->flags &= ~FCF_ASYNC_SENT; +  	memset(&ea, 0, sizeof(ea));  	ea.event = FCME_ADISC_DONE;  	ea.rc = res; @@ -442,7 +431,7 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,  done_free_sp:  	sp->free(sp);  done: -	fcport->flags &= ~FCF_ASYNC_SENT; +	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);  	qla2x00_post_async_adisc_work(vha, fcport, data);  	return rval;  } @@ -660,8 +649,7 @@ qla24xx_async_gnl_sp_done(void *s, int res)  		    (loop_id & 0x7fff));  	} -	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); -	vha->gnl.sent = 0; +	spin_lock_irqsave(&vha->gnl.fcports_lock, flags);  	INIT_LIST_HEAD(&h);  	fcport = tf = NULL; @@ -670,12 +658,16 @@ qla24xx_async_gnl_sp_done(void *s, int res)  	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {  		list_del_init(&fcport->gnl_entry); +		spin_lock(&vha->hw->tgt.sess_lock);  		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); +		spin_unlock(&vha->hw->tgt.sess_lock);  		ea.fcport = fcport;  		qla2x00_fcport_event_handler(vha, &ea);  	} +	spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags); +	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);  	/* create new fcport if fw has knowledge of new sessions */  	for (i = 0; i < n; i++) {  		port_id_t id; @@ -727,18 +719,21 @@ int 
qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)  	ql_dbg(ql_dbg_disc, vha, 0x20d9,  	    "Async-gnlist WWPN %8phC \n", fcport->port_name); -	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); +	spin_lock_irqsave(&vha->gnl.fcports_lock, flags); +	if (!list_empty(&fcport->gnl_entry)) { +		spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags); +		rval = QLA_SUCCESS; +		goto done; +	} + +	spin_lock(&vha->hw->tgt.sess_lock);  	fcport->disc_state = DSC_GNL;  	fcport->last_rscn_gen = fcport->rscn_gen;  	fcport->last_login_gen = fcport->login_gen; +	spin_unlock(&vha->hw->tgt.sess_lock);  	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports); -	if (vha->gnl.sent) { -		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); -		return QLA_SUCCESS; -	} -	vha->gnl.sent = 1; -	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); +	spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);  	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);  	if (!sp) @@ -1066,6 +1061,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)  	fc_port_t *fcport = ea->fcport;  	struct port_database_24xx *pd;  	struct srb *sp = ea->sp; +	uint8_t	ls;  	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in; @@ -1078,7 +1074,12 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)  	if (fcport->disc_state == DSC_DELETE_PEND)  		return; -	switch (pd->current_login_state) { +	if (fcport->fc4f_nvme) +		ls = pd->current_login_state >> 4; +	else +		ls = pd->current_login_state & 0xf; + +	switch (ls) {  	case PDS_PRLI_COMPLETE:  		__qla24xx_parse_gpdb(vha, fcport, pd);  		break; @@ -1168,8 +1169,9 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)  	if (fcport->scan_state != QLA_FCPORT_FOUND)  		return 0; -	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || -	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)) +	if ((fcport->loop_id != FC_NO_LOOP_ID) && +	    ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || +	     
(fcport->fw_login_state == DSC_LS_PRLI_PEND)))  		return 0;  	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { @@ -1544,6 +1546,7 @@ qla24xx_abort_sp_done(void *ptr, int res)  	srb_t *sp = ptr;  	struct srb_iocb *abt = &sp->u.iocb_cmd; +	del_timer(&sp->u.iocb_cmd.timer);  	complete(&abt->u.abt.comp);  } @@ -1716,7 +1719,6 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)  			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);  			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); -			ea->fcport->loop_id = FC_NO_LOOP_ID;  			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;  			ea->fcport->logout_on_delete = 1;  			ea->fcport->send_els_logo = 0; @@ -1808,6 +1810,7 @@ qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,  	qla2x00_mark_device_lost(vha, fcport, 1, 0);  	qlt_logo_completion_handler(fcport, data[0]);  	fcport->login_gen++; +	fcport->flags &= ~FCF_ASYNC_ACTIVE;  	return;  } @@ -1815,6 +1818,7 @@ void  qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,      uint16_t *data)  { +	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);  	if (data[0] == MBS_COMMAND_COMPLETE) {  		qla2x00_update_fcport(vha, fcport); @@ -1822,7 +1826,6 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,  	}  	/* Retry login. 
*/ -	fcport->flags &= ~FCF_ASYNC_SENT;  	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)  		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);  	else diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 1b62e943ec49..8d00d559bd26 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -3275,12 +3275,11 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)  	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));  	abt_iocb->entry_type = ABORT_IOCB_TYPE;  	abt_iocb->entry_count = 1; -	abt_iocb->handle = -	     cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no, -		 aio->u.abt.cmd_hndl)); +	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));  	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);  	abt_iocb->handle_to_abort = -	    cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl)); +	    cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no, +				    aio->u.abt.cmd_hndl));  	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;  	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;  	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 14109d86c3f6..89f93ebd819d 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -272,7 +272,8 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)  	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;  	/* Read all mbox registers? */ -	mboxes = (1 << ha->mbx_count) - 1; +	WARN_ON_ONCE(ha->mbx_count > 32); +	mboxes = (1ULL << ha->mbx_count) - 1;  	if (!ha->mcp)  		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");  	else @@ -2880,7 +2881,8 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)  	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;  	/* Read all mbox registers? 
*/ -	mboxes = (1 << ha->mbx_count) - 1; +	WARN_ON_ONCE(ha->mbx_count > 32); +	mboxes = (1ULL << ha->mbx_count) - 1;  	if (!ha->mcp)  		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");  	else diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 12ee6e02d146..5c5dcca4d1da 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -454,7 +454,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,  	ha->req_q_map[0] = req;  	set_bit(0, ha->rsp_qid_map);  	set_bit(0, ha->req_qid_map); -	return 1; +	return 0;  fail_qpair_map:  	kfree(ha->base_qpair); @@ -471,6 +471,9 @@ fail_req_map:  static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)  { +	if (!ha->req_q_map) +		return; +  	if (IS_QLAFX00(ha)) {  		if (req && req->ring_fx00)  			dma_free_coherent(&ha->pdev->dev, @@ -481,14 +484,17 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)  		(req->length + 1) * sizeof(request_t),  		req->ring, req->dma); -	if (req) +	if (req) {  		kfree(req->outstanding_cmds); - -	kfree(req); +		kfree(req); +	}  }  static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)  { +	if (!ha->rsp_q_map) +		return; +  	if (IS_QLAFX00(ha)) {  		if (rsp && rsp->ring)  			dma_free_coherent(&ha->pdev->dev, @@ -499,7 +505,8 @@ static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)  		(rsp->length + 1) * sizeof(response_t),  		rsp->ring, rsp->dma);  	} -	kfree(rsp); +	if (rsp) +		kfree(rsp);  }  static void qla2x00_free_queues(struct qla_hw_data *ha) @@ -1723,6 +1730,8 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)  	struct qla_tgt_cmd *cmd;  	uint8_t trace = 0; +	if (!ha->req_q_map) +		return;  	spin_lock_irqsave(qp->qp_lock_ptr, flags);  	req = qp->req;  	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { @@ -3095,14 +3104,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)  	/* 
Set up the irqs */  	ret = qla2x00_request_irqs(ha, rsp);  	if (ret) -		goto probe_hw_failed; +		goto probe_failed;  	/* Alloc arrays of request and response ring ptrs */ -	if (!qla2x00_alloc_queues(ha, req, rsp)) { +	if (qla2x00_alloc_queues(ha, req, rsp)) {  		ql_log(ql_log_fatal, base_vha, 0x003d,  		    "Failed to allocate memory for queue pointers..."  		    "aborting.\n"); -		goto probe_init_failed; +		goto probe_failed;  	}  	if (ha->mqenable && shost_use_blk_mq(host)) { @@ -3387,15 +3396,6 @@ skip_dpc:  	return 0; -probe_init_failed: -	qla2x00_free_req_que(ha, req); -	ha->req_q_map[0] = NULL; -	clear_bit(0, ha->req_qid_map); -	qla2x00_free_rsp_que(ha, rsp); -	ha->rsp_q_map[0] = NULL; -	clear_bit(0, ha->rsp_qid_map); -	ha->max_req_queues = ha->max_rsp_queues = 0; -  probe_failed:  	if (base_vha->timer_active)  		qla2x00_stop_timer(base_vha); @@ -3625,6 +3625,8 @@ qla2x00_remove_one(struct pci_dev *pdev)  	}  	qla2x00_wait_for_hba_ready(base_vha); +	qla2x00_wait_for_sess_deletion(base_vha); +  	/*  	 * if UNLOAD flag is already set, then continue unload,  	 * where it was set first. 
@@ -4506,11 +4508,17 @@ qla2x00_mem_free(struct qla_hw_data *ha)  	if (ha->init_cb)  		dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,  			ha->init_cb, ha->init_cb_dma); -	vfree(ha->optrom_buffer); -	kfree(ha->nvram); -	kfree(ha->npiv_info); -	kfree(ha->swl); -	kfree(ha->loop_id_map); + +	if (ha->optrom_buffer) +		vfree(ha->optrom_buffer); +	if (ha->nvram) +		kfree(ha->nvram); +	if (ha->npiv_info) +		kfree(ha->npiv_info); +	if (ha->swl) +		kfree(ha->swl); +	if (ha->loop_id_map) +		kfree(ha->loop_id_map);  	ha->srb_mempool = NULL;  	ha->ctx_mempool = NULL; @@ -4526,6 +4534,15 @@ qla2x00_mem_free(struct qla_hw_data *ha)  	ha->ex_init_cb_dma = 0;  	ha->async_pd = NULL;  	ha->async_pd_dma = 0; +	ha->loop_id_map = NULL; +	ha->npiv_info = NULL; +	ha->optrom_buffer = NULL; +	ha->swl = NULL; +	ha->nvram = NULL; +	ha->mctp_dump = NULL; +	ha->dcbx_tlv = NULL; +	ha->xgmac_data = NULL; +	ha->sfp_data = NULL;  	ha->s_dma_pool = NULL;  	ha->dl_dma_pool = NULL; @@ -4575,6 +4592,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,  	spin_lock_init(&vha->work_lock);  	spin_lock_init(&vha->cmd_list_lock); +	spin_lock_init(&vha->gnl.fcports_lock);  	init_waitqueue_head(&vha->fcport_waitQ);  	init_waitqueue_head(&vha->vref_waitq); @@ -4804,9 +4822,12 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)  			fcport->d_id = e->u.new_sess.id;  			fcport->flags |= FCF_FABRIC_DEVICE;  			fcport->fw_login_state = DSC_LS_PLOGI_PEND; -			if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI) +			if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI) {  				fcport->fc4_type = FC4_TYPE_FCP_SCSI; - +			} else if (e->u.new_sess.fc4_type == FC4_TYPE_NVME) { +				fcport->fc4_type = FC4_TYPE_OTHER; +				fcport->fc4f_nvme = FC4_TYPE_NVME; +			}  			memcpy(fcport->port_name, e->u.new_sess.port_name,  			    WWN_SIZE);  		} else { @@ -4875,6 +4896,8 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)  			}  			
qlt_plogi_ack_unref(vha, pla);  		} else { +			fc_port_t *dfcp = NULL; +  			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);  			tfcp = qla2x00_find_fcport_by_nportid(vha,  			    &e->u.new_sess.id, 1); @@ -4897,11 +4920,13 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)  				default:  					fcport->login_pause = 1;  					tfcp->conflict = fcport; -					qlt_schedule_sess_for_deletion(tfcp); +					dfcp = tfcp;  					break;  				}  			}  			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); +			if (dfcp) +				qlt_schedule_sess_for_deletion(tfcp);  			wwn = wwn_to_u64(fcport->node_name); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index fc89af8fe256..b49ac85f3de2 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1224,10 +1224,10 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)  	}  } -/* ha->tgt.sess_lock supposed to be held on entry */  void qlt_schedule_sess_for_deletion(struct fc_port *sess)  {  	struct qla_tgt *tgt = sess->tgt; +	struct qla_hw_data *ha = sess->vha->hw;  	unsigned long flags;  	if (sess->disc_state == DSC_DELETE_PEND) @@ -1244,16 +1244,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)  			return;  	} +	spin_lock_irqsave(&ha->tgt.sess_lock, flags);  	if (sess->deleted == QLA_SESS_DELETED)  		sess->logout_on_delete = 0; -	spin_lock_irqsave(&sess->vha->work_lock, flags);  	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { -		spin_unlock_irqrestore(&sess->vha->work_lock, flags); +		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);  		return;  	}  	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; -	spin_unlock_irqrestore(&sess->vha->work_lock, flags); +	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);  	sess->disc_state = DSC_DELETE_PEND; @@ -1262,13 +1262,10 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)  	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,  	    "Scheduling sess %p for deletion\n", 
sess); -	/* use cancel to push work element through before re-queue */ -	cancel_work_sync(&sess->del_work);  	INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); -	queue_work(sess->vha->hw->wq, &sess->del_work); +	WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));  } -/* ha->tgt.sess_lock supposed to be held on entry */  static void qlt_clear_tgt_db(struct qla_tgt *tgt)  {  	struct fc_port *sess; @@ -1451,8 +1448,8 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)  	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);  	sess->local = 1; -	qlt_schedule_sess_for_deletion(sess);  	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); +	qlt_schedule_sess_for_deletion(sess);  }  static inline int test_tgt_sess_count(struct qla_tgt *tgt) @@ -1512,10 +1509,8 @@ int qlt_stop_phase1(struct qla_tgt *tgt)  	 * Lock is needed, because we still can get an incoming packet.  	 */  	mutex_lock(&vha->vha_tgt.tgt_mutex); -	spin_lock_irqsave(&ha->tgt.sess_lock, flags);  	tgt->tgt_stop = 1;  	qlt_clear_tgt_db(tgt); -	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);  	mutex_unlock(&vha->vha_tgt.tgt_mutex);  	mutex_unlock(&qla_tgt_mutex); @@ -4871,8 +4866,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,  				    sess);  				qlt_send_term_imm_notif(vha, iocb, 1);  				res = 0; -				spin_lock_irqsave(&tgt->ha->tgt.sess_lock, -				    flags);  				break;  			} diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index fc233717355f..817f312023a9 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -168,6 +168,8 @@  #define DEV_DB_NON_PERSISTENT	0  #define DEV_DB_PERSISTENT	1 +#define QL4_ISP_REG_DISCONNECT 0xffffffffU +  #define COPY_ISID(dst_isid, src_isid) {			\  	int i, j;					\  	for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;)	\ diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 82e889bbe0ed..fc2c97d9a0d6 100644 --- 
a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -262,6 +262,24 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {  static struct scsi_transport_template *qla4xxx_scsi_transport; +static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha) +{ +	u32 reg_val = 0; +	int rval = QLA_SUCCESS; + +	if (is_qla8022(ha)) +		reg_val = readl(&ha->qla4_82xx_reg->host_status); +	else if (is_qla8032(ha) || is_qla8042(ha)) +		reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); +	else +		reg_val = readw(&ha->reg->ctrl_status); + +	if (reg_val == QL4_ISP_REG_DISCONNECT) +		rval = QLA_ERROR; + +	return rval; +} +  static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,  			     uint32_t iface_type, uint32_t payload_size,  			     uint32_t pid, struct sockaddr *dst_addr) @@ -9186,10 +9204,17 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)  	struct srb *srb = NULL;  	int ret = SUCCESS;  	int wait = 0; +	int rval;  	ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",  		   ha->host_no, id, lun, cmd, cmd->cmnd[0]); +	rval = qla4xxx_isp_check_reg(ha); +	if (rval != QLA_SUCCESS) { +		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); +		return FAILED; +	} +  	spin_lock_irqsave(&ha->hardware_lock, flags);  	srb = (struct srb *) CMD_SP(cmd);  	if (!srb) { @@ -9241,6 +9266,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)  	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);  	struct ddb_entry *ddb_entry = cmd->device->hostdata;  	int ret = FAILED, stat; +	int rval;  	if (!ddb_entry)  		return ret; @@ -9260,6 +9286,12 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)  		      cmd, jiffies, cmd->request->timeout / HZ,  		      ha->dpc_flags, cmd->result, cmd->allowed)); +	rval = qla4xxx_isp_check_reg(ha); +	if (rval != QLA_SUCCESS) { +		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); +		return FAILED; +	} +  	/* FIXME: wait for hba to 
go online */  	stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);  	if (stat != QLA_SUCCESS) { @@ -9303,6 +9335,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)  	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);  	struct ddb_entry *ddb_entry = cmd->device->hostdata;  	int stat, ret; +	int rval;  	if (!ddb_entry)  		return FAILED; @@ -9320,6 +9353,12 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)  		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,  		      ha->dpc_flags, cmd->result, cmd->allowed)); +	rval = qla4xxx_isp_check_reg(ha); +	if (rval != QLA_SUCCESS) { +		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); +		return FAILED; +	} +  	stat = qla4xxx_reset_target(ha, ddb_entry);  	if (stat != QLA_SUCCESS) {  		starget_printk(KERN_INFO, scsi_target(cmd->device), @@ -9374,9 +9413,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)  {  	int return_status = FAILED;  	struct scsi_qla_host *ha; +	int rval;  	ha = to_qla_host(cmd->device->host); +	rval = qla4xxx_isp_check_reg(ha); +	if (rval != QLA_SUCCESS) { +		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); +		return FAILED; +	} +  	if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)  		qla4_83xx_set_idc_dontreset(ha); diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index d042915ce895..ca53a5f785ee 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -223,7 +223,8 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd)  static void scsi_eh_inc_host_failed(struct rcu_head *head)  { -	struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu); +	struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu); +	struct Scsi_Host *shost = scmd->device->host;  	unsigned long flags;  	spin_lock_irqsave(shost->host_lock, flags); @@ -259,7 +260,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)  	 * Ensure that all tasks observe the host state change before the  	 * 
host_failed change.  	 */ -	call_rcu(&shost->rcu, scsi_eh_inc_host_failed); +	call_rcu(&scmd->rcu, scsi_eh_inc_host_failed);  }  /** diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index a86df9ca7d1c..c84f931388f2 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -671,6 +671,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error,  	if (!blk_rq_is_scsi(req)) {  		WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));  		cmd->flags &= ~SCMD_INITIALIZED; +		destroy_rcu_head(&cmd->rcu);  	}  	if (req->mq_ctx) { @@ -720,6 +721,8 @@ static blk_status_t __scsi_error_from_host_byte(struct scsi_cmnd *cmd,  		int result)  {  	switch (host_byte(result)) { +	case DID_OK: +		return BLK_STS_OK;  	case DID_TRANSPORT_FAILFAST:  		return BLK_STS_TRANSPORT;  	case DID_TARGET_FAILURE: @@ -1151,6 +1154,7 @@ static void scsi_initialize_rq(struct request *rq)  	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);  	scsi_req_init(&cmd->req); +	init_rcu_head(&cmd->rcu);  	cmd->jiffies_at_alloc = jiffies;  	cmd->retries = 0;  } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index bff21e636ddd..3541caf3fceb 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2595,6 +2595,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)  	int res;  	struct scsi_device *sdp = sdkp->device;  	struct scsi_mode_data data; +	int disk_ro = get_disk_ro(sdkp->disk);  	int old_wp = sdkp->write_prot;  	set_disk_ro(sdkp->disk, 0); @@ -2635,7 +2636,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)  			  "Test WP failed, assume Write Enabled\n");  	} else {  		sdkp->write_prot = ((data.device_specific & 0x80) != 0); -		set_disk_ro(sdkp->disk, sdkp->write_prot); +		set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro);  		if (sdkp->first_scan || old_wp != sdkp->write_prot) {  			sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",  				  sdkp->write_prot ? 
"on" : "off"); diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 6c348a211ebb..89cf4498f535 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -403,7 +403,7 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)   */  static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)  { -	u64 zone_blocks; +	u64 zone_blocks = 0;  	sector_t block = 0;  	unsigned char *buf;  	unsigned char *rec; @@ -421,10 +421,8 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)  	/* Do a report zone to get the same field */  	ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0); -	if (ret) { -		zone_blocks = 0; -		goto out; -	} +	if (ret) +		goto out_free;  	same = buf[4] & 0x0f;  	if (same > 0) { @@ -464,7 +462,7 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)  			ret = sd_zbc_report_zones(sdkp, buf,  						  SD_ZBC_BUF_SIZE, block);  			if (ret) -				return ret; +				goto out_free;  		}  	} while (block < sdkp->capacity); @@ -472,35 +470,32 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)  	zone_blocks = sdkp->zone_blocks;  out: -	kfree(buf); -  	if (!zone_blocks) {  		if (sdkp->first_scan)  			sd_printk(KERN_NOTICE, sdkp,  				  "Devices with non constant zone "  				  "size are not supported\n"); -		return -ENODEV; -	} - -	if (!is_power_of_2(zone_blocks)) { +		ret = -ENODEV; +	} else if (!is_power_of_2(zone_blocks)) {  		if (sdkp->first_scan)  			sd_printk(KERN_NOTICE, sdkp,  				  "Devices with non power of 2 zone "  				  "size are not supported\n"); -		return -ENODEV; -	} - -	if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) { +		ret = -ENODEV; +	} else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {  		if (sdkp->first_scan)  			sd_printk(KERN_NOTICE, sdkp,  				  "Zone size too large\n"); -		return -ENODEV; +		ret = -ENODEV; +	} else { +		sdkp->zone_blocks = zone_blocks; +		sdkp->zone_shift = ilog2(zone_blocks);  	} -	sdkp->zone_blocks = zone_blocks; -	
sdkp->zone_shift = ilog2(zone_blocks); +out_free: +	kfree(buf); -	return 0; +	return ret;  }  /** diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 40fc7a590e81..8c51d628b52e 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1311,7 +1311,8 @@ static int storvsc_do_io(struct hv_device *device,  			 */  			cpumask_and(&alloced_mask, &stor_device->alloced_cpus,  				    cpumask_of_node(cpu_to_node(q_num))); -			for_each_cpu(tgt_cpu, &alloced_mask) { +			for_each_cpu_wrap(tgt_cpu, &alloced_mask, +					outgoing_channel->target_cpu + 1) {  				if (tgt_cpu != outgoing_channel->target_cpu) {  					outgoing_channel =  					stor_device->stor_chns[tgt_cpu]; @@ -1657,7 +1658,7 @@ static struct scsi_host_template scsi_driver = {  	.eh_timed_out =		storvsc_eh_timed_out,  	.slave_alloc =		storvsc_device_alloc,  	.slave_configure =	storvsc_device_configure, -	.cmd_per_lun =		255, +	.cmd_per_lun =		2048,  	.this_id =		-1,  	.use_clustering =	ENABLE_CLUSTERING,  	/* Make sure we dont get a sg segment crosses a page boundary */ diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index ca360daa6a25..378af306fda1 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -536,7 +536,7 @@ sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fa  	 *  Look for the greatest clock divisor that allows an   	 *  input speed faster than the period.  	 
*/ -	while (div-- > 0) +	while (--div > 0)  		if (kpc >= (div_10M[div] << 2)) break;  	/* diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index a355d989b414..c7da2c185990 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -4352,6 +4352,8 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)  	/* REPORT SUPPORTED OPERATION CODES is not supported */  	sdev->no_report_opcodes = 1; +	/* WRITE_SAME command is not supported */ +	sdev->no_write_same = 1;  	ufshcd_set_queue_depth(sdev); diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index e4f5bb056fd2..ba3cfa8e279b 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -2443,39 +2443,21 @@ struct cgr_comp {  	struct completion completion;  }; -static int qman_delete_cgr_thread(void *p) +static void qman_delete_cgr_smp_call(void *p)  { -	struct cgr_comp *cgr_comp = (struct cgr_comp *)p; -	int ret; - -	ret = qman_delete_cgr(cgr_comp->cgr); -	complete(&cgr_comp->completion); - -	return ret; +	qman_delete_cgr((struct qman_cgr *)p);  }  void qman_delete_cgr_safe(struct qman_cgr *cgr)  { -	struct task_struct *thread; -	struct cgr_comp cgr_comp; -  	preempt_disable();  	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) { -		init_completion(&cgr_comp.completion); -		cgr_comp.cgr = cgr; -		thread = kthread_create(qman_delete_cgr_thread, &cgr_comp, -					"cgr_del"); - -		if (IS_ERR(thread)) -			goto out; - -		kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]); -		wake_up_process(thread); -		wait_for_completion(&cgr_comp.completion); +		smp_call_function_single(qman_cgr_cpus[cgr->cgrid], +					 qman_delete_cgr_smp_call, cgr, true);  		preempt_enable();  		return;  	} -out: +  	qman_delete_cgr(cgr);  	preempt_enable();  } diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c index 53f7275d6cbd..750f93197411 100644 --- a/drivers/soc/imx/gpc.c +++ b/drivers/soc/imx/gpc.c @@ -348,7 +348,7 @@ static int imx_gpc_old_dt_init(struct device 
*dev, struct regmap *regmap,  		if (i == 1) {  			domain->supply = devm_regulator_get(dev, "pu");  			if (IS_ERR(domain->supply)) -				return PTR_ERR(domain->supply);; +				return PTR_ERR(domain->supply);  			ret = imx_pgc_get_clocks(dev, domain);  			if (ret) @@ -470,13 +470,21 @@ static int imx_gpc_probe(struct platform_device *pdev)  static int imx_gpc_remove(struct platform_device *pdev)  { +	struct device_node *pgc_node;  	int ret; +	pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc"); + +	/* bail out if DT too old and doesn't provide the necessary info */ +	if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") && +	    !pgc_node) +		return 0; +  	/*  	 * If the old DT binding is used the toplevel driver needs to  	 * de-register the power domains  	 */ -	if (!of_get_child_by_name(pdev->dev.of_node, "pgc")) { +	if (!pgc_node) {  		of_genpd_del_provider(pdev->dev.of_node);  		ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base); diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index bbdc53b686dd..86580b6df33d 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -326,24 +326,23 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)  	mutex_lock(&ashmem_mutex);  	if (asma->size == 0) { -		ret = -EINVAL; -		goto out; +		mutex_unlock(&ashmem_mutex); +		return -EINVAL;  	}  	if (!asma->file) { -		ret = -EBADF; -		goto out; +		mutex_unlock(&ashmem_mutex); +		return -EBADF;  	} +	mutex_unlock(&ashmem_mutex); +  	ret = vfs_llseek(asma->file, offset, origin);  	if (ret < 0) -		goto out; +		return ret;  	/** Copy f_pos from backing file, since f_ops->llseek() sets it */  	file->f_pos = asma->file->f_pos; - -out: -	mutex_unlock(&ashmem_mutex);  	return ret;  } @@ -702,30 +701,30 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,  	size_t pgstart, pgend;  	int ret = -EINVAL; -	if (unlikely(!asma->file)) -		return -EINVAL; -  	if 
(unlikely(copy_from_user(&pin, p, sizeof(pin))))  		return -EFAULT; +	mutex_lock(&ashmem_mutex); + +	if (unlikely(!asma->file)) +		goto out_unlock; +  	/* per custom, you can pass zero for len to mean "everything onward" */  	if (!pin.len)  		pin.len = PAGE_ALIGN(asma->size) - pin.offset;  	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK)) -		return -EINVAL; +		goto out_unlock;  	if (unlikely(((__u32)-1) - pin.offset < pin.len)) -		return -EINVAL; +		goto out_unlock;  	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len)) -		return -EINVAL; +		goto out_unlock;  	pgstart = pin.offset / PAGE_SIZE;  	pgend = pgstart + (pin.len / PAGE_SIZE) - 1; -	mutex_lock(&ashmem_mutex); -  	switch (cmd) {  	case ASHMEM_PIN:  		ret = ashmem_pin(asma, pgstart, pgend); @@ -738,6 +737,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,  		break;  	} +out_unlock:  	mutex_unlock(&ashmem_mutex);  	return ret; diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c index 94e06925c712..49718c96bf9e 100644 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -12,6 +12,7 @@  #include <linux/err.h>  #include <linux/cma.h>  #include <linux/scatterlist.h> +#include <linux/highmem.h>  #include "ion.h" @@ -42,6 +43,22 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,  	if (!pages)  		return -ENOMEM; +	if (PageHighMem(pages)) { +		unsigned long nr_clear_pages = nr_pages; +		struct page *page = pages; + +		while (nr_clear_pages > 0) { +			void *vaddr = kmap_atomic(page); + +			memset(vaddr, 0, PAGE_SIZE); +			kunmap_atomic(vaddr); +			page++; +			nr_clear_pages--; +		} +	} else { +		memset(page_address(pages), 0, size); +	} +  	table = kmalloc(sizeof(*table), GFP_KERNEL);  	if (!table)  		goto err; diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index e618a87521a3..9d733471ca2e 100644 --- 
a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c @@ -475,8 +475,7 @@ unsigned int comedi_nsamples_left(struct comedi_subdevice *s,  	struct comedi_cmd *cmd = &async->cmd;  	if (cmd->stop_src == TRIG_COUNT) { -		unsigned int nscans = nsamples / cmd->scan_end_arg; -		unsigned int scans_left = __comedi_nscans_left(s, nscans); +		unsigned int scans_left = __comedi_nscans_left(s, cmd->stop_arg);  		unsigned int scan_pos =  		    comedi_bytes_to_samples(s, async->scan_progress);  		unsigned long long samples_left = 0; diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig index 1f9100049176..b35ef7ee6901 100644 --- a/drivers/staging/fsl-mc/bus/Kconfig +++ b/drivers/staging/fsl-mc/bus/Kconfig @@ -7,7 +7,7 @@  config FSL_MC_BUS  	bool "QorIQ DPAA2 fsl-mc bus driver" -	depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86 || PPC))) +	depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86_LOCAL_APIC || PPC)))  	select GENERIC_MSI_IRQ_DOMAIN  	help  	  Driver to enable the bus infrastructure for the QorIQ DPAA2 diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c index 5064d5ddf581..fc2013aade51 100644 --- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c +++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c @@ -73,6 +73,8 @@ static int __init its_fsl_mc_msi_init(void)  	for (np = of_find_matching_node(NULL, its_device_id); np;  	     np = of_find_matching_node(np, its_device_id)) { +		if (!of_device_is_available(np)) +			continue;  		if (!of_property_read_bool(np, "msi-controller"))  			continue; diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c index f01595593ce2..425e8b82533b 100644 --- a/drivers/staging/iio/adc/ad7192.c +++ b/drivers/staging/iio/adc/ad7192.c @@ -141,6 +141,8 @@  #define AD7192_GPOCON_P1DAT	BIT(1) /* P1 state */  #define AD7192_GPOCON_P0DAT	BIT(0) 
/* P0 state */ +#define AD7192_EXT_FREQ_MHZ_MIN	2457600 +#define AD7192_EXT_FREQ_MHZ_MAX	5120000  #define AD7192_INT_FREQ_MHZ	4915200  /* NOTE: @@ -218,6 +220,12 @@ static int ad7192_calibrate_all(struct ad7192_state *st)  				ARRAY_SIZE(ad7192_calib_arr));  } +static inline bool ad7192_valid_external_frequency(u32 freq) +{ +	return (freq >= AD7192_EXT_FREQ_MHZ_MIN && +		freq <= AD7192_EXT_FREQ_MHZ_MAX); +} +  static int ad7192_setup(struct ad7192_state *st,  			const struct ad7192_platform_data *pdata)  { @@ -243,17 +251,20 @@ static int ad7192_setup(struct ad7192_state *st,  			 id);  	switch (pdata->clock_source_sel) { -	case AD7192_CLK_EXT_MCLK1_2: -	case AD7192_CLK_EXT_MCLK2: -		st->mclk = AD7192_INT_FREQ_MHZ; -		break;  	case AD7192_CLK_INT:  	case AD7192_CLK_INT_CO: -		if (pdata->ext_clk_hz) -			st->mclk = pdata->ext_clk_hz; -		else -			st->mclk = AD7192_INT_FREQ_MHZ; +		st->mclk = AD7192_INT_FREQ_MHZ;  		break; +	case AD7192_CLK_EXT_MCLK1_2: +	case AD7192_CLK_EXT_MCLK2: +		if (ad7192_valid_external_frequency(pdata->ext_clk_hz)) { +			st->mclk = pdata->ext_clk_hz; +			break; +		} +		dev_err(&st->sd.spi->dev, "Invalid frequency setting %u\n", +			pdata->ext_clk_hz); +		ret = -EINVAL; +		goto out;  	default:  		ret = -EINVAL;  		goto out; diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index 2b28fb9c0048..3bcf49466361 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c @@ -648,8 +648,6 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)  	/* Ring buffer functions - here trigger setup related */  	indio_dev->setup_ops = &ad5933_ring_setup_ops; -	indio_dev->modes |= INDIO_BUFFER_HARDWARE; -  	return 0;  } @@ -762,7 +760,7 @@ static int ad5933_probe(struct i2c_client *client,  	indio_dev->dev.parent = &client->dev;  	indio_dev->info = &ad5933_info;  	indio_dev->name = id->name; -	indio_dev->modes = 
INDIO_DIRECT_MODE; +	indio_dev->modes = (INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE);  	indio_dev->channels = ad5933_channels;  	indio_dev->num_channels = ARRAY_SIZE(ad5933_channels); diff --git a/drivers/staging/ncpfs/ncplib_kernel.c b/drivers/staging/ncpfs/ncplib_kernel.c index 804adfebba2f..3e047eb4cc7c 100644 --- a/drivers/staging/ncpfs/ncplib_kernel.c +++ b/drivers/staging/ncpfs/ncplib_kernel.c @@ -981,6 +981,10 @@ ncp_read_kernel(struct ncp_server *server, const char *file_id,  		goto out;  	}  	*bytes_read = ncp_reply_be16(server, 0); +	if (*bytes_read > to_read) { +		result = -EINVAL; +		goto out; +	}  	source = ncp_reply_data(server, 2 + (offset & 1));  	memcpy(target, source, *bytes_read); diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 5c0e59e8fe46..cbe98bc2b998 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -2180,6 +2180,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,  				}  				if (tty_hung_up_p(file))  					break; +				/* +				 * Abort readers for ttys which never actually +				 * get hung up.  See __tty_hangup(). +				 */ +				if (test_bit(TTY_HUPPING, &tty->flags)) +					break;  				if (!timeout)  					break;  				if (file->f_flags & O_NONBLOCK) { diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 54adf8d56350..a93f77ab3da0 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -3387,11 +3387,9 @@ static int serial_pci_is_class_communication(struct pci_dev *dev)  	/*  	 * If it is not a communications device or the programming  	 * interface is greater than 6, give up. -	 * -	 * (Should we try to make guesses for multiport serial devices -	 * later?)  	 
*/  	if ((((dev->class >> 8) != PCI_CLASS_COMMUNICATION_SERIAL) && +	     ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MULTISERIAL) &&  	     ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MODEM)) ||  	    (dev->class & 0xff) > 6)  		return -ENODEV; @@ -3428,6 +3426,12 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)  {  	int num_iomem, num_port, first_port = -1, i; +	/* +	 * Should we try to make guesses for multiport serial devices later? +	 */ +	if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_MULTISERIAL) +		return -ENODEV; +  	num_iomem = num_port = 0;  	for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {  		if (pci_resource_flags(dev, i) & IORESOURCE_IO) { @@ -4699,6 +4703,17 @@ static const struct pci_device_id serial_pci_tbl[] = {  		PCI_ANY_ID, PCI_ANY_ID, 0, 0,    /* 135a.0dc0 */  		pbn_b2_4_115200 },  	/* +	 * BrainBoxes UC-260 +	 */ +	{	PCI_VENDOR_ID_INTASHIELD, 0x0D21, +		PCI_ANY_ID, PCI_ANY_ID, +		PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, +		pbn_b2_4_115200 }, +	{	PCI_VENDOR_ID_INTASHIELD, 0x0E34, +		PCI_ANY_ID, PCI_ANY_ID, +		 PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, +		pbn_b2_4_115200 }, +	/*  	 * Perle PCI-RAS cards  	 */  	{       PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index df46a9e88c34..e287fe8f10fc 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -1734,6 +1734,7 @@ static void atmel_get_ip_name(struct uart_port *port)  		switch (version) {  		case 0x302:  		case 0x10213: +		case 0x10302:  			dev_dbg(port->dev, "This version is usart\n");  			atmel_port->has_frac_baudrate = true;  			atmel_port->has_hw_timer = true; diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c index 870e84fb6e39..a24278380fec 100644 --- a/drivers/tty/serial/earlycon.c +++ b/drivers/tty/serial/earlycon.c @@ -245,11 +245,12 @@ int __init of_setup_earlycon(const struct earlycon_id 
*match,  	}  	port->mapbase = addr;  	port->uartclk = BASE_BAUD * 16; -	port->membase = earlycon_map(port->mapbase, SZ_4K);  	val = of_get_flat_dt_prop(node, "reg-offset", NULL);  	if (val)  		port->mapbase += be32_to_cpu(*val); +	port->membase = earlycon_map(port->mapbase, SZ_4K); +  	val = of_get_flat_dt_prop(node, "reg-shift", NULL);  	if (val)  		port->regshift = be32_to_cpu(*val); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 1d7ca382bc12..a33c685af990 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -2093,7 +2093,7 @@ static int serial_imx_probe(struct platform_device *pdev)  	uart_get_rs485_mode(&pdev->dev, &sport->port.rs485);  	if (sport->port.rs485.flags & SER_RS485_ENABLED && -	    (!sport->have_rtscts || !sport->have_rtsgpio)) +	    (!sport->have_rtscts && !sport->have_rtsgpio))  		dev_err(&pdev->dev, "no RTS control, disabling rs485\n");  	imx_rs485_config(&sport->port, &sport->port.rs485); diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index c8dde56b532b..35b9201db3b4 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1144,6 +1144,8 @@ static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state)  		uport->ops->config_port(uport, flags);  		ret = uart_startup(tty, state, 1); +		if (ret == 0) +			tty_port_set_initialized(port, true);  		if (ret > 0)  			ret = 0;  	} diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 7257c078e155..44adf9db38f8 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -885,6 +885,8 @@ static void sci_receive_chars(struct uart_port *port)  		/* Tell the rest of the system the news. New characters! 
*/  		tty_flip_buffer_push(tport);  	} else { +		/* TTY buffers full; read from RX reg to prevent lockup */ +		serial_port_in(port, SCxRDR);  		serial_port_in(port, SCxSR); /* dummy read */  		sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));  	} diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index eb9133b472f4..63114ea35ec1 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -586,6 +586,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)  		return;  	} +	/* +	 * Some console devices aren't actually hung up for technical and +	 * historical reasons, which can lead to indefinite interruptible +	 * sleep in n_tty_read().  The following explicitly tells +	 * n_tty_read() to abort readers. +	 */ +	set_bit(TTY_HUPPING, &tty->flags); +  	/* inuse_filps is protected by the single tty lock,  	   this really needs to change if we want to flush the  	   workqueue with the lock held */ @@ -640,6 +648,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)  	 * from the ldisc side, which is now guaranteed.  	 
*/  	set_bit(TTY_HUPPED, &tty->flags); +	clear_bit(TTY_HUPPING, &tty->flags);  	tty_unlock(tty);  	if (f) diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 88b902c525d7..b4e57c5a8bba 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1727,7 +1727,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)  	default_attr(vc);  	update_attr(vc); -	vc->vc_tab_stop[0]	= 0x01010100; +	vc->vc_tab_stop[0]	=  	vc->vc_tab_stop[1]	=  	vc->vc_tab_stop[2]	=  	vc->vc_tab_stop[3]	= @@ -1771,7 +1771,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)  		vc->vc_pos -= (vc->vc_x << 1);  		while (vc->vc_x < vc->vc_cols - 1) {  			vc->vc_x++; -			if (vc->vc_tab_stop[vc->vc_x >> 5] & (1 << (vc->vc_x & 31))) +			if (vc->vc_tab_stop[7 & (vc->vc_x >> 5)] & (1 << (vc->vc_x & 31)))  				break;  		}  		vc->vc_pos += (vc->vc_x << 1); @@ -1831,7 +1831,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)  			lf(vc);  			return;  		case 'H': -			vc->vc_tab_stop[vc->vc_x >> 5] |= (1 << (vc->vc_x & 31)); +			vc->vc_tab_stop[7 & (vc->vc_x >> 5)] |= (1 << (vc->vc_x & 31));  			return;  		case 'Z':  			respond_ID(tty); @@ -2024,7 +2024,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)  			return;  		case 'g':  			if (!vc->vc_par[0]) -				vc->vc_tab_stop[vc->vc_x >> 5] &= ~(1 << (vc->vc_x & 31)); +				vc->vc_tab_stop[7 & (vc->vc_x >> 5)] &= ~(1 << (vc->vc_x & 31));  			else if (vc->vc_par[0] == 3) {  				vc->vc_tab_stop[0] =  					vc->vc_tab_stop[1] = diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index f699abab1787..148f3ee70286 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -19,6 +19,12 @@ config USB_EHCI_BIG_ENDIAN_MMIO  config USB_EHCI_BIG_ENDIAN_DESC  	bool +config USB_UHCI_BIG_ENDIAN_MMIO +	bool + +config USB_UHCI_BIG_ENDIAN_DESC +	bool +  menuconfig USB_SUPPORT  	bool "USB support"  	depends on HAS_IOMEM diff --git a/drivers/usb/class/cdc-acm.c 
b/drivers/usb/class/cdc-acm.c index 06b3b54a0e68..7b366a6c0b49 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -174,6 +174,7 @@ static int acm_wb_alloc(struct acm *acm)  		wb = &acm->wb[wbn];  		if (!wb->use) {  			wb->use = 1; +			wb->len = 0;  			return wbn;  		}  		wbn = (wbn + 1) % ACM_NW; @@ -805,16 +806,18 @@ static int acm_tty_write(struct tty_struct *tty,  static void acm_tty_flush_chars(struct tty_struct *tty)  {  	struct acm *acm = tty->driver_data; -	struct acm_wb *cur = acm->putbuffer; +	struct acm_wb *cur;  	int err;  	unsigned long flags; +	spin_lock_irqsave(&acm->write_lock, flags); + +	cur = acm->putbuffer;  	if (!cur) /* nothing to do */ -		return; +		goto out;  	acm->putbuffer = NULL;  	err = usb_autopm_get_interface_async(acm->control); -	spin_lock_irqsave(&acm->write_lock, flags);  	if (err < 0) {  		cur->use = 0;  		acm->putbuffer = cur; diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index c64cf6c4a83d..0c11d40a12bc 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -151,6 +151,10 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,  	ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); +	/* Linger a bit, prior to the next control message. 
*/ +	if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) +		msleep(200); +  	kfree(dr);  	return ret; diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 4024926c1d68..54b019e267c5 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -226,8 +226,12 @@ static const struct usb_device_id usb_quirk_list[] = {  	{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =  			USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, +	/* Corsair K70 RGB */ +	{ USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, +  	/* Corsair Strafe RGB */ -	{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, +	{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | +	  USB_QUIRK_DELAY_CTRL_MSG },  	/* Corsair K70 LUX */  	{ USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index e4c3ce0de5de..5bcad1d869b5 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -1917,7 +1917,9 @@ static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,  		/* Not specific buffer needed for ep0 ZLP */  		dma_addr_t dma = hs_ep->desc_list_dma; -		dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep); +		if (!index) +			dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep); +  		dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);  	} else {  		dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) | @@ -2974,9 +2976,13 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,  	if (ints & DXEPINT_STSPHSERCVD) {  		dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__); -		/* Move to STATUS IN for DDMA */ -		if (using_desc_dma(hsotg)) -			dwc2_hsotg_ep0_zlp(hsotg, true); +		/* Safety check EP0 state when STSPHSERCVD asserted */ +		if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) { +			/* Move to STATUS IN for DDMA */ +			if (using_desc_dma(hsotg)) +				dwc2_hsotg_ep0_zlp(hsotg, true); +		} +  	}  	if (ints & DXEPINT_BACK2BACKSETUP) @@ -3375,12 +3381,6 @@ void 
dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,  	dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |  	       DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0); -	dwc2_hsotg_enqueue_setup(hsotg); - -	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", -		dwc2_readl(hsotg->regs + DIEPCTL0), -		dwc2_readl(hsotg->regs + DOEPCTL0)); -  	/* clear global NAKs */  	val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;  	if (!is_usb_reset) @@ -3391,6 +3391,12 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,  	mdelay(3);  	hsotg->lx_state = DWC2_L0; + +	dwc2_hsotg_enqueue_setup(hsotg); + +	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", +		dwc2_readl(hsotg->regs + DIEPCTL0), +		dwc2_readl(hsotg->regs + DOEPCTL0));  }  static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index 03fd20f0b496..c4a47496d2fb 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c @@ -137,7 +137,7 @@ static void dwc2_set_stm32f4x9_fsotg_params(struct dwc2_hsotg *hsotg)  	p->activate_stm_fs_transceiver = true;  } -static void dwc2_set_stm32f7xx_hsotg_params(struct dwc2_hsotg *hsotg) +static void dwc2_set_stm32f7_hsotg_params(struct dwc2_hsotg *hsotg)  {  	struct dwc2_core_params *p = &hsotg->params; @@ -164,8 +164,8 @@ const struct of_device_id dwc2_of_match_table[] = {  	{ .compatible = "st,stm32f4x9-fsotg",  	  .data = dwc2_set_stm32f4x9_fsotg_params },  	{ .compatible = "st,stm32f4x9-hsotg" }, -	{ .compatible = "st,stm32f7xx-hsotg", -	  .data = dwc2_set_stm32f7xx_hsotg_params }, +	{ .compatible = "st,stm32f7-hsotg", +	  .data = dwc2_set_stm32f7_hsotg_params },  	{},  };  MODULE_DEVICE_TABLE(of, dwc2_of_match_table); diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index ade2ab00d37a..e94bf91cc58a 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -100,6 +100,8 @@ static void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)  	reg 
&= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));  	reg |= DWC3_GCTL_PRTCAPDIR(mode);  	dwc3_writel(dwc->regs, DWC3_GCTL, reg); + +	dwc->current_dr_role = mode;  }  static void __dwc3_set_mode(struct work_struct *work) @@ -133,8 +135,6 @@ static void __dwc3_set_mode(struct work_struct *work)  	dwc3_set_prtcap(dwc, dwc->desired_dr_role); -	dwc->current_dr_role = dwc->desired_dr_role; -  	spin_unlock_irqrestore(&dwc->lock, flags);  	switch (dwc->desired_dr_role) { @@ -175,7 +175,7 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)  	dwc->desired_dr_role = mode;  	spin_unlock_irqrestore(&dwc->lock, flags); -	queue_work(system_power_efficient_wq, &dwc->drd_work); +	queue_work(system_freezable_wq, &dwc->drd_work);  }  u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type) @@ -219,7 +219,7 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)  	 * XHCI driver will reset the host block. If dwc3 was configured for  	 * host-only mode, then we can return early.  	 */ -	if (dwc->dr_mode == USB_DR_MODE_HOST) +	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)  		return 0;  	reg = dwc3_readl(dwc->regs, DWC3_DCTL); @@ -234,6 +234,9 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)  		udelay(1);  	} while (--retries); +	phy_exit(dwc->usb3_generic_phy); +	phy_exit(dwc->usb2_generic_phy); +  	return -ETIMEDOUT;  } @@ -483,6 +486,22 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc)  	parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);  } +static int dwc3_core_ulpi_init(struct dwc3 *dwc) +{ +	int intf; +	int ret = 0; + +	intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3); + +	if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI || +	    (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI && +	     dwc->hsphy_interface && +	     !strncmp(dwc->hsphy_interface, "ulpi", 4))) +		ret = dwc3_ulpi_init(dwc); + +	return ret; +} +  /**   * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core   * @dwc: Pointer to our controller context structure @@ -494,7 +513,6 @@ static void 
dwc3_cache_hwparams(struct dwc3 *dwc)  static int dwc3_phy_setup(struct dwc3 *dwc)  {  	u32 reg; -	int ret;  	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); @@ -565,9 +583,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc)  		}  		/* FALLTHROUGH */  	case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI: -		ret = dwc3_ulpi_init(dwc); -		if (ret) -			return ret;  		/* FALLTHROUGH */  	default:  		break; @@ -724,6 +739,7 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)  }  static int dwc3_core_get_phy(struct dwc3 *dwc); +static int dwc3_core_ulpi_init(struct dwc3 *dwc);  /**   * dwc3_core_init - Low-level initialization of DWC3 Core @@ -755,17 +771,27 @@ static int dwc3_core_init(struct dwc3 *dwc)  			dwc->maximum_speed = USB_SPEED_HIGH;  	} -	ret = dwc3_core_get_phy(dwc); +	ret = dwc3_phy_setup(dwc);  	if (ret)  		goto err0; -	ret = dwc3_core_soft_reset(dwc); -	if (ret) -		goto err0; +	if (!dwc->ulpi_ready) { +		ret = dwc3_core_ulpi_init(dwc); +		if (ret) +			goto err0; +		dwc->ulpi_ready = true; +	} -	ret = dwc3_phy_setup(dwc); +	if (!dwc->phys_ready) { +		ret = dwc3_core_get_phy(dwc); +		if (ret) +			goto err0a; +		dwc->phys_ready = true; +	} + +	ret = dwc3_core_soft_reset(dwc);  	if (ret) -		goto err0; +		goto err0a;  	dwc3_core_setup_global_control(dwc);  	dwc3_core_num_eps(dwc); @@ -838,6 +864,9 @@ err1:  	phy_exit(dwc->usb2_generic_phy);  	phy_exit(dwc->usb3_generic_phy); +err0a: +	dwc3_ulpi_exit(dwc); +  err0:  	return ret;  } @@ -916,7 +945,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)  	switch (dwc->dr_mode) {  	case USB_DR_MODE_PERIPHERAL: -		dwc->current_dr_role = DWC3_GCTL_PRTCAP_DEVICE;  		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);  		if (dwc->usb2_phy) @@ -932,7 +960,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)  		}  		break;  	case USB_DR_MODE_HOST: -		dwc->current_dr_role = DWC3_GCTL_PRTCAP_HOST;  		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);  		if (dwc->usb2_phy) @@ -1234,7 +1261,6 @@ err4:  err3:  	dwc3_free_event_buffers(dwc); 
-	dwc3_ulpi_exit(dwc);  err2:  	pm_runtime_allow(&pdev->dev); @@ -1284,7 +1310,7 @@ static int dwc3_remove(struct platform_device *pdev)  }  #ifdef CONFIG_PM -static int dwc3_suspend_common(struct dwc3 *dwc) +static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)  {  	unsigned long	flags; @@ -1296,6 +1322,10 @@ static int dwc3_suspend_common(struct dwc3 *dwc)  		dwc3_core_exit(dwc);  		break;  	case DWC3_GCTL_PRTCAP_HOST: +		/* do nothing during host runtime_suspend */ +		if (!PMSG_IS_AUTO(msg)) +			dwc3_core_exit(dwc); +		break;  	default:  		/* do nothing */  		break; @@ -1304,7 +1334,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc)  	return 0;  } -static int dwc3_resume_common(struct dwc3 *dwc) +static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)  {  	unsigned long	flags;  	int		ret; @@ -1320,6 +1350,13 @@ static int dwc3_resume_common(struct dwc3 *dwc)  		spin_unlock_irqrestore(&dwc->lock, flags);  		break;  	case DWC3_GCTL_PRTCAP_HOST: +		/* nothing to do on host runtime_resume */ +		if (!PMSG_IS_AUTO(msg)) { +			ret = dwc3_core_init(dwc); +			if (ret) +				return ret; +		} +		break;  	default:  		/* do nothing */  		break; @@ -1331,12 +1368,11 @@ static int dwc3_resume_common(struct dwc3 *dwc)  static int dwc3_runtime_checks(struct dwc3 *dwc)  {  	switch (dwc->current_dr_role) { -	case USB_DR_MODE_PERIPHERAL: -	case USB_DR_MODE_OTG: +	case DWC3_GCTL_PRTCAP_DEVICE:  		if (dwc->connected)  			return -EBUSY;  		break; -	case USB_DR_MODE_HOST: +	case DWC3_GCTL_PRTCAP_HOST:  	default:  		/* do nothing */  		break; @@ -1353,7 +1389,7 @@ static int dwc3_runtime_suspend(struct device *dev)  	if (dwc3_runtime_checks(dwc))  		return -EBUSY; -	ret = dwc3_suspend_common(dwc); +	ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND);  	if (ret)  		return ret; @@ -1369,7 +1405,7 @@ static int dwc3_runtime_resume(struct device *dev)  	device_init_wakeup(dev, false); -	ret = dwc3_resume_common(dwc); +	ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME); 
 	if (ret)  		return ret; @@ -1416,7 +1452,7 @@ static int dwc3_suspend(struct device *dev)  	struct dwc3	*dwc = dev_get_drvdata(dev);  	int		ret; -	ret = dwc3_suspend_common(dwc); +	ret = dwc3_suspend_common(dwc, PMSG_SUSPEND);  	if (ret)  		return ret; @@ -1432,7 +1468,7 @@ static int dwc3_resume(struct device *dev)  	pinctrl_pm_select_default_state(dev); -	ret = dwc3_resume_common(dwc); +	ret = dwc3_resume_common(dwc, PMSG_RESUME);  	if (ret)  		return ret; diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 03c7aaaac926..860d2bc184d1 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -158,13 +158,15 @@  #define DWC3_GDBGFIFOSPACE_TYPE(n)	(((n) << 5) & 0x1e0)  #define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff) -#define DWC3_TXFIFOQ		1 -#define DWC3_RXFIFOQ		3 -#define DWC3_TXREQQ		5 -#define DWC3_RXREQQ		7 -#define DWC3_RXINFOQ		9 -#define DWC3_DESCFETCHQ		13 -#define DWC3_EVENTQ		15 +#define DWC3_TXFIFOQ		0 +#define DWC3_RXFIFOQ		1 +#define DWC3_TXREQQ		2 +#define DWC3_RXREQQ		3 +#define DWC3_RXINFOQ		4 +#define DWC3_PSTATQ		5 +#define DWC3_DESCFETCHQ		6 +#define DWC3_EVENTQ		7 +#define DWC3_AUXEVENTQ		8  /* Global RX Threshold Configuration Register */  #define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19) @@ -795,7 +797,9 @@ struct dwc3_scratchpad_array {   * @usb3_phy: pointer to USB3 PHY   * @usb2_generic_phy: pointer to USB2 PHY   * @usb3_generic_phy: pointer to USB3 PHY + * @phys_ready: flag to indicate that PHYs are ready   * @ulpi: pointer to ulpi interface + * @ulpi_ready: flag to indicate that ULPI is initialized   * @u2sel: parameter from Set SEL request.   * @u2pel: parameter from Set SEL request.   * @u1sel: parameter from Set SEL request. 
@@ -893,7 +897,10 @@ struct dwc3 {  	struct phy		*usb2_generic_phy;  	struct phy		*usb3_generic_phy; +	bool			phys_ready; +  	struct ulpi		*ulpi; +	bool			ulpi_ready;  	void __iomem		*regs;  	size_t			regs_size; diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index 7ae0eefc7cc7..e54c3622eb28 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c @@ -143,6 +143,7 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)  		clk_disable_unprepare(simple->clks[i]);  		clk_put(simple->clks[i]);  	} +	simple->num_clocks = 0;  	reset_control_assert(simple->resets);  	reset_control_put(simple->resets); diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index a4719e853b85..ed8b86517675 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c @@ -582,9 +582,25 @@ static int dwc3_omap_resume(struct device *dev)  	return 0;  } +static void dwc3_omap_complete(struct device *dev) +{ +	struct dwc3_omap	*omap = dev_get_drvdata(dev); + +	if (extcon_get_state(omap->edev, EXTCON_USB)) +		dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID); +	else +		dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF); + +	if (extcon_get_state(omap->edev, EXTCON_USB_HOST)) +		dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND); +	else +		dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT); +} +  static const struct dev_pm_ops dwc3_omap_dev_pm_ops = {  	SET_SYSTEM_SLEEP_PM_OPS(dwc3_omap_suspend, dwc3_omap_resume) +	.complete = dwc3_omap_complete,  };  #define DEV_PM_OPS	(&dwc3_omap_dev_pm_ops) diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 9c2e4a17918e..18be31d5743a 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -854,7 +854,12 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,  		trb++;  		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;  		trace_dwc3_complete_trb(ep0, trb); -		ep0->trb_enqueue = 0; + +		if (r->direction) +			dwc->eps[1]->trb_enqueue = 0; +		else +			
dwc->eps[0]->trb_enqueue = 0; +  		dwc->ep0_bounced = false;  	} diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 616ef49ccb49..2bda4eb1e9ac 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2745,6 +2745,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)  		break;  	} +	dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket; +  	/* Enable USB2 LPM Capability */  	if ((dwc->revision > DWC3_REVISION_194A) && diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 8f2cf3baa19c..d2428a9e8900 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1538,7 +1538,6 @@ ffs_fs_kill_sb(struct super_block *sb)  	if (sb->s_fs_info) {  		ffs_release_dev(sb->s_fs_info);  		ffs_data_closed(sb->s_fs_info); -		ffs_data_put(sb->s_fs_info);  	}  } @@ -1855,44 +1854,20 @@ static int ffs_func_eps_enable(struct ffs_function *func)  	spin_lock_irqsave(&func->ffs->eps_lock, flags);  	while(count--) { -		struct usb_endpoint_descriptor *ds; -		struct usb_ss_ep_comp_descriptor *comp_desc = NULL; -		int needs_comp_desc = false; -		int desc_idx; - -		if (ffs->gadget->speed == USB_SPEED_SUPER) { -			desc_idx = 2; -			needs_comp_desc = true; -		} else if (ffs->gadget->speed == USB_SPEED_HIGH) -			desc_idx = 1; -		else -			desc_idx = 0; - -		/* fall-back to lower speed if desc missing for current speed */ -		do { -			ds = ep->descs[desc_idx]; -		} while (!ds && --desc_idx >= 0); - -		if (!ds) { -			ret = -EINVAL; -			break; -		} -  		ep->ep->driver_data = ep; -		ep->ep->desc = ds; -		if (needs_comp_desc) { -			comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + -					USB_DT_ENDPOINT_SIZE); -			ep->ep->maxburst = comp_desc->bMaxBurst + 1; -			ep->ep->comp_desc = comp_desc; +		ret = config_ep_by_speed(func->gadget, &func->function, ep->ep); +		if (ret) { +			pr_err("%s: config_ep_by_speed(%s) returned %d\n", +					__func__, ep->ep->name, ret); +			break;  		
}  		ret = usb_ep_enable(ep->ep);  		if (likely(!ret)) {  			epfile->ep = ep; -			epfile->in = usb_endpoint_dir_in(ds); -			epfile->isoc = usb_endpoint_xfer_isoc(ds); +			epfile->in = usb_endpoint_dir_in(ep->ep->desc); +			epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc);  		} else {  			break;  		} @@ -2979,10 +2954,8 @@ static int _ffs_func_bind(struct usb_configuration *c,  	struct ffs_data *ffs = func->ffs;  	const int full = !!func->ffs->fs_descs_count; -	const int high = gadget_is_dualspeed(func->gadget) && -		func->ffs->hs_descs_count; -	const int super = gadget_is_superspeed(func->gadget) && -		func->ffs->ss_descs_count; +	const int high = !!func->ffs->hs_descs_count; +	const int super = !!func->ffs->ss_descs_count;  	int fs_len, hs_len, ss_len, ret, i;  	struct ffs_ep *eps_ptr; diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 11fe788b4308..d2dc1f00180b 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -524,6 +524,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)  		dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);  		return ret;  	} +	iad_desc.bFirstInterface = ret; +  	std_ac_if_desc.bInterfaceNumber = ret;  	uac2->ac_intf = ret;  	uac2->ac_alt = 0; diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index 1e9567091d86..0875d38476ee 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig @@ -274,7 +274,6 @@ config USB_SNP_UDC_PLAT  	tristate "Synopsys USB 2.0 Device controller"  	depends on USB_GADGET && OF && HAS_DMA  	depends on EXTCON || EXTCON=n -	select USB_GADGET_DUALSPEED  	select USB_SNP_CORE  	default ARCH_BCM_IPROC  	help diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c index 1e940f054cb8..6dbc489513cd 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_pci.c +++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c @@ -77,6 +77,7 @@ static int 
bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)  	if (ret) {  		dev_err(&pci->dev,  			"couldn't add resources to bdc device\n"); +		platform_device_put(bdc);  		return ret;  	} diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 859d5b11ba4c..1f8b19d9cf97 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -180,8 +180,8 @@ EXPORT_SYMBOL_GPL(usb_ep_alloc_request);  void usb_ep_free_request(struct usb_ep *ep,  				       struct usb_request *req)  { -	ep->ops->free_request(ep, req);  	trace_usb_ep_free_request(ep, req, 0); +	ep->ops->free_request(ep, req);  }  EXPORT_SYMBOL_GPL(usb_ep_free_request); diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c index e5b4ee96c4bf..56b517a38865 100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c @@ -1305,7 +1305,7 @@ static void udc_reset_ep_queue(struct fsl_udc *udc, u8 pipe)  {  	struct fsl_ep *ep = get_ep_by_pipe(udc, pipe); -	if (ep->name) +	if (ep->ep.name)  		nuke(ep, -ESHUTDOWN);  } @@ -1693,7 +1693,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)  		curr_ep = get_ep_by_pipe(udc, i);  		/* If the ep is configured */ -		if (curr_ep->name == NULL) { +		if (!curr_ep->ep.name) {  			WARNING("Invalid EP?");  			continue;  		} diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 6e87af248367..409cde4e6a51 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -2410,7 +2410,7 @@ static int renesas_usb3_remove(struct platform_device *pdev)  	__renesas_usb3_ep_free_request(usb3->ep0_req);  	if (usb3->phy)  		phy_put(usb3->phy); -	pm_runtime_disable(usb3_to_dev(usb3)); +	pm_runtime_disable(&pdev->dev);  	return 0;  } diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 6150bed7cfa8..4fcfb3084b36 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ 
-633,14 +633,6 @@ config USB_UHCI_ASPEED         bool         default y if ARCH_ASPEED -config USB_UHCI_BIG_ENDIAN_MMIO -	bool -	default y if SPARC_LEON - -config USB_UHCI_BIG_ENDIAN_DESC -	bool -	default y if SPARC_LEON -  config USB_FHCI_HCD  	tristate "Freescale QE USB Host Controller support"  	depends on OF_GPIO && QE_GPIO && QUICC_ENGINE diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index facafdf8fb95..d7641cbdee43 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -774,12 +774,12 @@ static struct urb *request_single_step_set_feature_urb(  	atomic_inc(&urb->use_count);  	atomic_inc(&urb->dev->urbnum);  	urb->setup_dma = dma_map_single( -			hcd->self.controller, +			hcd->self.sysdev,  			urb->setup_packet,  			sizeof(struct usb_ctrlrequest),  			DMA_TO_DEVICE);  	urb->transfer_dma = dma_map_single( -			hcd->self.controller, +			hcd->self.sysdev,  			urb->transfer_buffer,  			urb->transfer_buffer_length,  			DMA_FROM_DEVICE); diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 88158324dcae..327630405695 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -1188,10 +1188,10 @@ static int submit_single_step_set_feature(  	 * 15 secs after the setup  	 */  	if (is_setup) { -		/* SETUP pid */ +		/* SETUP pid, and interrupt after SETUP completion */  		qtd_fill(ehci, qtd, urb->setup_dma,  				sizeof(struct usb_ctrlrequest), -				token | (2 /* "setup" */ << 8), 8); +				QTD_IOC | token | (2 /* "setup" */ << 8), 8);  		submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);  		return 0; /*Return now; we shall come back after 15 seconds*/ @@ -1228,12 +1228,8 @@ static int submit_single_step_set_feature(  	qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);  	list_add_tail(&qtd->qtd_list, head); -	/* dont fill any data in such packets */ -	qtd_fill(ehci, qtd, 0, 0, token, 0); - -	/* by default, enable interrupt on urb completion */ -	if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT))) 
-		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC); +	/* Interrupt after STATUS completion */ +	qtd_fill(ehci, qtd, 0, 0, token | QTD_IOC, 0);  	submit_async(ehci, urb, &qtd_list, GFP_KERNEL); diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index ee9676349333..d088c340e4d0 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -74,6 +74,7 @@ static const char	hcd_name [] = "ohci_hcd";  #define	STATECHANGE_DELAY	msecs_to_jiffies(300)  #define	IO_WATCHDOG_DELAY	msecs_to_jiffies(275) +#define	IO_WATCHDOG_OFF		0xffffff00  #include "ohci.h"  #include "pci-quirks.h" @@ -231,7 +232,7 @@ static int ohci_urb_enqueue (  		}  		/* Start up the I/O watchdog timer, if it's not running */ -		if (!timer_pending(&ohci->io_watchdog) && +		if (ohci->prev_frame_no == IO_WATCHDOG_OFF &&  				list_empty(&ohci->eds_in_use) &&  				!(ohci->flags & OHCI_QUIRK_QEMU)) {  			ohci->prev_frame_no = ohci_frame_no(ohci); @@ -446,7 +447,8 @@ static int ohci_init (struct ohci_hcd *ohci)  	struct usb_hcd *hcd = ohci_to_hcd(ohci);  	/* Accept arbitrarily long scatter-gather lists */ -	hcd->self.sg_tablesize = ~0; +	if (!(hcd->driver->flags & HCD_LOCAL_MEM)) +		hcd->self.sg_tablesize = ~0;  	if (distrust_firmware)  		ohci->flags |= OHCI_QUIRK_HUB_POWER; @@ -501,6 +503,7 @@ static int ohci_init (struct ohci_hcd *ohci)  		return 0;  	timer_setup(&ohci->io_watchdog, io_watchdog_func, 0); +	ohci->prev_frame_no = IO_WATCHDOG_OFF;  	ohci->hcca = dma_alloc_coherent (hcd->self.controller,  			sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL); @@ -730,7 +733,7 @@ static void io_watchdog_func(struct timer_list *t)  	u32		head;  	struct ed	*ed;  	struct td	*td, *td_start, *td_next; -	unsigned	frame_no; +	unsigned	frame_no, prev_frame_no = IO_WATCHDOG_OFF;  	unsigned long	flags;  	spin_lock_irqsave(&ohci->lock, flags); @@ -835,7 +838,7 @@ static void io_watchdog_func(struct timer_list *t)  			}  		}  		if (!list_empty(&ohci->eds_in_use)) { -			ohci->prev_frame_no = 
frame_no; +			prev_frame_no = frame_no;  			ohci->prev_wdh_cnt = ohci->wdh_cnt;  			ohci->prev_donehead = ohci_readl(ohci,  					&ohci->regs->donehead); @@ -845,6 +848,7 @@ static void io_watchdog_func(struct timer_list *t)  	}   done: +	ohci->prev_frame_no = prev_frame_no;  	spin_unlock_irqrestore(&ohci->lock, flags);  } @@ -973,6 +977,7 @@ static void ohci_stop (struct usb_hcd *hcd)  	if (quirk_nec(ohci))  		flush_work(&ohci->nec_work);  	del_timer_sync(&ohci->io_watchdog); +	ohci->prev_frame_no = IO_WATCHDOG_OFF;  	ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);  	ohci_usb_reset(ohci); diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c index fb7aaa3b9d06..634f3c7bf774 100644 --- a/drivers/usb/host/ohci-hub.c +++ b/drivers/usb/host/ohci-hub.c @@ -311,8 +311,10 @@ static int ohci_bus_suspend (struct usb_hcd *hcd)  		rc = ohci_rh_suspend (ohci, 0);  	spin_unlock_irq (&ohci->lock); -	if (rc == 0) +	if (rc == 0) {  		del_timer_sync(&ohci->io_watchdog); +		ohci->prev_frame_no = IO_WATCHDOG_OFF; +	}  	return rc;  } diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index b2ec8c399363..4ccb85a67bb3 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c @@ -1019,6 +1019,8 @@ skip_ed:  		 * have modified this list.  normally it's just prepending  		 * entries (which we'd ignore), but paranoia won't hurt.  		 */ +		*last = ed->ed_next; +		ed->ed_next = NULL;  		modified = 0;  		/* unlink urbs as requested, but rescan the list after @@ -1077,21 +1079,22 @@ rescan_this:  			goto rescan_this;  		/* -		 * If no TDs are queued, take ED off the ed_rm_list. +		 * If no TDs are queued, ED is now idle.  		 * Otherwise, if the HC is running, reschedule. -		 * If not, leave it on the list for further dequeues. +		 * If the HC isn't running, add ED back to the +		 * start of the list for later processing.  		 
*/  		if (list_empty(&ed->td_list)) { -			*last = ed->ed_next; -			ed->ed_next = NULL;  			ed->state = ED_IDLE;  			list_del(&ed->in_use_list);  		} else if (ohci->rh_state == OHCI_RH_RUNNING) { -			*last = ed->ed_next; -			ed->ed_next = NULL;  			ed_schedule(ohci, ed);  		} else { -			last = &ed->ed_next; +			ed->ed_next = ohci->ed_rm_list; +			ohci->ed_rm_list = ed; +			/* Don't loop on the same ED */ +			if (last == &ohci->ed_rm_list) +				last = &ed->ed_next;  		}  		if (modified) diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 161536717025..67ad4bb6919a 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -66,6 +66,23 @@  #define	AX_INDXC		0x30  #define	AX_DATAC		0x34 +#define PT_ADDR_INDX		0xE8 +#define PT_READ_INDX		0xE4 +#define PT_SIG_1_ADDR		0xA520 +#define PT_SIG_2_ADDR		0xA521 +#define PT_SIG_3_ADDR		0xA522 +#define PT_SIG_4_ADDR		0xA523 +#define PT_SIG_1_DATA		0x78 +#define PT_SIG_2_DATA		0x56 +#define PT_SIG_3_DATA		0x34 +#define PT_SIG_4_DATA		0x12 +#define PT4_P1_REG		0xB521 +#define PT4_P2_REG		0xB522 +#define PT2_P1_REG		0xD520 +#define PT2_P2_REG		0xD521 +#define PT1_P1_REG		0xD522 +#define PT1_P2_REG		0xD523 +  #define	NB_PCIE_INDX_ADDR	0xe0  #define	NB_PCIE_INDX_DATA	0xe4  #define	PCIE_P_CNTL		0x10040 @@ -513,6 +530,98 @@ void usb_amd_dev_put(void)  EXPORT_SYMBOL_GPL(usb_amd_dev_put);  /* + * Check if port is disabled in BIOS on AMD Promontory host. + * BIOS Disabled ports may wake on connect/disconnect and need + * driver workaround to keep them disabled. + * Returns true if port is marked disabled. 
+ */ +bool usb_amd_pt_check_port(struct device *device, int port) +{ +	unsigned char value, port_shift; +	struct pci_dev *pdev; +	u16 reg; + +	pdev = to_pci_dev(device); +	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_1_ADDR); + +	pci_read_config_byte(pdev, PT_READ_INDX, &value); +	if (value != PT_SIG_1_DATA) +		return false; + +	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_2_ADDR); + +	pci_read_config_byte(pdev, PT_READ_INDX, &value); +	if (value != PT_SIG_2_DATA) +		return false; + +	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_3_ADDR); + +	pci_read_config_byte(pdev, PT_READ_INDX, &value); +	if (value != PT_SIG_3_DATA) +		return false; + +	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_4_ADDR); + +	pci_read_config_byte(pdev, PT_READ_INDX, &value); +	if (value != PT_SIG_4_DATA) +		return false; + +	/* Check disabled port setting, if bit is set port is enabled */ +	switch (pdev->device) { +	case 0x43b9: +	case 0x43ba: +	/* +	 * device is AMD_PROMONTORYA_4(0x43b9) or PROMONTORYA_3(0x43ba) +	 * PT4_P1_REG bits[7..1] represents USB2.0 ports 6 to 0 +	 * PT4_P2_REG bits[6..0] represents ports 13 to 7 +	 */ +		if (port > 6) { +			reg = PT4_P2_REG; +			port_shift = port - 7; +		} else { +			reg = PT4_P1_REG; +			port_shift = port + 1; +		} +		break; +	case 0x43bb: +	/* +	 * device is AMD_PROMONTORYA_2(0x43bb) +	 * PT2_P1_REG bits[7..5] represents USB2.0 ports 2 to 0 +	 * PT2_P2_REG bits[5..0] represents ports 9 to 3 +	 */ +		if (port > 2) { +			reg = PT2_P2_REG; +			port_shift = port - 3; +		} else { +			reg = PT2_P1_REG; +			port_shift = port + 5; +		} +		break; +	case 0x43bc: +	/* +	 * device is AMD_PROMONTORYA_1(0x43bc) +	 * PT1_P1_REG[7..4] represents USB2.0 ports 3 to 0 +	 * PT1_P2_REG[5..0] represents ports 9 to 4 +	 */ +		if (port > 3) { +			reg = PT1_P2_REG; +			port_shift = port - 4; +		} else { +			reg = PT1_P1_REG; +			port_shift = port + 4; +		} +		break; +	default: +		return false; +	} +	pci_write_config_word(pdev, PT_ADDR_INDX, reg); +	
pci_read_config_byte(pdev, PT_READ_INDX, &value); + +	return !(value & BIT(port_shift)); +} +EXPORT_SYMBOL_GPL(usb_amd_pt_check_port); + +/*   * Make sure the controller is completely inactive, unable to   * generate interrupts or do DMA.   */ diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h index b68dcb5dd0fd..4ca0d9b7e463 100644 --- a/drivers/usb/host/pci-quirks.h +++ b/drivers/usb/host/pci-quirks.h @@ -17,6 +17,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);  void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);  void sb800_prefetch(struct device *dev, int on);  bool usb_xhci_needs_pci_reset(struct pci_dev *pdev); +bool usb_amd_pt_check_port(struct device *device, int port);  #else  struct pci_dev;  static inline void usb_amd_quirk_pll_disable(void) {} @@ -25,6 +26,10 @@ static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {}  static inline void usb_amd_dev_put(void) {}  static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}  static inline void sb800_prefetch(struct device *dev, int on) {} +static inline bool usb_amd_pt_check_port(struct device *device, int port) +{ +	return false; +}  #endif  /* CONFIG_USB_PCI */  #endif  /*  __LINUX_USB_PCI_QUIRKS_H  */ diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index a1ab8acf39ba..c359bae7b754 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -328,13 +328,14 @@ dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)  int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,  		 gfp_t gfp_flags)  { +	unsigned long		flags;  	struct xhci_dbc		*dbc = dep->dbc;  	int			ret = -ESHUTDOWN; -	spin_lock(&dbc->lock); +	spin_lock_irqsave(&dbc->lock, flags);  	if (dbc->state == DS_CONFIGURED)  		ret = dbc_ep_do_queue(dep, req); -	spin_unlock(&dbc->lock); +	spin_unlock_irqrestore(&dbc->lock, flags);  	mod_delayed_work(system_wq, &dbc->event_work, 0); @@ -521,15 +522,16 @@ static void 
xhci_do_dbc_stop(struct xhci_hcd *xhci)  static int xhci_dbc_start(struct xhci_hcd *xhci)  {  	int			ret; +	unsigned long		flags;  	struct xhci_dbc		*dbc = xhci->dbc;  	WARN_ON(!dbc);  	pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller); -	spin_lock(&dbc->lock); +	spin_lock_irqsave(&dbc->lock, flags);  	ret = xhci_do_dbc_start(xhci); -	spin_unlock(&dbc->lock); +	spin_unlock_irqrestore(&dbc->lock, flags);  	if (ret) {  		pm_runtime_put(xhci_to_hcd(xhci)->self.controller); @@ -541,6 +543,7 @@ static int xhci_dbc_start(struct xhci_hcd *xhci)  static void xhci_dbc_stop(struct xhci_hcd *xhci)  { +	unsigned long		flags;  	struct xhci_dbc		*dbc = xhci->dbc;  	struct dbc_port		*port = &dbc->port; @@ -551,9 +554,9 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)  	if (port->registered)  		xhci_dbc_tty_unregister_device(xhci); -	spin_lock(&dbc->lock); +	spin_lock_irqsave(&dbc->lock, flags);  	xhci_do_dbc_stop(xhci); -	spin_unlock(&dbc->lock); +	spin_unlock_irqrestore(&dbc->lock, flags);  	pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);  } @@ -779,14 +782,15 @@ static void xhci_dbc_handle_events(struct work_struct *work)  	int			ret;  	enum evtreturn		evtr;  	struct xhci_dbc		*dbc; +	unsigned long		flags;  	struct xhci_hcd		*xhci;  	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);  	xhci = dbc->xhci; -	spin_lock(&dbc->lock); +	spin_lock_irqsave(&dbc->lock, flags);  	evtr = xhci_dbc_do_handle_events(dbc); -	spin_unlock(&dbc->lock); +	spin_unlock_irqrestore(&dbc->lock, flags);  	switch (evtr) {  	case EVT_GSER: diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c index 8d47b6fbf973..75f0b92694ba 100644 --- a/drivers/usb/host/xhci-dbgtty.c +++ b/drivers/usb/host/xhci-dbgtty.c @@ -92,21 +92,23 @@ static void dbc_start_rx(struct dbc_port *port)  static void  dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req)  { +	unsigned long		flags;  	struct xhci_dbc		*dbc = xhci->dbc;  	struct dbc_port		*port = 
&dbc->port; -	spin_lock(&port->port_lock); +	spin_lock_irqsave(&port->port_lock, flags);  	list_add_tail(&req->list_pool, &port->read_queue);  	tasklet_schedule(&port->push); -	spin_unlock(&port->port_lock); +	spin_unlock_irqrestore(&port->port_lock, flags);  }  static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req)  { +	unsigned long		flags;  	struct xhci_dbc		*dbc = xhci->dbc;  	struct dbc_port		*port = &dbc->port; -	spin_lock(&port->port_lock); +	spin_lock_irqsave(&port->port_lock, flags);  	list_add(&req->list_pool, &port->write_pool);  	switch (req->status) {  	case 0: @@ -119,7 +121,7 @@ static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req)  			  req->status);  		break;  	} -	spin_unlock(&port->port_lock); +	spin_unlock_irqrestore(&port->port_lock, flags);  }  static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req) @@ -327,12 +329,13 @@ static void dbc_rx_push(unsigned long _port)  {  	struct dbc_request	*req;  	struct tty_struct	*tty; +	unsigned long		flags;  	bool			do_push = false;  	bool			disconnect = false;  	struct dbc_port		*port = (void *)_port;  	struct list_head	*queue = &port->read_queue; -	spin_lock_irq(&port->port_lock); +	spin_lock_irqsave(&port->port_lock, flags);  	tty = port->port.tty;  	while (!list_empty(queue)) {  		req = list_first_entry(queue, struct dbc_request, list_pool); @@ -392,16 +395,17 @@ static void dbc_rx_push(unsigned long _port)  	if (!disconnect)  		dbc_start_rx(port); -	spin_unlock_irq(&port->port_lock); +	spin_unlock_irqrestore(&port->port_lock, flags);  }  static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)  { +	unsigned long	flags;  	struct dbc_port	*port = container_of(_port, struct dbc_port, port); -	spin_lock_irq(&port->port_lock); +	spin_lock_irqsave(&port->port_lock, flags);  	dbc_start_rx(port); -	spin_unlock_irq(&port->port_lock); +	spin_unlock_irqrestore(&port->port_lock, flags);  	return 0;  } diff --git 
a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index e26e685d8a57..5851052d4668 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -211,7 +211,7 @@ static void xhci_ring_dump_segment(struct seq_file *s,  static int xhci_ring_trb_show(struct seq_file *s, void *unused)  {  	int			i; -	struct xhci_ring	*ring = s->private; +	struct xhci_ring	*ring = *(struct xhci_ring **)s->private;  	struct xhci_segment	*seg = ring->first_seg;  	for (i = 0; i < ring->num_segs; i++) { @@ -387,7 +387,7 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,  	snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index);  	epriv->root = xhci_debugfs_create_ring_dir(xhci, -						   &dev->eps[ep_index].new_ring, +						   &dev->eps[ep_index].ring,  						   epriv->name,  						   spriv->root);  	spriv->eps[ep_index] = epriv; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 46d5e08f05f1..72ebbc908e19 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1224,17 +1224,17 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,  				temp = readl(port_array[wIndex]);  				break;  			} - -			/* Software should not attempt to set -			 * port link state above '3' (U3) and the port -			 * must be enabled. 
-			 */ -			if ((temp & PORT_PE) == 0 || -				(link_state > USB_SS_PORT_LS_U3)) { -				xhci_warn(xhci, "Cannot set link state.\n"); +			/* Port must be enabled */ +			if (!(temp & PORT_PE)) { +				retval = -ENODEV; +				break; +			} +			/* Can't set port link state above '3' (U3) */ +			if (link_state > USB_SS_PORT_LS_U3) { +				xhci_warn(xhci, "Cannot set port %d link state %d\n", +					 wIndex, link_state);  				goto error;  			} -  			if (link_state == USB_SS_PORT_LS_U3) {  				slot_id = xhci_find_slot_id_by_port(hcd, xhci,  						wIndex + 1); @@ -1522,6 +1522,13 @@ int xhci_bus_suspend(struct usb_hcd *hcd)  				t2 |= PORT_WKOC_E | PORT_WKCONN_E;  				t2 &= ~PORT_WKDISC_E;  			} + +			if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) && +			    (hcd->speed < HCD_USB3)) { +				if (usb_amd_pt_check_port(hcd->self.controller, +							  port_index)) +					t2 &= ~PORT_WAKE_BITS; +			}  		} else  			t2 &= ~PORT_WAKE_BITS; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 6c79037876db..d9f831b67e57 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -42,6 +42,10 @@  #define PCI_DEVICE_ID_INTEL_APL_XHCI			0x5aa8  #define PCI_DEVICE_ID_INTEL_DNV_XHCI			0x19d0 +#define PCI_DEVICE_ID_AMD_PROMONTORYA_4			0x43b9 +#define PCI_DEVICE_ID_AMD_PROMONTORYA_3			0x43ba +#define PCI_DEVICE_ID_AMD_PROMONTORYA_2			0x43bb +#define PCI_DEVICE_ID_AMD_PROMONTORYA_1			0x43bc  #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI		0x1142  static const char hcd_name[] = "xhci_hcd"; @@ -122,9 +126,19 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)  	if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())  		xhci->quirks |= XHCI_AMD_PLL_FIX; +	if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43bb) +		xhci->quirks |= XHCI_SUSPEND_DELAY; +  	if (pdev->vendor == PCI_VENDOR_ID_AMD)  		xhci->quirks |= XHCI_TRUST_TX_LENGTH; +	if ((pdev->vendor == PCI_VENDOR_ID_AMD) && +		((pdev->device == 
PCI_DEVICE_ID_AMD_PROMONTORYA_4) || +		(pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) || +		(pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) || +		(pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1))) +		xhci->quirks |= XHCI_U2_DISABLE_WAKE; +  	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {  		xhci->quirks |= XHCI_LPM_SUPPORT;  		xhci->quirks |= XHCI_INTEL_HOST; diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 6f038306c14d..6652e2d5bd2e 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -360,7 +360,6 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)  {  	struct usb_hcd	*hcd = dev_get_drvdata(dev);  	struct xhci_hcd	*xhci = hcd_to_xhci(hcd); -	int ret;  	/*  	 * xhci_suspend() needs `do_wakeup` to know whether host is allowed @@ -370,12 +369,7 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)  	 * reconsider this when xhci_plat_suspend enlarges its scope, e.g.,  	 * also applies to runtime suspend.  	 
*/ -	ret = xhci_suspend(xhci, device_may_wakeup(dev)); - -	if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk)) -		clk_disable_unprepare(xhci->clk); - -	return ret; +	return xhci_suspend(xhci, device_may_wakeup(dev));  }  static int __maybe_unused xhci_plat_resume(struct device *dev) @@ -384,9 +378,6 @@ static int __maybe_unused xhci_plat_resume(struct device *dev)  	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);  	int ret; -	if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk)) -		clk_prepare_enable(xhci->clk); -  	ret = xhci_priv_resume_quirk(hcd);  	if (ret)  		return ret; diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index f0b559660007..f33ffc2bc4ed 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c @@ -83,6 +83,10 @@ static const struct soc_device_attribute rcar_quirks_match[]  = {  		.soc_id = "r8a7796",  		.data = (void *)RCAR_XHCI_FIRMWARE_V3,  	}, +	{ +		.soc_id = "r8a77965", +		.data = (void *)RCAR_XHCI_FIRMWARE_V3, +	},  	{ /* sentinel */ },  }; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 1eeb3396300f..5d37700ae4b0 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -646,8 +646,6 @@ static void xhci_stop(struct usb_hcd *hcd)  		return;  	} -	xhci_debugfs_exit(xhci); -  	xhci_dbc_exit(xhci);  	spin_lock_irq(&xhci->lock); @@ -680,6 +678,7 @@ static void xhci_stop(struct usb_hcd *hcd)  	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");  	xhci_mem_cleanup(xhci); +	xhci_debugfs_exit(xhci);  	xhci_dbg_trace(xhci, trace_xhci_dbg_init,  			"xhci_stop completed - status = %x",  			readl(&xhci->op_regs->status)); @@ -878,6 +877,9 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)  	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);  	del_timer_sync(&xhci->shared_hcd->rh_timer); +	if (xhci->quirks & XHCI_SUSPEND_DELAY) +		usleep_range(1000, 1500); +  	spin_lock_irq(&xhci->lock);  	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);  	
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); @@ -1014,6 +1016,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)  		xhci_dbg(xhci, "cleaning up memory\n");  		xhci_mem_cleanup(xhci); +		xhci_debugfs_exit(xhci);  		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",  			    readl(&xhci->op_regs->status)); @@ -3544,12 +3547,10 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)  		virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;  		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);  	} - +	xhci_debugfs_remove_slot(xhci, udev->slot_id);  	ret = xhci_disable_slot(xhci, udev->slot_id); -	if (ret) { -		xhci_debugfs_remove_slot(xhci, udev->slot_id); +	if (ret)  		xhci_free_virt_device(xhci, udev->slot_id); -	}  }  int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 96099a245c69..866e141d4972 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -718,11 +718,12 @@ struct xhci_ep_ctx {  /* bits 10:14 are Max Primary Streams */  /* bit 15 is Linear Stream Array */  /* Interval - period between requests to an endpoint - 125u increments. */ -#define EP_INTERVAL(p)		(((p) & 0xff) << 16) -#define EP_INTERVAL_TO_UFRAMES(p)		(1 << (((p) >> 16) & 0xff)) -#define CTX_TO_EP_INTERVAL(p)	(((p) >> 16) & 0xff) -#define EP_MAXPSTREAMS_MASK	(0x1f << 10) -#define EP_MAXPSTREAMS(p)	(((p) << 10) & EP_MAXPSTREAMS_MASK) +#define EP_INTERVAL(p)			(((p) & 0xff) << 16) +#define EP_INTERVAL_TO_UFRAMES(p)	(1 << (((p) >> 16) & 0xff)) +#define CTX_TO_EP_INTERVAL(p)		(((p) >> 16) & 0xff) +#define EP_MAXPSTREAMS_MASK		(0x1f << 10) +#define EP_MAXPSTREAMS(p)		(((p) << 10) & EP_MAXPSTREAMS_MASK) +#define CTX_TO_EP_MAXPSTREAMS(p)	(((p) & EP_MAXPSTREAMS_MASK) >> 10)  /* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */  #define	EP_HAS_LSA		(1 << 15)  /* hosts with LEC=1 use bits 31:24 as ESIT high bits. 
*/ @@ -1822,9 +1823,10 @@ struct xhci_hcd {  /* For controller with a broken Port Disable implementation */  #define XHCI_BROKEN_PORT_PED	(1 << 25)  #define XHCI_LIMIT_ENDPOINT_INTERVAL_7	(1 << 26) -/* Reserved. It was XHCI_U2_DISABLE_WAKE */ +#define XHCI_U2_DISABLE_WAKE	(1 << 27)  #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL	(1 << 28)  #define XHCI_HW_LPM_DISABLE	(1 << 29) +#define XHCI_SUSPEND_DELAY	(1 << 30)  	unsigned int		num_active_eps;  	unsigned int		limit_active_eps; @@ -2549,21 +2551,22 @@ static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq,  	u8 burst;  	u8 cerr;  	u8 mult; -	u8 lsa; -	u8 hid; + +	bool lsa; +	bool hid;  	esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 |  		CTX_TO_MAX_ESIT_PAYLOAD(tx_info);  	ep_state = info & EP_STATE_MASK; -	max_pstr = info & EP_MAXPSTREAMS_MASK; +	max_pstr = CTX_TO_EP_MAXPSTREAMS(info);  	interval = CTX_TO_EP_INTERVAL(info);  	mult = CTX_TO_EP_MULT(info) + 1; -	lsa = info & EP_HAS_LSA; +	lsa = !!(info & EP_HAS_LSA);  	cerr = (info2 & (3 << 1)) >> 1;  	ep_type = CTX_TO_EP_TYPE(info2); -	hid = info2 & (1 << 7); +	hid = !!(info2 & (1 << 7));  	burst = CTX_TO_MAX_BURST(info2);  	maxp = MAX_PACKET_DECODED(info2); diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index 63b9e85dc0e9..236a60f53099 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c @@ -42,6 +42,9 @@  #define USB_DEVICE_ID_LD_MICROCASSYTIME		0x1033	/* USB Product ID of Micro-CASSY Time (reserved) */  #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE	0x1035	/* USB Product ID of Micro-CASSY Temperature */  #define USB_DEVICE_ID_LD_MICROCASSYPH		0x1038	/* USB Product ID of Micro-CASSY pH */ +#define USB_DEVICE_ID_LD_POWERANALYSERCASSY	0x1040	/* USB Product ID of Power Analyser CASSY */ +#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY	0x1042	/* USB Product ID of Converter Controller CASSY */ +#define USB_DEVICE_ID_LD_MACHINETESTCASSY	0x1043	/* USB Product ID of Machine Test CASSY */  #define USB_DEVICE_ID_LD_JWM		
0x1080	/* USB Product ID of Joule and Wattmeter */  #define USB_DEVICE_ID_LD_DMMP		0x1081	/* USB Product ID of Digital Multimeter P (reserved) */  #define USB_DEVICE_ID_LD_UMIP		0x1090	/* USB Product ID of UMI P */ @@ -84,6 +87,9 @@ static const struct usb_device_id ld_usb_table[] = {  	{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },  	{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },  	{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, +	{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) }, +	{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) }, +	{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },  	{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },  	{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },  	{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c index f5e1bb5e5217..984f7e12a6a5 100644 --- a/drivers/usb/mon/mon_text.c +++ b/drivers/usb/mon/mon_text.c @@ -85,6 +85,8 @@ struct mon_reader_text {  	wait_queue_head_t wait;  	int printf_size; +	size_t printf_offset; +	size_t printf_togo;  	char *printf_buf;  	struct mutex printf_lock; @@ -376,75 +378,103 @@ err_alloc:  	return rc;  } -/* - * For simplicity, we read one record in one system call and throw out - * what does not fit. This means that the following does not work: - *   dd if=/dbg/usbmon/0t bs=10 - * Also, we do not allow seeks and do not bother advancing the offset. - */ +static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp, +    char __user * const buf, const size_t nbytes) +{ +	const size_t togo = min(nbytes, rp->printf_togo); + +	if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo)) +		return -EFAULT; +	rp->printf_togo -= togo; +	rp->printf_offset += togo; +	return togo; +} + +/* ppos is not advanced since the llseek operation is not permitted. 
*/  static ssize_t mon_text_read_t(struct file *file, char __user *buf, -				size_t nbytes, loff_t *ppos) +    size_t nbytes, loff_t *ppos)  {  	struct mon_reader_text *rp = file->private_data;  	struct mon_event_text *ep;  	struct mon_text_ptr ptr; +	ssize_t ret; -	ep = mon_text_read_wait(rp, file); -	if (IS_ERR(ep)) -		return PTR_ERR(ep);  	mutex_lock(&rp->printf_lock); -	ptr.cnt = 0; -	ptr.pbuf = rp->printf_buf; -	ptr.limit = rp->printf_size; - -	mon_text_read_head_t(rp, &ptr, ep); -	mon_text_read_statset(rp, &ptr, ep); -	ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, -	    " %d", ep->length); -	mon_text_read_data(rp, &ptr, ep); - -	if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) -		ptr.cnt = -EFAULT; + +	if (rp->printf_togo == 0) { + +		ep = mon_text_read_wait(rp, file); +		if (IS_ERR(ep)) { +			mutex_unlock(&rp->printf_lock); +			return PTR_ERR(ep); +		} +		ptr.cnt = 0; +		ptr.pbuf = rp->printf_buf; +		ptr.limit = rp->printf_size; + +		mon_text_read_head_t(rp, &ptr, ep); +		mon_text_read_statset(rp, &ptr, ep); +		ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, +		    " %d", ep->length); +		mon_text_read_data(rp, &ptr, ep); + +		rp->printf_togo = ptr.cnt; +		rp->printf_offset = 0; + +		kmem_cache_free(rp->e_slab, ep); +	} + +	ret = mon_text_copy_to_user(rp, buf, nbytes);  	mutex_unlock(&rp->printf_lock); -	kmem_cache_free(rp->e_slab, ep); -	return ptr.cnt; +	return ret;  } +/* ppos is not advanced since the llseek operation is not permitted. 
*/  static ssize_t mon_text_read_u(struct file *file, char __user *buf, -				size_t nbytes, loff_t *ppos) +    size_t nbytes, loff_t *ppos)  {  	struct mon_reader_text *rp = file->private_data;  	struct mon_event_text *ep;  	struct mon_text_ptr ptr; +	ssize_t ret; -	ep = mon_text_read_wait(rp, file); -	if (IS_ERR(ep)) -		return PTR_ERR(ep);  	mutex_lock(&rp->printf_lock); -	ptr.cnt = 0; -	ptr.pbuf = rp->printf_buf; -	ptr.limit = rp->printf_size; -	mon_text_read_head_u(rp, &ptr, ep); -	if (ep->type == 'E') { -		mon_text_read_statset(rp, &ptr, ep); -	} else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { -		mon_text_read_isostat(rp, &ptr, ep); -		mon_text_read_isodesc(rp, &ptr, ep); -	} else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { -		mon_text_read_intstat(rp, &ptr, ep); -	} else { -		mon_text_read_statset(rp, &ptr, ep); +	if (rp->printf_togo == 0) { + +		ep = mon_text_read_wait(rp, file); +		if (IS_ERR(ep)) { +			mutex_unlock(&rp->printf_lock); +			return PTR_ERR(ep); +		} +		ptr.cnt = 0; +		ptr.pbuf = rp->printf_buf; +		ptr.limit = rp->printf_size; + +		mon_text_read_head_u(rp, &ptr, ep); +		if (ep->type == 'E') { +			mon_text_read_statset(rp, &ptr, ep); +		} else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { +			mon_text_read_isostat(rp, &ptr, ep); +			mon_text_read_isodesc(rp, &ptr, ep); +		} else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { +			mon_text_read_intstat(rp, &ptr, ep); +		} else { +			mon_text_read_statset(rp, &ptr, ep); +		} +		ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, +		    " %d", ep->length); +		mon_text_read_data(rp, &ptr, ep); + +		rp->printf_togo = ptr.cnt; +		rp->printf_offset = 0; + +		kmem_cache_free(rp->e_slab, ep);  	} -	ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, -	    " %d", ep->length); -	mon_text_read_data(rp, &ptr, ep); -	if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) -		ptr.cnt = -EFAULT; +	ret = mon_text_copy_to_user(rp, buf, nbytes);  	mutex_unlock(&rp->printf_lock); -	
kmem_cache_free(rp->e_slab, ep); -	return ptr.cnt; +	return ret;  }  static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp, diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 968bf1e8b0fe..4d723077be2b 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1756,6 +1756,7 @@ vbus_show(struct device *dev, struct device_attribute *attr, char *buf)  	int		vbus;  	u8		devctl; +	pm_runtime_get_sync(dev);  	spin_lock_irqsave(&musb->lock, flags);  	val = musb->a_wait_bcon;  	vbus = musb_platform_get_vbus_status(musb); @@ -1769,6 +1770,7 @@ vbus_show(struct device *dev, struct device_attribute *attr, char *buf)  			vbus = 0;  	}  	spin_unlock_irqrestore(&musb->lock, flags); +	pm_runtime_put_sync(dev);  	return sprintf(buf, "Vbus %s, timeout %lu msec\n",  			vbus ? "on" : "off", val); @@ -2471,11 +2473,11 @@ static int musb_remove(struct platform_device *pdev)  	musb_disable_interrupts(musb);  	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);  	spin_unlock_irqrestore(&musb->lock, flags); +	musb_platform_exit(musb);  	pm_runtime_dont_use_autosuspend(musb->controller);  	pm_runtime_put_sync(musb->controller);  	pm_runtime_disable(musb->controller); -	musb_platform_exit(musb);  	musb_phy_callback = NULL;  	if (musb->dma_controller)  		musb_dma_controller_destroy(musb->dma_controller); @@ -2708,7 +2710,8 @@ static int musb_resume(struct device *dev)  	if ((devctl & mask) != (musb->context.devctl & mask))  		musb->port1_status = 0; -	musb_start(musb); +	musb_enable_interrupts(musb); +	musb_platform_enable(musb);  	spin_lock_irqsave(&musb->lock, flags);  	error = musb_run_resume_work(musb); diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 394b4ac86161..45ed32c2cba9 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -391,13 +391,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,  		}  	} -	/* -	 * The pipe must be broken if current 
urb->status is set, so don't -	 * start next urb. -	 * TODO: to minimize the risk of regression, only check urb->status -	 * for RX, until we have a test case to understand the behavior of TX. -	 */ -	if ((!status || !is_in) && qh && qh->is_ready) { +	if (qh != NULL && qh->is_ready) {  		musb_dbg(musb, "... next ep%d %cX urb %p",  		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));  		musb_start_urb(musb, is_in, qh); diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index da031c45395a..fbec863350f6 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c @@ -602,6 +602,9 @@ static enum usb_charger_type mxs_phy_charger_detect(struct usb_phy *phy)  	void __iomem *base = phy->io_priv;  	enum usb_charger_type chgr_type = UNKNOWN_TYPE; +	if (!regmap) +		return UNKNOWN_TYPE; +  	if (mxs_charger_data_contact_detect(mxs_phy))  		return chgr_type; diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 5925d111bd47..39fa2fc1b8b7 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -982,6 +982,10 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,  	if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1))  		goto usbhsf_pio_prepare_pop; +	/* return at this time if the pipe is running */ +	if (usbhs_pipe_is_running(pipe)) +		return 0; +  	usbhs_pipe_config_change_bfre(pipe, 1);  	ret = usbhsf_fifo_select(pipe, fifo, 0); @@ -1172,6 +1176,7 @@ static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt,  	usbhsf_fifo_clear(pipe, fifo);  	pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len); +	usbhs_pipe_running(pipe, 0);  	usbhsf_dma_stop(pipe, fifo);  	usbhsf_dma_unmap(pkt);  	usbhsf_fifo_unselect(pipe, pipe->fifo); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 5db8ed517e0e..2d8d9150da0c 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -241,6 +241,7 @@ static void 
option_instat_callback(struct urb *urb);  #define QUECTEL_PRODUCT_EC21			0x0121  #define QUECTEL_PRODUCT_EC25			0x0125  #define QUECTEL_PRODUCT_BG96			0x0296 +#define QUECTEL_PRODUCT_EP06			0x0306  #define CMOTECH_VENDOR_ID			0x16d8  #define CMOTECH_PRODUCT_6001			0x6001 @@ -689,6 +690,10 @@ static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {  	.reserved = BIT(1) | BIT(4),  }; +static const struct option_blacklist_info quectel_ep06_blacklist = { +	.reserved = BIT(4) | BIT(5), +}; +  static const struct usb_device_id option_ids[] = {  	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },  	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, @@ -1203,6 +1208,8 @@ static const struct usb_device_id option_ids[] = {  	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },  	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),  	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, +	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06), +	  .driver_info = (kernel_ulong_t)&quectel_ep06_blacklist },  	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },  	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },  	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 3b1b9695177a..6034c39b67d1 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -1076,7 +1076,7 @@ static int uas_post_reset(struct usb_interface *intf)  		return 0;  	err = uas_configure_endpoints(devinfo); -	if (err && err != ENODEV) +	if (err && err != -ENODEV)  		shost_printk(KERN_ERR, shost,  			     "%s: alloc streams error %d after reset",  			     __func__, err); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 264af199aec8..747d3a9596d9 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2118,6 +2118,13 @@ UNUSUAL_DEV(  0x152d, 0x2566, 0x0114, 0x0114,  		USB_SC_DEVICE, USB_PR_DEVICE, 
NULL,  		US_FL_BROKEN_FUA ), +/* Reported by Teijo Kinnunen <teijo.kinnunen@code-q.fi> */ +UNUSUAL_DEV(  0x152d, 0x2567, 0x0117, 0x0117, +		"JMicron", +		"USB to ATA/ATAPI Bridge", +		USB_SC_DEVICE, USB_PR_DEVICE, NULL, +		US_FL_BROKEN_FUA ), +  /* Reported-by George Cherian <george.cherian@cavium.com> */  UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999,  		"JMicron", diff --git a/drivers/usb/typec/fusb302/fusb302.c b/drivers/usb/typec/fusb302/fusb302.c index 9ce4756adad6..dcd8ef085b30 100644 --- a/drivers/usb/typec/fusb302/fusb302.c +++ b/drivers/usb/typec/fusb302/fusb302.c @@ -1857,7 +1857,8 @@ static int fusb302_probe(struct i2c_client *client,  	chip->tcpm_port = tcpm_register_port(&client->dev, &chip->tcpc_dev);  	if (IS_ERR(chip->tcpm_port)) {  		ret = PTR_ERR(chip->tcpm_port); -		dev_err(dev, "cannot register tcpm port, ret=%d", ret); +		if (ret != -EPROBE_DEFER) +			dev_err(dev, "cannot register tcpm port, ret=%d", ret);  		goto destroy_workqueue;  	} diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c index f4d563ee7690..8b637a4b474b 100644 --- a/drivers/usb/typec/tcpm.c +++ b/drivers/usb/typec/tcpm.c @@ -252,9 +252,6 @@ struct tcpm_port {  	unsigned int nr_src_pdo;  	u32 snk_pdo[PDO_MAX_OBJECTS];  	unsigned int nr_snk_pdo; -	unsigned int nr_fixed; /* number of fixed sink PDOs */ -	unsigned int nr_var; /* number of variable sink PDOs */ -	unsigned int nr_batt; /* number of battery sink PDOs */  	u32 snk_vdo[VDO_MAX_OBJECTS];  	unsigned int nr_snk_vdo; @@ -1770,90 +1767,39 @@ static int tcpm_pd_check_request(struct tcpm_port *port)  	return 0;  } -#define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y)) -#define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y)) - -static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo, -			      int *src_pdo) +static int tcpm_pd_select_pdo(struct tcpm_port *port)  { -	unsigned int i, j, max_mw = 0, max_mv = 0, mw = 0, mv = 0, ma = 0; +	unsigned int i, max_mw = 0, max_mv = 0;  	int ret 
= -EINVAL;  	/* -	 * Select the source PDO providing the most power which has a -	 * matchig sink cap. +	 * Select the source PDO providing the most power while staying within +	 * the board's voltage limits. Prefer PDO providing exp  	 */  	for (i = 0; i < port->nr_source_caps; i++) {  		u32 pdo = port->source_caps[i];  		enum pd_pdo_type type = pdo_type(pdo); +		unsigned int mv, ma, mw; -		if (type == PDO_TYPE_FIXED) { -			for (j = 0; j < port->nr_fixed; j++) { -				if (pdo_fixed_voltage(pdo) == -				    pdo_fixed_voltage(port->snk_pdo[j])) { -					ma = min_current(pdo, port->snk_pdo[j]); -					mv = pdo_fixed_voltage(pdo); -					mw = ma * mv / 1000; -					if (mw > max_mw || -					    (mw == max_mw && mv > max_mv)) { -						ret = 0; -						*src_pdo = i; -						*sink_pdo = j; -						max_mw = mw; -						max_mv = mv; -					} -					/* There could only be one fixed pdo -					 * at a specific voltage level. -					 * So breaking here. -					 */ -					break; -				} -			} -		} else if (type == PDO_TYPE_BATT) { -			for (j = port->nr_fixed; -			     j < port->nr_fixed + -				 port->nr_batt; -			     j++) { -				if (pdo_min_voltage(pdo) >= -				     pdo_min_voltage(port->snk_pdo[j]) && -				     pdo_max_voltage(pdo) <= -				     pdo_max_voltage(port->snk_pdo[j])) { -					mw = min_power(pdo, port->snk_pdo[j]); -					mv = pdo_min_voltage(pdo); -					if (mw > max_mw || -					    (mw == max_mw && mv > max_mv)) { -						ret = 0; -						*src_pdo = i; -						*sink_pdo = j; -						max_mw = mw; -						max_mv = mv; -					} -				} -			} -		} else if (type == PDO_TYPE_VAR) { -			for (j = port->nr_fixed + -				 port->nr_batt; -			     j < port->nr_fixed + -				 port->nr_batt + -				 port->nr_var; -			     j++) { -				if (pdo_min_voltage(pdo) >= -				     pdo_min_voltage(port->snk_pdo[j]) && -				     pdo_max_voltage(pdo) <= -				     pdo_max_voltage(port->snk_pdo[j])) { -					ma = min_current(pdo, port->snk_pdo[j]); -					mv = pdo_min_voltage(pdo); -					mw = ma * mv / 1000; -					if (mw 
> max_mw || -					    (mw == max_mw && mv > max_mv)) { -						ret = 0; -						*src_pdo = i; -						*sink_pdo = j; -						max_mw = mw; -						max_mv = mv; -					} -				} -			} +		if (type == PDO_TYPE_FIXED) +			mv = pdo_fixed_voltage(pdo); +		else +			mv = pdo_min_voltage(pdo); + +		if (type == PDO_TYPE_BATT) { +			mw = pdo_max_power(pdo); +		} else { +			ma = min(pdo_max_current(pdo), +				 port->max_snk_ma); +			mw = ma * mv / 1000; +		} + +		/* Perfer higher voltages if available */ +		if ((mw > max_mw || (mw == max_mw && mv > max_mv)) && +		    mv <= port->max_snk_mv) { +			ret = i; +			max_mw = mw; +			max_mv = mv;  		}  	} @@ -1865,14 +1811,13 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)  	unsigned int mv, ma, mw, flags;  	unsigned int max_ma, max_mw;  	enum pd_pdo_type type; -	int src_pdo_index, snk_pdo_index; -	u32 pdo, matching_snk_pdo; +	int index; +	u32 pdo; -	if (tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index) < 0) +	index = tcpm_pd_select_pdo(port); +	if (index < 0)  		return -EINVAL; - -	pdo = port->source_caps[src_pdo_index]; -	matching_snk_pdo = port->snk_pdo[snk_pdo_index]; +	pdo = port->source_caps[index];  	type = pdo_type(pdo);  	if (type == PDO_TYPE_FIXED) @@ -1880,28 +1825,26 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)  	else  		mv = pdo_min_voltage(pdo); -	/* Select maximum available current within the sink pdo's limit */ +	/* Select maximum available current within the board's power limit */  	if (type == PDO_TYPE_BATT) { -		mw = min_power(pdo, matching_snk_pdo); -		ma = 1000 * mw / mv; +		mw = pdo_max_power(pdo); +		ma = 1000 * min(mw, port->max_snk_mw) / mv;  	} else { -		ma = min_current(pdo, matching_snk_pdo); -		mw = ma * mv / 1000; +		ma = min(pdo_max_current(pdo), +			 1000 * port->max_snk_mw / mv);  	} +	ma = min(ma, port->max_snk_ma);  	flags = RDO_USB_COMM | RDO_NO_SUSPEND;  	/* Set mismatch bit if offered power is less than operating power */ +	mw = ma * mv / 1000;  	
max_ma = ma;  	max_mw = mw;  	if (mw < port->operating_snk_mw) {  		flags |= RDO_CAP_MISMATCH; -		if (type == PDO_TYPE_BATT && -		    (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo))) -			max_mw = pdo_max_power(matching_snk_pdo); -		else if (pdo_max_current(matching_snk_pdo) > -			 pdo_max_current(pdo)) -			max_ma = pdo_max_current(matching_snk_pdo); +		max_mw = port->operating_snk_mw; +		max_ma = max_mw * 1000 / mv;  	}  	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d", @@ -1910,16 +1853,16 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)  		 port->polarity);  	if (type == PDO_TYPE_BATT) { -		*rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags); +		*rdo = RDO_BATT(index + 1, mw, max_mw, flags);  		tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s", -			 src_pdo_index, mv, mw, +			 index, mv, mw,  			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");  	} else { -		*rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags); +		*rdo = RDO_FIXED(index + 1, ma, max_ma, flags);  		tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s", -			 src_pdo_index, mv, ma, +			 index, mv, ma,  			 flags & RDO_CAP_MISMATCH ? 
" [mismatch]" : "");  	} @@ -3650,19 +3593,6 @@ int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,  }  EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities); -static int nr_type_pdos(const u32 *pdo, unsigned int nr_pdo, -			enum pd_pdo_type type) -{ -	int count = 0; -	int i; - -	for (i = 0; i < nr_pdo; i++) { -		if (pdo_type(pdo[i]) == type) -			count++; -	} -	return count; -} -  struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)  {  	struct tcpm_port *port; @@ -3708,15 +3638,6 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)  					  tcpc->config->nr_src_pdo);  	port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo,  					  tcpc->config->nr_snk_pdo); -	port->nr_fixed =  nr_type_pdos(port->snk_pdo, -				       port->nr_snk_pdo, -				       PDO_TYPE_FIXED); -	port->nr_var = nr_type_pdos(port->snk_pdo, -				    port->nr_snk_pdo, -				    PDO_TYPE_VAR); -	port->nr_batt = nr_type_pdos(port->snk_pdo, -				     port->nr_snk_pdo, -				     PDO_TYPE_BATT);  	port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo,  					  tcpc->config->nr_snk_vdo); diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index 49e552472c3f..dd8ef36ab10e 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -73,6 +73,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a  			goto err;  		sdev->ud.tcp_socket = socket; +		sdev->ud.sockfd = sockfd;  		spin_unlock_irq(&sdev->ud.lock); @@ -172,6 +173,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)  	if (ud->tcp_socket) {  		sockfd_put(ud->tcp_socket);  		ud->tcp_socket = NULL; +		ud->sockfd = -1;  	}  	/* 3. 
free used data */ @@ -266,6 +268,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev)  	sdev->ud.status		= SDEV_ST_AVAILABLE;  	spin_lock_init(&sdev->ud.lock);  	sdev->ud.tcp_socket	= NULL; +	sdev->ud.sockfd		= -1;  	INIT_LIST_HEAD(&sdev->priv_init);  	INIT_LIST_HEAD(&sdev->priv_tx); diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index c3e1008aa491..20e3d4609583 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -984,6 +984,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)  	if (vdev->ud.tcp_socket) {  		sockfd_put(vdev->ud.tcp_socket);  		vdev->ud.tcp_socket = NULL; +		vdev->ud.sockfd = -1;  	}  	pr_info("release socket\n"); @@ -1030,6 +1031,7 @@ static void vhci_device_reset(struct usbip_device *ud)  	if (ud->tcp_socket) {  		sockfd_put(ud->tcp_socket);  		ud->tcp_socket = NULL; +		ud->sockfd = -1;  	}  	ud->status = VDEV_ST_NULL; diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c index d86f72bbbb91..6dcd3ff655c3 100644 --- a/drivers/usb/usbip/vudc_sysfs.c +++ b/drivers/usb/usbip/vudc_sysfs.c @@ -105,10 +105,14 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a  	if (rv != 0)  		return -EINVAL; +	if (!udc) { +		dev_err(dev, "no device"); +		return -ENODEV; +	}  	spin_lock_irqsave(&udc->lock, flags);  	/* Don't export what we don't have */ -	if (!udc || !udc->driver || !udc->pullup) { -		dev_err(dev, "no device or gadget not bound"); +	if (!udc->driver || !udc->pullup) { +		dev_err(dev, "gadget not bound");  		ret = -ENODEV;  		goto unlock;  	} diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index b0f759476900..8a1508a8e481 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -207,9 +207,6 @@ static bool vfio_pci_nointx(struct pci_dev *pdev)  		}  	} -	if (!pdev->irq) -		return true; -  	return false;  } diff --git a/drivers/vfio/vfio_iommu_type1.c 
b/drivers/vfio/vfio_iommu_type1.c index e30e29ae4819..45657e2b1ff7 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -338,11 +338,12 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,  {  	struct page *page[1];  	struct vm_area_struct *vma; +	struct vm_area_struct *vmas[1];  	int ret;  	if (mm == current->mm) { -		ret = get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE), -					  page); +		ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE), +					      page, vmas);  	} else {  		unsigned int flags = 0; @@ -351,7 +352,18 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,  		down_read(&mm->mmap_sem);  		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page, -					    NULL, NULL); +					    vmas, NULL); +		/* +		 * The lifetime of a vaddr_get_pfn() page pin is +		 * userspace-controlled. In the fs-dax case this could +		 * lead to indefinite stalls in filesystem operations. +		 * Disallow attempts to pin fs-dax pages via this +		 * interface. 
+		 */ +		if (ret > 0 && vma_is_fsdax(vmas[0])) { +			ret = -EOPNOTSUPP; +			put_page(page[0]); +		}  		up_read(&mm->mmap_sem);  	} diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 610cba276d47..8139bc70ad7d 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -170,7 +170,7 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)  	if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {  		ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,  				   vhost_net_buf_get_size(rxq), -				   __skb_array_destroy_skb); +				   tun_ptr_free);  		rxq->head = rxq->tail = 0;  	}  } @@ -948,6 +948,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)  		n->vqs[i].done_idx = 0;  		n->vqs[i].vhost_hlen = 0;  		n->vqs[i].sock_hlen = 0; +		n->vqs[i].rx_ring = NULL;  		vhost_net_buf_init(&n->vqs[i].rxq);  	}  	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX); @@ -972,6 +973,7 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,  	vhost_net_disable_vq(n, vq);  	vq->private_data = NULL;  	vhost_net_buf_unproduce(nvq); +	nvq->rx_ring = NULL;  	mutex_unlock(&vq->mutex);  	return sock;  } @@ -1161,14 +1163,14 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)  		vhost_net_disable_vq(n, vq);  		vq->private_data = sock;  		vhost_net_buf_unproduce(nvq); -		if (index == VHOST_NET_VQ_RX) -			nvq->rx_ring = get_tap_ptr_ring(fd);  		r = vhost_vq_init_access(vq);  		if (r)  			goto err_used;  		r = vhost_net_enable_vq(n, vq);  		if (r)  			goto err_used; +		if (index == VHOST_NET_VQ_RX) +			nvq->rx_ring = get_tap_ptr_ring(fd);  		oldubufs = nvq->ubufs;  		nvq->ubufs = ubufs; diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c index 6082f653c68a..67773e8bbb95 100644 --- a/drivers/video/fbdev/geode/video_gx.c +++ b/drivers/video/fbdev/geode/video_gx.c @@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_info *info)  	int timeout = 1000;  	/* Rev. 
1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */ -	if (cpu_data(0).x86_mask == 1) { +	if (cpu_data(0).x86_stepping == 1) {  		pll_table = gx_pll_table_14MHz;  		pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz);  	} else { diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c index af6fc97f4ba4..a436d44f1b7f 100644 --- a/drivers/video/fbdev/sbuslib.c +++ b/drivers/video/fbdev/sbuslib.c @@ -122,7 +122,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,  		unsigned char __user *ured;  		unsigned char __user *ugreen;  		unsigned char __user *ublue; -		int index, count, i; +		unsigned int index, count, i;  		if (get_user(index, &c->index) ||  		    __get_user(count, &c->count) || @@ -161,7 +161,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,  		unsigned char __user *ugreen;  		unsigned char __user *ublue;  		struct fb_cmap *cmap = &info->cmap; -		int index, count, i; +		unsigned int index, count, i;  		u8 red, green, blue;  		if (get_user(index, &c->index) || diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index eb30f3e09a47..71458f493cf8 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -428,8 +428,6 @@ unmap_release:  		i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);  	} -	vq->vq.num_free += total_sg; -  	if (indirect)  		kfree(desc); diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index aff773bcebdb..37460cd6cabb 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -226,6 +226,7 @@ config ZIIRAVE_WATCHDOG  config RAVE_SP_WATCHDOG  	tristate "RAVE SP Watchdog timer"  	depends on RAVE_SP_CORE +	depends on NVMEM || !NVMEM  	select WATCHDOG_CORE  	help  	  Support for the watchdog on RAVE SP device. 
@@ -903,6 +904,7 @@ config F71808E_WDT  config SP5100_TCO  	tristate "AMD/ATI SP5100 TCO Timer/Watchdog"  	depends on X86 && PCI +	select WATCHDOG_CORE  	---help---  	  Hardware watchdog driver for the AMD/ATI SP5100 chipset. The TCO  	  (Total Cost of Ownership) timer is a watchdog timer that will reboot @@ -1008,6 +1010,7 @@ config WAFER_WDT  config I6300ESB_WDT  	tristate "Intel 6300ESB Timer/Watchdog"  	depends on PCI +	select WATCHDOG_CORE  	---help---  	  Hardware driver for the watchdog timer built into the Intel  	  6300ESB controller hub. @@ -1837,6 +1840,7 @@ config WATCHDOG_SUN4V  config XEN_WDT  	tristate "Xen Watchdog support"  	depends on XEN +	select WATCHDOG_CORE  	help  	  Say Y here to support the hypervisor watchdog capability provided  	  by Xen 4.0 and newer.  The watchdog timeout period is normally one diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c index e0678c14480f..3a33c5344bd5 100644 --- a/drivers/watchdog/f71808e_wdt.c +++ b/drivers/watchdog/f71808e_wdt.c @@ -566,7 +566,8 @@ static ssize_t watchdog_write(struct file *file, const char __user *buf,  				char c;  				if (get_user(c, buf + i))  					return -EFAULT; -				expect_close = (c == 'V'); +				if (c == 'V') +					expect_close = true;  			}  			/* Properly order writes across fork()ed processes */ diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index f1f00dfc0e68..b0a158073abd 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -28,16 +28,7 @@  #include <linux/types.h>  #include <linux/uaccess.h>  #include <linux/watchdog.h> -#ifdef CONFIG_HPWDT_NMI_DECODING -#include <linux/dmi.h> -#include <linux/spinlock.h> -#include <linux/nmi.h> -#include <linux/kdebug.h> -#include <linux/notifier.h> -#include <asm/set_memory.h> -#endif /* CONFIG_HPWDT_NMI_DECODING */  #include <asm/nmi.h> -#include <asm/frame.h>  #define HPWDT_VERSION			"1.4.0"  #define SECS_TO_TICKS(secs)		((secs) * 1000 / 128) @@ -48,6 +39,9 @@  static unsigned 
int soft_margin = DEFAULT_MARGIN;	/* in seconds */  static unsigned int reload;			/* the computed soft_margin */  static bool nowayout = WATCHDOG_NOWAYOUT; +#ifdef CONFIG_HPWDT_NMI_DECODING +static unsigned int allow_kdump = 1; +#endif  static char expect_release;  static unsigned long hpwdt_is_open; @@ -63,373 +57,6 @@ static const struct pci_device_id hpwdt_devices[] = {  };  MODULE_DEVICE_TABLE(pci, hpwdt_devices); -#ifdef CONFIG_HPWDT_NMI_DECODING -#define PCI_BIOS32_SD_VALUE		0x5F32335F	/* "_32_" */ -#define CRU_BIOS_SIGNATURE_VALUE	0x55524324 -#define PCI_BIOS32_PARAGRAPH_LEN	16 -#define PCI_ROM_BASE1			0x000F0000 -#define ROM_SIZE			0x10000 - -struct bios32_service_dir { -	u32 signature; -	u32 entry_point; -	u8 revision; -	u8 length; -	u8 checksum; -	u8 reserved[5]; -}; - -/* type 212 */ -struct smbios_cru64_info { -	u8 type; -	u8 byte_length; -	u16 handle; -	u32 signature; -	u64 physical_address; -	u32 double_length; -	u32 double_offset; -}; -#define SMBIOS_CRU64_INFORMATION	212 - -/* type 219 */ -struct smbios_proliant_info { -	u8 type; -	u8 byte_length; -	u16 handle; -	u32 power_features; -	u32 omega_features; -	u32 reserved; -	u32 misc_features; -}; -#define SMBIOS_ICRU_INFORMATION		219 - - -struct cmn_registers { -	union { -		struct { -			u8 ral; -			u8 rah; -			u16 rea2; -		}; -		u32 reax; -	} u1; -	union { -		struct { -			u8 rbl; -			u8 rbh; -			u8 reb2l; -			u8 reb2h; -		}; -		u32 rebx; -	} u2; -	union { -		struct { -			u8 rcl; -			u8 rch; -			u16 rec2; -		}; -		u32 recx; -	} u3; -	union { -		struct { -			u8 rdl; -			u8 rdh; -			u16 red2; -		}; -		u32 redx; -	} u4; - -	u32 resi; -	u32 redi; -	u16 rds; -	u16 res; -	u32 reflags; -}  __attribute__((packed)); - -static unsigned int hpwdt_nmi_decoding; -static unsigned int allow_kdump = 1; -static unsigned int is_icru; -static unsigned int is_uefi; -static DEFINE_SPINLOCK(rom_lock); -static void *cru_rom_addr; -static struct cmn_registers cmn_regs; - -extern asmlinkage void asminline_call(struct 
cmn_registers *pi86Regs, -						unsigned long *pRomEntry); - -#ifdef CONFIG_X86_32 -/* --32 Bit Bios------------------------------------------------------------ */ - -#define HPWDT_ARCH	32 - -asm(".text                          \n\t" -    ".align 4                       \n\t" -    ".globl asminline_call	    \n" -    "asminline_call:                \n\t" -    "pushl       %ebp               \n\t" -    "movl        %esp, %ebp         \n\t" -    "pusha                          \n\t" -    "pushf                          \n\t" -    "push        %es                \n\t" -    "push        %ds                \n\t" -    "pop         %es                \n\t" -    "movl        8(%ebp),%eax       \n\t" -    "movl        4(%eax),%ebx       \n\t" -    "movl        8(%eax),%ecx       \n\t" -    "movl        12(%eax),%edx      \n\t" -    "movl        16(%eax),%esi      \n\t" -    "movl        20(%eax),%edi      \n\t" -    "movl        (%eax),%eax        \n\t" -    "push        %cs                \n\t" -    "call        *12(%ebp)          \n\t" -    "pushf                          \n\t" -    "pushl       %eax               \n\t" -    "movl        8(%ebp),%eax       \n\t" -    "movl        %ebx,4(%eax)       \n\t" -    "movl        %ecx,8(%eax)       \n\t" -    "movl        %edx,12(%eax)      \n\t" -    "movl        %esi,16(%eax)      \n\t" -    "movl        %edi,20(%eax)      \n\t" -    "movw        %ds,24(%eax)       \n\t" -    "movw        %es,26(%eax)       \n\t" -    "popl        %ebx               \n\t" -    "movl        %ebx,(%eax)        \n\t" -    "popl        %ebx               \n\t" -    "movl        %ebx,28(%eax)      \n\t" -    "pop         %es                \n\t" -    "popf                           \n\t" -    "popa                           \n\t" -    "leave                          \n\t" -    "ret                            \n\t" -    ".previous"); - - -/* - *	cru_detect - * - *	Routine Description: - *	This function uses the 32-bit BIOS Service Directory record to 
- *	search for a $CRU record. - * - *	Return Value: - *	0        :  SUCCESS - *	<0       :  FAILURE - */ -static int cru_detect(unsigned long map_entry, -	unsigned long map_offset) -{ -	void *bios32_map; -	unsigned long *bios32_entrypoint; -	unsigned long cru_physical_address; -	unsigned long cru_length; -	unsigned long physical_bios_base = 0; -	unsigned long physical_bios_offset = 0; -	int retval = -ENODEV; - -	bios32_map = ioremap(map_entry, (2 * PAGE_SIZE)); - -	if (bios32_map == NULL) -		return -ENODEV; - -	bios32_entrypoint = bios32_map + map_offset; - -	cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE; - -	set_memory_x((unsigned long)bios32_map, 2); -	asminline_call(&cmn_regs, bios32_entrypoint); - -	if (cmn_regs.u1.ral != 0) { -		pr_warn("Call succeeded but with an error: 0x%x\n", -			cmn_regs.u1.ral); -	} else { -		physical_bios_base = cmn_regs.u2.rebx; -		physical_bios_offset = cmn_regs.u4.redx; -		cru_length = cmn_regs.u3.recx; -		cru_physical_address = -			physical_bios_base + physical_bios_offset; - -		/* If the values look OK, then map it in. */ -		if ((physical_bios_base + physical_bios_offset)) { -			cru_rom_addr = -				ioremap(cru_physical_address, cru_length); -			if (cru_rom_addr) { -				set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK, -					(cru_length + PAGE_SIZE - 1) >> PAGE_SHIFT); -				retval = 0; -			} -		} - -		pr_debug("CRU Base Address:   0x%lx\n", physical_bios_base); -		pr_debug("CRU Offset Address: 0x%lx\n", physical_bios_offset); -		pr_debug("CRU Length:         0x%lx\n", cru_length); -		pr_debug("CRU Mapped Address: %p\n", &cru_rom_addr); -	} -	iounmap(bios32_map); -	return retval; -} - -/* - *	bios_checksum - */ -static int bios_checksum(const char __iomem *ptr, int len) -{ -	char sum = 0; -	int i; - -	/* -	 * calculate checksum of size bytes. This should add up -	 * to zero if we have a valid header. 
-	 */ -	for (i = 0; i < len; i++) -		sum += ptr[i]; - -	return ((sum == 0) && (len > 0)); -} - -/* - *	bios32_present - * - *	Routine Description: - *	This function finds the 32-bit BIOS Service Directory - * - *	Return Value: - *	0        :  SUCCESS - *	<0       :  FAILURE - */ -static int bios32_present(const char __iomem *p) -{ -	struct bios32_service_dir *bios_32_ptr; -	int length; -	unsigned long map_entry, map_offset; - -	bios_32_ptr = (struct bios32_service_dir *) p; - -	/* -	 * Search for signature by checking equal to the swizzled value -	 * instead of calling another routine to perform a strcmp. -	 */ -	if (bios_32_ptr->signature == PCI_BIOS32_SD_VALUE) { -		length = bios_32_ptr->length * PCI_BIOS32_PARAGRAPH_LEN; -		if (bios_checksum(p, length)) { -			/* -			 * According to the spec, we're looking for the -			 * first 4KB-aligned address below the entrypoint -			 * listed in the header. The Service Directory code -			 * is guaranteed to occupy no more than 2 4KB pages. -			 */ -			map_entry = bios_32_ptr->entry_point & ~(PAGE_SIZE - 1); -			map_offset = bios_32_ptr->entry_point - map_entry; - -			return cru_detect(map_entry, map_offset); -		} -	} -	return -ENODEV; -} - -static int detect_cru_service(void) -{ -	char __iomem *p, *q; -	int rc = -1; - -	/* -	 * Search from 0x0f0000 through 0x0fffff, inclusive. 
-	 */ -	p = ioremap(PCI_ROM_BASE1, ROM_SIZE); -	if (p == NULL) -		return -ENOMEM; - -	for (q = p; q < p + ROM_SIZE; q += 16) { -		rc = bios32_present(q); -		if (!rc) -			break; -	} -	iounmap(p); -	return rc; -} -/* ------------------------------------------------------------------------- */ -#endif /* CONFIG_X86_32 */ -#ifdef CONFIG_X86_64 -/* --64 Bit Bios------------------------------------------------------------ */ - -#define HPWDT_ARCH	64 - -asm(".text                      \n\t" -    ".align 4                   \n\t" -    ".globl asminline_call	\n\t" -    ".type asminline_call, @function \n\t" -    "asminline_call:            \n\t" -    FRAME_BEGIN -    "pushq      %rax            \n\t" -    "pushq      %rbx            \n\t" -    "pushq      %rdx            \n\t" -    "pushq      %r12            \n\t" -    "pushq      %r9             \n\t" -    "movq       %rsi, %r12      \n\t" -    "movq       %rdi, %r9       \n\t" -    "movl       4(%r9),%ebx     \n\t" -    "movl       8(%r9),%ecx     \n\t" -    "movl       12(%r9),%edx    \n\t" -    "movl       16(%r9),%esi    \n\t" -    "movl       20(%r9),%edi    \n\t" -    "movl       (%r9),%eax      \n\t" -    "call       *%r12           \n\t" -    "pushfq                     \n\t" -    "popq        %r12           \n\t" -    "movl       %eax, (%r9)     \n\t" -    "movl       %ebx, 4(%r9)    \n\t" -    "movl       %ecx, 8(%r9)    \n\t" -    "movl       %edx, 12(%r9)   \n\t" -    "movl       %esi, 16(%r9)   \n\t" -    "movl       %edi, 20(%r9)   \n\t" -    "movq       %r12, %rax      \n\t" -    "movl       %eax, 28(%r9)   \n\t" -    "popq       %r9             \n\t" -    "popq       %r12            \n\t" -    "popq       %rdx            \n\t" -    "popq       %rbx            \n\t" -    "popq       %rax            \n\t" -    FRAME_END -    "ret                        \n\t" -    ".previous"); - -/* - *	dmi_find_cru - * - *	Routine Description: - *	This function checks whether or not a SMBIOS/DMI record is - *	the 64bit CRU 
info or not - */ -static void dmi_find_cru(const struct dmi_header *dm, void *dummy) -{ -	struct smbios_cru64_info *smbios_cru64_ptr; -	unsigned long cru_physical_address; - -	if (dm->type == SMBIOS_CRU64_INFORMATION) { -		smbios_cru64_ptr = (struct smbios_cru64_info *) dm; -		if (smbios_cru64_ptr->signature == CRU_BIOS_SIGNATURE_VALUE) { -			cru_physical_address = -				smbios_cru64_ptr->physical_address + -				smbios_cru64_ptr->double_offset; -			cru_rom_addr = ioremap(cru_physical_address, -				smbios_cru64_ptr->double_length); -			set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK, -				smbios_cru64_ptr->double_length >> PAGE_SHIFT); -		} -	} -} - -static int detect_cru_service(void) -{ -	cru_rom_addr = NULL; - -	dmi_walk(dmi_find_cru, NULL); - -	/* if cru_rom_addr has been set then we found a CRU service */ -	return ((cru_rom_addr != NULL) ? 0 : -ENODEV); -} -/* ------------------------------------------------------------------------- */ -#endif /* CONFIG_X86_64 */ -#endif /* CONFIG_HPWDT_NMI_DECODING */  /*   *	Watchdog operations @@ -486,30 +113,12 @@ static int hpwdt_my_nmi(void)   */  static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)  { -	unsigned long rom_pl; -	static int die_nmi_called; - -	if (!hpwdt_nmi_decoding) -		return NMI_DONE; -  	if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi())  		return NMI_DONE; -	spin_lock_irqsave(&rom_lock, rom_pl); -	if (!die_nmi_called && !is_icru && !is_uefi) -		asminline_call(&cmn_regs, cru_rom_addr); -	die_nmi_called = 1; -	spin_unlock_irqrestore(&rom_lock, rom_pl); -  	if (allow_kdump)  		hpwdt_stop(); -	if (!is_icru && !is_uefi) { -		if (cmn_regs.u1.ral == 0) { -			nmi_panic(regs, "An NMI occurred, but unable to determine source.\n"); -			return NMI_HANDLED; -		} -	}  	nmi_panic(regs, "An NMI occurred. 
Depending on your system the reason "  		"for the NMI is logged in any one of the following "  		"resources:\n" @@ -675,84 +284,11 @@ static struct miscdevice hpwdt_miscdev = {   *	Init & Exit   */ -#ifdef CONFIG_HPWDT_NMI_DECODING -#ifdef CONFIG_X86_LOCAL_APIC -static void hpwdt_check_nmi_decoding(struct pci_dev *dev) -{ -	/* -	 * If nmi_watchdog is turned off then we can turn on -	 * our nmi decoding capability. -	 */ -	hpwdt_nmi_decoding = 1; -} -#else -static void hpwdt_check_nmi_decoding(struct pci_dev *dev) -{ -	dev_warn(&dev->dev, "NMI decoding is disabled. " -		"Your kernel does not support a NMI Watchdog.\n"); -} -#endif /* CONFIG_X86_LOCAL_APIC */ - -/* - *	dmi_find_icru - * - *	Routine Description: - *	This function checks whether or not we are on an iCRU-based server. - *	This check is independent of architecture and needs to be made for - *	any ProLiant system. - */ -static void dmi_find_icru(const struct dmi_header *dm, void *dummy) -{ -	struct smbios_proliant_info *smbios_proliant_ptr; - -	if (dm->type == SMBIOS_ICRU_INFORMATION) { -		smbios_proliant_ptr = (struct smbios_proliant_info *) dm; -		if (smbios_proliant_ptr->misc_features & 0x01) -			is_icru = 1; -		if (smbios_proliant_ptr->misc_features & 0x1400) -			is_uefi = 1; -	} -}  static int hpwdt_init_nmi_decoding(struct pci_dev *dev)  { +#ifdef CONFIG_HPWDT_NMI_DECODING  	int retval; - -	/* -	 * On typical CRU-based systems we need to map that service in -	 * the BIOS. For 32 bit Operating Systems we need to go through -	 * the 32 Bit BIOS Service Directory. For 64 bit Operating -	 * Systems we get that service through SMBIOS. -	 * -	 * On systems that support the new iCRU service all we need to -	 * do is call dmi_walk to get the supported flag value and skip -	 * the old cru detect code. -	 */ -	dmi_walk(dmi_find_icru, NULL); -	if (!is_icru && !is_uefi) { - -		/* -		* We need to map the ROM to get the CRU service. 
-		* For 32 bit Operating Systems we need to go through the 32 Bit -		* BIOS Service Directory -		* For 64 bit Operating Systems we get that service through SMBIOS. -		*/ -		retval = detect_cru_service(); -		if (retval < 0) { -			dev_warn(&dev->dev, -				"Unable to detect the %d Bit CRU Service.\n", -				HPWDT_ARCH); -			return retval; -		} - -		/* -		* We know this is the only CRU call we need to make so lets keep as -		* few instructions as possible once the NMI comes in. -		*/ -		cmn_regs.u1.rah = 0x0D; -		cmn_regs.u1.ral = 0x02; -	} -  	/*  	 * Only one function can register for NMI_UNKNOWN  	 */ @@ -780,45 +316,26 @@ error:  	dev_warn(&dev->dev,  		"Unable to register a die notifier (err=%d).\n",  		retval); -	if (cru_rom_addr) -		iounmap(cru_rom_addr);  	return retval; +#endif	/* CONFIG_HPWDT_NMI_DECODING */ +	return 0;  }  static void hpwdt_exit_nmi_decoding(void)  { +#ifdef CONFIG_HPWDT_NMI_DECODING  	unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");  	unregister_nmi_handler(NMI_SERR, "hpwdt");  	unregister_nmi_handler(NMI_IO_CHECK, "hpwdt"); -	if (cru_rom_addr) -		iounmap(cru_rom_addr); -} -#else /* !CONFIG_HPWDT_NMI_DECODING */ -static void hpwdt_check_nmi_decoding(struct pci_dev *dev) -{ -} - -static int hpwdt_init_nmi_decoding(struct pci_dev *dev) -{ -	return 0; +#endif  } -static void hpwdt_exit_nmi_decoding(void) -{ -} -#endif /* CONFIG_HPWDT_NMI_DECODING */ -  static int hpwdt_init_one(struct pci_dev *dev,  					const struct pci_device_id *ent)  {  	int retval;  	/* -	 * Check if we can do NMI decoding or not -	 */ -	hpwdt_check_nmi_decoding(dev); - -	/*  	 * First let's find out if we are on an iLO2+ server. We will  	 * not run on a legacy ASM box.  	 * So we only support the G5 ProLiant servers and higher. 
@@ -922,6 +439,6 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="  #ifdef CONFIG_HPWDT_NMI_DECODING  module_param(allow_kdump, int, 0);  MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs"); -#endif /* !CONFIG_HPWDT_NMI_DECODING */ +#endif /* CONFIG_HPWDT_NMI_DECODING */  module_pci_driver(hpwdt_driver); diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c index 316c2eb122d2..e8bd9887c566 100644 --- a/drivers/watchdog/sbsa_gwdt.c +++ b/drivers/watchdog/sbsa_gwdt.c @@ -50,6 +50,7 @@   */  #include <linux/io.h> +#include <linux/io-64-nonatomic-lo-hi.h>  #include <linux/interrupt.h>  #include <linux/module.h>  #include <linux/moduleparam.h> @@ -159,7 +160,7 @@ static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd)  	    !(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0))  		timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR); -	timeleft += readq(gwdt->control_base + SBSA_GWDT_WCV) - +	timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) -  		    arch_counter_get_cntvct();  	do_div(timeleft, gwdt->clk); diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c index 6d1fbda0f461..0da9943d405f 100644 --- a/drivers/watchdog/wdat_wdt.c +++ b/drivers/watchdog/wdat_wdt.c @@ -392,7 +392,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)  		memset(&r, 0, sizeof(r));  		r.start = gas->address; -		r.end = r.start + gas->access_width; +		r.end = r.start + gas->access_width - 1;  		if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {  			r.flags = IORESOURCE_MEM;  		} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 1ab4bd11f5f3..762378f1811c 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -755,8 +755,8 @@ out:  	mutex_unlock(&irq_mapping_update_lock);  	return irq;  error_irq: -	for (; i >= 0; i--) -		
__unbind_from_irq(irq + i); +	while (nvec--) +		__unbind_from_irq(irq + nvec);  	mutex_unlock(&irq_mapping_update_lock);  	return ret;  } diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index 156e5aea36db..b1092fbefa63 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c @@ -416,7 +416,7 @@ static int pvcalls_back_connect(struct xenbus_device *dev,  					sock);  	if (!map) {  		ret = -EFAULT; -		sock_release(map->sock); +		sock_release(sock);  	}  out: diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index 753d9cb437d0..2f11ca72a281 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c @@ -60,6 +60,7 @@ struct sock_mapping {  	bool active_socket;  	struct list_head list;  	struct socket *sock; +	atomic_t refcount;  	union {  		struct {  			int irq; @@ -72,20 +73,25 @@ struct sock_mapping {  			wait_queue_head_t inflight_conn_req;  		} active;  		struct { -		/* Socket status */ +		/* +		 * Socket status, needs to be 64-bit aligned due to the +		 * test_and_* functions which have this requirement on arm64. +		 */  #define PVCALLS_STATUS_UNINITALIZED  0  #define PVCALLS_STATUS_BIND          1  #define PVCALLS_STATUS_LISTEN        2 -			uint8_t status; +			uint8_t status __attribute__((aligned(8)));  		/*  		 * Internal state-machine flags.  		 * Only one accept operation can be inflight for a socket.  		 * Only one poll operation can be inflight for a given socket. +		 * flags needs to be 64-bit aligned due to the test_and_* +		 * functions which have this requirement on arm64.  		 
*/  #define PVCALLS_FLAG_ACCEPT_INFLIGHT 0  #define PVCALLS_FLAG_POLL_INFLIGHT   1  #define PVCALLS_FLAG_POLL_RET        2 -			uint8_t flags; +			uint8_t flags __attribute__((aligned(8)));  			uint32_t inflight_req_id;  			struct sock_mapping *accept_map;  			wait_queue_head_t inflight_accept_req; @@ -93,6 +99,32 @@ struct sock_mapping {  	};  }; +static inline struct sock_mapping *pvcalls_enter_sock(struct socket *sock) +{ +	struct sock_mapping *map; + +	if (!pvcalls_front_dev || +		dev_get_drvdata(&pvcalls_front_dev->dev) == NULL) +		return ERR_PTR(-ENOTCONN); + +	map = (struct sock_mapping *)sock->sk->sk_send_head; +	if (map == NULL) +		return ERR_PTR(-ENOTSOCK); + +	pvcalls_enter(); +	atomic_inc(&map->refcount); +	return map; +} + +static inline void pvcalls_exit_sock(struct socket *sock) +{ +	struct sock_mapping *map; + +	map = (struct sock_mapping *)sock->sk->sk_send_head; +	atomic_dec(&map->refcount); +	pvcalls_exit(); +} +  static inline int get_request(struct pvcalls_bedata *bedata, int *req_id)  {  	*req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1); @@ -369,31 +401,23 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,  	if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)  		return -EOPNOTSUPP; -	pvcalls_enter(); -	if (!pvcalls_front_dev) { -		pvcalls_exit(); -		return -ENOTCONN; -	} +	map = pvcalls_enter_sock(sock); +	if (IS_ERR(map)) +		return PTR_ERR(map);  	bedata = dev_get_drvdata(&pvcalls_front_dev->dev); -	map = (struct sock_mapping *)sock->sk->sk_send_head; -	if (!map) { -		pvcalls_exit(); -		return -ENOTSOCK; -	} -  	spin_lock(&bedata->socket_lock);  	ret = get_request(bedata, &req_id);  	if (ret < 0) {  		spin_unlock(&bedata->socket_lock); -		pvcalls_exit(); +		pvcalls_exit_sock(sock);  		return ret;  	}  	ret = create_active(map, &evtchn);  	if (ret < 0) {  		spin_unlock(&bedata->socket_lock); -		pvcalls_exit(); +		pvcalls_exit_sock(sock);  		return ret;  	} @@ -423,7 +447,7 @@ int 
pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,  	smp_rmb();  	ret = bedata->rsp[req_id].ret;  	bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; -	pvcalls_exit(); +	pvcalls_exit_sock(sock);  	return ret;  } @@ -488,23 +512,15 @@ int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,  	if (flags & (MSG_CONFIRM|MSG_DONTROUTE|MSG_EOR|MSG_OOB))  		return -EOPNOTSUPP; -	pvcalls_enter(); -	if (!pvcalls_front_dev) { -		pvcalls_exit(); -		return -ENOTCONN; -	} +	map = pvcalls_enter_sock(sock); +	if (IS_ERR(map)) +		return PTR_ERR(map);  	bedata = dev_get_drvdata(&pvcalls_front_dev->dev); -	map = (struct sock_mapping *) sock->sk->sk_send_head; -	if (!map) { -		pvcalls_exit(); -		return -ENOTSOCK; -	} -  	mutex_lock(&map->active.out_mutex);  	if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) {  		mutex_unlock(&map->active.out_mutex); -		pvcalls_exit(); +		pvcalls_exit_sock(sock);  		return -EAGAIN;  	}  	if (len > INT_MAX) @@ -526,7 +542,7 @@ again:  		tot_sent = sent;  	mutex_unlock(&map->active.out_mutex); -	pvcalls_exit(); +	pvcalls_exit_sock(sock);  	return tot_sent;  } @@ -591,19 +607,11 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,  	if (flags & (MSG_CMSG_CLOEXEC|MSG_ERRQUEUE|MSG_OOB|MSG_TRUNC))  		return -EOPNOTSUPP; -	pvcalls_enter(); -	if (!pvcalls_front_dev) { -		pvcalls_exit(); -		return -ENOTCONN; -	} +	map = pvcalls_enter_sock(sock); +	if (IS_ERR(map)) +		return PTR_ERR(map);  	bedata = dev_get_drvdata(&pvcalls_front_dev->dev); -	map = (struct sock_mapping *) sock->sk->sk_send_head; -	if (!map) { -		pvcalls_exit(); -		return -ENOTSOCK; -	} -  	mutex_lock(&map->active.in_mutex);  	if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER))  		len = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER); @@ -623,7 +631,7 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,  		ret = 0;  	mutex_unlock(&map->active.in_mutex); -	pvcalls_exit(); +	pvcalls_exit_sock(sock);  	return ret;  } 
@@ -637,24 +645,16 @@ int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)  	if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)  		return -EOPNOTSUPP; -	pvcalls_enter(); -	if (!pvcalls_front_dev) { -		pvcalls_exit(); -		return -ENOTCONN; -	} +	map = pvcalls_enter_sock(sock); +	if (IS_ERR(map)) +		return PTR_ERR(map);  	bedata = dev_get_drvdata(&pvcalls_front_dev->dev); -	map = (struct sock_mapping *) sock->sk->sk_send_head; -	if (map == NULL) { -		pvcalls_exit(); -		return -ENOTSOCK; -	} -  	spin_lock(&bedata->socket_lock);  	ret = get_request(bedata, &req_id);  	if (ret < 0) {  		spin_unlock(&bedata->socket_lock); -		pvcalls_exit(); +		pvcalls_exit_sock(sock);  		return ret;  	}  	req = RING_GET_REQUEST(&bedata->ring, req_id); @@ -684,7 +684,7 @@ int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)  	bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;  	map->passive.status = PVCALLS_STATUS_BIND; -	pvcalls_exit(); +	pvcalls_exit_sock(sock);  	return 0;  } @@ -695,21 +695,13 @@ int pvcalls_front_listen(struct socket *sock, int backlog)  	struct xen_pvcalls_request *req;  	int notify, req_id, ret; -	pvcalls_enter(); -	if (!pvcalls_front_dev) { -		pvcalls_exit(); -		return -ENOTCONN; -	} +	map = pvcalls_enter_sock(sock); +	if (IS_ERR(map)) +		return PTR_ERR(map);  	bedata = dev_get_drvdata(&pvcalls_front_dev->dev); -	map = (struct sock_mapping *) sock->sk->sk_send_head; -	if (!map) { -		pvcalls_exit(); -		return -ENOTSOCK; -	} -  	if (map->passive.status != PVCALLS_STATUS_BIND) { -		pvcalls_exit(); +		pvcalls_exit_sock(sock);  		return -EOPNOTSUPP;  	} @@ -717,7 +709,7 @@ int pvcalls_front_listen(struct socket *sock, int backlog)  	ret = get_request(bedata, &req_id);  	if (ret < 0) {  		spin_unlock(&bedata->socket_lock); -		pvcalls_exit(); +		pvcalls_exit_sock(sock);  		return ret;  	}  	req = RING_GET_REQUEST(&bedata->ring, req_id); @@ -741,7 +733,7 @@ int pvcalls_front_listen(struct socket *sock, int 
backlog)  	bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;  	map->passive.status = PVCALLS_STATUS_LISTEN; -	pvcalls_exit(); +	pvcalls_exit_sock(sock);  	return ret;  } @@ -753,21 +745,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)  	struct xen_pvcalls_request *req;  	int notify, req_id, ret, evtchn, nonblock; -	pvcalls_enter(); -	if (!pvcalls_front_dev) { -		pvcalls_exit(); -		return -ENOTCONN; -	} +	map = pvcalls_enter_sock(sock); +	if (IS_ERR(map)) +		return PTR_ERR(map);  	bedata = dev_get_drvdata(&pvcalls_front_dev->dev); -	map = (struct sock_mapping *) sock->sk->sk_send_head; -	if (!map) { -		pvcalls_exit(); -		return -ENOTSOCK; -	} -  	if (map->passive.status != PVCALLS_STATUS_LISTEN) { -		pvcalls_exit(); +		pvcalls_exit_sock(sock);  		return -EINVAL;  	} @@ -785,13 +769,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)  			goto received;  		}  		if (nonblock) { -			pvcalls_exit(); +			pvcalls_exit_sock(sock);  			return -EAGAIN;  		}  		if (wait_event_interruptible(map->passive.inflight_accept_req,  			!test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,  					  (void *)&map->passive.flags))) { -			pvcalls_exit(); +			pvcalls_exit_sock(sock);  			return -EINTR;  		}  	} @@ -802,7 +786,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)  		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,  			  (void *)&map->passive.flags);  		spin_unlock(&bedata->socket_lock); -		pvcalls_exit(); +		pvcalls_exit_sock(sock);  		return ret;  	}  	map2 = kzalloc(sizeof(*map2), GFP_ATOMIC); @@ -810,7 +794,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)  		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,  			  (void *)&map->passive.flags);  		spin_unlock(&bedata->socket_lock); -		pvcalls_exit(); +		pvcalls_exit_sock(sock);  		return -ENOMEM;  	}  	ret = create_active(map2, &evtchn); @@ -819,7 +803,7 @@ int pvcalls_front_accept(struct socket *sock, struct 
socket *newsock, int flags)  		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,  			  (void *)&map->passive.flags);  		spin_unlock(&bedata->socket_lock); -		pvcalls_exit(); +		pvcalls_exit_sock(sock);  		return ret;  	}  	list_add_tail(&map2->list, &bedata->socket_mappings); @@ -841,13 +825,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)  	/* We could check if we have received a response before returning. */  	if (nonblock) {  		WRITE_ONCE(map->passive.inflight_req_id, req_id); -		pvcalls_exit(); +		pvcalls_exit_sock(sock);  		return -EAGAIN;  	}  	if (wait_event_interruptible(bedata->inflight_req,  		READ_ONCE(bedata->rsp[req_id].req_id) == req_id)) { -		pvcalls_exit(); +		pvcalls_exit_sock(sock);  		return -EINTR;  	}  	/* read req_id, then the content */ @@ -862,7 +846,7 @@ received:  		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,  			  (void *)&map->passive.flags);  		pvcalls_front_free_map(bedata, map2); -		pvcalls_exit(); +		pvcalls_exit_sock(sock);  		return -ENOMEM;  	}  	newsock->sk->sk_send_head = (void *)map2; @@ -874,7 +858,7 @@ received:  	clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags);  	wake_up(&map->passive.inflight_accept_req); -	pvcalls_exit(); +	pvcalls_exit_sock(sock);  	return ret;  } @@ -965,23 +949,16 @@ __poll_t pvcalls_front_poll(struct file *file, struct socket *sock,  	struct sock_mapping *map;  	__poll_t ret; -	pvcalls_enter(); -	if (!pvcalls_front_dev) { -		pvcalls_exit(); +	map = pvcalls_enter_sock(sock); +	if (IS_ERR(map))  		return EPOLLNVAL; -	}  	bedata = dev_get_drvdata(&pvcalls_front_dev->dev); -	map = (struct sock_mapping *) sock->sk->sk_send_head; -	if (!map) { -		pvcalls_exit(); -		return EPOLLNVAL; -	}  	if (map->active_socket)  		ret = pvcalls_front_poll_active(file, bedata, map, wait);  	else  		ret = pvcalls_front_poll_passive(file, bedata, map, wait); -	pvcalls_exit(); +	pvcalls_exit_sock(sock);  	return ret;  } @@ -995,25 +972,20 @@ int pvcalls_front_release(struct socket 
*sock)  	if (sock->sk == NULL)  		return 0; -	pvcalls_enter(); -	if (!pvcalls_front_dev) { -		pvcalls_exit(); -		return -EIO; +	map = pvcalls_enter_sock(sock); +	if (IS_ERR(map)) { +		if (PTR_ERR(map) == -ENOTCONN) +			return -EIO; +		else +			return 0;  	} -  	bedata = dev_get_drvdata(&pvcalls_front_dev->dev); -	map = (struct sock_mapping *) sock->sk->sk_send_head; -	if (map == NULL) { -		pvcalls_exit(); -		return 0; -	} -  	spin_lock(&bedata->socket_lock);  	ret = get_request(bedata, &req_id);  	if (ret < 0) {  		spin_unlock(&bedata->socket_lock); -		pvcalls_exit(); +		pvcalls_exit_sock(sock);  		return ret;  	}  	sock->sk->sk_send_head = NULL; @@ -1043,14 +1015,20 @@ int pvcalls_front_release(struct socket *sock)  		/*  		 * We need to make sure that sendmsg/recvmsg on this socket have  		 * not started before we've cleared sk_send_head here. The -		 * easiest (though not optimal) way to guarantee this is to see -		 * that no pvcall (other than us) is in progress. +		 * easiest way to guarantee this is to see that no pvcalls +		 * (other than us) is in progress on this socket.  		 
*/ -		while (atomic_read(&pvcalls_refcount) > 1) +		while (atomic_read(&map->refcount) > 1)  			cpu_relax();  		pvcalls_front_free_map(bedata, map);  	} else { +		wake_up(&bedata->inflight_req); +		wake_up(&map->passive.inflight_accept_req); + +		while (atomic_read(&map->refcount) > 1) +			cpu_relax(); +  		spin_lock(&bedata->socket_lock);  		list_del(&map->list);  		spin_unlock(&bedata->socket_lock); diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c index bf13d1ec51f3..04e7b3b29bac 100644 --- a/drivers/xen/tmem.c +++ b/drivers/xen/tmem.c @@ -284,6 +284,10 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset,  	int pool = tmem_frontswap_poolid;  	int ret; +	/* THP isn't supported */ +	if (PageTransHuge(page)) +		return -1; +  	if (pool < 0)  		return -1;  	if (ind64 != ind) diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h index 149c5e7efc89..092981171df1 100644 --- a/drivers/xen/xenbus/xenbus.h +++ b/drivers/xen/xenbus/xenbus.h @@ -76,6 +76,7 @@ struct xb_req_data {  	struct list_head list;  	wait_queue_head_t wq;  	struct xsd_sockmsg msg; +	uint32_t caller_req_id;  	enum xsd_sockmsg_type type;  	char *body;  	const struct kvec *vec; diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c index 5b081a01779d..d239fc3c5e3d 100644 --- a/drivers/xen/xenbus/xenbus_comms.c +++ b/drivers/xen/xenbus/xenbus_comms.c @@ -309,6 +309,7 @@ static int process_msg(void)  			goto out;  		if (req->state == xb_req_state_wait_reply) { +			req->msg.req_id = req->caller_req_id;  			req->msg.type = state.msg.type;  			req->msg.len = state.msg.len;  			req->body = state.body; diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 74888cacd0b0..ec9eb4fba59c 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -466,8 +466,11 @@ int xenbus_probe_node(struct xen_bus_type *bus,  	/* Register with generic device framework. 
*/  	err = device_register(&xendev->dev); -	if (err) +	if (err) { +		put_device(&xendev->dev); +		xendev = NULL;  		goto fail; +	}  	return 0;  fail: diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index 3e59590c7254..3f3b29398ab8 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c @@ -227,6 +227,8 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg)  	req->state = xb_req_state_queued;  	init_waitqueue_head(&req->wq); +	/* Save the caller req_id and restore it later in the reply */ +	req->caller_req_id = req->msg.req_id;  	req->msg.req_id = xs_request_enter(req);  	mutex_lock(&xb_write_mutex); @@ -310,6 +312,7 @@ static void *xs_talkv(struct xenbus_transaction t,  	req->num_vecs = num_vecs;  	req->cb = xs_wake_up; +	msg.req_id = 0;  	msg.tx_id = t.id;  	msg.type = type;  	msg.len = 0;  | 

