Diffstat (limited to 'drivers/pci')
42 files changed, 3011 insertions, 1314 deletions
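The new PCI_STUB option (first hunk below) builds a driver whose probe simply claims the device so that no other host driver binds it before it is handed to a guest. A minimal sketch of that pattern, with IDs supplied at runtime through sysfs; the names follow the obvious convention but this is a sketch, not a copy of the new pci-stub.c:

#include <linux/module.h>
#include <linux/pci.h>

static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	/* Claim the device and do nothing with it. */
	return 0;
}

static struct pci_driver stub_driver = {
	.name		= "pci-stub",
	.id_table	= NULL,	/* only dynamic IDs via sysfs new_id */
	.probe		= pci_stub_probe,
};

static int __init pci_stub_init(void)
{
	return pci_register_driver(&stub_driver);
}

static void __exit pci_stub_exit(void)
{
	pci_unregister_driver(&stub_driver);
}

module_init(pci_stub_init);
module_exit(pci_stub_exit);
MODULE_LICENSE("GPL");

Typical use is to write "vendor device" to /sys/bus/pci/drivers/pci-stub/new_id, after which the next probe of a matching device binds it to the stub.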
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index e1ca42591ac4..2a4501dd2515 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -42,6 +42,15 @@ config PCI_DEBUG When in doubt, say N. +config PCI_STUB + tristate "PCI Stub driver" + depends on PCI + help + Say Y or M here if you want be able to reserve a PCI device + when it is going to be assigned to a guest operating system. + + When in doubt, say N. + config HT_IRQ bool "Interrupts on hypertransport devices" default y diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index af3bfe22847b..3d07ce24f6a8 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -53,6 +53,8 @@ obj-$(CONFIG_HOTPLUG) += setup-bus.o obj-$(CONFIG_PCI_SYSCALL) += syscall.o +obj-$(CONFIG_PCI_STUB) += pci-stub.o + ifeq ($(CONFIG_PCI_DEBUG),y) EXTRA_CFLAGS += -DDEBUG endif diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 39bb96b413ef..381444794778 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -66,6 +66,39 @@ EXPORT_SYMBOL(pci_bus_write_config_byte); EXPORT_SYMBOL(pci_bus_write_config_word); EXPORT_SYMBOL(pci_bus_write_config_dword); + +/** + * pci_read_vpd - Read one entry from Vital Product Data + * @dev: pci device struct + * @pos: offset in vpd space + * @count: number of bytes to read + * @buf: pointer to where to store result + * + */ +ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf) +{ + if (!dev->vpd || !dev->vpd->ops) + return -ENODEV; + return dev->vpd->ops->read(dev, pos, count, buf); +} +EXPORT_SYMBOL(pci_read_vpd); + +/** + * pci_write_vpd - Write entry to Vital Product Data + * @dev: pci device struct + * @pos: offset in vpd space + * @count: number of bytes to read + * @val: value to write + * + */ +ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf) +{ + if (!dev->vpd || !dev->vpd->ops) + return -ENODEV; + return dev->vpd->ops->write(dev, pos, count, buf); +} +EXPORT_SYMBOL(pci_write_vpd); + /* * The following routines are to prevent the user from accessing PCI config * space when it's unsafe to do so. Some devices require this during BIST and @@ -133,125 +166,145 @@ PCI_USER_WRITE_CONFIG(dword, u32) struct pci_vpd_pci22 { struct pci_vpd base; - spinlock_t lock; /* controls access to hardware and the flags */ - u8 cap; + struct mutex lock; + u16 flag; bool busy; - bool flag; /* value of F bit to wait for */ + u8 cap; }; -/* Wait for last operation to complete */ +/* + * Wait for last operation to complete. + * This code has to spin since there is no other notification from the PCI + * hardware. Since the VPD is often implemented by serial attachment to an + * EEPROM, it may take many milliseconds to complete. + */ static int pci_vpd_pci22_wait(struct pci_dev *dev) { struct pci_vpd_pci22 *vpd = container_of(dev->vpd, struct pci_vpd_pci22, base); - u16 flag, status; - int wait; + unsigned long timeout = jiffies + HZ/20 + 2; + u16 status; int ret; if (!vpd->busy) return 0; - flag = vpd->flag ? PCI_VPD_ADDR_F : 0; - wait = vpd->flag ? 
10 : 1000; /* read: 100 us; write: 10 ms */ for (;;) { - ret = pci_user_read_config_word(dev, - vpd->cap + PCI_VPD_ADDR, + ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR, &status); - if (ret < 0) + if (ret) return ret; - if ((status & PCI_VPD_ADDR_F) == flag) { + + if ((status & PCI_VPD_ADDR_F) == vpd->flag) { vpd->busy = false; return 0; } - if (wait-- == 0) + + if (time_after(jiffies, timeout)) return -ETIMEDOUT; - udelay(10); + if (fatal_signal_pending(current)) + return -EINTR; + if (!cond_resched()) + udelay(10); } } -static int pci_vpd_pci22_read(struct pci_dev *dev, int pos, int size, - char *buf) +static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count, + void *arg) { struct pci_vpd_pci22 *vpd = container_of(dev->vpd, struct pci_vpd_pci22, base); - u32 val; int ret; - int begin, end, i; + loff_t end = pos + count; + u8 *buf = arg; - if (pos < 0 || pos > vpd->base.len || size > vpd->base.len - pos) + if (pos < 0 || pos > vpd->base.len || end > vpd->base.len) return -EINVAL; - if (size == 0) - return 0; - spin_lock_irq(&vpd->lock); - ret = pci_vpd_pci22_wait(dev); - if (ret < 0) - goto out; - ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR, - pos & ~3); - if (ret < 0) - goto out; - vpd->busy = true; - vpd->flag = 1; + if (mutex_lock_killable(&vpd->lock)) + return -EINTR; + ret = pci_vpd_pci22_wait(dev); if (ret < 0) goto out; - ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, - &val); -out: - spin_unlock_irq(&vpd->lock); - if (ret < 0) - return ret; - - /* Convert to bytes */ - begin = pos & 3; - end = min(4, begin + size); - for (i = 0; i < end; ++i) { - if (i >= begin) - *buf++ = val; - val >>= 8; + + while (pos < end) { + u32 val; + unsigned int i, skip; + + ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR, + pos & ~3); + if (ret < 0) + break; + vpd->busy = true; + vpd->flag = PCI_VPD_ADDR_F; + ret = pci_vpd_pci22_wait(dev); + if (ret < 0) + break; + + ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val); + if (ret < 0) + break; + + skip = pos & 3; + for (i = 0; i < sizeof(u32); i++) { + if (i >= skip) { + *buf++ = val; + if (++pos == end) + break; + } + val >>= 8; + } } - return end - begin; +out: + mutex_unlock(&vpd->lock); + return ret ? 
ret : count; } -static int pci_vpd_pci22_write(struct pci_dev *dev, int pos, int size, - const char *buf) +static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count, + const void *arg) { struct pci_vpd_pci22 *vpd = container_of(dev->vpd, struct pci_vpd_pci22, base); - u32 val; - int ret; + const u8 *buf = arg; + loff_t end = pos + count; + int ret = 0; - if (pos < 0 || pos > vpd->base.len || pos & 3 || - size > vpd->base.len - pos || size < 4) + if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len) return -EINVAL; - val = (u8) *buf++; - val |= ((u8) *buf++) << 8; - val |= ((u8) *buf++) << 16; - val |= ((u32)(u8) *buf++) << 24; + if (mutex_lock_killable(&vpd->lock)) + return -EINTR; - spin_lock_irq(&vpd->lock); ret = pci_vpd_pci22_wait(dev); if (ret < 0) goto out; - ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, - val); - if (ret < 0) - goto out; - ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR, - pos | PCI_VPD_ADDR_F); - if (ret < 0) - goto out; - vpd->busy = true; - vpd->flag = 0; - ret = pci_vpd_pci22_wait(dev); -out: - spin_unlock_irq(&vpd->lock); - if (ret < 0) - return ret; - return 4; + while (pos < end) { + u32 val; + + val = *buf++; + val |= *buf++ << 8; + val |= *buf++ << 16; + val |= *buf++ << 24; + + ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val); + if (ret < 0) + break; + ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR, + pos | PCI_VPD_ADDR_F); + if (ret < 0) + break; + + vpd->busy = true; + vpd->flag = 0; + ret = pci_vpd_pci22_wait(dev); + + pos += sizeof(u32); + } +out: + mutex_unlock(&vpd->lock); + return ret ? ret : count; } static void pci_vpd_pci22_release(struct pci_dev *dev) @@ -259,7 +312,7 @@ static void pci_vpd_pci22_release(struct pci_dev *dev) kfree(container_of(dev->vpd, struct pci_vpd_pci22, base)); } -static struct pci_vpd_ops pci_vpd_pci22_ops = { +static const struct pci_vpd_ops pci_vpd_pci22_ops = { .read = pci_vpd_pci22_read, .write = pci_vpd_pci22_write, .release = pci_vpd_pci22_release, @@ -279,7 +332,7 @@ int pci_vpd_pci22_init(struct pci_dev *dev) vpd->base.len = PCI_VPD_PCI22_SIZE; vpd->base.ops = &pci_vpd_pci22_ops; - spin_lock_init(&vpd->lock); + mutex_init(&vpd->lock); vpd->cap = cap; vpd->busy = false; dev->vpd = &vpd->base; @@ -287,6 +340,29 @@ int pci_vpd_pci22_init(struct pci_dev *dev) } /** + * pci_vpd_truncate - Set available Vital Product Data size + * @dev: pci device struct + * @size: available memory in bytes + * + * Adjust size of available VPD area. 
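With pci_read_vpd()/pci_write_vpd() exported above, drivers no longer need to drive the VPD address/data registers themselves. A hypothetical caller (demo_dump_vpd is illustrative, not part of this series):

static int demo_dump_vpd(struct pci_dev *pdev)
{
	u8 buf[16];
	ssize_t n;

	/* Returns -ENODEV if the device exposes no VPD capability. */
	n = pci_read_vpd(pdev, 0, sizeof(buf), buf);
	if (n < 0)
		return n;
	print_hex_dump(KERN_DEBUG, "vpd: ", DUMP_PREFIX_OFFSET,
		       16, 1, buf, n, false);
	return 0;
}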
+ */ +int pci_vpd_truncate(struct pci_dev *dev, size_t size) +{ + if (!dev->vpd) + return -EINVAL; + + /* limited by the access method */ + if (size > dev->vpd->len) + return -EINVAL; + + dev->vpd->len = size; + dev->vpd->attr->size = size; + + return 0; +} +EXPORT_SYMBOL(pci_vpd_truncate); + +/** * pci_block_user_cfg_access - Block userspace PCI config reads/writes * @dev: pci device struct * diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 999cc4088b59..52b54f053be0 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -71,7 +71,7 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, } /** - * add a single device + * pci_bus_add_device - add a single device * @dev: device to add * * This adds a single pci device to the global @@ -91,6 +91,37 @@ int pci_bus_add_device(struct pci_dev *dev) } /** + * pci_bus_add_child - add a child bus + * @bus: bus to add + * + * This adds sysfs entries for a single bus + */ +int pci_bus_add_child(struct pci_bus *bus) +{ + int retval; + + if (bus->bridge) + bus->dev.parent = bus->bridge; + + retval = device_register(&bus->dev); + if (retval) + return retval; + + bus->is_added = 1; + + retval = device_create_file(&bus->dev, &dev_attr_cpuaffinity); + if (retval) + return retval; + + retval = device_create_file(&bus->dev, &dev_attr_cpulistaffinity); + + /* Create legacy_io and legacy_mem files for this bus */ + pci_create_legacy_files(bus); + + return retval; +} + +/** * pci_bus_add_devices - insert newly discovered PCI devices * @bus: bus to check for new devices * @@ -105,7 +136,7 @@ int pci_bus_add_device(struct pci_dev *dev) void pci_bus_add_devices(struct pci_bus *bus) { struct pci_dev *dev; - struct pci_bus *child_bus; + struct pci_bus *child; int retval; list_for_each_entry(dev, &bus->devices, bus_list) { @@ -120,45 +151,29 @@ void pci_bus_add_devices(struct pci_bus *bus) list_for_each_entry(dev, &bus->devices, bus_list) { BUG_ON(!dev->is_added); + child = dev->subordinate; /* * If there is an unattached subordinate bus, attach * it and then scan for unattached PCI devices. */ - if (dev->subordinate) { - if (list_empty(&dev->subordinate->node)) { - down_write(&pci_bus_sem); - list_add_tail(&dev->subordinate->node, - &dev->bus->children); - up_write(&pci_bus_sem); - } - pci_bus_add_devices(dev->subordinate); - - /* register the bus with sysfs as the parent is now - * properly registered. */ - child_bus = dev->subordinate; - if (child_bus->is_added) - continue; - child_bus->dev.parent = child_bus->bridge; - retval = device_register(&child_bus->dev); - if (retval) - dev_err(&dev->dev, "Error registering pci_bus," - " continuing...\n"); - else { - child_bus->is_added = 1; - retval = device_create_file(&child_bus->dev, - &dev_attr_cpuaffinity); - } - if (retval) - dev_err(&dev->dev, "Error creating cpuaffinity" - " file, continuing...\n"); - - retval = device_create_file(&child_bus->dev, - &dev_attr_cpulistaffinity); - if (retval) - dev_err(&dev->dev, - "Error creating cpulistaffinity" - " file, continuing...\n"); + if (!child) + continue; + if (list_empty(&child->node)) { + down_write(&pci_bus_sem); + list_add_tail(&child->node, &dev->bus->children); + up_write(&pci_bus_sem); } + pci_bus_add_devices(child); + + /* + * register the bus with sysfs as the parent is now + * properly registered. 
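pci_vpd_truncate() is meant for drivers that know the device implements less VPD than the PCI 2.2 access method advertises; reads past the real end can stall or time out. A hypothetical fixup (the 512-byte figure is invented for the example):

static void demo_fixup_vpd(struct pci_dev *pdev)
{
	/* Device is known to decode only the first 512 bytes of VPD. */
	if (pci_vpd_truncate(pdev, 512))
		dev_warn(&pdev->dev, "cannot shrink VPD window\n");
}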
+ */ + if (child->is_added) + continue; + retval = pci_bus_add_child(child); + if (retval) + dev_err(&dev->dev, "Error adding bus, continuing\n"); } } diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 691b3adeb870..f5a662a50acb 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -191,26 +191,17 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru) { struct acpi_dmar_hardware_unit *drhd; - static int include_all; int ret = 0; drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr; - if (!dmaru->include_all) - ret = dmar_parse_dev_scope((void *)(drhd + 1), + if (dmaru->include_all) + return 0; + + ret = dmar_parse_dev_scope((void *)(drhd + 1), ((void *)drhd) + drhd->header.length, &dmaru->devices_cnt, &dmaru->devices, drhd->segment); - else { - /* Only allow one INCLUDE_ALL */ - if (include_all) { - printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL " - "device scope is allowed\n"); - ret = -EINVAL; - } - include_all = 1; - } - if (ret) { list_del(&dmaru->list); kfree(dmaru); @@ -384,12 +375,21 @@ int dmar_pci_device_match(struct pci_dev *devices[], int cnt, struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev) { - struct dmar_drhd_unit *drhd = NULL; + struct dmar_drhd_unit *dmaru = NULL; + struct acpi_dmar_hardware_unit *drhd; - list_for_each_entry(drhd, &dmar_drhd_units, list) { - if (drhd->include_all || dmar_pci_device_match(drhd->devices, - drhd->devices_cnt, dev)) - return drhd; + list_for_each_entry(dmaru, &dmar_drhd_units, list) { + drhd = container_of(dmaru->hdr, + struct acpi_dmar_hardware_unit, + header); + + if (dmaru->include_all && + drhd->segment == pci_domain_nr(dev->bus)) + return dmaru; + + if (dmar_pci_device_match(dmaru->devices, + dmaru->devices_cnt, dev)) + return dmaru; } return NULL; @@ -491,6 +491,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) int map_size; u32 ver; static int iommu_allocated = 0; + int agaw; iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); if (!iommu) @@ -506,6 +507,15 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); + agaw = iommu_calculate_agaw(iommu); + if (agaw < 0) { + printk(KERN_ERR + "Cannot get a valid agaw for iommu (seq_id = %d)\n", + iommu->seq_id); + goto error; + } + iommu->agaw = agaw; + /* the registers might be more than one page */ map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), cap_max_fault_reg_offset(iommu->cap)); diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile index 9bdbe1a6688f..e31fb91652ce 100644 --- a/drivers/pci/hotplug/Makefile +++ b/drivers/pci/hotplug/Makefile @@ -55,6 +55,9 @@ pciehp-objs := pciehp_core.o \ pciehp_ctrl.o \ pciehp_pci.o \ pciehp_hpc.o +ifdef CONFIG_ACPI +pciehp-objs += pciehp_acpi.o +endif shpchp-objs := shpchp_core.o \ shpchp_ctrl.o \ diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index 2c981cbb0719..1c1141801060 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c @@ -500,5 +500,74 @@ int acpi_root_bridge(acpi_handle handle) } EXPORT_SYMBOL_GPL(acpi_root_bridge); + +static int is_ejectable(acpi_handle handle) +{ + acpi_status status; + acpi_handle tmp; + unsigned long long removable; + status = acpi_get_handle(handle, "_ADR", &tmp); + if (ACPI_FAILURE(status)) + return 0; + status = acpi_get_handle(handle, "_EJ0", &tmp); + if (ACPI_SUCCESS(status)) + return 1; + status = acpi_evaluate_integer(handle, 
"_RMV", NULL, &removable); + if (ACPI_SUCCESS(status) && removable) + return 1; + return 0; +} + +/** + * acpi_pcihp_check_ejectable - check if handle is ejectable ACPI PCI slot + * @pbus: the PCI bus of the PCI slot corresponding to 'handle' + * @handle: ACPI handle to check + * + * Return 1 if handle is ejectable PCI slot, 0 otherwise. + */ +int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle) +{ + acpi_handle bridge_handle, parent_handle; + + if (!(bridge_handle = acpi_pci_get_bridge_handle(pbus))) + return 0; + if ((ACPI_FAILURE(acpi_get_parent(handle, &parent_handle)))) + return 0; + if (bridge_handle != parent_handle) + return 0; + return is_ejectable(handle); +} +EXPORT_SYMBOL_GPL(acpi_pci_check_ejectable); + +static acpi_status +check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv) +{ + int *found = (int *)context; + if (is_ejectable(handle)) { + *found = 1; + return AE_CTRL_TERMINATE; + } + return AE_OK; +} + +/** + * acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots + * @pbus - PCI bus to scan + * + * Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise. + */ +int acpi_pci_detect_ejectable(struct pci_bus *pbus) +{ + acpi_handle handle; + int found = 0; + + if (!(handle = acpi_pci_get_bridge_handle(pbus))) + return 0; + acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, + check_hotplug, (void *)&found, NULL); + return found; +} +EXPORT_SYMBOL_GPL(acpi_pci_detect_ejectable); + module_param(debug_acpi, bool, 0644); MODULE_PARM_DESC(debug_acpi, "Debugging mode for ACPI enabled or not"); diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h index 9bcb6cbd5aa9..4fc168b70095 100644 --- a/drivers/pci/hotplug/acpiphp.h +++ b/drivers/pci/hotplug/acpiphp.h @@ -44,7 +44,7 @@ do { \ if (acpiphp_debug) \ printk(KERN_DEBUG "%s: " format, \ - MY_NAME , ## arg); \ + MY_NAME , ## arg); \ } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg) diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 3affc6472e65..f09b1010d477 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -46,6 +46,7 @@ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> +#include <linux/pci-acpi.h> #include <linux/mutex.h> #include "../pci.h" @@ -62,61 +63,6 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus); static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus); static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context); - -/* - * initialization & terminatation routines - */ - -/** - * is_ejectable - determine if a slot is ejectable - * @handle: handle to acpi namespace - * - * Ejectable slot should satisfy at least these conditions: - * - * 1. has _ADR method - * 2. has _EJ0 method - * - * optionally - * - * 1. has _STA method - * 2. has _PS0 method - * 3. has _PS3 method - * 4. .. 
- */ -static int is_ejectable(acpi_handle handle) -{ - acpi_status status; - acpi_handle tmp; - - status = acpi_get_handle(handle, "_ADR", &tmp); - if (ACPI_FAILURE(status)) { - return 0; - } - - status = acpi_get_handle(handle, "_EJ0", &tmp); - if (ACPI_FAILURE(status)) { - return 0; - } - - return 1; -} - - -/* callback routine to check for the existence of ejectable slots */ -static acpi_status -is_ejectable_slot(acpi_handle handle, u32 lvl, void *context, void **rv) -{ - int *count = (int *)context; - - if (is_ejectable(handle)) { - (*count)++; - /* only one ejectable slot is enough */ - return AE_CTRL_TERMINATE; - } else { - return AE_OK; - } -} - /* callback routine to check for the existence of a pci dock device */ static acpi_status is_pci_dock_device(acpi_handle handle, u32 lvl, void *context, void **rv) @@ -131,9 +77,6 @@ is_pci_dock_device(acpi_handle handle, u32 lvl, void *context, void **rv) } } - - - /* * the _DCK method can do funny things... and sometimes not * hah-hah funny. @@ -160,9 +103,9 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val, if (((buses >> 8) & 0xff) != bus->secondary) { buses = (buses & 0xff000000) - | ((unsigned int)(bus->primary) << 0) - | ((unsigned int)(bus->secondary) << 8) - | ((unsigned int)(bus->subordinate) << 16); + | ((unsigned int)(bus->primary) << 0) + | ((unsigned int)(bus->secondary) << 8) + | ((unsigned int)(bus->subordinate) << 16); pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses); } return NOTIFY_OK; @@ -184,17 +127,12 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) acpi_status status = AE_OK; unsigned long long adr, sun; int device, function, retval; + struct pci_bus *pbus = bridge->pci_bus; - status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); - - if (ACPI_FAILURE(status)) - return AE_OK; - - status = acpi_get_handle(handle, "_EJ0", &tmp); - - if (ACPI_FAILURE(status) && !(is_dock_device(handle))) + if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle)) return AE_OK; + acpi_evaluate_integer(handle, "_ADR", NULL, &adr); device = (adr >> 16) & 0xffff; function = adr & 0xffff; @@ -205,7 +143,8 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) INIT_LIST_HEAD(&newfunc->sibling); newfunc->handle = handle; newfunc->function = function; - if (ACPI_SUCCESS(status)) + + if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp))) newfunc->flags = FUNC_HAS_EJ0; if (ACPI_SUCCESS(acpi_get_handle(handle, "_STA", &tmp))) @@ -256,8 +195,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) bridge->nr_slots++; dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n", - slot->sun, pci_domain_nr(bridge->pci_bus), - bridge->pci_bus->number, slot->device); + slot->sun, pci_domain_nr(pbus), pbus->number, device); retval = acpiphp_register_hotplug_slot(slot); if (retval) { if (retval == -EBUSY) @@ -274,8 +212,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) list_add_tail(&newfunc->sibling, &slot->funcs); /* associate corresponding pci_dev */ - newfunc->pci_dev = pci_get_slot(bridge->pci_bus, - PCI_DEVFN(device, function)); + newfunc->pci_dev = pci_get_slot(pbus, PCI_DEVFN(device, function)); if (newfunc->pci_dev) { slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON); } @@ -324,27 +261,15 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) /* see if it's worth looking at this bridge */ -static int detect_ejectable_slots(acpi_handle *bridge_handle) +static int detect_ejectable_slots(struct 
pci_bus *pbus) { - acpi_status status; - int count; - - count = 0; - - /* only check slots defined directly below bridge object */ - status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, (u32)1, - is_ejectable_slot, (void *)&count, NULL); - - /* - * we also need to add this bridge if there is a dock bridge or - * other pci device on a dock station (removable) - */ - if (!count) - status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, - (u32)1, is_pci_dock_device, (void *)&count, - NULL); - - return count; + int found = acpi_pci_detect_ejectable(pbus); + if (!found) { + acpi_handle bridge_handle = acpi_pci_get_bridge_handle(pbus); + acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, (u32)1, + is_pci_dock_device, (void *)&found, NULL); + } + return found; } @@ -554,7 +479,7 @@ find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv) goto out; /* check if this bridge has ejectable slots */ - if ((detect_ejectable_slots(handle) > 0)) { + if ((detect_ejectable_slots(dev->subordinate) > 0)) { dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev)); add_p2p_bridge(handle, dev); } @@ -615,7 +540,7 @@ static int add_bridge(acpi_handle handle) } /* check if this bridge has ejectable slots */ - if (detect_ejectable_slots(handle) > 0) { + if (detect_ejectable_slots(pci_bus) > 0) { dbg("found PCI host-bus bridge with hot-pluggable slots\n"); add_host_bridge(handle, pci_bus); } diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c index 881fdd2b7313..5befa7e379b7 100644 --- a/drivers/pci/hotplug/acpiphp_ibm.c +++ b/drivers/pci/hotplug/acpiphp_ibm.c @@ -271,7 +271,7 @@ static void ibm_handle_events(acpi_handle handle, u32 event, void *context) dbg("%s: generationg bus event\n", __func__); acpi_bus_generate_proc_event(note->device, note->event, detail); acpi_bus_generate_netlink_event(note->device->pnp.device_class, - note->device->dev.bus_id, + dev_name(¬e->device->dev), note->event, detail); } else note->event = event; diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c index 8514c3a1746a..c2e1bcbb28a7 100644 --- a/drivers/pci/hotplug/cpqphp_core.c +++ b/drivers/pci/hotplug/cpqphp_core.c @@ -45,7 +45,7 @@ #include "cpqphp.h" #include "cpqphp_nvram.h" -#include "../../../arch/x86/pci/pci.h" /* horrible hack showing how processor dependent we are... 
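The cpqphp changes below drop the driver-private 0x0A-based pin encoding in favor of the standard config-space values, where PCI_INTERRUPT_PIN reads 1 for INTA through 4 for INTD. A sketch of a caller passing the pin through unmodified (demo_route_irq is illustrative):

static int demo_route_irq(struct pci_dev *dev, u8 bus_num, u8 dev_num,
			  u8 irq_num)
{
	u8 pin;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (!pin)		/* 0 means the function uses no INTx pin */
		return 0;
	/* The old code added 0x09/0x0A here only for cpqhp_set_irq to
	 * subtract it again; the pin now goes through as read. */
	return cpqhp_set_irq(bus_num, dev_num, pin, irq_num);
}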
*/ +#include <asm/pci_x86.h> /* Global variables */ diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c index a60a25290995..cc227a8c4b11 100644 --- a/drivers/pci/hotplug/cpqphp_ctrl.c +++ b/drivers/pci/hotplug/cpqphp_ctrl.c @@ -1954,7 +1954,7 @@ void cpqhp_pushbutton_thread(unsigned long slot) return ; } - if (func != NULL && ctrl != NULL) { + if (ctrl != NULL) { if (cpqhp_process_SI(ctrl, func) != 0) { amber_LED_on(ctrl, hp_slot); green_LED_off(ctrl, hp_slot); @@ -2604,7 +2604,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func for (cloop = 0; cloop < 4; cloop++) { if (irqs.valid_INT & (0x01 << cloop)) { rc = cpqhp_set_irq(func->bus, func->device, - 0x0A + cloop, irqs.interrupt[cloop]); + cloop + 1, irqs.interrupt[cloop]); if (rc) goto free_and_out; } @@ -2945,7 +2945,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func } if (!behind_bridge) { - rc = cpqhp_set_irq(func->bus, func->device, temp_byte + 0x09, IRQ); + rc = cpqhp_set_irq(func->bus, func->device, temp_byte, IRQ); if (rc) return 1; } else { diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c index 09021930589f..6c0ed0fcb8ee 100644 --- a/drivers/pci/hotplug/cpqphp_pci.c +++ b/drivers/pci/hotplug/cpqphp_pci.c @@ -37,7 +37,7 @@ #include "../pci.h" #include "cpqphp.h" #include "cpqphp_nvram.h" -#include "../../../arch/x86/pci/pci.h" /* horrible hack showing how processor dependent we are... */ +#include <asm/pci_x86.h> u8 cpqhp_nic_irq; @@ -171,7 +171,7 @@ int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num) fakebus->number = bus_num; dbg("%s: dev %d, bus %d, pin %d, num %d\n", __func__, dev_num, bus_num, int_pin, irq_num); - rc = pcibios_set_irq_routing(fakedev, int_pin - 0x0a, irq_num); + rc = pcibios_set_irq_routing(fakedev, int_pin - 1, irq_num); kfree(fakedev); kfree(fakebus); dbg("%s: rc %d\n", __func__, rc); diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c index 3a2637a00934..b0e7de9e536d 100644 --- a/drivers/pci/hotplug/fakephp.c +++ b/drivers/pci/hotplug/fakephp.c @@ -324,6 +324,7 @@ static int disable_slot(struct hotplug_slot *slot) if (test_and_set_bit(0, &dslot->removed)) { dbg("Slot already scheduled for removal\n"); + pci_dev_put(dev); return -ENODEV; } diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c index 633e743442ac..dd18f857dfb0 100644 --- a/drivers/pci/hotplug/ibmphp_core.c +++ b/drivers/pci/hotplug/ibmphp_core.c @@ -35,7 +35,7 @@ #include <linux/delay.h> #include <linux/wait.h> #include "../pci.h" -#include "../../../arch/x86/pci/pci.h" /* for struct irq_routing_table */ +#include <asm/pci_x86.h> /* for struct irq_routing_table */ #include "ibmphp.h" #define attn_on(sl) ibmphp_hpc_writeslot (sl, HPC_SLOT_ATTNON) diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 7072952ea1d2..db85284ffb62 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -219,11 +219,23 @@ struct hpc_ops { #include <acpi/acpi_bus.h> #include <linux/pci-acpi.h> +extern void __init pciehp_acpi_slot_detection_init(void); +extern int pciehp_acpi_slot_detection_check(struct pci_dev *dev); + +static inline void pciehp_firmware_init(void) +{ + pciehp_acpi_slot_detection_init(); +} + static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev) { + int retval; u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); - return 
acpi_get_hp_hw_control_from_firmware(dev, flags); + retval = acpi_get_hp_hw_control_from_firmware(dev, flags); + if (retval) + return retval; + return pciehp_acpi_slot_detection_check(dev); } static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev, @@ -234,6 +246,7 @@ static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev, return 0; } #else +#define pciehp_firmware_init() do {} while (0) #define pciehp_get_hp_hw_control_from_firmware(dev) 0 #define pciehp_get_hp_params_from_firmware(dev, hpp) (-ENODEV) #endif /* CONFIG_ACPI */ diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c new file mode 100644 index 000000000000..438d795f9fe3 --- /dev/null +++ b/drivers/pci/hotplug/pciehp_acpi.c @@ -0,0 +1,141 @@ +/* + * ACPI related functions for PCI Express Hot Plug driver. + * + * Copyright (C) 2008 Kenji Kaneshige + * Copyright (C) 2008 Fujitsu Limited. + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#include <linux/acpi.h> +#include <linux/pci.h> +#include <linux/pci_hotplug.h> +#include "pciehp.h" + +#define PCIEHP_DETECT_PCIE (0) +#define PCIEHP_DETECT_ACPI (1) +#define PCIEHP_DETECT_AUTO (2) +#define PCIEHP_DETECT_DEFAULT PCIEHP_DETECT_AUTO + +static int slot_detection_mode; +static char *pciehp_detect_mode; +module_param(pciehp_detect_mode, charp, 0444); +MODULE_PARM_DESC(pciehp_detect_mode, + "Slot detection mode: pcie, acpi, auto\n" + " pcie - Use PCIe based slot detection\n" + " acpi - Use ACPI for slot detection\n" + " auto(default) - Auto select mode. Use acpi option if duplicate\n" + " slot ids are found. Otherwise, use pcie option\n"); + +int pciehp_acpi_slot_detection_check(struct pci_dev *dev) +{ + if (slot_detection_mode != PCIEHP_DETECT_ACPI) + return 0; + if (acpi_pci_detect_ejectable(dev->subordinate)) + return 0; + return -ENODEV; +} + +static int __init parse_detect_mode(void) +{ + if (!pciehp_detect_mode) + return PCIEHP_DETECT_DEFAULT; + if (!strcmp(pciehp_detect_mode, "pcie")) + return PCIEHP_DETECT_PCIE; + if (!strcmp(pciehp_detect_mode, "acpi")) + return PCIEHP_DETECT_ACPI; + if (!strcmp(pciehp_detect_mode, "auto")) + return PCIEHP_DETECT_AUTO; + warn("bad specifier '%s' for pciehp_detect_mode. 
Use default\n", + pciehp_detect_mode); + return PCIEHP_DETECT_DEFAULT; +} + +static struct pcie_port_service_id __initdata port_pci_ids[] = { + { + .vendor = PCI_ANY_ID, + .device = PCI_ANY_ID, + .port_type = PCIE_ANY_PORT, + .service_type = PCIE_PORT_SERVICE_HP, + .driver_data = 0, + }, { /* end: all zeroes */ } +}; + +static int __initdata dup_slot_id; +static int __initdata acpi_slot_detected; +static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots); + +/* Dummy driver for dumplicate name detection */ +static int __init dummy_probe(struct pcie_device *dev, + const struct pcie_port_service_id *id) +{ + int pos; + u32 slot_cap; + struct slot *slot, *tmp; + struct pci_dev *pdev = dev->port; + struct pci_bus *pbus = pdev->subordinate; + if (!(slot = kzalloc(sizeof(*slot), GFP_KERNEL))) + return -ENOMEM; + /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ + if (pciehp_get_hp_hw_control_from_firmware(pdev)) + return -ENODEV; + if (!(pos = pci_find_capability(pdev, PCI_CAP_ID_EXP))) + return -ENODEV; + pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap); + slot->number = slot_cap >> 19; + list_for_each_entry(tmp, &dummy_slots, slot_list) { + if (tmp->number == slot->number) + dup_slot_id++; + } + list_add_tail(&slot->slot_list, &dummy_slots); + if (!acpi_slot_detected && acpi_pci_detect_ejectable(pbus)) + acpi_slot_detected = 1; + return -ENODEV; /* dummy driver always returns error */ +} + +static struct pcie_port_service_driver __initdata dummy_driver = { + .name = "pciehp_dummy", + .id_table = port_pci_ids, + .probe = dummy_probe, +}; + +static int __init select_detection_mode(void) +{ + struct slot *slot, *tmp; + pcie_port_service_register(&dummy_driver); + pcie_port_service_unregister(&dummy_driver); + list_for_each_entry_safe(slot, tmp, &dummy_slots, slot_list) { + list_del(&slot->slot_list); + kfree(slot); + } + if (acpi_slot_detected && dup_slot_id) + return PCIEHP_DETECT_ACPI; + return PCIEHP_DETECT_PCIE; +} + +void __init pciehp_acpi_slot_detection_init(void) +{ + slot_detection_mode = parse_detect_mode(); + if (slot_detection_mode != PCIEHP_DETECT_AUTO) + goto out; + slot_detection_mode = select_detection_mode(); +out: + if (slot_detection_mode == PCIEHP_DETECT_ACPI) + info("Using ACPI for slot detection.\n"); +} diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 39cf248d24e3..5482d4ed8256 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -522,6 +522,7 @@ static int __init pcied_init(void) { int retval = 0; + pciehp_firmware_init(); retval = pcie_port_service_register(&hpdriver_portdrv); dbg("pcie_port_service_register = %d\n", retval); info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index fead63c6b49e..ff4034502d24 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -178,15 +178,14 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot) "Issue of Slot Power Off command failed\n"); return; } + /* + * After turning power off, we must wait for at least 1 second + * before taking any action that relies on power having been + * removed from the slot/adapter. + */ + msleep(1000); } - /* - * After turning power off, we must wait for at least 1 second - * before taking any action that relies on power having been - * removed from the slot/adapter. 
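One detail worth noting in dummy_probe() above: the two -ENODEV returns happen after the slot allocation, so those paths leak the kzalloc'd slot. A leak-free ordering probes the hardware first; a sketch using the same identifiers:

static int __init dummy_probe(struct pcie_device *dev,
			      const struct pcie_port_service_id *id)
{
	int pos;
	u32 slot_cap;
	struct slot *slot;
	struct pci_dev *pdev = dev->port;

	/* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
	if (pciehp_get_hp_hw_control_from_firmware(pdev))
		return -ENODEV;
	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return -ENODEV;
	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
	if (!slot)
		return -ENOMEM;
	pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap);
	slot->number = slot_cap >> 19;	/* PSN field, bits 31:19 */
	/* ... duplicate-ID bookkeeping continues as in the hunk above ... */
	return -ENODEV;			/* dummy driver always "fails" */
}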
- */ - msleep(1000); - if (PWR_LED(ctrl)) pslot->hpc_ops->green_led_off(pslot); @@ -286,15 +285,14 @@ static int remove_board(struct slot *p_slot) "Issue of Slot Disable command failed\n"); return retval; } + /* + * After turning power off, we must wait for at least 1 second + * before taking any action that relies on power having been + * removed from the slot/adapter. + */ + msleep(1000); } - /* - * After turning power off, we must wait for at least 1 second - * before taking any action that relies on power having been - * removed from the slot/adapter. - */ - msleep(1000); - if (PWR_LED(ctrl)) /* turn off Green LED */ p_slot->hpc_ops->green_led_off(p_slot); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index b643ca13e4f1..71a8012886b0 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -42,42 +42,6 @@ static atomic_t pciehp_num_controllers = ATOMIC_INIT(0); -struct ctrl_reg { - u8 cap_id; - u8 nxt_ptr; - u16 cap_reg; - u32 dev_cap; - u16 dev_ctrl; - u16 dev_status; - u32 lnk_cap; - u16 lnk_ctrl; - u16 lnk_status; - u32 slot_cap; - u16 slot_ctrl; - u16 slot_status; - u16 root_ctrl; - u16 rsvp; - u32 root_status; -} __attribute__ ((packed)); - -/* offsets to the controller registers based on the above structure layout */ -enum ctrl_offsets { - PCIECAPID = offsetof(struct ctrl_reg, cap_id), - NXTCAPPTR = offsetof(struct ctrl_reg, nxt_ptr), - CAPREG = offsetof(struct ctrl_reg, cap_reg), - DEVCAP = offsetof(struct ctrl_reg, dev_cap), - DEVCTRL = offsetof(struct ctrl_reg, dev_ctrl), - DEVSTATUS = offsetof(struct ctrl_reg, dev_status), - LNKCAP = offsetof(struct ctrl_reg, lnk_cap), - LNKCTRL = offsetof(struct ctrl_reg, lnk_ctrl), - LNKSTATUS = offsetof(struct ctrl_reg, lnk_status), - SLOTCAP = offsetof(struct ctrl_reg, slot_cap), - SLOTCTRL = offsetof(struct ctrl_reg, slot_ctrl), - SLOTSTATUS = offsetof(struct ctrl_reg, slot_status), - ROOTCTRL = offsetof(struct ctrl_reg, root_ctrl), - ROOTSTATUS = offsetof(struct ctrl_reg, root_status), -}; - static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) { struct pci_dev *dev = ctrl->pci_dev; @@ -102,95 +66,9 @@ static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value) return pci_write_config_dword(dev, ctrl->cap_base + reg, value); } -/* Field definitions in PCI Express Capabilities Register */ -#define CAP_VER 0x000F -#define DEV_PORT_TYPE 0x00F0 -#define SLOT_IMPL 0x0100 -#define MSG_NUM 0x3E00 - -/* Device or Port Type */ -#define NAT_ENDPT 0x00 -#define LEG_ENDPT 0x01 -#define ROOT_PORT 0x04 -#define UP_STREAM 0x05 -#define DN_STREAM 0x06 -#define PCIE_PCI_BRDG 0x07 -#define PCI_PCIE_BRDG 0x10 - -/* Field definitions in Device Capabilities Register */ -#define DATTN_BUTTN_PRSN 0x1000 -#define DATTN_LED_PRSN 0x2000 -#define DPWR_LED_PRSN 0x4000 - -/* Field definitions in Link Capabilities Register */ -#define MAX_LNK_SPEED 0x000F -#define MAX_LNK_WIDTH 0x03F0 -#define LINK_ACTIVE_REPORTING 0x00100000 - -/* Link Width Encoding */ -#define LNK_X1 0x01 -#define LNK_X2 0x02 -#define LNK_X4 0x04 -#define LNK_X8 0x08 -#define LNK_X12 0x0C -#define LNK_X16 0x10 -#define LNK_X32 0x20 - -/*Field definitions of Link Status Register */ -#define LNK_SPEED 0x000F -#define NEG_LINK_WD 0x03F0 -#define LNK_TRN_ERR 0x0400 -#define LNK_TRN 0x0800 -#define SLOT_CLK_CONF 0x1000 -#define LINK_ACTIVE 0x2000 - -/* Field definitions in Slot Capabilities Register */ -#define ATTN_BUTTN_PRSN 0x00000001 -#define PWR_CTRL_PRSN 0x00000002 -#define MRL_SENS_PRSN 
0x00000004 -#define ATTN_LED_PRSN 0x00000008 -#define PWR_LED_PRSN 0x00000010 -#define HP_SUPR_RM_SUP 0x00000020 -#define HP_CAP 0x00000040 -#define SLOT_PWR_VALUE 0x000003F8 -#define SLOT_PWR_LIMIT 0x00000C00 -#define PSN 0xFFF80000 /* PSN: Physical Slot Number */ - -/* Field definitions in Slot Control Register */ -#define ATTN_BUTTN_ENABLE 0x0001 -#define PWR_FAULT_DETECT_ENABLE 0x0002 -#define MRL_DETECT_ENABLE 0x0004 -#define PRSN_DETECT_ENABLE 0x0008 -#define CMD_CMPL_INTR_ENABLE 0x0010 -#define HP_INTR_ENABLE 0x0020 -#define ATTN_LED_CTRL 0x00C0 -#define PWR_LED_CTRL 0x0300 -#define PWR_CTRL 0x0400 -#define EMI_CTRL 0x0800 - -/* Attention indicator and Power indicator states */ -#define LED_ON 0x01 -#define LED_BLINK 0x10 -#define LED_OFF 0x11 - /* Power Control Command */ #define POWER_ON 0 -#define POWER_OFF 0x0400 - -/* EMI Status defines */ -#define EMI_DISENGAGED 0 -#define EMI_ENGAGED 1 - -/* Field definitions in Slot Status Register */ -#define ATTN_BUTTN_PRESSED 0x0001 -#define PWR_FAULT_DETECTED 0x0002 -#define MRL_SENS_CHANGED 0x0004 -#define PRSN_DETECT_CHANGED 0x0008 -#define CMD_COMPLETED 0x0010 -#define MRL_STATE 0x0020 -#define PRSN_STATE 0x0040 -#define EMI_STATE 0x0080 -#define EMI_STATUS_BIT 7 +#define POWER_OFF PCI_EXP_SLTCTL_PCC static irqreturn_t pcie_isr(int irq, void *dev_id); static void start_int_poll_timer(struct controller *ctrl, int sec); @@ -253,22 +131,20 @@ static inline void pciehp_free_irq(struct controller *ctrl) static int pcie_poll_cmd(struct controller *ctrl) { u16 slot_status; - int timeout = 1000; + int err, timeout = 1000; - if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) { - if (slot_status & CMD_COMPLETED) { - pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED); - return 1; - } + err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); + if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) { + pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); + return 1; } while (timeout > 0) { msleep(10); timeout -= 10; - if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) { - if (slot_status & CMD_COMPLETED) { - pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED); - return 1; - } + err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); + if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) { + pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); + return 1; } } return 0; /* timeout */ @@ -302,14 +178,14 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) mutex_lock(&ctrl->ctrl_lock); - retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); + retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); if (retval) { ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", __func__); goto out; } - if (slot_status & CMD_COMPLETED) { + if (slot_status & PCI_EXP_SLTSTA_CC) { if (!ctrl->no_cmd_complete) { /* * After 1 sec and CMD_COMPLETED still not set, just @@ -332,7 +208,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) } } - retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); + retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl); if (retval) { ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); goto out; @@ -342,7 +218,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) slot_ctrl |= (cmd & mask); ctrl->cmd_busy = 1; smp_mb(); - retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl); + retval = pciehp_writew(ctrl, PCI_EXP_SLTCTL, slot_ctrl); if (retval) ctrl_err(ctrl, "Cannot write to SLOTCTRL register\n"); @@ -356,8 +232,8 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 
mask) * completed interrupt is not enabled, we need to poll * command completed event. */ - if (!(slot_ctrl & HP_INTR_ENABLE) || - !(slot_ctrl & CMD_CMPL_INTR_ENABLE)) + if (!(slot_ctrl & PCI_EXP_SLTCTL_HPIE) || + !(slot_ctrl & PCI_EXP_SLTCTL_CCIE)) poll = 1; pcie_wait_cmd(ctrl, poll); } @@ -370,9 +246,9 @@ static inline int check_link_active(struct controller *ctrl) { u16 link_status; - if (pciehp_readw(ctrl, LNKSTATUS, &link_status)) + if (pciehp_readw(ctrl, PCI_EXP_LNKSTA, &link_status)) return 0; - return !!(link_status & LINK_ACTIVE); + return !!(link_status & PCI_EXP_LNKSTA_DLLLA); } static void pcie_wait_link_active(struct controller *ctrl) @@ -412,15 +288,15 @@ static int hpc_check_lnk_status(struct controller *ctrl) } else msleep(1000); - retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); + retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); if (retval) { ctrl_err(ctrl, "Cannot read LNKSTATUS register\n"); return retval; } ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); - if ( (lnk_status & LNK_TRN) || (lnk_status & LNK_TRN_ERR) || - !(lnk_status & NEG_LINK_WD)) { + if ((lnk_status & PCI_EXP_LNKSTA_LT) || + !(lnk_status & PCI_EXP_LNKSTA_NLW)) { ctrl_err(ctrl, "Link Training Error occurs \n"); retval = -1; return retval; @@ -436,16 +312,16 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status) u8 atten_led_state; int retval = 0; - retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); + retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl); if (retval) { ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); return retval; } ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); + __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_ctrl); - atten_led_state = (slot_ctrl & ATTN_LED_CTRL) >> 6; + atten_led_state = (slot_ctrl & PCI_EXP_SLTCTL_AIC) >> 6; switch (atten_led_state) { case 0: @@ -475,15 +351,15 @@ static int hpc_get_power_status(struct slot *slot, u8 *status) u8 pwr_state; int retval = 0; - retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); + retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl); if (retval) { ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); return retval; } ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); + __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_ctrl); - pwr_state = (slot_ctrl & PWR_CTRL) >> 10; + pwr_state = (slot_ctrl & PCI_EXP_SLTCTL_PCC) >> 10; switch (pwr_state) { case 0: @@ -504,17 +380,15 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status) { struct controller *ctrl = slot->ctrl; u16 slot_status; - int retval = 0; + int retval; - retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); + retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); if (retval) { ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", __func__); return retval; } - - *status = (((slot_status & MRL_STATE) >> 5) == 0) ? 0 : 1; - + *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS); return 0; } @@ -522,18 +396,15 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status) { struct controller *ctrl = slot->ctrl; u16 slot_status; - u8 card_state; - int retval = 0; + int retval; - retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); + retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); if (retval) { ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", __func__); return retval; } - card_state = (u8)((slot_status & PRSN_STATE) >> 6); - *status = (card_state == 1) ? 
1 : 0; - + *status = !!(slot_status & PCI_EXP_SLTSTA_PDS); return 0; } @@ -541,32 +412,28 @@ static int hpc_query_power_fault(struct slot *slot) { struct controller *ctrl = slot->ctrl; u16 slot_status; - u8 pwr_fault; - int retval = 0; + int retval; - retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); + retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); if (retval) { ctrl_err(ctrl, "Cannot check for power fault\n"); return retval; } - pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1); - - return pwr_fault; + return !!(slot_status & PCI_EXP_SLTSTA_PFD); } static int hpc_get_emi_status(struct slot *slot, u8 *status) { struct controller *ctrl = slot->ctrl; u16 slot_status; - int retval = 0; + int retval; - retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); + retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); if (retval) { ctrl_err(ctrl, "Cannot check EMI status\n"); return retval; } - *status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT; - + *status = !!(slot_status & PCI_EXP_SLTSTA_EIS); return retval; } @@ -576,8 +443,8 @@ static int hpc_toggle_emi(struct slot *slot) u16 cmd_mask; int rc; - slot_cmd = EMI_CTRL; - cmd_mask = EMI_CTRL; + slot_cmd = PCI_EXP_SLTCTL_EIC; + cmd_mask = PCI_EXP_SLTCTL_EIC; rc = pcie_write_cmd(slot->ctrl, slot_cmd, cmd_mask); slot->last_emi_toggle = get_seconds(); @@ -591,7 +458,7 @@ static int hpc_set_attention_status(struct slot *slot, u8 value) u16 cmd_mask; int rc; - cmd_mask = ATTN_LED_CTRL; + cmd_mask = PCI_EXP_SLTCTL_AIC; switch (value) { case 0 : /* turn off */ slot_cmd = 0x00C0; @@ -607,7 +474,7 @@ static int hpc_set_attention_status(struct slot *slot, u8 value) } rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); + __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); return rc; } @@ -619,10 +486,10 @@ static void hpc_set_green_led_on(struct slot *slot) u16 cmd_mask; slot_cmd = 0x0100; - cmd_mask = PWR_LED_CTRL; + cmd_mask = PCI_EXP_SLTCTL_PIC; pcie_write_cmd(ctrl, slot_cmd, cmd_mask); ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); + __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); } static void hpc_set_green_led_off(struct slot *slot) @@ -632,10 +499,10 @@ static void hpc_set_green_led_off(struct slot *slot) u16 cmd_mask; slot_cmd = 0x0300; - cmd_mask = PWR_LED_CTRL; + cmd_mask = PCI_EXP_SLTCTL_PIC; pcie_write_cmd(ctrl, slot_cmd, cmd_mask); ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); + __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); } static void hpc_set_green_led_blink(struct slot *slot) @@ -645,10 +512,10 @@ static void hpc_set_green_led_blink(struct slot *slot) u16 cmd_mask; slot_cmd = 0x0200; - cmd_mask = PWR_LED_CTRL; + cmd_mask = PCI_EXP_SLTCTL_PIC; pcie_write_cmd(ctrl, slot_cmd, cmd_mask); ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); + __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); } static int hpc_power_on_slot(struct slot * slot) @@ -662,15 +529,15 @@ static int hpc_power_on_slot(struct slot * slot) ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot); /* Clear sticky power-fault bit from previous power failures */ - retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); + retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); if (retval) { ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", __func__); return retval; } - 
slot_status &= PWR_FAULT_DETECTED; + slot_status &= PCI_EXP_SLTSTA_PFD; if (slot_status) { - retval = pciehp_writew(ctrl, SLOTSTATUS, slot_status); + retval = pciehp_writew(ctrl, PCI_EXP_SLTSTA, slot_status); if (retval) { ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS register\n", @@ -680,13 +547,13 @@ static int hpc_power_on_slot(struct slot * slot) } slot_cmd = POWER_ON; - cmd_mask = PWR_CTRL; + cmd_mask = PCI_EXP_SLTCTL_PCC; /* Enable detection that we turned off at slot power-off time */ if (!pciehp_poll_mode) { - slot_cmd |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | - PRSN_DETECT_ENABLE); - cmd_mask |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | - PRSN_DETECT_ENABLE); + slot_cmd |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | + PCI_EXP_SLTCTL_PDCE); + cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | + PCI_EXP_SLTCTL_PDCE); } retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); @@ -696,7 +563,7 @@ static int hpc_power_on_slot(struct slot * slot) return -1; } ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); + __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); return retval; } @@ -753,7 +620,7 @@ static int hpc_power_off_slot(struct slot * slot) changed = pcie_mask_bad_dllp(ctrl); slot_cmd = POWER_OFF; - cmd_mask = PWR_CTRL; + cmd_mask = PCI_EXP_SLTCTL_PCC; /* * If we get MRL or presence detect interrupts now, the isr * will notice the sticky power-fault bit too and issue power @@ -762,10 +629,10 @@ static int hpc_power_off_slot(struct slot * slot) * till the slot is powered on again. */ if (!pciehp_poll_mode) { - slot_cmd &= ~(PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | - PRSN_DETECT_ENABLE); - cmd_mask |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | - PRSN_DETECT_ENABLE); + slot_cmd &= ~(PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | + PCI_EXP_SLTCTL_PDCE); + cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | + PCI_EXP_SLTCTL_PDCE); } retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); @@ -775,7 +642,7 @@ static int hpc_power_off_slot(struct slot * slot) goto out; } ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); + __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); out: if (changed) pcie_unmask_bad_dllp(ctrl); @@ -796,19 +663,19 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) */ intr_loc = 0; do { - if (pciehp_readw(ctrl, SLOTSTATUS, &detected)) { + if (pciehp_readw(ctrl, PCI_EXP_SLTSTA, &detected)) { ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS\n", __func__); return IRQ_NONE; } - detected &= (ATTN_BUTTN_PRESSED | PWR_FAULT_DETECTED | - MRL_SENS_CHANGED | PRSN_DETECT_CHANGED | - CMD_COMPLETED); + detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | + PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | + PCI_EXP_SLTSTA_CC); intr_loc |= detected; if (!intr_loc) return IRQ_NONE; - if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) { + if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, detected)) { ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n", __func__); return IRQ_NONE; @@ -818,31 +685,31 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc); /* Check Command Complete Interrupt Pending */ - if (intr_loc & CMD_COMPLETED) { + if (intr_loc & PCI_EXP_SLTSTA_CC) { ctrl->cmd_busy = 0; smp_mb(); wake_up(&ctrl->queue); } - if (!(intr_loc & ~CMD_COMPLETED)) + if (!(intr_loc & ~PCI_EXP_SLTSTA_CC)) return IRQ_HANDLED; p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); /* Check MRL Sensor 
Changed */ - if (intr_loc & MRL_SENS_CHANGED) + if (intr_loc & PCI_EXP_SLTSTA_MRLSC) pciehp_handle_switch_change(p_slot); /* Check Attention Button Pressed */ - if (intr_loc & ATTN_BUTTN_PRESSED) + if (intr_loc & PCI_EXP_SLTSTA_ABP) pciehp_handle_attention_button(p_slot); /* Check Presence Detect Changed */ - if (intr_loc & PRSN_DETECT_CHANGED) + if (intr_loc & PCI_EXP_SLTSTA_PDC) pciehp_handle_presence_change(p_slot); /* Check Power Fault Detected */ - if (intr_loc & PWR_FAULT_DETECTED) + if (intr_loc & PCI_EXP_SLTSTA_PFD) pciehp_handle_power_fault(p_slot); return IRQ_HANDLED; @@ -855,7 +722,7 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value) u32 lnk_cap; int retval = 0; - retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); + retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap); if (retval) { ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); return retval; @@ -884,13 +751,13 @@ static int hpc_get_max_lnk_width(struct slot *slot, u32 lnk_cap; int retval = 0; - retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); + retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap); if (retval) { ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); return retval; } - switch ((lnk_cap & 0x03F0) >> 4){ + switch ((lnk_cap & PCI_EXP_LNKSTA_NLW) >> 4){ case 0: lnk_wdth = PCIE_LNK_WIDTH_RESRV; break; @@ -933,14 +800,14 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value) int retval = 0; u16 lnk_status; - retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); + retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); if (retval) { ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", __func__); return retval; } - switch (lnk_status & 0x0F) { + switch (lnk_status & PCI_EXP_LNKSTA_CLS) { case 1: lnk_speed = PCIE_2PT5GB; break; @@ -963,14 +830,14 @@ static int hpc_get_cur_lnk_width(struct slot *slot, int retval = 0; u16 lnk_status; - retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); + retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); if (retval) { ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", __func__); return retval; } - switch ((lnk_status & 0x03F0) >> 4){ + switch ((lnk_status & PCI_EXP_LNKSTA_NLW) >> 4){ case 0: lnk_wdth = PCIE_LNK_WIDTH_RESRV; break; @@ -1036,18 +903,19 @@ int pcie_enable_notification(struct controller *ctrl) { u16 cmd, mask; - cmd = PRSN_DETECT_ENABLE; + cmd = PCI_EXP_SLTCTL_PDCE; if (ATTN_BUTTN(ctrl)) - cmd |= ATTN_BUTTN_ENABLE; + cmd |= PCI_EXP_SLTCTL_ABPE; if (POWER_CTRL(ctrl)) - cmd |= PWR_FAULT_DETECT_ENABLE; + cmd |= PCI_EXP_SLTCTL_PFDE; if (MRL_SENS(ctrl)) - cmd |= MRL_DETECT_ENABLE; + cmd |= PCI_EXP_SLTCTL_MRLSCE; if (!pciehp_poll_mode) - cmd |= HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; + cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE; - mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE | - PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; + mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE | + PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE | + PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE); if (pcie_write_cmd(ctrl, cmd, mask)) { ctrl_err(ctrl, "Cannot enable software notification\n"); @@ -1059,8 +927,9 @@ int pcie_enable_notification(struct controller *ctrl) static void pcie_disable_notification(struct controller *ctrl) { u16 mask; - mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE | - PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; + mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE | + PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE | + 
PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE); if (pcie_write_cmd(ctrl, 0, mask)) ctrl_warn(ctrl, "Cannot disable software notification\n"); } @@ -1157,9 +1026,9 @@ static inline void dbg_ctrl(struct controller *ctrl) EMI(ctrl) ? "yes" : "no"); ctrl_info(ctrl, " Command Completed : %3s\n", NO_CMD_CMPL(ctrl) ? "no" : "yes"); - pciehp_readw(ctrl, SLOTSTATUS, ®16); + pciehp_readw(ctrl, PCI_EXP_SLTSTA, ®16); ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16); - pciehp_readw(ctrl, SLOTCTRL, ®16); + pciehp_readw(ctrl, PCI_EXP_SLTCTL, ®16); ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16); } @@ -1183,7 +1052,7 @@ struct controller *pcie_init(struct pcie_device *dev) ctrl_err(ctrl, "Cannot find PCI Express capability\n"); goto abort_ctrl; } - if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) { + if (pciehp_readl(ctrl, PCI_EXP_SLTCAP, &slot_cap)) { ctrl_err(ctrl, "Cannot read SLOTCAP register\n"); goto abort_ctrl; } @@ -1208,17 +1077,17 @@ struct controller *pcie_init(struct pcie_device *dev) ctrl->no_cmd_complete = 1; /* Check if Data Link Layer Link Active Reporting is implemented */ - if (pciehp_readl(ctrl, LNKCAP, &link_cap)) { + if (pciehp_readl(ctrl, PCI_EXP_LNKCAP, &link_cap)) { ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); goto abort_ctrl; } - if (link_cap & LINK_ACTIVE_REPORTING) { + if (link_cap & PCI_EXP_LNKCAP_DLLLARC) { ctrl_dbg(ctrl, "Link Active Reporting supported\n"); ctrl->link_active_reporting = 1; } /* Clear all remaining event bits in Slot Status register */ - if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f)) + if (pciehp_writew(ctrl, PCI_EXP_SLTSTA, 0x1f)) goto abort_ctrl; /* Disable sotfware notification */ diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index 9c2a22fed18b..4e3e0382c16e 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -14,6 +14,9 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ + +#undef DEBUG + #include <linux/init.h> #include <linux/pci.h> #include <linux/string.h> @@ -151,20 +154,20 @@ static void dlpar_pci_add_bus(struct device_node *dn) return; } + /* Scan below the new bridge */ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) of_scan_pci_bridge(dn, dev); - pcibios_fixup_new_pci_devices(dev->subordinate); - - /* Claim new bus resources */ - pcibios_claim_one_bus(dev->bus); - /* Map IO space for child bus, which may or may not succeed */ pcibios_map_io_space(dev->subordinate); - /* Add new devices to global lists. Register in proc, sysfs. */ - pci_bus_add_devices(phb->bus); + /* Finish adding it : resource allocation, adding devices, etc... 
+ * Note that we need to perform the finish pass on the -parent- + * bus of the EADS bridge so the bridge device itself gets + * properly added + */ + pcibios_finish_adding_to_bus(phb->bus); } static int dlpar_add_pci_slot(char *drc_name, struct device_node *dn) @@ -203,27 +206,6 @@ static int dlpar_add_pci_slot(char *drc_name, struct device_node *dn) return 0; } -static int dlpar_remove_root_bus(struct pci_controller *phb) -{ - struct pci_bus *phb_bus; - int rc; - - phb_bus = phb->bus; - if (!(list_empty(&phb_bus->children) && - list_empty(&phb_bus->devices))) { - return -EBUSY; - } - - rc = pcibios_remove_root_bus(phb); - if (rc) - return -EIO; - - device_unregister(phb_bus->bridge); - pci_remove_bus(phb_bus); - - return 0; -} - static int dlpar_remove_phb(char *drc_name, struct device_node *dn) { struct slot *slot; @@ -235,18 +217,15 @@ static int dlpar_remove_phb(char *drc_name, struct device_node *dn) /* If pci slot is hotplugable, use hotplug to remove it */ slot = find_php_slot(dn); - if (slot) { - if (rpaphp_deregister_slot(slot)) { - printk(KERN_ERR - "%s: unable to remove hotplug slot %s\n", - __func__, drc_name); - return -EIO; - } + if (slot && rpaphp_deregister_slot(slot)) { + printk(KERN_ERR "%s: unable to remove hotplug slot %s\n", + __func__, drc_name); + return -EIO; } pdn = dn->data; BUG_ON(!pdn || !pdn->phb); - rc = dlpar_remove_root_bus(pdn->phb); + rc = remove_phb_dynamic(pdn->phb); if (rc < 0) return rc; @@ -378,26 +357,38 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn) if (!bus) return -EINVAL; - /* If pci slot is hotplugable, use hotplug to remove it */ + pr_debug("PCI: Removing PCI slot below EADS bridge %s\n", + bus->self ? pci_name(bus->self) : "<!PHB!>"); + slot = find_php_slot(dn); if (slot) { + pr_debug("PCI: Removing hotplug slot for %04x:%02x...\n", + pci_domain_nr(bus), bus->number); + if (rpaphp_deregister_slot(slot)) { printk(KERN_ERR "%s: unable to remove hotplug slot %s\n", __func__, drc_name); return -EIO; } - } else - pcibios_remove_pci_devices(bus); + } + + /* Remove all devices below slot */ + pcibios_remove_pci_devices(bus); + /* Unmap PCI IO space */ if (pcibios_unmap_io_space(bus)) { printk(KERN_ERR "%s: failed to unmap bus range\n", __func__); return -ERANGE; } + /* Remove the EADS bridge device itself */ BUG_ON(!bus->self); + pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self)); + eeh_remove_bus_device(bus->self); pci_remove_bus_device(bus->self); + return 0; } diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 5c8baa43ac9c..235fb7a5a8a5 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -27,7 +27,6 @@ #include <linux/slab.h> #include <linux/irq.h> #include <linux/interrupt.h> -#include <linux/sysdev.h> #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/dmar.h> @@ -35,6 +34,7 @@ #include <linux/mempool.h> #include <linux/timer.h> #include <linux/iova.h> +#include <linux/iommu.h> #include <linux/intel-iommu.h> #include <asm/cacheflush.h> #include <asm/iommu.h> @@ -54,6 +54,195 @@ #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) +#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) +#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK) +#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK) + +/* global iommu list, set NULL for ignored DMAR units */ +static struct intel_iommu **g_iommus; + +/* + * 0: Present + * 1-11: Reserved + * 12-63: Context Ptr (12 - (haw-1)) + * 64-127: Reserved + */ +struct root_entry { + u64 val; + u64 rsvd1; +}; +#define ROOT_ENTRY_NR 
(VTD_PAGE_SIZE/sizeof(struct root_entry)) +static inline bool root_present(struct root_entry *root) +{ + return (root->val & 1); +} +static inline void set_root_present(struct root_entry *root) +{ + root->val |= 1; +} +static inline void set_root_value(struct root_entry *root, unsigned long value) +{ + root->val |= value & VTD_PAGE_MASK; +} + +static inline struct context_entry * +get_context_addr_from_root(struct root_entry *root) +{ + return (struct context_entry *) + (root_present(root)?phys_to_virt( + root->val & VTD_PAGE_MASK) : + NULL); +} + +/* + * low 64 bits: + * 0: present + * 1: fault processing disable + * 2-3: translation type + * 12-63: address space root + * high 64 bits: + * 0-2: address width + * 3-6: aval + * 8-23: domain id + */ +struct context_entry { + u64 lo; + u64 hi; +}; + +static inline bool context_present(struct context_entry *context) +{ + return (context->lo & 1); +} +static inline void context_set_present(struct context_entry *context) +{ + context->lo |= 1; +} + +static inline void context_set_fault_enable(struct context_entry *context) +{ + context->lo &= (((u64)-1) << 2) | 1; +} + +#define CONTEXT_TT_MULTI_LEVEL 0 + +static inline void context_set_translation_type(struct context_entry *context, + unsigned long value) +{ + context->lo &= (((u64)-1) << 4) | 3; + context->lo |= (value & 3) << 2; +} + +static inline void context_set_address_root(struct context_entry *context, + unsigned long value) +{ + context->lo |= value & VTD_PAGE_MASK; +} + +static inline void context_set_address_width(struct context_entry *context, + unsigned long value) +{ + context->hi |= value & 7; +} + +static inline void context_set_domain_id(struct context_entry *context, + unsigned long value) +{ + context->hi |= (value & ((1 << 16) - 1)) << 8; +} + +static inline void context_clear_entry(struct context_entry *context) +{ + context->lo = 0; + context->hi = 0; +} + +/* + * 0: readable + * 1: writable + * 2-6: reserved + * 7: super page + * 8-11: available + * 12-63: Host physcial address + */ +struct dma_pte { + u64 val; +}; + +static inline void dma_clear_pte(struct dma_pte *pte) +{ + pte->val = 0; +} + +static inline void dma_set_pte_readable(struct dma_pte *pte) +{ + pte->val |= DMA_PTE_READ; +} + +static inline void dma_set_pte_writable(struct dma_pte *pte) +{ + pte->val |= DMA_PTE_WRITE; +} + +static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot) +{ + pte->val = (pte->val & ~3) | (prot & 3); +} + +static inline u64 dma_pte_addr(struct dma_pte *pte) +{ + return (pte->val & VTD_PAGE_MASK); +} + +static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr) +{ + pte->val |= (addr & VTD_PAGE_MASK); +} + +static inline bool dma_pte_present(struct dma_pte *pte) +{ + return (pte->val & 3) != 0; +} + +/* devices under the same p2p bridge are owned in one domain */ +#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0) + +/* domain represents a virtual machine, more than one devices + * across iommus may be owned in one domain, e.g. kvm guest. 
+ */ +#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1) + +struct dmar_domain { + int id; /* domain id */ + unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/ + + struct list_head devices; /* all devices' list */ + struct iova_domain iovad; /* iova's that belong to this domain */ + + struct dma_pte *pgd; /* virtual address */ + spinlock_t mapping_lock; /* page table lock */ + int gaw; /* max guest address width */ + + /* adjusted guest address width, 0 is level 2 30-bit */ + int agaw; + + int flags; /* flags to find out type of domain */ + + int iommu_coherency;/* indicate coherency of iommu access */ + int iommu_count; /* reference count of iommu */ + spinlock_t iommu_lock; /* protect iommu set in domain */ + u64 max_addr; /* maximum mapped address */ +}; + +/* PCI domain-device relationship */ +struct device_domain_info { + struct list_head link; /* link to domain siblings */ + struct list_head global; /* link to global list */ + u8 bus; /* PCI bus numer */ + u8 devfn; /* PCI devfn number */ + struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */ + struct dmar_domain *domain; /* pointer to domain */ +}; static void flush_unmaps_timeout(unsigned long data); @@ -88,6 +277,8 @@ static int intel_iommu_strict; static DEFINE_SPINLOCK(device_domain_lock); static LIST_HEAD(device_domain_list); +static struct iommu_ops intel_iommu_ops; + static int __init intel_iommu_setup(char *str) { if (!str) @@ -184,6 +375,87 @@ void free_iova_mem(struct iova *iova) kmem_cache_free(iommu_iova_cache, iova); } + +static inline int width_to_agaw(int width); + +/* calculate agaw for each iommu. + * "SAGAW" may be different across iommus, use a default agaw, and + * get a supported less agaw for iommus that don't support the default agaw. + */ +int iommu_calculate_agaw(struct intel_iommu *iommu) +{ + unsigned long sagaw; + int agaw = -1; + + sagaw = cap_sagaw(iommu->cap); + for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH); + agaw >= 0; agaw--) { + if (test_bit(agaw, &sagaw)) + break; + } + + return agaw; +} + +/* in native case, each domain is related to only one iommu */ +static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) +{ + int iommu_id; + + BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE); + + iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); + if (iommu_id < 0 || iommu_id >= g_num_of_iommus) + return NULL; + + return g_iommus[iommu_id]; +} + +/* "Coherency" capability may be different across iommus */ +static void domain_update_iommu_coherency(struct dmar_domain *domain) +{ + int i; + + domain->iommu_coherency = 1; + + i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); + for (; i < g_num_of_iommus; ) { + if (!ecap_coherent(g_iommus[i]->ecap)) { + domain->iommu_coherency = 0; + break; + } + i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1); + } +} + +static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn) +{ + struct dmar_drhd_unit *drhd = NULL; + int i; + + for_each_drhd_unit(drhd) { + if (drhd->ignored) + continue; + + for (i = 0; i < drhd->devices_cnt; i++) + if (drhd->devices[i]->bus->number == bus && + drhd->devices[i]->devfn == devfn) + return drhd->iommu; + + if (drhd->include_all) + return drhd->iommu; + } + + return NULL; +} + +static void domain_flush_cache(struct dmar_domain *domain, + void *addr, int size) +{ + if (!domain->iommu_coherency) + clflush_cache_range(addr, size); +} + /* Gets context entry for a given bus and devfn */ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, u8 bus, 
u8 devfn) @@ -226,7 +498,7 @@ static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn) ret = 0; goto out; } - ret = context_present(context[devfn]); + ret = context_present(&context[devfn]); out: spin_unlock_irqrestore(&iommu->lock, flags); return ret; @@ -242,7 +514,7 @@ static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn) root = &iommu->root_entry[bus]; context = get_context_addr_from_root(root); if (context) { - context_clear_entry(context[devfn]); + context_clear_entry(&context[devfn]); __iommu_flush_cache(iommu, &context[devfn], \ sizeof(*context)); } @@ -339,7 +611,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) if (level == 1) break; - if (!dma_pte_present(*pte)) { + if (!dma_pte_present(pte)) { tmp_page = alloc_pgtable_page(); if (!tmp_page) { @@ -347,18 +619,17 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) flags); return NULL; } - __iommu_flush_cache(domain->iommu, tmp_page, - PAGE_SIZE); - dma_set_pte_addr(*pte, virt_to_phys(tmp_page)); + domain_flush_cache(domain, tmp_page, PAGE_SIZE); + dma_set_pte_addr(pte, virt_to_phys(tmp_page)); /* * high level table always sets r/w, last level page * table control read/write */ - dma_set_pte_readable(*pte); - dma_set_pte_writable(*pte); - __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); + dma_set_pte_readable(pte); + dma_set_pte_writable(pte); + domain_flush_cache(domain, pte, sizeof(*pte)); } - parent = phys_to_virt(dma_pte_addr(*pte)); + parent = phys_to_virt(dma_pte_addr(pte)); level--; } @@ -381,9 +652,9 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr, if (level == total) return pte; - if (!dma_pte_present(*pte)) + if (!dma_pte_present(pte)) break; - parent = phys_to_virt(dma_pte_addr(*pte)); + parent = phys_to_virt(dma_pte_addr(pte)); total--; } return NULL; @@ -398,8 +669,8 @@ static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr) pte = dma_addr_level_pte(domain, addr, 1); if (pte) { - dma_clear_pte(*pte); - __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); + dma_clear_pte(pte); + domain_flush_cache(domain, pte, sizeof(*pte)); } } @@ -445,10 +716,9 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, pte = dma_addr_level_pte(domain, tmp, level); if (pte) { free_pgtable_page( - phys_to_virt(dma_pte_addr(*pte))); - dma_clear_pte(*pte); - __iommu_flush_cache(domain->iommu, - pte, sizeof(*pte)); + phys_to_virt(dma_pte_addr(pte))); + dma_clear_pte(pte); + domain_flush_cache(domain, pte, sizeof(*pte)); } tmp += level_size(level); } @@ -950,17 +1220,28 @@ static int iommu_init_domains(struct intel_iommu *iommu) static void domain_exit(struct dmar_domain *domain); +static void vm_domain_exit(struct dmar_domain *domain); void free_dmar_iommu(struct intel_iommu *iommu) { struct dmar_domain *domain; int i; + unsigned long flags; i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); for (; i < cap_ndoms(iommu->cap); ) { domain = iommu->domains[i]; clear_bit(i, iommu->domain_ids); - domain_exit(domain); + + spin_lock_irqsave(&domain->iommu_lock, flags); + if (--domain->iommu_count == 0) { + if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) + vm_domain_exit(domain); + else + domain_exit(domain); + } + spin_unlock_irqrestore(&domain->iommu_lock, flags); + i = find_next_bit(iommu->domain_ids, cap_ndoms(iommu->cap), i+1); } @@ -978,6 +1259,17 @@ void free_dmar_iommu(struct intel_iommu *iommu) kfree(iommu->domains); kfree(iommu->domain_ids); + 
g_iommus[iommu->seq_id] = NULL; + + /* if all iommus are freed, free g_iommus */ + for (i = 0; i < g_num_of_iommus; i++) { + if (g_iommus[i]) + break; + } + + if (i == g_num_of_iommus) + kfree(g_iommus); + /* free context mapping */ free_context_table(iommu); } @@ -1006,7 +1298,9 @@ static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) set_bit(num, iommu->domain_ids); domain->id = num; - domain->iommu = iommu; + memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); + set_bit(iommu->seq_id, &domain->iommu_bmp); + domain->flags = 0; iommu->domains[num] = domain; spin_unlock_irqrestore(&iommu->lock, flags); @@ -1016,10 +1310,13 @@ static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) static void iommu_free_domain(struct dmar_domain *domain) { unsigned long flags; + struct intel_iommu *iommu; + + iommu = domain_get_iommu(domain); - spin_lock_irqsave(&domain->iommu->lock, flags); - clear_bit(domain->id, domain->iommu->domain_ids); - spin_unlock_irqrestore(&domain->iommu->lock, flags); + spin_lock_irqsave(&iommu->lock, flags); + clear_bit(domain->id, iommu->domain_ids); + spin_unlock_irqrestore(&iommu->lock, flags); } static struct iova_domain reserved_iova_list; @@ -1094,11 +1391,12 @@ static int domain_init(struct dmar_domain *domain, int guest_width) init_iova_domain(&domain->iovad, DMA_32BIT_PFN); spin_lock_init(&domain->mapping_lock); + spin_lock_init(&domain->iommu_lock); domain_reserve_special_ranges(domain); /* calculate AGAW */ - iommu = domain->iommu; + iommu = domain_get_iommu(domain); if (guest_width > cap_mgaw(iommu->cap)) guest_width = cap_mgaw(iommu->cap); domain->gaw = guest_width; @@ -1115,6 +1413,13 @@ static int domain_init(struct dmar_domain *domain, int guest_width) domain->agaw = agaw; INIT_LIST_HEAD(&domain->devices); + if (ecap_coherent(iommu->ecap)) + domain->iommu_coherency = 1; + else + domain->iommu_coherency = 0; + + domain->iommu_count = 1; + /* always allocate the top pgd */ domain->pgd = (struct dma_pte *)alloc_pgtable_page(); if (!domain->pgd) @@ -1151,28 +1456,82 @@ static int domain_context_mapping_one(struct dmar_domain *domain, u8 bus, u8 devfn) { struct context_entry *context; - struct intel_iommu *iommu = domain->iommu; unsigned long flags; + struct intel_iommu *iommu; + struct dma_pte *pgd; + unsigned long num; + unsigned long ndomains; + int id; + int agaw; pr_debug("Set context mapping for %02x:%02x.%d\n", bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); BUG_ON(!domain->pgd); + + iommu = device_to_iommu(bus, devfn); + if (!iommu) + return -ENODEV; + context = device_to_context_entry(iommu, bus, devfn); if (!context) return -ENOMEM; spin_lock_irqsave(&iommu->lock, flags); - if (context_present(*context)) { + if (context_present(context)) { spin_unlock_irqrestore(&iommu->lock, flags); return 0; } - context_set_domain_id(*context, domain->id); - context_set_address_width(*context, domain->agaw); - context_set_address_root(*context, virt_to_phys(domain->pgd)); - context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL); - context_set_fault_enable(*context); - context_set_present(*context); - __iommu_flush_cache(iommu, context, sizeof(*context)); + id = domain->id; + pgd = domain->pgd; + + if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) { + int found = 0; + + /* find an available domain id for this device in iommu */ + ndomains = cap_ndoms(iommu->cap); + num = find_first_bit(iommu->domain_ids, ndomains); + for (; num < ndomains; ) { + if (iommu->domains[num] == domain) { + id = num; + found = 1; + break; + } + num = 
find_next_bit(iommu->domain_ids, + cap_ndoms(iommu->cap), num+1); + } + + if (found == 0) { + num = find_first_zero_bit(iommu->domain_ids, ndomains); + if (num >= ndomains) { + spin_unlock_irqrestore(&iommu->lock, flags); + printk(KERN_ERR "IOMMU: no free domain ids\n"); + return -EFAULT; + } + + set_bit(num, iommu->domain_ids); + iommu->domains[num] = domain; + id = num; + } + + /* Skip top levels of page tables for + * iommu which has less agaw than default. + */ + for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { + pgd = phys_to_virt(dma_pte_addr(pgd)); + if (!dma_pte_present(pgd)) { + spin_unlock_irqrestore(&iommu->lock, flags); + return -ENOMEM; + } + } + } + + context_set_domain_id(context, id); + context_set_address_width(context, iommu->agaw); + context_set_address_root(context, virt_to_phys(pgd)); + context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL); + context_set_fault_enable(context); + context_set_present(context); + domain_flush_cache(domain, context, sizeof(*context)); /* it's a non-present to present mapping */ if (iommu->flush.flush_context(iommu, domain->id, @@ -1183,6 +1542,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain, iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0); spin_unlock_irqrestore(&iommu->lock, flags); + + spin_lock_irqsave(&domain->iommu_lock, flags); + if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) { + domain->iommu_count++; + domain_update_iommu_coherency(domain); + } + spin_unlock_irqrestore(&domain->iommu_lock, flags); return 0; } @@ -1218,13 +1584,17 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev) tmp->bus->number, tmp->devfn); } -static int domain_context_mapped(struct dmar_domain *domain, - struct pci_dev *pdev) +static int domain_context_mapped(struct pci_dev *pdev) { int ret; struct pci_dev *tmp, *parent; + struct intel_iommu *iommu; + + iommu = device_to_iommu(pdev->bus->number, pdev->devfn); + if (!iommu) + return -ENODEV; - ret = device_context_mapped(domain->iommu, + ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn); if (!ret) return ret; @@ -1235,17 +1605,17 @@ static int domain_context_mapped(struct dmar_domain *domain, /* Secondary interface's bus number and devfn 0 */ parent = pdev->bus->self; while (parent != tmp) { - ret = device_context_mapped(domain->iommu, parent->bus->number, + ret = device_context_mapped(iommu, parent->bus->number, parent->devfn); if (!ret) return ret; parent = parent->bus->self; } if (tmp->is_pcie) - return device_context_mapped(domain->iommu, + return device_context_mapped(iommu, tmp->subordinate->number, 0); else - return device_context_mapped(domain->iommu, + return device_context_mapped(iommu, tmp->bus->number, tmp->devfn); } @@ -1273,22 +1643,25 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, /* We don't need lock here, nobody else * touches the iova range */ - BUG_ON(dma_pte_addr(*pte)); - dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT); - dma_set_pte_prot(*pte, prot); - __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); + BUG_ON(dma_pte_addr(pte)); + dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT); + dma_set_pte_prot(pte, prot); + domain_flush_cache(domain, pte, sizeof(*pte)); start_pfn++; index++; } return 0; } -static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn) +static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn) { - clear_context_table(domain->iommu, bus, devfn); - domain->iommu->flush.flush_context(domain->iommu, 0, 
0, 0, + if (!iommu) + return; + + clear_context_table(iommu, bus, devfn); + iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL, 0); - domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0, + iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH, 0); } @@ -1296,6 +1669,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain) { struct device_domain_info *info; unsigned long flags; + struct intel_iommu *iommu; spin_lock_irqsave(&device_domain_lock, flags); while (!list_empty(&domain->devices)) { @@ -1307,7 +1681,8 @@ static void domain_remove_dev_info(struct dmar_domain *domain) info->dev->dev.archdata.iommu = NULL; spin_unlock_irqrestore(&device_domain_lock, flags); - detach_domain_for_dev(info->domain, info->bus, info->devfn); + iommu = device_to_iommu(info->bus, info->devfn); + iommu_detach_dev(iommu, info->bus, info->devfn); free_devinfo_mem(info); spin_lock_irqsave(&device_domain_lock, flags); @@ -1400,7 +1775,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) info->dev = NULL; info->domain = domain; /* This domain is shared by devices under p2p bridge */ - domain->flags |= DOMAIN_FLAG_MULTIPLE_DEVICES; + domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES; /* pcie-to-pci bridge already has a domain, uses it */ found = NULL; @@ -1563,6 +1938,11 @@ static void __init iommu_prepare_gfx_mapping(void) printk(KERN_ERR "IOMMU: mapping reserved region failed\n"); } } +#else /* !CONFIG_DMAR_GFX_WA */ +static inline void iommu_prepare_gfx_mapping(void) +{ + return; +} #endif #ifdef CONFIG_DMAR_FLOPPY_WA @@ -1590,7 +1970,7 @@ static inline void iommu_prepare_isa(void) } #endif /* !CONFIG_DMAR_FLPY_WA */ -int __init init_dmars(void) +static int __init init_dmars(void) { struct dmar_drhd_unit *drhd; struct dmar_rmrr_unit *rmrr; @@ -1613,9 +1993,18 @@ int __init init_dmars(void) */ } + g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *), + GFP_KERNEL); + if (!g_iommus) { + printk(KERN_ERR "Allocating global iommu array failed\n"); + ret = -ENOMEM; + goto error; + } + deferred_flush = kzalloc(g_num_of_iommus * sizeof(struct deferred_flush_tables), GFP_KERNEL); if (!deferred_flush) { + kfree(g_iommus); ret = -ENOMEM; goto error; } @@ -1625,6 +2014,7 @@ int __init init_dmars(void) continue; iommu = drhd->iommu; + g_iommus[iommu->seq_id] = iommu; ret = iommu_init_domains(iommu); if (ret) @@ -1737,6 +2127,7 @@ error: iommu = drhd->iommu; free_iommu(iommu); } + kfree(g_iommus); return ret; } @@ -1805,7 +2196,7 @@ get_valid_domain_for_dev(struct pci_dev *pdev) } /* make sure context mapping is ok */ - if (unlikely(!domain_context_mapped(domain, pdev))) { + if (unlikely(!domain_context_mapped(pdev))) { ret = domain_context_mapping(domain, pdev); if (ret) { printk(KERN_ERR @@ -1827,6 +2218,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, struct iova *iova; int prot = 0; int ret; + struct intel_iommu *iommu; BUG_ON(dir == DMA_NONE); if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) @@ -1836,6 +2228,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, if (!domain) return 0; + iommu = domain_get_iommu(domain); size = aligned_size((u64)paddr, size); iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); @@ -1849,7 +2242,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, * mappings.. 
*/ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \ - !cap_zlr(domain->iommu->cap)) + !cap_zlr(iommu->cap)) prot |= DMA_PTE_READ; if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) prot |= DMA_PTE_WRITE; @@ -1865,10 +2258,10 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, goto error; /* it's a non-present to present mapping */ - ret = iommu_flush_iotlb_psi(domain->iommu, domain->id, + ret = iommu_flush_iotlb_psi(iommu, domain->id, start_paddr, size >> VTD_PAGE_SHIFT, 1); if (ret) - iommu_flush_write_buffer(domain->iommu); + iommu_flush_write_buffer(iommu); return start_paddr + ((u64)paddr & (~PAGE_MASK)); @@ -1895,10 +2288,11 @@ static void flush_unmaps(void) /* just flush them all */ for (i = 0; i < g_num_of_iommus; i++) { - if (deferred_flush[i].next) { - struct intel_iommu *iommu = - deferred_flush[i].domain[0]->iommu; + struct intel_iommu *iommu = g_iommus[i]; + if (!iommu) + continue; + if (deferred_flush[i].next) { iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH, 0); for (j = 0; j < deferred_flush[i].next; j++) { @@ -1925,12 +2319,14 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) { unsigned long flags; int next, iommu_id; + struct intel_iommu *iommu; spin_lock_irqsave(&async_umap_flush_lock, flags); if (list_size == HIGH_WATER_MARK) flush_unmaps(); - iommu_id = dom->iommu->seq_id; + iommu = domain_get_iommu(dom); + iommu_id = iommu->seq_id; next = deferred_flush[iommu_id].next; deferred_flush[iommu_id].domain[next] = dom; @@ -1952,12 +2348,15 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, struct dmar_domain *domain; unsigned long start_addr; struct iova *iova; + struct intel_iommu *iommu; if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) return; domain = find_domain(pdev); BUG_ON(!domain); + iommu = domain_get_iommu(domain); + iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); if (!iova) return; @@ -1973,9 +2372,9 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, /* free page tables */ dma_pte_free_pagetable(domain, start_addr, start_addr + size); if (intel_iommu_strict) { - if (iommu_flush_iotlb_psi(domain->iommu, + if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0)) - iommu_flush_write_buffer(domain->iommu); + iommu_flush_write_buffer(iommu); /* free iova */ __free_iova(&domain->iovad, iova); } else { @@ -2036,11 +2435,15 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, size_t size = 0; void *addr; struct scatterlist *sg; + struct intel_iommu *iommu; if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) return; domain = find_domain(pdev); + BUG_ON(!domain); + + iommu = domain_get_iommu(domain); iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address)); if (!iova) @@ -2057,9 +2460,9 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, /* free page tables */ dma_pte_free_pagetable(domain, start_addr, start_addr + size); - if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr, + if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0)) - iommu_flush_write_buffer(domain->iommu); + iommu_flush_write_buffer(iommu); /* free iova */ __free_iova(&domain->iovad, iova); @@ -2093,6 +2496,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, int ret; struct scatterlist *sg; unsigned long start_addr; + struct intel_iommu *iommu; BUG_ON(dir == DMA_NONE); if (pdev->dev.archdata.iommu == 
DUMMY_DEVICE_DOMAIN_INFO) @@ -2102,6 +2506,8 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, if (!domain) return 0; + iommu = domain_get_iommu(domain); + for_each_sg(sglist, sg, nelems, i) { addr = SG_ENT_VIRT_ADDRESS(sg); addr = (void *)virt_to_phys(addr); @@ -2119,7 +2525,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, * mappings.. */ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \ - !cap_zlr(domain->iommu->cap)) + !cap_zlr(iommu->cap)) prot |= DMA_PTE_READ; if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) prot |= DMA_PTE_WRITE; @@ -2151,9 +2557,9 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, } /* it's a non-present to present mapping */ - if (iommu_flush_iotlb_psi(domain->iommu, domain->id, + if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr, offset >> VTD_PAGE_SHIFT, 1)) - iommu_flush_write_buffer(domain->iommu); + iommu_flush_write_buffer(iommu); return nelems; } @@ -2325,10 +2731,220 @@ int __init intel_iommu_init(void) init_timer(&unmap_timer); force_iommu = 1; dma_ops = &intel_dma_ops; + + register_iommu(&intel_iommu_ops); + + return 0; +} + +static int vm_domain_add_dev_info(struct dmar_domain *domain, + struct pci_dev *pdev) +{ + struct device_domain_info *info; + unsigned long flags; + + info = alloc_devinfo_mem(); + if (!info) + return -ENOMEM; + + info->bus = pdev->bus->number; + info->devfn = pdev->devfn; + info->dev = pdev; + info->domain = domain; + + spin_lock_irqsave(&device_domain_lock, flags); + list_add(&info->link, &domain->devices); + list_add(&info->global, &device_domain_list); + pdev->dev.archdata.iommu = info; + spin_unlock_irqrestore(&device_domain_lock, flags); + + return 0; +} + +static void vm_domain_remove_one_dev_info(struct dmar_domain *domain, + struct pci_dev *pdev) +{ + struct device_domain_info *info; + struct intel_iommu *iommu; + unsigned long flags; + int found = 0; + struct list_head *entry, *tmp; + + iommu = device_to_iommu(pdev->bus->number, pdev->devfn); + if (!iommu) + return; + + spin_lock_irqsave(&device_domain_lock, flags); + list_for_each_safe(entry, tmp, &domain->devices) { + info = list_entry(entry, struct device_domain_info, link); + if (info->bus == pdev->bus->number && + info->devfn == pdev->devfn) { + list_del(&info->link); + list_del(&info->global); + if (info->dev) + info->dev->dev.archdata.iommu = NULL; + spin_unlock_irqrestore(&device_domain_lock, flags); + + iommu_detach_dev(iommu, info->bus, info->devfn); + free_devinfo_mem(info); + + spin_lock_irqsave(&device_domain_lock, flags); + + if (found) + break; + else + continue; + } + + /* if there is no other devices under the same iommu + * owned by this domain, clear this iommu in iommu_bmp + * update iommu count and coherency + */ + if (device_to_iommu(info->bus, info->devfn) == iommu) + found = 1; + } + + if (found == 0) { + unsigned long tmp_flags; + spin_lock_irqsave(&domain->iommu_lock, tmp_flags); + clear_bit(iommu->seq_id, &domain->iommu_bmp); + domain->iommu_count--; + domain_update_iommu_coherency(domain); + spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); + } + + spin_unlock_irqrestore(&device_domain_lock, flags); +} + +static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) +{ + struct device_domain_info *info; + struct intel_iommu *iommu; + unsigned long flags1, flags2; + + spin_lock_irqsave(&device_domain_lock, flags1); + while (!list_empty(&domain->devices)) { + info = list_entry(domain->devices.next, + struct 
device_domain_info, link); + list_del(&info->link); + list_del(&info->global); + if (info->dev) + info->dev->dev.archdata.iommu = NULL; + + spin_unlock_irqrestore(&device_domain_lock, flags1); + + iommu = device_to_iommu(info->bus, info->devfn); + iommu_detach_dev(iommu, info->bus, info->devfn); + + /* clear this iommu in iommu_bmp, update iommu count + * and coherency + */ + spin_lock_irqsave(&domain->iommu_lock, flags2); + if (test_and_clear_bit(iommu->seq_id, + &domain->iommu_bmp)) { + domain->iommu_count--; + domain_update_iommu_coherency(domain); + } + spin_unlock_irqrestore(&domain->iommu_lock, flags2); + + free_devinfo_mem(info); + spin_lock_irqsave(&device_domain_lock, flags1); + } + spin_unlock_irqrestore(&device_domain_lock, flags1); +} + +/* domain id for virtual machine, it won't be set in context */ +static unsigned long vm_domid; + +static int vm_domain_min_agaw(struct dmar_domain *domain) +{ + int i; + int min_agaw = domain->agaw; + + i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); + for (; i < g_num_of_iommus; ) { + if (min_agaw > g_iommus[i]->agaw) + min_agaw = g_iommus[i]->agaw; + + i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1); + } + + return min_agaw; +} + +static struct dmar_domain *iommu_alloc_vm_domain(void) +{ + struct dmar_domain *domain; + + domain = alloc_domain_mem(); + if (!domain) + return NULL; + + domain->id = vm_domid++; + memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); + domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; + + return domain; +} + +static int vm_domain_init(struct dmar_domain *domain, int guest_width) +{ + int adjust_width; + + init_iova_domain(&domain->iovad, DMA_32BIT_PFN); + spin_lock_init(&domain->mapping_lock); + spin_lock_init(&domain->iommu_lock); + + domain_reserve_special_ranges(domain); + + /* calculate AGAW */ + domain->gaw = guest_width; + adjust_width = guestwidth_to_adjustwidth(guest_width); + domain->agaw = width_to_agaw(adjust_width); + + INIT_LIST_HEAD(&domain->devices); + + domain->iommu_count = 0; + domain->iommu_coherency = 0; + domain->max_addr = 0; + + /* always allocate the top pgd */ + domain->pgd = (struct dma_pte *)alloc_pgtable_page(); + if (!domain->pgd) + return -ENOMEM; + domain_flush_cache(domain, domain->pgd, PAGE_SIZE); return 0; } -void intel_iommu_domain_exit(struct dmar_domain *domain) +static void iommu_free_vm_domain(struct dmar_domain *domain) +{ + unsigned long flags; + struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu; + unsigned long i; + unsigned long ndomains; + + for_each_drhd_unit(drhd) { + if (drhd->ignored) + continue; + iommu = drhd->iommu; + + ndomains = cap_ndoms(iommu->cap); + i = find_first_bit(iommu->domain_ids, ndomains); + for (; i < ndomains; ) { + if (iommu->domains[i] == domain) { + spin_lock_irqsave(&iommu->lock, flags); + clear_bit(i, iommu->domain_ids); + iommu->domains[i] = NULL; + spin_unlock_irqrestore(&iommu->lock, flags); + break; + } + i = find_next_bit(iommu->domain_ids, ndomains, i+1); + } + } +} + +static void vm_domain_exit(struct dmar_domain *domain) { u64 end; @@ -2336,6 +2952,9 @@ void intel_iommu_domain_exit(struct dmar_domain *domain) if (!domain) return; + vm_domain_remove_all_dev_info(domain); + /* destroy iovas */ + put_iova_domain(&domain->iovad); end = DOMAIN_MAX_ADDR(domain->gaw); end = end & (~VTD_PAGE_MASK); @@ -2345,94 +2964,167 @@ void intel_iommu_domain_exit(struct dmar_domain *domain) /* free page tables */ dma_pte_free_pagetable(domain, 0, end); - iommu_free_domain(domain); + iommu_free_vm_domain(domain); 
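/*
 * [Editor's note, not part of the patch: teardown order matters here.
 * vm_domain_exit() first unlinks the devices (vm_domain_remove_all_dev_info),
 * then releases the iova allocator, clears and frees the DMA page tables, and
 * only then calls iommu_free_vm_domain(). Unlike iommu_free_domain(), which
 * touches a single unit, the VM variant must walk every DRHD's domain_ids
 * bitmap, because a virtual-machine domain may hold a domain id on several
 * iommus at once; free_domain_mem() below can then release the struct.]
 */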
free_domain_mem(domain); } -EXPORT_SYMBOL_GPL(intel_iommu_domain_exit); -struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev) +static int intel_iommu_domain_init(struct iommu_domain *domain) { - struct dmar_drhd_unit *drhd; - struct dmar_domain *domain; - struct intel_iommu *iommu; - - drhd = dmar_find_matched_drhd_unit(pdev); - if (!drhd) { - printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n"); - return NULL; - } + struct dmar_domain *dmar_domain; - iommu = drhd->iommu; - if (!iommu) { - printk(KERN_ERR - "intel_iommu_domain_alloc: iommu == NULL\n"); - return NULL; - } - domain = iommu_alloc_domain(iommu); - if (!domain) { + dmar_domain = iommu_alloc_vm_domain(); + if (!dmar_domain) { printk(KERN_ERR - "intel_iommu_domain_alloc: domain == NULL\n"); - return NULL; + "intel_iommu_domain_init: dmar_domain == NULL\n"); + return -ENOMEM; } - if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { + if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { printk(KERN_ERR - "intel_iommu_domain_alloc: domain_init() failed\n"); - intel_iommu_domain_exit(domain); - return NULL; + "intel_iommu_domain_init() failed\n"); + vm_domain_exit(dmar_domain); + return -ENOMEM; } - return domain; + domain->priv = dmar_domain; + + return 0; } -EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc); -int intel_iommu_context_mapping( - struct dmar_domain *domain, struct pci_dev *pdev) +static void intel_iommu_domain_destroy(struct iommu_domain *domain) { - int rc; - rc = domain_context_mapping(domain, pdev); - return rc; + struct dmar_domain *dmar_domain = domain->priv; + + domain->priv = NULL; + vm_domain_exit(dmar_domain); } -EXPORT_SYMBOL_GPL(intel_iommu_context_mapping); -int intel_iommu_page_mapping( - struct dmar_domain *domain, dma_addr_t iova, - u64 hpa, size_t size, int prot) +static int intel_iommu_attach_device(struct iommu_domain *domain, + struct device *dev) { - int rc; - rc = domain_page_mapping(domain, iova, hpa, size, prot); - return rc; + struct dmar_domain *dmar_domain = domain->priv; + struct pci_dev *pdev = to_pci_dev(dev); + struct intel_iommu *iommu; + int addr_width; + u64 end; + int ret; + + /* normally pdev is not mapped */ + if (unlikely(domain_context_mapped(pdev))) { + struct dmar_domain *old_domain; + + old_domain = find_domain(pdev); + if (old_domain) { + if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) + vm_domain_remove_one_dev_info(old_domain, pdev); + else + domain_remove_dev_info(old_domain); + } + } + + iommu = device_to_iommu(pdev->bus->number, pdev->devfn); + if (!iommu) + return -ENODEV; + + /* check if this iommu agaw is sufficient for max mapped address */ + addr_width = agaw_to_width(iommu->agaw); + end = DOMAIN_MAX_ADDR(addr_width); + end = end & VTD_PAGE_MASK; + if (end < dmar_domain->max_addr) { + printk(KERN_ERR "%s: iommu agaw (%d) is not " + "sufficient for the mapped address (%llx)\n", + __func__, iommu->agaw, dmar_domain->max_addr); + return -EFAULT; + } + + ret = domain_context_mapping(dmar_domain, pdev); + if (ret) + return ret; + + ret = vm_domain_add_dev_info(dmar_domain, pdev); + return ret; } -EXPORT_SYMBOL_GPL(intel_iommu_page_mapping); -void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn) +static void intel_iommu_detach_device(struct iommu_domain *domain, + struct device *dev) { - detach_domain_for_dev(domain, bus, devfn); + struct dmar_domain *dmar_domain = domain->priv; + struct pci_dev *pdev = to_pci_dev(dev); + + vm_domain_remove_one_dev_info(dmar_domain, pdev); } 
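/*
 * [Editor's aside: an illustrative sketch, not part of the patch. The
 * callbacks above back the generic iommu-api that intel_iommu_init()
 * registers via register_iommu(&intel_iommu_ops). A consumer such as KVM
 * would drive them roughly as below; the helper name and the single-page
 * mapping are made up for illustration, but the entry points are assumed
 * to be the linux/iommu.h wrappers of this kernel generation, each
 * dispatching to the intel_iommu_ops member named in the trailing comment.]
 */
#include <linux/iommu.h>
#include <linux/pci.h>

static int example_assign_device(struct pci_dev *pdev, phys_addr_t host_page)
{
	struct iommu_domain *dom;
	int ret;

	dom = iommu_domain_alloc();		/* -> intel_iommu_domain_init() */
	if (!dom)
		return -ENOMEM;

	ret = iommu_attach_device(dom, &pdev->dev); /* -> intel_iommu_attach_device() */
	if (ret)
		goto out_free;

	/* Map one page at IOVA 0, readable and writable by the device. */
	ret = iommu_map_range(dom, 0, host_page, PAGE_SIZE,
			      IOMMU_READ | IOMMU_WRITE); /* -> intel_iommu_map_range() */
	if (ret)
		goto out_detach;

	/* The translation can be read back through the iova_to_phys hook. */
	WARN_ON(iommu_iova_to_phys(dom, 0) != host_page);

	iommu_unmap_range(dom, 0, PAGE_SIZE);	/* -> intel_iommu_unmap_range() */
out_detach:
	iommu_detach_device(dom, &pdev->dev);	/* -> intel_iommu_detach_device() */
out_free:
	iommu_domain_free(dom);			/* -> intel_iommu_domain_destroy() */
	return ret;
}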
-EXPORT_SYMBOL_GPL(intel_iommu_detach_dev); -struct dmar_domain * -intel_iommu_find_domain(struct pci_dev *pdev) +static int intel_iommu_map_range(struct iommu_domain *domain, + unsigned long iova, phys_addr_t hpa, + size_t size, int iommu_prot) { - return find_domain(pdev); + struct dmar_domain *dmar_domain = domain->priv; + u64 max_addr; + int addr_width; + int prot = 0; + int ret; + + if (iommu_prot & IOMMU_READ) + prot |= DMA_PTE_READ; + if (iommu_prot & IOMMU_WRITE) + prot |= DMA_PTE_WRITE; + + max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size); + if (dmar_domain->max_addr < max_addr) { + int min_agaw; + u64 end; + + /* check if minimum agaw is sufficient for mapped address */ + min_agaw = vm_domain_min_agaw(dmar_domain); + addr_width = agaw_to_width(min_agaw); + end = DOMAIN_MAX_ADDR(addr_width); + end = end & VTD_PAGE_MASK; + if (end < max_addr) { + printk(KERN_ERR "%s: iommu agaw (%d) is not " + "sufficient for the mapped address (%llx)\n", + __func__, min_agaw, max_addr); + return -EFAULT; + } + dmar_domain->max_addr = max_addr; + } + + ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot); + return ret; } -EXPORT_SYMBOL_GPL(intel_iommu_find_domain); -int intel_iommu_found(void) +static void intel_iommu_unmap_range(struct iommu_domain *domain, + unsigned long iova, size_t size) { - return g_num_of_iommus; + struct dmar_domain *dmar_domain = domain->priv; + dma_addr_t base; + + /* The address might not be aligned */ + base = iova & VTD_PAGE_MASK; + size = VTD_PAGE_ALIGN(size); + dma_pte_clear_range(dmar_domain, base, base + size); + + if (dmar_domain->max_addr == base + size) + dmar_domain->max_addr = base; } -EXPORT_SYMBOL_GPL(intel_iommu_found); -u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova) +static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, + unsigned long iova) { + struct dmar_domain *dmar_domain = domain->priv; struct dma_pte *pte; - u64 pfn; - - pfn = 0; - pte = addr_to_dma_pte(domain, iova); + u64 phys = 0; + pte = addr_to_dma_pte(dmar_domain, iova); if (pte) - pfn = dma_pte_addr(*pte); + phys = dma_pte_addr(pte); - return pfn >> VTD_PAGE_SHIFT; + return phys; } -EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn); + +static struct iommu_ops intel_iommu_ops = { + .domain_init = intel_iommu_domain_init, + .domain_destroy = intel_iommu_domain_destroy, + .attach_dev = intel_iommu_attach_device, + .detach_dev = intel_iommu_detach_device, + .map = intel_iommu_map_range, + .unmap = intel_iommu_unmap_range, + .iova_to_phys = intel_iommu_iova_to_phys, +}; diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index 2de5a3238c94..f78371b22529 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c @@ -5,6 +5,7 @@ #include <linux/pci.h> #include <linux/irq.h> #include <asm/io_apic.h> +#include <asm/smp.h> #include <linux/intel-iommu.h> #include "intr_remapping.h" @@ -19,17 +20,75 @@ struct irq_2_iommu { u8 irte_mask; }; -static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; +#ifdef CONFIG_SPARSE_IRQ +static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu) +{ + struct irq_2_iommu *iommu; + int node; + + node = cpu_to_node(cpu); + + iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node); + printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node); + + return iommu; +} static struct irq_2_iommu *irq_2_iommu(unsigned int irq) { - return (irq < nr_irqs) ? 
irq_2_iommuX + irq : NULL; + struct irq_desc *desc; + + desc = irq_to_desc(irq); + + if (WARN_ON_ONCE(!desc)) + return NULL; + + return desc->irq_2_iommu; +} + +static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu) +{ + struct irq_desc *desc; + struct irq_2_iommu *irq_iommu; + + /* + * alloc irq desc if not allocated already. + */ + desc = irq_to_desc_alloc_cpu(irq, cpu); + if (!desc) { + printk(KERN_INFO "can not get irq_desc for %d\n", irq); + return NULL; + } + + irq_iommu = desc->irq_2_iommu; + + if (!irq_iommu) + desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu); + + return desc->irq_2_iommu; } static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) { + return irq_2_iommu_alloc_cpu(irq, boot_cpu_id); +} + +#else /* !CONFIG_SPARSE_IRQ */ + +static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; + +static struct irq_2_iommu *irq_2_iommu(unsigned int irq) +{ + if (irq < nr_irqs) + return &irq_2_iommuX[irq]; + + return NULL; +} +static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) +{ return irq_2_iommu(irq); } +#endif static DEFINE_SPINLOCK(irq_2_ir_lock); @@ -86,9 +145,11 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) if (!count) return -1; +#ifndef CONFIG_SPARSE_IRQ /* protect irq_2_iommu_alloc later */ if (irq >= nr_irqs) return -1; +#endif /* * start the IRTE search from index 0. @@ -130,6 +191,12 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) table->base[i].present = 1; irq_iommu = irq_2_iommu_alloc(irq); + if (!irq_iommu) { + spin_unlock(&irq_2_ir_lock); + printk(KERN_ERR "can't allocate irq_2_iommu\n"); + return -1; + } + irq_iommu->iommu = iommu; irq_iommu->irte_index = index; irq_iommu->sub_handle = 0; @@ -177,6 +244,12 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) irq_iommu = irq_2_iommu_alloc(irq); + if (!irq_iommu) { + spin_unlock(&irq_2_ir_lock); + printk(KERN_ERR "can't allocate irq_2_iommu\n"); + return -1; + } + irq_iommu->iommu = iommu; irq_iommu->irte_index = index; irq_iommu->sub_handle = subhandle; diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c index 6441dfa969a3..de01174aff06 100644 --- a/drivers/pci/irq.c +++ b/drivers/pci/irq.c @@ -15,7 +15,7 @@ static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason) dev_printk(KERN_ERR, &pdev->dev, "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n", - parent->dev.bus_id, parent->vendor, parent->device); + dev_name(&parent->dev), parent->vendor, parent->device); dev_printk(KERN_ERR, &pdev->dev, "%s\n", reason); dev_printk(KERN_ERR, &pdev->dev, "Please report to linux-kernel@vger.kernel.org\n"); WARN_ON(1); diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 74801f7df9c9..b4a90badd0a6 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -103,11 +103,11 @@ static void msix_set_enable(struct pci_dev *dev, int enable) } } -static void msix_flush_writes(unsigned int irq) +static void msix_flush_writes(struct irq_desc *desc) { struct msi_desc *entry; - entry = get_irq_msi(irq); + entry = get_irq_desc_msi(desc); BUG_ON(!entry || !entry->dev); switch (entry->msi_attrib.type) { case PCI_CAP_ID_MSI: @@ -135,11 +135,11 @@ static void msix_flush_writes(unsigned int irq) * Returns 1 if it succeeded in masking the interrupt and 0 if the device * doesn't support MSI masking. 
*/ -static int msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag) +static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag) { struct msi_desc *entry; - entry = get_irq_msi(irq); + entry = get_irq_desc_msi(desc); BUG_ON(!entry || !entry->dev); switch (entry->msi_attrib.type) { case PCI_CAP_ID_MSI: @@ -172,9 +172,9 @@ static int msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag) return 1; } -void read_msi_msg(unsigned int irq, struct msi_msg *msg) +void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) { - struct msi_desc *entry = get_irq_msi(irq); + struct msi_desc *entry = get_irq_desc_msi(desc); switch(entry->msi_attrib.type) { case PCI_CAP_ID_MSI: { @@ -211,9 +211,16 @@ void read_msi_msg(unsigned int irq, struct msi_msg *msg) } } -void write_msi_msg(unsigned int irq, struct msi_msg *msg) +void read_msi_msg(unsigned int irq, struct msi_msg *msg) +{ + struct irq_desc *desc = irq_to_desc(irq); + + read_msi_msg_desc(desc, msg); +} + +void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) { - struct msi_desc *entry = get_irq_msi(irq); + struct msi_desc *entry = get_irq_desc_msi(desc); switch (entry->msi_attrib.type) { case PCI_CAP_ID_MSI: { @@ -252,21 +259,31 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg) entry->msg = *msg; } +void write_msi_msg(unsigned int irq, struct msi_msg *msg) +{ + struct irq_desc *desc = irq_to_desc(irq); + + write_msi_msg_desc(desc, msg); +} + void mask_msi_irq(unsigned int irq) { - msi_set_mask_bits(irq, 1, 1); - msix_flush_writes(irq); + struct irq_desc *desc = irq_to_desc(irq); + + msi_set_mask_bits(desc, 1, 1); + msix_flush_writes(desc); } void unmask_msi_irq(unsigned int irq) { - msi_set_mask_bits(irq, 1, 0); - msix_flush_writes(irq); + struct irq_desc *desc = irq_to_desc(irq); + + msi_set_mask_bits(desc, 1, 0); + msix_flush_writes(desc); } static int msi_free_irqs(struct pci_dev* dev); - static struct msi_desc* alloc_msi_entry(void) { struct msi_desc *entry; @@ -303,9 +320,11 @@ static void __pci_restore_msi_state(struct pci_dev *dev) pci_intx_for_msi(dev, 0); msi_set_enable(dev, 0); write_msi_msg(dev->irq, &entry->msg); - if (entry->msi_attrib.maskbit) - msi_set_mask_bits(dev->irq, entry->msi_attrib.maskbits_mask, + if (entry->msi_attrib.maskbit) { + struct irq_desc *desc = irq_to_desc(dev->irq); + msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask, entry->msi_attrib.masked); + } pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); control &= ~PCI_MSI_FLAGS_QSIZE; @@ -327,8 +346,9 @@ static void __pci_restore_msix_state(struct pci_dev *dev) msix_set_enable(dev, 0); list_for_each_entry(entry, &dev->msi_list, list) { + struct irq_desc *desc = irq_to_desc(entry->irq); write_msi_msg(entry->irq, &entry->msg); - msi_set_mask_bits(entry->irq, 1, entry->msi_attrib.masked); + msi_set_mask_bits(desc, 1, entry->msi_attrib.masked); } BUG_ON(list_empty(&dev->msi_list)); @@ -596,7 +616,8 @@ void pci_msi_shutdown(struct pci_dev* dev) /* Return the the pci reset with msi irqs unmasked */ if (entry->msi_attrib.maskbit) { u32 mask = entry->msi_attrib.maskbits_mask; - msi_set_mask_bits(dev->irq, mask, ~mask); + struct irq_desc *desc = irq_to_desc(dev->irq); + msi_set_mask_bits(desc, mask, ~mask); } if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) return; @@ -755,28 +776,19 @@ void pci_no_msi(void) pci_msi_enable = 0; } -void pci_msi_init_pci_dev(struct pci_dev *dev) -{ - INIT_LIST_HEAD(&dev->msi_list); -} - -#ifdef CONFIG_ACPI -#include <linux/acpi.h> -#include <linux/pci-acpi.h> -static void 
__devinit msi_acpi_init(void) +/** + * pci_msi_enabled - is MSI enabled? + * + * Returns true if MSI has not been disabled by the command-line option + * pci=nomsi. + **/ +int pci_msi_enabled(void) { - if (acpi_pci_disabled) - return; - pci_osc_support_set(OSC_MSI_SUPPORT); - pcie_osc_support_set(OSC_MSI_SUPPORT); + return pci_msi_enable; } -#else -static inline void msi_acpi_init(void) { } -#endif /* CONFIG_ACPI */ +EXPORT_SYMBOL(pci_msi_enabled); -void __devinit msi_init(void) +void pci_msi_init_pci_dev(struct pci_dev *dev) { - if (!pci_msi_enable) - return; - msi_acpi_init(); + INIT_LIST_HEAD(&dev->msi_list); } diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 9d976d51d406..deea8a187eb8 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -22,13 +22,14 @@ struct acpi_osc_data { acpi_handle handle; u32 support_set; u32 control_set; + u32 control_query; + int is_queried; struct list_head sibiling; }; static LIST_HEAD(acpi_osc_data_list); struct acpi_osc_args { u32 capbuf[3]; - u32 ctrl_result; }; static DEFINE_MUTEX(pci_acpi_lock); @@ -54,7 +55,7 @@ static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66}; static acpi_status acpi_run_osc(acpi_handle handle, - struct acpi_osc_args *osc_args) + struct acpi_osc_args *osc_args, u32 *retval) { acpi_status status; struct acpi_object_list input; @@ -110,8 +111,7 @@ static acpi_status acpi_run_osc(acpi_handle handle, goto out_kfree; } out_success: - osc_args->ctrl_result = - *((u32 *)(out_obj->buffer.pointer + 8)); + *retval = *((u32 *)(out_obj->buffer.pointer + 8)); status = AE_OK; out_kfree: @@ -119,11 +119,10 @@ out_kfree: return status; } -static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data, - u32 *result) +static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data) { acpi_status status; - u32 support_set; + u32 support_set, result; struct acpi_osc_args osc_args; /* do _OSC query for all possible controls */ @@ -132,56 +131,45 @@ static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data, osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set; osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS; - status = acpi_run_osc(osc_data->handle, &osc_args); + status = acpi_run_osc(osc_data->handle, &osc_args, &result); if (ACPI_SUCCESS(status)) { osc_data->support_set = support_set; - *result = osc_args.ctrl_result; + osc_data->control_query = result; + osc_data->is_queried = 1; } return status; } -static acpi_status acpi_query_osc(acpi_handle handle, - u32 level, void *context, void **retval) +/* + * pci_acpi_osc_support: Invoke _OSC indicating support for the given feature + * @flags: Bitmask of flags to support + * + * See the ACPI spec for the definition of the flags + */ +int pci_acpi_osc_support(acpi_handle handle, u32 flags) { acpi_status status; - struct acpi_osc_data *osc_data; - u32 flags = (unsigned long)context, dummy; acpi_handle tmp; + struct acpi_osc_data *osc_data; + int rc = 0; status = acpi_get_handle(handle, "_OSC", &tmp); if (ACPI_FAILURE(status)) - return AE_OK; + return -ENOTTY; mutex_lock(&pci_acpi_lock); osc_data = acpi_get_osc_data(handle); if (!osc_data) { printk(KERN_ERR "acpi osc data array is full\n"); + rc = -ENOMEM; goto out; } - __acpi_query_osc(flags, osc_data, &dummy); + __acpi_query_osc(flags, osc_data); out: mutex_unlock(&pci_acpi_lock); - return AE_OK; -} - -/** - * __pci_osc_support_set - register OS support to Firmware - * @flags: OS support bits - * @hid: 
hardware ID - * - * Update OS support fields and doing a _OSC Query to obtain an update - * from Firmware on supported control bits. - **/ -acpi_status __pci_osc_support_set(u32 flags, const char *hid) -{ - if (!(flags & OSC_SUPPORT_MASKS)) - return AE_TYPE; - - acpi_get_devices(hid, acpi_query_osc, - (void *)(unsigned long)flags, NULL); - return AE_OK; + return rc; } /** @@ -194,7 +182,7 @@ acpi_status __pci_osc_support_set(u32 flags, const char *hid) acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) { acpi_status status; - u32 ctrlset, control_set, result; + u32 control_req, control_set, result; acpi_handle tmp; struct acpi_osc_data *osc_data; struct acpi_osc_args osc_args; @@ -211,28 +199,34 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) goto out; } - ctrlset = (flags & OSC_CONTROL_MASKS); - if (!ctrlset) { + control_req = (flags & OSC_CONTROL_MASKS); + if (!control_req) { status = AE_TYPE; goto out; } - status = __acpi_query_osc(osc_data->support_set, osc_data, &result); - if (ACPI_FAILURE(status)) + /* No need to evaluate _OSC if the control was already granted. */ + if ((osc_data->control_set & control_req) == control_req) goto out; - if ((result & ctrlset) != ctrlset) { + if (!osc_data->is_queried) { + status = __acpi_query_osc(osc_data->support_set, osc_data); + if (ACPI_FAILURE(status)) + goto out; + } + + if ((osc_data->control_query & control_req) != control_req) { status = AE_SUPPORT; goto out; } - control_set = osc_data->control_set | ctrlset; + control_set = osc_data->control_set | control_req; osc_args.capbuf[OSC_QUERY_TYPE] = 0; osc_args.capbuf[OSC_SUPPORT_TYPE] = osc_data->support_set; osc_args.capbuf[OSC_CONTROL_TYPE] = control_set; - status = acpi_run_osc(handle, &osc_args); + status = acpi_run_osc(handle, &osc_args, &result); if (ACPI_SUCCESS(status)) - osc_data->control_set = control_set; + osc_data->control_set = result; out: mutex_unlock(&pci_acpi_lock); return status; @@ -373,7 +367,7 @@ static int acpi_pci_find_root_bridge(struct device *dev, acpi_handle *handle) * The string should be the same as root bridge's name * Please look at 'pci_scan_bus_parented' */ - num = sscanf(dev->bus_id, "pci%04x:%02x", &seg, &bus); + num = sscanf(dev_name(dev), "pci%04x:%02x", &seg, &bus); if (num != 2) return -ENODEV; *handle = acpi_get_pci_rootbridge_handle(seg, bus); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index b4cdd690ae71..c697f2680856 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -16,6 +16,7 @@ #include <linux/string.h> #include <linux/slab.h> #include <linux/sched.h> +#include <linux/cpu.h> #include "pci.h" /* @@ -48,7 +49,7 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count) subdevice=PCI_ANY_ID, class=0, class_mask=0; unsigned long driver_data=0; int fields=0; - int retval; + int retval=0; fields = sscanf(buf, "%x %x %x %x %x %x %lx", &vendor, &device, &subvendor, &subdevice, @@ -58,16 +59,18 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count) /* Only accept driver_data values that match an existing id_table entry */ - retval = -EINVAL; - while (ids->vendor || ids->subvendor || ids->class_mask) { - if (driver_data == ids->driver_data) { - retval = 0; - break; + if (ids) { + retval = -EINVAL; + while (ids->vendor || ids->subvendor || ids->class_mask) { + if (driver_data == ids->driver_data) { + retval = 0; + break; + } + ids++; } - ids++; + if (retval) /* No match */ + return retval; } - if (retval) /* No match */ - return retval; dynid = 
kzalloc(sizeof(*dynid), GFP_KERNEL); if (!dynid) @@ -183,32 +186,43 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv, return pci_match_id(drv->id_table, dev); } +struct drv_dev_and_id { + struct pci_driver *drv; + struct pci_dev *dev; + const struct pci_device_id *id; +}; + +static long local_pci_probe(void *_ddi) +{ + struct drv_dev_and_id *ddi = _ddi; + + return ddi->drv->probe(ddi->dev, ddi->id); +} + static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, const struct pci_device_id *id) { - int error; -#ifdef CONFIG_NUMA - /* Execute driver initialization on node where the - device's bus is attached to. This way the driver likely - allocates its local memory on the right node without - any need to change it. */ - struct mempolicy *oldpol; - cpumask_t oldmask = current->cpus_allowed; - int node = dev_to_node(&dev->dev); + int error, node; + struct drv_dev_and_id ddi = { drv, dev, id }; + /* Execute driver initialization on node where the device's + bus is attached to. This way the driver likely allocates + its local memory on the right node without any need to + change it. */ + node = dev_to_node(&dev->dev); if (node >= 0) { + int cpu; node_to_cpumask_ptr(nodecpumask, node); - set_cpus_allowed_ptr(current, nodecpumask); - } - /* And set default memory allocation policy */ - oldpol = current->mempolicy; - current->mempolicy = NULL; /* fall back to system default policy */ -#endif - error = drv->probe(dev, id); -#ifdef CONFIG_NUMA - set_cpus_allowed_ptr(current, &oldmask); - current->mempolicy = oldpol; -#endif + + get_online_cpus(); + cpu = cpumask_any_and(nodecpumask, cpu_online_mask); + if (cpu < nr_cpu_ids) + error = work_on_cpu(cpu, local_pci_probe, &ddi); + else + error = local_pci_probe(&ddi); + put_online_cpus(); + } else + error = local_pci_probe(&ddi); return error; } @@ -302,11 +316,10 @@ static void pci_device_shutdown(struct device *dev) /* * Default "suspend" method for devices that have no driver provided suspend, - * or not even a driver at all. + * or not even a driver at all (second part). */ -static void pci_default_pm_suspend(struct pci_dev *pci_dev) +static void pci_pm_set_unknown_state(struct pci_dev *pci_dev) { - pci_save_state(pci_dev); /* * mark its power state as "unknown", since we don't know if * e.g. the BIOS will change its device state when we suspend. @@ -317,14 +330,12 @@ static void pci_default_pm_suspend(struct pci_dev *pci_dev) /* * Default "resume" method for devices that have no driver provided resume, - * or not even a driver at all. + * or not even a driver at all (second part). */ -static int pci_default_pm_resume(struct pci_dev *pci_dev) +static int pci_pm_reenable_device(struct pci_dev *pci_dev) { - int retval = 0; + int retval; - /* restore the PCI config space */ - pci_restore_state(pci_dev); /* if the device was enabled before suspend, reenable */ retval = pci_reenable_device(pci_dev); /* @@ -347,8 +358,16 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state) i = drv->suspend(pci_dev, state); suspend_report_result(drv->suspend, i); } else { - pci_default_pm_suspend(pci_dev); + pci_save_state(pci_dev); + /* + * This is for compatibility with existing code with legacy PM + * support. 
+ */ + pci_pm_set_unknown_state(pci_dev); } + + pci_fixup_device(pci_fixup_suspend, pci_dev); + return i; } @@ -365,30 +384,130 @@ static int pci_legacy_suspend_late(struct device *dev, pm_message_t state) return i; } +static int pci_legacy_resume_early(struct device *dev) +{ + int error = 0; + struct pci_dev * pci_dev = to_pci_dev(dev); + struct pci_driver * drv = pci_dev->driver; + + pci_fixup_device(pci_fixup_resume_early, pci_dev); + + if (drv && drv->resume_early) + error = drv->resume_early(pci_dev); + return error; +} + static int pci_legacy_resume(struct device *dev) { int error; struct pci_dev * pci_dev = to_pci_dev(dev); struct pci_driver * drv = pci_dev->driver; - if (drv && drv->resume) + pci_fixup_device(pci_fixup_resume, pci_dev); + + if (drv && drv->resume) { error = drv->resume(pci_dev); - else - error = pci_default_pm_resume(pci_dev); + } else { + /* restore the PCI config space */ + pci_restore_state(pci_dev); + error = pci_pm_reenable_device(pci_dev); + } return error; } -static int pci_legacy_resume_early(struct device *dev) +/* Auxiliary functions used by the new power management framework */ + +static int pci_restore_standard_config(struct pci_dev *pci_dev) { + struct pci_dev *parent = pci_dev->bus->self; int error = 0; - struct pci_dev * pci_dev = to_pci_dev(dev); - struct pci_driver * drv = pci_dev->driver; - if (drv && drv->resume_early) - error = drv->resume_early(pci_dev); + /* Check if the device's bus is operational */ + if (!parent || parent->current_state == PCI_D0) { + pci_restore_state(pci_dev); + pci_update_current_state(pci_dev, PCI_D0); + } else { + dev_warn(&pci_dev->dev, "unable to restore config, " + "bridge %s in low power state D%d\n", pci_name(parent), + parent->current_state); + pci_dev->current_state = PCI_UNKNOWN; + error = -EAGAIN; + } + return error; } +static bool pci_is_bridge(struct pci_dev *pci_dev) +{ + return !!(pci_dev->subordinate); +} + +static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) +{ + if (pci_restore_standard_config(pci_dev)) + pci_fixup_device(pci_fixup_resume_early, pci_dev); +} + +static int pci_pm_default_resume(struct pci_dev *pci_dev) +{ + /* + * pci_restore_standard_config() should have been called once already, + * but it would have failed if the device's parent bridge had not been + * in power state D0 at that time. Check it and try again if necessary. + */ + if (pci_dev->current_state == PCI_UNKNOWN) { + int error = pci_restore_standard_config(pci_dev); + if (error) + return error; + } + + pci_fixup_device(pci_fixup_resume, pci_dev); + + if (!pci_is_bridge(pci_dev)) + pci_enable_wake(pci_dev, PCI_D0, false); + + return pci_pm_reenable_device(pci_dev); +} + +static void pci_pm_default_suspend_generic(struct pci_dev *pci_dev) +{ + /* If device is enabled at this point, disable it */ + pci_disable_enabled_device(pci_dev); + /* + * Save state with interrupts enabled, because in principle the bus the + * device is on may be put into a low power state after this code runs. 
+ */ + pci_save_state(pci_dev); +} + +static void pci_pm_default_suspend(struct pci_dev *pci_dev) +{ + pci_pm_default_suspend_generic(pci_dev); + + if (!pci_is_bridge(pci_dev)) + pci_prepare_to_sleep(pci_dev); + + pci_fixup_device(pci_fixup_suspend, pci_dev); +} + +static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev) +{ + struct pci_driver *drv = pci_dev->driver; + bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume + || drv->resume_early); + + /* + * Legacy PM support is used by default, so warn if the new framework is + * supported as well. Drivers are supposed to support either the + * former, or the latter, but not both at the same time. + */ + WARN_ON(ret && drv->driver.pm); + + return ret; +} + +/* New power management framework */ + static int pci_pm_prepare(struct device *dev) { struct device_driver *drv = dev->driver; @@ -416,17 +535,16 @@ static int pci_pm_suspend(struct device *dev) struct device_driver *drv = dev->driver; int error = 0; - if (drv && drv->pm) { - if (drv->pm->suspend) { - error = drv->pm->suspend(dev); - suspend_report_result(drv->pm->suspend, error); - } else { - pci_default_pm_suspend(pci_dev); - } - } else { - error = pci_legacy_suspend(dev, PMSG_SUSPEND); + if (pci_has_legacy_pm_support(pci_dev)) + return pci_legacy_suspend(dev, PMSG_SUSPEND); + + if (drv && drv->pm && drv->pm->suspend) { + error = drv->pm->suspend(dev); + suspend_report_result(drv->pm->suspend, error); } - pci_fixup_device(pci_fixup_suspend, pci_dev); + + if (!error) + pci_pm_default_suspend(pci_dev); return error; } @@ -434,53 +552,53 @@ static int pci_pm_suspend(struct device *dev) static int pci_pm_suspend_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); - struct pci_driver *drv = pci_dev->driver; + struct device_driver *drv = dev->driver; int error = 0; - if (drv && drv->pm) { - if (drv->pm->suspend_noirq) { - error = drv->pm->suspend_noirq(dev); - suspend_report_result(drv->pm->suspend_noirq, error); - } - } else { - error = pci_legacy_suspend_late(dev, PMSG_SUSPEND); + if (pci_has_legacy_pm_support(pci_dev)) + return pci_legacy_suspend_late(dev, PMSG_SUSPEND); + + if (drv && drv->pm && drv->pm->suspend_noirq) { + error = drv->pm->suspend_noirq(dev); + suspend_report_result(drv->pm->suspend_noirq, error); } + if (!error) + pci_pm_set_unknown_state(pci_dev); + return error; } -static int pci_pm_resume(struct device *dev) +static int pci_pm_resume_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct device_driver *drv = dev->driver; - int error; + int error = 0; - pci_fixup_device(pci_fixup_resume, pci_dev); + if (pci_has_legacy_pm_support(pci_dev)) + return pci_legacy_resume_early(dev); - if (drv && drv->pm) { - error = drv->pm->resume ? 
drv->pm->resume(dev) : - pci_default_pm_resume(pci_dev); - } else { - error = pci_legacy_resume(dev); - } + pci_pm_default_resume_noirq(pci_dev); + + if (drv && drv->pm && drv->pm->resume_noirq) + error = drv->pm->resume_noirq(dev); return error; } -static int pci_pm_resume_noirq(struct device *dev) +static int pci_pm_resume(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); - struct pci_driver *drv = pci_dev->driver; + struct device_driver *drv = dev->driver; int error = 0; - pci_fixup_device(pci_fixup_resume_early, pci_dev); + if (pci_has_legacy_pm_support(pci_dev)) + return pci_legacy_resume(dev); - if (drv && drv->pm) { - if (drv->pm->resume_noirq) - error = drv->pm->resume_noirq(dev); - } else { - error = pci_legacy_resume_early(dev); - } + error = pci_pm_default_resume(pci_dev); + + if (!error && drv && drv->pm && drv->pm->resume) + error = drv->pm->resume(dev); return error; } @@ -502,141 +620,140 @@ static int pci_pm_freeze(struct device *dev) struct device_driver *drv = dev->driver; int error = 0; - if (drv && drv->pm) { - if (drv->pm->freeze) { - error = drv->pm->freeze(dev); - suspend_report_result(drv->pm->freeze, error); - } else { - pci_default_pm_suspend(pci_dev); - } - } else { - error = pci_legacy_suspend(dev, PMSG_FREEZE); - pci_fixup_device(pci_fixup_suspend, pci_dev); + if (pci_has_legacy_pm_support(pci_dev)) + return pci_legacy_suspend(dev, PMSG_FREEZE); + + if (drv && drv->pm && drv->pm->freeze) { + error = drv->pm->freeze(dev); + suspend_report_result(drv->pm->freeze, error); } + if (!error) + pci_pm_default_suspend_generic(pci_dev); + return error; } static int pci_pm_freeze_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); - struct pci_driver *drv = pci_dev->driver; + struct device_driver *drv = dev->driver; int error = 0; - if (drv && drv->pm) { - if (drv->pm->freeze_noirq) { - error = drv->pm->freeze_noirq(dev); - suspend_report_result(drv->pm->freeze_noirq, error); - } - } else { - error = pci_legacy_suspend_late(dev, PMSG_FREEZE); + if (pci_has_legacy_pm_support(pci_dev)) + return pci_legacy_suspend_late(dev, PMSG_FREEZE); + + if (drv && drv->pm && drv->pm->freeze_noirq) { + error = drv->pm->freeze_noirq(dev); + suspend_report_result(drv->pm->freeze_noirq, error); } + if (!error) + pci_pm_set_unknown_state(pci_dev); + return error; } -static int pci_pm_thaw(struct device *dev) +static int pci_pm_thaw_noirq(struct device *dev) { + struct pci_dev *pci_dev = to_pci_dev(dev); struct device_driver *drv = dev->driver; int error = 0; - if (drv && drv->pm) { - if (drv->pm->thaw) - error = drv->pm->thaw(dev); - } else { - pci_fixup_device(pci_fixup_resume, to_pci_dev(dev)); - error = pci_legacy_resume(dev); - } + if (pci_has_legacy_pm_support(pci_dev)) + return pci_legacy_resume_early(dev); + + pci_update_current_state(pci_dev, PCI_D0); + + if (drv && drv->pm && drv->pm->thaw_noirq) + error = drv->pm->thaw_noirq(dev); return error; } -static int pci_pm_thaw_noirq(struct device *dev) +static int pci_pm_thaw(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); - struct pci_driver *drv = pci_dev->driver; + struct device_driver *drv = dev->driver; int error = 0; - if (drv && drv->pm) { - if (drv->pm->thaw_noirq) - error = drv->pm->thaw_noirq(dev); - } else { - pci_fixup_device(pci_fixup_resume_early, pci_dev); - error = pci_legacy_resume_early(dev); - } + if (pci_has_legacy_pm_support(pci_dev)) + return pci_legacy_resume(dev); + + pci_pm_reenable_device(pci_dev); + + if (drv && drv->pm && drv->pm->thaw) + error = 
drv->pm->thaw(dev); return error; } static int pci_pm_poweroff(struct device *dev) { + struct pci_dev *pci_dev = to_pci_dev(dev); struct device_driver *drv = dev->driver; int error = 0; - pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev)); + if (pci_has_legacy_pm_support(pci_dev)) + return pci_legacy_suspend(dev, PMSG_HIBERNATE); - if (drv && drv->pm) { - if (drv->pm->poweroff) { - error = drv->pm->poweroff(dev); - suspend_report_result(drv->pm->poweroff, error); - } - } else { - error = pci_legacy_suspend(dev, PMSG_HIBERNATE); + if (drv && drv->pm && drv->pm->poweroff) { + error = drv->pm->poweroff(dev); + suspend_report_result(drv->pm->poweroff, error); } + if (!error) + pci_pm_default_suspend(pci_dev); + return error; } static int pci_pm_poweroff_noirq(struct device *dev) { - struct pci_dev *pci_dev = to_pci_dev(dev); - struct pci_driver *drv = pci_dev->driver; + struct device_driver *drv = dev->driver; int error = 0; - if (drv && drv->pm) { - if (drv->pm->poweroff_noirq) { - error = drv->pm->poweroff_noirq(dev); - suspend_report_result(drv->pm->poweroff_noirq, error); - } - } else { - error = pci_legacy_suspend_late(dev, PMSG_HIBERNATE); + if (pci_has_legacy_pm_support(to_pci_dev(dev))) + return pci_legacy_suspend_late(dev, PMSG_HIBERNATE); + + if (drv && drv->pm && drv->pm->poweroff_noirq) { + error = drv->pm->poweroff_noirq(dev); + suspend_report_result(drv->pm->poweroff_noirq, error); } return error; } -static int pci_pm_restore(struct device *dev) +static int pci_pm_restore_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct device_driver *drv = dev->driver; - int error; + int error = 0; - if (drv && drv->pm) { - error = drv->pm->restore ? drv->pm->restore(dev) : - pci_default_pm_resume(pci_dev); - } else { - error = pci_legacy_resume(dev); - } - pci_fixup_device(pci_fixup_resume, pci_dev); + if (pci_has_legacy_pm_support(pci_dev)) + return pci_legacy_resume_early(dev); + + pci_pm_default_resume_noirq(pci_dev); + + if (drv && drv->pm && drv->pm->restore_noirq) + error = drv->pm->restore_noirq(dev); return error; } -static int pci_pm_restore_noirq(struct device *dev) +static int pci_pm_restore(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); - struct pci_driver *drv = pci_dev->driver; + struct device_driver *drv = dev->driver; int error = 0; - pci_fixup_device(pci_fixup_resume, pci_dev); + if (pci_has_legacy_pm_support(pci_dev)) + return pci_legacy_resume(dev); - if (drv && drv->pm) { - if (drv->pm->restore_noirq) - error = drv->pm->restore_noirq(dev); - } else { - error = pci_legacy_resume_early(dev); - } - pci_fixup_device(pci_fixup_resume_early, pci_dev); + error = pci_pm_default_resume(pci_dev); + + if (!error && drv && drv->pm && drv->pm->restore) + error = drv->pm->restore(dev); return error; } @@ -654,17 +771,15 @@ static int pci_pm_restore_noirq(struct device *dev) #endif /* !CONFIG_HIBERNATION */ -struct pm_ext_ops pci_pm_ops = { - .base = { - .prepare = pci_pm_prepare, - .complete = pci_pm_complete, - .suspend = pci_pm_suspend, - .resume = pci_pm_resume, - .freeze = pci_pm_freeze, - .thaw = pci_pm_thaw, - .poweroff = pci_pm_poweroff, - .restore = pci_pm_restore, - }, +struct dev_pm_ops pci_dev_pm_ops = { + .prepare = pci_pm_prepare, + .complete = pci_pm_complete, + .suspend = pci_pm_suspend, + .resume = pci_pm_resume, + .freeze = pci_pm_freeze, + .thaw = pci_pm_thaw, + .poweroff = pci_pm_poweroff, + .restore = pci_pm_restore, .suspend_noirq = pci_pm_suspend_noirq, .resume_noirq = pci_pm_resume_noirq, .freeze_noirq = 
pci_pm_freeze_noirq, @@ -673,7 +788,7 @@ struct pm_ext_ops pci_pm_ops = { .restore_noirq = pci_pm_restore_noirq, }; -#define PCI_PM_OPS_PTR &pci_pm_ops +#define PCI_PM_OPS_PTR (&pci_dev_pm_ops) #else /* !CONFIG_PM_SLEEP */ @@ -703,9 +818,6 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner, drv->driver.owner = owner; drv->driver.mod_name = mod_name; - if (drv->pm) - drv->driver.pm = &drv->pm->base; - spin_lock_init(&drv->dynids.lock); INIT_LIST_HEAD(&drv->dynids.list); diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c new file mode 100644 index 000000000000..74fbec0bf6cb --- /dev/null +++ b/drivers/pci/pci-stub.c @@ -0,0 +1,47 @@ +/* pci-stub - simple stub driver to reserve a pci device + * + * Copyright (C) 2008 Red Hat, Inc. + * Author: + * Chris Wright + * + * This work is licensed under the terms of the GNU GPL, version 2. + * + * Usage is simple, allocate a new id to the stub driver and bind the + * device to it. For example: + * + * # echo "8086 10f5" > /sys/bus/pci/drivers/pci-stub/new_id + * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind + * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/pci-stub/bind + * # ls -l /sys/bus/pci/devices/0000:00:19.0/driver + * .../0000:00:19.0/driver -> ../../../bus/pci/drivers/pci-stub + */ + +#include <linux/module.h> +#include <linux/pci.h> + +static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + return 0; +} + +static struct pci_driver stub_driver = { + .name = "pci-stub", + .id_table = NULL, /* only dynamic id's */ + .probe = pci_stub_probe, +}; + +static int __init pci_stub_init(void) +{ + return pci_register_driver(&stub_driver); +} + +static void __exit pci_stub_exit(void) +{ + pci_unregister_driver(&stub_driver); +} + +module_init(pci_stub_init); +module_exit(pci_stub_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Chris Wright <chrisw@sous-sol.org>"); diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 5d72866897a8..c23619fb6c4b 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -58,13 +58,14 @@ static ssize_t broken_parity_status_store(struct device *dev, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); - ssize_t consumed = -EINVAL; + unsigned long val; - if ((count > 0) && (*buf == '0' || *buf == '1')) { - pdev->broken_parity_status = *buf == '1' ? 
1 : 0; - consumed = count; - } - return consumed; + if (strict_strtoul(buf, 0, &val) < 0) + return -EINVAL; + + pdev->broken_parity_status = !!val; + + return count; } static ssize_t local_cpus_show(struct device *dev, @@ -74,7 +75,7 @@ static ssize_t local_cpus_show(struct device *dev, int len; mask = pcibus_to_cpumask(to_pci_dev(dev)->bus); - len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask); + len = cpumask_scnprintf(buf, PAGE_SIZE-2, &mask); buf[len++] = '\n'; buf[len] = '\0'; return len; @@ -88,7 +89,7 @@ static ssize_t local_cpulist_show(struct device *dev, int len; mask = pcibus_to_cpumask(to_pci_dev(dev)->bus); - len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask); + len = cpulist_scnprintf(buf, PAGE_SIZE-2, &mask); buf[len++] = '\n'; buf[len] = '\0'; return len; @@ -101,11 +102,13 @@ resource_show(struct device * dev, struct device_attribute *attr, char * buf) struct pci_dev * pci_dev = to_pci_dev(dev); char * str = buf; int i; - int max = 7; + int max; resource_size_t start, end; if (pci_dev->subordinate) max = DEVICE_COUNT_RESOURCE; + else + max = PCI_BRIDGE_RESOURCES; for (i = 0; i < max; i++) { struct resource *res = &pci_dev->resource[i]; @@ -133,19 +136,23 @@ static ssize_t is_enabled_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - ssize_t result = -EINVAL; struct pci_dev *pdev = to_pci_dev(dev); + unsigned long val; + ssize_t result = strict_strtoul(buf, 0, &val); + + if (result < 0) + return result; /* this can crash the machine when done on the "wrong" device */ if (!capable(CAP_SYS_ADMIN)) - return count; + return -EPERM; - if (*buf == '0') { + if (!val) { if (atomic_read(&pdev->enable_cnt) != 0) pci_disable_device(pdev); else result = -EIO; - } else if (*buf == '1') + } else result = pci_enable_device(pdev); return result < 0 ? result : count; @@ -185,25 +192,28 @@ msi_bus_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); + unsigned long val; + + if (strict_strtoul(buf, 0, &val) < 0) + return -EINVAL; /* bad things may happen if the no_msi flag is changed * while some drivers are loaded */ if (!capable(CAP_SYS_ADMIN)) - return count; + return -EPERM; + /* Maybe pci devices without subordinate busses shouldn't even have this + * attribute in the first place? */ if (!pdev->subordinate) return count; - if (*buf == '0') { - pdev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; - dev_warn(&pdev->dev, "forced subordinate bus to not support MSI," - " bad things could happen.\n"); - } + /* Is the flag going to change, or keep the value it already had? */ + if (!(pdev->subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI) ^ + !!val) { + pdev->subordinate->bus_flags ^= PCI_BUS_FLAGS_NO_MSI; - if (*buf == '1') { - pdev->subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI; - dev_warn(&pdev->dev, "forced subordinate bus to support MSI," - " bad things could happen.\n"); + dev_warn(&pdev->dev, "forced subordinate bus to%s support MSI," + " bad things could happen\n", val ? 
"" : " not"); } return count; @@ -361,55 +371,33 @@ pci_write_config(struct kobject *kobj, struct bin_attribute *bin_attr, } static ssize_t -pci_read_vpd(struct kobject *kobj, struct bin_attribute *bin_attr, - char *buf, loff_t off, size_t count) +read_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) { struct pci_dev *dev = to_pci_dev(container_of(kobj, struct device, kobj)); - int end; - int ret; if (off > bin_attr->size) count = 0; else if (count > bin_attr->size - off) count = bin_attr->size - off; - end = off + count; - - while (off < end) { - ret = dev->vpd->ops->read(dev, off, end - off, buf); - if (ret < 0) - return ret; - buf += ret; - off += ret; - } - return count; + return pci_read_vpd(dev, off, count, buf); } static ssize_t -pci_write_vpd(struct kobject *kobj, struct bin_attribute *bin_attr, - char *buf, loff_t off, size_t count) +write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) { struct pci_dev *dev = to_pci_dev(container_of(kobj, struct device, kobj)); - int end; - int ret; if (off > bin_attr->size) count = 0; else if (count > bin_attr->size - off) count = bin_attr->size - off; - end = off + count; - - while (off < end) { - ret = dev->vpd->ops->write(dev, off, end - off, buf); - if (ret < 0) - return ret; - buf += ret; - off += ret; - } - return count; + return pci_write_vpd(dev, off, count, buf); } #ifdef HAVE_PCI_LEGACY @@ -569,7 +557,7 @@ void pci_remove_legacy_files(struct pci_bus *b) #ifdef HAVE_PCI_MMAP -static int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma) +int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma) { unsigned long nr, start, size; @@ -620,6 +608,9 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, vma->vm_pgoff += start >> PAGE_SHIFT; mmap_type = res->flags & IORESOURCE_MEM ? 
pci_mmap_mem : pci_mmap_io; + if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(start)) + return -EINVAL; + return pci_mmap_page_range(pdev, vma, mmap_type, write_combine); } @@ -832,8 +823,8 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev) attr->size = dev->vpd->len; attr->attr.name = "vpd"; attr->attr.mode = S_IRUSR | S_IWUSR; - attr->read = pci_read_vpd; - attr->write = pci_write_vpd; + attr->read = read_vpd_attr; + attr->write = write_vpd_attr; retval = sysfs_create_bin_file(&dev->dev.kobj, attr); if (retval) { kfree(dev->vpd->attr); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 061d1ee0046a..c12f6c790698 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -56,6 +56,22 @@ unsigned char pci_bus_max_busnr(struct pci_bus* bus) } EXPORT_SYMBOL_GPL(pci_bus_max_busnr); +#ifdef CONFIG_HAS_IOMEM +void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) +{ + /* + * Make sure the BAR is actually a memory resource, not an IO resource + */ + if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { + WARN_ON(1); + return NULL; + } + return ioremap_nocache(pci_resource_start(pdev, bar), + pci_resource_len(pdev, bar)); +} +EXPORT_SYMBOL_GPL(pci_ioremap_bar); +#endif + #if 0 /** * pci_max_busnr - returns maximum PCI bus number @@ -360,25 +376,10 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res) static void pci_restore_bars(struct pci_dev *dev) { - int i, numres; - - switch (dev->hdr_type) { - case PCI_HEADER_TYPE_NORMAL: - numres = 6; - break; - case PCI_HEADER_TYPE_BRIDGE: - numres = 2; - break; - case PCI_HEADER_TYPE_CARDBUS: - numres = 1; - break; - default: - /* Should never get here, but just in case... */ - return; - } + int i; - for (i = 0; i < numres; i ++) - pci_update_resource(dev, &dev->resource[i], i); + for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) + pci_update_resource(dev, i); } static struct pci_platform_pm_ops *pci_platform_pm; @@ -524,14 +525,17 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) * pci_update_current_state - Read PCI power state of given device from its * PCI PM registers and cache it * @dev: PCI device to handle. 
+ * @state: State to cache in case the device doesn't have the PM capability */ -static void pci_update_current_state(struct pci_dev *dev) +void pci_update_current_state(struct pci_dev *dev, pci_power_t state) { if (dev->pm_cap) { u16 pmcsr; pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); + } else { + dev->current_state = state; } } @@ -574,7 +578,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) */ int ret = platform_pci_set_power_state(dev, PCI_D0); if (!ret) - pci_update_current_state(dev); + pci_update_current_state(dev, PCI_D0); } /* This device is quirked not to be put into D3, so don't put it in D3 */ @@ -587,7 +591,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) /* Allow the platform to finalize the transition */ int ret = platform_pci_set_power_state(dev, state); if (!ret) { - pci_update_current_state(dev); + pci_update_current_state(dev, state); error = 0; } } @@ -640,19 +644,14 @@ static int pci_save_pcie_state(struct pci_dev *dev) int pos, i = 0; struct pci_cap_saved_state *save_state; u16 *cap; - int found = 0; pos = pci_find_capability(dev, PCI_CAP_ID_EXP); if (pos <= 0) return 0; save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); - if (!save_state) - save_state = kzalloc(sizeof(*save_state) + sizeof(u16) * 4, GFP_KERNEL); - else - found = 1; if (!save_state) { - dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n"); + dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__); return -ENOMEM; } cap = (u16 *)&save_state->data[0]; @@ -661,9 +660,7 @@ static int pci_save_pcie_state(struct pci_dev *dev) pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]); pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]); pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]); - save_state->cap_nr = PCI_CAP_ID_EXP; - if (!found) - pci_add_saved_cap(dev, save_state); + return 0; } @@ -688,30 +685,21 @@ static void pci_restore_pcie_state(struct pci_dev *dev) static int pci_save_pcix_state(struct pci_dev *dev) { - int pos, i = 0; + int pos; struct pci_cap_saved_state *save_state; - u16 *cap; - int found = 0; pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); if (pos <= 0) return 0; save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); - if (!save_state) - save_state = kzalloc(sizeof(*save_state) + sizeof(u16), GFP_KERNEL); - else - found = 1; if (!save_state) { - dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n"); + dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__); return -ENOMEM; } - cap = (u16 *)&save_state->data[0]; - pci_read_config_word(dev, pos + PCI_X_CMD, &cap[i++]); - save_state->cap_nr = PCI_CAP_ID_PCIX; - if (!found) - pci_add_saved_cap(dev, save_state); + pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data); + return 0; } @@ -982,6 +970,32 @@ void pcim_pin_device(struct pci_dev *pdev) */ void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {} +static void do_pci_disable_device(struct pci_dev *dev) +{ + u16 pci_command; + + pci_read_config_word(dev, PCI_COMMAND, &pci_command); + if (pci_command & PCI_COMMAND_MASTER) { + pci_command &= ~PCI_COMMAND_MASTER; + pci_write_config_word(dev, PCI_COMMAND, pci_command); + } + + pcibios_disable_device(dev); +} + +/** + * pci_disable_enabled_device - Disable device without updating enable_cnt + * @dev: PCI device to disable + * + * NOTE: This function is a backend of PCI power management routines and is + * not supposed to be called by drivers.
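+ * + * For illustration only (this is not a driver API): the new suspend path + * above invokes it roughly like this, from pci_pm_default_suspend_generic(): + * + *	pci_disable_enabled_device(pci_dev); + *	pci_save_state(pci_dev);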
+ */ +void pci_disable_enabled_device(struct pci_dev *dev) +{ + if (atomic_read(&dev->enable_cnt)) + do_pci_disable_device(dev); +} + /** * pci_disable_device - Disable PCI device after use * @dev: PCI device to be disabled @@ -996,7 +1010,6 @@ void pci_disable_device(struct pci_dev *dev) { struct pci_devres *dr; - u16 pci_command; dr = find_pci_dr(dev); if (dr) @@ -1005,14 +1018,9 @@ pci_disable_device(struct pci_dev *dev) if (atomic_sub_return(1, &dev->enable_cnt) != 0) return; - pci_read_config_word(dev, PCI_COMMAND, &pci_command); - if (pci_command & PCI_COMMAND_MASTER) { - pci_command &= ~PCI_COMMAND_MASTER; - pci_write_config_word(dev, PCI_COMMAND, pci_command); - } - dev->is_busmaster = 0; + do_pci_disable_device(dev); - pcibios_disable_device(dev); + dev->is_busmaster = 0; } /** @@ -1107,7 +1115,7 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) int error = 0; bool pme_done = false; - if (!device_may_wakeup(&dev->dev)) + if (enable && !device_may_wakeup(&dev->dev)) return -EINVAL; /* @@ -1252,14 +1260,15 @@ void pci_pm_init(struct pci_dev *dev) /* find PCI PM capability in list */ pm = pci_find_capability(dev, PCI_CAP_ID_PM); if (!pm) - return; + goto Exit; + /* Check device's ability to generate PME# */ pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc); if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n", pmc & PCI_PM_CAP_VER_MASK); - return; + goto Exit; } dev->pm_cap = pm; @@ -1298,6 +1307,74 @@ void pci_pm_init(struct pci_dev *dev) } else { dev->pme_support = 0; } + + Exit: + pci_update_current_state(dev, PCI_D0); +} + +/** + * platform_pci_wakeup_init - init platform wakeup if present + * @dev: PCI device + * + * Some devices don't have PCI PM caps but can still generate wakeup + * events through platform methods (like ACPI events). If @dev supports + * platform wakeup events, set the device flag to indicate as much. This + * may be redundant if the device also supports PCI PM caps, but double + * initialization should be safe in that case. 
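+ * + * Illustrative call site (a sketch; in this series the helper is intended + * to run while a device is being scanned, alongside pci_pm_init()): + * + *	pci_pm_init(dev); + *	platform_pci_wakeup_init(dev);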
+ */ +void platform_pci_wakeup_init(struct pci_dev *dev) +{ + if (!platform_pci_can_wakeup(dev)) + return; + + device_set_wakeup_capable(&dev->dev, true); + device_set_wakeup_enable(&dev->dev, false); + platform_pci_sleep_wake(dev, false); +} + +/** + * pci_add_cap_save_buffer - allocate buffer for saving given capability registers + * @dev: the PCI device + * @cap: the capability to allocate the buffer for + * @size: requested size of the buffer + */ +static int pci_add_cap_save_buffer( + struct pci_dev *dev, char cap, unsigned int size) +{ + int pos; + struct pci_cap_saved_state *save_state; + + pos = pci_find_capability(dev, cap); + if (pos <= 0) + return 0; + + save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL); + if (!save_state) + return -ENOMEM; + + save_state->cap_nr = cap; + pci_add_saved_cap(dev, save_state); + + return 0; +} + +/** + * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities + * @dev: the PCI device + */ +void pci_allocate_cap_save_buffers(struct pci_dev *dev) +{ + int error; + + error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 4 * sizeof(u16)); + if (error) + dev_err(&dev->dev, + "unable to preallocate PCI Express save buffer\n"); + + error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16)); + if (error) + dev_err(&dev->dev, + "unable to preallocate PCI-X save buffer\n"); } /** @@ -1337,6 +1414,20 @@ void pci_enable_ari(struct pci_dev *dev) bridge->ari_enabled = 1; } +/** + * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge + * @dev: the PCI device + * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) + * + * Perform INTx swizzling for a device behind one level of bridge. This is + * required by section 9.1 of the PCI-to-PCI bridge specification for devices + * behind bridges on add-in cards. + */ +u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin) +{ + return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1; +} + int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) { @@ -1345,9 +1436,9 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) pin = dev->pin; if (!pin) return -1; - pin--; + while (dev->bus->self) { - pin = (pin + PCI_SLOT(dev->devfn)) % 4; + pin = pci_swizzle_interrupt_pin(dev, pin); dev = dev->bus->self; } *bridge = dev; @@ -1355,6 +1446,26 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) } /** + * pci_common_swizzle - swizzle INTx all the way to root bridge + * @dev: the PCI device + * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD) + * + * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI + * bridges all the way up to a PCI root bus. + */ +u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp) +{ + u8 pin = *pinp; + + while (dev->bus->self) { + pin = pci_swizzle_interrupt_pin(dev, pin); + dev = dev->bus->self; + } + *pinp = pin; + return PCI_SLOT(dev->devfn); +} + +/** * pci_release_region - Release a PCI bar * @pdev: PCI device whose resources were previously reserved by pci_request_region * @bar: BAR to release @@ -1395,7 +1506,8 @@ void pci_release_region(struct pci_dev *pdev, int bar) * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure.
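 * * Example (illustrative; "mydrv" is a placeholder owner string): * *	if (pci_request_region(pdev, 0, "mydrv")) *		return -EBUSY;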
*/ -int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) +static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name, + int exclusive) { struct pci_devres *dr; @@ -1408,8 +1520,9 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) goto err_out; } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { - if (!request_mem_region(pci_resource_start(pdev, bar), - pci_resource_len(pdev, bar), res_name)) + if (!__request_mem_region(pci_resource_start(pdev, bar), + pci_resource_len(pdev, bar), res_name, + exclusive)) goto err_out; } @@ -1428,6 +1541,47 @@ err_out: } /** + * pci_request_region - Reserve PCI I/O and memory resource + * @pdev: PCI device whose resources are to be reserved + * @bar: BAR to be reserved + * @res_name: Name to be associated with resource. + * + * Mark the PCI region associated with PCI device @pdev BAR @bar as + * being reserved by owner @res_name. Do not access any + * address inside the PCI regions unless this call returns + * successfully. + * + * Returns 0 on success, or %EBUSY on error. A warning + * message is also printed on failure. + */ +int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) +{ + return __pci_request_region(pdev, bar, res_name, 0); +} + +/** + * pci_request_region_exclusive - Reserve PCI I/O and memory resource + * @pdev: PCI device whose resources are to be reserved + * @bar: BAR to be reserved + * @res_name: Name to be associated with resource. + * + * Mark the PCI region associated with PCI device @pdev BAR @bar as + * being reserved by owner @res_name. Do not access any + * address inside the PCI regions unless this call returns + * successfully. + * + * Returns 0 on success, or %EBUSY on error. A warning + * message is also printed on failure. + * + * The key difference that _exclusive makes is that userspace is + * explicitly not allowed to map the resource via /dev/mem or + * sysfs.
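+ * + * Example (illustrative; "mydrv" is a placeholder): a driver that must keep + * userspace from mapping its registers would use + * + *	pci_request_region_exclusive(pdev, 0, "mydrv"); + * + * where a plain pci_request_region() would still allow the mapping.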
+ */ +int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name) +{ + return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE); +} +/** * pci_release_selected_regions - Release selected PCI I/O and memory resources * @pdev: PCI device whose resources were previously reserved * @bars: Bitmask of BARs to be released @@ -1444,20 +1598,14 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars) pci_release_region(pdev, i); } -/** - * pci_request_selected_regions - Reserve selected PCI I/O and memory resources - * @pdev: PCI device whose resources are to be reserved - * @bars: Bitmask of BARs to be requested - * @res_name: Name to be associated with resource - */ -int pci_request_selected_regions(struct pci_dev *pdev, int bars, - const char *res_name) +int __pci_request_selected_regions(struct pci_dev *pdev, int bars, + const char *res_name, int excl) { int i; for (i = 0; i < 6; i++) if (bars & (1 << i)) - if(pci_request_region(pdev, i, res_name)) + if (__pci_request_region(pdev, i, res_name, excl)) goto err_out; return 0; @@ -1469,6 +1617,26 @@ err_out: return -EBUSY; } + +/** + * pci_request_selected_regions - Reserve selected PCI I/O and memory resources + * @pdev: PCI device whose resources are to be reserved + * @bars: Bitmask of BARs to be requested + * @res_name: Name to be associated with resource + */ +int pci_request_selected_regions(struct pci_dev *pdev, int bars, + const char *res_name) +{ + return __pci_request_selected_regions(pdev, bars, res_name, 0); +} + +int pci_request_selected_regions_exclusive(struct pci_dev *pdev, + int bars, const char *res_name) +{ + return __pci_request_selected_regions(pdev, bars, res_name, + IORESOURCE_EXCLUSIVE); +} + /** * pci_release_regions - Release reserved PCI I/O and memory resources * @pdev: PCI device whose resources were previously reserved by pci_request_regions @@ -1502,27 +1670,66 @@ int pci_request_regions(struct pci_dev *pdev, const char *res_name) } /** + * pci_request_regions_exclusive - Reserve PCI I/O and memory resources + * @pdev: PCI device whose resources are to be reserved + * @res_name: Name to be associated with resource. + * + * Mark all PCI regions associated with PCI device @pdev as + * being reserved by owner @res_name. Do not access any + * address inside the PCI regions unless this call returns + * successfully. + * + * pci_request_regions_exclusive() will mark the region so that + * /dev/mem and the sysfs MMIO access will not be allowed. + * + * Returns 0 on success, or %EBUSY on error. A warning + * message is also printed on failure. + */ +int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name) +{ + return pci_request_selected_regions_exclusive(pdev, + ((1 << 6) - 1), res_name); +} + +static void __pci_set_master(struct pci_dev *dev, bool enable) +{ + u16 old_cmd, cmd; + + pci_read_config_word(dev, PCI_COMMAND, &old_cmd); + if (enable) + cmd = old_cmd | PCI_COMMAND_MASTER; + else + cmd = old_cmd & ~PCI_COMMAND_MASTER; + if (cmd != old_cmd) { + dev_dbg(&dev->dev, "%s bus mastering\n", + enable ? "enabling" : "disabling"); + pci_write_config_word(dev, PCI_COMMAND, cmd); + } + dev->is_busmaster = enable; +} + +/** * pci_set_master - enables bus-mastering for device dev * @dev: the PCI device to enable * * Enables bus-mastering on the device and calls pcibios_set_master() * to do the needed arch specific settings.
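 * * Example (illustrative pairing with the new pci_clear_master() helper below): * *	pci_set_master(pdev);	(in probe, before starting DMA) *	... *	pci_clear_master(pdev);	(in remove, once DMA is quiesced)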
*/ -void -pci_set_master(struct pci_dev *dev) +void pci_set_master(struct pci_dev *dev) { - u16 cmd; - - pci_read_config_word(dev, PCI_COMMAND, &cmd); - if (! (cmd & PCI_COMMAND_MASTER)) { - dev_dbg(&dev->dev, "enabling bus mastering\n"); - cmd |= PCI_COMMAND_MASTER; - pci_write_config_word(dev, PCI_COMMAND, cmd); - } - dev->is_busmaster = 1; + __pci_set_master(dev, true); pcibios_set_master(dev); } +/** + * pci_clear_master - disables bus-mastering for device dev + * @dev: the PCI device to disable + */ +void pci_clear_master(struct pci_dev *dev) +{ + __pci_set_master(dev, false); +} + #ifdef PCI_DISABLE_MWI int pci_set_mwi(struct pci_dev *dev) { @@ -1751,24 +1958,7 @@ int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask) EXPORT_SYMBOL(pci_set_dma_seg_boundary); #endif -/** - * pci_execute_reset_function() - Reset a PCI device function - * @dev: Device function to reset - * - * Some devices allow an individual function to be reset without affecting - * other functions in the same device. The PCI device must be responsive - * to PCI config space in order to use this function. - * - * The device function is presumed to be unused when this function is called. - * Resetting the device will make the contents of PCI configuration space - * random, so any caller of this must be prepared to reinitialise the - * device including MSI, bus mastering, BARs, decoding IO and memory spaces, - * etc. - * - * Returns 0 if the device function was successfully reset or -ENOTTY if the - * device doesn't support resetting a single function. - */ -int pci_execute_reset_function(struct pci_dev *dev) +static int __pcie_flr(struct pci_dev *dev, int probe) { u16 status; u32 cap; @@ -1780,6 +1970,9 @@ int pci_execute_reset_function(struct pci_dev *dev) if (!(cap & PCI_EXP_DEVCAP_FLR)) return -ENOTTY; + if (probe) + return 0; + pci_block_user_cfg_access(dev); /* Wait for Transaction Pending bit clean */ @@ -1802,6 +1995,80 @@ int pci_execute_reset_function(struct pci_dev *dev) pci_unblock_user_cfg_access(dev); return 0; } + +static int __pci_af_flr(struct pci_dev *dev, int probe) +{ + int cappos = pci_find_capability(dev, PCI_CAP_ID_AF); + u8 status; + u8 cap; + + if (!cappos) + return -ENOTTY; + pci_read_config_byte(dev, cappos + PCI_AF_CAP, &cap); + if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR)) + return -ENOTTY; + + if (probe) + return 0; + + pci_block_user_cfg_access(dev); + + /* Wait for Transaction Pending bit clean */ + msleep(100); + pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status); + if (status & PCI_AF_STATUS_TP) { + dev_info(&dev->dev, "Busy after 100ms while trying to" + " reset; sleeping for 1 second\n"); + ssleep(1); + pci_read_config_byte(dev, + cappos + PCI_AF_STATUS, &status); + if (status & PCI_AF_STATUS_TP) + dev_info(&dev->dev, "Still busy after 1s; " + "proceeding with reset anyway\n"); + } + pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR); + mdelay(100); + + pci_unblock_user_cfg_access(dev); + return 0; +} + +static int __pci_reset_function(struct pci_dev *pdev, int probe) +{ + int res; + + res = __pcie_flr(pdev, probe); + if (res != -ENOTTY) + return res; + + res = __pci_af_flr(pdev, probe); + if (res != -ENOTTY) + return res; + + return res; +} + +/** + * pci_execute_reset_function() - Reset a PCI device function + * @dev: Device function to reset + * + * Some devices allow an individual function to be reset without affecting + * other functions in the same device. 
The PCI device must be responsive + * to PCI config space in order to use this function. + * + * The device function is presumed to be unused when this function is called. + * Resetting the device will make the contents of PCI configuration space + * random, so any caller of this must be prepared to reinitialise the + * device including MSI, bus mastering, BARs, decoding IO and memory spaces, + * etc. + * + * Returns 0 if the device function was successfully reset or -ENOTTY if the + * device doesn't support resetting a single function. + */ +int pci_execute_reset_function(struct pci_dev *dev) +{ + return __pci_reset_function(dev, 0); +} EXPORT_SYMBOL_GPL(pci_execute_reset_function); /** @@ -1822,15 +2089,10 @@ EXPORT_SYMBOL_GPL(pci_execute_reset_function); */ int pci_reset_function(struct pci_dev *dev) { - u32 cap; - int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP); - int r; + int r = __pci_reset_function(dev, 1); - if (!exppos) - return -ENOTTY; - pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap); - if (!(cap & PCI_EXP_DEVCAP_FLR)) - return -ENOTTY; + if (r < 0) + return r; if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0) disable_irq(dev->irq); @@ -2022,6 +2284,28 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags) return bars; } +/** + * pci_resource_bar - get position of the BAR associated with a resource + * @dev: the PCI device + * @resno: the resource number + * @type: the BAR type to be filled in + * + * Returns BAR position in config space, or 0 if the BAR is invalid. + */ +int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) +{ + if (resno < PCI_ROM_RESOURCE) { + *type = pci_bar_unknown; + return PCI_BASE_ADDRESS_0 + 4 * resno; + } else if (resno == PCI_ROM_RESOURCE) { + *type = pci_bar_mem32; + return dev->rom_base_reg; + } + + dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno); + return 0; +} + static void __devinit pci_no_domains(void) { #ifdef CONFIG_PCI_DOMAINS @@ -2029,6 +2313,19 @@ static void __devinit pci_no_domains(void) #endif } +/** + * pci_ext_cfg_avail - can we access extended PCI config space? + * @dev: The PCI device of the root bridge. + * + * Returns 1 if we can access PCI extended config space (offsets + * greater than 0xff). This is the default implementation. Architecture + * implementations can override this.
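+ * + * A possible override, sketched for illustration only ("arch_mmcfg_usable()" + * is a hypothetical helper, not a real kernel function): + * + *	int pci_ext_cfg_avail(struct pci_dev *dev) + *	{ + *		return arch_mmcfg_usable() ? 1 : 0; + *	}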
+ */ +int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev) +{ + return 1; +} + static int __devinit pci_init(void) { struct pci_dev *dev = NULL; @@ -2037,8 +2334,6 @@ static int __devinit pci_init(void) pci_fixup_device(pci_fixup_final, dev); } - msi_init(); - return 0; } @@ -2083,11 +2378,15 @@ EXPORT_SYMBOL(pci_find_capability); EXPORT_SYMBOL(pci_bus_find_capability); EXPORT_SYMBOL(pci_release_regions); EXPORT_SYMBOL(pci_request_regions); +EXPORT_SYMBOL(pci_request_regions_exclusive); EXPORT_SYMBOL(pci_release_region); EXPORT_SYMBOL(pci_request_region); +EXPORT_SYMBOL(pci_request_region_exclusive); EXPORT_SYMBOL(pci_release_selected_regions); EXPORT_SYMBOL(pci_request_selected_regions); +EXPORT_SYMBOL(pci_request_selected_regions_exclusive); EXPORT_SYMBOL(pci_set_master); +EXPORT_SYMBOL(pci_clear_master); EXPORT_SYMBOL(pci_set_mwi); EXPORT_SYMBOL(pci_try_set_mwi); EXPORT_SYMBOL(pci_clear_mwi); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 9de87e9f98f5..1351bb4addde 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -10,6 +10,10 @@ extern int pci_uevent(struct device *dev, struct kobj_uevent_env *env); extern int pci_create_sysfs_dev_files(struct pci_dev *pdev); extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev); extern void pci_cleanup_rom(struct pci_dev *dev); +#ifdef HAVE_PCI_MMAP +extern int pci_mmap_fits(struct pci_dev *pdev, int resno, + struct vm_area_struct *vma); +#endif /** * Firmware PM callbacks @@ -40,7 +44,11 @@ struct pci_platform_pm_ops { }; extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops); +extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state); +extern void pci_disable_enabled_device(struct pci_dev *dev); extern void pci_pm_init(struct pci_dev *dev); +extern void platform_pci_wakeup_init(struct pci_dev *dev); +extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); @@ -50,14 +58,14 @@ extern int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val); extern int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val); struct pci_vpd_ops { - int (*read)(struct pci_dev *dev, int pos, int size, char *buf); - int (*write)(struct pci_dev *dev, int pos, int size, const char *buf); + ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf); + ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); void (*release)(struct pci_dev *dev); }; struct pci_vpd { unsigned int len; - struct pci_vpd_ops *ops; + const struct pci_vpd_ops *ops; struct bin_attribute *attr; /* descriptor for sysfs VPD entry */ }; @@ -98,11 +106,9 @@ extern unsigned int pci_pm_d3_delay; #ifdef CONFIG_PCI_MSI void pci_no_msi(void); extern void pci_msi_init_pci_dev(struct pci_dev *dev); -extern void __devinit msi_init(void); #else static inline void pci_no_msi(void) { } static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } -static inline void msi_init(void) { } #endif #ifdef CONFIG_PCIEAER @@ -159,16 +165,28 @@ struct pci_slot_attribute { }; #define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr) +enum pci_bar_type { + pci_bar_unknown, /* Standard PCI BAR probe */ + pci_bar_io, /* An io port BAR */ + pci_bar_mem32, /* A 32-bit memory BAR */ + pci_bar_mem64, /* A 64-bit memory BAR */ +}; + +extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, + struct 
resource *res, unsigned int reg); +extern int pci_resource_bar(struct pci_dev *dev, int resno, + enum pci_bar_type *type); +extern int pci_bus_add_child(struct pci_bus *bus); extern void pci_enable_ari(struct pci_dev *dev); /** * pci_ari_enabled - query ARI forwarding status - * @dev: the PCI device + * @bus: the PCI bus * * Returns 1 if ARI forwarding is enabled, or 0 if not enabled; */ -static inline int pci_ari_enabled(struct pci_dev *dev) +static inline int pci_ari_enabled(struct pci_bus *bus) { - return dev->ari_enabled; + return bus->self && bus->self->ari_enabled; } #endif /* DRIVERS_PCI_H */ diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c index 6dd7b13e9808..ebce26c37049 100644 --- a/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/drivers/pci/pcie/aer/aerdrv_acpi.c @@ -38,7 +38,6 @@ int aer_osc_setup(struct pcie_device *pciedev) handle = acpi_find_root_bridge_handle(pdev); if (handle) { - pcie_osc_support_set(OSC_EXT_PCI_CONFIG_SUPPORT); status = pci_osc_control_set(handle, OSC_PCI_EXPRESS_AER_CONTROL | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c index 3933d4f30e8c..0fc29ae80df8 100644 --- a/drivers/pci/pcie/aer/aerdrv_errprint.c +++ b/drivers/pci/pcie/aer/aerdrv_errprint.c @@ -233,7 +233,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) if (info->flags & AER_TLP_HEADER_VALID_FLAG) { unsigned char *tlp = (unsigned char *) &info->tlp; - printk("%sTLB Header:\n", loglevel); + printk("%sTLP Header:\n", loglevel); printk("%s%02x%02x%02x%02x %02x%02x%02x%02x" " %02x%02x%02x%02x %02x%02x%02x%02x\n", loglevel, diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 9aad608bcf3f..586b6f75910d 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -17,6 +17,7 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> +#include <linux/delay.h> #include <linux/pci-aspm.h> #include "../pci.h" @@ -33,6 +34,11 @@ struct endpoint_state { struct pcie_link_state { struct list_head sibiling; struct pci_dev *pdev; + bool downstream_has_switch; + + struct pcie_link_state *parent; + struct list_head children; + struct list_head link; /* ASPM state */ unsigned int support_state; @@ -70,6 +76,8 @@ static const char *policy_str[] = { [POLICY_POWERSAVE] = "powersave" }; +#define LINK_RETRAIN_TIMEOUT HZ + static int policy_to_aspm_state(struct pci_dev *pdev) { struct pcie_link_state *link_state = pdev->link_state; @@ -125,7 +133,7 @@ static void pcie_set_clock_pm(struct pci_dev *pdev, int enable) link_state->clk_pm_enabled = !!enable; } -static void pcie_check_clock_pm(struct pci_dev *pdev) +static void pcie_check_clock_pm(struct pci_dev *pdev, int blacklist) { int pos; u32 reg32; @@ -149,10 +157,26 @@ static void pcie_check_clock_pm(struct pci_dev *pdev) if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN)) enabled = 0; } - link_state->clk_pm_capable = capable; link_state->clk_pm_enabled = enabled; link_state->bios_clk_state = enabled; - pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev)); + if (!blacklist) { + link_state->clk_pm_capable = capable; + pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev)); + } else { + link_state->clk_pm_capable = 0; + pcie_set_clock_pm(pdev, 0); + } +} + +static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev) +{ + struct pci_dev *child_dev; + + list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { + if (child_dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) + return true; + } + 
return false; } /* @@ -217,16 +241,18 @@ static void pcie_aspm_configure_common_clock(struct pci_dev *pdev) pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); /* Wait for link training end */ - /* break out after waiting for 1 second */ + /* break out after waiting for timeout */ start_jiffies = jiffies; - while ((jiffies - start_jiffies) < HZ) { + for (;;) { pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16); if (!(reg16 & PCI_EXP_LNKSTA_LT)) break; - cpu_relax(); + if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) + break; + msleep(1); } /* training failed -> recover */ - if ((jiffies - start_jiffies) >= HZ) { + if (reg16 & PCI_EXP_LNKSTA_LT) { dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure" " common clock\n"); i = 0; @@ -419,9 +445,9 @@ static unsigned int pcie_aspm_check_state(struct pci_dev *pdev, { struct pci_dev *child_dev; - /* If no child, disable the link */ + /* If no child, ignore the link */ if (list_empty(&pdev->subordinate->devices)) - return 0; + return state; list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { /* @@ -462,6 +488,9 @@ static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state) int valid = 1; struct pcie_link_state *link_state = pdev->link_state; + /* If no child, disable the link */ + if (list_empty(&pdev->subordinate->devices)) + state = 0; /* * if the downstream component has pci bridge function, don't do ASPM * now @@ -493,20 +522,52 @@ static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state) link_state->enabled_state = state; } +static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link) +{ + struct pcie_link_state *root_port_link = link; + while (root_port_link->parent) + root_port_link = root_port_link->parent; + return root_port_link; +} + +/* check the whole hierarchy, and configure each link in the hierarchy */ static void __pcie_aspm_configure_link_state(struct pci_dev *pdev, unsigned int state) { struct pcie_link_state *link_state = pdev->link_state; + struct pcie_link_state *root_port_link = get_root_port_link(link_state); + struct pcie_link_state *leaf; - if (link_state->support_state == 0) - return; state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1; - /* state 0 means disabling aspm */ - state = pcie_aspm_check_state(pdev, state); + /* check all links that share this root port link */ + list_for_each_entry(leaf, &link_list, sibiling) { + if (!list_empty(&leaf->children) || + get_root_port_link(leaf) != root_port_link) + continue; + state = pcie_aspm_check_state(leaf->pdev, state); + } + /* check the root port link too in case it has no children */ + state = pcie_aspm_check_state(root_port_link->pdev, state); + if (link_state->enabled_state == state) return; - __pcie_aspm_config_link(pdev, state); + + /* + * We must change the hierarchy.
See comments in + * __pcie_aspm_config_link for the order + **/ + if (state & PCIE_LINK_STATE_L1) { + list_for_each_entry(leaf, &link_list, sibiling) { + if (get_root_port_link(leaf) == root_port_link) + __pcie_aspm_config_link(leaf->pdev, state); + } + } else { + list_for_each_entry_reverse(leaf, &link_list, sibiling) { + if (get_root_port_link(leaf) == root_port_link) + __pcie_aspm_config_link(leaf->pdev, state); + } + } } /* @@ -570,6 +631,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) unsigned int state; struct pcie_link_state *link_state; int error = 0; + int blacklist; if (aspm_disabled || !pdev->is_pcie || pdev->link_state) return; @@ -580,29 +642,58 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) if (list_empty(&pdev->subordinate->devices)) goto out; - if (pcie_aspm_sanity_check(pdev)) - goto out; + blacklist = !!pcie_aspm_sanity_check(pdev); mutex_lock(&aspm_lock); link_state = kzalloc(sizeof(*link_state), GFP_KERNEL); if (!link_state) goto unlock_out; - pdev->link_state = link_state; - pcie_aspm_configure_common_clock(pdev); + link_state->downstream_has_switch = pcie_aspm_downstream_has_switch(pdev); + INIT_LIST_HEAD(&link_state->children); + INIT_LIST_HEAD(&link_state->link); + if (pdev->bus->self) { /* this is a switch */ + struct pcie_link_state *parent_link_state; - pcie_aspm_cap_init(pdev); + parent_link_state = pdev->bus->parent->self->link_state; + if (!parent_link_state) { + kfree(link_state); + goto unlock_out; + } + list_add(&link_state->link, &parent_link_state->children); + link_state->parent = parent_link_state; + } - /* config link state to avoid BIOS error */ - state = pcie_aspm_check_state(pdev, policy_to_aspm_state(pdev)); - __pcie_aspm_config_link(pdev, state); + pdev->link_state = link_state; - pcie_check_clock_pm(pdev); + if (!blacklist) { + pcie_aspm_configure_common_clock(pdev); + pcie_aspm_cap_init(pdev); + } else { + link_state->enabled_state = PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1; + link_state->bios_aspm_state = 0; + /* Set support state to 0, so we will disable ASPM later */ + link_state->support_state = 0; + } link_state->pdev = pdev; list_add(&link_state->sibiling, &link_list); + if (link_state->downstream_has_switch) { + /* + * If the link has a switch, delay the link config. The leaf link + * initialization will configure the whole hierarchy, but we must + * make sure the BIOS doesn't set an unsupported link state + **/ + state = pcie_aspm_check_state(pdev, link_state->bios_aspm_state); + __pcie_aspm_config_link(pdev, state); + } else + __pcie_aspm_configure_link_state(pdev, + policy_to_aspm_state(pdev)); + + pcie_check_clock_pm(pdev, blacklist); + unlock_out: if (error) free_link_state(pdev); @@ -635,6 +726,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) /* All functions are removed, so just disable ASPM for the link */ __pcie_aspm_config_one_dev(parent, 0); list_del(&link_state->sibiling); + list_del(&link_state->link); /* Clock PM is for endpoint device */ free_link_state(parent); @@ -857,24 +949,15 @@ void pcie_no_aspm(void) aspm_disabled = 1; } -#ifdef CONFIG_ACPI -#include <acpi/acpi_bus.h> -#include <linux/pci-acpi.h> -static void pcie_aspm_platform_init(void) -{ - pcie_osc_support_set(OSC_ACTIVE_STATE_PWR_SUPPORT| - OSC_CLOCK_PWR_CAPABILITY_SUPPORT); -} -#else -static inline void pcie_aspm_platform_init(void) { } -#endif - -static int __init pcie_aspm_init(void) +/** + * pcie_aspm_enabled - is PCIe ASPM enabled? + * + * Returns true if ASPM has not been disabled by the command-line option + * pcie_aspm=off.
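+ * + * Example (illustrative): a driver that behaves differently when ASPM is + * active might check + * + *	if (!pcie_aspm_enabled()) + *		dev_info(&pdev->dev, "PCIe ASPM disabled\n");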
+ **/ +int pcie_aspm_enabled(void) { - if (aspm_disabled) - return 0; - pcie_aspm_platform_init(); - return 0; + return !aspm_disabled; } +EXPORT_SYMBOL(pcie_aspm_enabled); -fs_initcall(pcie_aspm_init); diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c index 359fe5568df1..eec89b767f9f 100644 --- a/drivers/pci/pcie/portdrv_bus.c +++ b/drivers/pci/pcie/portdrv_bus.c @@ -16,14 +16,10 @@ #include "portdrv.h" static int pcie_port_bus_match(struct device *dev, struct device_driver *drv); -static int pcie_port_bus_suspend(struct device *dev, pm_message_t state); -static int pcie_port_bus_resume(struct device *dev); struct bus_type pcie_port_bus_type = { .name = "pci_express", .match = pcie_port_bus_match, - .suspend = pcie_port_bus_suspend, - .resume = pcie_port_bus_resume, }; EXPORT_SYMBOL_GPL(pcie_port_bus_type); @@ -49,32 +45,12 @@ static int pcie_port_bus_match(struct device *dev, struct device_driver *drv) return 1; } -static int pcie_port_bus_suspend(struct device *dev, pm_message_t state) +int pcie_port_bus_register(void) { - struct pcie_device *pciedev; - struct pcie_port_service_driver *driver; - - if (!dev || !dev->driver) - return 0; - - pciedev = to_pcie_device(dev); - driver = to_service_driver(dev->driver); - if (driver && driver->suspend) - driver->suspend(pciedev, state); - return 0; + return bus_register(&pcie_port_bus_type); } -static int pcie_port_bus_resume(struct device *dev) +void pcie_port_bus_unregister(void) { - struct pcie_device *pciedev; - struct pcie_port_service_driver *driver; - - if (!dev || !dev->driver) - return 0; - - pciedev = to_pcie_device(dev); - driver = to_service_driver(dev->driver); - if (driver && driver->resume) - driver->resume(pciedev); - return 0; + bus_unregister(&pcie_port_bus_type); } diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 2e091e014829..8b3f8c18032f 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -19,91 +19,15 @@ extern int pcie_mch_quirk; /* MSI-quirk Indicator */ -static int pcie_port_probe_service(struct device *dev) -{ - struct pcie_device *pciedev; - struct pcie_port_service_driver *driver; - int status; - - if (!dev || !dev->driver) - return -ENODEV; - - driver = to_service_driver(dev->driver); - if (!driver || !driver->probe) - return -ENODEV; - - pciedev = to_pcie_device(dev); - status = driver->probe(pciedev, driver->id_table); - if (!status) { - dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", - driver->name); - get_device(dev); - } - return status; -} - -static int pcie_port_remove_service(struct device *dev) -{ - struct pcie_device *pciedev; - struct pcie_port_service_driver *driver; - - if (!dev || !dev->driver) - return 0; - - pciedev = to_pcie_device(dev); - driver = to_service_driver(dev->driver); - if (driver && driver->remove) { - dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n", - driver->name); - driver->remove(pciedev); - put_device(dev); - } - return 0; -} - -static void pcie_port_shutdown_service(struct device *dev) {} - -static int pcie_port_suspend_service(struct device *dev, pm_message_t state) -{ - struct pcie_device *pciedev; - struct pcie_port_service_driver *driver; - - if (!dev || !dev->driver) - return 0; - - pciedev = to_pcie_device(dev); - driver = to_service_driver(dev->driver); - if (driver && driver->suspend) - driver->suspend(pciedev, state); - return 0; -} - -static int pcie_port_resume_service(struct device *dev) -{ - struct pcie_device *pciedev; - struct 
pcie_port_service_driver *driver; - - if (!dev || !dev->driver) - return 0; - - pciedev = to_pcie_device(dev); - driver = to_service_driver(dev->driver); - - if (driver && driver->resume) - driver->resume(pciedev); - return 0; -} - -/* - * release_pcie_device - * - * Being invoked automatically when device is being removed - * in response to device_unregister(dev) call. - * Release all resources being claimed. +/** + * release_pcie_device - free PCI Express port service device structure + * @dev: Port service device to release + * + * Invoked automatically when device is being removed in response to + * device_unregister(dev). Release all resources being claimed. */ static void release_pcie_device(struct device *dev) { - dev_printk(KERN_DEBUG, dev, "free port service\n"); kfree(to_pcie_device(dev)); } @@ -128,7 +52,16 @@ static int is_msi_quirked(struct pci_dev *dev) } return quirk; } - + +/** + * assign_interrupt_mode - choose interrupt mode for PCI Express port services + * (INTx, MSI-X, MSI) and set up vectors + * @dev: PCI Express port to handle + * @vectors: Array of interrupt vectors to populate + * @mask: Bitmask of port capabilities returned by get_port_device_capability() + * + * Return value: Interrupt mode associated with the port + */ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) { int i, pos, nvec, status = -EINVAL; @@ -150,7 +83,6 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) if (pos) { struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] = {{0, 0}, {0, 1}, {0, 2}, {0, 3}}; - dev_info(&dev->dev, "found MSI-X capability\n"); status = pci_enable_msix(dev, msix_entries, nvec); if (!status) { int j = 0; @@ -165,7 +97,6 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) if (status) { pos = pci_find_capability(dev, PCI_CAP_ID_MSI); if (pos) { - dev_info(&dev->dev, "found MSI capability\n"); status = pci_enable_msi(dev); if (!status) { interrupt_mode = PCIE_PORT_MSI_MODE; @@ -177,6 +108,16 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) return interrupt_mode; } +/** + * get_port_device_capability - discover capabilities of a PCI Express port + * @dev: PCI Express port to examine + * + * The capabilities are read from the port's PCI Express configuration registers + * as described in PCI Express Base Specification 1.0a sections 7.8.2, 7.8.9 and + * 7.9 - 7.11. 
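+ *
+ * (Editorial example, assuming the PCIE_PORT_SERVICE_* flags defined in
+ * portdrv.h: a root port advertising PME and AER support would yield
+ *
+ *	PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_AER
+ *
+ * and a port service device is later created for each bit set in the mask.)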
+ * + * Return value: Bitmask of discovered port capabilities + */ static int get_port_device_capability(struct pci_dev *dev) { int services = 0, pos; @@ -204,6 +145,15 @@ static int get_port_device_capability(struct pci_dev *dev) return services; } +/** + * pcie_device_init - initialize PCI Express port service device + * @dev: Port service device to initialize + * @parent: PCI Express port to associate the service device with + * @port_type: Type of the port + * @service_type: Type of service to associate with the service device + * @irq: Interrupt vector to associate with the service device + * @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI) + */ static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev, int port_type, int service_type, int irq, int irq_mode) { @@ -224,11 +174,19 @@ static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev, device->driver = NULL; device->driver_data = NULL; device->release = release_pcie_device; /* callback to free pcie dev */ - snprintf(device->bus_id, sizeof(device->bus_id), "%s:pcie%02x", + dev_set_name(device, "%s:pcie%02x", pci_name(parent), get_descriptor_id(port_type, service_type)); device->parent = &parent->dev; } +/** + * alloc_pcie_device - allocate PCI Express port service device structure + * @parent: PCI Express port to associate the service device with + * @port_type: Type of the port + * @service_type: Type of service to associate with the service device + * @irq: Interrupt vector to associate with the service device + * @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI) + */ static struct pcie_device* alloc_pcie_device(struct pci_dev *parent, int port_type, int service_type, int irq, int irq_mode) { @@ -239,10 +197,13 @@ static struct pcie_device* alloc_pcie_device(struct pci_dev *parent, return NULL; pcie_device_init(parent, device, port_type, service_type, irq,irq_mode); - dev_printk(KERN_DEBUG, &device->device, "allocate port service\n"); return device; } +/** + * pcie_port_device_probe - check if device is a PCI Express port + * @dev: Device to check + */ int pcie_port_device_probe(struct pci_dev *dev) { int pos, type; @@ -260,6 +221,13 @@ int pcie_port_device_probe(struct pci_dev *dev) return -ENODEV; } +/** + * pcie_port_device_register - register PCI Express port + * @dev: PCI Express port to register + * + * Allocate the port extension structure and register services associated with + * the port. 
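+ *
+ * (Editorial sketch of the expected caller, not taken from this patch: the
+ * port driver's probe path would do something like
+ *
+ *	status = pcie_port_device_register(dev);
+ *	if (status)
+ *		return status;
+ *
+ * and undo it with pcie_port_device_remove() on driver removal.)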
+ */ int pcie_port_device_register(struct pci_dev *dev) { struct pcie_port_device_ext *p_ext; @@ -323,6 +291,11 @@ static int suspend_iter(struct device *dev, void *data) return 0; } +/** + * pcie_port_device_suspend - suspend port services associated with a PCIe port + * @dev: PCI Express port to handle + * @state: Representation of system power management transition in progress + */ int pcie_port_device_suspend(struct pci_dev *dev, pm_message_t state) { return device_for_each_child(&dev->dev, &state, suspend_iter); @@ -341,6 +314,10 @@ static int resume_iter(struct device *dev, void *data) return 0; } +/** + * pcie_port_device_resume - resume port services associated with a PCIe port + * @dev: PCI Express port to handle + */ int pcie_port_device_resume(struct pci_dev *dev) { return device_for_each_child(&dev->dev, NULL, resume_iter); @@ -363,6 +340,13 @@ static int remove_iter(struct device *dev, void *data) return 0; } +/** + * pcie_port_device_remove - unregister PCI Express port service devices + * @dev: PCI Express port the service devices to unregister are associated with + * + * Remove PCI Express port service devices associated with a given port and + * disable MSI-X or MSI for the port. + */ void pcie_port_device_remove(struct pci_dev *dev) { struct device *device; @@ -386,16 +370,80 @@ void pcie_port_device_remove(struct pci_dev *dev) pci_disable_msi(dev); } -int pcie_port_bus_register(void) +/** + * pcie_port_probe_service - probe driver for a given PCI Express port service + * @dev: PCI Express port service device to probe against + * + * If a PCI Express port service driver is registered with + * pcie_port_service_register(), this function will be called by the driver core + * whenever a match is found between the driver and a port service device. + */ +static int pcie_port_probe_service(struct device *dev) { - return bus_register(&pcie_port_bus_type); + struct pcie_device *pciedev; + struct pcie_port_service_driver *driver; + int status; + + if (!dev || !dev->driver) + return -ENODEV; + + driver = to_service_driver(dev->driver); + if (!driver || !driver->probe) + return -ENODEV; + + pciedev = to_pcie_device(dev); + status = driver->probe(pciedev, driver->id_table); + if (!status) { + dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", + driver->name); + get_device(dev); + } + return status; } -void pcie_port_bus_unregister(void) +/** + * pcie_port_remove_service - detach driver from a given PCI Express port service + * @dev: PCI Express port service device to handle + * + * If a PCI Express port service driver is registered with + * pcie_port_service_register(), this function will be called by the driver core + * when device_unregister() is called for the port service device associated + * with the driver.
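+ *
+ * (Editorial sketch: a minimal, hypothetical service driver that this
+ * probe/remove machinery would bind, using only the fields this file
+ * actually dereferences:
+ *
+ *	static struct pcie_port_service_driver dummy_service = {
+ *		.name		= "pcie_dummy",
+ *		.id_table	= &dummy_id,
+ *		.probe		= dummy_probe,
+ *		.remove		= dummy_remove,
+ *	};
+ *
+ * registered once with pcie_port_service_register(&dummy_service).)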
+ */ +static int pcie_port_remove_service(struct device *dev) { - bus_unregister(&pcie_port_bus_type); + struct pcie_device *pciedev; + struct pcie_port_service_driver *driver; + + if (!dev || !dev->driver) + return 0; + + pciedev = to_pcie_device(dev); + driver = to_service_driver(dev->driver); + if (driver && driver->remove) { + dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n", + driver->name); + driver->remove(pciedev); + put_device(dev); + } + return 0; } +/** + * pcie_port_shutdown_service - shut down given PCI Express port service + * @dev: PCI Express port service device to handle + * + * If PCI Express port service driver is registered with + * pcie_port_service_register(), this function will be called by the driver core + * when device_shutdown() is called for the port service device associated + * with the driver. + */ +static void pcie_port_shutdown_service(struct device *dev) {} + +/** + * pcie_port_service_register - register PCI Express port service driver + * @new: PCI Express port service driver to register + */ int pcie_port_service_register(struct pcie_port_service_driver *new) { new->driver.name = (char *)new->name; @@ -403,15 +451,17 @@ int pcie_port_service_register(struct pcie_port_service_driver *new) new->driver.probe = pcie_port_probe_service; new->driver.remove = pcie_port_remove_service; new->driver.shutdown = pcie_port_shutdown_service; - new->driver.suspend = pcie_port_suspend_service; - new->driver.resume = pcie_port_resume_service; return driver_register(&new->driver); } -void pcie_port_service_unregister(struct pcie_port_service_driver *new) +/** + * pcie_port_service_unregister - unregister PCI Express port service driver + * @drv: PCI Express port service driver to unregister + */ +void pcie_port_service_unregister(struct pcie_port_service_driver *drv) { - driver_unregister(&new->driver); + driver_unregister(&drv->driver); } EXPORT_SYMBOL(pcie_port_service_register); diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 584422da8d8b..99a914a027f8 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -41,7 +41,6 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev) { int retval; - pci_restore_state(dev); retval = pci_enable_device(dev); if (retval) return retval; @@ -52,11 +51,18 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev) #ifdef CONFIG_PM static int pcie_portdrv_suspend(struct pci_dev *dev, pm_message_t state) { - int ret = pcie_port_device_suspend(dev, state); + return pcie_port_device_suspend(dev, state); - if (!ret) - ret = pcie_portdrv_save_config(dev); - return ret; +} + +static int pcie_portdrv_suspend_late(struct pci_dev *dev, pm_message_t state) +{ + return pci_save_state(dev); +} + +static int pcie_portdrv_resume_early(struct pci_dev *dev) +{ + return pci_restore_state(dev); } static int pcie_portdrv_resume(struct pci_dev *dev) @@ -66,6 +72,8 @@ static int pcie_portdrv_resume(struct pci_dev *dev) } #else #define pcie_portdrv_suspend NULL +#define pcie_portdrv_suspend_late NULL +#define pcie_portdrv_resume_early NULL #define pcie_portdrv_resume NULL #endif @@ -221,6 +229,7 @@ static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev) /* If fatal, restore cfg space for possible link reset at upstream */ if (dev->error_state == pci_channel_io_frozen) { + pci_restore_state(dev); pcie_portdrv_restore_config(dev); pci_enable_pcie_error_reporting(dev); } @@ -283,6 +292,8 @@ static struct pci_driver pcie_portdriver = { .remove = pcie_portdrv_remove, 
.suspend = pcie_portdrv_suspend, + .suspend_late = pcie_portdrv_suspend_late, + .resume_early = pcie_portdrv_resume_early, .resume = pcie_portdrv_resume, .err_handler = &pcie_portdrv_err_handler, diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 003a9b3c293f..303644614eea 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -55,8 +55,8 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev, cpumask = pcibus_to_cpumask(to_pci_bus(dev)); ret = type? - cpulist_scnprintf(buf, PAGE_SIZE-2, cpumask): - cpumask_scnprintf(buf, PAGE_SIZE-2, cpumask); + cpulist_scnprintf(buf, PAGE_SIZE-2, &cpumask) : + cpumask_scnprintf(buf, PAGE_SIZE-2, &cpumask); buf[ret++] = '\n'; buf[ret] = '\0'; return ret; @@ -135,13 +135,6 @@ static u64 pci_size(u64 base, u64 maxbase, u64 mask) return size; } -enum pci_bar_type { - pci_bar_unknown, /* Standard PCI BAR probe */ - pci_bar_io, /* An io port BAR */ - pci_bar_mem32, /* A 32-bit memory BAR */ - pci_bar_mem64, /* A 64-bit memory BAR */ -}; - static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar) { if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { @@ -156,11 +149,16 @@ static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar) return pci_bar_mem32; } -/* - * If the type is not unknown, we assume that the lowest bit is 'enable'. - * Returns 1 if the BAR was 64-bit and 0 if it was 32-bit. +/** + * __pci_read_base - read a PCI BAR + * @dev: the PCI device + * @type: type of the BAR + * @res: resource buffer to be filled in + * @pos: BAR position in the config space + * + * Returns 1 if the BAR is 64-bit, or 0 if 32-bit. */ -static int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, +int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, struct resource *res, unsigned int pos) { u32 l, sz, mask; @@ -400,19 +398,17 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, if (!child) return NULL; - child->self = bridge; child->parent = parent; child->ops = parent->ops; child->sysdata = parent->sysdata; child->bus_flags = parent->bus_flags; - child->bridge = get_device(&bridge->dev); /* initialize some portions of the bus device, but don't register it * now as the parent is not properly set up yet. This device will get * registered later in pci_bus_add_devices() */ child->dev.class = &pcibus_class; - sprintf(child->dev.bus_id, "%04x:%02x", pci_domain_nr(child), busnr); + dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr); /* * Set up the primary, secondary and subordinate @@ -422,8 +418,14 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, child->primary = parent->secondary; child->subordinate = 0xff; + if (!bridge) + return child; + + child->self = bridge; + child->bridge = get_device(&bridge->dev); + /* Set up default resource pointers and names..
*/ - for (i = 0; i < 4; i++) { + for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i]; child->resource[i]->name = child->name; } @@ -958,8 +960,12 @@ static void pci_init_capabilities(struct pci_dev *dev) /* MSI/MSI-X list */ pci_msi_init_pci_dev(dev); + /* Buffers for saving PCIe and PCI-X capabilities */ + pci_allocate_cap_save_buffers(dev); + /* Power Management */ pci_pm_init(dev); + platform_pci_wakeup_init(dev); /* Vital Product Data */ pci_vpd_pci22_init(dev); @@ -1130,7 +1136,7 @@ struct pci_bus * pci_create_bus(struct device *parent, memset(dev, 0, sizeof(*dev)); dev->parent = parent; dev->release = pci_release_bus_bridge_dev; - sprintf(dev->bus_id, "pci%04x:%02x", pci_domain_nr(b), bus); + dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus); error = device_register(dev); if (error) goto dev_reg_err; @@ -1141,7 +1147,7 @@ struct pci_bus * pci_create_bus(struct device *parent, b->dev.class = &pcibus_class; b->dev.parent = b->bridge; - sprintf(b->dev.bus_id, "%04x:%02x", pci_domain_nr(b), bus); + dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus); error = device_register(&b->dev); if (error) goto class_dev_reg_err; diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index e1098c302c45..593bb844b8db 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -252,11 +252,20 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma) const struct proc_dir_entry *dp = PDE(inode); struct pci_dev *dev = dp->data; struct pci_filp_private *fpriv = file->private_data; - int ret; + int i, ret; if (!capable(CAP_SYS_RAWIO)) return -EPERM; + /* Make sure the caller is mapping a real resource for this device */ + for (i = 0; i < PCI_ROM_RESOURCE; i++) { + if (pci_mmap_fits(dev, i, vma)) + break; + } + + if (i >= PCI_ROM_RESOURCE) + return -ENODEV; + ret = pci_mmap_page_range(dev, vma, fpriv->mmap_state, fpriv->write_combine); @@ -352,15 +361,16 @@ static int show_device(struct seq_file *m, void *v) dev->vendor, dev->device, dev->irq); - /* Here should be 7 and not PCI_NUM_RESOURCES as we need to preserve compatibility */ - for (i=0; i<7; i++) { + + /* only print standard and ROM resources to preserve compatibility */ + for (i = 0; i <= PCI_ROM_RESOURCE; i++) { resource_size_t start, end; pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); seq_printf(m, "\t%16llx", (unsigned long long)(start | (dev->resource[i].flags & PCI_REGION_FLAG_MASK))); } - for (i=0; i<7; i++) { + for (i = 0; i <= PCI_ROM_RESOURCE; i++) { resource_size_t start, end; pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); seq_printf(m, "\t%16llx", diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 5f4f85f56cb7..baad093aafe3 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -56,7 +56,7 @@ static void quirk_passive_release(struct pci_dev *dev) while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) { pci_read_config_byte(d, 0x82, &dlc); if (!(dlc & 1<<1)) { - dev_err(&d->dev, "PIIX3: Enabling Passive Release\n"); + dev_info(&d->dev, "PIIX3: Enabling Passive Release\n"); dlc |= 1<<1; pci_write_config_byte(d, 0x82, dlc); } @@ -449,7 +449,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi); -static void __devinit quirk_ich6_lpc_acpi(struct pci_dev 
*dev) +static void __devinit ich6_lpc_acpi_gpio(struct pci_dev *dev) { u32 region; @@ -459,20 +459,95 @@ static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev) pci_read_config_dword(dev, 0x48, ®ion); quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); } -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc_acpi); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich6_lpc_acpi); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich6_lpc_acpi); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich6_lpc_acpi); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich6_lpc_acpi); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich6_lpc_acpi); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich6_lpc_acpi); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich6_lpc_acpi); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich6_lpc_acpi); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich6_lpc_acpi); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich6_lpc_acpi); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich6_lpc_acpi); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich6_lpc_acpi); + +static void __devinit ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize) +{ + u32 val; + u32 size, base; + + pci_read_config_dword(dev, reg, &val); + + /* Enabled? */ + if (!(val & 1)) + return; + base = val & 0xfffc; + if (dynsize) { + /* + * This is not correct. It is 16, 32 or 64 bytes depending on + * register D31:F0:ADh bits 5:4. + * + * But this gets us at least _part_ of it. + */ + size = 16; + } else { + size = 128; + } + base &= ~(size-1); + + /* Just print it out for now. We should reserve it after more debugging */ + dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base+size-1); +} + +static void __devinit quirk_ich6_lpc(struct pci_dev *dev) +{ + /* Shared ACPI/GPIO decode with all ICH6+ */ + ich6_lpc_acpi_gpio(dev); + + /* ICH6-specific generic IO decode */ + ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0); + ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1); +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc); + +static void __devinit ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name) +{ + u32 val; + u32 mask, base; + + pci_read_config_dword(dev, reg, &val); + + /* Enabled? */ + if (!(val & 1)) + return; + + /* + * IO base in bits 15:2, mask in bits 23:18, both + * are dword-based + */ + base = val & 0xfffc; + mask = (val >> 16) & 0xfc; + mask |= 3; + + /* Just print it out for now. 
We should reserve it after more debugging */ + dev_info(&dev->dev, "%s PIO at %04x (mask %04x)\n", name, base, mask); +} + +/* ICH7-10 has the same common LPC generic IO decode registers */ +static void __devinit quirk_ich7_lpc(struct pci_dev *dev) +{ + /* We share the common ACPI/GPIO decode with ICH6 */ + ich6_lpc_acpi_gpio(dev); + + /* And have 4 ICH7+ generic decodes */ + ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1"); + ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2"); + ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3"); + ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4"); +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc); /* * VIA ACPI: One IO region pointed to by longword at @@ -606,27 +681,6 @@ static void __init quirk_ioapic_rmw(struct pci_dev *dev) sis_apic_bug = 1; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw); - -#define AMD8131_revA0 0x01 -#define AMD8131_revB0 0x11 -#define AMD8131_MISC 0x40 -#define AMD8131_NIOAMODE_BIT 0 -static void quirk_amd_8131_ioapic(struct pci_dev *dev) -{ - unsigned char tmp; - - if (nr_ioapics == 0) - return; - - if (dev->revision == AMD8131_revA0 || dev->revision == AMD8131_revB0) { - dev_info(&dev->dev, "Fixing up AMD8131 IOAPIC mode\n"); - pci_read_config_byte( dev, AMD8131_MISC, &tmp); - tmp &= ~(1 << AMD8131_NIOAMODE_BIT); - pci_write_config_byte( dev, AMD8131_MISC, tmp); - } -} -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); -DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); #endif /* CONFIG_X86_IO_APIC */ /* @@ -1423,6 +1477,155 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); +#ifdef CONFIG_X86_IO_APIC +/* + * Boot interrupts on some chipsets cannot be turned off. For these chipsets, + * remap the original interrupt in the Linux kernel to the boot interrupt, so + * that a PCI device's interrupt handler is installed on the boot interrupt + * line instead.
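+ *
+ * (Editorial example: a device whose INTx would normally be handled on,
+ * say, IRQ 19 instead gets its handler installed on the chipset's boot
+ * interrupt line; the quirk below only sets dev->irq_reroute_variant and
+ * the architecture interrupt code is assumed to act on it. IRQ 19 is
+ * purely illustrative.)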
+ */ +static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev) +{ + if (noioapicquirk || noioapicreroute) + return; + + dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT; + + printk(KERN_INFO "PCI quirk: reroute interrupts for 0x%04x:0x%04x\n", + dev->vendor, dev->device); + return; +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel); + +/* + * On some chipsets we can disable the generation of legacy INTx boot + * interrupts. + */ + +/* + * IO-APIC1 on 6300ESB generates boot interrupts, see intel order no + * 300641-004US, section 5.7.3. 
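+ *
+ * (Editorial note: the effect can be inspected from userspace with, e.g.,
+ *
+ *	setpci -s <BDF> 0x40.w
+ *
+ * which should report bit 14 set once the quirk has run; an illustrative
+ * check only, not part of the patch.)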
+ */ +#define INTEL_6300_IOAPIC_ABAR 0x40 +#define INTEL_6300_DISABLE_BOOT_IRQ (1<<14) + +static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev) +{ + u16 pci_config_word; + + if (noioapicquirk) + return; + + pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word); + pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; + pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word); + + printk(KERN_INFO "disabled boot interrupt on device 0x%04x:0x%04x\n", + dev->vendor, dev->device); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); + +/* + * Disable boot interrupts on the HT-1000 + */ +#define BC_HT1000_FEATURE_REG 0x64 +#define BC_HT1000_PIC_REGS_ENABLE (1<<0) +#define BC_HT1000_MAP_IDX 0xC00 +#define BC_HT1000_MAP_DATA 0xC01 + +static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev) +{ + u32 pci_config_dword; + u8 irq; + + if (noioapicquirk) + return; + + pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword); + pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword | + BC_HT1000_PIC_REGS_ENABLE); + + for (irq = 0x10; irq < 0x10 + 32; irq++) { + outb(irq, BC_HT1000_MAP_IDX); + outb(0x00, BC_HT1000_MAP_DATA); + } + + pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword); + + printk(KERN_INFO "disabled boot interrupts on PCI device " + "0x%04x:0x%04x\n", dev->vendor, dev->device); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); + +/* + * Disable boot interrupts on AMD and ATI chipsets + */ +/* + * NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131 + * rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode + * (due to an erratum). Clearing NOIOAMODE here also takes over from the + * quirk_amd_8131_ioapic fixup that this patch removes above.
+ */ +#define AMD_813X_MISC 0x40 +#define AMD_813X_NOIOAMODE (1<<0) + +static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev) +{ + u32 pci_config_dword; + + if (noioapicquirk) + return; + + pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword); + pci_config_dword &= ~AMD_813X_NOIOAMODE; + pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword); + + printk(KERN_INFO "disabled boot interrupts on PCI device " + "0x%04x:0x%04x\n", dev->vendor, dev->device); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt); + +#define AMD_8111_PCI_IRQ_ROUTING 0x56 + +static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev) +{ + u16 pci_config_word; + + if (noioapicquirk) + return; + + pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word); + if (!pci_config_word) { + printk(KERN_INFO "boot interrupts on PCI device 0x%04x:0x%04x " + "already disabled\n", + dev->vendor, dev->device); + return; + } + pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0); + printk(KERN_INFO "disabled boot interrupts on PCI device " + "0x%04x:0x%04x\n", dev->vendor, dev->device); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); +#endif /* CONFIG_X86_IO_APIC */ + /* * Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size * but the PIO transfers won't work if BAR0 falls at the odd 8 bytes. @@ -1946,11 +2149,12 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375, #endif /* CONFIG_PCI_MSI */ -static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) +static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, + struct pci_fixup *end) { while (f < end) { if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) && - (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) { + (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) { dev_dbg(&dev->dev, "calling %pF\n", f->hook); f->hook(dev); } diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index ea979f2bc6db..704608945780 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -536,9 +536,8 @@ static void pci_bus_dump_res(struct pci_bus *bus) if (!res) continue; - printk(KERN_INFO "bus: %02x index %x %s: %pR\n", - bus->number, i, - (res->flags & IORESOURCE_IO) ? "io port" : "mmio", res); + dev_printk(KERN_DEBUG, &bus->dev, "resource %d %s %pR\n", i, + (res->flags & IORESOURCE_IO) ? 
"io: " : "mem:", res); } } diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 2dbd96cce2d8..32e8d88a4619 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -26,11 +26,13 @@ #include "pci.h" -void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno) +void pci_update_resource(struct pci_dev *dev, int resno) { struct pci_bus_region region; u32 new, check, mask; int reg; + enum pci_bar_type type; + struct resource *res = dev->resource + resno; /* * Ignore resources for unimplemented BARs and unused resource slots @@ -61,17 +63,13 @@ void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno) else mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; - if (resno < 6) { - reg = PCI_BASE_ADDRESS_0 + 4 * resno; - } else if (resno == PCI_ROM_RESOURCE) { + reg = pci_resource_bar(dev, resno, &type); + if (!reg) + return; + if (type != pci_bar_unknown) { if (!(res->flags & IORESOURCE_ROM_ENABLE)) return; new |= PCI_ROM_ADDRESS_ENABLE; - reg = dev->rom_base_reg; - } else { - /* Hmm, non-standard resource. */ - - return; /* kill uninitialised var warning */ } pci_write_config_dword(dev, reg, new); @@ -134,7 +132,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno) align = resource_alignment(res); if (!align) { - dev_err(&dev->dev, "BAR %d: can't allocate resource (bogus " + dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus " "alignment) %pR flags %#lx\n", resno, res, res->flags); return -EINVAL; @@ -157,12 +155,12 @@ int pci_assign_resource(struct pci_dev *dev, int resno) } if (ret) { - dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", + dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res); } else { res->flags &= ~IORESOURCE_STARTALIGN; if (resno < PCI_BRIDGE_RESOURCES) - pci_update_resource(dev, res, resno); + pci_update_resource(dev, resno); } return ret; @@ -197,7 +195,7 @@ int pci_assign_resource_fixed(struct pci_dev *dev, int resno) dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res); } else if (resno < PCI_BRIDGE_RESOURCES) { - pci_update_resource(dev, res, resno); + pci_update_resource(dev, resno); } return ret; |