author | Linus Torvalds <torvalds@g5.osdl.org> | 2006-06-22 15:07:59 -0700
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-06-22 15:07:59 -0700
commit | 6c763eb9ead86c612492b59287b36c0dcf7d09b1 (patch)
tree | 46497ff44bcf909517bffac70f0d6c7ad45ae1f0
parent | dcc1a66a09420ccc5a22671bddc5a842f92d67e5 (diff)
parent | cf34a8e07f02c76f3f1232eecb681301a3d7b10b (diff)
download | talos-obmc-linux-6c763eb9ead86c612492b59287b36c0dcf7d09b1.tar.gz | talos-obmc-linux-6c763eb9ead86c612492b59287b36c0dcf7d09b1.zip
Merge master.kernel.org:/pub/scm/linux/kernel/git/gregkh/pci-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/gregkh/pci-2.6: (27 commits)
[PATCH] PCI: nVidia quirk to make AER PCI-E extended capability visible
[PATCH] PCI: fix issues with extended conf space when MMCONFIG disabled because of e820
[PATCH] PCI: Bus Parity Status sysfs interface
[PATCH] PCI: fix memory leak in MMCONFIG error path
[PATCH] PCI: fix error with pci_get_device() call in the mpc85xx driver
[PATCH] PCI: MSI-K8T-Neo2-Fir: run only where needed
[PATCH] PCI: fix race with pci_walk_bus and pci_destroy_dev
[PATCH] PCI: clean up pci documentation to be more specific
[PATCH] PCI: remove unneeded msi code
[PATCH] PCI: don't move ioapics below PCI bridge
[PATCH] PCI: cleanup unused variable about msi driver
[PATCH] PCI: disable msi mode in pci_disable_device
[PATCH] PCI: Allow MSI to work on kexec kernel
[PATCH] PCI: AMD 8131 MSI quirk called too late, bus_flags not inherited ?
[PATCH] PCI: Move various PCI IDs to header file
[PATCH] PCI Bus Parity Status-broken hardware attribute, EDAC foundation
[PATCH] PCI: i386/x86_84: disable PCI resource decode on device disable
[PATCH] PCI ACPI: Rename the functions to avoid multiple instances.
[PATCH] PCI: don't enable device if already enabled
[PATCH] PCI: Add a "enable" sysfs attribute to the pci devices to allow userspace (Xorg) to enable devices without doing foul direct access
...
45 files changed, 1096 insertions, 436 deletions
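The centerpiece of the series is the new struct msi_ops abstraction added in the drivers/pci/msi.h, drivers/pci/msi-apic.c and drivers/pci/msi-altix.c hunks below: a platform supplies setup/teardown/target hooks and registers them with msi_register(), and the MSI core calls those hooks instead of composing APIC addresses itself. The following is a minimal sketch of such a backend using only the interfaces added in this merge; every myplat_* name is hypothetical.

```c
#include <linux/pci.h>
#include "msi.h"	/* struct msi_ops and msi_register(), as added below */

/* Hypothetical platform backend; the real ones below are msi_apic_ops and sn_msi_ops. */
static int myplat_msi_setup(struct pci_dev *pdev, unsigned int vector,
			    u32 *addr_hi, u32 *addr_lo, u32 *data)
{
	/*
	 * Compose the bus address/data pair that the MSI core will program
	 * into the device's MSI capability or MSI-X table entry for vector.
	 */
	*addr_hi = 0;
	*addr_lo = 0;		/* platform-specific doorbell address */
	*data = vector;		/* platform-specific payload */
	return 0;
}

static void myplat_msi_teardown(unsigned int vector)
{
	/* Release whatever routing resources setup() allocated for vector. */
}

static void myplat_msi_target(unsigned int vector, unsigned int cpu,
			      u32 *addr_hi, u32 *addr_lo)
{
	/* Rewrite the address so vector is delivered to cpu. */
}

static struct msi_ops myplat_msi_ops = {
	.setup    = myplat_msi_setup,
	.teardown = myplat_msi_teardown,
	.target   = myplat_msi_target,
};

/* Typically called from the platform's msi_arch_init() path. */
int myplat_msi_init(void)
{
	return msi_register(&myplat_msi_ops);
}
```

In the diff below, msi-apic.c fills this pattern in with the x86 APIC address/data encoding, while msi-altix.c allocates the vector routing through SAL and maps the interrupt target address with SN_DMA_MSI.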
diff --git a/Documentation/pci.txt b/Documentation/pci.txt
index 66bbbf1d1ef6..3242e5c1ee9c 100644
--- a/Documentation/pci.txt
+++ b/Documentation/pci.txt
@@ -213,9 +213,17 @@ have been remapped by the kernel.
 
 See Documentation/IO-mapping.txt for how to access device memory.
 
-	You still need to call request_region() for I/O regions and
-request_mem_region() for memory regions to make sure nobody else is using the
-same device.
+	The device driver needs to call pci_request_region() to make sure
+no other device is already using the same resource. The driver is expected
+to determine MMIO and IO Port resource availability _before_ calling
+pci_enable_device(). Conversely, drivers should call pci_release_region()
+_after_ calling pci_disable_device(). The idea is to prevent two devices
+colliding on the same address range.
+
+Generic flavors of pci_request_region() are request_mem_region()
+(for MMIO ranges) and request_region() (for IO Port ranges).
+Use these for address resources that are not described by "normal" PCI
+interfaces (e.g. BAR).
 
 All interrupt handlers should be registered with SA_SHIRQ and use the devid
 to map IRQs to devices (remember that all PCI interrupts are shared).
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 40e5aba3ad3d..fbe93084244c 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -202,6 +202,8 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
 		if (mcfg->config[i].base_reserved) {
 			printk(KERN_ERR PREFIX
 			       "MMCONFIG not in low 4GB of memory\n");
+			kfree(pci_mmcfg_config);
+			pci_mmcfg_config_num = 0;
 			return -ENODEV;
 		}
 	}
diff --git a/arch/i386/pci/common.c b/arch/i386/pci/common.c
index dbece776c5b2..c624b61e1104 100644
--- a/arch/i386/pci/common.c
+++ b/arch/i386/pci/common.c
@@ -288,6 +288,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 
 void pcibios_disable_device (struct pci_dev *dev)
 {
+	pcibios_disable_resources(dev);
 	if (pcibios_disable_irq)
 		pcibios_disable_irq(dev);
 }
diff --git a/arch/i386/pci/i386.c b/arch/i386/pci/i386.c
index ed2c8c899bd3..7852827a599b 100644
--- a/arch/i386/pci/i386.c
+++ b/arch/i386/pci/i386.c
@@ -242,6 +242,15 @@ int pcibios_enable_resources(struct pci_dev *dev, int mask)
 	return 0;
 }
 
+void pcibios_disable_resources(struct pci_dev *dev)
+{
+	u16 cmd;
+
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+	pci_write_config_word(dev, PCI_COMMAND, cmd);
+}
+
 /*
  * If we set up a device for bus mastering, we need to check the latency
  * timer as certain crappy BIOSes forget to set it properly.
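The Documentation/pci.txt hunk above spells out the new ordering: request the region before pci_enable_device(), and release it only after pci_disable_device() (which on i386 now also clears I/O and memory decode via the pcibios_disable_resources() added above). Below is a minimal sketch of a driver following that ordering; the mydrv names and the use of BAR 0 are illustrative assumptions, not part of this patch.

```c
#include <linux/pci.h>

static int mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	/* Claim BAR 0 before enabling decode, per the updated ordering. */
	err = pci_request_region(pdev, 0, "mydrv");
	if (err)
		return err;

	err = pci_enable_device(pdev);
	if (err) {
		pci_release_region(pdev, 0);
		return err;
	}
	return 0;
}

static void mydrv_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
	/* Release the region only after the device has been disabled. */
	pci_release_region(pdev, 0);
}
```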
diff --git a/arch/i386/pci/mmconfig.c b/arch/i386/pci/mmconfig.c index 6b1ea0c9a570..e545b0992c48 100644 --- a/arch/i386/pci/mmconfig.c +++ b/arch/i386/pci/mmconfig.c @@ -15,7 +15,9 @@ #include <asm/e820.h> #include "pci.h" -#define MMCONFIG_APER_SIZE (256*1024*1024) +/* aperture is up to 256MB but BIOS may reserve less */ +#define MMCONFIG_APER_MIN (2 * 1024*1024) +#define MMCONFIG_APER_MAX (256 * 1024*1024) /* Assume systems with more busses have correct MCFG */ #define MAX_CHECK_BUS 16 @@ -197,9 +199,10 @@ void __init pci_mmcfg_init(void) return; if (!e820_all_mapped(pci_mmcfg_config[0].base_address, - pci_mmcfg_config[0].base_address + MMCONFIG_APER_SIZE, + pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN, E820_RESERVED)) { - printk(KERN_ERR "PCI: BIOS Bug: MCFG area is not E820-reserved\n"); + printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n", + pci_mmcfg_config[0].base_address); printk(KERN_ERR "PCI: Not using MMCONFIG.\n"); return; } diff --git a/arch/i386/pci/pci.h b/arch/i386/pci/pci.h index 12035e29108b..12bf3d8dda29 100644 --- a/arch/i386/pci/pci.h +++ b/arch/i386/pci/pci.h @@ -35,6 +35,7 @@ extern unsigned int pcibios_max_latency; void pcibios_resource_survey(void); int pcibios_enable_resources(struct pci_dev *, int); +void pcibios_disable_resources(struct pci_dev *); /* pci-pc.c */ diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 6c4d59fd0364..ef9a2b49307a 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -46,6 +46,10 @@ #define IRQ_DEBUG 0 +/* These can be overridden in platform_irq_init */ +int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR; +int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR; + /* default base addr of IPI table */ void __iomem *ipi_base_addr = ((void __iomem *) (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR)); @@ -60,7 +64,7 @@ __u8 isa_irq_to_vector_map[16] = { }; EXPORT_SYMBOL(isa_irq_to_vector_map); -static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)]; +static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)]; int assign_irq_vector (int irq) @@ -89,6 +93,19 @@ free_irq_vector (int vector) printk(KERN_WARNING "%s: double free!\n", __FUNCTION__); } +int +reserve_irq_vector (int vector) +{ + int pos; + + if (vector < IA64_FIRST_DEVICE_VECTOR || + vector > IA64_LAST_DEVICE_VECTOR) + return -EINVAL; + + pos = vector - IA64_FIRST_DEVICE_VECTOR; + return test_and_set_bit(pos, ia64_vector_mask); +} + #ifdef CONFIG_SMP # define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE) #else diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index 5101ac462643..dc09a6a28a37 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c @@ -58,7 +58,7 @@ static int max_pcibus_number = 255; /* Default highest pci bus number */ */ static dma_addr_t -sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size) +sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type) { return 0; } @@ -457,13 +457,6 @@ void sn_pci_fixup_slot(struct pci_dev *dev) pcidev_info->pdi_sn_irq_info = NULL; kfree(sn_irq_info); } - - /* - * MSI currently not supported on altix. 
Remove this when - * the MSI abstraction patches are integrated into the kernel - * (sometime after 2.6.16 releases) - */ - dev->no_msi = 1; } /* diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index c265e02f5036..dc8e2b696713 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c @@ -26,11 +26,11 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info); int sn_force_interrupt_flag = 1; extern int sn_ioif_inited; -static struct list_head **sn_irq_lh; +struct list_head **sn_irq_lh; static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */ -static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget, - u64 sn_irq_info, +u64 sn_intr_alloc(nasid_t local_nasid, int local_widget, + struct sn_irq_info *sn_irq_info, int req_irq, nasid_t req_nasid, int req_slice) { @@ -40,12 +40,13 @@ static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget, SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT, (u64) SAL_INTR_ALLOC, (u64) local_nasid, - (u64) local_widget, (u64) sn_irq_info, (u64) req_irq, + (u64) local_widget, __pa(sn_irq_info), (u64) req_irq, (u64) req_nasid, (u64) req_slice); + return ret_stuff.status; } -static inline void sn_intr_free(nasid_t local_nasid, int local_widget, +void sn_intr_free(nasid_t local_nasid, int local_widget, struct sn_irq_info *sn_irq_info) { struct ia64_sal_retval ret_stuff; @@ -112,73 +113,91 @@ static void sn_end_irq(unsigned int irq) static void sn_irq_info_free(struct rcu_head *head); -static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) +struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info, + nasid_t nasid, int slice) { - struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; - int cpuid, cpuphys; + int vector; + int cpuphys; + int64_t bridge; + int local_widget, status; + nasid_t local_nasid; + struct sn_irq_info *new_irq_info; + struct sn_pcibus_provider *pci_provider; - cpuid = first_cpu(mask); - cpuphys = cpu_physical_id(cpuid); + new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC); + if (new_irq_info == NULL) + return NULL; - list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, - sn_irq_lh[irq], list) { - u64 bridge; - int local_widget, status; - nasid_t local_nasid; - struct sn_irq_info *new_irq_info; - struct sn_pcibus_provider *pci_provider; - - new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC); - if (new_irq_info == NULL) - break; - memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info)); - - bridge = (u64) new_irq_info->irq_bridge; - if (!bridge) { - kfree(new_irq_info); - break; /* irq is not a device interrupt */ - } + memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info)); + + bridge = (u64) new_irq_info->irq_bridge; + if (!bridge) { + kfree(new_irq_info); + return NULL; /* irq is not a device interrupt */ + } - local_nasid = NASID_GET(bridge); + local_nasid = NASID_GET(bridge); - if (local_nasid & 1) - local_widget = TIO_SWIN_WIDGETNUM(bridge); - else - local_widget = SWIN_WIDGETNUM(bridge); + if (local_nasid & 1) + local_widget = TIO_SWIN_WIDGETNUM(bridge); + else + local_widget = SWIN_WIDGETNUM(bridge); - /* Free the old PROM new_irq_info structure */ - sn_intr_free(local_nasid, local_widget, new_irq_info); - /* Update kernels new_irq_info with new target info */ - unregister_intr_pda(new_irq_info); + vector = sn_irq_info->irq_irq; + /* Free the old PROM new_irq_info structure */ + sn_intr_free(local_nasid, local_widget, new_irq_info); + /* Update kernels new_irq_info with new target info */ + 
unregister_intr_pda(new_irq_info); - /* allocate a new PROM new_irq_info struct */ - status = sn_intr_alloc(local_nasid, local_widget, - __pa(new_irq_info), irq, - cpuid_to_nasid(cpuid), - cpuid_to_slice(cpuid)); + /* allocate a new PROM new_irq_info struct */ + status = sn_intr_alloc(local_nasid, local_widget, + new_irq_info, vector, + nasid, slice); - /* SAL call failed */ - if (status) { - kfree(new_irq_info); - break; - } + /* SAL call failed */ + if (status) { + kfree(new_irq_info); + return NULL; + } - new_irq_info->irq_cpuid = cpuid; - register_intr_pda(new_irq_info); + cpuphys = nasid_slice_to_cpuid(nasid, slice); + new_irq_info->irq_cpuid = cpuphys; + register_intr_pda(new_irq_info); - pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type]; - if (pci_provider && pci_provider->target_interrupt) - (pci_provider->target_interrupt)(new_irq_info); + pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type]; - spin_lock(&sn_irq_info_lock); - list_replace_rcu(&sn_irq_info->list, &new_irq_info->list); - spin_unlock(&sn_irq_info_lock); - call_rcu(&sn_irq_info->rcu, sn_irq_info_free); + /* + * If this represents a line interrupt, target it. If it's + * an msi (irq_int_bit < 0), it's already targeted. + */ + if (new_irq_info->irq_int_bit >= 0 && + pci_provider && pci_provider->target_interrupt) + (pci_provider->target_interrupt)(new_irq_info); + + spin_lock(&sn_irq_info_lock); + list_replace_rcu(&sn_irq_info->list, &new_irq_info->list); + spin_unlock(&sn_irq_info_lock); + call_rcu(&sn_irq_info->rcu, sn_irq_info_free); #ifdef CONFIG_SMP - set_irq_affinity_info((irq & 0xff), cpuphys, 0); + set_irq_affinity_info((vector & 0xff), cpuphys, 0); #endif - } + + return new_irq_info; +} + +static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) +{ + struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; + nasid_t nasid; + int slice; + + nasid = cpuid_to_nasid(first_cpu(mask)); + slice = cpuid_to_slice(first_cpu(mask)); + + list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, + sn_irq_lh[irq], list) + (void)sn_retarget_vector(sn_irq_info, nasid, slice); } struct hw_interrupt_type irq_type_sn = { @@ -202,6 +221,9 @@ void sn_irq_init(void) int i; irq_desc_t *base_desc = irq_desc; + ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR; + ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR; + for (i = 0; i < NR_IRQS; i++) { if (base_desc[i].handler == &no_irq_type) { base_desc[i].handler = &irq_type_sn; @@ -285,6 +307,7 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info) /* link it into the sn_irq[irq] list */ spin_lock(&sn_irq_info_lock); list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]); + reserve_irq_vector(sn_irq_info->irq_irq); spin_unlock(&sn_irq_info_lock); register_intr_pda(sn_irq_info); @@ -310,8 +333,11 @@ void sn_irq_unfixup(struct pci_dev *pci_dev) spin_lock(&sn_irq_info_lock); list_del_rcu(&sn_irq_info->list); spin_unlock(&sn_irq_info_lock); + if (list_empty(sn_irq_lh[sn_irq_info->irq_irq])) + free_irq_vector(sn_irq_info->irq_irq); call_rcu(&sn_irq_info->rcu, sn_irq_info_free); pci_dev_put(pci_dev); + } static inline void diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index b4b84c269210..7a291a271511 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c @@ -11,7 +11,7 @@ #include <linux/module.h> #include <asm/dma.h> -#include <asm/sn/pcibr_provider.h> +#include <asm/sn/intr.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/pcidev.h> #include <asm/sn/sn_sal.h> @@ -113,7 
+113,8 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size, * resources. */ - *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size); + *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size, + SN_DMA_ADDR_PHYS); if (!*dma_handle) { printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); free_pages((unsigned long)cpuaddr, get_order(size)); @@ -176,7 +177,7 @@ dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size, BUG_ON(dev->bus != &pci_bus_type); phys_addr = __pa(cpu_addr); - dma_addr = provider->dma_map(pdev, phys_addr, size); + dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS); if (!dma_addr) { printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); return 0; @@ -260,7 +261,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries, for (i = 0; i < nhwentries; i++, sg++) { phys_addr = SG_ENT_PHYS_ADDRESS(sg); sg->dma_address = provider->dma_map(pdev, - phys_addr, sg->length); + phys_addr, sg->length, + SN_DMA_ADDR_PHYS); if (!sg->dma_address) { printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c index 9f86bb6519aa..a86c7b945962 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c @@ -41,7 +41,7 @@ extern int sn_ioif_inited; static dma_addr_t pcibr_dmamap_ate32(struct pcidev_info *info, - u64 paddr, size_t req_size, u64 flags) + u64 paddr, size_t req_size, u64 flags, int dma_flags) { struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; @@ -81,9 +81,12 @@ pcibr_dmamap_ate32(struct pcidev_info *info, if (IS_PCIX(pcibus_info)) ate_flags &= ~(PCI32_ATE_PREF); - xio_addr = - IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : - PHYS_TO_TIODMA(paddr); + if (SN_DMA_ADDRTYPE(dma_flags == SN_DMA_ADDR_PHYS)) + xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : + PHYS_TO_TIODMA(paddr); + else + xio_addr = paddr; + offset = IOPGOFF(xio_addr); ate = ate_flags | (xio_addr - offset); @@ -91,6 +94,13 @@ pcibr_dmamap_ate32(struct pcidev_info *info, if (IS_PIC_SOFT(pcibus_info)) { ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT); } + + /* + * If we're mapping for MSI, set the MSI bit in the ATE + */ + if (dma_flags & SN_DMA_MSI) + ate |= PCI32_ATE_MSI; + ate_write(pcibus_info, ate_index, ate_count, ate); /* @@ -105,20 +115,27 @@ pcibr_dmamap_ate32(struct pcidev_info *info, if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR) ATE_SWAP_ON(pci_addr); + return pci_addr; } static dma_addr_t pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr, - u64 dma_attributes) + u64 dma_attributes, int dma_flags) { struct pcibus_info *pcibus_info = (struct pcibus_info *) ((info->pdi_host_pcidev_info)->pdi_pcibus_info); u64 pci_addr; /* Translate to Crosstalk View of Physical Address */ - pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : - PHYS_TO_TIODMA(paddr)) | dma_attributes; + if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS) + pci_addr = IS_PIC_SOFT(pcibus_info) ? + PHYS_TO_DMA(paddr) : + PHYS_TO_TIODMA(paddr) | dma_attributes; + else + pci_addr = IS_PIC_SOFT(pcibus_info) ? + paddr : + paddr | dma_attributes; /* Handle Bus mode */ if (IS_PCIX(pcibus_info)) @@ -130,7 +147,9 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr, ((u64) pcibus_info-> pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT); } else - pci_addr |= TIOCP_PCI64_CMDTYPE_MEM; + pci_addr |= (dma_flags & SN_DMA_MSI) ? 
+ TIOCP_PCI64_CMDTYPE_MSI : + TIOCP_PCI64_CMDTYPE_MEM; /* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */ if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn)) @@ -141,7 +160,7 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr, static dma_addr_t pcibr_dmatrans_direct32(struct pcidev_info * info, - u64 paddr, size_t req_size, u64 flags) + u64 paddr, size_t req_size, u64 flags, int dma_flags) { struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> @@ -156,8 +175,14 @@ pcibr_dmatrans_direct32(struct pcidev_info * info, return 0; } - xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : - PHYS_TO_TIODMA(paddr); + if (dma_flags & SN_DMA_MSI) + return 0; + + if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS) + xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : + PHYS_TO_TIODMA(paddr); + else + xio_addr = paddr; xio_base = pcibus_info->pbi_dir_xbase; offset = xio_addr - xio_base; @@ -327,7 +352,7 @@ void sn_dma_flush(u64 addr) */ dma_addr_t -pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size) +pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags) { dma_addr_t dma_handle; struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev); @@ -344,11 +369,11 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size) */ dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr, - PCI64_ATTR_PREF); + PCI64_ATTR_PREF, dma_flags); } else { /* Handle 32-63 bit cards via direct mapping */ dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr, - size, 0); + size, 0, dma_flags); if (!dma_handle) { /* * It is a 32 bit card and we cannot do direct mapping, @@ -356,7 +381,8 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size) */ dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr, - size, PCI32_ATE_PREF); + size, PCI32_ATE_PREF, + dma_flags); } } @@ -365,18 +391,18 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size) dma_addr_t pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr, - size_t size) + size_t size, int dma_flags) { dma_addr_t dma_handle; struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev); if (hwdev->dev.coherent_dma_mask == ~0UL) { dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr, - PCI64_ATTR_BAR); + PCI64_ATTR_BAR, dma_flags); } else { dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info, phys_addr, size, - PCI32_ATE_BAR); + PCI32_ATE_BAR, dma_flags); } return dma_handle; diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index be0176912968..20de72791b97 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c @@ -515,11 +515,17 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) * use the GART mapped mode. */ static u64 -tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count) +tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags) { u64 mapaddr; /* + * Not supported for now ... + */ + if (dma_flags & SN_DMA_MSI) + return 0; + + /* * If card is 64 or 48 bit addresable, use a direct mapping. 32 * bit direct is so restrictive w.r.t. where the memory resides that * we don't use it even though CA has some support. 
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c index 833295624e5d..4cac7bdc7c7f 100644 --- a/arch/ia64/sn/pci/tioce_provider.c +++ b/arch/ia64/sn/pci/tioce_provider.c @@ -170,7 +170,8 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr) (ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1) #define ATE_VALID(ate) ((ate) & (1UL << 63)) -#define ATE_MAKE(addr, ps) (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63)) +#define ATE_MAKE(addr, ps, msi) \ + (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63) | ((msi)?(1UL << 62):0)) /* * Flavors of ate-based mapping supported by tioce_alloc_map() @@ -196,15 +197,17 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr) * * 63 - must be 1 to indicate d64 mode to CE hardware * 62 - barrier bit ... controlled with tioce_dma_barrier() - * 61 - 0 since this is not an MSI transaction + * 61 - msi bit ... specified through dma_flags * 60:54 - reserved, MBZ */ static u64 -tioce_dma_d64(unsigned long ct_addr) +tioce_dma_d64(unsigned long ct_addr, int dma_flags) { u64 bus_addr; bus_addr = ct_addr | (1UL << 63); + if (dma_flags & SN_DMA_MSI) + bus_addr |= (1UL << 61); return bus_addr; } @@ -261,7 +264,7 @@ pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base, */ static u64 tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, - u64 ct_addr, int len) + u64 ct_addr, int len, int dma_flags) { int i; int j; @@ -270,6 +273,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, int entries; int nates; u64 pagesize; + int msi_capable, msi_wanted; u64 *ate_shadow; u64 *ate_reg; u64 addr; @@ -291,6 +295,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, ate_reg = ce_mmr->ce_ure_ate3240; pagesize = ce_kern->ce_ate3240_pagesize; bus_base = TIOCE_M32_MIN; + msi_capable = 1; break; case TIOCE_ATE_M40: first = 0; @@ -299,6 +304,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, ate_reg = ce_mmr->ce_ure_ate40; pagesize = MB(64); bus_base = TIOCE_M40_MIN; + msi_capable = 0; break; case TIOCE_ATE_M40S: /* @@ -311,11 +317,16 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, ate_reg = ce_mmr->ce_ure_ate3240; pagesize = GB(16); bus_base = TIOCE_M40S_MIN; + msi_capable = 0; break; default: return 0; } + msi_wanted = dma_flags & SN_DMA_MSI; + if (msi_wanted && !msi_capable) + return 0; + nates = ATE_NPAGES(ct_addr, len, pagesize); if (nates > entries) return 0; @@ -344,7 +355,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, for (j = 0; j < nates; j++) { u64 ate; - ate = ATE_MAKE(addr, pagesize); + ate = ATE_MAKE(addr, pagesize, msi_wanted); ate_shadow[i + j] = ate; tioce_mmr_storei(ce_kern, &ate_reg[i + j], ate); addr += pagesize; @@ -371,7 +382,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, * Map @paddr into 32-bit bus space of the CE associated with @pcidev_info. 
*/ static u64 -tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr) +tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr, int dma_flags) { int dma_ok; int port; @@ -381,6 +392,9 @@ tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr) u64 ct_lower; dma_addr_t bus_addr; + if (dma_flags & SN_DMA_MSI) + return 0; + ct_upper = ct_addr & ~0x3fffffffUL; ct_lower = ct_addr & 0x3fffffffUL; @@ -507,7 +521,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) */ static u64 tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, - int barrier) + int barrier, int dma_flags) { unsigned long flags; u64 ct_addr; @@ -523,15 +537,18 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, if (dma_mask < 0x7fffffffUL) return 0; - ct_addr = PHYS_TO_TIODMA(paddr); + if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS) + ct_addr = PHYS_TO_TIODMA(paddr); + else + ct_addr = paddr; /* * If the device can generate 64 bit addresses, create a D64 map. - * Since this should never fail, bypass the rest of the checks. */ if (dma_mask == ~0UL) { - mapaddr = tioce_dma_d64(ct_addr); - goto dma_map_done; + mapaddr = tioce_dma_d64(ct_addr, dma_flags); + if (mapaddr) + goto dma_map_done; } pcidev_to_tioce(pdev, NULL, &ce_kern, &port); @@ -574,18 +591,22 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, if (byte_count > MB(64)) { mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S, - port, ct_addr, byte_count); + port, ct_addr, byte_count, + dma_flags); if (!mapaddr) mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1, - ct_addr, byte_count); + ct_addr, byte_count, + dma_flags); } else { mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1, - ct_addr, byte_count); + ct_addr, byte_count, + dma_flags); if (!mapaddr) mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S, - port, ct_addr, byte_count); + port, ct_addr, byte_count, + dma_flags); } } @@ -593,7 +614,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, * 32-bit direct is the next mode to try */ if (!mapaddr && dma_mask >= 0xffffffffUL) - mapaddr = tioce_dma_d32(pdev, ct_addr); + mapaddr = tioce_dma_d32(pdev, ct_addr, dma_flags); /* * Last resort, try 32-bit ATE-based map. @@ -601,7 +622,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, if (!mapaddr) mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr, - byte_count); + byte_count, dma_flags); spin_unlock_irqrestore(&ce_kern->ce_lock, flags); @@ -622,9 +643,9 @@ dma_map_done: * in the address. */ static u64 -tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count) +tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags) { - return tioce_do_dma_map(pdev, paddr, byte_count, 0); + return tioce_do_dma_map(pdev, paddr, byte_count, 0, dma_flags); } /** @@ -636,9 +657,9 @@ tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count) * Simply call tioce_do_dma_map() to create a map with the barrier bit set * in the address. 
*/ static u64 -tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count) +tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags) { - return tioce_do_dma_map(pdev, paddr, byte_count, 1); + return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags); } /** @@ -696,7 +717,7 @@ tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit) while (ate_index <= last_ate) { u64 ate; - ate = ATE_MAKE(0xdeadbeef, ps); + ate = ATE_MAKE(0xdeadbeef, ps, 0); ce_kern->ce_ate3240_shadow[ate_index] = ate; tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_ate3240[ate_index], ate); diff --git a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c index c9e0aeeca3d8..4368dc3f3c30 100644 --- a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c +++ b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c @@ -379,13 +379,12 @@ mpc85xx_cds_pcibios_fixup(void) PCI_DEVICE_ID_VIA_82C586_2, NULL))) { dev->irq = 10; pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 10); - pci_dev_put(dev); - } - if ((dev = pci_get_device(PCI_VENDOR_ID_VIA, + if ((dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_2, dev))) { - dev->irq = 11; - pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11); + dev->irq = 11; + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11); + } pci_dev_put(dev); } } diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c index a2060e4d5de6..3c55c76c6fd5 100644 --- a/arch/x86_64/pci/mmconfig.c +++ b/arch/x86_64/pci/mmconfig.c @@ -13,7 +13,10 @@ #include "pci.h" -#define MMCONFIG_APER_SIZE (256*1024*1024) +/* aperture is up to 256MB but BIOS may reserve less */ +#define MMCONFIG_APER_MIN (2 * 1024*1024) +#define MMCONFIG_APER_MAX (256 * 1024*1024) + /* Verify the first 16 busses. We assume that systems with more busses get MCFG right. 
*/ #define MAX_CHECK_BUS 16 @@ -175,9 +178,10 @@ void __init pci_mmcfg_init(void) return; if (!e820_all_mapped(pci_mmcfg_config[0].base_address, - pci_mmcfg_config[0].base_address + MMCONFIG_APER_SIZE, + pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN, E820_RESERVED)) { - printk(KERN_ERR "PCI: BIOS Bug: MCFG area is not E820-reserved\n"); + printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n", + pci_mmcfg_config[0].base_address); printk(KERN_ERR "PCI: Not using MMCONFIG.\n"); return; } @@ -190,7 +194,8 @@ void __init pci_mmcfg_init(void) } for (i = 0; i < pci_mmcfg_config_num; ++i) { pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i]; - pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address, MMCONFIG_APER_SIZE); + pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address, + MMCONFIG_APER_MAX); if (!pci_mmcfg_virt[i].virt) { printk("PCI: Cannot map mmconfig aperture for segment %d\n", pci_mmcfg_config[i].pci_segment_group_number); diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 6707df968934..f2d152b818f0 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -26,7 +26,11 @@ obj-$(CONFIG_PPC32) += setup-irq.o obj-$(CONFIG_PPC64) += setup-bus.o obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o obj-$(CONFIG_X86_VISWS) += setup-irq.o -obj-$(CONFIG_PCI_MSI) += msi.o + +msiobj-y := msi.o msi-apic.o +msiobj-$(CONFIG_IA64_GENERIC) += msi-altix.o +msiobj-$(CONFIG_IA64_SGI_SN2) += msi-altix.o +obj-$(CONFIG_PCI_MSI) += $(msiobj-y) # # ACPI Related PCI FW Functions diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index eed67d9e73bc..723092682023 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -81,9 +81,9 @@ void __devinit pci_bus_add_device(struct pci_dev *dev) { device_add(&dev->dev); - spin_lock(&pci_bus_lock); + down_write(&pci_bus_sem); list_add_tail(&dev->global_list, &pci_devices); - spin_unlock(&pci_bus_lock); + up_write(&pci_bus_sem); pci_proc_attach_device(dev); pci_create_sysfs_dev_files(dev); @@ -125,10 +125,10 @@ void __devinit pci_bus_add_devices(struct pci_bus *bus) */ if (dev->subordinate) { if (list_empty(&dev->subordinate->node)) { - spin_lock(&pci_bus_lock); + down_write(&pci_bus_sem); list_add_tail(&dev->subordinate->node, &dev->bus->children); - spin_unlock(&pci_bus_lock); + up_write(&pci_bus_sem); } pci_bus_add_devices(dev->subordinate); @@ -168,7 +168,7 @@ void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *), struct list_head *next; bus = top; - spin_lock(&pci_bus_lock); + down_read(&pci_bus_sem); next = top->devices.next; for (;;) { if (next == &bus->devices) { @@ -180,22 +180,19 @@ void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *), continue; } dev = list_entry(next, struct pci_dev, bus_list); - pci_dev_get(dev); if (dev->subordinate) { /* this is a pci-pci bridge, do its devices next */ next = dev->subordinate->devices.next; bus = dev->subordinate; } else next = dev->bus_list.next; - spin_unlock(&pci_bus_lock); - /* Run device routines with the bus unlocked */ + /* Run device routines with the device locked */ + down(&dev->dev.sem); cb(dev, userdata); - - spin_lock(&pci_bus_lock); - pci_dev_put(dev); + up(&dev->dev.sem); } - spin_unlock(&pci_bus_lock); + up_read(&pci_bus_sem); } EXPORT_SYMBOL_GPL(pci_walk_bus); diff --git a/drivers/pci/msi-altix.c b/drivers/pci/msi-altix.c new file mode 100644 index 000000000000..bed4183a5e39 --- /dev/null +++ b/drivers/pci/msi-altix.c @@ -0,0 +1,210 @@ +/* + * This file is subject to the terms and conditions of 
the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2006 Silicon Graphics, Inc. All Rights Reserved. + */ + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/cpumask.h> + +#include <asm/sn/addrs.h> +#include <asm/sn/intr.h> +#include <asm/sn/pcibus_provider_defs.h> +#include <asm/sn/pcidev.h> +#include <asm/sn/nodepda.h> + +#include "msi.h" + +struct sn_msi_info { + u64 pci_addr; + struct sn_irq_info *sn_irq_info; +}; + +static struct sn_msi_info *sn_msi_info; + +static void +sn_msi_teardown(unsigned int vector) +{ + nasid_t nasid; + int widget; + struct pci_dev *pdev; + struct pcidev_info *sn_pdev; + struct sn_irq_info *sn_irq_info; + struct pcibus_bussoft *bussoft; + struct sn_pcibus_provider *provider; + + sn_irq_info = sn_msi_info[vector].sn_irq_info; + if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0) + return; + + sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; + pdev = sn_pdev->pdi_linux_pcidev; + provider = SN_PCIDEV_BUSPROVIDER(pdev); + + (*provider->dma_unmap)(pdev, + sn_msi_info[vector].pci_addr, + PCI_DMA_FROMDEVICE); + sn_msi_info[vector].pci_addr = 0; + + bussoft = SN_PCIDEV_BUSSOFT(pdev); + nasid = NASID_GET(bussoft->bs_base); + widget = (nasid & 1) ? + TIO_SWIN_WIDGETNUM(bussoft->bs_base) : + SWIN_WIDGETNUM(bussoft->bs_base); + + sn_intr_free(nasid, widget, sn_irq_info); + sn_msi_info[vector].sn_irq_info = NULL; + + return; +} + +int +sn_msi_setup(struct pci_dev *pdev, unsigned int vector, + u32 *addr_hi, u32 *addr_lo, u32 *data) +{ + int widget; + int status; + nasid_t nasid; + u64 bus_addr; + struct sn_irq_info *sn_irq_info; + struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev); + struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); + + if (bussoft == NULL) + return -EINVAL; + + if (provider == NULL || provider->dma_map_consistent == NULL) + return -EINVAL; + + /* + * Set up the vector plumbing. Let the prom (via sn_intr_alloc) + * decide which cpu to direct this msi at by default. + */ + + nasid = NASID_GET(bussoft->bs_base); + widget = (nasid & 1) ? + TIO_SWIN_WIDGETNUM(bussoft->bs_base) : + SWIN_WIDGETNUM(bussoft->bs_base); + + sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); + if (! sn_irq_info) + return -ENOMEM; + + status = sn_intr_alloc(nasid, widget, sn_irq_info, vector, -1, -1); + if (status) { + kfree(sn_irq_info); + return -ENOMEM; + } + + sn_irq_info->irq_int_bit = -1; /* mark this as an MSI irq */ + sn_irq_fixup(pdev, sn_irq_info); + + /* Prom probably should fill these in, but doesn't ... */ + sn_irq_info->irq_bridge_type = bussoft->bs_asic_type; + sn_irq_info->irq_bridge = (void *)bussoft->bs_base; + + /* + * Map the xio address into bus space + */ + bus_addr = (*provider->dma_map_consistent)(pdev, + sn_irq_info->irq_xtalkaddr, + sizeof(sn_irq_info->irq_xtalkaddr), + SN_DMA_MSI|SN_DMA_ADDR_XIO); + if (! bus_addr) { + sn_intr_free(nasid, widget, sn_irq_info); + kfree(sn_irq_info); + return -ENOMEM; + } + + sn_msi_info[vector].sn_irq_info = sn_irq_info; + sn_msi_info[vector].pci_addr = bus_addr; + + *addr_hi = (u32)(bus_addr >> 32); + *addr_lo = (u32)(bus_addr & 0x00000000ffffffff); + + /* + * In the SN platform, bit 16 is a "send vector" bit which + * must be present in order to move the vector through the system. 
+ */ + *data = 0x100 + (unsigned int)vector; + +#ifdef CONFIG_SMP + set_irq_affinity_info((vector & 0xff), sn_irq_info->irq_cpuid, 0); +#endif + + return 0; +} + +static void +sn_msi_target(unsigned int vector, unsigned int cpu, + u32 *addr_hi, u32 *addr_lo) +{ + int slice; + nasid_t nasid; + u64 bus_addr; + struct pci_dev *pdev; + struct pcidev_info *sn_pdev; + struct sn_irq_info *sn_irq_info; + struct sn_irq_info *new_irq_info; + struct sn_pcibus_provider *provider; + + sn_irq_info = sn_msi_info[vector].sn_irq_info; + if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0) + return; + + /* + * Release XIO resources for the old MSI PCI address + */ + + sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; + pdev = sn_pdev->pdi_linux_pcidev; + provider = SN_PCIDEV_BUSPROVIDER(pdev); + + bus_addr = (u64)(*addr_hi) << 32 | (u64)(*addr_lo); + (*provider->dma_unmap)(pdev, bus_addr, PCI_DMA_FROMDEVICE); + sn_msi_info[vector].pci_addr = 0; + + nasid = cpuid_to_nasid(cpu); + slice = cpuid_to_slice(cpu); + + new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice); + sn_msi_info[vector].sn_irq_info = new_irq_info; + if (new_irq_info == NULL) + return; + + /* + * Map the xio address into bus space + */ + + bus_addr = (*provider->dma_map_consistent)(pdev, + new_irq_info->irq_xtalkaddr, + sizeof(new_irq_info->irq_xtalkaddr), + SN_DMA_MSI|SN_DMA_ADDR_XIO); + + sn_msi_info[vector].pci_addr = bus_addr; + *addr_hi = (u32)(bus_addr >> 32); + *addr_lo = (u32)(bus_addr & 0x00000000ffffffff); +} + +struct msi_ops sn_msi_ops = { + .setup = sn_msi_setup, + .teardown = sn_msi_teardown, +#ifdef CONFIG_SMP + .target = sn_msi_target, +#endif +}; + +int +sn_msi_init(void) +{ + sn_msi_info = + kzalloc(sizeof(struct sn_msi_info) * NR_VECTORS, GFP_KERNEL); + if (! sn_msi_info) + return -ENOMEM; + + msi_register(&sn_msi_ops); + return 0; +} diff --git a/drivers/pci/msi-apic.c b/drivers/pci/msi-apic.c new file mode 100644 index 000000000000..0eb5fe9003a2 --- /dev/null +++ b/drivers/pci/msi-apic.c @@ -0,0 +1,100 @@ +/* + * MSI hooks for standard x86 apic + */ + +#include <linux/pci.h> +#include <linux/irq.h> + +#include "msi.h" + +/* + * Shifts for APIC-based data + */ + +#define MSI_DATA_VECTOR_SHIFT 0 +#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT) + +#define MSI_DATA_DELIVERY_SHIFT 8 +#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_SHIFT) +#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_SHIFT) + +#define MSI_DATA_LEVEL_SHIFT 14 +#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT) +#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT) + +#define MSI_DATA_TRIGGER_SHIFT 15 +#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT) +#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT) + +/* + * Shift/mask fields for APIC-based bus address + */ + +#define MSI_ADDR_HEADER 0xfee00000 + +#define MSI_ADDR_DESTID_MASK 0xfff0000f +#define MSI_ADDR_DESTID_CPU(cpu) ((cpu) << MSI_TARGET_CPU_SHIFT) + +#define MSI_ADDR_DESTMODE_SHIFT 2 +#define MSI_ADDR_DESTMODE_PHYS (0 << MSI_ADDR_DESTMODE_SHIFT) +#define MSI_ADDR_DESTMODE_LOGIC (1 << MSI_ADDR_DESTMODE_SHIFT) + +#define MSI_ADDR_REDIRECTION_SHIFT 3 +#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) +#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) + + +static void +msi_target_apic(unsigned int vector, + unsigned int dest_cpu, + u32 *address_hi, /* in/out */ + u32 *address_lo) /* in/out */ +{ + u32 addr = *address_lo; + + addr &= MSI_ADDR_DESTID_MASK; + addr |= 
MSI_ADDR_DESTID_CPU(cpu_physical_id(dest_cpu)); + + *address_lo = addr; +} + +static int +msi_setup_apic(struct pci_dev *pdev, /* unused in generic */ + unsigned int vector, + u32 *address_hi, + u32 *address_lo, + u32 *data) +{ + unsigned long dest_phys_id; + + dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map)); + + *address_hi = 0; + *address_lo = MSI_ADDR_HEADER | + MSI_ADDR_DESTMODE_PHYS | + MSI_ADDR_REDIRECTION_CPU | + MSI_ADDR_DESTID_CPU(dest_phys_id); + + *data = MSI_DATA_TRIGGER_EDGE | + MSI_DATA_LEVEL_ASSERT | + MSI_DATA_DELIVERY_FIXED | + MSI_DATA_VECTOR(vector); + + return 0; +} + +static void +msi_teardown_apic(unsigned int vector) +{ + return; /* no-op */ +} + +/* + * Generic ops used on most IA archs/platforms. Set with msi_register() + */ + +struct msi_ops msi_apic_ops = { + .setup = msi_setup_apic, + .teardown = msi_teardown_apic, + .target = msi_target_apic, +}; diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 9855c4c920b8..7f8429284fab 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -23,8 +23,6 @@ #include "pci.h" #include "msi.h" -#define MSI_TARGET_CPU first_cpu(cpu_online_map) - static DEFINE_SPINLOCK(msi_lock); static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL }; static kmem_cache_t* msi_cachep; @@ -37,9 +35,17 @@ static int nr_msix_devices; #ifndef CONFIG_X86_IO_APIC int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1}; -u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 }; #endif +static struct msi_ops *msi_ops; + +int +msi_register(struct msi_ops *ops) +{ + msi_ops = ops; + return 0; +} + static void msi_cache_ctor(void *p, kmem_cache_t *cache, unsigned long flags) { memset(p, 0, NR_IRQS * sizeof(struct msi_desc)); @@ -92,7 +98,7 @@ static void msi_set_mask_bit(unsigned int vector, int flag) static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask) { struct msi_desc *entry; - struct msg_address address; + u32 address_hi, address_lo; unsigned int irq = vector; unsigned int dest_cpu = first_cpu(cpu_mask); @@ -108,28 +114,36 @@ static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask) if (!pos) return; + pci_read_config_dword(entry->dev, msi_upper_address_reg(pos), + &address_hi); pci_read_config_dword(entry->dev, msi_lower_address_reg(pos), - &address.lo_address.value); - address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK; - address.lo_address.value |= (cpu_physical_id(dest_cpu) << - MSI_TARGET_CPU_SHIFT); - entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu); + &address_lo); + + msi_ops->target(vector, dest_cpu, &address_hi, &address_lo); + + pci_write_config_dword(entry->dev, msi_upper_address_reg(pos), + address_hi); pci_write_config_dword(entry->dev, msi_lower_address_reg(pos), - address.lo_address.value); + address_lo); set_native_irq_info(irq, cpu_mask); break; } case PCI_CAP_ID_MSIX: { - int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + - PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET; - - address.lo_address.value = readl(entry->mask_base + offset); - address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK; - address.lo_address.value |= (cpu_physical_id(dest_cpu) << - MSI_TARGET_CPU_SHIFT); - entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu); - writel(address.lo_address.value, entry->mask_base + offset); + int offset_hi = + entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET; + int offset_lo = + entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET; + + address_hi = readl(entry->mask_base + 
offset_hi); + address_lo = readl(entry->mask_base + offset_lo); + + msi_ops->target(vector, dest_cpu, &address_hi, &address_lo); + + writel(address_hi, entry->mask_base + offset_hi); + writel(address_lo, entry->mask_base + offset_lo); set_native_irq_info(irq, cpu_mask); break; } @@ -251,30 +265,6 @@ static struct hw_interrupt_type msi_irq_wo_maskbit_type = { .set_affinity = set_msi_affinity }; -static void msi_data_init(struct msg_data *msi_data, - unsigned int vector) -{ - memset(msi_data, 0, sizeof(struct msg_data)); - msi_data->vector = (u8)vector; - msi_data->delivery_mode = MSI_DELIVERY_MODE; - msi_data->level = MSI_LEVEL_MODE; - msi_data->trigger = MSI_TRIGGER_MODE; -} - -static void msi_address_init(struct msg_address *msi_address) -{ - unsigned int dest_id; - unsigned long dest_phys_id = cpu_physical_id(MSI_TARGET_CPU); - - memset(msi_address, 0, sizeof(struct msg_address)); - msi_address->hi_address = (u32)0; - dest_id = (MSI_ADDRESS_HEADER << MSI_ADDRESS_HEADER_SHIFT); - msi_address->lo_address.u.dest_mode = MSI_PHYSICAL_MODE; - msi_address->lo_address.u.redirection_hint = MSI_REDIRECTION_HINT_MODE; - msi_address->lo_address.u.dest_id = dest_id; - msi_address->lo_address.value |= (dest_phys_id << MSI_TARGET_CPU_SHIFT); -} - static int msi_free_vector(struct pci_dev* dev, int vector, int reassign); static int assign_msi_vector(void) { @@ -369,13 +359,29 @@ static int msi_init(void) return status; } + status = msi_arch_init(); + if (status < 0) { + pci_msi_enable = 0; + printk(KERN_WARNING + "PCI: MSI arch init failed. MSI disabled.\n"); + return status; + } + + if (! msi_ops) { + printk(KERN_WARNING + "PCI: MSI ops not registered. MSI disabled.\n"); + status = -EINVAL; + return status; + } + + last_alloc_vector = assign_irq_vector(AUTO_ASSIGN); status = msi_cache_init(); if (status < 0) { pci_msi_enable = 0; printk(KERN_WARNING "PCI: MSI cache init failed\n"); return status; } - last_alloc_vector = assign_irq_vector(AUTO_ASSIGN); + if (last_alloc_vector < 0) { pci_msi_enable = 0; printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n"); @@ -442,9 +448,11 @@ static void enable_msi_mode(struct pci_dev *dev, int pos, int type) /* Set enabled bits to single MSI & enable MSI_enable bit */ msi_enable(control, 1); pci_write_config_word(dev, msi_control_reg(pos), control); + dev->msi_enabled = 1; } else { msix_enable(control); pci_write_config_word(dev, msi_control_reg(pos), control); + dev->msix_enabled = 1; } if (pci_find_capability(dev, PCI_CAP_ID_EXP)) { /* PCI Express Endpoint device detected */ @@ -461,9 +469,11 @@ void disable_msi_mode(struct pci_dev *dev, int pos, int type) /* Set enabled bits to single MSI & enable MSI_enable bit */ msi_disable(control); pci_write_config_word(dev, msi_control_reg(pos), control); + dev->msi_enabled = 0; } else { msix_disable(control); pci_write_config_word(dev, msi_control_reg(pos), control); + dev->msix_enabled = 0; } if (pci_find_capability(dev, PCI_CAP_ID_EXP)) { /* PCI Express Endpoint device detected */ @@ -538,7 +548,6 @@ int pci_save_msi_state(struct pci_dev *dev) pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]); if (control & PCI_MSI_FLAGS_MASKBIT) pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]); - disable_msi_mode(dev, pos, PCI_CAP_ID_MSI); save_state->cap_nr = PCI_CAP_ID_MSI; pci_add_saved_cap(dev, save_state); return 0; @@ -575,6 +584,8 @@ void pci_restore_msi_state(struct pci_dev *dev) int pci_save_msix_state(struct pci_dev *dev) { int pos; + int temp; + int vector, head, tail = 0; u16 control; 
struct pci_cap_saved_state *save_state; @@ -582,6 +593,7 @@ int pci_save_msix_state(struct pci_dev *dev) if (pos <= 0 || dev->no_msi) return 0; + /* save the capability */ pci_read_config_word(dev, msi_control_reg(pos), &control); if (!(control & PCI_MSIX_FLAGS_ENABLE)) return 0; @@ -593,7 +605,38 @@ int pci_save_msix_state(struct pci_dev *dev) } *((u16 *)&save_state->data[0]) = control; - disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); + /* save the table */ + temp = dev->irq; + if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) { + kfree(save_state); + return -EINVAL; + } + + vector = head = dev->irq; + while (head != tail) { + int j; + void __iomem *base; + struct msi_desc *entry; + + entry = msi_desc[vector]; + base = entry->mask_base; + j = entry->msi_attrib.entry_nr; + + entry->address_lo_save = + readl(base + j * PCI_MSIX_ENTRY_SIZE + + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); + entry->address_hi_save = + readl(base + j * PCI_MSIX_ENTRY_SIZE + + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); + entry->data_save = + readl(base + j * PCI_MSIX_ENTRY_SIZE + + PCI_MSIX_ENTRY_DATA_OFFSET); + + tail = msi_desc[vector]->link.tail; + vector = tail; + } + dev->irq = temp; + save_state->cap_nr = PCI_CAP_ID_MSIX; pci_add_saved_cap(dev, save_state); return 0; @@ -606,8 +649,6 @@ void pci_restore_msix_state(struct pci_dev *dev) int vector, head, tail = 0; void __iomem *base; int j; - struct msg_address address; - struct msg_data data; struct msi_desc *entry; int temp; struct pci_cap_saved_state *save_state; @@ -633,20 +674,13 @@ void pci_restore_msix_state(struct pci_dev *dev) base = entry->mask_base; j = entry->msi_attrib.entry_nr; - msi_address_init(&address); - msi_data_init(&data, vector); - - address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK; - address.lo_address.value |= entry->msi_attrib.current_cpu << - MSI_TARGET_CPU_SHIFT; - - writel(address.lo_address.value, + writel(entry->address_lo_save, base + j * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); - writel(address.hi_address, + writel(entry->address_hi_save, base + j * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); - writel(*(u32*)&data, + writel(entry->data_save, base + j * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_DATA_OFFSET); @@ -660,30 +694,32 @@ void pci_restore_msix_state(struct pci_dev *dev) } #endif -static void msi_register_init(struct pci_dev *dev, struct msi_desc *entry) +static int msi_register_init(struct pci_dev *dev, struct msi_desc *entry) { - struct msg_address address; - struct msg_data data; + int status; + u32 address_hi; + u32 address_lo; + u32 data; int pos, vector = dev->irq; u16 control; pos = pci_find_capability(dev, PCI_CAP_ID_MSI); pci_read_config_word(dev, msi_control_reg(pos), &control); + /* Configure MSI capability structure */ - msi_address_init(&address); - msi_data_init(&data, vector); - entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >> - MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK); - pci_write_config_dword(dev, msi_lower_address_reg(pos), - address.lo_address.value); + status = msi_ops->setup(dev, vector, &address_hi, &address_lo, &data); + if (status < 0) + return status; + + pci_write_config_dword(dev, msi_lower_address_reg(pos), address_lo); if (is_64bit_address(control)) { pci_write_config_dword(dev, - msi_upper_address_reg(pos), address.hi_address); + msi_upper_address_reg(pos), address_hi); pci_write_config_word(dev, - msi_data_reg(pos, 1), *((u32*)&data)); + msi_data_reg(pos, 1), data); } else pci_write_config_word(dev, - msi_data_reg(pos, 0), *((u32*)&data)); + msi_data_reg(pos, 0), 
data); if (entry->msi_attrib.maskbit) { unsigned int maskbits, temp; /* All MSIs are unmasked by default, Mask them all */ @@ -697,6 +733,8 @@ static void msi_register_init(struct pci_dev *dev, struct msi_desc *entry) msi_mask_bits_reg(pos, is_64bit_address(control)), maskbits); } + + return 0; } /** @@ -710,6 +748,7 @@ static void msi_register_init(struct pci_dev *dev, struct msi_desc *entry) **/ static int msi_capability_init(struct pci_dev *dev) { + int status; struct msi_desc *entry; int pos, vector; u16 control; @@ -742,7 +781,12 @@ static int msi_capability_init(struct pci_dev *dev) /* Replace with MSI handler */ irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit); /* Configure MSI capability structure */ - msi_register_init(dev, entry); + status = msi_register_init(dev, entry); + if (status != 0) { + dev->irq = entry->msi_attrib.default_vector; + kmem_cache_free(msi_cachep, entry); + return status; + } attach_msi_entry(entry, vector); /* Set MSI enabled bits */ @@ -765,8 +809,10 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, int nvec) { struct msi_desc *head = NULL, *tail = NULL, *entry = NULL; - struct msg_address address; - struct msg_data data; + u32 address_hi; + u32 address_lo; + u32 data; + int status; int vector, pos, i, j, nr_entries, temp = 0; unsigned long phys_addr; u32 table_offset; @@ -822,18 +868,20 @@ static int msix_capability_init(struct pci_dev *dev, /* Replace with MSI-X handler */ irq_handler_init(PCI_CAP_ID_MSIX, vector, 1); /* Configure MSI-X capability structure */ - msi_address_init(&address); - msi_data_init(&data, vector); - entry->msi_attrib.current_cpu = - ((address.lo_address.u.dest_id >> - MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK); - writel(address.lo_address.value, + status = msi_ops->setup(dev, vector, + &address_hi, + &address_lo, + &data); + if (status < 0) + break; + + writel(address_lo, base + j * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); - writel(address.hi_address, + writel(address_hi, base + j * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); - writel(*(u32*)&data, + writel(data, base + j * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_DATA_OFFSET); attach_msi_entry(entry, vector); @@ -865,6 +913,7 @@ static int msix_capability_init(struct pci_dev *dev, **/ int pci_enable_msi(struct pci_dev* dev) { + struct pci_bus *bus; int pos, temp, status = -EINVAL; u16 control; @@ -874,8 +923,9 @@ int pci_enable_msi(struct pci_dev* dev) if (dev->no_msi) return status; - if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) - return -EINVAL; + for (bus = dev->bus; bus; bus = bus->parent) + if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) + return -EINVAL; temp = dev->irq; @@ -887,23 +937,23 @@ int pci_enable_msi(struct pci_dev* dev) if (!pos) return -EINVAL; - pci_read_config_word(dev, msi_control_reg(pos), &control); - if (control & PCI_MSI_FLAGS_ENABLE) - return 0; /* Already in MSI mode */ - if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) { /* Lookup Sucess */ unsigned long flags; + pci_read_config_word(dev, msi_control_reg(pos), &control); + if (control & PCI_MSI_FLAGS_ENABLE) + return 0; /* Already in MSI mode */ spin_lock_irqsave(&msi_lock, flags); if (!vector_irq[dev->irq]) { msi_desc[dev->irq]->msi_attrib.state = 0; vector_irq[dev->irq] = -1; nr_released_vectors--; spin_unlock_irqrestore(&msi_lock, flags); - msi_register_init(dev, msi_desc[dev->irq]); - enable_msi_mode(dev, pos, PCI_CAP_ID_MSI); - return 0; + status = msi_register_init(dev, msi_desc[dev->irq]); + if (status == 0) + 
enable_msi_mode(dev, pos, PCI_CAP_ID_MSI); + return status; } spin_unlock_irqrestore(&msi_lock, flags); dev->irq = temp; @@ -980,6 +1030,8 @@ static int msi_free_vector(struct pci_dev* dev, int vector, int reassign) void __iomem *base; unsigned long flags; + msi_ops->teardown(vector); + spin_lock_irqsave(&msi_lock, flags); entry = msi_desc[vector]; if (!entry || entry->dev != dev) { @@ -1008,33 +1060,8 @@ static int msi_free_vector(struct pci_dev* dev, int vector, int reassign) entry_nr * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); - if (head == vector) { - /* - * Detect last MSI-X vector to be released. - * Release the MSI-X memory-mapped table. - */ -#if 0 - int pos, nr_entries; - unsigned long phys_addr; - u32 table_offset; - u16 control; - u8 bir; - - pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); - pci_read_config_word(dev, msi_control_reg(pos), - &control); - nr_entries = multi_msix_capable(control); - pci_read_config_dword(dev, msix_table_offset_reg(pos), - &table_offset); - bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); - table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; - phys_addr = pci_resource_start(dev, bir) + table_offset; -/* - * FIXME! and what did you want to do with phys_addr? - */ -#endif + if (head == vector) iounmap(base); - } } return 0; @@ -1108,6 +1135,7 @@ static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec) **/ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) { + struct pci_bus *bus; int status, pos, nr_entries, free_vectors; int i, j, temp; u16 control; @@ -1116,6 +1144,13 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) if (!pci_msi_enable || !dev || !entries) return -EINVAL; + if (dev->no_msi) + return -EINVAL; + + for (bus = dev->bus; bus; bus = bus->parent) + if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) + return -EINVAL; + status = msi_init(); if (status < 0) return status; @@ -1300,24 +1335,6 @@ void msi_remove_pci_irq_vectors(struct pci_dev* dev) } msi_free_vector(dev, vector, 0); if (warning) { - /* Force to release the MSI-X memory-mapped table */ -#if 0 - unsigned long phys_addr; - u32 table_offset; - u16 control; - u8 bir; - - pci_read_config_word(dev, msi_control_reg(pos), - &control); - pci_read_config_dword(dev, msix_table_offset_reg(pos), - &table_offset); - bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); - table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; - phys_addr = pci_resource_start(dev, bir) + table_offset; -/* - * FIXME! and what did you want to do with phys_addr? - */ -#endif iounmap(base); printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() " "called without free_irq() on all MSI-X vectors\n", diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h index 4ac52d441e47..56951c39d3a3 100644 --- a/drivers/pci/msi.h +++ b/drivers/pci/msi.h @@ -6,6 +6,68 @@ #ifndef MSI_H #define MSI_H +/* + * MSI operation vector. Used by the msi core code (drivers/pci/msi.c) + * to abstract platform-specific tasks relating to MSI address generation + * and resource management. + */ +struct msi_ops { + /** + * setup - generate an MSI bus address and data for a given vector + * @pdev: PCI device context (in) + * @vector: vector allocated by the msi core (in) + * @addr_hi: upper 32 bits of PCI bus MSI address (out) + * @addr_lo: lower 32 bits of PCI bus MSI address (out) + * @data: MSI data payload (out) + * + * Description: The setup op is used to generate a PCI bus addres and + * data which the msi core will program into the card MSI capability + * registers. 
The setup routine is responsible for picking an initial + * cpu to target the MSI at. The setup routine is responsible for + * examining pdev to determine the MSI capabilities of the card and + * generating a suitable address/data. The setup routine is + * responsible for allocating and tracking any system resources it + * needs to route the MSI to the cpu it picks, and for associating + * those resources with the passed in vector. + * + * Returns 0 if the MSI address/data was successfully setup. + **/ + + int (*setup) (struct pci_dev *pdev, unsigned int vector, + u32 *addr_hi, u32 *addr_lo, u32 *data); + + /** + * teardown - release resources allocated by setup + * @vector: vector context for resources (in) + * + * Description: The teardown op is used to release any resources + * that were allocated in the setup routine associated with the passed + * in vector. + **/ + + void (*teardown) (unsigned int vector); + + /** + * target - retarget an MSI at a different cpu + * @vector: vector context for resources (in) + * @cpu: new cpu to direct vector at (in) + * @addr_hi: new value of PCI bus upper 32 bits (in/out) + * @addr_lo: new value of PCI bus lower 32 bits (in/out) + * + * Description: The target op is used to redirect an MSI vector + * at a different cpu. addr_hi/addr_lo coming in are the existing + * values that the MSI core has programmed into the card. The + * target code is responsible for freeing any resources (if any) + * associated with the old address, and generating a new PCI bus + * addr_hi/addr_lo that will redirect the vector at the indicated cpu. + **/ + + void (*target) (unsigned int vector, unsigned int cpu, + u32 *addr_hi, u32 *addr_lo); +}; + +extern int msi_register(struct msi_ops *ops); + #include <asm/msi.h> /* @@ -63,67 +125,6 @@ extern int pci_vector_resources(int last, int nr_released); #define msix_mask(address) (address | PCI_MSIX_FLAGS_BITMASK) #define msix_is_pending(address) (address & PCI_MSIX_FLAGS_PENDMASK) -/* - * MSI Defined Data Structures - */ -#define MSI_ADDRESS_HEADER 0xfee -#define MSI_ADDRESS_HEADER_SHIFT 12 -#define MSI_ADDRESS_HEADER_MASK 0xfff000 -#define MSI_ADDRESS_DEST_ID_MASK 0xfff0000f -#define MSI_TARGET_CPU_MASK 0xff -#define MSI_DELIVERY_MODE 0 -#define MSI_LEVEL_MODE 1 /* Edge always assert */ -#define MSI_TRIGGER_MODE 0 /* MSI is edge sensitive */ -#define MSI_PHYSICAL_MODE 0 -#define MSI_LOGICAL_MODE 1 -#define MSI_REDIRECTION_HINT_MODE 0 - -struct msg_data { -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u32 vector : 8; - __u32 delivery_mode : 3; /* 000b: FIXED | 001b: lowest prior */ - __u32 reserved_1 : 3; - __u32 level : 1; /* 0: deassert | 1: assert */ - __u32 trigger : 1; /* 0: edge | 1: level */ - __u32 reserved_2 : 16; -#elif defined(__BIG_ENDIAN_BITFIELD) - __u32 reserved_2 : 16; - __u32 trigger : 1; /* 0: edge | 1: level */ - __u32 level : 1; /* 0: deassert | 1: assert */ - __u32 reserved_1 : 3; - __u32 delivery_mode : 3; /* 000b: FIXED | 001b: lowest prior */ - __u32 vector : 8; -#else -#error "Bitfield endianness not defined! 
Check your byteorder.h" -#endif -} __attribute__ ((packed)); - -struct msg_address { - union { - struct { -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u32 reserved_1 : 2; - __u32 dest_mode : 1; /*0:physic | 1:logic */ - __u32 redirection_hint: 1; /*0: dedicated CPU - 1: lowest priority */ - __u32 reserved_2 : 4; - __u32 dest_id : 24; /* Destination ID */ -#elif defined(__BIG_ENDIAN_BITFIELD) - __u32 dest_id : 24; /* Destination ID */ - __u32 reserved_2 : 4; - __u32 redirection_hint: 1; /*0: dedicated CPU - 1: lowest priority */ - __u32 dest_mode : 1; /*0:physic | 1:logic */ - __u32 reserved_1 : 2; -#else -#error "Bitfield endianness not defined! Check your byteorder.h" -#endif - }u; - __u32 value; - }lo_address; - __u32 hi_address; -} __attribute__ ((packed)); - struct msi_desc { struct { __u8 type : 5; /* {0: unused, 5h:MSI, 11h:MSI-X} */ @@ -132,7 +133,7 @@ struct msi_desc { __u8 reserved: 1; /* reserved */ __u8 entry_nr; /* specific enabled entry */ __u8 default_vector; /* default pre-assigned vector */ - __u8 current_cpu; /* current destination cpu */ + __u8 unused; /* formerly unused destination cpu*/ }msi_attrib; struct { @@ -142,6 +143,14 @@ struct msi_desc { void __iomem *mask_base; struct pci_dev *dev; + +#ifdef CONFIG_PM + /* PM save area for MSIX address/data */ + + u32 address_hi_save; + u32 address_lo_save; + u32 data_save; +#endif }; #endif /* MSI_H */ diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index c2ecae5ff0c1..bb7456c1dbac 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -267,7 +267,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) /* ACPI bus type */ -static int pci_acpi_find_device(struct device *dev, acpi_handle *handle) +static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) { struct pci_dev * pci_dev; acpi_integer addr; @@ -281,7 +281,7 @@ static int pci_acpi_find_device(struct device *dev, acpi_handle *handle) return 0; } -static int pci_acpi_find_root_bridge(struct device *dev, acpi_handle *handle) +static int acpi_pci_find_root_bridge(struct device *dev, acpi_handle *handle) { int num; unsigned int seg, bus; @@ -299,21 +299,21 @@ static int pci_acpi_find_root_bridge(struct device *dev, acpi_handle *handle) return 0; } -static struct acpi_bus_type pci_acpi_bus = { +static struct acpi_bus_type acpi_pci_bus = { .bus = &pci_bus_type, - .find_device = pci_acpi_find_device, - .find_bridge = pci_acpi_find_root_bridge, + .find_device = acpi_pci_find_device, + .find_bridge = acpi_pci_find_root_bridge, }; -static int __init pci_acpi_init(void) +static int __init acpi_pci_init(void) { int ret; - ret = register_acpi_bus_type(&pci_acpi_bus); + ret = register_acpi_bus_type(&acpi_pci_bus); if (ret) return 0; platform_pci_choose_state = acpi_pci_choose_state; platform_pci_set_power_state = acpi_pci_set_power_state; return 0; } -arch_initcall(pci_acpi_init); +arch_initcall(acpi_pci_init); diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 56ac2bc003c7..bc405c035ce3 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -43,6 +43,29 @@ pci_config_attr(subsystem_vendor, "0x%04x\n"); pci_config_attr(subsystem_device, "0x%04x\n"); pci_config_attr(class, "0x%06x\n"); pci_config_attr(irq, "%u\n"); +pci_config_attr(is_enabled, "%u\n"); + +static ssize_t broken_parity_status_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + return sprintf (buf, "%u\n", pdev->broken_parity_status); +} + +static ssize_t 
broken_parity_status_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = to_pci_dev(dev); + ssize_t consumed = -EINVAL; + + if ((count > 0) && (*buf == '0' || *buf == '1')) { + pdev->broken_parity_status = *buf == '1' ? 1 : 0; + consumed = count; + } + return consumed; +} static ssize_t local_cpus_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -90,6 +113,25 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8), (u8)(pci_dev->class)); } +static ssize_t +is_enabled_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + /* this can crash the machine when done on the "wrong" device */ + if (!capable(CAP_SYS_ADMIN)) + return count; + + if (*buf == '0') + pci_disable_device(pdev); + + if (*buf == '1') + pci_enable_device(pdev); + + return count; +} + struct device_attribute pci_dev_attrs[] = { __ATTR_RO(resource), @@ -101,6 +143,9 @@ struct device_attribute pci_dev_attrs[] = { __ATTR_RO(irq), __ATTR_RO(local_cpus), __ATTR_RO(modalias), + __ATTR(enable, 0600, is_enabled_show, is_enabled_store), + __ATTR(broken_parity_status,(S_IRUGO|S_IWUSR), + broken_parity_status_show,broken_parity_status_store), __ATTR_NULL, }; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index fde41cc14734..d408a3c30426 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -517,7 +517,12 @@ pci_enable_device_bars(struct pci_dev *dev, int bars) int pci_enable_device(struct pci_dev *dev) { - int err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1); + int err; + + if (dev->is_enabled) + return 0; + + err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1); if (err) return err; pci_fixup_device(pci_fixup_enable, dev); @@ -546,7 +551,14 @@ void pci_disable_device(struct pci_dev *dev) { u16 pci_command; - + + if (dev->msi_enabled) + disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), + PCI_CAP_ID_MSI); + if (dev->msix_enabled) + disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), + PCI_CAP_ID_MSIX); + pci_read_config_word(dev, PCI_COMMAND, &pci_command); if (pci_command & PCI_COMMAND_MASTER) { pci_command &= ~PCI_COMMAND_MASTER; diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 30630cbe2fe3..29bdeca031a8 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -40,7 +40,7 @@ extern int pci_bus_find_capability (struct pci_bus *bus, unsigned int devfn, int extern void pci_remove_legacy_files(struct pci_bus *bus); /* Lock for read/write access to pci device and bus lists */ -extern spinlock_t pci_bus_lock; +extern struct rw_semaphore pci_bus_sem; #ifdef CONFIG_X86_IO_APIC extern int pci_msi_quirk; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index a10ed9dab2c2..f89dbc3738b7 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -180,25 +180,31 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) res->flags |= pci_calc_resource_flags(l); if ((l & (PCI_BASE_ADDRESS_SPACE | PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == (PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64)) { - pci_read_config_dword(dev, reg+4, &l); + u32 szhi, lhi; + pci_read_config_dword(dev, reg+4, &lhi); + pci_write_config_dword(dev, reg+4, ~0); + pci_read_config_dword(dev, reg+4, &szhi); + pci_write_config_dword(dev, reg+4, lhi); + szhi = pci_size(lhi, szhi, 0xffffffff); next++; #if BITS_PER_LONG == 64 - 
res->start |= ((unsigned long) l) << 32; + res->start |= ((unsigned long) lhi) << 32; res->end = res->start + sz; - pci_write_config_dword(dev, reg+4, ~0); - pci_read_config_dword(dev, reg+4, &sz); - pci_write_config_dword(dev, reg+4, l); - sz = pci_size(l, sz, 0xffffffff); - if (sz) { + if (szhi) { /* This BAR needs > 4GB? Wow. */ - res->end |= (unsigned long)sz<<32; + res->end |= (unsigned long)szhi<<32; } #else - if (l) { - printk(KERN_ERR "PCI: Unable to handle 64-bit address for device %s\n", pci_name(dev)); + if (szhi) { + printk(KERN_ERR "PCI: Unable to handle 64-bit BAR for device %s\n", pci_name(dev)); res->start = 0; res->flags = 0; - continue; + } else if (lhi) { + /* 64-bit wide address, treat as disabled */ + pci_write_config_dword(dev, reg, l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK); + pci_write_config_dword(dev, reg+4, 0); + res->start = 0; + res->end = sz; } #endif } @@ -377,9 +383,9 @@ struct pci_bus * __devinit pci_add_new_bus(struct pci_bus *parent, struct pci_de child = pci_alloc_child_bus(parent, dev, busnr); if (child) { - spin_lock(&pci_bus_lock); + down_write(&pci_bus_sem); list_add_tail(&child->node, &parent->children); - spin_unlock(&pci_bus_lock); + up_write(&pci_bus_sem); } return child; } @@ -838,9 +844,9 @@ void __devinit pci_device_add(struct pci_dev *dev, struct pci_bus *bus) * and the bus list for fixup functions, etc. */ INIT_LIST_HEAD(&dev->global_list); - spin_lock(&pci_bus_lock); + down_write(&pci_bus_sem); list_add_tail(&dev->bus_list, &bus->devices); - spin_unlock(&pci_bus_lock); + up_write(&pci_bus_sem); } struct pci_dev * __devinit @@ -975,9 +981,10 @@ struct pci_bus * __devinit pci_create_bus(struct device *parent, pr_debug("PCI: Bus %04x:%02x already known\n", pci_domain_nr(b), bus); goto err_out; } - spin_lock(&pci_bus_lock); + + down_write(&pci_bus_sem); list_add_tail(&b->node, &pci_root_buses); - spin_unlock(&pci_bus_lock); + up_write(&pci_bus_sem); memset(dev, 0, sizeof(*dev)); dev->parent = parent; @@ -1017,9 +1024,9 @@ class_dev_create_file_err: class_dev_reg_err: device_unregister(dev); dev_reg_err: - spin_lock(&pci_bus_lock); + down_write(&pci_bus_sem); list_del(&b->node); - spin_unlock(&pci_bus_lock); + up_write(&pci_bus_sem); err_out: kfree(dev); kfree(b); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index d378478612fb..4364d793f73b 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -24,6 +24,17 @@ #include <linux/acpi.h> #include "pci.h" +/* The Mellanox Tavor device gives false positive parity errors + * Mark this device with a broken_parity_status, to allow + * PCI scanning code to "skip" this now blacklisted device. + */ +static void __devinit quirk_mellanox_tavor(struct pci_dev *dev) +{ + dev->broken_parity_status = 1; /* This device gives false positives */ +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor); + /* Deal with broken BIOS'es that neglect to enable passive release, which can cause problems in combination with the 82441FX/PPro MTRRs */ static void __devinit quirk_passive_release(struct pci_dev *dev) @@ -878,27 +889,30 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_e * when a PCI-Soundcard is added. The BIOS only gives Options * "Disabled" and "AUTO". This Quirk Sets the corresponding * Register-Value to enable the Soundcard. 
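The probe.c hunk above sizes the upper dword of a 64-bit BAR with the same write-ones/read-back sequence already used for the lower dword (the new lhi/szhi locals), instead of reusing the value from the plain config read. The arithmetic behind that sequence is small enough to show on its own; the user-space sketch below uses an invented decode_bar_size() helper and made-up read-back values, not the kernel's pci_size().

/*
 * Standalone illustration of BAR sizing: write all-ones to the BAR,
 * read it back, and take the two's complement of the writable address
 * bits.  decode_bar_size() and the sample values are inventions for
 * this sketch, not kernel interfaces.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define BAR_MEM_ADDR_MASK 0xfffffff0u	/* low dword bits 3:0 are flags */

static uint64_t decode_bar_size(uint32_t size_lo, uint32_t size_hi)
{
	uint64_t bits = ((uint64_t)size_hi << 32) | (size_lo & BAR_MEM_ADDR_MASK);

	if (!bits)
		return 0;	/* BAR not implemented */
	return ~bits + 1;	/* two's complement of the writable bits */
}

int main(void)
{
	/* What a 1 MiB, prefetchable, 64-bit memory BAR might return after
	 * all-ones writes to both dwords. */
	uint32_t size_lo = 0xfff0000c;
	uint32_t size_hi = 0xffffffff;

	printf("BAR spans %" PRIu64 " bytes\n",
	       decode_bar_size(size_lo, size_hi));
	return 0;
}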
+ * + * FIXME: Presently this quirk will run on anything that has an 8237 + * which isn't correct, we need to check DMI tables or something in + * order to make sure it only runs on the MSI-K8T-Neo2Fir. Because it + * runs everywhere at present we suppress the printk output in most + * irrelevant cases. */ static void __init k8t_sound_hostbridge(struct pci_dev *dev) { unsigned char val; - printk(KERN_INFO "PCI: Quirk-MSI-K8T Soundcard On\n"); pci_read_config_byte(dev, 0x50, &val); if (val == 0x88 || val == 0xc8) { + /* Assume it's probably a MSI-K8T-Neo2Fir */ + printk(KERN_INFO "PCI: MSI-K8T-Neo2Fir, attempting to turn soundcard ON\n"); pci_write_config_byte(dev, 0x50, val & (~0x40)); /* Verify the Change for Status output */ pci_read_config_byte(dev, 0x50, &val); if (val & 0x40) - printk(KERN_INFO "PCI: MSI-K8T soundcard still off\n"); + printk(KERN_INFO "PCI: MSI-K8T-Neo2Fir, soundcard still off\n"); else - printk(KERN_INFO "PCI: MSI-K8T soundcard on\n"); - } else { - printk(KERN_INFO "PCI: Unexpected Value in PCI-Register: " - "no Change!\n"); + printk(KERN_INFO "PCI: MSI-K8T-Neo2Fir, soundcard on\n"); } - } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_hostbridge); @@ -1485,6 +1499,25 @@ static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io); +/* Under some circumstances, AER is not linked with extended capabilities. + * Force it to be linked by setting the corresponding control bit in the + * config space. + */ +static void __devinit quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev) +{ + uint8_t b; + if (pci_read_config_byte(dev, 0xf41, &b) == 0) { + if (!(b & 0x20)) { + pci_write_config_byte(dev, 0xf41, b | 0x20); + printk(KERN_INFO + "PCI: Linking AER extended capability on %s\n", + pci_name(dev)); + } + } +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, + quirk_nvidia_ck804_pcie_aer_ext_cap); + EXPORT_SYMBOL(pcie_mch_quirk); #ifdef CONFIG_HOTPLUG EXPORT_SYMBOL(pci_fixup_device); diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 1a6bf9de166f..99ffbd478b29 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -22,18 +22,18 @@ static void pci_destroy_dev(struct pci_dev *dev) pci_proc_detach_device(dev); pci_remove_sysfs_dev_files(dev); device_unregister(&dev->dev); - spin_lock(&pci_bus_lock); + down_write(&pci_bus_sem); list_del(&dev->global_list); dev->global_list.next = dev->global_list.prev = NULL; - spin_unlock(&pci_bus_lock); + up_write(&pci_bus_sem); } /* Remove the device from the device lists, and prevent any further * list accesses from this device */ - spin_lock(&pci_bus_lock); + down_write(&pci_bus_sem); list_del(&dev->bus_list); dev->bus_list.next = dev->bus_list.prev = NULL; - spin_unlock(&pci_bus_lock); + up_write(&pci_bus_sem); pci_free_resources(dev); pci_dev_put(dev); @@ -62,9 +62,9 @@ void pci_remove_bus(struct pci_bus *pci_bus) { pci_proc_detach_bus(pci_bus); - spin_lock(&pci_bus_lock); + down_write(&pci_bus_sem); list_del(&pci_bus->node); - spin_unlock(&pci_bus_lock); + up_write(&pci_bus_sem); pci_remove_legacy_files(pci_bus); class_device_remove_file(&pci_bus->class_dev, &class_device_attr_cpuaffinity); diff --git a/drivers/pci/search.c b/drivers/pci/search.c index ce7dd6e7be60..622b3f8ba820 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c @@ -13,7 +13,7 @@ #include <linux/interrupt.h> #include "pci.h" -DEFINE_SPINLOCK(pci_bus_lock); +DECLARE_RWSEM(pci_bus_sem); static struct 
pci_bus * __devinit pci_do_find_bus(struct pci_bus* bus, unsigned char busnr) @@ -72,11 +72,11 @@ pci_find_next_bus(const struct pci_bus *from) struct pci_bus *b = NULL; WARN_ON(in_interrupt()); - spin_lock(&pci_bus_lock); + down_read(&pci_bus_sem); n = from ? from->node.next : pci_root_buses.next; if (n != &pci_root_buses) b = pci_bus_b(n); - spin_unlock(&pci_bus_lock); + up_read(&pci_bus_sem); return b; } @@ -124,7 +124,7 @@ struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn) struct pci_dev *dev; WARN_ON(in_interrupt()); - spin_lock(&pci_bus_lock); + down_read(&pci_bus_sem); list_for_each(tmp, &bus->devices) { dev = pci_dev_b(tmp); @@ -135,7 +135,7 @@ struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn) dev = NULL; out: pci_dev_get(dev); - spin_unlock(&pci_bus_lock); + up_read(&pci_bus_sem); return dev; } @@ -167,7 +167,7 @@ static struct pci_dev * pci_find_subsys(unsigned int vendor, struct pci_dev *dev; WARN_ON(in_interrupt()); - spin_lock(&pci_bus_lock); + down_read(&pci_bus_sem); n = from ? from->global_list.next : pci_devices.next; while (n && (n != &pci_devices)) { @@ -181,7 +181,7 @@ static struct pci_dev * pci_find_subsys(unsigned int vendor, } dev = NULL; exit: - spin_unlock(&pci_bus_lock); + up_read(&pci_bus_sem); return dev; } @@ -232,7 +232,7 @@ pci_get_subsys(unsigned int vendor, unsigned int device, struct pci_dev *dev; WARN_ON(in_interrupt()); - spin_lock(&pci_bus_lock); + down_read(&pci_bus_sem); n = from ? from->global_list.next : pci_devices.next; while (n && (n != &pci_devices)) { @@ -247,7 +247,7 @@ pci_get_subsys(unsigned int vendor, unsigned int device, dev = NULL; exit: dev = pci_dev_get(dev); - spin_unlock(&pci_bus_lock); + up_read(&pci_bus_sem); pci_dev_put(from); return dev; } @@ -292,7 +292,7 @@ pci_find_device_reverse(unsigned int vendor, unsigned int device, const struct p struct pci_dev *dev; WARN_ON(in_interrupt()); - spin_lock(&pci_bus_lock); + down_read(&pci_bus_sem); n = from ? from->global_list.prev : pci_devices.prev; while (n && (n != &pci_devices)) { @@ -304,7 +304,7 @@ pci_find_device_reverse(unsigned int vendor, unsigned int device, const struct p } dev = NULL; exit: - spin_unlock(&pci_bus_lock); + up_read(&pci_bus_sem); return dev; } @@ -328,7 +328,7 @@ struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) struct pci_dev *dev; WARN_ON(in_interrupt()); - spin_lock(&pci_bus_lock); + down_read(&pci_bus_sem); n = from ? 
from->global_list.next : pci_devices.next; while (n && (n != &pci_devices)) { @@ -340,7 +340,7 @@ struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) dev = NULL; exit: dev = pci_dev_get(dev); - spin_unlock(&pci_bus_lock); + up_read(&pci_bus_sem); pci_dev_put(from); return dev; } @@ -362,7 +362,7 @@ int pci_dev_present(const struct pci_device_id *ids) int found = 0; WARN_ON(in_interrupt()); - spin_lock(&pci_bus_lock); + down_read(&pci_bus_sem); while (ids->vendor || ids->subvendor || ids->class_mask) { list_for_each_entry(dev, &pci_devices, global_list) { if (pci_match_one_device(ids, dev)) { @@ -372,8 +372,8 @@ int pci_dev_present(const struct pci_device_id *ids) } ids++; } -exit: - spin_unlock(&pci_bus_lock); +exit: + up_read(&pci_bus_sem); return found; } EXPORT_SYMBOL(pci_dev_present); diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 28ce3a7ee434..35086e80faa9 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -55,9 +55,10 @@ pbus_assign_resources_sorted(struct pci_bus *bus) list_for_each_entry(dev, &bus->devices, bus_list) { u16 class = dev->class >> 8; - /* Don't touch classless devices and host bridges. */ + /* Don't touch classless devices or host bridges or ioapics. */ if (class == PCI_CLASS_NOT_DEFINED || - class == PCI_CLASS_BRIDGE_HOST) + class == PCI_CLASS_BRIDGE_HOST || + class == PCI_CLASS_SYSTEM_PIC) continue; pdev_sort_resources(dev, &head); diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index ea9277b7f899..577f4b55c46d 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -155,6 +155,46 @@ int pci_assign_resource(struct pci_dev *dev, int resno) return ret; } +#ifdef CONFIG_EMBEDDED +int pci_assign_resource_fixed(struct pci_dev *dev, int resno) +{ + struct pci_bus *bus = dev->bus; + struct resource *res = dev->resource + resno; + unsigned int type_mask; + int i, ret = -EBUSY; + + type_mask = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH; + + for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { + struct resource *r = bus->resource[i]; + if (!r) + continue; + + /* type_mask must match */ + if ((res->flags ^ r->flags) & type_mask) + continue; + + ret = request_resource(r, res); + + if (ret == 0) + break; + } + + if (ret) { + printk(KERN_ERR "PCI: Failed to allocate %s resource " + "#%d:%llx@%llx for %s\n", + res->flags & IORESOURCE_IO ? 
"I/O" : "mem", + resno, (unsigned long long)(res->end - res->start + 1), + (unsigned long long)res->start, pci_name(dev)); + } else if (resno < PCI_BRIDGE_RESOURCES) { + pci_update_resource(dev, res, resno); + } + + return ret; +} +EXPORT_SYMBOL_GPL(pci_assign_resource_fixed); +#endif + /* Sort resources by alignment */ void __devinit pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 77bb2351500c..680f6063954b 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -397,30 +397,6 @@ #include "ql1280_fw.h" #include "ql1040_fw.h" - -/* - * Missing PCI ID's - */ -#ifndef PCI_DEVICE_ID_QLOGIC_ISP1080 -#define PCI_DEVICE_ID_QLOGIC_ISP1080 0x1080 -#endif -#ifndef PCI_DEVICE_ID_QLOGIC_ISP1240 -#define PCI_DEVICE_ID_QLOGIC_ISP1240 0x1240 -#endif -#ifndef PCI_DEVICE_ID_QLOGIC_ISP1280 -#define PCI_DEVICE_ID_QLOGIC_ISP1280 0x1280 -#endif -#ifndef PCI_DEVICE_ID_QLOGIC_ISP10160 -#define PCI_DEVICE_ID_QLOGIC_ISP10160 0x1016 -#endif -#ifndef PCI_DEVICE_ID_QLOGIC_ISP12160 -#define PCI_DEVICE_ID_QLOGIC_ISP12160 0x1216 -#endif - -#ifndef PCI_VENDOR_ID_AMI -#define PCI_VENDOR_ID_AMI 0x101e -#endif - #ifndef BITS_PER_LONG #error "BITS_PER_LONG not defined!" #endif diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c index 8a29ce340b47..27d658704cf9 100644 --- a/drivers/scsi/sata_vsc.c +++ b/drivers/scsi/sata_vsc.c @@ -433,13 +433,14 @@ err_out: /* - * 0x1725/0x7174 is the Vitesse VSC-7174 - * 0x8086/0x3200 is the Intel 31244, which is supposed to be identical - * compatibility is untested as of yet + * Intel 31244 is supposed to be identical. + * Compatibility is untested as of yet. */ static const struct pci_device_id vsc_sata_pci_tbl[] = { - { 0x1725, 0x7174, PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 }, - { 0x8086, 0x3200, PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 }, + { PCI_VENDOR_ID_VITESSE, PCI_DEVICE_ID_VITESSE_VSC7174, + PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GD31244, + PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 }, { } }; diff --git a/include/asm-i386/msi.h b/include/asm-i386/msi.h index f041d4495faf..b11c4b7dfaef 100644 --- a/include/asm-i386/msi.h +++ b/include/asm-i386/msi.h @@ -9,7 +9,15 @@ #include <asm/desc.h> #include <mach_apic.h> -#define LAST_DEVICE_VECTOR 232 +#define LAST_DEVICE_VECTOR (FIRST_SYSTEM_VECTOR - 1) #define MSI_TARGET_CPU_SHIFT 12 +extern struct msi_ops msi_apic_ops; + +static inline int msi_arch_init(void) +{ + msi_register(&msi_apic_ops); + return 0; +} + #endif /* ASM_MSI_H */ diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h index 0cf119b42f7d..ea8b8c407ab4 100644 --- a/include/asm-ia64/hw_irq.h +++ b/include/asm-ia64/hw_irq.h @@ -47,9 +47,19 @@ typedef u8 ia64_vector; #define IA64_CMC_VECTOR 0x1f /* corrected machine-check interrupt vector */ /* * Vectors 0x20-0x2f are reserved for legacy ISA IRQs. + * Use vectors 0x30-0xe7 as the default device vector range for ia64. 
+ * Platforms may choose to reduce this range in platform_irq_setup, but the + * platform range must fall within + * [IA64_DEF_FIRST_DEVICE_VECTOR..IA64_DEF_LAST_DEVICE_VECTOR] */ -#define IA64_FIRST_DEVICE_VECTOR 0x30 -#define IA64_LAST_DEVICE_VECTOR 0xe7 +extern int ia64_first_device_vector; +extern int ia64_last_device_vector; + +#define IA64_DEF_FIRST_DEVICE_VECTOR 0x30 +#define IA64_DEF_LAST_DEVICE_VECTOR 0xe7 +#define IA64_FIRST_DEVICE_VECTOR ia64_first_device_vector +#define IA64_LAST_DEVICE_VECTOR ia64_last_device_vector +#define IA64_MAX_DEVICE_VECTORS (IA64_DEF_LAST_DEVICE_VECTOR - IA64_DEF_FIRST_DEVICE_VECTOR + 1) #define IA64_NUM_DEVICE_VECTORS (IA64_LAST_DEVICE_VECTOR - IA64_FIRST_DEVICE_VECTOR + 1) #define IA64_MCA_RENDEZ_VECTOR 0xe8 /* MCA rendez interrupt */ @@ -83,6 +93,7 @@ extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt extern int assign_irq_vector (int irq); /* allocate a free vector */ extern void free_irq_vector (int vector); +extern int reserve_irq_vector (int vector); extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect); extern void register_percpu_irq (ia64_vector vec, struct irqaction *action); diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h index 0df72a134c8b..15b545a897a4 100644 --- a/include/asm-ia64/machvec.h +++ b/include/asm-ia64/machvec.h @@ -75,6 +75,7 @@ typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *); typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *); typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *); typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *); +typedef int ia64_mv_msi_init_t (void); static inline void machvec_noop (void) @@ -153,6 +154,7 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *); # define platform_readl_relaxed ia64_mv.readl_relaxed # define platform_readq_relaxed ia64_mv.readq_relaxed # define platform_migrate ia64_mv.migrate +# define platform_msi_init ia64_mv.msi_init # endif /* __attribute__((__aligned__(16))) is required to make size of the @@ -202,6 +204,7 @@ struct ia64_machine_vector { ia64_mv_readl_relaxed_t *readl_relaxed; ia64_mv_readq_relaxed_t *readq_relaxed; ia64_mv_migrate_t *migrate; + ia64_mv_msi_init_t *msi_init; } __attribute__((__aligned__(16))); /* align attrib? see above comment */ #define MACHVEC_INIT(name) \ @@ -247,6 +250,7 @@ struct ia64_machine_vector { platform_readl_relaxed, \ platform_readq_relaxed, \ platform_migrate, \ + platform_msi_init, \ } extern struct ia64_machine_vector ia64_mv; @@ -400,5 +404,8 @@ extern int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size #ifndef platform_migrate # define platform_migrate machvec_noop_task #endif +#ifndef platform_msi_init +# define platform_msi_init ((ia64_mv_msi_init_t*)NULL) +#endif #endif /* _ASM_IA64_MACHVEC_H */ diff --git a/include/asm-ia64/machvec_sn2.h b/include/asm-ia64/machvec_sn2.h index da1d43755afe..cf724dc79d8c 100644 --- a/include/asm-ia64/machvec_sn2.h +++ b/include/asm-ia64/machvec_sn2.h @@ -67,6 +67,8 @@ extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device; extern ia64_mv_dma_mapping_error sn_dma_mapping_error; extern ia64_mv_dma_supported sn_dma_supported; extern ia64_mv_migrate_t sn_migrate; +extern ia64_mv_msi_init_t sn_msi_init; + /* * This stuff has dual use! 
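Tying the MSI pieces together: the msi_ops vector documented in drivers/pci/msi.h earlier in this diff is handed to the core through msi_register(), and each architecture's msi_arch_init() (on ia64, optionally the machvec's platform_msi_init hook, such as sn_msi_init) decides which ops to register. The sketch below shows the general shape of such an implementation; the plat_* names and the address/data encoding are fabricated for illustration and do not describe any real platform.

#include <linux/pci.h>
#include "msi.h"	/* struct msi_ops and msi_register(), as in drivers/pci/msi.h */

/* Fabricated bus-address layout: a 0xfee00000 window with the target
 * cpu in bits 19:12 and the vector carried verbatim in the data. */
static int plat_msi_setup(struct pci_dev *pdev, unsigned int vector,
			  u32 *addr_hi, u32 *addr_lo, u32 *data)
{
	unsigned int cpu = 0;	/* toy policy: target the boot cpu first */

	*addr_hi = 0;
	*addr_lo = 0xfee00000 | (cpu << 12);
	*data    = vector;
	return 0;
}

static void plat_msi_teardown(unsigned int vector)
{
	/* Nothing was allocated in setup, so nothing to release. */
}

static void plat_msi_target(unsigned int vector, unsigned int cpu,
			    u32 *addr_hi, u32 *addr_lo)
{
	/* Rewrite only the cpu field of the fabricated address layout. */
	*addr_lo = (*addr_lo & ~0x000ff000u) | (cpu << 12);
}

static struct msi_ops plat_msi_ops = {
	.setup		= plat_msi_setup,
	.teardown	= plat_msi_teardown,
	.target		= plat_msi_target,
};

/* Called from the architecture's msi_arch_init() hook. */
int plat_msi_init(void)
{
	return msi_register(&plat_msi_ops);
}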
@@ -117,6 +119,11 @@ extern ia64_mv_migrate_t sn_migrate; #define platform_dma_mapping_error sn_dma_mapping_error #define platform_dma_supported sn_dma_supported #define platform_migrate sn_migrate +#ifdef CONFIG_PCI_MSI +#define platform_msi_init sn_msi_init +#else +#define platform_msi_init ((ia64_mv_msi_init_t*)NULL) +#endif #include <asm/sn/io.h> diff --git a/include/asm-ia64/msi.h b/include/asm-ia64/msi.h index 97890f7762b3..bb92b0dbde2f 100644 --- a/include/asm-ia64/msi.h +++ b/include/asm-ia64/msi.h @@ -14,4 +14,16 @@ static inline void set_intr_gate (int nr, void *func) {} #define ack_APIC_irq ia64_eoi #define MSI_TARGET_CPU_SHIFT 4 +extern struct msi_ops msi_apic_ops; + +static inline int msi_arch_init(void) +{ + if (platform_msi_init) + return platform_msi_init(); + + /* default ops for most ia64 platforms */ + msi_register(&msi_apic_ops); + return 0; +} + #endif /* ASM_MSI_H */ diff --git a/include/asm-ia64/sn/intr.h b/include/asm-ia64/sn/intr.h index 60a51a406eec..12b54ddb06be 100644 --- a/include/asm-ia64/sn/intr.h +++ b/include/asm-ia64/sn/intr.h @@ -10,6 +10,7 @@ #define _ASM_IA64_SN_INTR_H #include <linux/rcupdate.h> +#include <asm/sn/types.h> #define SGI_UART_VECTOR 0xe9 @@ -40,6 +41,7 @@ struct sn_irq_info { int irq_cpuid; /* kernel logical cpuid */ int irq_irq; /* the IRQ number */ int irq_int_bit; /* Bridge interrupt pin */ + /* <0 means MSI */ u64 irq_xtalkaddr; /* xtalkaddr IRQ is sent to */ int irq_bridge_type;/* pciio asic type (pciio.h) */ void *irq_bridge; /* bridge generating irq */ @@ -53,6 +55,12 @@ struct sn_irq_info { }; extern void sn_send_IPI_phys(int, long, int, int); +extern u64 sn_intr_alloc(nasid_t, int, + struct sn_irq_info *, + int, nasid_t, int); +extern void sn_intr_free(nasid_t, int, struct sn_irq_info *); +extern struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *, nasid_t, int); +extern struct list_head **sn_irq_lh; #define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector) diff --git a/include/asm-ia64/sn/pcibr_provider.h b/include/asm-ia64/sn/pcibr_provider.h index 51260ab70d91..e3b0c3fe5eed 100644 --- a/include/asm-ia64/sn/pcibr_provider.h +++ b/include/asm-ia64/sn/pcibr_provider.h @@ -55,6 +55,7 @@ #define PCI32_ATE_V (0x1 << 0) #define PCI32_ATE_CO (0x1 << 1) #define PCI32_ATE_PREC (0x1 << 2) +#define PCI32_ATE_MSI (0x1 << 2) #define PCI32_ATE_PREF (0x1 << 3) #define PCI32_ATE_BAR (0x1 << 4) #define PCI32_ATE_ADDR_SHFT 12 @@ -117,8 +118,8 @@ struct pcibus_info { extern int pcibr_init_provider(void); extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *); -extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t); -extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t); +extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t, int type); +extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t, int type); extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int); /* diff --git a/include/asm-ia64/sn/pcibus_provider_defs.h b/include/asm-ia64/sn/pcibus_provider_defs.h index ce3f6c328241..8f7c83d0f6d3 100644 --- a/include/asm-ia64/sn/pcibus_provider_defs.h +++ b/include/asm-ia64/sn/pcibus_provider_defs.h @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved. 
*/ #ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H #define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H @@ -45,13 +45,24 @@ struct pci_controller; */ struct sn_pcibus_provider { - dma_addr_t (*dma_map)(struct pci_dev *, unsigned long, size_t); - dma_addr_t (*dma_map_consistent)(struct pci_dev *, unsigned long, size_t); + dma_addr_t (*dma_map)(struct pci_dev *, unsigned long, size_t, int flags); + dma_addr_t (*dma_map_consistent)(struct pci_dev *, unsigned long, size_t, int flags); void (*dma_unmap)(struct pci_dev *, dma_addr_t, int); void * (*bus_fixup)(struct pcibus_bussoft *, struct pci_controller *); void (*force_interrupt)(struct sn_irq_info *); void (*target_interrupt)(struct sn_irq_info *); }; +/* + * Flags used by the map interfaces + * bits 3:0 specifies format of passed in address + * bit 4 specifies that address is to be used for MSI + */ + +#define SN_DMA_ADDRTYPE(x) ((x) & 0xf) +#define SN_DMA_ADDR_PHYS 1 /* address is an xio address. */ +#define SN_DMA_ADDR_XIO 2 /* address is phys memory */ +#define SN_DMA_MSI 0x10 /* Bus address is to be used for MSI */ + extern struct sn_pcibus_provider *sn_pci_provider[]; #endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */ diff --git a/include/asm-ia64/sn/tiocp.h b/include/asm-ia64/sn/tiocp.h index f47c08ab483c..e8ad0bb5b6c5 100644 --- a/include/asm-ia64/sn/tiocp.h +++ b/include/asm-ia64/sn/tiocp.h @@ -3,13 +3,14 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2003-2004 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 2003-2005 Silicon Graphics, Inc. All rights reserved. */ #ifndef _ASM_IA64_SN_PCI_TIOCP_H #define _ASM_IA64_SN_PCI_TIOCP_H #define TIOCP_HOST_INTR_ADDR 0x003FFFFFFFFFFFFFUL #define TIOCP_PCI64_CMDTYPE_MEM (0x1ull << 60) +#define TIOCP_PCI64_CMDTYPE_MSI (0x3ull << 60) /***************************************************************************** diff --git a/include/asm-x86_64/msi.h b/include/asm-x86_64/msi.h index 356e0e82f50b..3ad2346624b2 100644 --- a/include/asm-x86_64/msi.h +++ b/include/asm-x86_64/msi.h @@ -10,7 +10,15 @@ #include <asm/mach_apic.h> #include <asm/smp.h> -#define LAST_DEVICE_VECTOR 232 +#define LAST_DEVICE_VECTOR (FIRST_SYSTEM_VECTOR - 1) #define MSI_TARGET_CPU_SHIFT 12 +extern struct msi_ops msi_apic_ops; + +static inline int msi_arch_init(void) +{ + msi_register(&msi_apic_ops); + return 0; +} + #endif /* ASM_MSI_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 6c4bc773f7b7..62a8c22f5f60 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -162,6 +162,9 @@ struct pci_dev { unsigned int is_busmaster:1; /* device is busmaster */ unsigned int no_msi:1; /* device may not use msi */ unsigned int block_ucfg_access:1; /* userspace config space access is blocked */ + unsigned int broken_parity_status:1; /* Device generates false positive parity */ + unsigned int msi_enabled:1; + unsigned int msix_enabled:1; u32 saved_config_space[16]; /* config space saved at suspend time */ struct hlist_head saved_cap_space; @@ -496,6 +499,7 @@ int pci_set_dma_mask(struct pci_dev *dev, u64 mask); int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask); void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno); int pci_assign_resource(struct pci_dev *dev, int i); +int pci_assign_resource_fixed(struct pci_dev *dev, int i); void pci_restore_bars(struct pci_dev *dev); /* ROM control related routines */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index bcfe9d4f56ae..fd54a9d4c3d4 100644 --- 
a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -848,7 +848,12 @@ #define PCI_VENDOR_ID_QLOGIC 0x1077 +#define PCI_DEVICE_ID_QLOGIC_ISP10160 0x1016 #define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020 +#define PCI_DEVICE_ID_QLOGIC_ISP1080 0x1080 +#define PCI_DEVICE_ID_QLOGIC_ISP12160 0x1216 +#define PCI_DEVICE_ID_QLOGIC_ISP1240 0x1240 +#define PCI_DEVICE_ID_QLOGIC_ISP1280 0x1280 #define PCI_DEVICE_ID_QLOGIC_ISP2100 0x2100 #define PCI_DEVICE_ID_QLOGIC_ISP2200 0x2200 #define PCI_DEVICE_ID_QLOGIC_ISP2300 0x2300 @@ -1018,6 +1023,7 @@ #define PCI_DEVICE_ID_NVIDIA_NVENET_8 0x0056 #define PCI_DEVICE_ID_NVIDIA_NVENET_9 0x0057 #define PCI_DEVICE_ID_NVIDIA_CK804_AUDIO 0x0059 +#define PCI_DEVICE_ID_NVIDIA_CK804_PCIE 0x005d #define PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS 0x0064 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE 0x0065 #define PCI_DEVICE_ID_NVIDIA_NVENET_2 0x0066 @@ -1946,6 +1952,7 @@ #define PCI_VENDOR_ID_MELLANOX 0x15b3 #define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 +#define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46 #define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278 #define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282 #define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c @@ -1969,6 +1976,9 @@ #define PCI_VENDOR_ID_NETCELL 0x169c #define PCI_DEVICE_ID_REVOLUTION 0x0044 +#define PCI_VENDOR_ID_VITESSE 0x1725 +#define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174 + #define PCI_VENDOR_ID_LINKSYS 0x1737 #define PCI_DEVICE_ID_LINKSYS_EG1064 0x1064 @@ -2148,6 +2158,7 @@ #define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815 #define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e #define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850 +#define PCI_DEVICE_ID_INTEL_GD31244 0x3200 #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
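The pci_ids.h additions above let drivers and quirks refer to this hardware symbolically rather than by bare hex values, which is exactly the conversion the sata_vsc.c and qla1280.c hunks in this series perform. As a generic illustration of the pattern, a hypothetical driver's match table using one of the new constants might look like the following; the table name and the driver it implies are placeholders, only the ID macros come from this patch.

#include <linux/module.h>
#include <linux/pci.h>

/* Hypothetical match table: the Vitesse VSC-7174 IDs are the ones
 * added to pci_ids.h above; everything else here is illustrative. */
static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_VENDOR_ID_VITESSE, PCI_DEVICE_ID_VITESSE_VSC7174,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);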