author	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-06 14:26:05 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-06 14:26:05 -0700
commit	ffa009c366e33f3eae48bba2547051fe15795f64 (patch)
tree	78736a4ee7c16819830a32a313867e3a88ac6aff /drivers/pci
parent	8e320d02718d2872d52ef88a69a493e420494269 (diff)
parent	46f06b72378d3187f0d12f7a60d020676bfbf332 (diff)
Merge git://git.infradead.org/iommu-2.6
* git://git.infradead.org/iommu-2.6:
  drivers/pci/intr_remapping.c: include acpi.h
  intel-iommu: Fix oops in device_to_iommu() when devices not found.
  intel-iommu: Handle PCI domains appropriately.
  intel-iommu: Fix device-to-iommu mapping for PCI-PCI bridges.
  x2apic/intr-remap: decouple interrupt remapping from x2apic
  x86, dmar: check if it's initialized before disable queue invalidation
  intel-iommu: set compatibility format interrupt
  Intel IOMMU Suspend/Resume Support - Interrupt Remapping
  Intel IOMMU Suspend/Resume Support - Queued Invalidation
  Intel IOMMU Suspend/Resume Support - DMAR
  intel-iommu: Add for_each_iommu() and for_each_active_iommu() macros
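The last item in that list adds the iteration helpers that the suspend/resume code below relies on. As a hedged sketch (the authoritative definitions live in the dmar header; treat this as an approximation of that commit), both macros walk the global DRHD list, with the "active" variant skipping units marked ignored:

	/* Approximate definitions of the helpers named in the changelog. */
	#define for_each_iommu(i, drhd)					\
		list_for_each_entry(drhd, &dmar_drhd_units, list)	\
			if (i = drhd->iommu, 0) {} else

	#define for_each_active_iommu(i, drhd)				\
		list_for_each_entry(drhd, &dmar_drhd_units, list)	\
			if (i = drhd->iommu, drhd->ignored) {} else

The comma operator assigns the iommu pointer as a side effect, and the dangling if/else keeps the macro safe to use as a single statement in front of a loop body.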
Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/dmar.c	71
-rw-r--r--	drivers/pci/intel-iommu.c	239
-rw-r--r--	drivers/pci/intr_remapping.c	84
3 files changed, 348 insertions(+), 46 deletions(-)
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index d313039e2fdf..25a00ce4f24d 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -180,6 +180,7 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
dmaru->hdr = header;
drhd = (struct acpi_dmar_hardware_unit *)header;
dmaru->reg_base_addr = drhd->address;
+ dmaru->segment = drhd->segment;
dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
ret = alloc_iommu(dmaru);
@@ -790,14 +791,41 @@ end:
}
/*
+ * Enable queued invalidation.
+ */
+static void __dmar_enable_qi(struct intel_iommu *iommu)
+{
+ u32 cmd, sts;
+ unsigned long flags;
+ struct q_inval *qi = iommu->qi;
+
+ qi->free_head = qi->free_tail = 0;
+ qi->free_cnt = QI_LENGTH;
+
+ spin_lock_irqsave(&iommu->register_lock, flags);
+
+ /* write zero to the tail reg */
+ writel(0, iommu->reg + DMAR_IQT_REG);
+
+ dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
+
+ cmd = iommu->gcmd | DMA_GCMD_QIE;
+ iommu->gcmd |= DMA_GCMD_QIE;
+ writel(cmd, iommu->reg + DMAR_GCMD_REG);
+
+ /* Make sure hardware complete it */
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+/*
* Enable Queued Invalidation interface. This is a must to support
* interrupt-remapping. Also used by DMA-remapping, which replaces
* register based IOTLB invalidation.
*/
int dmar_enable_qi(struct intel_iommu *iommu)
{
- u32 cmd, sts;
- unsigned long flags;
struct q_inval *qi;
if (!ecap_qis(iommu->ecap))
@@ -835,19 +863,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
spin_lock_init(&qi->q_lock);
- spin_lock_irqsave(&iommu->register_lock, flags);
- /* write zero to the tail reg */
- writel(0, iommu->reg + DMAR_IQT_REG);
-
- dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
-
- cmd = iommu->gcmd | DMA_GCMD_QIE;
- iommu->gcmd |= DMA_GCMD_QIE;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
-
- /* Make sure hardware complete it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ __dmar_enable_qi(iommu);
return 0;
}
@@ -1102,3 +1118,28 @@ int __init enable_drhd_fault_handling(void)
return 0;
}
+
+/*
+ * Re-enable Queued Invalidation interface.
+ */
+int dmar_reenable_qi(struct intel_iommu *iommu)
+{
+ if (!ecap_qis(iommu->ecap))
+ return -ENOENT;
+
+ if (!iommu->qi)
+ return -ENOENT;
+
+ /*
+ * First disable queued invalidation.
+ */
+ dmar_disable_qi(iommu);
+ /*
+ * Then enable queued invalidation again. Since there is no pending
+ * invalidation requests now, it's safe to re-enable queued
+ * invalidation.
+ */
+ __dmar_enable_qi(iommu);
+
+ return 0;
+}
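The refactoring above splits queue setup in two: dmar_enable_qi() allocates the invalidation queue once at boot, while __dmar_enable_qi() only programs the registers, which is exactly what a resume path needs since the queue pages survive suspend. A minimal caller sketch of the two entry points (the error handling here is illustrative, not taken from the kernel):

	/* Boot: allocate the invalidation queue, then program the hardware. */
	if (dmar_enable_qi(iommu))
		printk(KERN_WARNING "IOMMU: queued invalidation unavailable, "
		       "falling back to register-based invalidation\n");

	/* Resume: queue memory is intact; only the registers need reprogramming. */
	if (dmar_reenable_qi(iommu))
		printk(KERN_WARNING "IOMMU: failed to re-enable queued invalidation\n");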
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 23e56a564e05..dcda5212f3bb 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -36,6 +36,7 @@
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
+#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"
@@ -247,7 +248,8 @@ struct dmar_domain {
struct device_domain_info {
struct list_head link; /* link to domain siblings */
struct list_head global; /* link to global list */
- u8 bus; /* PCI bus numer */
+ int segment; /* PCI domain */
+ u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
struct dmar_domain *domain; /* pointer to domain */
@@ -467,7 +469,7 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
domain_update_iommu_snooping(domain);
}
-static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
+static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
struct dmar_drhd_unit *drhd = NULL;
int i;
@@ -475,12 +477,20 @@ static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;
+ if (segment != drhd->segment)
+ continue;
- for (i = 0; i < drhd->devices_cnt; i++)
+ for (i = 0; i < drhd->devices_cnt; i++) {
if (drhd->devices[i] &&
drhd->devices[i]->bus->number == bus &&
drhd->devices[i]->devfn == devfn)
return drhd->iommu;
+ if (drhd->devices[i] &&
+ drhd->devices[i]->subordinate &&
+ drhd->devices[i]->subordinate->number <= bus &&
+ drhd->devices[i]->subordinate->subordinate >= bus)
+ return drhd->iommu;
+ }
if (drhd->include_all)
return drhd->iommu;
@@ -1312,7 +1322,7 @@ static void domain_exit(struct dmar_domain *domain)
}
static int domain_context_mapping_one(struct dmar_domain *domain,
- u8 bus, u8 devfn)
+ int segment, u8 bus, u8 devfn)
{
struct context_entry *context;
unsigned long flags;
@@ -1327,7 +1337,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
BUG_ON(!domain->pgd);
- iommu = device_to_iommu(bus, devfn);
+ iommu = device_to_iommu(segment, bus, devfn);
if (!iommu)
return -ENODEV;
@@ -1417,8 +1427,8 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
int ret;
struct pci_dev *tmp, *parent;
- ret = domain_context_mapping_one(domain, pdev->bus->number,
- pdev->devfn);
+ ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
+ pdev->bus->number, pdev->devfn);
if (ret)
return ret;
@@ -1429,18 +1439,23 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
/* Secondary interface's bus number and devfn 0 */
parent = pdev->bus->self;
while (parent != tmp) {
- ret = domain_context_mapping_one(domain, parent->bus->number,
- parent->devfn);
+ ret = domain_context_mapping_one(domain,
+ pci_domain_nr(parent->bus),
+ parent->bus->number,
+ parent->devfn);
if (ret)
return ret;
parent = parent->bus->self;
}
if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
return domain_context_mapping_one(domain,
- tmp->subordinate->number, 0);
+ pci_domain_nr(tmp->subordinate),
+ tmp->subordinate->number, 0);
else /* this is a legacy PCI bridge */
return domain_context_mapping_one(domain,
- tmp->bus->number, tmp->devfn);
+ pci_domain_nr(tmp->bus),
+ tmp->bus->number,
+ tmp->devfn);
}
static int domain_context_mapped(struct pci_dev *pdev)
@@ -1449,12 +1464,12 @@ static int domain_context_mapped(struct pci_dev *pdev)
struct pci_dev *tmp, *parent;
struct intel_iommu *iommu;
- iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+ iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
+ pdev->devfn);
if (!iommu)
return -ENODEV;
- ret = device_context_mapped(iommu,
- pdev->bus->number, pdev->devfn);
+ ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
if (!ret)
return ret;
/* dependent device mapping */
@@ -1465,17 +1480,17 @@ static int domain_context_mapped(struct pci_dev *pdev)
parent = pdev->bus->self;
while (parent != tmp) {
ret = device_context_mapped(iommu, parent->bus->number,
- parent->devfn);
+ parent->devfn);
if (!ret)
return ret;
parent = parent->bus->self;
}
if (tmp->is_pcie)
- return device_context_mapped(iommu,
- tmp->subordinate->number, 0);
+ return device_context_mapped(iommu, tmp->subordinate->number,
+ 0);
else
- return device_context_mapped(iommu,
- tmp->bus->number, tmp->devfn);
+ return device_context_mapped(iommu, tmp->bus->number,
+ tmp->devfn);
}
static int
@@ -1542,7 +1557,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
info->dev->dev.archdata.iommu = NULL;
spin_unlock_irqrestore(&device_domain_lock, flags);
- iommu = device_to_iommu(info->bus, info->devfn);
+ iommu = device_to_iommu(info->segment, info->bus, info->devfn);
iommu_detach_dev(iommu, info->bus, info->devfn);
free_devinfo_mem(info);
@@ -1577,11 +1592,14 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
struct pci_dev *dev_tmp;
unsigned long flags;
int bus = 0, devfn = 0;
+ int segment;
domain = find_domain(pdev);
if (domain)
return domain;
+ segment = pci_domain_nr(pdev->bus);
+
dev_tmp = pci_find_upstream_pcie_bridge(pdev);
if (dev_tmp) {
if (dev_tmp->is_pcie) {
@@ -1593,7 +1611,8 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
}
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry(info, &device_domain_list, global) {
- if (info->bus == bus && info->devfn == devfn) {
+ if (info->segment == segment &&
+ info->bus == bus && info->devfn == devfn) {
found = info->domain;
break;
}
@@ -1631,6 +1650,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
domain_exit(domain);
goto error;
}
+ info->segment = segment;
info->bus = bus;
info->devfn = devfn;
info->dev = NULL;
@@ -1642,7 +1662,8 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
found = NULL;
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry(tmp, &device_domain_list, global) {
- if (tmp->bus == bus && tmp->devfn == devfn) {
+ if (tmp->segment == segment &&
+ tmp->bus == bus && tmp->devfn == devfn) {
found = tmp->domain;
break;
}
@@ -1662,6 +1683,7 @@ found_domain:
info = alloc_devinfo_mem();
if (!info)
goto error;
+ info->segment = segment;
info->bus = pdev->bus->number;
info->devfn = pdev->devfn;
info->dev = pdev;
@@ -1946,6 +1968,15 @@ static int __init init_dmars(void)
}
}
+#ifdef CONFIG_INTR_REMAP
+ if (!intr_remapping_enabled) {
+ ret = enable_intr_remapping(0);
+ if (ret)
+ printk(KERN_ERR
+ "IOMMU: enable interrupt remapping failed\n");
+ }
+#endif
+
/*
* For each rmrr
* for each dev attached to rmrr
@@ -2597,6 +2628,150 @@ static void __init init_no_remapping_devices(void)
}
}
+#ifdef CONFIG_SUSPEND
+static int init_iommu_hw(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu = NULL;
+
+ for_each_active_iommu(iommu, drhd)
+ if (iommu->qi)
+ dmar_reenable_qi(iommu);
+
+ for_each_active_iommu(iommu, drhd) {
+ iommu_flush_write_buffer(iommu);
+
+ iommu_set_root_entry(iommu);
+
+ iommu->flush.flush_context(iommu, 0, 0, 0,
+ DMA_CCMD_GLOBAL_INVL, 0);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+ DMA_TLB_GLOBAL_FLUSH, 0);
+ iommu_disable_protect_mem_regions(iommu);
+ iommu_enable_translation(iommu);
+ }
+
+ return 0;
+}
+
+static void iommu_flush_all(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+
+ for_each_active_iommu(iommu, drhd) {
+ iommu->flush.flush_context(iommu, 0, 0, 0,
+ DMA_CCMD_GLOBAL_INVL, 0);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+ DMA_TLB_GLOBAL_FLUSH, 0);
+ }
+}
+
+static int iommu_suspend(struct sys_device *dev, pm_message_t state)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu = NULL;
+ unsigned long flag;
+
+ for_each_active_iommu(iommu, drhd) {
+ iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
+ GFP_ATOMIC);
+ if (!iommu->iommu_state)
+ goto nomem;
+ }
+
+ iommu_flush_all();
+
+ for_each_active_iommu(iommu, drhd) {
+ iommu_disable_translation(iommu);
+
+ spin_lock_irqsave(&iommu->register_lock, flag);
+
+ iommu->iommu_state[SR_DMAR_FECTL_REG] =
+ readl(iommu->reg + DMAR_FECTL_REG);
+ iommu->iommu_state[SR_DMAR_FEDATA_REG] =
+ readl(iommu->reg + DMAR_FEDATA_REG);
+ iommu->iommu_state[SR_DMAR_FEADDR_REG] =
+ readl(iommu->reg + DMAR_FEADDR_REG);
+ iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
+ readl(iommu->reg + DMAR_FEUADDR_REG);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+ }
+ return 0;
+
+nomem:
+ for_each_active_iommu(iommu, drhd)
+ kfree(iommu->iommu_state);
+
+ return -ENOMEM;
+}
+
+static int iommu_resume(struct sys_device *dev)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu = NULL;
+ unsigned long flag;
+
+ if (init_iommu_hw()) {
+ WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
+ return -EIO;
+ }
+
+ for_each_active_iommu(iommu, drhd) {
+
+ spin_lock_irqsave(&iommu->register_lock, flag);
+
+ writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
+ iommu->reg + DMAR_FECTL_REG);
+ writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
+ iommu->reg + DMAR_FEDATA_REG);
+ writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
+ iommu->reg + DMAR_FEADDR_REG);
+ writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
+ iommu->reg + DMAR_FEUADDR_REG);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+ }
+
+ for_each_active_iommu(iommu, drhd)
+ kfree(iommu->iommu_state);
+
+ return 0;
+}
+
+static struct sysdev_class iommu_sysclass = {
+ .name = "iommu",
+ .resume = iommu_resume,
+ .suspend = iommu_suspend,
+};
+
+static struct sys_device device_iommu = {
+ .cls = &iommu_sysclass,
+};
+
+static int __init init_iommu_sysfs(void)
+{
+ int error;
+
+ error = sysdev_class_register(&iommu_sysclass);
+ if (error)
+ return error;
+
+ error = sysdev_register(&device_iommu);
+ if (error)
+ sysdev_class_unregister(&iommu_sysclass);
+
+ return error;
+}
+
+#else
+static int __init init_iommu_sysfs(void)
+{
+ return 0;
+}
+#endif /* CONFIG_SUSPEND */
+
int __init intel_iommu_init(void)
{
int ret = 0;
@@ -2632,6 +2807,7 @@ int __init intel_iommu_init(void)
init_timer(&unmap_timer);
force_iommu = 1;
dma_ops = &intel_dma_ops;
+ init_iommu_sysfs();
register_iommu(&intel_iommu_ops);
@@ -2648,6 +2824,7 @@ static int vm_domain_add_dev_info(struct dmar_domain *domain,
if (!info)
return -ENOMEM;
+ info->segment = pci_domain_nr(pdev->bus);
info->bus = pdev->bus->number;
info->devfn = pdev->devfn;
info->dev = pdev;
@@ -2677,15 +2854,15 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
parent = pdev->bus->self;
while (parent != tmp) {
iommu_detach_dev(iommu, parent->bus->number,
- parent->devfn);
+ parent->devfn);
parent = parent->bus->self;
}
if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
iommu_detach_dev(iommu,
tmp->subordinate->number, 0);
else /* this is a legacy PCI bridge */
- iommu_detach_dev(iommu,
- tmp->bus->number, tmp->devfn);
+ iommu_detach_dev(iommu, tmp->bus->number,
+ tmp->devfn);
}
}
@@ -2698,13 +2875,15 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
int found = 0;
struct list_head *entry, *tmp;
- iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+ iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
+ pdev->devfn);
if (!iommu)
return;
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_safe(entry, tmp, &domain->devices) {
info = list_entry(entry, struct device_domain_info, link);
+ /* No need to compare PCI domain; it has to be the same */
if (info->bus == pdev->bus->number &&
info->devfn == pdev->devfn) {
list_del(&info->link);
@@ -2729,7 +2908,8 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
* owned by this domain, clear this iommu in iommu_bmp
* update iommu count and coherency
*/
- if (device_to_iommu(info->bus, info->devfn) == iommu)
+ if (iommu == device_to_iommu(info->segment, info->bus,
+ info->devfn))
found = 1;
}
@@ -2762,7 +2942,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
spin_unlock_irqrestore(&device_domain_lock, flags1);
- iommu = device_to_iommu(info->bus, info->devfn);
+ iommu = device_to_iommu(info->segment, info->bus, info->devfn);
iommu_detach_dev(iommu, info->bus, info->devfn);
iommu_detach_dependent_devices(iommu, info->dev);
@@ -2950,7 +3130,8 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
}
}
- iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+ iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
+ pdev->devfn);
if (!iommu)
return -ENODEV;
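Taken together, these hunks key every IOMMU lookup on the (segment, bus, devfn) triple instead of (bus, devfn) alone, so identical bus numbers in different PCI domains no longer alias. The calling pattern, mirroring the call sites in the diff (the -ENODEV fallback matches intel_iommu_attach_device() above):

	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus),	/* PCI segment (domain) */
				pdev->bus->number,		/* bus within that segment */
				pdev->devfn);			/* slot/function */
	if (!iommu)
		return -ENODEV;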
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index b041a409f4a7..f5e0ea724a6f 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -9,6 +9,7 @@
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
+#include <acpi/acpi.h>
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
@@ -415,12 +416,27 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
/* Set interrupt-remapping table pointer */
cmd = iommu->gcmd | DMA_GCMD_SIRTP;
+ iommu->gcmd |= DMA_GCMD_SIRTP;
writel(cmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRTPS), sts);
spin_unlock_irqrestore(&iommu->register_lock, flags);
+ if (mode == 0) {
+ spin_lock_irqsave(&iommu->register_lock, flags);
+
+ /* enable compatibility format interrupt pass-through */
+ cmd = iommu->gcmd | DMA_GCMD_CFI;
+ iommu->gcmd |= DMA_GCMD_CFI;
+ writel(cmd, iommu->reg + DMAR_GCMD_REG);
+
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, (sts & DMA_GSTS_CFIS), sts);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
+ }
+
/*
* global invalidation of interrupt entry cache before enabling
* interrupt-remapping.
@@ -470,7 +486,7 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
/*
* Disable Interrupt Remapping.
*/
-static void disable_intr_remapping(struct intel_iommu *iommu)
+static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
unsigned long flags;
u32 sts;
@@ -478,6 +494,12 @@ static void disable_intr_remapping(struct intel_iommu *iommu)
if (!ecap_ir_support(iommu->ecap))
return;
+ /*
+ * global invalidation of interrupt entry cache before disabling
+ * interrupt-remapping.
+ */
+ qi_global_iec(iommu);
+
spin_lock_irqsave(&iommu->register_lock, flags);
sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
@@ -503,6 +525,13 @@ int __init enable_intr_remapping(int eim)
struct intel_iommu *iommu = drhd->iommu;
/*
+ * If the queued invalidation is already initialized,
+ * shouldn't disable it.
+ */
+ if (iommu->qi)
+ continue;
+
+ /*
* Clear previous faults.
*/
dmar_fault(-1, iommu);
@@ -511,7 +540,7 @@ int __init enable_intr_remapping(int eim)
* Disable intr remapping and queued invalidation, if already
* enabled prior to OS handover.
*/
- disable_intr_remapping(iommu);
+ iommu_disable_intr_remapping(iommu);
dmar_disable_qi(iommu);
}
@@ -639,3 +668,54 @@ int __init parse_ioapics_under_ir(void)
return ir_supported;
}
+
+void disable_intr_remapping(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu = NULL;
+
+ /*
+ * Disable Interrupt-remapping for all the DRHD's now.
+ */
+ for_each_iommu(iommu, drhd) {
+ if (!ecap_ir_support(iommu->ecap))
+ continue;
+
+ iommu_disable_intr_remapping(iommu);
+ }
+}
+
+int reenable_intr_remapping(int eim)
+{
+ struct dmar_drhd_unit *drhd;
+ int setup = 0;
+ struct intel_iommu *iommu = NULL;
+
+ for_each_iommu(iommu, drhd)
+ if (iommu->qi)
+ dmar_reenable_qi(iommu);
+
+ /*
+ * Setup Interrupt-remapping for all the DRHD's now.
+ */
+ for_each_iommu(iommu, drhd) {
+ if (!ecap_ir_support(iommu->ecap))
+ continue;
+
+ /* Set up interrupt remapping for iommu.*/
+ iommu_set_intr_remapping(iommu, eim);
+ setup = 1;
+ }
+
+ if (!setup)
+ goto error;
+
+ return 0;
+
+error:
+ /*
+ * handle error condition gracefully here!
+ */
+ return -1;
+}
+
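disable_intr_remapping() and reenable_intr_remapping() are the external suspend/resume entry points for interrupt remapping. A hedged pairing sketch (the hook names below are hypothetical; the real callers sit in the x86 APIC suspend/resume code):

	static void example_suspend(void)		/* hypothetical hook */
	{
		if (intr_remapping_enabled)
			disable_intr_remapping();	/* quiesce every DRHD */
	}

	static void example_resume(int eim)		/* hypothetical hook */
	{
		if (intr_remapping_enabled)
			reenable_intr_remapping(eim);	/* eim: 1 = x2apic, 0 = xapic */
	}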