path: root/arch/ppc64/kernel
author     Olof Johansson <olof@lixom.net>  2005-09-21 09:55:31 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-09-21 10:11:54 -0700
commit     3c2822ccb1f8cc96fc006aa82e68e1944290014a (patch)
tree       3834b0004ef12b96f699529ca8008a0644531fa3 /arch/ppc64/kernel
parent     31f6d9d628739c097964b8dbae939ea997da94a3 (diff)
download   blackbird-op-linux-3c2822ccb1f8cc96fc006aa82e68e1944290014a.tar.gz
           blackbird-op-linux-3c2822ccb1f8cc96fc006aa82e68e1944290014a.zip
[PATCH] PPC64: Fix boot for some pre-POWER4 systems
Some RS64 systems (such as the F80) have non-python host bridges with EADS. However, they have two EADS with 4 buses each under them, so the old logic that assumed no more than 7 buses per PHB failed miserably.

Big thanks to Olaf Hering for helping me test this; he has one of the few machines that broke with the previous logic.

Also, be a bit smarter about detecting the need for a PHB-level IOMMU table by checking for the presence of an ISA bus. Only PHBs with ISA bridges should need the PHB-level table.

Signed-off-by: Olof Johansson <olof@lixom.net>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/ppc64/kernel')
-rw-r--r--  arch/ppc64/kernel/pSeries_iommu.c  169
1 file changed, 89 insertions, 80 deletions
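
The core of the fix is visible in the second hunk below: the 2GB of 32-bit DMA space is split into the largest power-of-two window that still fits all direct children of the PHB, with 256MB held back when an ISA/IDE bridge sits under the PHB. The following stand-alone sketch reproduces that sizing rule; the helper name and the main() harness are illustrative only, in the kernel the loop lives inline in iommu_bus_setup_pSeries(), and the sketch assumes a 64-bit unsigned long as on ppc64.

/*
 * Stand-alone sketch of the per-slot DMA window sizing introduced by
 * this patch.  Not kernel code; dma_window_per_slot() is a made-up
 * helper that mirrors the inline loop in iommu_bus_setup_pSeries().
 */
#include <stdio.h>

static unsigned long dma_window_per_slot(unsigned long children, int has_isa)
{
	/* 256MB is reserved for the PHB-level ISA/IDE table when present. */
	unsigned long limit = has_isa ? 0x70000000ul : 0x80000000ul;
	unsigned long size = 0x80000000ul;	/* start from the full 2GB */

	/* Halve until every direct child of the PHB gets an equal,
	 * power-of-two window and the total still fits under the limit.
	 * Assumes 64-bit unsigned long, as on ppc64. */
	while (size * children > limit)
		size >>= 1;

	return size;
}

int main(void)
{
	/* Two EADS with four buses each, as on the F80 described above. */
	printf("8 children, no ISA: 0x%lx\n", dma_window_per_slot(8, 0));
	printf("8 children, ISA:    0x%lx\n", dma_window_per_slot(8, 1));
	return 0;
}

With eight children this yields a 256MB window per slot, or 128MB once the ISA reservation is accounted for, rather than the fixed seven-window layout the old code assumed.
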
diff --git a/arch/ppc64/kernel/pSeries_iommu.c b/arch/ppc64/kernel/pSeries_iommu.c
index f0fd7fbd6531..8c6313e7e145 100644
--- a/arch/ppc64/kernel/pSeries_iommu.c
+++ b/arch/ppc64/kernel/pSeries_iommu.c
@@ -265,8 +265,10 @@ static void iommu_table_setparms(struct pci_controller *phb,
tbl->it_offset = phb->dma_window_base_cur >> PAGE_SHIFT;
/* Test if we are going over 2GB of DMA space */
- if (phb->dma_window_base_cur + phb->dma_window_size > (1L << 31))
+ if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
+ udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
+ }
phb->dma_window_base_cur += phb->dma_window_size;
@@ -310,92 +312,84 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
static void iommu_bus_setup_pSeries(struct pci_bus *bus)
{
- struct device_node *dn, *pdn;
- struct pci_dn *pci;
+ struct device_node *dn;
struct iommu_table *tbl;
+ struct device_node *isa_dn, *isa_dn_orig;
+ struct device_node *tmp;
+ struct pci_dn *pci;
+ int children;
DBG("iommu_bus_setup_pSeries, bus %p, bus->self %p\n", bus, bus->self);
- /* For each (root) bus, we carve up the available DMA space in 256MB
- * pieces. Since each piece is used by one (sub) bus/device, that would
- * give a maximum of 7 devices per PHB. In most cases, this is plenty.
- *
- * The exception is on Python PHBs (pre-POWER4). Here we don't have EADS
- * bridges below the PHB to allocate the sectioned tables to, so instead
- * we allocate a 1GB table at the PHB level.
+ dn = pci_bus_to_OF_node(bus);
+ pci = PCI_DN(dn);
+
+ if (bus->self) {
+ /* This is not a root bus, any setup will be done for the
+ * device-side of the bridge in iommu_dev_setup_pSeries().
+ */
+ return;
+ }
+
+ /* Check if the ISA bus on the system is under
+ * this PHB.
*/
+ isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");
- dn = pci_bus_to_OF_node(bus);
- pci = dn->data;
-
- if (!bus->self) {
- /* Root bus */
- if (is_python(dn)) {
- unsigned int *iohole;
-
- DBG("Python root bus %s\n", bus->name);
-
- iohole = (unsigned int *)get_property(dn, "io-hole", 0);
-
- if (iohole) {
- /* On first bus we need to leave room for the
- * ISA address space. Just skip the first 256MB
- * alltogether. This leaves 768MB for the window.
- */
- DBG("PHB has io-hole, reserving 256MB\n");
- pci->phb->dma_window_size = 3 << 28;
- pci->phb->dma_window_base_cur = 1 << 28;
- } else {
- /* 1GB window by default */
- pci->phb->dma_window_size = 1 << 30;
- pci->phb->dma_window_base_cur = 0;
- }
-
- tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
-
- iommu_table_setparms(pci->phb, dn, tbl);
- pci->iommu_table = iommu_init_table(tbl);
- } else {
- /* Do a 128MB table at root. This is used for the IDE
- * controller on some SMP-mode POWER4 machines. It
- * doesn't hurt to allocate it on other machines
- * -- it'll just be unused since new tables are
- * allocated on the EADS level.
- *
- * Allocate at offset 128MB to avoid having to deal
- * with ISA holes; 128MB table for IDE is plenty.
- */
- pci->phb->dma_window_size = 1 << 27;
- pci->phb->dma_window_base_cur = 1 << 27;
-
- tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
-
- iommu_table_setparms(pci->phb, dn, tbl);
- pci->iommu_table = iommu_init_table(tbl);
-
- /* All child buses have 256MB tables */
- pci->phb->dma_window_size = 1 << 28;
- }
- } else {
- pdn = pci_bus_to_OF_node(bus->parent);
+ while (isa_dn && isa_dn != dn)
+ isa_dn = isa_dn->parent;
+
+ if (isa_dn_orig)
+ of_node_put(isa_dn_orig);
- if (!bus->parent->self && !is_python(pdn)) {
- struct iommu_table *tbl;
- /* First child and not python means this is the EADS
- * level. Allocate new table for this slot with 256MB
- * window.
- */
+ /* Count number of direct PCI children of the PHB.
+ * All PCI device nodes have class-code property, so it's
+ * an easy way to find them.
+ */
+ for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
+ if (get_property(tmp, "class-code", NULL))
+ children++;
- tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
+ DBG("Children: %d\n", children);
- iommu_table_setparms(pci->phb, dn, tbl);
+ /* Calculate amount of DMA window per slot. Each window must be
+ * a power of two (due to pci_alloc_consistent requirements).
+ *
+ * Keep 256MB aside for PHBs with ISA.
+ */
- pci->iommu_table = iommu_init_table(tbl);
- } else {
- /* Lower than first child or under python, use parent table */
- pci->iommu_table = PCI_DN(pdn)->iommu_table;
- }
+ if (!isa_dn) {
+ /* No ISA/IDE - just set window size and return */
+ pci->phb->dma_window_size = 0x80000000ul; /* To be divided */
+
+ while (pci->phb->dma_window_size * children > 0x80000000ul)
+ pci->phb->dma_window_size >>= 1;
+ DBG("No ISA/IDE, window size is %x\n", pci->phb->dma_window_size);
+ pci->phb->dma_window_base_cur = 0;
+
+ return;
}
+
+ /* If we have ISA, then we probably have an IDE
+ * controller too. Allocate a 128MB table but
+ * skip the first 128MB to avoid stepping on ISA
+ * space.
+ */
+ pci->phb->dma_window_size = 0x8000000ul;
+ pci->phb->dma_window_base_cur = 0x8000000ul;
+
+ tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
+
+ iommu_table_setparms(pci->phb, dn, tbl);
+ pci->iommu_table = iommu_init_table(tbl);
+
+ /* Divide the rest (1.75GB) among the children */
+ pci->phb->dma_window_size = 0x80000000ul;
+ while (pci->phb->dma_window_size * children > 0x70000000ul)
+ pci->phb->dma_window_size >>= 1;
+
+ DBG("ISA/IDE, window size is %x\n", pci->phb->dma_window_size);
+
}
@@ -446,14 +440,29 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
static void iommu_dev_setup_pSeries(struct pci_dev *dev)
{
struct device_node *dn, *mydn;
+ struct iommu_table *tbl;
DBG("iommu_dev_setup_pSeries, dev %p (%s)\n", dev, dev->pretty_name);
- /* Now copy the iommu_table ptr from the bus device down to the
- * pci device_node. This means get_iommu_table() won't need to search
- * up the device tree to find it.
- */
+
mydn = dn = pci_device_to_OF_node(dev);
+ /* If we're the direct child of a root bus, then we need to allocate
+ * an iommu table ourselves. The bus setup code should have setup
+ * the window sizes already.
+ */
+ if (!dev->bus->self) {
+ DBG(" --> first child, no bridge. Allocating iommu table.\n");
+ tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
+ iommu_table_setparms(PCI_DN(dn)->phb, dn, tbl);
+ PCI_DN(mydn)->iommu_table = iommu_init_table(tbl);
+
+ return;
+ }
+
+ /* If this device is further down the bus tree, search upwards until
+ * an already allocated iommu table is found and use that.
+ */
+
while (dn && dn->data && PCI_DN(dn)->iommu_table == NULL)
dn = dn->parent;
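
For devices deeper in the bus hierarchy, the last hunk falls back to walking up the Open Firmware tree until it reaches a node whose iommu_table has already been allocated, and shares that table. A simplified, self-contained illustration of that walk follows; struct sketch_node is a stand-in for the kernel's struct device_node / struct pci_dn pair, not a real kernel type.

/*
 * Simplified sketch of the parent-table lookup used above.  The types
 * here are stand-ins; the kernel walks struct device_node and checks
 * PCI_DN(dn)->iommu_table instead.
 */
#include <stddef.h>

struct sketch_node {
	struct sketch_node *parent;
	void *iommu_table;	/* NULL until bus/device setup allocates one */
};

static void *find_parent_table(struct sketch_node *dn)
{
	/* Mirrors: while (dn && dn->data && PCI_DN(dn)->iommu_table == NULL)
	 *                  dn = dn->parent;                                  */
	while (dn && dn->iommu_table == NULL)
		dn = dn->parent;

	return dn ? dn->iommu_table : NULL;
}
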