Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/powermac/low_i2c.c     2
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c        83
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c    37
-rw-r--r--  arch/powerpc/platforms/powernv/vas-window.c  26
4 files changed, 94 insertions, 54 deletions
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index fa89f30e7f27..d4d411820597 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -1192,7 +1192,7 @@ static void pmac_i2c_devscan(void (*callback)(struct device_node *dev,
 		{ NULL, NULL, 0 },
 	};
 
-	/* Only some devices need to have platform functions instanciated
+	/* Only some devices need to have platform functions instantiated
 	 * here. For now, we have a table. Others, like 9554 i2c GPIOs used
 	 * on Xserve, if we ever do a driver for them, will use their own
 	 * platform function instance
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 404c379db168..38fe4087484a 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -370,12 +370,8 @@ static int __opal_put_chars(uint32_t vtermno, const char *data, int total_len, b
 	olen = cpu_to_be64(total_len);
 	rc = opal_console_write(vtermno, &olen, data);
 	if (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
-		if (rc == OPAL_BUSY_EVENT) {
-			mdelay(OPAL_BUSY_DELAY_MS);
+		if (rc == OPAL_BUSY_EVENT)
 			opal_poll_events(NULL);
-		} else if (rc == OPAL_BUSY) {
-			mdelay(OPAL_BUSY_DELAY_MS);
-		}
 		written = -EAGAIN;
 		goto out;
 	}
@@ -401,15 +397,6 @@ out:
 	if (atomic)
 		spin_unlock_irqrestore(&opal_write_lock, flags);
 
-	/* In the -EAGAIN case, callers loop, so we have to flush the console
-	 * here in case they have interrupts off (and we don't want to wait
-	 * for async flushing if we can make immediate progress here). If
-	 * necessary the API could be made entirely non-flushing if the
-	 * callers had a ->flush API to use.
-	 */
-	if (written == -EAGAIN)
-		opal_flush_console(vtermno);
-
 	return written;
 }
@@ -429,40 +416,74 @@ int opal_put_chars_atomic(uint32_t vtermno, const char *data, int total_len)
 	return __opal_put_chars(vtermno, data, total_len, true);
 }
 
-int opal_flush_console(uint32_t vtermno)
+static s64 __opal_flush_console(uint32_t vtermno)
 {
 	s64 rc;
 
 	if (!opal_check_token(OPAL_CONSOLE_FLUSH)) {
 		__be64 evt;
 
-		WARN_ONCE(1, "opal: OPAL_CONSOLE_FLUSH missing.\n");
 		/*
 		 * If OPAL_CONSOLE_FLUSH is not implemented in the firmware,
 		 * the console can still be flushed by calling the polling
 		 * function while it has OPAL_EVENT_CONSOLE_OUTPUT events.
 		 */
-		do {
-			opal_poll_events(&evt);
-		} while (be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT);
+		WARN_ONCE(1, "opal: OPAL_CONSOLE_FLUSH missing.\n");
+
+		opal_poll_events(&evt);
+		if (!(be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT))
+			return OPAL_SUCCESS;
+		return OPAL_BUSY;
 
-		return OPAL_SUCCESS;
+	} else {
+		rc = opal_console_flush(vtermno);
+		if (rc == OPAL_BUSY_EVENT) {
+			opal_poll_events(NULL);
+			rc = OPAL_BUSY;
+		}
+		return rc;
 	}
-
-	do {
-		rc = OPAL_BUSY;
-		while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
-			rc = opal_console_flush(vtermno);
-			if (rc == OPAL_BUSY_EVENT) {
-				mdelay(OPAL_BUSY_DELAY_MS);
-				opal_poll_events(NULL);
-			} else if (rc == OPAL_BUSY) {
-				mdelay(OPAL_BUSY_DELAY_MS);
+}
+
+/*
+ * opal_flush_console spins until the console is flushed
+ */
+int opal_flush_console(uint32_t vtermno)
+{
+	for (;;) {
+		s64 rc = __opal_flush_console(vtermno);
+
+		if (rc == OPAL_BUSY || rc == OPAL_PARTIAL) {
+			mdelay(1);
+			continue;
+		}
+
+		return opal_error_code(rc);
+	}
+}
+
+/*
+ * opal_flush_chars is an hvc interface that sleeps until the console is
+ * flushed if wait, otherwise it will return -EBUSY if the console has data,
+ * -EAGAIN if it has data and some of it was flushed.
+ */
+int opal_flush_chars(uint32_t vtermno, bool wait)
+{
+	for (;;) {
+		s64 rc = __opal_flush_console(vtermno);
+
+		if (rc == OPAL_BUSY || rc == OPAL_PARTIAL) {
+			if (wait) {
+				msleep(OPAL_BUSY_DELAY_MS);
+				continue;
 			}
+			if (rc == OPAL_PARTIAL)
+				return -EAGAIN;
 		}
-	} while (rc == OPAL_PARTIAL); /* More to flush */
 
-	return opal_error_code(rc);
+		return opal_error_code(rc);
+	}
 }
 
 static int opal_recover_mce(struct pt_regs *regs,
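
The net effect of the opal.c changes above: __opal_put_chars() no longer delays or flushes on a busy firmware buffer, it simply returns -EAGAIN, and callers are expected to flush and retry themselves, spinning via opal_flush_console() or sleeping (or bailing out) via opal_flush_chars(). A minimal sketch of the resulting caller-side loop, assuming only the OPAL calls shown in the diff; example_console_write() and its retry policy are illustrative, not part of the patch:

/* Sketch only: how a console caller might drive the new interface.
 * opal_put_chars() returns the byte count written or -EAGAIN when the
 * firmware buffer is full; opal_flush_chars(vtermno, false) makes one
 * non-sleeping flush attempt.
 */
static int example_console_write(uint32_t vtermno, const char *buf, int count)
{
	int done = 0;

	while (done < count) {
		int rc = opal_put_chars(vtermno, buf + done, count - done);

		if (rc == -EAGAIN) {
			/* Buffer full: flush without sleeping, then retry.
			 * -EBUSY/-EAGAIN from the flush just mean "not
			 * drained yet", which is fine here.
			 */
			opal_flush_chars(vtermno, false);
			continue;
		}
		if (rc < 0)
			return rc;	/* hard error */
		if (rc == 0)
			break;		/* no progress, give up */
		done += rc;
	}
	return done;
}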
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 4e6302bf4073..cde710297a4e 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -3228,12 +3228,49 @@ static void pnv_pci_ioda_create_dbgfs(void)
 #endif /* CONFIG_DEBUG_FS */
 }
 
+static void pnv_pci_enable_bridge(struct pci_bus *bus)
+{
+	struct pci_dev *dev = bus->self;
+	struct pci_bus *child;
+
+	/* Empty bus ? bail */
+	if (list_empty(&bus->devices))
+		return;
+
+	/*
+	 * If there's a bridge associated with that bus enable it. This works
+	 * around races in the generic code if the enabling is done during
+	 * parallel probing. This can be removed once those races have been
+	 * fixed.
+	 */
+	if (dev) {
+		int rc = pci_enable_device(dev);
+		if (rc)
+			pci_err(dev, "Error enabling bridge (%d)\n", rc);
+		pci_set_master(dev);
+	}
+
+	/* Perform the same to child busses */
+	list_for_each_entry(child, &bus->children, node)
+		pnv_pci_enable_bridge(child);
+}
+
+static void pnv_pci_enable_bridges(void)
+{
+	struct pci_controller *hose;
+
+	list_for_each_entry(hose, &hose_list, list_node)
+		pnv_pci_enable_bridge(hose->bus);
+}
+
 static void pnv_pci_ioda_fixup(void)
 {
 	pnv_pci_ioda_setup_PEs();
 	pnv_pci_ioda_setup_iommu_api();
 	pnv_pci_ioda_create_dbgfs();
 
+	pnv_pci_enable_bridges();
+
 #ifdef CONFIG_EEH
 	pnv_eeh_post_init();
 #endif
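
The pci-ioda.c hunk enables each bridge before recursing into its children, so by the time any device probes, every bridge above it is already enabled. A toy, self-contained model of that traversal order (the toy_bus type and bus names are made up for illustration; the real code walks struct pci_bus via bus->children):

#include <stdio.h>

/* Toy stand-in for struct pci_bus, just to show the enable order. */
struct toy_bus {
	const char *name;
	struct toy_bus *children[4];	/* NULL-terminated */
};

static void enable_bridge(struct toy_bus *bus)
{
	printf("enable %s\n", bus->name);	/* parent first... */

	for (int i = 0; bus->children[i]; i++)
		enable_bridge(bus->children[i]);	/* ...then its children */
}

int main(void)
{
	struct toy_bus leaf = { "bus 0002", { NULL } };
	struct toy_bus mid  = { "bus 0001", { &leaf, NULL } };
	struct toy_bus root = { "bus 0000", { &mid, NULL } };

	enable_bridge(&root);	/* prints 0000, 0001, 0002 in that order */
	return 0;
}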
diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
index ff9f48812331..e59e0e60e5b5 100644
--- a/arch/powerpc/platforms/powernv/vas-window.c
+++ b/arch/powerpc/platforms/powernv/vas-window.c
@@ -515,35 +515,17 @@ int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx)
 	return 0;
 }
 
-static DEFINE_SPINLOCK(vas_ida_lock);
-
 static void vas_release_window_id(struct ida *ida, int winid)
 {
-	spin_lock(&vas_ida_lock);
-	ida_remove(ida, winid);
-	spin_unlock(&vas_ida_lock);
+	ida_free(ida, winid);
 }
 
 static int vas_assign_window_id(struct ida *ida)
 {
-	int rc, winid;
-
-	do {
-		rc = ida_pre_get(ida, GFP_KERNEL);
-		if (!rc)
-			return -EAGAIN;
-
-		spin_lock(&vas_ida_lock);
-		rc = ida_get_new(ida, &winid);
-		spin_unlock(&vas_ida_lock);
-	} while (rc == -EAGAIN);
-
-	if (rc)
-		return rc;
+	int winid = ida_alloc_max(ida, VAS_WINDOWS_PER_CHIP - 1, GFP_KERNEL);
 
-	if (winid > VAS_WINDOWS_PER_CHIP) {
-		pr_err("Too many (%d) open windows\n", winid);
-		vas_release_window_id(ida, winid);
+	if (winid == -ENOSPC) {
+		pr_err("Too many (%d) open windows\n", VAS_WINDOWS_PER_CHIP);
 		return -EAGAIN;
 	}
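
The vas-window.c conversion works because the newer IDA API is internally locked and range-checked, which is what lets the patch delete vas_ida_lock and the ida_pre_get()/ida_get_new() retry loop. A minimal sketch of the same pattern in isolation (my_ida, my_alloc_id() and MY_MAX_ID are hypothetical; ida_alloc_max() and ida_free() are the kernel APIs used in the diff):

#include <linux/idr.h>

static DEFINE_IDA(my_ida);
#define MY_MAX_ID 63	/* ids are handed out in [0, MY_MAX_ID] */

static int my_alloc_id(void)
{
	/* Self-locking: no external spinlock or pre-get step needed.
	 * Returns -ENOSPC when the range is exhausted, -ENOMEM on
	 * allocation failure, otherwise the new id.
	 */
	return ida_alloc_max(&my_ida, MY_MAX_ID, GFP_KERNEL);
}

static void my_free_id(int id)
{
	ida_free(&my_ida, id);
}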