Diffstat (limited to 'arch/powerpc/platforms/powernv/opal.c')
-rw-r--r--	arch/powerpc/platforms/powernv/opal.c | 214
1 file changed, 149 insertions(+), 65 deletions(-)
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index c15182765ff5..beed86f4224b 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -171,7 +171,7 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
 	/*
 	 * Allocate a buffer to hold the MC recoverable ranges.
 	 */
-	mc_recoverable_range =__va(memblock_alloc(size, __alignof__(u64)));
+	mc_recoverable_range =__va(memblock_phys_alloc(size, __alignof__(u64)));
 	memset(mc_recoverable_range, 0, size);
 
 	for (i = 0; i < mc_recoverable_range_len; i++) {
@@ -344,72 +344,148 @@ int opal_get_chars(uint32_t vtermno, char *buf, int count)
 	return 0;
 }
 
-int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
+static int __opal_put_chars(uint32_t vtermno, const char *data, int total_len, bool atomic)
 {
-	int written = 0;
+	unsigned long flags = 0 /* shut up gcc */;
+	int written;
 	__be64 olen;
-	s64 len, rc;
-	unsigned long flags;
-	__be64 evt;
+	s64 rc;
 
 	if (!opal.entry)
 		return -ENODEV;
 
-	/* We want put_chars to be atomic to avoid mangling of hvsi
-	 * packets. To do that, we first test for room and return
-	 * -EAGAIN if there isn't enough.
-	 *
-	 * Unfortunately, opal_console_write_buffer_space() doesn't
-	 * appear to work on opal v1, so we just assume there is
-	 * enough room and be done with it
-	 */
-	spin_lock_irqsave(&opal_write_lock, flags);
+	if (atomic)
+		spin_lock_irqsave(&opal_write_lock, flags);
 	rc = opal_console_write_buffer_space(vtermno, &olen);
-	len = be64_to_cpu(olen);
-	if (rc || len < total_len) {
-		spin_unlock_irqrestore(&opal_write_lock, flags);
+	if (rc || be64_to_cpu(olen) < total_len) {
 		/* Closed -> drop characters */
 		if (rc)
-			return total_len;
-		opal_poll_events(NULL);
-		return -EAGAIN;
+			written = total_len;
+		else
+			written = -EAGAIN;
+		goto out;
 	}
 
-	/* We still try to handle partial completions, though they
-	 * should no longer happen.
-	 */
-	rc = OPAL_BUSY;
-	while(total_len > 0 && (rc == OPAL_BUSY ||
-				rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
-		olen = cpu_to_be64(total_len);
-		rc = opal_console_write(vtermno, &olen, data);
-		len = be64_to_cpu(olen);
-
-		/* Closed or other error drop */
-		if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
-		    rc != OPAL_BUSY_EVENT) {
-			written = total_len;
-			break;
-		}
-		if (rc == OPAL_SUCCESS) {
-			total_len -= len;
-			data += len;
-			written += len;
+	/* Should not get a partial write here because space is available. */
+	olen = cpu_to_be64(total_len);
+	rc = opal_console_write(vtermno, &olen, data);
+	if (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+		if (rc == OPAL_BUSY_EVENT)
+			opal_poll_events(NULL);
+		written = -EAGAIN;
+		goto out;
+	}
+
+	/* Closed or other error drop */
+	if (rc != OPAL_SUCCESS) {
+		written = opal_error_code(rc);
+		goto out;
+	}
+
+	written = be64_to_cpu(olen);
+	if (written < total_len) {
+		if (atomic) {
+			/* Should not happen */
+			pr_warn("atomic console write returned partial "
+				"len=%d written=%d\n", total_len, written);
 		}
-		/* This is a bit nasty but we need that for the console to
-		 * flush when there aren't any interrupts. We will clean
-		 * things a bit later to limit that to synchronous path
-		 * such as the kernel console and xmon/udbg
-		 */
-		do
-			opal_poll_events(&evt);
-		while(rc == OPAL_SUCCESS &&
-			(be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
+		if (!written)
+			written = -EAGAIN;
 	}
-	spin_unlock_irqrestore(&opal_write_lock, flags);
+
+out:
+	if (atomic)
+		spin_unlock_irqrestore(&opal_write_lock, flags);
+
 	return written;
 }
 
+int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
+{
+	return __opal_put_chars(vtermno, data, total_len, false);
+}
+
+/*
+ * opal_put_chars_atomic will not perform partial-writes. Data will be
+ * atomically written to the terminal or not at all. This is not strictly
+ * true at the moment because console space can race with OPAL's console
+ * writes.
+ */
+int opal_put_chars_atomic(uint32_t vtermno, const char *data, int total_len)
+{
+	return __opal_put_chars(vtermno, data, total_len, true);
+}
+
+static s64 __opal_flush_console(uint32_t vtermno)
+{
+	s64 rc;
+
+	if (!opal_check_token(OPAL_CONSOLE_FLUSH)) {
+		__be64 evt;
+
+		/*
+		 * If OPAL_CONSOLE_FLUSH is not implemented in the firmware,
+		 * the console can still be flushed by calling the polling
+		 * function while it has OPAL_EVENT_CONSOLE_OUTPUT events.
+		 */
+		WARN_ONCE(1, "opal: OPAL_CONSOLE_FLUSH missing.\n");
+
+		opal_poll_events(&evt);
+		if (!(be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT))
+			return OPAL_SUCCESS;
+		return OPAL_BUSY;
+
+	} else {
+		rc = opal_console_flush(vtermno);
+		if (rc == OPAL_BUSY_EVENT) {
+			opal_poll_events(NULL);
+			rc = OPAL_BUSY;
+		}
+		return rc;
+	}
+
+}
+
+/*
+ * opal_flush_console spins until the console is flushed
+ */
+int opal_flush_console(uint32_t vtermno)
+{
+	for (;;) {
+		s64 rc = __opal_flush_console(vtermno);
+
+		if (rc == OPAL_BUSY || rc == OPAL_PARTIAL) {
+			mdelay(1);
+			continue;
+		}
+
+		return opal_error_code(rc);
+	}
+}
+
+/*
+ * opal_flush_chars is an hvc interface that sleeps until the console is
+ * flushed if wait, otherwise it will return -EBUSY if the console has data,
+ * -EAGAIN if it has data and some of it was flushed.
+ */
+int opal_flush_chars(uint32_t vtermno, bool wait)
+{
+	for (;;) {
+		s64 rc = __opal_flush_console(vtermno);
+
+		if (rc == OPAL_BUSY || rc == OPAL_PARTIAL) {
+			if (wait) {
+				msleep(OPAL_BUSY_DELAY_MS);
+				continue;
+			}
+			if (rc == OPAL_PARTIAL)
+				return -EAGAIN;
+		}
+
+		return opal_error_code(rc);
+	}
+}
+
 static int opal_recover_mce(struct pt_regs *regs,
 					struct machine_check_event *evt)
 {
@@ -459,7 +535,7 @@ static int opal_recover_mce(struct pt_regs *regs,
 	return recovered;
 }
 
-void pnv_platform_error_reboot(struct pt_regs *regs, const char *msg)
+void __noreturn pnv_platform_error_reboot(struct pt_regs *regs, const char *msg)
 {
 	panic_flush_kmsg_start();
 
@@ -490,9 +566,12 @@ void pnv_platform_error_reboot(struct pt_regs *regs, const char *msg)
 	 *    opal to trigger checkstop explicitly for error analysis.
 	 *    The FSP PRD component would have already got notified
 	 *    about this error through other channels.
+	 * 4. We are running on a newer skiboot that by default does
+	 *    not cause a checkstop, drops us back to the kernel to
+	 *    extract context and state at the time of the error.
 	 */
 
-	ppc_md.restart(NULL);
+	panic(msg);
 }
 
 int opal_machine_check(struct pt_regs *regs)
@@ -537,21 +616,15 @@ int opal_hmi_exception_early(struct pt_regs *regs)
 /* HMI exception handler called in virtual mode during check_irq_replay. */
 int opal_handle_hmi_exception(struct pt_regs *regs)
 {
-	s64 rc;
-	__be64 evt = 0;
-
 	/*
 	 * Check if HMI event is available.
-	 * if Yes, then call opal_poll_events to pull opal messages and
-	 * process them.
+	 * if Yes, then wake kopald to process them.
 	 */
 	if (!local_paca->hmi_event_available)
 		return 0;
 
 	local_paca->hmi_event_available = 0;
-	rc = opal_poll_events(&evt);
-	if (rc == OPAL_SUCCESS && evt)
-		opal_handle_events(be64_to_cpu(evt));
+	opal_wake_poller();
 
 	return 1;
 }
@@ -754,14 +827,19 @@ static void __init opal_imc_init_dev(void)
 static int kopald(void *unused)
 {
 	unsigned long timeout = msecs_to_jiffies(opal_heartbeat) + 1;
-	__be64 events;
 
 	set_freezable();
 	do {
 		try_to_freeze();
-		opal_poll_events(&events);
-		opal_handle_events(be64_to_cpu(events));
-		schedule_timeout_interruptible(timeout);
+
+		opal_handle_events();
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (opal_have_pending_events())
+			__set_current_state(TASK_RUNNING);
+		else
+			schedule_timeout(timeout);
+
 	} while (!kthread_should_stop());
 
 	return 0;
@@ -821,6 +899,9 @@ static int __init opal_init(void)
 	/* Create i2c platform devices */
 	opal_pdev_init("ibm,opal-i2c");
 
+	/* Handle non-volatile memory devices */
+	opal_pdev_init("pmem-region");
+
 	/* Setup a heatbeat thread if requested by OPAL */
 	opal_init_heartbeat();
 
@@ -917,6 +998,7 @@ EXPORT_SYMBOL_GPL(opal_flash_read);
 EXPORT_SYMBOL_GPL(opal_flash_write);
 EXPORT_SYMBOL_GPL(opal_flash_erase);
 EXPORT_SYMBOL_GPL(opal_prd_msg);
+EXPORT_SYMBOL_GPL(opal_check_token);
 
 /* Convert a region of vmalloc memory to an opal sg list */
 struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
@@ -1029,3 +1111,5 @@ EXPORT_SYMBOL_GPL(opal_write_oppanel_async);
 EXPORT_SYMBOL_GPL(opal_int_set_mfrr);
 EXPORT_SYMBOL_GPL(opal_int_eoi);
 EXPORT_SYMBOL_GPL(opal_error_code);
+/* Export the below symbol for NX compression */
+EXPORT_SYMBOL(opal_nx_coproc_init);
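Note: the console hunks above replace the old poll-until-flushed loop in opal_put_chars() with a split interface: __opal_put_chars() makes a single write attempt (returning -EAGAIN when firmware reports no buffer space), and the new opal_flush_chars()/opal_flush_console() helpers drain the console separately. Below is a minimal caller-side sketch of that flow for sleepable process context; the function name example_console_write and the simplified error handling are illustrative assumptions, not part of this diff.

	#include <asm/opal.h>

	/* Illustrative only: push a buffer through the reworked console API. */
	static void example_console_write(uint32_t vtermno, const char *buf, int len)
	{
		while (len > 0) {
			int written = opal_put_chars(vtermno, buf, len);

			if (written == -EAGAIN) {
				/* No buffer space: sleep until OPAL drains output, then retry. */
				opal_flush_chars(vtermno, true);
				continue;
			}
			if (written <= 0)
				return;	/* console closed or other error: drop the rest */

			buf += written;
			len -= written;
		}
	}

For contexts that cannot sleep (xmon, panic paths), opal_put_chars_atomic() together with the spinning opal_flush_console() would play the same roles.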
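Note: the kopald() hunk switches the poller from unconditional opal_poll_events() calls to a sleep/wake scheme: the thread sets TASK_INTERRUPTIBLE before re-checking opal_have_pending_events(), so a wake-up that races with the check leaves the task runnable instead of being lost. The waker side, opal_wake_poller() as called from the HMI hunk, is not shown in this diff; the sketch below is an assumption of what it could look like, with kopald_tsk as a hypothetical handle saved when the thread is created.

	#include <linux/kthread.h>
	#include <linux/sched.h>

	/* Hypothetical: saved by the init code that calls kthread_run(kopald, ...). */
	static struct task_struct *kopald_tsk;

	static void example_wake_poller(void)
	{
		/*
		 * Safe against the race in kopald(): if the thread has already set
		 * TASK_INTERRUPTIBLE but not yet called schedule_timeout(), this
		 * wake_up_process() puts it back to TASK_RUNNING and the pending
		 * events are handled on the next loop iteration.
		 */
		if (kopald_tsk)
			wake_up_process(kopald_tsk);
	}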

