Diffstat (limited to 'arch/powerpc/sysdev')
-rw-r--r--  arch/powerpc/sysdev/Kconfig                  |   7
-rw-r--r--  arch/powerpc/sysdev/Makefile                 |   3
-rw-r--r--  arch/powerpc/sysdev/dart_iommu.c             |   2
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c                |  16
-rw-r--r--  arch/powerpc/sysdev/mpic.c                   |   4
-rw-r--r--  arch/powerpc/sysdev/scom.c                   | 223
-rw-r--r--  arch/powerpc/sysdev/simple_gpio.c            | 143
-rw-r--r--  arch/powerpc/sysdev/simple_gpio.h            |  13
-rw-r--r--  arch/powerpc/sysdev/xics/icp-native.c        |   6
-rw-r--r--  arch/powerpc/sysdev/xics/icp-opal.c          |   6
-rw-r--r--  arch/powerpc/sysdev/xive/common.c            | 164
-rw-r--r--  arch/powerpc/sysdev/xive/native.c            |  33
-rw-r--r--  arch/powerpc/sysdev/xive/spapr.c             |  69
-rw-r--r--  arch/powerpc/sysdev/xive/xive-internal.h     |   2
14 files changed, 246 insertions, 445 deletions
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index d23288c4abf6..9ebcc1337560 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -28,13 +28,6 @@ config PPC_MSI_BITMAP
source "arch/powerpc/sysdev/xics/Kconfig"
source "arch/powerpc/sysdev/xive/Kconfig"
-config PPC_SCOM
- bool
-
-config SCOM_DEBUGFS
- bool "Expose SCOM controllers via debugfs"
- depends on PPC_SCOM && DEBUG_FS
-
config GE_FPGA
bool
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 9d73dfddf060..cb5a5bd2cef5 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -24,7 +24,6 @@ obj-$(CONFIG_FSL_CORENET_RCPM) += fsl_rcpm.o
obj-$(CONFIG_FSL_LBC) += fsl_lbc.o
obj-$(CONFIG_FSL_GTM) += fsl_gtm.o
obj-$(CONFIG_FSL_85XX_CACHE_SRAM) += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o
-obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o
obj-$(CONFIG_FSL_RIO) += fsl_rio.o fsl_rmu.o
obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc_cmos_setup.o
@@ -49,8 +48,6 @@ ifdef CONFIG_SUSPEND
obj-$(CONFIG_PPC_BOOK3S_32) += 6xx-suspend.o
endif
-obj-$(CONFIG_PPC_SCOM) += scom.o
-
obj-$(CONFIG_PPC_EARLY_DEBUG_MEMCONS) += udbg_memcons.o
obj-$(CONFIG_PPC_XICS) += xics/
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 21a1fae0714e..6b4a34b36d98 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -344,7 +344,7 @@ static void iommu_table_dart_setup(void)
iommu_table_dart.it_index = 0;
iommu_table_dart.it_blocksize = 1;
iommu_table_dart.it_ops = &iommu_dart_ops;
- iommu_init_table(&iommu_table_dart, -1);
+ iommu_init_table(&iommu_table_dart, -1, 0, 0);
/* Reserve the last page of the DART to avoid possible prefetch
* past the DART mapped area
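
The dart_iommu change tracks a widened iommu_init_table() prototype. As a hedged reading (the parameter names below, res_start and res_end, are an assumption: they appear to describe a range of table entries to reserve at init time), DART passes 0, 0 because it has nothing to carve out beyond the last-page reservation handled just after the call:

/*
 * Assumed prototype -- the res_start/res_end naming is a guess based
 * on call sites that pass a reserved entry range. DART passes an
 * empty range (0, 0) and no NUMA node (-1).
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
				     unsigned long res_start,
				     unsigned long res_end);
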
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index ff0e2b156cb5..4a8874bc1057 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -115,8 +115,8 @@ static void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
- pdev->dev.bus_dma_mask =
- hose->dma_window_base_cur + hose->dma_window_size;
+ pdev->dev.bus_dma_limit =
+ hose->dma_window_base_cur + hose->dma_window_size - 1;
}
static void setup_swiotlb_ops(struct pci_controller *hose)
@@ -135,7 +135,7 @@ static void fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
* mapping that allows addressing any RAM address from across PCI.
*/
if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
- dev->bus_dma_mask = 0;
+ dev->bus_dma_limit = 0;
dev->archdata.dma_offset = pci64_dma_offset;
}
}
@@ -1065,13 +1065,11 @@ int fsl_pci_mcheck_exception(struct pt_regs *regs)
addr += mfspr(SPRN_MCAR);
if (is_in_pci_mem_space(addr)) {
- if (user_mode(regs)) {
- pagefault_disable();
- ret = get_user(inst, (__u32 __user *)regs->nip);
- pagefault_enable();
- } else {
+ if (user_mode(regs))
+ ret = probe_user_read(&inst, (void __user *)regs->nip,
+ sizeof(inst));
+ else
ret = probe_kernel_address((void *)regs->nip, inst);
- }
if (!ret && mcheck_handle_load(regs, inst)) {
regs->nip += 4;
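
Two things change in fsl_pci.c: the machine check path now uses probe_user_read() instead of open-coding pagefault_disable()/get_user(), and the swiotlb setup switches from bus_dma_mask to bus_dma_limit. The latter is an inclusive upper bound rather than a mask, which is why the window end gains a "- 1". A minimal standalone sketch of that convention (names here are illustrative, not kernel fields):

#include <stdint.h>

/*
 * Illustrative only: for a DMA window starting at 'base' and spanning
 * 'size' bytes, the last addressable bus address is base + size - 1.
 * The old bus_dma_mask field stored base + size; bus_dma_limit stores
 * the inclusive bound instead.
 */
static uint64_t dma_window_limit(uint64_t base, uint64_t size)
{
	return base + size - 1;
}
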
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 934a77324f6b..a3a72b780e67 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -964,7 +964,7 @@ static struct irq_chip mpic_irq_chip = {
};
#ifdef CONFIG_SMP
-static struct irq_chip mpic_ipi_chip = {
+static const struct irq_chip mpic_ipi_chip = {
.irq_mask = mpic_mask_ipi,
.irq_unmask = mpic_unmask_ipi,
.irq_eoi = mpic_end_ipi,
@@ -978,7 +978,7 @@ static struct irq_chip mpic_tm_chip = {
};
#ifdef CONFIG_MPIC_U3_HT_IRQS
-static struct irq_chip mpic_irq_ht_chip = {
+static const struct irq_chip mpic_irq_ht_chip = {
.irq_startup = mpic_startup_ht_irq,
.irq_shutdown = mpic_shutdown_ht_irq,
.irq_mask = mpic_mask_irq,
diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c
deleted file mode 100644
index 94e885bf3aee..000000000000
--- a/arch/powerpc/sysdev/scom.c
+++ /dev/null
@@ -1,223 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2010 Benjamin Herrenschmidt, IBM Corp
- * <benh@kernel.crashing.org>
- * and David Gibson, IBM Corporation.
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <asm/debugfs.h>
-#include <asm/prom.h>
-#include <asm/scom.h>
-#include <linux/uaccess.h>
-
-const struct scom_controller *scom_controller;
-EXPORT_SYMBOL_GPL(scom_controller);
-
-struct device_node *scom_find_parent(struct device_node *node)
-{
- struct device_node *par, *tmp;
- const u32 *p;
-
- for (par = of_node_get(node); par;) {
- if (of_get_property(par, "scom-controller", NULL))
- break;
- p = of_get_property(par, "scom-parent", NULL);
- tmp = par;
- if (p == NULL)
- par = of_get_parent(par);
- else
- par = of_find_node_by_phandle(*p);
- of_node_put(tmp);
- }
- return par;
-}
-EXPORT_SYMBOL_GPL(scom_find_parent);
-
-scom_map_t scom_map_device(struct device_node *dev, int index)
-{
- struct device_node *parent;
- unsigned int cells, size;
- const __be32 *prop, *sprop;
- u64 reg, cnt;
- scom_map_t ret;
-
- parent = scom_find_parent(dev);
-
- if (parent == NULL)
- return NULL;
-
- /*
- * We support "scom-reg" properties for adding scom registers
- * to a random device-tree node with an explicit scom-parent
- *
- * We also support the simple "reg" property if the device is
- * a direct child of a scom controller.
- *
- * In case both exist, "scom-reg" takes precedence.
- */
- prop = of_get_property(dev, "scom-reg", &size);
- sprop = of_get_property(parent, "#scom-cells", NULL);
- if (!prop && parent == dev->parent) {
- prop = of_get_property(dev, "reg", &size);
- sprop = of_get_property(parent, "#address-cells", NULL);
- }
- if (!prop)
- return NULL;
- cells = sprop ? be32_to_cpup(sprop) : 1;
- size >>= 2;
-
- if (index >= (size / (2*cells)))
- return NULL;
-
- reg = of_read_number(&prop[index * cells * 2], cells);
- cnt = of_read_number(&prop[index * cells * 2 + cells], cells);
-
- ret = scom_map(parent, reg, cnt);
- of_node_put(parent);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(scom_map_device);
-
-#ifdef CONFIG_SCOM_DEBUGFS
-struct scom_debug_entry {
- struct device_node *dn;
- struct debugfs_blob_wrapper path;
- char name[16];
-};
-
-static ssize_t scom_debug_read(struct file *filp, char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- struct scom_debug_entry *ent = filp->private_data;
- u64 __user *ubuf64 = (u64 __user *)ubuf;
- loff_t off = *ppos;
- ssize_t done = 0;
- u64 reg, reg_cnt, val;
- scom_map_t map;
- int rc;
-
- if (off < 0 || (off & 7) || (count & 7))
- return -EINVAL;
- reg = off >> 3;
- reg_cnt = count >> 3;
-
- map = scom_map(ent->dn, reg, reg_cnt);
- if (!scom_map_ok(map))
- return -ENXIO;
-
- for (reg = 0; reg < reg_cnt; reg++) {
- rc = scom_read(map, reg, &val);
- if (!rc)
- rc = put_user(val, ubuf64);
- if (rc) {
- if (!done)
- done = rc;
- break;
- }
- ubuf64++;
- *ppos += 8;
- done += 8;
- }
- scom_unmap(map);
- return done;
-}
-
-static ssize_t scom_debug_write(struct file* filp, const char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- struct scom_debug_entry *ent = filp->private_data;
- u64 __user *ubuf64 = (u64 __user *)ubuf;
- loff_t off = *ppos;
- ssize_t done = 0;
- u64 reg, reg_cnt, val;
- scom_map_t map;
- int rc;
-
- if (off < 0 || (off & 7) || (count & 7))
- return -EINVAL;
- reg = off >> 3;
- reg_cnt = count >> 3;
-
- map = scom_map(ent->dn, reg, reg_cnt);
- if (!scom_map_ok(map))
- return -ENXIO;
-
- for (reg = 0; reg < reg_cnt; reg++) {
- rc = get_user(val, ubuf64);
- if (!rc)
- rc = scom_write(map, reg, val);
- if (rc) {
- if (!done)
- done = rc;
- break;
- }
- ubuf64++;
- done += 8;
- }
- scom_unmap(map);
- return done;
-}
-
-static const struct file_operations scom_debug_fops = {
- .read = scom_debug_read,
- .write = scom_debug_write,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-static int scom_debug_init_one(struct dentry *root, struct device_node *dn,
- int i)
-{
- struct scom_debug_entry *ent;
- struct dentry *dir;
-
- ent = kzalloc(sizeof(*ent), GFP_KERNEL);
- if (!ent)
- return -ENOMEM;
-
- ent->dn = of_node_get(dn);
- snprintf(ent->name, 16, "%08x", i);
- ent->path.data = (void*)kasprintf(GFP_KERNEL, "%pOF", dn);
- ent->path.size = strlen((char *)ent->path.data);
-
- dir = debugfs_create_dir(ent->name, root);
- if (!dir) {
- of_node_put(dn);
- kfree(ent->path.data);
- kfree(ent);
- return -1;
- }
-
- debugfs_create_blob("devspec", 0400, dir, &ent->path);
- debugfs_create_file("access", 0600, dir, ent, &scom_debug_fops);
-
- return 0;
-}
-
-static int scom_debug_init(void)
-{
- struct device_node *dn;
- struct dentry *root;
- int i, rc;
-
- root = debugfs_create_dir("scom", powerpc_debugfs_root);
- if (!root)
- return -1;
-
- i = rc = 0;
- for_each_node_with_property(dn, "scom-controller") {
- int id = of_get_ibm_chip_id(dn);
- if (id == -1)
- id = i;
- rc |= scom_debug_init_one(root, dn, id);
- i++;
- }
-
- return rc;
-}
-device_initcall(scom_debug_init);
-#endif /* CONFIG_SCOM_DEBUGFS */
diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c
deleted file mode 100644
index dc1740cd9e42..000000000000
--- a/arch/powerpc/sysdev/simple_gpio.c
+++ /dev/null
@@ -1,143 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Simple Memory-Mapped GPIOs
- *
- * Copyright (c) MontaVista Software, Inc. 2008.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/gpio/driver.h>
-#include <linux/slab.h>
-#include <asm/prom.h>
-#include "simple_gpio.h"
-
-struct u8_gpio_chip {
- struct of_mm_gpio_chip mm_gc;
- spinlock_t lock;
-
- /* shadowed data register to clear/set bits safely */
- u8 data;
-};
-
-static u8 u8_pin2mask(unsigned int pin)
-{
- return 1 << (8 - 1 - pin);
-}
-
-static int u8_gpio_get(struct gpio_chip *gc, unsigned int gpio)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-
- return !!(in_8(mm_gc->regs) & u8_pin2mask(gpio));
-}
-
-static void u8_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct u8_gpio_chip *u8_gc = gpiochip_get_data(gc);
- unsigned long flags;
-
- spin_lock_irqsave(&u8_gc->lock, flags);
-
- if (val)
- u8_gc->data |= u8_pin2mask(gpio);
- else
- u8_gc->data &= ~u8_pin2mask(gpio);
-
- out_8(mm_gc->regs, u8_gc->data);
-
- spin_unlock_irqrestore(&u8_gc->lock, flags);
-}
-
-static int u8_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
-{
- return 0;
-}
-
-static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- u8_gpio_set(gc, gpio, val);
- return 0;
-}
-
-static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
-{
- struct u8_gpio_chip *u8_gc =
- container_of(mm_gc, struct u8_gpio_chip, mm_gc);
-
- u8_gc->data = in_8(mm_gc->regs);
-}
-
-static int __init u8_simple_gpiochip_add(struct device_node *np)
-{
- int ret;
- struct u8_gpio_chip *u8_gc;
- struct of_mm_gpio_chip *mm_gc;
- struct gpio_chip *gc;
-
- u8_gc = kzalloc(sizeof(*u8_gc), GFP_KERNEL);
- if (!u8_gc)
- return -ENOMEM;
-
- spin_lock_init(&u8_gc->lock);
-
- mm_gc = &u8_gc->mm_gc;
- gc = &mm_gc->gc;
-
- mm_gc->save_regs = u8_gpio_save_regs;
- gc->ngpio = 8;
- gc->direction_input = u8_gpio_dir_in;
- gc->direction_output = u8_gpio_dir_out;
- gc->get = u8_gpio_get;
- gc->set = u8_gpio_set;
-
- ret = of_mm_gpiochip_add_data(np, mm_gc, u8_gc);
- if (ret)
- goto err;
- return 0;
-err:
- kfree(u8_gc);
- return ret;
-}
-
-void __init simple_gpiochip_init(const char *compatible)
-{
- struct device_node *np;
-
- for_each_compatible_node(np, NULL, compatible) {
- int ret;
- struct resource r;
-
- ret = of_address_to_resource(np, 0, &r);
- if (ret)
- goto err;
-
- switch (resource_size(&r)) {
- case 1:
- ret = u8_simple_gpiochip_add(np);
- if (ret)
- goto err;
- break;
- default:
- /*
- * Whenever you need support for GPIO bank width > 1,
- * please just turn u8_ code into huge macros, and
- * construct needed uX_ code with it.
- */
- ret = -ENOSYS;
- goto err;
- }
- continue;
-err:
- pr_err("%pOF: registration failed, status %d\n", np, ret);
- }
-}
diff --git a/arch/powerpc/sysdev/simple_gpio.h b/arch/powerpc/sysdev/simple_gpio.h
deleted file mode 100644
index f3f3a20d39e2..000000000000
--- a/arch/powerpc/sysdev/simple_gpio.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __SYSDEV_SIMPLE_GPIO_H
-#define __SYSDEV_SIMPLE_GPIO_H
-
-#include <linux/errno.h>
-
-#ifdef CONFIG_SIMPLE_GPIO
-extern void simple_gpiochip_init(const char *compatible);
-#else
-static inline void simple_gpiochip_init(const char *compatible) {}
-#endif /* CONFIG_SIMPLE_GPIO */
-
-#endif /* __SYSDEV_SIMPLE_GPIO_H */
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 485569ff7ef1..7d13d2ef5a90 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -140,7 +140,7 @@ static unsigned int icp_native_get_irq(void)
static void icp_native_cause_ipi(int cpu)
{
- kvmppc_set_host_ipi(cpu, 1);
+ kvmppc_set_host_ipi(cpu);
icp_native_set_qirr(cpu, IPI_PRIORITY);
}
@@ -179,7 +179,7 @@ void icp_native_flush_interrupt(void)
if (vec == XICS_IPI) {
/* Clear pending IPI */
int cpu = smp_processor_id();
- kvmppc_set_host_ipi(cpu, 0);
+ kvmppc_clear_host_ipi(cpu);
icp_native_set_qirr(cpu, 0xff);
} else {
pr_err("XICS: hw interrupt 0x%x to offline cpu, disabling\n",
@@ -200,7 +200,7 @@ static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
{
int cpu = smp_processor_id();
- kvmppc_set_host_ipi(cpu, 0);
+ kvmppc_clear_host_ipi(cpu);
icp_native_set_qirr(cpu, 0xff);
return smp_ipi_demux();
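
The XICS ICP backends stop passing a 0/1 flag to kvmppc_set_host_ipi() and instead call dedicated set/clear helpers. A hedged sketch of what the split plausibly looks like (the field access and barrier placement below are assumptions, not a verbatim copy of the KVM headers; the point of splitting the API is that set and clear want different ordering against the IPI message accesses):

/* Sketch only -- assumed shape of the split helpers. */
static inline void kvmppc_set_host_ipi(int cpu)
{
	/* order IPI message stores before publishing the flag */
	smp_mb();
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
}

static inline void kvmppc_clear_host_ipi(int cpu)
{
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
	/* order clearing the flag before processing further messages */
	smp_mb();
}
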
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index 8bb8dd7dd6ad..68fd2540b093 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -126,7 +126,7 @@ static void icp_opal_cause_ipi(int cpu)
{
int hw_cpu = get_hard_smp_processor_id(cpu);
- kvmppc_set_host_ipi(cpu, 1);
+ kvmppc_set_host_ipi(cpu);
opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
}
@@ -134,7 +134,7 @@ static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
{
int cpu = smp_processor_id();
- kvmppc_set_host_ipi(cpu, 0);
+ kvmppc_clear_host_ipi(cpu);
opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
return smp_ipi_demux();
@@ -157,7 +157,7 @@ void icp_opal_flush_interrupt(void)
if (vec == XICS_IPI) {
/* Clear pending IPI */
int cpu = smp_processor_id();
- kvmppc_set_host_ipi(cpu, 0);
+ kvmppc_clear_host_ipi(cpu);
opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
} else {
pr_err("XICS: hw interrupt 0x%x to offline cpu, "
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 1cdb39575eae..9651ca061828 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -135,7 +135,7 @@ static u32 xive_read_eq(struct xive_q *q, bool just_peek)
static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
{
u32 irq = 0;
- u8 prio;
+ u8 prio = 0;
/* Find highest pending priority */
while (xc->pending_prio != 0) {
@@ -148,8 +148,19 @@ static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
irq = xive_read_eq(&xc->queue[prio], just_peek);
/* Found something ? That's it */
- if (irq)
- break;
+ if (irq) {
+ if (just_peek || irq_to_desc(irq))
+ break;
+ /*
+ * We should never get here; if we do then we must
+ * have failed to synchronize the interrupt properly
+ * when shutting it down.
+ */
+ pr_crit("xive: got interrupt %d without descriptor, dropping\n",
+ irq);
+ WARN_ON(1);
+ continue;
+ }
/* Clear pending bits */
xc->pending_prio &= ~(1 << prio);
@@ -185,7 +196,7 @@ static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
/*
* This is used to perform the magic loads from an ESB
- * described in xive.h
+ * described in xive-regs.h
*/
static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
@@ -226,26 +237,61 @@ static notrace void xive_dump_eq(const char *name, struct xive_q *q)
i0 = be32_to_cpup(q->qpage + idx);
idx = (idx + 1) & q->msk;
i1 = be32_to_cpup(q->qpage + idx);
- xmon_printf(" %s Q T=%d %08x %08x ...\n", name,
- q->toggle, i0, i1);
+ xmon_printf("%s idx=%d T=%d %08x %08x ...", name,
+ q->idx, q->toggle, i0, i1);
}
notrace void xmon_xive_do_dump(int cpu)
{
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
- xmon_printf("XIVE state for CPU %d:\n", cpu);
- xmon_printf(" pp=%02x cppr=%02x\n", xc->pending_prio, xc->cppr);
- xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]);
+ xmon_printf("CPU %d:", cpu);
+ if (xc) {
+ xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);
+
#ifdef CONFIG_SMP
- {
- u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
- xmon_printf(" IPI state: %x:%c%c\n", xc->hw_ipi,
- val & XIVE_ESB_VAL_P ? 'P' : 'p',
- val & XIVE_ESB_VAL_Q ? 'Q' : 'q');
- }
+ {
+ u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
+
+ xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
+ val & XIVE_ESB_VAL_P ? 'P' : '-',
+ val & XIVE_ESB_VAL_Q ? 'Q' : '-');
+ }
#endif
+ xive_dump_eq("EQ", &xc->queue[xive_irq_priority]);
+ }
+ xmon_printf("\n");
}
+
+int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
+{
+ int rc;
+ u32 target;
+ u8 prio;
+ u32 lirq;
+
+ rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
+ if (rc) {
+ xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
+ return rc;
+ }
+
+ xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
+ hw_irq, target, prio, lirq);
+
+ if (d) {
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+ u64 val = xive_esb_read(xd, XIVE_ESB_GET);
+
+ xmon_printf("PQ=%c%c",
+ val & XIVE_ESB_VAL_P ? 'P' : '-',
+ val & XIVE_ESB_VAL_Q ? 'Q' : '-');
+ }
+
+ xmon_printf("\n");
+ return 0;
+}
+
#endif /* CONFIG_XMON */
static unsigned int xive_get_irq(void)
@@ -307,6 +353,7 @@ static void xive_do_queue_eoi(struct xive_cpu *xc)
*/
static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
{
+ xd->stale_p = false;
/* If the XIVE supports the new "store EOI facility, use it */
if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
@@ -350,7 +397,7 @@ static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
}
}
-/* irq_chip eoi callback */
+/* irq_chip eoi callback, called with irq descriptor lock held */
static void xive_irq_eoi(struct irq_data *d)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
@@ -366,6 +413,8 @@ static void xive_irq_eoi(struct irq_data *d)
if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
!(xd->flags & XIVE_IRQ_NO_EOI))
xive_do_source_eoi(irqd_to_hwirq(d), xd);
+ else
+ xd->stale_p = true;
/*
* Clear saved_p to indicate that it's no longer occupying
@@ -397,11 +446,16 @@ static void xive_do_source_set_mask(struct xive_irq_data *xd,
*/
if (mask) {
val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
- xd->saved_p = !!(val & XIVE_ESB_VAL_P);
- } else if (xd->saved_p)
+ if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P))
+ xd->saved_p = true;
+ xd->stale_p = false;
+ } else if (xd->saved_p) {
xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
- else
+ xd->saved_p = false;
+ } else {
xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
+ xd->stale_p = false;
+ }
}
/*
@@ -541,6 +595,8 @@ static unsigned int xive_irq_startup(struct irq_data *d)
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
int target, rc;
+ xd->saved_p = false;
+ xd->stale_p = false;
pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
d->irq, hw_irq, d);
@@ -587,6 +643,7 @@ static unsigned int xive_irq_startup(struct irq_data *d)
return 0;
}
+/* called with irq descriptor lock held */
static void xive_irq_shutdown(struct irq_data *d)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
@@ -602,16 +659,6 @@ static void xive_irq_shutdown(struct irq_data *d)
xive_do_source_set_mask(xd, true);
/*
- * The above may have set saved_p. We clear it otherwise it
- * will prevent re-enabling later on. It is ok to forget the
- * fact that the interrupt might be in a queue because we are
- * accounting that already in xive_dec_target_count() and will
- * be re-routing it to a new queue with proper accounting when
- * it's started up again
- */
- xd->saved_p = false;
-
- /*
* Mask the interrupt in HW in the IVT/EAS and set the number
* to be the "bad" IRQ number
*/
@@ -797,6 +844,10 @@ static int xive_irq_retrigger(struct irq_data *d)
return 1;
}
+/*
+ * Caller holds the irq descriptor lock, so this won't be called
+ * concurrently with xive_get_irqchip_state on the same interrupt.
+ */
static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
@@ -820,6 +871,10 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
/* Set it to PQ=10 state to prevent further sends */
pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
+ if (!xd->stale_p) {
+ xd->saved_p = !!(pq & XIVE_ESB_VAL_P);
+ xd->stale_p = !xd->saved_p;
+ }
/* No target ? nothing to do */
if (xd->target == XIVE_INVALID_TARGET) {
@@ -827,7 +882,7 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
* An untargetted interrupt should have been
* also masked at the source
*/
- WARN_ON(pq & 2);
+ WARN_ON(xd->saved_p);
return 0;
}
@@ -847,9 +902,8 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
* This saved_p is cleared by the host EOI, when we know
* for sure the queue slot is no longer in use.
*/
- if (pq & 2) {
- pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
- xd->saved_p = true;
+ if (xd->saved_p) {
+ xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
/*
* Sync the XIVE source HW to ensure the interrupt
@@ -862,8 +916,7 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
*/
if (xive_ops->sync_source)
xive_ops->sync_source(hw_irq);
- } else
- xd->saved_p = false;
+ }
} else {
irqd_clr_forwarded_to_vcpu(d);
@@ -914,6 +967,32 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
return 0;
}
+/* Called with irq descriptor lock held. */
+static int xive_get_irqchip_state(struct irq_data *data,
+ enum irqchip_irq_state which, bool *state)
+{
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
+ u8 pq;
+
+ switch (which) {
+ case IRQCHIP_STATE_ACTIVE:
+ pq = xive_esb_read(xd, XIVE_ESB_GET);
+
+ /*
+ * The esb value being all 1's means we couldn't get
+ * the PQ state of the interrupt through mmio. It may
+ * happen, for example when querying a PHB interrupt
+ * while the PHB is in an error state. We consider the
+ * interrupt to be inactive in that case.
+ */
+ *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
+ (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static struct irq_chip xive_irq_chip = {
.name = "XIVE-IRQ",
.irq_startup = xive_irq_startup,
@@ -925,6 +1004,7 @@ static struct irq_chip xive_irq_chip = {
.irq_set_type = xive_irq_set_type,
.irq_retrigger = xive_irq_retrigger,
.irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
+ .irq_get_irqchip_state = xive_get_irqchip_state,
};
bool is_xive_irq(struct irq_chip *chip)
@@ -964,6 +1044,15 @@ static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
xd->target = XIVE_INVALID_TARGET;
irq_set_handler_data(virq, xd);
+ /*
+ * Turn OFF by default the interrupt being mapped. A side
+ * effect of this check is the mapping the ESB page of the
+ * interrupt in the Linux address space. This prevents page
+ * fault issues in the crash handler which masks all
+ * interrupts.
+ */
+ xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
+
return 0;
}
@@ -1338,6 +1427,11 @@ static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
xd = irq_desc_get_handler_data(desc);
/*
+ * Clear saved_p to indicate that it's no longer pending
+ */
+ xd->saved_p = false;
+
+ /*
* For LSIs, we EOI, this will cause a resend if it's
* still asserted. Otherwise do an MSI retrigger.
*/
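
Most of the xive/common.c hunks revolve around the ESB PQ bits: P means an event has been presented to a queue and is awaiting EOI, Q means a further trigger was latched while P was set, and the new saved_p/stale_p fields cache what the driver last observed. The new IRQCHIP_STATE_ACTIVE handler can be restated as a small standalone predicate (the constants below are meant to mirror the driver's XIVE_ESB_VAL_P and XIVE_ESB_INVALID; an all-ones read means the MMIO load failed, e.g. a PHB in error state):

#include <stdbool.h>
#include <stdint.h>

#define ESB_VAL_P	0x2	/* event presented / queued          */
#define ESB_INVALID	0xff	/* load returned all ones: no answer */

/*
 * Restatement of the new ACTIVE test: valid ESB read, cached P not
 * stale, and either the driver remembered P (saved_p) or the hardware
 * still reports it set.
 */
static bool xive_source_active(uint8_t pq, bool saved_p, bool stale_p)
{
	if (pq == ESB_INVALID)
		return false;
	return !stale_p && (saved_p || (pq & ESB_VAL_P));
}
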
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 2f26b74f6cfa..0ff6b739052c 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -111,6 +111,20 @@ int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);
+static int xive_native_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
+ u32 *sw_irq)
+{
+ s64 rc;
+ __be64 vp;
+ __be32 lirq;
+
+ rc = opal_xive_get_irq_config(hw_irq, &vp, prio, &lirq);
+
+ *target = be64_to_cpu(vp);
+ *sw_irq = be32_to_cpu(lirq);
+
+ return rc == 0 ? 0 : -ENXIO;
+}
/* This can be called multiple time to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
@@ -231,6 +245,17 @@ static bool xive_native_match(struct device_node *node)
return of_device_is_compatible(node, "ibm,opal-xive-vc");
}
+static s64 opal_xive_allocate_irq(u32 chip_id)
+{
+ s64 irq = opal_xive_allocate_irq_raw(chip_id);
+
+ /*
+ * Old versions of skiboot can incorrectly return 0xffffffff to
+ * indicate no space, fix it up here.
+ */
+ return irq == 0xffffffff ? OPAL_RESOURCE : irq;
+}
+
#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
@@ -442,6 +467,7 @@ EXPORT_SYMBOL_GPL(xive_native_sync_queue);
static const struct xive_ops xive_native_ops = {
.populate_irq_data = xive_native_populate_irq_data,
.configure_irq = xive_native_configure_irq,
+ .get_irq_config = xive_native_get_irq_config,
.setup_queue = xive_native_setup_queue,
.cleanup_queue = xive_native_cleanup_queue,
.match = xive_native_match,
@@ -800,6 +826,13 @@ int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
}
EXPORT_SYMBOL_GPL(xive_native_set_queue_state);
+bool xive_native_has_queue_state_support(void)
+{
+ return opal_check_token(OPAL_XIVE_GET_QUEUE_STATE) &&
+ opal_check_token(OPAL_XIVE_SET_QUEUE_STATE);
+}
+EXPORT_SYMBOL_GPL(xive_native_has_queue_state_support);
+
int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
{
__be64 state;
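
xive/native.c gains three small pieces: a get_irq_config backend op for the new xmon dump, a wrapper that translates the bogus 0xffffffff some older skiboot versions return from IRQ allocation into a proper OPAL error, and an exported xive_native_has_queue_state_support() probe. A hypothetical caller (not taken from the tree) would use the probe to fail early rather than discovering mid-operation that the firmware cannot save or restore queue state:

/*
 * Hypothetical example caller: refuse to start a queue save/restore
 * sequence when the firmware lacks the required OPAL tokens.
 */
static int example_save_queues(void)
{
	if (!xive_native_has_queue_state_support())
		return -ENODEV;

	/* ... proceed with xive_native_get_queue_state() per queue ... */
	return 0;
}
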
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index 8ef9cf4ebb1c..55dc61cb4867 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -45,7 +45,7 @@ static int xive_irq_bitmap_add(int base, int count)
{
struct xive_irq_bitmap *xibm;
- xibm = kzalloc(sizeof(*xibm), GFP_ATOMIC);
+ xibm = kzalloc(sizeof(*xibm), GFP_KERNEL);
if (!xibm)
return -ENOMEM;
@@ -53,6 +53,10 @@ static int xive_irq_bitmap_add(int base, int count)
xibm->base = base;
xibm->count = count;
xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL);
+ if (!xibm->bitmap) {
+ kfree(xibm);
+ return -ENOMEM;
+ }
list_add(&xibm->list, &xive_irq_bitmaps);
pr_info("Using IRQ range [%x-%x]", xibm->base,
@@ -211,6 +215,38 @@ static long plpar_int_set_source_config(unsigned long flags,
return 0;
}
+static long plpar_int_get_source_config(unsigned long flags,
+ unsigned long lisn,
+ unsigned long *target,
+ unsigned long *prio,
+ unsigned long *sw_irq)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ pr_devel("H_INT_GET_SOURCE_CONFIG flags=%lx lisn=%lx\n", flags, lisn);
+
+ do {
+ rc = plpar_hcall(H_INT_GET_SOURCE_CONFIG, retbuf, flags, lisn,
+ target, prio, sw_irq);
+ } while (plpar_busy_delay(rc));
+
+ if (rc) {
+ pr_err("H_INT_GET_SOURCE_CONFIG lisn=%ld failed %ld\n",
+ lisn, rc);
+ return rc;
+ }
+
+ *target = retbuf[0];
+ *prio = retbuf[1];
+ *sw_irq = retbuf[2];
+
+ pr_devel("H_INT_GET_SOURCE_CONFIG target=%lx prio=%lx sw_irq=%lx\n",
+ retbuf[0], retbuf[1], retbuf[2]);
+
+ return 0;
+}
+
static long plpar_int_get_queue_info(unsigned long flags,
unsigned long target,
unsigned long priority,
@@ -356,20 +392,28 @@ static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
data->esb_shift = esb_shift;
data->trig_page = trig_page;
+ data->hw_irq = hw_irq;
+
/*
* No chip-id for the sPAPR backend. This has an impact how we
* pick a target. See xive_pick_irq_target().
*/
data->src_chip = XIVE_INVALID_CHIP_ID;
+ /*
+ * When the H_INT_ESB flag is set, the H_INT_ESB hcall should
+ * be used for interrupt management. Skip the remapping of the
+ * ESB pages which are not available.
+ */
+ if (data->flags & XIVE_IRQ_FLAG_H_INT_ESB)
+ return 0;
+
data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
if (!data->eoi_mmio) {
pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
return -ENOMEM;
}
- data->hw_irq = hw_irq;
-
/* Full function page supports trigger */
if (flags & XIVE_SRC_TRIGGER) {
data->trig_mmio = data->eoi_mmio;
@@ -394,6 +438,24 @@ static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
return rc == 0 ? 0 : -ENXIO;
}
+static int xive_spapr_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
+ u32 *sw_irq)
+{
+ long rc;
+ unsigned long h_target;
+ unsigned long h_prio;
+ unsigned long h_sw_irq;
+
+ rc = plpar_int_get_source_config(0, hw_irq, &h_target, &h_prio,
+ &h_sw_irq);
+
+ *target = h_target;
+ *prio = h_prio;
+ *sw_irq = h_sw_irq;
+
+ return rc == 0 ? 0 : -ENXIO;
+}
+
/* This can be called multiple time to change a queue configuration */
static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
__be32 *qpage, u32 order)
@@ -586,6 +648,7 @@ static void xive_spapr_sync_source(u32 hw_irq)
static const struct xive_ops xive_spapr_ops = {
.populate_irq_data = xive_spapr_populate_irq_data,
.configure_irq = xive_spapr_configure_irq,
+ .get_irq_config = xive_spapr_get_irq_config,
.setup_queue = xive_spapr_setup_queue,
.cleanup_queue = xive_spapr_cleanup_queue,
.match = xive_spapr_match,
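
In the sPAPR backend, populate_irq_data now assigns data->hw_irq before the new early return for XIVE_IRQ_FLAG_H_INT_ESB sources, whose ESB pages are never ioremap()ed. The reason hw_irq must already be valid is that ESB accesses for such sources go through the backend's esb_rw hcall op instead of MMIO; a sketch of that split, modelled on (but not copied from) xive_esb_read() in common.c:

/*
 * Assumed shape of the MMIO-vs-hcall split for an ESB load. With
 * XIVE_IRQ_FLAG_H_INT_ESB there is no eoi_mmio mapping, so the hcall
 * path is keyed purely by xd->hw_irq.
 */
static u8 esb_load(struct xive_irq_data *xd, u32 offset)
{
	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		return xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);

	return in_be64(xd->eoi_mmio + offset);
}
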
diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h
index 211725dbf364..59cd366e7933 100644
--- a/arch/powerpc/sysdev/xive/xive-internal.h
+++ b/arch/powerpc/sysdev/xive/xive-internal.h
@@ -33,6 +33,8 @@ struct xive_cpu {
struct xive_ops {
int (*populate_irq_data)(u32 hw_irq, struct xive_irq_data *data);
int (*configure_irq)(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
+ int (*get_irq_config)(u32 hw_irq, u32 *target, u8 *prio,
+ u32 *sw_irq);
int (*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
void (*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
void (*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);