author     H. Peter Anvin <hpa@linux.intel.com>  2014-02-07 11:27:30 -0800
committer  H. Peter Anvin <hpa@linux.intel.com>  2014-02-07 11:27:30 -0800
commit     a3b072cd180c12e8fe0ece9487b9065808327640 (patch)
tree       62b982041be84748852d77cdf6ca5639ef40858f /arch/powerpc/platforms/pseries
parent     75a1ba5b2c529db60ca49626bcaf0bddf4548438 (diff)
parent     081cd62a010f97b5bc1d2b0cd123c5abc692b68a (diff)
download   blackbird-obmc-linux-a3b072cd180c12e8fe0ece9487b9065808327640.tar.gz
           blackbird-obmc-linux-a3b072cd180c12e8fe0ece9487b9065808327640.zip
Merge tag 'efi-urgent' into x86/urgent
* Avoid WARN_ON() when mapping BGRT on Baytrail (EFI 32-bit)

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/powerpc/platforms/pseries')
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig           |  11
-rw-r--r--  arch/powerpc/platforms/pseries/Makefile          |   1
-rw-r--r--  arch/powerpc/platforms/pseries/cmm.c             |   1
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c             |   1
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c     |   4
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c           | 167
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c            |   5
-rw-r--r--  arch/powerpc/platforms/pseries/processor_idle.c  | 364
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c           |   4
9 files changed, 51 insertions(+), 507 deletions(-)
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 62b4f8025de0..37300f6ee244 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -34,7 +34,7 @@ config PPC_SPLPAR
config PSERIES_MSI
bool
- depends on PCI_MSI && EEH
+ depends on PCI_MSI && PPC_PSERIES && EEH
default y
config PSERIES_ENERGY
@@ -119,12 +119,3 @@ config DTL
which are accessible through a debugfs file.
Say N if you are unsure.
-
-config PSERIES_IDLE
- bool "Cpuidle driver for pSeries platforms"
- depends on CPU_IDLE
- depends on PPC_PSERIES
- default y
- help
- Select this option to enable processor idle state management
- through cpuidle subsystem.
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index fbccac9cd2dc..03480796af9a 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -21,7 +21,6 @@ obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_DTL) += dtl.o
obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o
-obj-$(CONFIG_PSERIES_IDLE) += processor_idle.o
obj-$(CONFIG_LPARCFG) += lparcfg.o
ifeq ($(CONFIG_PPC_PSERIES),y)
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 1e561bef459b..2d8bf15879fd 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -25,7 +25,6 @@
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/gfp.h>
-#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/oom.h>
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 5db66f1fbc26..7d61498e45c0 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -20,7 +20,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/spinlock.h>
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index ccb633e077b1..9ef3cc8ebc11 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -689,7 +689,9 @@ static struct eeh_ops pseries_eeh_ops = {
.get_log = pseries_eeh_get_log,
.configure_bridge = pseries_eeh_configure_bridge,
.read_config = pseries_eeh_read_config,
- .write_config = pseries_eeh_write_config
+ .write_config = pseries_eeh_write_config,
+ .next_error = NULL,
+ .restore_config = NULL
};
/**
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index f253361552ae..33b552ffbe57 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -486,9 +486,10 @@ static void iommu_table_setparms(struct pci_controller *phb,
memset((void *)tbl->it_base, 0, *sizep);
tbl->it_busno = phb->bus->number;
+ tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
/* Units of tce entries */
- tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT;
+ tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift;
/* Test if we are going over 2GB of DMA space */
if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
@@ -499,7 +500,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
phb->dma_window_base_cur += phb->dma_window_size;
/* Set the tce table size - measured in entries */
- tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT;
+ tbl->it_size = phb->dma_window_size >> tbl->it_page_shift;
tbl->it_index = 0;
tbl->it_blocksize = 16;
@@ -537,11 +538,12 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);
tbl->it_busno = phb->bus->number;
+ tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
tbl->it_base = 0;
tbl->it_blocksize = 16;
tbl->it_type = TCE_PCI;
- tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
- tbl->it_size = size >> IOMMU_PAGE_SHIFT;
+ tbl->it_offset = offset >> tbl->it_page_shift;
+ tbl->it_size = size >> tbl->it_page_shift;
}
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
@@ -687,7 +689,8 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
iommu_table_setparms(phb, dn, tbl);
PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
iommu_register_group(tbl, pci_domain_nr(phb->bus), 0);
- set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
+ set_iommu_table_base_and_group(&dev->dev,
+ PCI_DN(dn)->iommu_table);
return;
}
@@ -699,7 +702,8 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
dn = dn->parent;
if (dn && PCI_DN(dn))
- set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
+ set_iommu_table_base_and_group(&dev->dev,
+ PCI_DN(dn)->iommu_table);
else
printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
pci_name(dev));
@@ -717,21 +721,6 @@ static int __init disable_ddw_setup(char *str)
early_param("disable_ddw", disable_ddw_setup);
-static inline void __remove_ddw(struct device_node *np, const u32 *ddw_avail, u64 liobn)
-{
- int ret;
-
- ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
- if (ret)
- pr_warning("%s: failed to remove DMA window: rtas returned "
- "%d to ibm,remove-pe-dma-window(%x) %llx\n",
- np->full_name, ret, ddw_avail[2], liobn);
- else
- pr_debug("%s: successfully removed DMA window: rtas returned "
- "%d to ibm,remove-pe-dma-window(%x) %llx\n",
- np->full_name, ret, ddw_avail[2], liobn);
-}
-
static void remove_ddw(struct device_node *np)
{
struct dynamic_dma_window_prop *dwp;
@@ -761,7 +750,15 @@ static void remove_ddw(struct device_node *np)
pr_debug("%s successfully cleared tces in window.\n",
np->full_name);
- __remove_ddw(np, ddw_avail, liobn);
+ ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
+ if (ret)
+ pr_warning("%s: failed to remove direct window: rtas returned "
+ "%d to ibm,remove-pe-dma-window(%x) %llx\n",
+ np->full_name, ret, ddw_avail[2], liobn);
+ else
+ pr_debug("%s: successfully removed direct window: rtas returned "
+ "%d to ibm,remove-pe-dma-window(%x) %llx\n",
+ np->full_name, ret, ddw_avail[2], liobn);
delprop:
ret = of_remove_property(np, win64);
@@ -790,68 +787,33 @@ static u64 find_existing_ddw(struct device_node *pdn)
return dma_addr;
}
-static void __restore_default_window(struct eeh_dev *edev,
- u32 ddw_restore_token)
-{
- u32 cfg_addr;
- u64 buid;
- int ret;
-
- /*
- * Get the config address and phb buid of the PE window.
- * Rely on eeh to retrieve this for us.
- * Retrieve them from the pci device, not the node with the
- * dma-window property
- */
- cfg_addr = edev->config_addr;
- if (edev->pe_config_addr)
- cfg_addr = edev->pe_config_addr;
- buid = edev->phb->buid;
-
- do {
- ret = rtas_call(ddw_restore_token, 3, 1, NULL, cfg_addr,
- BUID_HI(buid), BUID_LO(buid));
- } while (rtas_busy_delay(ret));
- pr_info("ibm,reset-pe-dma-windows(%x) %x %x %x returned %d\n",
- ddw_restore_token, cfg_addr, BUID_HI(buid), BUID_LO(buid), ret);
-}
-
static int find_existing_ddw_windows(void)
{
+ int len;
struct device_node *pdn;
+ struct direct_window *window;
const struct dynamic_dma_window_prop *direct64;
- const u32 *ddw_extensions;
if (!firmware_has_feature(FW_FEATURE_LPAR))
return 0;
for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
- direct64 = of_get_property(pdn, DIRECT64_PROPNAME, NULL);
+ direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
if (!direct64)
continue;
- /*
- * We need to ensure the IOMMU table is active when we
- * return from the IOMMU setup so that the common code
- * can clear the table or find the holes. To that end,
- * first, remove any existing DDW configuration.
- */
- remove_ddw(pdn);
+ window = kzalloc(sizeof(*window), GFP_KERNEL);
+ if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
+ kfree(window);
+ remove_ddw(pdn);
+ continue;
+ }
- /*
- * Second, if we are running on a new enough level of
- * firmware where the restore API is present, use it to
- * restore the 32-bit window, which was removed in
- * create_ddw.
- * If the API is not present, then create_ddw couldn't
- * have removed the 32-bit window in the first place, so
- * removing the DDW configuration should be sufficient.
- */
- ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions",
- NULL);
- if (ddw_extensions && ddw_extensions[0] > 0)
- __restore_default_window(of_node_to_eeh_dev(pdn),
- ddw_extensions[1]);
+ window->device = pdn;
+ window->prop = direct64;
+ spin_lock(&direct_window_list_lock);
+ list_add(&window->list, &direct_window_list);
+ spin_unlock(&direct_window_list_lock);
}
return 0;
@@ -921,12 +883,6 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
return ret;
}
-static void restore_default_window(struct pci_dev *dev,
- u32 ddw_restore_token)
-{
- __restore_default_window(pci_dev_to_eeh_dev(dev), ddw_restore_token);
-}
-
struct failed_ddw_pdn {
struct device_node *pdn;
struct list_head list;
@@ -954,13 +910,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
u64 dma_addr, max_addr;
struct device_node *dn;
const u32 *uninitialized_var(ddw_avail);
- const u32 *uninitialized_var(ddw_extensions);
- u32 ddw_restore_token = 0;
struct direct_window *window;
struct property *win64;
struct dynamic_dma_window_prop *ddwprop;
- const void *dma_window = NULL;
- unsigned long liobn, offset, size;
struct failed_ddw_pdn *fpdn;
mutex_lock(&direct_window_init_mutex);
@@ -991,42 +943,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
*/
ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
if (!ddw_avail || len < 3 * sizeof(u32))
- goto out_unlock;
-
- /*
- * the extensions property is only required to exist in certain
- * levels of firmware and later
- * the ibm,ddw-extensions property is a list with the first
- * element containing the number of extensions and each
- * subsequent entry is a value corresponding to that extension
- */
- ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions", &len);
- if (ddw_extensions) {
- /*
- * each new defined extension length should be added to
- * the top of the switch so the "earlier" entries also
- * get picked up
- */
- switch (ddw_extensions[0]) {
- /* ibm,reset-pe-dma-windows */
- case 1:
- ddw_restore_token = ddw_extensions[1];
- break;
- }
- }
-
- /*
- * Only remove the existing DMA window if we can restore back to
- * the default state. Removing the existing window maximizes the
- * resources available to firmware for dynamic window creation.
- */
- if (ddw_restore_token) {
- dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
- of_parse_dma_window(pdn, dma_window, &liobn, &offset, &size);
- __remove_ddw(pdn, ddw_avail, liobn);
- }
+ goto out_failed;
- /*
+ /*
* Query if there is a second window of size to map the
* whole partition. Query returns number of windows, largest
* block assigned to PE (partition endpoint), and two bitmasks
@@ -1035,7 +954,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
dn = pci_device_to_OF_node(dev);
ret = query_ddw(dev, ddw_avail, &query);
if (ret != 0)
- goto out_restore_window;
+ goto out_failed;
if (query.windows_available == 0) {
/*
@@ -1044,7 +963,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
* trading in for a larger page size.
*/
dev_dbg(&dev->dev, "no free dynamic windows");
- goto out_restore_window;
+ goto out_failed;
}
if (be32_to_cpu(query.page_size) & 4) {
page_shift = 24; /* 16MB */
@@ -1055,7 +974,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
} else {
dev_dbg(&dev->dev, "no supported direct page size in mask %x",
query.page_size);
- goto out_restore_window;
+ goto out_failed;
}
/* verify the window * number of ptes will map the partition */
/* check largest block * page size > max memory hotplug addr */
@@ -1064,14 +983,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u "
"%llu-sized pages\n", max_addr, query.largest_available_block,
1ULL << page_shift);
- goto out_restore_window;
+ goto out_failed;
}
len = order_base_2(max_addr);
win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
if (!win64) {
dev_info(&dev->dev,
"couldn't allocate property for 64bit dma window\n");
- goto out_restore_window;
+ goto out_failed;
}
win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
@@ -1133,9 +1052,7 @@ out_free_prop:
kfree(win64->value);
kfree(win64);
-out_restore_window:
- if (ddw_restore_token)
- restore_default_window(dev, ddw_restore_token);
+out_failed:
fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
if (!fpdn)
@@ -1193,7 +1110,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
pr_debug(" found DMA window, table: %p\n", pci->iommu_table);
}
- set_iommu_table_base(&dev->dev, pci->iommu_table);
+ set_iommu_table_base_and_group(&dev->dev, pci->iommu_table);
}
static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 4fca3def9db9..b02af9ef3ff6 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -92,7 +92,7 @@ void vpa_init(int cpu)
* PAPR says this feature is SLB-Buffer but firmware never
* reports that. All SPLPAR support SLB shadow buffer.
*/
- addr = __pa(&slb_shadow[cpu]);
+ addr = __pa(paca[cpu].slb_shadow_ptr);
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
ret = register_slb_shadow(hwcpu, addr);
if (ret)
@@ -153,7 +153,8 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
/* Make pHyp happy */
if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU))
- hpte_r &= ~_PAGE_COHERENT;
+ hpte_r &= ~HPTE_R_M;
+
if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
flags |= H_COALESCE_CAND;
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
deleted file mode 100644
index a166e38bd683..000000000000
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * processor_idle - idle state cpuidle driver.
- * Adapted from drivers/idle/intel_idle.c and
- * drivers/acpi/processor_idle.c
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/moduleparam.h>
-#include <linux/cpuidle.h>
-#include <linux/cpu.h>
-#include <linux/notifier.h>
-
-#include <asm/paca.h>
-#include <asm/reg.h>
-#include <asm/machdep.h>
-#include <asm/firmware.h>
-#include <asm/runlatch.h>
-#include <asm/plpar_wrappers.h>
-
-struct cpuidle_driver pseries_idle_driver = {
- .name = "pseries_idle",
- .owner = THIS_MODULE,
-};
-
-#define MAX_IDLE_STATE_COUNT 2
-
-static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
-static struct cpuidle_device __percpu *pseries_cpuidle_devices;
-static struct cpuidle_state *cpuidle_state_table;
-
-static inline void idle_loop_prolog(unsigned long *in_purr)
-{
- *in_purr = mfspr(SPRN_PURR);
- /*
- * Indicate to the HV that we are idle. Now would be
- * a good time to find other work to dispatch.
- */
- get_lppaca()->idle = 1;
-}
-
-static inline void idle_loop_epilog(unsigned long in_purr)
-{
- u64 wait_cycles;
-
- wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
- wait_cycles += mfspr(SPRN_PURR) - in_purr;
- get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
- get_lppaca()->idle = 0;
-}
-
-static int snooze_loop(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- unsigned long in_purr;
- int cpu = dev->cpu;
-
- idle_loop_prolog(&in_purr);
- local_irq_enable();
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- while ((!need_resched()) && cpu_online(cpu)) {
- ppc64_runlatch_off();
- HMT_low();
- HMT_very_low();
- }
-
- HMT_medium();
- clear_thread_flag(TIF_POLLING_NRFLAG);
- smp_mb();
-
- idle_loop_epilog(in_purr);
-
- return index;
-}
-
-static void check_and_cede_processor(void)
-{
- /*
- * Ensure our interrupt state is properly tracked,
- * also checks if no interrupt has occurred while we
- * were soft-disabled
- */
- if (prep_irq_for_idle()) {
- cede_processor();
-#ifdef CONFIG_TRACE_IRQFLAGS
- /* Ensure that H_CEDE returns with IRQs on */
- if (WARN_ON(!(mfmsr() & MSR_EE)))
- __hard_irq_enable();
-#endif
- }
-}
-
-static int dedicated_cede_loop(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- unsigned long in_purr;
-
- idle_loop_prolog(&in_purr);
- get_lppaca()->donate_dedicated_cpu = 1;
-
- ppc64_runlatch_off();
- HMT_medium();
- check_and_cede_processor();
-
- get_lppaca()->donate_dedicated_cpu = 0;
-
- idle_loop_epilog(in_purr);
-
- return index;
-}
-
-static int shared_cede_loop(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- unsigned long in_purr;
-
- idle_loop_prolog(&in_purr);
-
- /*
- * Yield the processor to the hypervisor. We return if
- * an external interrupt occurs (which are driven prior
- * to returning here) or if a prod occurs from another
- * processor. When returning here, external interrupts
- * are enabled.
- */
- check_and_cede_processor();
-
- idle_loop_epilog(in_purr);
-
- return index;
-}
-
-/*
- * States for dedicated partition case.
- */
-static struct cpuidle_state dedicated_states[MAX_IDLE_STATE_COUNT] = {
- { /* Snooze */
- .name = "snooze",
- .desc = "snooze",
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .exit_latency = 0,
- .target_residency = 0,
- .enter = &snooze_loop },
- { /* CEDE */
- .name = "CEDE",
- .desc = "CEDE",
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .exit_latency = 10,
- .target_residency = 100,
- .enter = &dedicated_cede_loop },
-};
-
-/*
- * States for shared partition case.
- */
-static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = {
- { /* Shared Cede */
- .name = "Shared Cede",
- .desc = "Shared Cede",
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .exit_latency = 0,
- .target_residency = 0,
- .enter = &shared_cede_loop },
-};
-
-void update_smt_snooze_delay(int cpu, int residency)
-{
- struct cpuidle_driver *drv = cpuidle_get_driver();
- struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
-
- if (cpuidle_state_table != dedicated_states)
- return;
-
- if (residency < 0) {
- /* Disable the Nap state on that cpu */
- if (dev)
- dev->states_usage[1].disable = 1;
- } else
- if (drv)
- drv->states[1].target_residency = residency;
-}
-
-static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n,
- unsigned long action, void *hcpu)
-{
- int hotcpu = (unsigned long)hcpu;
- struct cpuidle_device *dev =
- per_cpu_ptr(pseries_cpuidle_devices, hotcpu);
-
- if (dev && cpuidle_get_driver()) {
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- cpuidle_pause_and_lock();
- cpuidle_enable_device(dev);
- cpuidle_resume_and_unlock();
- break;
-
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- cpuidle_pause_and_lock();
- cpuidle_disable_device(dev);
- cpuidle_resume_and_unlock();
- break;
-
- default:
- return NOTIFY_DONE;
- }
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block setup_hotplug_notifier = {
- .notifier_call = pseries_cpuidle_add_cpu_notifier,
-};
-
-/*
- * pseries_cpuidle_driver_init()
- */
-static int pseries_cpuidle_driver_init(void)
-{
- int idle_state;
- struct cpuidle_driver *drv = &pseries_idle_driver;
-
- drv->state_count = 0;
-
- for (idle_state = 0; idle_state < MAX_IDLE_STATE_COUNT; ++idle_state) {
-
- if (idle_state > max_idle_state)
- break;
-
- /* is the state not enabled? */
- if (cpuidle_state_table[idle_state].enter == NULL)
- continue;
-
- drv->states[drv->state_count] = /* structure copy */
- cpuidle_state_table[idle_state];
-
- drv->state_count += 1;
- }
-
- return 0;
-}
-
-/* pseries_idle_devices_uninit(void)
- * unregister cpuidle devices and de-allocate memory
- */
-static void pseries_idle_devices_uninit(void)
-{
- int i;
- struct cpuidle_device *dev;
-
- for_each_possible_cpu(i) {
- dev = per_cpu_ptr(pseries_cpuidle_devices, i);
- cpuidle_unregister_device(dev);
- }
-
- free_percpu(pseries_cpuidle_devices);
- return;
-}
-
-/* pseries_idle_devices_init()
- * allocate, initialize and register cpuidle device
- */
-static int pseries_idle_devices_init(void)
-{
- int i;
- struct cpuidle_driver *drv = &pseries_idle_driver;
- struct cpuidle_device *dev;
-
- pseries_cpuidle_devices = alloc_percpu(struct cpuidle_device);
- if (pseries_cpuidle_devices == NULL)
- return -ENOMEM;
-
- for_each_possible_cpu(i) {
- dev = per_cpu_ptr(pseries_cpuidle_devices, i);
- dev->state_count = drv->state_count;
- dev->cpu = i;
- if (cpuidle_register_device(dev)) {
- printk(KERN_DEBUG \
- "cpuidle_register_device %d failed!\n", i);
- return -EIO;
- }
- }
-
- return 0;
-}
-
-/*
- * pseries_idle_probe()
- * Choose state table for shared versus dedicated partition
- */
-static int pseries_idle_probe(void)
-{
-
- if (!firmware_has_feature(FW_FEATURE_SPLPAR))
- return -ENODEV;
-
- if (cpuidle_disable != IDLE_NO_OVERRIDE)
- return -ENODEV;
-
- if (max_idle_state == 0) {
- printk(KERN_DEBUG "pseries processor idle disabled.\n");
- return -EPERM;
- }
-
- if (lppaca_shared_proc(get_lppaca()))
- cpuidle_state_table = shared_states;
- else
- cpuidle_state_table = dedicated_states;
-
- return 0;
-}
-
-static int __init pseries_processor_idle_init(void)
-{
- int retval;
-
- retval = pseries_idle_probe();
- if (retval)
- return retval;
-
- pseries_cpuidle_driver_init();
- retval = cpuidle_register_driver(&pseries_idle_driver);
- if (retval) {
- printk(KERN_DEBUG "Registration of pseries driver failed.\n");
- return retval;
- }
-
- retval = pseries_idle_devices_init();
- if (retval) {
- pseries_idle_devices_uninit();
- cpuidle_unregister_driver(&pseries_idle_driver);
- return retval;
- }
-
- register_cpu_notifier(&setup_hotplug_notifier);
- printk(KERN_DEBUG "pseries_idle_driver registered\n");
-
- return 0;
-}
-
-static void __exit pseries_processor_idle_exit(void)
-{
-
- unregister_cpu_notifier(&setup_hotplug_notifier);
- pseries_idle_devices_uninit();
- cpuidle_unregister_driver(&pseries_idle_driver);
-
- return;
-}
-
-module_init(pseries_processor_idle_init);
-module_exit(pseries_processor_idle_exit);
-
-MODULE_AUTHOR("Deepthi Dharwar <deepthi@linux.vnet.ibm.com>");
-MODULE_DESCRIPTION("Cpuidle driver for POWER");
-MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 6f76ae417f47..8e639d7cbda7 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -72,7 +72,7 @@
int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
-unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT);
+unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
EXPORT_SYMBOL(CMO_PageSize);
int fwnmi_active; /* TRUE if an FWNMI handler is present */
@@ -569,7 +569,7 @@ void pSeries_cmo_feature_init(void)
{
char *ptr, *key, *value, *end;
int call_status;
- int page_order = IOMMU_PAGE_SHIFT;
+ int page_order = IOMMU_PAGE_SHIFT_4K;
pr_debug(" -> fw_cmo_feature_init()\n");
spin_lock(&rtas_data_buf_lock);