author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/acpi/events
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/acpi/events')
-rw-r--r--  drivers/acpi/events/Makefile    |    9
-rw-r--r--  drivers/acpi/events/evevent.c   |  297
-rw-r--r--  drivers/acpi/events/evgpe.c     |  756
-rw-r--r--  drivers/acpi/events/evgpeblk.c  | 1141
-rw-r--r--  drivers/acpi/events/evmisc.c    |  588
-rw-r--r--  drivers/acpi/events/evregion.c  | 1067
-rw-r--r--  drivers/acpi/events/evrgnini.c  |  580
-rw-r--r--  drivers/acpi/events/evsci.c     |  199
-rw-r--r--  drivers/acpi/events/evxface.c   |  834
-rw-r--r--  drivers/acpi/events/evxfevnt.c  |  778
-rw-r--r--  drivers/acpi/events/evxfregn.c  |  247
11 files changed, 6496 insertions(+), 0 deletions(-)
diff --git a/drivers/acpi/events/Makefile b/drivers/acpi/events/Makefile new file mode 100644 index 000000000000..d29f2ee449cc --- /dev/null +++ b/drivers/acpi/events/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for all Linux ACPI interpreter subdirectories +# + +obj-y := evevent.o evregion.o evsci.o evxfevnt.o \ + evmisc.o evrgnini.o evxface.o evxfregn.o \ + evgpe.o evgpeblk.o + +EXTRA_CFLAGS += $(ACPI_CFLAGS) diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c new file mode 100644 index 000000000000..2a213604ae51 --- /dev/null +++ b/drivers/acpi/events/evevent.c @@ -0,0 +1,297 @@ +/****************************************************************************** + * + * Module Name: evevent - Fixed Event handling and dispatch + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2005, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include <acpi/acpi.h> +#include <acpi/acevents.h> + +#define _COMPONENT ACPI_EVENTS + ACPI_MODULE_NAME ("evevent") + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_initialize_events + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Initialize global data structures for events. + * + ******************************************************************************/ + +acpi_status +acpi_ev_initialize_events ( + void) +{ + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_initialize_events"); + + + /* Make sure we have ACPI tables */ + + if (!acpi_gbl_DSDT) { + ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "No ACPI tables present!\n")); + return_ACPI_STATUS (AE_NO_ACPI_TABLES); + } + + /* + * Initialize the Fixed and General Purpose Events. 
This is + * done prior to enabling SCIs to prevent interrupts from + * occurring before handers are installed. + */ + status = acpi_ev_fixed_event_initialize (); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (( + "Unable to initialize fixed events, %s\n", + acpi_format_exception (status))); + return_ACPI_STATUS (status); + } + + status = acpi_ev_gpe_initialize (); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (( + "Unable to initialize general purpose events, %s\n", + acpi_format_exception (status))); + return_ACPI_STATUS (status); + } + + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_install_xrupt_handlers + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Install interrupt handlers for the SCI and Global Lock + * + ******************************************************************************/ + +acpi_status +acpi_ev_install_xrupt_handlers ( + void) +{ + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_install_xrupt_handlers"); + + + /* Install the SCI handler */ + + status = acpi_ev_install_sci_handler (); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (( + "Unable to install System Control Interrupt Handler, %s\n", + acpi_format_exception (status))); + return_ACPI_STATUS (status); + } + + /* Install the handler for the Global Lock */ + + status = acpi_ev_init_global_lock_handler (); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (( + "Unable to initialize Global Lock handler, %s\n", + acpi_format_exception (status))); + return_ACPI_STATUS (status); + } + + acpi_gbl_events_initialized = TRUE; + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_fixed_event_initialize + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Install the fixed event handlers and enable the fixed events. + * + ******************************************************************************/ + +acpi_status +acpi_ev_fixed_event_initialize ( + void) +{ + acpi_native_uint i; + acpi_status status; + + + /* + * Initialize the structure that keeps track of fixed event handlers + * and enable the fixed events. + */ + for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { + acpi_gbl_fixed_event_handlers[i].handler = NULL; + acpi_gbl_fixed_event_handlers[i].context = NULL; + + /* Enable the fixed event */ + + if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) { + status = acpi_set_register (acpi_gbl_fixed_event_info[i].enable_register_id, + 0, ACPI_MTX_LOCK); + if (ACPI_FAILURE (status)) { + return (status); + } + } + } + + return (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_fixed_event_detect + * + * PARAMETERS: None + * + * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED + * + * DESCRIPTION: Checks the PM status register for fixed events + * + ******************************************************************************/ + +u32 +acpi_ev_fixed_event_detect ( + void) +{ + u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; + u32 fixed_status; + u32 fixed_enable; + acpi_native_uint i; + + + ACPI_FUNCTION_NAME ("ev_fixed_event_detect"); + + + /* + * Read the fixed feature status and enable registers, as all the cases + * depend on their values. Ignore errors here. 
+ */ + (void) acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1_STATUS, &fixed_status); + (void) acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1_ENABLE, &fixed_enable); + + ACPI_DEBUG_PRINT ((ACPI_DB_INTERRUPTS, + "Fixed Event Block: Enable %08X Status %08X\n", + fixed_enable, fixed_status)); + + /* + * Check for all possible Fixed Events and dispatch those that are active + */ + for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { + /* Both the status and enable bits must be on for this event */ + + if ((fixed_status & acpi_gbl_fixed_event_info[i].status_bit_mask) && + (fixed_enable & acpi_gbl_fixed_event_info[i].enable_bit_mask)) { + /* Found an active (signalled) event */ + + int_status |= acpi_ev_fixed_event_dispatch ((u32) i); + } + } + + return (int_status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_fixed_event_dispatch + * + * PARAMETERS: Event - Event type + * + * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED + * + * DESCRIPTION: Clears the status bit for the requested event, calls the + * handler that previously registered for the event. + * + ******************************************************************************/ + +u32 +acpi_ev_fixed_event_dispatch ( + u32 event) +{ + + + ACPI_FUNCTION_ENTRY (); + + + /* Clear the status bit */ + + (void) acpi_set_register (acpi_gbl_fixed_event_info[event].status_register_id, + 1, ACPI_MTX_DO_NOT_LOCK); + + /* + * Make sure we've got a handler. If not, report an error. + * The event is disabled to prevent further interrupts. + */ + if (NULL == acpi_gbl_fixed_event_handlers[event].handler) { + (void) acpi_set_register (acpi_gbl_fixed_event_info[event].enable_register_id, + 0, ACPI_MTX_DO_NOT_LOCK); + + ACPI_REPORT_ERROR ( + ("No installed handler for fixed event [%08X]\n", + event)); + + return (ACPI_INTERRUPT_NOT_HANDLED); + } + + /* Invoke the Fixed Event handler */ + + return ((acpi_gbl_fixed_event_handlers[event].handler)( + acpi_gbl_fixed_event_handlers[event].context)); +} + + diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c new file mode 100644 index 000000000000..118d72ac7c76 --- /dev/null +++ b/drivers/acpi/events/evgpe.c @@ -0,0 +1,756 @@ +/****************************************************************************** + * + * Module Name: evgpe - General Purpose Event handling and dispatch + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2005, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include <acpi/acpi.h> +#include <acpi/acevents.h> +#include <acpi/acnamesp.h> + +#define _COMPONENT ACPI_EVENTS + ACPI_MODULE_NAME ("evgpe") + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_set_gpe_type + * + * PARAMETERS: gpe_event_info - GPE to set + * Type - New type + * + * RETURN: Status + * + * DESCRIPTION: Sets the new type for the GPE (wake, run, or wake/run) + * + ******************************************************************************/ + +acpi_status +acpi_ev_set_gpe_type ( + struct acpi_gpe_event_info *gpe_event_info, + u8 type) +{ + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_set_gpe_type"); + + + /* Validate type and update register enable masks */ + + switch (type) { + case ACPI_GPE_TYPE_WAKE: + case ACPI_GPE_TYPE_RUNTIME: + case ACPI_GPE_TYPE_WAKE_RUN: + break; + + default: + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + /* Disable the GPE if currently enabled */ + + status = acpi_ev_disable_gpe (gpe_event_info); + + /* Type was validated above */ + + gpe_event_info->flags &= ~ACPI_GPE_TYPE_MASK; /* Clear type bits */ + gpe_event_info->flags |= type; /* Insert type */ + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_update_gpe_enable_masks + * + * PARAMETERS: gpe_event_info - GPE to update + * Type - What to do: ACPI_GPE_DISABLE or + * ACPI_GPE_ENABLE + * + * RETURN: Status + * + * DESCRIPTION: Updates GPE register enable masks based on the GPE type + * + ******************************************************************************/ + +acpi_status +acpi_ev_update_gpe_enable_masks ( + struct acpi_gpe_event_info *gpe_event_info, + u8 type) +{ + struct acpi_gpe_register_info *gpe_register_info; + u8 register_bit; + + + ACPI_FUNCTION_TRACE ("ev_update_gpe_enable_masks"); + + + gpe_register_info = gpe_event_info->register_info; + if (!gpe_register_info) { + return_ACPI_STATUS (AE_NOT_EXIST); + } + register_bit = gpe_event_info->register_bit; + + /* 1) Disable case. Simply clear all enable bits */ + + if (type == ACPI_GPE_DISABLE) { + ACPI_CLEAR_BIT (gpe_register_info->enable_for_wake, register_bit); + ACPI_CLEAR_BIT (gpe_register_info->enable_for_run, register_bit); + return_ACPI_STATUS (AE_OK); + } + + /* 2) Enable case. 
Set/Clear the appropriate enable bits */ + + switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { + case ACPI_GPE_TYPE_WAKE: + ACPI_SET_BIT (gpe_register_info->enable_for_wake, register_bit); + ACPI_CLEAR_BIT (gpe_register_info->enable_for_run, register_bit); + break; + + case ACPI_GPE_TYPE_RUNTIME: + ACPI_CLEAR_BIT (gpe_register_info->enable_for_wake, register_bit); + ACPI_SET_BIT (gpe_register_info->enable_for_run, register_bit); + break; + + case ACPI_GPE_TYPE_WAKE_RUN: + ACPI_SET_BIT (gpe_register_info->enable_for_wake, register_bit); + ACPI_SET_BIT (gpe_register_info->enable_for_run, register_bit); + break; + + default: + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + return_ACPI_STATUS (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_enable_gpe + * + * PARAMETERS: gpe_event_info - GPE to enable + * write_to_hardware - Enable now, or just mark data structs + * (WAKE GPEs should be deferred) + * + * RETURN: Status + * + * DESCRIPTION: Enable a GPE based on the GPE type + * + ******************************************************************************/ + +acpi_status +acpi_ev_enable_gpe ( + struct acpi_gpe_event_info *gpe_event_info, + u8 write_to_hardware) +{ + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_enable_gpe"); + + + /* Make sure HW enable masks are updated */ + + status = acpi_ev_update_gpe_enable_masks (gpe_event_info, ACPI_GPE_ENABLE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Mark wake-enabled or HW enable, or both */ + + switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { + case ACPI_GPE_TYPE_WAKE: + + ACPI_SET_BIT (gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); + break; + + case ACPI_GPE_TYPE_WAKE_RUN: + + ACPI_SET_BIT (gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); + + /*lint -fallthrough */ + + case ACPI_GPE_TYPE_RUNTIME: + + ACPI_SET_BIT (gpe_event_info->flags, ACPI_GPE_RUN_ENABLED); + + if (write_to_hardware) { + /* Clear the GPE (of stale events), then enable it */ + + status = acpi_hw_clear_gpe (gpe_event_info); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Enable the requested runtime GPE */ + + status = acpi_hw_write_gpe_enable_reg (gpe_event_info); + } + break; + + default: + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + return_ACPI_STATUS (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_disable_gpe + * + * PARAMETERS: gpe_event_info - GPE to disable + * + * RETURN: Status + * + * DESCRIPTION: Disable a GPE based on the GPE type + * + ******************************************************************************/ + +acpi_status +acpi_ev_disable_gpe ( + struct acpi_gpe_event_info *gpe_event_info) +{ + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_disable_gpe"); + + + if (!(gpe_event_info->flags & ACPI_GPE_ENABLE_MASK)) { + return_ACPI_STATUS (AE_OK); + } + + /* Make sure HW enable masks are updated */ + + status = acpi_ev_update_gpe_enable_masks (gpe_event_info, ACPI_GPE_DISABLE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Mark wake-disabled or HW disable, or both */ + + switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { + case ACPI_GPE_TYPE_WAKE: + ACPI_CLEAR_BIT (gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); + break; + + case ACPI_GPE_TYPE_WAKE_RUN: + ACPI_CLEAR_BIT (gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); + + /*lint -fallthrough */ + + case ACPI_GPE_TYPE_RUNTIME: + + /* Disable the requested runtime 
GPE */ + + ACPI_CLEAR_BIT (gpe_event_info->flags, ACPI_GPE_RUN_ENABLED); + status = acpi_hw_write_gpe_enable_reg (gpe_event_info); + break; + + default: + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + return_ACPI_STATUS (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_get_gpe_event_info + * + * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 + * gpe_number - Raw GPE number + * + * RETURN: A GPE event_info struct. NULL if not a valid GPE + * + * DESCRIPTION: Returns the event_info struct associated with this GPE. + * Validates the gpe_block and the gpe_number + * + * Should be called only when the GPE lists are semaphore locked + * and not subject to change. + * + ******************************************************************************/ + +struct acpi_gpe_event_info * +acpi_ev_get_gpe_event_info ( + acpi_handle gpe_device, + u32 gpe_number) +{ + union acpi_operand_object *obj_desc; + struct acpi_gpe_block_info *gpe_block; + acpi_native_uint i; + + + ACPI_FUNCTION_ENTRY (); + + + /* A NULL gpe_block means use the FADT-defined GPE block(s) */ + + if (!gpe_device) { + /* Examine GPE Block 0 and 1 (These blocks are permanent) */ + + for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) { + gpe_block = acpi_gbl_gpe_fadt_blocks[i]; + if (gpe_block) { + if ((gpe_number >= gpe_block->block_base_number) && + (gpe_number < gpe_block->block_base_number + (gpe_block->register_count * 8))) { + return (&gpe_block->event_info[gpe_number - gpe_block->block_base_number]); + } + } + } + + /* The gpe_number was not in the range of either FADT GPE block */ + + return (NULL); + } + + /* A Non-NULL gpe_device means this is a GPE Block Device */ + + obj_desc = acpi_ns_get_attached_object ((struct acpi_namespace_node *) gpe_device); + if (!obj_desc || + !obj_desc->device.gpe_block) { + return (NULL); + } + + gpe_block = obj_desc->device.gpe_block; + + if ((gpe_number >= gpe_block->block_base_number) && + (gpe_number < gpe_block->block_base_number + (gpe_block->register_count * 8))) { + return (&gpe_block->event_info[gpe_number - gpe_block->block_base_number]); + } + + return (NULL); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_gpe_detect + * + * PARAMETERS: gpe_xrupt_list - Interrupt block for this interrupt. + * Can have multiple GPE blocks attached. + * + * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED + * + * DESCRIPTION: Detect if any GP events have occurred. This function is + * executed at interrupt level. + * + ******************************************************************************/ + +u32 +acpi_ev_gpe_detect ( + struct acpi_gpe_xrupt_info *gpe_xrupt_list) +{ + u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; + u8 enabled_status_byte; + struct acpi_gpe_register_info *gpe_register_info; + u32 status_reg; + u32 enable_reg; + acpi_status status; + struct acpi_gpe_block_info *gpe_block; + acpi_native_uint i; + acpi_native_uint j; + + + ACPI_FUNCTION_NAME ("ev_gpe_detect"); + + /* Check for the case where there are no GPEs */ + + if (!gpe_xrupt_list) { + return (int_status); + } + + /* Examine all GPE blocks attached to this interrupt level */ + + acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_ISR); + gpe_block = gpe_xrupt_list->gpe_block_list_head; + while (gpe_block) { + /* + * Read all of the 8-bit GPE status and enable registers + * in this GPE block, saving all of them. + * Find all currently active GP events. 
+ */ + for (i = 0; i < gpe_block->register_count; i++) { + /* Get the next status/enable pair */ + + gpe_register_info = &gpe_block->register_info[i]; + + /* Read the Status Register */ + + status = acpi_hw_low_level_read (ACPI_GPE_REGISTER_WIDTH, &status_reg, + &gpe_register_info->status_address); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } + + /* Read the Enable Register */ + + status = acpi_hw_low_level_read (ACPI_GPE_REGISTER_WIDTH, &enable_reg, + &gpe_register_info->enable_address); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } + + ACPI_DEBUG_PRINT ((ACPI_DB_INTERRUPTS, + "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n", + gpe_register_info->base_gpe_number, status_reg, enable_reg)); + + /* First check if there is anything active at all in this register */ + + enabled_status_byte = (u8) (status_reg & enable_reg); + if (!enabled_status_byte) { + /* No active GPEs in this register, move on */ + + continue; + } + + /* Now look at the individual GPEs in this byte register */ + + for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { + /* Examine one GPE bit */ + + if (enabled_status_byte & acpi_gbl_decode_to8bit[j]) { + /* + * Found an active GPE. Dispatch the event to a handler + * or method. + */ + int_status |= acpi_ev_gpe_dispatch ( + &gpe_block->event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j], + (u32) j + gpe_register_info->base_gpe_number); + } + } + } + + gpe_block = gpe_block->next; + } + +unlock_and_exit: + + acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_ISR); + return (int_status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_asynch_execute_gpe_method + * + * PARAMETERS: Context (gpe_event_info) - Info for this GPE + * + * RETURN: None + * + * DESCRIPTION: Perform the actual execution of a GPE control method. This + * function is called from an invocation of acpi_os_queue_for_execution + * (and therefore does NOT execute at interrupt level) so that + * the control method itself is not executed in the context of + * an interrupt handler. + * + ******************************************************************************/ + +static void ACPI_SYSTEM_XFACE +acpi_ev_asynch_execute_gpe_method ( + void *context) +{ + struct acpi_gpe_event_info *gpe_event_info = (void *) context; + u32 gpe_number = 0; + acpi_status status; + struct acpi_gpe_event_info local_gpe_event_info; + struct acpi_parameter_info info; + + + ACPI_FUNCTION_TRACE ("ev_asynch_execute_gpe_method"); + + + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_VOID; + } + + /* Must revalidate the gpe_number/gpe_block */ + + if (!acpi_ev_valid_gpe_event (gpe_event_info)) { + status = acpi_ut_release_mutex (ACPI_MTX_EVENTS); + return_VOID; + } + + /* Set the GPE flags for return to enabled state */ + + (void) acpi_ev_enable_gpe (gpe_event_info, FALSE); + + /* + * Take a snapshot of the GPE info for this level - we copy the + * info to prevent a race condition with remove_handler/remove_block. 
+ */ + ACPI_MEMCPY (&local_gpe_event_info, gpe_event_info, sizeof (struct acpi_gpe_event_info)); + + status = acpi_ut_release_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_VOID; + } + + /* + * Must check for control method type dispatch one more + * time to avoid race with ev_gpe_install_handler + */ + if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_METHOD) { + /* + * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx + * control method that corresponds to this GPE + */ + info.node = local_gpe_event_info.dispatch.method_node; + info.parameters = ACPI_CAST_PTR (union acpi_operand_object *, gpe_event_info); + info.parameter_type = ACPI_PARAM_GPE; + + status = acpi_ns_evaluate_by_handle (&info); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (( + "%s while evaluating method [%4.4s] for GPE[%2X]\n", + acpi_format_exception (status), + acpi_ut_get_node_name (local_gpe_event_info.dispatch.method_node), + gpe_number)); + } + } + + if ((local_gpe_event_info.flags & ACPI_GPE_XRUPT_TYPE_MASK) == ACPI_GPE_LEVEL_TRIGGERED) { + /* + * GPE is level-triggered, we clear the GPE status bit after + * handling the event. + */ + status = acpi_hw_clear_gpe (&local_gpe_event_info); + if (ACPI_FAILURE (status)) { + return_VOID; + } + } + + /* Enable this GPE */ + + (void) acpi_hw_write_gpe_enable_reg (&local_gpe_event_info); + return_VOID; +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_gpe_dispatch + * + * PARAMETERS: gpe_event_info - info for this GPE + * gpe_number - Number relative to the parent GPE block + * + * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED + * + * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC) + * or method (e.g. _Lxx/_Exx) handler. + * + * This function executes at interrupt level. + * + ******************************************************************************/ + +u32 +acpi_ev_gpe_dispatch ( + struct acpi_gpe_event_info *gpe_event_info, + u32 gpe_number) +{ + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_gpe_dispatch"); + + + /* + * If edge-triggered, clear the GPE status bit now. Note that + * level-triggered events are cleared after the GPE is serviced. + */ + if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == ACPI_GPE_EDGE_TRIGGERED) { + status = acpi_hw_clear_gpe (gpe_event_info); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("acpi_ev_gpe_dispatch: %s, Unable to clear GPE[%2X]\n", + acpi_format_exception (status), gpe_number)); + return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); + } + } + + /* Save current system state */ + + if (acpi_gbl_system_awake_and_running) { + ACPI_SET_BIT (gpe_event_info->flags, ACPI_GPE_SYSTEM_RUNNING); + } + else { + ACPI_CLEAR_BIT (gpe_event_info->flags, ACPI_GPE_SYSTEM_RUNNING); + } + + /* + * Dispatch the GPE to either an installed handler, or the control + * method associated with this GPE (_Lxx or _Exx). + * If a handler exists, we invoke it and do not attempt to run the method. + * If there is neither a handler nor a method, we disable the level to + * prevent further events from coming in here. + */ + switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { + case ACPI_GPE_DISPATCH_HANDLER: + + /* + * Invoke the installed handler (at interrupt level) + * Ignore return status for now. TBD: leave GPE disabled on error? 
+ */ + (void) gpe_event_info->dispatch.handler->address ( + gpe_event_info->dispatch.handler->context); + + /* It is now safe to clear level-triggered events. */ + + if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == ACPI_GPE_LEVEL_TRIGGERED) { + status = acpi_hw_clear_gpe (gpe_event_info); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (( + "acpi_ev_gpe_dispatch: %s, Unable to clear GPE[%2X]\n", + acpi_format_exception (status), gpe_number)); + return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); + } + } + break; + + case ACPI_GPE_DISPATCH_METHOD: + + /* + * Disable GPE, so it doesn't keep firing before the method has a + * chance to run. + */ + status = acpi_ev_disable_gpe (gpe_event_info); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (( + "acpi_ev_gpe_dispatch: %s, Unable to disable GPE[%2X]\n", + acpi_format_exception (status), gpe_number)); + return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); + } + + /* + * Execute the method associated with the GPE + * NOTE: Level-triggered GPEs are cleared after the method completes. + */ + status = acpi_os_queue_for_execution (OSD_PRIORITY_GPE, + acpi_ev_asynch_execute_gpe_method, gpe_event_info); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (( + "acpi_ev_gpe_dispatch: %s, Unable to queue handler for GPE[%2X] - event disabled\n", + acpi_format_exception (status), gpe_number)); + } + break; + + default: + + /* No handler or method to run! */ + + ACPI_REPORT_ERROR (( + "acpi_ev_gpe_dispatch: No handler or method for GPE[%2X], disabling event\n", + gpe_number)); + + /* + * Disable the GPE. The GPE will remain disabled until the ACPI + * Core Subsystem is restarted, or a handler is installed. + */ + status = acpi_ev_disable_gpe (gpe_event_info); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (( + "acpi_ev_gpe_dispatch: %s, Unable to disable GPE[%2X]\n", + acpi_format_exception (status), gpe_number)); + return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); + } + break; + } + + return_VALUE (ACPI_INTERRUPT_HANDLED); +} + + +#ifdef ACPI_GPE_NOTIFY_CHECK + +/******************************************************************************* + * TBD: NOT USED, PROTOTYPE ONLY AND WILL PROBABLY BE REMOVED + * + * FUNCTION: acpi_ev_check_for_wake_only_gpe + * + * PARAMETERS: gpe_event_info - info for this GPE + * + * RETURN: Status + * + * DESCRIPTION: Determine if a a GPE is "wake-only". + * + * Called from Notify() code in interpreter when a "device_wake" + * Notify comes in. + * + ******************************************************************************/ + +acpi_status +acpi_ev_check_for_wake_only_gpe ( + struct acpi_gpe_event_info *gpe_event_info) +{ + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_check_for_wake_only_gpe"); + + + if ((gpe_event_info) && /* Only >0 for _Lxx/_Exx */ + ((gpe_event_info->flags & ACPI_GPE_SYSTEM_MASK) == ACPI_GPE_SYSTEM_RUNNING)) /* System state at GPE time */ { + /* This must be a wake-only GPE, disable it */ + + status = acpi_ev_disable_gpe (gpe_event_info); + + /* Set GPE to wake-only. 
Do not change wake disabled/enabled status */ + + acpi_ev_set_gpe_type (gpe_event_info, ACPI_GPE_TYPE_WAKE); + + ACPI_REPORT_INFO (("GPE %p was updated from wake/run to wake-only\n", + gpe_event_info)); + + /* This was a wake-only GPE */ + + return_ACPI_STATUS (AE_WAKE_ONLY_GPE); + } + + return_ACPI_STATUS (AE_OK); +} +#endif + + diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c new file mode 100644 index 000000000000..00d981f53c6a --- /dev/null +++ b/drivers/acpi/events/evgpeblk.c @@ -0,0 +1,1141 @@ +/****************************************************************************** + * + * Module Name: evgpeblk - GPE block creation and initialization. + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2005, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include <acpi/acpi.h> +#include <acpi/acevents.h> +#include <acpi/acnamesp.h> + +#define _COMPONENT ACPI_EVENTS + ACPI_MODULE_NAME ("evgpeblk") + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_valid_gpe_event + * + * PARAMETERS: gpe_event_info - Info for this GPE + * + * RETURN: TRUE if the gpe_event is valid + * + * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL. + * Should be called only when the GPE lists are semaphore locked + * and not subject to change. 
+ * + ******************************************************************************/ + +u8 +acpi_ev_valid_gpe_event ( + struct acpi_gpe_event_info *gpe_event_info) +{ + struct acpi_gpe_xrupt_info *gpe_xrupt_block; + struct acpi_gpe_block_info *gpe_block; + + + ACPI_FUNCTION_ENTRY (); + + + /* No need for spin lock since we are not changing any list elements */ + + /* Walk the GPE interrupt levels */ + + gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head; + while (gpe_xrupt_block) { + gpe_block = gpe_xrupt_block->gpe_block_list_head; + + /* Walk the GPE blocks on this interrupt level */ + + while (gpe_block) { + if ((&gpe_block->event_info[0] <= gpe_event_info) && + (&gpe_block->event_info[((acpi_size) gpe_block->register_count) * 8] > gpe_event_info)) { + return (TRUE); + } + + gpe_block = gpe_block->next; + } + + gpe_xrupt_block = gpe_xrupt_block->next; + } + + return (FALSE); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_walk_gpe_list + * + * PARAMETERS: gpe_walk_callback - Routine called for each GPE block + * Flags - ACPI_NOT_ISR or ACPI_ISR + * + * RETURN: Status + * + * DESCRIPTION: Walk the GPE lists. + * + ******************************************************************************/ + +acpi_status +acpi_ev_walk_gpe_list ( + ACPI_GPE_CALLBACK gpe_walk_callback, + u32 flags) +{ + struct acpi_gpe_block_info *gpe_block; + struct acpi_gpe_xrupt_info *gpe_xrupt_info; + acpi_status status = AE_OK; + + + ACPI_FUNCTION_TRACE ("ev_walk_gpe_list"); + + + acpi_os_acquire_lock (acpi_gbl_gpe_lock, flags); + + /* Walk the interrupt level descriptor list */ + + gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head; + while (gpe_xrupt_info) { + /* Walk all Gpe Blocks attached to this interrupt level */ + + gpe_block = gpe_xrupt_info->gpe_block_list_head; + while (gpe_block) { + /* One callback per GPE block */ + + status = gpe_walk_callback (gpe_xrupt_info, gpe_block); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } + + gpe_block = gpe_block->next; + } + + gpe_xrupt_info = gpe_xrupt_info->next; + } + +unlock_and_exit: + acpi_os_release_lock (acpi_gbl_gpe_lock, flags); + return_ACPI_STATUS (status); +} + + +/****************************************************************************** + * + * FUNCTION: acpi_ev_delete_gpe_handlers + * + * PARAMETERS: gpe_xrupt_info - GPE Interrupt info + * gpe_block - Gpe Block info + * + * RETURN: Status + * + * DESCRIPTION: Delete all Handler objects found in the GPE data structs. + * Used only prior to termination. 
+ * + ******************************************************************************/ + +acpi_status +acpi_ev_delete_gpe_handlers ( + struct acpi_gpe_xrupt_info *gpe_xrupt_info, + struct acpi_gpe_block_info *gpe_block) +{ + struct acpi_gpe_event_info *gpe_event_info; + acpi_native_uint i; + acpi_native_uint j; + + + ACPI_FUNCTION_TRACE ("ev_delete_gpe_handlers"); + + + /* Examine each GPE Register within the block */ + + for (i = 0; i < gpe_block->register_count; i++) { + /* Now look at the individual GPEs in this byte register */ + + for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { + gpe_event_info = &gpe_block->event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j]; + + if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_HANDLER) { + ACPI_MEM_FREE (gpe_event_info->dispatch.handler); + gpe_event_info->dispatch.handler = NULL; + gpe_event_info->flags &= ~ACPI_GPE_DISPATCH_MASK; + } + } + } + + return_ACPI_STATUS (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_save_method_info + * + * PARAMETERS: Callback from walk_namespace + * + * RETURN: Status + * + * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a + * control method under the _GPE portion of the namespace. + * Extract the name and GPE type from the object, saving this + * information for quick lookup during GPE dispatch + * + * The name of each GPE control method is of the form: + * "_Lxx" or "_Exx" + * Where: + * L - means that the GPE is level triggered + * E - means that the GPE is edge triggered + * xx - is the GPE number [in HEX] + * + ******************************************************************************/ + +static acpi_status +acpi_ev_save_method_info ( + acpi_handle obj_handle, + u32 level, + void *obj_desc, + void **return_value) +{ + struct acpi_gpe_block_info *gpe_block = (void *) obj_desc; + struct acpi_gpe_event_info *gpe_event_info; + u32 gpe_number; + char name[ACPI_NAME_SIZE + 1]; + u8 type; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_save_method_info"); + + + /* + * _Lxx and _Exx GPE method support + * + * 1) Extract the name from the object and convert to a string + */ + ACPI_MOVE_32_TO_32 (name, + &((struct acpi_namespace_node *) obj_handle)->name.integer); + name[ACPI_NAME_SIZE] = 0; + + /* + * 2) Edge/Level determination is based on the 2nd character + * of the method name + * + * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE + * if a _PRW object is found that points to this GPE. + */ + switch (name[1]) { + case 'L': + type = ACPI_GPE_LEVEL_TRIGGERED; + break; + + case 'E': + type = ACPI_GPE_EDGE_TRIGGERED; + break; + + default: + /* Unknown method type, just ignore it! 
*/ + + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Unknown GPE method type: %s (name not of form _Lxx or _Exx)\n", + name)); + return_ACPI_STATUS (AE_OK); + } + + /* Convert the last two characters of the name to the GPE Number */ + + gpe_number = ACPI_STRTOUL (&name[2], NULL, 16); + if (gpe_number == ACPI_UINT32_MAX) { + /* Conversion failed; invalid method, just ignore it */ + + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)\n", + name)); + return_ACPI_STATUS (AE_OK); + } + + /* Ensure that we have a valid GPE number for this GPE block */ + + if ((gpe_number < gpe_block->block_base_number) || + (gpe_number >= (gpe_block->block_base_number + (gpe_block->register_count * 8)))) { + /* + * Not valid for this GPE block, just ignore it + * However, it may be valid for a different GPE block, since GPE0 and GPE1 + * methods both appear under \_GPE. + */ + return_ACPI_STATUS (AE_OK); + } + + /* + * Now we can add this information to the gpe_event_info block + * for use during dispatch of this GPE. Default type is RUNTIME, although + * this may change when the _PRW methods are executed later. + */ + gpe_event_info = &gpe_block->event_info[gpe_number - gpe_block->block_base_number]; + + gpe_event_info->flags = (u8) (type | ACPI_GPE_DISPATCH_METHOD | + ACPI_GPE_TYPE_RUNTIME); + + gpe_event_info->dispatch.method_node = (struct acpi_namespace_node *) obj_handle; + + /* Update enable mask, but don't enable the HW GPE as of yet */ + + status = acpi_ev_enable_gpe (gpe_event_info, FALSE); + + ACPI_DEBUG_PRINT ((ACPI_DB_LOAD, + "Registered GPE method %s as GPE number 0x%.2X\n", + name, gpe_number)); + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_match_prw_and_gpe + * + * PARAMETERS: Callback from walk_namespace + * + * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is + * not aborted on a single _PRW failure. + * + * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a + * Device. Run the _PRW method. If present, extract the GPE + * number and mark the GPE as a WAKE GPE. 
+ * + ******************************************************************************/ + +static acpi_status +acpi_ev_match_prw_and_gpe ( + acpi_handle obj_handle, + u32 level, + void *info, + void **return_value) +{ + struct acpi_gpe_walk_info *gpe_info = (void *) info; + struct acpi_namespace_node *gpe_device; + struct acpi_gpe_block_info *gpe_block; + struct acpi_namespace_node *target_gpe_device; + struct acpi_gpe_event_info *gpe_event_info; + union acpi_operand_object *pkg_desc; + union acpi_operand_object *obj_desc; + u32 gpe_number; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_match_prw_and_gpe"); + + + /* Check for a _PRW method under this device */ + + status = acpi_ut_evaluate_object (obj_handle, METHOD_NAME__PRW, + ACPI_BTYPE_PACKAGE, &pkg_desc); + if (ACPI_FAILURE (status)) { + /* Ignore all errors from _PRW, we don't want to abort the subsystem */ + + return_ACPI_STATUS (AE_OK); + } + + /* The returned _PRW package must have at least two elements */ + + if (pkg_desc->package.count < 2) { + goto cleanup; + } + + /* Extract pointers from the input context */ + + gpe_device = gpe_info->gpe_device; + gpe_block = gpe_info->gpe_block; + + /* + * The _PRW object must return a package, we are only interested + * in the first element + */ + obj_desc = pkg_desc->package.elements[0]; + + if (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_INTEGER) { + /* Use FADT-defined GPE device (from definition of _PRW) */ + + target_gpe_device = acpi_gbl_fadt_gpe_device; + + /* Integer is the GPE number in the FADT described GPE blocks */ + + gpe_number = (u32) obj_desc->integer.value; + } + else if (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_PACKAGE) { + /* Package contains a GPE reference and GPE number within a GPE block */ + + if ((obj_desc->package.count < 2) || + (ACPI_GET_OBJECT_TYPE (obj_desc->package.elements[0]) != ACPI_TYPE_LOCAL_REFERENCE) || + (ACPI_GET_OBJECT_TYPE (obj_desc->package.elements[1]) != ACPI_TYPE_INTEGER)) { + goto cleanup; + } + + /* Get GPE block reference and decode */ + + target_gpe_device = obj_desc->package.elements[0]->reference.node; + gpe_number = (u32) obj_desc->package.elements[1]->integer.value; + } + else { + /* Unknown type, just ignore it */ + + goto cleanup; + } + + /* + * Is this GPE within this block? + * + * TRUE iff these conditions are true: + * 1) The GPE devices match. + * 2) The GPE index(number) is within the range of the Gpe Block + * associated with the GPE device. + */ + if ((gpe_device == target_gpe_device) && + (gpe_number >= gpe_block->block_base_number) && + (gpe_number < gpe_block->block_base_number + (gpe_block->register_count * 8))) { + gpe_event_info = &gpe_block->event_info[gpe_number - gpe_block->block_base_number]; + + /* Mark GPE for WAKE-ONLY but WAKE_DISABLED */ + + gpe_event_info->flags &= ~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED); + status = acpi_ev_set_gpe_type (gpe_event_info, ACPI_GPE_TYPE_WAKE); + if (ACPI_FAILURE (status)) { + goto cleanup; + } + status = acpi_ev_update_gpe_enable_masks (gpe_event_info, ACPI_GPE_DISABLE); + } + +cleanup: + acpi_ut_remove_reference (pkg_desc); + return_ACPI_STATUS (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_get_gpe_xrupt_block + * + * PARAMETERS: interrupt_level - Interrupt for a GPE block + * + * RETURN: A GPE interrupt block + * + * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt + * block per unique interrupt level used for GPEs. 
+ * Should be called only when the GPE lists are semaphore locked + * and not subject to change. + * + ******************************************************************************/ + +static struct acpi_gpe_xrupt_info * +acpi_ev_get_gpe_xrupt_block ( + u32 interrupt_level) +{ + struct acpi_gpe_xrupt_info *next_gpe_xrupt; + struct acpi_gpe_xrupt_info *gpe_xrupt; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_get_gpe_xrupt_block"); + + + /* No need for spin lock since we are not changing any list elements here */ + + next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head; + while (next_gpe_xrupt) { + if (next_gpe_xrupt->interrupt_level == interrupt_level) { + return_PTR (next_gpe_xrupt); + } + + next_gpe_xrupt = next_gpe_xrupt->next; + } + + /* Not found, must allocate a new xrupt descriptor */ + + gpe_xrupt = ACPI_MEM_CALLOCATE (sizeof (struct acpi_gpe_xrupt_info)); + if (!gpe_xrupt) { + return_PTR (NULL); + } + + gpe_xrupt->interrupt_level = interrupt_level; + + /* Install new interrupt descriptor with spin lock */ + + acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); + if (acpi_gbl_gpe_xrupt_list_head) { + next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head; + while (next_gpe_xrupt->next) { + next_gpe_xrupt = next_gpe_xrupt->next; + } + + next_gpe_xrupt->next = gpe_xrupt; + gpe_xrupt->previous = next_gpe_xrupt; + } + else { + acpi_gbl_gpe_xrupt_list_head = gpe_xrupt; + } + acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); + + /* Install new interrupt handler if not SCI_INT */ + + if (interrupt_level != acpi_gbl_FADT->sci_int) { + status = acpi_os_install_interrupt_handler (interrupt_level, + acpi_ev_gpe_xrupt_handler, gpe_xrupt); + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Could not install GPE interrupt handler at level 0x%X\n", + interrupt_level)); + return_PTR (NULL); + } + } + + return_PTR (gpe_xrupt); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_delete_gpe_xrupt + * + * PARAMETERS: gpe_xrupt - A GPE interrupt info block + * + * RETURN: Status + * + * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated + * interrupt handler if not the SCI interrupt. 
+ * + ******************************************************************************/ + +static acpi_status +acpi_ev_delete_gpe_xrupt ( + struct acpi_gpe_xrupt_info *gpe_xrupt) +{ + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_delete_gpe_xrupt"); + + + /* We never want to remove the SCI interrupt handler */ + + if (gpe_xrupt->interrupt_level == acpi_gbl_FADT->sci_int) { + gpe_xrupt->gpe_block_list_head = NULL; + return_ACPI_STATUS (AE_OK); + } + + /* Disable this interrupt */ + + status = acpi_os_remove_interrupt_handler (gpe_xrupt->interrupt_level, + acpi_ev_gpe_xrupt_handler); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Unlink the interrupt block with lock */ + + acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); + if (gpe_xrupt->previous) { + gpe_xrupt->previous->next = gpe_xrupt->next; + } + + if (gpe_xrupt->next) { + gpe_xrupt->next->previous = gpe_xrupt->previous; + } + acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); + + /* Free the block */ + + ACPI_MEM_FREE (gpe_xrupt); + return_ACPI_STATUS (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_install_gpe_block + * + * PARAMETERS: gpe_block - New GPE block + * interrupt_level - Level to be associated with this GPE block + * + * RETURN: Status + * + * DESCRIPTION: Install new GPE block with mutex support + * + ******************************************************************************/ + +static acpi_status +acpi_ev_install_gpe_block ( + struct acpi_gpe_block_info *gpe_block, + u32 interrupt_level) +{ + struct acpi_gpe_block_info *next_gpe_block; + struct acpi_gpe_xrupt_info *gpe_xrupt_block; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_install_gpe_block"); + + + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block (interrupt_level); + if (!gpe_xrupt_block) { + status = AE_NO_MEMORY; + goto unlock_and_exit; + } + + /* Install the new block at the end of the list for this interrupt with lock */ + + acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); + if (gpe_xrupt_block->gpe_block_list_head) { + next_gpe_block = gpe_xrupt_block->gpe_block_list_head; + while (next_gpe_block->next) { + next_gpe_block = next_gpe_block->next; + } + + next_gpe_block->next = gpe_block; + gpe_block->previous = next_gpe_block; + } + else { + gpe_xrupt_block->gpe_block_list_head = gpe_block; + } + + gpe_block->xrupt_block = gpe_xrupt_block; + acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); + +unlock_and_exit: + status = acpi_ut_release_mutex (ACPI_MTX_EVENTS); + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_delete_gpe_block + * + * PARAMETERS: gpe_block - Existing GPE block + * + * RETURN: Status + * + * DESCRIPTION: Remove a GPE block + * + ******************************************************************************/ + +acpi_status +acpi_ev_delete_gpe_block ( + struct acpi_gpe_block_info *gpe_block) +{ + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_install_gpe_block"); + + + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Disable all GPEs in this block */ + + status = acpi_hw_disable_gpe_block (gpe_block->xrupt_block, gpe_block); + + if (!gpe_block->previous && !gpe_block->next) { + /* This is the last gpe_block on this interrupt 
*/ + + status = acpi_ev_delete_gpe_xrupt (gpe_block->xrupt_block); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } + } + else { + /* Remove the block on this interrupt with lock */ + + acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); + if (gpe_block->previous) { + gpe_block->previous->next = gpe_block->next; + } + else { + gpe_block->xrupt_block->gpe_block_list_head = gpe_block->next; + } + + if (gpe_block->next) { + gpe_block->next->previous = gpe_block->previous; + } + acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); + } + + /* Free the gpe_block */ + + ACPI_MEM_FREE (gpe_block->register_info); + ACPI_MEM_FREE (gpe_block->event_info); + ACPI_MEM_FREE (gpe_block); + +unlock_and_exit: + status = acpi_ut_release_mutex (ACPI_MTX_EVENTS); + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_create_gpe_info_blocks + * + * PARAMETERS: gpe_block - New GPE block + * + * RETURN: Status + * + * DESCRIPTION: Create the register_info and event_info blocks for this GPE block + * + ******************************************************************************/ + +static acpi_status +acpi_ev_create_gpe_info_blocks ( + struct acpi_gpe_block_info *gpe_block) +{ + struct acpi_gpe_register_info *gpe_register_info = NULL; + struct acpi_gpe_event_info *gpe_event_info = NULL; + struct acpi_gpe_event_info *this_event; + struct acpi_gpe_register_info *this_register; + acpi_native_uint i; + acpi_native_uint j; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_create_gpe_info_blocks"); + + + /* Allocate the GPE register information block */ + + gpe_register_info = ACPI_MEM_CALLOCATE ( + (acpi_size) gpe_block->register_count * + sizeof (struct acpi_gpe_register_info)); + if (!gpe_register_info) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Could not allocate the gpe_register_info table\n")); + return_ACPI_STATUS (AE_NO_MEMORY); + } + + /* + * Allocate the GPE event_info block. There are eight distinct GPEs + * per register. Initialization to zeros is sufficient. + */ + gpe_event_info = ACPI_MEM_CALLOCATE ( + ((acpi_size) gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH) * + sizeof (struct acpi_gpe_event_info)); + if (!gpe_event_info) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not allocate the gpe_event_info table\n")); + status = AE_NO_MEMORY; + goto error_exit; + } + + /* Save the new Info arrays in the GPE block */ + + gpe_block->register_info = gpe_register_info; + gpe_block->event_info = gpe_event_info; + + /* + * Initialize the GPE Register and Event structures. A goal of these + * tables is to hide the fact that there are two separate GPE register sets + * in a given gpe hardware block, the status registers occupy the first half, + * and the enable registers occupy the second half. 
+ */ + this_register = gpe_register_info; + this_event = gpe_event_info; + + for (i = 0; i < gpe_block->register_count; i++) { + /* Init the register_info for this GPE register (8 GPEs) */ + + this_register->base_gpe_number = (u8) (gpe_block->block_base_number + + (i * ACPI_GPE_REGISTER_WIDTH)); + + ACPI_STORE_ADDRESS (this_register->status_address.address, + (gpe_block->block_address.address + + i)); + + ACPI_STORE_ADDRESS (this_register->enable_address.address, + (gpe_block->block_address.address + + i + + gpe_block->register_count)); + + this_register->status_address.address_space_id = gpe_block->block_address.address_space_id; + this_register->enable_address.address_space_id = gpe_block->block_address.address_space_id; + this_register->status_address.register_bit_width = ACPI_GPE_REGISTER_WIDTH; + this_register->enable_address.register_bit_width = ACPI_GPE_REGISTER_WIDTH; + this_register->status_address.register_bit_offset = ACPI_GPE_REGISTER_WIDTH; + this_register->enable_address.register_bit_offset = ACPI_GPE_REGISTER_WIDTH; + + /* Init the event_info for each GPE within this register */ + + for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { + this_event->register_bit = acpi_gbl_decode_to8bit[j]; + this_event->register_info = this_register; + this_event++; + } + + /* + * Clear the status/enable registers. Note that status registers + * are cleared by writing a '1', while enable registers are cleared + * by writing a '0'. + */ + status = acpi_hw_low_level_write (ACPI_GPE_REGISTER_WIDTH, 0x00, + &this_register->enable_address); + if (ACPI_FAILURE (status)) { + goto error_exit; + } + + status = acpi_hw_low_level_write (ACPI_GPE_REGISTER_WIDTH, 0xFF, + &this_register->status_address); + if (ACPI_FAILURE (status)) { + goto error_exit; + } + + this_register++; + } + + return_ACPI_STATUS (AE_OK); + + +error_exit: + if (gpe_register_info) { + ACPI_MEM_FREE (gpe_register_info); + } + if (gpe_event_info) { + ACPI_MEM_FREE (gpe_event_info); + } + + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_create_gpe_block + * + * PARAMETERS: gpe_device - Handle to the parent GPE block + * gpe_block_address - Address and space_iD + * register_count - Number of GPE register pairs in the block + * gpe_block_base_number - Starting GPE number for the block + * interrupt_level - H/W interrupt for the block + * return_gpe_block - Where the new block descriptor is returned + * + * RETURN: Status + * + * DESCRIPTION: Create and Install a block of GPE registers + * + ******************************************************************************/ + +acpi_status +acpi_ev_create_gpe_block ( + struct acpi_namespace_node *gpe_device, + struct acpi_generic_address *gpe_block_address, + u32 register_count, + u8 gpe_block_base_number, + u32 interrupt_level, + struct acpi_gpe_block_info **return_gpe_block) +{ + struct acpi_gpe_block_info *gpe_block; + struct acpi_gpe_event_info *gpe_event_info; + acpi_native_uint i; + acpi_native_uint j; + u32 wake_gpe_count; + u32 gpe_enabled_count; + acpi_status status; + struct acpi_gpe_walk_info gpe_info; + + + ACPI_FUNCTION_TRACE ("ev_create_gpe_block"); + + + if (!register_count) { + return_ACPI_STATUS (AE_OK); + } + + /* Allocate a new GPE block */ + + gpe_block = ACPI_MEM_CALLOCATE (sizeof (struct acpi_gpe_block_info)); + if (!gpe_block) { + return_ACPI_STATUS (AE_NO_MEMORY); + } + + /* Initialize the new GPE block */ + + gpe_block->register_count = register_count; + 
gpe_block->block_base_number = gpe_block_base_number; + gpe_block->node = gpe_device; + + ACPI_MEMCPY (&gpe_block->block_address, gpe_block_address, sizeof (struct acpi_generic_address)); + + /* Create the register_info and event_info sub-structures */ + + status = acpi_ev_create_gpe_info_blocks (gpe_block); + if (ACPI_FAILURE (status)) { + ACPI_MEM_FREE (gpe_block); + return_ACPI_STATUS (status); + } + + /* Install the new block in the global list(s) */ + + status = acpi_ev_install_gpe_block (gpe_block, interrupt_level); + if (ACPI_FAILURE (status)) { + ACPI_MEM_FREE (gpe_block); + return_ACPI_STATUS (status); + } + + /* Find all GPE methods (_Lxx, _Exx) for this block */ + + status = acpi_ns_walk_namespace (ACPI_TYPE_METHOD, gpe_device, + ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK, acpi_ev_save_method_info, + gpe_block, NULL); + + /* + * Runtime option: Should Wake GPEs be enabled at runtime? The default + * is No, they should only be enabled just as the machine goes to sleep. + */ + if (acpi_gbl_leave_wake_gpes_disabled) { + /* + * Differentiate RUNTIME vs WAKE GPEs, via the _PRW control methods. + * (Each GPE that has one or more _PRWs that reference it is by + * definition a WAKE GPE and will not be enabled while the machine + * is running.) + */ + gpe_info.gpe_block = gpe_block; + gpe_info.gpe_device = gpe_device; + + status = acpi_ns_walk_namespace (ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, acpi_ev_match_prw_and_gpe, + &gpe_info, NULL); + } + + /* + * Enable all GPEs in this block that are 1) "runtime" or "run/wake" GPEs, + * and 2) have a corresponding _Lxx or _Exx method. All other GPEs must + * be enabled via the acpi_enable_gpe() external interface. + */ + wake_gpe_count = 0; + gpe_enabled_count = 0; + + for (i = 0; i < gpe_block->register_count; i++) { + for (j = 0; j < 8; j++) { + /* Get the info block for this particular GPE */ + + gpe_event_info = &gpe_block->event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j]; + + if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_METHOD) && + (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) { + gpe_enabled_count++; + } + + if (gpe_event_info->flags & ACPI_GPE_TYPE_WAKE) { + wake_gpe_count++; + } + } + } + + /* Dump info about this GPE block */ + + ACPI_DEBUG_PRINT ((ACPI_DB_INIT, + "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n", + (u32) gpe_block->block_base_number, + (u32) (gpe_block->block_base_number + + ((gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH) -1)), + gpe_device->name.ascii, + gpe_block->register_count, + interrupt_level)); + + /* Enable all valid GPEs found above */ + + status = acpi_hw_enable_runtime_gpe_block (NULL, gpe_block); + + ACPI_DEBUG_PRINT ((ACPI_DB_INIT, + "Found %u Wake, Enabled %u Runtime GPEs in this block\n", + wake_gpe_count, gpe_enabled_count)); + + /* Return the new block */ + + if (return_gpe_block) { + (*return_gpe_block) = gpe_block; + } + + return_ACPI_STATUS (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_gpe_initialize + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Initialize the GPE data structures + * + ******************************************************************************/ + +acpi_status +acpi_ev_gpe_initialize ( + void) +{ + u32 register_count0 = 0; + u32 register_count1 = 0; + u32 gpe_number_max = 0; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_gpe_initialize"); + + + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if 
(ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* + * Initialize the GPE Block(s) defined in the FADT + * + * Why the GPE register block lengths are divided by 2: From the ACPI Spec, + * section "General-Purpose Event Registers", we have: + * + * "Each register block contains two registers of equal length + * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the + * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN + * The length of the GPE1_STS and GPE1_EN registers is equal to + * half the GPE1_LEN. If a generic register block is not supported + * then its respective block pointer and block length values in the + * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need + * to be the same size." + */ + + /* + * Determine the maximum GPE number for this machine. + * + * Note: both GPE0 and GPE1 are optional, and either can exist without + * the other. + * + * If EITHER the register length OR the block address are zero, then that + * particular block is not supported. + */ + if (acpi_gbl_FADT->gpe0_blk_len && + acpi_gbl_FADT->xgpe0_blk.address) { + /* GPE block 0 exists (has both length and address > 0) */ + + register_count0 = (u16) (acpi_gbl_FADT->gpe0_blk_len / 2); + + gpe_number_max = (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1; + + /* Install GPE Block 0 */ + + status = acpi_ev_create_gpe_block (acpi_gbl_fadt_gpe_device, &acpi_gbl_FADT->xgpe0_blk, + register_count0, 0, acpi_gbl_FADT->sci_int, &acpi_gbl_gpe_fadt_blocks[0]); + + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (( + "Could not create GPE Block 0, %s\n", + acpi_format_exception (status))); + } + } + + if (acpi_gbl_FADT->gpe1_blk_len && + acpi_gbl_FADT->xgpe1_blk.address) { + /* GPE block 1 exists (has both length and address > 0) */ + + register_count1 = (u16) (acpi_gbl_FADT->gpe1_blk_len / 2); + + /* Check for GPE0/GPE1 overlap (if both banks exist) */ + + if ((register_count0) && + (gpe_number_max >= acpi_gbl_FADT->gpe1_base)) { + ACPI_REPORT_ERROR (( + "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1\n", + gpe_number_max, acpi_gbl_FADT->gpe1_base, + acpi_gbl_FADT->gpe1_base + + ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1))); + + /* Ignore GPE1 block by setting the register count to zero */ + + register_count1 = 0; + } + else { + /* Install GPE Block 1 */ + + status = acpi_ev_create_gpe_block (acpi_gbl_fadt_gpe_device, &acpi_gbl_FADT->xgpe1_blk, + register_count1, acpi_gbl_FADT->gpe1_base, + acpi_gbl_FADT->sci_int, &acpi_gbl_gpe_fadt_blocks[1]); + + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (( + "Could not create GPE Block 1, %s\n", + acpi_format_exception (status))); + } + + /* + * GPE0 and GPE1 do not have to be contiguous in the GPE number + * space. However, GPE0 always starts at GPE number zero. 
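+ * The overall gpe_number_max is therefore recomputed below from the FADT + * gpe1_base value, not from the combined GPE0/GPE1 register counts.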
+ */ + gpe_number_max = acpi_gbl_FADT->gpe1_base + + ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1); + } + } + + /* Exit if there are no GPE registers */ + + if ((register_count0 + register_count1) == 0) { + /* GPEs are not required by ACPI, this is OK */ + + ACPI_DEBUG_PRINT ((ACPI_DB_INIT, + "There are no GPE blocks defined in the FADT\n")); + status = AE_OK; + goto cleanup; + } + + /* Check for Max GPE number out-of-range */ + + if (gpe_number_max > ACPI_GPE_MAX) { + ACPI_REPORT_ERROR (("Maximum GPE number from FADT is too large: 0x%X\n", + gpe_number_max)); + status = AE_BAD_VALUE; + goto cleanup; + } + +cleanup: + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + return_ACPI_STATUS (AE_OK); +} + + diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c new file mode 100644 index 000000000000..2548efa7a45f --- /dev/null +++ b/drivers/acpi/events/evmisc.c @@ -0,0 +1,588 @@ +/****************************************************************************** + * + * Module Name: evmisc - Miscellaneous event manager support functions + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2005, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include <acpi/acpi.h> +#include <acpi/acevents.h> +#include <acpi/acnamesp.h> +#include <acpi/acinterp.h> + +#define _COMPONENT ACPI_EVENTS + ACPI_MODULE_NAME ("evmisc") + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_is_notify_object + * + * PARAMETERS: Node - Node to check + * + * RETURN: TRUE if notifies allowed on this object + * + * DESCRIPTION: Check type of node for a object that supports notifies. 
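+ * (Devices, Processors, Power Resources, and Thermal Zones.)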
+ * + * TBD: This could be replaced by a flag bit in the node. + * + ******************************************************************************/ + +u8 +acpi_ev_is_notify_object ( + struct acpi_namespace_node *node) +{ + switch (node->type) { + case ACPI_TYPE_DEVICE: + case ACPI_TYPE_PROCESSOR: + case ACPI_TYPE_POWER: + case ACPI_TYPE_THERMAL: + /* + * These are the ONLY objects that can receive ACPI notifications + */ + return (TRUE); + + default: + return (FALSE); + } +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_queue_notify_request + * + * PARAMETERS: Node - NS node for the notified object + * notify_value - Value from the Notify() request + * + * RETURN: Status + * + * DESCRIPTION: Dispatch a device notification event to a previously + * installed handler. + * + ******************************************************************************/ + +#ifdef ACPI_DEBUG_OUTPUT +static const char *acpi_notify_value_names[] = +{ + "Bus Check", + "Device Check", + "Device Wake", + "Eject request", + "Device Check Light", + "Frequency Mismatch", + "Bus Mode Mismatch", + "Power Fault" +}; +#endif + +acpi_status +acpi_ev_queue_notify_request ( + struct acpi_namespace_node *node, + u32 notify_value) +{ + union acpi_operand_object *obj_desc; + union acpi_operand_object *handler_obj = NULL; + union acpi_generic_state *notify_info; + acpi_status status = AE_OK; + + + ACPI_FUNCTION_NAME ("ev_queue_notify_request"); + + + /* + * For value 3 (Ejection Request), some device method may need to be run. + * For value 2 (Device Wake) if _PRW exists, the _PS0 method may need to be run. + * For value 0x80 (Status Change) on the power button or sleep button, + * initiate soft-off or sleep operation? + */ + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + "Dispatching Notify(%X) on node %p\n", notify_value, node)); + + if (notify_value <= 7) { + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Notify value: %s\n", + acpi_notify_value_names[notify_value])); + } + else { + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Notify value: 0x%2.2X **Device Specific**\n", + notify_value)); + } + + /* Get the notify object attached to the NS Node */ + + obj_desc = acpi_ns_get_attached_object (node); + if (obj_desc) { + /* We have the notify object, Get the right handler */ + + switch (node->type) { + case ACPI_TYPE_DEVICE: + case ACPI_TYPE_THERMAL: + case ACPI_TYPE_PROCESSOR: + case ACPI_TYPE_POWER: + + if (notify_value <= ACPI_MAX_SYS_NOTIFY) { + handler_obj = obj_desc->common_notify.system_notify; + } + else { + handler_obj = obj_desc->common_notify.device_notify; + } + break; + + default: + /* All other types are not supported */ + return (AE_TYPE); + } + } + + /* If there is any handler to run, schedule the dispatcher */ + + if ((acpi_gbl_system_notify.handler && (notify_value <= ACPI_MAX_SYS_NOTIFY)) || + (acpi_gbl_device_notify.handler && (notify_value > ACPI_MAX_SYS_NOTIFY)) || + handler_obj) { + notify_info = acpi_ut_create_generic_state (); + if (!notify_info) { + return (AE_NO_MEMORY); + } + + notify_info->common.data_type = ACPI_DESC_TYPE_STATE_NOTIFY; + notify_info->notify.node = node; + notify_info->notify.value = (u16) notify_value; + notify_info->notify.handler_obj = handler_obj; + + status = acpi_os_queue_for_execution (OSD_PRIORITY_HIGH, + acpi_ev_notify_dispatch, notify_info); + if (ACPI_FAILURE (status)) { + acpi_ut_delete_generic_state (notify_info); + } + } + + if (!handler_obj) { + /* + * There is no per-device notify handler for this device. 
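+ * (A matching global system or device notify handler, if one is + * installed, has still been queued for dispatch above.)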
+ * This may or may not be a problem. + */ + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + "No notify handler for Notify(%4.4s, %X) node %p\n", + acpi_ut_get_node_name (node), notify_value, node)); + } + + return (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_notify_dispatch + * + * PARAMETERS: Context - To be passsed to the notify handler + * + * RETURN: None. + * + * DESCRIPTION: Dispatch a device notification event to a previously + * installed handler. + * + ******************************************************************************/ + +void ACPI_SYSTEM_XFACE +acpi_ev_notify_dispatch ( + void *context) +{ + union acpi_generic_state *notify_info = (union acpi_generic_state *) context; + acpi_notify_handler global_handler = NULL; + void *global_context = NULL; + union acpi_operand_object *handler_obj; + + + ACPI_FUNCTION_ENTRY (); + + + /* + * We will invoke a global notify handler if installed. + * This is done _before_ we invoke the per-device handler attached to the device. + */ + if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) { + /* Global system notification handler */ + + if (acpi_gbl_system_notify.handler) { + global_handler = acpi_gbl_system_notify.handler; + global_context = acpi_gbl_system_notify.context; + } + } + else { + /* Global driver notification handler */ + + if (acpi_gbl_device_notify.handler) { + global_handler = acpi_gbl_device_notify.handler; + global_context = acpi_gbl_device_notify.context; + } + } + + /* Invoke the system handler first, if present */ + + if (global_handler) { + global_handler (notify_info->notify.node, notify_info->notify.value, global_context); + } + + /* Now invoke the per-device handler, if present */ + + handler_obj = notify_info->notify.handler_obj; + if (handler_obj) { + handler_obj->notify.handler (notify_info->notify.node, notify_info->notify.value, + handler_obj->notify.context); + } + + /* All done with the info object */ + + acpi_ut_delete_generic_state (notify_info); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_global_lock_thread + * + * PARAMETERS: Context - From thread interface, not used + * + * RETURN: None + * + * DESCRIPTION: Invoked by SCI interrupt handler upon acquisition of the + * Global Lock. Simply signal all threads that are waiting + * for the lock. + * + ******************************************************************************/ + +static void ACPI_SYSTEM_XFACE +acpi_ev_global_lock_thread ( + void *context) +{ + acpi_status status; + + + /* Signal threads that are waiting for the lock */ + + if (acpi_gbl_global_lock_thread_count) { + /* Send sufficient units to the semaphore */ + + status = acpi_os_signal_semaphore (acpi_gbl_global_lock_semaphore, + acpi_gbl_global_lock_thread_count); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("Could not signal Global Lock semaphore\n")); + } + } +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_global_lock_handler + * + * PARAMETERS: Context - From thread interface, not used + * + * RETURN: ACPI_INTERRUPT_HANDLED or ACPI_INTERRUPT_NOT_HANDLED + * + * DESCRIPTION: Invoked directly from the SCI handler when a global lock + * release interrupt occurs. 
Grab the global lock and queue + * the global lock thread for execution + * + ******************************************************************************/ + +static u32 +acpi_ev_global_lock_handler ( + void *context) +{ + u8 acquired = FALSE; + acpi_status status; + + + /* + * Attempt to get the lock + * If we don't get it now, it will be marked pending and we will + * take another interrupt when it becomes free. + */ + ACPI_ACQUIRE_GLOBAL_LOCK (acpi_gbl_common_fACS.global_lock, acquired); + if (acquired) { + /* Got the lock, now wake all threads waiting for it */ + + acpi_gbl_global_lock_acquired = TRUE; + + /* Run the Global Lock thread which will signal all waiting threads */ + + status = acpi_os_queue_for_execution (OSD_PRIORITY_HIGH, + acpi_ev_global_lock_thread, context); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("Could not queue Global Lock thread, %s\n", + acpi_format_exception (status))); + + return (ACPI_INTERRUPT_NOT_HANDLED); + } + } + + return (ACPI_INTERRUPT_HANDLED); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_init_global_lock_handler + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Install a handler for the global lock release event + * + ******************************************************************************/ + +acpi_status +acpi_ev_init_global_lock_handler (void) +{ + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_init_global_lock_handler"); + + + acpi_gbl_global_lock_present = TRUE; + status = acpi_install_fixed_event_handler (ACPI_EVENT_GLOBAL, + acpi_ev_global_lock_handler, NULL); + + /* + * If the global lock does not exist on this platform, the attempt + * to enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick) + * Map to AE_OK, but mark global lock as not present. + * Any attempt to actually use the global lock will be flagged + * with an error. + */ + if (status == AE_NO_HARDWARE_RESPONSE) { + acpi_gbl_global_lock_present = FALSE; + status = AE_OK; + } + + return_ACPI_STATUS (status); +} + + +/****************************************************************************** + * + * FUNCTION: acpi_ev_acquire_global_lock + * + * PARAMETERS: Timeout - Max time to wait for the lock, in millisec. + * + * RETURN: Status + * + * DESCRIPTION: Attempt to gain ownership of the Global Lock. + * + *****************************************************************************/ + +acpi_status +acpi_ev_acquire_global_lock ( + u16 timeout) +{ + acpi_status status = AE_OK; + u8 acquired = FALSE; + + + ACPI_FUNCTION_TRACE ("ev_acquire_global_lock"); + + +#ifndef ACPI_APPLICATION + /* Make sure that we actually have a global lock */ + + if (!acpi_gbl_global_lock_present) { + return_ACPI_STATUS (AE_NO_GLOBAL_LOCK); + } +#endif + + /* One more thread wants the global lock */ + + acpi_gbl_global_lock_thread_count++; + + /* If we (OS side vs. BIOS side) have the hardware lock already, we are done */ + + if (acpi_gbl_global_lock_acquired) { + return_ACPI_STATUS (AE_OK); + } + + /* We must acquire the actual hardware lock */ + + ACPI_ACQUIRE_GLOBAL_LOCK (acpi_gbl_common_fACS.global_lock, acquired); + if (acquired) { + /* We got the lock */ + + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Acquired the HW Global Lock\n")); + + acpi_gbl_global_lock_acquired = TRUE; + return_ACPI_STATUS (AE_OK); + } + + /* + * Did not get the lock. The pending bit was set above, and we must now + * wait until we get the global lock released interrupt. 
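+ * When the current owner releases the lock, the resulting SCI invokes + * acpi_ev_global_lock_handler (above), which takes the lock and queues + * acpi_ev_global_lock_thread to signal acpi_gbl_global_lock_semaphore -- + * the semaphore this thread waits on below.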
+ */ + ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Waiting for the HW Global Lock\n")); + + /* + * Acquire the global lock semaphore first. + * Since this wait will block, we must release the interpreter + */ + status = acpi_ex_system_wait_semaphore (acpi_gbl_global_lock_semaphore, + timeout); + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_release_global_lock + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Releases ownership of the Global Lock. + * + ******************************************************************************/ + +acpi_status +acpi_ev_release_global_lock (void) +{ + u8 pending = FALSE; + acpi_status status = AE_OK; + + + ACPI_FUNCTION_TRACE ("ev_release_global_lock"); + + + if (!acpi_gbl_global_lock_thread_count) { + ACPI_REPORT_WARNING(("Cannot release HW Global Lock, it has not been acquired\n")); + return_ACPI_STATUS (AE_NOT_ACQUIRED); + } + + /* One fewer thread has the global lock */ + + acpi_gbl_global_lock_thread_count--; + if (acpi_gbl_global_lock_thread_count) { + /* There are still some threads holding the lock, cannot release */ + + return_ACPI_STATUS (AE_OK); + } + + /* + * No more threads holding lock, we can do the actual hardware + * release + */ + ACPI_RELEASE_GLOBAL_LOCK (acpi_gbl_common_fACS.global_lock, pending); + acpi_gbl_global_lock_acquired = FALSE; + + /* + * If the pending bit was set, we must write GBL_RLS to the control + * register + */ + if (pending) { + status = acpi_set_register (ACPI_BITREG_GLOBAL_LOCK_RELEASE, 1, ACPI_MTX_LOCK); + } + + return_ACPI_STATUS (status); +} + + +/****************************************************************************** + * + * FUNCTION: acpi_ev_terminate + * + * PARAMETERS: none + * + * RETURN: none + * + * DESCRIPTION: Disable events and free memory allocated for table storage. + * + ******************************************************************************/ + +void +acpi_ev_terminate (void) +{ + acpi_native_uint i; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_terminate"); + + + if (acpi_gbl_events_initialized) { + /* + * Disable all event-related functionality. + * In all cases, on error, print a message but obviously we don't abort. 
+ */ + + /* Disable all fixed events */ + + for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { + status = acpi_disable_event ((u32) i, 0); + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not disable fixed event %d\n", (u32) i)); + } + } + + /* Disable all GPEs in all GPE blocks */ + + status = acpi_ev_walk_gpe_list (acpi_hw_disable_gpe_block, ACPI_NOT_ISR); + + /* Remove SCI handler */ + + status = acpi_ev_remove_sci_handler (); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not remove SCI handler\n")); + } + } + + /* Deallocate all handler objects installed within GPE info structs */ + + status = acpi_ev_walk_gpe_list (acpi_ev_delete_gpe_handlers, ACPI_NOT_ISR); + + /* Return to original mode if necessary */ + + if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) { + status = acpi_disable (); + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "acpi_disable failed\n")); + } + } + return_VOID; +} + diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c new file mode 100644 index 000000000000..772342708a7a --- /dev/null +++ b/drivers/acpi/events/evregion.c @@ -0,0 +1,1067 @@ +/****************************************************************************** + * + * Module Name: evregion - ACPI address_space (op_region) handler dispatch + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2005, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + + +#include <acpi/acpi.h> +#include <acpi/acevents.h> +#include <acpi/acnamesp.h> +#include <acpi/acinterp.h> + +#define _COMPONENT ACPI_EVENTS + ACPI_MODULE_NAME ("evregion") + +#define ACPI_NUM_DEFAULT_SPACES 4 + +static u8 acpi_gbl_default_address_spaces[ACPI_NUM_DEFAULT_SPACES] = { + ACPI_ADR_SPACE_SYSTEM_MEMORY, + ACPI_ADR_SPACE_SYSTEM_IO, + ACPI_ADR_SPACE_PCI_CONFIG, + ACPI_ADR_SPACE_DATA_TABLE}; + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_install_region_handlers + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Installs the core subsystem default address space handlers. + * + ******************************************************************************/ + +acpi_status +acpi_ev_install_region_handlers ( + void) { + acpi_status status; + acpi_native_uint i; + + + ACPI_FUNCTION_TRACE ("ev_install_region_handlers"); + + + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* + * All address spaces (PCI Config, EC, SMBus) are scope dependent + * and registration must occur for a specific device. + * + * In the case of the system memory and IO address spaces there is currently + * no device associated with the address space. For these we use the root. + * + * We install the default PCI config space handler at the root so + * that this space is immediately available even though the we have + * not enumerated all the PCI Root Buses yet. This is to conform + * to the ACPI specification which states that the PCI config + * space must be always available -- even though we are nowhere + * near ready to find the PCI root buses at this point. + * + * NOTE: We ignore AE_ALREADY_EXISTS because this means that a handler + * has already been installed (via acpi_install_address_space_handler). + * Similar for AE_SAME_HANDLER. + */ + for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) { + status = acpi_ev_install_space_handler (acpi_gbl_root_node, + acpi_gbl_default_address_spaces[i], + ACPI_DEFAULT_HANDLER, NULL, NULL); + switch (status) { + case AE_OK: + case AE_SAME_HANDLER: + case AE_ALREADY_EXISTS: + + /* These exceptions are all OK */ + + status = AE_OK; + break; + + default: + + goto unlock_and_exit; + } + } + +unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_initialize_op_regions + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Execute _REG methods for all Operation Regions that have + * an installed default region handler. + * + ******************************************************************************/ + +acpi_status +acpi_ev_initialize_op_regions ( + void) +{ + acpi_status status; + acpi_native_uint i; + + + ACPI_FUNCTION_TRACE ("ev_initialize_op_regions"); + + + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* + * Run the _REG methods for op_regions in each default address space + */ + for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) { + /* TBD: Make sure handler is the DEFAULT handler, otherwise + * _REG will have already been run. 
+ */ + status = acpi_ev_execute_reg_methods (acpi_gbl_root_node, + acpi_gbl_default_address_spaces[i]); + } + + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_execute_reg_method + * + * PARAMETERS: region_obj - Object structure + * Function - Passed to _REG: On (1) or Off (0) + * + * RETURN: Status + * + * DESCRIPTION: Execute _REG method for a region + * + ******************************************************************************/ + +acpi_status +acpi_ev_execute_reg_method ( + union acpi_operand_object *region_obj, + u32 function) +{ + struct acpi_parameter_info info; + union acpi_operand_object *params[3]; + union acpi_operand_object *region_obj2; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_execute_reg_method"); + + + region_obj2 = acpi_ns_get_secondary_object (region_obj); + if (!region_obj2) { + return_ACPI_STATUS (AE_NOT_EXIST); + } + + if (region_obj2->extra.method_REG == NULL) { + return_ACPI_STATUS (AE_OK); + } + + /* + * The _REG method has two arguments: + * + * Arg0, Integer: Operation region space ID + * Same value as region_obj->Region.space_id + * Arg1, Integer: connection status + * 1 for connecting the handler, + * 0 for disconnecting the handler + * Passed as a parameter + */ + params[0] = acpi_ut_create_internal_object (ACPI_TYPE_INTEGER); + if (!params[0]) { + return_ACPI_STATUS (AE_NO_MEMORY); + } + + params[1] = acpi_ut_create_internal_object (ACPI_TYPE_INTEGER); + if (!params[1]) { + status = AE_NO_MEMORY; + goto cleanup; + } + + /* Setup the parameter objects */ + + params[0]->integer.value = region_obj->region.space_id; + params[1]->integer.value = function; + params[2] = NULL; + + info.node = region_obj2->extra.method_REG; + info.parameters = params; + info.parameter_type = ACPI_PARAM_ARGS; + + /* Execute the method, no return value */ + + ACPI_DEBUG_EXEC (acpi_ut_display_init_pathname ( + ACPI_TYPE_METHOD, info.node, NULL)); + status = acpi_ns_evaluate_by_handle (&info); + + acpi_ut_remove_reference (params[1]); + +cleanup: + acpi_ut_remove_reference (params[0]); + + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_address_space_dispatch + * + * PARAMETERS: region_obj - Internal region object + * Function - Read or Write operation + * Address - Where in the space to read or write + * bit_width - Field width in bits (8, 16, 32, or 64) + * Value - Pointer to in or out value + * + * RETURN: Status + * + * DESCRIPTION: Dispatch an address space or operation region access to + * a previously installed handler. 
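+ * NOTE: For handlers other than the default (ACPI-supplied) handlers, + * the interpreter is exited around the handler invocation, since a user + * handler may block (see the hflags check below).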
+ * + ******************************************************************************/ + +acpi_status +acpi_ev_address_space_dispatch ( + union acpi_operand_object *region_obj, + u32 function, + acpi_physical_address address, + u32 bit_width, + void *value) +{ + acpi_status status; + acpi_status status2; + acpi_adr_space_handler handler; + acpi_adr_space_setup region_setup; + union acpi_operand_object *handler_desc; + union acpi_operand_object *region_obj2; + void *region_context = NULL; + + + ACPI_FUNCTION_TRACE ("ev_address_space_dispatch"); + + + region_obj2 = acpi_ns_get_secondary_object (region_obj); + if (!region_obj2) { + return_ACPI_STATUS (AE_NOT_EXIST); + } + + /* Ensure that there is a handler associated with this region */ + + handler_desc = region_obj->region.handler; + if (!handler_desc) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "No handler for Region [%4.4s] (%p) [%s]\n", + acpi_ut_get_node_name (region_obj->region.node), + region_obj, acpi_ut_get_region_name (region_obj->region.space_id))); + + return_ACPI_STATUS (AE_NOT_EXIST); + } + + /* + * It may be the case that the region has never been initialized + * Some types of regions require special init code + */ + if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) { + /* + * This region has not been initialized yet, do it + */ + region_setup = handler_desc->address_space.setup; + if (!region_setup) { + /* No initialization routine, exit with error */ + + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "No init routine for region(%p) [%s]\n", + region_obj, acpi_ut_get_region_name (region_obj->region.space_id))); + return_ACPI_STATUS (AE_NOT_EXIST); + } + + /* + * We must exit the interpreter because the region setup will potentially + * execute control methods (e.g., _REG method for this region) + */ + acpi_ex_exit_interpreter (); + + status = region_setup (region_obj, ACPI_REGION_ACTIVATE, + handler_desc->address_space.context, ®ion_context); + + /* Re-enter the interpreter */ + + status2 = acpi_ex_enter_interpreter (); + if (ACPI_FAILURE (status2)) { + return_ACPI_STATUS (status2); + } + + /* Check for failure of the Region Setup */ + + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Region Init: %s [%s]\n", + acpi_format_exception (status), + acpi_ut_get_region_name (region_obj->region.space_id))); + return_ACPI_STATUS (status); + } + + /* + * Region initialization may have been completed by region_setup + */ + if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) { + region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE; + + if (region_obj2->extra.region_context) { + /* The handler for this region was already installed */ + + ACPI_MEM_FREE (region_context); + } + else { + /* + * Save the returned context for use in all accesses to + * this particular region + */ + region_obj2->extra.region_context = region_context; + } + } + } + + /* We have everything we need, we can invoke the address space handler */ + + handler = handler_desc->address_space.handler; + + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, + "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", + ®ion_obj->region.handler->address_space, handler, + ACPI_FORMAT_UINT64 (address), + acpi_ut_get_region_name (region_obj->region.space_id))); + + if (!(handler_desc->address_space.hflags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { + /* + * For handlers other than the default (supplied) handlers, we must + * exit the interpreter because the handler *might* block -- we don't + * know what it will do, so we can't hold the lock on the intepreter. 
+ */ + acpi_ex_exit_interpreter(); + } + + /* Call the handler */ + + status = handler (function, address, bit_width, value, + handler_desc->address_space.context, + region_obj2->extra.region_context); + + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("Handler for [%s] returned %s\n", + acpi_ut_get_region_name (region_obj->region.space_id), + acpi_format_exception (status))); + } + + if (!(handler_desc->address_space.hflags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { + /* + * We just returned from a non-default handler, we must re-enter the + * interpreter + */ + status2 = acpi_ex_enter_interpreter (); + if (ACPI_FAILURE (status2)) { + return_ACPI_STATUS (status2); + } + } + + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_detach_region + * + * PARAMETERS: region_obj - Region Object + * acpi_ns_is_locked - Namespace Region Already Locked? + * + * RETURN: None + * + * DESCRIPTION: Break the association between the handler and the region + * this is a two way association. + * + ******************************************************************************/ + +void +acpi_ev_detach_region( + union acpi_operand_object *region_obj, + u8 acpi_ns_is_locked) +{ + union acpi_operand_object *handler_obj; + union acpi_operand_object *obj_desc; + union acpi_operand_object **last_obj_ptr; + acpi_adr_space_setup region_setup; + void **region_context; + union acpi_operand_object *region_obj2; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_detach_region"); + + + region_obj2 = acpi_ns_get_secondary_object (region_obj); + if (!region_obj2) { + return_VOID; + } + region_context = ®ion_obj2->extra.region_context; + + /* Get the address handler from the region object */ + + handler_obj = region_obj->region.handler; + if (!handler_obj) { + /* This region has no handler, all done */ + + return_VOID; + } + + /* Find this region in the handler's list */ + + obj_desc = handler_obj->address_space.region_list; + last_obj_ptr = &handler_obj->address_space.region_list; + + while (obj_desc) { + /* Is this the correct Region? 
*/ + + if (obj_desc == region_obj) { + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, + "Removing Region %p from address handler %p\n", + region_obj, handler_obj)); + + /* This is it, remove it from the handler's list */ + + *last_obj_ptr = obj_desc->region.next; + obj_desc->region.next = NULL; /* Must clear field */ + + if (acpi_ns_is_locked) { + status = acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_VOID; + } + } + + /* Now stop region accesses by executing the _REG method */ + + status = acpi_ev_execute_reg_method (region_obj, 0); + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "%s from region _REG, [%s]\n", + acpi_format_exception (status), + acpi_ut_get_region_name (region_obj->region.space_id))); + } + + if (acpi_ns_is_locked) { + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_VOID; + } + } + + /* Call the setup handler with the deactivate notification */ + + region_setup = handler_obj->address_space.setup; + status = region_setup (region_obj, ACPI_REGION_DEACTIVATE, + handler_obj->address_space.context, region_context); + + /* Init routine may fail, Just ignore errors */ + + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "%s from region init, [%s]\n", + acpi_format_exception (status), + acpi_ut_get_region_name (region_obj->region.space_id))); + } + + region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE); + + /* + * Remove handler reference in the region + * + * NOTE: this doesn't mean that the region goes away + * The region is just inaccessible as indicated to + * the _REG method + * + * If the region is on the handler's list + * this better be the region's handler + */ + region_obj->region.handler = NULL; + acpi_ut_remove_reference (handler_obj); + + return_VOID; + } + + /* Walk the linked list of handlers */ + + last_obj_ptr = &obj_desc->region.next; + obj_desc = obj_desc->region.next; + } + + /* If we get here, the region was not in the handler's region list */ + + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, + "Cannot remove region %p from address handler %p\n", + region_obj, handler_obj)); + + return_VOID; +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_attach_region + * + * PARAMETERS: handler_obj - Handler Object + * region_obj - Region Object + * acpi_ns_is_locked - Namespace Region Already Locked? + * + * RETURN: None + * + * DESCRIPTION: Create the association between the handler and the region + * this is a two way association. 
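+ * (The region stores a pointer to the handler and adds a reference to + * it; the handler keeps the region on its region_list.)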
+ * + ******************************************************************************/ + +acpi_status +acpi_ev_attach_region ( + union acpi_operand_object *handler_obj, + union acpi_operand_object *region_obj, + u8 acpi_ns_is_locked) +{ + + ACPI_FUNCTION_TRACE ("ev_attach_region"); + + + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, + "Adding Region [%4.4s] %p to address handler %p [%s]\n", + acpi_ut_get_node_name (region_obj->region.node), + region_obj, handler_obj, + acpi_ut_get_region_name (region_obj->region.space_id))); + + /* Link this region to the front of the handler's list */ + + region_obj->region.next = handler_obj->address_space.region_list; + handler_obj->address_space.region_list = region_obj; + + /* Install the region's handler */ + + if (region_obj->region.handler) { + return_ACPI_STATUS (AE_ALREADY_EXISTS); + } + + region_obj->region.handler = handler_obj; + acpi_ut_add_reference (handler_obj); + + return_ACPI_STATUS (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_install_handler + * + * PARAMETERS: walk_namespace callback + * + * DESCRIPTION: This routine installs an address handler into objects that are + * of type Region or Device. + * + * If the Object is a Device, and the device has a handler of + * the same type then the search is terminated in that branch. + * + * This is because the existing handler is closer in proximity + * to any more regions than the one we are trying to install. + * + ******************************************************************************/ + +acpi_status +acpi_ev_install_handler ( + acpi_handle obj_handle, + u32 level, + void *context, + void **return_value) +{ + union acpi_operand_object *handler_obj; + union acpi_operand_object *next_handler_obj; + union acpi_operand_object *obj_desc; + struct acpi_namespace_node *node; + acpi_status status; + + + ACPI_FUNCTION_NAME ("ev_install_handler"); + + + handler_obj = (union acpi_operand_object *) context; + + /* Parameter validation */ + + if (!handler_obj) { + return (AE_OK); + } + + /* Convert and validate the device handle */ + + node = acpi_ns_map_handle_to_node (obj_handle); + if (!node) { + return (AE_BAD_PARAMETER); + } + + /* + * We only care about regions.and objects + * that are allowed to have address space handlers + */ + if ((node->type != ACPI_TYPE_DEVICE) && + (node->type != ACPI_TYPE_REGION) && + (node != acpi_gbl_root_node)) { + return (AE_OK); + } + + /* Check for an existing internal object */ + + obj_desc = acpi_ns_get_attached_object (node); + if (!obj_desc) { + /* No object, just exit */ + + return (AE_OK); + } + + /* Devices are handled different than regions */ + + if (ACPI_GET_OBJECT_TYPE (obj_desc) == ACPI_TYPE_DEVICE) { + /* Check if this Device already has a handler for this address space */ + + next_handler_obj = obj_desc->device.handler; + while (next_handler_obj) { + /* Found a handler, is it for the same address space? */ + + if (next_handler_obj->address_space.space_id == handler_obj->address_space.space_id) { + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, + "Found handler for region [%s] in device %p(%p) handler %p\n", + acpi_ut_get_region_name (handler_obj->address_space.space_id), + obj_desc, next_handler_obj, handler_obj)); + + /* + * Since the object we found it on was a device, then it + * means that someone has already installed a handler for + * the branch of the namespace from this device on. Just + * bail out telling the walk routine to not traverse this + * branch. 
This preserves the scoping rule for handlers. + */ + return (AE_CTRL_DEPTH); + } + + /* Walk the linked list of handlers attached to this device */ + + next_handler_obj = next_handler_obj->address_space.next; + } + + /* + * As long as the device didn't have a handler for this + * space we don't care about it. We just ignore it and + * proceed. + */ + return (AE_OK); + } + + /* Object is a Region */ + + if (obj_desc->region.space_id != handler_obj->address_space.space_id) { + /* + * This region is for a different address space + * -- just ignore it + */ + return (AE_OK); + } + + /* + * Now we have a region and it is for the handler's address + * space type. + * + * First disconnect region for any previous handler (if any) + */ + acpi_ev_detach_region (obj_desc, FALSE); + + /* Connect the region to the new handler */ + + status = acpi_ev_attach_region (handler_obj, obj_desc, FALSE); + return (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_install_space_handler + * + * PARAMETERS: Node - Namespace node for the device + * space_id - The address space ID + * Handler - Address of the handler + * Setup - Address of the setup function + * Context - Value passed to the handler on each access + * + * RETURN: Status + * + * DESCRIPTION: Install a handler for all op_regions of a given space_id. + * Assumes namespace is locked + * + ******************************************************************************/ + +acpi_status +acpi_ev_install_space_handler ( + struct acpi_namespace_node *node, + acpi_adr_space_type space_id, + acpi_adr_space_handler handler, + acpi_adr_space_setup setup, + void *context) +{ + union acpi_operand_object *obj_desc; + union acpi_operand_object *handler_obj; + acpi_status status; + acpi_object_type type; + u16 flags = 0; + + + ACPI_FUNCTION_TRACE ("ev_install_space_handler"); + + + /* + * This registration is valid for only the types below + * and the root. This is where the default handlers + * get placed. + */ + if ((node->type != ACPI_TYPE_DEVICE) && + (node->type != ACPI_TYPE_PROCESSOR) && + (node->type != ACPI_TYPE_THERMAL) && + (node != acpi_gbl_root_node)) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + if (handler == ACPI_DEFAULT_HANDLER) { + flags = ACPI_ADDR_HANDLER_DEFAULT_INSTALLED; + + switch (space_id) { + case ACPI_ADR_SPACE_SYSTEM_MEMORY: + handler = acpi_ex_system_memory_space_handler; + setup = acpi_ev_system_memory_region_setup; + break; + + case ACPI_ADR_SPACE_SYSTEM_IO: + handler = acpi_ex_system_io_space_handler; + setup = acpi_ev_io_space_region_setup; + break; + + case ACPI_ADR_SPACE_PCI_CONFIG: + handler = acpi_ex_pci_config_space_handler; + setup = acpi_ev_pci_config_region_setup; + break; + + case ACPI_ADR_SPACE_CMOS: + handler = acpi_ex_cmos_space_handler; + setup = acpi_ev_cmos_region_setup; + break; + + case ACPI_ADR_SPACE_PCI_BAR_TARGET: + handler = acpi_ex_pci_bar_space_handler; + setup = acpi_ev_pci_bar_region_setup; + break; + + case ACPI_ADR_SPACE_DATA_TABLE: + handler = acpi_ex_data_table_space_handler; + setup = NULL; + break; + + default: + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + } + + /* If the caller hasn't specified a setup routine, use the default */ + + if (!setup) { + setup = acpi_ev_default_region_setup; + } + + /* Check for an existing internal object */ + + obj_desc = acpi_ns_get_attached_object (node); + if (obj_desc) { + /* + * The attached device object already exists. 
+ * Make sure the handler is not already installed. + */ + handler_obj = obj_desc->device.handler; + + /* Walk the handler list for this device */ + + while (handler_obj) { + /* Same space_id indicates a handler already installed */ + + if (handler_obj->address_space.space_id == space_id) { + if (handler_obj->address_space.handler == handler) { + /* + * It is (relatively) OK to attempt to install the SAME + * handler twice. This can easily happen with PCI_Config space. + */ + status = AE_SAME_HANDLER; + goto unlock_and_exit; + } + else { + /* A handler is already installed */ + + status = AE_ALREADY_EXISTS; + } + goto unlock_and_exit; + } + + /* Walk the linked list of handlers */ + + handler_obj = handler_obj->address_space.next; + } + } + else { + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, + "Creating object on Device %p while installing handler\n", node)); + + /* obj_desc does not exist, create one */ + + if (node->type == ACPI_TYPE_ANY) { + type = ACPI_TYPE_DEVICE; + } + else { + type = node->type; + } + + obj_desc = acpi_ut_create_internal_object (type); + if (!obj_desc) { + status = AE_NO_MEMORY; + goto unlock_and_exit; + } + + /* Init new descriptor */ + + obj_desc->common.type = (u8) type; + + /* Attach the new object to the Node */ + + status = acpi_ns_attach_object (node, obj_desc, type); + + /* Remove local reference to the object */ + + acpi_ut_remove_reference (obj_desc); + + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } + } + + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, + "Installing address handler for region %s(%X) on Device %4.4s %p(%p)\n", + acpi_ut_get_region_name (space_id), space_id, + acpi_ut_get_node_name (node), node, obj_desc)); + + /* + * Install the handler + * + * At this point there is no existing handler. + * Just allocate the object for the handler and link it + * into the list. + */ + handler_obj = acpi_ut_create_internal_object (ACPI_TYPE_LOCAL_ADDRESS_HANDLER); + if (!handler_obj) { + status = AE_NO_MEMORY; + goto unlock_and_exit; + } + + /* Init handler obj */ + + handler_obj->address_space.space_id = (u8) space_id; + handler_obj->address_space.hflags = flags; + handler_obj->address_space.region_list = NULL; + handler_obj->address_space.node = node; + handler_obj->address_space.handler = handler; + handler_obj->address_space.context = context; + handler_obj->address_space.setup = setup; + + /* Install at head of Device.address_space list */ + + handler_obj->address_space.next = obj_desc->device.handler; + + /* + * The Device object is the first reference on the handler_obj. + * Each region that uses the handler adds a reference. + */ + obj_desc->device.handler = handler_obj; + + /* + * Walk the namespace finding all of the regions this + * handler will manage. + * + * Start at the device and search the branch toward + * the leaf nodes until either the leaf is encountered or + * a device is detected that has an address handler of the + * same type. 
+ * + * In either case, back up and search down the remainder + * of the branch + */ + status = acpi_ns_walk_namespace (ACPI_TYPE_ANY, node, ACPI_UINT32_MAX, + ACPI_NS_WALK_UNLOCK, acpi_ev_install_handler, + handler_obj, NULL); + +unlock_and_exit: + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_execute_reg_methods + * + * PARAMETERS: Node - Namespace node for the device + * space_id - The address space ID + * + * RETURN: Status + * + * DESCRIPTION: Run all _REG methods for the input Space ID; + * Note: assumes namespace is locked, or system init time. + * + ******************************************************************************/ + +acpi_status +acpi_ev_execute_reg_methods ( + struct acpi_namespace_node *node, + acpi_adr_space_type space_id) +{ + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_execute_reg_methods"); + + + /* + * Run all _REG methods for all Operation Regions for this + * space ID. This is a separate walk in order to handle any + * interdependencies between regions and _REG methods. (i.e. handlers + * must be installed for all regions of this Space ID before we + * can run any _REG methods) + */ + status = acpi_ns_walk_namespace (ACPI_TYPE_ANY, node, ACPI_UINT32_MAX, + ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, + &space_id, NULL); + + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_reg_run + * + * PARAMETERS: walk_namespace callback + * + * DESCRIPTION: Run _REg method for region objects of the requested space_iD + * + ******************************************************************************/ + +acpi_status +acpi_ev_reg_run ( + acpi_handle obj_handle, + u32 level, + void *context, + void **return_value) +{ + union acpi_operand_object *obj_desc; + struct acpi_namespace_node *node; + acpi_adr_space_type space_id; + acpi_status status; + + + space_id = *ACPI_CAST_PTR (acpi_adr_space_type, context); + + /* Convert and validate the device handle */ + + node = acpi_ns_map_handle_to_node (obj_handle); + if (!node) { + return (AE_BAD_PARAMETER); + } + + /* + * We only care about regions.and objects + * that are allowed to have address space handlers + */ + if ((node->type != ACPI_TYPE_REGION) && + (node != acpi_gbl_root_node)) { + return (AE_OK); + } + + /* Check for an existing internal object */ + + obj_desc = acpi_ns_get_attached_object (node); + if (!obj_desc) { + /* No object, just exit */ + + return (AE_OK); + } + + /* Object is a Region */ + + if (obj_desc->region.space_id != space_id) { + /* + * This region is for a different address space + * -- just ignore it + */ + return (AE_OK); + } + + status = acpi_ev_execute_reg_method (obj_desc, 1); + return (status); +} + diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c new file mode 100644 index 000000000000..4983a3378be5 --- /dev/null +++ b/drivers/acpi/events/evrgnini.c @@ -0,0 +1,580 @@ +/****************************************************************************** + * + * Module Name: evrgnini- ACPI address_space (op_region) init + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2005, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + + +#include <acpi/acpi.h> +#include <acpi/acevents.h> +#include <acpi/acnamesp.h> + +#define _COMPONENT ACPI_EVENTS + ACPI_MODULE_NAME ("evrgnini") + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_system_memory_region_setup + * + * PARAMETERS: Handle - Region we are interested in + * Function - Start or stop + * handler_context - Address space handler context + * region_context - Region specific context + * + * RETURN: Status + * + * DESCRIPTION: Do any prep work for region handling, a nop for now + * + ******************************************************************************/ + +acpi_status +acpi_ev_system_memory_region_setup ( + acpi_handle handle, + u32 function, + void *handler_context, + void **region_context) +{ + union acpi_operand_object *region_desc = (union acpi_operand_object *) handle; + struct acpi_mem_space_context *local_region_context; + + + ACPI_FUNCTION_TRACE ("ev_system_memory_region_setup"); + + + if (function == ACPI_REGION_DEACTIVATE) { + if (*region_context) { + ACPI_MEM_FREE (*region_context); + *region_context = NULL; + } + return_ACPI_STATUS (AE_OK); + } + + /* Create a new context */ + + local_region_context = ACPI_MEM_CALLOCATE (sizeof (struct acpi_mem_space_context)); + if (!(local_region_context)) { + return_ACPI_STATUS (AE_NO_MEMORY); + } + + /* Save the region length and address for use in the handler */ + + local_region_context->length = region_desc->region.length; + local_region_context->address = region_desc->region.address; + + *region_context = local_region_context; + return_ACPI_STATUS (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_io_space_region_setup + * + * PARAMETERS: Handle - Region we are interested in + * Function - Start or stop + * handler_context - Address space handler 
context + * region_context - Region specific context + * + * RETURN: Status + * + * DESCRIPTION: Do any prep work for region handling + * + ******************************************************************************/ + +acpi_status +acpi_ev_io_space_region_setup ( + acpi_handle handle, + u32 function, + void *handler_context, + void **region_context) +{ + ACPI_FUNCTION_TRACE ("ev_io_space_region_setup"); + + + if (function == ACPI_REGION_DEACTIVATE) { + *region_context = NULL; + } + else { + *region_context = handler_context; + } + + return_ACPI_STATUS (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_pci_config_region_setup + * + * PARAMETERS: Handle - Region we are interested in + * Function - Start or stop + * handler_context - Address space handler context + * region_context - Region specific context + * + * RETURN: Status + * + * DESCRIPTION: Do any prep work for region handling + * + * MUTEX: Assumes namespace is not locked + * + ******************************************************************************/ + +acpi_status +acpi_ev_pci_config_region_setup ( + acpi_handle handle, + u32 function, + void *handler_context, + void **region_context) +{ + acpi_status status = AE_OK; + acpi_integer pci_value; + struct acpi_pci_id *pci_id = *region_context; + union acpi_operand_object *handler_obj; + struct acpi_namespace_node *parent_node; + struct acpi_namespace_node *pci_root_node; + union acpi_operand_object *region_obj = (union acpi_operand_object *) handle; + struct acpi_device_id object_hID; + + + ACPI_FUNCTION_TRACE ("ev_pci_config_region_setup"); + + + handler_obj = region_obj->region.handler; + if (!handler_obj) { + /* + * No installed handler. This shouldn't happen because the dispatch + * routine checks before we get here, but we check again just in case. + */ + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, + "Attempting to init a region %p, with no handler\n", region_obj)); + return_ACPI_STATUS (AE_NOT_EXIST); + } + + *region_context = NULL; + if (function == ACPI_REGION_DEACTIVATE) { + if (pci_id) { + ACPI_MEM_FREE (pci_id); + } + return_ACPI_STATUS (status); + } + + parent_node = acpi_ns_get_parent_node (region_obj->region.node); + + /* + * Get the _SEG and _BBN values from the device upon which the handler + * is installed. + * + * We need to get the _SEG and _BBN objects relative to the PCI BUS device. + * This is the device the handler has been registered to handle. + */ + + /* + * If the address_space.Node is still pointing to the root, we need + * to scan upward for a PCI Root bridge and re-associate the op_region + * handlers with that device. + */ + if (handler_obj->address_space.node == acpi_gbl_root_node) { + /* Start search from the parent object */ + + pci_root_node = parent_node; + while (pci_root_node != acpi_gbl_root_node) { + status = acpi_ut_execute_HID (pci_root_node, &object_hID); + if (ACPI_SUCCESS (status)) { + /* Got a valid _HID, check if this is a PCI root */ + + if (!(ACPI_STRNCMP (object_hID.value, PCI_ROOT_HID_STRING, + sizeof (PCI_ROOT_HID_STRING)))) { + /* Install a handler for this PCI root bridge */ + + status = acpi_install_address_space_handler ((acpi_handle) pci_root_node, + ACPI_ADR_SPACE_PCI_CONFIG, + ACPI_DEFAULT_HANDLER, NULL, NULL); + if (ACPI_FAILURE (status)) { + if (status == AE_SAME_HANDLER) { + /* + * It is OK if the handler is already installed on the root + * bridge. Still need to return a context object for the + * new PCI_Config operation region, however. 
+ */ + status = AE_OK; + } + else { + ACPI_REPORT_ERROR (( + "Could not install pci_config handler for Root Bridge %4.4s, %s\n", + acpi_ut_get_node_name (pci_root_node), acpi_format_exception (status))); + } + } + break; + } + } + + pci_root_node = acpi_ns_get_parent_node (pci_root_node); + } + + /* PCI root bridge not found, use namespace root node */ + } + else { + pci_root_node = handler_obj->address_space.node; + } + + /* + * If this region is now initialized, we are done. + * (install_address_space_handler could have initialized it) + */ + if (region_obj->region.flags & AOPOBJ_SETUP_COMPLETE) { + return_ACPI_STATUS (AE_OK); + } + + /* Region is still not initialized. Create a new context */ + + pci_id = ACPI_MEM_CALLOCATE (sizeof (struct acpi_pci_id)); + if (!pci_id) { + return_ACPI_STATUS (AE_NO_MEMORY); + } + + /* + * For PCI_Config space access, we need the segment, bus, + * device and function numbers. Acquire them here. + */ + + /* + * Get the PCI device and function numbers from the _ADR object + * contained in the parent's scope. + */ + status = acpi_ut_evaluate_numeric_object (METHOD_NAME__ADR, parent_node, &pci_value); + + /* + * The default is zero, and since the allocation above zeroed + * the data, just do nothing on failure. + */ + if (ACPI_SUCCESS (status)) { + pci_id->device = ACPI_HIWORD (ACPI_LODWORD (pci_value)); + pci_id->function = ACPI_LOWORD (ACPI_LODWORD (pci_value)); + } + + /* The PCI segment number comes from the _SEG method */ + + status = acpi_ut_evaluate_numeric_object (METHOD_NAME__SEG, pci_root_node, &pci_value); + if (ACPI_SUCCESS (status)) { + pci_id->segment = ACPI_LOWORD (pci_value); + } + + /* The PCI bus number comes from the _BBN method */ + + status = acpi_ut_evaluate_numeric_object (METHOD_NAME__BBN, pci_root_node, &pci_value); + if (ACPI_SUCCESS (status)) { + pci_id->bus = ACPI_LOWORD (pci_value); + } + + /* Complete this device's pci_id */ + + acpi_os_derive_pci_id (pci_root_node, region_obj->region.node, &pci_id); + + *region_context = pci_id; + return_ACPI_STATUS (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_pci_bar_region_setup + * + * PARAMETERS: Handle - Region we are interested in + * Function - Start or stop + * handler_context - Address space handler context + * region_context - Region specific context + * + * RETURN: Status + * + * DESCRIPTION: Do any prep work for region handling + * + * MUTEX: Assumes namespace is not locked + * + ******************************************************************************/ + +acpi_status +acpi_ev_pci_bar_region_setup ( + acpi_handle handle, + u32 function, + void *handler_context, + void **region_context) +{ + ACPI_FUNCTION_TRACE ("ev_pci_bar_region_setup"); + + + return_ACPI_STATUS (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_cmos_region_setup + * + * PARAMETERS: Handle - Region we are interested in + * Function - Start or stop + * handler_context - Address space handler context + * region_context - Region specific context + * + * RETURN: Status + * + * DESCRIPTION: Do any prep work for region handling + * + * MUTEX: Assumes namespace is not locked + * + ******************************************************************************/ + +acpi_status +acpi_ev_cmos_region_setup ( + acpi_handle handle, + u32 function, + void *handler_context, + void **region_context) +{ + ACPI_FUNCTION_TRACE ("ev_cmos_region_setup"); + + + return_ACPI_STATUS 
(AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_default_region_setup + * + * PARAMETERS: Handle - Region we are interested in + * Function - Start or stop + * handler_context - Address space handler context + * region_context - Region specific context + * + * RETURN: Status + * + * DESCRIPTION: Do any prep work for region handling + * + ******************************************************************************/ + +acpi_status +acpi_ev_default_region_setup ( + acpi_handle handle, + u32 function, + void *handler_context, + void **region_context) +{ + ACPI_FUNCTION_TRACE ("ev_default_region_setup"); + + + if (function == ACPI_REGION_DEACTIVATE) { + *region_context = NULL; + } + else { + *region_context = handler_context; + } + + return_ACPI_STATUS (AE_OK); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_initialize_region + * + * PARAMETERS: region_obj - Region we are initializing + * acpi_ns_locked - Is namespace locked? + * + * RETURN: Status + * + * DESCRIPTION: Initializes the region, finds any _REG methods and saves them + * for execution at a later time + * + * Get the appropriate address space handler for a newly + * created region. + * + * This also performs address space specific initialization. For + * example, PCI regions must have an _ADR object that contains + * a PCI address in the scope of the definition. This address is + * required to perform an access to PCI config space. + * + ******************************************************************************/ + +acpi_status +acpi_ev_initialize_region ( + union acpi_operand_object *region_obj, + u8 acpi_ns_locked) +{ + union acpi_operand_object *handler_obj; + union acpi_operand_object *obj_desc; + acpi_adr_space_type space_id; + struct acpi_namespace_node *node; + acpi_status status; + struct acpi_namespace_node *method_node; + acpi_name *reg_name_ptr = (acpi_name *) METHOD_NAME__REG; + union acpi_operand_object *region_obj2; + + + ACPI_FUNCTION_TRACE_U32 ("ev_initialize_region", acpi_ns_locked); + + + if (!region_obj) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + if (region_obj->common.flags & AOPOBJ_OBJECT_INITIALIZED) { + return_ACPI_STATUS (AE_OK); + } + + region_obj2 = acpi_ns_get_secondary_object (region_obj); + if (!region_obj2) { + return_ACPI_STATUS (AE_NOT_EXIST); + } + + node = acpi_ns_get_parent_node (region_obj->region.node); + space_id = region_obj->region.space_id; + + /* Setup defaults */ + + region_obj->region.handler = NULL; + region_obj2->extra.method_REG = NULL; + region_obj->common.flags &= ~(AOPOBJ_SETUP_COMPLETE); + region_obj->common.flags |= AOPOBJ_OBJECT_INITIALIZED; + + /* Find any "_REG" method associated with this region definition */ + + status = acpi_ns_search_node (*reg_name_ptr, node, + ACPI_TYPE_METHOD, &method_node); + if (ACPI_SUCCESS (status)) { + /* + * The _REG method is optional and there can be only one per region + * definition. 
This will be executed when the handler is attached + * or removed + */ + region_obj2->extra.method_REG = method_node; + } + + /* + * The following loop depends upon the root Node having no parent + * ie: acpi_gbl_root_node->parent_entry being set to NULL + */ + while (node) { + /* Check to see if a handler exists */ + + handler_obj = NULL; + obj_desc = acpi_ns_get_attached_object (node); + if (obj_desc) { + /* Can only be a handler if the object exists */ + + switch (node->type) { + case ACPI_TYPE_DEVICE: + + handler_obj = obj_desc->device.handler; + break; + + case ACPI_TYPE_PROCESSOR: + + handler_obj = obj_desc->processor.handler; + break; + + case ACPI_TYPE_THERMAL: + + handler_obj = obj_desc->thermal_zone.handler; + break; + + default: + /* Ignore other objects */ + break; + } + + while (handler_obj) { + /* Is this handler of the correct type? */ + + if (handler_obj->address_space.space_id == space_id) { + /* Found correct handler */ + + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, + "Found handler %p for region %p in obj %p\n", + handler_obj, region_obj, obj_desc)); + + status = acpi_ev_attach_region (handler_obj, region_obj, + acpi_ns_locked); + + /* + * Tell all users that this region is usable by running the _REG + * method + */ + if (acpi_ns_locked) { + status = acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + } + + status = acpi_ev_execute_reg_method (region_obj, 1); + + if (acpi_ns_locked) { + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + } + + return_ACPI_STATUS (AE_OK); + } + + /* Try next handler in the list */ + + handler_obj = handler_obj->address_space.next; + } + } + + /* + * This node does not have the handler we need; + * Pop up one level + */ + node = acpi_ns_get_parent_node (node); + } + + /* If we get here, there is no handler for this region */ + + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, + "No handler for region_type %s(%X) (region_obj %p)\n", + acpi_ut_get_region_name (space_id), space_id, region_obj)); + + return_ACPI_STATUS (AE_NOT_EXIST); +} + diff --git a/drivers/acpi/events/evsci.c b/drivers/acpi/events/evsci.c new file mode 100644 index 000000000000..46b31995c827 --- /dev/null +++ b/drivers/acpi/events/evsci.c @@ -0,0 +1,199 @@ +/******************************************************************************* + * + * Module Name: evsci - System Control Interrupt configuration and + * legacy to ACPI mode state transition functions + * + ******************************************************************************/ + +/* + * Copyright (C) 2000 - 2005, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include <acpi/acpi.h> +#include <acpi/acevents.h> + + +#define _COMPONENT ACPI_EVENTS + ACPI_MODULE_NAME ("evsci") + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_sci_xrupt_handler + * + * PARAMETERS: Context - Calling Context + * + * RETURN: Status code indicates whether interrupt was handled. + * + * DESCRIPTION: Interrupt handler that will figure out what function or + * control method to call to deal with a SCI. + * + ******************************************************************************/ + +static u32 ACPI_SYSTEM_XFACE +acpi_ev_sci_xrupt_handler ( + void *context) +{ + struct acpi_gpe_xrupt_info *gpe_xrupt_list = context; + u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED; + + + ACPI_FUNCTION_TRACE("ev_sci_xrupt_handler"); + + + /* + * We are guaranteed by the ACPI CA initialization/shutdown code that + * if this interrupt handler is installed, ACPI is enabled. + */ + + /* + * Fixed Events: + * Check for and dispatch any Fixed Events that have occurred + */ + interrupt_handled |= acpi_ev_fixed_event_detect (); + + /* + * General Purpose Events: + * Check for and dispatch any GPEs that have occurred + */ + interrupt_handled |= acpi_ev_gpe_detect (gpe_xrupt_list); + + return_VALUE (interrupt_handled); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_ev_gpe_xrupt_handler + * + * PARAMETERS: Context - Calling Context + * + * RETURN: Status code indicates whether interrupt was handled. + * + * DESCRIPTION: Handler for GPE Block Device interrupts + * + ******************************************************************************/ + +u32 ACPI_SYSTEM_XFACE +acpi_ev_gpe_xrupt_handler ( + void *context) +{ + struct acpi_gpe_xrupt_info *gpe_xrupt_list = context; + u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED; + + + ACPI_FUNCTION_TRACE("ev_gpe_xrupt_handler"); + + + /* + * We are guaranteed by the ACPI CA initialization/shutdown code that + * if this interrupt handler is installed, ACPI is enabled. + */ + + /* + * GPEs: + * Check for and dispatch any GPEs that have occurred + */ + interrupt_handled |= acpi_ev_gpe_detect (gpe_xrupt_list); + + return_VALUE (interrupt_handled); +} + + +/****************************************************************************** + * + * FUNCTION: acpi_ev_install_sci_handler + * + * PARAMETERS: none + * + * RETURN: Status + * + * DESCRIPTION: Installs SCI handler. 
+ * + ******************************************************************************/ + +u32 +acpi_ev_install_sci_handler (void) +{ + u32 status = AE_OK; + + + ACPI_FUNCTION_TRACE ("ev_install_sci_handler"); + + + status = acpi_os_install_interrupt_handler ((u32) acpi_gbl_FADT->sci_int, + acpi_ev_sci_xrupt_handler, acpi_gbl_gpe_xrupt_list_head); + return_ACPI_STATUS (status); +} + + +/****************************************************************************** + * + * FUNCTION: acpi_ev_remove_sci_handler + * + * PARAMETERS: none + * + * RETURN: E_OK if handler uninstalled OK, E_ERROR if handler was not + * installed to begin with + * + * DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be + * taken. + * + * Note: It doesn't seem important to disable all events or set the event + * enable registers to their original values. The OS should disable + * the SCI interrupt level when the handler is removed, so no more + * events will come in. + * + ******************************************************************************/ + +acpi_status +acpi_ev_remove_sci_handler (void) +{ + acpi_status status; + + + ACPI_FUNCTION_TRACE ("ev_remove_sci_handler"); + + + /* Just let the OS remove the handler and disable the level */ + + status = acpi_os_remove_interrupt_handler ((u32) acpi_gbl_FADT->sci_int, + acpi_ev_sci_xrupt_handler); + + return_ACPI_STATUS (status); +} + + diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/events/evxface.c new file mode 100644 index 000000000000..0bfec10a5f1e --- /dev/null +++ b/drivers/acpi/events/evxface.c @@ -0,0 +1,834 @@ +/****************************************************************************** + * + * Module Name: evxface - External interfaces for ACPI events + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2005, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include <linux/module.h> + +#include <acpi/acpi.h> +#include <acpi/acnamesp.h> +#include <acpi/acevents.h> +#include <acpi/acinterp.h> + +#define _COMPONENT ACPI_EVENTS + ACPI_MODULE_NAME ("evxface") + + +/******************************************************************************* + * + * FUNCTION: acpi_install_exception_handler + * + * PARAMETERS: Handler - Pointer to the handler function for the + * event + * + * RETURN: Status + * + * DESCRIPTION: Saves the pointer to the handler function + * + ******************************************************************************/ +#ifdef ACPI_FUTURE_USAGE +acpi_status +acpi_install_exception_handler ( + acpi_exception_handler handler) +{ + acpi_status status; + + + ACPI_FUNCTION_TRACE ("acpi_install_exception_handler"); + + + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Don't allow two handlers. */ + + if (acpi_gbl_exception_handler) { + status = AE_ALREADY_EXISTS; + goto cleanup; + } + + /* Install the handler */ + + acpi_gbl_exception_handler = handler; + +cleanup: + (void) acpi_ut_release_mutex (ACPI_MTX_EVENTS); + return_ACPI_STATUS (status); +} +#endif /* ACPI_FUTURE_USAGE */ + + +/******************************************************************************* + * + * FUNCTION: acpi_install_fixed_event_handler + * + * PARAMETERS: Event - Event type to enable. + * Handler - Pointer to the handler function for the + * event + * Context - Value passed to the handler on each GPE + * + * RETURN: Status + * + * DESCRIPTION: Saves the pointer to the handler function and then enables the + * event. + * + ******************************************************************************/ + +acpi_status +acpi_install_fixed_event_handler ( + u32 event, + acpi_event_handler handler, + void *context) +{ + acpi_status status; + + + ACPI_FUNCTION_TRACE ("acpi_install_fixed_event_handler"); + + + /* Parameter validation */ + + if (event > ACPI_EVENT_MAX) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Don't allow two handlers. 
*/ + + if (NULL != acpi_gbl_fixed_event_handlers[event].handler) { + status = AE_ALREADY_EXISTS; + goto cleanup; + } + + /* Install the handler before enabling the event */ + + acpi_gbl_fixed_event_handlers[event].handler = handler; + acpi_gbl_fixed_event_handlers[event].context = context; + + status = acpi_clear_event (event); + if (ACPI_SUCCESS(status)) + status = acpi_enable_event (event, 0); + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "Could not enable fixed event.\n")); + + /* Remove the handler */ + + acpi_gbl_fixed_event_handlers[event].handler = NULL; + acpi_gbl_fixed_event_handlers[event].context = NULL; + } + else { + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, + "Enabled fixed event %X, Handler=%p\n", event, handler)); + } + + +cleanup: + (void) acpi_ut_release_mutex (ACPI_MTX_EVENTS); + return_ACPI_STATUS (status); +} +EXPORT_SYMBOL(acpi_install_fixed_event_handler); + + +/******************************************************************************* + * + * FUNCTION: acpi_remove_fixed_event_handler + * + * PARAMETERS: Event - Event type to disable. + * Handler - Address of the handler + * + * RETURN: Status + * + * DESCRIPTION: Disables the event and unregisters the event handler. + * + ******************************************************************************/ + +acpi_status +acpi_remove_fixed_event_handler ( + u32 event, + acpi_event_handler handler) +{ + acpi_status status = AE_OK; + + + ACPI_FUNCTION_TRACE ("acpi_remove_fixed_event_handler"); + + + /* Parameter validation */ + + if (event > ACPI_EVENT_MAX) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Disable the event before removing the handler */ + + status = acpi_disable_event (event, 0); + + /* Always Remove the handler */ + + acpi_gbl_fixed_event_handlers[event].handler = NULL; + acpi_gbl_fixed_event_handlers[event].context = NULL; + + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_WARN, + "Could not write to fixed event enable register.\n")); + } + else { + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Disabled fixed event %X.\n", event)); + } + + (void) acpi_ut_release_mutex (ACPI_MTX_EVENTS); + return_ACPI_STATUS (status); +} +EXPORT_SYMBOL(acpi_remove_fixed_event_handler); + + +/******************************************************************************* + * + * FUNCTION: acpi_install_notify_handler + * + * PARAMETERS: Device - The device for which notifies will be handled + * handler_type - The type of handler: + * ACPI_SYSTEM_NOTIFY: system_handler (00-7f) + * ACPI_DEVICE_NOTIFY: driver_handler (80-ff) + * ACPI_ALL_NOTIFY: both system and device + * Handler - Address of the handler + * Context - Value passed to the handler on each GPE + * + * RETURN: Status + * + * DESCRIPTION: Install a handler for notifies on an ACPI device + * + ******************************************************************************/ + +acpi_status +acpi_install_notify_handler ( + acpi_handle device, + u32 handler_type, + acpi_notify_handler handler, + void *context) +{ + union acpi_operand_object *obj_desc; + union acpi_operand_object *notify_obj; + struct acpi_namespace_node *node; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("acpi_install_notify_handler"); + + + /* Parameter validation */ + + if ((!device) || + (!handler) || + (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if 
(ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Convert and validate the device handle */ + + node = acpi_ns_map_handle_to_node (device); + if (!node) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + /* + * Root Object: + * Registering a notify handler on the root object indicates that the + * caller wishes to receive notifications for all objects. Note that + * only one <external> global handler can be regsitered (per notify type). + */ + if (device == ACPI_ROOT_OBJECT) { + /* Make sure the handler is not already installed */ + + if (((handler_type & ACPI_SYSTEM_NOTIFY) && + acpi_gbl_system_notify.handler) || + ((handler_type & ACPI_DEVICE_NOTIFY) && + acpi_gbl_device_notify.handler)) { + status = AE_ALREADY_EXISTS; + goto unlock_and_exit; + } + + if (handler_type & ACPI_SYSTEM_NOTIFY) { + acpi_gbl_system_notify.node = node; + acpi_gbl_system_notify.handler = handler; + acpi_gbl_system_notify.context = context; + } + + if (handler_type & ACPI_DEVICE_NOTIFY) { + acpi_gbl_device_notify.node = node; + acpi_gbl_device_notify.handler = handler; + acpi_gbl_device_notify.context = context; + } + + /* Global notify handler installed */ + } + + /* + * All Other Objects: + * Caller will only receive notifications specific to the target object. + * Note that only certain object types can receive notifications. + */ + else { + /* Notifies allowed on this object? */ + + if (!acpi_ev_is_notify_object (node)) { + status = AE_TYPE; + goto unlock_and_exit; + } + + /* Check for an existing internal object */ + + obj_desc = acpi_ns_get_attached_object (node); + if (obj_desc) { + /* Object exists - make sure there's no handler */ + + if (((handler_type & ACPI_SYSTEM_NOTIFY) && + obj_desc->common_notify.system_notify) || + ((handler_type & ACPI_DEVICE_NOTIFY) && + obj_desc->common_notify.device_notify)) { + status = AE_ALREADY_EXISTS; + goto unlock_and_exit; + } + } + else { + /* Create a new object */ + + obj_desc = acpi_ut_create_internal_object (node->type); + if (!obj_desc) { + status = AE_NO_MEMORY; + goto unlock_and_exit; + } + + /* Attach new object to the Node */ + + status = acpi_ns_attach_object (device, obj_desc, node->type); + + /* Remove local reference to the object */ + + acpi_ut_remove_reference (obj_desc); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } + } + + /* Install the handler */ + + notify_obj = acpi_ut_create_internal_object (ACPI_TYPE_LOCAL_NOTIFY); + if (!notify_obj) { + status = AE_NO_MEMORY; + goto unlock_and_exit; + } + + notify_obj->notify.node = node; + notify_obj->notify.handler = handler; + notify_obj->notify.context = context; + + if (handler_type & ACPI_SYSTEM_NOTIFY) { + obj_desc->common_notify.system_notify = notify_obj; + } + + if (handler_type & ACPI_DEVICE_NOTIFY) { + obj_desc->common_notify.device_notify = notify_obj; + } + + if (handler_type == ACPI_ALL_NOTIFY) { + /* Extra ref if installed in both */ + + acpi_ut_add_reference (notify_obj); + } + } + + +unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + return_ACPI_STATUS (status); +} +EXPORT_SYMBOL(acpi_install_notify_handler); + + +/******************************************************************************* + * + * FUNCTION: acpi_remove_notify_handler + * + * PARAMETERS: Device - The device for which notifies will be handled + * handler_type - The type of handler: + * ACPI_SYSTEM_NOTIFY: system_handler (00-7f) + * ACPI_DEVICE_NOTIFY: driver_handler (80-ff) + * ACPI_ALL_NOTIFY: both system and device + * Handler - Address of the handler + * + * 
RETURN: Status + * + * DESCRIPTION: Remove a handler for notifies on an ACPI device + * + ******************************************************************************/ + +acpi_status +acpi_remove_notify_handler ( + acpi_handle device, + u32 handler_type, + acpi_notify_handler handler) +{ + union acpi_operand_object *notify_obj; + union acpi_operand_object *obj_desc; + struct acpi_namespace_node *node; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("acpi_remove_notify_handler"); + + + /* Parameter validation */ + + if ((!device) || + (!handler) || + (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Convert and validate the device handle */ + + node = acpi_ns_map_handle_to_node (device); + if (!node) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + /* Root Object */ + + if (device == ACPI_ROOT_OBJECT) { + ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Removing notify handler for ROOT object.\n")); + + if (((handler_type & ACPI_SYSTEM_NOTIFY) && + !acpi_gbl_system_notify.handler) || + ((handler_type & ACPI_DEVICE_NOTIFY) && + !acpi_gbl_device_notify.handler)) { + status = AE_NOT_EXIST; + goto unlock_and_exit; + } + + /* Make sure all deferred tasks are completed */ + + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + acpi_os_wait_events_complete(NULL); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + if (handler_type & ACPI_SYSTEM_NOTIFY) { + acpi_gbl_system_notify.node = NULL; + acpi_gbl_system_notify.handler = NULL; + acpi_gbl_system_notify.context = NULL; + } + + if (handler_type & ACPI_DEVICE_NOTIFY) { + acpi_gbl_device_notify.node = NULL; + acpi_gbl_device_notify.handler = NULL; + acpi_gbl_device_notify.context = NULL; + } + } + + /* All Other Objects */ + + else { + /* Notifies allowed on this object? 
*/ + + if (!acpi_ev_is_notify_object (node)) { + status = AE_TYPE; + goto unlock_and_exit; + } + + /* Check for an existing internal object */ + + obj_desc = acpi_ns_get_attached_object (node); + if (!obj_desc) { + status = AE_NOT_EXIST; + goto unlock_and_exit; + } + + /* Object exists - make sure there's an existing handler */ + + if (handler_type & ACPI_SYSTEM_NOTIFY) { + notify_obj = obj_desc->common_notify.system_notify; + if ((!notify_obj) || + (notify_obj->notify.handler != handler)) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + /* Make sure all deferred tasks are completed */ + + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + acpi_os_wait_events_complete(NULL); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Remove the handler */ + obj_desc->common_notify.system_notify = NULL; + acpi_ut_remove_reference (notify_obj); + } + + if (handler_type & ACPI_DEVICE_NOTIFY) { + notify_obj = obj_desc->common_notify.device_notify; + if ((!notify_obj) || + (notify_obj->notify.handler != handler)) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + /* Make sure all deferred tasks are completed */ + + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + acpi_os_wait_events_complete(NULL); + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Remove the handler */ + obj_desc->common_notify.device_notify = NULL; + acpi_ut_remove_reference (notify_obj); + } + } + + +unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + return_ACPI_STATUS (status); +} +EXPORT_SYMBOL(acpi_remove_notify_handler); + + +/******************************************************************************* + * + * FUNCTION: acpi_install_gpe_handler + * + * PARAMETERS: gpe_number - The GPE number within the GPE block + * gpe_block - GPE block (NULL == FADT GPEs) + * Type - Whether this GPE should be treated as an + * edge- or level-triggered interrupt. + * Address - Address of the handler + * Context - Value passed to the handler on each GPE + * + * RETURN: Status + * + * DESCRIPTION: Install a handler for a General Purpose Event. 
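+ *
+ *              Illustrative usage sketch (not part of this change; the GPE
+ *              number and the trigger-type constant are assumptions made for
+ *              the example): a driver that services GPE 0x16 from the
+ *              FADT-defined GPE block might call
+ *
+ *                  status = acpi_install_gpe_handler (NULL, 0x16,
+ *                               ACPI_GPE_LEVEL_TRIGGERED,
+ *                               my_gpe_handler, my_context);
+ *
+ *              where a NULL gpe_device selects the FADT GPE block and
+ *              my_gpe_handler is an acpi_event_handler callback.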
+ * + ******************************************************************************/ + +acpi_status +acpi_install_gpe_handler ( + acpi_handle gpe_device, + u32 gpe_number, + u32 type, + acpi_event_handler address, + void *context) +{ + struct acpi_gpe_event_info *gpe_event_info; + struct acpi_handler_info *handler; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("acpi_install_gpe_handler"); + + + /* Parameter validation */ + + if ((!address) || (type > ACPI_GPE_XRUPT_TYPE_MASK)) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Ensure that we have a valid GPE number */ + + gpe_event_info = acpi_ev_get_gpe_event_info (gpe_device, gpe_number); + if (!gpe_event_info) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + /* Make sure that there isn't a handler there already */ + + if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_HANDLER) { + status = AE_ALREADY_EXISTS; + goto unlock_and_exit; + } + + /* Allocate and init handler object */ + + handler = ACPI_MEM_CALLOCATE (sizeof (struct acpi_handler_info)); + if (!handler) { + status = AE_NO_MEMORY; + goto unlock_and_exit; + } + + handler->address = address; + handler->context = context; + handler->method_node = gpe_event_info->dispatch.method_node; + + /* Disable the GPE before installing the handler */ + + status = acpi_ev_disable_gpe (gpe_event_info); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } + + /* Install the handler */ + + acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); + gpe_event_info->dispatch.handler = handler; + + /* Setup up dispatch flags to indicate handler (vs. method) */ + + gpe_event_info->flags &= ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); /* Clear bits */ + gpe_event_info->flags |= (u8) (type | ACPI_GPE_DISPATCH_HANDLER); + + acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); + + +unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_EVENTS); + return_ACPI_STATUS (status); +} +EXPORT_SYMBOL(acpi_install_gpe_handler); + + +/******************************************************************************* + * + * FUNCTION: acpi_remove_gpe_handler + * + * PARAMETERS: gpe_number - The event to remove a handler + * gpe_block - GPE block (NULL == FADT GPEs) + * Address - Address of the handler + * + * RETURN: Status + * + * DESCRIPTION: Remove a handler for a General Purpose acpi_event. 
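+ *
+ *              For symmetry with the install sketch above (again illustrative
+ *              only), the same handler would later be removed with
+ *
+ *                  status = acpi_remove_gpe_handler (NULL, 0x16, my_gpe_handler);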
+ * + ******************************************************************************/ + +acpi_status +acpi_remove_gpe_handler ( + acpi_handle gpe_device, + u32 gpe_number, + acpi_event_handler address) +{ + struct acpi_gpe_event_info *gpe_event_info; + struct acpi_handler_info *handler; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("acpi_remove_gpe_handler"); + + + /* Parameter validation */ + + if (!address) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Ensure that we have a valid GPE number */ + + gpe_event_info = acpi_ev_get_gpe_event_info (gpe_device, gpe_number); + if (!gpe_event_info) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + /* Make sure that a handler is indeed installed */ + + if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) != ACPI_GPE_DISPATCH_HANDLER) { + status = AE_NOT_EXIST; + goto unlock_and_exit; + } + + /* Make sure that the installed handler is the same */ + + if (gpe_event_info->dispatch.handler->address != address) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + /* Disable the GPE before removing the handler */ + + status = acpi_ev_disable_gpe (gpe_event_info); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } + + /* Make sure all deferred tasks are completed */ + + (void) acpi_ut_release_mutex (ACPI_MTX_EVENTS); + acpi_os_wait_events_complete(NULL); + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Remove the handler */ + + acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); + handler = gpe_event_info->dispatch.handler; + + /* Restore Method node (if any), set dispatch flags */ + + gpe_event_info->dispatch.method_node = handler->method_node; + gpe_event_info->flags &= ~ACPI_GPE_DISPATCH_MASK; /* Clear bits */ + if (handler->method_node) { + gpe_event_info->flags |= ACPI_GPE_DISPATCH_METHOD; + } + acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR); + + /* Now we can free the handler object */ + + ACPI_MEM_FREE (handler); + + +unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_EVENTS); + return_ACPI_STATUS (status); +} +EXPORT_SYMBOL(acpi_remove_gpe_handler); + + +/******************************************************************************* + * + * FUNCTION: acpi_acquire_global_lock + * + * PARAMETERS: Timeout - How long the caller is willing to wait + * out_handle - A handle to the lock if acquired + * + * RETURN: Status + * + * DESCRIPTION: Acquire the ACPI Global Lock + * + ******************************************************************************/ + +acpi_status +acpi_acquire_global_lock ( + u16 timeout, + u32 *handle) +{ + acpi_status status; + + + if (!handle) { + return (AE_BAD_PARAMETER); + } + + status = acpi_ex_enter_interpreter (); + if (ACPI_FAILURE (status)) { + return (status); + } + + status = acpi_ev_acquire_global_lock (timeout); + acpi_ex_exit_interpreter (); + + if (ACPI_SUCCESS (status)) { + acpi_gbl_global_lock_handle++; + *handle = acpi_gbl_global_lock_handle; + } + + return (status); +} +EXPORT_SYMBOL(acpi_acquire_global_lock); + + +/******************************************************************************* + * + * FUNCTION: acpi_release_global_lock + * + * PARAMETERS: Handle - Returned from acpi_acquire_global_lock + * + * RETURN: Status + * + * DESCRIPTION: Release the ACPI Global Lock + * + 
******************************************************************************/ + +acpi_status +acpi_release_global_lock ( + u32 handle) +{ + acpi_status status; + + + if (handle != acpi_gbl_global_lock_handle) { + return (AE_NOT_ACQUIRED); + } + + status = acpi_ev_release_global_lock (); + return (status); +} +EXPORT_SYMBOL(acpi_release_global_lock); + diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c new file mode 100644 index 000000000000..fa8d5f25be62 --- /dev/null +++ b/drivers/acpi/events/evxfevnt.c @@ -0,0 +1,778 @@ +/****************************************************************************** + * + * Module Name: evxfevnt - External Interfaces, ACPI event disable/enable + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2005, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include <linux/module.h> + +#include <acpi/acpi.h> +#include <acpi/acevents.h> +#include <acpi/acnamesp.h> + +#define _COMPONENT ACPI_EVENTS + ACPI_MODULE_NAME ("evxfevnt") + + +/******************************************************************************* + * + * FUNCTION: acpi_enable + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Transfers the system into ACPI mode. 
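+ *
+ *              Illustrative call sequence (not part of this change): once the
+ *              ACPI tables have been loaded, the OS support layer would
+ *              typically enter ACPI mode with
+ *
+ *                  status = acpi_enable ();
+ *                  if (ACPI_FAILURE (status)) {
+ *                      ... fall back to legacy (non-ACPI) mode ...
+ *                  }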
+ * + ******************************************************************************/ + +acpi_status +acpi_enable (void) +{ + acpi_status status = AE_OK; + + + ACPI_FUNCTION_TRACE ("acpi_enable"); + + + /* Make sure we have the FADT*/ + + if (!acpi_gbl_FADT) { + ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "No FADT information present!\n")); + return_ACPI_STATUS (AE_NO_ACPI_TABLES); + } + + if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) { + ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "System is already in ACPI mode\n")); + } + else { + /* Transition to ACPI mode */ + + status = acpi_hw_set_mode (ACPI_SYS_MODE_ACPI); + if (ACPI_FAILURE (status)) { + ACPI_REPORT_ERROR (("Could not transition to ACPI mode.\n")); + return_ACPI_STATUS (status); + } + + ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "Transition to ACPI mode successful\n")); + } + + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_disable + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Transfers the system into LEGACY mode. + * + ******************************************************************************/ + +acpi_status +acpi_disable (void) +{ + acpi_status status = AE_OK; + + + ACPI_FUNCTION_TRACE ("acpi_disable"); + + + if (!acpi_gbl_FADT) { + ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "No FADT information present!\n")); + return_ACPI_STATUS (AE_NO_ACPI_TABLES); + } + + if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) { + ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "System is already in legacy (non-ACPI) mode\n")); + } + else { + /* Transition to LEGACY mode */ + + status = acpi_hw_set_mode (ACPI_SYS_MODE_LEGACY); + + if (ACPI_FAILURE (status)) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not exit ACPI mode to legacy mode")); + return_ACPI_STATUS (status); + } + + ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "ACPI mode disabled\n")); + } + + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_enable_event + * + * PARAMETERS: Event - The fixed eventto be enabled + * Flags - Reserved + * + * RETURN: Status + * + * DESCRIPTION: Enable an ACPI event (fixed) + * + ******************************************************************************/ + +acpi_status +acpi_enable_event ( + u32 event, + u32 flags) +{ + acpi_status status = AE_OK; + u32 value; + + + ACPI_FUNCTION_TRACE ("acpi_enable_event"); + + + /* Decode the Fixed Event */ + + if (event > ACPI_EVENT_MAX) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + /* + * Enable the requested fixed event (by writing a one to the + * enable register bit) + */ + status = acpi_set_register (acpi_gbl_fixed_event_info[event].enable_register_id, + 1, ACPI_MTX_LOCK); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Make sure that the hardware responded */ + + status = acpi_get_register (acpi_gbl_fixed_event_info[event].enable_register_id, + &value, ACPI_MTX_LOCK); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + if (value != 1) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Could not enable %s event\n", acpi_ut_get_event_name (event))); + return_ACPI_STATUS (AE_NO_HARDWARE_RESPONSE); + } + + return_ACPI_STATUS (status); +} +EXPORT_SYMBOL(acpi_enable_event); + + +/******************************************************************************* + * + * FUNCTION: acpi_set_gpe_type + * + * PARAMETERS: gpe_device - Parent GPE Device + * gpe_number - GPE level within the GPE block + * Type - New GPE type + * + * RETURN: Status + * 
+ * DESCRIPTION: Enable an ACPI event (general purpose) + * + ******************************************************************************/ + +acpi_status +acpi_set_gpe_type ( + acpi_handle gpe_device, + u32 gpe_number, + u8 type) +{ + acpi_status status = AE_OK; + struct acpi_gpe_event_info *gpe_event_info; + + + ACPI_FUNCTION_TRACE ("acpi_set_gpe_type"); + + + /* Ensure that we have a valid GPE number */ + + gpe_event_info = acpi_ev_get_gpe_event_info (gpe_device, gpe_number); + if (!gpe_event_info) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + if ((gpe_event_info->flags & ACPI_GPE_TYPE_MASK) == type) { + return_ACPI_STATUS (AE_OK); + } + + /* Set the new type (will disable GPE if currently enabled) */ + + status = acpi_ev_set_gpe_type (gpe_event_info, type); + +unlock_and_exit: + return_ACPI_STATUS (status); +} +EXPORT_SYMBOL(acpi_set_gpe_type); + + +/******************************************************************************* + * + * FUNCTION: acpi_enable_gpe + * + * PARAMETERS: gpe_device - Parent GPE Device + * gpe_number - GPE level within the GPE block + * Flags - Just enable, or also wake enable? + * Called from ISR or not + * + * RETURN: Status + * + * DESCRIPTION: Enable an ACPI event (general purpose) + * + ******************************************************************************/ + +acpi_status +acpi_enable_gpe ( + acpi_handle gpe_device, + u32 gpe_number, + u32 flags) +{ + acpi_status status = AE_OK; + struct acpi_gpe_event_info *gpe_event_info; + + + ACPI_FUNCTION_TRACE ("acpi_enable_gpe"); + + + /* Use semaphore lock if not executing at interrupt level */ + + if (flags & ACPI_NOT_ISR) { + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + } + + /* Ensure that we have a valid GPE number */ + + gpe_event_info = acpi_ev_get_gpe_event_info (gpe_device, gpe_number); + if (!gpe_event_info) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + /* Perform the enable */ + + status = acpi_ev_enable_gpe (gpe_event_info, TRUE); + +unlock_and_exit: + if (flags & ACPI_NOT_ISR) { + (void) acpi_ut_release_mutex (ACPI_MTX_EVENTS); + } + return_ACPI_STATUS (status); +} +EXPORT_SYMBOL(acpi_enable_gpe); + + +/******************************************************************************* + * + * FUNCTION: acpi_disable_gpe + * + * PARAMETERS: gpe_device - Parent GPE Device + * gpe_number - GPE level within the GPE block + * Flags - Just disable, or also wake disable? 
+ * Called from ISR or not + * + * RETURN: Status + * + * DESCRIPTION: Disable an ACPI event (general purpose) + * + ******************************************************************************/ + +acpi_status +acpi_disable_gpe ( + acpi_handle gpe_device, + u32 gpe_number, + u32 flags) +{ + acpi_status status = AE_OK; + struct acpi_gpe_event_info *gpe_event_info; + + + ACPI_FUNCTION_TRACE ("acpi_disable_gpe"); + + + /* Use semaphore lock if not executing at interrupt level */ + + if (flags & ACPI_NOT_ISR) { + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + } + + /* Ensure that we have a valid GPE number */ + + gpe_event_info = acpi_ev_get_gpe_event_info (gpe_device, gpe_number); + if (!gpe_event_info) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + status = acpi_ev_disable_gpe (gpe_event_info); + +unlock_and_exit: + if (flags & ACPI_NOT_ISR) { + (void) acpi_ut_release_mutex (ACPI_MTX_EVENTS); + } + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_disable_event + * + * PARAMETERS: Event - The fixed eventto be enabled + * Flags - Reserved + * + * RETURN: Status + * + * DESCRIPTION: Disable an ACPI event (fixed) + * + ******************************************************************************/ + +acpi_status +acpi_disable_event ( + u32 event, + u32 flags) +{ + acpi_status status = AE_OK; + u32 value; + + + ACPI_FUNCTION_TRACE ("acpi_disable_event"); + + + /* Decode the Fixed Event */ + + if (event > ACPI_EVENT_MAX) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + /* + * Disable the requested fixed event (by writing a zero to the + * enable register bit) + */ + status = acpi_set_register (acpi_gbl_fixed_event_info[event].enable_register_id, + 0, ACPI_MTX_LOCK); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + status = acpi_get_register (acpi_gbl_fixed_event_info[event].enable_register_id, + &value, ACPI_MTX_LOCK); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + if (value != 0) { + ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, + "Could not disable %s events\n", acpi_ut_get_event_name (event))); + return_ACPI_STATUS (AE_NO_HARDWARE_RESPONSE); + } + + return_ACPI_STATUS (status); +} +EXPORT_SYMBOL(acpi_disable_event); + + +/******************************************************************************* + * + * FUNCTION: acpi_clear_event + * + * PARAMETERS: Event - The fixed event to be cleared + * + * RETURN: Status + * + * DESCRIPTION: Clear an ACPI event (fixed) + * + ******************************************************************************/ + +acpi_status +acpi_clear_event ( + u32 event) +{ + acpi_status status = AE_OK; + + + ACPI_FUNCTION_TRACE ("acpi_clear_event"); + + + /* Decode the Fixed Event */ + + if (event > ACPI_EVENT_MAX) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + /* + * Clear the requested fixed event (By writing a one to the + * status register bit) + */ + status = acpi_set_register (acpi_gbl_fixed_event_info[event].status_register_id, + 1, ACPI_MTX_LOCK); + + return_ACPI_STATUS (status); +} +EXPORT_SYMBOL(acpi_clear_event); + + +/******************************************************************************* + * + * FUNCTION: acpi_clear_gpe + * + * PARAMETERS: gpe_device - Parent GPE Device + * gpe_number - GPE level within the GPE block + * Flags - Called from an ISR or not + * + * RETURN: Status + * + * DESCRIPTION: Clear an ACPI event (general purpose) 
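+ *
+ *              Illustrative usage (the GPE number is an assumption made for
+ *              the example): clearing GPE 0x16 of the FADT GPE block from
+ *              non-interrupt context would look like
+ *
+ *                  status = acpi_clear_gpe (NULL, 0x16, ACPI_NOT_ISR);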
+ * + ******************************************************************************/ + +acpi_status +acpi_clear_gpe ( + acpi_handle gpe_device, + u32 gpe_number, + u32 flags) +{ + acpi_status status = AE_OK; + struct acpi_gpe_event_info *gpe_event_info; + + + ACPI_FUNCTION_TRACE ("acpi_clear_gpe"); + + + /* Use semaphore lock if not executing at interrupt level */ + + if (flags & ACPI_NOT_ISR) { + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + } + + /* Ensure that we have a valid GPE number */ + + gpe_event_info = acpi_ev_get_gpe_event_info (gpe_device, gpe_number); + if (!gpe_event_info) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + status = acpi_hw_clear_gpe (gpe_event_info); + +unlock_and_exit: + if (flags & ACPI_NOT_ISR) { + (void) acpi_ut_release_mutex (ACPI_MTX_EVENTS); + } + return_ACPI_STATUS (status); +} + + +#ifdef ACPI_FUTURE_USAGE + +/******************************************************************************* + * + * FUNCTION: acpi_get_event_status + * + * PARAMETERS: Event - The fixed event + * Event Status - Where the current status of the event will + * be returned + * + * RETURN: Status + * + * DESCRIPTION: Obtains and returns the current status of the event + * + ******************************************************************************/ + +acpi_status +acpi_get_event_status ( + u32 event, + acpi_event_status *event_status) +{ + acpi_status status = AE_OK; + + + ACPI_FUNCTION_TRACE ("acpi_get_event_status"); + + + if (!event_status) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + /* Decode the Fixed Event */ + + if (event > ACPI_EVENT_MAX) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + /* Get the status of the requested fixed event */ + + status = acpi_get_register (acpi_gbl_fixed_event_info[event].status_register_id, + event_status, ACPI_MTX_LOCK); + + return_ACPI_STATUS (status); +} + + +/******************************************************************************* + * + * FUNCTION: acpi_get_gpe_status + * + * PARAMETERS: gpe_device - Parent GPE Device + * gpe_number - GPE level within the GPE block + * Flags - Called from an ISR or not + * Event Status - Where the current status of the event will + * be returned + * + * RETURN: Status + * + * DESCRIPTION: Get status of an event (general purpose) + * + ******************************************************************************/ + +acpi_status +acpi_get_gpe_status ( + acpi_handle gpe_device, + u32 gpe_number, + u32 flags, + acpi_event_status *event_status) +{ + acpi_status status = AE_OK; + struct acpi_gpe_event_info *gpe_event_info; + + + ACPI_FUNCTION_TRACE ("acpi_get_gpe_status"); + + + /* Use semaphore lock if not executing at interrupt level */ + + if (flags & ACPI_NOT_ISR) { + status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + } + + /* Ensure that we have a valid GPE number */ + + gpe_event_info = acpi_ev_get_gpe_event_info (gpe_device, gpe_number); + if (!gpe_event_info) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + /* Obtain status on the requested GPE number */ + + status = acpi_hw_get_gpe_status (gpe_event_info, event_status); + +unlock_and_exit: + if (flags & ACPI_NOT_ISR) { + (void) acpi_ut_release_mutex (ACPI_MTX_EVENTS); + } + return_ACPI_STATUS (status); +} +#endif /* ACPI_FUTURE_USAGE */ + + +/******************************************************************************* + * + * FUNCTION: acpi_install_gpe_block + * 
+ * PARAMETERS: gpe_device - Handle to the parent GPE Block Device + * gpe_block_address - Address and space_iD + * register_count - Number of GPE register pairs in the block + * interrupt_level - H/W interrupt for the block + * + * RETURN: Status + * + * DESCRIPTION: Create and Install a block of GPE registers + * + ******************************************************************************/ + +acpi_status +acpi_install_gpe_block ( + acpi_handle gpe_device, + struct acpi_generic_address *gpe_block_address, + u32 register_count, + u32 interrupt_level) +{ + acpi_status status; + union acpi_operand_object *obj_desc; + struct acpi_namespace_node *node; + struct acpi_gpe_block_info *gpe_block; + + + ACPI_FUNCTION_TRACE ("acpi_install_gpe_block"); + + + if ((!gpe_device) || + (!gpe_block_address) || + (!register_count)) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return (status); + } + + node = acpi_ns_map_handle_to_node (gpe_device); + if (!node) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + /* + * For user-installed GPE Block Devices, the gpe_block_base_number + * is always zero + */ + status = acpi_ev_create_gpe_block (node, gpe_block_address, register_count, + 0, interrupt_level, &gpe_block); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } + + /* Get the device_object attached to the node */ + + obj_desc = acpi_ns_get_attached_object (node); + if (!obj_desc) { + /* No object, create a new one */ + + obj_desc = acpi_ut_create_internal_object (ACPI_TYPE_DEVICE); + if (!obj_desc) { + status = AE_NO_MEMORY; + goto unlock_and_exit; + } + + status = acpi_ns_attach_object (node, obj_desc, ACPI_TYPE_DEVICE); + + /* Remove local reference to the object */ + + acpi_ut_remove_reference (obj_desc); + + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } + } + + /* Install the GPE block in the device_object */ + + obj_desc->device.gpe_block = gpe_block; + + +unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + return_ACPI_STATUS (status); +} +EXPORT_SYMBOL(acpi_install_gpe_block); + + +/******************************************************************************* + * + * FUNCTION: acpi_remove_gpe_block + * + * PARAMETERS: gpe_device - Handle to the parent GPE Block Device + * + * RETURN: Status + * + * DESCRIPTION: Remove a previously installed block of GPE registers + * + ******************************************************************************/ + +acpi_status +acpi_remove_gpe_block ( + acpi_handle gpe_device) +{ + union acpi_operand_object *obj_desc; + acpi_status status; + struct acpi_namespace_node *node; + + + ACPI_FUNCTION_TRACE ("acpi_remove_gpe_block"); + + + if (!gpe_device) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return (status); + } + + node = acpi_ns_map_handle_to_node (gpe_device); + if (!node) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + /* Get the device_object attached to the node */ + + obj_desc = acpi_ns_get_attached_object (node); + if (!obj_desc || + !obj_desc->device.gpe_block) { + return_ACPI_STATUS (AE_NULL_OBJECT); + } + + /* Delete the GPE block (but not the device_object) */ + + status = acpi_ev_delete_gpe_block (obj_desc->device.gpe_block); + if (ACPI_SUCCESS (status)) { + obj_desc->device.gpe_block = NULL; + } + +unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + return_ACPI_STATUS 
(status); +} +EXPORT_SYMBOL(acpi_remove_gpe_block); diff --git a/drivers/acpi/events/evxfregn.c b/drivers/acpi/events/evxfregn.c new file mode 100644 index 000000000000..d058587b3427 --- /dev/null +++ b/drivers/acpi/events/evxfregn.c @@ -0,0 +1,247 @@ +/****************************************************************************** + * + * Module Name: evxfregn - External Interfaces, ACPI Operation Regions and + * Address Spaces. + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2005, R. Byron Moore + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include <linux/module.h> + +#include <acpi/acpi.h> +#include <acpi/acnamesp.h> +#include <acpi/acevents.h> + +#define _COMPONENT ACPI_EVENTS + ACPI_MODULE_NAME ("evxfregn") + + +/******************************************************************************* + * + * FUNCTION: acpi_install_address_space_handler + * + * PARAMETERS: Device - Handle for the device + * space_id - The address space ID + * Handler - Address of the handler + * Setup - Address of the setup function + * Context - Value passed to the handler on each access + * + * RETURN: Status + * + * DESCRIPTION: Install a handler for all op_regions of a given space_id. 
+ * + ******************************************************************************/ + +acpi_status +acpi_install_address_space_handler ( + acpi_handle device, + acpi_adr_space_type space_id, + acpi_adr_space_handler handler, + acpi_adr_space_setup setup, + void *context) +{ + struct acpi_namespace_node *node; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("acpi_install_address_space_handler"); + + + /* Parameter validation */ + + if (!device) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Convert and validate the device handle */ + + node = acpi_ns_map_handle_to_node (device); + if (!node) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + /* Install the handler for all Regions for this Space ID */ + + status = acpi_ev_install_space_handler (node, space_id, handler, setup, context); + if (ACPI_FAILURE (status)) { + goto unlock_and_exit; + } + + /* Run all _REG methods for this address space */ + + status = acpi_ev_execute_reg_methods (node, space_id); + +unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + return_ACPI_STATUS (status); +} +EXPORT_SYMBOL(acpi_install_address_space_handler); + + +/******************************************************************************* + * + * FUNCTION: acpi_remove_address_space_handler + * + * PARAMETERS: Device - Handle for the device + * space_id - The address space ID + * Handler - Address of the handler + * + * RETURN: Status + * + * DESCRIPTION: Remove a previously installed handler. + * + ******************************************************************************/ + +acpi_status +acpi_remove_address_space_handler ( + acpi_handle device, + acpi_adr_space_type space_id, + acpi_adr_space_handler handler) +{ + union acpi_operand_object *obj_desc; + union acpi_operand_object *handler_obj; + union acpi_operand_object *region_obj; + union acpi_operand_object **last_obj_ptr; + struct acpi_namespace_node *node; + acpi_status status; + + + ACPI_FUNCTION_TRACE ("acpi_remove_address_space_handler"); + + + /* Parameter validation */ + + if (!device) { + return_ACPI_STATUS (AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } + + /* Convert and validate the device handle */ + + node = acpi_ns_map_handle_to_node (device); + if (!node) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + /* Make sure the internal object exists */ + + obj_desc = acpi_ns_get_attached_object (node); + if (!obj_desc) { + status = AE_NOT_EXIST; + goto unlock_and_exit; + } + + /* Find the address handler the user requested */ + + handler_obj = obj_desc->device.handler; + last_obj_ptr = &obj_desc->device.handler; + while (handler_obj) { + /* We have a handler, see if user requested this one */ + + if (handler_obj->address_space.space_id == space_id) { + /* Matched space_id, first dereference this in the Regions */ + + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, + "Removing address handler %p(%p) for region %s on Device %p(%p)\n", + handler_obj, handler, acpi_ut_get_region_name (space_id), + node, obj_desc)); + + region_obj = handler_obj->address_space.region_list; + + /* Walk the handler's region list */ + + while (region_obj) { + /* + * First disassociate the handler from the region. 
+ * + * NOTE: this doesn't mean that the region goes away + * The region is just inaccessible as indicated to + * the _REG method + */ + acpi_ev_detach_region (region_obj, TRUE); + + /* + * Walk the list: Just grab the head because the + * detach_region removed the previous head. + */ + region_obj = handler_obj->address_space.region_list; + + } + + /* Remove this Handler object from the list */ + + *last_obj_ptr = handler_obj->address_space.next; + + /* Now we can delete the handler object */ + + acpi_ut_remove_reference (handler_obj); + goto unlock_and_exit; + } + + /* Walk the linked list of handlers */ + + last_obj_ptr = &handler_obj->address_space.next; + handler_obj = handler_obj->address_space.next; + } + + /* The handler does not exist */ + + ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION, + "Unable to remove address handler %p for %s(%X), dev_node %p, obj %p\n", + handler, acpi_ut_get_region_name (space_id), space_id, node, obj_desc)); + + status = AE_NOT_EXIST; + +unlock_and_exit: + (void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); + return_ACPI_STATUS (status); +} +EXPORT_SYMBOL(acpi_remove_address_space_handler); + |
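Usage of the two exported interfaces above, as a minimal sketch: a driver installs a handler (plus a setup callback) for one address space on a device, and removes the same handler again on teardown. The device handle ec_handle, the handler body, and the setup body below are placeholders, not taken from the patch; the callback signatures are written to match the acpi_adr_space_handler / acpi_adr_space_setup typedefs referenced by the interfaces, and should be checked against the actypes.h actually in use.

/*
 * Minimal usage sketch.  Assumptions: "ec_handle" is a valid device handle
 * obtained elsewhere; the handler and setup bodies are placeholders.
 */
#include <acpi/acpi.h>

static acpi_status
example_space_handler (
	u32                             function,
	acpi_physical_address           address,
	u32                             bit_width,
	acpi_integer                    *value,
	void                            *handler_context,
	void                            *region_context)
{
	/* Service ACPI_READ / ACPI_WRITE requests for the region here */

	if (function == ACPI_READ) {
		*value = 0;
	}
	return (AE_OK);
}

static acpi_status
example_region_setup (
	acpi_handle                     region_handle,
	u32                             function,
	void                            *handler_context,
	void                            **region_context)
{
	/* Mirrors the default region setup: pass the handler context through */

	if (function == ACPI_REGION_DEACTIVATE) {
		*region_context = NULL;
	}
	else {
		*region_context = handler_context;
	}
	return (AE_OK);
}

static acpi_status
example_claim_ec_space (
	acpi_handle                     ec_handle)
{
	acpi_status                     status;

	/* Install a handler for all EC op_regions below ec_handle */

	status = acpi_install_address_space_handler (ec_handle,
			 ACPI_ADR_SPACE_EC, example_space_handler,
			 example_region_setup, NULL);
	if (ACPI_FAILURE (status)) {
		return (status);
	}

	/* ... regions are now accessible; on teardown: ... */

	return (acpi_remove_address_space_handler (ec_handle,
			 ACPI_ADR_SPACE_EC, example_space_handler));
}

Note that acpi_install_address_space_handler runs the _REG methods for the space before it returns, so AML that gates region access on _REG sees the handler as soon as the install call completes.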