28 files changed, 563 insertions, 470 deletions
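This update (ACPICA version 0x20060623) replaces the per-method concurrency semaphore with an OS mutex for Serialized control methods: the mutex is created lazily the first time the method executes, a per-thread SyncLevel check provides some deadlock prevention, and an acquisition depth counter lets recursive invocations proceed without re-acquiring the mutex. The sketch below is illustrative only and models just that acquisition-depth pattern, using POSIX threads as a stand-in for the acpi_os_* mutex layer; struct method_lock, method_enter, and method_exit are hypothetical names, not ACPICA interfaces.

/*
 * Illustrative sketch, not ACPICA code: acquisition-depth handling for a
 * serialized method, with POSIX threads standing in for acpi_os_* mutexes.
 */
#include <pthread.h>
#include <stdio.h>

struct method_lock {
	pthread_mutex_t os_mutex;
	pthread_t owner;		/* meaningful only while depth > 0 */
	unsigned int depth;		/* acquisition depth handles recursion */
};

static void method_enter(struct method_lock *ml)
{
	/*
	 * Acquire the OS mutex only if this thread does not already own it.
	 * (ACPICA performs the equivalent owner check under the interpreter
	 * lock; this single-threaded example is purely for illustration.)
	 */
	if (ml->depth == 0 || !pthread_equal(ml->owner, pthread_self())) {
		pthread_mutex_lock(&ml->os_mutex);
		ml->owner = pthread_self();
	}

	/* Always increase acquisition depth */
	ml->depth++;
}

static void method_exit(struct method_lock *ml)
{
	/* Release the OS mutex only when the outermost invocation returns */
	if (--ml->depth == 0)
		pthread_mutex_unlock(&ml->os_mutex);
}

int main(void)
{
	struct method_lock ml = { .os_mutex = PTHREAD_MUTEX_INITIALIZER };

	method_enter(&ml);	/* outer invocation of a serialized method */
	method_enter(&ml);	/* recursive invocation: depth only, no lock */
	method_exit(&ml);
	method_exit(&ml);	/* depth reaches zero, mutex released */

	printf("final depth: %u\n", ml.depth);
	return 0;
}

In the actual patch (dsmethod.c below), acquisition is additionally gated by the SyncLevel check: the thread's current_sync_level must not exceed the method mutex's sync_level, otherwise AE_AML_MUTEX_ORDER is returned.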
diff --git a/drivers/acpi/dispatcher/dsinit.c b/drivers/acpi/dispatcher/dsinit.c index bbdf990e9f65..daf51b5b5875 100644 --- a/drivers/acpi/dispatcher/dsinit.c +++ b/drivers/acpi/dispatcher/dsinit.c @@ -125,37 +125,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,  		if (info->table_desc->pointer->revision == 1) {  			node->flags |= ANOBJ_DATA_WIDTH_32;  		} -#ifdef ACPI_INIT_PARSE_METHODS -		/* -		 * Note 11/2005: Removed this code to parse all methods during table -		 * load because it causes problems if there are any errors during the -		 * parse. Also, it seems like overkill and we probably don't want to -		 * abort a table load because of an issue with a single method. -		 */ - -		/* -		 * Print a dot for each method unless we are going to print -		 * the entire pathname -		 */ -		if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) { -			ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, ".")); -		} -		/* -		 * Always parse methods to detect errors, we will delete -		 * the parse tree below -		 */ -		status = acpi_ds_parse_method(obj_handle); -		if (ACPI_FAILURE(status)) { -			ACPI_ERROR((AE_INFO, -				    "Method %p [%4.4s] - parse failure, %s", -				    obj_handle, -				    acpi_ut_get_node_name(obj_handle), -				    acpi_format_exception(status))); - -			/* This parse failed, but we will continue parsing more methods */ -		} -#endif  		info->method_count++;  		break; diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c index bc9aca4e7401..a39a33f4847a 100644 --- a/drivers/acpi/dispatcher/dsmethod.c +++ b/drivers/acpi/dispatcher/dsmethod.c @@ -52,6 +52,10 @@  #define _COMPONENT          ACPI_DISPATCHER  ACPI_MODULE_NAME("dsmethod") +/* Local prototypes */ +static acpi_status +acpi_ds_create_method_mutex(union acpi_operand_object *method_desc); +  /*******************************************************************************   *   * FUNCTION:    acpi_ds_method_error @@ -67,6 +71,7 @@ ACPI_MODULE_NAME("dsmethod")   *              Note: Allows the exception handler to change the status code   *   ******************************************************************************/ +  acpi_status  acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)  { @@ -113,11 +118,51 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)  /*******************************************************************************   * + * FUNCTION:    acpi_ds_create_method_mutex + * + * PARAMETERS:  obj_desc            - The method object + * + * RETURN:      Status + * + * DESCRIPTION: Create a mutex object for a serialized control method + * + ******************************************************************************/ + +static acpi_status +acpi_ds_create_method_mutex(union acpi_operand_object *method_desc) +{ +	union acpi_operand_object *mutex_desc; +	acpi_status status; + +	ACPI_FUNCTION_NAME(ds_create_method_mutex); + +	/* Create the new mutex object */ + +	mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX); +	if (!mutex_desc) { +		return_ACPI_STATUS(AE_NO_MEMORY); +	} + +	/* Create the actual OS Mutex */ + +	status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex); +	if (ACPI_FAILURE(status)) { +		return_ACPI_STATUS(status); +	} + +	mutex_desc->mutex.sync_level = method_desc->method.sync_level; +	method_desc->method.mutex = mutex_desc; +	return_ACPI_STATUS(AE_OK); +} + +/******************************************************************************* + *   * FUNCTION:    acpi_ds_begin_method_execution   *   * PARAMETERS:  method_node   
      - Node of the method   *              obj_desc            - The method object - *              calling_method_node - Caller of this method (if non-null) + *              walk_state          - current state, NULL if not yet executing + *                                    a method.   *   * RETURN:      Status   * @@ -128,9 +173,9 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)   ******************************************************************************/  acpi_status -acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node, -			       union acpi_operand_object * obj_desc, -			       struct acpi_namespace_node * calling_method_node) +acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node, +			       union acpi_operand_object *obj_desc, +			       struct acpi_walk_state *walk_state)  {  	acpi_status status = AE_OK; @@ -149,35 +194,80 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,  	}  	/* -	 * If there is a concurrency limit on this method, we need to -	 * obtain a unit from the method semaphore. +	 * If this method is serialized, we need to acquire the method mutex.  	 */ -	if (obj_desc->method.semaphore) { +	if (obj_desc->method.method_flags & AML_METHOD_SERIALIZED) {  		/* -		 * Allow recursive method calls, up to the reentrancy/concurrency -		 * limit imposed by the SERIALIZED rule and the sync_level method -		 * parameter. -		 * -		 * The point of this code is to avoid permanently blocking a -		 * thread that is making recursive method calls. +		 * Create a mutex for the method if it is defined to be Serialized +		 * and a mutex has not already been created. We defer the mutex creation +		 * until a method is actually executed, to minimize the object count  		 */ -		if (method_node == calling_method_node) { -			if (obj_desc->method.thread_count >= -			    obj_desc->method.concurrency) { -				return_ACPI_STATUS(AE_AML_METHOD_LIMIT); +		if (!obj_desc->method.mutex) { +			status = acpi_ds_create_method_mutex(obj_desc); +			if (ACPI_FAILURE(status)) { +				return_ACPI_STATUS(status);  			}  		}  		/* -		 * Get a unit from the method semaphore. This releases the -		 * interpreter if we block (then reacquires it) +		 * The current_sync_level (per-thread) must be less than or equal to +		 * the sync level of the method. This mechanism provides some +		 * deadlock prevention +		 * +		 * Top-level method invocation has no walk state at this point  		 */ -		status = -		    acpi_ex_system_wait_semaphore(obj_desc->method.semaphore, -						  ACPI_WAIT_FOREVER); -		if (ACPI_FAILURE(status)) { -			return_ACPI_STATUS(status); +		if (walk_state && +		    (walk_state->thread->current_sync_level > +		     obj_desc->method.mutex->mutex.sync_level)) { +			ACPI_ERROR((AE_INFO, +				    "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)", +				    acpi_ut_get_node_name(method_node), +				    walk_state->thread->current_sync_level)); + +			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);  		} + +		/* +		 * Obtain the method mutex if necessary. Do not acquire mutex for a +		 * recursive call. +		 */ +		if (!walk_state || +		    !obj_desc->method.mutex->mutex.owner_thread || +		    (walk_state->thread != +		     obj_desc->method.mutex->mutex.owner_thread)) { +			/* +			 * Acquire the method mutex. 
This releases the interpreter if we +			 * block (and reacquires it before it returns) +			 */ +			status = +			    acpi_ex_system_wait_mutex(obj_desc->method.mutex-> +						      mutex.os_mutex, +						      ACPI_WAIT_FOREVER); +			if (ACPI_FAILURE(status)) { +				return_ACPI_STATUS(status); +			} + +			/* Update the mutex and walk info and save the original sync_level */ + +			if (walk_state) { +				obj_desc->method.mutex->mutex. +				    original_sync_level = +				    walk_state->thread->current_sync_level; + +				obj_desc->method.mutex->mutex.owner_thread = +				    walk_state->thread; +				walk_state->thread->current_sync_level = +				    obj_desc->method.sync_level; +			} else { +				obj_desc->method.mutex->mutex. +				    original_sync_level = +				    obj_desc->method.mutex->mutex.sync_level; +			} +		} + +		/* Always increase acquisition depth */ + +		obj_desc->method.mutex->mutex.acquisition_depth++;  	}  	/* @@ -200,10 +290,10 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,  	return_ACPI_STATUS(status);        cleanup: -	/* On error, must signal the method semaphore if present */ +	/* On error, must release the method mutex (if present) */ -	if (obj_desc->method.semaphore) { -		(void)acpi_os_signal_semaphore(obj_desc->method.semaphore, 1); +	if (obj_desc->method.mutex) { +		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);  	}  	return_ACPI_STATUS(status);  } @@ -253,10 +343,10 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,  		return_ACPI_STATUS(AE_NULL_OBJECT);  	} -	/* Init for new method, possibly wait on concurrency semaphore */ +	/* Init for new method, possibly wait on method mutex */  	status = acpi_ds_begin_method_execution(method_node, obj_desc, -						this_walk_state->method_node); +						this_walk_state);  	if (ACPI_FAILURE(status)) {  		return_ACPI_STATUS(status);  	} @@ -478,6 +568,8 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,   *              created, delete all locals and arguments, and delete the parse   *              tree if requested.   * + * MUTEX:       Interpreter is locked + *   ******************************************************************************/  void @@ -503,26 +595,21 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,  	}  	/* -	 * Lock the parser while we terminate this method. -	 * If this is the last thread executing the method, -	 * we have additional cleanup to perform +	 * If method is serialized, release the mutex and restore the +	 * current sync level for this thread  	 */ -	status = acpi_ut_acquire_mutex(ACPI_MTX_CONTROL_METHOD); -	if (ACPI_FAILURE(status)) { -		return_VOID; -	} +	if (method_desc->method.mutex) { -	/* Signal completion of the execution of this method if necessary */ +		/* Acquisition Depth handles recursive calls */ -	if (method_desc->method.semaphore) { -		status = -		    acpi_os_signal_semaphore(method_desc->method.semaphore, 1); -		if (ACPI_FAILURE(status)) { - -			/* Ignore error and continue */ +		method_desc->method.mutex->mutex.acquisition_depth--; +		if (!method_desc->method.mutex->mutex.acquisition_depth) { +			walk_state->thread->current_sync_level = +			    method_desc->method.mutex->mutex. +			    original_sync_level; -			ACPI_EXCEPTION((AE_INFO, status, -					"Could not signal method semaphore")); +			acpi_os_release_mutex(method_desc->method.mutex->mutex. 
+					      os_mutex);  		}  	} @@ -537,7 +624,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,  		status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);  		if (ACPI_FAILURE(status)) { -			goto exit; +			return_VOID;  		}  		/* @@ -580,18 +667,16 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,  		/*  		 * Support to dynamically change a method from not_serialized to  		 * Serialized if it appears that the method is incorrectly written and -		 * does not support multiple thread execution.  The best example of this -		 * is if such a method creates namespace objects and blocks.  A second +		 * does not support multiple thread execution. The best example of this +		 * is if such a method creates namespace objects and blocks. A second  		 * thread will fail with an AE_ALREADY_EXISTS exception  		 *  		 * This code is here because we must wait until the last thread exits  		 * before creating the synchronization semaphore.  		 */ -		if ((method_desc->method.concurrency == 1) && -		    (!method_desc->method.semaphore)) { -			status = acpi_os_create_semaphore(1, 1, -							  &method_desc->method. -							  semaphore); +		if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED) +		    && (!method_desc->method.mutex)) { +			status = acpi_ds_create_method_mutex(method_desc);  		}  		/* No more threads, we can free the owner_id */ @@ -599,144 +684,5 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,  		acpi_ut_release_owner_id(&method_desc->method.owner_id);  	} -      exit: -	(void)acpi_ut_release_mutex(ACPI_MTX_CONTROL_METHOD);  	return_VOID;  } - -#ifdef ACPI_INIT_PARSE_METHODS -	/* -	 * Note 11/2005: Removed this code to parse all methods during table -	 * load because it causes problems if there are any errors during the -	 * parse. Also, it seems like overkill and we probably don't want to -	 * abort a table load because of an issue with a single method. -	 */ - -/******************************************************************************* - * - * FUNCTION:    acpi_ds_parse_method - * - * PARAMETERS:  Node        - Method node - * - * RETURN:      Status - * - * DESCRIPTION: Parse the AML that is associated with the method. 
- * - * MUTEX:       Assumes parser is locked - * - ******************************************************************************/ - -acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node) -{ -	acpi_status status; -	union acpi_operand_object *obj_desc; -	union acpi_parse_object *op; -	struct acpi_walk_state *walk_state; - -	ACPI_FUNCTION_TRACE_PTR(ds_parse_method, node); - -	/* Parameter Validation */ - -	if (!node) { -		return_ACPI_STATUS(AE_NULL_ENTRY); -	} - -	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, -			  "**** Parsing [%4.4s] **** NamedObj=%p\n", -			  acpi_ut_get_node_name(node), node)); - -	/* Extract the method object from the method Node */ - -	obj_desc = acpi_ns_get_attached_object(node); -	if (!obj_desc) { -		return_ACPI_STATUS(AE_NULL_OBJECT); -	} - -	/* Create a mutex for the method if there is a concurrency limit */ - -	if ((obj_desc->method.concurrency != ACPI_INFINITE_CONCURRENCY) && -	    (!obj_desc->method.semaphore)) { -		status = acpi_os_create_semaphore(obj_desc->method.concurrency, -						  obj_desc->method.concurrency, -						  &obj_desc->method.semaphore); -		if (ACPI_FAILURE(status)) { -			return_ACPI_STATUS(status); -		} -	} - -	/* -	 * Allocate a new parser op to be the root of the parsed -	 * method tree -	 */ -	op = acpi_ps_alloc_op(AML_METHOD_OP); -	if (!op) { -		return_ACPI_STATUS(AE_NO_MEMORY); -	} - -	/* Init new op with the method name and pointer back to the Node */ - -	acpi_ps_set_name(op, node->name.integer); -	op->common.node = node; - -	/* -	 * Get a new owner_id for objects created by this method. Namespace -	 * objects (such as Operation Regions) can be created during the -	 * first pass parse. -	 */ -	status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id); -	if (ACPI_FAILURE(status)) { -		goto cleanup; -	} - -	/* Create and initialize a new walk state */ - -	walk_state = -	    acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, NULL, -				      NULL); -	if (!walk_state) { -		status = AE_NO_MEMORY; -		goto cleanup2; -	} - -	status = acpi_ds_init_aml_walk(walk_state, op, node, -				       obj_desc->method.aml_start, -				       obj_desc->method.aml_length, NULL, 1); -	if (ACPI_FAILURE(status)) { -		acpi_ds_delete_walk_state(walk_state); -		goto cleanup2; -	} - -	/* -	 * Parse the method, first pass -	 * -	 * The first pass load is where newly declared named objects are added into -	 * the namespace.  Actual evaluation of the named objects (what would be -	 * called a "second pass") happens during the actual execution of the -	 * method so that operands to the named objects can take on dynamic -	 * run-time values. -	 */ -	status = acpi_ps_parse_aml(walk_state); -	if (ACPI_FAILURE(status)) { -		goto cleanup2; -	} - -	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, -			  "**** [%4.4s] Parsed **** NamedObj=%p Op=%p\n", -			  acpi_ut_get_node_name(node), node, op)); - -	/* -	 * Delete the parse tree. 
We simply re-parse the method for every -	 * execution since there isn't much overhead (compared to keeping lots -	 * of parse trees around) -	 */ -	acpi_ns_delete_namespace_subtree(node); -	acpi_ns_delete_namespace_by_owner(obj_desc->method.owner_id); - -      cleanup2: -	acpi_ut_release_owner_id(&obj_desc->method.owner_id); - -      cleanup: -	acpi_ps_delete_parse_tree(op); -	return_ACPI_STATUS(status); -} -#endif diff --git a/drivers/acpi/dispatcher/dswexec.c b/drivers/acpi/dispatcher/dswexec.c index 3acbd9145d72..b1ded62d0df1 100644 --- a/drivers/acpi/dispatcher/dswexec.c +++ b/drivers/acpi/dispatcher/dswexec.c @@ -472,7 +472,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)  			    acpi_ds_result_push(walk_state->result_obj,  						walk_state);  		} -  		break;  	default: @@ -510,6 +509,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)  				ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,  						  "Method Reference in a Package, Op=%p\n",  						  op)); +  				op->common.node =  				    (struct acpi_namespace_node *)op->asl.value.  				    arg->asl.node->object; @@ -670,7 +670,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)  				status = acpi_ds_result_stack_pop(walk_state);  			} -  			break;  		case AML_TYPE_UNDEFINED: @@ -708,7 +707,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)  	 * Check if we just completed the evaluation of a  	 * conditional predicate  	 */ -  	if ((ACPI_SUCCESS(status)) &&  	    (walk_state->control_state) &&  	    (walk_state->control_state->common.state == diff --git a/drivers/acpi/dispatcher/dswload.c b/drivers/acpi/dispatcher/dswload.c index 35074399c617..e3ca7f6539c1 100644 --- a/drivers/acpi/dispatcher/dswload.c +++ b/drivers/acpi/dispatcher/dswload.c @@ -175,7 +175,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,  		if (status == AE_NOT_FOUND) {  			/*  			 * Table disassembly: -			 * Target of Scope() not found.  Generate an External for it, and +			 * Target of Scope() not found. Generate an External for it, and  			 * insert the name into the namespace.  			 */  			acpi_dm_add_to_external_list(path, ACPI_TYPE_DEVICE, 0); @@ -210,16 +210,15 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,  		case ACPI_TYPE_BUFFER:  			/* -			 * These types we will allow, but we will change the type.  This +			 * These types we will allow, but we will change the type. This  			 * enables some existing code of the form:  			 *  			 *  Name (DEB, 0)  			 *  Scope (DEB) { ... }  			 * -			 * Note: silently change the type here.  On the second pass, we will report +			 * Note: silently change the type here. On the second pass, we will report  			 * a warning  			 */ -  			ACPI_DEBUG_PRINT((ACPI_DB_INFO,  					  "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)\n",  					  path, @@ -242,7 +241,6 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,  		break;  	default: -  		/*  		 * For all other named opcodes, we will enter the name into  		 * the namespace. @@ -259,7 +257,6 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,  		 *       buffer_field, or Package), the name of the object is already  		 *       in the namespace.  		 */ -  		if (walk_state->deferred_node) {  			/* This name is already in the namespace, get the node */ @@ -293,8 +290,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,  		}  		/* -		 * Enter the named type into the internal namespace.  
We enter the name -		 * as we go downward in the parse tree.  Any necessary subobjects that +		 * Enter the named type into the internal namespace. We enter the name +		 * as we go downward in the parse tree. Any necessary subobjects that  		 * involve arguments to the opcode must be created as we go back up the  		 * parse tree later.  		 */ @@ -327,12 +324,12 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,  							    (status);  						}  					} +  					status = AE_OK;  				}  			}  			if (ACPI_FAILURE(status)) { -  				ACPI_ERROR_NAMESPACE(path, status);  				return_ACPI_STATUS(status);  			} @@ -434,9 +431,13 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)  			status =  			    acpi_ex_create_region(op->named.data,  						  op->named.length, -						  (acpi_adr_space_type) -						  ((op->common.value.arg)-> -						   common.value.integer), +						  (acpi_adr_space_type) ((op-> +									  common. +									  value. +									  arg)-> +									 common. +									 value. +									 integer),  						  walk_state);  			if (ACPI_FAILURE(status)) {  				return_ACPI_STATUS(status); @@ -474,7 +475,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)  			 * method_op pkg_length name_string method_flags term_list  			 *  			 * Note: We must create the method node/object pair as soon as we -			 * see the method declaration.  This allows later pass1 parsing +			 * see the method declaration. This allows later pass1 parsing  			 * of invocations of the method (need to know the number of  			 * arguments.)  			 */ @@ -499,6 +500,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)  								  length,  								  walk_state);  				} +  				walk_state->operands[0] = NULL;  				walk_state->num_operands = 0; @@ -570,7 +572,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,  #ifdef ACPI_ENABLE_MODULE_LEVEL_CODE  			if ((walk_state->op_info->class == AML_CLASS_EXECUTE) ||  			    (walk_state->op_info->class == AML_CLASS_CONTROL)) { -  				ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,  						  "Begin/EXEC: %s (fl %8.8X)\n",  						  walk_state->op_info->name, @@ -602,7 +603,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,  		} else {  			/* Get name from the op */ -			buffer_ptr = (char *)&op->named.name; +			buffer_ptr = ACPI_CAST_PTR(char, &op->named.name);  		}  	} else {  		/* Get the namestring from the raw AML */ @@ -629,7 +630,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,  		break;  	case AML_INT_NAMEPATH_OP: -  		/*  		 * The name_path is an object reference to an existing object.  		 * Don't enter the name into the namespace, but look it up @@ -642,7 +642,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,  		break;  	case AML_SCOPE_OP: -  		/*  		 * The Path is an object reference to an existing object.  		 * Don't enter the name into the namespace, but look it up @@ -664,6 +663,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,  #endif  			return_ACPI_STATUS(status);  		} +  		/*  		 * We must check to make sure that the target is  		 * one of the opcodes that actually opens a scope @@ -683,13 +683,12 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,  		case ACPI_TYPE_BUFFER:  			/* -			 * These types we will allow, but we will change the type.  This +			 * These types we will allow, but we will change the type. This  			 * enables some existing code of the form:  			 *  			 *  Name (DEB, 0)  			 *  Scope (DEB) { ... 
}  			 */ -  			ACPI_WARNING((AE_INFO,  				      "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)",  				      buffer_ptr, @@ -729,14 +728,14 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,  				if (ACPI_FAILURE(status)) {  					return_ACPI_STATUS(status);  				} -  			} +  			return_ACPI_STATUS(AE_OK);  		}  		/* -		 * Enter the named type into the internal namespace.  We enter the name -		 * as we go downward in the parse tree.  Any necessary subobjects that +		 * Enter the named type into the internal namespace. We enter the name +		 * as we go downward in the parse tree. Any necessary subobjects that  		 * involve arguments to the opcode must be created as we go back up the  		 * parse tree later.  		 * @@ -787,7 +786,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,  	 * can get it again quickly when this scope is closed  	 */  	op->common.node = node; -  	return_ACPI_STATUS(status);  } @@ -922,7 +920,6 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)  #ifndef ACPI_NO_METHOD_EXECUTION  	case AML_TYPE_CREATE_FIELD: -  		/*  		 * Create the field object, but the field buffer and index must  		 * be evaluated later during the execution phase @@ -931,7 +928,6 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)  		break;  	case AML_TYPE_NAMED_FIELD: -  		/*  		 * If we are executing a method, initialize the field  		 */ @@ -1051,6 +1047,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)  			 * argument is the space_id. (We must save the address of the  			 * AML of the address and length operands)  			 */ +  			/*  			 * If we have a valid region, initialize it  			 * Namespace is NOT locked at this point. @@ -1080,7 +1077,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)  			 * method_op pkg_length name_string method_flags term_list  			 *  			 * Note: We must create the method node/object pair as soon as we -			 * see the method declaration.  This allows later pass1 parsing +			 * see the method declaration. This allows later pass1 parsing  			 * of invocations of the method (need to know the number of  			 * arguments.)  			 */ diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c index f01d339407f8..c76c0583ca6a 100644 --- a/drivers/acpi/events/evgpe.c +++ b/drivers/acpi/events/evgpe.c @@ -382,7 +382,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)  	u32 status_reg;  	u32 enable_reg;  	acpi_cpu_flags flags; -	acpi_cpu_flags hw_flags;  	acpi_native_uint i;  	acpi_native_uint j; @@ -394,8 +393,11 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)  		return (int_status);  	} -	/* We need to hold the GPE lock now, hardware lock in the loop */ - +	/* +	 * We need to obtain the GPE lock for both the data structs and registers +	 * Note: Not necessary to obtain the hardware lock, since the GPE registers +	 * are owned by the gpe_lock. 
+	 */  	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);  	/* Examine all GPE blocks attached to this interrupt level */ @@ -413,8 +415,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)  			gpe_register_info = &gpe_block->register_info[i]; -			hw_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); -  			/* Read the Status Register */  			status = @@ -423,8 +423,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)  						   &gpe_register_info->  						   status_address);  			if (ACPI_FAILURE(status)) { -				acpi_os_release_lock(acpi_gbl_hardware_lock, -						     hw_flags);  				goto unlock_and_exit;  			} @@ -435,8 +433,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)  						   &enable_reg,  						   &gpe_register_info->  						   enable_address); -			acpi_os_release_lock(acpi_gbl_hardware_lock, hw_flags); -  			if (ACPI_FAILURE(status)) {  				goto unlock_and_exit;  			} diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c index 823352435e08..83fed079a276 100644 --- a/drivers/acpi/executer/exconfig.c +++ b/drivers/acpi/executer/exconfig.c @@ -266,6 +266,10 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,  		}  	} +	ACPI_INFO((AE_INFO, +		   "Dynamic OEM Table Load - [%4.4s] OemId [%6.6s] OemTableId [%8.8s]", +		   table->signature, table->oem_id, table->oem_table_id)); +  	*return_desc = ddb_handle;  	return_ACPI_STATUS(status);  } @@ -446,6 +450,10 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,  		return_ACPI_STATUS(status);  	} +	ACPI_INFO((AE_INFO, +		   "Dynamic SSDT Load - OemId [%6.6s] OemTableId [%8.8s]", +		   table_ptr->oem_id, table_ptr->oem_table_id)); +        cleanup:  	if (ACPI_FAILURE(status)) {  		ACPI_FREE(table_ptr); diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/executer/excreate.c index 106dc7219df7..34eec82c1b1e 100644 --- a/drivers/acpi/executer/excreate.c +++ b/drivers/acpi/executer/excreate.c @@ -177,7 +177,7 @@ acpi_status acpi_ex_create_event(struct acpi_walk_state *walk_state)  	 * that the event is created in an unsignalled state  	 */  	status = acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0, -					  &obj_desc->event.semaphore); +					  &obj_desc->event.os_semaphore);  	if (ACPI_FAILURE(status)) {  		goto cleanup;  	} @@ -226,12 +226,9 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)  		goto cleanup;  	} -	/* -	 * Create the actual OS semaphore. -	 * One unit max to make it a mutex, with one initial unit to allow -	 * the mutex to be acquired. -	 */ -	status = acpi_os_create_semaphore(1, 1, &obj_desc->mutex.semaphore); +	/* Create the actual OS Mutex */ + +	status = acpi_os_create_mutex(&obj_desc->mutex.os_mutex);  	if (ACPI_FAILURE(status)) {  		goto cleanup;  	} @@ -565,7 +562,7 @@ acpi_ex_create_method(u8 * aml_start,  	obj_desc->method.aml_length = aml_length;  	/* -	 * Disassemble the method flags.  Split off the Arg Count +	 * Disassemble the method flags. Split off the Arg Count  	 * for efficiency  	 */  	method_flags = (u8) operand[1]->integer.value; @@ -576,21 +573,19 @@ acpi_ex_create_method(u8 * aml_start,  	    (u8) (method_flags & AML_METHOD_ARG_COUNT);  	/* -	 * Get the concurrency count.  If required, a semaphore will be +	 * Get the sync_level. If method is serialized, a mutex will be  	 * created for this method when it is parsed.  	 
*/  	if (acpi_gbl_all_methods_serialized) { -		obj_desc->method.concurrency = 1; +		obj_desc->method.sync_level = 0;  		obj_desc->method.method_flags |= AML_METHOD_SERIALIZED;  	} else if (method_flags & AML_METHOD_SERIALIZED) {  		/* -		 * ACPI 1.0: Concurrency = 1 -		 * ACPI 2.0: Concurrency = (sync_level (in method declaration) + 1) +		 * ACPI 1.0: sync_level = 0 +		 * ACPI 2.0: sync_level = sync_level in method declaration  		 */ -		obj_desc->method.concurrency = (u8) -		    (((method_flags & AML_METHOD_SYNCH_LEVEL) >> 4) + 1); -	} else { -		obj_desc->method.concurrency = ACPI_INFINITE_CONCURRENCY; +		obj_desc->method.sync_level = (u8) +		    ((method_flags & AML_METHOD_SYNCH_LEVEL) >> 4);  	}  	/* Attach the new object to the method Node */ diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c index 7b9718e976bf..2450943add33 100644 --- a/drivers/acpi/executer/exdump.c +++ b/drivers/acpi/executer/exdump.c @@ -118,14 +118,14 @@ static struct acpi_exdump_info acpi_ex_dump_device[4] = {  static struct acpi_exdump_info acpi_ex_dump_event[2] = {  	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_event), NULL}, -	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(event.semaphore), "Semaphore"} +	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(event.os_semaphore), "OsSemaphore"}  };  static struct acpi_exdump_info acpi_ex_dump_method[8] = {  	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL},  	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "ParamCount"}, -	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.concurrency), "Concurrency"}, -	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.semaphore), "Semaphore"}, +	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"}, +	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.mutex), "Mutex"},  	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"},  	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.thread_count), "Thread Count"},  	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(method.aml_length), "Aml Length"}, @@ -138,7 +138,7 @@ static struct acpi_exdump_info acpi_ex_dump_mutex[5] = {  	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"},  	{ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth),  	 "Acquire Depth"}, -	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.semaphore), "Semaphore"} +	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"}  };  static struct acpi_exdump_info acpi_ex_dump_region[7] = { diff --git a/drivers/acpi/executer/exfldio.c b/drivers/acpi/executer/exfldio.c index 051053f7cccb..40f0bee6faa5 100644 --- a/drivers/acpi/executer/exfldio.c +++ b/drivers/acpi/executer/exfldio.c @@ -727,11 +727,23 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,  			return_ACPI_STATUS(status);  		} -		/* Merge with previous datum if necessary */ - -		merged_datum |= raw_datum << -		    (obj_desc->common_field.access_bit_width - -		     obj_desc->common_field.start_field_bit_offset); +		/* +		 * Merge with previous datum if necessary. +		 * +		 * Note: Before the shift, check if the shift value will be larger than +		 * the integer size. If so, there is no need to perform the operation. +		 * This avoids the differences in behavior between different compilers +		 * concerning shift values larger than the target data width. +		 */ +		if ((obj_desc->common_field.access_bit_width - +		     obj_desc->common_field.start_field_bit_offset) < +		    ACPI_INTEGER_BIT_SIZE) { +			merged_datum |= +			    raw_datum << (obj_desc->common_field. +					  access_bit_width - +					  obj_desc->common_field. 
+					  start_field_bit_offset); +		}  		if (i == datum_count) {  			break; @@ -808,13 +820,23 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,  		return_ACPI_STATUS(AE_BUFFER_OVERFLOW);  	} -	/* Compute the number of datums (access width data items) */ +	/* +	 * Create the bitmasks used for bit insertion. +	 * Note: This if/else is used to bypass compiler differences with the +	 * shift operator +	 */ +	if (obj_desc->common_field.access_bit_width == ACPI_INTEGER_BIT_SIZE) { +		width_mask = ACPI_INTEGER_MAX; +	} else { +		width_mask = +		    ACPI_MASK_BITS_ABOVE(obj_desc->common_field. +					 access_bit_width); +	} -	width_mask = -	    ACPI_MASK_BITS_ABOVE(obj_desc->common_field.access_bit_width); -	mask = -	    width_mask & ACPI_MASK_BITS_BELOW(obj_desc->common_field. -					      start_field_bit_offset); +	mask = width_mask & +	    ACPI_MASK_BITS_BELOW(obj_desc->common_field.start_field_bit_offset); + +	/* Compute the number of datums (access width data items) */  	datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,  				       obj_desc->common_field.access_bit_width); @@ -848,12 +870,29 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,  			return_ACPI_STATUS(status);  		} -		/* Start new output datum by merging with previous input datum */ -  		field_offset += obj_desc->common_field.access_byte_width; -		merged_datum = raw_datum >> -		    (obj_desc->common_field.access_bit_width - -		     obj_desc->common_field.start_field_bit_offset); + +		/* +		 * Start new output datum by merging with previous input datum +		 * if necessary. +		 * +		 * Note: Before the shift, check if the shift value will be larger than +		 * the integer size. If so, there is no need to perform the operation. +		 * This avoids the differences in behavior between different compilers +		 * concerning shift values larger than the target data width. +		 */ +		if ((obj_desc->common_field.access_bit_width - +		     obj_desc->common_field.start_field_bit_offset) < +		    ACPI_INTEGER_BIT_SIZE) { +			merged_datum = +			    raw_datum >> (obj_desc->common_field. +					  access_bit_width - +					  obj_desc->common_field. +					  start_field_bit_offset); +		} else { +			merged_datum = 0; +		} +  		mask = width_mask;  		if (i == datum_count) { diff --git a/drivers/acpi/executer/exmutex.c b/drivers/acpi/executer/exmutex.c index 93098d68cadf..d8ac2877cf05 100644 --- a/drivers/acpi/executer/exmutex.c +++ b/drivers/acpi/executer/exmutex.c @@ -161,12 +161,13 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,  	/*  	 * Current Sync must be less than or equal to the sync level of the -	 * mutex.  This mechanism provides some deadlock prevention +	 * mutex. 
This mechanism provides some deadlock prevention  	 */  	if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {  		ACPI_ERROR((AE_INFO, -			    "Cannot acquire Mutex [%4.4s], incorrect SyncLevel", -			    acpi_ut_get_node_name(obj_desc->mutex.node))); +			    "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%d)", +			    acpi_ut_get_node_name(obj_desc->mutex.node), +			    walk_state->thread->current_sync_level));  		return_ACPI_STATUS(AE_AML_MUTEX_ORDER);  	} @@ -178,8 +179,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,  		if ((obj_desc->mutex.owner_thread->thread_id ==  		     walk_state->thread->thread_id) || -		    (obj_desc->mutex.semaphore == -		     acpi_gbl_global_lock_semaphore)) { +		    (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK)) {  			/*  			 * The mutex is already owned by this thread,  			 * just increment the acquisition depth @@ -264,7 +264,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,  	 */  	if ((obj_desc->mutex.owner_thread->thread_id !=  	     walk_state->thread->thread_id) -	    && (obj_desc->mutex.semaphore != acpi_gbl_global_lock_semaphore)) { +	    && (obj_desc->mutex.os_mutex != ACPI_GLOBAL_LOCK)) {  		ACPI_ERROR((AE_INFO,  			    "Thread %X cannot release Mutex [%4.4s] acquired by thread %X",  			    walk_state->thread->thread_id, diff --git a/drivers/acpi/executer/exsystem.c b/drivers/acpi/executer/exsystem.c index 52beee3674a0..6b5d1e6ce94b 100644 --- a/drivers/acpi/executer/exsystem.c +++ b/drivers/acpi/executer/exsystem.c @@ -63,14 +63,14 @@ ACPI_MODULE_NAME("exsystem")   *              interpreter is released.   *   ******************************************************************************/ -acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout) +acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)  {  	acpi_status status;  	acpi_status status2;  	ACPI_FUNCTION_TRACE(ex_system_wait_semaphore); -	status = acpi_os_wait_semaphore(semaphore, 1, 0); +	status = acpi_os_wait_semaphore(semaphore, 1, ACPI_DO_NOT_WAIT);  	if (ACPI_SUCCESS(status)) {  		return_ACPI_STATUS(status);  	} @@ -103,6 +103,59 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout)  /*******************************************************************************   * + * FUNCTION:    acpi_ex_system_wait_mutex + * + * PARAMETERS:  Mutex           - Mutex to wait on + *              Timeout         - Max time to wait + * + * RETURN:      Status + * + * DESCRIPTION: Implements a semaphore wait with a check to see if the + *              semaphore is available immediately.  If it is not, the + *              interpreter is released. 
+ * + ******************************************************************************/ + +acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout) +{ +	acpi_status status; +	acpi_status status2; + +	ACPI_FUNCTION_TRACE(ex_system_wait_mutex); + +	status = acpi_os_acquire_mutex(mutex, ACPI_DO_NOT_WAIT); +	if (ACPI_SUCCESS(status)) { +		return_ACPI_STATUS(status); +	} + +	if (status == AE_TIME) { + +		/* We must wait, so unlock the interpreter */ + +		acpi_ex_exit_interpreter(); + +		status = acpi_os_acquire_mutex(mutex, timeout); + +		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, +				  "*** Thread awake after blocking, %s\n", +				  acpi_format_exception(status))); + +		/* Reacquire the interpreter */ + +		status2 = acpi_ex_enter_interpreter(); +		if (ACPI_FAILURE(status2)) { + +			/* Report fatal error, could not acquire interpreter */ + +			return_ACPI_STATUS(status2); +		} +	} + +	return_ACPI_STATUS(status); +} + +/******************************************************************************* + *   * FUNCTION:    acpi_ex_system_do_stall   *   * PARAMETERS:  how_long        - The amount of time to stall, @@ -176,7 +229,7 @@ acpi_status acpi_ex_system_do_suspend(acpi_integer how_long)   *   * FUNCTION:    acpi_ex_system_acquire_mutex   * - * PARAMETERS:  time_desc       - The 'time to delay' object descriptor + * PARAMETERS:  time_desc       - Maximum time to wait for the mutex   *              obj_desc        - The object descriptor for this op   *   * RETURN:      Status @@ -201,14 +254,14 @@ acpi_ex_system_acquire_mutex(union acpi_operand_object * time_desc,  	/* Support for the _GL_ Mutex object -- go get the global lock */ -	if (obj_desc->mutex.semaphore == acpi_gbl_global_lock_semaphore) { +	if (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK) {  		status =  		    acpi_ev_acquire_global_lock((u16) time_desc->integer.value);  		return_ACPI_STATUS(status);  	} -	status = acpi_ex_system_wait_semaphore(obj_desc->mutex.semaphore, -					       (u16) time_desc->integer.value); +	status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex, +					   (u16) time_desc->integer.value);  	return_ACPI_STATUS(status);  } @@ -239,13 +292,13 @@ acpi_status acpi_ex_system_release_mutex(union acpi_operand_object *obj_desc)  	/* Support for the _GL_ Mutex object -- release the global lock */ -	if (obj_desc->mutex.semaphore == acpi_gbl_global_lock_semaphore) { +	if (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK) {  		status = acpi_ev_release_global_lock();  		return_ACPI_STATUS(status);  	} -	status = acpi_os_signal_semaphore(obj_desc->mutex.semaphore, 1); -	return_ACPI_STATUS(status); +	acpi_os_release_mutex(obj_desc->mutex.os_mutex); +	return_ACPI_STATUS(AE_OK);  }  /******************************************************************************* @@ -268,7 +321,8 @@ acpi_status acpi_ex_system_signal_event(union acpi_operand_object *obj_desc)  	ACPI_FUNCTION_TRACE(ex_system_signal_event);  	if (obj_desc) { -		status = acpi_os_signal_semaphore(obj_desc->event.semaphore, 1); +		status = +		    acpi_os_signal_semaphore(obj_desc->event.os_semaphore, 1);  	}  	return_ACPI_STATUS(status); @@ -299,7 +353,7 @@ acpi_ex_system_wait_event(union acpi_operand_object *time_desc,  	if (obj_desc) {  		status = -		    acpi_ex_system_wait_semaphore(obj_desc->event.semaphore, +		    acpi_ex_system_wait_semaphore(obj_desc->event.os_semaphore,  						  (u16) time_desc->integer.  						  
value);  	} @@ -322,7 +376,7 @@ acpi_ex_system_wait_event(union acpi_operand_object *time_desc,  acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc)  {  	acpi_status status = AE_OK; -	void *temp_semaphore; +	acpi_semaphore temp_semaphore;  	ACPI_FUNCTION_ENTRY(); @@ -333,8 +387,8 @@ acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc)  	status =  	    acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0, &temp_semaphore);  	if (ACPI_SUCCESS(status)) { -		(void)acpi_os_delete_semaphore(obj_desc->event.semaphore); -		obj_desc->event.semaphore = temp_semaphore; +		(void)acpi_os_delete_semaphore(obj_desc->event.os_semaphore); +		obj_desc->event.os_semaphore = temp_semaphore;  	}  	return (status); diff --git a/drivers/acpi/hardware/hwregs.c b/drivers/acpi/hardware/hwregs.c index ae142de19507..3143f36fcec9 100644 --- a/drivers/acpi/hardware/hwregs.c +++ b/drivers/acpi/hardware/hwregs.c @@ -172,9 +172,9 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 * sleep_type_a, u8 * sleep_type_b)  	}  	/* -	 * The package must have at least two elements.  NOTE (March 2005): This +	 * The package must have at least two elements. NOTE (March 2005): This  	 * goes against the current ACPI spec which defines this object as a -	 * package with one encoded DWORD element.  However, existing practice +	 * package with one encoded DWORD element. However, existing practice  	 * by BIOS vendors seems to be to have 2 or more elements, at least  	 * one per sleep type (A/B).  	 */ @@ -255,7 +255,7 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)   *              return_value    - Value that was read from the register   *              Flags           - Lock the hardware or not   * - * RETURN:      Status and the value read from specified Register.  Value + * RETURN:      Status and the value read from specified Register. Value   *              returned is normalized to bit0 (is shifted all the way right)   *   * DESCRIPTION: ACPI bit_register read function. @@ -361,8 +361,8 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)  	case ACPI_REGISTER_PM1_STATUS:  		/* -		 * Status Registers are different from the rest.  Clear by -		 * writing 1, and writing 0 has no effect.  So, the only relevant +		 * Status Registers are different from the rest. Clear by +		 * writing 1, and writing 0 has no effect. So, the only relevant  		 * information is the single bit we're interested in, all others should  		 * be written as 0 so they will be left unchanged.  		 */ @@ -467,14 +467,13 @@ ACPI_EXPORT_SYMBOL(acpi_set_register)   *   * FUNCTION:    acpi_hw_register_read   * - * PARAMETERS:  use_lock            - Mutex hw access - *              register_id         - register_iD + Offset + * PARAMETERS:  use_lock            - Lock hardware? True/False + *              register_id         - ACPI Register ID   *              return_value        - Where the register value is returned   *   * RETURN:      Status and the value read.   * - * DESCRIPTION: Acpi register read function.  Registers are read at the - *              given offset. 
+ * DESCRIPTION: Read from the specified ACPI register   *   ******************************************************************************/  acpi_status @@ -580,14 +579,26 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)   *   * FUNCTION:    acpi_hw_register_write   * - * PARAMETERS:  use_lock            - Mutex hw access - *              register_id         - register_iD + Offset + * PARAMETERS:  use_lock            - Lock hardware? True/False + *              register_id         - ACPI Register ID   *              Value               - The value to write   *   * RETURN:      Status   * - * DESCRIPTION: Acpi register Write function.  Registers are written at the - *              given offset. + * DESCRIPTION: Write to the specified ACPI register + * + * NOTE: In accordance with the ACPI specification, this function automatically + * preserves the value of the following bits, meaning that these bits cannot be + * changed via this interface: + * + * PM1_CONTROL[0] = SCI_EN + * PM1_CONTROL[9] + * PM1_STATUS[11] + * + * ACPI References: + * 1) Hardware Ignored Bits: When software writes to a register with ignored + *      bit fields, it preserves the ignored bit fields + * 2) SCI_EN: OSPM always preserves this bit position   *   ******************************************************************************/ @@ -595,6 +606,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)  {  	acpi_status status;  	acpi_cpu_flags lock_flags = 0; +	u32 read_value;  	ACPI_FUNCTION_TRACE(hw_register_write); @@ -605,6 +617,22 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)  	switch (register_id) {  	case ACPI_REGISTER_PM1_STATUS:	/* 16-bit access */ +		/* Perform a read first to preserve certain bits (per ACPI spec) */ + +		status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK, +					       ACPI_REGISTER_PM1_STATUS, +					       &read_value); +		if (ACPI_FAILURE(status)) { +			goto unlock_and_exit; +		} + +		/* Insert the bits to be preserved */ + +		ACPI_INSERT_BITS(value, ACPI_PM1_STATUS_PRESERVED_BITS, +				 read_value); + +		/* Now we can write the data */ +  		status =  		    acpi_hw_low_level_write(16, value,  					    &acpi_gbl_FADT->xpm1a_evt_blk); @@ -635,6 +663,25 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)  	case ACPI_REGISTER_PM1_CONTROL:	/* 16-bit access */ +		/* +		 * Perform a read first to preserve certain bits (per ACPI spec) +		 * +		 * Note: This includes SCI_EN, we never want to change this bit +		 */ +		status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK, +					       ACPI_REGISTER_PM1_CONTROL, +					       &read_value); +		if (ACPI_FAILURE(status)) { +			goto unlock_and_exit; +		} + +		/* Insert the bits to be preserved */ + +		ACPI_INSERT_BITS(value, ACPI_PM1_CONTROL_PRESERVED_BITS, +				 read_value); + +		/* Now we can write the data */ +  		status =  		    acpi_hw_low_level_write(16, value,  					    &acpi_gbl_FADT->xpm1a_cnt_blk); @@ -726,7 +773,7 @@ acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg)  		return (AE_OK);  	} -	/* Get a local copy of the address.  Handles possible alignment issues */ +	/* Get a local copy of the address. Handles possible alignment issues */  	ACPI_MOVE_64_TO_64(&address, ®->address);  	if (!address) { @@ -798,7 +845,7 @@ acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address * reg)  		return (AE_OK);  	} -	/* Get a local copy of the address.  
Handles possible alignment issues */ +	/* Get a local copy of the address. Handles possible alignment issues */  	ACPI_MOVE_64_TO_64(&address, ®->address);  	if (!address) { diff --git a/drivers/acpi/namespace/nsaccess.c b/drivers/acpi/namespace/nsaccess.c index 48fadade52e2..c1c6c236df9a 100644 --- a/drivers/acpi/namespace/nsaccess.c +++ b/drivers/acpi/namespace/nsaccess.c @@ -196,33 +196,30 @@ acpi_status acpi_ns_root_initialize(void)  				    (u8) (ACPI_TO_INTEGER(val) - 1);  				if (ACPI_STRCMP(init_val->name, "_GL_") == 0) { -					/* -					 * Create a counting semaphore for the -					 * global lock -					 */ + +					/* Create a counting semaphore for the global lock */ +  					status =  					    acpi_os_create_semaphore  					    (ACPI_NO_UNIT_LIMIT, 1, -					     &obj_desc->mutex.semaphore); +					     &acpi_gbl_global_lock_semaphore);  					if (ACPI_FAILURE(status)) {  						acpi_ut_remove_reference  						    (obj_desc);  						goto unlock_and_exit;  					} -					/* -					 * We just created the mutex for the -					 * global lock, save it -					 */ -					acpi_gbl_global_lock_semaphore = -					    obj_desc->mutex.semaphore; +					/* Mark this mutex as very special */ + +					obj_desc->mutex.os_mutex = +					    ACPI_GLOBAL_LOCK;  				} else {  					/* Create a mutex */ -					status = acpi_os_create_semaphore(1, 1, -									  &obj_desc-> -									  mutex. -									  semaphore); +					status = +					    acpi_os_create_mutex(&obj_desc-> +								 mutex. +								 os_mutex);  					if (ACPI_FAILURE(status)) {  						acpi_ut_remove_reference  						    (obj_desc); diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 8e46d1b39491..afd937b158b3 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -688,18 +688,9 @@ EXPORT_SYMBOL(acpi_os_wait_events_complete);  /*   * Allocate the memory for a spinlock and initialize it.   */ -acpi_status acpi_os_create_lock(acpi_handle * out_handle) +acpi_status acpi_os_create_lock(acpi_spinlock * handle)  { -	spinlock_t *lock_ptr; - - -	lock_ptr = acpi_os_allocate(sizeof(spinlock_t)); - -	spin_lock_init(lock_ptr); - -	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating spinlock[%p].\n", lock_ptr)); - -	*out_handle = lock_ptr; +	spin_lock_init(*handle);  	return AE_OK;  } @@ -707,13 +698,8 @@ acpi_status acpi_os_create_lock(acpi_handle * out_handle)  /*   * Deallocate the memory for a spinlock.   */ -void acpi_os_delete_lock(acpi_handle handle) +void acpi_os_delete_lock(acpi_spinlock handle)  { - -	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting spinlock[%p].\n", handle)); - -	acpi_os_free(handle); -  	return;  } @@ -1037,10 +1023,10 @@ EXPORT_SYMBOL(max_cstate);   * handle is a pointer to the spinlock_t.   */ -acpi_cpu_flags acpi_os_acquire_lock(acpi_handle handle) +acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)  {  	acpi_cpu_flags flags; -	spin_lock_irqsave((spinlock_t *) handle, flags); +	spin_lock_irqsave(lockp, flags);  	return flags;  } @@ -1048,9 +1034,9 @@ acpi_cpu_flags acpi_os_acquire_lock(acpi_handle handle)   * Release a spinlock. See above.   
*/ -void acpi_os_release_lock(acpi_handle handle, acpi_cpu_flags flags) +void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)  { -	spin_unlock_irqrestore((spinlock_t *) handle, flags); +	spin_unlock_irqrestore(lockp, flags);  }  #ifndef ACPI_USE_LOCAL_CACHE diff --git a/drivers/acpi/parser/psparse.c b/drivers/acpi/parser/psparse.c index 7ee2f2e77525..a02aa62fe1e5 100644 --- a/drivers/acpi/parser/psparse.c +++ b/drivers/acpi/parser/psparse.c @@ -469,6 +469,16 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)  	}  	walk_state->thread = thread; + +	/* +	 * If executing a method, the starting sync_level is this method's +	 * sync_level +	 */ +	if (walk_state->method_desc) { +		walk_state->thread->current_sync_level = +		    walk_state->method_desc->method.sync_level; +	} +  	acpi_ds_push_walk_state(walk_state, thread);  	/* @@ -505,6 +515,10 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)  			status =  			    acpi_ds_call_control_method(thread, walk_state,  							NULL); +			if (ACPI_FAILURE(status)) { +				status = +				    acpi_ds_method_error(status, walk_state); +			}  			/*  			 * If the transfer to the new method method call worked, a new walk @@ -525,7 +539,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)  			/* Check for possible multi-thread reentrancy problem */  			if ((status == AE_ALREADY_EXISTS) && -			    (!walk_state->method_desc->method.semaphore)) { +			    (!walk_state->method_desc->method.mutex)) {  				/*  				 * Method tried to create an object twice. The probable cause is  				 * that the method cannot handle reentrancy. @@ -537,7 +551,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)  				 */  				walk_state->method_desc->method.method_flags |=  				    AML_METHOD_SERIALIZED; -				walk_state->method_desc->method.concurrency = 1; +				walk_state->method_desc->method.sync_level = 0;  			}  		} diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c index 67b9f325c6fa..38ebe1c54330 100644 --- a/drivers/acpi/utilities/utdelete.c +++ b/drivers/acpi/utilities/utdelete.c @@ -155,21 +155,30 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)  	case ACPI_TYPE_MUTEX:  		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, -				  "***** Mutex %p, Semaphore %p\n", -				  object, object->mutex.semaphore)); +				  "***** Mutex %p, OS Mutex %p\n", +				  object, object->mutex.os_mutex)); -		acpi_ex_unlink_mutex(object); -		(void)acpi_os_delete_semaphore(object->mutex.semaphore); +		if (object->mutex.os_mutex != ACPI_GLOBAL_LOCK) { +			acpi_ex_unlink_mutex(object); +			acpi_os_delete_mutex(object->mutex.os_mutex); +		} else { +			/* Global Lock "mutex" is actually a counting semaphore */ + +			(void) +			    acpi_os_delete_semaphore +			    (acpi_gbl_global_lock_semaphore); +			acpi_gbl_global_lock_semaphore = NULL; +		}  		break;  	case ACPI_TYPE_EVENT:  		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, -				  "***** Event %p, Semaphore %p\n", -				  object, object->event.semaphore)); +				  "***** Event %p, OS Semaphore %p\n", +				  object, object->event.os_semaphore)); -		(void)acpi_os_delete_semaphore(object->event.semaphore); -		object->event.semaphore = NULL; +		(void)acpi_os_delete_semaphore(object->event.os_semaphore); +		object->event.os_semaphore = NULL;  		break;  	case ACPI_TYPE_METHOD: @@ -177,12 +186,13 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)  		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,  				  "***** Method 
%p\n", object)); -		/* Delete the method semaphore if it exists */ +		/* Delete the method mutex if it exists */ -		if (object->method.semaphore) { -			(void)acpi_os_delete_semaphore(object->method. -						       semaphore); -			object->method.semaphore = NULL; +		if (object->method.mutex) { +			acpi_os_delete_mutex(object->method.mutex->mutex. +					     os_mutex); +			acpi_ut_delete_object_desc(object->method.mutex); +			object->method.mutex = NULL;  		}  		break; diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c index e5999c65c0b8..014030af8b50 100644 --- a/drivers/acpi/utilities/utglobal.c +++ b/drivers/acpi/utilities/utglobal.c @@ -794,6 +794,7 @@ void acpi_ut_init_globals(void)  	/* Global Lock support */ +	acpi_gbl_global_lock_semaphore = NULL;  	acpi_gbl_global_lock_acquired = FALSE;  	acpi_gbl_global_lock_thread_count = 0;  	acpi_gbl_global_lock_handle = 0; diff --git a/drivers/acpi/utilities/utmutex.c b/drivers/acpi/utilities/utmutex.c index 25eb34369afa..dfc8f30ca892 100644 --- a/drivers/acpi/utilities/utmutex.c +++ b/drivers/acpi/utilities/utmutex.c @@ -82,12 +82,9 @@ acpi_status acpi_ut_mutex_initialize(void)  	/* Create the spinlocks for use at interrupt level */ -	status = acpi_os_create_lock(&acpi_gbl_gpe_lock); -	if (ACPI_FAILURE(status)) { -		return_ACPI_STATUS(status); -	} +	spin_lock_init(acpi_gbl_gpe_lock); +	spin_lock_init(acpi_gbl_hardware_lock); -	status = acpi_os_create_lock(&acpi_gbl_hardware_lock);  	return_ACPI_STATUS(status);  } @@ -146,9 +143,8 @@ static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id)  	}  	if (!acpi_gbl_mutex_info[mutex_id].mutex) { -		status = acpi_os_create_semaphore(1, 1, -						  &acpi_gbl_mutex_info -						  [mutex_id].mutex); +		status = +		    acpi_os_create_mutex(&acpi_gbl_mutex_info[mutex_id].mutex);  		acpi_gbl_mutex_info[mutex_id].thread_id =  		    ACPI_MUTEX_NOT_ACQUIRED;  		acpi_gbl_mutex_info[mutex_id].use_count = 0; @@ -171,7 +167,6 @@ static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id)  static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)  { -	acpi_status status;  	ACPI_FUNCTION_TRACE_U32(ut_delete_mutex, mutex_id); @@ -179,12 +174,12 @@ static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)  		return_ACPI_STATUS(AE_BAD_PARAMETER);  	} -	status = acpi_os_delete_semaphore(acpi_gbl_mutex_info[mutex_id].mutex); +	acpi_os_delete_mutex(acpi_gbl_mutex_info[mutex_id].mutex);  	acpi_gbl_mutex_info[mutex_id].mutex = NULL;  	acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED; -	return_ACPI_STATUS(status); +	return_ACPI_STATUS(AE_OK);  }  /******************************************************************************* @@ -251,8 +246,8 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)  			  "Thread %X attempting to acquire Mutex [%s]\n",  			  this_thread_id, acpi_ut_get_mutex_name(mutex_id))); -	status = acpi_os_wait_semaphore(acpi_gbl_mutex_info[mutex_id].mutex, -					1, ACPI_WAIT_FOREVER); +	status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex, +				       ACPI_WAIT_FOREVER);  	if (ACPI_SUCCESS(status)) {  		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,  				  "Thread %X acquired Mutex [%s]\n", @@ -284,7 +279,6 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)  acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)  { -	acpi_status status;  	acpi_thread_id this_thread_id;  	ACPI_FUNCTION_NAME(ut_release_mutex); @@ -340,19 +334,6 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)  	
acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED; -	status = -	    acpi_os_signal_semaphore(acpi_gbl_mutex_info[mutex_id].mutex, 1); - -	if (ACPI_FAILURE(status)) { -		ACPI_EXCEPTION((AE_INFO, status, -				"Thread %X could not release Mutex [%X]", -				this_thread_id, mutex_id)); -	} else { -		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, -				  "Thread %X released Mutex [%s]\n", -				  this_thread_id, -				  acpi_ut_get_mutex_name(mutex_id))); -	} - -	return (status); +	acpi_os_release_mutex(acpi_gbl_mutex_info[mutex_id].mutex); +	return (AE_OK);  } diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h index b9beceb33141..b492857fe721 100644 --- a/include/acpi/acconfig.h +++ b/include/acpi/acconfig.h @@ -63,7 +63,7 @@  /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION                 0x20060608 +#define ACPI_CA_VERSION                 0x20060623  /*   * OS name, used for the _OS object.  The _OS object is essentially obsolete, diff --git a/include/acpi/acdispat.h b/include/acpi/acdispat.h index 288f84903af7..a22fe9cf8493 100644 --- a/include/acpi/acdispat.h +++ b/include/acpi/acdispat.h @@ -201,7 +201,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,  acpi_status  acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,  			       union acpi_operand_object *obj_desc, -			       struct acpi_namespace_node *calling_method_node); +			       struct acpi_walk_state *walk_state);  acpi_status  acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state); diff --git a/include/acpi/acglobal.h b/include/acpi/acglobal.h index 14531d48f6b6..06972e6637de 100644 --- a/include/acpi/acglobal.h +++ b/include/acpi/acglobal.h @@ -181,6 +181,12 @@ ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;  extern struct acpi_table_list acpi_gbl_table_lists[ACPI_TABLE_ID_MAX + 1];  extern struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1]; +/***************************************************************************** + * + * Mutual exlusion within ACPICA subsystem + * + ****************************************************************************/ +  /*   * Predefined mutex objects.  This array contains the   * actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs. 
@@ -188,6 +194,20 @@ extern struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1];   */  ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX]; +/* + * Global lock semaphore works in conjunction with the actual HW global lock + */ +ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore; + +/* + * Spinlocks are used for interfaces that can be possibly called at + * interrupt level + */ +ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock;	/* For GPE data structs and registers */ +ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock;	/* For ACPI H/W except GPE registers */ +#define acpi_gbl_gpe_lock	&_acpi_gbl_gpe_lock +#define acpi_gbl_hardware_lock	&_acpi_gbl_hardware_lock +  /*****************************************************************************   *   * Miscellaneous globals @@ -217,7 +237,6 @@ ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_system_notify;  ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler;  ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;  ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk; -ACPI_EXTERN acpi_handle acpi_gbl_global_lock_semaphore;  /* Misc */ @@ -315,11 +334,6 @@ ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;  ACPI_EXTERN struct acpi_gpe_block_info      *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; -/* Spinlocks */ - -ACPI_EXTERN acpi_handle acpi_gbl_gpe_lock; -ACPI_EXTERN acpi_handle acpi_gbl_hardware_lock; -  /*****************************************************************************   *   * Debugger globals diff --git a/include/acpi/acinterp.h b/include/acpi/acinterp.h index 9f22cfcb624b..216339a8f1f6 100644 --- a/include/acpi/acinterp.h +++ b/include/acpi/acinterp.h @@ -287,7 +287,10 @@ acpi_ex_system_wait_event(union acpi_operand_object *time,  acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc); -acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout); +acpi_status +acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout); + +acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout);  /*   * exoparg1 - ACPI AML execution, 1 operand diff --git a/include/acpi/aclocal.h b/include/acpi/aclocal.h index 1eeca7adca95..56b802486161 100644 --- a/include/acpi/aclocal.h +++ b/include/acpi/aclocal.h @@ -47,10 +47,11 @@  /* acpisrc:struct_defs -- for acpisrc conversion */  #define ACPI_WAIT_FOREVER               0xFFFF	/* u16, as per ACPI spec */ -#define ACPI_INFINITE_CONCURRENCY       0xFF +#define ACPI_DO_NOT_WAIT                0 +#define ACPI_SERIALIZED                 0xFF -typedef void *acpi_mutex;  typedef u32 acpi_mutex_handle; +#define ACPI_GLOBAL_LOCK                (acpi_semaphore) (-1)  /* Total number of aml opcodes defined */ @@ -79,16 +80,15 @@ union acpi_parse_object;   * table below also!   
*/  #define ACPI_MTX_INTERPRETER            0	/* AML Interpreter, main lock */ -#define ACPI_MTX_CONTROL_METHOD         1	/* Control method termination [TBD: may no longer be necessary] */ -#define ACPI_MTX_TABLES                 2	/* Data for ACPI tables */ -#define ACPI_MTX_NAMESPACE              3	/* ACPI Namespace */ -#define ACPI_MTX_EVENTS                 4	/* Data for ACPI events */ -#define ACPI_MTX_CACHES                 5	/* Internal caches, general purposes */ -#define ACPI_MTX_MEMORY                 6	/* Debug memory tracking lists */ -#define ACPI_MTX_DEBUG_CMD_COMPLETE     7	/* AML debugger */ -#define ACPI_MTX_DEBUG_CMD_READY        8	/* AML debugger */ - -#define ACPI_MAX_MUTEX                  8 +#define ACPI_MTX_TABLES                 1	/* Data for ACPI tables */ +#define ACPI_MTX_NAMESPACE              2	/* ACPI Namespace */ +#define ACPI_MTX_EVENTS                 3	/* Data for ACPI events */ +#define ACPI_MTX_CACHES                 4	/* Internal caches, general purposes */ +#define ACPI_MTX_MEMORY                 5	/* Debug memory tracking lists */ +#define ACPI_MTX_DEBUG_CMD_COMPLETE     6	/* AML debugger */ +#define ACPI_MTX_DEBUG_CMD_READY        7	/* AML debugger */ + +#define ACPI_MAX_MUTEX                  7  #define ACPI_NUM_MUTEX                  ACPI_MAX_MUTEX+1  #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) @@ -98,14 +98,13 @@ union acpi_parse_object;  static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {  	"ACPI_MTX_Interpreter", -	"ACPI_MTX_Method",  	"ACPI_MTX_Tables",  	"ACPI_MTX_Namespace",  	"ACPI_MTX_Events",  	"ACPI_MTX_Caches",  	"ACPI_MTX_Memory", -	"ACPI_MTX_DebugCmdComplete", -	"ACPI_MTX_DebugCmdReady" +	"ACPI_MTX_CommandComplete", +	"ACPI_MTX_CommandReady"  };  #endif @@ -705,6 +704,13 @@ struct acpi_bit_register_info {  };  /* + * Some ACPI registers have bits that must be ignored -- meaning that they + * must be preserved. 
+ */ +#define ACPI_PM1_STATUS_PRESERVED_BITS          0x0800	/* Bit 11 */ +#define ACPI_PM1_CONTROL_PRESERVED_BITS         0x0201	/* Bit 9, Bit 0 (SCI_EN) */ + +/*   * Register IDs   * These are the full ACPI registers   */ diff --git a/include/acpi/acmacros.h b/include/acpi/acmacros.h index 38f9aa4bef00..4bb38068f40d 100644 --- a/include/acpi/acmacros.h +++ b/include/acpi/acmacros.h @@ -394,6 +394,8 @@  #define ACPI_REGISTER_PREPARE_BITS(val, pos, mask)      ((val << pos) & mask)  #define ACPI_REGISTER_INSERT_VALUE(reg, pos, mask, val)  reg = (reg & (~(mask))) | ACPI_REGISTER_PREPARE_BITS(val, pos, mask) +#define ACPI_INSERT_BITS(target, mask, source)          target = ((target & (~(mask))) | (source & mask)) +  /* Generate a UUID */  #define ACPI_INIT_UUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \ diff --git a/include/acpi/acobject.h b/include/acpi/acobject.h index 1747d94084d8..8fdee31119f3 100644 --- a/include/acpi/acobject.h +++ b/include/acpi/acobject.h @@ -140,14 +140,14 @@ struct acpi_object_package {   *****************************************************************************/  struct acpi_object_event { -	ACPI_OBJECT_COMMON_HEADER void *semaphore; +	ACPI_OBJECT_COMMON_HEADER acpi_semaphore os_semaphore;	/* Actual OS synchronization object */  };  struct acpi_object_mutex {  	ACPI_OBJECT_COMMON_HEADER u8 sync_level;	/* 0-15, specified in Mutex() call */  	u16 acquisition_depth;	/* Allow multiple Acquires, same thread */  	struct acpi_thread_state *owner_thread;	/* Current owner of the mutex */ -	void *semaphore;	/* Actual OS synchronization object */ +	acpi_mutex os_mutex;	/* Actual OS synchronization object */  	union acpi_operand_object *prev;	/* Link for list of acquired mutexes */  	union acpi_operand_object *next;	/* Link for list of acquired mutexes */  	struct acpi_namespace_node *node;	/* Containing namespace node */ @@ -166,8 +166,8 @@ struct acpi_object_region {  struct acpi_object_method {  	ACPI_OBJECT_COMMON_HEADER u8 method_flags;  	u8 param_count; -	u8 concurrency; -	void *semaphore; +	u8 sync_level; +	union acpi_operand_object *mutex;  	u8 *aml_start;  	ACPI_INTERNAL_METHOD implementation;  	u32 aml_length; diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h index 8f473c83b7c4..89bc4a16c2e8 100644 --- a/include/acpi/acpiosxf.h +++ b/include/acpi/acpiosxf.h @@ -96,25 +96,47 @@ acpi_os_table_override(struct acpi_table_header *existing_table,  		       struct acpi_table_header **new_table);  /* - * Synchronization primitives + * Spinlock primitives + */ +acpi_status acpi_os_create_lock(acpi_spinlock * out_handle); + +void acpi_os_delete_lock(acpi_spinlock handle); + +acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock handle); + +void acpi_os_release_lock(acpi_spinlock handle, acpi_cpu_flags flags); + +/* + * Semaphore primitives   */  acpi_status  acpi_os_create_semaphore(u32 max_units, -			 u32 initial_units, acpi_handle * out_handle); +			 u32 initial_units, acpi_semaphore * out_handle); -acpi_status acpi_os_delete_semaphore(acpi_handle handle); +acpi_status acpi_os_delete_semaphore(acpi_semaphore handle); -acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout); +acpi_status +acpi_os_wait_semaphore(acpi_semaphore handle, u32 units, u16 timeout); + +acpi_status acpi_os_signal_semaphore(acpi_semaphore handle, u32 units); + +/* + * Mutex primitives + */ +acpi_status acpi_os_create_mutex(acpi_mutex * out_handle); -acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units); +void acpi_os_delete_mutex(acpi_mutex handle); -acpi_status 
acpi_os_create_lock(acpi_handle * out_handle); +acpi_status acpi_os_acquire_mutex(acpi_mutex handle, u16 timeout); -void acpi_os_delete_lock(acpi_handle handle); +void acpi_os_release_mutex(acpi_mutex handle); -acpi_cpu_flags acpi_os_acquire_lock(acpi_handle handle); +/* Temporary macros for Mutex* interfaces, map to existing semaphore xfaces */ -void acpi_os_release_lock(acpi_handle handle, acpi_cpu_flags flags); +#define acpi_os_create_mutex(out_handle)    acpi_os_create_semaphore (1, 1, out_handle) +#define acpi_os_delete_mutex(handle)        (void) acpi_os_delete_semaphore (handle) +#define acpi_os_acquire_mutex(handle,time)  acpi_os_wait_semaphore (handle, 1, time) +#define acpi_os_release_mutex(handle)       (void) acpi_os_signal_semaphore (handle, 1)  /*   * Memory allocation and mapping diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 77cf1236b05a..64b603cfe92e 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -241,7 +241,7 @@ typedef acpi_native_uint acpi_size;  /*******************************************************************************   * - * OS- or compiler-dependent types + * OS-dependent and compiler-dependent types   *   * If the defaults below are not appropriate for the host system, they can   * be defined in the compiler-specific or OS-specific header, and this will @@ -249,29 +249,36 @@ typedef acpi_native_uint acpi_size;   *   ******************************************************************************/ -/* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */ +/* Value returned by acpi_os_get_thread_id */ -#ifndef acpi_uintptr_t -#define acpi_uintptr_t                  void * +#ifndef acpi_thread_id +#define acpi_thread_id                  acpi_native_uint  #endif -/* - * If acpi_cache_t was not defined in the OS-dependent header, - * define it now. This is typically the case where the local cache - * manager implementation is to be used (ACPI_USE_LOCAL_CACHE) - */ -#ifndef acpi_cache_t -#define acpi_cache_t                    struct acpi_memory_list +/* Object returned from acpi_os_create_lock */ + +#ifndef acpi_spinlock +#define acpi_spinlock                   void *  #endif -/* - * Allow the CPU flags word to be defined per-OS to simplify the use of the - * lock and unlock OSL interfaces. - */ +/* Flags for acpi_os_acquire_lock/acpi_os_release_lock */ +  #ifndef acpi_cpu_flags  #define acpi_cpu_flags                  acpi_native_uint  #endif +/* Object returned from acpi_os_create_cache */ + +#ifndef acpi_cache_t +#define acpi_cache_t                    struct acpi_memory_list +#endif + +/* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */ + +#ifndef acpi_uintptr_t +#define acpi_uintptr_t                  void * +#endif +  /*   * ACPI_PRINTF_LIKE is used to tag functions as "printf-like" because   * some compilers can catch printf format string problems @@ -298,13 +305,6 @@ typedef acpi_native_uint acpi_size;  #define ACPI_EXPORT_SYMBOL(symbol)  #endif -/* - * thread_id is returned by acpi_os_get_thread_id. - */ -#ifndef acpi_thread_id -#define acpi_thread_id                  acpi_native_uint -#endif -  /*******************************************************************************   *   * Independent types @@ -380,6 +380,11 @@ struct uint32_struct {  	u32 hi;  }; +/* Synchronization objects */ + +#define acpi_mutex                      void * +#define acpi_semaphore                  void * +  /*   * Acpi integer width. In ACPI version 1, integers are   * 32 bits.  
In ACPI version 2, integers are 64 bits. diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index 277d35bced03..3f853cabbd41 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -58,11 +58,13 @@  #include <asm/div64.h>  #include <asm/acpi.h>  #include <linux/slab.h> +#include <linux/spinlock_types.h>  /* Host-dependent types and defines */  #define ACPI_MACHINE_WIDTH          BITS_PER_LONG  #define acpi_cache_t                        kmem_cache_t +#define acpi_spinlock                   spinlock_t *  #define ACPI_EXPORT_SYMBOL(symbol)  EXPORT_SYMBOL(symbol);  #define strtoul                     simple_strtoul  | 
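Note on the acpiosxf.h hunk: the new acpi_os_create_mutex/acpi_os_delete_mutex/acpi_os_acquire_mutex/acpi_os_release_mutex interfaces are, for now, only macros that fall back to the existing semaphore OSL calls. A host OSL could later supply native implementations once those temporary macros are dropped. The sketch below is illustrative only and is not part of this patch; it assumes the prototypes added above, the acpi_mutex typedef from actypes.h, the Linux kernel mutex API, and that the temporary macros have been removed.

/*
 * Illustrative sketch only -- not part of this patch.  One way a host OSL
 * could back the new mutex interfaces with native kernel mutexes after the
 * temporary semaphore macros in acpiosxf.h are removed.
 */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <acpi/acpi.h>

acpi_status acpi_os_create_mutex(acpi_mutex *out_handle)
{
	struct mutex *m = kmalloc(sizeof(*m), GFP_KERNEL);

	if (!m)
		return AE_NO_MEMORY;

	mutex_init(m);
	*out_handle = (acpi_mutex)m;
	return AE_OK;
}

void acpi_os_delete_mutex(acpi_mutex handle)
{
	kfree(handle);
}

acpi_status acpi_os_acquire_mutex(acpi_mutex handle, u16 timeout)
{
	struct mutex *m = handle;

	if (timeout == 0)	/* ACPI_DO_NOT_WAIT */
		return mutex_trylock(m) ? AE_OK : AE_TIME;

	/* The kernel mutex API has no timed lock; treat other timeouts as "wait forever" */
	mutex_lock(m);
	return AE_OK;
}

void acpi_os_release_mutex(acpi_mutex handle)
{
	mutex_unlock((struct mutex *)handle);
}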

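Note on the acglobal.h and utmutex.c hunks: acpi_gbl_gpe_lock and acpi_gbl_hardware_lock become real spinlock_t storage, taken by address via the new macros and initialized with spin_lock_init(), while callers keep the acpi_os_acquire_lock()/acpi_os_release_lock() interfaces declared in acpiosxf.h. The fragment below only sketches that calling pattern; the function name is made up and the code is not part of this patch (it assumes the ACPICA-internal headers shown above).

/* Sketch of the acquire/release pattern for the interrupt-level spinlocks. */
static void example_touch_gpe_registers(void)
{
	acpi_cpu_flags flags;

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* ... access GPE data structures and registers under the lock ... */

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
}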

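Note on the acmacros.h and aclocal.h hunks: ACPI_INSERT_BITS() copies the bits selected by a mask from a source value into a target value, and the new PM1 masks mark bits that must be preserved, i.e. carried over from the value just read out of the register into the value about to be written. A small stand-alone demonstration of the arithmetic follows; the sample register values are invented and the program is not part of the patch.

#include <stdio.h>
#include <stdint.h>

/* Macro and mask copied from the hunks above */
#define ACPI_INSERT_BITS(target, mask, source) \
	target = ((target & (~(mask))) | (source & mask))
#define ACPI_PM1_CONTROL_PRESERVED_BITS         0x0201	/* Bit 9, Bit 0 (SCI_EN) */

int main(void)
{
	uint16_t read_value = 0x0201;	/* pretend current register contents */
	uint16_t value = 0x2000;	/* value the caller wants to write */

	/* Carry the preserved bits from the read value into the value to write */
	ACPI_INSERT_BITS(value, ACPI_PM1_CONTROL_PRESERVED_BITS, read_value);

	printf("value to write: 0x%04x\n", value);	/* prints 0x2201 */
	return 0;
}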