| author | Daniel Walker <dwalker@mvista.com> | 2008-02-06 01:37:39 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2008-02-06 10:41:07 -0800 |
| commit | 66656ebb5bf3f94aaeca1fbd369672bba980babf (patch) | |
| tree | 1d49dac859aa6c401a42bc98e433f0649b23cac1 /Documentation/DocBook/kernel-locking.tmpl | |
| parent | ec5b1157f8e819c72fc93aa6d2d5117c08cdc961 (diff) | |
docs: kernel-locking: Convert semaphore references
I converted some of the document to reflect mutex usage instead of
semaphore usage, since we shouldn't be promoting semaphore usage when
it's on its way out.
Signed-off-by: Daniel Walker <dwalker@mvista.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
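The conversion applied to the documentation examples follows the usual semaphore-to-mutex pattern. As a rough illustration only (the `cache_update_example()` helper below is hypothetical and not part of the patch; only the `cache_lock` name comes from the examples being updated), the before/after shape is:

```c
#include <linux/mutex.h>

/* DEFINE_MUTEX() replaces DECLARE_MUTEX(), and mutex_lock()/mutex_unlock()
 * replace the down()/up() calls that used a semaphore as a mutex. */
static DEFINE_MUTEX(cache_lock);

static void cache_update_example(void)   /* hypothetical helper */
{
        mutex_lock(&cache_lock);          /* was: down(&cache_lock); */
        /* ... touch the cache while holding the lock ... */
        mutex_unlock(&cache_lock);        /* was: up(&cache_lock);   */
}
```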
Diffstat (limited to 'Documentation/DocBook/kernel-locking.tmpl')
-rw-r--r-- | Documentation/DocBook/kernel-locking.tmpl | 32 |
1 file changed, 16 insertions, 16 deletions
```diff
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 01825ee7db64..2e9d6b41f034 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -717,7 +717,7 @@ used, and when it gets full, throws out the least used one.
 <para>
 For our first example, we assume that all operations are in user
 context (ie. from system calls), so we can sleep. This means we can
-use a semaphore to protect the cache and all the objects within
+use a mutex to protect the cache and all the objects within
 it. Here's the code:
 </para>
 
@@ -725,7 +725,7 @@ it. Here's the code:
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/string.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
 #include <asm/errno.h>
 
 struct object
@@ -737,7 +737,7 @@ struct object
 };
 
 /* Protects the cache, cache_num, and the objects within it */
-static DECLARE_MUTEX(cache_lock);
+static DEFINE_MUTEX(cache_lock);
 static LIST_HEAD(cache);
 static unsigned int cache_num = 0;
 #define MAX_CACHE_SIZE 10
@@ -789,17 +789,17 @@ int cache_add(int id, const char *name)
         obj->id = id;
         obj->popularity = 0;
 
-        down(&cache_lock);
+        mutex_lock(&cache_lock);
         __cache_add(obj);
-        up(&cache_lock);
+        mutex_unlock(&cache_lock);
         return 0;
 }
 
 void cache_delete(int id)
 {
-        down(&cache_lock);
+        mutex_lock(&cache_lock);
         __cache_delete(__cache_find(id));
-        up(&cache_lock);
+        mutex_unlock(&cache_lock);
 }
 
 int cache_find(int id, char *name)
@@ -807,13 +807,13 @@ int cache_find(int id, char *name)
         struct object *obj;
         int ret = -ENOENT;
 
-        down(&cache_lock);
+        mutex_lock(&cache_lock);
         obj = __cache_find(id);
         if (obj) {
                 ret = 0;
                 strcpy(name, obj->name);
         }
-        up(&cache_lock);
+        mutex_unlock(&cache_lock);
         return ret;
 }
 </programlisting>
@@ -853,7 +853,7 @@ The change is shown below, in standard patch format: the
         int popularity;
 };
 
--static DECLARE_MUTEX(cache_lock);
+-static DEFINE_MUTEX(cache_lock);
 +static spinlock_t cache_lock = SPIN_LOCK_UNLOCKED;
 static LIST_HEAD(cache);
 static unsigned int cache_num = 0;
@@ -870,22 +870,22 @@ The change is shown below, in standard patch format: the
         obj->id = id;
         obj->popularity = 0;
 
--        down(&cache_lock);
+-        mutex_lock(&cache_lock);
 +        spin_lock_irqsave(&cache_lock, flags);
         __cache_add(obj);
--        up(&cache_lock);
+-        mutex_unlock(&cache_lock);
 +        spin_unlock_irqrestore(&cache_lock, flags);
         return 0;
 }
 
 void cache_delete(int id)
 {
--        down(&cache_lock);
+-        mutex_lock(&cache_lock);
 +        unsigned long flags;
 +
 +        spin_lock_irqsave(&cache_lock, flags);
         __cache_delete(__cache_find(id));
--        up(&cache_lock);
+-        mutex_unlock(&cache_lock);
 +        spin_unlock_irqrestore(&cache_lock, flags);
 }
 
@@ -895,14 +895,14 @@ The change is shown below, in standard patch format: the
         int ret = -ENOENT;
 +        unsigned long flags;
 
--        down(&cache_lock);
+-        mutex_lock(&cache_lock);
 +        spin_lock_irqsave(&cache_lock, flags);
         obj = __cache_find(id);
         if (obj) {
                 ret = 0;
                 strcpy(name, obj->name);
         }
--        up(&cache_lock);
+-        mutex_unlock(&cache_lock);
 +        spin_unlock_irqrestore(&cache_lock, flags);
         return ret;
 }
```
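The later hunks touch the document's follow-on example, where the cache is shared with interrupt context and the mutex itself is replaced by an IRQ-safe spinlock. A minimal sketch of that end state, assuming the same `cache_lock` name (the `cache_touch_example()` helper is hypothetical, and DEFINE_SPINLOCK() stands in for the old SPIN_LOCK_UNLOCKED initializer shown in the patched text):

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(cache_lock);       /* modern form of the old
                                             SPIN_LOCK_UNLOCKED initializer */

static void cache_touch_example(int id)   /* hypothetical helper */
{
        unsigned long flags;

        /* Disable local interrupts and take the lock, so the cache can
         * also be accessed safely from interrupt handlers. */
        spin_lock_irqsave(&cache_lock, flags);
        /* ... find or modify the cache entry for 'id' ... */
        spin_unlock_irqrestore(&cache_lock, flags);
}
```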