From a5516438959d90b071ff0a484ce4f3f523dc3152 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 23 Jul 2008 21:27:41 -0700 Subject: hugetlb: modular state for hugetlb page size The goal of this patchset is to support multiple hugetlb page sizes. This is achieved by introducing a new struct hstate structure, which encapsulates the important hugetlb state and constants (eg. huge page size, number of huge pages currently allocated, etc). The hstate structure is then passed around the code which requires these fields, they will do the right thing regardless of the exact hstate they are operating on. This patch adds the hstate structure, with a single global instance of it (default_hstate), and does the basic work of converting hugetlb to use the hstate. Future patches will add more hstate structures to allow for different hugetlbfs mounts to have different page sizes. [akpm@linux-foundation.org: coding-style fixes] Acked-by: Adam Litke Acked-by: Nishanth Aravamudan Signed-off-by: Andi Kleen Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/shm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'ipc') diff --git a/ipc/shm.c b/ipc/shm.c index 790240cd067f..a726aebce7d7 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -577,7 +577,8 @@ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss, if (is_file_hugepages(shp->shm_file)) { struct address_space *mapping = inode->i_mapping; - *rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages; + struct hstate *h = hstate_file(shp->shm_file); + *rss += pages_per_huge_page(h) * mapping->nrpages; } else { struct shmem_inode_info *info = SHMEM_I(inode); spin_lock(&info->lock); -- cgit v1.2.1 From 983bfb7db303cfde56ae5bbf4e0f2f46e38c9576 Mon Sep 17 00:00:00 2001 From: Nadia Derbey Date: Fri, 25 Jul 2008 01:48:03 -0700 Subject: ipc: call idr_find() without locking in ipc_lock() Call idr_find() locklessly from ipc_lock(), since the idr tree is now RCU protected. Signed-off-by: Nadia Derbey Acked-by: "Paul E. McKenney" Cc: Manfred Spraul Cc: Jim Houston Cc: Pierre Peiffer Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/util.c | 9 --------- 1 file changed, 9 deletions(-) (limited to 'ipc') diff --git a/ipc/util.c b/ipc/util.c index 3339177b336c..0f468c34e83c 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -688,10 +688,6 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out) * Look for an id in the ipc ids idr and lock the associated ipc object. * * The ipc object is locked on exit. - * - * This is the routine that should be called when the rw_mutex is not already - * held, i.e. idr tree not protected: it protects the idr tree in read mode - * during the idr_find(). 
*/ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id) @@ -699,18 +695,13 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id) struct kern_ipc_perm *out; int lid = ipcid_to_idx(id); - down_read(&ids->rw_mutex); - rcu_read_lock(); out = idr_find(&ids->ipcs_idr, lid); if (out == NULL) { rcu_read_unlock(); - up_read(&ids->rw_mutex); return ERR_PTR(-EINVAL); } - up_read(&ids->rw_mutex); - spin_lock(&out->lock); /* ipc_rmid() may have already freed the ID while ipc_lock -- cgit v1.2.1 From 00c2bf85d8febfcfddde63822043462b026134ff Mon Sep 17 00:00:00 2001 From: Nadia Derbey Date: Fri, 25 Jul 2008 01:48:03 -0700 Subject: ipc: get rid of ipc_lock_down() Remove the ipc_lock_down() routines: they used to call idr_find() locklessly (given that the ipc ids lock was already held), so they are not needed anymore. Signed-off-by: Nadia Derbey Acked-by: "Paul E. McKenney" Cc: Manfred Spraul Cc: Jim Houston Cc: Pierre Peiffer Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/shm.c | 21 +++------------------ ipc/util.c | 52 +--------------------------------------------------- ipc/util.h | 6 ------ 3 files changed, 4 insertions(+), 75 deletions(-) (limited to 'ipc') diff --git a/ipc/shm.c b/ipc/shm.c index a726aebce7d7..e77ec698cf40 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -111,24 +111,9 @@ void __init shm_init (void) IPC_SHM_IDS, sysvipc_shm_proc_show); } -/* - * shm_lock_(check_)down routines are called in the paths where the rw_mutex - * is held to protect access to the idr tree. - */ -static inline struct shmid_kernel *shm_lock_down(struct ipc_namespace *ns, - int id) -{ - struct kern_ipc_perm *ipcp = ipc_lock_down(&shm_ids(ns), id); - - if (IS_ERR(ipcp)) - return (struct shmid_kernel *)ipcp; - - return container_of(ipcp, struct shmid_kernel, shm_perm); -} - /* * shm_lock_(check_) routines are called in the paths where the rw_mutex - * is not held. + * is not necessarily held. */ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id) { @@ -211,7 +196,7 @@ static void shm_close(struct vm_area_struct *vma) down_write(&shm_ids(ns).rw_mutex); /* remove from the list of attaches of the shm segment */ - shp = shm_lock_down(ns, sfd->id); + shp = shm_lock(ns, sfd->id); BUG_ON(IS_ERR(shp)); shp->shm_lprid = task_tgid_vnr(current); shp->shm_dtim = get_seconds(); @@ -932,7 +917,7 @@ invalid: out_nattch: down_write(&shm_ids(ns).rw_mutex); - shp = shm_lock_down(ns, shmid); + shp = shm_lock(ns, shmid); BUG_ON(IS_ERR(shp)); shp->shm_nattch--; if(shp->shm_nattch == 0 && diff --git a/ipc/util.c b/ipc/util.c index 0f468c34e83c..49b3ea615dc5 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -716,56 +716,6 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id) return out; } -/** - * ipc_lock_down - Lock an ipc structure with rw_sem held - * @ids: IPC identifier set - * @id: ipc id to look for - * - * Look for an id in the ipc ids idr and lock the associated ipc object. - * - * The ipc object is locked on exit. - * - * This is the routine that should be called when the rw_mutex is already - * held, i.e. idr tree protected. - */ - -struct kern_ipc_perm *ipc_lock_down(struct ipc_ids *ids, int id) -{ - struct kern_ipc_perm *out; - int lid = ipcid_to_idx(id); - - rcu_read_lock(); - out = idr_find(&ids->ipcs_idr, lid); - if (out == NULL) { - rcu_read_unlock(); - return ERR_PTR(-EINVAL); - } - - spin_lock(&out->lock); - - /* - * No need to verify that the structure is still valid since the - * rw_mutex is held. 
- */ - return out; -} - -struct kern_ipc_perm *ipc_lock_check_down(struct ipc_ids *ids, int id) -{ - struct kern_ipc_perm *out; - - out = ipc_lock_down(ids, id); - if (IS_ERR(out)) - return out; - - if (ipc_checkid(out, id)) { - ipc_unlock(out); - return ERR_PTR(-EIDRM); - } - - return out; -} - struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id) { struct kern_ipc_perm *out; @@ -837,7 +787,7 @@ struct kern_ipc_perm *ipcctl_pre_down(struct ipc_ids *ids, int id, int cmd, int err; down_write(&ids->rw_mutex); - ipcp = ipc_lock_check_down(ids, id); + ipcp = ipc_lock_check(ids, id); if (IS_ERR(ipcp)) { err = PTR_ERR(ipcp); goto out_up; diff --git a/ipc/util.h b/ipc/util.h index cdb966aebe07..3646b45a03c9 100644 --- a/ipc/util.h +++ b/ipc/util.h @@ -102,11 +102,6 @@ void* ipc_rcu_alloc(int size); void ipc_rcu_getref(void *ptr); void ipc_rcu_putref(void *ptr); -/* - * ipc_lock_down: called with rw_mutex held - * ipc_lock: called without that lock held - */ -struct kern_ipc_perm *ipc_lock_down(struct ipc_ids *, int); struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int); void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out); @@ -155,7 +150,6 @@ static inline void ipc_unlock(struct kern_ipc_perm *perm) rcu_read_unlock(); } -struct kern_ipc_perm *ipc_lock_check_down(struct ipc_ids *ids, int id); struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id); int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids, struct ipc_ops *ops, struct ipc_params *params); -- cgit v1.2.1 From 4daa28f6d8f5cda8ea0f55048e3c8811c384cbdd Mon Sep 17 00:00:00 2001 From: Manfred Spraul Date: Fri, 25 Jul 2008 01:48:04 -0700 Subject: ipc/sem.c: convert undo structures to struct list_head The undo structures contain two linked lists, the attached patch replaces them with generic struct list_head lists. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Manfred Spraul Cc: Nadia Derbey Cc: Pierre Peiffer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 163 ++++++++++++++++++++++++++++++++++---------------------------- 1 file changed, 89 insertions(+), 74 deletions(-) (limited to 'ipc') diff --git a/ipc/sem.c b/ipc/sem.c index e9418df5ff3e..4f26c7157356 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -274,7 +274,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) sma->sem_base = (struct sem *) &sma[1]; /* sma->sem_pending = NULL; */ sma->sem_pending_last = &sma->sem_pending; - /* sma->undo = NULL; */ + INIT_LIST_HEAD(&sma->list_id); sma->sem_nsems = nsems; sma->sem_ctime = get_seconds(); sem_unlock(sma); @@ -536,7 +536,8 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) * (They will be freed without any further action in exit_sem() * or during the next semop.) */ - for (un = sma->undo; un; un = un->id_next) + assert_spin_locked(&sma->sem_perm.lock); + list_for_each_entry(un, &sma->list_id, list_id) un->semid = -1; /* Wake up all pending processes and let them fail with EIDRM. 
*/ @@ -763,9 +764,12 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, for (i = 0; i < nsems; i++) sma->sem_base[i].semval = sem_io[i]; - for (un = sma->undo; un; un = un->id_next) + + assert_spin_locked(&sma->sem_perm.lock); + list_for_each_entry(un, &sma->list_id, list_id) { for (i = 0; i < nsems; i++) un->semadj[i] = 0; + } sma->sem_ctime = get_seconds(); /* maybe some queued-up processes were waiting for this */ update_queue(sma); @@ -797,12 +801,15 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, { int val = arg.val; struct sem_undo *un; + err = -ERANGE; if (val > SEMVMX || val < 0) goto out_unlock; - for (un = sma->undo; un; un = un->id_next) + assert_spin_locked(&sma->sem_perm.lock); + list_for_each_entry(un, &sma->list_id, list_id) un->semadj[semnum] = 0; + curr->semval = val; curr->sempid = task_tgid_vnr(current); sma->sem_ctime = get_seconds(); @@ -952,6 +959,8 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp) return -ENOMEM; spin_lock_init(&undo_list->lock); atomic_set(&undo_list->refcnt, 1); + INIT_LIST_HEAD(&undo_list->list_proc); + current->sysvsem.undo_list = undo_list; } *undo_listp = undo_list; @@ -960,25 +969,30 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp) static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid) { - struct sem_undo **last, *un; - - last = &ulp->proc_list; - un = *last; - while(un != NULL) { - if(un->semid==semid) - break; - if(un->semid==-1) { - *last=un->proc_next; - kfree(un); - } else { - last=&un->proc_next; + struct sem_undo *walk, *tmp; + + assert_spin_locked(&ulp->lock); + list_for_each_entry_safe(walk, tmp, &ulp->list_proc, list_proc) { + if (walk->semid == semid) + return walk; + if (walk->semid == -1) { + list_del(&walk->list_proc); + kfree(walk); } - un=*last; } - return un; + return NULL; } -static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) +/** + * find_alloc_undo - Lookup (and if not present create) undo array + * @ns: namespace + * @semid: semaphore array id + * + * The function looks up (and if not present creates) the undo structure. + * The size of the undo structure depends on the size of the semaphore + * array, thus the alloc path is not that straightforward. + */ +static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) { struct sem_array *sma; struct sem_undo_list *ulp; @@ -997,6 +1011,7 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) goto out; /* no undo structure around - allocate one. */ + /* step 1: figure out the size of the semaphore array */ sma = sem_lock_check(ns, semid); if (IS_ERR(sma)) return ERR_PTR(PTR_ERR(sma)); @@ -1004,15 +1019,19 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) nsems = sma->sem_nsems; sem_getref_and_unlock(sma); + /* step 2: allocate new undo structure */ new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); if (!new) { sem_putref(sma); return ERR_PTR(-ENOMEM); } - new->semadj = (short *) &new[1]; - new->semid = semid; + /* step 3: Acquire the lock on the undo list pointer */ spin_lock(&ulp->lock); + + /* step 4: check for races: someone else allocated the undo struct, + * semaphore array was destroyed. 
+ */ un = lookup_undo(ulp, semid); if (un) { spin_unlock(&ulp->lock); @@ -1028,13 +1047,17 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) un = ERR_PTR(-EIDRM); goto out; } - new->proc_next = ulp->proc_list; - ulp->proc_list = new; - new->id_next = sma->undo; - sma->undo = new; + /* step 5: initialize & link new undo structure */ + new->semadj = (short *) &new[1]; + new->semid = semid; + assert_spin_locked(&ulp->lock); + list_add(&new->list_proc, &ulp->list_proc); + assert_spin_locked(&sma->sem_perm.lock); + list_add(&new->list_id, &sma->list_id); + sem_unlock(sma); - un = new; spin_unlock(&ulp->lock); + un = new; out: return un; } @@ -1090,9 +1113,8 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops, alter = 1; } -retry_undos: if (undos) { - un = find_undo(ns, semid); + un = find_alloc_undo(ns, semid); if (IS_ERR(un)) { error = PTR_ERR(un); goto out_free; @@ -1107,14 +1129,14 @@ retry_undos: } /* - * semid identifiers are not unique - find_undo may have + * semid identifiers are not unique - find_alloc_undo may have * allocated an undo structure, it was invalidated by an RMID - * and now a new array with received the same id. Check and retry. + * and now a new array with received the same id. Check and fail. */ - if (un && un->semid == -1) { - sem_unlock(sma); - goto retry_undos; - } + error = -EIDRM; + if (un && un->semid == -1) + goto out_unlock_free; + error = -EFBIG; if (max >= sma->sem_nsems) goto out_unlock_free; @@ -1243,56 +1265,44 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) */ void exit_sem(struct task_struct *tsk) { - struct sem_undo_list *undo_list; - struct sem_undo *u, **up; - struct ipc_namespace *ns; + struct sem_undo_list *ulp; + struct sem_undo *un, *tmp; - undo_list = tsk->sysvsem.undo_list; - if (!undo_list) + ulp = tsk->sysvsem.undo_list; + if (!ulp) return; tsk->sysvsem.undo_list = NULL; - if (!atomic_dec_and_test(&undo_list->refcnt)) + if (!atomic_dec_and_test(&ulp->refcnt)) return; - ns = tsk->nsproxy->ipc_ns; - /* There's no need to hold the semundo list lock, as current - * is the last task exiting for this undo list. 
- */ - for (up = &undo_list->proc_list; (u = *up); *up = u->proc_next, kfree(u)) { + spin_lock(&ulp->lock); + + list_for_each_entry_safe(un, tmp, &ulp->list_proc, list_proc) { struct sem_array *sma; - int nsems, i; - struct sem_undo *un, **unp; - int semid; - - semid = u->semid; - - if(semid == -1) - continue; - sma = sem_lock(ns, semid); + int i; + + if (un->semid == -1) + goto free; + + sma = sem_lock(tsk->nsproxy->ipc_ns, un->semid); if (IS_ERR(sma)) - continue; + goto free; - if (u->semid == -1) - goto next_entry; + if (un->semid == -1) + goto unlock_free; - BUG_ON(sem_checkid(sma, u->semid)); + BUG_ON(sem_checkid(sma, un->semid)); - /* remove u from the sma->undo list */ - for (unp = &sma->undo; (un = *unp); unp = &un->id_next) { - if (u == un) - goto found; - } - printk ("exit_sem undo list error id=%d\n", u->semid); - goto next_entry; -found: - *unp = un->id_next; - /* perform adjustments registered in u */ - nsems = sma->sem_nsems; - for (i = 0; i < nsems; i++) { + /* remove un from sma->list_id */ + assert_spin_locked(&sma->sem_perm.lock); + list_del(&un->list_id); + + /* perform adjustments registered in un */ + for (i = 0; i < sma->sem_nsems; i++) { struct sem * semaphore = &sma->sem_base[i]; - if (u->semadj[i]) { - semaphore->semval += u->semadj[i]; + if (un->semadj[i]) { + semaphore->semval += un->semadj[i]; /* * Range checks of the new semaphore value, * not defined by sus: @@ -1316,10 +1326,15 @@ found: sma->sem_otime = get_seconds(); /* maybe some queued-up processes were waiting for this */ update_queue(sma); -next_entry: +unlock_free: sem_unlock(sma); +free: + assert_spin_locked(&ulp->lock); + list_del(&un->list_proc); + kfree(un); } - kfree(undo_list); + spin_unlock(&ulp->lock); + kfree(ulp); } #ifdef CONFIG_PROC_FS -- cgit v1.2.1 From 2c0c29d414087f3b021059673c20a7088f5f1fff Mon Sep 17 00:00:00 2001 From: Manfred Spraul Date: Fri, 25 Jul 2008 01:48:05 -0700 Subject: ipc/sem.c: remove unused entries from struct sem_queue sem_queue.sma and sem_queue.id were never used, the attached patch removes them. Signed-off-by: Manfred Spraul Reviewed-by: Nadia Derbey Cc: Pierre Peiffer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'ipc') diff --git a/ipc/sem.c b/ipc/sem.c index 4f26c7157356..d5ce4000ca17 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -1160,12 +1160,10 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops, * task into the pending queue and go to sleep. */ - queue.sma = sma; queue.sops = sops; queue.nsops = nsops; queue.undo = un; queue.pid = task_tgid_vnr(current); - queue.id = semid; queue.alter = alter; if (alter) append_to_queue(sma ,&queue); -- cgit v1.2.1 From a1193f8ec091cd8fd309cc2982abe4499f6f2b4d Mon Sep 17 00:00:00 2001 From: Manfred Spraul Date: Fri, 25 Jul 2008 01:48:06 -0700 Subject: ipc/sem.c: convert sem_array.sem_pending to struct list_head sem_array.sem_pending is a double linked list, the attached patch converts it to struct list_head. 
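For readers less familiar with the kernel list API the patch switches to, here is a minimal, self-contained sketch of the conversion pattern (the struct and function names are illustrative, not taken from the patch): an open-coded tail-pointer FIFO becomes a struct list_head manipulated with list_add_tail() and walked with list_for_each_entry_safe().

	#include <linux/list.h>

	/* Illustrative stand-ins for sem_queue / sem_array. */
	struct demo_waiter {
		struct list_head list;		/* replaces ->next/->prev juggling */
		int status;
	};

	struct demo_array {
		struct list_head pending;	/* replaces sem_pending + sem_pending_last */
	};

	static void demo_init(struct demo_array *a)
	{
		INIT_LIST_HEAD(&a->pending);
	}

	static void demo_enqueue(struct demo_array *a, struct demo_waiter *w)
	{
		list_add_tail(&w->list, &a->pending);	/* FIFO insert at the tail */
	}

	static void demo_flush(struct demo_array *a, int status)
	{
		struct demo_waiter *w, *tmp;

		/* _safe variant because entries are removed while walking */
		list_for_each_entry_safe(w, tmp, &a->pending, list) {
			list_del(&w->list);
			w->status = status;
		}
	}

The generic helpers make the insertion order explicit (list_add() for head, list_add_tail() for tail) and remove the hand-maintained sem_pending_last pointer entirely, which is what the hunks below do for sma->sem_pending.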
[akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Manfred Spraul Reviewed-by: Nadia Derbey Cc: Pierre Peiffer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 92 ++++++++++++++++++++++++--------------------------------------- 1 file changed, 35 insertions(+), 57 deletions(-) (limited to 'ipc') diff --git a/ipc/sem.c b/ipc/sem.c index d5ce4000ca17..3ca232736b31 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -272,8 +272,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) ns->used_sems += nsems; sma->sem_base = (struct sem *) &sma[1]; - /* sma->sem_pending = NULL; */ - sma->sem_pending_last = &sma->sem_pending; + INIT_LIST_HEAD(&sma->sem_pending); INIT_LIST_HEAD(&sma->list_id); sma->sem_nsems = nsems; sma->sem_ctime = get_seconds(); @@ -331,38 +330,6 @@ asmlinkage long sys_semget(key_t key, int nsems, int semflg) return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params); } -/* Manage the doubly linked list sma->sem_pending as a FIFO: - * insert new queue elements at the tail sma->sem_pending_last. - */ -static inline void append_to_queue (struct sem_array * sma, - struct sem_queue * q) -{ - *(q->prev = sma->sem_pending_last) = q; - *(sma->sem_pending_last = &q->next) = NULL; -} - -static inline void prepend_to_queue (struct sem_array * sma, - struct sem_queue * q) -{ - q->next = sma->sem_pending; - *(q->prev = &sma->sem_pending) = q; - if (q->next) - q->next->prev = &q->next; - else /* sma->sem_pending_last == &sma->sem_pending */ - sma->sem_pending_last = &q->next; -} - -static inline void remove_from_queue (struct sem_array * sma, - struct sem_queue * q) -{ - *(q->prev) = q->next; - if (q->next) - q->next->prev = q->prev; - else /* sma->sem_pending_last == &q->next */ - sma->sem_pending_last = q->prev; - q->prev = NULL; /* mark as removed */ -} - /* * Determine whether a sequence of semaphore operations would succeed * all at once. Return 0 if yes, 1 if need to sleep, else return error code. @@ -438,16 +405,15 @@ static void update_queue (struct sem_array * sma) int error; struct sem_queue * q; - q = sma->sem_pending; - while(q) { + q = list_entry(sma->sem_pending.next, struct sem_queue, list); + while (&q->list != &sma->sem_pending) { error = try_atomic_semop(sma, q->sops, q->nsops, q->undo, q->pid); /* Does q->sleeper still need to sleep? */ if (error <= 0) { struct sem_queue *n; - remove_from_queue(sma,q); - q->status = IN_WAKEUP; + /* * Continue scanning. The next operation * that must be checked depends on the type of the @@ -458,11 +424,26 @@ static void update_queue (struct sem_array * sma) * for semaphore values to become 0. * - if the operation didn't modify the array, * then just continue. + * The order of list_del() and reading ->next + * is crucial: In the former case, the list_del() + * must be done first [because we might be the + * first entry in ->sem_pending], in the latter + * case the list_del() must be done last + * [because the list is invalid after the list_del()] */ - if (q->alter) - n = sma->sem_pending; - else - n = q->next; + if (q->alter) { + list_del(&q->list); + n = list_entry(sma->sem_pending.next, + struct sem_queue, list); + } else { + n = list_entry(q->list.next, struct sem_queue, + list); + list_del(&q->list); + } + + /* wake up the waiting thread */ + q->status = IN_WAKEUP; + wake_up_process(q->sleeper); /* hands-off: q will disappear immediately after * writing q->status. 
@@ -471,7 +452,7 @@ static void update_queue (struct sem_array * sma) q->status = error; q = n; } else { - q = q->next; + q = list_entry(q->list.next, struct sem_queue, list); } } } @@ -491,7 +472,7 @@ static int count_semncnt (struct sem_array * sma, ushort semnum) struct sem_queue * q; semncnt = 0; - for (q = sma->sem_pending; q; q = q->next) { + list_for_each_entry(q, &sma->sem_pending, list) { struct sembuf * sops = q->sops; int nsops = q->nsops; int i; @@ -503,13 +484,14 @@ static int count_semncnt (struct sem_array * sma, ushort semnum) } return semncnt; } + static int count_semzcnt (struct sem_array * sma, ushort semnum) { int semzcnt; struct sem_queue * q; semzcnt = 0; - for (q = sma->sem_pending; q; q = q->next) { + list_for_each_entry(q, &sma->sem_pending, list) { struct sembuf * sops = q->sops; int nsops = q->nsops; int i; @@ -529,7 +511,7 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum) static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { struct sem_undo *un; - struct sem_queue *q; + struct sem_queue *q, *t; struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm); /* Invalidate the existing undo structures for this semaphore set. @@ -541,17 +523,14 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) un->semid = -1; /* Wake up all pending processes and let them fail with EIDRM. */ - q = sma->sem_pending; - while(q) { - struct sem_queue *n; - /* lazy remove_from_queue: we are killing the whole queue */ - q->prev = NULL; - n = q->next; + + list_for_each_entry_safe(q, t, &sma->sem_pending, list) { + list_del(&q->list); + q->status = IN_WAKEUP; wake_up_process(q->sleeper); /* doesn't sleep */ smp_wmb(); q->status = -EIDRM; /* hands-off q */ - q = n; } /* Remove the semaphore set from the IDR */ @@ -1166,9 +1145,9 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops, queue.pid = task_tgid_vnr(current); queue.alter = alter; if (alter) - append_to_queue(sma ,&queue); + list_add_tail(&queue.list, &sma->sem_pending); else - prepend_to_queue(sma ,&queue); + list_add(&queue.list, &sma->sem_pending); queue.status = -EINTR; queue.sleeper = current; @@ -1194,7 +1173,6 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops, sma = sem_lock(ns, semid); if (IS_ERR(sma)) { - BUG_ON(queue.prev != NULL); error = -EIDRM; goto out_free; } @@ -1212,7 +1190,7 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops, */ if (timeout && jiffies_left == 0) error = -EAGAIN; - remove_from_queue(sma,&queue); + list_del(&queue.list); goto out_unlock_free; out_unlock_free: -- cgit v1.2.1 From 380af1b33b3ff92df5cda96329b58f5d1b6b5a53 Mon Sep 17 00:00:00 2001 From: Manfred Spraul Date: Fri, 25 Jul 2008 01:48:06 -0700 Subject: ipc/sem.c: rewrite undo list locking The attached patch: - reverses the locking order of ulp->lock and sem_lock: Previously, it was first ulp->lock, then inside sem_lock. Now it's the other way around. - converts the undo structure to rcu. Benefits: - With the old locking order, IPC_RMID could not kfree the undo structures. The stale entries remained in the linked lists and were released later. - The patch fixes a a race in semtimedop(): if both IPC_RMID and a semget() that recreates exactly the same id happen between find_alloc_undo() and sem_lock, then semtimedop() would access already kfree'd memory. 
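As a rough illustration of the lifetime rule this patch introduces (a sketch only, with made-up names; the real code is in the hunks below): lookups run under rcu_read_lock(), the entry is revalidated after the real lock is taken, and freeing goes through call_rcu() so concurrent readers never touch freed memory.

	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>
	#include <linux/slab.h>

	/* Illustrative stand-in for struct sem_undo; names are hypothetical. */
	struct demo_undo {
		struct list_head list;		/* added with list_add_rcu() */
		int semid;			/* -1 once invalidated */
		struct rcu_head rcu;
	};

	static void demo_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct demo_undo, rcu));
	}

	/* Writer: unlink under the lock, defer the kfree past a grace period. */
	static void demo_invalidate(spinlock_t *lock, struct demo_undo *un)
	{
		spin_lock(lock);
		un->semid = -1;
		list_del_rcu(&un->list);
		spin_unlock(lock);
		call_rcu(&un->rcu, demo_free_rcu);
	}

	/* Reader: RCU keeps the entry allocated during the walk, but ->semid
	 * must be rechecked once the object lock is held, because the id may
	 * have been removed and recreated meanwhile.
	 * Caller holds rcu_read_lock(). */
	static struct demo_undo *demo_lookup(struct list_head *head, int semid)
	{
		struct demo_undo *un;

		list_for_each_entry_rcu(un, head, list)
			if (un->semid == semid)
				return un;
		return NULL;
	}
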
[akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Manfred Spraul Reviewed-by: Nadia Derbey Cc: Pierre Peiffer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 147 +++++++++++++++++++++++++++++++++++++++----------------------- 1 file changed, 93 insertions(+), 54 deletions(-) (limited to 'ipc') diff --git a/ipc/sem.c b/ipc/sem.c index 3ca232736b31..bf1bc36cb7ee 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -504,27 +504,35 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum) return semzcnt; } +void free_un(struct rcu_head *head) +{ + struct sem_undo *un = container_of(head, struct sem_undo, rcu); + kfree(un); +} + /* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex * remains locked on exit. */ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { - struct sem_undo *un; - struct sem_queue *q, *t; + struct sem_undo *un, *tu; + struct sem_queue *q, *tq; struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm); - /* Invalidate the existing undo structures for this semaphore set. - * (They will be freed without any further action in exit_sem() - * or during the next semop.) - */ + /* Free the existing undo structures for this semaphore set. */ assert_spin_locked(&sma->sem_perm.lock); - list_for_each_entry(un, &sma->list_id, list_id) + list_for_each_entry_safe(un, tu, &sma->list_id, list_id) { + list_del(&un->list_id); + spin_lock(&un->ulp->lock); un->semid = -1; + list_del_rcu(&un->list_proc); + spin_unlock(&un->ulp->lock); + call_rcu(&un->rcu, free_un); + } /* Wake up all pending processes and let them fail with EIDRM. */ - - list_for_each_entry_safe(q, t, &sma->sem_pending, list) { + list_for_each_entry_safe(q, tq, &sma->sem_pending, list) { list_del(&q->list); q->status = IN_WAKEUP; @@ -948,16 +956,11 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp) static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid) { - struct sem_undo *walk, *tmp; + struct sem_undo *walk; - assert_spin_locked(&ulp->lock); - list_for_each_entry_safe(walk, tmp, &ulp->list_proc, list_proc) { + list_for_each_entry_rcu(walk, &ulp->list_proc, list_proc) { if (walk->semid == semid) return walk; - if (walk->semid == -1) { - list_del(&walk->list_proc); - kfree(walk); - } } return NULL; } @@ -970,6 +973,8 @@ static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid) * The function looks up (and if not present creates) the undo structure. * The size of the undo structure depends on the size of the semaphore * array, thus the alloc path is not that straightforward. + * Lifetime-rules: sem_undo is rcu-protected, on success, the function + * performs a rcu_read_lock(). */ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) { @@ -983,11 +988,13 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) if (error) return ERR_PTR(error); + rcu_read_lock(); spin_lock(&ulp->lock); un = lookup_undo(ulp, semid); spin_unlock(&ulp->lock); if (likely(un!=NULL)) goto out; + rcu_read_unlock(); /* no undo structure around - allocate one. 
*/ /* step 1: figure out the size of the semaphore array */ @@ -1005,38 +1012,38 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) return ERR_PTR(-ENOMEM); } - /* step 3: Acquire the lock on the undo list pointer */ - spin_lock(&ulp->lock); - - /* step 4: check for races: someone else allocated the undo struct, - * semaphore array was destroyed. - */ - un = lookup_undo(ulp, semid); - if (un) { - spin_unlock(&ulp->lock); - kfree(new); - sem_putref(sma); - goto out; - } + /* step 3: Acquire the lock on semaphore array */ sem_lock_and_putref(sma); if (sma->sem_perm.deleted) { sem_unlock(sma); - spin_unlock(&ulp->lock); kfree(new); un = ERR_PTR(-EIDRM); goto out; } + spin_lock(&ulp->lock); + + /* + * step 4: check for races: did someone else allocate the undo struct? + */ + un = lookup_undo(ulp, semid); + if (un) { + kfree(new); + goto success; + } /* step 5: initialize & link new undo structure */ new->semadj = (short *) &new[1]; + new->ulp = ulp; new->semid = semid; assert_spin_locked(&ulp->lock); - list_add(&new->list_proc, &ulp->list_proc); + list_add_rcu(&new->list_proc, &ulp->list_proc); assert_spin_locked(&sma->sem_perm.lock); list_add(&new->list_id, &sma->list_id); + un = new; - sem_unlock(sma); +success: spin_unlock(&ulp->lock); - un = new; + rcu_read_lock(); + sem_unlock(sma); out: return un; } @@ -1103,6 +1110,8 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops, sma = sem_lock_check(ns, semid); if (IS_ERR(sma)) { + if (un) + rcu_read_unlock(); error = PTR_ERR(sma); goto out_free; } @@ -1111,10 +1120,26 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops, * semid identifiers are not unique - find_alloc_undo may have * allocated an undo structure, it was invalidated by an RMID * and now a new array with received the same id. Check and fail. + * This case can be detected checking un->semid. The existance of + * "un" itself is guaranteed by rcu. */ error = -EIDRM; - if (un && un->semid == -1) - goto out_unlock_free; + if (un) { + if (un->semid == -1) { + rcu_read_unlock(); + goto out_unlock_free; + } else { + /* + * rcu lock can be released, "un" cannot disappear: + * - sem_lock is acquired, thus IPC_RMID is + * impossible. + * - exit_sem is impossible, it always operates on + * current (or a dead task). 
+ */ + + rcu_read_unlock(); + } + } error = -EFBIG; if (max >= sma->sem_nsems) @@ -1242,7 +1267,6 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) void exit_sem(struct task_struct *tsk) { struct sem_undo_list *ulp; - struct sem_undo *un, *tmp; ulp = tsk->sysvsem.undo_list; if (!ulp) @@ -1252,28 +1276,47 @@ void exit_sem(struct task_struct *tsk) if (!atomic_dec_and_test(&ulp->refcnt)) return; - spin_lock(&ulp->lock); - - list_for_each_entry_safe(un, tmp, &ulp->list_proc, list_proc) { + for (;;) { struct sem_array *sma; + struct sem_undo *un; + int semid; int i; - if (un->semid == -1) - goto free; + rcu_read_lock(); + un = list_entry(rcu_dereference(ulp->list_proc.next), + struct sem_undo, list_proc); + if (&un->list_proc == &ulp->list_proc) + semid = -1; + else + semid = un->semid; + rcu_read_unlock(); - sma = sem_lock(tsk->nsproxy->ipc_ns, un->semid); - if (IS_ERR(sma)) - goto free; + if (semid == -1) + break; - if (un->semid == -1) - goto unlock_free; + sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid); - BUG_ON(sem_checkid(sma, un->semid)); + /* exit_sem raced with IPC_RMID, nothing to do */ + if (IS_ERR(sma)) + continue; - /* remove un from sma->list_id */ + un = lookup_undo(ulp, semid); + if (un == NULL) { + /* exit_sem raced with IPC_RMID+semget() that created + * exactly the same semid. Nothing to do. + */ + sem_unlock(sma); + continue; + } + + /* remove un from the linked lists */ assert_spin_locked(&sma->sem_perm.lock); list_del(&un->list_id); + spin_lock(&ulp->lock); + list_del_rcu(&un->list_proc); + spin_unlock(&ulp->lock); + /* perform adjustments registered in un */ for (i = 0; i < sma->sem_nsems; i++) { struct sem * semaphore = &sma->sem_base[i]; @@ -1302,14 +1345,10 @@ void exit_sem(struct task_struct *tsk) sma->sem_otime = get_seconds(); /* maybe some queued-up processes were waiting for this */ update_queue(sma); -unlock_free: sem_unlock(sma); -free: - assert_spin_locked(&ulp->lock); - list_del(&un->list_proc); - kfree(un); + + call_rcu(&un->rcu, free_un); } - spin_unlock(&ulp->lock); kfree(ulp); } -- cgit v1.2.1 From f1a43f93f0f3bab418800eaccb9e2e3b5427e173 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Fri, 25 Jul 2008 01:48:07 -0700 Subject: ipc: use simple_read_from_buffer() Also this patch kills unneccesary trailing NULL character. 
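For context, simple_read_from_buffer() bundles the offset clamping, partial-read handling and copy_to_user() error handling that the open-coded version below duplicates. A minimal sketch of a read handler built on it (illustrative names, not from this patch):

	#include <linux/fs.h>
	#include <linux/kernel.h>
	#include <linux/uaccess.h>

	/* Illustrative read handler: expose a small formatted text snapshot. */
	static ssize_t demo_read(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
	{
		char kbuf[64];
		int len;

		len = scnprintf(kbuf, sizeof(kbuf), "state: %d\n", 42);

		/* Handles *ppos bounds, short reads and copy_to_user() failures. */
		return simple_read_from_buffer(buf, count, ppos, kbuf, len);
	}

Passing strlen(buffer) rather than strlen(buffer)+1 as the available length is also what drops the trailing NUL mentioned above.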
Signed-off-by: Akinobu Mita Cc: Nadia Derbey Cc: Manfred Spraul Cc: Pierre Peiffer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/mqueue.c | 25 +++++++------------------ 1 file changed, 7 insertions(+), 18 deletions(-) (limited to 'ipc') diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 3e84b958186b..1fdc2eb2f6d8 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -314,15 +314,11 @@ static int mqueue_unlink(struct inode *dir, struct dentry *dentry) * through std routines) */ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, - size_t count, loff_t * off) + size_t count, loff_t *off) { struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode); char buffer[FILENT_SIZE]; - size_t slen; - loff_t o; - - if (!count) - return 0; + ssize_t ret; spin_lock(&info->lock); snprintf(buffer, sizeof(buffer), @@ -335,21 +331,14 @@ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, pid_vnr(info->notify_owner)); spin_unlock(&info->lock); buffer[sizeof(buffer)-1] = '\0'; - slen = strlen(buffer)+1; - - o = *off; - if (o > slen) - return 0; - - if (o + count > slen) - count = slen - o; - if (copy_to_user(u_data, buffer + o, count)) - return -EFAULT; + ret = simple_read_from_buffer(u_data, count, off, buffer, + strlen(buffer)); + if (ret <= 0) + return ret; - *off = o + count; filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME; - return count; + return ret; } static int mqueue_flush_file(struct file *filp, fl_owner_t id) -- cgit v1.2.1 From 9eefe520c814f6f62c5d36a2ddcd3fb99dfdb30e Mon Sep 17 00:00:00 2001 From: Nadia Derbey Date: Fri, 25 Jul 2008 01:48:08 -0700 Subject: ipc: do not use a negative value to re-enable msgmni automatic recomputing This patch proposes an alternative to the "magical positive-versus-negative number trick" Andrew complained about last week in http://lkml.org/lkml/2008/6/24/418. This had been introduced with the patches that scale msgmni to the amount of lowmem. With these patches, msgmni has a registered notification routine that recomputes msgmni value upon memory add/remove or ipc namespace creation/ removal. When msgmni is changed from user space (i.e. value written to the proc file), that notification routine is unregistered, and the way to make it registered back is to write a negative value into the proc file. This is the "magical positive-versus-negative number trick". To fix this, a new proc file is introduced: /proc/sys/kernel/auto_msgmni. This file acts as ON/OFF for msgmni automatic recomputing. With this patch, the process is the following: 1) kernel boots in "automatic recomputing mode" /proc/sys/kernel/msgmni contains the value that has been computed (depends on lowmem) /proc/sys/kernel/automatic_msgmni contains "1" 2) echo > /proc/sys/kernel/msgmni . sets msg_ctlmni to . de-activates automatic recomputing (i.e. if, say, some memory is added msgmni won't be recomputed anymore) . /proc/sys/kernel/automatic_msgmni now contains "0" 3) echo "0" > /proc/sys/kernel/automatic_msgmni . de-activates msgmni automatic recomputing this has the same effect as 2) except that msg_ctlmni's value stays blocked at its current value) 3) echo "1" > /proc/sys/kernel/automatic_msgmni . recomputes msgmni's value based on the current available memory size and number of ipc namespaces . re-activates automatic recomputing for msgmni. 
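A compact sketch of the ON/OFF toggle idea described in the steps above (hypothetical names; the actual handler is proc_ipcauto_dointvec_minmax in the hunks below): writing 1 registers the recomputation callback on a notifier chain, writing 0 unregisters it.

	#include <linux/notifier.h>

	static BLOCKING_NOTIFIER_HEAD(demo_chain);

	static int demo_callback(struct notifier_block *self,
				 unsigned long ev, void *arg)
	{
		/* recompute the tunable here */
		return NOTIFY_OK;
	}

	static struct notifier_block demo_nb = {
		.notifier_call = demo_callback,
	};

	static int demo_auto;	/* the 0/1 value exposed through procfs */

	/* Called after a validated 0/1 write to the proc file. */
	static void demo_set_auto(int val)
	{
		if (!val)
			blocking_notifier_chain_unregister(&demo_chain, &demo_nb);
		else
			blocking_notifier_chain_cond_register(&demo_chain, &demo_nb);
		demo_auto = val;
	}
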
Signed-off-by: Nadia Derbey Cc: Solofo Ramangalahy Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/ipc_sysctl.c | 72 ++++++++++++++++++++++++++++++++++++++++++---------- ipc/ipcns_notifier.c | 20 +++++++++++---- 2 files changed, 74 insertions(+), 18 deletions(-) (limited to 'ipc') diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c index d3497465cc0a..69bc85978ba0 100644 --- a/ipc/ipc_sysctl.c +++ b/ipc/ipc_sysctl.c @@ -27,15 +27,17 @@ static void *get_ipc(ctl_table *table) } /* - * Routine that is called when a tunable has successfully been changed by - * hand and it has a callback routine registered on the ipc namespace notifier - * chain: we don't want such tunables to be recomputed anymore upon memory - * add/remove or ipc namespace creation/removal. - * They can come back to a recomputable state by being set to a <0 value. + * Routine that is called when the file "auto_msgmni" has successfully been + * written. + * Two values are allowed: + * 0: unregister msgmni's callback routine from the ipc namespace notifier + * chain. This means that msgmni won't be recomputed anymore upon memory + * add/remove or ipc namespace creation/removal. + * 1: register back the callback routine. */ -static void tunable_set_callback(int val) +static void ipc_auto_callback(int val) { - if (val >= 0) + if (!val) unregister_ipcns_notifier(current->nsproxy->ipc_ns); else { /* @@ -71,7 +73,12 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write, rc = proc_dointvec(&ipc_table, write, filp, buffer, lenp, ppos); if (write && !rc && lenp_bef == *lenp) - tunable_set_callback(*((int *)(ipc_table.data))); + /* + * Tunable has successfully been changed by hand. Disable its + * automatic adjustment. This simply requires unregistering + * the notifiers that trigger recalculation. + */ + unregister_ipcns_notifier(current->nsproxy->ipc_ns); return rc; } @@ -87,10 +94,39 @@ static int proc_ipc_doulongvec_minmax(ctl_table *table, int write, lenp, ppos); } +static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write, + struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct ctl_table ipc_table; + size_t lenp_bef = *lenp; + int oldval; + int rc; + + memcpy(&ipc_table, table, sizeof(ipc_table)); + ipc_table.data = get_ipc(table); + oldval = *((int *)(ipc_table.data)); + + rc = proc_dointvec_minmax(&ipc_table, write, filp, buffer, lenp, ppos); + + if (write && !rc && lenp_bef == *lenp) { + int newval = *((int *)(ipc_table.data)); + /* + * The file "auto_msgmni" has correctly been set. + * React by (un)registering the corresponding tunable, if the + * value has changed. 
+ */ + if (newval != oldval) + ipc_auto_callback(newval); + } + + return rc; +} + #else #define proc_ipc_doulongvec_minmax NULL #define proc_ipc_dointvec NULL #define proc_ipc_callback_dointvec NULL +#define proc_ipcauto_dointvec_minmax NULL #endif #ifdef CONFIG_SYSCTL_SYSCALL @@ -142,14 +178,11 @@ static int sysctl_ipc_registered_data(ctl_table *table, int __user *name, rc = sysctl_ipc_data(table, name, nlen, oldval, oldlenp, newval, newlen); - if (newval && newlen && rc > 0) { + if (newval && newlen && rc > 0) /* * Tunable has successfully been changed from userland */ - int *data = get_ipc(table); - - tunable_set_callback(*data); - } + unregister_ipcns_notifier(current->nsproxy->ipc_ns); return rc; } @@ -158,6 +191,9 @@ static int sysctl_ipc_registered_data(ctl_table *table, int __user *name, #define sysctl_ipc_registered_data NULL #endif +static int zero; +static int one = 1; + static struct ctl_table ipc_kern_table[] = { { .ctl_name = KERN_SHMMAX, @@ -222,6 +258,16 @@ static struct ctl_table ipc_kern_table[] = { .proc_handler = proc_ipc_dointvec, .strategy = sysctl_ipc_data, }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "auto_msgmni", + .data = &init_ipc_ns.auto_msgmni, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_ipcauto_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, {} }; diff --git a/ipc/ipcns_notifier.c b/ipc/ipcns_notifier.c index 70ff09183f7b..b9b31a4f77e1 100644 --- a/ipc/ipcns_notifier.c +++ b/ipc/ipcns_notifier.c @@ -55,25 +55,35 @@ static int ipcns_callback(struct notifier_block *self, int register_ipcns_notifier(struct ipc_namespace *ns) { + int rc; + memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb)); ns->ipcns_nb.notifier_call = ipcns_callback; ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI; - return blocking_notifier_chain_register(&ipcns_chain, &ns->ipcns_nb); + rc = blocking_notifier_chain_register(&ipcns_chain, &ns->ipcns_nb); + if (!rc) + ns->auto_msgmni = 1; + return rc; } int cond_register_ipcns_notifier(struct ipc_namespace *ns) { + int rc; + memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb)); ns->ipcns_nb.notifier_call = ipcns_callback; ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI; - return blocking_notifier_chain_cond_register(&ipcns_chain, + rc = blocking_notifier_chain_cond_register(&ipcns_chain, &ns->ipcns_nb); + if (!rc) + ns->auto_msgmni = 1; + return rc; } -int unregister_ipcns_notifier(struct ipc_namespace *ns) +void unregister_ipcns_notifier(struct ipc_namespace *ns) { - return blocking_notifier_chain_unregister(&ipcns_chain, - &ns->ipcns_nb); + blocking_notifier_chain_unregister(&ipcns_chain, &ns->ipcns_nb); + ns->auto_msgmni = 0; } int ipcns_notify(unsigned long val) -- cgit v1.2.1 From 51cc50685a4275c6a02653670af9f108a64e01cf Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Fri, 25 Jul 2008 19:45:34 -0700 Subject: SL*B: drop kmem cache argument from constructor Kmem cache passed to constructor is only needed for constructors that are themselves multiplexeres. Nobody uses this "feature", nor does anybody uses passed kmem cache in non-trivial way, so pass only pointer to object. Non-trivial places are: arch/powerpc/mm/init_64.c arch/powerpc/mm/hugetlbpage.c This is flag day, yes. 
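Concretely, slab constructors go from taking (struct kmem_cache *, void *) to taking only the object pointer. A minimal sketch of the new-style usage (illustrative cache and struct names, not from this patch):

	#include <linux/slab.h>
	#include <linux/init.h>
	#include <linux/errno.h>

	struct demo_obj {
		int refcount;
	};

	/* New-style constructor: only the object pointer is passed in. */
	static void demo_init_once(void *foo)
	{
		struct demo_obj *p = foo;

		p->refcount = 0;
	}

	static struct kmem_cache *demo_cache;

	static int __init demo_cache_setup(void)
	{
		demo_cache = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
					       0, SLAB_HWCACHE_ALIGN,
					       demo_init_once);
		return demo_cache ? 0 : -ENOMEM;
	}

The ipc/mqueue.c hunk below is the corresponding one-argument conversion of init_once().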
Signed-off-by: Alexey Dobriyan Acked-by: Pekka Enberg Acked-by: Christoph Lameter Cc: Jon Tollefson Cc: Nick Piggin Cc: Matt Mackall [akpm@linux-foundation.org: fix arch/powerpc/mm/hugetlbpage.c] [akpm@linux-foundation.org: fix mm/slab.c] [akpm@linux-foundation.org: fix ubifs] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/mqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'ipc') diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 1fdc2eb2f6d8..474984f9e032 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -207,7 +207,7 @@ static int mqueue_get_sb(struct file_system_type *fs_type, return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; -- cgit v1.2.1 From f419a2e3b64def707e1384ee38abb77f99af5f6d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 22 Jul 2008 00:07:17 -0400 Subject: [PATCH] kill nameidata passing to permission(), rename to inode_permission() Incidentally, the name that gives hundreds of false positives on grep is not a good idea... Signed-off-by: Al Viro --- ipc/mqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'ipc') diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 474984f9e032..96fb36cd9874 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -638,7 +638,7 @@ static int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, return ERR_PTR(-EINVAL); } - if (permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE], NULL)) { + if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) { dput(dentry); mntput(mqueue_mnt); return ERR_PTR(-EACCES); -- cgit v1.2.1