Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	46
1 file changed, 38 insertions, 8 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 9d21afd692b9..870971e20967 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -909,13 +909,33 @@ static void wake_up_page_bit(struct page *page, int bit_nr)
 	wait_queue_head_t *q = page_waitqueue(page);
 	struct wait_page_key key;
 	unsigned long flags;
+	wait_queue_entry_t bookmark;
 
 	key.page = page;
 	key.bit_nr = bit_nr;
 	key.page_match = 0;
 
+	bookmark.flags = 0;
+	bookmark.private = NULL;
+	bookmark.func = NULL;
+	INIT_LIST_HEAD(&bookmark.entry);
+
 	spin_lock_irqsave(&q->lock, flags);
-	__wake_up_locked_key(q, TASK_NORMAL, &key);
+	__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
+
+	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
+		/*
+		 * Take a breather from holding the lock,
+		 * allow pages that finish wake up asynchronously
+		 * to acquire the lock and remove themselves
+		 * from wait queue
+		 */
+		spin_unlock_irqrestore(&q->lock, flags);
+		cpu_relax();
+		spin_lock_irqsave(&q->lock, flags);
+		__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
+	}
+
 	/*
 	 * It is possible for other pages to have collided on the waitqueue
 	 * hash, so in that case check for a page match. That prevents a long-
@@ -1897,9 +1917,8 @@ static void shrink_readahead_size_eio(struct file *filp,
 }
 
 /**
- * do_generic_file_read - generic file read routine
- * @filp:	the file to read
- * @ppos:	current file position
+ * generic_file_buffered_read - generic file read routine
+ * @iocb:	the iocb to read
  * @iter:	data destination
  * @written:	already copied
  *
@@ -1909,12 +1928,14 @@ static void shrink_readahead_size_eio(struct file *filp,
  * This is really ugly. But the goto's actually try to clarify some
  * of the logic when it comes to error handling etc.
  */
-static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
+static ssize_t generic_file_buffered_read(struct kiocb *iocb,
 		struct iov_iter *iter, ssize_t written)
 {
+	struct file *filp = iocb->ki_filp;
 	struct address_space *mapping = filp->f_mapping;
 	struct inode *inode = mapping->host;
 	struct file_ra_state *ra = &filp->f_ra;
+	loff_t *ppos = &iocb->ki_pos;
 	pgoff_t index;
 	pgoff_t last_index;
 	pgoff_t prev_index;
@@ -1947,6 +1968,8 @@ find_page:
 
 		page = find_get_page(mapping, index);
 		if (!page) {
+			if (iocb->ki_flags & IOCB_NOWAIT)
+				goto would_block;
 			page_cache_sync_readahead(mapping,
 					ra, filp,
 					index, last_index - index);
@@ -1960,6 +1983,11 @@ find_page:
 					index, last_index - index);
 		}
 		if (!PageUptodate(page)) {
+			if (iocb->ki_flags & IOCB_NOWAIT) {
+				put_page(page);
+				goto would_block;
+			}
+
 			/*
 			 * See comment in do_read_cache_page on why
 			 * wait_on_page_locked is used to avoid unnecessarily
@@ -2141,6 +2169,8 @@ no_cached_page:
 		goto readpage;
 	}
 
+would_block:
+	error = -EAGAIN;
 out:
 	ra->prev_pos = prev_index;
 	ra->prev_pos <<= PAGE_SHIFT;
@@ -2162,14 +2192,14 @@ out:
 ssize_t
 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
-	struct file *file = iocb->ki_filp;
-	ssize_t retval = 0;
 	size_t count = iov_iter_count(iter);
+	ssize_t retval = 0;
 
 	if (!count)
 		goto out; /* skip atime */
 
 	if (iocb->ki_flags & IOCB_DIRECT) {
+		struct file *file = iocb->ki_filp;
 		struct address_space *mapping = file->f_mapping;
 		struct inode *inode = mapping->host;
 		loff_t size;
@@ -2210,7 +2240,7 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 			goto out;
 	}
 
-	retval = do_generic_file_read(file, &iocb->ki_pos, iter, retval);
+	retval = generic_file_buffered_read(iocb, iter, retval);
 out:
 	return retval;
 }
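
Note on the IOCB_NOWAIT checks above: they are what let a buffered read return -EAGAIN instead of sleeping when the requested range is not already up to date in the page cache. As a rough illustration of how that would_block path can be reached from user space, here is a minimal sketch using preadv2() with RWF_NOWAIT; it assumes a kernel and glibc recent enough to accept RWF_NOWAIT on regular files (older kernels reject it with EOPNOTSUPP), and the file path and fallback flag definition are purely illustrative, not part of this commit.

/* Sketch: issue a non-blocking buffered read with preadv2(RWF_NOWAIT).
 * If the data is not already in the page cache, the kernel's would_block
 * path above returns -EAGAIN instead of sleeping on page I/O.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef RWF_NOWAIT
#define RWF_NOWAIT 0x00000008	/* value from include/uapi/linux/fs.h */
#endif

int main(void)
{
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("/var/log/syslog", O_RDONLY);	/* illustrative path */

	if (fd < 0)
		return 1;

	ssize_t n = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (n < 0 && errno == EAGAIN)
		printf("data not cached; a blocking read would have slept\n");
	else if (n >= 0)
		printf("read %zd bytes from the page cache\n", n);

	close(fd);
	return 0;
}

The bookmark loop in wake_up_page_bit(), by contrast, is purely internal: as its comment says, it periodically drops q->lock so that already-woken waiters can dequeue themselves, bounding how long the wakeup side holds a contended waitqueue lock.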

