author		Jan Kara <jack@suse.cz>	2016-10-19 14:48:38 +0200
committer	Dan Williams <dan.j.williams@intel.com>	2016-12-26 20:29:25 -0800
commit		f449b936f1aff7696b24a338f493d5cee8d48d55 (patch)
tree		5eb066bfc0c39f9485c83a7c28da77533fef4e11 /fs/dax.c
parent		e3fce68cdbed297d927e993b3ea7b8b1cee545da (diff)
dax: Finish fault completely when loading holes
The only case when we do not finish the page fault completely is when we
are loading hole pages into a radix tree. Avoid this special case and
finish the fault inside the DAX fault handler in that case as well; this
will allow for easier iomap handling.

Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
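For readers piecing the hunks together, the helper ends up looking like
this after the patch (a sketch assembled from the diff below; the
find_or_create_page() call is surrounding context that the visible hunks
abbreviate, not part of the change itself):

	static int dax_load_hole(struct address_space *mapping, void **entry,
				 struct vm_fault *vmf)
	{
		struct page *page;
		int ret;

		/* Hole page already exists? Return it... */
		if (!radix_tree_exceptional_entry(*entry)) {
			page = *entry;
			goto out;
		}

		/* This will replace locked radix tree entry with a hole page */
		page = find_or_create_page(mapping, vmf->pgoff,
					   vmf->gfp_mask | __GFP_ZERO);
		if (!page)
			return VM_FAULT_OOM;
	 out:
		vmf->page = page;
		ret = finish_fault(vmf);	/* install the PTE for vmf->page */
		vmf->page = NULL;
		*entry = page;			/* caller's entry now refers to the hole page */
		if (!ret) {
			/* Grab reference for PTE that is now referencing the page */
			get_page(page);
			return VM_FAULT_NOPAGE;	/* nothing left for the fault core to do */
		}
		return ret;
	}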
Diffstat (limited to 'fs/dax.c')
-rw-r--r--	fs/dax.c | 27 ++++++++++++++++++++----------
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index 08e15db28b79..bfec6f2ef613 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -539,15 +539,16 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
  * otherwise it will simply fall out of the page cache under memory
  * pressure without ever having been dirtied.
  */
-static int dax_load_hole(struct address_space *mapping, void *entry,
+static int dax_load_hole(struct address_space *mapping, void **entry,
 			 struct vm_fault *vmf)
 {
 	struct page *page;
+	int ret;
 
 	/* Hole page already exists? Return it... */
-	if (!radix_tree_exceptional_entry(entry)) {
-		vmf->page = entry;
-		return VM_FAULT_LOCKED;
+	if (!radix_tree_exceptional_entry(*entry)) {
+		page = *entry;
+		goto out;
 	}
 
 	/* This will replace locked radix tree entry with a hole page */
@@ -555,8 +556,17 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
 			   vmf->gfp_mask | __GFP_ZERO);
 	if (!page)
 		return VM_FAULT_OOM;
+ out:
 	vmf->page = page;
-	return VM_FAULT_LOCKED;
+	ret = finish_fault(vmf);
+	vmf->page = NULL;
+	*entry = page;
+	if (!ret) {
+		/* Grab reference for PTE that is now referencing the page */
+		get_page(page);
+		return VM_FAULT_NOPAGE;
+	}
+	return ret;
 }
 
 static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
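For contrast, the pre-patch shape of the helper, reassembled from the
removed lines above: it handed a locked page back via VM_FAULT_LOCKED and
left finishing the fault to the generic code, which in turn forced
dax_iomap_fault() to special-case the entry unlock:

	static int dax_load_hole(struct address_space *mapping, void *entry,
				 struct vm_fault *vmf)
	{
		struct page *page;

		/* Hole page already exists? Return it... */
		if (!radix_tree_exceptional_entry(entry)) {
			vmf->page = entry;
			return VM_FAULT_LOCKED;
		}

		/* This will replace locked radix tree entry with a hole page */
		page = find_or_create_page(mapping, vmf->pgoff,
					   vmf->gfp_mask | __GFP_ZERO);
		if (!page)
			return VM_FAULT_OOM;
		vmf->page = page;
		return VM_FAULT_LOCKED;
	}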
@@ -1163,8 +1173,8 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 	case IOMAP_UNWRITTEN:
 	case IOMAP_HOLE:
 		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
-			vmf_ret = dax_load_hole(mapping, entry, vmf);
-			break;
+			vmf_ret = dax_load_hole(mapping, &entry, vmf);
+			goto finish_iomap;
 		}
 		/*FALLTHRU*/
 	default:
@@ -1185,8 +1195,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		}
 	}
  unlock_entry:
-	if (vmf_ret != VM_FAULT_LOCKED || error)
-		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
  out:
 	if (error == -ENOMEM)
 		return VM_FAULT_OOM | major;
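Two caller-side details are worth spelling out. First, the hole path now
jumps to finish_iomap rather than break, so the filesystem's ->iomap_end()
callback still runs even though the fault is already complete. Second,
the unconditional put at unlock_entry is safe because dax_load_hole()
stores the hole page back through *entry; put_locked_mapping_entry() then
unlocks and releases that page rather than a radix tree slot. A sketch of
that helper, assuming its 4.9-era definition in fs/dax.c:

	static void put_locked_mapping_entry(struct address_space *mapping,
					     pgoff_t index, void *entry)
	{
		if (!radix_tree_exceptional_entry(entry)) {
			/* Hole page stored by dax_load_hole() */
			unlock_page(entry);
			put_page(entry);
		} else {
			/* Exceptional DAX entry: unlock the radix tree slot */
			dax_unlock_mapping_entry(mapping, index);
		}
	}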