-rw-r--r--  include/linux/mm.h  |  4
-rw-r--r--  mm/memory.c         | 14
2 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 39c17a2efcea..6e25f4916d6f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -301,7 +301,8 @@ struct vm_fault {
* the 'address' */
pte_t orig_pte; /* Value of PTE at the time of fault */
- struct page *cow_page; /* Handler may choose to COW */
+ struct page *cow_page; /* Page handler may use for COW fault */
+ struct mem_cgroup *memcg; /* Cgroup cow_page belongs to */
struct page *page; /* ->fault handlers should return a
* page here, unless VM_FAULT_NOPAGE
* is set (which is also implied by
@@ -1103,6 +1104,7 @@ static inline void clear_page_pfmemalloc(struct page *page)
#define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */
#define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */
#define VM_FAULT_DAX_LOCKED 0x1000 /* ->fault has locked DAX entry */
+#define VM_FAULT_DONE_COW 0x2000 /* ->fault has fully handled COW */
#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
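A hypothetical ->fault handler (not part of this commit) sketches how the new fields fit together: when vmf->cow_page is set, the handler can fill the page, map it itself with alloc_set_pte() using the memcg the caller already charged, and return VM_FAULT_DONE_COW so that do_cow_fault() skips its own copy/map path. For brevity the sketch just zeroes the page, as a handler would for a hole; a real filesystem would copy the backing data instead, and the read-fault path is omitted.

/*
 * Sketch only: assumes the usual mm headers; not a definitive
 * implementation of any in-tree handler.
 */
static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int ret;

	if (!vmf->cow_page)
		return VM_FAULT_SIGBUS;	/* read path omitted in this sketch */

	/* Fill the private copy; a real handler would copy backing data. */
	clear_user_highpage(vmf->cow_page, vmf->address);
	__SetPageUptodate(vmf->cow_page);

	/* Map the private copy; this also commits the memcg charge. */
	ret = alloc_set_pte(vmf, vmf->memcg, vmf->cow_page);
	if (vmf->pte)
		pte_unmap_unlock(vmf->pte, vmf->ptl);
	if (ret)
		return ret;	/* do_cow_fault() will cancel the charge */

	return VM_FAULT_DONE_COW;
}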
diff --git a/mm/memory.c b/mm/memory.c
index cf74f7ca911b..02504cd4ca0e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2844,9 +2844,8 @@ static int __do_fault(struct vm_fault *vmf)
int ret;
ret = vma->vm_ops->fault(vma, vmf);
- if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
- return ret;
- if (ret & VM_FAULT_DAX_LOCKED)
+ if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
+ VM_FAULT_DAX_LOCKED | VM_FAULT_DONE_COW)))
return ret;
if (unlikely(PageHWPoison(vmf->page))) {
@@ -3226,7 +3225,6 @@ static int do_read_fault(struct vm_fault *vmf)
static int do_cow_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- struct mem_cgroup *memcg;
int ret;
if (unlikely(anon_vma_prepare(vma)))
@@ -3237,7 +3235,7 @@ static int do_cow_fault(struct vm_fault *vmf)
return VM_FAULT_OOM;
if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
- &memcg, false)) {
+ &vmf->memcg, false)) {
put_page(vmf->cow_page);
return VM_FAULT_OOM;
}
@@ -3245,12 +3243,14 @@ static int do_cow_fault(struct vm_fault *vmf)
ret = __do_fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
goto uncharge_out;
+ if (ret & VM_FAULT_DONE_COW)
+ return ret;
if (!(ret & VM_FAULT_DAX_LOCKED))
copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
__SetPageUptodate(vmf->cow_page);
- ret |= alloc_set_pte(vmf, memcg, vmf->cow_page);
+ ret |= alloc_set_pte(vmf, vmf->memcg, vmf->cow_page);
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
if (!(ret & VM_FAULT_DAX_LOCKED)) {
@@ -3263,7 +3263,7 @@ static int do_cow_fault(struct vm_fault *vmf)
goto uncharge_out;
return ret;
uncharge_out:
- mem_cgroup_cancel_charge(vmf->cow_page, memcg, false);
+ mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false);
put_page(vmf->cow_page);
return ret;
}
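Two details in the do_cow_fault() flow above are worth noting: the mem_cgroup pointer moves from a local variable into struct vm_fault so that the charge taken before ->fault() is visible to a handler that wants to map vmf->cow_page itself, and the VM_FAULT_DONE_COW check returns before the uncharge_out label, since a handler that completed the COW is expected to have committed the charge (e.g. via alloc_set_pte()), so it must not be cancelled.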