| author | Sean Paul <seanpaul@chromium.org> | 2019-04-10 15:50:49 -0400 |
|---|---|---|
| committer | Sean Paul <seanpaul@chromium.org> | 2019-04-10 15:50:49 -0400 |
| commit | 3bfaf1f7044c6a3b1e00fcad2d0529f0da449d67 (patch) | |
| tree | 490bc7e8aa29c2ee41eb4813683f1aa23971abd3 /fs/dax.c | |
| parent | 669105a74a287b14cdec04c64eb51db1bb890f64 (diff) | |
| parent | 14d2bd53a47a7e1cb3e03d00a6b952734cf90f3f (diff) | |
Merge drm/drm-next into drm-misc-next
Finally have a reason for a backmerge other than "it's been a while"!
Backmerging drm-next to -misc-next to facilitate Rob Herring's work on
Panfrost.
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Diffstat (limited to 'fs/dax.c')
| -rw-r--r-- | fs/dax.c | 25 |
1 file changed, 11 insertions, 14 deletions
```diff
diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -788,7 +788,7 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
 		address = pgoff_address(index, vma);
 
 		/*
-		 * Note because we provide start/end to follow_pte_pmd it will
+		 * Note because we provide range to follow_pte_pmd it will
 		 * call mmu_notifier_invalidate_range_start() on our behalf
 		 * before taking any lock.
 		 */
@@ -843,9 +843,8 @@ unlock_pte:
 static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
 		struct address_space *mapping, void *entry)
 {
-	unsigned long pfn;
+	unsigned long pfn, index, count;
 	long ret = 0;
-	size_t size;
 
 	/*
 	 * A page got tagged dirty in DAX mapping? Something is seriously
@@ -894,17 +893,18 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
 	xas_unlock_irq(xas);
 
 	/*
-	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
-	 * in the middle of a PMD, the 'index' we are given will be aligned to
-	 * the start index of the PMD, as will the pfn we pull from 'entry'.
+	 * If dax_writeback_mapping_range() was given a wbc->range_start
+	 * in the middle of a PMD, the 'index' we use needs to be
+	 * aligned to the start of the PMD.
 	 * This allows us to flush for PMD_SIZE and not have to worry about
 	 * partial PMD writebacks.
 	 */
 	pfn = dax_to_pfn(entry);
-	size = PAGE_SIZE << dax_entry_order(entry);
+	count = 1UL << dax_entry_order(entry);
+	index = xas->xa_index & ~(count - 1);
 
-	dax_entry_mkclean(mapping, xas->xa_index, pfn);
-	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
+	dax_entry_mkclean(mapping, index, pfn);
+	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
 	/*
 	 * After we have flushed the cache, we can clear the dirty tag. There
 	 * cannot be new dirty data in the pfn after the flush has completed as
@@ -917,8 +917,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
 	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
 	dax_wake_entry(xas, entry, false);
 
-	trace_dax_writeback_one(mapping->host, xas->xa_index,
-			size >> PAGE_SHIFT);
+	trace_dax_writeback_one(mapping->host, index, count);
 	return ret;
 
  put_unlocked:
@@ -1220,9 +1219,7 @@ static vm_fault_t dax_fault_return(int error)
 {
 	if (error == 0)
 		return VM_FAULT_NOPAGE;
-	if (error == -ENOMEM)
-		return VM_FAULT_OOM;
-	return VM_FAULT_SIGBUS;
+	return vmf_error(error);
 }
 
 /*
```
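The dax_writeback_one() hunks above switch from a byte count (`size`) to a page count (`count`) and round `xas->xa_index` down to the start of the entry, so a PMD-sized entry is always cleaned and flushed as a whole even when writeback starts in the middle of it. Below is a minimal userspace sketch of that alignment arithmetic; the order value (9, i.e. a 2MB PMD entry with 4K pages) and the sample index are illustrative assumptions, not values taken from the kernel.

```c
#include <stdio.h>

int main(void)
{
	/* Assumed values for illustration only. */
	unsigned long order = 9;            /* dax_entry_order(entry) for a 2MB PMD entry */
	unsigned long xa_index = 0x12345;   /* a page index somewhere inside that PMD */

	unsigned long count = 1UL << order;             /* pages covered by the entry: 512 */
	unsigned long index = xa_index & ~(count - 1);  /* round down to the entry start */

	/* The flush then covers pages [index, index + count), i.e. the whole PMD. */
	printf("count=%lu, flush range %#lx..%#lx\n", count, index, index + count - 1);
	return 0;
}
```

Because `count` is a power of two, `~(count - 1)` is the alignment mask, so `index` lands on an entry boundary no matter where `wbc->range_start` fell; `count * PAGE_SIZE` then gives the byte length that was previously held in `size`.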

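The last hunk replaces the open-coded errno translation in dax_fault_return() with vmf_error(), keeping the same behaviour: -ENOMEM becomes VM_FAULT_OOM and any other error becomes VM_FAULT_SIGBUS. The sketch below simply mirrors the branch the hunk removes; the real helper is defined in include/linux/mm.h and relies on the kernel's vm_fault_t type, so this is not standalone code.

```c
/* Sketch mirroring the removed branch; the actual vmf_error() helper
 * lives in include/linux/mm.h (kernel context, not standalone). */
static inline vm_fault_t vmf_error(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;    /* allocation failure maps to an OOM fault */
	return VM_FAULT_SIGBUS;         /* every other error becomes SIGBUS */
}
```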
