From e7638488434415aa478e78435cac8f0365737638 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Wed, 16 May 2018 11:46:08 -0700
Subject: mm: introduce MEMORY_DEVICE_FS_DAX and CONFIG_DEV_PAGEMAP_OPS
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In preparation for fixing dax-dma-vs-unmap issues, filesystems need to
be able to rely on the fact that they will get wakeups on dev_pagemap
page-idle events. Introduce MEMORY_DEVICE_FS_DAX and
generic_dax_page_free() as common indicator / infrastructure for dax
filesystems to require. With this change there are no users of the
MEMORY_DEVICE_HOST designation, so remove it.

The HMM sub-system extended dev_pagemap to arrange a callback when a
dev_pagemap managed page is freed. Since a dev_pagemap page is free /
idle when its reference count is 1 it requires an additional branch to
check the page-type at put_page() time. Given put_page() is a hot-path
we do not want to incur that check if HMM is not in use, so a static
branch is used to avoid that overhead when not necessary.

Now, the FS_DAX implementation wants to reuse this mechanism for
receiving dev_pagemap ->page_free() callbacks. Rework the HMM-specific
static-key into a generic mechanism that either HMM or FS_DAX code
paths can enable.

For ARCH=um builds, and any other arch that lacks ZONE_DEVICE support,
care must be taken to compile out the DEV_PAGEMAP_OPS infrastructure.
However, we still need to support FS_DAX in the FS_DAX_LIMITED case
implemented by the s390/dcssblk driver.

Cc: Martin Schwidefsky
Cc: Heiko Carstens
Cc: Michal Hocko
Reported-by: kbuild test robot
Reported-by: Thomas Meyer
Reported-by: Dave Jiang
Cc: "Jérôme Glisse"
Reviewed-by: Jan Kara
Reviewed-by: Christoph Hellwig
Signed-off-by: Dan Williams
---
 include/linux/memremap.h | 36 +++++++-----------------
 include/linux/mm.h       | 71 ++++++++++++++++++++++++++++++++++++------------
 2 files changed, 63 insertions(+), 44 deletions(-)
(limited to 'include')

diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 7b4899c06f49..5ebfff65da4d 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -1,7 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _LINUX_MEMREMAP_H_
 #define _LINUX_MEMREMAP_H_
-#include
 #include
 #include
 
@@ -30,13 +29,6 @@ struct vmem_altmap {
  * Specialize ZONE_DEVICE memory into multiple types each having differents
  * usage.
  *
- * MEMORY_DEVICE_HOST:
- * Persistent device memory (pmem): struct page might be allocated in different
- * memory and architecture might want to perform special actions. It is similar
- * to regular memory, in that the CPU can access it transparently. However,
- * it is likely to have different bandwidth and latency than regular memory.
- * See Documentation/nvdimm/nvdimm.txt for more information.
- *
  * MEMORY_DEVICE_PRIVATE:
  * Device memory that is not directly addressable by the CPU: CPU can neither
  * read nor write private memory. In this case, we do still have struct pages
@@ -53,11 +45,19 @@ struct vmem_altmap {
  * driver can hotplug the device memory using ZONE_DEVICE and with that memory
  * type. Any page of a process can be migrated to such memory. However no one
  * should be allow to pin such memory so that it can always be evicted.
+ *
+ * MEMORY_DEVICE_FS_DAX:
+ * Host memory that has similar access semantics as System RAM i.e. DMA
+ * coherent and supports page pinning. In support of coordinating page
+ * pinning vs other operations MEMORY_DEVICE_FS_DAX arranges for a
+ * wakeup event whenever a page is unpinned and becomes idle. This
+ * wakeup is used to coordinate physical address space management (ex:
+ * fs truncate/hole punch) vs pinned pages (ex: device dma).
  */
 enum memory_type {
-	MEMORY_DEVICE_HOST = 0,
-	MEMORY_DEVICE_PRIVATE,
+	MEMORY_DEVICE_PRIVATE = 1,
 	MEMORY_DEVICE_PUBLIC,
+	MEMORY_DEVICE_FS_DAX,
 };
 
 /*
@@ -129,8 +129,6 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 
 unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
 void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
-
-static inline bool is_zone_device_page(const struct page *page);
 #else
 static inline void *devm_memremap_pages(struct device *dev,
 		struct dev_pagemap *pgmap)
@@ -161,20 +159,6 @@ static inline void vmem_altmap_free(struct vmem_altmap *altmap,
 }
 #endif /* CONFIG_ZONE_DEVICE */
 
-#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
-static inline bool is_device_private_page(const struct page *page)
-{
-	return is_zone_device_page(page) &&
-		page->pgmap->type == MEMORY_DEVICE_PRIVATE;
-}
-
-static inline bool is_device_public_page(const struct page *page)
-{
-	return is_zone_device_page(page) &&
-		page->pgmap->type == MEMORY_DEVICE_PUBLIC;
-}
-#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
-
 static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
 {
 	if (pgmap)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1ac1f06a4be6..6e19265ee8f8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -821,27 +821,65 @@ static inline bool is_zone_device_page(const struct page *page)
 }
 #endif
 
-#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
-void put_zone_device_private_or_public_page(struct page *page);
-DECLARE_STATIC_KEY_FALSE(device_private_key);
-#define IS_HMM_ENABLED static_branch_unlikely(&device_private_key)
-static inline bool is_device_private_page(const struct page *page);
-static inline bool is_device_public_page(const struct page *page);
-#else /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
-static inline void put_zone_device_private_or_public_page(struct page *page)
+#ifdef CONFIG_DEV_PAGEMAP_OPS
+void dev_pagemap_get_ops(void);
+void dev_pagemap_put_ops(void);
+void __put_devmap_managed_page(struct page *page);
+DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
+static inline bool put_devmap_managed_page(struct page *page)
+{
+	if (!static_branch_unlikely(&devmap_managed_key))
+		return false;
+	if (!is_zone_device_page(page))
+		return false;
+	switch (page->pgmap->type) {
+	case MEMORY_DEVICE_PRIVATE:
+	case MEMORY_DEVICE_PUBLIC:
+	case MEMORY_DEVICE_FS_DAX:
+		__put_devmap_managed_page(page);
+		return true;
+	default:
+		break;
+	}
+	return false;
+}
+
+static inline bool is_device_private_page(const struct page *page)
 {
+	return is_zone_device_page(page) &&
+		page->pgmap->type == MEMORY_DEVICE_PRIVATE;
 }
-#define IS_HMM_ENABLED 0
+
+static inline bool is_device_public_page(const struct page *page)
+{
+	return is_zone_device_page(page) &&
+		page->pgmap->type == MEMORY_DEVICE_PUBLIC;
+}
+
+#else /* CONFIG_DEV_PAGEMAP_OPS */
+static inline void dev_pagemap_get_ops(void)
+{
+}
+
+static inline void dev_pagemap_put_ops(void)
+{
+}
+
+static inline bool put_devmap_managed_page(struct page *page)
+{
+	return false;
+}
+
 static inline bool is_device_private_page(const struct page *page)
 {
 	return false;
 }
+
 static inline bool is_device_public_page(const struct page *page)
 {
 	return false;
 }
-#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
-
+#endif /* CONFIG_DEV_PAGEMAP_OPS */
 
 static inline void get_page(struct page *page)
 {
@@ -859,16 +897,13 @@ static inline void put_page(struct page *page)
 	page = compound_head(page);
 
 	/*
-	 * For private device pages we need to catch refcount transition from
-	 * 2 to 1, when refcount reach one it means the private device page is
-	 * free and we need to inform the device driver through callback. See
+	 * For devmap managed pages we need to catch refcount transition from
+	 * 2 to 1, when refcount reach one it means the page is free and we
+	 * need to inform the device driver through callback. See
	 * include/linux/memremap.h and HMM for details.
 	 */
-	if (IS_HMM_ENABLED && unlikely(is_device_private_page(page) ||
-	    unlikely(is_device_public_page(page)))) {
-		put_zone_device_private_or_public_page(page);
+	if (put_devmap_managed_page(page))
 		return;
-	}
 
 	if (put_page_testzero(page))
 		__put_page(page);
-- cgit v1.2.1
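A driver-side sketch may help connect the header changes above to their intended use. None of the code below comes from this commit (the driver changes are outside the 'include' view shown here); the fsdax_attach_pages() and fsdax_page_idle() names, and the assumption that ->page_free(page, data) fires when a page's refcount drops to 1 and can wake waiters via wake_up_var(), are illustrative only:

/*
 * Hypothetical driver-side sketch, not part of the patch above.  Assumes
 * the HMM-era dev_pagemap ->page_free(page, data) callback form.
 */
#include <linux/device.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/wait_bit.h>

static void fsdax_page_idle(struct page *page, void *data)
{
	/* the refcount just dropped to 1: the page is idle, wake any waiter */
	wake_up_var(&page->_refcount);
}

static void *fsdax_attach_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	pgmap->type = MEMORY_DEVICE_FS_DAX;	/* new type from this patch */
	pgmap->page_free = fsdax_page_idle;

	/* enable the devmap_managed_key static branch checked in put_page() */
	dev_pagemap_get_ops();

	return devm_memremap_pages(dev, pgmap);
}

A real driver would pair dev_pagemap_get_ops() with dev_pagemap_put_ops() on teardown so the put_page() fast path goes back to a plain static-branch miss.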
From 5fac7408d828719db6d3fdba63e3c3726a6d1ee5 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Fri, 9 Mar 2018 17:44:31 -0800
Subject: mm, fs, dax: handle layout changes to pinned dax mappings

Background:

get_user_pages() in the filesystem pins file backed memory pages for
access by devices performing dma. However, it only pins the memory pages
not the page-to-file offset association. If a file is truncated the
pages are mapped out of the file and dma may continue indefinitely into
a page that is owned by a device driver. This breaks coherency of the
file vs dma, but the assumption is that if userspace wants the
file-space truncated it does not matter what data is inbound from the
device, it is not relevant anymore. The only expectation is that dma can
safely continue while the filesystem reallocates the block(s).

Problem:

This expectation that dma can safely continue while the filesystem
changes the block map is broken by dax. With dax the target dma page
*is* the filesystem block. The model of leaving the page pinned for dma,
but truncating the file block out of the file, means that the filesystem
is free to reallocate a block under active dma to another file and now
the expected data-incoherency situation has turned into active
data-corruption.

Solution:

Defer all filesystem operations (fallocate(), truncate()) on a dax mode
file while any page/block in the file is under active dma. This solution
assumes that dma is transient. Cases where dma operations are known to
not be transient, like RDMA, have been explicitly disabled via commits
like 5f1d43de5416 "IB/core: disable memory registration of
filesystem-dax vmas".

The dax_layout_busy_page() routine is called by filesystems with a lock
held against mm faults (i_mmap_lock) to find pinned / busy dax pages.
The process of looking up a busy page invalidates all mappings to
trigger any subsequent get_user_pages() to block on i_mmap_lock. The
filesystem continues to call dax_layout_busy_page() until it finally
returns no more active pages. This approach assumes that the page
pinning is transient; if that assumption is violated the system would
have likely hung from the uncompleted I/O.

Cc: Jeff Moyer
Cc: Dave Chinner
Cc: Matthew Wilcox
Cc: Alexander Viro
Cc: "Darrick J. Wong"
Cc: Ross Zwisler
Cc: Dave Hansen
Cc: Andrew Morton
Reported-by: Christoph Hellwig
Reviewed-by: Christoph Hellwig
Reviewed-by: Jan Kara
Signed-off-by: Dan Williams
---
 include/linux/dax.h | 7 +++++++
 1 file changed, 7 insertions(+)
(limited to 'include')

diff --git a/include/linux/dax.h b/include/linux/dax.h
index f9eb22ad341e..25bab6abb695 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -83,6 +83,8 @@ static inline void fs_put_dax(struct dax_device *dax_dev)
 struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
 int dax_writeback_mapping_range(struct address_space *mapping,
 		struct block_device *bdev, struct writeback_control *wbc);
+
+struct page *dax_layout_busy_page(struct address_space *mapping);
 #else
 static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
 {
@@ -103,6 +105,11 @@ static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
 	return NULL;
 }
 
+static inline struct page *dax_layout_busy_page(struct address_space *mapping)
+{
+	return NULL;
+}
+
 static inline int dax_writeback_mapping_range(struct address_space *mapping,
 		struct block_device *bdev, struct writeback_control *wbc)
 {
-- cgit v1.2.1
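To make the retry loop described in the changelog above concrete, here is a filesystem-side sketch. It is not taken from this commit: foofs_break_dax_layouts() is a made-up helper, the caller is assumed to already hold the filesystem's fault-blocking lock, and the wait condition assumes a busy page becomes idle at refcount 1 and is signalled with wake_up_var(), as in the earlier driver sketch:

/*
 * Hypothetical filesystem-side sketch, not part of the patch above.
 * Real implementations also cycle the fault-blocking lock around the
 * wait and then retry; this simplified loop just blocks.
 */
#include <linux/dax.h>
#include <linux/mm.h>
#include <linux/wait_bit.h>

static void foofs_break_dax_layouts(struct address_space *mapping)
{
	struct page *page;

	/*
	 * Each lookup unmaps the file so that new faults block on the
	 * caller's lock, and returns one still-pinned (busy) dax page.
	 */
	while ((page = dax_layout_busy_page(mapping)) != NULL) {
		/* busy means refcount > 1; wait for it to drop back to 1 */
		wait_var_event(&page->_refcount,
			       atomic_read(&page->_refcount) == 1);
	}

	/* no more pinned pages: safe to truncate / punch the block map */
}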
Wong" Cc: Ross Zwisler Cc: Dave Hansen Cc: Andrew Morton Reported-by: Christoph Hellwig Reviewed-by: Christoph Hellwig Reviewed-by: Jan Kara Signed-off-by: Dan Williams --- include/linux/dax.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/linux/dax.h b/include/linux/dax.h index f9eb22ad341e..25bab6abb695 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -83,6 +83,8 @@ static inline void fs_put_dax(struct dax_device *dax_dev) struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev); int dax_writeback_mapping_range(struct address_space *mapping, struct block_device *bdev, struct writeback_control *wbc); + +struct page *dax_layout_busy_page(struct address_space *mapping); #else static inline int bdev_dax_supported(struct super_block *sb, int blocksize) { @@ -103,6 +105,11 @@ static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev) return NULL; } +static inline struct page *dax_layout_busy_page(struct address_space *mapping) +{ + return NULL; +} + static inline int dax_writeback_mapping_range(struct address_space *mapping, struct block_device *bdev, struct writeback_control *wbc) { -- cgit v1.2.1 From 522239b445a2de988edb81672963708a6aaf9046 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 22 May 2018 23:17:03 -0700 Subject: uio, lib: Fix CONFIG_ARCH_HAS_UACCESS_MCSAFE compilation Add a common Kconfig CONFIG_ARCH_HAS_UACCESS_MCSAFE that archs can optionally select, and fixup the declaration of _copy_to_iter_mcsafe(). Fixes: 8780356ef630 ("x86/asm/memcpy_mcsafe: Define copy_to_iter_mcsafe()") Signed-off-by: Dan Williams --- include/linux/uio.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/uio.h b/include/linux/uio.h index f5766e853a77..409c845d4cd3 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -155,7 +155,7 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i); #endif #ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE -size_t _copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i); +size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i); #else #define _copy_to_iter_mcsafe _copy_to_iter #endif -- cgit v1.2.1 From b3a9a0c36e1f7b9e2e6cf965c2bb973624f2b3b9 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 2 May 2018 06:46:33 -0700 Subject: dax: Introduce a ->copy_to_iter dax operation Similar to the ->copy_from_iter() operation, a platform may want to deploy an architecture or device specific routine for handling reads from a dax_device like /dev/pmemX. On x86 this routine will point to a machine check safe version of copy_to_iter(). For now, add the plumbing to device-mapper and the dax core. 
From b3a9a0c36e1f7b9e2e6cf965c2bb973624f2b3b9 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Wed, 2 May 2018 06:46:33 -0700
Subject: dax: Introduce a ->copy_to_iter dax operation

Similar to the ->copy_from_iter() operation, a platform may want to
deploy an architecture or device specific routine for handling reads
from a dax_device like /dev/pmemX. On x86 this routine will point to a
machine check safe version of copy_to_iter(). For now, add the plumbing
to device-mapper and the dax core.

Cc: Ross Zwisler
Cc: Mike Snitzer
Cc: Christoph Hellwig
Signed-off-by: Dan Williams
---
 include/linux/dax.h           | 5 +++++
 include/linux/device-mapper.h | 5 +++--
 2 files changed, 8 insertions(+), 2 deletions(-)
(limited to 'include')

diff --git a/include/linux/dax.h b/include/linux/dax.h
index f9eb22ad341e..a43b396fb336 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -20,6 +20,9 @@ struct dax_operations {
 	/* copy_from_iter: required operation for fs-dax direct-i/o */
 	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
 			struct iov_iter *);
+	/* copy_to_iter: required operation for fs-dax direct-i/o */
+	size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
+			struct iov_iter *);
 };
 
 extern struct attribute_group dax_attribute_group;
@@ -118,6 +121,8 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 		long nr_pages, void **kaddr, pfn_t *pfn);
 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 		size_t bytes, struct iov_iter *i);
+size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+		size_t bytes, struct iov_iter *i);
 void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
 
 ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 31fef7c34185..6fb0808e87c8 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -133,7 +133,7 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
  */
 typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
 		long nr_pages, void **kaddr, pfn_t *pfn);
-typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
+typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
 		void *addr, size_t bytes, struct iov_iter *i);
 #define PAGE_SECTORS (PAGE_SIZE / 512)
 
@@ -184,7 +184,8 @@ struct target_type {
 	dm_iterate_devices_fn iterate_devices;
 	dm_io_hints_fn io_hints;
 	dm_dax_direct_access_fn direct_access;
-	dm_dax_copy_from_iter_fn dax_copy_from_iter;
+	dm_dax_copy_iter_fn dax_copy_from_iter;
+	dm_dax_copy_iter_fn dax_copy_to_iter;
 
 	/* For internal device-mapper use. */
 	struct list_head list;
-- cgit v1.2.1
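Finally, a provider-side sketch of the new ->copy_to_iter() hook. The foo_dax names are hypothetical, only the struct dax_operations layout comes from the header change above, and plain copy_to_iter() stands in for the machine-check-safe copy an x86 pmem driver would plug in:

/* Hypothetical dax_operations provider, for illustration only. */
#include <linux/dax.h>
#include <linux/uio.h>

static size_t foo_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	/* write path: copy from the caller's iovec into device memory */
	return copy_from_iter_flushcache(addr, bytes, i);
}

static size_t foo_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	/* read path: an mcsafe copy would replace this on x86 pmem */
	return copy_to_iter(addr, bytes, i);
}

static const struct dax_operations foo_dax_ops = {
	/* .direct_access and friends omitted */
	.copy_from_iter	= foo_dax_copy_from_iter,
	.copy_to_iter	= foo_dax_copy_to_iter,
};

Device-mapper targets get the same pairing through the new dax_copy_to_iter member in struct target_type, typed with the renamed dm_dax_copy_iter_fn.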