Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/Kconfig                16
-rw-r--r--  drivers/md/bcache/super.c          8
-rw-r--r--  drivers/md/bcache/util.h          12
-rw-r--r--  drivers/md/dm-bufio.c              2
-rw-r--r--  drivers/md/dm-cache-metadata.c    12
-rw-r--r--  drivers/md/dm-cache-policy-smq.c  30
-rw-r--r--  drivers/md/dm-core.h               1
-rw-r--r--  drivers/md/dm-ioctl.c             13
-rw-r--r--  drivers/md/dm-linear.c            27
-rw-r--r--  drivers/md/dm-rq.c                 7
-rw-r--r--  drivers/md/dm-snap.c               6
-rw-r--r--  drivers/md/dm-stats.c              7
-rw-r--r--  drivers/md/dm-stripe.c            29
-rw-r--r--  drivers/md/dm-target.c             6
-rw-r--r--  drivers/md/dm.c                   67
15 files changed, 140 insertions, 103 deletions
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 7468a22f9d10..906103c168ea 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -200,6 +200,7 @@ config BLK_DEV_DM_BUILTIN
config BLK_DEV_DM
tristate "Device mapper support"
select BLK_DEV_DM_BUILTIN
+ select DAX
---help---
Device-mapper is a low level volume manager. It works by allowing
people to specify mappings for ranges of logical sectors. Various
@@ -502,13 +503,24 @@ config DM_LOG_WRITES
If unsure, say N.
config DM_INTEGRITY
- tristate "Integrity target"
+ tristate "Integrity target support"
depends on BLK_DEV_DM
select BLK_DEV_INTEGRITY
select DM_BUFIO
select CRYPTO
select ASYNC_XOR
---help---
- This is the integrity target.
+ This device-mapper target emulates a block device that has
+ additional per-sector tags that can be used for storing
+ integrity information.
+
+ This integrity target is used with the dm-crypt target to
+ provide authenticated disk encryption or it can be used
+ standalone.
+
+ To compile this code as a module, choose M here: the module will
+ be called dm-integrity.
+
+ If unsure, say N.
endif # MD
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 85e3f21c2514..e57353e39168 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -767,16 +767,12 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
}
n = d->nr_stripes * sizeof(atomic_t);
- d->stripe_sectors_dirty = n < PAGE_SIZE << 6
- ? kzalloc(n, GFP_KERNEL)
- : vzalloc(n);
+ d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
if (!d->stripe_sectors_dirty)
return -ENOMEM;
n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
- d->full_dirty_stripes = n < PAGE_SIZE << 6
- ? kzalloc(n, GFP_KERNEL)
- : vzalloc(n);
+ d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
if (!d->full_dirty_stripes)
return -ENOMEM;
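
[Note: the bcache hunks above, like the util.h, dm-ioctl.c and dm-stats.c hunks further down, replace an open-coded kmalloc-with-vmalloc-fallback with the kvmalloc() family. A minimal sketch of the pattern; the helper names and the stripe-counter use case are illustrative, not part of the patch:

#include <linux/mm.h>	/* kvzalloc(), kvfree() */

/* Hypothetical helper showing the allocation side of the conversion. */
static atomic_t *alloc_stripe_counters(size_t nr_stripes)
{
	/*
	 * kvzalloc() attempts a physically contiguous kzalloc() first and
	 * falls back to vzalloc() automatically, so the caller no longer
	 * compares the size against PAGE_SIZE or KMALLOC_MAX_SIZE itself.
	 */
	return kvzalloc(nr_stripes * sizeof(atomic_t), GFP_KERNEL);
}

/* Anything returned by the kvmalloc() family is released with kvfree(). */
static void free_stripe_counters(atomic_t *counters)
{
	kvfree(counters);
}
]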
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 5d13930f0f22..cb8d2ccbb6c6 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -43,11 +43,7 @@ struct closure;
(heap)->used = 0; \
(heap)->size = (_size); \
_bytes = (heap)->size * sizeof(*(heap)->data); \
- (heap)->data = NULL; \
- if (_bytes < KMALLOC_MAX_SIZE) \
- (heap)->data = kmalloc(_bytes, (gfp)); \
- if ((!(heap)->data) && ((gfp) & GFP_KERNEL)) \
- (heap)->data = vmalloc(_bytes); \
+ (heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \
(heap)->data; \
})
@@ -136,12 +132,8 @@ do { \
\
(fifo)->mask = _allocated_size - 1; \
(fifo)->front = (fifo)->back = 0; \
- (fifo)->data = NULL; \
\
- if (_bytes < KMALLOC_MAX_SIZE) \
- (fifo)->data = kmalloc(_bytes, (gfp)); \
- if ((!(fifo)->data) && ((gfp) & GFP_KERNEL)) \
- (fifo)->data = vmalloc(_bytes); \
+ (fifo)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \
(fifo)->data; \
})
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index c92c31b23e54..5db11a405129 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -406,7 +406,7 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
if (gfp_mask & __GFP_NORETRY)
noio_flag = memalloc_noio_save();
- ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);
+ ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
if (gfp_mask & __GFP_NORETRY)
memalloc_noio_restore(noio_flag);
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 8568dbd50ba4..4a4e9c75fc4c 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1624,17 +1624,19 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
{
- int r;
+ int r = -EINVAL;
flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
clear_clean_shutdown);
WRITE_LOCK(cmd);
+ if (cmd->fail_io)
+ goto out;
+
r = __commit_transaction(cmd, mutator);
if (r)
goto out;
r = __begin_transaction(cmd);
-
out:
WRITE_UNLOCK(cmd);
return r;
@@ -1646,7 +1648,8 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
int r = -EINVAL;
READ_LOCK(cmd);
- r = dm_sm_get_nr_free(cmd->metadata_sm, result);
+ if (!cmd->fail_io)
+ r = dm_sm_get_nr_free(cmd->metadata_sm, result);
READ_UNLOCK(cmd);
return r;
@@ -1658,7 +1661,8 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
int r = -EINVAL;
READ_LOCK(cmd);
- r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
+ if (!cmd->fail_io)
+ r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
READ_UNLOCK(cmd);
return r;
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index e0c40aec5e96..72479bd61e11 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1120,28 +1120,30 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
* Cache entries may not be populated. So we cannot rely on the
* size of the clean queue.
*/
- unsigned nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
+ unsigned nr_clean;
- if (idle)
+ if (idle) {
/*
* We'd like to clean everything.
*/
return q_size(&mq->dirty) == 0u;
- else
- return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
- percent_to_target(mq, CLEAN_TARGET);
+ }
+
+ nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
+ return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
+ percent_to_target(mq, CLEAN_TARGET);
}
static bool free_target_met(struct smq_policy *mq, bool idle)
{
- unsigned nr_free = from_cblock(mq->cache_size) -
- mq->cache_alloc.nr_allocated;
+ unsigned nr_free;
- if (idle)
- return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
- percent_to_target(mq, FREE_TARGET);
- else
+ if (!idle)
return true;
+
+ nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
+ return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
+ percent_to_target(mq, FREE_TARGET);
}
/*----------------------------------------------------------------*/
@@ -1214,7 +1216,11 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
return;
if (allocator_empty(&mq->cache_alloc)) {
- if (!free_target_met(mq, false))
+ /*
+ * We always claim to be 'idle' to ensure some demotions happen
+ * with continuous loads.
+ */
+ if (!free_target_met(mq, true))
queue_demotion(mq);
return;
}
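
[Note: the smq rework above is structural: each helper now returns early on the branch that does not need the derived count, and queue_promotion() passes idle=true so demotions keep happening under continuous load. A compressed sketch of the threshold check, using illustrative parameters rather than the policy's real fields:

/* Sketch only: cache_size, nr_dirty, queued_writebacks and clean_target
 * stand in for the smq policy's internal state. */
static bool clean_target_met(unsigned cache_size, unsigned nr_dirty,
			     unsigned queued_writebacks,
			     unsigned clean_target, bool idle)
{
	unsigned nr_clean;

	if (idle)			/* idle: we want everything clean */
		return nr_dirty == 0;

	nr_clean = cache_size - nr_dirty;
	return nr_clean + queued_writebacks >= clean_target;
}
]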
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 97db4d11c05a..52ca8d059e82 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -58,6 +58,7 @@ struct mapped_device {
struct target_type *immutable_target_type;
struct gendisk *disk;
+ struct dax_device *dax_dev;
char name[16];
void *interface_ptr;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 2d5d7064acbf..0555b4410e05 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1691,6 +1691,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
struct dm_ioctl *dmi;
int secure_data;
const size_t minimum_data_size = offsetof(struct dm_ioctl, data);
+ unsigned noio_flag;
if (copy_from_user(param_kernel, user, minimum_data_size))
return -EFAULT;
@@ -1713,15 +1714,9 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
* Use kmalloc() rather than vmalloc() when we can.
*/
dmi = NULL;
- if (param_kernel->data_size <= KMALLOC_MAX_SIZE)
- dmi = kmalloc(param_kernel->data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
-
- if (!dmi) {
- unsigned noio_flag;
- noio_flag = memalloc_noio_save();
- dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_HIGH | __GFP_HIGHMEM, PAGE_KERNEL);
- memalloc_noio_restore(noio_flag);
- }
+ noio_flag = memalloc_noio_save();
+ dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL);
+ memalloc_noio_restore(noio_flag);
if (!dmi) {
if (secure_data && clear_user(user, param_kernel->data_size))
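
[Note: besides switching to kvmalloc(), the dm-ioctl.c hunk wraps the whole allocation in memalloc_noio_save()/restore(), so both the slab attempt and the vmalloc fallback run with GFP_NOIO semantics. A hedged sketch of that pattern; the helper name and buffer size are illustrative:

#include <linux/mm.h>		/* kvmalloc(), kvfree() */
#include <linux/sched/mm.h>	/* memalloc_noio_save()/restore() */

static void *alloc_ioctl_buffer(size_t data_size)
{
	unsigned noio_flag;
	void *buf;

	/*
	 * With the noio flag set, GFP_KERNEL allocations made by this task
	 * are implicitly treated as GFP_NOIO, covering every path inside
	 * kvmalloc().
	 */
	noio_flag = memalloc_noio_save();
	buf = kvmalloc(data_size, GFP_KERNEL);
	memalloc_noio_restore(noio_flag);

	return buf;
}
]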
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index a5120961632a..7d42a9d9f406 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
+#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>
@@ -142,22 +143,20 @@ static int linear_iterate_devices(struct dm_target *ti,
return fn(ti, lc->dev, lc->start, ti->len, data);
}
-static long linear_direct_access(struct dm_target *ti, sector_t sector,
- void **kaddr, pfn_t *pfn, long size)
+static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
+ long ret;
struct linear_c *lc = ti->private;
struct block_device *bdev = lc->dev->bdev;
- struct blk_dax_ctl dax = {
- .sector = linear_map_sector(ti, sector),
- .size = size,
- };
- long ret;
-
- ret = bdev_direct_access(bdev, &dax);
- *kaddr = dax.addr;
- *pfn = dax.pfn;
-
- return ret;
+ struct dax_device *dax_dev = lc->dev->dax_dev;
+ sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+
+ dev_sector = linear_map_sector(ti, sector);
+ ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
+ if (ret)
+ return ret;
+ return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
static struct target_type linear_target = {
@@ -171,7 +170,7 @@ static struct target_type linear_target = {
.status = linear_status,
.prepare_ioctl = linear_prepare_ioctl,
.iterate_devices = linear_iterate_devices,
- .direct_access = linear_direct_access,
+ .direct_access = linear_dax_direct_access,
};
int __init dm_linear_init(void)
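
[Note: the dm-linear conversion above switches ->direct_access from a sector/size interface to the dax_device page interface: the target gets a page offset and page count, turns the offset back into a sector with PAGE_SECTORS, remaps it onto the underlying device, translates that sector to a pgoff with bdev_dax_pgoff(), and forwards to dax_direct_access(). A minimal sketch for a linear-style target; struct example_ctx is illustrative, not the driver's type:

#include <linux/dax.h>
#include <linux/device-mapper.h>

/* Illustrative per-target context; a real target defines its own. */
struct example_ctx {
	struct dm_dev *dev;
	sector_t start;
};

static long example_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
				      long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct example_ctx *ec = ti->private;
	struct block_device *bdev = ec->dev->bdev;
	struct dax_device *dax_dev = ec->dev->dax_dev;
	sector_t sector = pgoff * PAGE_SECTORS;		/* pages -> sectors */
	sector_t dev_sector;
	long ret;

	/* Remap onto the underlying device, linear-style. */
	dev_sector = ec->start + dm_target_offset(ti, sector);

	/* Translate the device sector back into a pgoff on that device. */
	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
	if (ret)
		return ret;

	/* Returns the number of pages accessible at *kaddr, or -errno. */
	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
]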
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index a48130b90157..2af27026aa2e 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -720,11 +720,10 @@ int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
return 0;
}
-static int dm_mq_init_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int request_idx,
- unsigned int numa_node)
+static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
- return __dm_rq_init_rq(data, rq);
+ return __dm_rq_init_rq(set->driver_data, rq);
}
static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
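
[Note: the dm-rq.c hunk tracks a block layer API change: ->init_request now receives the blk_mq_tag_set itself instead of an opaque data pointer, so drivers reach their private context through set->driver_data. A hedged sketch of how the callback is wired up; all names are illustrative:

#include <linux/blk-mq.h>

/* Illustrative per-device context stored in the tag set's driver_data. */
struct example_dev;

static int example_init_request(struct blk_mq_tag_set *set, struct request *rq,
				unsigned int hctx_idx, unsigned int numa_node)
{
	struct example_dev *dev = set->driver_data;

	/* Initialise the per-request PDU here using @dev. */
	(void)dev;
	return 0;
}

static const struct blk_mq_ops example_mq_ops = {
	/* .queue_rq and the other mandatory hooks omitted from this sketch */
	.init_request	= example_init_request,
};
]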
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index c65feeada864..e152d9817c81 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -2302,8 +2302,8 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
return do_origin(o->dev, bio);
}
-static long origin_direct_access(struct dm_target *ti, sector_t sector,
- void **kaddr, pfn_t *pfn, long size)
+static long origin_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
DMWARN("device does not support dax.");
return -EIO;
@@ -2368,7 +2368,7 @@ static struct target_type origin_target = {
.postsuspend = origin_postsuspend,
.status = origin_status,
.iterate_devices = origin_iterate_devices,
- .direct_access = origin_direct_access,
+ .direct_access = origin_dax_direct_access,
};
static struct target_type snapshot_target = {
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 0250e7e521ab..6028d8247f58 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -146,12 +146,7 @@ static void *dm_kvzalloc(size_t alloc_size, int node)
if (!claim_shared_memory(alloc_size))
return NULL;
- if (alloc_size <= KMALLOC_MAX_SIZE) {
- p = kzalloc_node(alloc_size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, node);
- if (p)
- return p;
- }
- p = vzalloc_node(alloc_size, node);
+ p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node);
if (p)
return p;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 4b50ae115c6d..75152482f3ad 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
+#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/log2.h>
@@ -310,27 +311,25 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
-static long stripe_direct_access(struct dm_target *ti, sector_t sector,
- void **kaddr, pfn_t *pfn, long size)
+static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
+ sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
struct stripe_c *sc = ti->private;
- uint32_t stripe;
+ struct dax_device *dax_dev;
struct block_device *bdev;
- struct blk_dax_ctl dax = {
- .size = size,
- };
+ uint32_t stripe;
long ret;
- stripe_map_sector(sc, sector, &stripe, &dax.sector);
-
- dax.sector += sc->stripe[stripe].physical_start;
+ stripe_map_sector(sc, sector, &stripe, &dev_sector);
+ dev_sector += sc->stripe[stripe].physical_start;
+ dax_dev = sc->stripe[stripe].dev->dax_dev;
bdev = sc->stripe[stripe].dev->bdev;
- ret = bdev_direct_access(bdev, &dax);
- *kaddr = dax.addr;
- *pfn = dax.pfn;
-
- return ret;
+ ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
+ if (ret)
+ return ret;
+ return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
/*
@@ -451,7 +450,7 @@ static struct target_type stripe_target = {
.status = stripe_status,
.iterate_devices = stripe_iterate_devices,
.io_hints = stripe_io_hints,
- .direct_access = stripe_direct_access,
+ .direct_access = stripe_dax_direct_access,
};
int __init dm_stripe_init(void)
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 6264ff00dcf0..b242b750542f 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -142,8 +142,8 @@ static void io_err_release_clone_rq(struct request *clone)
{
}
-static long io_err_direct_access(struct dm_target *ti, sector_t sector,
- void **kaddr, pfn_t *pfn, long size)
+static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
return -EIO;
}
@@ -157,7 +157,7 @@ static struct target_type error_target = {
.map = io_err_map,
.clone_and_map_rq = io_err_clone_and_map_rq,
.release_clone_rq = io_err_release_clone_rq,
- .direct_access = io_err_direct_access,
+ .direct_access = io_err_dax_direct_access,
};
int __init dm_target_init(void)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 268edf402bbb..6ef9500226c0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -16,6 +16,7 @@
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
+#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
@@ -629,6 +630,7 @@ static int open_table_device(struct table_device *td, dev_t dev,
}
td->dm_dev.bdev = bdev;
+ td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
return 0;
}
@@ -642,7 +644,9 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
+ put_dax(td->dm_dev.dax_dev);
td->dm_dev.bdev = NULL;
+ td->dm_dev.dax_dev = NULL;
}
static struct table_device *find_table_device(struct list_head *l, dev_t dev,
@@ -920,31 +924,49 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
-static long dm_blk_direct_access(struct block_device *bdev, sector_t sector,
- void **kaddr, pfn_t *pfn, long size)
+static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
+ sector_t sector, int *srcu_idx)
{
- struct mapped_device *md = bdev->bd_disk->private_data;
struct dm_table *map;
struct dm_target *ti;
- int srcu_idx;
- long len, ret = -EIO;
- map = dm_get_live_table(md, &srcu_idx);
+ map = dm_get_live_table(md, srcu_idx);
if (!map)
- goto out;
+ return NULL;
ti = dm_table_find_target(map, sector);
if (!dm_target_is_valid(ti))
- goto out;
+ return NULL;
+
+ return ti;
+}
+
+static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
+{
+ struct mapped_device *md = dax_get_private(dax_dev);
+ sector_t sector = pgoff * PAGE_SECTORS;
+ struct dm_target *ti;
+ long len, ret = -EIO;
+ int srcu_idx;
- len = max_io_len(sector, ti) << SECTOR_SHIFT;
- size = min(len, size);
+ ti = dm_dax_get_live_target(md, sector, &srcu_idx);
+ if (!ti)
+ goto out;
+ if (!ti->type->direct_access)
+ goto out;
+ len = max_io_len(sector, ti) / PAGE_SECTORS;
+ if (len < 1)
+ goto out;
+ nr_pages = min(len, nr_pages);
if (ti->type->direct_access)
- ret = ti->type->direct_access(ti, sector, kaddr, pfn, size);
-out:
+ ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
+
+ out:
dm_put_live_table(md, srcu_idx);
- return min(ret, size);
+
+ return ret;
}
/*
@@ -1471,6 +1493,7 @@ static int next_free_minor(int *minor)
}
static const struct block_device_operations dm_blk_dops;
+static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work);
@@ -1517,6 +1540,12 @@ static void cleanup_mapped_device(struct mapped_device *md)
if (md->bs)
bioset_free(md->bs);
+ if (md->dax_dev) {
+ kill_dax(md->dax_dev);
+ put_dax(md->dax_dev);
+ md->dax_dev = NULL;
+ }
+
if (md->disk) {
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
@@ -1544,6 +1573,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
static struct mapped_device *alloc_dev(int minor)
{
int r, numa_node_id = dm_get_numa_node();
+ struct dax_device *dax_dev;
struct mapped_device *md;
void *old_md;
@@ -1608,6 +1638,12 @@ static struct mapped_device *alloc_dev(int minor)
md->disk->queue = md->queue;
md->disk->private_data = md;
sprintf(md->disk->disk_name, "dm-%d", minor);
+
+ dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
+ if (!dax_dev)
+ goto bad;
+ md->dax_dev = dax_dev;
+
add_disk(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
@@ -2816,12 +2852,15 @@ static const struct block_device_operations dm_blk_dops = {
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
- .direct_access = dm_blk_direct_access,
.getgeo = dm_blk_getgeo,
.pr_ops = &dm_pr_ops,
.owner = THIS_MODULE
};
+static const struct dax_operations dm_dax_ops = {
+ .direct_access = dm_dax_direct_access,
+};
+
/*
* module hooks
*/
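
[Note: taken together, the dm.c changes give every mapped device its own dax_device: alloc_dev() registers one with alloc_dax() and the new dm_dax_ops, open_table_device() takes a reference on the underlying device's dax_device via dax_get_by_host(), and the teardown paths drop those references with kill_dax()/put_dax(). A condensed sketch of that lifecycle; error handling is trimmed and the helper names are illustrative:

#include <linux/dax.h>

/* Registration side, as done in alloc_dev() above. */
static struct dax_device *example_register_dax(void *private, const char *name,
					       const struct dax_operations *ops)
{
	/* Allocates a dax_device bound to @ops and registered under @name,
	 * so that dax_get_by_host(name) can later find it. */
	return alloc_dax(private, name, ops);
}

/* Lookup side, as done in open_table_device() above. */
static struct dax_device *example_get_underlying_dax(struct block_device *bdev)
{
	/* Takes a reference; balanced by put_dax() when the table device
	 * is closed. */
	return dax_get_by_host(bdev->bd_disk->disk_name);
}

/* Teardown, as done in cleanup_mapped_device() above. */
static void example_unregister_dax(struct dax_device *dax_dev)
{
	kill_dax(dax_dev);	/* stop new dax operations */
	put_dax(dax_dev);	/* drop the allocation reference */
}
]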