/*
 * Copyright (C) 2003 Sistina Software
 *
 * This file is released under the GPL.
 */

#ifndef _DM_IO_H
#define _DM_IO_H

#include "dm.h"

struct io_region {
	struct block_device *bdev;
	sector_t sector;
	sector_t count;		/* If zero the region is ignored (see below) */
};

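/*
 * Illustrative sketch (not part of this interface): describing the
 * first 4KiB of a device, assuming 'bdev' was obtained elsewhere.
 *
 *	struct io_region where = {
 *		.bdev = bdev,
 *		.sector = 0,
 *		.count = 8,	(8 x 512-byte sectors = 4KiB)
 *	};
 */
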
struct page_list {
	struct page_list *next;
	struct page *page;
};

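/*
 * Illustrative sketch: walking a chain of pages ('head' and
 * 'process_page' are hypothetical names).
 *
 *	struct page_list *pl;
 *	for (pl = head; pl; pl = pl->next)
 *		process_page(pl->page);
 */
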
/*
* 'error' is a bitset, with each bit indicating whether an error
* occurred doing io to the corresponding region.
*/
typedef void (*io_notify_fn)(unsigned long error, void *context);
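
/*
 * Illustrative sketch of a notify callback ('my_io_complete' is a
 * hypothetical name); bit i of 'error' is set iff io to region i
 * failed.
 *
 *	static void my_io_complete(unsigned long error, void *context)
 *	{
 *		if (test_bit(0, &error))
 *			DMERR("io to region 0 failed");
 *	}
 */
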
enum dm_io_mem_type {
	DM_IO_PAGE_LIST,	/* Page list */
	DM_IO_BVEC,		/* Bio vector */
	DM_IO_VMA,		/* Virtual memory area */
	DM_IO_KMEM,		/* Kernel memory */
};

struct dm_io_memory {
	enum dm_io_mem_type type;

	union {
		struct page_list *pl;
		struct bio_vec *bvec;
		void *vma;
		void *addr;
	} ptr;

	unsigned offset;
};

struct dm_io_notify {
	io_notify_fn fn;	/* Callback for asynchronous requests */
	void *context;		/* Passed to callback */
};

/*
 * IO request structure
 */
struct dm_io_client;
struct dm_io_request {
	int bi_rw;			/* READ|WRITE - not READA */
	struct dm_io_memory mem;	/* Memory to use for io */
	struct dm_io_notify notify;	/* Synchronous if notify.fn is NULL */
	struct dm_io_client *client;	/* Client memory handler */
};
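
/*
 * Illustrative sketch: an asynchronous write from kmalloc'd memory.
 * 'data', 'my_io_complete' and 'client' are assumptions, not part of
 * this interface.
 *
 *	struct dm_io_request io_req = {
 *		.bi_rw = WRITE,
 *		.mem.type = DM_IO_KMEM,
 *		.mem.ptr.addr = data,
 *		.notify.fn = my_io_complete,
 *		.notify.context = data,
 *		.client = client,
 *	};
 */
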
/*
* Before anyone uses the IO interface they should call
* dm_io_get(), specifying roughly how many pages they are
* expecting to perform io on concurrently.
*
* This function may block.
*/
int dm_io_get(unsigned int num_pages);
void dm_io_put(unsigned int num_pages);
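
/*
 * Illustrative sketch: reserve pages at init, release at exit.
 *
 *	r = dm_io_get(64);	(expect io on up to ~64 pages at once)
 *	if (r)
 *		return r;
 *	...
 *	dm_io_put(64);
 */
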
/*
* For async io calls, users can alternatively use the dm_io() function below
* and dm_io_client_create() to create private mempools for the client.
*
* Create/destroy may block.
*/
struct dm_io_client *dm_io_client_create(unsigned num_pages);
int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client);
void dm_io_client_destroy(struct dm_io_client *client);
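
/*
 * Illustrative sketch: set up and tear down a private client.  The
 * error handling assumes an ERR_PTR-style return on failure.
 *
 *	struct dm_io_client *client = dm_io_client_create(16);
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...
 *	dm_io_client_destroy(client);
 */
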
/*
 * Synchronous IO.
 *
 * Please ensure that the rw flag in the following functions is
 * either READ or WRITE, i.e. we don't take READA.  Any
 * regions with a zero count field will be ignored.
 */
int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
	       struct page_list *pl, unsigned int offset,
	       unsigned long *error_bits);
int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
		    struct bio_vec *bvec, unsigned long *error_bits);
int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
		  void *data, unsigned long *error_bits);
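
/*
 * Illustrative sketch: synchronously read one region into a buffer
 * ('where' and 'buf' are assumed to be set up elsewhere).
 *
 *	unsigned long error_bits;
 *	r = dm_io_sync_vm(1, &where, READ, buf, &error_bits);
 *	if (r || error_bits)
 *		(handle the failure)
 */
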
/*
 * Asynchronous IO.
 *
 * The 'where' array may be safely allocated on the stack since
 * the function takes a copy.
 */
int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
		struct page_list *pl, unsigned int offset,
		io_notify_fn fn, void *context);
int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
		     struct bio_vec *bvec, io_notify_fn fn, void *context);
int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
		   void *data, io_notify_fn fn, void *context);
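
/*
 * Illustrative sketch: start a write of a page list; 'my_io_complete'
 * runs once io to all regions has completed.
 *
 *	r = dm_io_async(1, &where, WRITE, pl, 0, my_io_complete, NULL);
 */
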
/*
 * IO interface using private per-client pools.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct io_region *region, unsigned long *sync_error_bits);
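
/*
 * Illustrative sketch: with notify.fn set to NULL, dm_io() behaves
 * synchronously and fills in *sync_error_bits ('io_req' as in the
 * sketch above).
 *
 *	unsigned long error_bits;
 *	io_req.notify.fn = NULL;
 *	r = dm_io(&io_req, 1, &where, &error_bits);
 *	if (r || error_bits)
 *		(handle the failure)
 */
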
#endif