author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2012-02-10 10:58:25 -0800
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2012-02-10 10:58:25 -0800
commit     b91867f2ee5c84b550f95ce54c91b180f70f48cb
tree       bd7a5be4484d539af13e6b345320455e989612cf
parent     b05ee6bf9e6c7acc38dca1466b63bb24ae5df6f3
parent     9196dc1129fbb3ecf93027224a6bdbc86d086e3a
Merge tag 'staging-3.3-rc3' into staging-next
This was done to resolve some merge issues with the following files that
had changed in both branches:

	drivers/staging/rtl8712/rtl871x_sta_mgt.c
	drivers/staging/tidspbridge/rmgr/drv_interface.c
	drivers/staging/zcache/zcache-main.c

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/staging/zcache')
-rw-r--r--  drivers/staging/zcache/Kconfig       |   9
-rw-r--r--  drivers/staging/zcache/zcache-main.c | 231
2 files changed, 169 insertions, 71 deletions
diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig
index 7fabcb2bc80d..94e48aa9f36b 100644
--- a/drivers/staging/zcache/Kconfig
+++ b/drivers/staging/zcache/Kconfig
@@ -1,13 +1,12 @@
config ZCACHE
tristate "Dynamic compression of swap pages and clean pagecache pages"
- depends on CLEANCACHE || FRONTSWAP
- select XVMALLOC
- select LZO_COMPRESS
- select LZO_DECOMPRESS
+ depends on (CLEANCACHE || FRONTSWAP) && CRYPTO
+ select ZSMALLOC
+ select CRYPTO_LZO
default n
help
Zcache doubles RAM efficiency while providing a significant
- performance boosts on many workloads. Zcache uses lzo1x
+ performance boosts on many workloads. Zcache uses
compression and an in-kernel implementation of transcendent
memory to store clean page cache pages and swap in RAM,
providing a noticeable reduction in disk I/O.
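
Build note: with this change, an old .config that reached ZCACHE through
CLEANCACHE/FRONTSWAP alone will silently lose the driver unless the crypto
layer is enabled too. A minimal fragment satisfying the new dependencies
might look like this (a sketch only; ZSMALLOC and CRYPTO_LZO are pulled in
automatically by the new "select" lines and are shown just for clarity):

	# at least one frontend, plus the crypto API
	CONFIG_CLEANCACHE=y
	CONFIG_FRONTSWAP=y
	CONFIG_CRYPTO=y
	CONFIG_ZCACHE=y
	# selected by ZCACHE:
	#   CONFIG_ZSMALLOC=y
	#   CONFIG_CRYPTO_LZO=y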
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index ef7c52bb1df9..d7020b774039 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -6,9 +6,10 @@
*
* Zcache provides an in-kernel "host implementation" for transcendent memory
* and, thus indirectly, for cleancache and frontswap. Zcache includes two
- * page-accessible memory [1] interfaces, both utilizing lzo1x compression:
+ * page-accessible memory [1] interfaces, both utilizing the crypto compression
+ * API:
* 1) "compression buddies" ("zbud") is used for ephemeral pages
- * 2) xvmalloc is used for persistent pages.
+ * 2) zsmalloc is used for persistent pages.
* Xvmalloc (based on the TLSF allocator) has very low fragmentation
* so maximizes space efficiency, while zbud allows pairs (and potentially,
* in the future, more than a pair of) compressed pages to be closely linked
@@ -23,15 +24,16 @@
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/list.h>
-#include <linux/lzo.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/math64.h>
+#include <linux/crypto.h>
+#include <linux/string.h>
#include "tmem.h"
-#include "../zram/xvmalloc.h" /* if built in drivers/staging */
+#include "../zsmalloc/zsmalloc.h"
#if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
#error "zcache is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
@@ -60,7 +62,7 @@ MODULE_LICENSE("GPL");
struct zcache_client {
struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
- struct xv_pool *xvpool;
+ struct zs_pool *zspool;
bool allocated;
atomic_t refcount;
};
@@ -81,6 +83,38 @@ static inline bool is_local_client(struct zcache_client *cli)
return cli == &zcache_host;
}
+/* crypto API for zcache */
+#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
+static char zcache_comp_name[ZCACHE_COMP_NAME_SZ];
+static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms;
+
+enum comp_op {
+ ZCACHE_COMPOP_COMPRESS,
+ ZCACHE_COMPOP_DECOMPRESS
+};
+
+static inline int zcache_comp_op(enum comp_op op,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ struct crypto_comp *tfm;
+ int ret;
+
+ BUG_ON(!zcache_comp_pcpu_tfms);
+ tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
+ BUG_ON(!tfm);
+ switch (op) {
+ case ZCACHE_COMPOP_COMPRESS:
+ ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
+ break;
+ case ZCACHE_COMPOP_DECOMPRESS:
+ ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
+ break;
+ }
+ put_cpu();
+ return ret;
+}
+
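
For context, the synchronous crypto compression interface that
zcache_comp_op() wraps follows a simple alloc/use/free pattern. A minimal,
self-contained sketch (a hypothetical helper, not part of this patch; the
"lzo" name assumes CRYPTO_LZO is available):

	#include <linux/crypto.h>
	#include <linux/err.h>

	/* Illustrative only: compress slen bytes from src into dst. */
	static int example_compress(const u8 *src, unsigned int slen,
				    u8 *dst, unsigned int *dlen)
	{
		struct crypto_comp *tfm;
		int ret;

		tfm = crypto_alloc_comp("lzo", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);
		/* *dlen is in/out: buffer capacity in, compressed size out */
		ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
		crypto_free_comp(tfm);
		return ret;
	}

zcache keeps one tfm per CPU instead of allocating per call, which is why
the wrapper above only does the per-cpu lookup and the op dispatch.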
/**********
* Compression buddies ("zbud") provides for packing two (or, possibly
* in the future, more) compressed ephemeral pages into a single "raw"
@@ -407,7 +441,7 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
{
struct zbud_page *zbpg;
unsigned budnum = zbud_budnum(zh);
- size_t out_len = PAGE_SIZE;
+ unsigned int out_len = PAGE_SIZE;
char *to_va, *from_va;
unsigned size;
int ret = 0;
@@ -424,8 +458,9 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
to_va = kmap_atomic(page, KM_USER0);
size = zh->size;
from_va = zbud_data(zh, size);
- ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
- BUG_ON(ret != LZO_E_OK);
+ ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
+ to_va, &out_len);
+ BUG_ON(ret);
BUG_ON(out_len != PAGE_SIZE);
kunmap_atomic(to_va, KM_USER0);
out:
@@ -622,8 +657,8 @@ static int zbud_show_cumul_chunk_counts(char *buf)
#endif
/**********
- * This "zv" PAM implementation combines the TLSF-based xvMalloc
- * with lzo1x compression to maximize the amount of data that can
+ * This "zv" PAM implementation combines the slab-based zsmalloc
+ * with the crypto compression API to maximize the amount of data that can
* be packed into a physical page.
*
* Zv represents a PAM page with the index and object (plus a "size" value
@@ -636,6 +671,7 @@ struct zv_hdr {
uint32_t pool_id;
struct tmem_oid oid;
uint32_t index;
+ size_t size;
DECL_SENTINEL
};
@@ -657,72 +693,74 @@ static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
static atomic_t zv_curr_dist_counts[NCHUNKS];
static atomic_t zv_cumul_dist_counts[NCHUNKS];
-static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id,
+static struct zv_hdr *zv_create(struct zs_pool *pool, uint32_t pool_id,
struct tmem_oid *oid, uint32_t index,
void *cdata, unsigned clen)
{
- struct page *page;
- struct zv_hdr *zv = NULL;
- uint32_t offset;
- int alloc_size = clen + sizeof(struct zv_hdr);
- int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
- int ret;
+ struct zv_hdr *zv;
+ u32 size = clen + sizeof(struct zv_hdr);
+ int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
+ void *handle = NULL;
+ char *buf;
BUG_ON(!irqs_disabled());
BUG_ON(chunks >= NCHUNKS);
- ret = xv_malloc(xvpool, alloc_size,
- &page, &offset, ZCACHE_GFP_MASK);
- if (unlikely(ret))
+ handle = zs_malloc(pool, size);
+ if (!handle)
goto out;
atomic_inc(&zv_curr_dist_counts[chunks]);
atomic_inc(&zv_cumul_dist_counts[chunks]);
- zv = kmap_atomic(page, KM_USER0) + offset;
+ zv = (struct zv_hdr *)((char *)cdata - sizeof(*zv));
zv->index = index;
zv->oid = *oid;
zv->pool_id = pool_id;
+ zv->size = clen;
SET_SENTINEL(zv, ZVH);
- memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
- kunmap_atomic(zv, KM_USER0);
+ buf = zs_map_object(pool, handle);
+ memcpy(buf, zv, clen + sizeof(*zv));
+ zs_unmap_object(pool, handle);
out:
- return zv;
+ return handle;
}
-static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
+static void zv_free(struct zs_pool *pool, void *handle)
{
unsigned long flags;
- struct page *page;
- uint32_t offset;
- uint16_t size = xv_get_object_size(zv);
- int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
+ struct zv_hdr *zv;
+ uint16_t size;
+ int chunks;
+ zv = zs_map_object(pool, handle);
ASSERT_SENTINEL(zv, ZVH);
+ size = zv->size + sizeof(struct zv_hdr);
+ INVERT_SENTINEL(zv, ZVH);
+ zs_unmap_object(pool, handle);
+
+ chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
BUG_ON(chunks >= NCHUNKS);
atomic_dec(&zv_curr_dist_counts[chunks]);
- size -= sizeof(*zv);
- BUG_ON(size == 0);
- INVERT_SENTINEL(zv, ZVH);
- page = virt_to_page(zv);
- offset = (unsigned long)zv & ~PAGE_MASK;
+
local_irq_save(flags);
- xv_free(xvpool, page, offset);
+ zs_free(pool, handle);
local_irq_restore(flags);
}
-static void zv_decompress(struct page *page, struct zv_hdr *zv)
+static void zv_decompress(struct page *page, void *handle)
{
- size_t clen = PAGE_SIZE;
+ unsigned int clen = PAGE_SIZE;
char *to_va;
- unsigned size;
int ret;
+ struct zv_hdr *zv;
+ zv = zs_map_object(zcache_host.zspool, handle);
+ BUG_ON(zv->size == 0);
ASSERT_SENTINEL(zv, ZVH);
- size = xv_get_object_size(zv) - sizeof(*zv);
- BUG_ON(size == 0);
to_va = kmap_atomic(page, KM_USER0);
- ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
- size, to_va, &clen);
+ ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv),
+ zv->size, to_va, &clen);
kunmap_atomic(to_va, KM_USER0);
- BUG_ON(ret != LZO_E_OK);
+ zs_unmap_object(zcache_host.zspool, handle);
+ BUG_ON(ret);
BUG_ON(clen != PAGE_SIZE);
}
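
All three zv_* conversions above follow the same zsmalloc discipline: the
value returned by zs_malloc() is an opaque handle rather than a pointer, so
every CPU access is bracketed by zs_map_object()/zs_unmap_object(). Boiled
down (a sketch of the staging-era API exactly as this patch uses it; error
handling trimmed):

	void *handle;
	char *buf;

	handle = zs_malloc(pool, len);		/* opaque handle, NULL on failure */
	if (!handle)
		return -ENOMEM;
	buf = zs_map_object(pool, handle);	/* transient mapping for CPU access */
	memcpy(buf, data, len);
	zs_unmap_object(pool, handle);		/* keep mappings short-lived */
	/* later, when the object is dropped: */
	zs_free(pool, handle);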
@@ -948,8 +986,8 @@ int zcache_new_client(uint16_t cli_id)
goto out;
cli->allocated = 1;
#ifdef CONFIG_FRONTSWAP
- cli->xvpool = xv_create_pool();
- if (cli->xvpool == NULL)
+ cli->zspool = zs_create_pool("zcache", ZCACHE_GFP_MASK);
+ if (cli->zspool == NULL)
goto out;
#endif
ret = 0;
@@ -1180,7 +1218,7 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
}
/* reject if mean compression is too poor */
if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
- total_zsize = xv_get_total_size_bytes(cli->xvpool);
+ total_zsize = zs_get_total_size_bytes(cli->zspool);
zv_mean_zsize = div_u64(total_zsize,
curr_pers_pampd_count);
if (zv_mean_zsize > zv_max_mean_zsize) {
@@ -1188,7 +1226,7 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
goto out;
}
}
- pampd = (void *)zv_create(cli->xvpool, pool->pool_id,
+ pampd = (void *)zv_create(cli->zspool, pool->pool_id,
oid, index, cdata, clen);
if (pampd == NULL)
goto out;
@@ -1246,7 +1284,7 @@ static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
atomic_dec(&zcache_curr_eph_pampd_count);
BUG_ON(atomic_read(&zcache_curr_eph_pampd_count) < 0);
} else {
- zv_free(cli->xvpool, (struct zv_hdr *)pampd);
+ zv_free(cli->zspool, pampd);
atomic_dec(&zcache_curr_pers_pampd_count);
BUG_ON(atomic_read(&zcache_curr_pers_pampd_count) < 0);
}
@@ -1285,25 +1323,24 @@ static struct tmem_pamops zcache_pamops = {
* zcache compression/decompression and related per-cpu stuff
*/
-#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
-#define LZO_DSTMEM_PAGE_ORDER 1
-static DEFINE_PER_CPU(unsigned char *, zcache_workmem);
static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
+#define ZCACHE_DSTMEM_ORDER 1
static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
{
int ret = 0;
unsigned char *dmem = __get_cpu_var(zcache_dstmem);
- unsigned char *wmem = __get_cpu_var(zcache_workmem);
char *from_va;
BUG_ON(!irqs_disabled());
- if (unlikely(dmem == NULL || wmem == NULL))
- goto out; /* no buffer, so can't compress */
+ if (unlikely(dmem == NULL))
+ goto out; /* no buffer or no compressor so can't compress */
+ *out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
from_va = kmap_atomic(from, KM_USER0);
mb();
- ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
- BUG_ON(ret != LZO_E_OK);
+ ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
+ (unsigned int *)out_len);
+ BUG_ON(ret);
*out_va = dmem;
kunmap_atomic(from_va, KM_USER0);
ret = 1;
@@ -1311,29 +1348,48 @@ out:
return ret;
}
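
One subtlety in the new zcache_compress(): unlike lzo1x_1_compress(), which
never took a destination capacity, crypto_comp_compress() treats its length
argument as in/out. The caller must therefore seed *out_len with the buffer
size, which is why it is set to PAGE_SIZE << ZCACHE_DSTMEM_ORDER (two pages,
leaving headroom for incompressible data) before the call. In miniature:

	unsigned int dlen = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;	/* capacity in */
	ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, src, PAGE_SIZE, dst, &dlen);
	/* on success, dlen now holds the compressed size */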
+static int zcache_comp_cpu_up(int cpu)
+{
+ struct crypto_comp *tfm;
+
+ tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
+ if (IS_ERR(tfm))
+ return NOTIFY_BAD;
+ *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
+ return NOTIFY_OK;
+}
+
+static void zcache_comp_cpu_down(int cpu)
+{
+ struct crypto_comp *tfm;
+
+ tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
+ crypto_free_comp(tfm);
+ *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
+}
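
The transforms are per-CPU because a crypto_comp tfm holds per-instance
state (the lzo driver, for one, keeps its work memory in the tfm context,
which is presumably why the old per-CPU zcache_workmem buffer disappears
below) and so must not be used from two CPUs at once. zcache_comp_op()
enforces this by pinning the caller for the duration of the operation:

	tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu()); /* disables preemption */
	ret = crypto_comp_compress(tfm, src, slen, dst, &dlen);
	put_cpu();					      /* re-enables it */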
static int zcache_cpu_notifier(struct notifier_block *nb,
unsigned long action, void *pcpu)
{
- int cpu = (long)pcpu;
+ int ret, cpu = (long)pcpu;
struct zcache_preload *kp;
switch (action) {
case CPU_UP_PREPARE:
+ ret = zcache_comp_cpu_up(cpu);
+ if (ret != NOTIFY_OK) {
+ pr_err("zcache: can't allocate compressor transform\n");
+ return ret;
+ }
per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_REPEAT,
- LZO_DSTMEM_PAGE_ORDER),
- per_cpu(zcache_workmem, cpu) =
- kzalloc(LZO1X_MEM_COMPRESS,
- GFP_KERNEL | __GFP_REPEAT);
+ GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
break;
case CPU_DEAD:
case CPU_UP_CANCELED:
+ zcache_comp_cpu_down(cpu);
free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
- LZO_DSTMEM_PAGE_ORDER);
+ ZCACHE_DSTMEM_ORDER);
per_cpu(zcache_dstmem, cpu) = NULL;
- kfree(per_cpu(zcache_workmem, cpu));
- per_cpu(zcache_workmem, cpu) = NULL;
kp = &per_cpu(zcache_preloads, cpu);
while (kp->nr) {
kmem_cache_free(zcache_objnode_cache,
@@ -1918,6 +1974,44 @@ static int __init no_frontswap(char *s)
__setup("nofrontswap", no_frontswap);
+static int __init enable_zcache_compressor(char *s)
+{
+ strncpy(zcache_comp_name, s, ZCACHE_COMP_NAME_SZ);
+ zcache_enabled = 1;
+ return 1;
+}
+__setup("zcache=", enable_zcache_compressor);
+
+
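
With this hook the compressor becomes a boot-time choice, and any
compression algorithm registered with the crypto API should work. For
example, appending the following to the kernel command line would select
deflate (assuming CRYPTO_DEFLATE is built in); an empty or unsupported name
falls back to "lzo" in zcache_comp_init() below:

	zcache=deflate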
+static int zcache_comp_init(void)
+{
+ int ret = 0;
+
+ /* check crypto algorithm */
+ if (*zcache_comp_name != '\0') {
+ ret = crypto_has_comp(zcache_comp_name, 0, 0);
+ if (!ret)
+ pr_info("zcache: %s not supported\n",
+ zcache_comp_name);
+ }
+ if (!ret)
+ strcpy(zcache_comp_name, "lzo");
+ ret = crypto_has_comp(zcache_comp_name, 0, 0);
+ if (!ret) {
+ ret = 1;
+ goto out;
+ }
+ pr_info("zcache: using %s compressor\n", zcache_comp_name);
+
+ /* alloc percpu transforms */
+ ret = 0;
+ zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
+ if (!zcache_comp_pcpu_tfms)
+ ret = 1;
+out:
+ return ret;
+}
+
static int __init zcache_init(void)
{
int ret = 0;
@@ -1940,6 +2034,11 @@ static int __init zcache_init(void)
pr_err("zcache: can't register cpu notifier\n");
goto out;
}
+ ret = zcache_comp_init();
+ if (ret) {
+ pr_err("zcache: compressor initialization failed\n");
+ goto out;
+ }
for_each_online_cpu(cpu) {
void *pcpu = (void *)(long)cpu;
zcache_cpu_notifier(&zcache_cpu_notifier_block,
@@ -1975,7 +2074,7 @@ static int __init zcache_init(void)
old_ops = zcache_frontswap_register_ops();
pr_info("zcache: frontswap enabled using kernel "
- "transcendent memory and xvmalloc\n");
+ "transcendent memory and zsmalloc\n");
if (old_ops.init != NULL)
pr_warning("zcache: frontswap_ops overridden");
}