Diffstat (limited to 'drivers/s390/crypto')
 -rw-r--r--  drivers/s390/crypto/Makefile            |    1
 -rw-r--r--  drivers/s390/crypto/ap_bus.c            |   14
 -rw-r--r--  drivers/s390/crypto/ap_bus.h            |    9
 -rw-r--r--  drivers/s390/crypto/ap_card.c           |    8
 -rw-r--r--  drivers/s390/crypto/ap_queue.c          |   11
 -rw-r--r--  drivers/s390/crypto/pkey_api.c          | 2065
 -rw-r--r--  drivers/s390/crypto/vfio_ap_drv.c       |    2
 -rw-r--r--  drivers/s390/crypto/vfio_ap_ops.c       |    2
 -rw-r--r--  drivers/s390/crypto/zcrypt_api.c        |   98
 -rw-r--r--  drivers/s390/crypto/zcrypt_api.h        |   11
 -rw-r--r--  drivers/s390/crypto/zcrypt_ccamisc.c    | 1765
 -rw-r--r--  drivers/s390/crypto/zcrypt_ccamisc.h    |  218
 -rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c      |    1
 -rw-r--r--  drivers/s390/crypto/zcrypt_cex2c.c      |    2
 -rw-r--r--  drivers/s390/crypto/zcrypt_cex4.c       |  412
 -rw-r--r--  drivers/s390/crypto/zcrypt_ep11misc.c   | 1293
 -rw-r--r--  drivers/s390/crypto/zcrypt_ep11misc.h   |  124
 -rw-r--r--  drivers/s390/crypto/zcrypt_error.h      |    2
18 files changed, 4996 insertions, 1042 deletions
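
For orientation before the per-file hunks: the pkey_api.c changes below add a second generation of pkey ioctls (PKEY_GENSECK2, PKEY_CLR2SECK2, PKEY_VERIFYKEY2, PKEY_KBLOB2PROTK2 and PKEY_APQNS4K). A minimal, hypothetical user-space sketch of PKEY_GENSECK2 follows; the struct and constant names are taken from the hunks, while the <asm/pkey.h> uapi header and the /dev/pkey device node are assumptions and not part of this diff.

/* Hypothetical example, not part of the patch: generate a CCA AES-256
 * secure key on APQN (card 2, domain 17) via the new PKEY_GENSECK2
 * ioctl handled in pkey_api.c below. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/pkey.h>   /* assumed uapi header providing the pkey structs */

int main(void)
{
	struct pkey_apqn apqn = { .card = 2, .domain = 17 };
	unsigned char keybuf[8192];    /* mirrors KEYBLOBBUFSIZE in the driver */
	struct pkey_genseck2 gs;
	int fd, rc;

	memset(&gs, 0, sizeof(gs));
	gs.apqns = &apqn;              /* list of candidate APQNs */
	gs.apqn_entries = 1;
	gs.type = PKEY_TYPE_CCA_DATA;  /* or PKEY_TYPE_CCA_CIPHER / PKEY_TYPE_EP11 */
	gs.size = PKEY_SIZE_AES_256;
	gs.keygenflags = 0;
	gs.key = keybuf;               /* out: secure key blob */
	gs.keylen = sizeof(keybuf);    /* in: buffer size, out: blob length */

	fd = open("/dev/pkey", O_RDWR);
	if (fd < 0)
		return 1;
	rc = ioctl(fd, PKEY_GENSECK2, &gs);
	if (rc == 0)
		printf("generated secure key blob, %u bytes\n",
		       (unsigned int) gs.keylen);
	close(fd);
	return rc ? 1 : 0;
}
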
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 6ccd93d0b1cb..22d2db690cd3 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -8,6 +8,7 @@ obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o
# zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o
zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
+zcrypt-objs += zcrypt_ccamisc.o zcrypt_ep11misc.o
obj-$(CONFIG_ZCRYPT) += zcrypt.o
# adapter drivers depend on ap.o and zcrypt.o
obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index a76b8a8bcbbb..5256e3ce84e5 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -793,8 +793,6 @@ static int ap_device_probe(struct device *dev)
drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres)
return -ENODEV;
- /* (re-)init queue's state machine */
- ap_queue_reinit_state(to_ap_queue(dev));
}
/* Add queue/card to list of active queues/cards */
@@ -1322,24 +1320,24 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
/* < CEX2A is not supported */
if (rawtype < AP_DEVICE_TYPE_CEX2A)
return 0;
- /* up to CEX6 known and fully supported */
- if (rawtype <= AP_DEVICE_TYPE_CEX6)
+ /* up to CEX7 known and fully supported */
+ if (rawtype <= AP_DEVICE_TYPE_CEX7)
return rawtype;
/*
- * unknown new type > CEX6, check for compatibility
+ * unknown new type > CEX7, check for compatibility
* to the highest known and supported type which is
- * currently CEX6 with the help of the QACT function.
+ * currently CEX7 with the help of the QACT function.
*/
if (ap_qact_available()) {
struct ap_queue_status status;
union ap_qact_ap_info apinfo = {0};
apinfo.mode = (func >> 26) & 0x07;
- apinfo.cat = AP_DEVICE_TYPE_CEX6;
+ apinfo.cat = AP_DEVICE_TYPE_CEX7;
status = ap_qact(qid, 0, &apinfo);
if (status.response_code == AP_RESPONSE_NORMAL
&& apinfo.cat >= AP_DEVICE_TYPE_CEX2A
- && apinfo.cat <= AP_DEVICE_TYPE_CEX6)
+ && apinfo.cat <= AP_DEVICE_TYPE_CEX7)
comp_type = apinfo.cat;
}
if (!comp_type)
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 6f3cf37776ca..4348fdff1c61 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * Copyright IBM Corp. 2006, 2012
+ * Copyright IBM Corp. 2006, 2019
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
@@ -63,6 +63,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
#define AP_DEVICE_TYPE_CEX4 10
#define AP_DEVICE_TYPE_CEX5 11
#define AP_DEVICE_TYPE_CEX6 12
+#define AP_DEVICE_TYPE_CEX7 13
/*
* Known function facilities
@@ -161,7 +162,7 @@ struct ap_card {
unsigned int functions; /* AP device function bitfield. */
int queue_depth; /* AP queue depth.*/
int id; /* AP card number. */
- atomic_t total_request_count; /* # requests ever for this AP device.*/
+ atomic64_t total_request_count; /* # requests ever for this AP device.*/
};
#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
@@ -178,7 +179,7 @@ struct ap_queue {
enum ap_state state; /* State of the AP device. */
int pendingq_count; /* # requests on pendingq list. */
int requestq_count; /* # requests on requestq list. */
- int total_request_count; /* # requests ever for this AP device.*/
+ u64 total_request_count; /* # requests ever for this AP device.*/
int request_timeout; /* Request timeout in jiffies. */
struct timer_list timeout; /* Timer for request timeouts. */
struct list_head pendingq; /* List of message sent to AP queue. */
@@ -260,7 +261,7 @@ void ap_queue_prepare_remove(struct ap_queue *aq);
void ap_queue_remove(struct ap_queue *aq);
void ap_queue_suspend(struct ap_device *ap_dev);
void ap_queue_resume(struct ap_device *ap_dev);
-void ap_queue_reinit_state(struct ap_queue *aq);
+void ap_queue_init_state(struct ap_queue *aq);
struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
int comp_device_type, unsigned int functions);
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 63b4cc6cd7e5..e85bfca1ed16 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -63,13 +63,13 @@ static ssize_t request_count_show(struct device *dev,
char *buf)
{
struct ap_card *ac = to_ap_card(dev);
- unsigned int req_cnt;
+ u64 req_cnt;
req_cnt = 0;
spin_lock_bh(&ap_list_lock);
- req_cnt = atomic_read(&ac->total_request_count);
+ req_cnt = atomic64_read(&ac->total_request_count);
spin_unlock_bh(&ap_list_lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
+ return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
}
static ssize_t request_count_store(struct device *dev,
@@ -83,7 +83,7 @@ static ssize_t request_count_store(struct device *dev,
for_each_ap_queue(aq, ac)
aq->total_request_count = 0;
spin_unlock_bh(&ap_list_lock);
- atomic_set(&ac->total_request_count, 0);
+ atomic64_set(&ac->total_request_count, 0);
return count;
}
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index dad2be333d82..a317ab484932 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -479,12 +479,12 @@ static ssize_t request_count_show(struct device *dev,
char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
- unsigned int req_cnt;
+ u64 req_cnt;
spin_lock_bh(&aq->lock);
req_cnt = aq->total_request_count;
spin_unlock_bh(&aq->lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
+ return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
}
static ssize_t request_count_store(struct device *dev,
@@ -638,7 +638,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
aq->ap_dev.device.type = &ap_queue_type;
aq->ap_dev.device_type = device_type;
aq->qid = qid;
- aq->state = AP_STATE_RESET_START;
+ aq->state = AP_STATE_UNBOUND;
aq->interrupt = AP_INTR_DISABLED;
spin_lock_init(&aq->lock);
INIT_LIST_HEAD(&aq->list);
@@ -676,7 +676,7 @@ void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
list_add_tail(&ap_msg->list, &aq->requestq);
aq->requestq_count++;
aq->total_request_count++;
- atomic_inc(&aq->card->total_request_count);
+ atomic64_inc(&aq->card->total_request_count);
/* Send/receive as many request from the queue as possible. */
ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
spin_unlock_bh(&aq->lock);
@@ -771,10 +771,11 @@ void ap_queue_remove(struct ap_queue *aq)
spin_unlock_bh(&aq->lock);
}
-void ap_queue_reinit_state(struct ap_queue *aq)
+void ap_queue_init_state(struct ap_queue *aq)
{
spin_lock_bh(&aq->lock);
aq->state = AP_STATE_RESET_START;
ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
+EXPORT_SYMBOL(ap_queue_init_state);
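
The ap_bus.h, ap_card.c and ap_queue.c hunks above also widen the per-card and per-queue total_request_count from 32 to 64 bits (atomic_t/int to atomic64_t/u64) and switch the sysfs output to %llu. As a rough, assumed illustration of why 32 bits can be tight: at 10,000 requests per second a 32-bit counter wraps after 2^32 / 10^4 ≈ 430,000 seconds, i.e. about five days of sustained load, whereas a 64-bit counter is effectively wrap-free.
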
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 7f418d2d8cdf..2f33c5fcf676 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -2,7 +2,7 @@
/*
* pkey device driver
*
- * Copyright IBM Corp. 2017
+ * Copyright IBM Corp. 2017,2019
* Author(s): Harald Freudenberger
*/
@@ -24,16 +24,15 @@
#include <crypto/aes.h>
#include "zcrypt_api.h"
+#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 protected key interface");
-/* Size of parameter block used for all cca requests/replies */
-#define PARMBSIZE 512
-
-/* Size of vardata block used for some of the cca requests/replies */
-#define VARDATASIZE 4096
+#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */
+#define MAXAPQNSINLIST 64 /* max 64 apqns within an apqn list */
/* mask of available pckmo subfunctions, fetched once at module init */
static cpacf_mask_t pckmo_functions;
@@ -62,40 +61,6 @@ static void __exit pkey_debug_exit(void)
debug_unregister(debug_info);
}
-/* Key token types */
-#define TOKTYPE_NON_CCA 0x00 /* Non-CCA key token */
-#define TOKTYPE_CCA_INTERNAL 0x01 /* CCA internal key token */
-
-/* For TOKTYPE_NON_CCA: */
-#define TOKVER_PROTECTED_KEY 0x01 /* Protected key token */
-
-/* For TOKTYPE_CCA_INTERNAL: */
-#define TOKVER_CCA_AES 0x04 /* CCA AES key token */
-
-/* header part of a key token */
-struct keytoken_header {
- u8 type; /* one of the TOKTYPE values */
- u8 res0[3];
- u8 version; /* one of the TOKVER values */
- u8 res1[3];
-} __packed;
-
-/* inside view of a secure key token (only type 0x01 version 0x04) */
-struct secaeskeytoken {
- u8 type; /* 0x01 for internal key token */
- u8 res0[3];
- u8 version; /* should be 0x04 */
- u8 res1[1];
- u8 flag; /* key flags */
- u8 res2[1];
- u64 mkvp; /* master key verification pattern */
- u8 key[32]; /* key value (encrypted) */
- u8 cv[8]; /* control vector */
- u16 bitsize; /* key bit size */
- u16 keysize; /* key byte size */
- u8 tvv[4]; /* token validation value */
-} __packed;
-
/* inside view of a protected key token (only type 0x00 version 0x01) */
struct protaeskeytoken {
u8 type; /* 0x00 for PAES specific key tokens */
@@ -107,558 +72,23 @@ struct protaeskeytoken {
u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
} __packed;
-/*
- * Simple check if the token is a valid CCA secure AES key
- * token. If keybitsize is given, the bitsize of the key is
- * also checked. Returns 0 on success or errno value on failure.
- */
-static int check_secaeskeytoken(const u8 *token, int keybitsize)
-{
- struct secaeskeytoken *t = (struct secaeskeytoken *) token;
-
- if (t->type != TOKTYPE_CCA_INTERNAL) {
- DEBUG_ERR(
- "%s secure token check failed, type mismatch 0x%02x != 0x%02x\n",
- __func__, (int) t->type, TOKTYPE_CCA_INTERNAL);
- return -EINVAL;
- }
- if (t->version != TOKVER_CCA_AES) {
- DEBUG_ERR(
- "%s secure token check failed, version mismatch 0x%02x != 0x%02x\n",
- __func__, (int) t->version, TOKVER_CCA_AES);
- return -EINVAL;
- }
- if (keybitsize > 0 && t->bitsize != keybitsize) {
- DEBUG_ERR(
- "%s secure token check failed, bitsize mismatch %d != %d\n",
- __func__, (int) t->bitsize, keybitsize);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * Allocate consecutive memory for request CPRB, request param
- * block, reply CPRB and reply param block and fill in values
- * for the common fields. Returns 0 on success or errno value
- * on failure.
- */
-static int alloc_and_prep_cprbmem(size_t paramblen,
- u8 **pcprbmem,
- struct CPRBX **preqCPRB,
- struct CPRBX **prepCPRB)
-{
- u8 *cprbmem;
- size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen;
- struct CPRBX *preqcblk, *prepcblk;
-
- /*
- * allocate consecutive memory for request CPRB, request param
- * block, reply CPRB and reply param block
- */
- cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL);
- if (!cprbmem)
- return -ENOMEM;
-
- preqcblk = (struct CPRBX *) cprbmem;
- prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen);
-
- /* fill request cprb struct */
- preqcblk->cprb_len = sizeof(struct CPRBX);
- preqcblk->cprb_ver_id = 0x02;
- memcpy(preqcblk->func_id, "T2", 2);
- preqcblk->rpl_msgbl = cprbplusparamblen;
- if (paramblen) {
- preqcblk->req_parmb =
- ((u8 *) preqcblk) + sizeof(struct CPRBX);
- preqcblk->rpl_parmb =
- ((u8 *) prepcblk) + sizeof(struct CPRBX);
- }
-
- *pcprbmem = cprbmem;
- *preqCPRB = preqcblk;
- *prepCPRB = prepcblk;
-
- return 0;
-}
-
-/*
- * Free the cprb memory allocated with the function above.
- * If the scrub value is not zero, the memory is filled
- * with zeros before freeing (useful if there was some
- * clear key material in there).
- */
-static void free_cprbmem(void *mem, size_t paramblen, int scrub)
-{
- if (scrub)
- memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen));
- kfree(mem);
-}
-
-/*
- * Helper function to prepare the xcrb struct
- */
-static inline void prep_xcrb(struct ica_xcRB *pxcrb,
- u16 cardnr,
- struct CPRBX *preqcblk,
- struct CPRBX *prepcblk)
-{
- memset(pxcrb, 0, sizeof(*pxcrb));
- pxcrb->agent_ID = 0x4341; /* 'CA' */
- pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr);
- pxcrb->request_control_blk_length =
- preqcblk->cprb_len + preqcblk->req_parml;
- pxcrb->request_control_blk_addr = (void __user *) preqcblk;
- pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl;
- pxcrb->reply_control_blk_addr = (void __user *) prepcblk;
-}
-
-/*
- * Helper function which calls zcrypt_send_cprb with
- * memory management segment adjusted to kernel space
- * so that the copy_from_user called within this
- * function do in fact copy from kernel space.
- */
-static inline int _zcrypt_send_cprb(struct ica_xcRB *xcrb)
-{
- int rc;
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
- rc = zcrypt_send_cprb(xcrb);
- set_fs(old_fs);
-
- return rc;
-}
-
-/*
- * Generate (random) AES secure key.
- */
-int pkey_genseckey(u16 cardnr, u16 domain,
- u32 keytype, struct pkey_seckey *seckey)
-{
- int i, rc, keysize;
- int seckeysize;
- u8 *mem;
- struct CPRBX *preqcblk, *prepcblk;
- struct ica_xcRB xcrb;
- struct kgreqparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- struct lv1 {
- u16 len;
- char key_form[8];
- char key_length[8];
- char key_type1[8];
- char key_type2[8];
- } lv1;
- struct lv2 {
- u16 len;
- struct keyid {
- u16 len;
- u16 attr;
- u8 data[SECKEYBLOBSIZE];
- } keyid[6];
- } lv2;
- } *preqparm;
- struct kgrepparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- struct lv3 {
- u16 len;
- u16 keyblocklen;
- struct {
- u16 toklen;
- u16 tokattr;
- u8 tok[0];
- /* ... some more data ... */
- } keyblock;
- } lv3;
- } *prepparm;
-
- /* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
- if (rc)
- return rc;
-
- /* fill request cprb struct */
- preqcblk->domain = domain;
-
- /* fill request cprb param block with KG request */
- preqparm = (struct kgreqparm *) preqcblk->req_parmb;
- memcpy(preqparm->subfunc_code, "KG", 2);
- preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
- preqparm->lv1.len = sizeof(struct lv1);
- memcpy(preqparm->lv1.key_form, "OP ", 8);
- switch (keytype) {
- case PKEY_KEYTYPE_AES_128:
- keysize = 16;
- memcpy(preqparm->lv1.key_length, "KEYLN16 ", 8);
- break;
- case PKEY_KEYTYPE_AES_192:
- keysize = 24;
- memcpy(preqparm->lv1.key_length, "KEYLN24 ", 8);
- break;
- case PKEY_KEYTYPE_AES_256:
- keysize = 32;
- memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8);
- break;
- default:
- DEBUG_ERR(
- "%s unknown/unsupported keytype %d\n",
- __func__, keytype);
- rc = -EINVAL;
- goto out;
- }
- memcpy(preqparm->lv1.key_type1, "AESDATA ", 8);
- preqparm->lv2.len = sizeof(struct lv2);
- for (i = 0; i < 6; i++) {
- preqparm->lv2.keyid[i].len = sizeof(struct keyid);
- preqparm->lv2.keyid[i].attr = (i == 2 ? 0x30 : 0x10);
- }
- preqcblk->req_parml = sizeof(struct kgreqparm);
-
- /* fill xcrb struct */
- prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
-
- /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
- if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
- __func__, (int) cardnr, (int) domain, rc);
- goto out;
- }
-
- /* check response returncode and reasoncode */
- if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s secure key generate failure, card response %d/%d\n",
- __func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
- rc = -EIO;
- goto out;
- }
-
- /* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct kgrepparm *) prepcblk->rpl_parmb;
-
- /* check length of the returned secure key token */
- seckeysize = prepparm->lv3.keyblock.toklen
- - sizeof(prepparm->lv3.keyblock.toklen)
- - sizeof(prepparm->lv3.keyblock.tokattr);
- if (seckeysize != SECKEYBLOBSIZE) {
- DEBUG_ERR(
- "%s secure token size mismatch %d != %d bytes\n",
- __func__, seckeysize, SECKEYBLOBSIZE);
- rc = -EIO;
- goto out;
- }
-
- /* check secure key token */
- rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize);
- if (rc) {
- rc = -EIO;
- goto out;
- }
-
- /* copy the generated secure key token */
- memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
-
-out:
- free_cprbmem(mem, PARMBSIZE, 0);
- return rc;
-}
-EXPORT_SYMBOL(pkey_genseckey);
-
-/*
- * Generate an AES secure key with given key value.
- */
-int pkey_clr2seckey(u16 cardnr, u16 domain, u32 keytype,
- const struct pkey_clrkey *clrkey,
- struct pkey_seckey *seckey)
-{
- int rc, keysize, seckeysize;
- u8 *mem;
- struct CPRBX *preqcblk, *prepcblk;
- struct ica_xcRB xcrb;
- struct cmreqparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- char rule_array[8];
- struct lv1 {
- u16 len;
- u8 clrkey[0];
- } lv1;
- struct lv2 {
- u16 len;
- struct keyid {
- u16 len;
- u16 attr;
- u8 data[SECKEYBLOBSIZE];
- } keyid;
- } lv2;
- } *preqparm;
- struct lv2 *plv2;
- struct cmrepparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- struct lv3 {
- u16 len;
- u16 keyblocklen;
- struct {
- u16 toklen;
- u16 tokattr;
- u8 tok[0];
- /* ... some more data ... */
- } keyblock;
- } lv3;
- } *prepparm;
-
- /* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
- if (rc)
- return rc;
-
- /* fill request cprb struct */
- preqcblk->domain = domain;
-
- /* fill request cprb param block with CM request */
- preqparm = (struct cmreqparm *) preqcblk->req_parmb;
- memcpy(preqparm->subfunc_code, "CM", 2);
- memcpy(preqparm->rule_array, "AES ", 8);
- preqparm->rule_array_len =
- sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
- switch (keytype) {
- case PKEY_KEYTYPE_AES_128:
- keysize = 16;
- break;
- case PKEY_KEYTYPE_AES_192:
- keysize = 24;
- break;
- case PKEY_KEYTYPE_AES_256:
- keysize = 32;
- break;
- default:
- DEBUG_ERR(
- "%s unknown/unsupported keytype %d\n",
- __func__, keytype);
- rc = -EINVAL;
- goto out;
- }
- preqparm->lv1.len = sizeof(struct lv1) + keysize;
- memcpy(preqparm->lv1.clrkey, clrkey->clrkey, keysize);
- plv2 = (struct lv2 *) (((u8 *) &preqparm->lv2) + keysize);
- plv2->len = sizeof(struct lv2);
- plv2->keyid.len = sizeof(struct keyid);
- plv2->keyid.attr = 0x30;
- preqcblk->req_parml = sizeof(struct cmreqparm) + keysize;
-
- /* fill xcrb struct */
- prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
-
- /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
- if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
- __func__, (int) cardnr, (int) domain, rc);
- goto out;
- }
-
- /* check response returncode and reasoncode */
- if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s clear key import failure, card response %d/%d\n",
- __func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
- rc = -EIO;
- goto out;
- }
-
- /* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct cmrepparm *) prepcblk->rpl_parmb;
-
- /* check length of the returned secure key token */
- seckeysize = prepparm->lv3.keyblock.toklen
- - sizeof(prepparm->lv3.keyblock.toklen)
- - sizeof(prepparm->lv3.keyblock.tokattr);
- if (seckeysize != SECKEYBLOBSIZE) {
- DEBUG_ERR(
- "%s secure token size mismatch %d != %d bytes\n",
- __func__, seckeysize, SECKEYBLOBSIZE);
- rc = -EIO;
- goto out;
- }
-
- /* check secure key token */
- rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize);
- if (rc) {
- rc = -EIO;
- goto out;
- }
-
- /* copy the generated secure key token */
- memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
-
-out:
- free_cprbmem(mem, PARMBSIZE, 1);
- return rc;
-}
-EXPORT_SYMBOL(pkey_clr2seckey);
-
-/*
- * Derive a proteced key from the secure key blob.
- */
-int pkey_sec2protkey(u16 cardnr, u16 domain,
- const struct pkey_seckey *seckey,
- struct pkey_protkey *protkey)
-{
- int rc;
- u8 *mem;
- struct CPRBX *preqcblk, *prepcblk;
- struct ica_xcRB xcrb;
- struct uskreqparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- struct lv1 {
- u16 len;
- u16 attr_len;
- u16 attr_flags;
- } lv1;
- struct lv2 {
- u16 len;
- u16 attr_len;
- u16 attr_flags;
- u8 token[0]; /* cca secure key token */
- } lv2 __packed;
- } *preqparm;
- struct uskrepparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- struct lv3 {
- u16 len;
- u16 attr_len;
- u16 attr_flags;
- struct cpacfkeyblock {
- u8 version; /* version of this struct */
- u8 flags[2];
- u8 algo;
- u8 form;
- u8 pad1[3];
- u16 keylen;
- u8 key[64]; /* the key (keylen bytes) */
- u16 keyattrlen;
- u8 keyattr[32];
- u8 pad2[1];
- u8 vptype;
- u8 vp[32]; /* verification pattern */
- } keyblock;
- } lv3 __packed;
- } *prepparm;
-
- /* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
- if (rc)
- return rc;
-
- /* fill request cprb struct */
- preqcblk->domain = domain;
-
- /* fill request cprb param block with USK request */
- preqparm = (struct uskreqparm *) preqcblk->req_parmb;
- memcpy(preqparm->subfunc_code, "US", 2);
- preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
- preqparm->lv1.len = sizeof(struct lv1);
- preqparm->lv1.attr_len = sizeof(struct lv1) - sizeof(preqparm->lv1.len);
- preqparm->lv1.attr_flags = 0x0001;
- preqparm->lv2.len = sizeof(struct lv2) + SECKEYBLOBSIZE;
- preqparm->lv2.attr_len = sizeof(struct lv2)
- - sizeof(preqparm->lv2.len) + SECKEYBLOBSIZE;
- preqparm->lv2.attr_flags = 0x0000;
- memcpy(preqparm->lv2.token, seckey->seckey, SECKEYBLOBSIZE);
- preqcblk->req_parml = sizeof(struct uskreqparm) + SECKEYBLOBSIZE;
-
- /* fill xcrb struct */
- prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
-
- /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
- if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
- __func__, (int) cardnr, (int) domain, rc);
- goto out;
- }
-
- /* check response returncode and reasoncode */
- if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s unwrap secure key failure, card response %d/%d\n",
- __func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
- rc = -EIO;
- goto out;
- }
- if (prepcblk->ccp_rscode != 0) {
- DEBUG_WARN(
- "%s unwrap secure key warning, card response %d/%d\n",
- __func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
- }
-
- /* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct uskrepparm *) prepcblk->rpl_parmb;
-
- /* check the returned keyblock */
- if (prepparm->lv3.keyblock.version != 0x01) {
- DEBUG_ERR(
- "%s reply param keyblock version mismatch 0x%02x != 0x01\n",
- __func__, (int) prepparm->lv3.keyblock.version);
- rc = -EIO;
- goto out;
- }
-
- /* copy the tanslated protected key */
- switch (prepparm->lv3.keyblock.keylen) {
- case 16+32:
- protkey->type = PKEY_KEYTYPE_AES_128;
- break;
- case 24+32:
- protkey->type = PKEY_KEYTYPE_AES_192;
- break;
- case 32+32:
- protkey->type = PKEY_KEYTYPE_AES_256;
- break;
- default:
- DEBUG_ERR("%s unknown/unsupported keytype %d\n",
- __func__, prepparm->lv3.keyblock.keylen);
- rc = -EIO;
- goto out;
- }
- protkey->len = prepparm->lv3.keyblock.keylen;
- memcpy(protkey->protkey, prepparm->lv3.keyblock.key, protkey->len);
-
-out:
- free_cprbmem(mem, PARMBSIZE, 0);
- return rc;
-}
-EXPORT_SYMBOL(pkey_sec2protkey);
+/* inside view of a clear key token (type 0x00 version 0x02) */
+struct clearaeskeytoken {
+ u8 type; /* 0x00 for PAES specific key tokens */
+ u8 res0[3];
+ u8 version; /* 0x02 for clear AES key token */
+ u8 res1[3];
+ u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
+ u32 len; /* bytes actually stored in clearkey[] */
+ u8 clearkey[0]; /* clear key value */
+} __packed;
/*
* Create a protected key from a clear key value.
*/
-int pkey_clr2protkey(u32 keytype,
- const struct pkey_clrkey *clrkey,
- struct pkey_protkey *protkey)
+static int pkey_clr2protkey(u32 keytype,
+ const struct pkey_clrkey *clrkey,
+ struct pkey_protkey *protkey)
{
long fc;
int keysize;
@@ -707,363 +137,132 @@ int pkey_clr2protkey(u32 keytype,
return 0;
}
-EXPORT_SYMBOL(pkey_clr2protkey);
/*
- * query cryptographic facility from adapter
- */
-static int query_crypto_facility(u16 cardnr, u16 domain,
- const char *keyword,
- u8 *rarray, size_t *rarraylen,
- u8 *varray, size_t *varraylen)
-{
- int rc;
- u16 len;
- u8 *mem, *ptr;
- struct CPRBX *preqcblk, *prepcblk;
- struct ica_xcRB xcrb;
- struct fqreqparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- char rule_array[8];
- struct lv1 {
- u16 len;
- u8 data[VARDATASIZE];
- } lv1;
- u16 dummylen;
- } *preqparm;
- size_t parmbsize = sizeof(struct fqreqparm);
- struct fqrepparm {
- u8 subfunc_code[2];
- u8 lvdata[0];
- } *prepparm;
-
- /* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk);
- if (rc)
- return rc;
-
- /* fill request cprb struct */
- preqcblk->domain = domain;
-
- /* fill request cprb param block with FQ request */
- preqparm = (struct fqreqparm *) preqcblk->req_parmb;
- memcpy(preqparm->subfunc_code, "FQ", 2);
- memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
- preqparm->rule_array_len =
- sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
- preqparm->lv1.len = sizeof(preqparm->lv1);
- preqparm->dummylen = sizeof(preqparm->dummylen);
- preqcblk->req_parml = parmbsize;
-
- /* fill xcrb struct */
- prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
-
- /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
- if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
- __func__, (int) cardnr, (int) domain, rc);
- goto out;
- }
-
- /* check response returncode and reasoncode */
- if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s unwrap secure key failure, card response %d/%d\n",
- __func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
- rc = -EIO;
- goto out;
- }
-
- /* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct fqrepparm *) prepcblk->rpl_parmb;
- ptr = prepparm->lvdata;
-
- /* check and possibly copy reply rule array */
- len = *((u16 *) ptr);
- if (len > sizeof(u16)) {
- ptr += sizeof(u16);
- len -= sizeof(u16);
- if (rarray && rarraylen && *rarraylen > 0) {
- *rarraylen = (len > *rarraylen ? *rarraylen : len);
- memcpy(rarray, ptr, *rarraylen);
- }
- ptr += len;
- }
- /* check and possible copy reply var array */
- len = *((u16 *) ptr);
- if (len > sizeof(u16)) {
- ptr += sizeof(u16);
- len -= sizeof(u16);
- if (varray && varraylen && *varraylen > 0) {
- *varraylen = (len > *varraylen ? *varraylen : len);
- memcpy(varray, ptr, *varraylen);
- }
- ptr += len;
- }
-
-out:
- free_cprbmem(mem, parmbsize, 0);
- return rc;
-}
-
-/*
- * Fetch the current and old mkvp values via
- * query_crypto_facility from adapter.
+ * Find card and transform secure key into protected key.
*/
-static int fetch_mkvp(u16 cardnr, u16 domain, u64 mkvp[2])
+static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey)
{
- int rc, found = 0;
- size_t rlen, vlen;
- u8 *rarray, *varray, *pg;
-
- pg = (u8 *) __get_free_page(GFP_KERNEL);
- if (!pg)
- return -ENOMEM;
- rarray = pg;
- varray = pg + PAGE_SIZE/2;
- rlen = vlen = PAGE_SIZE/2;
-
- rc = query_crypto_facility(cardnr, domain, "STATICSA",
- rarray, &rlen, varray, &vlen);
- if (rc == 0 && rlen > 8*8 && vlen > 184+8) {
- if (rarray[8*8] == '2') {
- /* current master key state is valid */
- mkvp[0] = *((u64 *)(varray + 184));
- mkvp[1] = *((u64 *)(varray + 172));
- found = 1;
- }
- }
-
- free_page((unsigned long) pg);
-
- return found ? 0 : -ENOENT;
-}
-
-/* struct to hold cached mkvp info for each card/domain */
-struct mkvp_info {
- struct list_head list;
- u16 cardnr;
- u16 domain;
- u64 mkvp[2];
-};
-
-/* a list with mkvp_info entries */
-static LIST_HEAD(mkvp_list);
-static DEFINE_SPINLOCK(mkvp_list_lock);
+ int rc, verify;
+ u16 cardnr, domain;
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
-static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 mkvp[2])
-{
- int rc = -ENOENT;
- struct mkvp_info *ptr;
-
- spin_lock_bh(&mkvp_list_lock);
- list_for_each_entry(ptr, &mkvp_list, list) {
- if (ptr->cardnr == cardnr &&
- ptr->domain == domain) {
- memcpy(mkvp, ptr->mkvp, 2 * sizeof(u64));
- rc = 0;
+ /*
+ * The cca_xxx2protkey call may fail when a card has been
+ * addressed where the master key was changed after last fetch
+ * of the mkvp into the cache. Try 3 times: first without verify,
+ * then with verify, and in the last round with verify where a match
+ * on the old master key verification pattern is no longer ignored.
+ */
+ for (verify = 0; verify < 3; verify++) {
+ rc = cca_findcard(key, &cardnr, &domain, verify);
+ if (rc < 0)
+ continue;
+ if (rc > 0 && verify < 2)
+ continue;
+ switch (hdr->version) {
+ case TOKVER_CCA_AES:
+ rc = cca_sec2protkey(cardnr, domain,
+ key, pkey->protkey,
+ &pkey->len, &pkey->type);
break;
- }
- }
- spin_unlock_bh(&mkvp_list_lock);
-
- return rc;
-}
-
-static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp[2])
-{
- int found = 0;
- struct mkvp_info *ptr;
-
- spin_lock_bh(&mkvp_list_lock);
- list_for_each_entry(ptr, &mkvp_list, list) {
- if (ptr->cardnr == cardnr &&
- ptr->domain == domain) {
- memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
- found = 1;
+ case TOKVER_CCA_VLSC:
+ rc = cca_cipher2protkey(cardnr, domain,
+ key, pkey->protkey,
+ &pkey->len, &pkey->type);
break;
+ default:
+ return -EINVAL;
}
- }
- if (!found) {
- ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
- if (!ptr) {
- spin_unlock_bh(&mkvp_list_lock);
- return;
- }
- ptr->cardnr = cardnr;
- ptr->domain = domain;
- memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
- list_add(&ptr->list, &mkvp_list);
- }
- spin_unlock_bh(&mkvp_list_lock);
-}
-
-static void mkvp_cache_scrub(u16 cardnr, u16 domain)
-{
- struct mkvp_info *ptr;
-
- spin_lock_bh(&mkvp_list_lock);
- list_for_each_entry(ptr, &mkvp_list, list) {
- if (ptr->cardnr == cardnr &&
- ptr->domain == domain) {
- list_del(&ptr->list);
- kfree(ptr);
+ if (rc == 0)
break;
- }
}
- spin_unlock_bh(&mkvp_list_lock);
-}
-static void __exit mkvp_cache_free(void)
-{
- struct mkvp_info *ptr, *pnext;
+ if (rc)
+ DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
- spin_lock_bh(&mkvp_list_lock);
- list_for_each_entry_safe(ptr, pnext, &mkvp_list, list) {
- list_del(&ptr->list);
- kfree(ptr);
- }
- spin_unlock_bh(&mkvp_list_lock);
+ return rc;
}
/*
- * Search for a matching crypto card based on the Master Key
- * Verification Pattern provided inside a secure key.
+ * Construct EP11 key with given clear key value.
*/
-int pkey_findcard(const struct pkey_seckey *seckey,
- u16 *pcardnr, u16 *pdomain, int verify)
+static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
+ u8 *keybuf, size_t *keybuflen)
{
- struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
- struct zcrypt_device_status_ext *device_status;
+ int i, rc;
u16 card, dom;
- u64 mkvp[2];
- int i, rc, oi = -1;
+ u32 nr_apqns, *apqns = NULL;
- /* mkvp must not be zero */
- if (t->mkvp == 0)
- return -EINVAL;
+ /* build a list of apqns suitable for ep11 keys with cpacf support */
+ rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, EP11_API_V, NULL);
+ if (rc)
+ goto out;
- /* fetch status of all crypto cards */
- device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
- sizeof(struct zcrypt_device_status_ext),
- GFP_KERNEL);
- if (!device_status)
- return -ENOMEM;
- zcrypt_device_status_mask_ext(device_status);
-
- /* walk through all crypto cards */
- for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
- card = AP_QID_CARD(device_status[i].qid);
- dom = AP_QID_QUEUE(device_status[i].qid);
- if (device_status[i].online &&
- device_status[i].functions & 0x04) {
- /* an enabled CCA Coprocessor card */
- /* try cached mkvp */
- if (mkvp_cache_fetch(card, dom, mkvp) == 0 &&
- t->mkvp == mkvp[0]) {
- if (!verify)
- break;
- /* verify: fetch mkvp from adapter */
- if (fetch_mkvp(card, dom, mkvp) == 0) {
- mkvp_cache_update(card, dom, mkvp);
- if (t->mkvp == mkvp[0])
- break;
- }
- }
- } else {
- /* Card is offline and/or not a CCA card. */
- /* del mkvp entry from cache if it exists */
- mkvp_cache_scrub(card, dom);
- }
- }
- if (i >= MAX_ZDEV_ENTRIES_EXT) {
- /* nothing found, so this time without cache */
- for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
- if (!(device_status[i].online &&
- device_status[i].functions & 0x04))
- continue;
- card = AP_QID_CARD(device_status[i].qid);
- dom = AP_QID_QUEUE(device_status[i].qid);
- /* fresh fetch mkvp from adapter */
- if (fetch_mkvp(card, dom, mkvp) == 0) {
- mkvp_cache_update(card, dom, mkvp);
- if (t->mkvp == mkvp[0])
- break;
- if (t->mkvp == mkvp[1] && oi < 0)
- oi = i;
- }
- }
- if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) {
- /* old mkvp matched, use this card then */
- card = AP_QID_CARD(device_status[oi].qid);
- dom = AP_QID_QUEUE(device_status[oi].qid);
- }
+ /* go through the list of apqns and try to build an ep11 key */
+ for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
+ card = apqns[i] >> 16;
+ dom = apqns[i] & 0xFFFF;
+ rc = ep11_clr2keyblob(card, dom, clrkeylen * 8,
+ 0, clrkey, keybuf, keybuflen);
+ if (rc == 0)
+ break;
}
- if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) {
- if (pcardnr)
- *pcardnr = card;
- if (pdomain)
- *pdomain = dom;
- rc = 0;
- } else
- rc = -ENODEV;
- kfree(device_status);
+out:
+ kfree(apqns);
+ if (rc)
+ DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
return rc;
}
-EXPORT_SYMBOL(pkey_findcard);
/*
- * Find card and transform secure key into protected key.
+ * Find card and transform EP11 secure key into protected key.
*/
-int pkey_skey2pkey(const struct pkey_seckey *seckey,
- struct pkey_protkey *protkey)
+static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey)
{
- u16 cardnr, domain;
- int rc, verify;
+ int i, rc;
+ u16 card, dom;
+ u32 nr_apqns, *apqns = NULL;
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
- /*
- * The pkey_sec2protkey call may fail when a card has been
- * addressed where the master key was changed after last fetch
- * of the mkvp into the cache. So first try without verify then
- * with verify enabled (thus refreshing the mkvp for each card).
- */
- for (verify = 0; verify < 2; verify++) {
- rc = pkey_findcard(seckey, &cardnr, &domain, verify);
- if (rc)
- continue;
- rc = pkey_sec2protkey(cardnr, domain, seckey, protkey);
+ /* build a list of apqns suitable for this key */
+ rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+ if (rc)
+ goto out;
+
+ /* go through the list of apqns and try to derive a pkey */
+ for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
+ card = apqns[i] >> 16;
+ dom = apqns[i] & 0xFFFF;
+ rc = ep11_key2protkey(card, dom, key, kb->head.len,
+ pkey->protkey, &pkey->len, &pkey->type);
if (rc == 0)
break;
}
+out:
+ kfree(apqns);
if (rc)
DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
-
return rc;
}
-EXPORT_SYMBOL(pkey_skey2pkey);
/*
* Verify key and give back some info about the key.
*/
-int pkey_verifykey(const struct pkey_seckey *seckey,
- u16 *pcardnr, u16 *pdomain,
- u16 *pkeysize, u32 *pattributes)
+static int pkey_verifykey(const struct pkey_seckey *seckey,
+ u16 *pcardnr, u16 *pdomain,
+ u16 *pkeysize, u32 *pattributes)
{
struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
u16 cardnr, domain;
- u64 mkvp[2];
int rc;
/* check the secure key for valid AES secure key */
- rc = check_secaeskeytoken((u8 *) seckey, 0);
+ rc = cca_check_secaeskeytoken(debug_info, 3, (u8 *) seckey, 0);
if (rc)
goto out;
if (pattributes)
@@ -1072,18 +271,16 @@ int pkey_verifykey(const struct pkey_seckey *seckey,
*pkeysize = t->bitsize;
/* try to find a card which can handle this key */
- rc = pkey_findcard(seckey, &cardnr, &domain, 1);
- if (rc)
+ rc = cca_findcard(seckey->seckey, &cardnr, &domain, 1);
+ if (rc < 0)
goto out;
- /* check mkvp for old mkvp match */
- rc = mkvp_cache_fetch(cardnr, domain, mkvp);
- if (rc)
- goto out;
- if (t->mkvp == mkvp[1] && t->mkvp != mkvp[0]) {
+ if (rc > 0) {
+ /* key mkvp matches to old master key mkvp */
DEBUG_DBG("%s secure key has old mkvp\n", __func__);
if (pattributes)
*pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP;
+ rc = 0;
}
if (pcardnr)
@@ -1095,12 +292,11 @@ out:
DEBUG_DBG("%s rc=%d\n", __func__, rc);
return rc;
}
-EXPORT_SYMBOL(pkey_verifykey);
/*
* Generate a random protected key
*/
-int pkey_genprotkey(__u32 keytype, struct pkey_protkey *protkey)
+static int pkey_genprotkey(u32 keytype, struct pkey_protkey *protkey)
{
struct pkey_clrkey clrkey;
int keysize;
@@ -1135,12 +331,11 @@ int pkey_genprotkey(__u32 keytype, struct pkey_protkey *protkey)
return 0;
}
-EXPORT_SYMBOL(pkey_genprotkey);
/*
* Verify if a protected key is still valid
*/
-int pkey_verifyprotkey(const struct pkey_protkey *protkey)
+static int pkey_verifyprotkey(const struct pkey_protkey *protkey)
{
unsigned long fc;
struct {
@@ -1181,40 +376,103 @@ int pkey_verifyprotkey(const struct pkey_protkey *protkey)
return 0;
}
-EXPORT_SYMBOL(pkey_verifyprotkey);
/*
* Transform a non-CCA key token into a protected key
*/
-static int pkey_nonccatok2pkey(const __u8 *key, __u32 keylen,
+static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
struct pkey_protkey *protkey)
{
+ int rc = -EINVAL;
+ u8 *tmpbuf = NULL;
struct keytoken_header *hdr = (struct keytoken_header *)key;
- struct protaeskeytoken *t;
switch (hdr->version) {
- case TOKVER_PROTECTED_KEY:
- if (keylen != sizeof(struct protaeskeytoken))
- return -EINVAL;
+ case TOKVER_PROTECTED_KEY: {
+ struct protaeskeytoken *t;
+ if (keylen != sizeof(struct protaeskeytoken))
+ goto out;
t = (struct protaeskeytoken *)key;
protkey->len = t->len;
protkey->type = t->keytype;
memcpy(protkey->protkey, t->protkey,
sizeof(protkey->protkey));
-
- return pkey_verifyprotkey(protkey);
+ rc = pkey_verifyprotkey(protkey);
+ break;
+ }
+ case TOKVER_CLEAR_KEY: {
+ struct clearaeskeytoken *t;
+ struct pkey_clrkey ckey;
+ union u_tmpbuf {
+ u8 skey[SECKEYBLOBSIZE];
+ u8 ep11key[MAXEP11AESKEYBLOBSIZE];
+ };
+ size_t tmpbuflen = sizeof(union u_tmpbuf);
+
+ if (keylen < sizeof(struct clearaeskeytoken))
+ goto out;
+ t = (struct clearaeskeytoken *)key;
+ if (keylen != sizeof(*t) + t->len)
+ goto out;
+ if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16)
+ || (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24)
+ || (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32))
+ memcpy(ckey.clrkey, t->clearkey, t->len);
+ else
+ goto out;
+ /* alloc temp key buffer space */
+ tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC);
+ if (!tmpbuf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ /* try direct way with the PCKMO instruction */
+ rc = pkey_clr2protkey(t->keytype, &ckey, protkey);
+ if (rc == 0)
+ break;
+ /* PCKMO failed, so try the CCA secure key way */
+ rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype,
+ ckey.clrkey, tmpbuf);
+ if (rc == 0)
+ rc = pkey_skey2pkey(tmpbuf, protkey);
+ if (rc == 0)
+ break;
+ /* if the CCA way also failed, let's try via EP11 */
+ rc = pkey_clr2ep11key(ckey.clrkey, t->len,
+ tmpbuf, &tmpbuflen);
+ if (rc == 0)
+ rc = pkey_ep11key2pkey(tmpbuf, protkey);
+ /* now we should really have a protected key */
+ DEBUG_ERR("%s unable to build protected key from clear",
+ __func__);
+ break;
+ }
+ case TOKVER_EP11_AES: {
+ if (keylen < MINEP11AESKEYBLOBSIZE)
+ goto out;
+ /* check ep11 key for exportable as protected key */
+ rc = ep11_check_aeskeyblob(debug_info, 3, key, 0, 1);
+ if (rc)
+ goto out;
+ rc = pkey_ep11key2pkey(key, protkey);
+ break;
+ }
default:
DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n",
__func__, hdr->version);
- return -EINVAL;
+ rc = -EINVAL;
}
+
+out:
+ kfree(tmpbuf);
+ return rc;
}
/*
* Transform a CCA internal key token into a protected key
*/
-static int pkey_ccainttok2pkey(const __u8 *key, __u32 keylen,
+static int pkey_ccainttok2pkey(const u8 *key, u32 keylen,
struct pkey_protkey *protkey)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
@@ -1223,44 +481,474 @@ static int pkey_ccainttok2pkey(const __u8 *key, __u32 keylen,
case TOKVER_CCA_AES:
if (keylen != sizeof(struct secaeskeytoken))
return -EINVAL;
-
- return pkey_skey2pkey((struct pkey_seckey *)key,
- protkey);
+ break;
+ case TOKVER_CCA_VLSC:
+ if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
+ return -EINVAL;
+ break;
default:
DEBUG_ERR("%s unknown/unsupported CCA internal token version %d\n",
__func__, hdr->version);
return -EINVAL;
}
+
+ return pkey_skey2pkey(key, protkey);
}
/*
* Transform a key blob (of any type) into a protected key
*/
-int pkey_keyblob2pkey(const __u8 *key, __u32 keylen,
+int pkey_keyblob2pkey(const u8 *key, u32 keylen,
struct pkey_protkey *protkey)
{
+ int rc;
struct keytoken_header *hdr = (struct keytoken_header *)key;
- if (keylen < sizeof(struct keytoken_header))
+ if (keylen < sizeof(struct keytoken_header)) {
+ DEBUG_ERR("%s invalid keylen %d\n", __func__, keylen);
return -EINVAL;
+ }
switch (hdr->type) {
case TOKTYPE_NON_CCA:
- return pkey_nonccatok2pkey(key, keylen, protkey);
+ rc = pkey_nonccatok2pkey(key, keylen, protkey);
+ break;
case TOKTYPE_CCA_INTERNAL:
- return pkey_ccainttok2pkey(key, keylen, protkey);
+ rc = pkey_ccainttok2pkey(key, keylen, protkey);
+ break;
default:
- DEBUG_ERR("%s unknown/unsupported blob type %d\n", __func__,
- hdr->type);
+ DEBUG_ERR("%s unknown/unsupported blob type %d\n",
+ __func__, hdr->type);
return -EINVAL;
}
+
+ DEBUG_DBG("%s rc=%d\n", __func__, rc);
+ return rc;
+
}
EXPORT_SYMBOL(pkey_keyblob2pkey);
+static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ enum pkey_key_type ktype, enum pkey_key_size ksize,
+ u32 kflags, u8 *keybuf, size_t *keybufsize)
+{
+ int i, card, dom, rc;
+
+ /* check for at least one apqn given */
+ if (!apqns || !nr_apqns)
+ return -EINVAL;
+
+ /* check key type and size */
+ switch (ktype) {
+ case PKEY_TYPE_CCA_DATA:
+ case PKEY_TYPE_CCA_CIPHER:
+ if (*keybufsize < SECKEYBLOBSIZE)
+ return -EINVAL;
+ break;
+ case PKEY_TYPE_EP11:
+ if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (ksize) {
+ case PKEY_SIZE_AES_128:
+ case PKEY_SIZE_AES_192:
+ case PKEY_SIZE_AES_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* simply try all apqns from the list */
+ for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ card = apqns[i].card;
+ dom = apqns[i].domain;
+ if (ktype == PKEY_TYPE_EP11) {
+ rc = ep11_genaeskey(card, dom, ksize, kflags,
+ keybuf, keybufsize);
+ } else if (ktype == PKEY_TYPE_CCA_DATA) {
+ rc = cca_genseckey(card, dom, ksize, keybuf);
+ *keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
+ } else /* TOKVER_CCA_VLSC */
+ rc = cca_gencipherkey(card, dom, ksize, kflags,
+ keybuf, keybufsize);
+ if (rc == 0)
+ break;
+ }
+
+ return rc;
+}
+
+static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ enum pkey_key_type ktype, enum pkey_key_size ksize,
+ u32 kflags, const u8 *clrkey,
+ u8 *keybuf, size_t *keybufsize)
+{
+ int i, card, dom, rc;
+
+ /* check for at least one apqn given */
+ if (!apqns || !nr_apqns)
+ return -EINVAL;
+
+ /* check key type and size */
+ switch (ktype) {
+ case PKEY_TYPE_CCA_DATA:
+ case PKEY_TYPE_CCA_CIPHER:
+ if (*keybufsize < SECKEYBLOBSIZE)
+ return -EINVAL;
+ break;
+ case PKEY_TYPE_EP11:
+ if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (ksize) {
+ case PKEY_SIZE_AES_128:
+ case PKEY_SIZE_AES_192:
+ case PKEY_SIZE_AES_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* simply try all apqns from the list */
+ for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ card = apqns[i].card;
+ dom = apqns[i].domain;
+ if (ktype == PKEY_TYPE_EP11) {
+ rc = ep11_clr2keyblob(card, dom, ksize, kflags,
+ clrkey, keybuf, keybufsize);
+ } else if (ktype == PKEY_TYPE_CCA_DATA) {
+ rc = cca_clr2seckey(card, dom, ksize,
+ clrkey, keybuf);
+ *keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
+ } else /* TOKVER_CCA_VLSC */
+ rc = cca_clr2cipherkey(card, dom, ksize, kflags,
+ clrkey, keybuf, keybufsize);
+ if (rc == 0)
+ break;
+ }
+
+ return rc;
+}
+
+static int pkey_verifykey2(const u8 *key, size_t keylen,
+ u16 *cardnr, u16 *domain,
+ enum pkey_key_type *ktype,
+ enum pkey_key_size *ksize, u32 *flags)
+{
+ int rc;
+ u32 _nr_apqns, *_apqns = NULL;
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ if (keylen < sizeof(struct keytoken_header))
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_CCA_INTERNAL
+ && hdr->version == TOKVER_CCA_AES) {
+ struct secaeskeytoken *t = (struct secaeskeytoken *)key;
+
+ rc = cca_check_secaeskeytoken(debug_info, 3, key, 0);
+ if (rc)
+ goto out;
+ if (ktype)
+ *ktype = PKEY_TYPE_CCA_DATA;
+ if (ksize)
+ *ksize = (enum pkey_key_size) t->bitsize;
+
+ rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
+ ZCRYPT_CEX3C, t->mkvp, 0, 1);
+ if (rc == 0 && flags)
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+ if (rc == -ENODEV) {
+ rc = cca_findcard2(&_apqns, &_nr_apqns,
+ *cardnr, *domain,
+ ZCRYPT_CEX3C, 0, t->mkvp, 1);
+ if (rc == 0 && flags)
+ *flags = PKEY_FLAGS_MATCH_ALT_MKVP;
+ }
+ if (rc)
+ goto out;
+
+ *cardnr = ((struct pkey_apqn *)_apqns)->card;
+ *domain = ((struct pkey_apqn *)_apqns)->domain;
+
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL
+ && hdr->version == TOKVER_CCA_VLSC) {
+ struct cipherkeytoken *t = (struct cipherkeytoken *)key;
+
+ rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1);
+ if (rc)
+ goto out;
+ if (ktype)
+ *ktype = PKEY_TYPE_CCA_CIPHER;
+ if (ksize) {
+ *ksize = PKEY_SIZE_UNKNOWN;
+ if (!t->plfver && t->wpllen == 512)
+ *ksize = PKEY_SIZE_AES_128;
+ else if (!t->plfver && t->wpllen == 576)
+ *ksize = PKEY_SIZE_AES_192;
+ else if (!t->plfver && t->wpllen == 640)
+ *ksize = PKEY_SIZE_AES_256;
+ }
+
+ rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
+ ZCRYPT_CEX6, t->mkvp0, 0, 1);
+ if (rc == 0 && flags)
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+ if (rc == -ENODEV) {
+ rc = cca_findcard2(&_apqns, &_nr_apqns,
+ *cardnr, *domain,
+ ZCRYPT_CEX6, 0, t->mkvp0, 1);
+ if (rc == 0 && flags)
+ *flags = PKEY_FLAGS_MATCH_ALT_MKVP;
+ }
+ if (rc)
+ goto out;
+
+ *cardnr = ((struct pkey_apqn *)_apqns)->card;
+ *domain = ((struct pkey_apqn *)_apqns)->domain;
+
+ } else if (hdr->type == TOKTYPE_NON_CCA
+ && hdr->version == TOKVER_EP11_AES) {
+ struct ep11keyblob *kb = (struct ep11keyblob *)key;
+
+ rc = ep11_check_aeskeyblob(debug_info, 3, key, 0, 1);
+ if (rc)
+ goto out;
+ if (ktype)
+ *ktype = PKEY_TYPE_EP11;
+ if (ksize)
+ *ksize = kb->head.keybitlen;
+
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
+ ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+ if (rc)
+ goto out;
+
+ if (flags)
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+
+ *cardnr = ((struct pkey_apqn *)_apqns)->card;
+ *domain = ((struct pkey_apqn *)_apqns)->domain;
+
+ } else
+ rc = -EINVAL;
+
+out:
+ kfree(_apqns);
+ return rc;
+}
+
+static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, size_t keylen,
+ struct pkey_protkey *pkey)
+{
+ int i, card, dom, rc;
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ /* check for at least one apqn given */
+ if (!apqns || !nr_apqns)
+ return -EINVAL;
+
+ if (keylen < sizeof(struct keytoken_header))
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_CCA_INTERNAL) {
+ if (hdr->version == TOKVER_CCA_AES) {
+ if (keylen != sizeof(struct secaeskeytoken))
+ return -EINVAL;
+ if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
+ return -EINVAL;
+ } else if (hdr->version == TOKVER_CCA_VLSC) {
+ if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
+ return -EINVAL;
+ if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
+ return -EINVAL;
+ } else {
+ DEBUG_ERR("%s unknown CCA internal token version %d\n",
+ __func__, hdr->version);
+ return -EINVAL;
+ }
+ } else if (hdr->type == TOKTYPE_NON_CCA) {
+ if (hdr->version == TOKVER_EP11_AES) {
+ if (keylen < sizeof(struct ep11keyblob))
+ return -EINVAL;
+ if (ep11_check_aeskeyblob(debug_info, 3, key, 0, 1))
+ return -EINVAL;
+ } else {
+ return pkey_nonccatok2pkey(key, keylen, pkey);
+ }
+ } else {
+ DEBUG_ERR("%s unknown/unsupported blob type %d\n",
+ __func__, hdr->type);
+ return -EINVAL;
+ }
+
+ /* simply try all apqns from the list */
+ for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ card = apqns[i].card;
+ dom = apqns[i].domain;
+ if (hdr->type == TOKTYPE_CCA_INTERNAL
+ && hdr->version == TOKVER_CCA_AES)
+ rc = cca_sec2protkey(card, dom, key, pkey->protkey,
+ &pkey->len, &pkey->type);
+ else if (hdr->type == TOKTYPE_CCA_INTERNAL
+ && hdr->version == TOKVER_CCA_VLSC)
+ rc = cca_cipher2protkey(card, dom, key, pkey->protkey,
+ &pkey->len, &pkey->type);
+ else { /* EP11 AES secure key blob */
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+ rc = ep11_key2protkey(card, dom, key, kb->head.len,
+ pkey->protkey, &pkey->len,
+ &pkey->type);
+ }
+ if (rc == 0)
+ break;
+ }
+
+ return rc;
+}
+
+static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns)
+{
+ int rc = -EINVAL;
+ u32 _nr_apqns, *_apqns = NULL;
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ if (keylen < sizeof(struct keytoken_header) || flags == 0)
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_NON_CCA && hdr->version == TOKVER_EP11_AES) {
+ int minhwtype = 0, api = 0;
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+ if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
+ return -EINVAL;
+ if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
+ minhwtype = ZCRYPT_CEX7;
+ api = EP11_API_V;
+ }
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, api, kb->wkvp);
+ if (rc)
+ goto out;
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
+ int minhwtype = ZCRYPT_CEX3C;
+ u64 cur_mkvp = 0, old_mkvp = 0;
+
+ if (hdr->version == TOKVER_CCA_AES) {
+ struct secaeskeytoken *t = (struct secaeskeytoken *)key;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = t->mkvp;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = t->mkvp;
+ } else if (hdr->version == TOKVER_CCA_VLSC) {
+ struct cipherkeytoken *t = (struct cipherkeytoken *)key;
+
+ minhwtype = ZCRYPT_CEX6;
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = t->mkvp0;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = t->mkvp0;
+ } else {
+ /* unknown cca internal token type */
+ return -EINVAL;
+ }
+ rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, cur_mkvp, old_mkvp, 1);
+ if (rc)
+ goto out;
+ } else
+ return -EINVAL;
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ }
+ *nr_apqns = _nr_apqns;
+
+out:
+ kfree(_apqns);
+ return rc;
+}
+
+static int pkey_apqns4keytype(enum pkey_key_type ktype,
+ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns)
+{
+ int rc = -EINVAL;
+ u32 _nr_apqns, *_apqns = NULL;
+
+ if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) {
+ u64 cur_mkvp = 0, old_mkvp = 0;
+ int minhwtype = ZCRYPT_CEX3C;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = *((u64 *) cur_mkvp);
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = *((u64 *) alt_mkvp);
+ if (ktype == PKEY_TYPE_CCA_CIPHER)
+ minhwtype = ZCRYPT_CEX6;
+ rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, cur_mkvp, old_mkvp, 1);
+ if (rc)
+ goto out;
+ } else if (ktype == PKEY_TYPE_EP11) {
+ u8 *wkvp = NULL;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ wkvp = cur_mkvp;
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, EP11_API_V, wkvp);
+ if (rc)
+ goto out;
+
+ } else
+ return -EINVAL;
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ }
+ *nr_apqns = _nr_apqns;
+
+out:
+ kfree(_apqns);
+ return rc;
+}
+
/*
* File io functions
*/
+static void *_copy_key_from_user(void __user *ukey, size_t keylen)
+{
+ if (!ukey || keylen < MINKEYBLOBSIZE || keylen > KEYBLOBBUFSIZE)
+ return ERR_PTR(-EINVAL);
+
+ return memdup_user(ukey, keylen);
+}
+
+static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
+{
+ if (!uapqns || nr_apqns == 0)
+ return NULL;
+
+ return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn));
+}
+
static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -1273,9 +961,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&kgs, ugs, sizeof(kgs)))
return -EFAULT;
- rc = pkey_genseckey(kgs.cardnr, kgs.domain,
- kgs.keytype, &kgs.seckey);
- DEBUG_DBG("%s pkey_genseckey()=%d\n", __func__, rc);
+ rc = cca_genseckey(kgs.cardnr, kgs.domain,
+ kgs.keytype, kgs.seckey.seckey);
+ DEBUG_DBG("%s cca_genseckey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ugs, &kgs, sizeof(kgs)))
@@ -1288,9 +976,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&kcs, ucs, sizeof(kcs)))
return -EFAULT;
- rc = pkey_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
- &kcs.clrkey, &kcs.seckey);
- DEBUG_DBG("%s pkey_clr2seckey()=%d\n", __func__, rc);
+ rc = cca_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
+ kcs.clrkey.clrkey, kcs.seckey.seckey);
+ DEBUG_DBG("%s cca_clr2seckey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ucs, &kcs, sizeof(kcs)))
@@ -1304,9 +992,10 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&ksp, usp, sizeof(ksp)))
return -EFAULT;
- rc = pkey_sec2protkey(ksp.cardnr, ksp.domain,
- &ksp.seckey, &ksp.protkey);
- DEBUG_DBG("%s pkey_sec2protkey()=%d\n", __func__, rc);
+ rc = cca_sec2protkey(ksp.cardnr, ksp.domain,
+ ksp.seckey.seckey, ksp.protkey.protkey,
+ &ksp.protkey.len, &ksp.protkey.type);
+ DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(usp, &ksp, sizeof(ksp)))
@@ -1335,10 +1024,10 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&kfc, ufc, sizeof(kfc)))
return -EFAULT;
- rc = pkey_findcard(&kfc.seckey,
- &kfc.cardnr, &kfc.domain, 1);
- DEBUG_DBG("%s pkey_findcard()=%d\n", __func__, rc);
- if (rc)
+ rc = cca_findcard(kfc.seckey.seckey,
+ &kfc.cardnr, &kfc.domain, 1);
+ DEBUG_DBG("%s cca_findcard()=%d\n", __func__, rc);
+ if (rc < 0)
break;
if (copy_to_user(ufc, &kfc, sizeof(kfc)))
return -EFAULT;
@@ -1350,7 +1039,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&ksp, usp, sizeof(ksp)))
return -EFAULT;
- rc = pkey_skey2pkey(&ksp.seckey, &ksp.protkey);
+ rc = pkey_skey2pkey(ksp.seckey.seckey, &ksp.protkey);
DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc);
if (rc)
break;
@@ -1400,24 +1089,148 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
case PKEY_KBLOB2PROTK: {
struct pkey_kblob2pkey __user *utp = (void __user *) arg;
struct pkey_kblob2pkey ktp;
- __u8 __user *ukey;
- __u8 *kkey;
+ u8 *kkey;
if (copy_from_user(&ktp, utp, sizeof(ktp)))
return -EFAULT;
- if (ktp.keylen < MINKEYBLOBSIZE ||
- ktp.keylen > MAXKEYBLOBSIZE)
- return -EINVAL;
- ukey = ktp.key;
- kkey = kmalloc(ktp.keylen, GFP_KERNEL);
- if (kkey == NULL)
+ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+ if (IS_ERR(kkey))
+ return PTR_ERR(kkey);
+ rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey);
+ DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
+ kfree(kkey);
+ if (rc)
+ break;
+ if (copy_to_user(utp, &ktp, sizeof(ktp)))
+ return -EFAULT;
+ break;
+ }
+ case PKEY_GENSECK2: {
+ struct pkey_genseck2 __user *ugs = (void __user *) arg;
+ struct pkey_genseck2 kgs;
+ struct pkey_apqn *apqns;
+ size_t klen = KEYBLOBBUFSIZE;
+ u8 *kkey;
+
+ if (copy_from_user(&kgs, ugs, sizeof(kgs)))
+ return -EFAULT;
+ apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = kmalloc(klen, GFP_KERNEL);
+ if (!kkey) {
+ kfree(apqns);
+ return -ENOMEM;
+ }
+ rc = pkey_genseckey2(apqns, kgs.apqn_entries,
+ kgs.type, kgs.size, kgs.keygenflags,
+ kkey, &klen);
+ DEBUG_DBG("%s pkey_genseckey2()=%d\n", __func__, rc);
+ kfree(apqns);
+ if (rc) {
+ kfree(kkey);
+ break;
+ }
+ if (kgs.key) {
+ if (kgs.keylen < klen) {
+ kfree(kkey);
+ return -EINVAL;
+ }
+ if (copy_to_user(kgs.key, kkey, klen)) {
+ kfree(kkey);
+ return -EFAULT;
+ }
+ }
+ kgs.keylen = klen;
+ if (copy_to_user(ugs, &kgs, sizeof(kgs)))
+ rc = -EFAULT;
+ kfree(kkey);
+ break;
+ }
+ case PKEY_CLR2SECK2: {
+ struct pkey_clr2seck2 __user *ucs = (void __user *) arg;
+ struct pkey_clr2seck2 kcs;
+ struct pkey_apqn *apqns;
+ size_t klen = KEYBLOBBUFSIZE;
+ u8 *kkey;
+
+ if (copy_from_user(&kcs, ucs, sizeof(kcs)))
+ return -EFAULT;
+ apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = kmalloc(klen, GFP_KERNEL);
+ if (!kkey) {
+ kfree(apqns);
return -ENOMEM;
- if (copy_from_user(kkey, ukey, ktp.keylen)) {
+ }
+ rc = pkey_clr2seckey2(apqns, kcs.apqn_entries,
+ kcs.type, kcs.size, kcs.keygenflags,
+ kcs.clrkey.clrkey, kkey, &klen);
+ DEBUG_DBG("%s pkey_clr2seckey2()=%d\n", __func__, rc);
+ kfree(apqns);
+ if (rc) {
kfree(kkey);
+ break;
+ }
+ if (kcs.key) {
+ if (kcs.keylen < klen) {
+ kfree(kkey);
+ return -EINVAL;
+ }
+ if (copy_to_user(kcs.key, kkey, klen)) {
+ kfree(kkey);
+ return -EFAULT;
+ }
+ }
+ kcs.keylen = klen;
+ if (copy_to_user(ucs, &kcs, sizeof(kcs)))
+ rc = -EFAULT;
+ memzero_explicit(&kcs, sizeof(kcs));
+ kfree(kkey);
+ break;
+ }
+ case PKEY_VERIFYKEY2: {
+ struct pkey_verifykey2 __user *uvk = (void __user *) arg;
+ struct pkey_verifykey2 kvk;
+ u8 *kkey;
+
+ if (copy_from_user(&kvk, uvk, sizeof(kvk)))
return -EFAULT;
+ kkey = _copy_key_from_user(kvk.key, kvk.keylen);
+ if (IS_ERR(kkey))
+ return PTR_ERR(kkey);
+ rc = pkey_verifykey2(kkey, kvk.keylen,
+ &kvk.cardnr, &kvk.domain,
+ &kvk.type, &kvk.size, &kvk.flags);
+ DEBUG_DBG("%s pkey_verifykey2()=%d\n", __func__, rc);
+ kfree(kkey);
+ if (rc)
+ break;
+ if (copy_to_user(uvk, &kvk, sizeof(kvk)))
+ return -EFAULT;
+ break;
+ }
+ case PKEY_KBLOB2PROTK2: {
+ struct pkey_kblob2pkey2 __user *utp = (void __user *) arg;
+ struct pkey_kblob2pkey2 ktp;
+ struct pkey_apqn *apqns = NULL;
+ u8 *kkey;
+
+ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
+ apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+ if (IS_ERR(kkey)) {
+ kfree(apqns);
+ return PTR_ERR(kkey);
}
- rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey);
- DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
+ rc = pkey_keyblob2pkey2(apqns, ktp.apqn_entries,
+ kkey, ktp.keylen, &ktp.protkey);
+ DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
+ kfree(apqns);
kfree(kkey);
if (rc)
break;
@@ -1425,6 +1238,97 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
break;
}
+ case PKEY_APQNS4K: {
+ struct pkey_apqns4key __user *uak = (void __user *) arg;
+ struct pkey_apqns4key kak;
+ struct pkey_apqn *apqns = NULL;
+ size_t nr_apqns, len;
+ u8 *kkey;
+
+ if (copy_from_user(&kak, uak, sizeof(kak)))
+ return -EFAULT;
+ nr_apqns = kak.apqn_entries;
+ if (nr_apqns) {
+ apqns = kmalloc_array(nr_apqns,
+ sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!apqns)
+ return -ENOMEM;
+ }
+ kkey = _copy_key_from_user(kak.key, kak.keylen);
+ if (IS_ERR(kkey)) {
+ kfree(apqns);
+ return PTR_ERR(kkey);
+ }
+ rc = pkey_apqns4key(kkey, kak.keylen, kak.flags,
+ apqns, &nr_apqns);
+ DEBUG_DBG("%s pkey_apqns4key()=%d\n", __func__, rc);
+ kfree(kkey);
+ if (rc && rc != -ENOSPC) {
+ kfree(apqns);
+ break;
+ }
+ if (!rc && kak.apqns) {
+ if (nr_apqns > kak.apqn_entries) {
+ kfree(apqns);
+ return -EINVAL;
+ }
+ len = nr_apqns * sizeof(struct pkey_apqn);
+ if (len) {
+ if (copy_to_user(kak.apqns, apqns, len)) {
+ kfree(apqns);
+ return -EFAULT;
+ }
+ }
+ }
+ kak.apqn_entries = nr_apqns;
+ if (copy_to_user(uak, &kak, sizeof(kak)))
+ rc = -EFAULT;
+ kfree(apqns);
+ break;
+ }
+ case PKEY_APQNS4KT: {
+ struct pkey_apqns4keytype __user *uat = (void __user *) arg;
+ struct pkey_apqns4keytype kat;
+ struct pkey_apqn *apqns = NULL;
+ size_t nr_apqns, len;
+
+ if (copy_from_user(&kat, uat, sizeof(kat)))
+ return -EFAULT;
+ nr_apqns = kat.apqn_entries;
+ if (nr_apqns) {
+ apqns = kmalloc_array(nr_apqns,
+ sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!apqns)
+ return -ENOMEM;
+ }
+ rc = pkey_apqns4keytype(kat.type, kat.cur_mkvp, kat.alt_mkvp,
+ kat.flags, apqns, &nr_apqns);
+ DEBUG_DBG("%s pkey_apqns4keytype()=%d\n", __func__, rc);
+ if (rc && rc != -ENOSPC) {
+ kfree(apqns);
+ break;
+ }
+ if (!rc && kat.apqns) {
+ if (nr_apqns > kat.apqn_entries) {
+ kfree(apqns);
+ return -EINVAL;
+ }
+ len = nr_apqns * sizeof(struct pkey_apqn);
+ if (len) {
+ if (copy_to_user(kat.apqns, apqns, len)) {
+ kfree(apqns);
+ return -EFAULT;
+ }
+ }
+ }
+ kat.apqn_entries = nr_apqns;
+ if (copy_to_user(uat, &kat, sizeof(kat)))
+ rc = -EFAULT;
+ kfree(apqns);
+ break;
+ }
default:
/* unknown/unsupported ioctl cmd */
return -ENOTTY;
@@ -1567,6 +1471,7 @@ static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
loff_t off, size_t count)
{
int rc;
+ struct pkey_seckey *seckey = (struct pkey_seckey *) buf;
if (off != 0 || count < sizeof(struct secaeskeytoken))
return -EINVAL;
@@ -1574,13 +1479,13 @@ static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
if (count < 2 * sizeof(struct secaeskeytoken))
return -EINVAL;
- rc = pkey_genseckey(-1, -1, keytype, (struct pkey_seckey *)buf);
+ rc = cca_genseckey(-1, -1, keytype, seckey->seckey);
if (rc)
return rc;
if (is_xts) {
- buf += sizeof(struct pkey_seckey);
- rc = pkey_genseckey(-1, -1, keytype, (struct pkey_seckey *)buf);
+ seckey++;
+ rc = cca_genseckey(-1, -1, keytype, seckey->seckey);
if (rc)
return rc;
@@ -1660,9 +1565,256 @@ static struct attribute_group ccadata_attr_group = {
.bin_attrs = ccadata_attrs,
};
+#define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80)
+
+/*
+ * Sysfs attribute read function for all secure key ccacipher binary attributes.
+ * The implementation can not deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
+ bool is_xts, char *buf, loff_t off,
+ size_t count)
+{
+ int i, rc, card, dom;
+ u32 nr_apqns, *apqns = NULL;
+ size_t keysize = CCACIPHERTOKENSIZE;
+
+ if (off != 0 || count < CCACIPHERTOKENSIZE)
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * CCACIPHERTOKENSIZE)
+ return -EINVAL;
+
+	/* build a list of apqns able to generate a cipher key */
+ rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX6, 0, 0, 0);
+ if (rc)
+ return rc;
+
+ memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+	/* simply try all apqns from the list */
+ for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ card = apqns[i] >> 16;
+ dom = apqns[i] & 0xFFFF;
+ rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
+ if (rc == 0)
+ break;
+ }
+ if (rc)
+ return rc;
+
+ if (is_xts) {
+ keysize = CCACIPHERTOKENSIZE;
+ buf += CCACIPHERTOKENSIZE;
+ rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
+ if (rc == 0)
+ return 2 * CCACIPHERTOKENSIZE;
+ }
+
+ return CCACIPHERTOKENSIZE;
+}
+
+static ssize_t ccacipher_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
+ off, count);
+}
+
+static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);
+
+static struct bin_attribute *ccacipher_attrs[] = {
+ &bin_attr_ccacipher_aes_128,
+ &bin_attr_ccacipher_aes_192,
+ &bin_attr_ccacipher_aes_256,
+ &bin_attr_ccacipher_aes_128_xts,
+ &bin_attr_ccacipher_aes_256_xts,
+ NULL
+};
+
+static struct attribute_group ccacipher_attr_group = {
+ .name = "ccacipher",
+ .bin_attrs = ccacipher_attrs,
+};
+
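For illustration only (not part of the patch), a minimal userspace sketch of how these read-only binary attributes could be consumed. The sysfs path and the 1024-byte buffer are assumptions; the attributes live under the pkey misc device, and since partial reads return -EINVAL the whole blob is read in one call:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* assumed sysfs location of the pkey misc device attributes */
	static const char *path =
		"/sys/devices/virtual/misc/pkey/ccacipher/ccacipher_aes_256";
	unsigned char blob[1024];	/* larger than CCACIPHERTOKENSIZE */
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, blob, sizeof(blob));	/* one full read, no seeking */
	close(fd);
	if (n < 0) {
		perror("read");
		return 1;
	}
	printf("read %zd byte CCA AES CIPHER secure key blob\n", n);
	return 0;
}
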
+/*
+ * Sysfs attribute read function for all ep11 aes key binary attributes.
+ * The implementation can not deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ * This function and the sysfs attributes using it provide EP11 key blobs
+ * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently
+ * 320 bytes.
+ */
+static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
+ bool is_xts, char *buf, loff_t off,
+ size_t count)
+{
+ int i, rc, card, dom;
+ u32 nr_apqns, *apqns = NULL;
+ size_t keysize = MAXEP11AESKEYBLOBSIZE;
+
+ if (off != 0 || count < MAXEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * MAXEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+
+	/* build a list of apqns able to generate a cipher key */
+ rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, EP11_API_V, NULL);
+ if (rc)
+ return rc;
+
+ memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+	/* simply try all apqns from the list */
+ for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ card = apqns[i] >> 16;
+ dom = apqns[i] & 0xFFFF;
+ rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
+ if (rc == 0)
+ break;
+ }
+ if (rc)
+ return rc;
+
+ if (is_xts) {
+ keysize = MAXEP11AESKEYBLOBSIZE;
+ buf += MAXEP11AESKEYBLOBSIZE;
+ rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
+ if (rc == 0)
+ return 2 * MAXEP11AESKEYBLOBSIZE;
+ }
+
+ return MAXEP11AESKEYBLOBSIZE;
+}
+
+static ssize_t ep11_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
+ off, count);
+}
+
+static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+
+static struct bin_attribute *ep11_attrs[] = {
+ &bin_attr_ep11_aes_128,
+ &bin_attr_ep11_aes_192,
+ &bin_attr_ep11_aes_256,
+ &bin_attr_ep11_aes_128_xts,
+ &bin_attr_ep11_aes_256_xts,
+ NULL
+};
+
+static struct attribute_group ep11_attr_group = {
+ .name = "ep11",
+ .bin_attrs = ep11_attrs,
+};
+
static const struct attribute_group *pkey_attr_groups[] = {
&protkey_attr_group,
&ccadata_attr_group,
+ &ccacipher_attr_group,
+ &ep11_attr_group,
NULL,
};
@@ -1716,7 +1868,6 @@ static int __init pkey_init(void)
static void __exit pkey_exit(void)
{
misc_deregister(&pkey_dev);
- mkvp_cache_free();
pkey_debug_exit();
}
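As a hedged sketch (not part of the patch), this is how userspace might drive the new PKEY_GENSECK2 ioctl added above. The APQN 2/37, the 1024-byte buffer and the PKEY_TYPE_CCA_CIPHER enumerator are illustrative assumptions; a real application would first discover suitable APQNs, for example with the PKEY_APQNS4KT ioctl:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/pkey.h>

int main(void)
{
	struct pkey_apqn apqn = { .card = 2, .domain = 37 }; /* placeholder */
	unsigned char blob[1024];
	struct pkey_genseck2 kgs;
	int fd, rc;

	fd = open("/dev/pkey", O_RDWR);
	if (fd < 0) {
		perror("open /dev/pkey");
		return 1;
	}
	memset(&kgs, 0, sizeof(kgs));
	kgs.apqns = &apqn;
	kgs.apqn_entries = 1;
	kgs.type = PKEY_TYPE_CCA_CIPHER;	/* assumed enum value from uapi pkey.h */
	kgs.size = PKEY_SIZE_AES_256;
	kgs.key = blob;
	kgs.keylen = sizeof(blob);	/* in: buffer size, out: blob size */
	rc = ioctl(fd, PKEY_GENSECK2, &kgs);
	close(fd);
	if (rc) {
		perror("PKEY_GENSECK2");
		return 1;
	}
	printf("generated %u byte secure key blob\n", kgs.keylen);
	return 0;
}
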
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index 003662aa8060..be2520cc010b 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -36,6 +36,8 @@ static struct ap_device_id ap_queue_ids[] = {
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX6,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX7,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ /* end of sibling */ },
};
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 0604b49a4d32..5c0f53c6dde7 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -1143,7 +1143,7 @@ int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
msleep(20);
status = ap_tapq(apqn, NULL);
}
- WARN_ON_ONCE(retry <= 0);
+ WARN_ON_ONCE(retry2 <= 0);
return 0;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 1058b4b5cc1e..56a405dce8bc 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -35,6 +35,8 @@
#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
+#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
/*
* Module description.
@@ -133,18 +135,6 @@ struct zcdn_device {
static int zcdn_create(const char *name);
static int zcdn_destroy(const char *name);
-/* helper function, matches the name for find_zcdndev_by_name() */
-static int __match_zcdn_name(struct device *dev, const void *data)
-{
- return strcmp(dev_name(dev), (const char *)data) == 0;
-}
-
-/* helper function, matches the devt value for find_zcdndev_by_devt() */
-static int __match_zcdn_devt(struct device *dev, const void *data)
-{
- return dev->devt == *((dev_t *) data);
-}
-
/*
* Find zcdn device by name.
* Returns reference to the zcdn device which needs to be released
@@ -152,10 +142,7 @@ static int __match_zcdn_devt(struct device *dev, const void *data)
*/
static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
{
- struct device *dev =
- class_find_device(zcrypt_class, NULL,
- (void *) name,
- __match_zcdn_name);
+ struct device *dev = class_find_device_by_name(zcrypt_class, name);
return dev ? to_zcdn_dev(dev) : NULL;
}
@@ -167,10 +154,7 @@ static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
*/
static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
{
- struct device *dev =
- class_find_device(zcrypt_class, NULL,
- (void *) &devt,
- __match_zcdn_devt);
+ struct device *dev = class_find_device_by_devt(zcrypt_class, devt);
return dev ? to_zcdn_dev(dev) : NULL;
}
@@ -539,8 +523,7 @@ static int zcrypt_release(struct inode *inode, struct file *filp)
if (filp->f_inode->i_cdev == &zcrypt_cdev) {
struct zcdn_device *zcdndev;
- if (mutex_lock_interruptible(&ap_perms_mutex))
- return -ERESTARTSYS;
+ mutex_lock(&ap_perms_mutex);
zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
mutex_unlock(&ap_perms_mutex);
if (zcdndev) {
@@ -623,8 +606,8 @@ static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
weight += atomic_read(&zc->load);
pref_weight += atomic_read(&pref_zc->load);
if (weight == pref_weight)
- return atomic_read(&zc->card->total_request_count) >
- atomic_read(&pref_zc->card->total_request_count);
+ return atomic64_read(&zc->card->total_request_count) >
+ atomic64_read(&pref_zc->card->total_request_count);
return weight > pref_weight;
}
@@ -867,7 +850,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
/* check if device is online and eligible */
if (!zq->online ||
!zq->ops->send_cprb ||
- (tdom != (unsigned short) AUTOSELECT &&
+ (tdom != AUTOSEL_DOM &&
tdom != AP_QID_QUEUE(zq->queue->qid)))
continue;
/* check if device node has admission for this queue */
@@ -892,7 +875,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
/* in case of auto select, provide the correct domain */
qid = pref_zq->queue->qid;
- if (*domain == (unsigned short) AUTOSELECT)
+ if (*domain == AUTOSEL_DOM)
*domain = AP_QID_QUEUE(qid);
rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
@@ -919,7 +902,7 @@ static bool is_desired_ep11_card(unsigned int dev_id,
struct ep11_target_dev *targets)
{
while (target_num-- > 0) {
- if (dev_id == targets->ap_id)
+ if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
return true;
targets++;
}
@@ -930,16 +913,19 @@ static bool is_desired_ep11_queue(unsigned int dev_qid,
unsigned short target_num,
struct ep11_target_dev *targets)
{
+ int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);
+
while (target_num-- > 0) {
- if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
+ if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
+ (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
return true;
targets++;
}
return false;
}
-static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
- struct ep11_urb *xcrb)
+static long _zcrypt_send_ep11_cprb(struct ap_perms *perms,
+ struct ep11_urb *xcrb)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
@@ -1044,6 +1030,12 @@ out:
return rc;
}
+long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
+{
+ return _zcrypt_send_ep11_cprb(&ap_perms, xcrb);
+}
+EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
+
static long zcrypt_rng(char *buffer)
{
struct zcrypt_card *zc, *pref_zc;
@@ -1160,6 +1152,34 @@ void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
+int zcrypt_device_status_ext(int card, int queue,
+ struct zcrypt_device_status_ext *devstat)
+{
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+
+ memset(devstat, 0, sizeof(*devstat));
+
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ if (card == AP_QID_CARD(zq->queue->qid) &&
+ queue == AP_QID_QUEUE(zq->queue->qid)) {
+ devstat->hwtype = zc->card->ap_dev.device_type;
+ devstat->functions = zc->card->functions >> 26;
+ devstat->qid = zq->queue->qid;
+ devstat->online = zq->online ? 0x01 : 0x00;
+ spin_unlock(&zcrypt_list_lock);
+ return 0;
+ }
+ }
+ }
+ spin_unlock(&zcrypt_list_lock);
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(zcrypt_device_status_ext);
+
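A minimal sketch (not from the patch) of how an in-kernel user of this new export might check a single APQN before using it; the helper name is hypothetical:

#include <linux/types.h>
#include "zcrypt_api.h"

/* hypothetical caller, not part of this patch */
static bool apqn_is_online(int card, int queue)
{
	struct zcrypt_device_status_ext devstat;

	if (zcrypt_device_status_ext(card, queue, &devstat))
		return false;	/* no such queue registered with zcrypt */

	/* devstat.hwtype, devstat.functions and devstat.qid are also
	 * filled in and could be checked here */
	return devstat.online;
}
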
static void zcrypt_status_mask(char status[], size_t max_adapters)
{
struct zcrypt_card *zc;
@@ -1206,11 +1226,12 @@ static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
spin_unlock(&zcrypt_list_lock);
}
-static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
+static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
int card;
+ u64 cnt;
memset(reqcnt, 0, sizeof(int) * max_adapters);
spin_lock(&zcrypt_list_lock);
@@ -1222,8 +1243,9 @@ static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
|| card >= max_adapters)
continue;
spin_lock(&zq->queue->lock);
- reqcnt[card] = zq->queue->total_request_count;
+ cnt = zq->queue->total_request_count;
spin_unlock(&zq->queue->lock);
+ reqcnt[card] = (cnt < UINT_MAX) ? (u32) cnt : UINT_MAX;
}
}
local_bh_enable();
@@ -1356,12 +1378,12 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
return -EFAULT;
do {
- rc = zcrypt_send_ep11_cprb(perms, &xcrb);
+ rc = _zcrypt_send_ep11_cprb(perms, &xcrb);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = zcrypt_send_ep11_cprb(perms, &xcrb);
+ rc = _zcrypt_send_ep11_cprb(perms, &xcrb);
} while (rc == -EAGAIN);
if (rc)
ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
@@ -1401,9 +1423,9 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
return 0;
}
case ZCRYPT_PERDEV_REQCNT: {
- int *reqcnt;
+ u32 *reqcnt;
- reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
+ reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
if (!reqcnt)
return -ENOMEM;
zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
@@ -1460,7 +1482,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
}
case Z90STAT_PERDEV_REQCNT: {
/* the old ioctl supports only 64 adapters */
- int reqcnt[MAX_ZDEV_CARDIDS];
+ u32 reqcnt[MAX_ZDEV_CARDIDS];
zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
@@ -1874,6 +1896,8 @@ void __exit zcrypt_api_exit(void)
misc_deregister(&zcrypt_misc_device);
zcrypt_msgtype6_exit();
zcrypt_msgtype50_exit();
+ zcrypt_ccamisc_exit();
+ zcrypt_ep11misc_exit();
zcrypt_debug_exit();
}
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index af67a768a3fc..599e68bf53f7 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * Copyright IBM Corp. 2001, 2018
+ * Copyright IBM Corp. 2001, 2019
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
* Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -29,6 +29,7 @@
#define ZCRYPT_CEX4 10
#define ZCRYPT_CEX5 11
#define ZCRYPT_CEX6 12
+#define ZCRYPT_CEX7 13
/**
* Large random numbers are pulled in 4096 byte chunks from the crypto cards
@@ -121,9 +122,6 @@ void zcrypt_card_get(struct zcrypt_card *);
int zcrypt_card_put(struct zcrypt_card *);
int zcrypt_card_register(struct zcrypt_card *);
void zcrypt_card_unregister(struct zcrypt_card *);
-struct zcrypt_card *zcrypt_card_get_best(unsigned int *,
- unsigned int, unsigned int);
-void zcrypt_card_put_best(struct zcrypt_card *, unsigned int);
struct zcrypt_queue *zcrypt_queue_alloc(size_t);
void zcrypt_queue_free(struct zcrypt_queue *);
@@ -132,8 +130,6 @@ int zcrypt_queue_put(struct zcrypt_queue *);
int zcrypt_queue_register(struct zcrypt_queue *);
void zcrypt_queue_unregister(struct zcrypt_queue *);
void zcrypt_queue_force_online(struct zcrypt_queue *, int);
-struct zcrypt_queue *zcrypt_queue_get_best(unsigned int, unsigned int);
-void zcrypt_queue_put_best(struct zcrypt_queue *, unsigned int);
int zcrypt_rng_device_add(void);
void zcrypt_rng_device_remove(void);
@@ -144,6 +140,9 @@ struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
int zcrypt_api_init(void);
void zcrypt_api_exit(void);
long zcrypt_send_cprb(struct ica_xcRB *xcRB);
+long zcrypt_send_ep11_cprb(struct ep11_urb *urb);
void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
+int zcrypt_device_status_ext(int card, int queue,
+ struct zcrypt_device_status_ext *devstatus);
#endif /* _ZCRYPT_API_H_ */
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
new file mode 100644
index 000000000000..110fe9d0cb91
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -0,0 +1,1765 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ * Ingo Franzki <ifranzki@linux.ibm.com>
+ *
+ * Collection of CCA misc functions used by zcrypt and pkey
+ */
+
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_debug.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_ccamisc.h"
+
+#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
+#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
+#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
+#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
+
+/* Size of parameter block used for all cca requests/replies */
+#define PARMBSIZE 512
+
+/* Size of vardata block used for some of the cca requests/replies */
+#define VARDATASIZE 4096
+
+struct cca_info_list_entry {
+ struct list_head list;
+ u16 cardnr;
+ u16 domain;
+ struct cca_info info;
+};
+
+/* a list with cca_info_list_entry entries */
+static LIST_HEAD(cca_info_list);
+static DEFINE_SPINLOCK(cca_info_list_lock);
+
+/*
+ * Simple check if the token is a valid CCA secure AES data key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. Returns 0 on success or errno value on failure.
+ */
+int cca_check_secaeskeytoken(debug_info_t *dbg, int dbflvl,
+ const u8 *token, int keybitsize)
+{
+ struct secaeskeytoken *t = (struct secaeskeytoken *) token;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (t->type != TOKTYPE_CCA_INTERNAL) {
+ if (dbg)
+ DBF("%s token check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int) t->type, TOKTYPE_CCA_INTERNAL);
+ return -EINVAL;
+ }
+ if (t->version != TOKVER_CCA_AES) {
+ if (dbg)
+ DBF("%s token check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int) t->version, TOKVER_CCA_AES);
+ return -EINVAL;
+ }
+ if (keybitsize > 0 && t->bitsize != keybitsize) {
+ if (dbg)
+ DBF("%s token check failed, bitsize %d != %d\n",
+ __func__, (int) t->bitsize, keybitsize);
+ return -EINVAL;
+ }
+
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(cca_check_secaeskeytoken);
+
+/*
+ * Simple check if the token is a valid CCA secure AES cipher key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. If checkcpacfexport is enabled, the key is also
+ * checked for the export flag to allow CPACF export.
+ * Returns 0 on success or errno value on failure.
+ */
+int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl,
+ const u8 *token, int keybitsize,
+ int checkcpacfexport)
+{
+ struct cipherkeytoken *t = (struct cipherkeytoken *) token;
+ bool keybitsizeok = true;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (t->type != TOKTYPE_CCA_INTERNAL) {
+ if (dbg)
+ DBF("%s token check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int) t->type, TOKTYPE_CCA_INTERNAL);
+ return -EINVAL;
+ }
+ if (t->version != TOKVER_CCA_VLSC) {
+ if (dbg)
+ DBF("%s token check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int) t->version, TOKVER_CCA_VLSC);
+ return -EINVAL;
+ }
+ if (t->algtype != 0x02) {
+ if (dbg)
+ DBF("%s token check failed, algtype 0x%02x != 0x02\n",
+ __func__, (int) t->algtype);
+ return -EINVAL;
+ }
+ if (t->keytype != 0x0001) {
+ if (dbg)
+ DBF("%s token check failed, keytype 0x%04x != 0x0001\n",
+ __func__, (int) t->keytype);
+ return -EINVAL;
+ }
+ if (t->plfver != 0x00 && t->plfver != 0x01) {
+ if (dbg)
+ DBF("%s token check failed, unknown plfver 0x%02x\n",
+ __func__, (int) t->plfver);
+ return -EINVAL;
+ }
+ if (t->wpllen != 512 && t->wpllen != 576 && t->wpllen != 640) {
+ if (dbg)
+ DBF("%s token check failed, unknown wpllen %d\n",
+ __func__, (int) t->wpllen);
+ return -EINVAL;
+ }
+ if (keybitsize > 0) {
+ switch (keybitsize) {
+ case 128:
+ if (t->wpllen != (t->plfver ? 640 : 512))
+ keybitsizeok = false;
+ break;
+ case 192:
+ if (t->wpllen != (t->plfver ? 640 : 576))
+ keybitsizeok = false;
+ break;
+ case 256:
+ if (t->wpllen != 640)
+ keybitsizeok = false;
+ break;
+ default:
+ keybitsizeok = false;
+ break;
+ }
+ if (!keybitsizeok) {
+ if (dbg)
+ DBF("%s token check failed, bitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+ }
+ if (checkcpacfexport && !(t->kmf1 & KMF1_XPRT_CPAC)) {
+ if (dbg)
+ DBF("%s token check failed, XPRT_CPAC bit is 0\n",
+ __func__);
+ return -EINVAL;
+ }
+
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(cca_check_secaescipherkey);
+
+/*
+ * Allocate consecutive memory for request CPRB, request param
+ * block, reply CPRB and reply param block and fill in values
+ * for the common fields. Returns 0 on success or errno value
+ * on failure.
+ */
+static int alloc_and_prep_cprbmem(size_t paramblen,
+ u8 **pcprbmem,
+ struct CPRBX **preqCPRB,
+ struct CPRBX **prepCPRB)
+{
+ u8 *cprbmem;
+ size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen;
+ struct CPRBX *preqcblk, *prepcblk;
+
+ /*
+ * allocate consecutive memory for request CPRB, request param
+ * block, reply CPRB and reply param block
+ */
+ cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL);
+ if (!cprbmem)
+ return -ENOMEM;
+
+ preqcblk = (struct CPRBX *) cprbmem;
+ prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen);
+
+ /* fill request cprb struct */
+ preqcblk->cprb_len = sizeof(struct CPRBX);
+ preqcblk->cprb_ver_id = 0x02;
+ memcpy(preqcblk->func_id, "T2", 2);
+ preqcblk->rpl_msgbl = cprbplusparamblen;
+ if (paramblen) {
+ preqcblk->req_parmb =
+ ((u8 *) preqcblk) + sizeof(struct CPRBX);
+ preqcblk->rpl_parmb =
+ ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ }
+
+ *pcprbmem = cprbmem;
+ *preqCPRB = preqcblk;
+ *prepCPRB = prepcblk;
+
+ return 0;
+}
+
+/*
+ * Free the cprb memory allocated with the function above.
+ * If the scrub value is not zero, the memory is filled
+ * with zeros before freeing (useful if there was some
+ * clear key material in there).
+ */
+static void free_cprbmem(void *mem, size_t paramblen, int scrub)
+{
+ if (scrub)
+ memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen));
+ kfree(mem);
+}
+
+/*
+ * Helper function to prepare the xcrb struct
+ */
+static inline void prep_xcrb(struct ica_xcRB *pxcrb,
+ u16 cardnr,
+ struct CPRBX *preqcblk,
+ struct CPRBX *prepcblk)
+{
+ memset(pxcrb, 0, sizeof(*pxcrb));
+ pxcrb->agent_ID = 0x4341; /* 'CA' */
+ pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr);
+ pxcrb->request_control_blk_length =
+ preqcblk->cprb_len + preqcblk->req_parml;
+ pxcrb->request_control_blk_addr = (void __user *) preqcblk;
+ pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl;
+ pxcrb->reply_control_blk_addr = (void __user *) prepcblk;
+}
+
+/*
+ * Helper function which calls zcrypt_send_cprb with
+ * memory management segment adjusted to kernel space
+ * so that the copy_from_user called within this
+ * function does in fact copy from kernel space.
+ */
+static inline int _zcrypt_send_cprb(struct ica_xcRB *xcrb)
+{
+ int rc;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+ rc = zcrypt_send_cprb(xcrb);
+ set_fs(old_fs);
+
+ return rc;
+}
+
+/*
+ * Generate (random) CCA AES DATA secure key.
+ */
+int cca_genseckey(u16 cardnr, u16 domain,
+ u32 keybitsize, u8 seckey[SECKEYBLOBSIZE])
+{
+ int i, rc, keysize;
+ int seckeysize;
+ u8 *mem;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct kgreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv1 {
+ u16 len;
+ char key_form[8];
+ char key_length[8];
+ char key_type1[8];
+ char key_type2[8];
+ } lv1;
+ struct lv2 {
+ u16 len;
+ struct keyid {
+ u16 len;
+ u16 attr;
+ u8 data[SECKEYBLOBSIZE];
+ } keyid[6];
+ } lv2;
+ } __packed * preqparm;
+ struct kgrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv3 {
+ u16 len;
+ u16 keyblocklen;
+ struct {
+ u16 toklen;
+ u16 tokattr;
+ u8 tok[0];
+ /* ... some more data ... */
+ } keyblock;
+ } lv3;
+ } __packed * prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with KG request */
+ preqparm = (struct kgreqparm *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "KG", 2);
+ preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
+ preqparm->lv1.len = sizeof(struct lv1);
+ memcpy(preqparm->lv1.key_form, "OP ", 8);
+ switch (keybitsize) {
+ case PKEY_SIZE_AES_128:
+ case PKEY_KEYTYPE_AES_128: /* older ioctls used this */
+ keysize = 16;
+ memcpy(preqparm->lv1.key_length, "KEYLN16 ", 8);
+ break;
+ case PKEY_SIZE_AES_192:
+ case PKEY_KEYTYPE_AES_192: /* older ioctls used this */
+ keysize = 24;
+ memcpy(preqparm->lv1.key_length, "KEYLN24 ", 8);
+ break;
+ case PKEY_SIZE_AES_256:
+ case PKEY_KEYTYPE_AES_256: /* older ioctls used this */
+ keysize = 32;
+ memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8);
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(preqparm->lv1.key_type1, "AESDATA ", 8);
+ preqparm->lv2.len = sizeof(struct lv2);
+ for (i = 0; i < 6; i++) {
+ preqparm->lv2.keyid[i].len = sizeof(struct keyid);
+ preqparm->lv2.keyid[i].attr = (i == 2 ? 0x30 : 0x10);
+ }
+ preqcblk->req_parml = sizeof(struct kgreqparm);
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = _zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR("%s secure key generate failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepparm = (struct kgrepparm *) prepcblk->rpl_parmb;
+
+ /* check length of the returned secure key token */
+ seckeysize = prepparm->lv3.keyblock.toklen
+ - sizeof(prepparm->lv3.keyblock.toklen)
+ - sizeof(prepparm->lv3.keyblock.tokattr);
+ if (seckeysize != SECKEYBLOBSIZE) {
+ DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n",
+ __func__, seckeysize, SECKEYBLOBSIZE);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* check secure key token */
+ rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR,
+ prepparm->lv3.keyblock.tok, 8*keysize);
+ if (rc) {
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the generated secure key token */
+ memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+EXPORT_SYMBOL(cca_genseckey);
+
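A minimal in-kernel usage sketch (not part of the patch); passing 0xFFFF for card and domain selects any APQN, the same way the pkey ccadata sysfs attributes earlier in this patch call cca_genseckey() with -1/-1:

#include <asm/pkey.h>
#include "zcrypt_ccamisc.h"

/* hypothetical helper, not part of this patch */
static int example_gen_aes256_data_key(u8 seckey[SECKEYBLOBSIZE])
{
	/* 0xFFFF card/domain means autoselect any suitable CCA APQN */
	return cca_genseckey(0xFFFF, 0xFFFF, PKEY_SIZE_AES_256, seckey);
}
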
+/*
+ * Generate a CCA AES DATA secure key with a given clear key value.
+ */
+int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
+ const u8 *clrkey, u8 seckey[SECKEYBLOBSIZE])
+{
+ int rc, keysize, seckeysize;
+ u8 *mem;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct cmreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ char rule_array[8];
+ struct lv1 {
+ u16 len;
+ u8 clrkey[0];
+ } lv1;
+ struct lv2 {
+ u16 len;
+ struct keyid {
+ u16 len;
+ u16 attr;
+ u8 data[SECKEYBLOBSIZE];
+ } keyid;
+ } lv2;
+ } __packed * preqparm;
+ struct lv2 *plv2;
+ struct cmrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv3 {
+ u16 len;
+ u16 keyblocklen;
+ struct {
+ u16 toklen;
+ u16 tokattr;
+ u8 tok[0];
+ /* ... some more data ... */
+ } keyblock;
+ } lv3;
+ } __packed * prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with CM request */
+ preqparm = (struct cmreqparm *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "CM", 2);
+ memcpy(preqparm->rule_array, "AES ", 8);
+ preqparm->rule_array_len =
+ sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
+ switch (keybitsize) {
+ case PKEY_SIZE_AES_128:
+ case PKEY_KEYTYPE_AES_128: /* older ioctls used this */
+ keysize = 16;
+ break;
+ case PKEY_SIZE_AES_192:
+ case PKEY_KEYTYPE_AES_192: /* older ioctls used this */
+ keysize = 24;
+ break;
+ case PKEY_SIZE_AES_256:
+ case PKEY_KEYTYPE_AES_256: /* older ioctls used this */
+ keysize = 32;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+ preqparm->lv1.len = sizeof(struct lv1) + keysize;
+ memcpy(preqparm->lv1.clrkey, clrkey, keysize);
+ plv2 = (struct lv2 *) (((u8 *) &preqparm->lv2) + keysize);
+ plv2->len = sizeof(struct lv2);
+ plv2->keyid.len = sizeof(struct keyid);
+ plv2->keyid.attr = 0x30;
+ preqcblk->req_parml = sizeof(struct cmreqparm) + keysize;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = _zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR("%s clear key import failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepparm = (struct cmrepparm *) prepcblk->rpl_parmb;
+
+ /* check length of the returned secure key token */
+ seckeysize = prepparm->lv3.keyblock.toklen
+ - sizeof(prepparm->lv3.keyblock.toklen)
+ - sizeof(prepparm->lv3.keyblock.tokattr);
+ if (seckeysize != SECKEYBLOBSIZE) {
+ DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n",
+ __func__, seckeysize, SECKEYBLOBSIZE);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* check secure key token */
+ rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR,
+ prepparm->lv3.keyblock.tok, 8*keysize);
+ if (rc) {
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the generated secure key token */
+ if (seckey)
+ memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 1);
+ return rc;
+}
+EXPORT_SYMBOL(cca_clr2seckey);
+
+/*
+ * Derive a protected key from a CCA AES DATA secure key.
+ */
+int cca_sec2protkey(u16 cardnr, u16 domain,
+ const u8 seckey[SECKEYBLOBSIZE],
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ int rc;
+ u8 *mem;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct uskreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv1 {
+ u16 len;
+ u16 attr_len;
+ u16 attr_flags;
+ } lv1;
+ struct lv2 {
+ u16 len;
+ u16 attr_len;
+ u16 attr_flags;
+ u8 token[0]; /* cca secure key token */
+ } lv2;
+ } __packed * preqparm;
+ struct uskrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv3 {
+ u16 len;
+ u16 attr_len;
+ u16 attr_flags;
+ struct cpacfkeyblock {
+ u8 version; /* version of this struct */
+ u8 flags[2];
+ u8 algo;
+ u8 form;
+ u8 pad1[3];
+ u16 len;
+ u8 key[64]; /* the key (len bytes) */
+ u16 keyattrlen;
+ u8 keyattr[32];
+ u8 pad2[1];
+ u8 vptype;
+ u8 vp[32]; /* verification pattern */
+ } keyblock;
+ } lv3;
+ } __packed * prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with USK request */
+ preqparm = (struct uskreqparm *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "US", 2);
+ preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
+ preqparm->lv1.len = sizeof(struct lv1);
+ preqparm->lv1.attr_len = sizeof(struct lv1) - sizeof(preqparm->lv1.len);
+ preqparm->lv1.attr_flags = 0x0001;
+ preqparm->lv2.len = sizeof(struct lv2) + SECKEYBLOBSIZE;
+ preqparm->lv2.attr_len = sizeof(struct lv2)
+ - sizeof(preqparm->lv2.len) + SECKEYBLOBSIZE;
+ preqparm->lv2.attr_flags = 0x0000;
+ memcpy(preqparm->lv2.token, seckey, SECKEYBLOBSIZE);
+ preqcblk->req_parml = sizeof(struct uskreqparm) + SECKEYBLOBSIZE;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = _zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+ if (prepcblk->ccp_rscode != 0) {
+ DEBUG_WARN("%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ }
+
+ /* process response cprb param block */
+ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepparm = (struct uskrepparm *) prepcblk->rpl_parmb;
+
+ /* check the returned keyblock */
+ if (prepparm->lv3.keyblock.version != 0x01) {
+ DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x01\n",
+ __func__, (int) prepparm->lv3.keyblock.version);
+ rc = -EIO;
+ goto out;
+ }
+
+	/* copy the translated protected key */
+ switch (prepparm->lv3.keyblock.len) {
+ case 16+32:
+ /* AES 128 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_128;
+ break;
+ case 24+32:
+ /* AES 192 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_192;
+ break;
+ case 32+32:
+ /* AES 256 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_256;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported keylen %d\n",
+ __func__, prepparm->lv3.keyblock.len);
+ rc = -EIO;
+ goto out;
+ }
+ memcpy(protkey, prepparm->lv3.keyblock.key, prepparm->lv3.keyblock.len);
+ if (protkeylen)
+ *protkeylen = prepparm->lv3.keyblock.len;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+EXPORT_SYMBOL(cca_sec2protkey);
+
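A minimal in-kernel sketch of the counterpart direction (not part of the patch): deriving a CPACF protected key from a DATA secure key. The 64-byte buffer matches the key[64] field of the reply keyblock above; the helper name is hypothetical:

#include <linux/types.h>
#include "zcrypt_ccamisc.h"

/* hypothetical helper, not part of this patch */
static int example_data_key_to_protkey(u16 card, u16 dom,
				       const u8 seckey[SECKEYBLOBSIZE])
{
	u8 protkey[64];		/* big enough for an AES-256 protected key */
	u32 protkeylen, protkeytype;
	int rc;

	rc = cca_sec2protkey(card, dom, seckey,
			     protkey, &protkeylen, &protkeytype);
	if (rc)
		return rc;

	/* protkeytype is one of PKEY_KEYTYPE_AES_128/192/256,
	 * protkeylen is the number of bytes used in protkey[] */
	return 0;
}
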
+/*
+ * AES cipher key skeleton created with CSNBKTB2 with these flags:
+ * INTERNAL, NO-KEY, AES, CIPHER, ANY-MODE, NOEX-SYM, NOEXAASY,
+ * NOEXUASY, XPRTCPAC, NOEX-RAW, NOEX-DES, NOEX-AES, NOEX-RSA
+ * used by cca_gencipherkey() and cca_clr2cipherkey().
+ */
+static const u8 aes_cipher_key_skeleton[] = {
+ 0x01, 0x00, 0x00, 0x38, 0x05, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
+ 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x02, 0x00, 0x01, 0x02, 0xc0, 0x00, 0xff,
+ 0x00, 0x03, 0x08, 0xc8, 0x00, 0x00, 0x00, 0x00 };
+#define SIZEOF_SKELETON (sizeof(aes_cipher_key_skeleton))
+
+/*
+ * Generate (random) CCA AES CIPHER secure key.
+ */
+int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize)
+{
+ int rc;
+ u8 *mem;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct gkreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ char rule_array[2*8];
+ struct {
+ u16 len;
+ u8 key_type_1[8];
+ u8 key_type_2[8];
+ u16 clear_key_bit_len;
+ u16 key_name_1_len;
+ u16 key_name_2_len;
+ u16 user_data_1_len;
+ u16 user_data_2_len;
+ u8 key_name_1[0];
+ u8 key_name_2[0];
+ u8 user_data_1[0];
+ u8 user_data_2[0];
+ } vud;
+ struct {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 kek_id_1[0];
+ } tlv1;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 kek_id_2[0];
+ } tlv2;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 gen_key_id_1[SIZEOF_SKELETON];
+ } tlv3;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 gen_key_id_1_label[0];
+ } tlv4;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 gen_key_id_2[0];
+ } tlv5;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 gen_key_id_2_label[0];
+ } tlv6;
+ } kb;
+ } __packed * preqparm;
+ struct gkrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct {
+ u16 len;
+ } vud;
+ struct {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 gen_key[0]; /* 120-136 bytes */
+ } tlv1;
+ } kb;
+ } __packed * prepparm;
+ struct cipherkeytoken *t;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+ preqcblk->req_parml = sizeof(struct gkreqparm);
+
+ /* prepare request param block with GK request */
+ preqparm = (struct gkreqparm *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "GK", 2);
+ preqparm->rule_array_len = sizeof(uint16_t) + 2 * 8;
+ memcpy(preqparm->rule_array, "AES OP ", 2*8);
+
+ /* prepare vud block */
+ preqparm->vud.len = sizeof(preqparm->vud);
+ switch (keybitsize) {
+ case 128:
+ case 192:
+ case 256:
+ break;
+ default:
+ DEBUG_ERR(
+ "%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+ preqparm->vud.clear_key_bit_len = keybitsize;
+ memcpy(preqparm->vud.key_type_1, "TOKEN ", 8);
+ memset(preqparm->vud.key_type_2, ' ', sizeof(preqparm->vud.key_type_2));
+
+ /* prepare kb block */
+ preqparm->kb.len = sizeof(preqparm->kb);
+ preqparm->kb.tlv1.len = sizeof(preqparm->kb.tlv1);
+ preqparm->kb.tlv1.flag = 0x0030;
+ preqparm->kb.tlv2.len = sizeof(preqparm->kb.tlv2);
+ preqparm->kb.tlv2.flag = 0x0030;
+ preqparm->kb.tlv3.len = sizeof(preqparm->kb.tlv3);
+ preqparm->kb.tlv3.flag = 0x0030;
+ memcpy(preqparm->kb.tlv3.gen_key_id_1,
+ aes_cipher_key_skeleton, SIZEOF_SKELETON);
+ preqparm->kb.tlv4.len = sizeof(preqparm->kb.tlv4);
+ preqparm->kb.tlv4.flag = 0x0030;
+ preqparm->kb.tlv5.len = sizeof(preqparm->kb.tlv5);
+ preqparm->kb.tlv5.flag = 0x0030;
+ preqparm->kb.tlv6.len = sizeof(preqparm->kb.tlv6);
+ preqparm->kb.tlv6.flag = 0x0030;
+
+ /* patch the skeleton key token export flags inside the kb block */
+ if (keygenflags) {
+ t = (struct cipherkeytoken *) preqparm->kb.tlv3.gen_key_id_1;
+ t->kmf1 |= (u16) (keygenflags & 0x0000FF00);
+ t->kmf1 &= (u16) ~(keygenflags & 0x000000FF);
+ }
+
+ /* prepare xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = _zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR(
+ "%s cipher key generate failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepparm = (struct gkrepparm *) prepcblk->rpl_parmb;
+
+ /* do some plausibility checks on the key block */
+ if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
+ prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) {
+ DEBUG_ERR("%s reply with invalid or unknown key block\n",
+ __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* and some checks on the generated key */
+ rc = cca_check_secaescipherkey(zcrypt_dbf_info, DBF_ERR,
+ prepparm->kb.tlv1.gen_key,
+ keybitsize, 1);
+ if (rc) {
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the generated vlsc key token */
+ t = (struct cipherkeytoken *) prepparm->kb.tlv1.gen_key;
+ if (keybuf) {
+ if (*keybufsize >= t->len)
+ memcpy(keybuf, t, t->len);
+ else
+ rc = -EINVAL;
+ }
+ *keybufsize = t->len;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+EXPORT_SYMBOL(cca_gencipherkey);
+
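Another hedged in-kernel usage sketch (not part of the patch): generating a random AES-256 CCA CIPHER key into a caller supplied buffer. On success the size variable is updated to the real length of the variable length token; the helper name is hypothetical:

#include <linux/types.h>
#include "zcrypt_ccamisc.h"

/* hypothetical helper, not part of this patch */
static int example_gen_cipher_key(u16 card, u16 dom,
				  u8 *keybuf, size_t bufsize)
{
	size_t keysize = bufsize;	/* in: buffer size, out: token length */
	int rc;

	rc = cca_gencipherkey(card, dom, 256, 0, keybuf, &keysize);
	if (rc)
		return rc;

	/* keysize now holds the length of the generated VLSC key token */
	return 0;
}
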
+/*
+ * Helper function, performs the CSNBKPI2 CPRB.
+ */
+static int _ip_cprb_helper(u16 cardnr, u16 domain,
+ const char *rule_array_1,
+ const char *rule_array_2,
+ const char *rule_array_3,
+ const u8 *clr_key_value,
+ int clr_key_bit_size,
+ u8 *key_token,
+ int *key_token_size)
+{
+ int rc, n;
+ u8 *mem;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct rule_array_block {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ char rule_array[0];
+ } __packed * preq_ra_block;
+ struct vud_block {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag; /* 0x0064 */
+ u16 clr_key_bit_len;
+ } tlv1;
+ struct {
+ u16 len;
+ u16 flag; /* 0x0063 */
+ u8 clr_key[0]; /* clear key value bytes */
+ } tlv2;
+ } __packed * preq_vud_block;
+ struct key_block {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag; /* 0x0030 */
+ u8 key_token[0]; /* key skeleton */
+ } tlv1;
+ } __packed * preq_key_block;
+ struct iprepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct {
+ u16 len;
+ } vud;
+ struct {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag; /* 0x0030 */
+ u8 key_token[0]; /* key token */
+ } tlv1;
+ } kb;
+ } __packed * prepparm;
+ struct cipherkeytoken *t;
+ int complete = strncmp(rule_array_2, "COMPLETE", 8) ? 0 : 1;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+ preqcblk->req_parml = 0;
+
+ /* prepare request param block with IP request */
+ preq_ra_block = (struct rule_array_block *) preqcblk->req_parmb;
+ memcpy(preq_ra_block->subfunc_code, "IP", 2);
+ preq_ra_block->rule_array_len = sizeof(uint16_t) + 2 * 8;
+ memcpy(preq_ra_block->rule_array, rule_array_1, 8);
+ memcpy(preq_ra_block->rule_array + 8, rule_array_2, 8);
+ preqcblk->req_parml = sizeof(struct rule_array_block) + 2 * 8;
+ if (rule_array_3) {
+ preq_ra_block->rule_array_len += 8;
+ memcpy(preq_ra_block->rule_array + 16, rule_array_3, 8);
+ preqcblk->req_parml += 8;
+ }
+
+ /* prepare vud block */
+ preq_vud_block = (struct vud_block *)
+ (preqcblk->req_parmb + preqcblk->req_parml);
+ n = complete ? 0 : (clr_key_bit_size + 7) / 8;
+ preq_vud_block->len = sizeof(struct vud_block) + n;
+ preq_vud_block->tlv1.len = sizeof(preq_vud_block->tlv1);
+ preq_vud_block->tlv1.flag = 0x0064;
+ preq_vud_block->tlv1.clr_key_bit_len = complete ? 0 : clr_key_bit_size;
+ preq_vud_block->tlv2.len = sizeof(preq_vud_block->tlv2) + n;
+ preq_vud_block->tlv2.flag = 0x0063;
+ if (!complete)
+ memcpy(preq_vud_block->tlv2.clr_key, clr_key_value, n);
+ preqcblk->req_parml += preq_vud_block->len;
+
+ /* prepare key block */
+ preq_key_block = (struct key_block *)
+ (preqcblk->req_parmb + preqcblk->req_parml);
+ n = *key_token_size;
+ preq_key_block->len = sizeof(struct key_block) + n;
+ preq_key_block->tlv1.len = sizeof(preq_key_block->tlv1) + n;
+ preq_key_block->tlv1.flag = 0x0030;
+ memcpy(preq_key_block->tlv1.key_token, key_token, *key_token_size);
+ preqcblk->req_parml += preq_key_block->len;
+
+ /* prepare xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = _zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR(
+ "%s CSNBKPI2 failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepparm = (struct iprepparm *) prepcblk->rpl_parmb;
+
+ /* do some plausibility checks on the key block */
+ if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) ||
+ prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) {
+ DEBUG_ERR("%s reply with invalid or unknown key block\n",
+ __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* do not check the key here, it may be incomplete */
+
+ /* copy the vlsc key token back */
+ t = (struct cipherkeytoken *) prepparm->kb.tlv1.key_token;
+ memcpy(key_token, t, t->len);
+ *key_token_size = t->len;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+
+/*
+ * Build CCA AES CIPHER secure key with a given clear key value.
+ */
+int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags,
+ const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
+{
+ int rc;
+ u8 *token;
+ int tokensize;
+ u8 exorbuf[32];
+ struct cipherkeytoken *t;
+
+ /* fill exorbuf with random data */
+ get_random_bytes(exorbuf, sizeof(exorbuf));
+
+ /* allocate space for the key token to build */
+ token = kmalloc(MAXCCAVLSCTOKENSIZE, GFP_KERNEL);
+ if (!token)
+ return -ENOMEM;
+
+ /* prepare the token with the key skeleton */
+ tokensize = SIZEOF_SKELETON;
+ memcpy(token, aes_cipher_key_skeleton, tokensize);
+
+ /* patch the skeleton key token export flags */
+ if (keygenflags) {
+ t = (struct cipherkeytoken *) token;
+ t->kmf1 |= (u16) (keygenflags & 0x0000FF00);
+ t->kmf1 &= (u16) ~(keygenflags & 0x000000FF);
+ }
+
+ /*
+ * Do the key import with the clear key value in 4 steps:
+ * 1/4 FIRST import with only random data
+ * 2/4 EXOR the clear key
+ * 3/4 EXOR the very same random data again
+ * 4/4 COMPLETE the secure cipher key import
+ */
+ rc = _ip_cprb_helper(card, dom, "AES ", "FIRST ", "MIN3PART",
+ exorbuf, keybitsize, token, &tokensize);
+ if (rc) {
+ DEBUG_ERR(
+ "%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
+ clrkey, keybitsize, token, &tokensize);
+ if (rc) {
+ DEBUG_ERR(
+ "%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
+ exorbuf, keybitsize, token, &tokensize);
+ if (rc) {
+ DEBUG_ERR(
+ "%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ rc = _ip_cprb_helper(card, dom, "AES ", "COMPLETE", NULL,
+ NULL, keybitsize, token, &tokensize);
+ if (rc) {
+ DEBUG_ERR(
+ "%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ /* copy the generated key token */
+ if (keybuf) {
+ if (tokensize > *keybufsize)
+ rc = -EINVAL;
+ else
+ memcpy(keybuf, token, tokensize);
+ }
+ *keybufsize = tokensize;
+
+out:
+ kfree(token);
+ return rc;
+}
+EXPORT_SYMBOL(cca_clr2cipherkey);
+
+/*
+ * Derive a protected key from a CCA AES CIPHER secure key.
+ */
+int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ int rc;
+ u8 *mem;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct aureqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ u8 rule_array[8];
+ struct {
+ u16 len;
+ u16 tk_blob_len;
+ u16 tk_blob_tag;
+ u8 tk_blob[66];
+ } vud;
+ struct {
+ u16 len;
+ u16 cca_key_token_len;
+ u16 cca_key_token_flags;
+ u8 cca_key_token[0]; // 64 or more
+ } kb;
+ } __packed * preqparm;
+ struct aurepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct {
+ u16 len;
+ u16 sublen;
+ u16 tag;
+ struct cpacfkeyblock {
+ u8 version; /* version of this struct */
+ u8 flags[2];
+ u8 algo;
+ u8 form;
+ u8 pad1[3];
+ u16 keylen;
+ u8 key[64]; /* the key (keylen bytes) */
+ u16 keyattrlen;
+ u8 keyattr[32];
+ u8 pad2[1];
+ u8 vptype;
+ u8 vp[32]; /* verification pattern */
+ } ckb;
+ } vud;
+ struct {
+ u16 len;
+ } kb;
+ } __packed * prepparm;
+ int keytoklen = ((struct cipherkeytoken *)ckey)->len;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with AU request */
+ preqparm = (struct aureqparm *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "AU", 2);
+ preqparm->rule_array_len =
+ sizeof(preqparm->rule_array_len)
+ + sizeof(preqparm->rule_array);
+ memcpy(preqparm->rule_array, "EXPT-SK ", 8);
+ /* vud, tk blob */
+ preqparm->vud.len = sizeof(preqparm->vud);
+ preqparm->vud.tk_blob_len = sizeof(preqparm->vud.tk_blob)
+ + 2 * sizeof(uint16_t);
+ preqparm->vud.tk_blob_tag = 0x00C2;
+ /* kb, cca token */
+ preqparm->kb.len = keytoklen + 3 * sizeof(uint16_t);
+ preqparm->kb.cca_key_token_len = keytoklen + 2 * sizeof(uint16_t);
+ memcpy(preqparm->kb.cca_key_token, ckey, keytoklen);
+ /* now fill length of param block into cprb */
+ preqcblk->req_parml = sizeof(struct aureqparm) + keytoklen;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = _zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR(
+ "%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+ if (prepcblk->ccp_rscode != 0) {
+ DEBUG_WARN(
+ "%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ }
+
+ /* process response cprb param block */
+ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepparm = (struct aurepparm *) prepcblk->rpl_parmb;
+
+ /* check the returned keyblock */
+ if (prepparm->vud.ckb.version != 0x01) {
+ DEBUG_ERR(
+ "%s reply param keyblock version mismatch 0x%02x != 0x01\n",
+ __func__, (int) prepparm->vud.ckb.version);
+ rc = -EIO;
+ goto out;
+ }
+ if (prepparm->vud.ckb.algo != 0x02) {
+ DEBUG_ERR(
+ "%s reply param keyblock algo mismatch 0x%02x != 0x02\n",
+ __func__, (int) prepparm->vud.ckb.algo);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the translated protected key */
+ switch (prepparm->vud.ckb.keylen) {
+ case 16+32:
+ /* AES 128 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_128;
+ break;
+ case 24+32:
+ /* AES 192 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_192;
+ break;
+ case 32+32:
+ /* AES 256 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_256;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported keylen %d\n",
+ __func__, prepparm->vud.ckb.keylen);
+ rc = -EIO;
+ goto out;
+ }
+ memcpy(protkey, prepparm->vud.ckb.key, prepparm->vud.ckb.keylen);
+ if (protkeylen)
+ *protkeylen = prepparm->vud.ckb.keylen;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+EXPORT_SYMBOL(cca_cipher2protkey);
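+
+/*
+ * Illustration only (example, never compiled): a minimal sketch of how a
+ * caller holding a CCA AES CIPHER key blob might derive a CPACF protected
+ * key with cca_cipher2protkey(). The function name, the card/dom values
+ * and the 64 byte buffer size are assumptions made for this sketch.
+ */
+#if 0
+static int example_cipherkey_to_protkey(u16 card, u16 dom, const u8 *ckey)
+{
+	u8 protkey[64];	/* large enough for AES-256: 32 key + 32 wkvp bytes */
+	u32 protkeylen = 0, protkeytype = 0;
+	int rc;
+
+	rc = cca_cipher2protkey(card, dom, ckey,
+				protkey, &protkeylen, &protkeytype);
+	if (rc)
+		return rc;
+	/* protkeytype is now one of the PKEY_KEYTYPE_AES_* values */
+	return 0;
+}
+#endif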
+
+/*
+ * query cryptographic facility from CCA adapter
+ */
+int cca_query_crypto_facility(u16 cardnr, u16 domain,
+ const char *keyword,
+ u8 *rarray, size_t *rarraylen,
+ u8 *varray, size_t *varraylen)
+{
+ int rc;
+ u16 len;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct fqreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ char rule_array[8];
+ struct lv1 {
+ u16 len;
+ u8 data[VARDATASIZE];
+ } lv1;
+ u16 dummylen;
+ } __packed * preqparm;
+ size_t parmbsize = sizeof(struct fqreqparm);
+ struct fqrepparm {
+ u8 subfunc_code[2];
+ u8 lvdata[0];
+ } __packed * prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with FQ request */
+ preqparm = (struct fqreqparm *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "FQ", 2);
+ memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
+ preqparm->rule_array_len =
+ sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
+ preqparm->lv1.len = sizeof(preqparm->lv1);
+ preqparm->dummylen = sizeof(preqparm->dummylen);
+ preqcblk->req_parml = parmbsize;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = _zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+		DEBUG_ERR("%s query crypto facility failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepparm = (struct fqrepparm *) prepcblk->rpl_parmb;
+ ptr = prepparm->lvdata;
+
+ /* check and possibly copy reply rule array */
+ len = *((u16 *) ptr);
+ if (len > sizeof(u16)) {
+ ptr += sizeof(u16);
+ len -= sizeof(u16);
+ if (rarray && rarraylen && *rarraylen > 0) {
+ *rarraylen = (len > *rarraylen ? *rarraylen : len);
+ memcpy(rarray, ptr, *rarraylen);
+ }
+ ptr += len;
+ }
+	/* check and possibly copy reply var array */
+ len = *((u16 *) ptr);
+ if (len > sizeof(u16)) {
+ ptr += sizeof(u16);
+ len -= sizeof(u16);
+ if (varray && varraylen && *varraylen > 0) {
+ *varraylen = (len > *varraylen ? *varraylen : len);
+ memcpy(varray, ptr, *varraylen);
+ }
+ ptr += len;
+ }
+
+out:
+ free_cprbmem(mem, parmbsize, 0);
+ return rc;
+}
+EXPORT_SYMBOL(cca_query_crypto_facility);
+
+static int cca_info_cache_fetch(u16 cardnr, u16 domain, struct cca_info *ci)
+{
+ int rc = -ENOENT;
+ struct cca_info_list_entry *ptr;
+
+ spin_lock_bh(&cca_info_list_lock);
+ list_for_each_entry(ptr, &cca_info_list, list) {
+ if (ptr->cardnr == cardnr && ptr->domain == domain) {
+ memcpy(ci, &ptr->info, sizeof(*ci));
+ rc = 0;
+ break;
+ }
+ }
+ spin_unlock_bh(&cca_info_list_lock);
+
+ return rc;
+}
+
+static void cca_info_cache_update(u16 cardnr, u16 domain,
+ const struct cca_info *ci)
+{
+ int found = 0;
+ struct cca_info_list_entry *ptr;
+
+ spin_lock_bh(&cca_info_list_lock);
+ list_for_each_entry(ptr, &cca_info_list, list) {
+ if (ptr->cardnr == cardnr &&
+ ptr->domain == domain) {
+ memcpy(&ptr->info, ci, sizeof(*ci));
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
+ if (!ptr) {
+ spin_unlock_bh(&cca_info_list_lock);
+ return;
+ }
+ ptr->cardnr = cardnr;
+ ptr->domain = domain;
+ memcpy(&ptr->info, ci, sizeof(*ci));
+ list_add(&ptr->list, &cca_info_list);
+ }
+ spin_unlock_bh(&cca_info_list_lock);
+}
+
+static void cca_info_cache_scrub(u16 cardnr, u16 domain)
+{
+ struct cca_info_list_entry *ptr;
+
+ spin_lock_bh(&cca_info_list_lock);
+ list_for_each_entry(ptr, &cca_info_list, list) {
+ if (ptr->cardnr == cardnr &&
+ ptr->domain == domain) {
+ list_del(&ptr->list);
+ kfree(ptr);
+ break;
+ }
+ }
+ spin_unlock_bh(&cca_info_list_lock);
+}
+
+static void __exit mkvp_cache_free(void)
+{
+ struct cca_info_list_entry *ptr, *pnext;
+
+ spin_lock_bh(&cca_info_list_lock);
+ list_for_each_entry_safe(ptr, pnext, &cca_info_list, list) {
+ list_del(&ptr->list);
+ kfree(ptr);
+ }
+ spin_unlock_bh(&cca_info_list_lock);
+}
+
+/*
+ * Fetch cca_info values via query_crypto_facility from adapter.
+ */
+static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
+{
+ int rc, found = 0;
+ size_t rlen, vlen;
+ u8 *rarray, *varray, *pg;
+ struct zcrypt_device_status_ext devstat;
+
+ memset(ci, 0, sizeof(*ci));
+
+ /* get first info from zcrypt device driver about this apqn */
+ rc = zcrypt_device_status_ext(cardnr, domain, &devstat);
+ if (rc)
+ return rc;
+ ci->hwtype = devstat.hwtype;
+
+ /* prep page for rule array and var array use */
+ pg = (u8 *) __get_free_page(GFP_KERNEL);
+ if (!pg)
+ return -ENOMEM;
+ rarray = pg;
+ varray = pg + PAGE_SIZE/2;
+ rlen = vlen = PAGE_SIZE/2;
+
+ /* QF for this card/domain */
+ rc = cca_query_crypto_facility(cardnr, domain, "STATICSA",
+ rarray, &rlen, varray, &vlen);
+ if (rc == 0 && rlen >= 10*8 && vlen >= 204) {
+ memcpy(ci->serial, rarray, 8);
+ ci->new_mk_state = (char) rarray[7*8];
+ ci->cur_mk_state = (char) rarray[8*8];
+ ci->old_mk_state = (char) rarray[9*8];
+ if (ci->old_mk_state == '2')
+ memcpy(&ci->old_mkvp, varray + 172, 8);
+ if (ci->cur_mk_state == '2')
+ memcpy(&ci->cur_mkvp, varray + 184, 8);
+ if (ci->new_mk_state == '3')
+ memcpy(&ci->new_mkvp, varray + 196, 8);
+ found = 1;
+ }
+
+ free_page((unsigned long) pg);
+
+ return found ? 0 : -ENOENT;
+}
+
+/*
+ * Fetch cca information about a CCA queue.
+ */
+int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify)
+{
+ int rc;
+
+ rc = cca_info_cache_fetch(card, dom, ci);
+ if (rc || verify) {
+ rc = fetch_cca_info(card, dom, ci);
+ if (rc == 0)
+ cca_info_cache_update(card, dom, ci);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(cca_get_info);
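+
+/*
+ * Usage sketch, for illustration only (never compiled): fetch the cca_info
+ * for one queue and look at the serial number and the current master key
+ * state. The function name and the card/dom values are assumptions made
+ * for this sketch.
+ */
+#if 0
+static void example_show_cca_info(u16 card, u16 dom)
+{
+	struct cca_info ci;
+
+	if (cca_get_info(card, dom, &ci, 0))
+		return;
+	pr_debug("serial %s, current mk %s\n", ci.serial,
+		 ci.cur_mk_state == '2' ? "valid" : "invalid");
+}
+#endif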
+
+/*
+ * Search for a matching crypto card based on the
+ * Master Key Verification Pattern given.
+ */
+static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain,
+ int verify, int minhwtype)
+{
+ struct zcrypt_device_status_ext *device_status;
+ u16 card, dom;
+ struct cca_info ci;
+ int i, rc, oi = -1;
+
+ /* mkvp must not be zero, minhwtype needs to be >= 0 */
+ if (mkvp == 0 || minhwtype < 0)
+ return -EINVAL;
+
+ /* fetch status of all crypto cards */
+ device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+ sizeof(struct zcrypt_device_status_ext),
+ GFP_KERNEL);
+ if (!device_status)
+ return -ENOMEM;
+ zcrypt_device_status_mask_ext(device_status);
+
+ /* walk through all crypto cards */
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ if (device_status[i].online &&
+ device_status[i].functions & 0x04) {
+ /* enabled CCA card, check current mkvp from cache */
+ if (cca_info_cache_fetch(card, dom, &ci) == 0 &&
+ ci.hwtype >= minhwtype &&
+ ci.cur_mk_state == '2' &&
+ ci.cur_mkvp == mkvp) {
+ if (!verify)
+ break;
+ /* verify: refresh card info */
+ if (fetch_cca_info(card, dom, &ci) == 0) {
+ cca_info_cache_update(card, dom, &ci);
+ if (ci.hwtype >= minhwtype &&
+ ci.cur_mk_state == '2' &&
+ ci.cur_mkvp == mkvp)
+ break;
+ }
+ }
+ } else {
+ /* Card is offline and/or not a CCA card. */
+ /* del mkvp entry from cache if it exists */
+ cca_info_cache_scrub(card, dom);
+ }
+ }
+ if (i >= MAX_ZDEV_ENTRIES_EXT) {
+ /* nothing found, so this time without cache */
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ if (!(device_status[i].online &&
+ device_status[i].functions & 0x04))
+ continue;
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ /* fresh fetch mkvp from adapter */
+ if (fetch_cca_info(card, dom, &ci) == 0) {
+ cca_info_cache_update(card, dom, &ci);
+ if (ci.hwtype >= minhwtype &&
+ ci.cur_mk_state == '2' &&
+ ci.cur_mkvp == mkvp)
+ break;
+ if (ci.hwtype >= minhwtype &&
+ ci.old_mk_state == '2' &&
+ ci.old_mkvp == mkvp &&
+ oi < 0)
+ oi = i;
+ }
+ }
+ if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) {
+ /* old mkvp matched, use this card then */
+ card = AP_QID_CARD(device_status[oi].qid);
+ dom = AP_QID_QUEUE(device_status[oi].qid);
+ }
+ }
+ if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) {
+ if (pcardnr)
+ *pcardnr = card;
+ if (pdomain)
+ *pdomain = dom;
+ rc = (i < MAX_ZDEV_ENTRIES_EXT ? 0 : 1);
+ } else
+ rc = -ENODEV;
+
+ kfree(device_status);
+ return rc;
+}
+
+/*
+ * Search for a matching crypto card based on the Master Key
+ * Verification Pattern provided inside a secure key token.
+ */
+int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify)
+{
+ u64 mkvp;
+ int minhwtype = 0;
+ const struct keytoken_header *hdr = (struct keytoken_header *) key;
+
+ if (hdr->type != TOKTYPE_CCA_INTERNAL)
+ return -EINVAL;
+
+ switch (hdr->version) {
+ case TOKVER_CCA_AES:
+ mkvp = ((struct secaeskeytoken *)key)->mkvp;
+ break;
+ case TOKVER_CCA_VLSC:
+ mkvp = ((struct cipherkeytoken *)key)->mkvp0;
+ minhwtype = AP_DEVICE_TYPE_CEX6;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return findcard(mkvp, pcardnr, pdomain, verify, minhwtype);
+}
+EXPORT_SYMBOL(cca_findcard);
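+
+/*
+ * Illustration only (example, never compiled): typical use of
+ * cca_findcard() with a CCA AES secure key token. A return value of 0
+ * means the current MKVP of the found APQN matches, 1 means only the old
+ * MKVP matches. The function name and the seckey parameter are
+ * assumptions made for this sketch.
+ */
+#if 0
+static int example_find_apqn_for_seckey(const u8 *seckey)
+{
+	u16 card = 0, dom = 0;
+	int rc;
+
+	rc = cca_findcard(seckey, &card, &dom, 0);
+	if (rc < 0)
+		return rc;	/* no matching APQN found */
+	pr_debug("seckey usable on apqn %04x.%04x (%s mkvp match)\n",
+		 card, dom, rc == 0 ? "current" : "old");
+	return 0;
+}
+#endif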
+
+int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, u64 cur_mkvp, u64 old_mkvp, int verify)
+{
+ struct zcrypt_device_status_ext *device_status;
+ int i, n, card, dom, curmatch, oldmatch, rc = 0;
+ struct cca_info ci;
+
+ *apqns = NULL;
+ *nr_apqns = 0;
+
+ /* fetch status of all crypto cards */
+ device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+ sizeof(struct zcrypt_device_status_ext),
+ GFP_KERNEL);
+ if (!device_status)
+ return -ENOMEM;
+ zcrypt_device_status_mask_ext(device_status);
+
+ /* loop two times: first gather eligible apqns, then store them */
+ while (1) {
+ n = 0;
+ /* walk through all the crypto cards */
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ /* check online state */
+ if (!device_status[i].online)
+ continue;
+ /* check for cca functions */
+ if (!(device_status[i].functions & 0x04))
+ continue;
+ /* check cardnr */
+ if (cardnr != 0xFFFF && card != cardnr)
+ continue;
+ /* check domain */
+ if (domain != 0xFFFF && dom != domain)
+ continue;
+ /* get cca info on this apqn */
+ if (cca_get_info(card, dom, &ci, verify))
+ continue;
+ /* current master key needs to be valid */
+ if (ci.cur_mk_state != '2')
+ continue;
+ /* check min hardware type */
+ if (minhwtype > 0 && minhwtype > ci.hwtype)
+ continue;
+ if (cur_mkvp || old_mkvp) {
+ /* check mkvps */
+ curmatch = oldmatch = 0;
+ if (cur_mkvp && cur_mkvp == ci.cur_mkvp)
+ curmatch = 1;
+ if (old_mkvp && ci.old_mk_state == '2' &&
+ old_mkvp == ci.old_mkvp)
+ oldmatch = 1;
+ if ((cur_mkvp || old_mkvp) &&
+ (curmatch + oldmatch < 1))
+ continue;
+ }
+			/* apqn passed all filtering criteria */
+ if (*apqns && n < *nr_apqns)
+ (*apqns)[n] = (((u16)card) << 16) | ((u16) dom);
+ n++;
+ }
+ /* loop 2nd time: array has been filled */
+ if (*apqns)
+ break;
+ /* loop 1st time: have # of eligible apqns in n */
+ if (!n) {
+ rc = -ENODEV; /* no eligible apqns found */
+ break;
+ }
+ *nr_apqns = n;
+ /* allocate array to store n apqns into */
+ *apqns = kmalloc_array(n, sizeof(u32), GFP_KERNEL);
+ if (!*apqns) {
+ rc = -ENOMEM;
+ break;
+ }
+ verify = 0;
+ }
+
+ kfree(device_status);
+ return rc;
+}
+EXPORT_SYMBOL(cca_findcard2);
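+
+/*
+ * Usage sketch, for illustration only (never compiled): gather all online
+ * CCA APQNs (any card, any domain) whose current master key matches a
+ * given mkvp. The decoding of the returned 32 bit apqn values (cardnr in
+ * the upper, domain in the lower 16 bits) mirrors how they are built
+ * above; the function name and the mkvp argument are assumptions made
+ * for this sketch.
+ */
+#if 0
+static int example_list_apqns_for_mkvp(u64 mkvp)
+{
+	u32 *apqns = NULL, nr_apqns = 0, i;
+	int rc;
+
+	rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+			   0, mkvp, 0, 0);
+	if (rc)
+		return rc;	/* -ENODEV if no apqn matches */
+	for (i = 0; i < nr_apqns; i++)
+		pr_debug("apqn %04x.%04x\n",
+			 (apqns[i] >> 16) & 0xFFFF, apqns[i] & 0xFFFF);
+	kfree(apqns);
+	return 0;
+}
+#endif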
+
+void __exit zcrypt_ccamisc_exit(void)
+{
+ mkvp_cache_free();
+}
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
new file mode 100644
index 000000000000..3a9876d5ab0e
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ * Ingo Franzki <ifranzki@linux.ibm.com>
+ *
+ * Collection of CCA misc functions used by zcrypt and pkey
+ */
+
+#ifndef _ZCRYPT_CCAMISC_H_
+#define _ZCRYPT_CCAMISC_H_
+
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+/* Key token types */
+#define TOKTYPE_NON_CCA 0x00 /* Non-CCA key token */
+#define TOKTYPE_CCA_INTERNAL 0x01 /* CCA internal key token */
+
+/* For TOKTYPE_NON_CCA: */
+#define TOKVER_PROTECTED_KEY 0x01 /* Protected key token */
+#define TOKVER_CLEAR_KEY 0x02 /* Clear key token */
+
+/* For TOKTYPE_CCA_INTERNAL: */
+#define TOKVER_CCA_AES 0x04 /* CCA AES key token */
+#define TOKVER_CCA_VLSC 0x05 /* var length sym cipher key token */
+
+/* Max size of a cca variable length cipher key token */
+#define MAXCCAVLSCTOKENSIZE 725
+
+/* header part of a CCA key token */
+struct keytoken_header {
+ u8 type; /* one of the TOKTYPE values */
+ u8 res0[1];
+ u16 len; /* vlsc token: total length in bytes */
+ u8 version; /* one of the TOKVER values */
+ u8 res1[3];
+} __packed;
+
+/* inside view of a CCA secure key token (only type 0x01 version 0x04) */
+struct secaeskeytoken {
+ u8 type; /* 0x01 for internal key token */
+ u8 res0[3];
+ u8 version; /* should be 0x04 */
+ u8 res1[1];
+ u8 flag; /* key flags */
+ u8 res2[1];
+ u64 mkvp; /* master key verification pattern */
+ u8 key[32]; /* key value (encrypted) */
+ u8 cv[8]; /* control vector */
+ u16 bitsize; /* key bit size */
+ u16 keysize; /* key byte size */
+ u8 tvv[4]; /* token validation value */
+} __packed;
+
+/* inside view of a variable length symmetric cipher AES key token */
+struct cipherkeytoken {
+ u8 type; /* 0x01 for internal key token */
+ u8 res0[1];
+ u16 len; /* total key token length in bytes */
+ u8 version; /* should be 0x05 */
+ u8 res1[3];
+ u8 kms; /* key material state, 0x03 means wrapped with MK */
+ u8 kvpt; /* key verification pattern type, should be 0x01 */
+ u64 mkvp0; /* master key verification pattern, lo part */
+ u64 mkvp1; /* master key verification pattern, hi part (unused) */
+ u8 eskwm; /* encrypted section key wrapping method */
+	u8  hashalg; /* hash algorithm used for wrapping key */
+	u8  plfver;  /* payload format version */
+ u8 res2[1];
+ u8 adsver; /* associated data section version */
+ u8 res3[1];
+ u16 adslen; /* associated data section length */
+ u8 kllen; /* optional key label length */
+ u8 ieaslen; /* optional extended associated data length */
+ u8 uadlen; /* optional user definable associated data length */
+ u8 res4[1];
+ u16 wpllen; /* wrapped payload length in bits: */
+ /* plfver 0x00 0x01 */
+ /* AES-128 512 640 */
+ /* AES-192 576 640 */
+ /* AES-256 640 640 */
+ u8 res5[1];
+ u8 algtype; /* 0x02 for AES cipher */
+ u16 keytype; /* 0x0001 for 'cipher' */
+ u8 kufc; /* key usage field count */
+ u16 kuf1; /* key usage field 1 */
+ u16 kuf2; /* key usage field 2 */
+ u8 kmfc; /* key management field count */
+ u16 kmf1; /* key management field 1 */
+ u16 kmf2; /* key management field 2 */
+ u16 kmf3; /* key management field 3 */
+ u8 vdata[0]; /* variable part data follows */
+} __packed;
+
+/* Some defines for the CCA AES cipherkeytoken kmf1 field */
+#define KMF1_XPRT_SYM 0x8000
+#define KMF1_XPRT_UASY 0x4000
+#define KMF1_XPRT_AASY 0x2000
+#define KMF1_XPRT_RAW 0x1000
+#define KMF1_XPRT_CPAC 0x0800
+#define KMF1_XPRT_DES 0x0080
+#define KMF1_XPRT_AES 0x0040
+#define KMF1_XPRT_RSA 0x0008
+
+/*
+ * Simple check if the token is a valid CCA secure AES data key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. Returns 0 on success or errno value on failure.
+ */
+int cca_check_secaeskeytoken(debug_info_t *dbg, int dbflvl,
+ const u8 *token, int keybitsize);
+
+/*
+ * Simple check if the token is a valid CCA secure AES cipher key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. If checkcpacfexport is enabled, the key is also
+ * checked for the export flag to allow CPACF export.
+ * Returns 0 on success or errno value on failure.
+ */
+int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl,
+ const u8 *token, int keybitsize,
+ int checkcpacfexport);
+
+/*
+ * Generate (random) CCA AES DATA secure key.
+ */
+int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey);
+
+/*
+ * Generate CCA AES DATA secure key with given clear key value.
+ */
+int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
+ const u8 *clrkey, u8 *seckey);
+
+/*
+ * Derive protected key from a CCA AES DATA secure key.
+ */
+int cca_sec2protkey(u16 cardnr, u16 domain,
+ const u8 seckey[SECKEYBLOBSIZE],
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
+/*
+ * Generate (random) CCA AES CIPHER secure key.
+ */
+int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Derive protected key from a CCA AES cipher secure key.
+ */
+int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
+/*
+ * Build CCA AES CIPHER secure key with a given clear key value.
+ */
+int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+ const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Query cryptographic facility from CCA adapter
+ */
+int cca_query_crypto_facility(u16 cardnr, u16 domain,
+ const char *keyword,
+ u8 *rarray, size_t *rarraylen,
+ u8 *varray, size_t *varraylen);
+
+/*
+ * Search for a matching crypto card based on the Master Key
+ * Verification Pattern provided inside a secure key.
+ * Works with CCA AES data and cipher keys.
+ * Returns < 0 on failure, 0 if CURRENT MKVP matches and
+ * 1 if OLD MKVP matches.
+ */
+int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify);
+
+/*
+ * Build a list of cca apqns meeting the following constraints:
+ * - apqn is online and is in fact a CCA apqn
+ * - if cardnr is not FFFF only apqns with this cardnr
+ * - if domain is not FFFF only apqns with this domainnr
+ * - if minhwtype > 0 only apqns with hwtype >= minhwtype
+ * - if cur_mkvp != 0 only apqns where cur_mkvp == mkvp
+ * - if old_mkvp != 0 only apqns where old_mkvp == mkvp
+ * - if verify is enabled and a cur_mkvp and/or old_mkvp
+ *   value is given, the cca_info is refetched from the adapter and the
+ *   compare is done against these fresh cur_mkvp/old_mkvp values.
+ * The array of apqn entries is allocated with kmalloc and returned in *apqns;
+ * the number of apqns stored into the list is returned in *nr_apqns. One apqn
+ * entry is simply a 32 bit value with 16 bit cardnr and 16 bit domain nr and
+ * may be cast to struct pkey_apqn. The return value is either 0 for success
+ * or a negative errno value. If no apqn meeting the criteria is found,
+ * -ENODEV is returned.
+ */
+int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, u64 cur_mkvp, u64 old_mkvp, int verify);
+
+/* struct to hold info for each CCA queue */
+struct cca_info {
+ int hwtype; /* one of the defined AP_DEVICE_TYPE_* */
+ char new_mk_state; /* '1' empty, '2' partially full, '3' full */
+ char cur_mk_state; /* '1' invalid, '2' valid */
+ char old_mk_state; /* '1' invalid, '2' valid */
+ u64 new_mkvp; /* truncated sha256 hash of new master key */
+ u64 cur_mkvp; /* truncated sha256 hash of current master key */
+ u64 old_mkvp; /* truncated sha256 hash of old master key */
+ char serial[9]; /* serial number string (8 ascii numbers + 0x00) */
+};
+
+/*
+ * Fetch cca information about a CCA queue.
+ */
+int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify);
+
+void zcrypt_ccamisc_exit(void);
+
+#endif /* _ZCRYPT_CCAMISC_H_ */
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index c50f3e86cc74..7cbb384ec535 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -175,6 +175,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
+ ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX2A_CLEANUP_TIME,
aq->private = zq;
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index 35c7c6672713..c78c0d119806 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -220,6 +220,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
+ ap_rapq(aq->qid);
rc = zcrypt_cex2c_rng_supported(aq);
if (rc < 0) {
zcrypt_queue_free(zq);
@@ -231,6 +232,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
else
zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_NORNG);
+ ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX2C_CLEANUP_TIME;
aq->private = zq;
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 582ffa7e0f18..9a9d02e19774 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright IBM Corp. 2012
+ * Copyright IBM Corp. 2012, 2019
* Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
*/
@@ -18,6 +18,8 @@
#include "zcrypt_msgtype50.h"
#include "zcrypt_error.h"
#include "zcrypt_cex4.h"
+#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
#define CEX4A_MIN_MOD_SIZE 1 /* 8 bits */
#define CEX4A_MAX_MOD_SIZE_2K 256 /* 2048 bits */
@@ -37,8 +39,8 @@
#define CEX4_CLEANUP_TIME (900*HZ)
MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("CEX4/CEX5/CEX6 Cryptographic Card device driver, " \
- "Copyright IBM Corp. 2018");
+MODULE_DESCRIPTION("CEX4/CEX5/CEX6/CEX7 Cryptographic Card device driver, " \
+ "Copyright IBM Corp. 2019");
MODULE_LICENSE("GPL");
static struct ap_device_id zcrypt_cex4_card_ids[] = {
@@ -48,6 +50,8 @@ static struct ap_device_id zcrypt_cex4_card_ids[] = {
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX6,
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX7,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ /* end of list */ },
};
@@ -60,13 +64,312 @@ static struct ap_device_id zcrypt_cex4_queue_ids[] = {
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX6,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX7,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids);
+/*
+ * CCA card additional device attributes
+ */
+static ssize_t cca_serialnr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cca_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ if (ap_domain_index >= 0)
+ cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
+}
+
+static struct device_attribute dev_attr_cca_serialnr =
+ __ATTR(serialnr, 0444, cca_serialnr_show, NULL);
+
+static struct attribute *cca_card_attrs[] = {
+ &dev_attr_cca_serialnr.attr,
+ NULL,
+};
+
+static const struct attribute_group cca_card_attr_grp = {
+ .attrs = cca_card_attrs,
+};
+
+/*
+ * CCA queue additional device attributes
+ */
+static ssize_t cca_mkvps_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int n = 0;
+ struct cca_info ci;
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+ static const char * const cao_state[] = { "invalid", "valid" };
+ static const char * const new_state[] = { "empty", "partial", "full" };
+
+ memset(&ci, 0, sizeof(ci));
+
+ cca_get_info(AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ &ci, zq->online);
+
+ if (ci.new_mk_state >= '1' && ci.new_mk_state <= '3')
+ n = snprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
+ new_state[ci.new_mk_state - '1'], ci.new_mkvp);
+ else
+ n = snprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
+
+ if (ci.cur_mk_state >= '1' && ci.cur_mk_state <= '2')
+ n += snprintf(buf + n, PAGE_SIZE - n, "AES CUR: %s 0x%016llx\n",
+ cao_state[ci.cur_mk_state - '1'], ci.cur_mkvp);
+ else
+ n += snprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
+
+ if (ci.old_mk_state >= '1' && ci.old_mk_state <= '2')
+ n += snprintf(buf + n, PAGE_SIZE - n, "AES OLD: %s 0x%016llx\n",
+ cao_state[ci.old_mk_state - '1'], ci.old_mkvp);
+ else
+ n += snprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+
+ return n;
+}
+
+static struct device_attribute dev_attr_cca_mkvps =
+ __ATTR(mkvps, 0444, cca_mkvps_show, NULL);
+
+static struct attribute *cca_queue_attrs[] = {
+ &dev_attr_cca_mkvps.attr,
+ NULL,
+};
+
+static const struct attribute_group cca_queue_attr_grp = {
+ .attrs = cca_queue_attrs,
+};
+
+/*
+ * EP11 card additional device attributes
+ */
+static ssize_t ep11_api_ordinalnr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ep11_card_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, zc->online);
+
+ if (ci.API_ord_nr > 0)
+ return snprintf(buf, PAGE_SIZE, "%u\n", ci.API_ord_nr);
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_api_ordinalnr =
+ __ATTR(API_ordinalnr, 0444, ep11_api_ordinalnr_show, NULL);
+
+static ssize_t ep11_fw_version_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ep11_card_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, zc->online);
+
+ if (ci.FW_version > 0)
+ return snprintf(buf, PAGE_SIZE, "%d.%d\n",
+ (int)(ci.FW_version >> 8),
+ (int)(ci.FW_version & 0xFF));
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_fw_version =
+ __ATTR(FW_version, 0444, ep11_fw_version_show, NULL);
+
+static ssize_t ep11_serialnr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ep11_card_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, zc->online);
+
+ if (ci.serial[0])
+ return snprintf(buf, PAGE_SIZE, "%16.16s\n", ci.serial);
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_serialnr =
+ __ATTR(serialnr, 0444, ep11_serialnr_show, NULL);
+
+static const struct {
+ int mode_bit;
+ const char *mode_txt;
+} ep11_op_modes[] = {
+ { 0, "FIPS2009" },
+ { 1, "BSI2009" },
+ { 2, "FIPS2011" },
+ { 3, "BSI2011" },
+ { 6, "BSICC2017" },
+ { 0, NULL }
+};
+
+static ssize_t ep11_card_op_modes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i, n = 0;
+ struct ep11_card_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, zc->online);
+
+ for (i = 0; ep11_op_modes[i].mode_txt; i++) {
+ if (ci.op_mode & (1 << ep11_op_modes[i].mode_bit)) {
+ if (n > 0)
+ buf[n++] = ' ';
+ n += snprintf(buf + n, PAGE_SIZE - n,
+ "%s", ep11_op_modes[i].mode_txt);
+ }
+ }
+ n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+
+ return n;
+}
+
+static struct device_attribute dev_attr_ep11_card_op_modes =
+ __ATTR(op_modes, 0444, ep11_card_op_modes_show, NULL);
+
+static struct attribute *ep11_card_attrs[] = {
+ &dev_attr_ep11_api_ordinalnr.attr,
+ &dev_attr_ep11_fw_version.attr,
+ &dev_attr_ep11_serialnr.attr,
+ &dev_attr_ep11_card_op_modes.attr,
+ NULL,
+};
+
+static const struct attribute_group ep11_card_attr_grp = {
+ .attrs = ep11_card_attrs,
+};
+
+/*
+ * EP11 queue additional device attributes
+ */
+
+static ssize_t ep11_mkvps_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int n = 0;
+ struct ep11_domain_info di;
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+ static const char * const cwk_state[] = { "invalid", "valid" };
+ static const char * const nwk_state[] = { "empty", "uncommitted",
+ "committed" };
+
+ memset(&di, 0, sizeof(di));
+
+ if (zq->online)
+ ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ &di);
+
+ if (di.cur_wk_state == '0') {
+ n = snprintf(buf, PAGE_SIZE, "WK CUR: %s -\n",
+ cwk_state[di.cur_wk_state - '0']);
+ } else if (di.cur_wk_state == '1') {
+ n = snprintf(buf, PAGE_SIZE, "WK CUR: %s 0x",
+ cwk_state[di.cur_wk_state - '0']);
+ bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp));
+ n += 2 * sizeof(di.cur_wkvp);
+ n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+ } else
+ n = snprintf(buf, PAGE_SIZE, "WK CUR: - -\n");
+
+ if (di.new_wk_state == '0') {
+ n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n",
+ nwk_state[di.new_wk_state - '0']);
+ } else if (di.new_wk_state >= '1' && di.new_wk_state <= '2') {
+ n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s 0x",
+ nwk_state[di.new_wk_state - '0']);
+ bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp));
+ n += 2 * sizeof(di.new_wkvp);
+ n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+ } else
+ n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n");
+
+ return n;
+}
+
+static struct device_attribute dev_attr_ep11_mkvps =
+ __ATTR(mkvps, 0444, ep11_mkvps_show, NULL);
+
+static ssize_t ep11_queue_op_modes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i, n = 0;
+ struct ep11_domain_info di;
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+ memset(&di, 0, sizeof(di));
+
+ if (zq->online)
+ ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ &di);
+
+ for (i = 0; ep11_op_modes[i].mode_txt; i++) {
+ if (di.op_mode & (1 << ep11_op_modes[i].mode_bit)) {
+ if (n > 0)
+ buf[n++] = ' ';
+ n += snprintf(buf + n, PAGE_SIZE - n,
+ "%s", ep11_op_modes[i].mode_txt);
+ }
+ }
+ n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+
+ return n;
+}
+
+static struct device_attribute dev_attr_ep11_queue_op_modes =
+ __ATTR(op_modes, 0444, ep11_queue_op_modes_show, NULL);
+
+static struct attribute *ep11_queue_attrs[] = {
+ &dev_attr_ep11_mkvps.attr,
+ &dev_attr_ep11_queue_op_modes.attr,
+ NULL,
+};
+
+static const struct attribute_group ep11_queue_attr_grp = {
+ .attrs = ep11_queue_attrs,
+};
+
/**
- * Probe function for CEX4/CEX5/CEX6 card device. It always
+ * Probe function for CEX4/CEX5/CEX6/CEX7 card device. It always
* accepts the AP device since the bus_match already checked
* the hardware type.
* @ap_dev: pointer to the AP device.
@@ -78,25 +381,31 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
* MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
*/
static const int CEX4A_SPEED_IDX[] = {
- 14, 19, 249, 42, 228, 1458, 0, 0};
+ 14, 19, 249, 42, 228, 1458, 0, 0};
static const int CEX5A_SPEED_IDX[] = {
- 8, 9, 20, 18, 66, 458, 0, 0};
+ 8, 9, 20, 18, 66, 458, 0, 0};
static const int CEX6A_SPEED_IDX[] = {
- 6, 9, 20, 17, 65, 438, 0, 0};
+ 6, 9, 20, 17, 65, 438, 0, 0};
+ static const int CEX7A_SPEED_IDX[] = {
+ 6, 8, 17, 15, 54, 362, 0, 0};
static const int CEX4C_SPEED_IDX[] = {
59, 69, 308, 83, 278, 2204, 209, 40};
static const int CEX5C_SPEED_IDX[] = {
- 24, 31, 50, 37, 90, 479, 27, 10};
+ 24, 31, 50, 37, 90, 479, 27, 10};
static const int CEX6C_SPEED_IDX[] = {
- 16, 20, 32, 27, 77, 455, 23, 9};
+ 16, 20, 32, 27, 77, 455, 24, 9};
+ static const int CEX7C_SPEED_IDX[] = {
+ 14, 16, 26, 23, 64, 376, 23, 8};
static const int CEX4P_SPEED_IDX[] = {
- 224, 313, 3560, 359, 605, 2827, 0, 50};
+ 0, 0, 0, 0, 0, 0, 0, 50};
static const int CEX5P_SPEED_IDX[] = {
- 63, 84, 156, 83, 142, 533, 0, 10};
+ 0, 0, 0, 0, 0, 0, 0, 10};
static const int CEX6P_SPEED_IDX[] = {
- 55, 70, 121, 73, 129, 522, 0, 9};
+ 0, 0, 0, 0, 0, 0, 0, 9};
+ static const int CEX7P_SPEED_IDX[] = {
+ 0, 0, 0, 0, 0, 0, 0, 8};
struct ap_card *ac = to_ap_card(&ap_dev->device);
struct zcrypt_card *zc;
@@ -118,11 +427,19 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX5;
memcpy(zc->speed_rating, CEX5A_SPEED_IDX,
sizeof(CEX5A_SPEED_IDX));
- } else {
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
zc->type_string = "CEX6A";
zc->user_space_type = ZCRYPT_CEX6;
memcpy(zc->speed_rating, CEX6A_SPEED_IDX,
sizeof(CEX6A_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX7A";
+ /* wrong user space type, just for compatibility
+ * with the ZCRYPT_STATUS_MASK ioctl.
+ */
+ zc->user_space_type = ZCRYPT_CEX6;
+ memcpy(zc->speed_rating, CEX7A_SPEED_IDX,
+ sizeof(CEX7A_SPEED_IDX));
}
zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
@@ -152,7 +469,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX3C;
memcpy(zc->speed_rating, CEX5C_SPEED_IDX,
sizeof(CEX5C_SPEED_IDX));
- } else {
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
zc->type_string = "CEX6C";
/* wrong user space type, must be CEX6
* just keep it for cca compatibility
@@ -160,6 +477,14 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX3C;
memcpy(zc->speed_rating, CEX6C_SPEED_IDX,
sizeof(CEX6C_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX7C";
+ /* wrong user space type, must be CEX7
+ * just keep it for cca compatibility
+ */
+ zc->user_space_type = ZCRYPT_CEX3C;
+ memcpy(zc->speed_rating, CEX7C_SPEED_IDX,
+ sizeof(CEX7C_SPEED_IDX));
}
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
@@ -175,11 +500,19 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX5;
memcpy(zc->speed_rating, CEX5P_SPEED_IDX,
sizeof(CEX5P_SPEED_IDX));
- } else {
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
zc->type_string = "CEX6P";
zc->user_space_type = ZCRYPT_CEX6;
memcpy(zc->speed_rating, CEX6P_SPEED_IDX,
sizeof(CEX6P_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX7P";
+ /* wrong user space type, just for compatibility
+ * with the ZCRYPT_STATUS_MASK ioctl.
+ */
+ zc->user_space_type = ZCRYPT_CEX6;
+ memcpy(zc->speed_rating, CEX7P_SPEED_IDX,
+ sizeof(CEX7P_SPEED_IDX));
}
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
@@ -194,19 +527,38 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
if (rc) {
ac->private = NULL;
zcrypt_card_free(zc);
+ goto out;
+ }
+
+ if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &cca_card_attr_grp);
+ if (rc)
+ zcrypt_card_unregister(zc);
+ } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &ep11_card_attr_grp);
+ if (rc)
+ zcrypt_card_unregister(zc);
}
+out:
return rc;
}
/**
- * This is called to remove the CEX4/CEX5/CEX6 card driver information
- * if an AP card device is removed.
+ * This is called to remove the CEX4/CEX5/CEX6/CEX7 card driver
+ * information if an AP card device is removed.
*/
static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
{
- struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+ struct ap_card *ac = to_ap_card(&ap_dev->device);
+ struct zcrypt_card *zc = ac->private;
+ if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
+ sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
+ else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
+ sysfs_remove_group(&ap_dev->device.kobj, &ep11_card_attr_grp);
if (zc)
zcrypt_card_unregister(zc);
}
@@ -219,7 +571,7 @@ static struct ap_driver zcrypt_cex4_card_driver = {
};
/**
- * Probe function for CEX4/CEX5/CEX6 queue device. It always
+ * Probe function for CEX4/CEX5/CEX6/CEX7 queue device. It always
* accepts the AP device since the bus_match already checked
* the hardware type.
* @ap_dev: pointer to the AP device.
@@ -251,9 +603,11 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
} else {
return -ENODEV;
}
+
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
+ ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX4_CLEANUP_TIME,
aq->private = zq;
@@ -261,13 +615,27 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
if (rc) {
aq->private = NULL;
zcrypt_queue_free(zq);
+ goto out;
+ }
+
+ if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &cca_queue_attr_grp);
+ if (rc)
+ zcrypt_queue_unregister(zq);
+ } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &ep11_queue_attr_grp);
+ if (rc)
+ zcrypt_queue_unregister(zq);
}
+out:
return rc;
}
/**
- * This is called to remove the CEX4/CEX5/CEX6 queue driver
+ * This is called to remove the CEX4/CEX5/CEX6/CEX7 queue driver
* information if an AP queue device is removed.
*/
static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
@@ -275,6 +643,10 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
struct zcrypt_queue *zq = aq->private;
+ if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
+ sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
+ else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
+ sysfs_remove_group(&ap_dev->device.kobj, &ep11_queue_attr_grp);
if (zq)
zcrypt_queue_unregister(zq);
}
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
new file mode 100644
index 000000000000..d4caf46ff9df
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -0,0 +1,1293 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ *
+ * Collection of EP11 misc functions used by zcrypt and pkey
+ */
+
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_debug.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_ep11misc.h"
+#include "zcrypt_ccamisc.h"
+
+#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
+#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
+#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
+#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
+
+/* default iv used here */
+static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+ 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
+
+/* ep11 card info cache */
+struct card_list_entry {
+ struct list_head list;
+ u16 cardnr;
+ struct ep11_card_info info;
+};
+static LIST_HEAD(card_list);
+static DEFINE_SPINLOCK(card_list_lock);
+
+static int card_cache_fetch(u16 cardnr, struct ep11_card_info *ci)
+{
+ int rc = -ENOENT;
+ struct card_list_entry *ptr;
+
+ spin_lock_bh(&card_list_lock);
+ list_for_each_entry(ptr, &card_list, list) {
+ if (ptr->cardnr == cardnr) {
+ memcpy(ci, &ptr->info, sizeof(*ci));
+ rc = 0;
+ break;
+ }
+ }
+ spin_unlock_bh(&card_list_lock);
+
+ return rc;
+}
+
+static void card_cache_update(u16 cardnr, const struct ep11_card_info *ci)
+{
+ int found = 0;
+ struct card_list_entry *ptr;
+
+ spin_lock_bh(&card_list_lock);
+ list_for_each_entry(ptr, &card_list, list) {
+ if (ptr->cardnr == cardnr) {
+ memcpy(&ptr->info, ci, sizeof(*ci));
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
+ if (!ptr) {
+ spin_unlock_bh(&card_list_lock);
+ return;
+ }
+ ptr->cardnr = cardnr;
+ memcpy(&ptr->info, ci, sizeof(*ci));
+ list_add(&ptr->list, &card_list);
+ }
+ spin_unlock_bh(&card_list_lock);
+}
+
+static void card_cache_scrub(u16 cardnr)
+{
+ struct card_list_entry *ptr;
+
+ spin_lock_bh(&card_list_lock);
+ list_for_each_entry(ptr, &card_list, list) {
+ if (ptr->cardnr == cardnr) {
+ list_del(&ptr->list);
+ kfree(ptr);
+ break;
+ }
+ }
+ spin_unlock_bh(&card_list_lock);
+}
+
+static void __exit card_cache_free(void)
+{
+ struct card_list_entry *ptr, *pnext;
+
+ spin_lock_bh(&card_list_lock);
+ list_for_each_entry_safe(ptr, pnext, &card_list, list) {
+ list_del(&ptr->list);
+ kfree(ptr);
+ }
+ spin_unlock_bh(&card_list_lock);
+}
+
+/*
+ * Simple check if the key blob is a valid EP11 secure AES key.
+ */
+int ep11_check_aeskeyblob(debug_info_t *dbg, int dbflvl,
+ const u8 *key, int keybitsize,
+ int checkcpacfexport)
+{
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (kb->head.type != TOKTYPE_NON_CCA) {
+ if (dbg)
+ DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int) kb->head.type, TOKTYPE_NON_CCA);
+ return -EINVAL;
+ }
+ if (kb->head.version != TOKVER_EP11_AES) {
+ if (dbg)
+ DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int) kb->head.version, TOKVER_EP11_AES);
+ return -EINVAL;
+ }
+ if (kb->version != EP11_STRUCT_MAGIC) {
+ if (dbg)
+ DBF("%s key check failed, magic 0x%04x != 0x%04x\n",
+ __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+ return -EINVAL;
+ }
+ switch (kb->head.keybitlen) {
+ case 128:
+ case 192:
+ case 256:
+ break;
+ default:
+ if (dbg)
+ DBF("%s key check failed, keybitlen %d invalid\n",
+ __func__, (int) kb->head.keybitlen);
+ return -EINVAL;
+ }
+	if (keybitsize > 0 && keybitsize != (int) kb->head.keybitlen) {
+		if (dbg)
+			DBF("%s key check failed, keybitsize %d\n",
+			    __func__, keybitsize);
+		return -EINVAL;
+	}
+ if (checkcpacfexport && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+ if (dbg)
+ DBF("%s key check failed, PKEY_EXTRACTABLE is 0\n",
+ __func__);
+ return -EINVAL;
+ }
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(ep11_check_aeskeyblob);
+
+/*
+ * Helper function which calls zcrypt_send_ep11_cprb with
+ * memory management segment adjusted to kernel space
+ * so that the copy_from_user calls done within this
+ * function do in fact copy from kernel space.
+ */
+static inline int _zcrypt_send_ep11_cprb(struct ep11_urb *urb)
+{
+ int rc;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+ rc = zcrypt_send_ep11_cprb(urb);
+ set_fs(old_fs);
+
+ return rc;
+}
+
+/*
+ * Allocate and prepare ep11 cprb plus additional payload.
+ */
+static inline struct ep11_cprb *alloc_cprb(size_t payload_len)
+{
+ size_t len = sizeof(struct ep11_cprb) + payload_len;
+ struct ep11_cprb *cprb;
+
+ cprb = kmalloc(len, GFP_KERNEL);
+ if (!cprb)
+ return NULL;
+
+ memset(cprb, 0, len);
+ cprb->cprb_len = sizeof(struct ep11_cprb);
+ cprb->cprb_ver_id = 0x04;
+ memcpy(cprb->func_id, "T4", 2);
+ cprb->ret_code = 0xFFFFFFFF;
+ cprb->payload_len = payload_len;
+
+ return cprb;
+}
+
+/*
+ * Some helper functions related to ASN1 encoding.
+ * Limited to length info <= 2 bytes.
+ */
+
+#define ASN1TAGLEN(x) (2 + (x) + ((x) > 127 ? 1 : 0) + ((x) > 255 ? 1 : 0))
+
+static int asn1tag_write(u8 *ptr, u8 tag, const u8 *pvalue, u16 valuelen)
+{
+ ptr[0] = tag;
+ if (valuelen > 255) {
+ ptr[1] = 0x82;
+ *((u16 *)(ptr + 2)) = valuelen;
+ memcpy(ptr + 4, pvalue, valuelen);
+ return 4 + valuelen;
+ }
+ if (valuelen > 127) {
+ ptr[1] = 0x81;
+ ptr[2] = (u8) valuelen;
+ memcpy(ptr + 3, pvalue, valuelen);
+ return 3 + valuelen;
+ }
+ ptr[1] = (u8) valuelen;
+ memcpy(ptr + 2, pvalue, valuelen);
+ return 2 + valuelen;
+}
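+
+/*
+ * Worked example of the simple TLV encoding above, for illustration: a
+ * 5 byte value is written as <tag> 0x05 <value>, a 200 byte value as
+ * <tag> 0x81 0xc8 <value>, and a 300 byte value as
+ * <tag> 0x82 0x01 0x2c <value> (the u16 store yields big endian length
+ * bytes on s390). ASN1TAGLEN() gives the matching total lengths of
+ * 7, 203 and 304 bytes.
+ */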
+
+/* EP11 payload > 127 bytes starts with this struct */
+struct pl_head {
+ u8 tag;
+ u8 lenfmt;
+ u16 len;
+ u8 func_tag;
+ u8 func_len;
+ u32 func;
+ u8 dom_tag;
+ u8 dom_len;
+ u32 dom;
+} __packed;
+
+/* prep ep11 payload head helper function */
+static inline void prep_head(struct pl_head *h,
+ size_t pl_size, int api, int func)
+{
+ h->tag = 0x30;
+ h->lenfmt = 0x82;
+ h->len = pl_size - 4;
+ h->func_tag = 0x04;
+ h->func_len = sizeof(u32);
+ h->func = (api << 16) + func;
+ h->dom_tag = 0x04;
+ h->dom_len = sizeof(u32);
+}
+
+/* prep urb helper function */
+static inline void prep_urb(struct ep11_urb *u,
+ struct ep11_target_dev *t, int nt,
+ struct ep11_cprb *req, size_t req_len,
+ struct ep11_cprb *rep, size_t rep_len)
+{
+ u->targets = (u8 __user *) t;
+ u->targets_num = nt;
+ u->req = (u8 __user *) req;
+ u->req_len = req_len;
+ u->resp = (u8 __user *) rep;
+ u->resp_len = rep_len;
+}
+
+/* Check ep11 reply payload, return 0 or suggested errno value. */
+static int check_reply_pl(const u8 *pl, const char *func)
+{
+ int len;
+ u32 ret;
+
+ /* start tag */
+ if (*pl++ != 0x30) {
+ DEBUG_ERR("%s reply start tag mismatch\n", func);
+ return -EIO;
+ }
+
+ /* payload length format */
+ if (*pl < 127) {
+ len = *pl;
+ pl++;
+ } else if (*pl == 0x81) {
+ pl++;
+ len = *pl;
+ pl++;
+ } else if (*pl == 0x82) {
+ pl++;
+ len = *((u16 *)pl);
+ pl += 2;
+ } else {
+ DEBUG_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n",
+ func, *pl);
+ return -EIO;
+ }
+
+ /* len should cover at least 3 fields with 32 bit value each */
+ if (len < 3 * 6) {
+ DEBUG_ERR("%s reply length %d too small\n", func, len);
+ return -EIO;
+ }
+
+ /* function tag, length and value */
+ if (pl[0] != 0x04 || pl[1] != 0x04) {
+ DEBUG_ERR("%s function tag or length mismatch\n", func);
+ return -EIO;
+ }
+ pl += 6;
+
+ /* dom tag, length and value */
+ if (pl[0] != 0x04 || pl[1] != 0x04) {
+ DEBUG_ERR("%s dom tag or length mismatch\n", func);
+ return -EIO;
+ }
+ pl += 6;
+
+ /* return value tag, length and value */
+ if (pl[0] != 0x04 || pl[1] != 0x04) {
+ DEBUG_ERR("%s return value tag or length mismatch\n", func);
+ return -EIO;
+ }
+ pl += 2;
+ ret = *((u32 *)pl);
+ if (ret != 0) {
+ DEBUG_ERR("%s return value 0x%04x != 0\n", func, ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Helper function which does an ep11 query with given query type.
+ */
+static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
+ size_t buflen, u8 *buf)
+{
+ struct ep11_info_req_pl {
+ struct pl_head head;
+ u8 query_type_tag;
+ u8 query_type_len;
+ u32 query_type;
+ u8 query_subtype_tag;
+ u8 query_subtype_len;
+ u32 query_subtype;
+ } __packed * req_pl;
+ struct ep11_info_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ int api = 1, rc = -ENOMEM;
+
+ /* request cprb and payload */
+ req = alloc_cprb(sizeof(struct ep11_info_req_pl));
+ if (!req)
+ goto out;
+ req_pl = (struct ep11_info_req_pl *) (((u8 *) req) + sizeof(*req));
+ prep_head(&req_pl->head, sizeof(*req_pl), api, 38); /* get xcp info */
+ req_pl->query_type_tag = 0x04;
+ req_pl->query_type_len = sizeof(u32);
+ req_pl->query_type = query_type;
+ req_pl->query_subtype_tag = 0x04;
+ req_pl->query_subtype_len = sizeof(u32);
+
+ /* reply cprb and payload */
+ rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen);
+ if (!rep)
+ goto out;
+ rep_pl = (struct ep11_info_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = cardnr;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + sizeof(*req_pl),
+ rep, sizeof(*rep) + sizeof(*rep_pl) + buflen);
+
+ rc = _zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > buflen) {
+ DEBUG_ERR("%s mismatch between reply data len and buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ memcpy(buf, ((u8 *) rep_pl) + sizeof(*rep_pl), rep_pl->data_len);
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+
+/*
+ * Provide information about an EP11 card.
+ */
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify)
+{
+ int rc;
+ struct ep11_module_query_info {
+ u32 API_ord_nr;
+ u32 firmware_id;
+ u8 FW_major_vers;
+ u8 FW_minor_vers;
+ u8 CSP_major_vers;
+ u8 CSP_minor_vers;
+ u8 fwid[32];
+ u8 xcp_config_hash[32];
+ u8 CSP_config_hash[32];
+ u8 serial[16];
+ u8 module_date_time[16];
+ u64 op_mode;
+ u32 PKCS11_flags;
+ u32 ext_flags;
+ u32 domains;
+ u32 sym_state_bytes;
+ u32 digest_state_bytes;
+ u32 pin_blob_bytes;
+ u32 SPKI_bytes;
+ u32 priv_key_blob_bytes;
+ u32 sym_blob_bytes;
+ u32 max_payload_bytes;
+ u32 CP_profile_bytes;
+ u32 max_CP_index;
+ } __packed * pmqi = NULL;
+
+ rc = card_cache_fetch(card, info);
+ if (rc || verify) {
+ pmqi = kmalloc(sizeof(*pmqi), GFP_KERNEL);
+ if (!pmqi)
+ return -ENOMEM;
+ rc = ep11_query_info(card, AUTOSEL_DOM,
+ 0x01 /* module info query */,
+ sizeof(*pmqi), (u8 *) pmqi);
+ if (rc) {
+ if (rc == -ENODEV)
+ card_cache_scrub(card);
+ goto out;
+ }
+ memset(info, 0, sizeof(*info));
+ info->API_ord_nr = pmqi->API_ord_nr;
+ info->FW_version =
+ (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers;
+ memcpy(info->serial, pmqi->serial, sizeof(info->serial));
+ info->op_mode = pmqi->op_mode;
+ card_cache_update(card, info);
+ }
+
+out:
+ kfree(pmqi);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_get_card_info);
+
+/*
+ * Provide information about a domain within an EP11 card.
+ */
+int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info)
+{
+ int rc;
+ struct ep11_domain_query_info {
+ u32 dom_index;
+ u8 cur_WK_VP[32];
+ u8 new_WK_VP[32];
+ u32 dom_flags;
+ u64 op_mode;
+ } __packed * p_dom_info;
+
+ p_dom_info = kmalloc(sizeof(*p_dom_info), GFP_KERNEL);
+ if (!p_dom_info)
+ return -ENOMEM;
+
+ rc = ep11_query_info(card, domain, 0x03 /* domain info query */,
+ sizeof(*p_dom_info), (u8 *) p_dom_info);
+ if (rc)
+ goto out;
+
+ memset(info, 0, sizeof(*info));
+ info->cur_wk_state = '0';
+ info->new_wk_state = '0';
+ if (p_dom_info->dom_flags & 0x10 /* left imprint mode */) {
+ if (p_dom_info->dom_flags & 0x02 /* cur wk valid */) {
+ info->cur_wk_state = '1';
+ memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32);
+ }
+ if (p_dom_info->dom_flags & 0x04 /* new wk present */
+ || p_dom_info->dom_flags & 0x08 /* new wk committed */) {
+ info->new_wk_state =
+ p_dom_info->dom_flags & 0x08 ? '2' : '1';
+ memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32);
+ }
+ }
+ info->op_mode = p_dom_info->op_mode;
+
+out:
+ kfree(p_dom_info);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_get_domain_info);
+
+/*
+ * Default EP11 AES key generate attributes, used when no keygenflags given:
+ * XCP_BLOB_ENCRYPT | XCP_BLOB_DECRYPT | XCP_BLOB_PROTKEY_EXTRACTABLE
+ */
+#define KEY_ATTR_DEFAULTS 0x00200c00
+
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize)
+{
+ struct keygen_req_pl {
+ struct pl_head head;
+ u8 var_tag;
+ u8 var_len;
+ u32 var;
+ u8 keybytes_tag;
+ u8 keybytes_len;
+ u32 keybytes;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ u8 attr_tag;
+ u8 attr_len;
+ u32 attr_header;
+ u32 attr_bool_mask;
+ u32 attr_bool_bits;
+ u32 attr_val_len_type;
+ u32 attr_val_len_value;
+ u8 pin_tag;
+ u8 pin_len;
+ } __packed * req_pl;
+ struct keygen_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ u8 data[512];
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ struct ep11keyblob *kb;
+ int api, rc = -ENOMEM;
+
+ switch (keybitsize) {
+ case 128:
+ case 192:
+ case 256:
+ break;
+ default:
+ DEBUG_ERR(
+ "%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* request cprb and payload */
+ req = alloc_cprb(sizeof(struct keygen_req_pl));
+ if (!req)
+ goto out;
+ req_pl = (struct keygen_req_pl *) (((u8 *) req) + sizeof(*req));
+ api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
+ prep_head(&req_pl->head, sizeof(*req_pl), api, 21); /* GenerateKey */
+ req_pl->var_tag = 0x04;
+ req_pl->var_len = sizeof(u32);
+ req_pl->keybytes_tag = 0x04;
+ req_pl->keybytes_len = sizeof(u32);
+ req_pl->keybytes = keybitsize / 8;
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32);
+ req_pl->mech = 0x00001080; /* CKM_AES_KEY_GEN */
+ req_pl->attr_tag = 0x04;
+ req_pl->attr_len = 5 * sizeof(u32);
+ req_pl->attr_header = 0x10010000;
+ req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_val_len_type = 0x00000161; /* CKA_VALUE_LEN */
+ req_pl->attr_val_len_value = keybitsize / 8;
+ req_pl->pin_tag = 0x04;
+
+ /* reply cprb and payload */
+ rep = alloc_cprb(sizeof(struct keygen_rep_pl));
+ if (!rep)
+ goto out;
+ rep_pl = (struct keygen_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + sizeof(*req_pl),
+ rep, sizeof(*rep) + sizeof(*rep_pl));
+
+ rc = _zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) card, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > *keybufsize) {
+ DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ /* copy key blob and set header values */
+ memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+ *keybufsize = rep_pl->data_len;
+ kb = (struct ep11keyblob *) keybuf;
+ kb->head.type = TOKTYPE_NON_CCA;
+ kb->head.len = rep_pl->data_len;
+ kb->head.version = TOKVER_EP11_AES;
+ kb->head.keybitlen = keybitsize;
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_genaeskey);
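+
+/*
+ * Illustration only (example, never compiled): generate a random 256 bit
+ * EP11 AES key blob into a caller provided buffer. The function name, the
+ * card/dom values and the 512 byte buffer size are assumptions made for
+ * this sketch (the size matches the reply payload limit used above);
+ * keygenflags 0 selects the default key attributes.
+ */
+#if 0
+static int example_gen_ep11_aes256(u16 card, u16 dom)
+{
+	u8 keybuf[512];
+	size_t keybufsize = sizeof(keybuf);
+	int rc;
+
+	rc = ep11_genaeskey(card, dom, 256, 0, keybuf, &keybufsize);
+	if (rc)
+		return rc;
+	/* keybuf now holds keybufsize bytes of TOKTYPE_NON_CCA /
+	 * TOKVER_EP11_AES key blob usable with the other ep11_* helpers
+	 */
+	return 0;
+}
+#endif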
+
+static int ep11_cryptsingle(u16 card, u16 domain,
+ u16 mode, u32 mech, const u8 *iv,
+ const u8 *key, size_t keysize,
+ const u8 *inbuf, size_t inbufsize,
+ u8 *outbuf, size_t *outbufsize)
+{
+ struct crypt_req_pl {
+ struct pl_head head;
+ u8 var_tag;
+ u8 var_len;
+ u32 var;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ /*
+		 * may be followed by iv data
+ * followed by key tag + key blob
+ * followed by plaintext tag + plaintext
+ */
+ } __packed * req_pl;
+ struct crypt_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ /* data follows */
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ size_t req_pl_size, rep_pl_size;
+ int n, api = 1, rc = -ENOMEM;
+ u8 *p;
+
+ /* the simple asn1 coding used has length limits */
+ if (keysize > 0xFFFF || inbufsize > 0xFFFF)
+ return -EINVAL;
+
+ /* request cprb and payload */
+ req_pl_size = sizeof(struct crypt_req_pl) + (iv ? 16 : 0)
+ + ASN1TAGLEN(keysize) + ASN1TAGLEN(inbufsize);
+ req = alloc_cprb(req_pl_size);
+ if (!req)
+ goto out;
+ req_pl = (struct crypt_req_pl *) (((u8 *) req) + sizeof(*req));
+ prep_head(&req_pl->head, req_pl_size, api, (mode ? 20 : 19));
+ req_pl->var_tag = 0x04;
+ req_pl->var_len = sizeof(u32);
+ /* mech is mech + mech params (iv here) */
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+ req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
+ p = ((u8 *) req_pl) + sizeof(*req_pl);
+ if (iv) {
+ memcpy(p, iv, 16);
+ p += 16;
+ }
+ /* key and input data */
+ p += asn1tag_write(p, 0x04, key, keysize);
+ p += asn1tag_write(p, 0x04, inbuf, inbufsize);
+
+ /* reply cprb and payload, assume out data size <= in data size + 32 */
+ rep_pl_size = sizeof(struct crypt_rep_pl) + ASN1TAGLEN(inbufsize + 32);
+ rep = alloc_cprb(rep_pl_size);
+ if (!rep)
+ goto out;
+ rep_pl = (struct crypt_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + req_pl_size,
+ rep, sizeof(*rep) + rep_pl_size);
+
+ rc = _zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) card, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ p = ((u8 *) rep_pl) + sizeof(*rep_pl);
+ if (rep_pl->data_lenfmt <= 127)
+ n = rep_pl->data_lenfmt;
+ else if (rep_pl->data_lenfmt == 0x81)
+ n = *p++;
+ else if (rep_pl->data_lenfmt == 0x82) {
+ n = *((u16 *) p);
+ p += 2;
+ } else {
+ DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n",
+ __func__, rep_pl->data_lenfmt);
+ rc = -EIO;
+ goto out;
+ }
+ if (n > *outbufsize) {
+ DEBUG_ERR("%s mismatch reply data len %d / output buffer %zu\n",
+ __func__, n, *outbufsize);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ memcpy(outbuf, p, n);
+ *outbufsize = n;
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+
+static int ep11_unwrapkey(u16 card, u16 domain,
+ const u8 *kek, size_t keksize,
+ const u8 *enckey, size_t enckeysize,
+ u32 mech, const u8 *iv,
+ u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize)
+{
+ struct uw_req_pl {
+ struct pl_head head;
+ u8 attr_tag;
+ u8 attr_len;
+ u32 attr_header;
+ u32 attr_bool_mask;
+ u32 attr_bool_bits;
+ u32 attr_key_type;
+ u32 attr_key_type_value;
+ u32 attr_val_len;
+ u32 attr_val_len_value;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ /*
+ * maybe followed by iv data
+ * followed by kek tag + kek blob
+ * followed by empty mac tag
+ * followed by empty pin tag
+	 * followed by encrypted key tag + bytes
+ */
+ } __packed * req_pl;
+ struct uw_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ u8 data[512];
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ struct ep11keyblob *kb;
+ size_t req_pl_size;
+ int api, rc = -ENOMEM;
+ u8 *p;
+
+ /* request cprb and payload */
+ req_pl_size = sizeof(struct uw_req_pl) + (iv ? 16 : 0)
+ + ASN1TAGLEN(keksize) + 4 + ASN1TAGLEN(enckeysize);
+ req = alloc_cprb(req_pl_size);
+ if (!req)
+ goto out;
+ req_pl = (struct uw_req_pl *) (((u8 *) req) + sizeof(*req));
+ api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
+ prep_head(&req_pl->head, req_pl_size, api, 34); /* UnwrapKey */
+ req_pl->attr_tag = 0x04;
+ req_pl->attr_len = 7 * sizeof(u32);
+ req_pl->attr_header = 0x10020000;
+ req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_key_type = 0x00000100; /* CKA_KEY_TYPE */
+ req_pl->attr_key_type_value = 0x0000001f; /* CKK_AES */
+ req_pl->attr_val_len = 0x00000161; /* CKA_VALUE_LEN */
+ req_pl->attr_val_len_value = keybitsize / 8;
+ /* mech is mech + mech params (iv here) */
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+ req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
+ p = ((u8 *) req_pl) + sizeof(*req_pl);
+ if (iv) {
+ memcpy(p, iv, 16);
+ p += 16;
+ }
+ /* kek */
+ p += asn1tag_write(p, 0x04, kek, keksize);
+ /* empty mac key tag */
+ *p++ = 0x04;
+ *p++ = 0;
+ /* empty pin tag */
+ *p++ = 0x04;
+ *p++ = 0;
+	/* encrypted key value tag and bytes */
+ p += asn1tag_write(p, 0x04, enckey, enckeysize);
+
+ /* reply cprb and payload */
+ rep = alloc_cprb(sizeof(struct uw_rep_pl));
+ if (!rep)
+ goto out;
+ rep_pl = (struct uw_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + req_pl_size,
+ rep, sizeof(*rep) + sizeof(*rep_pl));
+
+ rc = _zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) card, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > *keybufsize) {
+ DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ /* copy key blob and set header values */
+ memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+ *keybufsize = rep_pl->data_len;
+ kb = (struct ep11keyblob *) keybuf;
+ kb->head.type = TOKTYPE_NON_CCA;
+ kb->head.len = rep_pl->data_len;
+ kb->head.version = TOKVER_EP11_AES;
+ kb->head.keybitlen = keybitsize;
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+
+static int ep11_wrapkey(u16 card, u16 domain,
+ const u8 *key, size_t keysize,
+ u32 mech, const u8 *iv,
+ u8 *databuf, size_t *datasize)
+{
+ struct wk_req_pl {
+ struct pl_head head;
+ u8 var_tag;
+ u8 var_len;
+ u32 var;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ /*
+ * followed by iv data
+ * followed by key tag + key blob
+ * followed by dummy kek param
+ * followed by dummy mac param
+ */
+ } __packed * req_pl;
+ struct wk_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ u8 data[512];
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ struct ep11keyblob *kb;
+ size_t req_pl_size;
+ int api, rc = -ENOMEM;
+ u8 *p;
+
+ /* request cprb and payload */
+ req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0)
+ + ASN1TAGLEN(keysize) + 4;
+ req = alloc_cprb(req_pl_size);
+ if (!req)
+ goto out;
+ if (!mech || mech == 0x80060001)
+ req->flags |= 0x20; /* CPACF_WRAP needs special bit */
+ req_pl = (struct wk_req_pl *) (((u8 *) req) + sizeof(*req));
+ api = (!mech || mech == 0x80060001) ? 4 : 1; /* CKM_IBM_CPACF_WRAP */
+ prep_head(&req_pl->head, req_pl_size, api, 33); /* WrapKey */
+ req_pl->var_tag = 0x04;
+ req_pl->var_len = sizeof(u32);
+ /* mech is mech + mech params (iv here) */
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+ req_pl->mech = (mech ? mech : 0x80060001); /* CKM_IBM_CPACF_WRAP */
+ p = ((u8 *) req_pl) + sizeof(*req_pl);
+ if (iv) {
+ memcpy(p, iv, 16);
+ p += 16;
+ }
+ /* key blob */
+ p += asn1tag_write(p, 0x04, key, keysize);
+ /* maybe the key argument needs the head data cleaned out */
+ kb = (struct ep11keyblob *)(p - keysize);
+ if (kb->head.version == TOKVER_EP11_AES)
+ memset(&kb->head, 0, sizeof(kb->head));
+ /* empty kek tag */
+ *p++ = 0x04;
+ *p++ = 0;
+ /* empty mac tag */
+ *p++ = 0x04;
+ *p++ = 0;
+
+ /* reply cprb and payload */
+ rep = alloc_cprb(sizeof(struct wk_rep_pl));
+ if (!rep)
+ goto out;
+ rep_pl = (struct wk_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + req_pl_size,
+ rep, sizeof(*rep) + sizeof(*rep_pl));
+
+ rc = _zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) card, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > *datasize) {
+ DEBUG_ERR("%s mismatch reply data len / data buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ /* copy the data from the cprb to the data buffer */
+ memcpy(databuf, rep_pl->data, rep_pl->data_len);
+ *datasize = rep_pl->data_len;
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+
+int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
+{
+ int rc;
+ struct ep11keyblob *kb;
+ u8 encbuf[64], *kek = NULL;
+ size_t clrkeylen, keklen, encbuflen = sizeof(encbuf);
+
+ if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256)
+ clrkeylen = keybitsize / 8;
+ else {
+ DEBUG_ERR(
+ "%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+
+ /* allocate memory for the temp kek */
+ keklen = MAXEP11AESKEYBLOBSIZE;
+ kek = kmalloc(keklen, GFP_ATOMIC);
+ if (!kek) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Step 1: generate AES 256 bit random kek key */
+ rc = ep11_genaeskey(card, domain, 256,
+			    0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
+ kek, &keklen);
+ if (rc) {
+ DEBUG_ERR(
+ "%s generate kek key failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ kb = (struct ep11keyblob *) kek;
+ memset(&kb->head, 0, sizeof(kb->head));
+
+ /* Step 2: encrypt clear key value with the kek key */
+ rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen,
+ clrkey, clrkeylen, encbuf, &encbuflen);
+ if (rc) {
+ DEBUG_ERR(
+ "%s encrypting key value with kek key failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ /* Step 3: import the encrypted key value as a new key */
+ rc = ep11_unwrapkey(card, domain, kek, keklen,
+ encbuf, encbuflen, 0, def_iv,
+ keybitsize, 0, keybuf, keybufsize);
+ if (rc) {
+ DEBUG_ERR(
+			"%s importing key value as new key failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+out:
+ kfree(kek);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_clr2keyblob);
+
+int ep11_key2protkey(u16 card, u16 dom, const u8 *key, size_t keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ int rc = -EIO;
+ u8 *wkbuf = NULL;
+ size_t wkbuflen = 256;
+ struct wk_info {
+ u16 version;
+ u8 res1[16];
+ u32 pkeytype;
+ u32 pkeybitsize;
+ u64 pkeysize;
+ u8 res2[8];
+ u8 pkey[0];
+ } __packed * wki;
+
+ /* alloc temp working buffer */
+ wkbuf = kmalloc(wkbuflen, GFP_ATOMIC);
+ if (!wkbuf)
+ return -ENOMEM;
+
+ /* ep11 secure key -> protected key + info */
+ rc = ep11_wrapkey(card, dom, key, keylen,
+ 0, def_iv, wkbuf, &wkbuflen);
+ if (rc) {
+ DEBUG_ERR(
+ "%s rewrapping ep11 key to pkey failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ wki = (struct wk_info *) wkbuf;
+
+ /* check struct version and pkey type */
+ if (wki->version != 1 || wki->pkeytype != 1) {
+ DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n",
+ __func__, (int) wki->version, (int) wki->pkeytype);
+ rc = -EIO;
+ goto out;
+ }
+
+	/* copy the translated protected key */
+ switch (wki->pkeysize) {
+ case 16+32:
+ /* AES 128 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_128;
+ break;
+ case 24+32:
+ /* AES 192 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_192;
+ break;
+ case 32+32:
+ /* AES 256 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_256;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported pkeysize %d\n",
+ __func__, (int) wki->pkeysize);
+ rc = -EIO;
+ goto out;
+ }
+ memcpy(protkey, wki->pkey, wki->pkeysize);
+ if (protkeylen)
+ *protkeylen = (u32) wki->pkeysize;
+ rc = 0;
+
+out:
+ kfree(wkbuf);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_key2protkey);
+
+int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, int minapi, const u8 *wkvp)
+{
+ struct zcrypt_device_status_ext *device_status;
+ u32 *_apqns = NULL, _nr_apqns = 0;
+ int i, card, dom, rc = -ENOMEM;
+ struct ep11_domain_info edi;
+ struct ep11_card_info eci;
+
+ /* fetch status of all crypto cards */
+ device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+ sizeof(struct zcrypt_device_status_ext),
+ GFP_KERNEL);
+ if (!device_status)
+ return -ENOMEM;
+ zcrypt_device_status_mask_ext(device_status);
+
+ /* allocate 1k space for up to 256 apqns */
+ _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+ if (!_apqns) {
+ kfree(device_status);
+ return -ENOMEM;
+ }
+
+	/* walk through all the crypto apqns */
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ /* check online state */
+ if (!device_status[i].online)
+ continue;
+ /* check for ep11 functions */
+ if (!(device_status[i].functions & 0x01))
+ continue;
+ /* check cardnr */
+ if (cardnr != 0xFFFF && card != cardnr)
+ continue;
+ /* check domain */
+ if (domain != 0xFFFF && dom != domain)
+ continue;
+ /* check min hardware type */
+ if (minhwtype && device_status[i].hwtype < minhwtype)
+ continue;
+ /* check min api version if given */
+ if (minapi > 0) {
+ if (ep11_get_card_info(card, &eci, 0))
+ continue;
+ if (minapi > eci.API_ord_nr)
+ continue;
+ }
+ /* check wkvp if given */
+ if (wkvp) {
+ if (ep11_get_domain_info(card, dom, &edi))
+ continue;
+ if (edi.cur_wk_state != '1')
+ continue;
+ if (memcmp(wkvp, edi.cur_wkvp, 16))
+ continue;
+ }
+		/* apqn passed all filtering criteria, add to the array */
+ if (_nr_apqns < 256)
+ _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom);
+ }
+
+ /* nothing found ? */
+ if (!_nr_apqns) {
+ kfree(_apqns);
+ rc = -ENODEV;
+ } else {
+		/* no re-allocation, simply return the _apqns array */
+ *apqns = _apqns;
+ *nr_apqns = _nr_apqns;
+ rc = 0;
+ }
+
+ kfree(device_status);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_findcard2);
+
+void __exit zcrypt_ep11misc_exit(void)
+{
+ card_cache_free();
+}
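
For orientation, here is a minimal sketch (not part of this patch) of how a caller such as the pkey layer might chain the helpers above: first turn a clear AES key into an EP11 secure key blob via ep11_clr2keyblob(), then derive a CPACF protected key from that blob via ep11_key2protkey(). The card/domain values, the buffer sizes and the reduced error handling are assumptions made for the example only.

#include <linux/slab.h>
#include "zcrypt_ep11misc.h"

static int example_clrkey_to_protkey(u16 card, u16 dom, const u8 *clrkey)
{
	size_t seckeylen = sizeof(struct ep11keyblob); /* assumption: blob fits this buffer */
	u8 protkey[64];	       /* max 32 byte AES key + 32 byte verification pattern */
	u32 protkeylen, protkeytype;
	u8 *seckey;
	int rc;

	seckey = kmalloc(seckeylen, GFP_KERNEL);
	if (!seckey)
		return -ENOMEM;

	/* clear key -> EP11 secure key blob (keygenflags 0 -> KEY_ATTR_DEFAULTS) */
	rc = ep11_clr2keyblob(card, dom, 256, 0, clrkey, seckey, &seckeylen);
	if (!rc)
		/* EP11 secure key blob -> CPACF protected key */
		rc = ep11_key2protkey(card, dom, seckey, seckeylen,
				      protkey, &protkeylen, &protkeytype);

	kfree(seckey);
	return rc;
}
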
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
new file mode 100644
index 000000000000..e3ed5ed1de86
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ep11misc.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ *
+ * Collection of EP11 misc functions used by zcrypt and pkey
+ */
+
+#ifndef _ZCRYPT_EP11MISC_H_
+#define _ZCRYPT_EP11MISC_H_
+
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+#define TOKVER_EP11_AES 0x03 /* EP11 AES key blob */
+
+#define EP11_API_V 4 /* highest known and supported EP11 API version */
+
+#define EP11_STRUCT_MAGIC 0x1234
+#define EP11_BLOB_PKEY_EXTRACTABLE 0x200000
+
+/* inside view of an EP11 secure key blob */
+struct ep11keyblob {
+ union {
+ u8 session[32];
+ struct {
+ u8 type; /* 0x00 (TOKTYPE_NON_CCA) */
+ u8 res0; /* unused */
+ u16 len; /* total length in bytes of this blob */
+			u8 version; /* 0x03 (TOKVER_EP11_AES) */
+ u8 res1; /* unused */
+ u16 keybitlen; /* clear key bit len, 0 for unknown */
+ } head;
+ };
+ u8 wkvp[16]; /* wrapping key verification pattern */
+ u64 attr; /* boolean key attributes */
+ u64 mode; /* mode bits */
+ u16 version; /* 0x1234, EP11_STRUCT_MAGIC */
+ u8 iv[14];
+ u8 encrypted_key_data[144];
+ u8 mac[32];
+} __packed;
+
+/*
+ * Simple check if the key blob is a valid EP11 secure AES key.
+ * If keybitsize is given, the bitsize of the key is also checked.
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_aeskeyblob(debug_info_t *dbg, int dbflvl,
+ const u8 *key, int keybitsize,
+ int checkcpacfexport);
+
+/* EP11 card info struct */
+struct ep11_card_info {
+ u32 API_ord_nr; /* API ordinal number */
+ u16 FW_version; /* Firmware major and minor version */
+ char serial[16]; /* serial number string (16 ascii, no 0x00 !) */
+ u64 op_mode; /* card operational mode(s) */
+};
+
+/* EP11 domain info struct */
+struct ep11_domain_info {
+ char cur_wk_state; /* '0' invalid, '1' valid */
+ char new_wk_state; /* '0' empty, '1' uncommitted, '2' committed */
+ u8 cur_wkvp[32]; /* current wrapping key verification pattern */
+ u8 new_wkvp[32]; /* new wrapping key verification pattern */
+ u64 op_mode; /* domain operational mode(s) */
+};
+
+/*
+ * Provide information about an EP11 card.
+ */
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify);
+
+/*
+ * Provide information about a domain within an EP11 card.
+ */
+int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info);
+
+/*
+ * Generate (random) EP11 AES secure key.
+ */
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Generate EP11 AES secure key with given clear key value.
+ */
+int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+ const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Derive protected key from EP11 AES secure key blob.
+ */
+int ep11_key2protkey(u16 cardnr, u16 domain, const u8 *key, size_t keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
+/*
+ * Build a list of ep11 apqns meeting the following constraints:
+ * - apqn is online and is in fact an EP11 apqn
+ * - if cardnr is not FFFF only apqns with this cardnr
+ * - if domain is not FFFF only apqns with this domainnr
+ * - if minhwtype > 0 only apqns with hwtype >= minhwtype
+ * - if minapi > 0 only apqns with API_ord_nr >= minapi
+ * - if wkvp != NULL only apqns where the wkvp (EP11_WKVPLEN bytes) matches
+ *   the first EP11_WKVPLEN bytes of the wkvp of the current wrapping
+ *   key for this domain. When a wkvp is given there will always be a re-fetch
+ *   of the domain info for the potential apqn - so this triggers a request/
+ *   reply exchange with each eligible apqn.
+ * The array of apqn entries is allocated with kmalloc and returned in *apqns;
+ * the number of apqns stored into the list is returned in *nr_apqns. One apqn
+ * entry is simply a 32 bit value with 16 bit cardnr and 16 bit domain nr and
+ * may be cast to struct pkey_apqn. The return value is either 0 for success
+ * or a negative errno value. If no apqn meeting the criteria is found,
+ * -ENODEV is returned.
+ */
+int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, int minapi, const u8 *wkvp);
+
+void zcrypt_ep11misc_exit(void);
+
+#endif /* _ZCRYPT_EP11MISC_H_ */
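
As a hedged illustration of ep11_findcard2() as documented above (not code from this patch), a caller could pick the first matching online EP11 APQN and release the kmalloc'ed result array like this; the helper name and the chosen filter values are assumptions for the example.

#include <linux/slab.h>
#include "zcrypt_ep11misc.h"

static int example_pick_ep11_apqn(u16 *card, u16 *dom)
{
	u32 *apqns = NULL, nr_apqns = 0;
	int rc;

	/* any online EP11 APQN: no card/domain/hwtype/api/wkvp restriction */
	rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, 0, 0, NULL);
	if (rc)
		return rc;	/* -ENODEV when nothing matched */

	/* each entry carries the cardnr in the upper and the domain in the lower 16 bits */
	*card = apqns[0] >> 16;
	*dom = apqns[0] & 0xFFFF;

	kfree(apqns);	/* the caller owns the returned array */
	return 0;
}
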
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index f34ee41cbed8..4f4dd9d727c9 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -61,6 +61,7 @@ struct error_hdr {
#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
#define REP82_ERROR_RESERVED_FIELD 0x88
#define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A
+#define REP82_ERROR_FILTERED_BY_HYPERVISOR 0x8B
#define REP82_ERROR_TRANSPORT_FAIL 0x90
#define REP82_ERROR_PACKET_TRUNCATED 0xA0
#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
@@ -91,6 +92,7 @@ static inline int convert_error(struct zcrypt_queue *zq,
case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
case REP82_ERROR_INVALID_DOMAIN_PENDING:
case REP82_ERROR_INVALID_SPECIAL_CMD:
+ case REP82_ERROR_FILTERED_BY_HYPERVISOR:
// REP88_ERROR_INVALID_KEY // '82' CEX2A
// REP88_ERROR_OPERAND // '84' CEX2A
// REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A