Diffstat (limited to 'drivers/s390')
74 files changed, 7653 insertions, 3162 deletions
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile index a863b0462b43..cde73b6a9afb 100644 --- a/drivers/s390/Makefile +++ b/drivers/s390/Makefile @@ -4,6 +4,3 @@ # obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/ - -drivers-y += drivers/s390/built-in.a - diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index fc53e1e221f0..a28b9ff82378 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1128,7 +1128,8 @@ static u32 get_fcx_max_data(struct dasd_device *device) { struct dasd_eckd_private *private = device->private; int fcx_in_css, fcx_in_gneq, fcx_in_features; - int tpm, mdc; + unsigned int mdc; + int tpm; if (dasd_nofcx) return 0; @@ -1142,7 +1143,7 @@ static u32 get_fcx_max_data(struct dasd_device *device) return 0; mdc = ccw_device_get_mdc(device->cdev, 0); - if (mdc < 0) { + if (mdc == 0) { dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n"); return 0; } else { @@ -1153,12 +1154,12 @@ static u32 get_fcx_max_data(struct dasd_device *device) static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm) { struct dasd_eckd_private *private = device->private; - int mdc; + unsigned int mdc; u32 fcx_max_data; if (private->fcx_max_data) { mdc = ccw_device_get_mdc(device->cdev, lpm); - if ((mdc < 0)) { + if (mdc == 0) { dev_warn(&device->cdev->dev, "Detecting the maximum data size for zHPF " "requests failed (rc=%d) for a new path %x\n", @@ -1553,8 +1554,8 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device) if (rc == 0) { memcpy(&private->vsq, vsq, sizeof(*vsq)); } else { - dev_warn(&device->cdev->dev, - "Reading the volume storage information failed with rc=%d\n", rc); + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, + "Reading the volume storage information failed with rc=%d", rc); } if (useglobal) @@ -1737,8 +1738,8 @@ static int dasd_eckd_read_ext_pool_info(struct dasd_device *device) if (rc == 0) { dasd_eckd_cpy_ext_pool_data(device, lcq); } else { - dev_warn(&device->cdev->dev, - "Reading the logical configuration failed with rc=%d\n", rc); + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, + "Reading the logical configuration failed with rc=%d", rc); } dasd_sfree_request(cqr, cqr->memdev); @@ -2020,14 +2021,10 @@ dasd_eckd_check_characteristics(struct dasd_device *device) dasd_eckd_read_features(device); /* Read Volume Information */ - rc = dasd_eckd_read_vol_info(device); - if (rc) - goto out_err3; + dasd_eckd_read_vol_info(device); /* Read Extent Pool Information */ - rc = dasd_eckd_read_ext_pool_info(device); - if (rc) - goto out_err3; + dasd_eckd_read_ext_pool_info(device); /* Read Device Characteristics */ rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, @@ -2059,9 +2056,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device) if (readonly) set_bit(DASD_FLAG_DEVICE_RO, &device->flags); - if (dasd_eckd_is_ese(device)) - dasd_set_feature(device->cdev, DASD_FEATURE_DISCARD, 1); - dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) " "with %d cylinders, %d heads, %d sectors%s\n", private->rdc_data.dev_type, @@ -2080,7 +2074,7 @@ out_err2: dasd_free_block(device->block); device->block = NULL; out_err1: - kfree(private->conf_data); + dasd_eckd_clear_conf_data(device); kfree(device->private); device->private = NULL; return rc; @@ -2089,7 +2083,6 @@ out_err1: static void dasd_eckd_uncheck_device(struct dasd_device *device) { struct dasd_eckd_private *private = device->private; - int i; if (!private) return; @@ -2099,21 +2092,7 @@ 
static void dasd_eckd_uncheck_device(struct dasd_device *device) private->sneq = NULL; private->vdsneq = NULL; private->gneq = NULL; - private->conf_len = 0; - for (i = 0; i < 8; i++) { - kfree(device->path[i].conf_data); - if ((__u8 *)device->path[i].conf_data == - private->conf_data) { - private->conf_data = NULL; - private->conf_len = 0; - } - device->path[i].conf_data = NULL; - device->path[i].cssid = 0; - device->path[i].ssid = 0; - device->path[i].chpid = 0; - } - kfree(private->conf_data); - private->conf_data = NULL; + dasd_eckd_clear_conf_data(device); } static struct dasd_ccw_req * @@ -3695,14 +3674,6 @@ static int dasd_eckd_release_space(struct dasd_device *device, return -EINVAL; } -static struct dasd_ccw_req * -dasd_eckd_build_cp_discard(struct dasd_device *device, struct dasd_block *block, - struct request *req, sector_t first_trk, - sector_t last_trk) -{ - return dasd_eckd_dso_ras(device, block, req, first_trk, last_trk, 1); -} - static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( struct dasd_device *startdev, struct dasd_block *block, @@ -4447,10 +4418,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, cmdwtd = private->features.feature[12] & 0x40; use_prefix = private->features.feature[8] & 0x01; - if (req_op(req) == REQ_OP_DISCARD) - return dasd_eckd_build_cp_discard(startdev, block, req, - first_trk, last_trk); - cqr = NULL; if (cdlspecial || dasd_page_cache) { /* do nothing, just fall through to the cmd mode single case */ @@ -4729,14 +4696,12 @@ static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base, struct dasd_block *block, struct request *req) { - struct dasd_device *startdev = NULL; struct dasd_eckd_private *private; - struct dasd_ccw_req *cqr; + struct dasd_device *startdev; unsigned long flags; + struct dasd_ccw_req *cqr; - /* Discard requests can only be processed on base devices */ - if (req_op(req) != REQ_OP_DISCARD) - startdev = dasd_alias_get_start_dev(base); + startdev = dasd_alias_get_start_dev(base); if (!startdev) startdev = base; private = startdev->private; @@ -5663,14 +5628,10 @@ static int dasd_eckd_restore_device(struct dasd_device *device) dasd_eckd_read_features(device); /* Read Volume Information */ - rc = dasd_eckd_read_vol_info(device); - if (rc) - goto out_err2; + dasd_eckd_read_vol_info(device); /* Read Extent Pool Information */ - rc = dasd_eckd_read_ext_pool_info(device); - if (rc) - goto out_err2; + dasd_eckd_read_ext_pool_info(device); /* Read Device Characteristics */ rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, @@ -6521,20 +6482,8 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block) unsigned int logical_block_size = block->bp_block; struct request_queue *q = block->request_queue; struct dasd_device *device = block->base; - struct dasd_eckd_private *private; - unsigned int max_discard_sectors; - unsigned int max_bytes; - unsigned int ext_bytes; /* Extent Size in Bytes */ - int recs_per_trk; - int trks_per_cyl; - int ext_limit; - int ext_size; /* Extent Size in Cylinders */ int max; - private = device->private; - trks_per_cyl = private->rdc_data.trk_per_cyl; - recs_per_trk = recs_per_track(&private->rdc_data, 0, logical_block_size); - if (device->features & DASD_FEATURE_USERAW) { /* * the max_blocks value for raw_track access is 256 @@ -6555,28 +6504,6 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block) /* With page sized segments each segment can be translated into one idaw/tidaw */ blk_queue_max_segment_size(q, PAGE_SIZE); 
blk_queue_segment_boundary(q, PAGE_SIZE - 1); - - if (dasd_eckd_is_ese(device)) { - /* - * Depending on the extent size, up to UINT_MAX bytes can be - * accepted. However, neither DASD_ECKD_RAS_EXTS_MAX nor the - * device limits should be exceeded. - */ - ext_size = dasd_eckd_ext_size(device); - ext_limit = min(private->real_cyl / ext_size, DASD_ECKD_RAS_EXTS_MAX); - ext_bytes = ext_size * trks_per_cyl * recs_per_trk * - logical_block_size; - max_bytes = UINT_MAX - (UINT_MAX % ext_bytes); - if (max_bytes / ext_bytes > ext_limit) - max_bytes = ext_bytes * ext_limit; - - max_discard_sectors = max_bytes / 512; - - blk_queue_max_discard_sectors(q, max_discard_sectors); - blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); - q->limits.discard_granularity = ext_bytes; - q->limits.discard_alignment = ext_bytes; - } } static struct ccw_driver dasd_eckd_driver = { diff --git a/drivers/s390/block/dasd_fba.h b/drivers/s390/block/dasd_fba.h index 8f75df06e893..45ddabec4017 100644 --- a/drivers/s390/block/dasd_fba.h +++ b/drivers/s390/block/dasd_fba.h @@ -2,7 +2,7 @@ /* * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * Coypright IBM Corp. 1999, 2000 + * Copyright IBM Corp. 1999, 2000 * */ diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index 5542d9eadfe0..7d079154f849 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -116,7 +116,9 @@ int dasd_scan_partitions(struct dasd_block *block) return -ENODEV; } - rc = blkdev_reread_part(bdev); + mutex_lock(&bdev->bd_mutex); + rc = bdev_disk_changed(bdev, false); + mutex_unlock(&bdev->bd_mutex); if (rc) DBF_DEV_EVENT(DBF_ERR, block->base, "scan partitions error, rc %d", rc); diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 1770b99f607e..62a859ea67f8 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c @@ -5,7 +5,7 @@ * Carsten Otte <Cotte@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * Coypright IBM Corp. 1999, 2002 + * Copyright IBM Corp. 1999, 2002 * * /proc interface for the dasd driver. 
* @@ -320,13 +320,12 @@ out_error: #endif /* CONFIG_DASD_PROFILE */ } -static const struct file_operations dasd_stats_proc_fops = { - .owner = THIS_MODULE, - .open = dasd_stats_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = dasd_stats_proc_write, +static const struct proc_ops dasd_stats_proc_ops = { + .proc_open = dasd_stats_proc_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = single_release, + .proc_write = dasd_stats_proc_write, }; /* @@ -347,7 +346,7 @@ dasd_proc_init(void) dasd_statistics_entry = proc_create("statistics", S_IFREG | S_IRUGO | S_IWUSR, dasd_proc_root_entry, - &dasd_stats_proc_fops); + &dasd_stats_proc_ops); if (!dasd_statistics_entry) goto out_nostatistics; return 0; diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index b8a8816d94e7..845e12ac5954 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -49,6 +49,3 @@ obj-$(CONFIG_CRASH_DUMP) += sclp_sdias.o zcore.o hmcdrv-objs := hmcdrv_mod.o hmcdrv_dev.o hmcdrv_ftp.o hmcdrv_cache.o diag_ftp.o sclp_ftp.o obj-$(CONFIG_HMC_DRV) += hmcdrv.o - -chkbss := sclp_early_core.o -include $(srctree)/arch/s390/scripts/Makefile.chkbss diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index e71992a3c55f..cc5e84b80c69 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -40,7 +40,7 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb) sclp.has_gisaf = !!(sccb->fac118 & 0x08); sclp.has_hvs = !!(sccb->fac119 & 0x80); sclp.has_kss = !!(sccb->fac98 & 0x01); - sclp.has_sipl = !!(sccb->cbl & 0x02); + sclp.has_sipl = !!(sccb->cbl & 0x4000); if (sccb->fac85 & 0x02) S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; if (sccb->fac91 & 0x40) diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index ea4253939555..8abb42923307 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c @@ -341,14 +341,14 @@ tapechar_release(struct inode *inode, struct file *filp) */ static int __tapechar_ioctl(struct tape_device *device, - unsigned int no, unsigned long data) + unsigned int no, void __user *data) { int rc; if (no == MTIOCTOP) { struct mtop op; - if (copy_from_user(&op, (char __user *) data, sizeof(op)) != 0) + if (copy_from_user(&op, data, sizeof(op)) != 0) return -EFAULT; if (op.mt_count < 0) return -EINVAL; @@ -392,9 +392,7 @@ __tapechar_ioctl(struct tape_device *device, if (rc < 0) return rc; pos.mt_blkno = rc; - if (copy_to_user((char __user *) data, &pos, sizeof(pos)) != 0) - return -EFAULT; - return 0; + return put_user_mtpos(data, &pos); } if (no == MTIOCGET) { /* MTIOCGET: query the tape drive status. */ @@ -424,15 +422,12 @@ __tapechar_ioctl(struct tape_device *device, get.mt_blkno = rc; } - if (copy_to_user((char __user *) data, &get, sizeof(get)) != 0) - return -EFAULT; - - return 0; + return put_user_mtget(data, &get); } /* Try the discipline ioctl function. 
*/ if (device->discipline->ioctl_fn == NULL) return -EINVAL; - return device->discipline->ioctl_fn(device, no, data); + return device->discipline->ioctl_fn(device, no, (unsigned long)data); } static long @@ -445,7 +440,7 @@ tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data) device = (struct tape_device *) filp->private_data; mutex_lock(&device->mutex); - rc = __tapechar_ioctl(device, no, data); + rc = __tapechar_ioctl(device, no, (void __user *)data); mutex_unlock(&device->mutex); return rc; } @@ -455,23 +450,17 @@ static long tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data) { struct tape_device *device = filp->private_data; - int rval = -ENOIOCTLCMD; - unsigned long argp; + long rc; - /* The 'arg' argument of any ioctl function may only be used for - * pointers because of the compat pointer conversion. - * Consider this when adding new ioctls. - */ - argp = (unsigned long) compat_ptr(data); - if (device->discipline->ioctl_fn) { - mutex_lock(&device->mutex); - rval = device->discipline->ioctl_fn(device, no, argp); - mutex_unlock(&device->mutex); - if (rval == -EINVAL) - rval = -ENOIOCTLCMD; - } + if (no == MTIOCPOS32) + no = MTIOCPOS; + else if (no == MTIOCGET32) + no = MTIOCGET; - return rval; + mutex_lock(&device->mutex); + rc = __tapechar_ioctl(device, no, compat_ptr(data)); + mutex_unlock(&device->mutex); + return rc; } #endif /* CONFIG_COMPAT */ diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 0fa1b6b1491a..9e066281e2d0 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c @@ -43,6 +43,8 @@ static struct cma *vmcp_cma; static int __init early_parse_vmcp_cma(char *p) { + if (!p) + return 1; vmcp_cma_size = ALIGN(memparse(p, NULL), PAGE_SIZE); return 0; } diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index f6a8db04177c..23eae4188876 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile @@ -5,7 +5,7 @@ # The following is required for define_trace.h to find ./trace.h CFLAGS_trace.o := -I$(src) -CFLAGS_vfio_ccw_fsm.o := -I$(src) +CFLAGS_vfio_ccw_trace.o := -I$(src) obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \ fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o @@ -21,5 +21,5 @@ qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o obj-$(CONFIG_QDIO) += qdio.o vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o \ - vfio_ccw_async.o + vfio_ccw_async.o vfio_ccw_trace.o obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 2a3f874a21d5..da642e811f7f 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -398,12 +398,12 @@ cio_ignore_proc_open(struct inode *inode, struct file *file) sizeof(struct ccwdev_iter)); } -static const struct file_operations cio_ignore_proc_fops = { - .open = cio_ignore_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release_private, - .write = cio_ignore_write, +static const struct proc_ops cio_ignore_proc_ops = { + .proc_open = cio_ignore_proc_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = seq_release_private, + .proc_write = cio_ignore_write, }; static int @@ -412,7 +412,7 @@ cio_ignore_proc_init (void) struct proc_dir_entry *entry; entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL, - &cio_ignore_proc_fops); + &cio_ignore_proc_ops); if (!entry) return -ENOENT; return 0; diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c 
index c522e9313c50..b42a93736668 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -372,7 +372,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv, goto error; } /* Check for trailing stuff. */ - if (i == num_devices && strlen(buf) > 0) { + if (i == num_devices && buf && strlen(buf) > 0) { rc = -EINVAL; goto error; } @@ -581,11 +581,6 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver) } EXPORT_SYMBOL(ccwgroup_driver_register); -static int __ccwgroup_match_all(struct device *dev, const void *data) -{ - return 1; -} - /** * ccwgroup_driver_unregister() - deregister a ccw group driver * @cdriver: driver to be deregistered @@ -597,8 +592,7 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver) struct device *dev; /* We don't want ccwgroup devices to live longer than their driver. */ - while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, - __ccwgroup_match_all))) { + while ((dev = driver_find_next_device(&cdriver->driver, NULL))) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); ccwgroup_ungroup(gdev); @@ -608,13 +602,6 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver) } EXPORT_SYMBOL(ccwgroup_driver_unregister); -static int __ccwgroupdev_check_busid(struct device *dev, const void *id) -{ - const char *bus_id = id; - - return (strcmp(bus_id, dev_name(dev)) == 0); -} - /** * get_ccwgroupdev_by_busid() - obtain device from a bus id * @gdrv: driver the device is owned by @@ -631,8 +618,7 @@ struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv, { struct device *dev; - dev = driver_find_device(&gdrv->driver, NULL, bus_id, - __ccwgroupdev_check_busid); + dev = driver_find_device_by_name(&gdrv->driver, bus_id); return dev ? to_ccwgroupdev(dev) : NULL; } diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index ba7d2480613b..dcdaba689b20 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -113,6 +113,7 @@ struct subchannel { enum sch_todo todo; struct work_struct todo_work; struct schib_config config; + u64 dma_mask; char *driver_override; /* Driver name to force a match */ } __attribute__ ((aligned(8))); diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 22c55816100b..94edbb33d0d1 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -232,7 +232,12 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid, * belong to a subchannel need to fit 31 bit width (e.g. ccw). */ sch->dev.coherent_dma_mask = DMA_BIT_MASK(31); - sch->dev.dma_mask = &sch->dev.coherent_dma_mask; + /* + * But we don't have such restrictions imposed on the stuff that + * is handled by the streaming API. + */ + sch->dma_mask = DMA_BIT_MASK(64); + sch->dev.dma_mask = &sch->dma_mask; return sch; err: @@ -1367,18 +1372,17 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf, return ret ? 
ret : count; } -static const struct file_operations cio_settle_proc_fops = { - .open = nonseekable_open, - .write = cio_settle_write, - .llseek = no_llseek, +static const struct proc_ops cio_settle_proc_ops = { + .proc_open = nonseekable_open, + .proc_write = cio_settle_write, + .proc_lseek = no_llseek, }; static int __init cio_settle_init(void) { struct proc_dir_entry *entry; - entry = proc_create("cio_settle", S_IWUSR, NULL, - &cio_settle_proc_fops); + entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops); if (!entry) return -ENOMEM; return 0; @@ -1388,6 +1392,8 @@ device_initcall(cio_settle_init); int sch_is_pseudo_sch(struct subchannel *sch) { + if (!sch->dev.parent) + return 0; return sch == to_css(sch->dev.parent)->pseudo_subchannel; } diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index c421899be20f..0c6245fc7706 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -710,7 +710,7 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch) if (!cdev->private) goto err_priv; cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask; - cdev->dev.dma_mask = &cdev->dev.coherent_dma_mask; + cdev->dev.dma_mask = sch->dev.dma_mask; dma_pool = cio_gp_dma_create(&cdev->dev, 1); if (!dma_pool) goto err_dma_pool; @@ -1695,18 +1695,6 @@ int ccw_device_force_console(struct ccw_device *cdev) EXPORT_SYMBOL_GPL(ccw_device_force_console); #endif -/* - * get ccw_device matching the busid, but only if owned by cdrv - */ -static int -__ccwdev_check_busid(struct device *dev, const void *id) -{ - const char *bus_id = id; - - return (strcmp(bus_id, dev_name(dev)) == 0); -} - - /** * get_ccwdev_by_busid() - obtain device from a bus id * @cdrv: driver the device is owned by @@ -1723,8 +1711,7 @@ struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv, { struct device *dev; - dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id, - __ccwdev_check_busid); + dev = driver_find_device_by_name(&cdrv->driver, bus_id); return dev ? to_ccwdev(dev) : NULL; } diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index d722458c5928..ccecf6b9504e 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -124,9 +124,7 @@ EXPORT_SYMBOL(ccw_device_is_multipath); /** * ccw_device_clear() - terminate I/O request processing * @cdev: target ccw device - * @intparm: interruption parameter; value is only used if no I/O is - * outstanding, otherwise the intparm associated with the I/O request - * is returned + * @intparm: interruption parameter to be returned upon conclusion of csch * * ccw_device_clear() calls csch on @cdev's subchannel. * Returns: @@ -179,6 +177,9 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) * completed during the time specified by @expires. If a timeout occurs, the * channel program is terminated via xsch, hsch or csch, and the device's * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). + * The interruption handler will echo back the @intparm specified here, unless + * another interruption parameter is specified by a subsequent invocation of + * ccw_device_halt() or ccw_device_clear(). * Returns: * %0, if the operation was successful; * -%EBUSY, if the device is busy, or status pending; @@ -256,6 +257,9 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, * Start a S/390 channel program. 
When the interrupt arrives, the * IRQ handler is called, either immediately, delayed (dev-end missing, * or sense required) or never (no IRQ handler registered). + * The interruption handler will echo back the @intparm specified here, unless + * another interruption parameter is specified by a subsequent invocation of + * ccw_device_halt() or ccw_device_clear(). * Returns: * %0, if the operation was successful; * -%EBUSY, if the device is busy, or status pending; @@ -287,6 +291,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, * Start a S/390 channel program. When the interrupt arrives, the * IRQ handler is called, either immediately, delayed (dev-end missing, * or sense required) or never (no IRQ handler registered). + * The interruption handler will echo back the @intparm specified here, unless + * another interruption parameter is specified by a subsequent invocation of + * ccw_device_halt() or ccw_device_clear(). * Returns: * %0, if the operation was successful; * -%EBUSY, if the device is busy, or status pending; @@ -322,6 +329,9 @@ int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa, * completed during the time specified by @expires. If a timeout occurs, the * channel program is terminated via xsch, hsch or csch, and the device's * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). + * The interruption handler will echo back the @intparm specified here, unless + * another interruption parameter is specified by a subsequent invocation of + * ccw_device_halt() or ccw_device_clear(). * Returns: * %0, if the operation was successful; * -%EBUSY, if the device is busy, or status pending; @@ -343,11 +353,12 @@ int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa, /** * ccw_device_halt() - halt I/O request processing * @cdev: target ccw device - * @intparm: interruption parameter; value is only used if no I/O is - * outstanding, otherwise the intparm associated with the I/O request - * is returned + * @intparm: interruption parameter to be returned upon conclusion of hsch * * ccw_device_halt() calls hsch on @cdev's subchannel. + * The interruption handler will echo back the @intparm specified here, unless + * another interruption parameter is specified by a subsequent invocation of + * ccw_device_clear(). * Returns: * %0 on success, * -%ENODEV on device not operational, @@ -624,7 +635,7 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout); * @mask: mask of paths to use * * Return the number of 64K-bytes blocks all paths at least support - * for a transport command. Return values <= 0 indicate failures. + * for a transport command. Return value 0 indicates failure. 
*/ int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask) { diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index a06944399865..ff74eb5fce50 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -82,6 +82,7 @@ enum qdio_irq_states { #define QDIO_SIGA_WRITE 0x00 #define QDIO_SIGA_READ 0x01 #define QDIO_SIGA_SYNC 0x02 +#define QDIO_SIGA_WRITEM 0x03 #define QDIO_SIGA_WRITEQ 0x04 #define QDIO_SIGA_QEBSM_FLAG 0x80 @@ -181,11 +182,9 @@ enum qdio_queue_irq_states { }; struct qdio_input_q { - /* input buffer acknowledgement flag */ - int polling; /* first ACK'ed buffer */ int ack_start; - /* how much sbals are acknowledged with qebsm */ + /* how many SBALs are acknowledged */ int ack_count; /* last time of noticing incoming data */ u64 timestamp; @@ -206,8 +205,6 @@ struct qdio_output_q { struct qdio_outbuf_state *sbal_state; /* timer to check for more outbound work */ struct timer_list timer; - /* used SBALs before tasklet schedule */ - int scan_threshold; }; /* @@ -254,9 +251,6 @@ struct qdio_q { /* input or output queue */ int is_input_q; - /* list of thinint input queues */ - struct list_head entry; - /* upper-layer program handler */ qdio_handler_t (*handler); @@ -274,6 +268,7 @@ struct qdio_irq { struct qib qib; u32 *dsci; /* address of device state change indicator */ struct ccw_device *cdev; + struct list_head entry; /* list of thinint devices */ struct dentry *debugfs_dev; struct dentry *debugfs_perf; @@ -295,6 +290,7 @@ struct qdio_irq { struct qdio_ssqd_desc ssqd_desc; void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); + unsigned int scan_threshold; /* used SBALs before tasklet schedule */ int perf_stat_enabled; struct qdr *qdr; @@ -318,13 +314,15 @@ struct qdio_irq { #define qperf(__qdev, __attr) ((__qdev)->perf_stat.(__attr)) -#define qperf_inc(__q, __attr) \ +#define QDIO_PERF_STAT_INC(__irq, __attr) \ ({ \ - struct qdio_irq *qdev = (__q)->irq_ptr; \ + struct qdio_irq *qdev = __irq; \ if (qdev->perf_stat_enabled) \ (qdev->perf_stat.__attr)++; \ }) +#define qperf_inc(__q, __attr) QDIO_PERF_STAT_INC((__q)->irq_ptr, __attr) + static inline void account_sbals_error(struct qdio_q *q, int count) { q->q_stats.nr_sbal_error += count; @@ -356,14 +354,10 @@ static inline int multicast_outbound(struct qdio_q *q) for (i = 0; i < irq_ptr->nr_output_qs && \ ({ q = irq_ptr->output_qs[i]; 1; }); i++) -#define prev_buf(bufnr) \ - ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK) -#define next_buf(bufnr) \ - ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK) -#define add_buf(bufnr, inc) \ - ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK) -#define sub_buf(bufnr, dec) \ - ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) +#define add_buf(bufnr, inc) QDIO_BUFNR((bufnr) + (inc)) +#define next_buf(bufnr) add_buf(bufnr, 1) +#define sub_buf(bufnr, dec) QDIO_BUFNR((bufnr) - (dec)) +#define prev_buf(bufnr) sub_buf(bufnr, 1) #define queue_irqs_enabled(q) \ (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0) @@ -376,8 +370,8 @@ extern u64 last_ai_time; void qdio_setup_thinint(struct qdio_irq *irq_ptr); int qdio_establish_thinint(struct qdio_irq *irq_ptr); void qdio_shutdown_thinint(struct qdio_irq *irq_ptr); -void tiqdio_add_input_queues(struct qdio_irq *irq_ptr); -void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr); +void tiqdio_add_device(struct qdio_irq *irq_ptr); +void tiqdio_remove_device(struct qdio_irq *irq_ptr); void tiqdio_inbound_processing(unsigned long q); int tiqdio_allocate_memory(void); void tiqdio_free_memory(void); diff --git 
a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index 35410e6eda2e..9c0370b27426 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c @@ -124,9 +124,8 @@ static int qstat_show(struct seq_file *m, void *v) seq_printf(m, "nr_used: %d ftc: %d\n", atomic_read(&q->nr_buf_used), q->first_to_check); if (q->is_input_q) { - seq_printf(m, "polling: %d ack start: %d ack count: %d\n", - q->u.in.polling, q->u.in.ack_start, - q->u.in.ack_count); + seq_printf(m, "ack start: %d ack count: %d\n", + q->u.in.ack_start, q->u.in.ack_count); seq_printf(m, "DSCI: %x IRQs disabled: %u\n", *(u8 *)q->irq_ptr->dsci, test_bit(QDIO_QUEUE_IRQS_DISABLED, diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 4142c85e77d8..3475317c42e5 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -131,7 +131,7 @@ again: case 96: /* not all buffers processed */ qperf_inc(q, eqbs_partial); - DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x", + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x", tmp_count); return count - tmp_count; case 97: @@ -310,18 +310,19 @@ static inline int qdio_siga_sync_q(struct qdio_q *q) return qdio_siga_sync(q, q->mask, 0); } -static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit, - unsigned long aob) +static int qdio_siga_output(struct qdio_q *q, unsigned int count, + unsigned int *busy_bit, unsigned long aob) { unsigned long schid = *((u32 *) &q->irq_ptr->schid); unsigned int fc = QDIO_SIGA_WRITE; u64 start_time = 0; int retries = 0, cc; - unsigned long laob = 0; - if (aob) { - fc = QDIO_SIGA_WRITEQ; - laob = aob; + if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) { + if (count > 1) + fc = QDIO_SIGA_WRITEM; + else if (aob) + fc = QDIO_SIGA_WRITEQ; } if (is_qebsm(q)) { @@ -329,7 +330,7 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit, fc |= QDIO_SIGA_QEBSM_FLAG; } again: - cc = do_siga_output(schid, q->mask, busy_bit, fc, laob); + cc = do_siga_output(schid, q->mask, busy_bit, fc, aob); /* hipersocket busy condition */ if (unlikely(*busy_bit)) { @@ -392,19 +393,15 @@ int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, static inline void qdio_stop_polling(struct qdio_q *q) { - if (!q->u.in.polling) + if (!q->u.in.ack_count) return; - q->u.in.polling = 0; qperf_inc(q, stop_polling); /* show the card that we are not polling anymore */ - if (is_qebsm(q)) { - set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, - q->u.in.ack_count); - q->u.in.ack_count = 0; - } else - set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); + set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, + q->u.in.ack_count); + q->u.in.ack_count = 0; } static inline void account_sbals(struct qdio_q *q, unsigned int count) @@ -423,9 +420,6 @@ static inline void account_sbals(struct qdio_q *q, unsigned int count) static void process_buffer_error(struct qdio_q *q, unsigned int start, int count) { - unsigned char state = (q->is_input_q) ? 
SLSB_P_INPUT_NOT_INIT : - SLSB_P_OUTPUT_NOT_INIT; - q->qdio_error = QDIO_ERROR_SLSB_STATE; /* special handling for no target buffer empty */ @@ -433,7 +427,7 @@ static void process_buffer_error(struct qdio_q *q, unsigned int start, q->sbal[start]->element[15].sflags == 0x10) { qperf_inc(q, target_full); DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start); - goto set; + return; } DBF_ERROR("%4x BUF ERROR", SCH_NO(q)); @@ -442,13 +436,6 @@ static void process_buffer_error(struct qdio_q *q, unsigned int start, DBF_ERROR("F14:%2x F15:%2x", q->sbal[start]->element[14].sflags, q->sbal[start]->element[15].sflags); - -set: - /* - * Interrupts may be avoided as long as the error is present - * so change the buffer state immediately to avoid starvation. - */ - set_buf_states(q, start, state, count); } static inline void inbound_primed(struct qdio_q *q, unsigned int start, @@ -460,8 +447,7 @@ static inline void inbound_primed(struct qdio_q *q, unsigned int start, /* for QEBSM the ACK was already set by EQBS */ if (is_qebsm(q)) { - if (!q->u.in.polling) { - q->u.in.polling = 1; + if (!q->u.in.ack_count) { q->u.in.ack_count = count; q->u.in.ack_start = start; return; @@ -480,12 +466,12 @@ static inline void inbound_primed(struct qdio_q *q, unsigned int start, * or by the next inbound run. */ new = add_buf(start, count - 1); - if (q->u.in.polling) { + if (q->u.in.ack_count) { /* reset the previous ACK but first set the new one */ set_buf_state(q, new, SLSB_P_INPUT_ACK); set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); } else { - q->u.in.polling = 1; + q->u.in.ack_count = 1; set_buf_state(q, new, SLSB_P_INPUT_ACK); } @@ -530,6 +516,11 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start) return count; case SLSB_P_INPUT_ERROR: process_buffer_error(q, start, count); + /* + * Interrupts may be avoided as long as the error is present + * so change the buffer state immediately to avoid starvation. 
+ */ + set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count); if (atomic_sub_return(count, &q->nr_buf_used) == 0) qperf_inc(q, inbound_queue_full); if (q->irq_ptr->perf_stat_enabled) @@ -647,8 +638,6 @@ static void qdio_kick_handler(struct qdio_q *q, unsigned int count) qperf_inc(q, outbound_handler); DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x", start, count); - if (q->u.out.use_cq) - qdio_handle_aobs(q, start, count); } q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, @@ -774,13 +763,17 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start) count = get_outbound_buffer_frontier(q, start); - if (count) + if (count) { DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); + if (q->u.out.use_cq) + qdio_handle_aobs(q, start, count); + } return count; } -static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob) +static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count, + unsigned long aob) { int retries = 0, cc; unsigned int busy_bit; @@ -792,7 +785,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob) retry: qperf_inc(q, siga_write); - cc = qdio_siga_output(q, &busy_bit, aob); + cc = qdio_siga_output(q, count, &busy_bit, aob); switch (cc) { case 0: break; @@ -879,7 +872,7 @@ static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq) struct qdio_q *out; int i; - if (!pci_out_supported(irq)) + if (!pci_out_supported(irq) || !irq->scan_threshold) return; for_each_output_queue(irq, out, i) @@ -962,7 +955,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) /* skip if polling is enabled or already in work */ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state)) { - qperf_inc(q, int_discarded); + QDIO_PERF_STAT_INC(irq_ptr, int_discarded); continue; } q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr, @@ -972,7 +965,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) } } - if (!pci_out_supported(irq_ptr)) + if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold) return; for_each_output_queue(irq_ptr, q, i) { @@ -1161,7 +1154,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how) */ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); - tiqdio_remove_input_queues(irq_ptr); + tiqdio_remove_device(irq_ptr); qdio_shutdown_queues(cdev); qdio_shutdown_debug_entries(irq_ptr); @@ -1283,6 +1276,7 @@ int qdio_allocate(struct qdio_initialize *init_data) init_data->no_output_qs)) goto out_rel; + INIT_LIST_HEAD(&irq_ptr->entry); init_data->cdev->private->qdio_data = irq_ptr; qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); return 0; @@ -1427,7 +1421,7 @@ int qdio_activate(struct ccw_device *cdev) } if (is_thinint_irq(irq_ptr)) - tiqdio_add_input_queues(irq_ptr); + tiqdio_add_device(irq_ptr); /* wait for subchannel to become active */ msleep(5); @@ -1480,13 +1474,12 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags, qperf_inc(q, inbound_call); - if (!q->u.in.polling) + if (!q->u.in.ack_count) goto set; /* protect against stop polling setting an ACK for an emptied slsb */ if (count == QDIO_MAX_BUFFERS_PER_Q) { /* overwriting everything, just delete polling status */ - q->u.in.polling = 0; q->u.in.ack_count = 0; goto set; } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) { @@ -1496,15 +1489,14 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags, diff = sub_buf(diff, q->u.in.ack_start); q->u.in.ack_count -= diff; if (q->u.in.ack_count <= 0) { - q->u.in.polling = 0; q->u.in.ack_count = 0; goto set; } 
q->u.in.ack_start = add_buf(q->u.in.ack_start, diff); + } else { + /* the only ACK will be deleted */ + q->u.in.ack_count = 0; } - else - /* the only ACK will be deleted, so stop polling */ - q->u.in.polling = 0; } set: @@ -1525,8 +1517,9 @@ set: * @count: how many buffers are filled */ static int handle_outbound(struct qdio_q *q, unsigned int callflags, - int bufnr, int count) + unsigned int bufnr, unsigned int count) { + const unsigned int scan_threshold = q->irq_ptr->scan_threshold; unsigned char state = 0; int used, rc = 0; @@ -1547,13 +1540,10 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, if (queue_type(q) == QDIO_IQDIO_QFMT) { unsigned long phys_aob = 0; - /* One SIGA-W per buffer required for unicast HSI */ - WARN_ON_ONCE(count > 1 && !multicast_outbound(q)); - - if (q->u.out.use_cq) + if (q->u.out.use_cq && count == 1) phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr); - rc = qdio_kick_outbound_q(q, phys_aob); + rc = qdio_kick_outbound_q(q, count, phys_aob); } else if (need_siga_sync(q)) { rc = qdio_siga_sync_q(q); } else if (count < QDIO_MAX_BUFFERS_PER_Q && @@ -1562,11 +1552,15 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, /* The previous buffer is not processed yet, tack on. */ qperf_inc(q, fast_requeue); } else { - rc = qdio_kick_outbound_q(q, 0); + rc = qdio_kick_outbound_q(q, count, 0); } + /* Let drivers implement their own completion scanning: */ + if (!scan_threshold) + return rc; + /* in case of SIGA errors we must process the error immediately */ - if (used >= q->u.out.scan_threshold || rc) + if (used >= scan_threshold || rc) qdio_tasklet_schedule(q); else /* free the SBALs in case of no further traffic */ @@ -1655,6 +1649,44 @@ rescan: } EXPORT_SYMBOL(qdio_start_irq); +static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr, + unsigned int *error) +{ + unsigned int start = q->first_to_check; + int count; + + count = q->is_input_q ? qdio_inbound_q_moved(q, start) : + qdio_outbound_q_moved(q, start); + if (count == 0) + return 0; + + *bufnr = start; + *error = q->qdio_error; + + /* for the next time */ + q->first_to_check = add_buf(start, count); + q->qdio_error = 0; + + return count; +} + +int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input, + unsigned int *bufnr, unsigned int *error) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct qdio_q *q; + + if (!irq_ptr) + return -ENODEV; + q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr]; + + if (need_siga_sync(q)) + qdio_siga_sync_q(q); + + return __qdio_inspect_queue(q, bufnr, error); +} +EXPORT_SYMBOL_GPL(qdio_inspect_queue); + /** * qdio_get_next_buffers - process input buffers * @cdev: associated ccw_device for the qdio subchannel @@ -1672,13 +1704,10 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr, { struct qdio_q *q; struct qdio_irq *irq_ptr = cdev->private->qdio_data; - unsigned int start; - int count; if (!irq_ptr) return -ENODEV; q = irq_ptr->input_qs[nr]; - start = q->first_to_check; /* * Cannot rely on automatic sync after interrupt since queues may @@ -1689,25 +1718,11 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr, qdio_check_outbound_pci_queues(irq_ptr); - count = qdio_inbound_q_moved(q, start); - if (count == 0) - return 0; - - start = add_buf(start, count); - q->first_to_check = start; - /* Note: upper-layer MUST stop processing immediately here ... 
*/ if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) return -EIO; - *bufnr = q->first_to_kick; - *error = q->qdio_error; - - /* for the next time */ - q->first_to_kick = add_buf(q->first_to_kick, count); - q->qdio_error = 0; - - return count; + return __qdio_inspect_queue(q, bufnr, error); } EXPORT_SYMBOL(qdio_get_next_buffers); diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index d4101cecdc8d..3ab8e80d7bbc 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -113,7 +113,7 @@ static void set_impl_params(struct qdio_irq *irq_ptr, irq_ptr->qib.pfmt = qib_param_field_format; if (qib_param_field) memcpy(irq_ptr->qib.parm, qib_param_field, - QDIO_MAX_BUFFERS_PER_Q); + sizeof(irq_ptr->qib.parm)); if (!input_slib_elements) goto output; @@ -150,7 +150,6 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues) return -ENOMEM; } irq_ptr_qs[i] = q; - INIT_LIST_HEAD(&q->entry); } return 0; } @@ -179,7 +178,6 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, q->mask = 1 << (31 - i); q->nr = i; q->handler = handler; - INIT_LIST_HEAD(&q->entry); } static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, @@ -248,7 +246,6 @@ static void setup_queues(struct qdio_irq *irq_ptr, output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q; q->is_input_q = 0; - q->u.out.scan_threshold = qdio_init->scan_threshold; setup_storage_lists(q, irq_ptr, output_sbal_array, i); output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; @@ -474,6 +471,7 @@ int qdio_setup_irq(struct qdio_initialize *init_data) irq_ptr->nr_input_qs = init_data->no_input_qs; irq_ptr->nr_output_qs = init_data->no_output_qs; irq_ptr->cdev = init_data->cdev; + irq_ptr->scan_threshold = init_data->scan_threshold; ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid); setup_queues(irq_ptr, init_data); @@ -538,7 +536,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, int qdio_enable_async_operation(struct qdio_output_q *outq) { outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *), - GFP_ATOMIC); + GFP_KERNEL); if (!outq->aobs) { outq->use_cq = 0; return -ENOMEM; diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 93ee067c10ca..7c4e4ec08a12 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -39,14 +39,6 @@ struct indicator_t { static LIST_HEAD(tiq_list); static DEFINE_MUTEX(tiq_list_lock); -/* Adapter interrupt definitions */ -static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating); - -static struct airq_struct tiqdio_airq = { - .handler = tiqdio_thinint_handler, - .isc = QDIO_AIRQ_ISC, -}; - static struct indicator_t *q_indicators; u64 last_ai_time; @@ -74,26 +66,20 @@ static void put_indicator(u32 *addr) atomic_dec(&ind->count); } -void tiqdio_add_input_queues(struct qdio_irq *irq_ptr) +void tiqdio_add_device(struct qdio_irq *irq_ptr) { mutex_lock(&tiq_list_lock); - list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list); + list_add_rcu(&irq_ptr->entry, &tiq_list); mutex_unlock(&tiq_list_lock); } -void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) +void tiqdio_remove_device(struct qdio_irq *irq_ptr) { - struct qdio_q *q; - - q = irq_ptr->input_qs[0]; - if (!q) - return; - mutex_lock(&tiq_list_lock); - list_del_rcu(&q->entry); + list_del_rcu(&irq_ptr->entry); mutex_unlock(&tiq_list_lock); synchronize_rcu(); - INIT_LIST_HEAD(&q->entry); + INIT_LIST_HEAD(&irq_ptr->entry); } static inline int has_multiple_inq_on_dsci(struct qdio_irq 
*irq_ptr) @@ -154,7 +140,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq) /* skip if polling is enabled or already in work */ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state)) { - qperf_inc(q, int_discarded); + QDIO_PERF_STAT_INC(irq, int_discarded); continue; } @@ -182,7 +168,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq) static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating) { u32 si_used = clear_shared_ind(); - struct qdio_q *q; + struct qdio_irq *irq; last_ai_time = S390_lowcore.int_clock; inc_irq_stat(IRQIO_QAI); @@ -190,12 +176,8 @@ static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating) /* protect tiq_list entries, only changed in activate or shutdown */ rcu_read_lock(); - /* check for work on all inbound thinint queues */ - list_for_each_entry_rcu(q, &tiq_list, entry) { - struct qdio_irq *irq; - + list_for_each_entry_rcu(irq, &tiq_list, entry) { /* only process queues from changed sets */ - irq = q->irq_ptr; if (unlikely(references_shared_dsci(irq))) { if (!si_used) continue; @@ -204,11 +186,16 @@ static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating) tiqdio_call_inq_handlers(irq); - qperf_inc(q, adapter_int); + QDIO_PERF_STAT_INC(irq, adapter_int); } rcu_read_unlock(); } +static struct airq_struct tiqdio_airq = { + .handler = tiqdio_thinint_handler, + .isc = QDIO_AIRQ_ISC, +}; + static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) { struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page; diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h index 7cdc38049033..ba31240ce965 100644 --- a/drivers/s390/cio/vfio_ccw_cp.h +++ b/drivers/s390/cio/vfio_ccw_cp.h @@ -15,6 +15,7 @@ #include <asm/scsw.h> #include "orb.h" +#include "vfio_ccw_trace.h" /* * Max length for ccw chain. 
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 9208c0e56c33..e401a3d0aa57 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -27,6 +27,9 @@ struct workqueue_struct *vfio_ccw_work_q; static struct kmem_cache *vfio_ccw_io_region; static struct kmem_cache *vfio_ccw_cmd_region; +debug_info_t *vfio_ccw_debug_msg_id; +debug_info_t *vfio_ccw_debug_trace_id; + /* * Helpers */ @@ -164,6 +167,9 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) if (ret) goto out_disable; + VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n", + sch->schid.cssid, sch->schid.ssid, + sch->schid.sch_no); return 0; out_disable: @@ -194,6 +200,9 @@ static int vfio_ccw_sch_remove(struct subchannel *sch) kfree(private->cp.guest_cp); kfree(private); + VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n", + sch->schid.cssid, sch->schid.ssid, + sch->schid.sch_no); return 0; } @@ -263,27 +272,64 @@ static struct css_driver vfio_ccw_sch_driver = { .sch_event = vfio_ccw_sch_event, }; +static int __init vfio_ccw_debug_init(void) +{ + vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1, + 11 * sizeof(long)); + if (!vfio_ccw_debug_msg_id) + goto out_unregister; + debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view); + debug_set_level(vfio_ccw_debug_msg_id, 2); + vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16); + if (!vfio_ccw_debug_trace_id) + goto out_unregister; + debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view); + debug_set_level(vfio_ccw_debug_trace_id, 2); + return 0; + +out_unregister: + debug_unregister(vfio_ccw_debug_msg_id); + debug_unregister(vfio_ccw_debug_trace_id); + return -1; +} + +static void vfio_ccw_debug_exit(void) +{ + debug_unregister(vfio_ccw_debug_msg_id); + debug_unregister(vfio_ccw_debug_trace_id); +} + static int __init vfio_ccw_sch_init(void) { - int ret = -ENOMEM; + int ret; + + ret = vfio_ccw_debug_init(); + if (ret) + return ret; vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw"); - if (!vfio_ccw_work_q) - return -ENOMEM; + if (!vfio_ccw_work_q) { + ret = -ENOMEM; + goto out_err; + } vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region", sizeof(struct ccw_io_region), 0, SLAB_ACCOUNT, 0, sizeof(struct ccw_io_region), NULL); - if (!vfio_ccw_io_region) + if (!vfio_ccw_io_region) { + ret = -ENOMEM; goto out_err; + } vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region", sizeof(struct ccw_cmd_region), 0, SLAB_ACCOUNT, 0, sizeof(struct ccw_cmd_region), NULL); - if (!vfio_ccw_cmd_region) + if (!vfio_ccw_cmd_region) { + ret = -ENOMEM; goto out_err; + } isc_register(VFIO_CCW_ISC); ret = css_driver_register(&vfio_ccw_sch_driver); @@ -298,6 +344,7 @@ out_err: kmem_cache_destroy(vfio_ccw_cmd_region); kmem_cache_destroy(vfio_ccw_io_region); destroy_workqueue(vfio_ccw_work_q); + vfio_ccw_debug_exit(); return ret; } @@ -308,6 +355,7 @@ static void __exit vfio_ccw_sch_exit(void) kmem_cache_destroy(vfio_ccw_io_region); kmem_cache_destroy(vfio_ccw_cmd_region); destroy_workqueue(vfio_ccw_work_q); + vfio_ccw_debug_exit(); } module_init(vfio_ccw_sch_init); module_exit(vfio_ccw_sch_exit); diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c index 49d9d3da0282..23e61aa638e4 100644 --- a/drivers/s390/cio/vfio_ccw_fsm.c +++ b/drivers/s390/cio/vfio_ccw_fsm.c @@ -15,9 +15,6 @@ #include "ioasm.h" #include "vfio_ccw_private.h" -#define CREATE_TRACE_POINTS -#include "vfio_ccw_trace.h" - static int fsm_io_helper(struct 
vfio_ccw_private *private) { struct subchannel *sch; @@ -37,9 +34,14 @@ static int fsm_io_helper(struct vfio_ccw_private *private) goto out; } + VFIO_CCW_TRACE_EVENT(5, "stIO"); + VFIO_CCW_TRACE_EVENT(5, dev_name(&sch->dev)); + /* Issue "Start Subchannel" */ ccode = ssch(sch->schid, orb); + VFIO_CCW_HEX_EVENT(5, &ccode, sizeof(ccode)); + switch (ccode) { case 0: /* @@ -86,9 +88,14 @@ static int fsm_do_halt(struct vfio_ccw_private *private) spin_lock_irqsave(sch->lock, flags); + VFIO_CCW_TRACE_EVENT(2, "haltIO"); + VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev)); + /* Issue "Halt Subchannel" */ ccode = hsch(sch->schid); + VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode)); + switch (ccode) { case 0: /* @@ -122,9 +129,14 @@ static int fsm_do_clear(struct vfio_ccw_private *private) spin_lock_irqsave(sch->lock, flags); + VFIO_CCW_TRACE_EVENT(2, "clearIO"); + VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev)); + /* Issue "Clear Subchannel" */ ccode = csch(sch->schid); + VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode)); + switch (ccode) { case 0: /* @@ -149,6 +161,9 @@ static void fsm_notoper(struct vfio_ccw_private *private, { struct subchannel *sch = private->sch; + VFIO_CCW_TRACE_EVENT(2, "notoper"); + VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev)); + /* * TODO: * Probably we should send the machine check to the guest. @@ -229,6 +244,7 @@ static void fsm_io_request(struct vfio_ccw_private *private, struct ccw_io_region *io_region = private->io_region; struct mdev_device *mdev = private->mdev; char *errstr = "request"; + struct subchannel_id schid = get_schid(private); private->state = VFIO_CCW_STATE_CP_PROCESSING; memcpy(scsw, io_region->scsw_area, sizeof(*scsw)); @@ -239,18 +255,32 @@ static void fsm_io_request(struct vfio_ccw_private *private, /* Don't try to build a cp if transport mode is specified. */ if (orb->tm.b) { io_region->ret_code = -EOPNOTSUPP; + VFIO_CCW_MSG_EVENT(2, + "%pUl (%x.%x.%04x): transport mode\n", + mdev_uuid(mdev), schid.cssid, + schid.ssid, schid.sch_no); errstr = "transport mode"; goto err_out; } io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev), orb); if (io_region->ret_code) { + VFIO_CCW_MSG_EVENT(2, + "%pUl (%x.%x.%04x): cp_init=%d\n", + mdev_uuid(mdev), schid.cssid, + schid.ssid, schid.sch_no, + io_region->ret_code); errstr = "cp init"; goto err_out; } io_region->ret_code = cp_prefetch(&private->cp); if (io_region->ret_code) { + VFIO_CCW_MSG_EVENT(2, + "%pUl (%x.%x.%04x): cp_prefetch=%d\n", + mdev_uuid(mdev), schid.cssid, + schid.ssid, schid.sch_no, + io_region->ret_code); errstr = "cp prefetch"; cp_free(&private->cp); goto err_out; @@ -259,24 +289,37 @@ static void fsm_io_request(struct vfio_ccw_private *private, /* Start channel program and wait for I/O interrupt. 
*/ io_region->ret_code = fsm_io_helper(private); if (io_region->ret_code) { + VFIO_CCW_MSG_EVENT(2, + "%pUl (%x.%x.%04x): fsm_io_helper=%d\n", + mdev_uuid(mdev), schid.cssid, + schid.ssid, schid.sch_no, + io_region->ret_code); errstr = "cp fsm_io_helper"; cp_free(&private->cp); goto err_out; } return; } else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) { + VFIO_CCW_MSG_EVENT(2, + "%pUl (%x.%x.%04x): halt on io_region\n", + mdev_uuid(mdev), schid.cssid, + schid.ssid, schid.sch_no); /* halt is handled via the async cmd region */ io_region->ret_code = -EOPNOTSUPP; goto err_out; } else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) { + VFIO_CCW_MSG_EVENT(2, + "%pUl (%x.%x.%04x): clear on io_region\n", + mdev_uuid(mdev), schid.cssid, + schid.ssid, schid.sch_no); /* clear is handled via the async cmd region */ io_region->ret_code = -EOPNOTSUPP; goto err_out; } err_out: - trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private), - io_region->ret_code, errstr); + trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid, + io_region->ret_code, errstr); } /* @@ -298,6 +341,10 @@ static void fsm_async_request(struct vfio_ccw_private *private, /* should not happen? */ cmd_region->ret_code = -EINVAL; } + + trace_vfio_ccw_fsm_async_request(get_schid(private), + cmd_region->command, + cmd_region->ret_code); } /* @@ -308,6 +355,9 @@ static void fsm_irq(struct vfio_ccw_private *private, { struct irb *irb = this_cpu_ptr(&cio_irb); + VFIO_CCW_TRACE_EVENT(6, "IRQ"); + VFIO_CCW_TRACE_EVENT(6, dev_name(&private->sch->dev)); + memcpy(&private->irb, irb, sizeof(*irb)); queue_work(vfio_ccw_work_q, &private->io_work); diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index 5eb61116ca6f..f0d71ab77c50 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -124,6 +124,11 @@ static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev) private->mdev = mdev; private->state = VFIO_CCW_STATE_IDLE; + VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: create\n", + mdev_uuid(mdev), private->sch->schid.cssid, + private->sch->schid.ssid, + private->sch->schid.sch_no); + return 0; } @@ -132,6 +137,11 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev) struct vfio_ccw_private *private = dev_get_drvdata(mdev_parent_dev(mdev)); + VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: remove\n", + mdev_uuid(mdev), private->sch->schid.cssid, + private->sch->schid.ssid, + private->sch->schid.sch_no); + if ((private->state != VFIO_CCW_STATE_NOT_OPER) && (private->state != VFIO_CCW_STATE_STANDBY)) { if (!vfio_ccw_sch_quiesce(private->sch)) diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h index f1092c3dc1b1..9b9bb4982972 100644 --- a/drivers/s390/cio/vfio_ccw_private.h +++ b/drivers/s390/cio/vfio_ccw_private.h @@ -17,6 +17,7 @@ #include <linux/eventfd.h> #include <linux/workqueue.h> #include <linux/vfio_ccw.h> +#include <asm/debug.h> #include "css.h" #include "vfio_ccw_cp.h" @@ -134,9 +135,26 @@ extern fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS]; static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private, int event) { + trace_vfio_ccw_fsm_event(private->sch->schid, private->state, event); vfio_ccw_jumptable[private->state][event](private, event); } extern struct workqueue_struct *vfio_ccw_work_q; + +/* s390 debug feature, similar to base cio */ +extern debug_info_t *vfio_ccw_debug_msg_id; +extern debug_info_t *vfio_ccw_debug_trace_id; + +#define VFIO_CCW_TRACE_EVENT(imp, txt) \ + 
debug_text_event(vfio_ccw_debug_trace_id, imp, txt) + +#define VFIO_CCW_MSG_EVENT(imp, args...) \ + debug_sprintf_event(vfio_ccw_debug_msg_id, imp, ##args) + +static inline void VFIO_CCW_HEX_EVENT(int level, void *data, int length) +{ + debug_event(vfio_ccw_debug_trace_id, level, data, length); +} + #endif diff --git a/drivers/s390/cio/vfio_ccw_trace.c b/drivers/s390/cio/vfio_ccw_trace.c new file mode 100644 index 000000000000..8c671d2519f6 --- /dev/null +++ b/drivers/s390/cio/vfio_ccw_trace.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Tracepoint definitions for vfio_ccw + * + * Copyright IBM Corp. 2019 + * Author(s): Eric Farman <farman@linux.ibm.com> + */ + +#define CREATE_TRACE_POINTS +#include "vfio_ccw_trace.h" + +EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_async_request); +EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_event); +EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_io_request); diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h index b1da53ddec1f..f5d31887d413 100644 --- a/drivers/s390/cio/vfio_ccw_trace.h +++ b/drivers/s390/cio/vfio_ccw_trace.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Tracepoints for vfio_ccw driver +/* SPDX-License-Identifier: GPL-2.0 */ +/* Tracepoints for vfio_ccw driver * * Copyright IBM Corp. 2018 * @@ -7,6 +7,8 @@ * Halil Pasic <pasic@linux.vnet.ibm.com> */ +#include "cio.h" + #undef TRACE_SYSTEM #define TRACE_SYSTEM vfio_ccw @@ -15,28 +17,88 @@ #include <linux/tracepoint.h> -TRACE_EVENT(vfio_ccw_io_fctl, +TRACE_EVENT(vfio_ccw_fsm_async_request, + TP_PROTO(struct subchannel_id schid, + int command, + int errno), + TP_ARGS(schid, command, errno), + + TP_STRUCT__entry( + __field(u8, cssid) + __field(u8, ssid) + __field(u16, sch_no) + __field(int, command) + __field(int, errno) + ), + + TP_fast_assign( + __entry->cssid = schid.cssid; + __entry->ssid = schid.ssid; + __entry->sch_no = schid.sch_no; + __entry->command = command; + __entry->errno = errno; + ), + + TP_printk("schid=%x.%x.%04x command=0x%x errno=%d", + __entry->cssid, + __entry->ssid, + __entry->sch_no, + __entry->command, + __entry->errno) +); + +TRACE_EVENT(vfio_ccw_fsm_event, + TP_PROTO(struct subchannel_id schid, int state, int event), + TP_ARGS(schid, state, event), + + TP_STRUCT__entry( + __field(u8, cssid) + __field(u8, ssid) + __field(u16, schno) + __field(int, state) + __field(int, event) + ), + + TP_fast_assign( + __entry->cssid = schid.cssid; + __entry->ssid = schid.ssid; + __entry->schno = schid.sch_no; + __entry->state = state; + __entry->event = event; + ), + + TP_printk("schid=%x.%x.%04x state=%d event=%d", + __entry->cssid, __entry->ssid, __entry->schno, + __entry->state, + __entry->event) +); + +TRACE_EVENT(vfio_ccw_fsm_io_request, TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr), TP_ARGS(fctl, schid, errno, errstr), TP_STRUCT__entry( + __field(u8, cssid) + __field(u8, ssid) + __field(u16, sch_no) __field(int, fctl) - __field_struct(struct subchannel_id, schid) __field(int, errno) __field(char*, errstr) ), TP_fast_assign( + __entry->cssid = schid.cssid; + __entry->ssid = schid.ssid; + __entry->sch_no = schid.sch_no; __entry->fctl = fctl; - __entry->schid = schid; __entry->errno = errno; __entry->errstr = errstr; ), - TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s", - __entry->schid.cssid, - __entry->schid.ssid, - __entry->schid.sch_no, + TP_printk("schid=%x.%x.%04x fctl=0x%x errno=%d info=%s", + __entry->cssid, + __entry->ssid, + __entry->sch_no, __entry->fctl, __entry->errno, __entry->errstr) diff --git 
a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile index 6ccd93d0b1cb..22d2db690cd3 100644 --- a/drivers/s390/crypto/Makefile +++ b/drivers/s390/crypto/Makefile @@ -8,6 +8,7 @@ obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o # zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o +zcrypt-objs += zcrypt_ccamisc.o zcrypt_ep11misc.o obj-$(CONFIG_ZCRYPT) += zcrypt.o # adapter drivers depend on ap.o and zcrypt.o obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index a76b8a8bcbbb..5256e3ce84e5 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -793,8 +793,6 @@ static int ap_device_probe(struct device *dev) drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT; if (!!devres != !!drvres) return -ENODEV; - /* (re-)init queue's state machine */ - ap_queue_reinit_state(to_ap_queue(dev)); } /* Add queue/card to list of active queues/cards */ @@ -1322,24 +1320,24 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func) /* < CEX2A is not supported */ if (rawtype < AP_DEVICE_TYPE_CEX2A) return 0; - /* up to CEX6 known and fully supported */ - if (rawtype <= AP_DEVICE_TYPE_CEX6) + /* up to CEX7 known and fully supported */ + if (rawtype <= AP_DEVICE_TYPE_CEX7) return rawtype; /* - * unknown new type > CEX6, check for compatibility + * unknown new type > CEX7, check for compatibility * to the highest known and supported type which is - * currently CEX6 with the help of the QACT function. + * currently CEX7 with the help of the QACT function. */ if (ap_qact_available()) { struct ap_queue_status status; union ap_qact_ap_info apinfo = {0}; apinfo.mode = (func >> 26) & 0x07; - apinfo.cat = AP_DEVICE_TYPE_CEX6; + apinfo.cat = AP_DEVICE_TYPE_CEX7; status = ap_qact(qid, 0, &apinfo); if (status.response_code == AP_RESPONSE_NORMAL && apinfo.cat >= AP_DEVICE_TYPE_CEX2A - && apinfo.cat <= AP_DEVICE_TYPE_CEX6) + && apinfo.cat <= AP_DEVICE_TYPE_CEX7) comp_type = apinfo.cat; } if (!comp_type) diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 6f3cf37776ca..4348fdff1c61 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0+ */ /* - * Copyright IBM Corp. 2006, 2012 + * Copyright IBM Corp. 2006, 2019 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Ralph Wuerthner <rwuerthn@de.ibm.com> @@ -63,6 +63,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr) #define AP_DEVICE_TYPE_CEX4 10 #define AP_DEVICE_TYPE_CEX5 11 #define AP_DEVICE_TYPE_CEX6 12 +#define AP_DEVICE_TYPE_CEX7 13 /* * Known function facilities @@ -161,7 +162,7 @@ struct ap_card { unsigned int functions; /* AP device function bitfield. */ int queue_depth; /* AP queue depth.*/ int id; /* AP card number. */ - atomic_t total_request_count; /* # requests ever for this AP device.*/ + atomic64_t total_request_count; /* # requests ever for this AP device.*/ }; #define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device) @@ -178,7 +179,7 @@ struct ap_queue { enum ap_state state; /* State of the AP device. */ int pendingq_count; /* # requests on pendingq list. */ int requestq_count; /* # requests on requestq list. 
*/ - int total_request_count; /* # requests ever for this AP device.*/ + u64 total_request_count; /* # requests ever for this AP device.*/ int request_timeout; /* Request timeout in jiffies. */ struct timer_list timeout; /* Timer for request timeouts. */ struct list_head pendingq; /* List of message sent to AP queue. */ @@ -260,7 +261,7 @@ void ap_queue_prepare_remove(struct ap_queue *aq); void ap_queue_remove(struct ap_queue *aq); void ap_queue_suspend(struct ap_device *ap_dev); void ap_queue_resume(struct ap_device *ap_dev); -void ap_queue_reinit_state(struct ap_queue *aq); +void ap_queue_init_state(struct ap_queue *aq); struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type, int comp_device_type, unsigned int functions); diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c index 63b4cc6cd7e5..e85bfca1ed16 100644 --- a/drivers/s390/crypto/ap_card.c +++ b/drivers/s390/crypto/ap_card.c @@ -63,13 +63,13 @@ static ssize_t request_count_show(struct device *dev, char *buf) { struct ap_card *ac = to_ap_card(dev); - unsigned int req_cnt; + u64 req_cnt; req_cnt = 0; spin_lock_bh(&ap_list_lock); - req_cnt = atomic_read(&ac->total_request_count); + req_cnt = atomic64_read(&ac->total_request_count); spin_unlock_bh(&ap_list_lock); - return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt); + return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt); } static ssize_t request_count_store(struct device *dev, @@ -83,7 +83,7 @@ static ssize_t request_count_store(struct device *dev, for_each_ap_queue(aq, ac) aq->total_request_count = 0; spin_unlock_bh(&ap_list_lock); - atomic_set(&ac->total_request_count, 0); + atomic64_set(&ac->total_request_count, 0); return count; } diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index dad2be333d82..a317ab484932 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -479,12 +479,12 @@ static ssize_t request_count_show(struct device *dev, char *buf) { struct ap_queue *aq = to_ap_queue(dev); - unsigned int req_cnt; + u64 req_cnt; spin_lock_bh(&aq->lock); req_cnt = aq->total_request_count; spin_unlock_bh(&aq->lock); - return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt); + return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt); } static ssize_t request_count_store(struct device *dev, @@ -638,7 +638,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) aq->ap_dev.device.type = &ap_queue_type; aq->ap_dev.device_type = device_type; aq->qid = qid; - aq->state = AP_STATE_RESET_START; + aq->state = AP_STATE_UNBOUND; aq->interrupt = AP_INTR_DISABLED; spin_lock_init(&aq->lock); INIT_LIST_HEAD(&aq->list); @@ -676,7 +676,7 @@ void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg) list_add_tail(&ap_msg->list, &aq->requestq); aq->requestq_count++; aq->total_request_count++; - atomic_inc(&aq->card->total_request_count); + atomic64_inc(&aq->card->total_request_count); /* Send/receive as many request from the queue as possible. 
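 * A POLL event is fed into the queue's state machine here, which sends pending requests to the AP device and collects finished replies.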
*/ ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL)); spin_unlock_bh(&aq->lock); @@ -771,10 +771,11 @@ void ap_queue_remove(struct ap_queue *aq) spin_unlock_bh(&aq->lock); } -void ap_queue_reinit_state(struct ap_queue *aq) +void ap_queue_init_state(struct ap_queue *aq) { spin_lock_bh(&aq->lock); aq->state = AP_STATE_RESET_START; ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); spin_unlock_bh(&aq->lock); } +EXPORT_SYMBOL(ap_queue_init_state); diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 7f418d2d8cdf..2f33c5fcf676 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -2,7 +2,7 @@ /* * pkey device driver * - * Copyright IBM Corp. 2017 + * Copyright IBM Corp. 2017,2019 * Author(s): Harald Freudenberger */ @@ -24,16 +24,15 @@ #include <crypto/aes.h> #include "zcrypt_api.h" +#include "zcrypt_ccamisc.h" +#include "zcrypt_ep11misc.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("s390 protected key interface"); -/* Size of parameter block used for all cca requests/replies */ -#define PARMBSIZE 512 - -/* Size of vardata block used for some of the cca requests/replies */ -#define VARDATASIZE 4096 +#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */ +#define MAXAPQNSINLIST 64 /* max 64 apqns within a apqn list */ /* mask of available pckmo subfunctions, fetched once at module init */ static cpacf_mask_t pckmo_functions; @@ -62,40 +61,6 @@ static void __exit pkey_debug_exit(void) debug_unregister(debug_info); } -/* Key token types */ -#define TOKTYPE_NON_CCA 0x00 /* Non-CCA key token */ -#define TOKTYPE_CCA_INTERNAL 0x01 /* CCA internal key token */ - -/* For TOKTYPE_NON_CCA: */ -#define TOKVER_PROTECTED_KEY 0x01 /* Protected key token */ - -/* For TOKTYPE_CCA_INTERNAL: */ -#define TOKVER_CCA_AES 0x04 /* CCA AES key token */ - -/* header part of a key token */ -struct keytoken_header { - u8 type; /* one of the TOKTYPE values */ - u8 res0[3]; - u8 version; /* one of the TOKVER values */ - u8 res1[3]; -} __packed; - -/* inside view of a secure key token (only type 0x01 version 0x04) */ -struct secaeskeytoken { - u8 type; /* 0x01 for internal key token */ - u8 res0[3]; - u8 version; /* should be 0x04 */ - u8 res1[1]; - u8 flag; /* key flags */ - u8 res2[1]; - u64 mkvp; /* master key verification pattern */ - u8 key[32]; /* key value (encrypted) */ - u8 cv[8]; /* control vector */ - u16 bitsize; /* key bit size */ - u16 keysize; /* key byte size */ - u8 tvv[4]; /* token validation value */ -} __packed; - /* inside view of a protected key token (only type 0x00 version 0x01) */ struct protaeskeytoken { u8 type; /* 0x00 for PAES specific key tokens */ @@ -107,558 +72,23 @@ struct protaeskeytoken { u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */ } __packed; -/* - * Simple check if the token is a valid CCA secure AES key - * token. If keybitsize is given, the bitsize of the key is - * also checked. Returns 0 on success or errno value on failure. 
- */ -static int check_secaeskeytoken(const u8 *token, int keybitsize) -{ - struct secaeskeytoken *t = (struct secaeskeytoken *) token; - - if (t->type != TOKTYPE_CCA_INTERNAL) { - DEBUG_ERR( - "%s secure token check failed, type mismatch 0x%02x != 0x%02x\n", - __func__, (int) t->type, TOKTYPE_CCA_INTERNAL); - return -EINVAL; - } - if (t->version != TOKVER_CCA_AES) { - DEBUG_ERR( - "%s secure token check failed, version mismatch 0x%02x != 0x%02x\n", - __func__, (int) t->version, TOKVER_CCA_AES); - return -EINVAL; - } - if (keybitsize > 0 && t->bitsize != keybitsize) { - DEBUG_ERR( - "%s secure token check failed, bitsize mismatch %d != %d\n", - __func__, (int) t->bitsize, keybitsize); - return -EINVAL; - } - - return 0; -} - -/* - * Allocate consecutive memory for request CPRB, request param - * block, reply CPRB and reply param block and fill in values - * for the common fields. Returns 0 on success or errno value - * on failure. - */ -static int alloc_and_prep_cprbmem(size_t paramblen, - u8 **pcprbmem, - struct CPRBX **preqCPRB, - struct CPRBX **prepCPRB) -{ - u8 *cprbmem; - size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen; - struct CPRBX *preqcblk, *prepcblk; - - /* - * allocate consecutive memory for request CPRB, request param - * block, reply CPRB and reply param block - */ - cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL); - if (!cprbmem) - return -ENOMEM; - - preqcblk = (struct CPRBX *) cprbmem; - prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen); - - /* fill request cprb struct */ - preqcblk->cprb_len = sizeof(struct CPRBX); - preqcblk->cprb_ver_id = 0x02; - memcpy(preqcblk->func_id, "T2", 2); - preqcblk->rpl_msgbl = cprbplusparamblen; - if (paramblen) { - preqcblk->req_parmb = - ((u8 *) preqcblk) + sizeof(struct CPRBX); - preqcblk->rpl_parmb = - ((u8 *) prepcblk) + sizeof(struct CPRBX); - } - - *pcprbmem = cprbmem; - *preqCPRB = preqcblk; - *prepCPRB = prepcblk; - - return 0; -} - -/* - * Free the cprb memory allocated with the function above. - * If the scrub value is not zero, the memory is filled - * with zeros before freeing (useful if there was some - * clear key material in there). - */ -static void free_cprbmem(void *mem, size_t paramblen, int scrub) -{ - if (scrub) - memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen)); - kfree(mem); -} - -/* - * Helper function to prepare the xcrb struct - */ -static inline void prep_xcrb(struct ica_xcRB *pxcrb, - u16 cardnr, - struct CPRBX *preqcblk, - struct CPRBX *prepcblk) -{ - memset(pxcrb, 0, sizeof(*pxcrb)); - pxcrb->agent_ID = 0x4341; /* 'CA' */ - pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr); - pxcrb->request_control_blk_length = - preqcblk->cprb_len + preqcblk->req_parml; - pxcrb->request_control_blk_addr = (void __user *) preqcblk; - pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl; - pxcrb->reply_control_blk_addr = (void __user *) prepcblk; -} - -/* - * Helper function which calls zcrypt_send_cprb with - * memory management segment adjusted to kernel space - * so that the copy_from_user called within this - * function do in fact copy from kernel space. - */ -static inline int _zcrypt_send_cprb(struct ica_xcRB *xcrb) -{ - int rc; - mm_segment_t old_fs = get_fs(); - - set_fs(KERNEL_DS); - rc = zcrypt_send_cprb(xcrb); - set_fs(old_fs); - - return rc; -} - -/* - * Generate (random) AES secure key. 
- */ -int pkey_genseckey(u16 cardnr, u16 domain, - u32 keytype, struct pkey_seckey *seckey) -{ - int i, rc, keysize; - int seckeysize; - u8 *mem; - struct CPRBX *preqcblk, *prepcblk; - struct ica_xcRB xcrb; - struct kgreqparm { - u8 subfunc_code[2]; - u16 rule_array_len; - struct lv1 { - u16 len; - char key_form[8]; - char key_length[8]; - char key_type1[8]; - char key_type2[8]; - } lv1; - struct lv2 { - u16 len; - struct keyid { - u16 len; - u16 attr; - u8 data[SECKEYBLOBSIZE]; - } keyid[6]; - } lv2; - } *preqparm; - struct kgrepparm { - u8 subfunc_code[2]; - u16 rule_array_len; - struct lv3 { - u16 len; - u16 keyblocklen; - struct { - u16 toklen; - u16 tokattr; - u8 tok[0]; - /* ... some more data ... */ - } keyblock; - } lv3; - } *prepparm; - - /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); - if (rc) - return rc; - - /* fill request cprb struct */ - preqcblk->domain = domain; - - /* fill request cprb param block with KG request */ - preqparm = (struct kgreqparm *) preqcblk->req_parmb; - memcpy(preqparm->subfunc_code, "KG", 2); - preqparm->rule_array_len = sizeof(preqparm->rule_array_len); - preqparm->lv1.len = sizeof(struct lv1); - memcpy(preqparm->lv1.key_form, "OP ", 8); - switch (keytype) { - case PKEY_KEYTYPE_AES_128: - keysize = 16; - memcpy(preqparm->lv1.key_length, "KEYLN16 ", 8); - break; - case PKEY_KEYTYPE_AES_192: - keysize = 24; - memcpy(preqparm->lv1.key_length, "KEYLN24 ", 8); - break; - case PKEY_KEYTYPE_AES_256: - keysize = 32; - memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8); - break; - default: - DEBUG_ERR( - "%s unknown/unsupported keytype %d\n", - __func__, keytype); - rc = -EINVAL; - goto out; - } - memcpy(preqparm->lv1.key_type1, "AESDATA ", 8); - preqparm->lv2.len = sizeof(struct lv2); - for (i = 0; i < 6; i++) { - preqparm->lv2.keyid[i].len = sizeof(struct keyid); - preqparm->lv2.keyid[i].attr = (i == 2 ? 0x30 : 0x10); - } - preqcblk->req_parml = sizeof(struct kgreqparm); - - /* fill xcrb struct */ - prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); - - /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = _zcrypt_send_cprb(&xcrb); - if (rc) { - DEBUG_ERR( - "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n", - __func__, (int) cardnr, (int) domain, rc); - goto out; - } - - /* check response returncode and reasoncode */ - if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR( - "%s secure key generate failure, card response %d/%d\n", - __func__, - (int) prepcblk->ccp_rtcode, - (int) prepcblk->ccp_rscode); - rc = -EIO; - goto out; - } - - /* process response cprb param block */ - prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); - prepparm = (struct kgrepparm *) prepcblk->rpl_parmb; - - /* check length of the returned secure key token */ - seckeysize = prepparm->lv3.keyblock.toklen - - sizeof(prepparm->lv3.keyblock.toklen) - - sizeof(prepparm->lv3.keyblock.tokattr); - if (seckeysize != SECKEYBLOBSIZE) { - DEBUG_ERR( - "%s secure token size mismatch %d != %d bytes\n", - __func__, seckeysize, SECKEYBLOBSIZE); - rc = -EIO; - goto out; - } - - /* check secure key token */ - rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize); - if (rc) { - rc = -EIO; - goto out; - } - - /* copy the generated secure key token */ - memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE); - -out: - free_cprbmem(mem, PARMBSIZE, 0); - return rc; -} -EXPORT_SYMBOL(pkey_genseckey); - -/* - * Generate an AES secure key with given key value. 
- */ -int pkey_clr2seckey(u16 cardnr, u16 domain, u32 keytype, - const struct pkey_clrkey *clrkey, - struct pkey_seckey *seckey) -{ - int rc, keysize, seckeysize; - u8 *mem; - struct CPRBX *preqcblk, *prepcblk; - struct ica_xcRB xcrb; - struct cmreqparm { - u8 subfunc_code[2]; - u16 rule_array_len; - char rule_array[8]; - struct lv1 { - u16 len; - u8 clrkey[0]; - } lv1; - struct lv2 { - u16 len; - struct keyid { - u16 len; - u16 attr; - u8 data[SECKEYBLOBSIZE]; - } keyid; - } lv2; - } *preqparm; - struct lv2 *plv2; - struct cmrepparm { - u8 subfunc_code[2]; - u16 rule_array_len; - struct lv3 { - u16 len; - u16 keyblocklen; - struct { - u16 toklen; - u16 tokattr; - u8 tok[0]; - /* ... some more data ... */ - } keyblock; - } lv3; - } *prepparm; - - /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); - if (rc) - return rc; - - /* fill request cprb struct */ - preqcblk->domain = domain; - - /* fill request cprb param block with CM request */ - preqparm = (struct cmreqparm *) preqcblk->req_parmb; - memcpy(preqparm->subfunc_code, "CM", 2); - memcpy(preqparm->rule_array, "AES ", 8); - preqparm->rule_array_len = - sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array); - switch (keytype) { - case PKEY_KEYTYPE_AES_128: - keysize = 16; - break; - case PKEY_KEYTYPE_AES_192: - keysize = 24; - break; - case PKEY_KEYTYPE_AES_256: - keysize = 32; - break; - default: - DEBUG_ERR( - "%s unknown/unsupported keytype %d\n", - __func__, keytype); - rc = -EINVAL; - goto out; - } - preqparm->lv1.len = sizeof(struct lv1) + keysize; - memcpy(preqparm->lv1.clrkey, clrkey->clrkey, keysize); - plv2 = (struct lv2 *) (((u8 *) &preqparm->lv2) + keysize); - plv2->len = sizeof(struct lv2); - plv2->keyid.len = sizeof(struct keyid); - plv2->keyid.attr = 0x30; - preqcblk->req_parml = sizeof(struct cmreqparm) + keysize; - - /* fill xcrb struct */ - prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); - - /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = _zcrypt_send_cprb(&xcrb); - if (rc) { - DEBUG_ERR( - "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n", - __func__, (int) cardnr, (int) domain, rc); - goto out; - } - - /* check response returncode and reasoncode */ - if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR( - "%s clear key import failure, card response %d/%d\n", - __func__, - (int) prepcblk->ccp_rtcode, - (int) prepcblk->ccp_rscode); - rc = -EIO; - goto out; - } - - /* process response cprb param block */ - prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); - prepparm = (struct cmrepparm *) prepcblk->rpl_parmb; - - /* check length of the returned secure key token */ - seckeysize = prepparm->lv3.keyblock.toklen - - sizeof(prepparm->lv3.keyblock.toklen) - - sizeof(prepparm->lv3.keyblock.tokattr); - if (seckeysize != SECKEYBLOBSIZE) { - DEBUG_ERR( - "%s secure token size mismatch %d != %d bytes\n", - __func__, seckeysize, SECKEYBLOBSIZE); - rc = -EIO; - goto out; - } - - /* check secure key token */ - rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize); - if (rc) { - rc = -EIO; - goto out; - } - - /* copy the generated secure key token */ - memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE); - -out: - free_cprbmem(mem, PARMBSIZE, 1); - return rc; -} -EXPORT_SYMBOL(pkey_clr2seckey); - -/* - * Derive a proteced key from the secure key blob. 
- */ -int pkey_sec2protkey(u16 cardnr, u16 domain, - const struct pkey_seckey *seckey, - struct pkey_protkey *protkey) -{ - int rc; - u8 *mem; - struct CPRBX *preqcblk, *prepcblk; - struct ica_xcRB xcrb; - struct uskreqparm { - u8 subfunc_code[2]; - u16 rule_array_len; - struct lv1 { - u16 len; - u16 attr_len; - u16 attr_flags; - } lv1; - struct lv2 { - u16 len; - u16 attr_len; - u16 attr_flags; - u8 token[0]; /* cca secure key token */ - } lv2 __packed; - } *preqparm; - struct uskrepparm { - u8 subfunc_code[2]; - u16 rule_array_len; - struct lv3 { - u16 len; - u16 attr_len; - u16 attr_flags; - struct cpacfkeyblock { - u8 version; /* version of this struct */ - u8 flags[2]; - u8 algo; - u8 form; - u8 pad1[3]; - u16 keylen; - u8 key[64]; /* the key (keylen bytes) */ - u16 keyattrlen; - u8 keyattr[32]; - u8 pad2[1]; - u8 vptype; - u8 vp[32]; /* verification pattern */ - } keyblock; - } lv3 __packed; - } *prepparm; - - /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); - if (rc) - return rc; - - /* fill request cprb struct */ - preqcblk->domain = domain; - - /* fill request cprb param block with USK request */ - preqparm = (struct uskreqparm *) preqcblk->req_parmb; - memcpy(preqparm->subfunc_code, "US", 2); - preqparm->rule_array_len = sizeof(preqparm->rule_array_len); - preqparm->lv1.len = sizeof(struct lv1); - preqparm->lv1.attr_len = sizeof(struct lv1) - sizeof(preqparm->lv1.len); - preqparm->lv1.attr_flags = 0x0001; - preqparm->lv2.len = sizeof(struct lv2) + SECKEYBLOBSIZE; - preqparm->lv2.attr_len = sizeof(struct lv2) - - sizeof(preqparm->lv2.len) + SECKEYBLOBSIZE; - preqparm->lv2.attr_flags = 0x0000; - memcpy(preqparm->lv2.token, seckey->seckey, SECKEYBLOBSIZE); - preqcblk->req_parml = sizeof(struct uskreqparm) + SECKEYBLOBSIZE; - - /* fill xcrb struct */ - prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); - - /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = _zcrypt_send_cprb(&xcrb); - if (rc) { - DEBUG_ERR( - "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n", - __func__, (int) cardnr, (int) domain, rc); - goto out; - } - - /* check response returncode and reasoncode */ - if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR( - "%s unwrap secure key failure, card response %d/%d\n", - __func__, - (int) prepcblk->ccp_rtcode, - (int) prepcblk->ccp_rscode); - rc = -EIO; - goto out; - } - if (prepcblk->ccp_rscode != 0) { - DEBUG_WARN( - "%s unwrap secure key warning, card response %d/%d\n", - __func__, - (int) prepcblk->ccp_rtcode, - (int) prepcblk->ccp_rscode); - } - - /* process response cprb param block */ - prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); - prepparm = (struct uskrepparm *) prepcblk->rpl_parmb; - - /* check the returned keyblock */ - if (prepparm->lv3.keyblock.version != 0x01) { - DEBUG_ERR( - "%s reply param keyblock version mismatch 0x%02x != 0x01\n", - __func__, (int) prepparm->lv3.keyblock.version); - rc = -EIO; - goto out; - } - - /* copy the tanslated protected key */ - switch (prepparm->lv3.keyblock.keylen) { - case 16+32: - protkey->type = PKEY_KEYTYPE_AES_128; - break; - case 24+32: - protkey->type = PKEY_KEYTYPE_AES_192; - break; - case 32+32: - protkey->type = PKEY_KEYTYPE_AES_256; - break; - default: - DEBUG_ERR("%s unknown/unsupported keytype %d\n", - __func__, prepparm->lv3.keyblock.keylen); - rc = -EIO; - goto out; - } - protkey->len = prepparm->lv3.keyblock.keylen; - memcpy(protkey->protkey, prepparm->lv3.keyblock.key, 
protkey->len); - -out: - free_cprbmem(mem, PARMBSIZE, 0); - return rc; -} -EXPORT_SYMBOL(pkey_sec2protkey); +/* inside view of a clear key token (type 0x00 version 0x02) */ +struct clearaeskeytoken { + u8 type; /* 0x00 for PAES specific key tokens */ + u8 res0[3]; + u8 version; /* 0x02 for clear AES key token */ + u8 res1[3]; + u32 keytype; /* key type, one of the PKEY_KEYTYPE values */ + u32 len; /* bytes actually stored in clearkey[] */ + u8 clearkey[0]; /* clear key value */ +} __packed; /* * Create a protected key from a clear key value. */ -int pkey_clr2protkey(u32 keytype, - const struct pkey_clrkey *clrkey, - struct pkey_protkey *protkey) +static int pkey_clr2protkey(u32 keytype, + const struct pkey_clrkey *clrkey, + struct pkey_protkey *protkey) { long fc; int keysize; @@ -707,363 +137,132 @@ int pkey_clr2protkey(u32 keytype, return 0; } -EXPORT_SYMBOL(pkey_clr2protkey); /* - * query cryptographic facility from adapter - */ -static int query_crypto_facility(u16 cardnr, u16 domain, - const char *keyword, - u8 *rarray, size_t *rarraylen, - u8 *varray, size_t *varraylen) -{ - int rc; - u16 len; - u8 *mem, *ptr; - struct CPRBX *preqcblk, *prepcblk; - struct ica_xcRB xcrb; - struct fqreqparm { - u8 subfunc_code[2]; - u16 rule_array_len; - char rule_array[8]; - struct lv1 { - u16 len; - u8 data[VARDATASIZE]; - } lv1; - u16 dummylen; - } *preqparm; - size_t parmbsize = sizeof(struct fqreqparm); - struct fqrepparm { - u8 subfunc_code[2]; - u8 lvdata[0]; - } *prepparm; - - /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk); - if (rc) - return rc; - - /* fill request cprb struct */ - preqcblk->domain = domain; - - /* fill request cprb param block with FQ request */ - preqparm = (struct fqreqparm *) preqcblk->req_parmb; - memcpy(preqparm->subfunc_code, "FQ", 2); - memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array)); - preqparm->rule_array_len = - sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array); - preqparm->lv1.len = sizeof(preqparm->lv1); - preqparm->dummylen = sizeof(preqparm->dummylen); - preqcblk->req_parml = parmbsize; - - /* fill xcrb struct */ - prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); - - /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = _zcrypt_send_cprb(&xcrb); - if (rc) { - DEBUG_ERR( - "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n", - __func__, (int) cardnr, (int) domain, rc); - goto out; - } - - /* check response returncode and reasoncode */ - if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR( - "%s unwrap secure key failure, card response %d/%d\n", - __func__, - (int) prepcblk->ccp_rtcode, - (int) prepcblk->ccp_rscode); - rc = -EIO; - goto out; - } - - /* process response cprb param block */ - prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); - prepparm = (struct fqrepparm *) prepcblk->rpl_parmb; - ptr = prepparm->lvdata; - - /* check and possibly copy reply rule array */ - len = *((u16 *) ptr); - if (len > sizeof(u16)) { - ptr += sizeof(u16); - len -= sizeof(u16); - if (rarray && rarraylen && *rarraylen > 0) { - *rarraylen = (len > *rarraylen ? *rarraylen : len); - memcpy(rarray, ptr, *rarraylen); - } - ptr += len; - } - /* check and possible copy reply var array */ - len = *((u16 *) ptr); - if (len > sizeof(u16)) { - ptr += sizeof(u16); - len -= sizeof(u16); - if (varray && varraylen && *varraylen > 0) { - *varraylen = (len > *varraylen ? 
*varraylen : len); - memcpy(varray, ptr, *varraylen); - } - ptr += len; - } - -out: - free_cprbmem(mem, parmbsize, 0); - return rc; -} - -/* - * Fetch the current and old mkvp values via - * query_crypto_facility from adapter. + * Find card and transform secure key into protected key. */ -static int fetch_mkvp(u16 cardnr, u16 domain, u64 mkvp[2]) +static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey) { - int rc, found = 0; - size_t rlen, vlen; - u8 *rarray, *varray, *pg; - - pg = (u8 *) __get_free_page(GFP_KERNEL); - if (!pg) - return -ENOMEM; - rarray = pg; - varray = pg + PAGE_SIZE/2; - rlen = vlen = PAGE_SIZE/2; - - rc = query_crypto_facility(cardnr, domain, "STATICSA", - rarray, &rlen, varray, &vlen); - if (rc == 0 && rlen > 8*8 && vlen > 184+8) { - if (rarray[8*8] == '2') { - /* current master key state is valid */ - mkvp[0] = *((u64 *)(varray + 184)); - mkvp[1] = *((u64 *)(varray + 172)); - found = 1; - } - } - - free_page((unsigned long) pg); - - return found ? 0 : -ENOENT; -} - -/* struct to hold cached mkvp info for each card/domain */ -struct mkvp_info { - struct list_head list; - u16 cardnr; - u16 domain; - u64 mkvp[2]; -}; - -/* a list with mkvp_info entries */ -static LIST_HEAD(mkvp_list); -static DEFINE_SPINLOCK(mkvp_list_lock); + int rc, verify; + u16 cardnr, domain; + struct keytoken_header *hdr = (struct keytoken_header *)key; -static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 mkvp[2]) -{ - int rc = -ENOENT; - struct mkvp_info *ptr; - - spin_lock_bh(&mkvp_list_lock); - list_for_each_entry(ptr, &mkvp_list, list) { - if (ptr->cardnr == cardnr && - ptr->domain == domain) { - memcpy(mkvp, ptr->mkvp, 2 * sizeof(u64)); - rc = 0; + /* + * The cca_xxx2protkey call may fail when a card has been + * addressed where the master key was changed after the last fetch + * of the mkvp into the cache. Try 3 times: first without verify, + * then with verify, and finally with verify while a match on the + * old master key verification pattern is no longer ignored.
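 + * A return value > 0 from cca_findcard() indicates that only the + * old master key verification pattern matched, so such a hit is + * accepted in the final round only.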
+ */ + for (verify = 0; verify < 3; verify++) { + rc = cca_findcard(key, &cardnr, &domain, verify); + if (rc < 0) + continue; + if (rc > 0 && verify < 2) + continue; + switch (hdr->version) { + case TOKVER_CCA_AES: + rc = cca_sec2protkey(cardnr, domain, + key, pkey->protkey, + &pkey->len, &pkey->type); break; - } - } - spin_unlock_bh(&mkvp_list_lock); - - return rc; -} - -static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp[2]) -{ - int found = 0; - struct mkvp_info *ptr; - - spin_lock_bh(&mkvp_list_lock); - list_for_each_entry(ptr, &mkvp_list, list) { - if (ptr->cardnr == cardnr && - ptr->domain == domain) { - memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64)); - found = 1; + case TOKVER_CCA_VLSC: + rc = cca_cipher2protkey(cardnr, domain, + key, pkey->protkey, + &pkey->len, &pkey->type); break; + default: + return -EINVAL; } - } - if (!found) { - ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC); - if (!ptr) { - spin_unlock_bh(&mkvp_list_lock); - return; - } - ptr->cardnr = cardnr; - ptr->domain = domain; - memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64)); - list_add(&ptr->list, &mkvp_list); - } - spin_unlock_bh(&mkvp_list_lock); -} - -static void mkvp_cache_scrub(u16 cardnr, u16 domain) -{ - struct mkvp_info *ptr; - - spin_lock_bh(&mkvp_list_lock); - list_for_each_entry(ptr, &mkvp_list, list) { - if (ptr->cardnr == cardnr && - ptr->domain == domain) { - list_del(&ptr->list); - kfree(ptr); + if (rc == 0) break; - } } - spin_unlock_bh(&mkvp_list_lock); -} -static void __exit mkvp_cache_free(void) -{ - struct mkvp_info *ptr, *pnext; + if (rc) + DEBUG_DBG("%s failed rc=%d\n", __func__, rc); - spin_lock_bh(&mkvp_list_lock); - list_for_each_entry_safe(ptr, pnext, &mkvp_list, list) { - list_del(&ptr->list); - kfree(ptr); - } - spin_unlock_bh(&mkvp_list_lock); + return rc; } /* - * Search for a matching crypto card based on the Master Key - * Verification Pattern provided inside a secure key. + * Construct EP11 key with given clear key value. */ -int pkey_findcard(const struct pkey_seckey *seckey, - u16 *pcardnr, u16 *pdomain, int verify) +static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen, + u8 *keybuf, size_t *keybuflen) { - struct secaeskeytoken *t = (struct secaeskeytoken *) seckey; - struct zcrypt_device_status_ext *device_status; + int i, rc; u16 card, dom; - u64 mkvp[2]; - int i, rc, oi = -1; + u32 nr_apqns, *apqns = NULL; - /* mkvp must not be zero */ - if (t->mkvp == 0) - return -EINVAL; + /* build a list of apqns suitable for ep11 keys with cpacf support */ + rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, + ZCRYPT_CEX7, EP11_API_V, NULL); + if (rc) + goto out; - /* fetch status of all crypto cards */ - device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT, - sizeof(struct zcrypt_device_status_ext), - GFP_KERNEL); - if (!device_status) - return -ENOMEM; - zcrypt_device_status_mask_ext(device_status); - - /* walk through all crypto cards */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { - card = AP_QID_CARD(device_status[i].qid); - dom = AP_QID_QUEUE(device_status[i].qid); - if (device_status[i].online && - device_status[i].functions & 0x04) { - /* an enabled CCA Coprocessor card */ - /* try cached mkvp */ - if (mkvp_cache_fetch(card, dom, mkvp) == 0 && - t->mkvp == mkvp[0]) { - if (!verify) - break; - /* verify: fetch mkvp from adapter */ - if (fetch_mkvp(card, dom, mkvp) == 0) { - mkvp_cache_update(card, dom, mkvp); - if (t->mkvp == mkvp[0]) - break; - } - } - } else { - /* Card is offline and/or not a CCA card. 
*/ - /* del mkvp entry from cache if it exists */ - mkvp_cache_scrub(card, dom); - } - } - if (i >= MAX_ZDEV_ENTRIES_EXT) { - /* nothing found, so this time without cache */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { - if (!(device_status[i].online && - device_status[i].functions & 0x04)) - continue; - card = AP_QID_CARD(device_status[i].qid); - dom = AP_QID_QUEUE(device_status[i].qid); - /* fresh fetch mkvp from adapter */ - if (fetch_mkvp(card, dom, mkvp) == 0) { - mkvp_cache_update(card, dom, mkvp); - if (t->mkvp == mkvp[0]) - break; - if (t->mkvp == mkvp[1] && oi < 0) - oi = i; - } - } - if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) { - /* old mkvp matched, use this card then */ - card = AP_QID_CARD(device_status[oi].qid); - dom = AP_QID_QUEUE(device_status[oi].qid); - } + /* go through the list of apqns and try to build an ep11 key */ + for (rc = -ENODEV, i = 0; i < nr_apqns; i++) { + card = apqns[i] >> 16; + dom = apqns[i] & 0xFFFF; + rc = ep11_clr2keyblob(card, dom, clrkeylen * 8, + 0, clrkey, keybuf, keybuflen); + if (rc == 0) + break; } - if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) { - if (pcardnr) - *pcardnr = card; - if (pdomain) - *pdomain = dom; - rc = 0; - } else - rc = -ENODEV; - kfree(device_status); +out: + kfree(apqns); + if (rc) + DEBUG_DBG("%s failed rc=%d\n", __func__, rc); return rc; } -EXPORT_SYMBOL(pkey_findcard); /* - * Find card and transform secure key into protected key. + * Find card and transform EP11 secure key into protected key. */ -int pkey_skey2pkey(const struct pkey_seckey *seckey, - struct pkey_protkey *protkey) +static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey) { - u16 cardnr, domain; - int rc, verify; + int i, rc; + u16 card, dom; + u32 nr_apqns, *apqns = NULL; + struct ep11keyblob *kb = (struct ep11keyblob *) key; - /* - * The pkey_sec2protkey call may fail when a card has been - * addressed where the master key was changed after last fetch - * of the mkvp into the cache. So first try without verify then - * with verify enabled (thus refreshing the mkvp for each card). - */ - for (verify = 0; verify < 2; verify++) { - rc = pkey_findcard(seckey, &cardnr, &domain, verify); - if (rc) - continue; - rc = pkey_sec2protkey(cardnr, domain, seckey, protkey); + /* build a list of apqns suitable for this key */ + rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, + ZCRYPT_CEX7, EP11_API_V, kb->wkvp); + if (rc) + goto out; + + /* go through the list of apqns and try to derive a pkey */ + for (rc = -ENODEV, i = 0; i < nr_apqns; i++) { + card = apqns[i] >> 16; + dom = apqns[i] & 0xFFFF; + rc = ep11_key2protkey(card, dom, key, kb->head.len, + pkey->protkey, &pkey->len, &pkey->type); + if (rc == 0) + break; + } +out: + kfree(apqns); if (rc) DEBUG_DBG("%s failed rc=%d\n", __func__, rc); - return rc; } -EXPORT_SYMBOL(pkey_skey2pkey); /* * Verify key and give back some info about the key. 
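 * The blob is checked to be a valid CCA AES secure key token; on success the card/domain found for it, the key bit size and a flag telling whether only the old master key verification pattern matched are reported back.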
*/ -int pkey_verifykey(const struct pkey_seckey *seckey, - u16 *pcardnr, u16 *pdomain, - u16 *pkeysize, u32 *pattributes) +static int pkey_verifykey(const struct pkey_seckey *seckey, + u16 *pcardnr, u16 *pdomain, + u16 *pkeysize, u32 *pattributes) { struct secaeskeytoken *t = (struct secaeskeytoken *) seckey; u16 cardnr, domain; - u64 mkvp[2]; int rc; /* check the secure key for valid AES secure key */ - rc = check_secaeskeytoken((u8 *) seckey, 0); + rc = cca_check_secaeskeytoken(debug_info, 3, (u8 *) seckey, 0); if (rc) goto out; if (pattributes) @@ -1072,18 +271,16 @@ int pkey_verifykey(const struct pkey_seckey *seckey, *pkeysize = t->bitsize; /* try to find a card which can handle this key */ - rc = pkey_findcard(seckey, &cardnr, &domain, 1); - if (rc) + rc = cca_findcard(seckey->seckey, &cardnr, &domain, 1); + if (rc < 0) goto out; - /* check mkvp for old mkvp match */ - rc = mkvp_cache_fetch(cardnr, domain, mkvp); - if (rc) - goto out; - if (t->mkvp == mkvp[1] && t->mkvp != mkvp[0]) { + if (rc > 0) { + /* key mkvp matches to old master key mkvp */ DEBUG_DBG("%s secure key has old mkvp\n", __func__); if (pattributes) *pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP; + rc = 0; } if (pcardnr) @@ -1095,12 +292,11 @@ out: DEBUG_DBG("%s rc=%d\n", __func__, rc); return rc; } -EXPORT_SYMBOL(pkey_verifykey); /* * Generate a random protected key */ -int pkey_genprotkey(__u32 keytype, struct pkey_protkey *protkey) +static int pkey_genprotkey(u32 keytype, struct pkey_protkey *protkey) { struct pkey_clrkey clrkey; int keysize; @@ -1135,12 +331,11 @@ int pkey_genprotkey(__u32 keytype, struct pkey_protkey *protkey) return 0; } -EXPORT_SYMBOL(pkey_genprotkey); /* * Verify if a protected key is still valid */ -int pkey_verifyprotkey(const struct pkey_protkey *protkey) +static int pkey_verifyprotkey(const struct pkey_protkey *protkey) { unsigned long fc; struct { @@ -1181,40 +376,103 @@ int pkey_verifyprotkey(const struct pkey_protkey *protkey) return 0; } -EXPORT_SYMBOL(pkey_verifyprotkey); /* * Transform a non-CCA key token into a protected key */ -static int pkey_nonccatok2pkey(const __u8 *key, __u32 keylen, +static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, struct pkey_protkey *protkey) { + int rc = -EINVAL; + u8 *tmpbuf = NULL; struct keytoken_header *hdr = (struct keytoken_header *)key; - struct protaeskeytoken *t; switch (hdr->version) { - case TOKVER_PROTECTED_KEY: - if (keylen != sizeof(struct protaeskeytoken)) - return -EINVAL; + case TOKVER_PROTECTED_KEY: { + struct protaeskeytoken *t; + if (keylen != sizeof(struct protaeskeytoken)) + goto out; t = (struct protaeskeytoken *)key; protkey->len = t->len; protkey->type = t->keytype; memcpy(protkey->protkey, t->protkey, sizeof(protkey->protkey)); - - return pkey_verifyprotkey(protkey); + rc = pkey_verifyprotkey(protkey); + break; + } + case TOKVER_CLEAR_KEY: { + struct clearaeskeytoken *t; + struct pkey_clrkey ckey; + union u_tmpbuf { + u8 skey[SECKEYBLOBSIZE]; + u8 ep11key[MAXEP11AESKEYBLOBSIZE]; + }; + size_t tmpbuflen = sizeof(union u_tmpbuf); + + if (keylen < sizeof(struct clearaeskeytoken)) + goto out; + t = (struct clearaeskeytoken *)key; + if (keylen != sizeof(*t) + t->len) + goto out; + if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16) + || (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24) + || (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32)) + memcpy(ckey.clrkey, t->clearkey, t->len); + else + goto out; + /* alloc temp key buffer space */ + tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC); + if (!tmpbuf) { + rc = -ENOMEM; + goto 
out; + } + /* try direct way with the PCKMO instruction */ + rc = pkey_clr2protkey(t->keytype, &ckey, protkey); + if (rc == 0) + break; + /* PCKMO failed, so try the CCA secure key way */ + rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype, + ckey.clrkey, tmpbuf); + if (rc == 0) + rc = pkey_skey2pkey(tmpbuf, protkey); + if (rc == 0) + break; + /* if the CCA way also failed, let's try via EP11 */ + rc = pkey_clr2ep11key(ckey.clrkey, t->len, + tmpbuf, &tmpbuflen); + if (rc == 0) + rc = pkey_ep11key2pkey(tmpbuf, protkey); + if (rc == 0) + break; + /* none of the ways to build a protected key worked */ + DEBUG_ERR("%s unable to build protected key from clear", + __func__); + break; + } + case TOKVER_EP11_AES: { + if (keylen < MINEP11AESKEYBLOBSIZE) + goto out; + /* check ep11 key for exportable as protected key */ + rc = ep11_check_aeskeyblob(debug_info, 3, key, 0, 1); + if (rc) + goto out; + rc = pkey_ep11key2pkey(key, protkey); + break; + } default: DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n", __func__, hdr->version); - return -EINVAL; + rc = -EINVAL; } + +out: + kfree(tmpbuf); + return rc; } /* * Transform a CCA internal key token into a protected key */ -static int pkey_ccainttok2pkey(const __u8 *key, __u32 keylen, +static int pkey_ccainttok2pkey(const u8 *key, u32 keylen, struct pkey_protkey *protkey) { struct keytoken_header *hdr = (struct keytoken_header *)key; @@ -1223,44 +481,474 @@ static int pkey_ccainttok2pkey(const __u8 *key, __u32 keylen, case TOKVER_CCA_AES: if (keylen != sizeof(struct secaeskeytoken)) return -EINVAL; - - return pkey_skey2pkey((struct pkey_seckey *)key, - protkey); + break; + case TOKVER_CCA_VLSC: + if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE) + return -EINVAL; + break; default: DEBUG_ERR("%s unknown/unsupported CCA internal token version %d\n", __func__, hdr->version); return -EINVAL; } + + return pkey_skey2pkey(key, protkey); } /* * Transform a key blob (of any type) into a protected key */ -int pkey_keyblob2pkey(const __u8 *key, __u32 keylen, +int pkey_keyblob2pkey(const u8 *key, u32 keylen, struct pkey_protkey *protkey) { + int rc; struct keytoken_header *hdr = (struct keytoken_header *)key; - if (keylen < sizeof(struct keytoken_header)) + if (keylen < sizeof(struct keytoken_header)) { + DEBUG_ERR("%s invalid keylen %d\n", __func__, keylen); return -EINVAL; + } switch (hdr->type) { case TOKTYPE_NON_CCA: - return pkey_nonccatok2pkey(key, keylen, protkey); + rc = pkey_nonccatok2pkey(key, keylen, protkey); + break; case TOKTYPE_CCA_INTERNAL: - return pkey_ccainttok2pkey(key, keylen, protkey); + rc = pkey_ccainttok2pkey(key, keylen, protkey); + break; default: - DEBUG_ERR("%s unknown/unsupported blob type %d\n", __func__, - hdr->type); + DEBUG_ERR("%s unknown/unsupported blob type %d\n", + __func__, hdr->type); return -EINVAL; } + + DEBUG_DBG("%s rc=%d\n", __func__, rc); + return rc; + } EXPORT_SYMBOL(pkey_keyblob2pkey); +static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns, + enum pkey_key_type ktype, enum pkey_key_size ksize, + u32 kflags, u8 *keybuf, size_t *keybufsize) +{ + int i, card, dom, rc; + + /* check for at least one apqn given */ + if (!apqns || !nr_apqns) + return -EINVAL; + + /* check key type and size */ + switch (ktype) { + case PKEY_TYPE_CCA_DATA: + case PKEY_TYPE_CCA_CIPHER: + if (*keybufsize < SECKEYBLOBSIZE) + return -EINVAL; + break; + case PKEY_TYPE_EP11: + if (*keybufsize < MINEP11AESKEYBLOBSIZE) + return -EINVAL; + break; + default: + return -EINVAL; + } + switch (ksize) { + case PKEY_SIZE_AES_128: + case 
PKEY_SIZE_AES_192: + case PKEY_SIZE_AES_256: + break; + default: + return -EINVAL; + } + + /* simple try all apqns from the list */ + for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { + card = apqns[i].card; + dom = apqns[i].domain; + if (ktype == PKEY_TYPE_EP11) { + rc = ep11_genaeskey(card, dom, ksize, kflags, + keybuf, keybufsize); + } else if (ktype == PKEY_TYPE_CCA_DATA) { + rc = cca_genseckey(card, dom, ksize, keybuf); + *keybufsize = (rc ? 0 : SECKEYBLOBSIZE); + } else /* TOKVER_CCA_VLSC */ + rc = cca_gencipherkey(card, dom, ksize, kflags, + keybuf, keybufsize); + if (rc == 0) + break; + } + + return rc; +} + +static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns, + enum pkey_key_type ktype, enum pkey_key_size ksize, + u32 kflags, const u8 *clrkey, + u8 *keybuf, size_t *keybufsize) +{ + int i, card, dom, rc; + + /* check for at least one apqn given */ + if (!apqns || !nr_apqns) + return -EINVAL; + + /* check key type and size */ + switch (ktype) { + case PKEY_TYPE_CCA_DATA: + case PKEY_TYPE_CCA_CIPHER: + if (*keybufsize < SECKEYBLOBSIZE) + return -EINVAL; + break; + case PKEY_TYPE_EP11: + if (*keybufsize < MINEP11AESKEYBLOBSIZE) + return -EINVAL; + break; + default: + return -EINVAL; + } + switch (ksize) { + case PKEY_SIZE_AES_128: + case PKEY_SIZE_AES_192: + case PKEY_SIZE_AES_256: + break; + default: + return -EINVAL; + } + + /* simple try all apqns from the list */ + for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { + card = apqns[i].card; + dom = apqns[i].domain; + if (ktype == PKEY_TYPE_EP11) { + rc = ep11_clr2keyblob(card, dom, ksize, kflags, + clrkey, keybuf, keybufsize); + } else if (ktype == PKEY_TYPE_CCA_DATA) { + rc = cca_clr2seckey(card, dom, ksize, + clrkey, keybuf); + *keybufsize = (rc ? 0 : SECKEYBLOBSIZE); + } else /* TOKVER_CCA_VLSC */ + rc = cca_clr2cipherkey(card, dom, ksize, kflags, + clrkey, keybuf, keybufsize); + if (rc == 0) + break; + } + + return rc; +} + +static int pkey_verifykey2(const u8 *key, size_t keylen, + u16 *cardnr, u16 *domain, + enum pkey_key_type *ktype, + enum pkey_key_size *ksize, u32 *flags) +{ + int rc; + u32 _nr_apqns, *_apqns = NULL; + struct keytoken_header *hdr = (struct keytoken_header *)key; + + if (keylen < sizeof(struct keytoken_header)) + return -EINVAL; + + if (hdr->type == TOKTYPE_CCA_INTERNAL + && hdr->version == TOKVER_CCA_AES) { + struct secaeskeytoken *t = (struct secaeskeytoken *)key; + + rc = cca_check_secaeskeytoken(debug_info, 3, key, 0); + if (rc) + goto out; + if (ktype) + *ktype = PKEY_TYPE_CCA_DATA; + if (ksize) + *ksize = (enum pkey_key_size) t->bitsize; + + rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, + ZCRYPT_CEX3C, t->mkvp, 0, 1); + if (rc == 0 && flags) + *flags = PKEY_FLAGS_MATCH_CUR_MKVP; + if (rc == -ENODEV) { + rc = cca_findcard2(&_apqns, &_nr_apqns, + *cardnr, *domain, + ZCRYPT_CEX3C, 0, t->mkvp, 1); + if (rc == 0 && flags) + *flags = PKEY_FLAGS_MATCH_ALT_MKVP; + } + if (rc) + goto out; + + *cardnr = ((struct pkey_apqn *)_apqns)->card; + *domain = ((struct pkey_apqn *)_apqns)->domain; + + } else if (hdr->type == TOKTYPE_CCA_INTERNAL + && hdr->version == TOKVER_CCA_VLSC) { + struct cipherkeytoken *t = (struct cipherkeytoken *)key; + + rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1); + if (rc) + goto out; + if (ktype) + *ktype = PKEY_TYPE_CCA_CIPHER; + if (ksize) { + *ksize = PKEY_SIZE_UNKNOWN; + if (!t->plfver && t->wpllen == 512) + *ksize = PKEY_SIZE_AES_128; + else if (!t->plfver && t->wpllen == 576) + *ksize = PKEY_SIZE_AES_192; + else if (!t->plfver && t->wpllen == 
640) + *ksize = PKEY_SIZE_AES_256; + } + + rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, + ZCRYPT_CEX6, t->mkvp0, 0, 1); + if (rc == 0 && flags) + *flags = PKEY_FLAGS_MATCH_CUR_MKVP; + if (rc == -ENODEV) { + rc = cca_findcard2(&_apqns, &_nr_apqns, + *cardnr, *domain, + ZCRYPT_CEX6, 0, t->mkvp0, 1); + if (rc == 0 && flags) + *flags = PKEY_FLAGS_MATCH_ALT_MKVP; + } + if (rc) + goto out; + + *cardnr = ((struct pkey_apqn *)_apqns)->card; + *domain = ((struct pkey_apqn *)_apqns)->domain; + + } else if (hdr->type == TOKTYPE_NON_CCA + && hdr->version == TOKVER_EP11_AES) { + struct ep11keyblob *kb = (struct ep11keyblob *)key; + + rc = ep11_check_aeskeyblob(debug_info, 3, key, 0, 1); + if (rc) + goto out; + if (ktype) + *ktype = PKEY_TYPE_EP11; + if (ksize) + *ksize = kb->head.keybitlen; + + rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, + ZCRYPT_CEX7, EP11_API_V, kb->wkvp); + if (rc) + goto out; + + if (flags) + *flags = PKEY_FLAGS_MATCH_CUR_MKVP; + + *cardnr = ((struct pkey_apqn *)_apqns)->card; + *domain = ((struct pkey_apqn *)_apqns)->domain; + + } else + rc = -EINVAL; + +out: + kfree(_apqns); + return rc; +} + +static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, + const u8 *key, size_t keylen, + struct pkey_protkey *pkey) +{ + int i, card, dom, rc; + struct keytoken_header *hdr = (struct keytoken_header *)key; + + /* check for at least one apqn given */ + if (!apqns || !nr_apqns) + return -EINVAL; + + if (keylen < sizeof(struct keytoken_header)) + return -EINVAL; + + if (hdr->type == TOKTYPE_CCA_INTERNAL) { + if (hdr->version == TOKVER_CCA_AES) { + if (keylen != sizeof(struct secaeskeytoken)) + return -EINVAL; + if (cca_check_secaeskeytoken(debug_info, 3, key, 0)) + return -EINVAL; + } else if (hdr->version == TOKVER_CCA_VLSC) { + if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE) + return -EINVAL; + if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1)) + return -EINVAL; + } else { + DEBUG_ERR("%s unknown CCA internal token version %d\n", + __func__, hdr->version); + return -EINVAL; + } + } else if (hdr->type == TOKTYPE_NON_CCA) { + if (hdr->version == TOKVER_EP11_AES) { + if (keylen < sizeof(struct ep11keyblob)) + return -EINVAL; + if (ep11_check_aeskeyblob(debug_info, 3, key, 0, 1)) + return -EINVAL; + } else { + return pkey_nonccatok2pkey(key, keylen, pkey); + } + } else { + DEBUG_ERR("%s unknown/unsupported blob type %d\n", + __func__, hdr->type); + return -EINVAL; + } + + /* simple try all apqns from the list */ + for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { + card = apqns[i].card; + dom = apqns[i].domain; + if (hdr->type == TOKTYPE_CCA_INTERNAL + && hdr->version == TOKVER_CCA_AES) + rc = cca_sec2protkey(card, dom, key, pkey->protkey, + &pkey->len, &pkey->type); + else if (hdr->type == TOKTYPE_CCA_INTERNAL + && hdr->version == TOKVER_CCA_VLSC) + rc = cca_cipher2protkey(card, dom, key, pkey->protkey, + &pkey->len, &pkey->type); + else { /* EP11 AES secure key blob */ + struct ep11keyblob *kb = (struct ep11keyblob *) key; + + rc = ep11_key2protkey(card, dom, key, kb->head.len, + pkey->protkey, &pkey->len, + &pkey->type); + } + if (rc == 0) + break; + } + + return rc; +} + +static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns) +{ + int rc = EINVAL; + u32 _nr_apqns, *_apqns = NULL; + struct keytoken_header *hdr = (struct keytoken_header *)key; + + if (keylen < sizeof(struct keytoken_header) || flags == 0) + return -EINVAL; + + if (hdr->type == TOKTYPE_NON_CCA && hdr->version == 
TOKVER_EP11_AES) { + int minhwtype = 0, api = 0; + struct ep11keyblob *kb = (struct ep11keyblob *) key; + + if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) + return -EINVAL; + if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) { + minhwtype = ZCRYPT_CEX7; + api = EP11_API_V; + } + rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + minhwtype, api, kb->wkvp); + if (rc) + goto out; + } else if (hdr->type == TOKTYPE_CCA_INTERNAL) { + int minhwtype = ZCRYPT_CEX3C; + u64 cur_mkvp = 0, old_mkvp = 0; + + if (hdr->version == TOKVER_CCA_AES) { + struct secaeskeytoken *t = (struct secaeskeytoken *)key; + + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) + cur_mkvp = t->mkvp; + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) + old_mkvp = t->mkvp; + } else if (hdr->version == TOKVER_CCA_VLSC) { + struct cipherkeytoken *t = (struct cipherkeytoken *)key; + + minhwtype = ZCRYPT_CEX6; + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) + cur_mkvp = t->mkvp0; + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) + old_mkvp = t->mkvp0; + } else { + /* unknown cca internal token type */ + return -EINVAL; + } + rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + minhwtype, cur_mkvp, old_mkvp, 1); + if (rc) + goto out; + } else + return -EINVAL; + + if (apqns) { + if (*nr_apqns < _nr_apqns) + rc = -ENOSPC; + else + memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); + } + *nr_apqns = _nr_apqns; + +out: + kfree(_apqns); + return rc; +} + +static int pkey_apqns4keytype(enum pkey_key_type ktype, + u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns) +{ + int rc = -EINVAL; + u32 _nr_apqns, *_apqns = NULL; + + if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) { + u64 cur_mkvp = 0, old_mkvp = 0; + int minhwtype = ZCRYPT_CEX3C; + + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) + cur_mkvp = *((u64 *) cur_mkvp); + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) + old_mkvp = *((u64 *) alt_mkvp); + if (ktype == PKEY_TYPE_CCA_CIPHER) + minhwtype = ZCRYPT_CEX6; + rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + minhwtype, cur_mkvp, old_mkvp, 1); + if (rc) + goto out; + } else if (ktype == PKEY_TYPE_EP11) { + u8 *wkvp = NULL; + + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) + wkvp = cur_mkvp; + rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + ZCRYPT_CEX7, EP11_API_V, wkvp); + if (rc) + goto out; + + } else + return -EINVAL; + + if (apqns) { + if (*nr_apqns < _nr_apqns) + rc = -ENOSPC; + else + memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); + } + *nr_apqns = _nr_apqns; + +out: + kfree(_apqns); + return rc; +} + /* * File io functions */ +static void *_copy_key_from_user(void __user *ukey, size_t keylen) +{ + if (!ukey || keylen < MINKEYBLOBSIZE || keylen > KEYBLOBBUFSIZE) + return ERR_PTR(-EINVAL); + + return memdup_user(ukey, keylen); +} + +static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns) +{ + if (!uapqns || nr_apqns == 0) + return NULL; + + return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn)); +} + static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { @@ -1273,9 +961,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, if (copy_from_user(&kgs, ugs, sizeof(kgs))) return -EFAULT; - rc = pkey_genseckey(kgs.cardnr, kgs.domain, - kgs.keytype, &kgs.seckey); - DEBUG_DBG("%s pkey_genseckey()=%d\n", __func__, rc); + rc = cca_genseckey(kgs.cardnr, kgs.domain, + kgs.keytype, kgs.seckey.seckey); + DEBUG_DBG("%s cca_genseckey()=%d\n", __func__, rc); if (rc) break; if (copy_to_user(ugs, &kgs, sizeof(kgs))) @@ -1288,9 +976,9 @@ static long 
pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, if (copy_from_user(&kcs, ucs, sizeof(kcs))) return -EFAULT; - rc = pkey_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype, - &kcs.clrkey, &kcs.seckey); - DEBUG_DBG("%s pkey_clr2seckey()=%d\n", __func__, rc); + rc = cca_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype, + kcs.clrkey.clrkey, kcs.seckey.seckey); + DEBUG_DBG("%s cca_clr2seckey()=%d\n", __func__, rc); if (rc) break; if (copy_to_user(ucs, &kcs, sizeof(kcs))) @@ -1304,9 +992,10 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, if (copy_from_user(&ksp, usp, sizeof(ksp))) return -EFAULT; - rc = pkey_sec2protkey(ksp.cardnr, ksp.domain, - &ksp.seckey, &ksp.protkey); - DEBUG_DBG("%s pkey_sec2protkey()=%d\n", __func__, rc); + rc = cca_sec2protkey(ksp.cardnr, ksp.domain, + ksp.seckey.seckey, ksp.protkey.protkey, + &ksp.protkey.len, &ksp.protkey.type); + DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc); if (rc) break; if (copy_to_user(usp, &ksp, sizeof(ksp))) @@ -1335,10 +1024,10 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, if (copy_from_user(&kfc, ufc, sizeof(kfc))) return -EFAULT; - rc = pkey_findcard(&kfc.seckey, - &kfc.cardnr, &kfc.domain, 1); - DEBUG_DBG("%s pkey_findcard()=%d\n", __func__, rc); - if (rc) + rc = cca_findcard(kfc.seckey.seckey, + &kfc.cardnr, &kfc.domain, 1); + DEBUG_DBG("%s cca_findcard()=%d\n", __func__, rc); + if (rc < 0) break; if (copy_to_user(ufc, &kfc, sizeof(kfc))) return -EFAULT; @@ -1350,7 +1039,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, if (copy_from_user(&ksp, usp, sizeof(ksp))) return -EFAULT; - rc = pkey_skey2pkey(&ksp.seckey, &ksp.protkey); + rc = pkey_skey2pkey(ksp.seckey.seckey, &ksp.protkey); DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc); if (rc) break; @@ -1400,24 +1089,148 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, case PKEY_KBLOB2PROTK: { struct pkey_kblob2pkey __user *utp = (void __user *) arg; struct pkey_kblob2pkey ktp; - __u8 __user *ukey; - __u8 *kkey; + u8 *kkey; if (copy_from_user(&ktp, utp, sizeof(ktp))) return -EFAULT; - if (ktp.keylen < MINKEYBLOBSIZE || - ktp.keylen > MAXKEYBLOBSIZE) - return -EINVAL; - ukey = ktp.key; - kkey = kmalloc(ktp.keylen, GFP_KERNEL); - if (kkey == NULL) + kkey = _copy_key_from_user(ktp.key, ktp.keylen); + if (IS_ERR(kkey)) + return PTR_ERR(kkey); + rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey); + DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc); + kfree(kkey); + if (rc) + break; + if (copy_to_user(utp, &ktp, sizeof(ktp))) + return -EFAULT; + break; + } + case PKEY_GENSECK2: { + struct pkey_genseck2 __user *ugs = (void __user *) arg; + struct pkey_genseck2 kgs; + struct pkey_apqn *apqns; + size_t klen = KEYBLOBBUFSIZE; + u8 *kkey; + + if (copy_from_user(&kgs, ugs, sizeof(kgs))) + return -EFAULT; + apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries); + if (IS_ERR(apqns)) + return PTR_ERR(apqns); + kkey = kmalloc(klen, GFP_KERNEL); + if (!kkey) { + kfree(apqns); + return -ENOMEM; + } + rc = pkey_genseckey2(apqns, kgs.apqn_entries, + kgs.type, kgs.size, kgs.keygenflags, + kkey, &klen); + DEBUG_DBG("%s pkey_genseckey2()=%d\n", __func__, rc); + kfree(apqns); + if (rc) { + kfree(kkey); + break; + } + if (kgs.key) { + if (kgs.keylen < klen) { + kfree(kkey); + return -EINVAL; + } + if (copy_to_user(kgs.key, kkey, klen)) { + kfree(kkey); + return -EFAULT; + } + } + kgs.keylen = klen; + if (copy_to_user(ugs, &kgs, sizeof(kgs))) + rc = -EFAULT; + kfree(kkey); + break; + } + case 
PKEY_CLR2SECK2: { + struct pkey_clr2seck2 __user *ucs = (void __user *) arg; + struct pkey_clr2seck2 kcs; + struct pkey_apqn *apqns; + size_t klen = KEYBLOBBUFSIZE; + u8 *kkey; + + if (copy_from_user(&kcs, ucs, sizeof(kcs))) + return -EFAULT; + apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries); + if (IS_ERR(apqns)) + return PTR_ERR(apqns); + kkey = kmalloc(klen, GFP_KERNEL); + if (!kkey) { + kfree(apqns); return -ENOMEM; - if (copy_from_user(kkey, ukey, ktp.keylen)) { + } + rc = pkey_clr2seckey2(apqns, kcs.apqn_entries, + kcs.type, kcs.size, kcs.keygenflags, + kcs.clrkey.clrkey, kkey, &klen); + DEBUG_DBG("%s pkey_clr2seckey2()=%d\n", __func__, rc); + kfree(apqns); + if (rc) { kfree(kkey); + break; + } + if (kcs.key) { + if (kcs.keylen < klen) { + kfree(kkey); + return -EINVAL; + } + if (copy_to_user(kcs.key, kkey, klen)) { + kfree(kkey); + return -EFAULT; + } + } + kcs.keylen = klen; + if (copy_to_user(ucs, &kcs, sizeof(kcs))) + rc = -EFAULT; + memzero_explicit(&kcs, sizeof(kcs)); + kfree(kkey); + break; + } + case PKEY_VERIFYKEY2: { + struct pkey_verifykey2 __user *uvk = (void __user *) arg; + struct pkey_verifykey2 kvk; + u8 *kkey; + + if (copy_from_user(&kvk, uvk, sizeof(kvk))) return -EFAULT; + kkey = _copy_key_from_user(kvk.key, kvk.keylen); + if (IS_ERR(kkey)) + return PTR_ERR(kkey); + rc = pkey_verifykey2(kkey, kvk.keylen, + &kvk.cardnr, &kvk.domain, + &kvk.type, &kvk.size, &kvk.flags); + DEBUG_DBG("%s pkey_verifykey2()=%d\n", __func__, rc); + kfree(kkey); + if (rc) + break; + if (copy_to_user(uvk, &kvk, sizeof(kvk))) + return -EFAULT; + break; + } + case PKEY_KBLOB2PROTK2: { + struct pkey_kblob2pkey2 __user *utp = (void __user *) arg; + struct pkey_kblob2pkey2 ktp; + struct pkey_apqn *apqns = NULL; + u8 *kkey; + + if (copy_from_user(&ktp, utp, sizeof(ktp))) + return -EFAULT; + apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries); + if (IS_ERR(apqns)) + return PTR_ERR(apqns); + kkey = _copy_key_from_user(ktp.key, ktp.keylen); + if (IS_ERR(kkey)) { + kfree(apqns); + return PTR_ERR(kkey); } - rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey); - DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc); + rc = pkey_keyblob2pkey2(apqns, ktp.apqn_entries, + kkey, ktp.keylen, &ktp.protkey); + DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc); + kfree(apqns); kfree(kkey); if (rc) break; @@ -1425,6 +1238,97 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, return -EFAULT; break; } + case PKEY_APQNS4K: { + struct pkey_apqns4key __user *uak = (void __user *) arg; + struct pkey_apqns4key kak; + struct pkey_apqn *apqns = NULL; + size_t nr_apqns, len; + u8 *kkey; + + if (copy_from_user(&kak, uak, sizeof(kak))) + return -EFAULT; + nr_apqns = kak.apqn_entries; + if (nr_apqns) { + apqns = kmalloc_array(nr_apqns, + sizeof(struct pkey_apqn), + GFP_KERNEL); + if (!apqns) + return -ENOMEM; + } + kkey = _copy_key_from_user(kak.key, kak.keylen); + if (IS_ERR(kkey)) { + kfree(apqns); + return PTR_ERR(kkey); + } + rc = pkey_apqns4key(kkey, kak.keylen, kak.flags, + apqns, &nr_apqns); + DEBUG_DBG("%s pkey_apqns4key()=%d\n", __func__, rc); + kfree(kkey); + if (rc && rc != -ENOSPC) { + kfree(apqns); + break; + } + if (!rc && kak.apqns) { + if (nr_apqns > kak.apqn_entries) { + kfree(apqns); + return -EINVAL; + } + len = nr_apqns * sizeof(struct pkey_apqn); + if (len) { + if (copy_to_user(kak.apqns, apqns, len)) { + kfree(apqns); + return -EFAULT; + } + } + } + kak.apqn_entries = nr_apqns; + if (copy_to_user(uak, &kak, sizeof(kak))) + rc = -EFAULT; + kfree(apqns); + 
break; + } + case PKEY_APQNS4KT: { + struct pkey_apqns4keytype __user *uat = (void __user *) arg; + struct pkey_apqns4keytype kat; + struct pkey_apqn *apqns = NULL; + size_t nr_apqns, len; + + if (copy_from_user(&kat, uat, sizeof(kat))) + return -EFAULT; + nr_apqns = kat.apqn_entries; + if (nr_apqns) { + apqns = kmalloc_array(nr_apqns, + sizeof(struct pkey_apqn), + GFP_KERNEL); + if (!apqns) + return -ENOMEM; + } + rc = pkey_apqns4keytype(kat.type, kat.cur_mkvp, kat.alt_mkvp, + kat.flags, apqns, &nr_apqns); + DEBUG_DBG("%s pkey_apqns4keytype()=%d\n", __func__, rc); + if (rc && rc != -ENOSPC) { + kfree(apqns); + break; + } + if (!rc && kat.apqns) { + if (nr_apqns > kat.apqn_entries) { + kfree(apqns); + return -EINVAL; + } + len = nr_apqns * sizeof(struct pkey_apqn); + if (len) { + if (copy_to_user(kat.apqns, apqns, len)) { + kfree(apqns); + return -EFAULT; + } + } + } + kat.apqn_entries = nr_apqns; + if (copy_to_user(uat, &kat, sizeof(kat))) + rc = -EFAULT; + kfree(apqns); + break; + } default: /* unknown/unsupported ioctl cmd */ return -ENOTTY;
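All of the new *2 ioctl handlers above share one calling convention: a fixed-size struct is copied in and back out, while APQN lists and key blobs travel through user-supplied pointers, and the kernel reports the actual element/blob counts back in apqn_entries and keylen. A minimal user-space sketch, assuming the UAPI names these handlers decode (struct pkey_apqns4keytype, struct pkey_genseck2, struct pkey_apqn, PKEY_TYPE_CCA_CIPHER, PKEY_SIZE_AES_256 from <asm/pkey.h>) and a /dev/pkey node; buffer sizes and error handling are illustrative only:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <asm/pkey.h>

    int main(void)
    {
            struct pkey_apqn apqns[8];
            struct pkey_apqns4keytype at;
            struct pkey_genseck2 gs;
            unsigned char blob[2048];       /* assumed large enough for the blob */
            int fd = open("/dev/pkey", O_RDWR);

            if (fd < 0)
                    return 1;
            /* step 1: which APQNs can produce CCA cipher keys? (PKEY_APQNS4KT) */
            memset(&at, 0, sizeof(at));
            at.type = PKEY_TYPE_CCA_CIPHER;
            at.apqns = apqns;
            at.apqn_entries = 8;
            if (ioctl(fd, PKEY_APQNS4KT, &at) == 0 && at.apqn_entries > 0) {
                    /* step 2: generate a random AES-256 secure key (PKEY_GENSECK2) */
                    memset(&gs, 0, sizeof(gs));
                    gs.apqns = apqns;
                    gs.apqn_entries = at.apqn_entries;
                    gs.type = PKEY_TYPE_CCA_CIPHER;
                    gs.size = PKEY_SIZE_AES_256;
                    gs.key = blob;
                    gs.keylen = sizeof(blob);
                    if (ioctl(fd, PKEY_GENSECK2, &gs) == 0)
                            printf("secure key blob: %u bytes\n", gs.keylen);
            }
            close(fd);
            return 0;
    }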
@@ -1567,6 +1471,7 @@ static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf, loff_t off, size_t count) { int rc; + struct pkey_seckey *seckey = (struct pkey_seckey *) buf; if (off != 0 || count < sizeof(struct secaeskeytoken)) return -EINVAL; @@ -1574,13 +1479,13 @@ static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf, if (count < 2 * sizeof(struct secaeskeytoken)) return -EINVAL; - rc = pkey_genseckey(-1, -1, keytype, (struct pkey_seckey *)buf); + rc = cca_genseckey(-1, -1, keytype, seckey->seckey); if (rc) return rc; if (is_xts) { - buf += sizeof(struct pkey_seckey); - rc = pkey_genseckey(-1, -1, keytype, (struct pkey_seckey *)buf); + seckey++; + rc = cca_genseckey(-1, -1, keytype, seckey->seckey); if (rc) return rc; @@ -1660,9 +1565,256 @@ static struct attribute_group ccadata_attr_group = { .bin_attrs = ccadata_attrs, }; +#define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80) + +/* + * Sysfs attribute read function for all secure key ccacipher binary attributes. + * The implementation cannot deal with partial reads, because a new random + * secure key blob is generated with each read. In case of partial reads + * (i.e. off != 0 or count < key blob size) -EINVAL is returned. + */ +static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits, + bool is_xts, char *buf, loff_t off, + size_t count) +{ + int i, rc, card, dom; + u32 nr_apqns, *apqns = NULL; + size_t keysize = CCACIPHERTOKENSIZE; + + if (off != 0 || count < CCACIPHERTOKENSIZE) + return -EINVAL; + if (is_xts) + if (count < 2 * CCACIPHERTOKENSIZE) + return -EINVAL; + + /* build a list of apqns able to generate a cipher key */ + rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, + ZCRYPT_CEX6, 0, 0, 0); + if (rc) + return rc; + + memset(buf, 0, is_xts ? 2 * keysize : keysize); + + /* simply try all apqns from the list */ + for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { + card = apqns[i] >> 16; + dom = apqns[i] & 0xFFFF; + rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize); + if (rc == 0) + break; + } + if (rc) + return rc; + + if (is_xts) { + keysize = CCACIPHERTOKENSIZE; + buf += CCACIPHERTOKENSIZE; + rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize); + if (rc == 0) + return 2 * CCACIPHERTOKENSIZE; + } + + return CCACIPHERTOKENSIZE; +} + +static ssize_t ccacipher_aes_128_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf, + off, count); +} + +static ssize_t ccacipher_aes_192_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf, + off, count); +} + +static ssize_t ccacipher_aes_256_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf, + off, count); +} + +static ssize_t ccacipher_aes_128_xts_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf, + off, count); +} + +static ssize_t ccacipher_aes_256_xts_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf, + off, count); +} + +static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE); +static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE); +static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE); +static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE); +static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE); + +static struct bin_attribute *ccacipher_attrs[] = { + &bin_attr_ccacipher_aes_128, + &bin_attr_ccacipher_aes_192, + &bin_attr_ccacipher_aes_256, + &bin_attr_ccacipher_aes_128_xts, + &bin_attr_ccacipher_aes_256_xts, + NULL +}; + +static struct attribute_group ccacipher_attr_group = { + .name = "ccacipher", + .bin_attrs = ccacipher_attrs, +};
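Each BIN_ATTR_RO above becomes a sysfs file whose read handler mints a fresh random secure key, so every read returns a different blob and the attribute has to be read in a single request (partial reads fail with -EINVAL, as the comment above notes). A hedged user-space sketch; the path assumes the usual sysfs location of the pkey misc device:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path =
                    "/sys/devices/virtual/misc/pkey/ccacipher/ccacipher_aes_256";
            unsigned char blob[4096];       /* one-shot read, must cover the blob */
            ssize_t n;
            int fd = open(path, O_RDONLY);

            if (fd < 0)
                    return 1;
            n = read(fd, blob, sizeof(blob));
            if (n > 0)
                    printf("got a %zd byte CCA cipher key blob\n", n);
            close(fd);
            return 0;
    }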
+ +/* + * Sysfs attribute read function for all ep11 aes key binary attributes. + * The implementation cannot deal with partial reads, because a new random + * secure key blob is generated with each read. In case of partial reads + * (i.e. off != 0 or count < key blob size) -EINVAL is returned. + * This function and the sysfs attributes using it provide EP11 key blobs + * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently + * 320 bytes. + */ +static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits, + bool is_xts, char *buf, loff_t off, + size_t count) +{ + int i, rc, card, dom; + u32 nr_apqns, *apqns = NULL; + size_t keysize = MAXEP11AESKEYBLOBSIZE; + + if (off != 0 || count < MAXEP11AESKEYBLOBSIZE) + return -EINVAL; + if (is_xts) + if (count < 2 * MAXEP11AESKEYBLOBSIZE) + return -EINVAL; + + /* build a list of apqns able to generate a cipher key */ + rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, + ZCRYPT_CEX7, EP11_API_V, NULL); + if (rc) + return rc; + + memset(buf, 0, is_xts ? 2 * keysize : keysize); + + /* simply try all apqns from the list */ + for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { + card = apqns[i] >> 16; + dom = apqns[i] & 0xFFFF; + rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize); + if (rc == 0) + break; + } + if (rc) + return rc; + + if (is_xts) { + keysize = MAXEP11AESKEYBLOBSIZE; + buf += MAXEP11AESKEYBLOBSIZE; + rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize); + if (rc == 0) + return 2 * MAXEP11AESKEYBLOBSIZE; + } + + return MAXEP11AESKEYBLOBSIZE; +} + +static ssize_t ep11_aes_128_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf, + off, count); +} + +static ssize_t ep11_aes_192_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf, + off, count); +} + +static ssize_t ep11_aes_256_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf, + off, count); +} + +static ssize_t ep11_aes_128_xts_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf, + off, count); +} + +static ssize_t ep11_aes_256_xts_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf, + off, count); +} + +static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE); +static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE); +static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE); +static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE); +static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE); + +static struct bin_attribute *ep11_attrs[] = { + &bin_attr_ep11_aes_128, + &bin_attr_ep11_aes_192, + &bin_attr_ep11_aes_256, + &bin_attr_ep11_aes_128_xts, + &bin_attr_ep11_aes_256_xts, + NULL +}; + +static struct attribute_group ep11_attr_group = { + .name = "ep11", + .bin_attrs = ep11_attrs, +}; + static const struct attribute_group *pkey_attr_groups[] = { &protkey_attr_group, &ccadata_attr_group, + &ccacipher_attr_group, + &ep11_attr_group, NULL, }; @@ -1716,7 +1868,6 @@ static int __init pkey_init(void) static void __exit pkey_exit(void) { misc_deregister(&pkey_dev); - mkvp_cache_free(); pkey_debug_exit(); } diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c index 003662aa8060..be2520cc010b 100644 --- a/drivers/s390/crypto/vfio_ap_drv.c +++ b/drivers/s390/crypto/vfio_ap_drv.c @@ -36,6 +36,8 @@ static struct ap_device_id ap_queue_ids[] = { .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, { .dev_type = AP_DEVICE_TYPE_CEX6, .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX7, + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, { /* end of sibling */ }, }; diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 0604b49a4d32..5c0f53c6dde7 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -1143,7 +1143,7 @@ int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi, msleep(20); status = ap_tapq(apqn, NULL); } - WARN_ON_ONCE(retry <= 0); + WARN_ON_ONCE(retry2 <= 0); return 0; case 
AP_RESPONSE_RESET_IN_PROGRESS: case AP_RESPONSE_BUSY: diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 1058b4b5cc1e..56a405dce8bc 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -35,6 +35,8 @@ #include "zcrypt_msgtype6.h" #include "zcrypt_msgtype50.h" +#include "zcrypt_ccamisc.h" +#include "zcrypt_ep11misc.h" /* * Module description. @@ -133,18 +135,6 @@ struct zcdn_device { static int zcdn_create(const char *name); static int zcdn_destroy(const char *name); -/* helper function, matches the name for find_zcdndev_by_name() */ -static int __match_zcdn_name(struct device *dev, const void *data) -{ - return strcmp(dev_name(dev), (const char *)data) == 0; -} - -/* helper function, matches the devt value for find_zcdndev_by_devt() */ -static int __match_zcdn_devt(struct device *dev, const void *data) -{ - return dev->devt == *((dev_t *) data); -} - /* * Find zcdn device by name. * Returns reference to the zcdn device which needs to be released @@ -152,10 +142,7 @@ static int __match_zcdn_devt(struct device *dev, const void *data) */ static inline struct zcdn_device *find_zcdndev_by_name(const char *name) { - struct device *dev = - class_find_device(zcrypt_class, NULL, - (void *) name, - __match_zcdn_name); + struct device *dev = class_find_device_by_name(zcrypt_class, name); return dev ? to_zcdn_dev(dev) : NULL; } @@ -167,10 +154,7 @@ static inline struct zcdn_device *find_zcdndev_by_name(const char *name) */ static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt) { - struct device *dev = - class_find_device(zcrypt_class, NULL, - (void *) &devt, - __match_zcdn_devt); + struct device *dev = class_find_device_by_devt(zcrypt_class, devt); return dev ? to_zcdn_dev(dev) : NULL; } @@ -539,8 +523,7 @@ static int zcrypt_release(struct inode *inode, struct file *filp) if (filp->f_inode->i_cdev == &zcrypt_cdev) { struct zcdn_device *zcdndev; - if (mutex_lock_interruptible(&ap_perms_mutex)) - return -ERESTARTSYS; + mutex_lock(&ap_perms_mutex); zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev); mutex_unlock(&ap_perms_mutex); if (zcdndev) { @@ -623,8 +606,8 @@ static inline bool zcrypt_card_compare(struct zcrypt_card *zc, weight += atomic_read(&zc->load); pref_weight += atomic_read(&pref_zc->load); if (weight == pref_weight) - return atomic_read(&zc->card->total_request_count) > - atomic_read(&pref_zc->card->total_request_count); + return atomic64_read(&zc->card->total_request_count) > + atomic64_read(&pref_zc->card->total_request_count); return weight > pref_weight; } @@ -867,7 +850,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms, /* check if device is online and eligible */ if (!zq->online || !zq->ops->send_cprb || - (tdom != (unsigned short) AUTOSELECT && + (tdom != AUTOSEL_DOM && tdom != AP_QID_QUEUE(zq->queue->qid))) continue; /* check if device node has admission for this queue */ @@ -892,7 +875,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms, /* in case of auto select, provide the correct domain */ qid = pref_zq->queue->qid; - if (*domain == (unsigned short) AUTOSELECT) + if (*domain == AUTOSEL_DOM) *domain = AP_QID_QUEUE(qid); rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg); @@ -919,7 +902,7 @@ static bool is_desired_ep11_card(unsigned int dev_id, struct ep11_target_dev *targets) { while (target_num-- > 0) { - if (dev_id == targets->ap_id) + if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP) return true; targets++; } @@ -930,16 +913,19 @@ static bool 
is_desired_ep11_queue(unsigned int dev_qid, unsigned short target_num, struct ep11_target_dev *targets) { + int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid); + while (target_num-- > 0) { - if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid) + if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) && + (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM)) return true; targets++; } return false; } -static long zcrypt_send_ep11_cprb(struct ap_perms *perms, - struct ep11_urb *xcrb) +static long _zcrypt_send_ep11_cprb(struct ap_perms *perms, + struct ep11_urb *xcrb) { struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; @@ -1044,6 +1030,12 @@ out: return rc; } +long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) +{ + return _zcrypt_send_ep11_cprb(&ap_perms, xcrb); +} +EXPORT_SYMBOL(zcrypt_send_ep11_cprb); + static long zcrypt_rng(char *buffer) { struct zcrypt_card *zc, *pref_zc; @@ -1160,6 +1152,34 @@ void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus) } EXPORT_SYMBOL(zcrypt_device_status_mask_ext); +int zcrypt_device_status_ext(int card, int queue, + struct zcrypt_device_status_ext *devstat) +{ + struct zcrypt_card *zc; + struct zcrypt_queue *zq; + + memset(devstat, 0, sizeof(*devstat)); + + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + for_each_zcrypt_queue(zq, zc) { + if (card == AP_QID_CARD(zq->queue->qid) && + queue == AP_QID_QUEUE(zq->queue->qid)) { + devstat->hwtype = zc->card->ap_dev.device_type; + devstat->functions = zc->card->functions >> 26; + devstat->qid = zq->queue->qid; + devstat->online = zq->online ? 0x01 : 0x00; + spin_unlock(&zcrypt_list_lock); + return 0; + } + } + } + spin_unlock(&zcrypt_list_lock); + + return -ENODEV; +} +EXPORT_SYMBOL(zcrypt_device_status_ext); + static void zcrypt_status_mask(char status[], size_t max_adapters) { struct zcrypt_card *zc; @@ -1206,11 +1226,12 @@ static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters) spin_unlock(&zcrypt_list_lock); } -static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters) +static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters) { struct zcrypt_card *zc; struct zcrypt_queue *zq; int card; + u64 cnt; memset(reqcnt, 0, sizeof(int) * max_adapters); spin_lock(&zcrypt_list_lock); @@ -1222,8 +1243,9 @@ static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters) || card >= max_adapters) continue; spin_lock(&zq->queue->lock); - reqcnt[card] = zq->queue->total_request_count; + cnt = zq->queue->total_request_count; spin_unlock(&zq->queue->lock); + reqcnt[card] = (cnt < UINT_MAX) ? 
(u32) cnt : UINT_MAX; } } local_bh_enable(); @@ -1356,12 +1378,12 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) return -EFAULT; do { - rc = zcrypt_send_ep11_cprb(perms, &xcrb); + rc = _zcrypt_send_ep11_cprb(perms, &xcrb); } while (rc == -EAGAIN); /* on failure: retry once again after a requested rescan */ if ((rc == -ENODEV) && (zcrypt_process_rescan())) do { - rc = zcrypt_send_ep11_cprb(perms, &xcrb); + rc = _zcrypt_send_ep11_cprb(perms, &xcrb); } while (rc == -EAGAIN); if (rc) ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc); @@ -1401,9 +1423,9 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, return 0; } case ZCRYPT_PERDEV_REQCNT: { - int *reqcnt; + u32 *reqcnt; - reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL); + reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL); if (!reqcnt) return -ENOMEM; zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES); @@ -1460,7 +1482,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, } case Z90STAT_PERDEV_REQCNT: { /* the old ioctl supports only 64 adapters */ - int reqcnt[MAX_ZDEV_CARDIDS]; + u32 reqcnt[MAX_ZDEV_CARDIDS]; zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS); if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt))) @@ -1874,6 +1896,8 @@ void __exit zcrypt_api_exit(void) misc_deregister(&zcrypt_misc_device); zcrypt_msgtype6_exit(); zcrypt_msgtype50_exit(); + zcrypt_ccamisc_exit(); + zcrypt_ep11misc_exit(); zcrypt_debug_exit(); } diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index af67a768a3fc..599e68bf53f7 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0+ */ /* - * Copyright IBM Corp. 2001, 2018 + * Copyright IBM Corp. 
2001, 2019 * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * Cornelia Huck <cornelia.huck@de.ibm.com> @@ -29,6 +29,7 @@ #define ZCRYPT_CEX4 10 #define ZCRYPT_CEX5 11 #define ZCRYPT_CEX6 12 +#define ZCRYPT_CEX7 13 /** * Large random numbers are pulled in 4096 byte chunks from the crypto cards @@ -121,9 +122,6 @@ void zcrypt_card_get(struct zcrypt_card *); int zcrypt_card_put(struct zcrypt_card *); int zcrypt_card_register(struct zcrypt_card *); void zcrypt_card_unregister(struct zcrypt_card *); -struct zcrypt_card *zcrypt_card_get_best(unsigned int *, - unsigned int, unsigned int); -void zcrypt_card_put_best(struct zcrypt_card *, unsigned int); struct zcrypt_queue *zcrypt_queue_alloc(size_t); void zcrypt_queue_free(struct zcrypt_queue *); @@ -132,8 +130,6 @@ int zcrypt_queue_put(struct zcrypt_queue *); int zcrypt_queue_register(struct zcrypt_queue *); void zcrypt_queue_unregister(struct zcrypt_queue *); void zcrypt_queue_force_online(struct zcrypt_queue *, int); -struct zcrypt_queue *zcrypt_queue_get_best(unsigned int, unsigned int); -void zcrypt_queue_put_best(struct zcrypt_queue *, unsigned int); int zcrypt_rng_device_add(void); void zcrypt_rng_device_remove(void); @@ -144,6 +140,9 @@ struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int); int zcrypt_api_init(void); void zcrypt_api_exit(void); long zcrypt_send_cprb(struct ica_xcRB *xcRB); +long zcrypt_send_ep11_cprb(struct ep11_urb *urb); void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus); +int zcrypt_device_status_ext(int card, int queue, + struct zcrypt_device_status_ext *devstatus); #endif /* _ZCRYPT_API_H_ */ diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c new file mode 100644 index 000000000000..110fe9d0cb91 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_ccamisc.c @@ -0,0 +1,1765 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright IBM Corp. 2019 + * Author(s): Harald Freudenberger <freude@linux.ibm.com> + * Ingo Franzki <ifranzki@linux.ibm.com> + * + * Collection of CCA misc functions used by zcrypt and pkey + */ + +#define KMSG_COMPONENT "zcrypt" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/random.h> +#include <asm/zcrypt.h> +#include <asm/pkey.h> + +#include "ap_bus.h" +#include "zcrypt_api.h" +#include "zcrypt_debug.h" +#include "zcrypt_msgtype6.h" +#include "zcrypt_ccamisc.h" + +#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__) +#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__) +#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__) +#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__) + +/* Size of parameter block used for all cca requests/replies */ +#define PARMBSIZE 512 + +/* Size of vardata block used for some of the cca requests/replies */ +#define VARDATASIZE 4096 + +struct cca_info_list_entry { + struct list_head list; + u16 cardnr; + u16 domain; + struct cca_info info; +}; + +/* a list with cca_info_list_entry entries */ +static LIST_HEAD(cca_info_list); +static DEFINE_SPINLOCK(cca_info_list_lock); + +/* + * Simple check if the token is a valid CCA secure AES data key + * token. If keybitsize is given, the bitsize of the key is + * also checked. Returns 0 on success or errno value on failure. + */ +int cca_check_secaeskeytoken(debug_info_t *dbg, int dbflvl, + const u8 *token, int keybitsize) +{ + struct secaeskeytoken *t = (struct secaeskeytoken *) token; + +#define DBF(...) 
debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) + + if (t->type != TOKTYPE_CCA_INTERNAL) { + if (dbg) + DBF("%s token check failed, type 0x%02x != 0x%02x\n", + __func__, (int) t->type, TOKTYPE_CCA_INTERNAL); + return -EINVAL; + } + if (t->version != TOKVER_CCA_AES) { + if (dbg) + DBF("%s token check failed, version 0x%02x != 0x%02x\n", + __func__, (int) t->version, TOKVER_CCA_AES); + return -EINVAL; + } + if (keybitsize > 0 && t->bitsize != keybitsize) { + if (dbg) + DBF("%s token check failed, bitsize %d != %d\n", + __func__, (int) t->bitsize, keybitsize); + return -EINVAL; + } + +#undef DBF + + return 0; +} +EXPORT_SYMBOL(cca_check_secaeskeytoken); + +/* + * Simple check if the token is a valid CCA secure AES cipher key + * token. If keybitsize is given, the bitsize of the key is + * also checked. If checkcpacfexport is enabled, the key is also + * checked for the export flag to allow CPACF export. + * Returns 0 on success or errno value on failure. + */ +int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl, + const u8 *token, int keybitsize, + int checkcpacfexport) +{ + struct cipherkeytoken *t = (struct cipherkeytoken *) token; + bool keybitsizeok = true; + +#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) + + if (t->type != TOKTYPE_CCA_INTERNAL) { + if (dbg) + DBF("%s token check failed, type 0x%02x != 0x%02x\n", + __func__, (int) t->type, TOKTYPE_CCA_INTERNAL); + return -EINVAL; + } + if (t->version != TOKVER_CCA_VLSC) { + if (dbg) + DBF("%s token check failed, version 0x%02x != 0x%02x\n", + __func__, (int) t->version, TOKVER_CCA_VLSC); + return -EINVAL; + } + if (t->algtype != 0x02) { + if (dbg) + DBF("%s token check failed, algtype 0x%02x != 0x02\n", + __func__, (int) t->algtype); + return -EINVAL; + } + if (t->keytype != 0x0001) { + if (dbg) + DBF("%s token check failed, keytype 0x%04x != 0x0001\n", + __func__, (int) t->keytype); + return -EINVAL; + } + if (t->plfver != 0x00 && t->plfver != 0x01) { + if (dbg) + DBF("%s token check failed, unknown plfver 0x%02x\n", + __func__, (int) t->plfver); + return -EINVAL; + } + if (t->wpllen != 512 && t->wpllen != 576 && t->wpllen != 640) { + if (dbg) + DBF("%s token check failed, unknown wpllen %d\n", + __func__, (int) t->wpllen); + return -EINVAL; + } + if (keybitsize > 0) { + switch (keybitsize) { + case 128: + if (t->wpllen != (t->plfver ? 640 : 512)) + keybitsizeok = false; + break; + case 192: + if (t->wpllen != (t->plfver ? 640 : 576)) + keybitsizeok = false; + break; + case 256: + if (t->wpllen != 640) + keybitsizeok = false; + break; + default: + keybitsizeok = false; + break; + } + if (!keybitsizeok) { + if (dbg) + DBF("%s token check failed, bitsize %d\n", + __func__, keybitsize); + return -EINVAL; + } + } + if (checkcpacfexport && !(t->kmf1 & KMF1_XPRT_CPAC)) { + if (dbg) + DBF("%s token check failed, XPRT_CPAC bit is 0\n", + __func__); + return -EINVAL; + } + +#undef DBF + + return 0; +} +EXPORT_SYMBOL(cca_check_secaescipherkey); + +/* + * Allocate consecutive memory for request CPRB, request param + * block, reply CPRB and reply param block and fill in values + * for the common fields. Returns 0 on success or errno value + * on failure. 
+ */ +static int alloc_and_prep_cprbmem(size_t paramblen, + u8 **pcprbmem, + struct CPRBX **preqCPRB, + struct CPRBX **prepCPRB) +{ + u8 *cprbmem; + size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen; + struct CPRBX *preqcblk, *prepcblk; + + /* + * allocate consecutive memory for request CPRB, request param + * block, reply CPRB and reply param block + */ + cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL); + if (!cprbmem) + return -ENOMEM; + + preqcblk = (struct CPRBX *) cprbmem; + prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen); + + /* fill request cprb struct */ + preqcblk->cprb_len = sizeof(struct CPRBX); + preqcblk->cprb_ver_id = 0x02; + memcpy(preqcblk->func_id, "T2", 2); + preqcblk->rpl_msgbl = cprbplusparamblen; + if (paramblen) { + preqcblk->req_parmb = + ((u8 *) preqcblk) + sizeof(struct CPRBX); + preqcblk->rpl_parmb = + ((u8 *) prepcblk) + sizeof(struct CPRBX); + } + + *pcprbmem = cprbmem; + *preqCPRB = preqcblk; + *prepCPRB = prepcblk; + + return 0; +} + +/* + * Free the cprb memory allocated with the function above. + * If the scrub value is not zero, the memory is filled + * with zeros before freeing (useful if there was some + * clear key material in there). + */ +static void free_cprbmem(void *mem, size_t paramblen, int scrub) +{ + if (scrub) + memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen)); + kfree(mem); +} + +/* + * Helper function to prepare the xcrb struct + */ +static inline void prep_xcrb(struct ica_xcRB *pxcrb, + u16 cardnr, + struct CPRBX *preqcblk, + struct CPRBX *prepcblk) +{ + memset(pxcrb, 0, sizeof(*pxcrb)); + pxcrb->agent_ID = 0x4341; /* 'CA' */ + pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr); + pxcrb->request_control_blk_length = + preqcblk->cprb_len + preqcblk->req_parml; + pxcrb->request_control_blk_addr = (void __user *) preqcblk; + pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl; + pxcrb->reply_control_blk_addr = (void __user *) prepcblk; +} + +/* + * Helper function which calls zcrypt_send_cprb with + * memory management segment adjusted to kernel space + * so that the copy_from_user called within this + * function does in fact copy from kernel space. + */ +static inline int _zcrypt_send_cprb(struct ica_xcRB *xcrb) +{ + int rc; + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); + rc = zcrypt_send_cprb(xcrb); + set_fs(old_fs); + + return rc; +}
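Every cca_* request below is built from the four helpers above and follows the same shape. A condensed sketch of that pattern, in the context of this file; the parm block handling is a placeholder, not a real CCA verb:

    static int cca_request_pattern_sketch(u16 cardnr, u16 domain)
    {
            int rc;
            u8 *mem;
            struct CPRBX *preqcblk, *prepcblk;
            struct ica_xcRB xcrb;

            rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
            if (rc)
                    return rc;
            preqcblk->domain = domain;
            /* ... fill preqcblk->req_parmb with the subfunction parm block
             * and set preqcblk->req_parml to its length ... */
            prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
            rc = _zcrypt_send_cprb(&xcrb);  /* route via zcrypt, kernel buffers */
            if (rc == 0 && prepcblk->ccp_rtcode != 0)
                    rc = -EIO;      /* nonzero card return code means failure */
            /* ... on success parse the reply at prepcblk->rpl_parmb ... */
            free_cprbmem(mem, PARMBSIZE, 0);
            return rc;
    }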
+ +/* + * Generate (random) CCA AES DATA secure key. + */ +int cca_genseckey(u16 cardnr, u16 domain, + u32 keybitsize, u8 seckey[SECKEYBLOBSIZE]) +{ + int i, rc, keysize; + int seckeysize; + u8 *mem; + struct CPRBX *preqcblk, *prepcblk; + struct ica_xcRB xcrb; + struct kgreqparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct lv1 { + u16 len; + char key_form[8]; + char key_length[8]; + char key_type1[8]; + char key_type2[8]; + } lv1; + struct lv2 { + u16 len; + struct keyid { + u16 len; + u16 attr; + u8 data[SECKEYBLOBSIZE]; + } keyid[6]; + } lv2; + } __packed * preqparm; + struct kgrepparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct lv3 { + u16 len; + u16 keyblocklen; + struct { + u16 toklen; + u16 tokattr; + u8 tok[0]; + /* ... some more data ... */ + } keyblock; + } lv3; + } __packed * prepparm; + + /* get already prepared memory for 2 cprbs with param block each */ + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + if (rc) + return rc; + + /* fill request cprb struct */ + preqcblk->domain = domain; + + /* fill request cprb param block with KG request */ + preqparm = (struct kgreqparm *) preqcblk->req_parmb; + memcpy(preqparm->subfunc_code, "KG", 2); + preqparm->rule_array_len = sizeof(preqparm->rule_array_len); + preqparm->lv1.len = sizeof(struct lv1); + memcpy(preqparm->lv1.key_form, "OP ", 8); + switch (keybitsize) { + case PKEY_SIZE_AES_128: + case PKEY_KEYTYPE_AES_128: /* older ioctls used this */ + keysize = 16; + memcpy(preqparm->lv1.key_length, "KEYLN16 ", 8); + break; + case PKEY_SIZE_AES_192: + case PKEY_KEYTYPE_AES_192: /* older ioctls used this */ + keysize = 24; + memcpy(preqparm->lv1.key_length, "KEYLN24 ", 8); + break; + case PKEY_SIZE_AES_256: + case PKEY_KEYTYPE_AES_256: /* older ioctls used this */ + keysize = 32; + memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8); + break; + default: + DEBUG_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); + rc = -EINVAL; + goto out; + } + memcpy(preqparm->lv1.key_type1, "AESDATA ", 8); + preqparm->lv2.len = sizeof(struct lv2); + for (i = 0; i < 6; i++) { + preqparm->lv2.keyid[i].len = sizeof(struct keyid); + preqparm->lv2.keyid[i].attr = (i == 2 ? 0x30 : 0x10); + } + preqcblk->req_parml = sizeof(struct kgreqparm); + + /* fill xcrb struct */ + prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); + + /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ + rc = _zcrypt_send_cprb(&xcrb); + if (rc) { + DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n", + __func__, (int) cardnr, (int) domain, rc); + goto out; + } + + /* check response returncode and reasoncode */ + if (prepcblk->ccp_rtcode != 0) { + DEBUG_ERR("%s secure key generate failure, card response %d/%d\n", + __func__, + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + rc = -EIO; + goto out; + } + + /* process response cprb param block */ + prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); + prepparm = (struct kgrepparm *) prepcblk->rpl_parmb; + + /* check length of the returned secure key token */ + seckeysize = prepparm->lv3.keyblock.toklen + - sizeof(prepparm->lv3.keyblock.toklen) + - sizeof(prepparm->lv3.keyblock.tokattr); + if (seckeysize != SECKEYBLOBSIZE) { + DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n", + __func__, seckeysize, SECKEYBLOBSIZE); + rc = -EIO; + goto out; + } + + /* check secure key token */ + rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR, + prepparm->lv3.keyblock.tok, 8*keysize); + if (rc) { + rc = -EIO; + goto out; + } + + /* copy the generated secure key token */ + memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE); + +out: + free_cprbmem(mem, PARMBSIZE, 0); + return rc; +} +EXPORT_SYMBOL(cca_genseckey); + +/* + * Generate a CCA AES DATA secure key with a given key value. 
+ */ +int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, + const u8 *clrkey, u8 seckey[SECKEYBLOBSIZE]) +{ + int rc, keysize, seckeysize; + u8 *mem; + struct CPRBX *preqcblk, *prepcblk; + struct ica_xcRB xcrb; + struct cmreqparm { + u8 subfunc_code[2]; + u16 rule_array_len; + char rule_array[8]; + struct lv1 { + u16 len; + u8 clrkey[0]; + } lv1; + struct lv2 { + u16 len; + struct keyid { + u16 len; + u16 attr; + u8 data[SECKEYBLOBSIZE]; + } keyid; + } lv2; + } __packed * preqparm; + struct lv2 *plv2; + struct cmrepparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct lv3 { + u16 len; + u16 keyblocklen; + struct { + u16 toklen; + u16 tokattr; + u8 tok[0]; + /* ... some more data ... */ + } keyblock; + } lv3; + } __packed * prepparm; + + /* get already prepared memory for 2 cprbs with param block each */ + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + if (rc) + return rc; + + /* fill request cprb struct */ + preqcblk->domain = domain; + + /* fill request cprb param block with CM request */ + preqparm = (struct cmreqparm *) preqcblk->req_parmb; + memcpy(preqparm->subfunc_code, "CM", 2); + memcpy(preqparm->rule_array, "AES ", 8); + preqparm->rule_array_len = + sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array); + switch (keybitsize) { + case PKEY_SIZE_AES_128: + case PKEY_KEYTYPE_AES_128: /* older ioctls used this */ + keysize = 16; + break; + case PKEY_SIZE_AES_192: + case PKEY_KEYTYPE_AES_192: /* older ioctls used this */ + keysize = 24; + break; + case PKEY_SIZE_AES_256: + case PKEY_KEYTYPE_AES_256: /* older ioctls used this */ + keysize = 32; + break; + default: + DEBUG_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); + rc = -EINVAL; + goto out; + } + preqparm->lv1.len = sizeof(struct lv1) + keysize; + memcpy(preqparm->lv1.clrkey, clrkey, keysize); + plv2 = (struct lv2 *) (((u8 *) &preqparm->lv2) + keysize); + plv2->len = sizeof(struct lv2); + plv2->keyid.len = sizeof(struct keyid); + plv2->keyid.attr = 0x30; + preqcblk->req_parml = sizeof(struct cmreqparm) + keysize; + + /* fill xcrb struct */ + prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); + + /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ + rc = _zcrypt_send_cprb(&xcrb); + if (rc) { + DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int) cardnr, (int) domain, rc); + goto out; + } + + /* check response returncode and reasoncode */ + if (prepcblk->ccp_rtcode != 0) { + DEBUG_ERR("%s clear key import failure, card response %d/%d\n", + __func__, + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + rc = -EIO; + goto out; + } + + /* process response cprb param block */ + prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); + prepparm = (struct cmrepparm *) prepcblk->rpl_parmb; + + /* check length of the returned secure key token */ + seckeysize = prepparm->lv3.keyblock.toklen + - sizeof(prepparm->lv3.keyblock.toklen) + - sizeof(prepparm->lv3.keyblock.tokattr); + if (seckeysize != SECKEYBLOBSIZE) { + DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n", + __func__, seckeysize, SECKEYBLOBSIZE); + rc = -EIO; + goto out; + } + + /* check secure key token */ + rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR, + prepparm->lv3.keyblock.tok, 8*keysize); + if (rc) { + rc = -EIO; + goto out; + } + + /* copy the generated secure key token */ + if (seckey) + memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE); + +out: + free_cprbmem(mem, PARMBSIZE, 1); + return rc; +} 
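cca_clr2seckey() and cca_sec2protkey() (just below) are the two halves of the classic pkey flow: import a clear key as a card-bound secure key once, then derive the CPACF protected key from it at runtime. A minimal in-kernel sketch of that composition, mirroring what the PKEY_CLR2SECK and PKEY_SEC2PROTK handlers shown earlier do back to back; the function name and the fixed-size buffers are illustrative:

    static int example_clr_to_protkey(u16 card, u16 dom,
                                      const u8 *clrkey /* 32 bytes */)
    {
            u8 seckey[SECKEYBLOBSIZE];
            u8 protkey[64]; /* max: 32 byte AES-256 key + 32 bytes verification data */
            u32 protkeylen, protkeytype;
            int rc;

            rc = cca_clr2seckey(card, dom, PKEY_KEYTYPE_AES_256, clrkey, seckey);
            if (rc)
                    return rc;
            /* the secure key unwraps only on an adapter with the matching MK */
            return cca_sec2protkey(card, dom, seckey,
                                   protkey, &protkeylen, &protkeytype);
    }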
+EXPORT_SYMBOL(cca_clr2seckey); + +/* + * Derive protected key from a CCA AES DATA secure key. + */ +int cca_sec2protkey(u16 cardnr, u16 domain, + const u8 seckey[SECKEYBLOBSIZE], + u8 *protkey, u32 *protkeylen, u32 *protkeytype) +{ + int rc; + u8 *mem; + struct CPRBX *preqcblk, *prepcblk; + struct ica_xcRB xcrb; + struct uskreqparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct lv1 { + u16 len; + u16 attr_len; + u16 attr_flags; + } lv1; + struct lv2 { + u16 len; + u16 attr_len; + u16 attr_flags; + u8 token[0]; /* cca secure key token */ + } lv2; + } __packed * preqparm; + struct uskrepparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct lv3 { + u16 len; + u16 attr_len; + u16 attr_flags; + struct cpacfkeyblock { + u8 version; /* version of this struct */ + u8 flags[2]; + u8 algo; + u8 form; + u8 pad1[3]; + u16 len; + u8 key[64]; /* the key (len bytes) */ + u16 keyattrlen; + u8 keyattr[32]; + u8 pad2[1]; + u8 vptype; + u8 vp[32]; /* verification pattern */ + } keyblock; + } lv3; + } __packed * prepparm; + + /* get already prepared memory for 2 cprbs with param block each */ + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + if (rc) + return rc; + + /* fill request cprb struct */ + preqcblk->domain = domain; + + /* fill request cprb param block with USK request */ + preqparm = (struct uskreqparm *) preqcblk->req_parmb; + memcpy(preqparm->subfunc_code, "US", 2); + preqparm->rule_array_len = sizeof(preqparm->rule_array_len); + preqparm->lv1.len = sizeof(struct lv1); + preqparm->lv1.attr_len = sizeof(struct lv1) - sizeof(preqparm->lv1.len); + preqparm->lv1.attr_flags = 0x0001; + preqparm->lv2.len = sizeof(struct lv2) + SECKEYBLOBSIZE; + preqparm->lv2.attr_len = sizeof(struct lv2) + - sizeof(preqparm->lv2.len) + SECKEYBLOBSIZE; + preqparm->lv2.attr_flags = 0x0000; + memcpy(preqparm->lv2.token, seckey, SECKEYBLOBSIZE); + preqcblk->req_parml = sizeof(struct uskreqparm) + SECKEYBLOBSIZE; + + /* fill xcrb struct */ + prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); + + /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ + rc = _zcrypt_send_cprb(&xcrb); + if (rc) { + DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int) cardnr, (int) domain, rc); + goto out; + } + + /* check response returncode and reasoncode */ + if (prepcblk->ccp_rtcode != 0) { + DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n", + __func__, + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + rc = -EIO; + goto out; + } + if (prepcblk->ccp_rscode != 0) { + DEBUG_WARN("%s unwrap secure key warning, card response %d/%d\n", + __func__, + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + } + + /* process response cprb param block */ + prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); + prepparm = (struct uskrepparm *) prepcblk->rpl_parmb; + + /* check the returned keyblock */ + if (prepparm->lv3.keyblock.version != 0x01) { + DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x01\n", + __func__, (int) prepparm->lv3.keyblock.version); + rc = -EIO; + goto out; + } + + /* copy the translated protected key */ + switch (prepparm->lv3.keyblock.len) { + case 16+32: + /* AES 128 protected key */ + if (protkeytype) + *protkeytype = PKEY_KEYTYPE_AES_128; + break; + case 24+32: + /* AES 192 protected key */ + if (protkeytype) + *protkeytype = PKEY_KEYTYPE_AES_192; + break; + case 32+32: + /* AES 256 protected key */ + if (protkeytype) + *protkeytype = PKEY_KEYTYPE_AES_256; + break; + default: + DEBUG_ERR("%s 
unknown/unsupported keylen %d\n", + __func__, prepparm->lv3.keyblock.len); + rc = -EIO; + goto out; + } + memcpy(protkey, prepparm->lv3.keyblock.key, prepparm->lv3.keyblock.len); + if (protkeylen) + *protkeylen = prepparm->lv3.keyblock.len; + +out: + free_cprbmem(mem, PARMBSIZE, 0); + return rc; +} +EXPORT_SYMBOL(cca_sec2protkey); + +/* + * AES cipher key skeleton created with CSNBKTB2 with these flags: + * INTERNAL, NO-KEY, AES, CIPHER, ANY-MODE, NOEX-SYM, NOEXAASY, + * NOEXUASY, XPRTCPAC, NOEX-RAW, NOEX-DES, NOEX-AES, NOEX-RSA + * used by cca_gencipherkey() and cca_clr2cipherkey(). + */ +static const u8 aes_cipher_key_skeleton[] = { + 0x01, 0x00, 0x00, 0x38, 0x05, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x02, 0x00, 0x01, 0x02, 0xc0, 0x00, 0xff, + 0x00, 0x03, 0x08, 0xc8, 0x00, 0x00, 0x00, 0x00 }; +#define SIZEOF_SKELETON (sizeof(aes_cipher_key_skeleton)) + +/* + * Generate (random) CCA AES CIPHER secure key. + */ +int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, + u8 *keybuf, size_t *keybufsize) +{ + int rc; + u8 *mem; + struct CPRBX *preqcblk, *prepcblk; + struct ica_xcRB xcrb; + struct gkreqparm { + u8 subfunc_code[2]; + u16 rule_array_len; + char rule_array[2*8]; + struct { + u16 len; + u8 key_type_1[8]; + u8 key_type_2[8]; + u16 clear_key_bit_len; + u16 key_name_1_len; + u16 key_name_2_len; + u16 user_data_1_len; + u16 user_data_2_len; + u8 key_name_1[0]; + u8 key_name_2[0]; + u8 user_data_1[0]; + u8 user_data_2[0]; + } vud; + struct { + u16 len; + struct { + u16 len; + u16 flag; + u8 kek_id_1[0]; + } tlv1; + struct { + u16 len; + u16 flag; + u8 kek_id_2[0]; + } tlv2; + struct { + u16 len; + u16 flag; + u8 gen_key_id_1[SIZEOF_SKELETON]; + } tlv3; + struct { + u16 len; + u16 flag; + u8 gen_key_id_1_label[0]; + } tlv4; + struct { + u16 len; + u16 flag; + u8 gen_key_id_2[0]; + } tlv5; + struct { + u16 len; + u16 flag; + u8 gen_key_id_2_label[0]; + } tlv6; + } kb; + } __packed * preqparm; + struct gkrepparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct { + u16 len; + } vud; + struct { + u16 len; + struct { + u16 len; + u16 flag; + u8 gen_key[0]; /* 120-136 bytes */ + } tlv1; + } kb; + } __packed * prepparm; + struct cipherkeytoken *t; + + /* get already prepared memory for 2 cprbs with param block each */ + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + if (rc) + return rc; + + /* fill request cprb struct */ + preqcblk->domain = domain; + preqcblk->req_parml = sizeof(struct gkreqparm); + + /* prepare request param block with GK request */ + preqparm = (struct gkreqparm *) preqcblk->req_parmb; + memcpy(preqparm->subfunc_code, "GK", 2); + preqparm->rule_array_len = sizeof(uint16_t) + 2 * 8; + memcpy(preqparm->rule_array, "AES OP ", 2*8); + + /* prepare vud block */ + preqparm->vud.len = sizeof(preqparm->vud); + switch (keybitsize) { + case 128: + case 192: + case 256: + break; + default: + DEBUG_ERR( + "%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); + rc = -EINVAL; + goto out; + } + preqparm->vud.clear_key_bit_len = keybitsize; + memcpy(preqparm->vud.key_type_1, "TOKEN ", 8); + memset(preqparm->vud.key_type_2, ' ', sizeof(preqparm->vud.key_type_2)); + + /* prepare kb block */ + preqparm->kb.len = sizeof(preqparm->kb); + preqparm->kb.tlv1.len = sizeof(preqparm->kb.tlv1); + preqparm->kb.tlv1.flag = 0x0030; + preqparm->kb.tlv2.len = 
sizeof(preqparm->kb.tlv2); + preqparm->kb.tlv2.flag = 0x0030; + preqparm->kb.tlv3.len = sizeof(preqparm->kb.tlv3); + preqparm->kb.tlv3.flag = 0x0030; + memcpy(preqparm->kb.tlv3.gen_key_id_1, + aes_cipher_key_skeleton, SIZEOF_SKELETON); + preqparm->kb.tlv4.len = sizeof(preqparm->kb.tlv4); + preqparm->kb.tlv4.flag = 0x0030; + preqparm->kb.tlv5.len = sizeof(preqparm->kb.tlv5); + preqparm->kb.tlv5.flag = 0x0030; + preqparm->kb.tlv6.len = sizeof(preqparm->kb.tlv6); + preqparm->kb.tlv6.flag = 0x0030; + + /* patch the skeleton key token export flags inside the kb block */ + if (keygenflags) { + t = (struct cipherkeytoken *) preqparm->kb.tlv3.gen_key_id_1; + t->kmf1 |= (u16) (keygenflags & 0x0000FF00); + t->kmf1 &= (u16) ~(keygenflags & 0x000000FF); + } + + /* prepare xcrb struct */ + prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); + + /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ + rc = _zcrypt_send_cprb(&xcrb); + if (rc) { + DEBUG_ERR( + "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int) cardnr, (int) domain, rc); + goto out; + } + + /* check response returncode and reasoncode */ + if (prepcblk->ccp_rtcode != 0) { + DEBUG_ERR( + "%s cipher key generate failure, card response %d/%d\n", + __func__, + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + rc = -EIO; + goto out; + } + + /* process response cprb param block */ + prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); + prepparm = (struct gkrepparm *) prepcblk->rpl_parmb; + + /* do some plausibility checks on the key block */ + if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) || + prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) { + DEBUG_ERR("%s reply with invalid or unknown key block\n", + __func__); + rc = -EIO; + goto out; + } + + /* and some checks on the generated key */ + rc = cca_check_secaescipherkey(zcrypt_dbf_info, DBF_ERR, + prepparm->kb.tlv1.gen_key, + keybitsize, 1); + if (rc) { + rc = -EIO; + goto out; + } + + /* copy the generated vlsc key token */ + t = (struct cipherkeytoken *) prepparm->kb.tlv1.gen_key; + if (keybuf) { + if (*keybufsize >= t->len) + memcpy(keybuf, t, t->len); + else + rc = -EINVAL; + } + *keybufsize = t->len; + +out: + free_cprbmem(mem, PARMBSIZE, 0); + return rc; +} +EXPORT_SYMBOL(cca_gencipherkey);
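cca_gencipherkey() reports the real token length through *keybufsize and only copies the token out if the caller buffer is big enough; with a NULL keybuf it just reports the length. A short sketch using the MAXCCAVLSCTOKENSIZE upper bound that cca_clr2cipherkey() below also allocates; the wrapper name is illustrative:

    static int example_gen_vlsc_key(u16 card, u16 dom,
                                    u8 *buf /* MAXCCAVLSCTOKENSIZE bytes */)
    {
            size_t keybufsize = MAXCCAVLSCTOKENSIZE;
            int rc;

            rc = cca_gencipherkey(card, dom, 256, 0, buf, &keybufsize);
            if (rc == 0)
                    DEBUG_DBG("%s cipher key token with %zu bytes\n",
                              __func__, keybufsize);
            return rc;
    }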
+ +/* + * Helper function, does the CSNBKPI2 CPRB. + */ +static int _ip_cprb_helper(u16 cardnr, u16 domain, + const char *rule_array_1, + const char *rule_array_2, + const char *rule_array_3, + const u8 *clr_key_value, + int clr_key_bit_size, + u8 *key_token, + int *key_token_size) +{ + int rc, n; + u8 *mem; + struct CPRBX *preqcblk, *prepcblk; + struct ica_xcRB xcrb; + struct rule_array_block { + u8 subfunc_code[2]; + u16 rule_array_len; + char rule_array[0]; + } __packed * preq_ra_block; + struct vud_block { + u16 len; + struct { + u16 len; + u16 flag; /* 0x0064 */ + u16 clr_key_bit_len; + } tlv1; + struct { + u16 len; + u16 flag; /* 0x0063 */ + u8 clr_key[0]; /* clear key value bytes */ + } tlv2; + } __packed * preq_vud_block; + struct key_block { + u16 len; + struct { + u16 len; + u16 flag; /* 0x0030 */ + u8 key_token[0]; /* key skeleton */ + } tlv1; + } __packed * preq_key_block; + struct iprepparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct { + u16 len; + } vud; + struct { + u16 len; + struct { + u16 len; + u16 flag; /* 0x0030 */ + u8 key_token[0]; /* key token */ + } tlv1; + } kb; + } __packed * prepparm; + struct cipherkeytoken *t; + int complete = strncmp(rule_array_2, "COMPLETE", 8) ? 0 : 1; + + /* get already prepared memory for 2 cprbs with param block each */ + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + if (rc) + return rc; + + /* fill request cprb struct */ + preqcblk->domain = domain; + preqcblk->req_parml = 0; + + /* prepare request param block with IP request */ + preq_ra_block = (struct rule_array_block *) preqcblk->req_parmb; + memcpy(preq_ra_block->subfunc_code, "IP", 2); + preq_ra_block->rule_array_len = sizeof(uint16_t) + 2 * 8; + memcpy(preq_ra_block->rule_array, rule_array_1, 8); + memcpy(preq_ra_block->rule_array + 8, rule_array_2, 8); + preqcblk->req_parml = sizeof(struct rule_array_block) + 2 * 8; + if (rule_array_3) { + preq_ra_block->rule_array_len += 8; + memcpy(preq_ra_block->rule_array + 16, rule_array_3, 8); + preqcblk->req_parml += 8; + } + + /* prepare vud block */ + preq_vud_block = (struct vud_block *) + (preqcblk->req_parmb + preqcblk->req_parml); + n = complete ? 0 : (clr_key_bit_size + 7) / 8; + preq_vud_block->len = sizeof(struct vud_block) + n; + preq_vud_block->tlv1.len = sizeof(preq_vud_block->tlv1); + preq_vud_block->tlv1.flag = 0x0064; + preq_vud_block->tlv1.clr_key_bit_len = complete ? 0 : clr_key_bit_size; + preq_vud_block->tlv2.len = sizeof(preq_vud_block->tlv2) + n; + preq_vud_block->tlv2.flag = 0x0063; + if (!complete) + memcpy(preq_vud_block->tlv2.clr_key, clr_key_value, n); + preqcblk->req_parml += preq_vud_block->len; + + /* prepare key block */ + preq_key_block = (struct key_block *) + (preqcblk->req_parmb + preqcblk->req_parml); + n = *key_token_size; + preq_key_block->len = sizeof(struct key_block) + n; + preq_key_block->tlv1.len = sizeof(preq_key_block->tlv1) + n; + preq_key_block->tlv1.flag = 0x0030; + memcpy(preq_key_block->tlv1.key_token, key_token, *key_token_size); + preqcblk->req_parml += preq_key_block->len; + + /* prepare xcrb struct */ + prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); + + /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ + rc = _zcrypt_send_cprb(&xcrb); + if (rc) { + DEBUG_ERR( + "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int) cardnr, (int) domain, rc); + goto out; + } + + /* check response returncode and reasoncode */ + if (prepcblk->ccp_rtcode != 0) { + DEBUG_ERR( + "%s CSNBKPI2 failure, card response %d/%d\n", + __func__, + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + rc = -EIO; + goto out; + } + + /* process response cprb param block */ + prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); + prepparm = (struct iprepparm *) prepcblk->rpl_parmb; + + /* do some plausibility checks on the key block */ + if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) || + prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) { + DEBUG_ERR("%s reply with invalid or unknown key block\n", + __func__); + rc = -EIO; + goto out; + } + + /* do not check the key here, it may be incomplete */ + + /* copy the vlsc key token back */ + t = (struct cipherkeytoken *) prepparm->kb.tlv1.key_token; + memcpy(key_token, t, t->len); + *key_token_size = t->len; + +out: + free_cprbmem(mem, PARMBSIZE, 0); + return rc; +}
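cca_clr2cipherkey() (directly below) drives _ip_cprb_helper() through a FIRST/ADD-PART/ADD-PART/COMPLETE sequence in which the imported parts are XORed together: random data r, then the clear key k, then the very same r again. The two random parts cancel, so the accumulated key value ends up being exactly k while the MIN3PART multi-part import rule is still satisfied. An illustrative self-check of that cancellation, not part of the driver:

    static void example_xor_parts_selfcheck(void)
    {
            u8 r[32], k[32], acc[32];
            int i;

            get_random_bytes(r, sizeof(r));         /* stands in for exorbuf */
            get_random_bytes(k, sizeof(k));         /* stands in for the clear key */
            memcpy(acc, r, sizeof(acc));            /* 1/4 FIRST: random part */
            for (i = 0; i < 32; i++)
                    acc[i] ^= k[i];                 /* 2/4 ADD-PART: clear key */
            for (i = 0; i < 32; i++)
                    acc[i] ^= r[i];                 /* 3/4 ADD-PART: same random part */
            /* 4/4 COMPLETE: the accumulated key value is exactly k again */
            WARN_ON(memcmp(acc, k, sizeof(k)) != 0);
    }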
+ +/* + * Build a CCA AES CIPHER secure key with a given clear key value. + */ +int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags, + const u8 *clrkey, u8 *keybuf, size_t *keybufsize) +{ + int rc; + u8 *token; + int tokensize; + u8 exorbuf[32]; + struct cipherkeytoken *t; + + /* fill exorbuf with random data */ + get_random_bytes(exorbuf, sizeof(exorbuf)); + + /* allocate space for the key token to build */ + token = kmalloc(MAXCCAVLSCTOKENSIZE, GFP_KERNEL); + if (!token) + return -ENOMEM; + + /* prepare the token with the key skeleton */ + tokensize = SIZEOF_SKELETON; + memcpy(token, aes_cipher_key_skeleton, tokensize); + + /* patch the skeleton key token export flags */ + if (keygenflags) { + t = (struct cipherkeytoken *) token; + t->kmf1 |= (u16) (keygenflags & 0x0000FF00); + t->kmf1 &= (u16) ~(keygenflags & 0x000000FF); + } + + /* + * Do the key import with the clear key value in 4 steps: + * 1/4 FIRST import with only random data + * 2/4 EXOR the clear key + * 3/4 EXOR the very same random data again + * 4/4 COMPLETE the secure cipher key import + */ + rc = _ip_cprb_helper(card, dom, "AES ", "FIRST ", "MIN3PART", + exorbuf, keybitsize, token, &tokensize); + if (rc) { + DEBUG_ERR( + "%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n", + __func__, rc); + goto out; + } + rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL, + clrkey, keybitsize, token, &tokensize); + if (rc) { + DEBUG_ERR( + "%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n", + __func__, rc); + goto out; + } + rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL, + exorbuf, keybitsize, token, &tokensize); + if (rc) { + DEBUG_ERR( + "%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n", + __func__, rc); + goto out; + } + rc = _ip_cprb_helper(card, dom, "AES ", "COMPLETE", NULL, + NULL, keybitsize, token, &tokensize); + if (rc) { + DEBUG_ERR( + "%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n", + __func__, rc); + goto out; + } + + /* copy the generated key token */ + if (keybuf) { + if (tokensize > *keybufsize) + rc = -EINVAL; + else + memcpy(keybuf, token, tokensize); + } + *keybufsize = tokensize; + +out: + kfree(token); + return rc; +} +EXPORT_SYMBOL(cca_clr2cipherkey); + +/* + * Derive protected key from a CCA AES cipher secure key. 
+ */ +int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) +{ + int rc; + u8 *mem; + struct CPRBX *preqcblk, *prepcblk; + struct ica_xcRB xcrb; + struct aureqparm { + u8 subfunc_code[2]; + u16 rule_array_len; + u8 rule_array[8]; + struct { + u16 len; + u16 tk_blob_len; + u16 tk_blob_tag; + u8 tk_blob[66]; + } vud; + struct { + u16 len; + u16 cca_key_token_len; + u16 cca_key_token_flags; + u8 cca_key_token[0]; // 64 or more + } kb; + } __packed * preqparm; + struct aurepparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct { + u16 len; + u16 sublen; + u16 tag; + struct cpacfkeyblock { + u8 version; /* version of this struct */ + u8 flags[2]; + u8 algo; + u8 form; + u8 pad1[3]; + u16 keylen; + u8 key[64]; /* the key (keylen bytes) */ + u16 keyattrlen; + u8 keyattr[32]; + u8 pad2[1]; + u8 vptype; + u8 vp[32]; /* verification pattern */ + } ckb; + } vud; + struct { + u16 len; + } kb; + } __packed * prepparm; + int keytoklen = ((struct cipherkeytoken *)ckey)->len; + + /* get already prepared memory for 2 cprbs with param block each */ + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + if (rc) + return rc; + + /* fill request cprb struct */ + preqcblk->domain = domain; + + /* fill request cprb param block with AU request */ + preqparm = (struct aureqparm *) preqcblk->req_parmb; + memcpy(preqparm->subfunc_code, "AU", 2); + preqparm->rule_array_len = + sizeof(preqparm->rule_array_len) + + sizeof(preqparm->rule_array); + memcpy(preqparm->rule_array, "EXPT-SK ", 8); + /* vud, tk blob */ + preqparm->vud.len = sizeof(preqparm->vud); + preqparm->vud.tk_blob_len = sizeof(preqparm->vud.tk_blob) + + 2 * sizeof(uint16_t); + preqparm->vud.tk_blob_tag = 0x00C2; + /* kb, cca token */ + preqparm->kb.len = keytoklen + 3 * sizeof(uint16_t); + preqparm->kb.cca_key_token_len = keytoklen + 2 * sizeof(uint16_t); + memcpy(preqparm->kb.cca_key_token, ckey, keytoklen); + /* now fill length of param block into cprb */ + preqcblk->req_parml = sizeof(struct aureqparm) + keytoklen; + + /* fill xcrb struct */ + prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); + + /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ + rc = _zcrypt_send_cprb(&xcrb); + if (rc) { + DEBUG_ERR( + "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int) cardnr, (int) domain, rc); + goto out; + } + + /* check response returncode and reasoncode */ + if (prepcblk->ccp_rtcode != 0) { + DEBUG_ERR( + "%s unwrap secure key failure, card response %d/%d\n", + __func__, + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + rc = -EIO; + goto out; + } + if (prepcblk->ccp_rscode != 0) { + DEBUG_WARN( + "%s unwrap secure key warning, card response %d/%d\n", + __func__, + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + } + + /* process response cprb param block */ + prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); + prepparm = (struct aurepparm *) prepcblk->rpl_parmb; + + /* check the returned keyblock */ + if (prepparm->vud.ckb.version != 0x01) { + DEBUG_ERR( + "%s reply param keyblock version mismatch 0x%02x != 0x01\n", + __func__, (int) prepparm->vud.ckb.version); + rc = -EIO; + goto out; + } + if (prepparm->vud.ckb.algo != 0x02) { + DEBUG_ERR( + "%s reply param keyblock algo mismatch 0x%02x != 0x02\n", + __func__, (int) prepparm->vud.ckb.algo); + rc = -EIO; + goto out; + } + + /* copy the translated protected key */ + switch (prepparm->vud.ckb.keylen) { + case 16+32: + /* AES 128 protected key */ + if 
(protkeytype) + *protkeytype = PKEY_KEYTYPE_AES_128; + break; + case 24+32: + /* AES 192 protected key */ + if (protkeytype) + *protkeytype = PKEY_KEYTYPE_AES_192; + break; + case 32+32: + /* AES 256 protected key */ + if (protkeytype) + *protkeytype = PKEY_KEYTYPE_AES_256; + break; + default: + DEBUG_ERR("%s unknown/unsupported keylen %d\n", + __func__, prepparm->vud.ckb.keylen); + rc = -EIO; + goto out; + } + memcpy(protkey, prepparm->vud.ckb.key, prepparm->vud.ckb.keylen); + if (protkeylen) + *protkeylen = prepparm->vud.ckb.keylen; + +out: + free_cprbmem(mem, PARMBSIZE, 0); + return rc; +} +EXPORT_SYMBOL(cca_cipher2protkey); + +/* + * Query cryptographic facility from CCA adapter + */ +int cca_query_crypto_facility(u16 cardnr, u16 domain, + const char *keyword, + u8 *rarray, size_t *rarraylen, + u8 *varray, size_t *varraylen) +{ + int rc; + u16 len; + u8 *mem, *ptr; + struct CPRBX *preqcblk, *prepcblk; + struct ica_xcRB xcrb; + struct fqreqparm { + u8 subfunc_code[2]; + u16 rule_array_len; + char rule_array[8]; + struct lv1 { + u16 len; + u8 data[VARDATASIZE]; + } lv1; + u16 dummylen; + } __packed * preqparm; + size_t parmbsize = sizeof(struct fqreqparm); + struct fqrepparm { + u8 subfunc_code[2]; + u8 lvdata[0]; + } __packed * prepparm; + + /* get already prepared memory for 2 cprbs with param block each */ + rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk); + if (rc) + return rc; + + /* fill request cprb struct */ + preqcblk->domain = domain; + + /* fill request cprb param block with FQ request */ + preqparm = (struct fqreqparm *) preqcblk->req_parmb; + memcpy(preqparm->subfunc_code, "FQ", 2); + memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array)); + preqparm->rule_array_len = + sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array); + preqparm->lv1.len = sizeof(preqparm->lv1); + preqparm->dummylen = sizeof(preqparm->dummylen); + preqcblk->req_parml = parmbsize; + + /* fill xcrb struct */ + prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); + + /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ + rc = _zcrypt_send_cprb(&xcrb); + if (rc) { + DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int) cardnr, (int) domain, rc); + goto out; + } + + /* check response returncode and reasoncode */ + if (prepcblk->ccp_rtcode != 0) { + DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n", + __func__, + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + rc = -EIO; + goto out; + } + + /* process response cprb param block */ + prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); + prepparm = (struct fqrepparm *) prepcblk->rpl_parmb; + ptr = prepparm->lvdata; + + /* check and possibly copy reply rule array */ + len = *((u16 *) ptr); + if (len > sizeof(u16)) { + ptr += sizeof(u16); + len -= sizeof(u16); + if (rarray && rarraylen && *rarraylen > 0) { + *rarraylen = (len > *rarraylen ? *rarraylen : len); + memcpy(rarray, ptr, *rarraylen); + } + ptr += len; + } + /* check and possibly copy reply var array */ + len = *((u16 *) ptr); + if (len > sizeof(u16)) { + ptr += sizeof(u16); + len -= sizeof(u16); + if (varray && varraylen && *varraylen > 0) { + *varraylen = (len > *varraylen ? *varraylen : len); + memcpy(varray, ptr, *varraylen); + } + ptr += len; + } + +out: + free_cprbmem(mem, parmbsize, 0); + return rc; +} +EXPORT_SYMBOL(cca_query_crypto_facility);
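cca_query_crypto_facility() hands back two length-prefixed blocks, a rule array and a var array; fetch_cca_info() below consumes the STATICSA variant to read the serial number and master key states. A minimal caller sketch with illustrative buffer sizes:

    static int example_staticsa_query(u16 card, u16 dom)
    {
            u8 rarray[8 * 10], varray[256];
            size_t rlen = sizeof(rarray), vlen = sizeof(varray);
            int rc;

            rc = cca_query_crypto_facility(card, dom, "STATICSA",
                                           rarray, &rlen, varray, &vlen);
            if (rc == 0)
                    DEBUG_DBG("%s serial %.8s, %zu rule / %zu var bytes\n",
                              __func__, (const char *) rarray, rlen, vlen);
            return rc;
    }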
*varraylen : len); + memcpy(varray, ptr, *varraylen); + } + ptr += len; + } + +out: + free_cprbmem(mem, parmbsize, 0); + return rc; +} +EXPORT_SYMBOL(cca_query_crypto_facility); + +static int cca_info_cache_fetch(u16 cardnr, u16 domain, struct cca_info *ci) +{ + int rc = -ENOENT; + struct cca_info_list_entry *ptr; + + spin_lock_bh(&cca_info_list_lock); + list_for_each_entry(ptr, &cca_info_list, list) { + if (ptr->cardnr == cardnr && ptr->domain == domain) { + memcpy(ci, &ptr->info, sizeof(*ci)); + rc = 0; + break; + } + } + spin_unlock_bh(&cca_info_list_lock); + + return rc; +} + +static void cca_info_cache_update(u16 cardnr, u16 domain, + const struct cca_info *ci) +{ + int found = 0; + struct cca_info_list_entry *ptr; + + spin_lock_bh(&cca_info_list_lock); + list_for_each_entry(ptr, &cca_info_list, list) { + if (ptr->cardnr == cardnr && + ptr->domain == domain) { + memcpy(&ptr->info, ci, sizeof(*ci)); + found = 1; + break; + } + } + if (!found) { + ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC); + if (!ptr) { + spin_unlock_bh(&cca_info_list_lock); + return; + } + ptr->cardnr = cardnr; + ptr->domain = domain; + memcpy(&ptr->info, ci, sizeof(*ci)); + list_add(&ptr->list, &cca_info_list); + } + spin_unlock_bh(&cca_info_list_lock); +} + +static void cca_info_cache_scrub(u16 cardnr, u16 domain) +{ + struct cca_info_list_entry *ptr; + + spin_lock_bh(&cca_info_list_lock); + list_for_each_entry(ptr, &cca_info_list, list) { + if (ptr->cardnr == cardnr && + ptr->domain == domain) { + list_del(&ptr->list); + kfree(ptr); + break; + } + } + spin_unlock_bh(&cca_info_list_lock); +} + +static void __exit mkvp_cache_free(void) +{ + struct cca_info_list_entry *ptr, *pnext; + + spin_lock_bh(&cca_info_list_lock); + list_for_each_entry_safe(ptr, pnext, &cca_info_list, list) { + list_del(&ptr->list); + kfree(ptr); + } + spin_unlock_bh(&cca_info_list_lock); +} + +/* + * Fetch cca_info values via query_crypto_facility from adapter. + */ +static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) +{ + int rc, found = 0; + size_t rlen, vlen; + u8 *rarray, *varray, *pg; + struct zcrypt_device_status_ext devstat; + + memset(ci, 0, sizeof(*ci)); + + /* get first info from zcrypt device driver about this apqn */ + rc = zcrypt_device_status_ext(cardnr, domain, &devstat); + if (rc) + return rc; + ci->hwtype = devstat.hwtype; + + /* prep page for rule array and var array use */ + pg = (u8 *) __get_free_page(GFP_KERNEL); + if (!pg) + return -ENOMEM; + rarray = pg; + varray = pg + PAGE_SIZE/2; + rlen = vlen = PAGE_SIZE/2; + + /* QF for this card/domain */ + rc = cca_query_crypto_facility(cardnr, domain, "STATICSA", + rarray, &rlen, varray, &vlen); + if (rc == 0 && rlen >= 10*8 && vlen >= 204) { + memcpy(ci->serial, rarray, 8); + ci->new_mk_state = (char) rarray[7*8]; + ci->cur_mk_state = (char) rarray[8*8]; + ci->old_mk_state = (char) rarray[9*8]; + if (ci->old_mk_state == '2') + memcpy(&ci->old_mkvp, varray + 172, 8); + if (ci->cur_mk_state == '2') + memcpy(&ci->cur_mkvp, varray + 184, 8); + if (ci->new_mk_state == '3') + memcpy(&ci->new_mkvp, varray + 196, 8); + found = 1; + } + + free_page((unsigned long) pg); + + return found ? 0 : -ENOENT; +} + +/* + * Fetch cca information about a CCA queue. 
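+ * The info is taken from the cca_info cache unless there is a cache + * miss or verify is nonzero; in that case the info is fetched fresh + * from the adapter and the cache is updated. Returns 0 on success or + * a negative errno value. A minimal caller sketch (hypothetical, for + * illustration only): + * + * struct cca_info ci; + * + * if (cca_get_info(card, dom, &ci, 0) == 0 && ci.cur_mk_state == '2') + * pr_debug("%02x.%04x cur mkvp 0x%016llx\n", + * card, dom, ci.cur_mkvp);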
+ */ +int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify) +{ + int rc; + + rc = cca_info_cache_fetch(card, dom, ci); + if (rc || verify) { + rc = fetch_cca_info(card, dom, ci); + if (rc == 0) + cca_info_cache_update(card, dom, ci); + } + + return rc; +} +EXPORT_SYMBOL(cca_get_info); + +/* + * Search for a matching crypto card based on the + * Master Key Verification Pattern given. + */ +static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain, + int verify, int minhwtype) +{ + struct zcrypt_device_status_ext *device_status; + u16 card, dom; + struct cca_info ci; + int i, rc, oi = -1; + + /* mkvp must not be zero, minhwtype needs to be >= 0 */ + if (mkvp == 0 || minhwtype < 0) + return -EINVAL; + + /* fetch status of all crypto cards */ + device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT, + sizeof(struct zcrypt_device_status_ext), + GFP_KERNEL); + if (!device_status) + return -ENOMEM; + zcrypt_device_status_mask_ext(device_status); + + /* walk through all crypto cards */ + for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { + card = AP_QID_CARD(device_status[i].qid); + dom = AP_QID_QUEUE(device_status[i].qid); + if (device_status[i].online && + device_status[i].functions & 0x04) { + /* enabled CCA card, check current mkvp from cache */ + if (cca_info_cache_fetch(card, dom, &ci) == 0 && + ci.hwtype >= minhwtype && + ci.cur_mk_state == '2' && + ci.cur_mkvp == mkvp) { + if (!verify) + break; + /* verify: refresh card info */ + if (fetch_cca_info(card, dom, &ci) == 0) { + cca_info_cache_update(card, dom, &ci); + if (ci.hwtype >= minhwtype && + ci.cur_mk_state == '2' && + ci.cur_mkvp == mkvp) + break; + } + } + } else { + /* Card is offline and/or not a CCA card. */ + /* del mkvp entry from cache if it exists */ + cca_info_cache_scrub(card, dom); + } + } + if (i >= MAX_ZDEV_ENTRIES_EXT) { + /* nothing found, so this time without cache */ + for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { + if (!(device_status[i].online && + device_status[i].functions & 0x04)) + continue; + card = AP_QID_CARD(device_status[i].qid); + dom = AP_QID_QUEUE(device_status[i].qid); + /* fresh fetch mkvp from adapter */ + if (fetch_cca_info(card, dom, &ci) == 0) { + cca_info_cache_update(card, dom, &ci); + if (ci.hwtype >= minhwtype && + ci.cur_mk_state == '2' && + ci.cur_mkvp == mkvp) + break; + if (ci.hwtype >= minhwtype && + ci.old_mk_state == '2' && + ci.old_mkvp == mkvp && + oi < 0) + oi = i; + } + } + if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) { + /* old mkvp matched, use this card then */ + card = AP_QID_CARD(device_status[oi].qid); + dom = AP_QID_QUEUE(device_status[oi].qid); + } + } + if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) { + if (pcardnr) + *pcardnr = card; + if (pdomain) + *pdomain = dom; + rc = (i < MAX_ZDEV_ENTRIES_EXT ? 0 : 1); + } else + rc = -ENODEV; + + kfree(device_status); + return rc; +} + +/* + * Search for a matching crypto card based on the Master Key + * Verification Pattern provided inside a secure key token. 
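+ * Returns < 0 on failure, 0 if the CURRENT mkvp matched and 1 if the + * OLD mkvp matched. A hypothetical caller sketch, given a secure key + * token in seckey: + * + * u16 cardnr, domain; + * + * if (cca_findcard(seckey, &cardnr, &domain, 0) >= 0) + * ... cardnr/domain now name an APQN with a matching master key ...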
+ */ +int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify) +{ + u64 mkvp; + int minhwtype = 0; + const struct keytoken_header *hdr = (struct keytoken_header *) key; + + if (hdr->type != TOKTYPE_CCA_INTERNAL) + return -EINVAL; + + switch (hdr->version) { + case TOKVER_CCA_AES: + mkvp = ((struct secaeskeytoken *)key)->mkvp; + break; + case TOKVER_CCA_VLSC: + mkvp = ((struct cipherkeytoken *)key)->mkvp0; + minhwtype = AP_DEVICE_TYPE_CEX6; + break; + default: + return -EINVAL; + } + + return findcard(mkvp, pcardnr, pdomain, verify, minhwtype); +} +EXPORT_SYMBOL(cca_findcard); + +int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, + int minhwtype, u64 cur_mkvp, u64 old_mkvp, int verify) +{ + struct zcrypt_device_status_ext *device_status; + int i, n, card, dom, curmatch, oldmatch, rc = 0; + struct cca_info ci; + + *apqns = NULL; + *nr_apqns = 0; + + /* fetch status of all crypto cards */ + device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT, + sizeof(struct zcrypt_device_status_ext), + GFP_KERNEL); + if (!device_status) + return -ENOMEM; + zcrypt_device_status_mask_ext(device_status); + + /* loop two times: first gather eligible apqns, then store them */ + while (1) { + n = 0; + /* walk through all the crypto cards */ + for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { + card = AP_QID_CARD(device_status[i].qid); + dom = AP_QID_QUEUE(device_status[i].qid); + /* check online state */ + if (!device_status[i].online) + continue; + /* check for cca functions */ + if (!(device_status[i].functions & 0x04)) + continue; + /* check cardnr */ + if (cardnr != 0xFFFF && card != cardnr) + continue; + /* check domain */ + if (domain != 0xFFFF && dom != domain) + continue; + /* get cca info on this apqn */ + if (cca_get_info(card, dom, &ci, verify)) + continue; + /* current master key needs to be valid */ + if (ci.cur_mk_state != '2') + continue; + /* check min hardware type */ + if (minhwtype > 0 && minhwtype > ci.hwtype) + continue; + if (cur_mkvp || old_mkvp) { + /* check mkvps */ + curmatch = oldmatch = 0; + if (cur_mkvp && cur_mkvp == ci.cur_mkvp) + curmatch = 1; + if (old_mkvp && ci.old_mk_state == '2' && + old_mkvp == ci.old_mkvp) + oldmatch = 1; + if ((cur_mkvp || old_mkvp) && + (curmatch + oldmatch < 1)) + continue; + } + /* apqn passed all filtering criteria */ + if (*apqns && n < *nr_apqns) + (*apqns)[n] = (((u16)card) << 16) | ((u16) dom); + n++; + } + /* loop 2nd time: array has been filled */ + if (*apqns) + break; + /* loop 1st time: have # of eligible apqns in n */ + if (!n) { + rc = -ENODEV; /* no eligible apqns found */ + break; + } + *nr_apqns = n; + /* allocate array to store n apqns into */ + *apqns = kmalloc_array(n, sizeof(u32), GFP_KERNEL); + if (!*apqns) { + rc = -ENOMEM; + break; + } + verify = 0; + } + + kfree(device_status); + return rc; +} +EXPORT_SYMBOL(cca_findcard2); + +void __exit zcrypt_ccamisc_exit(void) +{ + mkvp_cache_free(); +} diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h new file mode 100644 index 000000000000..3a9876d5ab0e --- /dev/null +++ b/drivers/s390/crypto/zcrypt_ccamisc.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright IBM Corp.
2019 + * Author(s): Harald Freudenberger <freude@linux.ibm.com> + * Ingo Franzki <ifranzki@linux.ibm.com> + * + * Collection of CCA misc functions used by zcrypt and pkey + */ + +#ifndef _ZCRYPT_CCAMISC_H_ +#define _ZCRYPT_CCAMISC_H_ + +#include <asm/zcrypt.h> +#include <asm/pkey.h> + +/* Key token types */ +#define TOKTYPE_NON_CCA 0x00 /* Non-CCA key token */ +#define TOKTYPE_CCA_INTERNAL 0x01 /* CCA internal key token */ + +/* For TOKTYPE_NON_CCA: */ +#define TOKVER_PROTECTED_KEY 0x01 /* Protected key token */ +#define TOKVER_CLEAR_KEY 0x02 /* Clear key token */ + +/* For TOKTYPE_CCA_INTERNAL: */ +#define TOKVER_CCA_AES 0x04 /* CCA AES key token */ +#define TOKVER_CCA_VLSC 0x05 /* var length sym cipher key token */ + +/* Max size of a cca variable length cipher key token */ +#define MAXCCAVLSCTOKENSIZE 725 + +/* header part of a CCA key token */ +struct keytoken_header { + u8 type; /* one of the TOKTYPE values */ + u8 res0[1]; + u16 len; /* vlsc token: total length in bytes */ + u8 version; /* one of the TOKVER values */ + u8 res1[3]; +} __packed; + +/* inside view of a CCA secure key token (only type 0x01 version 0x04) */ +struct secaeskeytoken { + u8 type; /* 0x01 for internal key token */ + u8 res0[3]; + u8 version; /* should be 0x04 */ + u8 res1[1]; + u8 flag; /* key flags */ + u8 res2[1]; + u64 mkvp; /* master key verification pattern */ + u8 key[32]; /* key value (encrypted) */ + u8 cv[8]; /* control vector */ + u16 bitsize; /* key bit size */ + u16 keysize; /* key byte size */ + u8 tvv[4]; /* token validation value */ +} __packed; + +/* inside view of a variable length symmetric cipher AES key token */ +struct cipherkeytoken { + u8 type; /* 0x01 for internal key token */ + u8 res0[1]; + u16 len; /* total key token length in bytes */ + u8 version; /* should be 0x05 */ + u8 res1[3]; + u8 kms; /* key material state, 0x03 means wrapped with MK */ + u8 kvpt; /* key verification pattern type, should be 0x01 */ + u64 mkvp0; /* master key verification pattern, lo part */ + u64 mkvp1; /* master key verification pattern, hi part (unused) */ + u8 eskwm; /* encrypted section key wrapping method */ + u8 hashalg; /* hash algorithm used for wrapping key */ + u8 plfver; /* payload format version */ + u8 res2[1]; + u8 adsver; /* associated data section version */ + u8 res3[1]; + u16 adslen; /* associated data section length */ + u8 kllen; /* optional key label length */ + u8 ieaslen; /* optional extended associated data length */ + u8 uadlen; /* optional user definable associated data length */ + u8 res4[1]; + u16 wpllen; /* wrapped payload length in bits: */ + /* plfver 0x00 0x01 */ + /* AES-128 512 640 */ + /* AES-192 576 640 */ + /* AES-256 640 640 */ + u8 res5[1]; + u8 algtype; /* 0x02 for AES cipher */ + u16 keytype; /* 0x0001 for 'cipher' */ + u8 kufc; /* key usage field count */ + u16 kuf1; /* key usage field 1 */ + u16 kuf2; /* key usage field 2 */ + u8 kmfc; /* key management field count */ + u16 kmf1; /* key management field 1 */ + u16 kmf2; /* key management field 2 */ + u16 kmf3; /* key management field 3 */ + u8 vdata[0]; /* variable part data follows */ +} __packed; + +/* Some defines for the CCA AES cipherkeytoken kmf1 field */ +#define KMF1_XPRT_SYM 0x8000 +#define KMF1_XPRT_UASY 0x4000 +#define KMF1_XPRT_AASY 0x2000 +#define KMF1_XPRT_RAW 0x1000 +#define KMF1_XPRT_CPAC 0x0800 +#define KMF1_XPRT_DES 0x0080 +#define KMF1_XPRT_AES 0x0040 +#define KMF1_XPRT_RSA 0x0008 + +/* + * Simple check if the token is a valid CCA secure AES data key + * token.
If keybitsize is given, the bitsize of the key is + * also checked. Returns 0 on success or errno value on failure. + */ +int cca_check_secaeskeytoken(debug_info_t *dbg, int dbflvl, + const u8 *token, int keybitsize); + +/* + * Simple check if the token is a valid CCA secure AES cipher key + * token. If keybitsize is given, the bitsize of the key is + * also checked. If checkcpacfexport is enabled, the key is also + * checked for the export flag to allow CPACF export. + * Returns 0 on success or errno value on failure. + */ +int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl, + const u8 *token, int keybitsize, + int checkcpacfexport); + +/* + * Generate (random) CCA AES DATA secure key. + */ +int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey); + +/* + * Generate CCA AES DATA secure key with given clear key value. + */ +int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, + const u8 *clrkey, u8 *seckey); + +/* + * Derive protected key from a CCA AES DATA secure key. + */ +int cca_sec2protkey(u16 cardnr, u16 domain, + const u8 seckey[SECKEYBLOBSIZE], + u8 *protkey, u32 *protkeylen, u32 *protkeytype); + +/* + * Generate (random) CCA AES CIPHER secure key. + */ +int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, + u8 *keybuf, size_t *keybufsize); + +/* + * Derive protected key from a CCA AES cipher secure key. + */ +int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, + u8 *protkey, u32 *protkeylen, u32 *protkeytype); + +/* + * Build CCA AES CIPHER secure key with a given clear key value. + */ +int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, + const u8 *clrkey, u8 *keybuf, size_t *keybufsize); + +/* + * Query cryptographic facility from CCA adapter + */ +int cca_query_crypto_facility(u16 cardnr, u16 domain, + const char *keyword, + u8 *rarray, size_t *rarraylen, + u8 *varray, size_t *varraylen); + +/* + * Search for a matching crypto card based on the Master Key + * Verification Pattern provided inside a secure key. + * Works with CCA AES data and cipher keys. + * Returns < 0 on failure, 0 if CURRENT MKVP matches and + * 1 if OLD MKVP matches. + */ +int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify); + +/* + * Build a list of cca apqns meeting the following constraints: + * - apqn is online and is in fact a CCA apqn + * - if cardnr is not FFFF only apqns with this cardnr + * - if domain is not FFFF only apqns with this domainnr + * - if minhwtype > 0 only apqns with hwtype >= minhwtype + * - if cur_mkvp != 0 only apqns where cur_mkvp == mkvp + * - if old_mkvp != 0 only apqns where old_mkvp == mkvp + * - if verify is enabled and a cur_mkvp and/or old_mkvp + * value is given, then refetch the cca_info and make sure the current + * cur_mkvp or old_mkvp values of the apqn are used. + * The array of apqn entries is allocated with kmalloc and returned in *apqns; + * the number of apqns stored into the list is returned in *nr_apqns. One apqn + * entry is simply a 32 bit value with 16 bit cardnr and 16 bit domain nr and + * may be cast to struct pkey_apqn. The return value is either 0 for success + * or a negative errno value. If no apqn meeting the criteria is found, + * -ENODEV is returned.
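+ * A minimal caller sketch (hypothetical, for illustration only): + * + * u32 *apqns = NULL, nr_apqns = 0; + * int rc; + * + * rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, + * 0, mkvp, 0, 0); + * if (rc == 0) { + * ... use the nr_apqns entries in apqns[] ... + * kfree(apqns); + * }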
+ */ +int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, + int minhwtype, u64 cur_mkvp, u64 old_mkvp, int verify); + +/* struct to hold info for each CCA queue */ +struct cca_info { + int hwtype; /* one of the defined AP_DEVICE_TYPE_* */ + char new_mk_state; /* '1' empty, '2' partially full, '3' full */ + char cur_mk_state; /* '1' invalid, '2' valid */ + char old_mk_state; /* '1' invalid, '2' valid */ + u64 new_mkvp; /* truncated sha256 hash of new master key */ + u64 cur_mkvp; /* truncated sha256 hash of current master key */ + u64 old_mkvp; /* truncated sha256 hash of old master key */ + char serial[9]; /* serial number string (8 ascii numbers + 0x00) */ +}; + +/* + * Fetch cca information about a CCA queue. + */ +int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify); + +void zcrypt_ccamisc_exit(void); + +#endif /* _ZCRYPT_CCAMISC_H_ */ diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index c50f3e86cc74..7cbb384ec535 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -175,6 +175,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev) zq->queue = aq; zq->online = 1; atomic_set(&zq->load, 0); + ap_queue_init_state(aq); ap_queue_init_reply(aq, &zq->reply); aq->request_timeout = CEX2A_CLEANUP_TIME, aq->private = zq; diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c index 35c7c6672713..c78c0d119806 100644 --- a/drivers/s390/crypto/zcrypt_cex2c.c +++ b/drivers/s390/crypto/zcrypt_cex2c.c @@ -220,6 +220,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev) zq->queue = aq; zq->online = 1; atomic_set(&zq->load, 0); + ap_rapq(aq->qid); rc = zcrypt_cex2c_rng_supported(aq); if (rc < 0) { zcrypt_queue_free(zq); @@ -231,6 +232,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev) else zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, MSGTYPE06_VARIANT_NORNG); + ap_queue_init_state(aq); ap_queue_init_reply(aq, &zq->reply); aq->request_timeout = CEX2C_CLEANUP_TIME; aq->private = zq; diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index 582ffa7e0f18..9a9d02e19774 100644 --- a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright IBM Corp. 2012 + * Copyright IBM Corp. 2012, 2019 * Author(s): Holger Dengler <hd@linux.vnet.ibm.com> */ @@ -18,6 +18,8 @@ #include "zcrypt_msgtype50.h" #include "zcrypt_error.h" #include "zcrypt_cex4.h" +#include "zcrypt_ccamisc.h" +#include "zcrypt_ep11misc.h" #define CEX4A_MIN_MOD_SIZE 1 /* 8 bits */ #define CEX4A_MAX_MOD_SIZE_2K 256 /* 2048 bits */ @@ -37,8 +39,8 @@ #define CEX4_CLEANUP_TIME (900*HZ) MODULE_AUTHOR("IBM Corporation"); -MODULE_DESCRIPTION("CEX4/CEX5/CEX6 Cryptographic Card device driver, " \ - "Copyright IBM Corp. 2018"); +MODULE_DESCRIPTION("CEX4/CEX5/CEX6/CEX7 Cryptographic Card device driver, " \ + "Copyright IBM Corp.
2019"); MODULE_LICENSE("GPL"); static struct ap_device_id zcrypt_cex4_card_ids[] = { @@ -48,6 +50,8 @@ static struct ap_device_id zcrypt_cex4_card_ids[] = { .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, { .dev_type = AP_DEVICE_TYPE_CEX6, .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX7, + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, { /* end of list */ }, }; @@ -60,13 +64,312 @@ static struct ap_device_id zcrypt_cex4_queue_ids[] = { .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, { .dev_type = AP_DEVICE_TYPE_CEX6, .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX7, + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, { /* end of list */ }, }; MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids); +/* + * CCA card additional device attributes + */ +static ssize_t cca_serialnr_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cca_info ci; + struct ap_card *ac = to_ap_card(dev); + struct zcrypt_card *zc = ac->private; + + memset(&ci, 0, sizeof(ci)); + + if (ap_domain_index >= 0) + cca_get_info(ac->id, ap_domain_index, &ci, zc->online); + + return snprintf(buf, PAGE_SIZE, "%s\n", ci.serial); +} + +static struct device_attribute dev_attr_cca_serialnr = + __ATTR(serialnr, 0444, cca_serialnr_show, NULL); + +static struct attribute *cca_card_attrs[] = { + &dev_attr_cca_serialnr.attr, + NULL, +}; + +static const struct attribute_group cca_card_attr_grp = { + .attrs = cca_card_attrs, +}; + + /* + * CCA queue additional device attributes + */ +static ssize_t cca_mkvps_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int n = 0; + struct cca_info ci; + struct zcrypt_queue *zq = to_ap_queue(dev)->private; + static const char * const cao_state[] = { "invalid", "valid" }; + static const char * const new_state[] = { "empty", "partial", "full" }; + + memset(&ci, 0, sizeof(ci)); + + cca_get_info(AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + &ci, zq->online); + + if (ci.new_mk_state >= '1' && ci.new_mk_state <= '3') + n = snprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n", + new_state[ci.new_mk_state - '1'], ci.new_mkvp); + else + n = snprintf(buf, PAGE_SIZE, "AES NEW: - -\n"); + + if (ci.cur_mk_state >= '1' && ci.cur_mk_state <= '2') + n += snprintf(buf + n, PAGE_SIZE - n, "AES CUR: %s 0x%016llx\n", + cao_state[ci.cur_mk_state - '1'], ci.cur_mkvp); + else + n += snprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n"); + + if (ci.old_mk_state >= '1' && ci.old_mk_state <= '2') + n += snprintf(buf + n, PAGE_SIZE - n, "AES OLD: %s 0x%016llx\n", + cao_state[ci.old_mk_state - '1'], ci.old_mkvp); + else + n += snprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n"); + + return n; +} + +static struct device_attribute dev_attr_cca_mkvps = + __ATTR(mkvps, 0444, cca_mkvps_show, NULL); + +static struct attribute *cca_queue_attrs[] = { + &dev_attr_cca_mkvps.attr, + NULL, +}; + +static const struct attribute_group cca_queue_attr_grp = { + .attrs = cca_queue_attrs, +}; + +/* + * EP11 card additional device attributes + */ +static ssize_t ep11_api_ordinalnr_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ep11_card_info ci; + struct ap_card *ac = to_ap_card(dev); + struct zcrypt_card *zc = ac->private; + + memset(&ci, 0, sizeof(ci)); + + ep11_get_card_info(ac->id, &ci, zc->online); + + if (ci.API_ord_nr > 0) + return snprintf(buf, PAGE_SIZE, "%u\n", ci.API_ord_nr); + else + return snprintf(buf, PAGE_SIZE, "\n"); +} + +static struct device_attribute 
dev_attr_ep11_api_ordinalnr = + __ATTR(API_ordinalnr, 0444, ep11_api_ordinalnr_show, NULL); + +static ssize_t ep11_fw_version_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ep11_card_info ci; + struct ap_card *ac = to_ap_card(dev); + struct zcrypt_card *zc = ac->private; + + memset(&ci, 0, sizeof(ci)); + + ep11_get_card_info(ac->id, &ci, zc->online); + + if (ci.FW_version > 0) + return snprintf(buf, PAGE_SIZE, "%d.%d\n", + (int)(ci.FW_version >> 8), + (int)(ci.FW_version & 0xFF)); + else + return snprintf(buf, PAGE_SIZE, "\n"); +} + +static struct device_attribute dev_attr_ep11_fw_version = + __ATTR(FW_version, 0444, ep11_fw_version_show, NULL); + +static ssize_t ep11_serialnr_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ep11_card_info ci; + struct ap_card *ac = to_ap_card(dev); + struct zcrypt_card *zc = ac->private; + + memset(&ci, 0, sizeof(ci)); + + ep11_get_card_info(ac->id, &ci, zc->online); + + if (ci.serial[0]) + return snprintf(buf, PAGE_SIZE, "%16.16s\n", ci.serial); + else + return snprintf(buf, PAGE_SIZE, "\n"); +} + +static struct device_attribute dev_attr_ep11_serialnr = + __ATTR(serialnr, 0444, ep11_serialnr_show, NULL); + +static const struct { + int mode_bit; + const char *mode_txt; +} ep11_op_modes[] = { + { 0, "FIPS2009" }, + { 1, "BSI2009" }, + { 2, "FIPS2011" }, + { 3, "BSI2011" }, + { 6, "BSICC2017" }, + { 0, NULL } +}; + +static ssize_t ep11_card_op_modes_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int i, n = 0; + struct ep11_card_info ci; + struct ap_card *ac = to_ap_card(dev); + struct zcrypt_card *zc = ac->private; + + memset(&ci, 0, sizeof(ci)); + + ep11_get_card_info(ac->id, &ci, zc->online); + + for (i = 0; ep11_op_modes[i].mode_txt; i++) { + if (ci.op_mode & (1 << ep11_op_modes[i].mode_bit)) { + if (n > 0) + buf[n++] = ' '; + n += snprintf(buf + n, PAGE_SIZE - n, + "%s", ep11_op_modes[i].mode_txt); + } + } + n += snprintf(buf + n, PAGE_SIZE - n, "\n"); + + return n; +} + +static struct device_attribute dev_attr_ep11_card_op_modes = + __ATTR(op_modes, 0444, ep11_card_op_modes_show, NULL); + +static struct attribute *ep11_card_attrs[] = { + &dev_attr_ep11_api_ordinalnr.attr, + &dev_attr_ep11_fw_version.attr, + &dev_attr_ep11_serialnr.attr, + &dev_attr_ep11_card_op_modes.attr, + NULL, +}; + +static const struct attribute_group ep11_card_attr_grp = { + .attrs = ep11_card_attrs, +}; + +/* + * EP11 queue additional device attributes + */ + +static ssize_t ep11_mkvps_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int n = 0; + struct ep11_domain_info di; + struct zcrypt_queue *zq = to_ap_queue(dev)->private; + static const char * const cwk_state[] = { "invalid", "valid" }; + static const char * const nwk_state[] = { "empty", "uncommitted", + "committed" }; + + memset(&di, 0, sizeof(di)); + + if (zq->online) + ep11_get_domain_info(AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + &di); + + if (di.cur_wk_state == '0') { + n = snprintf(buf, PAGE_SIZE, "WK CUR: %s -\n", + cwk_state[di.cur_wk_state - '0']); + } else if (di.cur_wk_state == '1') { + n = snprintf(buf, PAGE_SIZE, "WK CUR: %s 0x", + cwk_state[di.cur_wk_state - '0']); + bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp)); + n += 2 * sizeof(di.cur_wkvp); + n += snprintf(buf + n, PAGE_SIZE - n, "\n"); + } else + n = snprintf(buf, PAGE_SIZE, "WK CUR: - -\n"); + + if (di.new_wk_state == '0') { + n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n", + 
nwk_state[di.new_wk_state - '0']); + } else if (di.new_wk_state >= '1' && di.new_wk_state <= '2') { + n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s 0x", + nwk_state[di.new_wk_state - '0']); + bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp)); + n += 2 * sizeof(di.new_wkvp); + n += snprintf(buf + n, PAGE_SIZE - n, "\n"); + } else + n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n"); + + return n; +} + +static struct device_attribute dev_attr_ep11_mkvps = + __ATTR(mkvps, 0444, ep11_mkvps_show, NULL); + +static ssize_t ep11_queue_op_modes_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int i, n = 0; + struct ep11_domain_info di; + struct zcrypt_queue *zq = to_ap_queue(dev)->private; + + memset(&di, 0, sizeof(di)); + + if (zq->online) + ep11_get_domain_info(AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + &di); + + for (i = 0; ep11_op_modes[i].mode_txt; i++) { + if (di.op_mode & (1 << ep11_op_modes[i].mode_bit)) { + if (n > 0) + buf[n++] = ' '; + n += snprintf(buf + n, PAGE_SIZE - n, + "%s", ep11_op_modes[i].mode_txt); + } + } + n += snprintf(buf + n, PAGE_SIZE - n, "\n"); + + return n; +} + +static struct device_attribute dev_attr_ep11_queue_op_modes = + __ATTR(op_modes, 0444, ep11_queue_op_modes_show, NULL); + +static struct attribute *ep11_queue_attrs[] = { + &dev_attr_ep11_mkvps.attr, + &dev_attr_ep11_queue_op_modes.attr, + NULL, +}; + +static const struct attribute_group ep11_queue_attr_grp = { + .attrs = ep11_queue_attrs, +}; + /** - * Probe function for CEX4/CEX5/CEX6 card device. It always + * Probe function for CEX4/CEX5/CEX6/CEX7 card device. It always * accepts the AP device since the bus_match already checked * the hardware type. * @ap_dev: pointer to the AP device. @@ -78,25 +381,31 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY */ static const int CEX4A_SPEED_IDX[] = { - 14, 19, 249, 42, 228, 1458, 0, 0}; + 14, 19, 249, 42, 228, 1458, 0, 0}; static const int CEX5A_SPEED_IDX[] = { - 8, 9, 20, 18, 66, 458, 0, 0}; + 8, 9, 20, 18, 66, 458, 0, 0}; static const int CEX6A_SPEED_IDX[] = { - 6, 9, 20, 17, 65, 438, 0, 0}; + 6, 9, 20, 17, 65, 438, 0, 0}; + static const int CEX7A_SPEED_IDX[] = { + 6, 8, 17, 15, 54, 362, 0, 0}; static const int CEX4C_SPEED_IDX[] = { 59, 69, 308, 83, 278, 2204, 209, 40}; static const int CEX5C_SPEED_IDX[] = { - 24, 31, 50, 37, 90, 479, 27, 10}; + 24, 31, 50, 37, 90, 479, 27, 10}; static const int CEX6C_SPEED_IDX[] = { - 16, 20, 32, 27, 77, 455, 23, 9}; + 16, 20, 32, 27, 77, 455, 24, 9}; + static const int CEX7C_SPEED_IDX[] = { + 14, 16, 26, 23, 64, 376, 23, 8}; static const int CEX4P_SPEED_IDX[] = { - 224, 313, 3560, 359, 605, 2827, 0, 50}; + 0, 0, 0, 0, 0, 0, 0, 50}; static const int CEX5P_SPEED_IDX[] = { - 63, 84, 156, 83, 142, 533, 0, 10}; + 0, 0, 0, 0, 0, 0, 0, 10}; static const int CEX6P_SPEED_IDX[] = { - 55, 70, 121, 73, 129, 522, 0, 9}; + 0, 0, 0, 0, 0, 0, 0, 9}; + static const int CEX7P_SPEED_IDX[] = { + 0, 0, 0, 0, 0, 0, 0, 8}; struct ap_card *ac = to_ap_card(&ap_dev->device); struct zcrypt_card *zc; @@ -118,11 +427,19 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) zc->user_space_type = ZCRYPT_CEX5; memcpy(zc->speed_rating, CEX5A_SPEED_IDX, sizeof(CEX5A_SPEED_IDX)); - } else { + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) { zc->type_string = "CEX6A"; zc->user_space_type = ZCRYPT_CEX6; memcpy(zc->speed_rating, CEX6A_SPEED_IDX, sizeof(CEX6A_SPEED_IDX)); + } else { + zc->type_string = "CEX7A"; + /* 
wrong user space type, just for compatibility + * with the ZCRYPT_STATUS_MASK ioctl. + */ + zc->user_space_type = ZCRYPT_CEX6; + memcpy(zc->speed_rating, CEX7A_SPEED_IDX, + sizeof(CEX7A_SPEED_IDX)); } zc->min_mod_size = CEX4A_MIN_MOD_SIZE; if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) && @@ -152,7 +469,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) zc->user_space_type = ZCRYPT_CEX3C; memcpy(zc->speed_rating, CEX5C_SPEED_IDX, sizeof(CEX5C_SPEED_IDX)); - } else { + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) { zc->type_string = "CEX6C"; /* wrong user space type, must be CEX6 * just keep it for cca compatibility @@ -160,6 +477,14 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) zc->user_space_type = ZCRYPT_CEX3C; memcpy(zc->speed_rating, CEX6C_SPEED_IDX, sizeof(CEX6C_SPEED_IDX)); + } else { + zc->type_string = "CEX7C"; + /* wrong user space type, must be CEX7 + * just keep it for cca compatibility + */ + zc->user_space_type = ZCRYPT_CEX3C; + memcpy(zc->speed_rating, CEX7C_SPEED_IDX, + sizeof(CEX7C_SPEED_IDX)); } zc->min_mod_size = CEX4C_MIN_MOD_SIZE; zc->max_mod_size = CEX4C_MAX_MOD_SIZE; @@ -175,11 +500,19 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) zc->user_space_type = ZCRYPT_CEX5; memcpy(zc->speed_rating, CEX5P_SPEED_IDX, sizeof(CEX5P_SPEED_IDX)); - } else { + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) { zc->type_string = "CEX6P"; zc->user_space_type = ZCRYPT_CEX6; memcpy(zc->speed_rating, CEX6P_SPEED_IDX, sizeof(CEX6P_SPEED_IDX)); + } else { + zc->type_string = "CEX7P"; + /* wrong user space type, just for compatibility + * with the ZCRYPT_STATUS_MASK ioctl. + */ + zc->user_space_type = ZCRYPT_CEX6; + memcpy(zc->speed_rating, CEX7P_SPEED_IDX, + sizeof(CEX7P_SPEED_IDX)); } zc->min_mod_size = CEX4C_MIN_MOD_SIZE; zc->max_mod_size = CEX4C_MAX_MOD_SIZE; @@ -194,19 +527,38 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) if (rc) { ac->private = NULL; zcrypt_card_free(zc); + goto out; + } + + if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) { + rc = sysfs_create_group(&ap_dev->device.kobj, + &cca_card_attr_grp); + if (rc) + zcrypt_card_unregister(zc); + } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) { + rc = sysfs_create_group(&ap_dev->device.kobj, + &ep11_card_attr_grp); + if (rc) + zcrypt_card_unregister(zc); } +out: return rc; } /** - * This is called to remove the CEX4/CEX5/CEX6 card driver information - * if an AP card device is removed. + * This is called to remove the CEX4/CEX5/CEX6/CEX7 card driver + * information if an AP card device is removed. */ static void zcrypt_cex4_card_remove(struct ap_device *ap_dev) { - struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private; + struct ap_card *ac = to_ap_card(&ap_dev->device); + struct zcrypt_card *zc = ac->private; + if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) + sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp); + else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) + sysfs_remove_group(&ap_dev->device.kobj, &ep11_card_attr_grp); if (zc) zcrypt_card_unregister(zc); } @@ -219,7 +571,7 @@ static struct ap_driver zcrypt_cex4_card_driver = { }; /** - * Probe function for CEX4/CEX5/CEX6 queue device. It always + * Probe function for CEX4/CEX5/CEX6/CEX7 queue device. It always * accepts the AP device since the bus_match already checked * the hardware type. * @ap_dev: pointer to the AP device. 
@@ -251,9 +603,11 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev) } else { return -ENODEV; } + zq->queue = aq; zq->online = 1; atomic_set(&zq->load, 0); + ap_queue_init_state(aq); ap_queue_init_reply(aq, &zq->reply); aq->request_timeout = CEX4_CLEANUP_TIME, aq->private = zq; @@ -261,13 +615,27 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev) if (rc) { aq->private = NULL; zcrypt_queue_free(zq); + goto out; + } + + if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) { + rc = sysfs_create_group(&ap_dev->device.kobj, + &cca_queue_attr_grp); + if (rc) + zcrypt_queue_unregister(zq); + } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) { + rc = sysfs_create_group(&ap_dev->device.kobj, + &ep11_queue_attr_grp); + if (rc) + zcrypt_queue_unregister(zq); } +out: return rc; } /** - * This is called to remove the CEX4/CEX5/CEX6 queue driver + * This is called to remove the CEX4/CEX5/CEX6/CEX7 queue driver * information if an AP queue device is removed. */ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev) @@ -275,6 +643,10 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev) struct ap_queue *aq = to_ap_queue(&ap_dev->device); struct zcrypt_queue *zq = aq->private; + if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) + sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp); + else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) + sysfs_remove_group(&ap_dev->device.kobj, &ep11_queue_attr_grp); if (zq) zcrypt_queue_unregister(zq); } diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c new file mode 100644 index 000000000000..d4caf46ff9df --- /dev/null +++ b/drivers/s390/crypto/zcrypt_ep11misc.c @@ -0,0 +1,1293 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright IBM Corp. 2019 + * Author(s): Harald Freudenberger <freude@linux.ibm.com> + * + * Collection of EP11 misc functions used by zcrypt and pkey + */ + +#define KMSG_COMPONENT "zcrypt" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/random.h> +#include <asm/zcrypt.h> +#include <asm/pkey.h> + +#include "ap_bus.h" +#include "zcrypt_api.h" +#include "zcrypt_debug.h" +#include "zcrypt_msgtype6.h" +#include "zcrypt_ep11misc.h" +#include "zcrypt_ccamisc.h" + +#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__) +#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__) +#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__) +#define DEBUG_ERR(...) 
ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__) + +/* default iv used here */ +static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, + 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff }; + +/* ep11 card info cache */ +struct card_list_entry { + struct list_head list; + u16 cardnr; + struct ep11_card_info info; +}; +static LIST_HEAD(card_list); +static DEFINE_SPINLOCK(card_list_lock); + +static int card_cache_fetch(u16 cardnr, struct ep11_card_info *ci) +{ + int rc = -ENOENT; + struct card_list_entry *ptr; + + spin_lock_bh(&card_list_lock); + list_for_each_entry(ptr, &card_list, list) { + if (ptr->cardnr == cardnr) { + memcpy(ci, &ptr->info, sizeof(*ci)); + rc = 0; + break; + } + } + spin_unlock_bh(&card_list_lock); + + return rc; +} + +static void card_cache_update(u16 cardnr, const struct ep11_card_info *ci) +{ + int found = 0; + struct card_list_entry *ptr; + + spin_lock_bh(&card_list_lock); + list_for_each_entry(ptr, &card_list, list) { + if (ptr->cardnr == cardnr) { + memcpy(&ptr->info, ci, sizeof(*ci)); + found = 1; + break; + } + } + if (!found) { + ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC); + if (!ptr) { + spin_unlock_bh(&card_list_lock); + return; + } + ptr->cardnr = cardnr; + memcpy(&ptr->info, ci, sizeof(*ci)); + list_add(&ptr->list, &card_list); + } + spin_unlock_bh(&card_list_lock); +} + +static void card_cache_scrub(u16 cardnr) +{ + struct card_list_entry *ptr; + + spin_lock_bh(&card_list_lock); + list_for_each_entry(ptr, &card_list, list) { + if (ptr->cardnr == cardnr) { + list_del(&ptr->list); + kfree(ptr); + break; + } + } + spin_unlock_bh(&card_list_lock); +} + +static void __exit card_cache_free(void) +{ + struct card_list_entry *ptr, *pnext; + + spin_lock_bh(&card_list_lock); + list_for_each_entry_safe(ptr, pnext, &card_list, list) { + list_del(&ptr->list); + kfree(ptr); + } + spin_unlock_bh(&card_list_lock); +} + +/* + * Simple check if the key blob is a valid EP11 secure AES key. + */ +int ep11_check_aeskeyblob(debug_info_t *dbg, int dbflvl, + const u8 *key, int keybitsize, + int checkcpacfexport) +{ + struct ep11keyblob *kb = (struct ep11keyblob *) key; + +#define DBF(...) 
debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) + + if (kb->head.type != TOKTYPE_NON_CCA) { + if (dbg) + DBF("%s key check failed, type 0x%02x != 0x%02x\n", + __func__, (int) kb->head.type, TOKTYPE_NON_CCA); + return -EINVAL; + } + if (kb->head.version != TOKVER_EP11_AES) { + if (dbg) + DBF("%s key check failed, version 0x%02x != 0x%02x\n", + __func__, (int) kb->head.version, TOKVER_EP11_AES); + return -EINVAL; + } + if (kb->version != EP11_STRUCT_MAGIC) { + if (dbg) + DBF("%s key check failed, magic 0x%04x != 0x%04x\n", + __func__, (int) kb->version, EP11_STRUCT_MAGIC); + return -EINVAL; + } + switch (kb->head.keybitlen) { + case 128: + case 192: + case 256: + break; + default: + if (dbg) + DBF("%s key check failed, keybitlen %d invalid\n", + __func__, (int) kb->head.keybitlen); + return -EINVAL; + } + if (keybitsize > 0 && keybitsize != (int) kb->head.keybitlen) { + DBF("%s key check failed, keybitsize %d\n", + __func__, keybitsize); + return -EINVAL; + } + if (checkcpacfexport && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) { + if (dbg) + DBF("%s key check failed, PKEY_EXTRACTABLE is 0\n", + __func__); + return -EINVAL; + } +#undef DBF + + return 0; +} +EXPORT_SYMBOL(ep11_check_aeskeyblob); + +/* + * Helper function which calls zcrypt_send_ep11_cprb with + * memory management segment adjusted to kernel space + * so that the copy_from_user called within this + * function does in fact copy from kernel space. + */ +static inline int _zcrypt_send_ep11_cprb(struct ep11_urb *urb) +{ + int rc; + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); + rc = zcrypt_send_ep11_cprb(urb); + set_fs(old_fs); + + return rc; +} + +/* + * Allocate and prepare ep11 cprb plus additional payload. + */ +static inline struct ep11_cprb *alloc_cprb(size_t payload_len) +{ + size_t len = sizeof(struct ep11_cprb) + payload_len; + struct ep11_cprb *cprb; + + cprb = kmalloc(len, GFP_KERNEL); + if (!cprb) + return NULL; + + memset(cprb, 0, len); + cprb->cprb_len = sizeof(struct ep11_cprb); + cprb->cprb_ver_id = 0x04; + memcpy(cprb->func_id, "T4", 2); + cprb->ret_code = 0xFFFFFFFF; + cprb->payload_len = payload_len; + + return cprb; +} + +/* + * Some helper functions related to ASN1 encoding. + * Limited to length info <= 2 bytes. + */ + +#define ASN1TAGLEN(x) (2 + (x) + ((x) > 127 ? 1 : 0) + ((x) > 255 ?
1 : 0)) + +static int asn1tag_write(u8 *ptr, u8 tag, const u8 *pvalue, u16 valuelen) +{ + ptr[0] = tag; + if (valuelen > 255) { + ptr[1] = 0x82; + *((u16 *)(ptr + 2)) = valuelen; + memcpy(ptr + 4, pvalue, valuelen); + return 4 + valuelen; + } + if (valuelen > 127) { + ptr[1] = 0x81; + ptr[2] = (u8) valuelen; + memcpy(ptr + 3, pvalue, valuelen); + return 3 + valuelen; + } + ptr[1] = (u8) valuelen; + memcpy(ptr + 2, pvalue, valuelen); + return 2 + valuelen; +} + +/* EP11 payload > 127 bytes starts with this struct */ +struct pl_head { + u8 tag; + u8 lenfmt; + u16 len; + u8 func_tag; + u8 func_len; + u32 func; + u8 dom_tag; + u8 dom_len; + u32 dom; +} __packed; + +/* prep ep11 payload head helper function */ +static inline void prep_head(struct pl_head *h, + size_t pl_size, int api, int func) +{ + h->tag = 0x30; + h->lenfmt = 0x82; + h->len = pl_size - 4; + h->func_tag = 0x04; + h->func_len = sizeof(u32); + h->func = (api << 16) + func; + h->dom_tag = 0x04; + h->dom_len = sizeof(u32); +} + +/* prep urb helper function */ +static inline void prep_urb(struct ep11_urb *u, + struct ep11_target_dev *t, int nt, + struct ep11_cprb *req, size_t req_len, + struct ep11_cprb *rep, size_t rep_len) +{ + u->targets = (u8 __user *) t; + u->targets_num = nt; + u->req = (u8 __user *) req; + u->req_len = req_len; + u->resp = (u8 __user *) rep; + u->resp_len = rep_len; +} + +/* Check ep11 reply payload, return 0 or suggested errno value. */ +static int check_reply_pl(const u8 *pl, const char *func) +{ + int len; + u32 ret; + + /* start tag */ + if (*pl++ != 0x30) { + DEBUG_ERR("%s reply start tag mismatch\n", func); + return -EIO; + } + + /* payload length format */ + if (*pl < 127) { + len = *pl; + pl++; + } else if (*pl == 0x81) { + pl++; + len = *pl; + pl++; + } else if (*pl == 0x82) { + pl++; + len = *((u16 *)pl); + pl += 2; + } else { + DEBUG_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n", + func, *pl); + return -EIO; + } + + /* len should cover at least 3 fields with 32 bit value each */ + if (len < 3 * 6) { + DEBUG_ERR("%s reply length %d too small\n", func, len); + return -EIO; + } + + /* function tag, length and value */ + if (pl[0] != 0x04 || pl[1] != 0x04) { + DEBUG_ERR("%s function tag or length mismatch\n", func); + return -EIO; + } + pl += 6; + + /* dom tag, length and value */ + if (pl[0] != 0x04 || pl[1] != 0x04) { + DEBUG_ERR("%s dom tag or length mismatch\n", func); + return -EIO; + } + pl += 6; + + /* return value tag, length and value */ + if (pl[0] != 0x04 || pl[1] != 0x04) { + DEBUG_ERR("%s return value tag or length mismatch\n", func); + return -EIO; + } + pl += 2; + ret = *((u32 *)pl); + if (ret != 0) { + DEBUG_ERR("%s return value 0x%04x != 0\n", func, ret); + return -EIO; + } + + return 0; +} + + +/* + * Helper function which does an ep11 query with given query type. 
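+ * The caller supplies the reply data buffer; if the reply payload does + * not fit into buflen bytes, -ENOSPC is returned. The query_type + * values used by the callers below are 0x01 (module info query) and + * 0x03 (domain info query), e.g. (hypothetical): + * + * rc = ep11_query_info(card, AUTOSEL_DOM, 0x01, + * sizeof(mqi), (u8 *) &mqi);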
+ */ +static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, + size_t buflen, u8 *buf) +{ + struct ep11_info_req_pl { + struct pl_head head; + u8 query_type_tag; + u8 query_type_len; + u32 query_type; + u8 query_subtype_tag; + u8 query_subtype_len; + u32 query_subtype; + } __packed * req_pl; + struct ep11_info_rep_pl { + struct pl_head head; + u8 rc_tag; + u8 rc_len; + u32 rc; + u8 data_tag; + u8 data_lenfmt; + u16 data_len; + } __packed * rep_pl; + struct ep11_cprb *req = NULL, *rep = NULL; + struct ep11_target_dev target; + struct ep11_urb *urb = NULL; + int api = 1, rc = -ENOMEM; + + /* request cprb and payload */ + req = alloc_cprb(sizeof(struct ep11_info_req_pl)); + if (!req) + goto out; + req_pl = (struct ep11_info_req_pl *) (((u8 *) req) + sizeof(*req)); + prep_head(&req_pl->head, sizeof(*req_pl), api, 38); /* get xcp info */ + req_pl->query_type_tag = 0x04; + req_pl->query_type_len = sizeof(u32); + req_pl->query_type = query_type; + req_pl->query_subtype_tag = 0x04; + req_pl->query_subtype_len = sizeof(u32); + + /* reply cprb and payload */ + rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen); + if (!rep) + goto out; + rep_pl = (struct ep11_info_rep_pl *) (((u8 *) rep) + sizeof(*rep)); + + /* urb and target */ + urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); + if (!urb) + goto out; + target.ap_id = cardnr; + target.dom_id = domain; + prep_urb(urb, &target, 1, + req, sizeof(*req) + sizeof(*req_pl), + rep, sizeof(*rep) + sizeof(*rep_pl) + buflen); + + rc = _zcrypt_send_ep11_cprb(urb); + if (rc) { + DEBUG_ERR( + "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", + __func__, (int) cardnr, (int) domain, rc); + goto out; + } + + rc = check_reply_pl((u8 *)rep_pl, __func__); + if (rc) + goto out; + if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) { + DEBUG_ERR("%s unknown reply data format\n", __func__); + rc = -EIO; + goto out; + } + if (rep_pl->data_len > buflen) { + DEBUG_ERR("%s mismatch between reply data len and buffer len\n", + __func__); + rc = -ENOSPC; + goto out; + } + + memcpy(buf, ((u8 *) rep_pl) + sizeof(*rep_pl), rep_pl->data_len); + +out: + kfree(req); + kfree(rep); + kfree(urb); + return rc; +} + +/* + * Provide information about an EP11 card. 
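+ * The info is cached; with verify set (or on a cache miss) the module + * info query is sent to the card and the cache entry is refreshed. + * A minimal caller sketch (hypothetical): + * + * struct ep11_card_info ci; + * + * if (ep11_get_card_info(card, &ci, 0) == 0) + * pr_debug("card %d API ordinal %u\n", (int) card, ci.API_ord_nr);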
+ */ +int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify) +{ + int rc; + struct ep11_module_query_info { + u32 API_ord_nr; + u32 firmware_id; + u8 FW_major_vers; + u8 FW_minor_vers; + u8 CSP_major_vers; + u8 CSP_minor_vers; + u8 fwid[32]; + u8 xcp_config_hash[32]; + u8 CSP_config_hash[32]; + u8 serial[16]; + u8 module_date_time[16]; + u64 op_mode; + u32 PKCS11_flags; + u32 ext_flags; + u32 domains; + u32 sym_state_bytes; + u32 digest_state_bytes; + u32 pin_blob_bytes; + u32 SPKI_bytes; + u32 priv_key_blob_bytes; + u32 sym_blob_bytes; + u32 max_payload_bytes; + u32 CP_profile_bytes; + u32 max_CP_index; + } __packed * pmqi = NULL; + + rc = card_cache_fetch(card, info); + if (rc || verify) { + pmqi = kmalloc(sizeof(*pmqi), GFP_KERNEL); + if (!pmqi) + return -ENOMEM; + rc = ep11_query_info(card, AUTOSEL_DOM, + 0x01 /* module info query */, + sizeof(*pmqi), (u8 *) pmqi); + if (rc) { + if (rc == -ENODEV) + card_cache_scrub(card); + goto out; + } + memset(info, 0, sizeof(*info)); + info->API_ord_nr = pmqi->API_ord_nr; + info->FW_version = + (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers; + memcpy(info->serial, pmqi->serial, sizeof(info->serial)); + info->op_mode = pmqi->op_mode; + card_cache_update(card, info); + } + +out: + kfree(pmqi); + return rc; +} +EXPORT_SYMBOL(ep11_get_card_info); + +/* + * Provide information about a domain within an EP11 card. + */ +int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info) +{ + int rc; + struct ep11_domain_query_info { + u32 dom_index; + u8 cur_WK_VP[32]; + u8 new_WK_VP[32]; + u32 dom_flags; + u64 op_mode; + } __packed * p_dom_info; + + p_dom_info = kmalloc(sizeof(*p_dom_info), GFP_KERNEL); + if (!p_dom_info) + return -ENOMEM; + + rc = ep11_query_info(card, domain, 0x03 /* domain info query */, + sizeof(*p_dom_info), (u8 *) p_dom_info); + if (rc) + goto out; + + memset(info, 0, sizeof(*info)); + info->cur_wk_state = '0'; + info->new_wk_state = '0'; + if (p_dom_info->dom_flags & 0x10 /* left imprint mode */) { + if (p_dom_info->dom_flags & 0x02 /* cur wk valid */) { + info->cur_wk_state = '1'; + memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32); + } + if (p_dom_info->dom_flags & 0x04 /* new wk present */ + || p_dom_info->dom_flags & 0x08 /* new wk committed */) { + info->new_wk_state = + p_dom_info->dom_flags & 0x08 ? 
'2' : '1'; + memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32); + } + } + info->op_mode = p_dom_info->op_mode; + +out: + kfree(p_dom_info); + return rc; +} +EXPORT_SYMBOL(ep11_get_domain_info); + +/* + * Default EP11 AES key generate attributes, used when no keygenflags given: + * XCP_BLOB_ENCRYPT | XCP_BLOB_DECRYPT | XCP_BLOB_PROTKEY_EXTRACTABLE + */ +#define KEY_ATTR_DEFAULTS 0x00200c00 + +int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, + u8 *keybuf, size_t *keybufsize) +{ + struct keygen_req_pl { + struct pl_head head; + u8 var_tag; + u8 var_len; + u32 var; + u8 keybytes_tag; + u8 keybytes_len; + u32 keybytes; + u8 mech_tag; + u8 mech_len; + u32 mech; + u8 attr_tag; + u8 attr_len; + u32 attr_header; + u32 attr_bool_mask; + u32 attr_bool_bits; + u32 attr_val_len_type; + u32 attr_val_len_value; + u8 pin_tag; + u8 pin_len; + } __packed * req_pl; + struct keygen_rep_pl { + struct pl_head head; + u8 rc_tag; + u8 rc_len; + u32 rc; + u8 data_tag; + u8 data_lenfmt; + u16 data_len; + u8 data[512]; + } __packed * rep_pl; + struct ep11_cprb *req = NULL, *rep = NULL; + struct ep11_target_dev target; + struct ep11_urb *urb = NULL; + struct ep11keyblob *kb; + int api, rc = -ENOMEM; + + switch (keybitsize) { + case 128: + case 192: + case 256: + break; + default: + DEBUG_ERR( + "%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); + rc = -EINVAL; + goto out; + } + + /* request cprb and payload */ + req = alloc_cprb(sizeof(struct keygen_req_pl)); + if (!req) + goto out; + req_pl = (struct keygen_req_pl *) (((u8 *) req) + sizeof(*req)); + api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1; + prep_head(&req_pl->head, sizeof(*req_pl), api, 21); /* GenerateKey */ + req_pl->var_tag = 0x04; + req_pl->var_len = sizeof(u32); + req_pl->keybytes_tag = 0x04; + req_pl->keybytes_len = sizeof(u32); + req_pl->keybytes = keybitsize / 8; + req_pl->mech_tag = 0x04; + req_pl->mech_len = sizeof(u32); + req_pl->mech = 0x00001080; /* CKM_AES_KEY_GEN */ + req_pl->attr_tag = 0x04; + req_pl->attr_len = 5 * sizeof(u32); + req_pl->attr_header = 0x10010000; + req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS; + req_pl->attr_bool_bits = keygenflags ? 
keygenflags : KEY_ATTR_DEFAULTS; + req_pl->attr_val_len_type = 0x00000161; /* CKA_VALUE_LEN */ + req_pl->attr_val_len_value = keybitsize / 8; + req_pl->pin_tag = 0x04; + + /* reply cprb and payload */ + rep = alloc_cprb(sizeof(struct keygen_rep_pl)); + if (!rep) + goto out; + rep_pl = (struct keygen_rep_pl *) (((u8 *) rep) + sizeof(*rep)); + + /* urb and target */ + urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); + if (!urb) + goto out; + target.ap_id = card; + target.dom_id = domain; + prep_urb(urb, &target, 1, + req, sizeof(*req) + sizeof(*req_pl), + rep, sizeof(*rep) + sizeof(*rep_pl)); + + rc = _zcrypt_send_ep11_cprb(urb); + if (rc) { + DEBUG_ERR( + "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", + __func__, (int) card, (int) domain, rc); + goto out; + } + + rc = check_reply_pl((u8 *)rep_pl, __func__); + if (rc) + goto out; + if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) { + DEBUG_ERR("%s unknown reply data format\n", __func__); + rc = -EIO; + goto out; + } + if (rep_pl->data_len > *keybufsize) { + DEBUG_ERR("%s mismatch reply data len / key buffer len\n", + __func__); + rc = -ENOSPC; + goto out; + } + + /* copy key blob and set header values */ + memcpy(keybuf, rep_pl->data, rep_pl->data_len); + *keybufsize = rep_pl->data_len; + kb = (struct ep11keyblob *) keybuf; + kb->head.type = TOKTYPE_NON_CCA; + kb->head.len = rep_pl->data_len; + kb->head.version = TOKVER_EP11_AES; + kb->head.keybitlen = keybitsize; + +out: + kfree(req); + kfree(rep); + kfree(urb); + return rc; +} +EXPORT_SYMBOL(ep11_genaeskey); + +static int ep11_cryptsingle(u16 card, u16 domain, + u16 mode, u32 mech, const u8 *iv, + const u8 *key, size_t keysize, + const u8 *inbuf, size_t inbufsize, + u8 *outbuf, size_t *outbufsize) +{ + struct crypt_req_pl { + struct pl_head head; + u8 var_tag; + u8 var_len; + u32 var; + u8 mech_tag; + u8 mech_len; + u32 mech; + /* + * maybe followed by iv data + * followed by key tag + key blob + * followed by plaintext tag + plaintext + */ + } __packed * req_pl; + struct crypt_rep_pl { + struct pl_head head; + u8 rc_tag; + u8 rc_len; + u32 rc; + u8 data_tag; + u8 data_lenfmt; + /* data follows */ + } __packed * rep_pl; + struct ep11_cprb *req = NULL, *rep = NULL; + struct ep11_target_dev target; + struct ep11_urb *urb = NULL; + size_t req_pl_size, rep_pl_size; + int n, api = 1, rc = -ENOMEM; + u8 *p; + + /* the simple asn1 coding used has length limits */ + if (keysize > 0xFFFF || inbufsize > 0xFFFF) + return -EINVAL; + + /* request cprb and payload */ + req_pl_size = sizeof(struct crypt_req_pl) + (iv ? 16 : 0) + + ASN1TAGLEN(keysize) + ASN1TAGLEN(inbufsize); + req = alloc_cprb(req_pl_size); + if (!req) + goto out; + req_pl = (struct crypt_req_pl *) (((u8 *) req) + sizeof(*req)); + prep_head(&req_pl->head, req_pl_size, api, (mode ? 20 : 19)); + req_pl->var_tag = 0x04; + req_pl->var_len = sizeof(u32); + /* mech is mech + mech params (iv here) */ + req_pl->mech_tag = 0x04; + req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0); + req_pl->mech = (mech ? 
mech : 0x00001085); /* CKM_AES_CBC_PAD */ + p = ((u8 *) req_pl) + sizeof(*req_pl); + if (iv) { + memcpy(p, iv, 16); + p += 16; + } + /* key and input data */ + p += asn1tag_write(p, 0x04, key, keysize); + p += asn1tag_write(p, 0x04, inbuf, inbufsize); + + /* reply cprb and payload, assume out data size <= in data size + 32 */ + rep_pl_size = sizeof(struct crypt_rep_pl) + ASN1TAGLEN(inbufsize + 32); + rep = alloc_cprb(rep_pl_size); + if (!rep) + goto out; + rep_pl = (struct crypt_rep_pl *) (((u8 *) rep) + sizeof(*rep)); + + /* urb and target */ + urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); + if (!urb) + goto out; + target.ap_id = card; + target.dom_id = domain; + prep_urb(urb, &target, 1, + req, sizeof(*req) + req_pl_size, + rep, sizeof(*rep) + rep_pl_size); + + rc = _zcrypt_send_ep11_cprb(urb); + if (rc) { + DEBUG_ERR( + "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", + __func__, (int) card, (int) domain, rc); + goto out; + } + + rc = check_reply_pl((u8 *)rep_pl, __func__); + if (rc) + goto out; + if (rep_pl->data_tag != 0x04) { + DEBUG_ERR("%s unknown reply data format\n", __func__); + rc = -EIO; + goto out; + } + p = ((u8 *) rep_pl) + sizeof(*rep_pl); + if (rep_pl->data_lenfmt <= 127) + n = rep_pl->data_lenfmt; + else if (rep_pl->data_lenfmt == 0x81) + n = *p++; + else if (rep_pl->data_lenfmt == 0x82) { + n = *((u16 *) p); + p += 2; + } else { + DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n", + __func__, rep_pl->data_lenfmt); + rc = -EIO; + goto out; + } + if (n > *outbufsize) { + DEBUG_ERR("%s mismatch reply data len %d / output buffer %zu\n", + __func__, n, *outbufsize); + rc = -ENOSPC; + goto out; + } + + memcpy(outbuf, p, n); + *outbufsize = n; + +out: + kfree(req); + kfree(rep); + kfree(urb); + return rc; +} + +static int ep11_unwrapkey(u16 card, u16 domain, + const u8 *kek, size_t keksize, + const u8 *enckey, size_t enckeysize, + u32 mech, const u8 *iv, + u32 keybitsize, u32 keygenflags, + u8 *keybuf, size_t *keybufsize) +{ + struct uw_req_pl { + struct pl_head head; + u8 attr_tag; + u8 attr_len; + u32 attr_header; + u32 attr_bool_mask; + u32 attr_bool_bits; + u32 attr_key_type; + u32 attr_key_type_value; + u32 attr_val_len; + u32 attr_val_len_value; + u8 mech_tag; + u8 mech_len; + u32 mech; + /* + * maybe followed by iv data + * followed by kek tag + kek blob + * followed by empty mac tag + * followed by empty pin tag + * followed by encrypted key tag + bytes + */ + } __packed * req_pl; + struct uw_rep_pl { + struct pl_head head; + u8 rc_tag; + u8 rc_len; + u32 rc; + u8 data_tag; + u8 data_lenfmt; + u16 data_len; + u8 data[512]; + } __packed * rep_pl; + struct ep11_cprb *req = NULL, *rep = NULL; + struct ep11_target_dev target; + struct ep11_urb *urb = NULL; + struct ep11keyblob *kb; + size_t req_pl_size; + int api, rc = -ENOMEM; + u8 *p; + + /* request cprb and payload */ + req_pl_size = sizeof(struct uw_req_pl) + (iv ? 16 : 0) + + ASN1TAGLEN(keksize) + 4 + ASN1TAGLEN(enckeysize); + req = alloc_cprb(req_pl_size); + if (!req) + goto out; + req_pl = (struct uw_req_pl *) (((u8 *) req) + sizeof(*req)); + api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1; + prep_head(&req_pl->head, req_pl_size, api, 34); /* UnwrapKey */ + req_pl->attr_tag = 0x04; + req_pl->attr_len = 7 * sizeof(u32); + req_pl->attr_header = 0x10020000; + req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS; + req_pl->attr_bool_bits = keygenflags ?
keygenflags : KEY_ATTR_DEFAULTS; + req_pl->attr_key_type = 0x00000100; /* CKA_KEY_TYPE */ + req_pl->attr_key_type_value = 0x0000001f; /* CKK_AES */ + req_pl->attr_val_len = 0x00000161; /* CKA_VALUE_LEN */ + req_pl->attr_val_len_value = keybitsize / 8; + /* mech is mech + mech params (iv here) */ + req_pl->mech_tag = 0x04; + req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0); + req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */ + p = ((u8 *) req_pl) + sizeof(*req_pl); + if (iv) { + memcpy(p, iv, 16); + p += 16; + } + /* kek */ + p += asn1tag_write(p, 0x04, kek, keksize); + /* empty mac key tag */ + *p++ = 0x04; + *p++ = 0; + /* empty pin tag */ + *p++ = 0x04; + *p++ = 0; + /* encrypted key value tag and bytes */ + p += asn1tag_write(p, 0x04, enckey, enckeysize); + + /* reply cprb and payload */ + rep = alloc_cprb(sizeof(struct uw_rep_pl)); + if (!rep) + goto out; + rep_pl = (struct uw_rep_pl *) (((u8 *) rep) + sizeof(*rep)); + + /* urb and target */ + urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); + if (!urb) + goto out; + target.ap_id = card; + target.dom_id = domain; + prep_urb(urb, &target, 1, + req, sizeof(*req) + req_pl_size, + rep, sizeof(*rep) + sizeof(*rep_pl)); + + rc = _zcrypt_send_ep11_cprb(urb); + if (rc) { + DEBUG_ERR( + "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", + __func__, (int) card, (int) domain, rc); + goto out; + } + + rc = check_reply_pl((u8 *)rep_pl, __func__); + if (rc) + goto out; + if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) { + DEBUG_ERR("%s unknown reply data format\n", __func__); + rc = -EIO; + goto out; + } + if (rep_pl->data_len > *keybufsize) { + DEBUG_ERR("%s mismatch reply data len / key buffer len\n", + __func__); + rc = -ENOSPC; + goto out; + } + + /* copy key blob and set header values */ + memcpy(keybuf, rep_pl->data, rep_pl->data_len); + *keybufsize = rep_pl->data_len; + kb = (struct ep11keyblob *) keybuf; + kb->head.type = TOKTYPE_NON_CCA; + kb->head.len = rep_pl->data_len; + kb->head.version = TOKVER_EP11_AES; + kb->head.keybitlen = keybitsize; + +out: + kfree(req); + kfree(rep); + kfree(urb); + return rc; +} + +static int ep11_wrapkey(u16 card, u16 domain, + const u8 *key, size_t keysize, + u32 mech, const u8 *iv, + u8 *databuf, size_t *datasize) +{ + struct wk_req_pl { + struct pl_head head; + u8 var_tag; + u8 var_len; + u32 var; + u8 mech_tag; + u8 mech_len; + u32 mech; + /* + * followed by iv data + * followed by key tag + key blob + * followed by dummy kek param + * followed by dummy mac param + */ + } __packed * req_pl; + struct wk_rep_pl { + struct pl_head head; + u8 rc_tag; + u8 rc_len; + u32 rc; + u8 data_tag; + u8 data_lenfmt; + u16 data_len; + u8 data[512]; + } __packed * rep_pl; + struct ep11_cprb *req = NULL, *rep = NULL; + struct ep11_target_dev target; + struct ep11_urb *urb = NULL; + struct ep11keyblob *kb; + size_t req_pl_size; + int api, rc = -ENOMEM; + u8 *p; + + /* request cprb and payload */ + req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0) + + ASN1TAGLEN(keysize) + 4; + req = alloc_cprb(req_pl_size); + if (!req) + goto out; + if (!mech || mech == 0x80060001) + req->flags |= 0x20; /* CPACF_WRAP needs special bit */ + req_pl = (struct wk_req_pl *) (((u8 *) req) + sizeof(*req)); + api = (!mech || mech == 0x80060001) ?
4 : 1; /* CKM_IBM_CPACF_WRAP */ + prep_head(&req_pl->head, req_pl_size, api, 33); /* WrapKey */ + req_pl->var_tag = 0x04; + req_pl->var_len = sizeof(u32); + /* mech is mech + mech params (iv here) */ + req_pl->mech_tag = 0x04; + req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0); + req_pl->mech = (mech ? mech : 0x80060001); /* CKM_IBM_CPACF_WRAP */ + p = ((u8 *) req_pl) + sizeof(*req_pl); + if (iv) { + memcpy(p, iv, 16); + p += 16; + } + /* key blob */ + p += asn1tag_write(p, 0x04, key, keysize); + /* maybe the key argument needs the head data cleaned out */ + kb = (struct ep11keyblob *)(p - keysize); + if (kb->head.version == TOKVER_EP11_AES) + memset(&kb->head, 0, sizeof(kb->head)); + /* empty kek tag */ + *p++ = 0x04; + *p++ = 0; + /* empty mac tag */ + *p++ = 0x04; + *p++ = 0; + + /* reply cprb and payload */ + rep = alloc_cprb(sizeof(struct wk_rep_pl)); + if (!rep) + goto out; + rep_pl = (struct wk_rep_pl *) (((u8 *) rep) + sizeof(*rep)); + + /* urb and target */ + urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); + if (!urb) + goto out; + target.ap_id = card; + target.dom_id = domain; + prep_urb(urb, &target, 1, + req, sizeof(*req) + req_pl_size, + rep, sizeof(*rep) + sizeof(*rep_pl)); + + rc = _zcrypt_send_ep11_cprb(urb); + if (rc) { + DEBUG_ERR( + "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", + __func__, (int) card, (int) domain, rc); + goto out; + } + + rc = check_reply_pl((u8 *)rep_pl, __func__); + if (rc) + goto out; + if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) { + DEBUG_ERR("%s unknown reply data format\n", __func__); + rc = -EIO; + goto out; + } + if (rep_pl->data_len > *datasize) { + DEBUG_ERR("%s mismatch reply data len / data buffer len\n", + __func__); + rc = -ENOSPC; + goto out; + } + + /* copy the data from the cprb to the data buffer */ + memcpy(databuf, rep_pl->data, rep_pl->data_len); + *datasize = rep_pl->data_len; + +out: + kfree(req); + kfree(rep); + kfree(urb); + return rc; +} + +int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, + const u8 *clrkey, u8 *keybuf, size_t *keybufsize) +{ + int rc; + struct ep11keyblob *kb; + u8 encbuf[64], *kek = NULL; + size_t clrkeylen, keklen, encbuflen = sizeof(encbuf); + + if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) + clrkeylen = keybitsize / 8; + else { + DEBUG_ERR( + "%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); + return -EINVAL; + } + + /* allocate memory for the temp kek */ + keklen = MAXEP11AESKEYBLOBSIZE; + kek = kmalloc(keklen, GFP_ATOMIC); + if (!kek) { + rc = -ENOMEM; + goto out; + } + + /* Step 1: generate AES 256 bit random kek key */ + rc = ep11_genaeskey(card, domain, 256, + 0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */ + kek, &keklen); + if (rc) { + DEBUG_ERR( + "%s generate kek key failed, rc=%d\n", + __func__, rc); + goto out; + } + kb = (struct ep11keyblob *) kek; + memset(&kb->head, 0, sizeof(kb->head)); + + /* Step 2: encrypt clear key value with the kek key */ + rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen, + clrkey, clrkeylen, encbuf, &encbuflen); + if (rc) { + DEBUG_ERR( + "%s encrypting key value with kek key failed, rc=%d\n", + __func__, rc); + goto out; + } + + /* Step 3: import the encrypted key value as a new key */ + rc = ep11_unwrapkey(card, domain, kek, keklen, + encbuf, encbuflen, 0, def_iv, + keybitsize, 0, keybuf, keybufsize); + if (rc) { + DEBUG_ERR( + "%s importing key value as new key failed, rc=%d\n", + __func__, rc); + goto out; + } + +out: + kfree(kek); + return rc; +}
+EXPORT_SYMBOL(ep11_clr2keyblob); + +int ep11_key2protkey(u16 card, u16 dom, const u8 *key, size_t keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) +{ + int rc = -EIO; + u8 *wkbuf = NULL; + size_t wkbuflen = 256; + struct wk_info { + u16 version; + u8 res1[16]; + u32 pkeytype; + u32 pkeybitsize; + u64 pkeysize; + u8 res2[8]; + u8 pkey[0]; + } __packed * wki; + + /* alloc temp working buffer */ + wkbuf = kmalloc(wkbuflen, GFP_ATOMIC); + if (!wkbuf) + return -ENOMEM; + + /* ep11 secure key -> protected key + info */ + rc = ep11_wrapkey(card, dom, key, keylen, + 0, def_iv, wkbuf, &wkbuflen); + if (rc) { + DEBUG_ERR( + "%s rewrapping ep11 key to pkey failed, rc=%d\n", + __func__, rc); + goto out; + } + wki = (struct wk_info *) wkbuf; + + /* check struct version and pkey type */ + if (wki->version != 1 || wki->pkeytype != 1) { + DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n", + __func__, (int) wki->version, (int) wki->pkeytype); + rc = -EIO; + goto out; + } + + /* copy the translated protected key */ + switch (wki->pkeysize) { + case 16+32: + /* AES 128 protected key */ + if (protkeytype) + *protkeytype = PKEY_KEYTYPE_AES_128; + break; + case 24+32: + /* AES 192 protected key */ + if (protkeytype) + *protkeytype = PKEY_KEYTYPE_AES_192; + break; + case 32+32: + /* AES 256 protected key */ + if (protkeytype) + *protkeytype = PKEY_KEYTYPE_AES_256; + break; + default: + DEBUG_ERR("%s unknown/unsupported pkeysize %d\n", + __func__, (int) wki->pkeysize); + rc = -EIO; + goto out; + } + memcpy(protkey, wki->pkey, wki->pkeysize); + if (protkeylen) + *protkeylen = (u32) wki->pkeysize; + rc = 0; + +out: + kfree(wkbuf); + return rc; +} +EXPORT_SYMBOL(ep11_key2protkey); + +int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, + int minhwtype, int minapi, const u8 *wkvp) +{ + struct zcrypt_device_status_ext *device_status; + u32 *_apqns = NULL, _nr_apqns = 0; + int i, card, dom, rc = -ENOMEM; + struct ep11_domain_info edi; + struct ep11_card_info eci; + + /* fetch status of all crypto cards */ + device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT, + sizeof(struct zcrypt_device_status_ext), + GFP_KERNEL); + if (!device_status) + return -ENOMEM; + zcrypt_device_status_mask_ext(device_status); + + /* allocate 1k space for up to 256 apqns */ + _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL); + if (!_apqns) { + kfree(device_status); + return -ENOMEM; + } + + /* walk through all the crypto apqns */ + for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { + card = AP_QID_CARD(device_status[i].qid); + dom = AP_QID_QUEUE(device_status[i].qid); + /* check online state */ + if (!device_status[i].online) + continue; + /* check for ep11 functions */ + if (!(device_status[i].functions & 0x01)) + continue; + /* check cardnr */ + if (cardnr != 0xFFFF && card != cardnr) + continue; + /* check domain */ + if (domain != 0xFFFF && dom != domain) + continue; + /* check min hardware type */ + if (minhwtype && device_status[i].hwtype < minhwtype) + continue; + /* check min api version if given */ + if (minapi > 0) { + if (ep11_get_card_info(card, &eci, 0)) + continue; + if (minapi > eci.API_ord_nr) + continue; + } + /* check wkvp if given */ + if (wkvp) { + if (ep11_get_domain_info(card, dom, &edi)) + continue; + if (edi.cur_wk_state != '1') + continue; + if (memcmp(wkvp, edi.cur_wkvp, 16)) + continue; + } + /* apqn passed all filtering criteria, add to the array */ + if (_nr_apqns < 256) + _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom); + } + + /* nothing found ? */ + if (!_nr_apqns) { + kfree(_apqns); + rc = -ENODEV; + } else { + /* no re-allocation, simply return the _apqns array */ + *apqns = _apqns; + *nr_apqns = _nr_apqns; + rc = 0; + } + + kfree(device_status); + return rc; +} +EXPORT_SYMBOL(ep11_findcard2); + +void __exit zcrypt_ep11misc_exit(void) +{ + card_cache_free(); +}
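Editorial note: taken together, the three exports above cover the clear-key path end to end. A minimal usage sketch, not part of the patch (my_clrkey and the fixed protkey buffer size are hypothetical; a real caller would add full error handling):

	u32 *apqns = NULL, nr_apqns = 0;
	u8 seckey[MAXEP11AESKEYBLOBSIZE], protkey[64];
	size_t seckeylen = sizeof(seckey);
	u32 protkeylen = sizeof(protkey), protkeytype;
	u16 card, dom;
	int rc;

	/* any online EP11 APQN: no card/domain/hwtype/api/wkvp filtering */
	rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, 0, 0, NULL);
	if (rc)
		return rc;
	card = apqns[0] >> 16;
	dom = apqns[0] & 0xFFFF;

	/* clear 256 bit AES key -> EP11 secure key blob */
	rc = ep11_clr2keyblob(card, dom, 256, 0, my_clrkey,
			      seckey, &seckeylen);
	if (!rc)
		/* EP11 secure key blob -> CPACF protected key */
		rc = ep11_key2protkey(card, dom, seckey, seckeylen,
				      protkey, &protkeylen, &protkeytype);
	kfree(apqns);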
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h new file mode 100644 index 000000000000..e3ed5ed1de86 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_ep11misc.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright IBM Corp. 2019 + * Author(s): Harald Freudenberger <freude@linux.ibm.com> + * + * Collection of EP11 misc functions used by zcrypt and pkey + */ + +#ifndef _ZCRYPT_EP11MISC_H_ +#define _ZCRYPT_EP11MISC_H_ + +#include <asm/zcrypt.h> +#include <asm/pkey.h> + +#define TOKVER_EP11_AES 0x03 /* EP11 AES key blob */ + +#define EP11_API_V 4 /* highest known and supported EP11 API version */ + +#define EP11_STRUCT_MAGIC 0x1234 +#define EP11_BLOB_PKEY_EXTRACTABLE 0x200000 + +/* inside view of an EP11 secure key blob */ +struct ep11keyblob { + union { + u8 session[32]; + struct { + u8 type; /* 0x00 (TOKTYPE_NON_CCA) */ + u8 res0; /* unused */ + u16 len; /* total length in bytes of this blob */ + u8 version; /* 0x03 (TOKVER_EP11_AES) */ + u8 res1; /* unused */ + u16 keybitlen; /* clear key bit len, 0 for unknown */ + } head; + }; + u8 wkvp[16]; /* wrapping key verification pattern */ + u64 attr; /* boolean key attributes */ + u64 mode; /* mode bits */ + u16 version; /* 0x1234, EP11_STRUCT_MAGIC */ + u8 iv[14]; + u8 encrypted_key_data[144]; + u8 mac[32]; +} __packed; + +/* + * Simple check if the key blob is a valid EP11 secure AES key. + * If keybitsize is given, the bitsize of the key is also checked. + * If checkcpacfexport is enabled, the key is also checked for the + * attributes needed to export this key for CPACF use. + * Returns 0 on success or errno value on failure. + */ +int ep11_check_aeskeyblob(debug_info_t *dbg, int dbflvl, + const u8 *key, int keybitsize, + int checkcpacfexport); + +/* EP11 card info struct */ +struct ep11_card_info { + u32 API_ord_nr; /* API ordinal number */ + u16 FW_version; /* Firmware major and minor version */ + char serial[16]; /* serial number string (16 ascii chars, no 0-termination) */ + u64 op_mode; /* card operational mode(s) */ +}; + +/* EP11 domain info struct */ +struct ep11_domain_info { + char cur_wk_state; /* '0' invalid, '1' valid */ + char new_wk_state; /* '0' empty, '1' uncommitted, '2' committed */ + u8 cur_wkvp[32]; /* current wrapping key verification pattern */ + u8 new_wkvp[32]; /* new wrapping key verification pattern */ + u64 op_mode; /* domain operational mode(s) */ +}; + +/* + * Provide information about an EP11 card. + */ +int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify); + +/* + * Provide information about a domain within an EP11 card. + */ +int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info); + +/* + * Generate (random) EP11 AES secure key. + */ +int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, + u8 *keybuf, size_t *keybufsize); + +/* + * Generate EP11 AES secure key with given clear key value. + */ +int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, + const u8 *clrkey, u8 *keybuf, size_t *keybufsize); + +/* + * Derive protected key from EP11 AES secure key blob. + */ +int ep11_key2protkey(u16 cardnr, u16 domain, const u8 *key, size_t keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype); + +/* + * Build a list of ep11 apqns meeting the following constraints: + * - apqn is online and is in fact an EP11 apqn + * - if cardnr is not FFFF only apqns with this cardnr + * - if domain is not FFFF only apqns with this domainnr + * - if minhwtype > 0 only apqns with hwtype >= minhwtype + * - if minapi > 0 only apqns with API_ord_nr >= minapi + * - if wkvp != NULL only apqns where the wkvp (EP11_WKVPLEN bytes) matches + * to the first EP11_WKVPLEN bytes of the wkvp of the current wrapping + * key for this domain. When a wkvp is given there will always be a re-fetch + * of the domain info for the potential apqn - so this triggers a request/reply + * round trip to each eligible apqn. + * The array of apqn entries is allocated with kmalloc and returned in *apqns; + * the number of apqns stored into the list is returned in *nr_apqns. One apqn + * entry is simply a 32 bit value with 16 bit cardnr and 16 bit domain nr and + * may be cast to struct pkey_apqn. The return value is either 0 for success + * or a negative errno value. If no apqn meeting the criteria is found, + * -ENODEV is returned. + */ +int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, + int minhwtype, int minapi, const u8 *wkvp); + +void zcrypt_ep11misc_exit(void); + +#endif /* _ZCRYPT_EP11MISC_H_ */
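Editorial note: the wkvp filter documented above is what lets a caller restrict the APQN search to cards whose current wrapping key can actually process an existing key blob. A sketch, not part of the patch (blob is a hypothetical, already validated EP11 AES key token):

	struct ep11keyblob *kb = (struct ep11keyblob *) blob;
	u32 *apqns = NULL, nr_apqns = 0;
	int rc;

	/* match the blob's wrapping key verification pattern */
	rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
			    0, 0, kb->wkvp);
	if (!rc) {
		/* each entry packs 16 bit card nr and 16 bit domain nr */
		u16 card = apqns[0] >> 16;
		u16 dom = apqns[0] & 0xFFFF;

		/* ... send work to (card, dom) ... */
		kfree(apqns);
	}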
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h index f34ee41cbed8..4f4dd9d727c9 --- a/drivers/s390/crypto/zcrypt_error.h +++ b/drivers/s390/crypto/zcrypt_error.h @@ -61,6 +61,7 @@ struct error_hdr { #define REP82_ERROR_EVEN_MOD_IN_OPND 0x85 #define REP82_ERROR_RESERVED_FIELD 0x88 #define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A +#define REP82_ERROR_FILTERED_BY_HYPERVISOR 0x8B #define REP82_ERROR_TRANSPORT_FAIL 0x90 #define REP82_ERROR_PACKET_TRUNCATED 0xA0 #define REP82_ERROR_ZERO_BUFFER_LEN 0xB0 @@ -91,6 +92,7 @@ static inline int convert_error(struct zcrypt_queue *zq, case REP82_ERROR_INVALID_DOMAIN_PRECHECK: case REP82_ERROR_INVALID_DOMAIN_PENDING: case REP82_ERROR_INVALID_SPECIAL_CMD: + case REP82_ERROR_FILTERED_BY_HYPERVISOR: // REP88_ERROR_INVALID_KEY // '82' CEX2A // REP88_ERROR_OPERAND // '84' CEX2A // REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
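Editorial note: the ctcm and lcs hunks below all make the same mechanical change: the channel pointer formerly passed as intparm to ccw_device_start()/_halt()/_clear() becomes 0. The intparm value is only echoed back to the driver's interrupt handler, and these handlers resolve the channel from the ccw_device rather than from intparm, so the pointer was dead weight. In sketch form (illustrative, not from the patch; the handler name is hypothetical):

	static void my_irq_handler(struct ccw_device *cdev,
				   unsigned long intparm, struct irb *irb)
	{
		/*
		 * intparm arrives here exactly as given to
		 * ccw_device_start(); ctcm and lcs look the channel up
		 * from cdev and ignore it, so the callers now pass 0.
		 */
	}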
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index 4a8a5373cb35..3ce99e4db44d 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c @@ -307,8 +307,7 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg) ch->ccw[1].count = ch->trans_skb->len; fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); ch->prof.send_stamp = jiffies; - rc = ccw_device_start(ch->cdev, &ch->ccw[0], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); ch->prof.doios_multi++; if (rc != 0) { priv->stats.tx_dropped += i; @@ -417,8 +416,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg) if (ctcm_checkalloc_buffer(ch)) return; ch->ccw[1].count = ch->max_bufsize; - rc = ccw_device_start(ch->cdev, &ch->ccw[0], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); if (rc != 0) ctcm_ccw_check_rc(ch, rc, "normal RX"); } @@ -478,8 +476,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg) fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? CTC_STATE_RXINIT : CTC_STATE_TXINIT); - rc = ccw_device_start(ch->cdev, &ch->ccw[0], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); if (rc != 0) { fsm_deltimer(&ch->timer); fsm_newstate(fi, CTC_STATE_SETUPWAIT); @@ -527,8 +524,7 @@ static void chx_rxidle(fsm_instance *fi, int event, void *arg) return; ch->ccw[1].count = ch->max_bufsize; fsm_newstate(fi, CTC_STATE_RXIDLE); - rc = ccw_device_start(ch->cdev, &ch->ccw[0], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); if (rc != 0) { fsm_newstate(fi, CTC_STATE_RXINIT); ctcm_ccw_check_rc(ch, rc, "initial RX"); @@ -571,8 +567,7 @@ static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg) /* Such conditional locking is undeterministic in * static view. => ignore sparse warnings here. */ - rc = ccw_device_start(ch->cdev, &ch->ccw[6], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[6], 0, 0xff, 0); if (event == CTC_EVENT_TIMER) /* see above comments */ spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); if (rc != 0) { @@ -637,7 +632,7 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg) fsm_newstate(fi, CTC_STATE_STARTWAIT); fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch); spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); - rc = ccw_device_halt(ch->cdev, (unsigned long)ch); + rc = ccw_device_halt(ch->cdev, 0); spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); if (rc != 0) { if (rc != -EBUSY) @@ -672,7 +667,7 @@ static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg) * static view. => ignore sparse warnings here. */ oldstate = fsm_getstate(fi); fsm_newstate(fi, CTC_STATE_TERM); - rc = ccw_device_halt(ch->cdev, (unsigned long)ch); + rc = ccw_device_halt(ch->cdev, 0); if (event == CTC_EVENT_STOP) spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); @@ -799,7 +794,7 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg) fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); if (!IS_MPC(ch) && (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) { - int rc = ccw_device_halt(ch->cdev, (unsigned long)ch); + int rc = ccw_device_halt(ch->cdev, 0); if (rc != 0) ctcm_ccw_check_rc(ch, rc, "HaltIO in chx_setuperr"); @@ -851,7 +846,7 @@ static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg) /* Such conditional locking is a known problem for * sparse because its undeterministic in static view. * Warnings should be ignored here.
*/ - rc = ccw_device_halt(ch->cdev, (unsigned long)ch); + rc = ccw_device_halt(ch->cdev, 0); if (event == CTC_EVENT_TIMER) spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); if (rc != 0) { @@ -947,8 +942,8 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg) ch2 = priv->channel[CTCM_WRITE]; fsm_newstate(ch2->fsm, CTC_STATE_DTERM); - ccw_device_halt(ch->cdev, (unsigned long)ch); - ccw_device_halt(ch2->cdev, (unsigned long)ch2); + ccw_device_halt(ch->cdev, 0); + ccw_device_halt(ch2->cdev, 0); } /** @@ -1041,8 +1036,7 @@ static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg) ctcmpc_dumpit((char *)&ch->ccw[3], sizeof(struct ccw1) * 3); - rc = ccw_device_start(ch->cdev, &ch->ccw[3], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[3], 0, 0xff, 0); if (event == CTC_EVENT_TIMER) spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); @@ -1361,8 +1355,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) ch->prof.send_stamp = jiffies; if (do_debug_ccw) ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3); - rc = ccw_device_start(ch->cdev, &ch->ccw[0], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); ch->prof.doios_multi++; if (rc != 0) { priv->stats.tx_dropped += i; @@ -1462,8 +1455,7 @@ again: if (dolock) spin_lock_irqsave( get_ccwdev_lock(ch->cdev), saveflags); - rc = ccw_device_start(ch->cdev, &ch->ccw[0], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); if (dolock) /* see remark about conditional locking */ spin_unlock_irqrestore( get_ccwdev_lock(ch->cdev), saveflags); @@ -1569,8 +1561,7 @@ void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg) if (event == CTC_EVENT_START) /* see remark about conditional locking */ spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); - rc = ccw_device_start(ch->cdev, &ch->ccw[0], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); if (event == CTC_EVENT_START) spin_unlock_irqrestore( get_ccwdev_lock(ch->cdev), saveflags); @@ -1825,8 +1816,7 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg) spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags); wch->prof.send_stamp = jiffies; - rc = ccw_device_start(wch->cdev, &wch->ccw[3], - (unsigned long) wch, 0xff, 0); + rc = ccw_device_start(wch->cdev, &wch->ccw[3], 0, 0xff, 0); spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags); if ((grp->sweep_req_pend_num == 0) && diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index f63c5c871d3d..437a6d822105 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -569,8 +569,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb) fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); ch->prof.send_stamp = jiffies; - rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], 0, 0xff, 0); spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); if (ccw_idx == 3) ch->prof.doios_single++; @@ -833,8 +832,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); ch->prof.send_stamp = jiffies; - rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], 0, 
0xff, 0); spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); if (ccw_idx == 3) ch->prof.doios_single++; @@ -1074,10 +1072,8 @@ static void ctcm_free_netdevice(struct net_device *dev) if (grp) { if (grp->fsm) kfree_fsm(grp->fsm); - if (grp->xid_skb) - dev_kfree_skb(grp->xid_skb); - if (grp->rcvd_xid_skb) - dev_kfree_skb(grp->rcvd_xid_skb); + dev_kfree_skb(grp->xid_skb); + dev_kfree_skb(grp->rcvd_xid_skb); tasklet_kill(&grp->mpc_tasklet2); kfree(grp); priv->mpcg = NULL; diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index 1534420a0243..ab316baa8284 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c @@ -1523,8 +1523,7 @@ void mpc_action_send_discontact(unsigned long thischan) unsigned long saveflags = 0; spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); - rc = ccw_device_start(ch->cdev, &ch->ccw[15], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[15], 0, 0xff, 0); spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); if (rc != 0) { @@ -1797,8 +1796,7 @@ static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side) } fsm_addtimer(&ch->timer, 5000 , CTC_EVENT_TIMER, ch); - rc = ccw_device_start(ch->cdev, &ch->ccw[8], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[8], 0, 0xff, 0); if (gotlock) /* see remark above about conditional locking */ spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h index 66eac2b9704d..1901e9c80ed8 100644 --- a/drivers/s390/net/ism.h +++ b/drivers/s390/net/ism.h @@ -32,8 +32,6 @@ #define ISM_UNREG_SBA 0x11 #define ISM_UNREG_IEQ 0x12 -#define ISM_ERROR 0xFFFF - struct ism_req_hdr { u32 cmd; u16 : 16; diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 2d9fe7e4ee40..8f08b0a2917c 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -504,7 +504,7 @@ lcs_clear_channel(struct lcs_channel *channel) LCS_DBF_TEXT(4,trace,"clearch"); LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev)); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_clear(channel->ccwdev, (addr_t) channel); + rc = ccw_device_clear(channel->ccwdev, 0); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) { LCS_DBF_TEXT_(4, trace, "ecsc%s", @@ -532,7 +532,7 @@ lcs_stop_channel(struct lcs_channel *channel) LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev)); channel->state = LCS_CH_STATE_INIT; spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_halt(channel->ccwdev, (addr_t) channel); + rc = ccw_device_halt(channel->ccwdev, 0); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) { LCS_DBF_TEXT_(4, trace, "ehsc%s", @@ -1427,7 +1427,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) channel->state = LCS_CH_STATE_SUSPENDED; if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) { if (irb->scsw.cmd.cc != 0) { - ccw_device_halt(channel->ccwdev, (addr_t) channel); + ccw_device_halt(channel->ccwdev, 0); return; } /* The channel has been stopped by halt_IO. 
*/ diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 28db887d38ed..9575a627a1e1 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -22,6 +22,7 @@ #include <linux/hashtable.h> #include <linux/ip.h> #include <linux/refcount.h> +#include <linux/timer.h> #include <linux/wait.h> #include <linux/workqueue.h> @@ -30,6 +31,7 @@ #include <net/ipv6.h> #include <net/if_inet6.h> #include <net/addrconf.h> +#include <net/sch_generic.h> #include <net/tcp.h> #include <asm/debug.h> @@ -123,12 +125,6 @@ struct qeth_routing_info { enum qeth_routing_types type; }; -/* IPA stuff */ -struct qeth_ipa_info { - __u32 supported_funcs; - __u32 enabled_funcs; -}; - /* SETBRIDGEPORT stuff */ enum qeth_sbp_roles { QETH_SBP_ROLE_NONE = 0, @@ -167,41 +163,6 @@ struct qeth_vnicc_info { bool rx_bcast_enabled; }; -static inline int qeth_is_adp_supported(struct qeth_ipa_info *ipa, - enum qeth_ipa_setadp_cmd func) -{ - return (ipa->supported_funcs & func); -} - -static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa, - enum qeth_ipa_funcs func) -{ - return (ipa->supported_funcs & func); -} - -static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, - enum qeth_ipa_funcs func) -{ - return (ipa->supported_funcs & ipa->enabled_funcs & func); -} - -#define qeth_adp_supported(c, f) \ - qeth_is_adp_supported(&c->options.adp, f) -#define qeth_is_supported(c, f) \ - qeth_is_ipa_supported(&c->options.ipa4, f) -#define qeth_is_enabled(c, f) \ - qeth_is_ipa_enabled(&c->options.ipa4, f) -#define qeth_is_supported6(c, f) \ - qeth_is_ipa_supported(&c->options.ipa6, f) -#define qeth_is_enabled6(c, f) \ - qeth_is_ipa_enabled(&c->options.ipa6, f) -#define qeth_is_ipafunc_supported(c, prot, f) \ - ((prot == QETH_PROT_IPV6) ? \ - qeth_is_supported6(c, f) : qeth_is_supported(c, f)) -#define qeth_is_ipafunc_enabled(c, prot, f) \ - ((prot == QETH_PROT_IPV6) ? 
\ - qeth_is_enabled6(c, f) : qeth_is_enabled(c, f)) - #define QETH_IDX_FUNC_LEVEL_OSD 0x0101 #define QETH_IDX_FUNC_LEVEL_IQD 0x4108 @@ -260,7 +221,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, /* large receive scatter gather copy break */ #define QETH_RX_SG_CB (PAGE_SIZE >> 1) -#define QETH_RX_PULL_LEN 256 struct qeth_hdr_layer3 { __u8 id; @@ -366,6 +326,7 @@ enum qeth_header_ids { QETH_HEADER_TYPE_L3_TSO = 0x03, QETH_HEADER_TYPE_OSN = 0x04, QETH_HEADER_TYPE_L2_TSO = 0x06, + QETH_HEADER_MASK_INVAL = 0x80, }; /* flags for qeth_hdr.ext_flags */ #define QETH_HDR_EXT_VLAN_FRAME 0x01 @@ -376,6 +337,28 @@ enum qeth_header_ids { #define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20 #define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/ +static inline bool qeth_l2_same_vlan(struct qeth_hdr_layer2 *h1, + struct qeth_hdr_layer2 *h2) +{ + return !((h1->flags[2] ^ h2->flags[2]) & QETH_LAYER2_FLAG_VLAN) && + h1->vlan_id == h2->vlan_id; +} + +static inline bool qeth_l3_iqd_same_vlan(struct qeth_hdr_layer3 *h1, + struct qeth_hdr_layer3 *h2) +{ + return !((h1->ext_flags ^ h2->ext_flags) & QETH_HDR_EXT_VLAN_FRAME) && + h1->vlan_id == h2->vlan_id; +} + +static inline bool qeth_l3_same_next_hop(struct qeth_hdr_layer3 *h1, + struct qeth_hdr_layer3 *h2) +{ + return !((h1->flags ^ h2->flags) & QETH_HDR_IPV6) && + ipv6_addr_equal(&h1->next_hop.ipv6_addr, + &h2->next_hop.ipv6_addr); +} + enum qeth_qdio_info_states { QETH_QDIO_UNINITIALIZED, QETH_QDIO_ALLOCATED, @@ -424,6 +407,7 @@ struct qeth_qdio_out_buffer { struct qdio_buffer *buffer; atomic_t state; int next_element_to_fill; + unsigned int bytes; struct sk_buff_head skb_list; int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER]; @@ -452,12 +436,17 @@ struct qeth_card_stats { u64 rx_sg_frags; u64 rx_sg_alloc_page; + u64 rx_dropped_nomem; + u64 rx_dropped_notsupp; + u64 rx_dropped_runt; + /* rtnl_link_stats64 */ u64 rx_packets; u64 rx_bytes; - u64 rx_errors; - u64 rx_dropped; u64 rx_multicast; + u64 rx_length_errors; + u64 rx_frame_errors; + u64 rx_fifo_errors; }; struct qeth_out_q_stats { @@ -473,6 +462,8 @@ struct qeth_out_q_stats { u64 tso_bytes; u64 packing_mode_switch; u64 stopped; + u64 completion_yield; + u64 completion_timer; /* rtnl_link_stats64 */ u64 tx_packets; @@ -481,6 +472,8 @@ struct qeth_out_q_stats { u64 tx_dropped; }; +#define QETH_TX_TIMER_USECS 500 + struct qeth_qdio_out_q { struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q]; @@ -499,13 +492,38 @@ struct qeth_qdio_out_q { atomic_t used_buffers; /* indicates whether PCI flag must be set (or if one is outstanding) */ atomic_t set_pci_flags_count; + struct napi_struct napi; + struct timer_list timer; + struct qeth_hdr *prev_hdr; + u8 bulk_start; + u8 bulk_count; + u8 bulk_max; }; +#define qeth_for_each_output_queue(card, q, i) \ + for (i = 0; i < card->qdio.no_out_queues && \ + (q = card->qdio.out_qs[i]); i++) + +#define qeth_napi_to_out_queue(n) container_of(n, struct qeth_qdio_out_q, napi) + +static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue) +{ + if (timer_pending(&queue->timer)) + return; + mod_timer(&queue->timer, usecs_to_jiffies(QETH_TX_TIMER_USECS) + + jiffies); +} + static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue) { return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q; } +static inline bool qeth_out_queue_is_empty(struct qeth_qdio_out_q *queue) +{ + return atomic_read(&queue->used_buffers) == 0; +} + struct qeth_qdio_info { atomic_t state; /* input */ @@ -540,7 +558,6 @@ 
enum qeth_channel_states { */ enum qeth_card_states { CARD_STATE_DOWN, - CARD_STATE_HARDSETUP, CARD_STATE_SOFTSETUP, }; @@ -568,19 +585,33 @@ struct qeth_ipato { struct qeth_channel { struct ccw_device *ccwdev; + struct qeth_cmd_buffer *active_cmd; enum qeth_channel_states state; atomic_t irq_pending; }; +struct qeth_reply { + int (*callback)(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data); + void *param; +}; + struct qeth_cmd_buffer { + struct list_head list; + struct completion done; + spinlock_t lock; unsigned int length; refcount_t ref_count; struct qeth_channel *channel; - struct qeth_reply *reply; + struct qeth_reply reply; long timeout; unsigned char *data; void (*finalize)(struct qeth_card *card, struct qeth_cmd_buffer *iob); - void (*callback)(struct qeth_card *card, struct qeth_cmd_buffer *iob); + bool (*match)(struct qeth_cmd_buffer *iob, + struct qeth_cmd_buffer *reply); + void (*callback)(struct qeth_card *card, struct qeth_cmd_buffer *iob, + unsigned int data_length); + int rc; }; static inline void qeth_get_cmd(struct qeth_cmd_buffer *iob) @@ -588,6 +619,14 @@ static inline void qeth_get_cmd(struct qeth_cmd_buffer *iob) refcount_inc(&iob->ref_count); } +static inline struct qeth_ipa_cmd *__ipa_reply(struct qeth_cmd_buffer *iob) +{ + if (!IS_IPA(iob->data)) + return NULL; + + return (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); +} + static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob) { return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); @@ -626,19 +665,6 @@ struct qeth_seqno { __u16 ipa; }; -struct qeth_reply { - struct list_head list; - struct completion received; - spinlock_t lock; - int (*callback)(struct qeth_card *, struct qeth_reply *, - unsigned long); - u32 seqno; - unsigned long offset; - int rc; - void *param; - refcount_t refcnt; -}; - struct qeth_card_blkt { int time_total; int inter_packet; @@ -651,10 +677,11 @@ struct qeth_card_blkt { struct qeth_card_info { unsigned short unit_addr2; unsigned short cula; - unsigned short chpid; + u8 chpid; __u16 func_level; char mcl_level[QETH_MCL_LENGTH + 1]; u8 open_when_online:1; + u8 promisc_mode:1; u8 use_v1_blkt:1; u8 is_vm_nic:1; int mac_bits; @@ -664,7 +691,6 @@ struct qeth_card_info { int unique_id; bool layer_enforced; struct qeth_card_blkt blkt; - enum qeth_ipa_promisc_modes promisc_mode; __u32 diagass_support; __u32 hwtrap; }; @@ -676,11 +702,11 @@ enum qeth_discipline_id { }; struct qeth_card_options { + struct qeth_ipa_caps ipa4; + struct qeth_ipa_caps ipa6; struct qeth_routing_info route4; - struct qeth_ipa_info ipa4; - struct qeth_ipa_info adp; /*Adapter parameters*/ struct qeth_routing_info route6; - struct qeth_ipa_info ipa6; + struct qeth_ipa_caps adp; /* Adapter parameters */ struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */ struct qeth_vnicc_info vnicc; /* VNICC options */ int fake_broadcast; @@ -710,12 +736,10 @@ struct qeth_osn_info { struct qeth_discipline { const struct device_type *devtype; - int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done); - int (*recover)(void *ptr); int (*setup) (struct ccwgroup_device *); void (*remove) (struct ccwgroup_device *); - int (*set_online) (struct ccwgroup_device *); - int (*set_offline) (struct ccwgroup_device *); + int (*set_online)(struct qeth_card *card); + void (*set_offline)(struct qeth_card *card); int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd); int (*control_event_handler)(struct qeth_card *card, struct qeth_ipa_cmd *cmd); @@ -767,7 +791,6 @@ struct 
qeth_card { struct workqueue_struct *event_wq; struct workqueue_struct *cmd_wq; wait_queue_head_t wait_q; - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; DECLARE_HASHTABLE(mac_htable, 4); DECLARE_HASHTABLE(ip_htable, 4); struct mutex ip_lock; @@ -789,6 +812,7 @@ struct qeth_card { struct service_level qeth_service_level; struct qdio_ssqd_desc ssqd; debug_info_t *debug; + struct mutex sbp_lock; struct mutex conf_mutex; struct mutex discipline_mutex; struct napi_struct napi; @@ -803,6 +827,13 @@ static inline bool qeth_card_hw_is_reachable(struct qeth_card *card) return card->state == CARD_STATE_SOFTSETUP; } +static inline void qeth_unlock_channel(struct qeth_card *card, + struct qeth_channel *channel) +{ + atomic_set(&channel->irq_pending, 0); + wake_up(&card->wait_q); +} + struct qeth_trap_id { __u16 lparnr; char vmname[8]; @@ -828,6 +859,13 @@ static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq) return txq; } +static inline bool qeth_iqd_is_mcast_queue(struct qeth_card *card, + struct qeth_qdio_out_q *queue) +{ + return qeth_iqd_translate_txq(card->dev, queue->queue_no) == + QETH_IQD_MCAST_TXQ; +} + static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf, unsigned int elements) { @@ -891,18 +929,6 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, int ipv) return dst; } -static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb, - u8 flags) -{ - if ((card->dev->features & NETIF_F_RXCSUM) && - (flags & QETH_HDR_EXT_CSUM_TRANSP_REQ)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - QETH_CARD_STAT_INC(card, rx_skb_csum); - } else { - skb->ip_summed = CHECKSUM_NONE; - } -} - static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv) { *flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ; @@ -969,12 +995,11 @@ struct net_device *qeth_clone_netdev(struct net_device *orig); struct qeth_card *qeth_get_card_by_busid(char *bus_id); void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); int qeth_threads_running(struct qeth_card *, unsigned long); -int qeth_do_run_thread(struct qeth_card *, unsigned long); -void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long); -void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long); int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok); +int qeth_stop_channel(struct qeth_channel *channel); +int qeth_set_offline(struct qeth_card *card, bool resetting); + void qeth_print_status_message(struct qeth_card *); -int qeth_init_qdio_queues(struct qeth_card *); int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, int (*reply_cb) (struct qeth_card *, struct qeth_reply *, unsigned long), @@ -994,23 +1019,22 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card, enum qeth_diags_cmds sub_cmd, unsigned int data_length); +void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason); void qeth_put_cmd(struct qeth_cmd_buffer *iob); -struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, - struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *, - struct qeth_hdr **); void qeth_schedule_recovery(struct qeth_card *); int qeth_poll(struct napi_struct *napi, int budget); void qeth_clear_ipacmd_list(struct qeth_card *); int qeth_qdio_clear_card(struct qeth_card *, int); void qeth_clear_working_pool_list(struct qeth_card *); void qeth_drain_output_queues(struct qeth_card *card); -void qeth_setadp_promisc_mode(struct qeth_card *); +void 
qeth_setadp_promisc_mode(struct qeth_card *card, bool enable); int qeth_setadpparms_change_macaddr(struct qeth_card *); -void qeth_tx_timeout(struct net_device *); -void qeth_notify_reply(struct qeth_reply *reply, int reason); +void qeth_tx_timeout(struct net_device *, unsigned int txqueue); void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, - u16 cmd_length); + u16 cmd_length, + bool (*match)(struct qeth_cmd_buffer *iob, + struct qeth_cmd_buffer *reply)); int qeth_query_switch_attributes(struct qeth_card *card, struct qeth_switch_info *sw_info); int qeth_query_card_info(struct qeth_card *card, diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 9c3310c4d61d..9639938581f5 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -63,14 +63,16 @@ static struct device *qeth_core_root_dev; static struct lock_class_key qdio_out_skb_queue_key; static void qeth_issue_next_read_cb(struct qeth_card *card, - struct qeth_cmd_buffer *iob); + struct qeth_cmd_buffer *iob, + unsigned int data_length); static void qeth_free_buffer_pool(struct qeth_card *); static int qeth_qdio_establish(struct qeth_card *); static void qeth_free_qdio_queues(struct qeth_card *card); static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, enum iucv_tx_notify notification); -static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); +static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error, + int budget); static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); static void qeth_close_dev_handler(struct work_struct *work) @@ -245,9 +247,6 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) { QETH_CARD_TEXT(card, 2, "realcbp"); - if (card->state != CARD_STATE_DOWN) - return -EPERM; - /* TODO: steel/add buffers from/to a running card's buffer pool (?) 
*/ qeth_clear_working_pool_list(card); qeth_free_buffer_pool(card); @@ -410,7 +409,7 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, /* release here to avoid interleaving between outbound tasklet and inbound tasklet regarding notifications and lifecycle */ - qeth_release_skbs(c); + qeth_tx_complete_buf(c, forced_cleanup, 0); c = f->next_pending; WARN_ON_ONCE(head->next_pending != f); @@ -513,14 +512,15 @@ static int __qeth_issue_next_read(struct qeth_card *card) QETH_CARD_TEXT(card, 6, "noirqpnd"); rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0); - if (rc) { + if (!rc) { + channel->active_cmd = iob; + } else { QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", rc, CARD_DEVID(card)); - atomic_set(&channel->irq_pending, 0); + qeth_unlock_channel(card, channel); qeth_put_cmd(iob); card->read_or_write_problem = 1; qeth_schedule_recovery(card); - wake_up(&card->wait_q); } return rc; } @@ -536,50 +536,28 @@ static int qeth_issue_next_read(struct qeth_card *card) return ret; } -static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) -{ - struct qeth_reply *reply; - - reply = kzalloc(sizeof(*reply), GFP_KERNEL); - if (reply) { - refcount_set(&reply->refcnt, 1); - init_completion(&reply->received); - spin_lock_init(&reply->lock); - } - return reply; -} - -static void qeth_get_reply(struct qeth_reply *reply) -{ - refcount_inc(&reply->refcnt); -} - -static void qeth_put_reply(struct qeth_reply *reply) -{ - if (refcount_dec_and_test(&reply->refcnt)) - kfree(reply); -} - -static void qeth_enqueue_reply(struct qeth_card *card, struct qeth_reply *reply) +static void qeth_enqueue_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob) { spin_lock_irq(&card->lock); - list_add_tail(&reply->list, &card->cmd_waiter_list); + list_add_tail(&iob->list, &card->cmd_waiter_list); spin_unlock_irq(&card->lock); } -static void qeth_dequeue_reply(struct qeth_card *card, struct qeth_reply *reply) +static void qeth_dequeue_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob) { spin_lock_irq(&card->lock); - list_del(&reply->list); + list_del(&iob->list); spin_unlock_irq(&card->lock); } -void qeth_notify_reply(struct qeth_reply *reply, int reason) +void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason) { - reply->rc = reason; - complete(&reply->received); + iob->rc = reason; + complete(&iob->done); } -EXPORT_SYMBOL_GPL(qeth_notify_reply); +EXPORT_SYMBOL_GPL(qeth_notify_cmd); static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, struct qeth_card *card) @@ -657,14 +635,14 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, void qeth_clear_ipacmd_list(struct qeth_card *card) { - struct qeth_reply *reply; + struct qeth_cmd_buffer *iob; unsigned long flags; QETH_CARD_TEXT(card, 4, "clipalst"); spin_lock_irqsave(&card->lock, flags); - list_for_each_entry(reply, &card->cmd_waiter_list, list) - qeth_notify_reply(reply, -EIO); + list_for_each_entry(iob, &card->cmd_waiter_list, list) + qeth_notify_cmd(iob, -EIO); spin_unlock_irqrestore(&card->lock, flags); } EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); @@ -673,17 +651,17 @@ static int qeth_check_idx_response(struct qeth_card *card, unsigned char *buffer) { QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); - if ((buffer[2] & 0xc0) == 0xc0) { + if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) { QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n", buffer[4]); QETH_CARD_TEXT(card, 2, "ckidxres"); QETH_CARD_TEXT(card, 2, 
" idxterm"); - QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); - if (buffer[4] == 0xf6) { + QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]); + if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT || + buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) { dev_err(&card->gdev->dev, - "The qeth device is not configured " - "for the OSI layer required by z/VM\n"); - return -EPERM; + "The device does not support the configured transport mode\n"); + return -EPROTONOSUPPORT; } return -EIO; } @@ -693,8 +671,6 @@ static int qeth_check_idx_response(struct qeth_card *card, void qeth_put_cmd(struct qeth_cmd_buffer *iob) { if (refcount_dec_and_test(&iob->ref_count)) { - if (iob->reply) - qeth_put_reply(iob->reply); kfree(iob->data); kfree(iob); } @@ -702,17 +678,15 @@ void qeth_put_cmd(struct qeth_cmd_buffer *iob) EXPORT_SYMBOL_GPL(qeth_put_cmd); static void qeth_release_buffer_cb(struct qeth_card *card, - struct qeth_cmd_buffer *iob) + struct qeth_cmd_buffer *iob, + unsigned int data_length) { qeth_put_cmd(iob); } static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc) { - struct qeth_reply *reply = iob->reply; - - if (reply) - qeth_notify_reply(reply, rc); + qeth_notify_cmd(iob, rc); qeth_put_cmd(iob); } @@ -736,6 +710,9 @@ struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel, return NULL; } + init_completion(&iob->done); + spin_lock_init(&iob->lock); + INIT_LIST_HEAD(&iob->list); refcount_set(&iob->ref_count, 1); iob->channel = channel; iob->timeout = timeout; @@ -745,11 +722,13 @@ struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel, EXPORT_SYMBOL_GPL(qeth_alloc_cmd); static void qeth_issue_next_read_cb(struct qeth_card *card, - struct qeth_cmd_buffer *iob) + struct qeth_cmd_buffer *iob, + unsigned int data_length) { + struct qeth_cmd_buffer *request = NULL; struct qeth_ipa_cmd *cmd = NULL; struct qeth_reply *reply = NULL; - struct qeth_reply *r; + struct qeth_cmd_buffer *tmp; unsigned long flags; int rc = 0; @@ -759,15 +738,15 @@ static void qeth_issue_next_read_cb(struct qeth_card *card, case 0: break; case -EIO: - qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); /* fall through */ default: + qeth_clear_ipacmd_list(card); goto out; } - if (IS_IPA(iob->data)) { - cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); + cmd = __ipa_reply(iob); + if (cmd) { cmd = qeth_check_ipa_data(card, cmd); if (!cmd) goto out; @@ -776,52 +755,42 @@ static void qeth_issue_next_read_cb(struct qeth_card *card, card->osn_info.assist_cb(card->dev, cmd); goto out; } - } else { - /* non-IPA commands should only flow during initialization */ - if (card->state != CARD_STATE_DOWN) - goto out; } /* match against pending cmd requests */ spin_lock_irqsave(&card->lock, flags); - list_for_each_entry(r, &card->cmd_waiter_list, list) { - if ((r->seqno == QETH_IDX_COMMAND_SEQNO) || - (cmd && (r->seqno == cmd->hdr.seqno))) { - reply = r; + list_for_each_entry(tmp, &card->cmd_waiter_list, list) { + if (tmp->match && tmp->match(tmp, iob)) { + request = tmp; /* take the object outside the lock */ - qeth_get_reply(reply); + qeth_get_cmd(request); break; } } spin_unlock_irqrestore(&card->lock, flags); - if (!reply) + if (!request) goto out; + reply = &request->reply; if (!reply->callback) { rc = 0; goto no_callback; } - spin_lock_irqsave(&reply->lock, flags); - if (reply->rc) { + spin_lock_irqsave(&request->lock, flags); + if (request->rc) /* Bail out when the requestor has already left: */ - rc = reply->rc; - } else { - if (cmd) { - reply->offset = (u16)((char *)cmd - (char *)iob->data); - rc = 
reply->callback(card, reply, (unsigned long)cmd); - } else { - rc = reply->callback(card, reply, (unsigned long)iob); - } - } - spin_unlock_irqrestore(&reply->lock, flags); + rc = request->rc; + else + rc = reply->callback(card, reply, cmd ? (unsigned long)cmd : + (unsigned long)iob); + spin_unlock_irqrestore(&request->lock, flags); no_callback: if (rc <= 0) - qeth_notify_reply(reply, rc); - qeth_put_reply(reply); - + qeth_notify_cmd(request, rc); + qeth_put_cmd(request); out: memcpy(&card->seqno.pdu_hdr_ack, QETH_PDU_HEADER_SEQ_NO(iob->data), @@ -846,7 +815,8 @@ static int qeth_set_thread_start_bit(struct qeth_card *card, return 0; } -void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread) +static void qeth_clear_thread_start_bit(struct qeth_card *card, + unsigned long thread) { unsigned long flags; @@ -855,9 +825,9 @@ void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread) spin_unlock_irqrestore(&card->thread_mask_lock, flags); wake_up(&card->wait_q); } -EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit); -void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) +static void qeth_clear_thread_running_bit(struct qeth_card *card, + unsigned long thread) { unsigned long flags; @@ -866,7 +836,6 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) spin_unlock_irqrestore(&card->thread_mask_lock, flags); wake_up_all(&card->wait_q); } -EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit); static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread) { @@ -887,7 +856,7 @@ static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread) return rc; } -int qeth_do_run_thread(struct qeth_card *card, unsigned long thread) +static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread) { int rc = 0; @@ -895,7 +864,6 @@ int qeth_do_run_thread(struct qeth_card *card, unsigned long thread) (rc = __qeth_do_run_thread(card, thread)) >= 0); return rc; } -EXPORT_SYMBOL_GPL(qeth_do_run_thread); void qeth_schedule_recovery(struct qeth_card *card) { @@ -903,7 +871,6 @@ void qeth_schedule_recovery(struct qeth_card *card) if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0) schedule_work(&card->kernel_thread_starter); } -EXPORT_SYMBOL_GPL(qeth_schedule_recovery); static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev, struct irb *irb) @@ -925,30 +892,30 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev, CCW_DEVID(cdev), dstat, cstat); print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, 16, 1, irb, 64, 1); - return 1; + return -EIO; } if (dstat & DEV_STAT_UNIT_CHECK) { if (sense[SENSE_RESETTING_EVENT_BYTE] & SENSE_RESETTING_EVENT_FLAG) { QETH_CARD_TEXT(card, 2, "REVIND"); - return 1; + return -EIO; } if (sense[SENSE_COMMAND_REJECT_BYTE] & SENSE_COMMAND_REJECT_FLAG) { QETH_CARD_TEXT(card, 2, "CMDREJi"); - return 1; + return -EIO; } if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { QETH_CARD_TEXT(card, 2, "AFFE"); - return 1; + return -EIO; } if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { QETH_CARD_TEXT(card, 2, "ZEROSEN"); return 0; } QETH_CARD_TEXT(card, 2, "DGENCHK"); - return 1; + return -EIO; } return 0; } @@ -994,8 +961,6 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, /* while we hold the ccwdev lock, this stays valid: */ gdev = dev_get_drvdata(&cdev->dev); card = dev_get_drvdata(&gdev->dev); - if (!card) - return; QETH_CARD_TEXT(card, 5, "irq"); @@ -1010,36 +975,45 @@ 
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, QETH_CARD_TEXT(card, 5, "data"); } - if (qeth_intparm_is_iob(intparm)) - iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm); + if (intparm == 0) { + QETH_CARD_TEXT(card, 5, "irqunsol"); + } else if ((addr_t)intparm != (addr_t)channel->active_cmd) { + QETH_CARD_TEXT(card, 5, "irqunexp"); + + dev_err(&cdev->dev, + "Received IRQ with intparm %lx, expected %px\n", + intparm, channel->active_cmd); + if (channel->active_cmd) + qeth_cancel_cmd(channel->active_cmd, -EIO); + } else { + iob = (struct qeth_cmd_buffer *) (addr_t)intparm; + } + + channel->active_cmd = NULL; + qeth_unlock_channel(card, channel); rc = qeth_check_irb_error(card, cdev, irb); if (rc) { /* IO was terminated, free its resources. */ if (iob) qeth_cancel_cmd(iob, rc); - atomic_set(&channel->irq_pending, 0); - wake_up(&card->wait_q); return; } - atomic_set(&channel->irq_pending, 0); - - if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC)) + if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) { channel->state = CH_STATE_STOPPED; + wake_up(&card->wait_q); + } - if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC)) + if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) { channel->state = CH_STATE_HALTED; - - if (intparm == QETH_CLEAR_CHANNEL_PARM) { - QETH_CARD_TEXT(card, 6, "clrchpar"); - /* we don't have to handle this further */ - intparm = 0; + wake_up(&card->wait_q); } - if (intparm == QETH_HALT_CHANNEL_PARM) { - QETH_CARD_TEXT(card, 6, "hltchpar"); - /* we don't have to handle this further */ - intparm = 0; + + if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC | + SCSW_FCTL_HALT_FUNC))) { + qeth_cancel_cmd(iob, -ECANCELED); + iob = NULL; } cstat = irb->scsw.cmd.cstat; @@ -1068,16 +1042,20 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, qeth_cancel_cmd(iob, rc); qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); - goto out; + return; } } - if (iob && iob->callback) - iob->callback(card, iob); - -out: - wake_up(&card->wait_q); - return; + if (iob) { + /* sanity check: */ + if (irb->scsw.cmd.count > iob->length) { + qeth_cancel_cmd(iob, -EIO); + return; + } + if (iob->callback) + iob->callback(card, iob, + iob->length - irb->scsw.cmd.count); + } } static void qeth_notify_skbs(struct qeth_qdio_out_q *q, @@ -1094,22 +1072,52 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q, } } -static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) +static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error, + int budget) { + struct qeth_qdio_out_q *queue = buf->q; struct sk_buff *skb; /* release may never happen from within CQ tasklet scope */ WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) - qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR); + qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR); + + /* Empty buffer? */ + if (buf->next_element_to_fill == 0) + return; + + QETH_TXQ_STAT_INC(queue, bufs); + QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill); + while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) { + unsigned int bytes = qdisc_pkt_len(skb); + bool is_tso = skb_is_gso(skb); + unsigned int packets; + + packets = is_tso ? 
skb_shinfo(skb)->gso_segs : 1; + if (error) { + QETH_TXQ_STAT_ADD(queue, tx_errors, packets); + } else { + QETH_TXQ_STAT_ADD(queue, tx_packets, packets); + QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes); + if (skb->ip_summed == CHECKSUM_PARTIAL) + QETH_TXQ_STAT_ADD(queue, skbs_csum, packets); + if (skb_is_nonlinear(skb)) + QETH_TXQ_STAT_INC(queue, skbs_sg); + if (is_tso) { + QETH_TXQ_STAT_INC(queue, skbs_tso); + QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes); + } + } - while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) - consume_skb(skb); + napi_consume_skb(skb, budget); + } } static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf) + struct qeth_qdio_out_buffer *buf, + bool error, int budget) { int i; @@ -1117,7 +1125,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) atomic_dec(&queue->set_pci_flags_count); - qeth_release_skbs(buf); + qeth_tx_complete_buf(buf, error, budget); for (i = 0; i < queue->max_elements; ++i) { if (buf->buffer->element[i].addr && buf->is_header[i]) @@ -1128,6 +1136,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements); buf->next_element_to_fill = 0; + buf->bytes = 0; atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); } @@ -1139,7 +1148,7 @@ static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free) if (!q->bufs[j]) continue; qeth_cleanup_handled_pending(q, j, 1); - qeth_clear_output_buffer(q, q->bufs[j]); + qeth_clear_output_buffer(q, q->bufs[j], true, 0); if (free) { kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]); q->bufs[j] = NULL; @@ -1173,31 +1182,6 @@ static void qeth_free_buffer_pool(struct qeth_card *card) } } -static void qeth_clean_channel(struct qeth_channel *channel) -{ - struct ccw_device *cdev = channel->ccwdev; - - QETH_DBF_TEXT(SETUP, 2, "freech"); - - spin_lock_irq(get_ccwdev_lock(cdev)); - cdev->handler = NULL; - spin_unlock_irq(get_ccwdev_lock(cdev)); -} - -static void qeth_setup_channel(struct qeth_channel *channel) -{ - struct ccw_device *cdev = channel->ccwdev; - - QETH_DBF_TEXT(SETUP, 2, "setupch"); - - channel->state = CH_STATE_DOWN; - atomic_set(&channel->irq_pending, 0); - - spin_lock_irq(get_ccwdev_lock(cdev)); - cdev->handler = qeth_irq; - spin_unlock_irq(get_ccwdev_lock(cdev)); -} - static int qeth_osa_set_output_queues(struct qeth_card *card, bool single) { unsigned int count = single ? 
1 : card->dev->num_tx_queues; @@ -1293,6 +1277,7 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) return rc; } +static int qeth_do_reset(void *data); static void qeth_start_kernel_thread(struct work_struct *work) { struct task_struct *ts; @@ -1304,8 +1289,7 @@ static void qeth_start_kernel_thread(struct work_struct *work) card->write.state != CH_STATE_UP) return; if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) { - ts = kthread_run(card->discipline->recover, (void *)card, - "qeth_recover"); + ts = kthread_run(qeth_do_reset, card, "qeth_recover"); if (IS_ERR(ts)) { qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); qeth_clear_thread_running_bit(card, @@ -1370,9 +1354,6 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev) if (!card->read_cmd) goto out_read_cmd; - qeth_setup_channel(&card->read); - qeth_setup_channel(&card->write); - qeth_setup_channel(&card->data); card->qeth_service_level.seq_print = qeth_core_sl_print; register_service_level(&card->qeth_service_level); return card; @@ -1393,7 +1374,7 @@ static int qeth_clear_channel(struct qeth_card *card, QETH_CARD_TEXT(card, 3, "clearch"); spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); - rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM); + rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd); spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) @@ -1415,7 +1396,7 @@ static int qeth_halt_channel(struct qeth_card *card, QETH_CARD_TEXT(card, 3, "haltch"); spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); - rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM); + rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd); spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) @@ -1429,6 +1410,51 @@ static int qeth_halt_channel(struct qeth_card *card, return 0; } +int qeth_stop_channel(struct qeth_channel *channel) +{ + struct ccw_device *cdev = channel->ccwdev; + int rc; + + rc = ccw_device_set_offline(cdev); + + spin_lock_irq(get_ccwdev_lock(cdev)); + if (channel->active_cmd) { + dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n", + channel->active_cmd); + channel->active_cmd = NULL; + } + cdev->handler = NULL; + spin_unlock_irq(get_ccwdev_lock(cdev)); + + return rc; +} +EXPORT_SYMBOL_GPL(qeth_stop_channel); + +static int qeth_start_channel(struct qeth_channel *channel) +{ + struct ccw_device *cdev = channel->ccwdev; + int rc; + + channel->state = CH_STATE_DOWN; + atomic_set(&channel->irq_pending, 0); + + spin_lock_irq(get_ccwdev_lock(cdev)); + cdev->handler = qeth_irq; + spin_unlock_irq(get_ccwdev_lock(cdev)); + + rc = ccw_device_set_online(cdev); + if (rc) + goto err; + + return 0; + +err: + spin_lock_irq(get_ccwdev_lock(cdev)); + cdev->handler = NULL; + spin_unlock_irq(get_ccwdev_lock(cdev)); + return rc; +} + static int qeth_halt_channels(struct qeth_card *card) { int rc1 = 0, rc2 = 0, rc3 = 0; @@ -1498,7 +1524,6 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) rc = qeth_clear_halt_card(card, use_halt); if (rc) QETH_CARD_TEXT_(card, 3, "2err%d", rc); - card->state = CARD_STATE_DOWN; return rc; } EXPORT_SYMBOL_GPL(qeth_qdio_clear_card); @@ -1652,10 +1677,16 @@ static void qeth_mpc_finalize_cmd(struct qeth_card *card, memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); - iob->reply->seqno = QETH_IDX_COMMAND_SEQNO; iob->callback = qeth_release_buffer_cb; } +static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob, + struct qeth_cmd_buffer 
*reply) +{ + /* MPC cmds are issued strictly in sequence. */ + return !IS_IPA(reply->data); +} + static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card, void *data, unsigned int data_length) @@ -1670,6 +1701,7 @@ static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card, qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length, iob->data); iob->finalize = qeth_mpc_finalize_cmd; + iob->match = qeth_mpc_match_reply; return iob; } @@ -1703,29 +1735,19 @@ static int qeth_send_control_data(struct qeth_card *card, void *reply_param) { struct qeth_channel *channel = iob->channel; + struct qeth_reply *reply = &iob->reply; long timeout = iob->timeout; int rc; - struct qeth_reply *reply = NULL; QETH_CARD_TEXT(card, 2, "sendctl"); - reply = qeth_alloc_reply(card); - if (!reply) { - qeth_put_cmd(iob); - return -ENOMEM; - } reply->callback = reply_cb; reply->param = reply_param; - /* pairs with qeth_put_cmd(): */ - qeth_get_reply(reply); - iob->reply = reply; - timeout = wait_event_interruptible_timeout(card->wait_q, qeth_trylock_channel(channel), timeout); if (timeout <= 0) { - qeth_put_reply(reply); qeth_put_cmd(iob); return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; } @@ -1734,62 +1756,89 @@ static int qeth_send_control_data(struct qeth_card *card, iob->finalize(card, iob); QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN)); - qeth_enqueue_reply(card, reply); + qeth_enqueue_cmd(card, iob); + + /* This pairs with iob->callback, and keeps the iob alive after IO: */ + qeth_get_cmd(iob); QETH_CARD_TEXT(card, 6, "noirqpnd"); spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob), (addr_t) iob, 0, 0, timeout); + if (!rc) + channel->active_cmd = iob; spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) { QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n", CARD_DEVID(card), rc); QETH_CARD_TEXT_(card, 2, " err%d", rc); - qeth_dequeue_reply(card, reply); - qeth_put_reply(reply); + qeth_dequeue_cmd(card, iob); qeth_put_cmd(iob); - atomic_set(&channel->irq_pending, 0); - wake_up(&card->wait_q); - return rc; + qeth_unlock_channel(card, channel); + goto out; } - timeout = wait_for_completion_interruptible_timeout(&reply->received, + timeout = wait_for_completion_interruptible_timeout(&iob->done, timeout); if (timeout <= 0) rc = (timeout == -ERESTARTSYS) ? 
-EINTR : -ETIME; - qeth_dequeue_reply(card, reply); + qeth_dequeue_cmd(card, iob); if (reply_cb) { /* Wait until the callback for a late reply has completed: */ - spin_lock_irq(&reply->lock); + spin_lock_irq(&iob->lock); if (rc) /* Zap any callback that's still pending: */ - reply->rc = rc; - spin_unlock_irq(&reply->lock); + iob->rc = rc; + spin_unlock_irq(&iob->lock); } if (!rc) - rc = reply->rc; - qeth_put_reply(reply); + rc = iob->rc; + +out: + qeth_put_cmd(iob); return rc; } +struct qeth_node_desc { + struct node_descriptor nd1; + struct node_descriptor nd2; + struct node_descriptor nd3; +}; + static void qeth_read_conf_data_cb(struct qeth_card *card, - struct qeth_cmd_buffer *iob) + struct qeth_cmd_buffer *iob, + unsigned int data_length) { - unsigned char *prcd = iob->data; + struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data; + int rc = 0; + u8 *tag; QETH_CARD_TEXT(card, 2, "cfgunit"); - card->info.chpid = prcd[30]; - card->info.unit_addr2 = prcd[31]; - card->info.cula = prcd[63]; - card->info.is_vm_nic = ((prcd[0x10] == _ascebc['V']) && - (prcd[0x11] == _ascebc['M'])); - card->info.use_v1_blkt = prcd[74] == 0xF0 && prcd[75] == 0xF0 && - prcd[76] >= 0xF1 && prcd[76] <= 0xF4; - - qeth_notify_reply(iob->reply, 0); + + if (data_length < sizeof(*nd)) { + rc = -EINVAL; + goto out; + } + + card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] && + nd->nd1.plant[1] == _ascebc['M']; + tag = (u8 *)&nd->nd1.tag; + card->info.chpid = tag[0]; + card->info.unit_addr2 = tag[1]; + + tag = (u8 *)&nd->nd2.tag; + card->info.cula = tag[1]; + + card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 && + nd->nd3.model[1] == 0xF0 && + nd->nd3.model[2] >= 0xF1 && + nd->nd3.model[2] <= 0xF4; + +out: + qeth_notify_cmd(iob, rc); qeth_put_cmd(iob); } @@ -1803,6 +1852,8 @@ static int qeth_read_conf_data(struct qeth_card *card) ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD); if (!ciw || ciw->cmd == 0) return -EOPNOTSUPP; + if (ciw->count < sizeof(struct qeth_node_desc)) + return -EINVAL; iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT); if (!iob) @@ -1850,7 +1901,8 @@ static int qeth_idx_check_activate_response(struct qeth_card *card, } static void qeth_idx_activate_read_channel_cb(struct qeth_card *card, - struct qeth_cmd_buffer *iob) + struct qeth_cmd_buffer *iob, + unsigned int data_length) { struct qeth_channel *channel = iob->channel; u16 peer_level; @@ -1878,12 +1930,13 @@ static void qeth_idx_activate_read_channel_cb(struct qeth_card *card, QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); out: - qeth_notify_reply(iob->reply, rc); + qeth_notify_cmd(iob, rc); qeth_put_cmd(iob); } static void qeth_idx_activate_write_channel_cb(struct qeth_card *card, - struct qeth_cmd_buffer *iob) + struct qeth_cmd_buffer *iob, + unsigned int data_length) { struct qeth_channel *channel = iob->channel; u16 peer_level; @@ -1905,7 +1958,7 @@ static void qeth_idx_activate_write_channel_cb(struct qeth_card *card, } out: - qeth_notify_reply(iob->reply, rc); + qeth_notify_cmd(iob, rc); qeth_put_cmd(iob); } @@ -1923,6 +1976,7 @@ static void qeth_idx_setup_activate_cmd(struct qeth_card *card, ccw_device_get_id(CARD_DDEV(card), &dev_id); iob->finalize = qeth_idx_finalize_cmd; + port |= QETH_IDX_ACT_INVAL_FRAME; memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1); memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); @@ -2253,6 +2307,14 @@ static struct qeth_qdio_out_q *qeth_alloc_output_queue(void) return q; } +static void qeth_tx_completion_timer(struct 
timer_list *timer) +{ + struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer); + + napi_schedule(&queue->napi); + QETH_TXQ_STAT_INC(queue, completion_timer); +} + static int qeth_alloc_qdio_queues(struct qeth_card *card) { int i, j; @@ -2274,17 +2336,22 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card) /* outbound */ for (i = 0; i < card->qdio.no_out_queues; ++i) { - card->qdio.out_qs[i] = qeth_alloc_output_queue(); - if (!card->qdio.out_qs[i]) + struct qeth_qdio_out_q *queue; + + queue = qeth_alloc_output_queue(); + if (!queue) goto out_freeoutq; QETH_CARD_TEXT_(card, 2, "outq %i", i); - QETH_CARD_HEX(card, 2, &card->qdio.out_qs[i], sizeof(void *)); - card->qdio.out_qs[i]->card = card; - card->qdio.out_qs[i]->queue_no = i; + QETH_CARD_HEX(card, 2, &queue, sizeof(void *)); + card->qdio.out_qs[i] = queue; + queue->card = card; + queue->queue_no = i; + timer_setup(&queue->timer, qeth_tx_completion_timer, 0); + /* give outbound qeth_qdio_buffers their qdio_buffers */ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { - WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL); - if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j)) + WARN_ON(queue->bufs[j]); + if (qeth_init_qdio_out_buf(queue, j)) goto out_freeoutqbufs; } } @@ -2404,50 +2471,46 @@ static int qeth_mpc_initialize(struct qeth_card *card) rc = qeth_cm_enable(card); if (rc) { QETH_CARD_TEXT_(card, 2, "2err%d", rc); - goto out_qdio; + return rc; } rc = qeth_cm_setup(card); if (rc) { QETH_CARD_TEXT_(card, 2, "3err%d", rc); - goto out_qdio; + return rc; } rc = qeth_ulp_enable(card); if (rc) { QETH_CARD_TEXT_(card, 2, "4err%d", rc); - goto out_qdio; + return rc; } rc = qeth_ulp_setup(card); if (rc) { QETH_CARD_TEXT_(card, 2, "5err%d", rc); - goto out_qdio; + return rc; } rc = qeth_alloc_qdio_queues(card); if (rc) { QETH_CARD_TEXT_(card, 2, "5err%d", rc); - goto out_qdio; + return rc; } rc = qeth_qdio_establish(card); if (rc) { QETH_CARD_TEXT_(card, 2, "6err%d", rc); qeth_free_qdio_queues(card); - goto out_qdio; + return rc; } rc = qeth_qdio_activate(card); if (rc) { QETH_CARD_TEXT_(card, 2, "7err%d", rc); - goto out_qdio; + return rc; } rc = qeth_dm_act(card); if (rc) { QETH_CARD_TEXT_(card, 2, "8err%d", rc); - goto out_qdio; + return rc; } return 0; -out_qdio: - qeth_qdio_clear_card(card, !IS_IQD(card)); - qdio_free(CARD_DDEV(card)); - return rc; } void qeth_print_status_message(struct qeth_card *card) @@ -2558,7 +2621,8 @@ static int qeth_init_input_buffer(struct qeth_card *card, if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) { buf->rx_skb = netdev_alloc_skb(card->dev, - QETH_RX_PULL_LEN + ETH_HLEN); + ETH_HLEN + + sizeof(struct ipv6hdr)); if (!buf->rx_skb) return 1; } @@ -2587,7 +2651,19 @@ static int qeth_init_input_buffer(struct qeth_card *card, return 0; } -int qeth_init_qdio_queues(struct qeth_card *card) +static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card, + struct qeth_qdio_out_q *queue) +{ + if (!IS_IQD(card) || + qeth_iqd_is_mcast_queue(card, queue) || + card->options.cq == QETH_CQ_ENABLED || + qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd)) + return 1; + + return card->ssqd.mmwc ? 
card->ssqd.mmwc : 1; +} + +static int qeth_init_qdio_queues(struct qeth_card *card) { unsigned int i; int rc; @@ -2624,13 +2700,17 @@ int qeth_init_qdio_queues(struct qeth_card *card) queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card); queue->next_buf_to_fill = 0; queue->do_pack = 0; + queue->prev_hdr = NULL; + queue->bulk_start = 0; + queue->bulk_count = 0; + queue->bulk_max = qeth_tx_select_bulk_max(card, queue); atomic_set(&queue->used_buffers, 0); atomic_set(&queue->set_pci_flags_count, 0); atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); + netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i)); } return 0; } -EXPORT_SYMBOL_GPL(qeth_init_qdio_queues); static void qeth_ipa_finalize_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob) @@ -2638,12 +2718,13 @@ static void qeth_ipa_finalize_cmd(struct qeth_card *card, qeth_mpc_finalize_cmd(card, iob); /* override with IPA-specific values: */ - __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa; - iob->reply->seqno = card->seqno.ipa++; + __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++; } void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, - u16 cmd_length) + u16 cmd_length, + bool (*match)(struct qeth_cmd_buffer *iob, + struct qeth_cmd_buffer *reply)) { u8 prot_type = qeth_mpc_select_prot_type(card); u16 total_length = iob->length; @@ -2651,6 +2732,7 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length, iob->data); iob->finalize = qeth_ipa_finalize_cmd; + iob->match = match; memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2); @@ -2663,6 +2745,14 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, } EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); +static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob, + struct qeth_cmd_buffer *reply) +{ + struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply); + + return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno); +} + struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card, enum qeth_ipa_cmds cmd_code, enum qeth_prot_versions prot, @@ -2678,7 +2768,7 @@ struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card, if (!iob) return NULL; - qeth_prepare_ipa_cmd(card, iob, data_length); + qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply); hdr = &__ipa_cmd(iob)->hdr; hdr->command = cmd_code; @@ -2777,7 +2867,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card, cmd->data.setadapterparms.data.query_cmds_supp.lan_type; QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type); } - card->options.adp.supported_funcs = + card->options.adp.supported = cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds; return 0; } @@ -2833,8 +2923,8 @@ static int qeth_query_ipassists_cb(struct qeth_card *card, case IPA_RC_NOTSUPP: case IPA_RC_L2_UNSUPPORTED_CMD: QETH_CARD_TEXT(card, 2, "ipaunsup"); - card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS; - card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS; + card->options.ipa4.supported |= IPA_SETADAPTERPARMS; + card->options.ipa6.supported |= IPA_SETADAPTERPARMS; return -EOPNOTSUPP; default: QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n", @@ -2842,13 +2932,11 @@ static int qeth_query_ipassists_cb(struct qeth_card *card, return -EIO; } - if (cmd->hdr.prot_version == QETH_PROT_IPV4) { - card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; - 
card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; - } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) { - card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; - card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; - } else + if (cmd->hdr.prot_version == QETH_PROT_IPV4) + card->options.ipa4 = cmd->hdr.assists; + else if (cmd->hdr.prot_version == QETH_PROT_IPV6) + card->options.ipa6 = cmd->hdr.assists; + else QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n", CARD_DEVID(card)); return 0; @@ -3016,7 +3104,6 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) } return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL); } -EXPORT_SYMBOL_GPL(qeth_hw_trap); static int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf, @@ -3031,7 +3118,7 @@ static int qeth_check_qdio_errors(struct qeth_card *card, buf->element[14].sflags); QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); if ((buf->element[15].sflags) == 0x12) { - QETH_CARD_STAT_INC(card, rx_dropped); + QETH_CARD_STAT_INC(card, rx_fifo_errors); return 0; } else return 1; @@ -3058,7 +3145,7 @@ static void qeth_queue_input_buffer(struct qeth_card *card, int index) for (i = queue->next_buf_to_init; i < queue->next_buf_to_init + count; ++i) { if (qeth_init_input_buffer(card, - &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) { + &queue->bufs[QDIO_BUFNR(i)])) { break; } else { newcount++; @@ -3100,8 +3187,8 @@ static void qeth_queue_input_buffer(struct qeth_card *card, int index) if (rc) { QETH_CARD_TEXT(card, 2, "qinberr"); } - queue->next_buf_to_init = (queue->next_buf_to_init + count) % - QDIO_MAX_BUFFERS_PER_Q; + queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init + + count); } } @@ -3149,7 +3236,7 @@ static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue) /* it's a packing buffer */ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); queue->next_buf_to_fill = - (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q; + QDIO_BUFNR(queue->next_buf_to_fill + 1); return 1; } return 0; @@ -3196,13 +3283,15 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, int count) { + struct qeth_card *card = queue->card; struct qeth_qdio_out_buffer *buf; int rc; int i; unsigned int qdio_flags; for (i = index; i < index + count; ++i) { - int bidx = i % QDIO_MAX_BUFFERS_PER_Q; + unsigned int bidx = QDIO_BUFNR(i); + buf = queue->bufs[bidx]; buf->buffer->element[buf->next_element_to_fill - 1].eflags |= SBAL_EFLAGS_LAST_ENTRY; @@ -3239,14 +3328,17 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, } } - QETH_TXQ_STAT_ADD(queue, bufs, count); qdio_flags = QDIO_FLAG_SYNC_OUTPUT; if (atomic_read(&queue->set_pci_flags_count)) qdio_flags |= QDIO_FLAG_PCI_OUT; rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, queue->queue_no, index, count); + + /* Fake the TX completion interrupt: */ + if (IS_IQD(card)) + napi_schedule(&queue->napi); + if (rc) { - QETH_TXQ_STAT_ADD(queue, tx_errors, count); /* ignore temporary SIGA errors without busy condition */ if (rc == -ENOBUFS) return; @@ -3263,6 +3355,15 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, } } +static void qeth_flush_queue(struct qeth_qdio_out_q *queue) +{ + qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count); + + queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count); + queue->prev_hdr = NULL; + queue->bulk_count = 0; +} + static void 
qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) { int index; @@ -3305,7 +3406,7 @@ static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue, struct qeth_card *card = (struct qeth_card *)card_ptr; if (card->dev->flags & IFF_UP) - napi_schedule(&card->napi); + napi_schedule_irqoff(&card->napi); } int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) @@ -3321,11 +3422,6 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) goto out; } - if (card->state != CARD_STATE_DOWN) { - rc = -1; - goto out; - } - qeth_free_qdio_queues(card); card->options.cq = cq; rc = 0; @@ -3358,8 +3454,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, } for (i = first_element; i < first_element + count; ++i) { - int bidx = i % QDIO_MAX_BUFFERS_PER_Q; - struct qdio_buffer *buffer = cq->qdio_bufs[bidx]; + struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)]; int e = 0; while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && @@ -3380,8 +3475,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, "QDIO reported an error, rc=%i\n", rc); QETH_CARD_TEXT(card, 2, "qcqherr"); } - card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init - + count) % QDIO_MAX_BUFFERS_PER_Q; + + cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count); } static void qeth_qdio_input_handler(struct ccw_device *ccwdev, @@ -3407,7 +3502,6 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, { struct qeth_card *card = (struct qeth_card *) card_ptr; struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; - struct qeth_qdio_out_buffer *buffer; struct net_device *dev = card->dev; struct netdev_queue *txq; int i; @@ -3421,51 +3515,15 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, } for (i = first_element; i < (first_element + count); ++i) { - int bidx = i % QDIO_MAX_BUFFERS_PER_Q; - buffer = queue->bufs[bidx]; - qeth_handle_send_error(card, buffer, qdio_error); - - if (queue->bufstates && - (queue->bufstates[bidx].flags & - QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) { - WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED); - - if (atomic_cmpxchg(&buffer->state, - QETH_QDIO_BUF_PRIMED, - QETH_QDIO_BUF_PENDING) == - QETH_QDIO_BUF_PRIMED) { - qeth_notify_skbs(queue, buffer, - TX_NOTIFY_PENDING); - } - QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx); - - /* prepare the queue slot for re-use: */ - qeth_scrub_qdio_buffer(buffer->buffer, - queue->max_elements); - if (qeth_init_qdio_out_buf(queue, bidx)) { - QETH_CARD_TEXT(card, 2, "outofbuf"); - qeth_schedule_recovery(card); - } - } else { - if (card->options.cq == QETH_CQ_ENABLED) { - enum iucv_tx_notify n; - - n = qeth_compute_cq_notification( - buffer->buffer->element[15].sflags, 0); - qeth_notify_skbs(queue, buffer, n); - } + struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)]; - qeth_clear_output_buffer(queue, buffer); - } - qeth_cleanup_handled_pending(queue, bidx, 0); + qeth_handle_send_error(card, buf, qdio_error); + qeth_clear_output_buffer(queue, buf, qdio_error, 0); } + atomic_sub(count, &queue->used_buffers); - /* check if we need to do something on this outbound queue */ - if (!IS_IQD(card)) - qeth_check_outbound_queue(queue); + qeth_check_outbound_queue(queue); - if (IS_IQD(card)) - __queue = qeth_iqd_translate_txq(dev, __queue); txq = netdev_get_tx_queue(dev, __queue); /* xmit may have observed the full-condition, but not yet stopped the * txq. In which case the code below won't trigger. 
So before returning, @@ -3535,7 +3593,7 @@ static int qeth_get_elements_for_frags(struct sk_buff *skb) int cnt, elements = 0; for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; elements += qeth_get_elements_for_range( (addr_t)skb_frag_address(frag), @@ -3654,9 +3712,32 @@ check_layout: return 0; } -static void __qeth_fill_buffer(struct sk_buff *skb, - struct qeth_qdio_out_buffer *buf, - bool is_first_elem, unsigned int offset) +static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue, + struct sk_buff *curr_skb, + struct qeth_hdr *curr_hdr) +{ + struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start]; + struct qeth_hdr *prev_hdr = queue->prev_hdr; + + if (!prev_hdr) + return true; + + /* All packets must have the same target: */ + if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { + struct sk_buff *prev_skb = skb_peek(&buffer->skb_list); + + return ether_addr_equal(eth_hdr(prev_skb)->h_dest, + eth_hdr(curr_skb)->h_dest) && + qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2); + } + + return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) && + qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3); +} + +static unsigned int __qeth_fill_buffer(struct sk_buff *skb, + struct qeth_qdio_out_buffer *buf, + bool is_first_elem, unsigned int offset) { struct qdio_buffer *buffer = buf->buffer; int element = buf->next_element_to_fill; @@ -3713,24 +3794,21 @@ static void __qeth_fill_buffer(struct sk_buff *skb, if (buffer->element[element - 1].eflags) buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; buf->next_element_to_fill = element; + return element; } /** * qeth_fill_buffer() - map skb into an output buffer - * @queue: QDIO queue to submit the buffer on * @buf: buffer to transport the skb * @skb: skb to map into the buffer * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated * from qeth_core_header_cache. * @offset: when mapping the skb, start at skb->data + offset * @hd_len: if > 0, build a dedicated header element of this size - * flush: Prepare the buffer to be flushed, regardless of its fill level. */ -static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf, - struct sk_buff *skb, struct qeth_hdr *hdr, - unsigned int offset, unsigned int hd_len, - bool flush) +static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf, + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned int offset, unsigned int hd_len) { struct qdio_buffer *buffer = buf->buffer; bool is_first_elem = true; @@ -3750,35 +3828,23 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, buf->next_element_to_fill++; } - __qeth_fill_buffer(skb, buf, is_first_elem, offset); - - if (!queue->do_pack) { - QETH_CARD_TEXT(queue->card, 6, "fillbfnp"); - } else { - QETH_CARD_TEXT(queue->card, 6, "fillbfpa"); - - QETH_TXQ_STAT_INC(queue, skbs_pack); - /* If the buffer still has free elements, keep using it. 
*/ - if (!flush && - buf->next_element_to_fill < queue->max_elements) - return 0; - } - - /* flush out the buffer */ - atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); - queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % - QDIO_MAX_BUFFERS_PER_Q; - return 1; + return __qeth_fill_buffer(skb, buf, is_first_elem, offset); } -static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, - struct sk_buff *skb, struct qeth_hdr *hdr, - unsigned int offset, unsigned int hd_len) +static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue, + struct sk_buff *skb, unsigned int elements, + struct qeth_hdr *hdr, unsigned int offset, + unsigned int hd_len) { - int index = queue->next_buf_to_fill; - struct qeth_qdio_out_buffer *buffer = queue->bufs[index]; + unsigned int bytes = qdisc_pkt_len(skb); + struct qeth_qdio_out_buffer *buffer; + unsigned int next_element; struct netdev_queue *txq; bool stopped = false; + bool flush; + + buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)]; + txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); /* Just a sanity check, the wake/stop logic should ensure that we always * get a free buffer. @@ -3786,9 +3852,31 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) return -EBUSY; - txq = netdev_get_tx_queue(queue->card->dev, skb_get_queue_mapping(skb)); + flush = !qeth_iqd_may_bulk(queue, skb, hdr); - if (atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { + if (flush || + (buffer->next_element_to_fill + elements > queue->max_elements)) { + if (buffer->next_element_to_fill > 0) { + atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); + queue->bulk_count++; + } + + if (queue->bulk_count >= queue->bulk_max) + flush = true; + + if (flush) + qeth_flush_queue(queue); + + buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + + queue->bulk_count)]; + + /* Sanity-check again: */ + if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) + return -EBUSY; + } + + if (buffer->next_element_to_fill == 0 && + atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { /* If a TX completion happens right _here_ and misses to wake * the txq, then our re-check below will catch the race. 
*/ @@ -3797,8 +3885,23 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, stopped = true; } - qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, stopped); - qeth_flush_buffers(queue, index, 1); + next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); + buffer->bytes += bytes; + queue->prev_hdr = hdr; + + flush = __netdev_tx_sent_queue(txq, bytes, + !stopped && netdev_xmit_more()); + + if (flush || next_element >= queue->max_elements) { + atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); + queue->bulk_count++; + + if (queue->bulk_count >= queue->bulk_max) + flush = true; + + if (flush) + qeth_flush_queue(queue); + } if (stopped && !qeth_out_queue_is_full(queue)) netif_tx_start_queue(txq); @@ -3811,6 +3914,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, int elements_needed) { struct qeth_qdio_out_buffer *buffer; + unsigned int next_element; struct netdev_queue *txq; bool stopped = false; int start_index; @@ -3846,8 +3950,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); flush_count++; queue->next_buf_to_fill = - (queue->next_buf_to_fill + 1) % - QDIO_MAX_BUFFERS_PER_Q; + QDIO_BUFNR(queue->next_buf_to_fill + 1); buffer = queue->bufs[queue->next_buf_to_fill]; /* We stepped forward, so sanity-check again: */ @@ -3873,8 +3976,17 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, stopped = true; } - flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, - stopped); + next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); + + if (queue->do_pack) + QETH_TXQ_STAT_INC(queue, skbs_pack); + if (!queue->do_pack || stopped || next_element >= queue->max_elements) { + flush_count++; + atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); + queue->next_buf_to_fill = + QDIO_BUFNR(queue->next_buf_to_fill + 1); + } + if (flush_count) qeth_flush_buffers(queue, start_index, flush_count); else if (!atomic_read(&queue->set_pci_flags_count)) @@ -3941,7 +4053,6 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, unsigned int hd_len = 0; unsigned int elements; int push_len, rc; - bool is_sg; if (is_tso) { hw_hdr_len = sizeof(struct qeth_hdr_tso); @@ -3970,10 +4081,9 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, frame_len - proto_len, skb, proto_len); - is_sg = skb_is_nonlinear(skb); if (IS_IQD(card)) { - rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset, - hd_len); + rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset, + hd_len); } else { /* TODO: drop skb_orphan() once TX completion is fast enough */ skb_orphan(skb); @@ -3981,18 +4091,9 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, hd_len, elements); } - if (!rc) { - QETH_TXQ_STAT_ADD(queue, buf_elements, elements); - if (is_sg) - QETH_TXQ_STAT_INC(queue, skbs_sg); - if (is_tso) { - QETH_TXQ_STAT_INC(queue, skbs_tso); - QETH_TXQ_STAT_ADD(queue, tso_bytes, frame_len); - } - } else { - if (!push_len) - kmem_cache_free(qeth_core_header_cache, hdr); - } + if (rc && !push_len) + kmem_cache_free(qeth_core_header_cache, hdr); + return rc; } EXPORT_SYMBOL_GPL(qeth_xmit); @@ -4014,23 +4115,14 @@ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, return (cmd->hdr.return_code) ? 
-EIO : 0; } -void qeth_setadp_promisc_mode(struct qeth_card *card) +void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable) { - enum qeth_ipa_promisc_modes mode; - struct net_device *dev = card->dev; + enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON : + SET_PROMISC_MODE_OFF; struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; QETH_CARD_TEXT(card, 4, "setprom"); - - if (((dev->flags & IFF_PROMISC) && - (card->info.promisc_mode == SET_PROMISC_MODE_ON)) || - (!(dev->flags & IFF_PROMISC) && - (card->info.promisc_mode == SET_PROMISC_MODE_OFF))) - return; - mode = SET_PROMISC_MODE_OFF; - if (dev->flags & IFF_PROMISC) - mode = SET_PROMISC_MODE_ON; QETH_CARD_TEXT_(card, 4, "mode:%x", mode); iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, @@ -4220,9 +4312,8 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback) } return rc; } -EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online); -void qeth_tx_timeout(struct net_device *dev) +void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct qeth_card *card; @@ -4275,7 +4366,9 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) case MII_NWAYTEST: /* N-way auto-neg test register */ break; case MII_RERRCOUNTER: /* rx error counter */ - rc = card->stats.rx_errors; + rc = card->stats.rx_length_errors + + card->stats.rx_frame_errors + + card->stats.rx_fifo_errors; break; case MII_SREVISION: /* silicon revision */ break; @@ -4298,20 +4391,16 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) } static int qeth_snmp_command_cb(struct qeth_card *card, - struct qeth_reply *reply, unsigned long sdata) + struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; - struct qeth_arp_query_info *qinfo; - unsigned char *data; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_arp_query_info *qinfo = reply->param; + struct qeth_ipacmd_setadpparms *adp_cmd; + unsigned int data_len; void *snmp_data; - __u16 data_len; QETH_CARD_TEXT(card, 3, "snpcmdcb"); - cmd = (struct qeth_ipa_cmd *) sdata; - data = (unsigned char *)((char *)cmd - reply->offset); - qinfo = (struct qeth_arp_query_info *) reply->param; - if (cmd->hdr.return_code) { QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); return -EIO; @@ -4322,15 +4411,14 @@ static int qeth_snmp_command_cb(struct qeth_card *card, QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); return -EIO; } - data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); - if (cmd->data.setadapterparms.hdr.seq_no == 1) { - snmp_data = &cmd->data.setadapterparms.data.snmp; - data_len -= offsetof(struct qeth_ipa_cmd, - data.setadapterparms.data.snmp); + + adp_cmd = &cmd->data.setadapterparms; + data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr); + if (adp_cmd->hdr.seq_no == 1) { + snmp_data = &adp_cmd->data.snmp; } else { - snmp_data = &cmd->data.setadapterparms.data.snmp.request; - data_len -= offsetof(struct qeth_ipa_cmd, - data.setadapterparms.data.snmp.request); + snmp_data = &adp_cmd->data.snmp.request; + data_len -= offsetof(struct qeth_snmp_cmd, request); } /* check if there is enough room in userspace */ @@ -4374,6 +4462,10 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata) get_user(req_len, &ureq->hdr.req_len)) return -EFAULT; + /* Sanitize user input, to avoid overflows in iob size calculation: */ + if (req_len > QETH_BUFSIZE) + return -EINVAL; + iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len); if (!iob) return -ENOMEM; 
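The last hunk above caps the user-supplied req_len before it enters the iob size calculation. A minimal standalone sketch of the same input-sanitization pattern in plain C (MAX_DATA, struct cmd_hdr and alloc_cmd are illustrative stand-ins, not driver API):

#include <stdint.h>
#include <stdlib.h>

#define MAX_DATA 4096u	/* stand-in for the QETH_BUFSIZE bound */

struct cmd_hdr {
	uint16_t req_len;	/* length field copied in from user space */
};

/*
 * Validate the untrusted length before it enters any size arithmetic:
 * sizeof(*hdr) + req_len is then bounded, so the allocation below can
 * neither wrap nor balloon past what one command buffer may carry.
 */
static void *alloc_cmd(const struct cmd_hdr *hdr, size_t *total)
{
	if (hdr->req_len > MAX_DATA)
		return NULL;	/* mirrors the driver's -EINVAL bail-out */

	*total = sizeof(*hdr) + hdr->req_len;
	return calloc(1, *total);
}

A 16-bit length cannot overflow a size_t addition by itself, but without the bound a crafted req_len would still oversize the command buffer; rejecting it up front keeps the failure a clean -EINVAL instead.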
@@ -4594,15 +4686,15 @@ EXPORT_SYMBOL_GPL(qeth_vm_request_mac); static void qeth_determine_capabilities(struct qeth_card *card) { + struct qeth_channel *channel = &card->data; + struct ccw_device *ddev = channel->ccwdev; int rc; - struct ccw_device *ddev; int ddev_offline = 0; QETH_CARD_TEXT(card, 2, "detcapab"); - ddev = CARD_DDEV(card); if (!ddev->online) { ddev_offline = 1; - rc = ccw_device_set_online(ddev); + rc = qeth_start_channel(channel); if (rc) { QETH_CARD_TEXT_(card, 2, "3err%d", rc); goto out; @@ -4638,7 +4730,7 @@ static void qeth_determine_capabilities(struct qeth_card *card) out_offline: if (ddev_offline == 1) - ccw_device_set_offline(ddev); + qeth_stop_channel(channel); out: return; } @@ -4675,8 +4767,7 @@ static int qeth_qdio_establish(struct qeth_card *card) QETH_CARD_TEXT(card, 2, "qdioest"); - qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q, - GFP_KERNEL); + qib_param_field = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL); if (!qib_param_field) { rc = -ENOMEM; goto out_free_nothing; @@ -4737,7 +4828,7 @@ static int qeth_qdio_establish(struct qeth_card *card) init_data.input_sbal_addr_array = in_sbal_ptrs; init_data.output_sbal_addr_array = out_sbal_ptrs; init_data.output_sbal_state_array = card->qdio.out_bufstates; - init_data.scan_threshold = IS_IQD(card) ? 1 : 32; + init_data.scan_threshold = IS_IQD(card) ? 0 : 32; if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { @@ -4778,12 +4869,8 @@ out_free_nothing: static void qeth_core_free_card(struct qeth_card *card) { QETH_CARD_TEXT(card, 2, "freecrd"); - qeth_clean_channel(&card->read); - qeth_clean_channel(&card->write); - qeth_clean_channel(&card->data); qeth_put_cmd(card->read_cmd); destroy_workqueue(card->event_wq); - qeth_free_qdio_queues(card); unregister_service_level(&card->qeth_service_level); dev_set_drvdata(&card->gdev->dev, NULL); kfree(card); @@ -4840,17 +4927,18 @@ retry: QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n", CARD_DEVID(card)); rc = qeth_qdio_clear_card(card, !IS_IQD(card)); - ccw_device_set_offline(CARD_DDEV(card)); - ccw_device_set_offline(CARD_WDEV(card)); - ccw_device_set_offline(CARD_RDEV(card)); + qeth_stop_channel(&card->data); + qeth_stop_channel(&card->write); + qeth_stop_channel(&card->read); qdio_free(CARD_DDEV(card)); - rc = ccw_device_set_online(CARD_RDEV(card)); + + rc = qeth_start_channel(&card->read); if (rc) goto retriable; - rc = ccw_device_set_online(CARD_WDEV(card)); + rc = qeth_start_channel(&card->write); if (rc) goto retriable; - rc = ccw_device_set_online(CARD_DDEV(card)); + rc = qeth_start_channel(&card->data); if (rc) goto retriable; retriable: @@ -4911,9 +4999,9 @@ retriable: *carrier_ok = true; } - card->options.ipa4.supported_funcs = 0; - card->options.ipa6.supported_funcs = 0; - card->options.adp.supported_funcs = 0; + card->options.ipa4.supported = 0; + card->options.ipa6.supported = 0; + card->options.adp.supported = 0; card->options.sbp.supported_funcs = 0; card->info.diagass_support = 0; rc = qeth_query_ipassists(card, QETH_PROT_IPV4); @@ -4933,11 +5021,24 @@ retriable: } if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { rc = qeth_query_setdiagass(card); - if (rc < 0) { + if (rc) QETH_CARD_TEXT_(card, 2, "8err%d", rc); - goto out; - } } + + if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) || + (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))) + card->info.hwtrap = 0; + + rc = qeth_set_access_ctrl_online(card, 0); + if (rc) + goto out; + + rc = 
qeth_init_qdio_queues(card); + if (rc) { + QETH_CARD_TEXT_(card, 2, "9err%d", rc); + goto out; + } + return 0; out: dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " @@ -4948,27 +5049,213 @@ out: } EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); -static void qeth_create_skb_frag(struct qdio_buffer_element *element, - struct sk_buff *skb, int offset, int data_len) +static int qeth_set_online(struct qeth_card *card) { - struct page *page = virt_to_page(element->addr); - unsigned int next_frag; + int rc; - /* first fill the linear space */ - if (!skb->len) { - unsigned int linear = min(data_len, skb_tailroom(skb)); + mutex_lock(&card->discipline_mutex); + mutex_lock(&card->conf_mutex); + QETH_CARD_TEXT(card, 2, "setonlin"); - skb_put_data(skb, element->addr + offset, linear); - data_len -= linear; - if (!data_len) - return; - offset += linear; - /* fall through to add page frag for remaining data */ + rc = card->discipline->set_online(card); + + mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); + + return rc; +} + +int qeth_set_offline(struct qeth_card *card, bool resetting) +{ + int rc, rc2, rc3; + + mutex_lock(&card->discipline_mutex); + mutex_lock(&card->conf_mutex); + QETH_CARD_TEXT(card, 3, "setoffl"); + + if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) { + qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); + card->info.hwtrap = 1; + } + + rtnl_lock(); + card->info.open_when_online = card->dev->flags & IFF_UP; + dev_close(card->dev); + netif_device_detach(card->dev); + netif_carrier_off(card->dev); + rtnl_unlock(); + + card->discipline->set_offline(card); + + rc = qeth_stop_channel(&card->data); + rc2 = qeth_stop_channel(&card->write); + rc3 = qeth_stop_channel(&card->read); + if (!rc) + rc = (rc2) ? rc2 : rc3; + if (rc) + QETH_CARD_TEXT_(card, 2, "1err%d", rc); + qdio_free(CARD_DDEV(card)); + + /* let user_space know that device is offline */ + kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); + + mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); + return 0; +} +EXPORT_SYMBOL_GPL(qeth_set_offline); + +static int qeth_do_reset(void *data) +{ + struct qeth_card *card = data; + int rc; + + QETH_CARD_TEXT(card, 2, "recover1"); + if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) + return 0; + QETH_CARD_TEXT(card, 2, "recover2"); + dev_warn(&card->gdev->dev, + "A recovery process has been started for the device\n"); + + qeth_set_offline(card, true); + rc = qeth_set_online(card); + if (!rc) { + dev_info(&card->gdev->dev, + "Device successfully recovered!\n"); + } else { + ccwgroup_set_offline(card->gdev); + dev_warn(&card->gdev->dev, + "The qeth device driver failed to recover an error on the device\n"); + } + qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); + qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); + return 0; +} + +#if IS_ENABLED(CONFIG_QETH_L3) +static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, + struct qeth_hdr *hdr) +{ + struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data; + struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3; + struct net_device *dev = skb->dev; + + if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) { + dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr, + "FAKELL", skb->len); + return; + } + + if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) { + u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? 
ETH_P_IPV6 : + ETH_P_IP; + unsigned char tg_addr[ETH_ALEN]; + + skb_reset_network_header(skb); + switch (l3_hdr->flags & QETH_HDR_CAST_MASK) { + case QETH_CAST_MULTICAST: + if (prot == ETH_P_IP) + ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr); + else + ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr); + QETH_CARD_STAT_INC(card, rx_multicast); + break; + case QETH_CAST_BROADCAST: + ether_addr_copy(tg_addr, dev->broadcast); + QETH_CARD_STAT_INC(card, rx_multicast); + break; + default: + if (card->options.sniffer) + skb->pkt_type = PACKET_OTHERHOST; + ether_addr_copy(tg_addr, dev->dev_addr); + } + + if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR) + dev_hard_header(skb, dev, prot, tg_addr, + &l3_hdr->next_hop.rx.src_mac, skb->len); + else + dev_hard_header(skb, dev, prot, tg_addr, "FAKELL", + skb->len); + } + + /* copy VLAN tag from hdr into skb */ + if (!card->options.sniffer && + (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME | + QETH_HDR_EXT_INCLUDE_VLAN_TAG))) { + u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ? + l3_hdr->vlan_id : + l3_hdr->next_hop.rx.vlan_id; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); + } +} +#endif + +static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb, + struct qeth_hdr *hdr, bool uses_frags) +{ + struct napi_struct *napi = &card->napi; + bool is_cso; + + switch (hdr->hdr.l2.id) { + case QETH_HEADER_TYPE_OSN: + skb_push(skb, sizeof(*hdr)); + skb_copy_to_linear_data(skb, hdr, sizeof(*hdr)); + QETH_CARD_STAT_ADD(card, rx_bytes, skb->len); + QETH_CARD_STAT_INC(card, rx_packets); + + card->osn_info.data_cb(skb); + return; +#if IS_ENABLED(CONFIG_QETH_L3) + case QETH_HEADER_TYPE_LAYER3: + qeth_l3_rebuild_skb(card, skb, hdr); + is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ; + break; +#endif + case QETH_HEADER_TYPE_LAYER2: + is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ; + break; + default: + /* never happens */ + if (uses_frags) + napi_free_frags(napi); + else + dev_kfree_skb_any(skb); + return; + } + + if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + QETH_CARD_STAT_INC(card, rx_skb_csum); + } else { + skb->ip_summed = CHECKSUM_NONE; + } + + QETH_CARD_STAT_ADD(card, rx_bytes, skb->len); + QETH_CARD_STAT_INC(card, rx_packets); + if (skb_is_nonlinear(skb)) { + QETH_CARD_STAT_INC(card, rx_sg_skbs); + QETH_CARD_STAT_ADD(card, rx_sg_frags, + skb_shinfo(skb)->nr_frags); + } + + if (uses_frags) { + napi_gro_frags(napi); + } else { + skb->protocol = eth_type_trans(skb, skb->dev); + napi_gro_receive(napi, skb); } +} + +static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len) +{ + struct page *page = virt_to_page(data); + unsigned int next_frag; next_frag = skb_shinfo(skb)->nr_frags; get_page(page); - skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len); + skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len, + data_len); } static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) @@ -4976,120 +5263,214 @@ static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); } -struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, - struct qeth_qdio_buffer *qethbuffer, - struct qdio_buffer_element **__element, int *__offset, - struct qeth_hdr **hdr) +static int qeth_extract_skb(struct qeth_card *card, + struct qeth_qdio_buffer *qethbuffer, + struct qdio_buffer_element **__element, + int *__offset) { struct qdio_buffer_element *element = 
*__element; struct qdio_buffer *buffer = qethbuffer->buffer; + struct napi_struct *napi = &card->napi; + unsigned int linear_len = 0; + bool uses_frags = false; int offset = *__offset; + bool use_rx_sg = false; + unsigned int headroom; + struct qeth_hdr *hdr; struct sk_buff *skb; int skb_len = 0; - void *data_ptr; - int data_len; - int headroom = 0; - int use_rx_sg = 0; +next_packet: /* qeth_hdr must not cross element boundaries */ while (element->length < offset + sizeof(struct qeth_hdr)) { if (qeth_is_last_sbale(element)) - return NULL; + return -ENODATA; element++; offset = 0; } - *hdr = element->addr + offset; - offset += sizeof(struct qeth_hdr); - switch ((*hdr)->hdr.l2.id) { + hdr = element->addr + offset; + offset += sizeof(*hdr); + skb = NULL; + + switch (hdr->hdr.l2.id) { case QETH_HEADER_TYPE_LAYER2: - skb_len = (*hdr)->hdr.l2.pkt_length; + skb_len = hdr->hdr.l2.pkt_length; + linear_len = ETH_HLEN; + headroom = 0; break; case QETH_HEADER_TYPE_LAYER3: - skb_len = (*hdr)->hdr.l3.length; + skb_len = hdr->hdr.l3.length; + if (!IS_LAYER3(card)) { + QETH_CARD_STAT_INC(card, rx_dropped_notsupp); + goto walk_packet; + } + + if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) { + linear_len = ETH_HLEN; + headroom = 0; + break; + } + + if (hdr->hdr.l3.flags & QETH_HDR_IPV6) + linear_len = sizeof(struct ipv6hdr); + else + linear_len = sizeof(struct iphdr); headroom = ETH_HLEN; break; case QETH_HEADER_TYPE_OSN: - skb_len = (*hdr)->hdr.osn.pdu_length; + skb_len = hdr->hdr.osn.pdu_length; + if (!IS_OSN(card)) { + QETH_CARD_STAT_INC(card, rx_dropped_notsupp); + goto walk_packet; + } + + linear_len = skb_len; headroom = sizeof(struct qeth_hdr); break; default: - break; + if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL) + QETH_CARD_STAT_INC(card, rx_frame_errors); + else + QETH_CARD_STAT_INC(card, rx_dropped_notsupp); + + /* Can't determine packet length, drop the whole buffer. */ + return -EPROTONOSUPPORT; } - if (!skb_len) - return NULL; + if (skb_len < linear_len) { + QETH_CARD_STAT_INC(card, rx_dropped_runt); + goto walk_packet; + } - if (((skb_len >= card->options.rx_sg_cb) && - !IS_OSN(card) && - (!atomic_read(&card->force_alloc_skb))) || - (card->options.cq == QETH_CQ_ENABLED)) - use_rx_sg = 1; + use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) || + ((skb_len >= card->options.rx_sg_cb) && + !atomic_read(&card->force_alloc_skb) && + !IS_OSN(card)); - if (use_rx_sg && qethbuffer->rx_skb) { + if (use_rx_sg) { /* QETH_CQ_ENABLED only: */ - skb = qethbuffer->rx_skb; - qethbuffer->rx_skb = NULL; - } else { - unsigned int linear = (use_rx_sg) ? QETH_RX_PULL_LEN : skb_len; + if (qethbuffer->rx_skb && + skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) { + skb = qethbuffer->rx_skb; + qethbuffer->rx_skb = NULL; + goto use_skb; + } - skb = napi_alloc_skb(&card->napi, linear + headroom); + skb = napi_get_frags(napi); + if (!skb) { + /* -ENOMEM, no point in falling back further. */ + QETH_CARD_STAT_INC(card, rx_dropped_nomem); + goto walk_packet; + } + + if (skb_tailroom(skb) >= linear_len + headroom) { + uses_frags = true; + goto use_skb; + } + + netdev_info_once(card->dev, + "Insufficient linear space in NAPI frags skb, need %u but have %u\n", + linear_len + headroom, skb_tailroom(skb)); + /* Shouldn't happen. Don't optimize, fall back to linear skb. 
*/ } - if (!skb) - goto no_mem; + + linear_len = skb_len; + skb = napi_alloc_skb(napi, linear_len + headroom); + if (!skb) { + QETH_CARD_STAT_INC(card, rx_dropped_nomem); + goto walk_packet; + } + +use_skb: if (headroom) skb_reserve(skb, headroom); - - data_ptr = element->addr + offset; +walk_packet: while (skb_len) { - data_len = min(skb_len, (int)(element->length - offset)); - if (data_len) { - if (use_rx_sg) - qeth_create_skb_frag(element, skb, offset, - data_len); - else - skb_put_data(skb, data_ptr, data_len); - } + int data_len = min(skb_len, (int)(element->length - offset)); + char *data = element->addr + offset; + skb_len -= data_len; + offset += data_len; + + /* Extract data from current element: */ + if (skb && data_len) { + if (linear_len) { + unsigned int copy_len; + + copy_len = min_t(unsigned int, linear_len, + data_len); + + skb_put_data(skb, data, copy_len); + linear_len -= copy_len; + data_len -= copy_len; + data += copy_len; + } + + if (data_len) + qeth_create_skb_frag(skb, data, data_len); + } + + /* Step forward to next element: */ if (skb_len) { if (qeth_is_last_sbale(element)) { QETH_CARD_TEXT(card, 4, "unexeob"); QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); - dev_kfree_skb_any(skb); - QETH_CARD_STAT_INC(card, rx_errors); - return NULL; + if (skb) { + if (uses_frags) + napi_free_frags(napi); + else + dev_kfree_skb_any(skb); + QETH_CARD_STAT_INC(card, + rx_length_errors); + } + return -EMSGSIZE; } element++; offset = 0; - data_ptr = element->addr; - } else { - offset += data_len; } } + + /* This packet was skipped, go get another one: */ + if (!skb) + goto next_packet; + *__element = element; *__offset = offset; - if (use_rx_sg) { - QETH_CARD_STAT_INC(card, rx_sg_skbs); - QETH_CARD_STAT_ADD(card, rx_sg_frags, - skb_shinfo(skb)->nr_frags); - } - return skb; -no_mem: - if (net_ratelimit()) { - QETH_CARD_TEXT(card, 2, "noskbmem"); + + qeth_receive_skb(card, skb, hdr, uses_frags); + return 0; +} + +static int qeth_extract_skbs(struct qeth_card *card, int budget, + struct qeth_qdio_buffer *buf, bool *done) +{ + int work_done = 0; + + WARN_ON_ONCE(!budget); + *done = false; + + while (budget) { + if (qeth_extract_skb(card, buf, &card->rx.b_element, + &card->rx.e_offset)) { + *done = true; + break; + } + + work_done++; + budget--; } - QETH_CARD_STAT_INC(card, rx_dropped); - return NULL; + + return work_done; } -EXPORT_SYMBOL_GPL(qeth_core_get_next_skb); int qeth_poll(struct napi_struct *napi, int budget) { struct qeth_card *card = container_of(napi, struct qeth_card, napi); int work_done = 0; struct qeth_qdio_buffer *buffer; - int done; int new_budget = budget; + bool done; while (1) { if (!card->rx.b_count) { @@ -5112,11 +5493,10 @@ int qeth_poll(struct napi_struct *napi, int budget) if (!(card->rx.qdio_err && qeth_check_qdio_errors(card, buffer->buffer, card->rx.qdio_err, "qinerr"))) - work_done += - card->discipline->process_rx_buffer( - card, new_budget, &done); + work_done += qeth_extract_skbs(card, new_budget, + buffer, &done); else - done = 1; + done = true; if (done) { QETH_CARD_STAT_INC(card, rx_bufs); @@ -5126,8 +5506,7 @@ int qeth_poll(struct napi_struct *napi, int budget) card->rx.b_count--; if (card->rx.b_count) { card->rx.b_index = - (card->rx.b_index + 1) % - QDIO_MAX_BUFFERS_PER_Q; + QDIO_BUFNR(card->rx.b_index + 1); card->rx.b_element = &card->qdio.in_q ->bufs[card->rx.b_index] @@ -5143,14 +5522,115 @@ int qeth_poll(struct napi_struct *napi, int budget) } } - napi_complete_done(napi, work_done); - if (qdio_start_irq(card->data.ccwdev, 0)) - 
napi_schedule(&card->napi); + if (napi_complete_done(napi, work_done) && + qdio_start_irq(CARD_DDEV(card), 0)) + napi_schedule(napi); out: return work_done; } EXPORT_SYMBOL_GPL(qeth_poll); +static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, + unsigned int bidx, bool error, int budget) +{ + struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx]; + u8 sflags = buffer->buffer->element[15].sflags; + struct qeth_card *card = queue->card; + + if (queue->bufstates && (queue->bufstates[bidx].flags & + QDIO_OUTBUF_STATE_FLAG_PENDING)) { + WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED); + + if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED, + QETH_QDIO_BUF_PENDING) == + QETH_QDIO_BUF_PRIMED) + qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING); + + QETH_CARD_TEXT_(card, 5, "pel%u", bidx); + + /* prepare the queue slot for re-use: */ + qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements); + if (qeth_init_qdio_out_buf(queue, bidx)) { + QETH_CARD_TEXT(card, 2, "outofbuf"); + qeth_schedule_recovery(card); + } + + return; + } + + if (card->options.cq == QETH_CQ_ENABLED) + qeth_notify_skbs(queue, buffer, + qeth_compute_cq_notification(sflags, 0)); + qeth_clear_output_buffer(queue, buffer, error, budget); +} + +static int qeth_tx_poll(struct napi_struct *napi, int budget) +{ + struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi); + unsigned int queue_no = queue->queue_no; + struct qeth_card *card = queue->card; + struct net_device *dev = card->dev; + unsigned int work_done = 0; + struct netdev_queue *txq; + + txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no)); + + while (1) { + unsigned int start, error, i; + unsigned int packets = 0; + unsigned int bytes = 0; + int completed; + + if (qeth_out_queue_is_empty(queue)) { + napi_complete(napi); + return 0; + } + + /* Give the CPU a breather: */ + if (work_done >= QDIO_MAX_BUFFERS_PER_Q) { + QETH_TXQ_STAT_INC(queue, completion_yield); + if (napi_complete_done(napi, 0)) + napi_schedule(napi); + return 0; + } + + completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false, + &start, &error); + if (completed <= 0) { + /* Ensure we see TX completion for pending work: */ + if (napi_complete_done(napi, 0)) + qeth_tx_arm_timer(queue); + return 0; + } + + for (i = start; i < start + completed; i++) { + struct qeth_qdio_out_buffer *buffer; + unsigned int bidx = QDIO_BUFNR(i); + + buffer = queue->bufs[bidx]; + packets += skb_queue_len(&buffer->skb_list); + bytes += buffer->bytes; + + qeth_handle_send_error(card, buffer, error); + qeth_iqd_tx_complete(queue, bidx, error, budget); + qeth_cleanup_handled_pending(queue, bidx, false); + } + + netdev_tx_completed_queue(txq, packets, bytes); + atomic_sub(completed, &queue->used_buffers); + work_done += completed; + + /* xmit may have observed the full-condition, but not yet + * stopped the txq. In which case the code below won't trigger. + * So before returning, xmit will re-check the txq's fill level + * and wake it up if needed. 
+ */ + if (netif_tx_queue_stopped(txq) && + !qeth_out_queue_is_full(queue)) + netif_tx_wake_queue(txq); + } +} + static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd) { if (!cmd->hdr.return_code) @@ -5185,9 +5665,9 @@ int qeth_setassparms_cb(struct qeth_card *card, cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; if (cmd->hdr.prot_version == QETH_PROT_IPV4) - card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; + card->options.ipa4.enabled = cmd->hdr.assists.enabled; if (cmd->hdr.prot_version == QETH_PROT_IPV6) - card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; + card->options.ipa6.enabled = cmd->hdr.assists.enabled; return 0; } EXPORT_SYMBOL_GPL(qeth_setassparms_cb); @@ -5563,6 +6043,8 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev) qeth_core_free_discipline(card); } + qeth_free_qdio_queues(card); + free_netdev(card->dev); qeth_core_free_card(card); put_device(&gdev->dev); @@ -5586,7 +6068,8 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev) goto err; } } - rc = card->discipline->set_online(gdev); + + rc = qeth_set_online(card); err: return rc; } @@ -5594,7 +6077,8 @@ err: static int qeth_core_set_offline(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - return card->discipline->set_offline(gdev); + + return qeth_set_offline(card, false); } static void qeth_core_shutdown(struct ccwgroup_device *gdev) @@ -5617,7 +6101,7 @@ static int qeth_suspend(struct ccwgroup_device *gdev) if (gdev->state == CCWGROUP_OFFLINE) return 0; - card->discipline->set_offline(gdev); + qeth_set_offline(card, false); return 0; } @@ -5626,7 +6110,7 @@ static int qeth_resume(struct ccwgroup_device *gdev) struct qeth_card *card = dev_get_drvdata(&gdev->dev); int rc; - rc = card->discipline->set_online(gdev); + rc = qeth_set_online(card); qeth_set_allowed_threads(card, 0xffffffff, 0); if (rc) @@ -6058,9 +6542,16 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->rx_packets = card->stats.rx_packets; stats->rx_bytes = card->stats.rx_bytes; - stats->rx_errors = card->stats.rx_errors; - stats->rx_dropped = card->stats.rx_dropped; + stats->rx_errors = card->stats.rx_length_errors + + card->stats.rx_frame_errors + + card->stats.rx_fifo_errors; + stats->rx_dropped = card->stats.rx_dropped_nomem + + card->stats.rx_dropped_notsupp + + card->stats.rx_dropped_runt; stats->multicast = card->stats.rx_multicast; + stats->rx_length_errors = card->stats.rx_length_errors; + stats->rx_frame_errors = card->stats.rx_frame_errors; + stats->rx_fifo_errors = card->stats.rx_fifo_errors; for (i = 0; i < card->qdio.no_out_queues; i++) { queue = card->qdio.out_qs[i]; @@ -6097,6 +6588,17 @@ int qeth_open(struct net_device *dev) napi_enable(&card->napi); local_bh_disable(); napi_schedule(&card->napi); + if (IS_IQD(card)) { + struct qeth_qdio_out_q *queue; + unsigned int i; + + qeth_for_each_output_queue(card, queue, i) { + netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll, + QETH_NAPI_WEIGHT); + napi_enable(&queue->napi); + napi_schedule(&queue->napi); + } + } /* kick-start the NAPI softirq: */ local_bh_enable(); return 0; @@ -6108,7 +6610,26 @@ int qeth_stop(struct net_device *dev) struct qeth_card *card = dev->ml_priv; QETH_CARD_TEXT(card, 4, "qethstop"); - netif_tx_disable(dev); + if (IS_IQD(card)) { + struct qeth_qdio_out_q *queue; + unsigned int i; + + /* Quiesce the NAPI instances: */ + qeth_for_each_output_queue(card, queue, i) { + napi_disable(&queue->napi); + del_timer_sync(&queue->timer); + } 
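/*
 * Note on the ordering in qeth_stop(): the per-queue NAPI instances are
 * quiesced first, since napi_disable() waits for a poll that is already
 * running and del_timer_sync() guarantees the completion timer cannot
 * re-arm a NAPI afterwards. Only once netif_tx_disable() has stopped
 * .ndo_start_xmit (which may still reference queue->napi) is it safe to
 * unregister the instances with netif_napi_del().
 */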
+ + /* Stop .ndo_start_xmit, might still access queue->napi. */ + netif_tx_disable(dev); + + /* Queues may get re-allocated, so remove the NAPIs here. */ + qeth_for_each_output_queue(card, queue, i) + netif_napi_del(&queue->napi); + } else { + netif_tx_disable(dev); + } + napi_disable(&card->napi); return 0; } diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 75b5834ed28d..3865f7258449 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -11,6 +11,7 @@ #include <asm/qeth.h> #include <uapi/linux/if_ether.h> +#include <uapi/linux/in6.h> #define IPA_PDU_HEADER_SIZE 0x40 #define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e) @@ -27,21 +28,6 @@ extern unsigned char IPA_PDU_HEADER[]; #define QETH_TIMEOUT (10 * HZ) #define QETH_IPA_TIMEOUT (45 * HZ) -#define QETH_IDX_COMMAND_SEQNO 0xffff0000 - -#define QETH_CLEAR_CHANNEL_PARM -10 -#define QETH_HALT_CHANNEL_PARM -11 - -static inline bool qeth_intparm_is_iob(unsigned long intparm) -{ - switch (intparm) { - case QETH_CLEAR_CHANNEL_PARM: - case QETH_HALT_CHANNEL_PARM: - case 0: - return false; - } - return true; -} /*****************************************************************************/ /* IP Assist related definitions */ @@ -67,6 +53,16 @@ static inline bool qeth_ipa_caps_enabled(struct qeth_ipa_caps *caps, u32 mask) return (caps->enabled & mask) == mask; } +#define qeth_adp_supported(c, f) \ + qeth_ipa_caps_supported(&c->options.adp, f) +#define qeth_is_supported(c, f) \ + qeth_ipa_caps_supported(&c->options.ipa4, f) +#define qeth_is_supported6(c, f) \ + qeth_ipa_caps_supported(&c->options.ipa6, f) +#define qeth_is_ipafunc_supported(c, prot, f) \ + ((prot == QETH_PROT_IPV6) ? qeth_is_supported6(c, f) : \ + qeth_is_supported(c, f)) + enum qeth_card_types { QETH_CARD_TYPE_OSD = 1, QETH_CARD_TYPE_IQD = 5, @@ -352,22 +348,21 @@ enum qeth_card_info_port_speed { /* (SET)DELIP(M) IPA stuff ***************************************************/ struct qeth_ipacmd_setdelip4 { - __u8 ip_addr[4]; - __u8 mask[4]; + __be32 addr; + __be32 mask; __u32 flags; } __attribute__ ((packed)); struct qeth_ipacmd_setdelip6 { - __u8 ip_addr[16]; - __u8 mask[16]; + struct in6_addr addr; + struct in6_addr prefix; __u32 flags; } __attribute__ ((packed)); struct qeth_ipacmd_setdelipm { __u8 mac[6]; __u8 padding[2]; - __u8 ip6[12]; - __u8 ip4[4]; + struct in6_addr ip; } __attribute__ ((packed)); struct qeth_ipacmd_layer2setdelmac { @@ -436,7 +431,7 @@ struct qeth_ipacmd_setassparms { } data; } __attribute__ ((packed)); -#define SETASS_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setassparms,\ +#define SETASS_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setassparms,\ data.field) /* SETRTG IPA Command: ****************************************************/ @@ -550,7 +545,7 @@ struct qeth_ipacmd_setadpparms { } data; } __attribute__ ((packed)); -#define SETADP_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setadpparms,\ +#define SETADP_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setadpparms,\ data.field) /* CREATE_ADDR IPA Command: ***********************************************/ @@ -663,7 +658,7 @@ struct qeth_ipacmd_vnicc { } data; }; -#define VNICC_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_vnicc,\ +#define VNICC_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_vnicc,\ data.field) /* SETBRIDGEPORT IPA Command: *********************************************/ @@ -744,7 +739,7 @@ struct qeth_ipacmd_setbridgeport { } data; } __packed; -#define SBP_DATA_SIZEOF(field) 
FIELD_SIZEOF(struct qeth_ipacmd_setbridgeport,\ +#define SBP_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setbridgeport,\ data.field) /* ADDRESS_CHANGE_NOTIFICATION adapter-initiated "command" *******************/ @@ -781,8 +776,7 @@ struct qeth_ipacmd_hdr { __u8 prim_version_no; __u8 param_count; __u16 prot_version; - __u32 ipa_supported; - __u32 ipa_enabled; + struct qeth_ipa_caps assists; } __attribute__ ((packed)); /* The IPA command itself */ @@ -805,7 +799,7 @@ struct qeth_ipa_cmd { } data; } __attribute__ ((packed)); -#define IPA_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipa_cmd, data.field) +#define IPA_DATA_SIZEOF(field) sizeof_field(struct qeth_ipa_cmd, data.field) /* * special command for ARP processing. @@ -901,6 +895,7 @@ extern unsigned char IDX_ACTIVATE_WRITE[]; #define IDX_ACTIVATE_SIZE 0x22 #define QETH_IDX_ACT_PNO(buffer) (buffer+0x0b) #define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer + 0x0c) +#define QETH_IDX_ACT_INVAL_FRAME 0x40 #define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b] & 0x80) #define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer + 0x10) #define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer + 0x16) @@ -913,6 +908,11 @@ extern unsigned char IDX_ACTIVATE_WRITE[]; #define QETH_IDX_ACT_ERR_AUTH 0x1E #define QETH_IDX_ACT_ERR_AUTH_USER 0x20 +#define QETH_IDX_TERMINATE 0xc0 +#define QETH_IDX_TERMINATE_MASK 0xc0 +#define QETH_IDX_TERM_BAD_TRANSPORT 0x41 +#define QETH_IDX_TERM_BAD_TRANSPORT_VM 0xf6 + #define PDU_ENCAPSULATION(buffer) \ (buffer + *(buffer + (*(buffer + 0x0b)) + \ *(buffer + *(buffer + 0x0b) + 0x11) + 0x07)) diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 9f392497d570..2bd9993aa60b 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -20,14 +20,10 @@ static ssize_t qeth_dev_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; switch (card->state) { case CARD_STATE_DOWN: return sprintf(buf, "DOWN\n"); - case CARD_STATE_HARDSETUP: - return sprintf(buf, "HARDSETUP\n"); case CARD_STATE_SOFTSETUP: if (card->dev->flags & IFF_UP) return sprintf(buf, "UP (LAN %s)\n", @@ -45,8 +41,6 @@ static ssize_t qeth_dev_chpid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; return sprintf(buf, "%02X\n", card->info.chpid); } @@ -57,8 +51,7 @@ static ssize_t qeth_dev_if_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; + return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card)); } @@ -68,8 +61,6 @@ static ssize_t qeth_dev_card_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; return sprintf(buf, "%s\n", qeth_get_cardname_short(card)); } @@ -94,8 +85,6 @@ static ssize_t qeth_dev_inbuf_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; return sprintf(buf, "%s\n", qeth_get_bufsize_str(card)); } @@ -106,8 +95,6 @@ static ssize_t qeth_dev_portno_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; return sprintf(buf, "%i\n", card->dev->dev_port); } @@ -120,9 +107,6 @@ static ssize_t qeth_dev_portno_store(struct 
device *dev, unsigned int portno, limit; int rc = 0; - if (!card) - return -EINVAL; - mutex_lock(&card->conf_mutex); if (card->state != CARD_STATE_DOWN) { rc = -EPERM; @@ -171,9 +155,6 @@ static ssize_t qeth_dev_prioqing_show(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - switch (card->qdio.do_prio_queueing) { case QETH_PRIO_Q_ING_PREC: return sprintf(buf, "%s\n", "by precedence"); @@ -195,9 +176,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, struct qeth_card *card = dev_get_drvdata(dev); int rc = 0; - if (!card) - return -EINVAL; - if (IS_IQD(card)) return -EOPNOTSUPP; @@ -227,7 +205,7 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; } else if (sysfs_streq(buf, "prio_queueing_vlan")) { if (IS_LAYER3(card)) { - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; goto out; } card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN; @@ -262,9 +240,6 @@ static ssize_t qeth_dev_bufcnt_show(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count); } @@ -276,9 +251,6 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev, int cnt, old_cnt; int rc = 0; - if (!card) - return -EINVAL; - mutex_lock(&card->conf_mutex); if (card->state != CARD_STATE_DOWN) { rc = -EPERM; @@ -307,9 +279,6 @@ static ssize_t qeth_dev_recover_store(struct device *dev, char *tmp; int i; - if (!card) - return -EINVAL; - if (!qeth_card_hw_is_reachable(card)) return -EPERM; @@ -325,11 +294,6 @@ static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store); static ssize_t qeth_dev_performance_stats_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct qeth_card *card = dev_get_drvdata(dev); - - if (!card) - return -EINVAL; - return sprintf(buf, "1\n"); } @@ -342,9 +306,6 @@ static ssize_t qeth_dev_performance_stats_store(struct device *dev, bool reset; int rc; - if (!card) - return -EINVAL; - rc = kstrtobool(buf, &reset); if (rc) return rc; @@ -370,9 +331,6 @@ static ssize_t qeth_dev_layer2_show(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - return sprintf(buf, "%i\n", card->options.layer); } @@ -385,9 +343,6 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, int i, rc = 0; enum qeth_discipline_id newdis; - if (!card) - return -EINVAL; - mutex_lock(&card->discipline_mutex); if (card->state != CARD_STATE_DOWN) { rc = -EPERM; @@ -453,9 +408,6 @@ static ssize_t qeth_dev_isolation_show(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - switch (card->options.isolation) { case ISOLATION_MODE_NONE: return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE); @@ -475,9 +427,6 @@ static ssize_t qeth_dev_isolation_store(struct device *dev, enum qeth_ipa_isolation_modes isolation; int rc = 0; - if (!card) - return -EINVAL; - mutex_lock(&card->conf_mutex); if (!IS_OSD(card) && !IS_OSX(card)) { rc = -EOPNOTSUPP; @@ -522,9 +471,6 @@ static ssize_t qeth_dev_switch_attrs_show(struct device *dev, struct qeth_switch_info sw_info; int rc = 0; - if (!card) - return -EINVAL; - if (!qeth_card_hw_is_reachable(card)) return sprintf(buf, "n/a\n"); @@ -555,8 +501,6 @@ static ssize_t qeth_hw_trap_show(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; if (card->info.hwtrap) return snprintf(buf, 5, "arm\n"); else @@ -570,9 +514,6 @@ static ssize_t 
qeth_hw_trap_store(struct device *dev, int rc = 0; int state = 0; - if (!card) - return -EINVAL; - mutex_lock(&card->conf_mutex); if (qeth_card_hw_is_reachable(card)) state = 1; @@ -607,24 +548,12 @@ static ssize_t qeth_hw_trap_store(struct device *dev, static DEVICE_ATTR(hw_trap, 0644, qeth_hw_trap_show, qeth_hw_trap_store); -static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value) -{ - - if (!card) - return -EINVAL; - - return sprintf(buf, "%i\n", value); -} - static ssize_t qeth_dev_blkt_store(struct qeth_card *card, const char *buf, size_t count, int *value, int max_value) { char *tmp; int i, rc = 0; - if (!card) - return -EINVAL; - mutex_lock(&card->conf_mutex); if (card->state != CARD_STATE_DOWN) { rc = -EPERM; @@ -645,7 +574,7 @@ static ssize_t qeth_dev_blkt_total_show(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total); + return sprintf(buf, "%i\n", card->info.blkt.time_total); } static ssize_t qeth_dev_blkt_total_store(struct device *dev, @@ -657,8 +586,6 @@ static ssize_t qeth_dev_blkt_total_store(struct device *dev, &card->info.blkt.time_total, 5000); } - - static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show, qeth_dev_blkt_total_store); @@ -667,7 +594,7 @@ static ssize_t qeth_dev_blkt_inter_show(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet); + return sprintf(buf, "%i\n", card->info.blkt.inter_packet); } static ssize_t qeth_dev_blkt_inter_store(struct device *dev, @@ -687,8 +614,7 @@ static ssize_t qeth_dev_blkt_inter_jumbo_show(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - return qeth_dev_blkt_show(buf, card, - card->info.blkt.inter_packet_jumbo); + return sprintf(buf, "%i\n", card->info.blkt.inter_packet_jumbo); } static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev, diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c index 4166eb29f0bd..ab59bc975719 100644 --- a/drivers/s390/net/qeth_ethtool.c +++ b/drivers/s390/net/qeth_ethtool.c @@ -39,6 +39,8 @@ static const struct qeth_stats txq_stats[] = { QETH_TXQ_STAT("TSO bytes", tso_bytes), QETH_TXQ_STAT("Packing mode switches", packing_mode_switch), QETH_TXQ_STAT("Queue stopped", stopped), + QETH_TXQ_STAT("Completion yield", completion_yield), + QETH_TXQ_STAT("Completion timer", completion_timer), }; static const struct qeth_stats card_stats[] = { @@ -47,6 +49,9 @@ static const struct qeth_stats card_stats[] = { QETH_CARD_STAT("rx0 SG skbs", rx_sg_skbs), QETH_CARD_STAT("rx0 SG page frags", rx_sg_frags), QETH_CARD_STAT("rx0 SG page allocs", rx_sg_alloc_page), + QETH_CARD_STAT("rx0 dropped, no memory", rx_dropped_nomem), + QETH_CARD_STAT("rx0 dropped, bad format", rx_dropped_notsupp), + QETH_CARD_STAT("rx0 dropped, runt", rx_dropped_runt), }; #define TXQ_STATS_LEN ARRAY_SIZE(txq_stats) diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h index ddc615b431a8..adf25c9fd2b3 100644 --- a/drivers/s390/net/qeth_l2.h +++ b/drivers/s390/net/qeth_l2.h @@ -13,7 +13,6 @@ extern const struct attribute_group *qeth_l2_attr_groups[]; int qeth_l2_create_device_attributes(struct device *); void qeth_l2_remove_device_attributes(struct device *); -void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card); int qeth_bridgeport_query_ports(struct qeth_card *card, enum qeth_sbp_roles *role, enum qeth_sbp_states *state); diff --git a/drivers/s390/net/qeth_l2_main.c 
b/drivers/s390/net/qeth_l2_main.c index cbead3d1b2fd..692bd2623401 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -24,7 +24,6 @@ #include "qeth_core.h" #include "qeth_l2.h" -static int qeth_l2_set_offline(struct ccwgroup_device *); static void qeth_bridgeport_query_support(struct qeth_card *card); static void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd); @@ -175,10 +174,8 @@ static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue, hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO; } else { hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2; - if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb->ip_summed == CHECKSUM_PARTIAL) qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv); - QETH_TXQ_STAT_INC(queue, skbs_csum); - } } /* set byte byte 3 to casting flags */ @@ -286,66 +283,15 @@ static void qeth_l2_stop_card(struct qeth_card *card) if (card->state == CARD_STATE_SOFTSETUP) { qeth_clear_ipacmd_list(card); - card->state = CARD_STATE_HARDSETUP; - } - if (card->state == CARD_STATE_HARDSETUP) { - qeth_qdio_clear_card(card, 0); qeth_drain_output_queues(card); - qeth_clear_working_pool_list(card); card->state = CARD_STATE_DOWN; } + qeth_qdio_clear_card(card, 0); + qeth_clear_working_pool_list(card); flush_workqueue(card->event_wq); card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; -} - -static int qeth_l2_process_inbound_buffer(struct qeth_card *card, - int budget, int *done) -{ - int work_done = 0; - struct sk_buff *skb; - struct qeth_hdr *hdr; - unsigned int len; - - *done = 0; - WARN_ON_ONCE(!budget); - while (budget) { - skb = qeth_core_get_next_skb(card, - &card->qdio.in_q->bufs[card->rx.b_index], - &card->rx.b_element, &card->rx.e_offset, &hdr); - if (!skb) { - *done = 1; - break; - } - switch (hdr->hdr.l2.id) { - case QETH_HEADER_TYPE_LAYER2: - skb->protocol = eth_type_trans(skb, skb->dev); - qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]); - len = skb->len; - napi_gro_receive(&card->napi, skb); - break; - case QETH_HEADER_TYPE_OSN: - if (IS_OSN(card)) { - skb_push(skb, sizeof(struct qeth_hdr)); - skb_copy_to_linear_data(skb, hdr, - sizeof(struct qeth_hdr)); - len = skb->len; - card->osn_info.data_cb(skb); - break; - } - /* Else, fall through */ - default: - dev_kfree_skb_any(skb); - QETH_CARD_TEXT(card, 3, "inbunkno"); - QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr)); - continue; - } - work_done++; - budget--; - QETH_CARD_STAT_INC(card, rx_packets); - QETH_CARD_STAT_ADD(card, rx_bytes, len); - } - return work_done; + card->info.promisc_mode = 0; } static int qeth_l2_request_initial_mac(struct qeth_card *card) @@ -439,23 +385,14 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) return 0; } -static void qeth_promisc_to_bridge(struct qeth_card *card) +static void qeth_l2_promisc_to_bridge(struct qeth_card *card, bool enable) { - struct net_device *dev = card->dev; - enum qeth_ipa_promisc_modes promisc_mode; int role; int rc; QETH_CARD_TEXT(card, 3, "pmisc2br"); - if (!card->options.sbp.reflect_promisc) - return; - promisc_mode = (dev->flags & IFF_PROMISC) ? 
SET_PROMISC_MODE_ON - : SET_PROMISC_MODE_OFF; - if (promisc_mode == card->info.promisc_mode) - return; - - if (promisc_mode == SET_PROMISC_MODE_ON) { + if (enable) { if (card->options.sbp.reflect_promisc_primary) role = QETH_SBP_ROLE_PRIMARY; else @@ -464,14 +401,30 @@ static void qeth_promisc_to_bridge(struct qeth_card *card) role = QETH_SBP_ROLE_NONE; rc = qeth_bridgeport_setrole(card, role); - QETH_CARD_TEXT_(card, 2, "bpm%c%04x", - (promisc_mode == SET_PROMISC_MODE_ON) ? '+' : '-', rc); + QETH_CARD_TEXT_(card, 2, "bpm%c%04x", enable ? '+' : '-', rc); if (!rc) { card->options.sbp.role = role; - card->info.promisc_mode = promisc_mode; + card->info.promisc_mode = enable; } +} + +static void qeth_l2_set_promisc_mode(struct qeth_card *card) +{ + bool enable = card->dev->flags & IFF_PROMISC; + + if (card->info.promisc_mode == enable) + return; + if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) { + qeth_setadp_promisc_mode(card, enable); + } else { + mutex_lock(&card->sbp_lock); + if (card->options.sbp.reflect_promisc) + qeth_l2_promisc_to_bridge(card, enable); + mutex_unlock(&card->sbp_lock); + } } + /* New MAC address is added to the hash table and marked to be written on card * only if there is not in the hash table storage already * @@ -539,10 +492,7 @@ static void qeth_l2_rx_mode_work(struct work_struct *work) } } - if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) - qeth_setadp_promisc_mode(card); - else - qeth_promisc_to_bridge(card); + qeth_l2_set_promisc_mode(card); } static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb, @@ -588,9 +538,10 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, struct qeth_card *card = dev->ml_priv; u16 txq = skb_get_queue_mapping(skb); struct qeth_qdio_out_q *queue; - int tx_bytes = skb->len; int rc; + if (!skb_is_gso(skb)) + qdisc_skb_cb(skb)->pkt_len = skb->len; if (IS_IQD(card)) txq = qeth_iqd_translate_txq(dev, txq); queue = card->qdio.out_qs[txq]; @@ -601,11 +552,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb), qeth_l2_fill_header); - if (!rc) { - QETH_TXQ_STAT_INC(queue, tx_packets); - QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes); + if (!rc) return NETDEV_TX_OK; - } QETH_TXQ_STAT_INC(queue, tx_dropped); kfree_skb(skb); @@ -635,6 +583,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev) int rc; qeth_l2_vnicc_set_defaults(card); + mutex_init(&card->sbp_lock); if (gdev->dev.type == &qeth_generic_devtype) { rc = qeth_l2_create_device_attributes(&gdev->dev); @@ -657,7 +606,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); if (cgdev->state == CCWGROUP_ONLINE) - qeth_l2_set_offline(cgdev); + qeth_set_offline(card, false); cancel_work_sync(&card->close_dev_work); if (qeth_netdev_is_registered(card->dev)) @@ -763,14 +712,6 @@ add_napi: return rc; } -static int qeth_l2_start_ipassists(struct qeth_card *card) -{ - /* configure isolation level */ - if (qeth_set_access_ctrl_online(card, 0)) - return -ENODEV; - return 0; -} - static void qeth_l2_trace_features(struct qeth_card *card) { /* Set BridgePort features */ @@ -783,17 +724,31 @@ static void qeth_l2_trace_features(struct qeth_card *card) sizeof(card->options.vnicc.sup_chars)); } -static int qeth_l2_set_online(struct ccwgroup_device *gdev) +static void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card) { - struct qeth_card *card = dev_get_drvdata(&gdev->dev); + if 
(!card->options.sbp.reflect_promisc && + card->options.sbp.role != QETH_SBP_ROLE_NONE) { + /* Conditional to avoid spurious error messages */ + qeth_bridgeport_setrole(card, card->options.sbp.role); + /* Let the callback function refresh the stored role value. */ + qeth_bridgeport_query_ports(card, &card->options.sbp.role, + NULL); + } + if (card->options.sbp.hostnotification) { + if (qeth_bridgeport_an_set(card, 1)) + card->options.sbp.hostnotification = 0; + } else { + qeth_bridgeport_an_set(card, 0); + } +} + +static int qeth_l2_set_online(struct qeth_card *card) +{ + struct ccwgroup_device *gdev = card->gdev; struct net_device *dev = card->dev; int rc = 0; bool carrier_ok; - mutex_lock(&card->discipline_mutex); - mutex_lock(&card->conf_mutex); - QETH_CARD_TEXT(card, 2, "setonlin"); - rc = qeth_core_hardsetup_card(card, &carrier_ok); if (rc) { QETH_CARD_TEXT_(card, 2, "2err%04x", rc); @@ -801,17 +756,14 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev) goto out_remove; } - if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) { - if (card->info.hwtrap && - qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)) - card->info.hwtrap = 0; - } else - card->info.hwtrap = 0; - + mutex_lock(&card->sbp_lock); qeth_bridgeport_query_support(card); - if (card->options.sbp.supported_funcs) + if (card->options.sbp.supported_funcs) { + qeth_l2_setup_bridgeport_attrs(card); dev_info(&card->gdev->dev, - "The device represents a Bridge Capable Port\n"); + "The device represents a Bridge Capable Port\n"); + } + mutex_unlock(&card->sbp_lock); qeth_l2_register_dev_addr(card); @@ -821,26 +773,11 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev) qeth_trace_features(card); qeth_l2_trace_features(card); - qeth_l2_setup_bridgeport_attrs(card); - - card->state = CARD_STATE_HARDSETUP; qeth_print_status_message(card); /* softsetup */ QETH_CARD_TEXT(card, 2, "softsetp"); - if (IS_OSD(card) || IS_OSX(card)) { - rc = qeth_l2_start_ipassists(card); - if (rc) - goto out_remove; - } - - rc = qeth_init_qdio_queues(card); - if (rc) { - QETH_CARD_TEXT_(card, 2, "6err%d", rc); - rc = -ENODEV; - goto out_remove; - } card->state = CARD_STATE_SOFTSETUP; qeth_set_allowed_threads(card, 0xffffffff, 0); @@ -867,91 +804,20 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev) } /* let user_space know that device is online */ kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); - mutex_unlock(&card->conf_mutex); - mutex_unlock(&card->discipline_mutex); return 0; out_remove: qeth_l2_stop_card(card); - ccw_device_set_offline(CARD_DDEV(card)); - ccw_device_set_offline(CARD_WDEV(card)); - ccw_device_set_offline(CARD_RDEV(card)); + qeth_stop_channel(&card->data); + qeth_stop_channel(&card->write); + qeth_stop_channel(&card->read); qdio_free(CARD_DDEV(card)); - - mutex_unlock(&card->conf_mutex); - mutex_unlock(&card->discipline_mutex); return rc; } -static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, - int recovery_mode) +static void qeth_l2_set_offline(struct qeth_card *card) { - struct qeth_card *card = dev_get_drvdata(&cgdev->dev); - int rc = 0, rc2 = 0, rc3 = 0; - - mutex_lock(&card->discipline_mutex); - mutex_lock(&card->conf_mutex); - QETH_CARD_TEXT(card, 3, "setoffl"); - - if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) { - qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); - card->info.hwtrap = 1; - } - - rtnl_lock(); - card->info.open_when_online = card->dev->flags & IFF_UP; - dev_close(card->dev); - netif_device_detach(card->dev); - netif_carrier_off(card->dev); - rtnl_unlock(); - 
qeth_l2_stop_card(card); - rc = ccw_device_set_offline(CARD_DDEV(card)); - rc2 = ccw_device_set_offline(CARD_WDEV(card)); - rc3 = ccw_device_set_offline(CARD_RDEV(card)); - if (!rc) - rc = (rc2) ? rc2 : rc3; - if (rc) - QETH_CARD_TEXT_(card, 2, "1err%d", rc); - qdio_free(CARD_DDEV(card)); - - /* let user_space know that device is offline */ - kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); - mutex_unlock(&card->conf_mutex); - mutex_unlock(&card->discipline_mutex); - return 0; -} - -static int qeth_l2_set_offline(struct ccwgroup_device *cgdev) -{ - return __qeth_l2_set_offline(cgdev, 0); -} - -static int qeth_l2_recover(void *ptr) -{ - struct qeth_card *card; - int rc = 0; - - card = (struct qeth_card *) ptr; - QETH_CARD_TEXT(card, 2, "recover1"); - if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) - return 0; - QETH_CARD_TEXT(card, 2, "recover2"); - dev_warn(&card->gdev->dev, - "A recovery process has been started for the device\n"); - __qeth_l2_set_offline(card->gdev, 1); - rc = qeth_l2_set_online(card->gdev); - if (!rc) - dev_info(&card->gdev->dev, - "Device successfully recovered!\n"); - else { - ccwgroup_set_offline(card->gdev); - dev_warn(&card->gdev->dev, "The qeth device driver " - "failed to recover an error on the device\n"); - } - qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); - qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); - return 0; } static int __init qeth_l2_init(void) @@ -988,8 +854,6 @@ static int qeth_l2_control_event(struct qeth_card *card, struct qeth_discipline qeth_l2_discipline = { .devtype = &qeth_l2_devtype, - .process_rx_buffer = qeth_l2_process_inbound_buffer, - .recover = qeth_l2_recover, .setup = qeth_l2_probe_device, .remove = qeth_l2_remove_device, .set_online = qeth_l2_set_online, @@ -1000,9 +864,10 @@ struct qeth_discipline qeth_l2_discipline = { EXPORT_SYMBOL_GPL(qeth_l2_discipline); static void qeth_osn_assist_cb(struct qeth_card *card, - struct qeth_cmd_buffer *iob) + struct qeth_cmd_buffer *iob, + unsigned int data_length) { - qeth_notify_reply(iob->reply, 0); + qeth_notify_cmd(iob, 0); qeth_put_cmd(iob); } @@ -1027,7 +892,8 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len) if (!iob) return -ENOMEM; - qeth_prepare_ipa_cmd(card, iob, (u16) data_len); + qeth_prepare_ipa_cmd(card, iob, (u16) data_len, NULL); + memcpy(__ipa_cmd(iob), data, data_len); iob->callback = qeth_osn_assist_cb; return qeth_send_ipa_cmd(card, iob, NULL, NULL); @@ -1165,9 +1031,9 @@ static void qeth_bridge_state_change_worker(struct work_struct *work) /* Role should not change by itself, but if it did, */ /* information from the hardware is authoritative. */ - mutex_lock(&data->card->conf_mutex); + mutex_lock(&data->card->sbp_lock); data->card->options.sbp.role = entry->role; - mutex_unlock(&data->card->conf_mutex); + mutex_unlock(&data->card->sbp_lock); snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange"); snprintf(env_role, sizeof(env_role), "ROLE=%s", @@ -1233,9 +1099,9 @@ static void qeth_bridge_host_event_worker(struct work_struct *work) : (data->hostevs.lost_event_mask == 0x02) ? 
"Bridge port state change" : "Unknown reason"); - mutex_lock(&data->card->conf_mutex); + mutex_lock(&data->card->sbp_lock); data->card->options.sbp.hostnotification = 0; - mutex_unlock(&data->card->conf_mutex); + mutex_unlock(&data->card->sbp_lock); qeth_bridge_emit_host_event(data->card, anev_abort, 0, NULL, NULL); } else @@ -1703,7 +1569,6 @@ static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc) /* generic VNICC request call back control */ struct _qeth_l2_vnicc_request_cbctl { - u32 sub_cmd; struct { union{ u32 *sup_cmds; @@ -1721,6 +1586,7 @@ static int qeth_l2_vnicc_request_cb(struct qeth_card *card, (struct _qeth_l2_vnicc_request_cbctl *) reply->param; struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc; + u32 sub_cmd = cmd->data.vnicc.hdr.sub_command; QETH_CARD_TEXT(card, 2, "vniccrcb"); if (cmd->hdr.return_code) @@ -1729,10 +1595,9 @@ static int qeth_l2_vnicc_request_cb(struct qeth_card *card, card->options.vnicc.sup_chars = rep->vnicc_cmds.supported; card->options.vnicc.cur_chars = rep->vnicc_cmds.enabled; - if (cbctl->sub_cmd == IPA_VNICC_QUERY_CMDS) + if (sub_cmd == IPA_VNICC_QUERY_CMDS) *cbctl->result.sup_cmds = rep->data.query_cmds.sup_cmds; - - if (cbctl->sub_cmd == IPA_VNICC_GET_TIMEOUT) + else if (sub_cmd == IPA_VNICC_GET_TIMEOUT) *cbctl->result.timeout = rep->data.getset_timeout.timeout; return 0; @@ -1760,7 +1625,6 @@ static struct qeth_cmd_buffer *qeth_l2_vnicc_build_cmd(struct qeth_card *card, /* VNICC query VNIC characteristics request */ static int qeth_l2_vnicc_query_chars(struct qeth_card *card) { - struct _qeth_l2_vnicc_request_cbctl cbctl; struct qeth_cmd_buffer *iob; QETH_CARD_TEXT(card, 2, "vniccqch"); @@ -1768,10 +1632,7 @@ static int qeth_l2_vnicc_query_chars(struct qeth_card *card) if (!iob) return -ENOMEM; - /* prepare callback control */ - cbctl.sub_cmd = IPA_VNICC_QUERY_CHARS; - - return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl); + return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL); } /* VNICC query sub commands request */ @@ -1790,7 +1651,6 @@ static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char, __ipa_cmd(iob)->data.vnicc.data.query_cmds.vnic_char = vnic_char; /* prepare callback control */ - cbctl.sub_cmd = IPA_VNICC_QUERY_CMDS; cbctl.result.sup_cmds = sup_cmds; return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl); @@ -1800,7 +1660,6 @@ static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char, static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char, u32 cmd) { - struct _qeth_l2_vnicc_request_cbctl cbctl; struct qeth_cmd_buffer *iob; QETH_CARD_TEXT(card, 2, "vniccedc"); @@ -1810,10 +1669,7 @@ static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char, __ipa_cmd(iob)->data.vnicc.data.set_char.vnic_char = vnic_char; - /* prepare callback control */ - cbctl.sub_cmd = cmd; - - return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl); + return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL); } /* VNICC get/set timeout for characteristic request */ @@ -1837,7 +1693,6 @@ static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc, getset_timeout->timeout = *timeout; /* prepare callback control */ - cbctl.sub_cmd = cmd; if (cmd == IPA_VNICC_GET_TIMEOUT) cbctl.result.timeout = timeout; @@ -1989,8 +1844,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout) /* check if VNICC is currently enabled */ bool 
qeth_l2_vnicc_is_in_use(struct qeth_card *card) { - /* if everything is turned off, VNICC is not active */ - if (!card->options.vnicc.cur_chars) + if (!card->options.vnicc.sup_chars) return false; /* default values are only OK if rx_bcast was not enabled by user * or the card is offline. @@ -2035,10 +1889,10 @@ static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc, static void qeth_l2_vnicc_init(struct qeth_card *card) { u32 *timeout = &card->options.vnicc.learning_timeout; + bool enable, error = false; unsigned int chars_len, i; unsigned long chars_tmp; u32 sup_cmds, vnicc; - bool enable, error; QETH_CARD_TEXT(card, 2, "vniccini"); /* reset rx_bcast */ @@ -2059,19 +1913,27 @@ static void qeth_l2_vnicc_init(struct qeth_card *card) chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE; for_each_set_bit(i, &chars_tmp, chars_len) { vnicc = BIT(i); - qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds); - if (!(sup_cmds & IPA_VNICC_SET_TIMEOUT) || - !(sup_cmds & IPA_VNICC_GET_TIMEOUT)) + if (qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds)) { + sup_cmds = 0; + error = true; + } + if ((sup_cmds & IPA_VNICC_SET_TIMEOUT) && + (sup_cmds & IPA_VNICC_GET_TIMEOUT)) + card->options.vnicc.getset_timeout_sup |= vnicc; + else card->options.vnicc.getset_timeout_sup &= ~vnicc; - if (!(sup_cmds & IPA_VNICC_ENABLE) || - !(sup_cmds & IPA_VNICC_DISABLE)) + if ((sup_cmds & IPA_VNICC_ENABLE) && + (sup_cmds & IPA_VNICC_DISABLE)) + card->options.vnicc.set_char_sup |= vnicc; + else card->options.vnicc.set_char_sup &= ~vnicc; } /* enforce assumed default values and recover settings, if changed */ - error = qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING, - timeout); - chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT; - chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE; + error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING, + timeout); + /* Change chars, if necessary */ + chars_tmp = card->options.vnicc.wanted_chars ^ + card->options.vnicc.cur_chars; chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE; for_each_set_bit(i, &chars_tmp, chars_len) { vnicc = BIT(i); diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c index f2c3b127b1e4..86bcae992f72 100644 --- a/drivers/s390/net/qeth_l2_sys.c +++ b/drivers/s390/net/qeth_l2_sys.c @@ -18,12 +18,10 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev, int rc = 0; char *word; - if (!card) - return -EINVAL; - if (qeth_l2_vnicc_is_in_use(card)) return sprintf(buf, "n/a (VNIC characteristics)\n"); + mutex_lock(&card->sbp_lock); if (qeth_card_hw_is_reachable(card) && card->options.sbp.supported_funcs) rc = qeth_bridgeport_query_ports(card, @@ -57,6 +55,7 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev, else rc = sprintf(buf, "%s\n", word); } + mutex_unlock(&card->sbp_lock); return rc; } @@ -79,8 +78,6 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev, int rc = 0; enum qeth_sbp_roles role; - if (!card) - return -EINVAL; if (sysfs_streq(buf, "primary")) role = QETH_SBP_ROLE_PRIMARY; else if (sysfs_streq(buf, "secondary")) @@ -91,6 +88,7 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev, return -EINVAL; mutex_lock(&card->conf_mutex); + mutex_lock(&card->sbp_lock); if (qeth_l2_vnicc_is_in_use(card)) rc = -EBUSY; @@ -104,6 +102,7 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev, } else card->options.sbp.role = role; + mutex_unlock(&card->sbp_lock); mutex_unlock(&card->conf_mutex); return rc ? 
rc : count; @@ -132,9 +131,6 @@ static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev, struct qeth_card *card = dev_get_drvdata(dev); int enabled; - if (!card) - return -EINVAL; - if (qeth_l2_vnicc_is_in_use(card)) return sprintf(buf, "n/a (VNIC characteristics)\n"); @@ -150,14 +146,12 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev, bool enable; int rc; - if (!card) - return -EINVAL; - rc = kstrtobool(buf, &enable); if (rc) return rc; mutex_lock(&card->conf_mutex); + mutex_lock(&card->sbp_lock); if (qeth_l2_vnicc_is_in_use(card)) rc = -EBUSY; @@ -168,6 +162,7 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev, } else card->options.sbp.hostnotification = enable; + mutex_unlock(&card->sbp_lock); mutex_unlock(&card->conf_mutex); return rc ? rc : count; @@ -183,9 +178,6 @@ static ssize_t qeth_bridgeport_reflect_show(struct device *dev, struct qeth_card *card = dev_get_drvdata(dev); char *state; - if (!card) - return -EINVAL; - if (qeth_l2_vnicc_is_in_use(card)) return sprintf(buf, "n/a (VNIC characteristics)\n"); @@ -207,9 +199,6 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev, int enable, primary; int rc = 0; - if (!card) - return -EINVAL; - if (sysfs_streq(buf, "none")) { enable = 0; primary = 0; @@ -223,6 +212,7 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev, return -EINVAL; mutex_lock(&card->conf_mutex); + mutex_lock(&card->sbp_lock); if (qeth_l2_vnicc_is_in_use(card)) rc = -EBUSY; @@ -234,6 +224,7 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev, rc = 0; } + mutex_unlock(&card->sbp_lock); mutex_unlock(&card->conf_mutex); return rc ? rc : count; @@ -255,35 +246,6 @@ static struct attribute_group qeth_l2_bridgeport_attr_group = { .attrs = qeth_l2_bridgeport_attrs, }; -/** - * qeth_l2_setup_bridgeport_attrs() - set/restore attrs when turning online. - * @card: qeth_card structure pointer - * - * Note: this function is called with conf_mutex held by the caller - */ -void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card) -{ - int rc; - - if (!card) - return; - if (!card->options.sbp.supported_funcs) - return; - if (card->options.sbp.role != QETH_SBP_ROLE_NONE) { - /* Conditional to avoid spurious error messages */ - qeth_bridgeport_setrole(card, card->options.sbp.role); - /* Let the callback function refresh the stored role value. 
*/ - qeth_bridgeport_query_ports(card, - &card->options.sbp.role, NULL); - } - if (card->options.sbp.hostnotification) { - rc = qeth_bridgeport_an_set(card, 1); - if (rc) - card->options.sbp.hostnotification = 0; - } else - qeth_bridgeport_an_set(card, 0); -} - /* VNIC CHARS support */ /* convert sysfs attr name to VNIC characteristic */ @@ -315,9 +277,6 @@ static ssize_t qeth_vnicc_timeout_show(struct device *dev, u32 timeout; int rc; - if (!card) - return -EINVAL; - rc = qeth_l2_vnicc_get_timeout(card, &timeout); if (rc == -EBUSY) return sprintf(buf, "n/a (BridgePort)\n"); @@ -335,9 +294,6 @@ static ssize_t qeth_vnicc_timeout_store(struct device *dev, u32 timeout; int rc; - if (!card) - return -EINVAL; - rc = kstrtou32(buf, 10, &timeout); if (rc) return rc; @@ -357,9 +313,6 @@ static ssize_t qeth_vnicc_char_show(struct device *dev, u32 vnicc; int rc; - if (!card) - return -EINVAL; - vnicc = qeth_l2_vnicc_sysfs_attr_to_char(attr->attr.name); rc = qeth_l2_vnicc_get_state(card, vnicc, &state); @@ -380,9 +333,6 @@ static ssize_t qeth_vnicc_char_store(struct device *dev, u32 vnicc; int rc; - if (!card) - return -EINVAL; - if (kstrtobool(buf, &state)) return -EINVAL; diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index 87659cfc9066..6ccfe2121095 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h @@ -13,8 +13,6 @@ #include "qeth_core.h" #include <linux/hashtable.h> -#define QETH_SNIFF_AVAIL 0x0008 - enum qeth_ip_types { QETH_IP_TYPE_NORMAL, QETH_IP_TYPE_VIPA, @@ -24,9 +22,7 @@ enum qeth_ip_types { struct qeth_ipaddr { struct hlist_node hnode; enum qeth_ip_types type; - unsigned char mac[ETH_ALEN]; u8 is_multicast:1; - u8 in_progress:1; u8 disp_flag:2; u8 ipato:1; /* ucast only */ @@ -37,8 +33,8 @@ struct qeth_ipaddr { enum qeth_prot_versions proto; union { struct { - unsigned int addr; - unsigned int mask; + __be32 addr; + __be32 mask; } a4; struct { struct in6_addr addr; @@ -55,6 +51,7 @@ static inline void qeth_l3_init_ipaddr(struct qeth_ipaddr *addr, addr->type = type; addr->proto = proto; addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; + addr->ref_counter = 1; } static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1, @@ -74,12 +71,10 @@ static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1, * so 'proto' and 'addr' match for sure. * * For ucast: - * - 'mac' is always 0. * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching * values are required to avoid mixups in takeover eligibility. * * For mcast, - * - 'mac' is mapped from the IP, and thus always matches. * - 'mask'/'pfxlen' is always 0. 
*/ if (a1->type != a2->type) @@ -89,21 +84,12 @@ static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1, return a1->u.a4.mask == a2->u.a4.mask; } -static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr) +static inline u32 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr) { - u64 ret = 0; - u8 *point; - - if (addr->proto == QETH_PROT_IPV6) { - point = (u8 *) &addr->u.a6.addr; - ret = get_unaligned((u64 *)point) ^ - get_unaligned((u64 *) (point + 8)); - } - if (addr->proto == QETH_PROT_IPV4) { - point = (u8 *) &addr->u.a4.addr; - ret = get_unaligned((u32 *) point); - } - return ret; + if (addr->proto == QETH_PROT_IPV6) + return ipv6_addr_hash(&addr->u.a6.addr); + else + return ipv4_addr_hash(addr->u.a4.addr); } struct qeth_ipato_entry { @@ -115,7 +101,8 @@ struct qeth_ipato_entry { extern const struct attribute_group *qeth_l3_attr_groups[]; -void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *); +int qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const u8 *addr, + char *buf); int qeth_l3_create_device_attributes(struct device *); void qeth_l3_remove_device_attributes(struct device *); int qeth_l3_setrouting_v4(struct qeth_card *); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 2dd99f103671..317d56647a4a 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -37,46 +37,24 @@ #include "qeth_l3.h" - -static int qeth_l3_set_offline(struct ccwgroup_device *); -static void qeth_l3_set_rx_mode(struct net_device *dev); static int qeth_l3_register_addr_entry(struct qeth_card *, struct qeth_ipaddr *); static int qeth_l3_deregister_addr_entry(struct qeth_card *, struct qeth_ipaddr *); -static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf) -{ - sprintf(buf, "%pI4", addr); -} - -static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf) -{ - sprintf(buf, "%pI6", addr); -} - -void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr, - char *buf) +int qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const u8 *addr, + char *buf) { if (proto == QETH_PROT_IPV4) - qeth_l3_ipaddr4_to_string(addr, buf); - else if (proto == QETH_PROT_IPV6) - qeth_l3_ipaddr6_to_string(addr, buf); -} - -static struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions prot) -{ - struct qeth_ipaddr *addr = kmalloc(sizeof(*addr), GFP_ATOMIC); - - if (addr) - qeth_l3_init_ipaddr(addr, QETH_IP_TYPE_NORMAL, prot); - return addr; + return sprintf(buf, "%pI4", addr); + else + return sprintf(buf, "%pI6", addr); } static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card, struct qeth_ipaddr *query) { - u64 key = qeth_l3_ipaddr_hash(query); + u32 key = qeth_l3_ipaddr_hash(query); struct qeth_ipaddr *addr; if (query->is_multicast) { @@ -171,8 +149,6 @@ static int qeth_l3_delete_ip(struct qeth_card *card, addr->ref_counter--; if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0) return rc; - if (addr->in_progress) - return -EINPROGRESS; if (qeth_card_hw_is_reachable(card)) rc = qeth_l3_deregister_addr_entry(card, addr); @@ -217,13 +193,10 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) "Registering IP address %s failed\n", buf); return -EADDRINUSE; } else { - addr = qeth_l3_get_addr_buffer(tmp_addr->proto); + addr = kmemdup(tmp_addr, sizeof(*tmp_addr), GFP_KERNEL); if (!addr) return -ENOMEM; - memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr)); - addr->ref_counter = 1; - if (qeth_l3_is_addr_covered_by_ipato(card, 
addr)) { QETH_CARD_TEXT(card, 2, "tkovaddr"); addr->ipato = 1; @@ -236,29 +209,10 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) return 0; } - /* qeth_l3_register_addr_entry can go to sleep - * if we add a IPV4 addr. It is caused by the reason - * that SETIP ipa cmd starts ARP staff for IPV4 addr. - * Thus we should unlock spinlock, and make a protection - * using in_progress variable to indicate that there is - * an hardware operation with this IPV4 address - */ - if (addr->proto == QETH_PROT_IPV4) { - addr->in_progress = 1; - mutex_unlock(&card->ip_lock); - rc = qeth_l3_register_addr_entry(card, addr); - mutex_lock(&card->ip_lock); - addr->in_progress = 0; - } else - rc = qeth_l3_register_addr_entry(card, addr); + rc = qeth_l3_register_addr_entry(card, addr); if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) { addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; - if (addr->ref_counter < 1) { - qeth_l3_deregister_addr_entry(card, addr); - hash_del(&addr->hnode); - kfree(addr); - } } else { hash_del(&addr->hnode); kfree(addr); @@ -326,19 +280,10 @@ static void qeth_l3_recover_ip(struct qeth_card *card) hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { if (addr->disp_flag == QETH_DISP_ADDR_ADD) { - if (addr->proto == QETH_PROT_IPV4) { - addr->in_progress = 1; - mutex_unlock(&card->ip_lock); - rc = qeth_l3_register_addr_entry(card, addr); - mutex_lock(&card->ip_lock); - addr->in_progress = 0; - } else - rc = qeth_l3_register_addr_entry(card, addr); + rc = qeth_l3_register_addr_entry(card, addr); if (!rc) { addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; - if (addr->ref_counter < 1) - qeth_l3_delete_ip(card, addr); } else { hash_del(&addr->hnode); kfree(addr); @@ -381,27 +326,27 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card, if (!iob) return -ENOMEM; cmd = __ipa_cmd(iob); - ether_addr_copy(cmd->data.setdelipm.mac, addr->mac); - if (addr->proto == QETH_PROT_IPV6) - memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr, - sizeof(struct in6_addr)); - else - memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4); + if (addr->proto == QETH_PROT_IPV6) { + cmd->data.setdelipm.ip = addr->u.a6.addr; + ipv6_eth_mc_map(&addr->u.a6.addr, cmd->data.setdelipm.mac); + } else { + cmd->data.setdelipm.ip.s6_addr32[3] = addr->u.a4.addr; + ip_eth_mc_map(addr->u.a4.addr, cmd->data.setdelipm.mac); + } return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL); } -static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len) +static void qeth_l3_set_ipv6_prefix(struct in6_addr *prefix, unsigned int len) { - int i, j; - for (i = 0; i < 16; i++) { - j = (len) - (i * 8); - if (j >= 8) - netmask[i] = 0xff; - else if (j > 0) - netmask[i] = (u8)(0xFF00 >> j); - else - netmask[i] = 0; + unsigned int i = 0; + + while (len && i < 4) { + int mask_len = min_t(int, len, 32); + + prefix->s6_addr32[i] = inet_make_mask(mask_len); + len -= mask_len; + i++; } } @@ -424,7 +369,6 @@ static int qeth_l3_send_setdelip(struct qeth_card *card, { struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; - __u8 netmask[16]; u32 flags; QETH_CARD_TEXT(card, 4, "setdelip"); @@ -439,15 +383,13 @@ static int qeth_l3_send_setdelip(struct qeth_card *card, QETH_CARD_TEXT_(card, 4, "flags%02X", flags); if (addr->proto == QETH_PROT_IPV6) { - memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, - sizeof(struct in6_addr)); - qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen); - memcpy(cmd->data.setdelip6.mask, netmask, - sizeof(struct in6_addr)); + cmd->data.setdelip6.addr = addr->u.a6.addr; + 
qeth_l3_set_ipv6_prefix(&cmd->data.setdelip6.prefix, + addr->u.a6.pfxlen); cmd->data.setdelip6.flags = flags; } else { - memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4); - memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4); + cmd->data.setdelip4.addr = addr->u.a4.addr; + cmd->data.setdelip4.mask = addr->u.a4.mask; cmd->data.setdelip4.flags = flags; } @@ -593,6 +535,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card, QETH_CARD_TEXT(card, 2, "addipato"); + mutex_lock(&card->conf_mutex); mutex_lock(&card->ip_lock); list_for_each_entry(ipatoe, &card->ipato.entries, entry) { @@ -612,6 +555,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card, } mutex_unlock(&card->ip_lock); + mutex_unlock(&card->conf_mutex); return rc; } @@ -625,6 +569,7 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card, QETH_CARD_TEXT(card, 2, "delipato"); + mutex_lock(&card->conf_mutex); mutex_lock(&card->ip_lock); list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { @@ -641,6 +586,8 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card, } mutex_unlock(&card->ip_lock); + mutex_unlock(&card->conf_mutex); + return rc; } @@ -649,6 +596,7 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip, enum qeth_prot_versions proto) { struct qeth_ipaddr addr; + int rc; qeth_l3_init_ipaddr(&addr, type, proto); if (proto == QETH_PROT_IPV4) @@ -656,7 +604,11 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip, else memcpy(&addr.u.a6.addr, ip, 16); - return qeth_l3_modify_ip(card, &addr, add); + mutex_lock(&card->conf_mutex); + rc = qeth_l3_modify_ip(card, &addr, add); + mutex_unlock(&card->conf_mutex); + + return rc; } int qeth_l3_modify_hsuid(struct qeth_card *card, bool add) @@ -949,19 +901,16 @@ out: return rc; } -static int qeth_l3_start_ipassists(struct qeth_card *card) +static void qeth_l3_start_ipassists(struct qeth_card *card) { QETH_CARD_TEXT(card, 3, "strtipas"); - if (qeth_set_access_ctrl_online(card, 0)) - return -EIO; qeth_l3_start_ipa_arp_processing(card); /* go on*/ qeth_l3_start_ipa_source_mac(card); /* go on*/ qeth_l3_start_ipa_vlan(card); /* go on*/ qeth_l3_start_ipa_multicast(card); /* go on*/ qeth_l3_start_ipa_ipv6(card); /* go on*/ qeth_l3_start_ipa_broadcast(card); /* go on*/ - return 0; } static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card, @@ -1115,176 +1064,83 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd) return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL); } -static void -qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev) +static int qeth_l3_add_mcast_rtnl(struct net_device *dev, int vid, void *arg) { + struct qeth_card *card = arg; + struct inet6_dev *in6_dev; + struct in_device *in4_dev; + struct qeth_ipaddr *ipm; + struct qeth_ipaddr tmp; struct ip_mc_list *im4; - struct qeth_ipaddr *tmp, *ipm; + struct ifmcaddr6 *im6; QETH_CARD_TEXT(card, 4, "addmc"); - tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); - if (!tmp) - return; + if (!dev || !(dev->flags & IFF_UP)) + goto out; + + in4_dev = __in_dev_get_rtnl(dev); + if (!in4_dev) + goto walk_ipv6; + + qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4); + tmp.disp_flag = QETH_DISP_ADDR_ADD; + tmp.is_multicast = 1; - for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL; - im4 = rcu_dereference(im4->next_rcu)) { - ip_eth_mc_map(im4->multiaddr, tmp->mac); - tmp->u.a4.addr = be32_to_cpu(im4->multiaddr); - tmp->is_multicast = 1; + for (im4 = rtnl_dereference(in4_dev->mc_list); im4 != 
NULL; + im4 = rtnl_dereference(im4->next_rcu)) { + tmp.u.a4.addr = im4->multiaddr; - ipm = qeth_l3_find_addr_by_ip(card, tmp); + ipm = qeth_l3_find_addr_by_ip(card, &tmp); if (ipm) { /* for mcast, by-IP match means full match */ ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; - } else { - ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); - if (!ipm) - continue; - ether_addr_copy(ipm->mac, tmp->mac); - ipm->u.a4.addr = be32_to_cpu(im4->multiaddr); - ipm->is_multicast = 1; - ipm->disp_flag = QETH_DISP_ADDR_ADD; - hash_add(card->ip_mc_htable, - &ipm->hnode, qeth_l3_ipaddr_hash(ipm)); + continue; } - } - - kfree(tmp); -} - -/* called with rcu_read_lock */ -static void qeth_l3_add_vlan_mc(struct qeth_card *card) -{ - struct in_device *in_dev; - u16 vid; - QETH_CARD_TEXT(card, 4, "addmcvl"); - - if (!qeth_is_supported(card, IPA_FULL_VLAN)) - return; - - for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) { - struct net_device *netdev; - - netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), - vid); - if (netdev == NULL || - !(netdev->flags & IFF_UP)) - continue; - in_dev = __in_dev_get_rcu(netdev); - if (!in_dev) + ipm = kmemdup(&tmp, sizeof(tmp), GFP_KERNEL); + if (!ipm) continue; - qeth_l3_add_mc_to_hash(card, in_dev); - } -} - -static void qeth_l3_add_multicast_ipv4(struct qeth_card *card) -{ - struct in_device *in4_dev; - - QETH_CARD_TEXT(card, 4, "chkmcv4"); - rcu_read_lock(); - in4_dev = __in_dev_get_rcu(card->dev); - if (in4_dev == NULL) - goto unlock; - qeth_l3_add_mc_to_hash(card, in4_dev); - qeth_l3_add_vlan_mc(card); -unlock: - rcu_read_unlock(); -} + hash_add(card->ip_mc_htable, &ipm->hnode, + qeth_l3_ipaddr_hash(ipm)); + } -static void qeth_l3_add_mc6_to_hash(struct qeth_card *card, - struct inet6_dev *in6_dev) -{ - struct qeth_ipaddr *ipm; - struct ifmcaddr6 *im6; - struct qeth_ipaddr *tmp; +walk_ipv6: + if (!qeth_is_supported(card, IPA_IPV6)) + goto out; - QETH_CARD_TEXT(card, 4, "addmc6"); + in6_dev = __in6_dev_get(dev); + if (!in6_dev) + goto out; - tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); - if (!tmp) - return; + qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6); + tmp.disp_flag = QETH_DISP_ADDR_ADD; + tmp.is_multicast = 1; + read_lock_bh(&in6_dev->lock); for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) { - ipv6_eth_mc_map(&im6->mca_addr, tmp->mac); - memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr, - sizeof(struct in6_addr)); - tmp->is_multicast = 1; + tmp.u.a6.addr = im6->mca_addr; - ipm = qeth_l3_find_addr_by_ip(card, tmp); + ipm = qeth_l3_find_addr_by_ip(card, &tmp); if (ipm) { /* for mcast, by-IP match means full match */ ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; continue; } - ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); + ipm = kmemdup(&tmp, sizeof(tmp), GFP_ATOMIC); if (!ipm) continue; - ether_addr_copy(ipm->mac, tmp->mac); - memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr, - sizeof(struct in6_addr)); - ipm->is_multicast = 1; - ipm->disp_flag = QETH_DISP_ADDR_ADD; hash_add(card->ip_mc_htable, &ipm->hnode, qeth_l3_ipaddr_hash(ipm)); } - kfree(tmp); -} - -/* called with rcu_read_lock */ -static void qeth_l3_add_vlan_mc6(struct qeth_card *card) -{ - struct inet6_dev *in_dev; - u16 vid; - - QETH_CARD_TEXT(card, 4, "admc6vl"); - - if (!qeth_is_supported(card, IPA_FULL_VLAN)) - return; - - for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) { - struct net_device *netdev; - - netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), - vid); - if (netdev == NULL || - !(netdev->flags & IFF_UP)) - continue; - in_dev = 
in6_dev_get(netdev); - if (!in_dev) - continue; - read_lock_bh(&in_dev->lock); - qeth_l3_add_mc6_to_hash(card, in_dev); - read_unlock_bh(&in_dev->lock); - in6_dev_put(in_dev); - } -} - -static void qeth_l3_add_multicast_ipv6(struct qeth_card *card) -{ - struct inet6_dev *in6_dev; - - QETH_CARD_TEXT(card, 4, "chkmcv6"); - - if (!qeth_is_supported(card, IPA_IPV6)) - return ; - in6_dev = in6_dev_get(card->dev); - if (!in6_dev) - return; - - rcu_read_lock(); - read_lock_bh(&in6_dev->lock); - qeth_l3_add_mc6_to_hash(card, in6_dev); - qeth_l3_add_vlan_mc6(card); read_unlock_bh(&in6_dev->lock); - rcu_read_unlock(); - in6_dev_put(in6_dev); + +out: + return 0; } static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, @@ -1292,7 +1148,7 @@ static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, { struct qeth_card *card = dev->ml_priv; - set_bit(vid, card->active_vlans); + QETH_CARD_TEXT_(card, 4, "aid:%d", vid); return 0; } @@ -1302,111 +1158,9 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, struct qeth_card *card = dev->ml_priv; QETH_CARD_TEXT_(card, 4, "kid:%d", vid); - - clear_bit(vid, card->active_vlans); - qeth_l3_set_rx_mode(dev); return 0; } -static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, - struct qeth_hdr *hdr) -{ - struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data; - struct net_device *dev = skb->dev; - - if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) { - dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr, - "FAKELL", skb->len); - return; - } - - if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) { - u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 : - ETH_P_IP; - unsigned char tg_addr[ETH_ALEN]; - - skb_reset_network_header(skb); - switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) { - case QETH_CAST_MULTICAST: - if (prot == ETH_P_IP) - ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr); - else - ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr); - QETH_CARD_STAT_INC(card, rx_multicast); - break; - case QETH_CAST_BROADCAST: - ether_addr_copy(tg_addr, card->dev->broadcast); - QETH_CARD_STAT_INC(card, rx_multicast); - break; - default: - if (card->options.sniffer) - skb->pkt_type = PACKET_OTHERHOST; - ether_addr_copy(tg_addr, card->dev->dev_addr); - } - - if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR) - card->dev->header_ops->create(skb, card->dev, prot, - tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac, - skb->len); - else - card->dev->header_ops->create(skb, card->dev, prot, - tg_addr, "FAKELL", skb->len); - } - - /* copy VLAN tag from hdr into skb */ - if (!card->options.sniffer && - (hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME | - QETH_HDR_EXT_INCLUDE_VLAN_TAG))) { - u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ? 
- hdr->hdr.l3.vlan_id : - hdr->hdr.l3.next_hop.rx.vlan_id; - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); - } - - qeth_rx_csum(card, skb, hdr->hdr.l3.ext_flags); -} - -static int qeth_l3_process_inbound_buffer(struct qeth_card *card, - int budget, int *done) -{ - int work_done = 0; - struct sk_buff *skb; - struct qeth_hdr *hdr; - unsigned int len; - - *done = 0; - WARN_ON_ONCE(!budget); - while (budget) { - skb = qeth_core_get_next_skb(card, - &card->qdio.in_q->bufs[card->rx.b_index], - &card->rx.b_element, &card->rx.e_offset, &hdr); - if (!skb) { - *done = 1; - break; - } - switch (hdr->hdr.l3.id) { - case QETH_HEADER_TYPE_LAYER3: - qeth_l3_rebuild_skb(card, skb, hdr); - /* fall through */ - case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */ - skb->protocol = eth_type_trans(skb, skb->dev); - len = skb->len; - napi_gro_receive(&card->napi, skb); - break; - default: - dev_kfree_skb_any(skb); - QETH_CARD_TEXT(card, 3, "inbunkno"); - QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr)); - continue; - } - work_done++; - budget--; - QETH_CARD_STAT_INC(card, rx_packets); - QETH_CARD_STAT_ADD(card, rx_bytes, len); - } - return work_done; -} - static void qeth_l3_stop_card(struct qeth_card *card) { QETH_CARD_TEXT(card, 2, "stopcard"); @@ -1423,39 +1177,29 @@ static void qeth_l3_stop_card(struct qeth_card *card) if (card->state == CARD_STATE_SOFTSETUP) { qeth_l3_clear_ip_htable(card, 1); qeth_clear_ipacmd_list(card); - card->state = CARD_STATE_HARDSETUP; - } - if (card->state == CARD_STATE_HARDSETUP) { - qeth_qdio_clear_card(card, 0); qeth_drain_output_queues(card); - qeth_clear_working_pool_list(card); card->state = CARD_STATE_DOWN; } + qeth_qdio_clear_card(card, 0); + qeth_clear_working_pool_list(card); flush_workqueue(card->event_wq); + card->info.promisc_mode = 0; } -/* - * test for and Switch promiscuous mode (on or off) - * either for guestlan or HiperSocket Sniffer - */ -static void -qeth_l3_handle_promisc_mode(struct qeth_card *card) +static void qeth_l3_set_promisc_mode(struct qeth_card *card) { - struct net_device *dev = card->dev; + bool enable = card->dev->flags & IFF_PROMISC; - if (((dev->flags & IFF_PROMISC) && - (card->info.promisc_mode == SET_PROMISC_MODE_ON)) || - (!(dev->flags & IFF_PROMISC) && - (card->info.promisc_mode == SET_PROMISC_MODE_OFF))) + if (card->info.promisc_mode == enable) return; if (IS_VM_NIC(card)) { /* Guestlan trace */ if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) - qeth_setadp_promisc_mode(card); + qeth_setadp_promisc_mode(card, enable); } else if (card->options.sniffer && /* HiperSockets trace */ qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { - if (dev->flags & IFF_PROMISC) { + if (enable) { QETH_CARD_TEXT(card, 3, "+promisc"); qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE); } else { @@ -1476,8 +1220,11 @@ static void qeth_l3_rx_mode_work(struct work_struct *work) QETH_CARD_TEXT(card, 3, "setmulti"); if (!card->options.sniffer) { - qeth_l3_add_multicast_ipv4(card); - qeth_l3_add_multicast_ipv6(card); + rtnl_lock(); + qeth_l3_add_mcast_rtnl(card->dev, 0, card); + if (qeth_is_supported(card, IPA_FULL_VLAN)) + vlan_for_each(card->dev, qeth_l3_add_mcast_rtnl, card); + rtnl_unlock(); hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { switch (addr->disp_flag) { @@ -1502,11 +1249,9 @@ static void qeth_l3_rx_mode_work(struct work_struct *work) addr->disp_flag = QETH_DISP_ADDR_DELETE; } } - - if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) - return; } - qeth_l3_handle_promisc_mode(card); + + 
qeth_l3_set_promisc_mode(card); } static int qeth_l3_arp_makerc(u16 rc) @@ -1967,7 +1712,6 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue, /* some HW requires combined L3+L4 csum offload: */ if (ipv == 4) hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ; - QETH_TXQ_STAT_INC(queue, skbs_csum); } } @@ -2054,9 +1798,10 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, u16 txq = skb_get_queue_mapping(skb); int ipv = qeth_get_ip_version(skb); struct qeth_qdio_out_q *queue; - int tx_bytes = skb->len; int rc; + if (!skb_is_gso(skb)) + qdisc_skb_cb(skb)->pkt_len = skb->len; if (IS_IQD(card)) { queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)]; @@ -2079,11 +1824,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, else rc = qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header); - if (!rc) { - QETH_TXQ_STAT_INC(queue, tx_packets); - QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes); + if (!rc) return NETDEV_TX_OK; - } tx_drop: QETH_TXQ_STAT_INC(queue, tx_dropped); @@ -2296,7 +2038,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); if (cgdev->state == CCWGROUP_ONLINE) - qeth_l3_set_offline(cgdev); + qeth_set_offline(card, false); cancel_work_sync(&card->close_dev_work); if (qeth_netdev_is_registered(card->dev)) @@ -2308,17 +2050,13 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) qeth_l3_clear_ipato_list(card); } -static int qeth_l3_set_online(struct ccwgroup_device *gdev) +static int qeth_l3_set_online(struct qeth_card *card) { - struct qeth_card *card = dev_get_drvdata(&gdev->dev); + struct ccwgroup_device *gdev = card->gdev; struct net_device *dev = card->dev; int rc = 0; bool carrier_ok; - mutex_lock(&card->discipline_mutex); - mutex_lock(&card->conf_mutex); - QETH_CARD_TEXT(card, 2, "setonlin"); - rc = qeth_core_hardsetup_card(card, &carrier_ok); if (rc) { QETH_CARD_TEXT_(card, 2, "2err%04x", rc); @@ -2326,14 +2064,6 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev) goto out_remove; } - if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) { - if (card->info.hwtrap && - qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)) - card->info.hwtrap = 0; - } else - card->info.hwtrap = 0; - - card->state = CARD_STATE_HARDSETUP; qeth_print_status_message(card); /* softsetup */ @@ -2343,11 +2073,8 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev) if (rc) QETH_CARD_TEXT_(card, 2, "2err%04x", rc); if (!card->options.sniffer) { - rc = qeth_l3_start_ipassists(card); - if (rc) { - QETH_CARD_TEXT_(card, 2, "3err%d", rc); - goto out_remove; - } + qeth_l3_start_ipassists(card); + rc = qeth_l3_setrouting_v4(card); if (rc) QETH_CARD_TEXT_(card, 2, "4err%04x", rc); @@ -2356,12 +2083,6 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev) QETH_CARD_TEXT_(card, 2, "5err%04x", rc); } - rc = qeth_init_qdio_queues(card); - if (rc) { - QETH_CARD_TEXT_(card, 2, "6err%d", rc); - rc = -ENODEV; - goto out_remove; - } card->state = CARD_STATE_SOFTSETUP; qeth_set_allowed_threads(card, 0xffffffff, 0); @@ -2390,96 +2111,19 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev) qeth_trace_features(card); /* let user_space know that device is online */ kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); - mutex_unlock(&card->conf_mutex); - mutex_unlock(&card->discipline_mutex); return 0; out_remove: qeth_l3_stop_card(card); - ccw_device_set_offline(CARD_DDEV(card)); - ccw_device_set_offline(CARD_WDEV(card)); - 
ccw_device_set_offline(CARD_RDEV(card)); + qeth_stop_channel(&card->data); + qeth_stop_channel(&card->write); + qeth_stop_channel(&card->read); qdio_free(CARD_DDEV(card)); - - mutex_unlock(&card->conf_mutex); - mutex_unlock(&card->discipline_mutex); return rc; } -static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, - int recovery_mode) +static void qeth_l3_set_offline(struct qeth_card *card) { - struct qeth_card *card = dev_get_drvdata(&cgdev->dev); - int rc = 0, rc2 = 0, rc3 = 0; - - mutex_lock(&card->discipline_mutex); - mutex_lock(&card->conf_mutex); - QETH_CARD_TEXT(card, 3, "setoffl"); - - if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) { - qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); - card->info.hwtrap = 1; - } - - rtnl_lock(); - card->info.open_when_online = card->dev->flags & IFF_UP; - dev_close(card->dev); - netif_device_detach(card->dev); - netif_carrier_off(card->dev); - rtnl_unlock(); - qeth_l3_stop_card(card); - if (card->options.cq == QETH_CQ_ENABLED) { - rtnl_lock(); - call_netdevice_notifiers(NETDEV_REBOOT, card->dev); - rtnl_unlock(); - } - rc = ccw_device_set_offline(CARD_DDEV(card)); - rc2 = ccw_device_set_offline(CARD_WDEV(card)); - rc3 = ccw_device_set_offline(CARD_RDEV(card)); - if (!rc) - rc = (rc2) ? rc2 : rc3; - if (rc) - QETH_CARD_TEXT_(card, 2, "1err%d", rc); - qdio_free(CARD_DDEV(card)); - - /* let user_space know that device is offline */ - kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); - mutex_unlock(&card->conf_mutex); - mutex_unlock(&card->discipline_mutex); - return 0; -} - -static int qeth_l3_set_offline(struct ccwgroup_device *cgdev) -{ - return __qeth_l3_set_offline(cgdev, 0); -} - -static int qeth_l3_recover(void *ptr) -{ - struct qeth_card *card; - int rc = 0; - - card = (struct qeth_card *) ptr; - QETH_CARD_TEXT(card, 2, "recover1"); - QETH_CARD_HEX(card, 2, &card, sizeof(void *)); - if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) - return 0; - QETH_CARD_TEXT(card, 2, "recover2"); - dev_warn(&card->gdev->dev, - "A recovery process has been started for the device\n"); - __qeth_l3_set_offline(card->gdev, 1); - rc = qeth_l3_set_online(card->gdev); - if (!rc) - dev_info(&card->gdev->dev, - "Device successfully recovered!\n"); - else { - ccwgroup_set_offline(card->gdev); - dev_warn(&card->gdev->dev, "The qeth device driver " - "failed to recover an error on the device\n"); - } - qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); - qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); - return 0; } /* Returns zero if the command is successfully "consumed" */ @@ -2491,8 +2135,6 @@ static int qeth_l3_control_event(struct qeth_card *card, struct qeth_discipline qeth_l3_discipline = { .devtype = &qeth_l3_devtype, - .process_rx_buffer = qeth_l3_process_inbound_buffer, - .recover = qeth_l3_recover, .setup = qeth_l3_probe_device, .remove = qeth_l3_remove_device, .set_online = qeth_l3_set_online, @@ -2570,8 +2212,8 @@ static int qeth_l3_ip_event(struct notifier_block *this, QETH_CARD_TEXT(card, 3, "ipevent"); qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4); - addr.u.a4.addr = be32_to_cpu(ifa->ifa_address); - addr.u.a4.mask = be32_to_cpu(ifa->ifa_mask); + addr.u.a4.addr = ifa->ifa_address; + addr.u.a4.mask = ifa->ifa_mask; return qeth_l3_handle_ip_event(card, &addr, event); } diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 2f73b33c9347..29f2517d2a31 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -60,9 +60,6 @@ static ssize_t 
qeth_l3_dev_route4_show(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - return qeth_l3_dev_route_show(card, &card->options.route4, buf); } @@ -109,9 +106,6 @@ static ssize_t qeth_l3_dev_route4_store(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - return qeth_l3_dev_route_store(card, &card->options.route4, QETH_PROT_IPV4, buf, count); } @@ -124,9 +118,6 @@ static ssize_t qeth_l3_dev_route6_show(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - return qeth_l3_dev_route_show(card, &card->options.route6, buf); } @@ -135,9 +126,6 @@ static ssize_t qeth_l3_dev_route6_store(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - return qeth_l3_dev_route_store(card, &card->options.route6, QETH_PROT_IPV6, buf, count); } @@ -150,9 +138,6 @@ static ssize_t qeth_l3_dev_fake_broadcast_show(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - return sprintf(buf, "%i\n", card->options.fake_broadcast? 1:0); } @@ -163,9 +148,6 @@ static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev, char *tmp; int i, rc = 0; - if (!card) - return -EINVAL; - mutex_lock(&card->conf_mutex); if (card->state != CARD_STATE_DOWN) { rc = -EPERM; @@ -190,9 +172,6 @@ static ssize_t qeth_l3_dev_sniffer_show(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0); } @@ -203,9 +182,6 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev, int rc = 0; unsigned long i; - if (!card) - return -EINVAL; - if (!IS_IQD(card)) return -EPERM; if (card->options.cq == QETH_CQ_ENABLED) @@ -228,7 +204,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev, break; case 1: qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd); - if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) { + if (card->ssqd.qdioac2 & CHSC_AC2_SNIFFER_AVAILABLE) { card->options.sniffer = i; if (card->qdio.init_pool.buf_count != QETH_IN_BUF_COUNT_MAX) @@ -248,16 +224,12 @@ out: static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show, qeth_l3_dev_sniffer_store); - static ssize_t qeth_l3_dev_hsuid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); char tmp_hsuid[9]; - if (!card) - return -EINVAL; - if (!IS_IQD(card)) return -EPERM; @@ -270,24 +242,33 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); + int rc = 0; char *tmp; - int rc; - - if (!card) - return -EINVAL; if (!IS_IQD(card)) return -EPERM; - if (card->state != CARD_STATE_DOWN) - return -EPERM; - if (card->options.sniffer) - return -EPERM; - if (card->options.cq == QETH_CQ_NOTAVAILABLE) - return -EPERM; + + mutex_lock(&card->conf_mutex); + if (card->state != CARD_STATE_DOWN) { + rc = -EPERM; + goto out; + } + + if (card->options.sniffer) { + rc = -EPERM; + goto out; + } + + if (card->options.cq == QETH_CQ_NOTAVAILABLE) { + rc = -EPERM; + goto out; + } tmp = strsep((char **)&buf, "\n"); - if (strlen(tmp) > 8) - return -EINVAL; + if (strlen(tmp) > 8) { + rc = -EINVAL; + goto out; + } if (card->options.hsuid[0]) /* delete old ip address */ @@ -298,11 +279,13 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, card->options.hsuid[0] = '\0'; 
memcpy(card->dev->perm_addr, card->options.hsuid, 9); qeth_configure_cq(card, QETH_CQ_DISABLED); - return count; + goto out; } - if (qeth_configure_cq(card, QETH_CQ_ENABLED)) - return -EPERM; + if (qeth_configure_cq(card, QETH_CQ_ENABLED)) { + rc = -EPERM; + goto out; + } snprintf(card->options.hsuid, sizeof(card->options.hsuid), "%-8s", tmp); @@ -311,6 +294,8 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, rc = qeth_l3_modify_hsuid(card, true); +out: + mutex_unlock(&card->conf_mutex); return rc ? rc : count; } @@ -336,9 +321,6 @@ static ssize_t qeth_l3_dev_ipato_enable_show(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - return sprintf(buf, "%i\n", card->ipato.enabled? 1:0); } @@ -349,9 +331,6 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, bool enable; int rc = 0; - if (!card) - return -EINVAL; - mutex_lock(&card->conf_mutex); if (card->state != CARD_STATE_DOWN) { rc = -EPERM; @@ -385,9 +364,6 @@ static ssize_t qeth_l3_dev_ipato_invert4_show(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - return sprintf(buf, "%i\n", card->ipato.invert4? 1:0); } @@ -399,9 +375,6 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev, bool invert; int rc = 0; - if (!card) - return -EINVAL; - mutex_lock(&card->conf_mutex); if (sysfs_streq(buf, "toggle")) { invert = !card->ipato.invert4; @@ -429,30 +402,35 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { struct qeth_ipato_entry *ipatoe; - char addr_str[40]; - int entry_len; /* length of 1 entry string, differs between v4 and v6 */ - int i = 0; + int str_len = 0; - entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; - /* add strlen for "/<mask>\n" */ - entry_len += (proto == QETH_PROT_IPV4)? 5 : 6; mutex_lock(&card->ip_lock); list_for_each_entry(ipatoe, &card->ipato.entries, entry) { + char addr_str[40]; + int entry_len; + if (ipatoe->proto != proto) continue; - /* String must not be longer than PAGE_SIZE. So we check if - * string length gets near PAGE_SIZE. Then we can savely display - * the next IPv6 address (worst case, compared to IPv4) */ - if ((PAGE_SIZE - i) <= entry_len) + + entry_len = qeth_l3_ipaddr_to_string(proto, ipatoe->addr, + addr_str); + if (entry_len < 0) + continue; + + /* Append /%mask to the entry: */ + entry_len += 1 + ((proto == QETH_PROT_IPV4) ? 2 : 3); + /* Enough room to format %entry\n into null terminated page? */ + if (entry_len + 1 > PAGE_SIZE - str_len - 1) break; - qeth_l3_ipaddr_to_string(proto, ipatoe->addr, addr_str); - i += snprintf(buf + i, PAGE_SIZE - i, - "%s/%i\n", addr_str, ipatoe->mask_bits); + + entry_len = scnprintf(buf, PAGE_SIZE - str_len, + "%s/%i\n", addr_str, ipatoe->mask_bits); + str_len += entry_len; + buf += entry_len; } mutex_unlock(&card->ip_lock); - i += snprintf(buf + i, PAGE_SIZE - i, "\n"); - return i; + return str_len ? 
str_len : scnprintf(buf, PAGE_SIZE, "\n"); } static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev, @@ -460,9 +438,6 @@ static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV4); } @@ -501,16 +476,14 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count, int mask_bits; int rc = 0; - mutex_lock(&card->conf_mutex); rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits); if (rc) - goto out; + return rc; ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL); - if (!ipatoe) { - rc = -ENOMEM; - goto out; - } + if (!ipatoe) + return -ENOMEM; + ipatoe->proto = proto; memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16); ipatoe->mask_bits = mask_bits; @@ -518,8 +491,7 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count, rc = qeth_l3_add_ipato_entry(card, ipatoe); if (rc) kfree(ipatoe); -out: - mutex_unlock(&card->conf_mutex); + return rc ? rc : count; } @@ -528,9 +500,6 @@ static ssize_t qeth_l3_dev_ipato_add4_store(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4); } @@ -545,11 +514,9 @@ static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count, int mask_bits; int rc = 0; - mutex_lock(&card->conf_mutex); rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits); if (!rc) rc = qeth_l3_del_ipato_entry(card, proto, addr, mask_bits); - mutex_unlock(&card->conf_mutex); return rc ? rc : count; } @@ -558,9 +525,6 @@ static ssize_t qeth_l3_dev_ipato_del4_store(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4); } @@ -572,9 +536,6 @@ static ssize_t qeth_l3_dev_ipato_invert6_show(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - return sprintf(buf, "%i\n", card->ipato.invert6? 1:0); } @@ -585,9 +546,6 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev, bool invert; int rc = 0; - if (!card) - return -EINVAL; - mutex_lock(&card->conf_mutex); if (sysfs_streq(buf, "toggle")) { invert = !card->ipato.invert6; @@ -617,9 +575,6 @@ static ssize_t qeth_l3_dev_ipato_add6_show(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV6); } @@ -628,9 +583,6 @@ static ssize_t qeth_l3_dev_ipato_add6_store(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6); } @@ -643,9 +595,6 @@ static ssize_t qeth_l3_dev_ipato_del6_store(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); - if (!card) - return -EINVAL; - return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6); } @@ -674,34 +623,34 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf, { struct qeth_card *card = dev_get_drvdata(dev); struct qeth_ipaddr *ipaddr; - char addr_str[40]; int str_len = 0; - int entry_len; /* length of 1 entry string, differs between v4 and v6 */ int i; - if (!card) - return -EINVAL; - - entry_len = (proto == QETH_PROT_IPV4)? 
12 : 40; - entry_len += 2; /* \n + terminator */ mutex_lock(&card->ip_lock); hash_for_each(card->ip_htable, i, ipaddr, hnode) { + char addr_str[40]; + int entry_len; + if (ipaddr->proto != proto || ipaddr->type != type) continue; - /* String must not be longer than PAGE_SIZE. So we check if - * string length gets near PAGE_SIZE. Then we can savely display - * the next IPv6 address (worst case, compared to IPv4) */ - if ((PAGE_SIZE - str_len) <= entry_len) + + entry_len = qeth_l3_ipaddr_to_string(proto, (u8 *)&ipaddr->u, + addr_str); + if (entry_len < 0) + continue; + + /* Enough room to format %addr\n into null terminated page? */ + if (entry_len + 1 > PAGE_SIZE - str_len - 1) break; - qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, - addr_str); - str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n", - addr_str); + + entry_len = scnprintf(buf, PAGE_SIZE - str_len, "%s\n", + addr_str); + str_len += entry_len; + buf += entry_len; } mutex_unlock(&card->ip_lock); - str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n"); - return str_len; + return str_len ? str_len : scnprintf(buf, PAGE_SIZE, "\n"); } static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev, @@ -712,69 +661,34 @@ static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev, QETH_IP_TYPE_VIPA); } -static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto, - u8 *addr) -{ - if (qeth_l3_string_to_ipaddr(buf, proto, addr)) { - return -EINVAL; - } - return 0; -} - -static ssize_t qeth_l3_dev_vipa_add_store(const char *buf, size_t count, - struct qeth_card *card, enum qeth_prot_versions proto) +static ssize_t qeth_l3_vipa_store(struct device *dev, const char *buf, bool add, + size_t count, enum qeth_prot_versions proto) { + struct qeth_card *card = dev_get_drvdata(dev); u8 addr[16] = {0, }; int rc; - mutex_lock(&card->conf_mutex); - rc = qeth_l3_parse_vipae(buf, proto, addr); + rc = qeth_l3_string_to_ipaddr(buf, proto, addr); if (!rc) - rc = qeth_l3_modify_rxip_vipa(card, true, addr, + rc = qeth_l3_modify_rxip_vipa(card, add, addr, QETH_IP_TYPE_VIPA, proto); - mutex_unlock(&card->conf_mutex); return rc ? rc : count; } static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct qeth_card *card = dev_get_drvdata(dev); - - if (!card) - return -EINVAL; - - return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4); + return qeth_l3_vipa_store(dev, buf, true, count, QETH_PROT_IPV4); } static QETH_DEVICE_ATTR(vipa_add4, add4, 0644, qeth_l3_dev_vipa_add4_show, qeth_l3_dev_vipa_add4_store); -static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count, - struct qeth_card *card, enum qeth_prot_versions proto) -{ - u8 addr[16]; - int rc; - - mutex_lock(&card->conf_mutex); - rc = qeth_l3_parse_vipae(buf, proto, addr); - if (!rc) - rc = qeth_l3_modify_rxip_vipa(card, false, addr, - QETH_IP_TYPE_VIPA, proto); - mutex_unlock(&card->conf_mutex); - return rc ? 
rc : count;
-}
-
 static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
 			struct device_attribute *attr, const char *buf,
 			size_t count)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4);
+	return qeth_l3_vipa_store(dev, buf, false, count, QETH_PROT_IPV4);
 }
 
 static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
@@ -791,12 +705,7 @@ static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev,
 static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6);
+	return qeth_l3_vipa_store(dev, buf, true, count, QETH_PROT_IPV6);
 }
 
 static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
@@ -806,12 +715,7 @@ static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
 static ssize_t qeth_l3_dev_vipa_del6_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6);
+	return qeth_l3_vipa_store(dev, buf, false, count, QETH_PROT_IPV6);
 }
 
 static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL,
@@ -864,60 +768,34 @@ static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
 	return 0;
 }
 
-static ssize_t qeth_l3_dev_rxip_add_store(const char *buf, size_t count,
-	struct qeth_card *card, enum qeth_prot_versions proto)
+static ssize_t qeth_l3_rxip_store(struct device *dev, const char *buf, bool add,
+				  size_t count, enum qeth_prot_versions proto)
 {
+	struct qeth_card *card = dev_get_drvdata(dev);
 	u8 addr[16] = {0, };
 	int rc;
 
-	mutex_lock(&card->conf_mutex);
 	rc = qeth_l3_parse_rxipe(buf, proto, addr);
 	if (!rc)
-		rc = qeth_l3_modify_rxip_vipa(card, true, addr,
+		rc = qeth_l3_modify_rxip_vipa(card, add, addr,
 					      QETH_IP_TYPE_RXIP, proto);
-	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
 }
 
 static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4);
+	return qeth_l3_rxip_store(dev, buf, true, count, QETH_PROT_IPV4);
 }
 
 static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
 			qeth_l3_dev_rxip_add4_show,
 			qeth_l3_dev_rxip_add4_store);
 
-static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
-	struct qeth_card *card, enum qeth_prot_versions proto)
-{
-	u8 addr[16];
-	int rc;
-
-	mutex_lock(&card->conf_mutex);
-	rc = qeth_l3_parse_rxipe(buf, proto, addr);
-	if (!rc)
-		rc = qeth_l3_modify_rxip_vipa(card, false, addr,
-					      QETH_IP_TYPE_RXIP, proto);
-	mutex_unlock(&card->conf_mutex);
-	return rc ?
rc : count; -} - static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct qeth_card *card = dev_get_drvdata(dev); - - if (!card) - return -EINVAL; - - return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4); + return qeth_l3_rxip_store(dev, buf, false, count, QETH_PROT_IPV4); } static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL, @@ -934,12 +812,7 @@ static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev, static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct qeth_card *card = dev_get_drvdata(dev); - - if (!card) - return -EINVAL; - - return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6); + return qeth_l3_rxip_store(dev, buf, true, count, QETH_PROT_IPV6); } static QETH_DEVICE_ATTR(rxip_add6, add6, 0644, @@ -949,12 +822,7 @@ static QETH_DEVICE_ATTR(rxip_add6, add6, 0644, static ssize_t qeth_l3_dev_rxip_del6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct qeth_card *card = dev_get_drvdata(dev); - - if (!card) - return -EINVAL; - - return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6); + return qeth_l3_rxip_store(dev, buf, false, count, QETH_PROT_IPV6); } static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL, diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile index 9dda431ec8f3..352056eb0dd1 100644 --- a/drivers/s390/scsi/Makefile +++ b/drivers/s390/scsi/Makefile @@ -5,6 +5,6 @@ zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_dbf.o zfcp_erp.o \ zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \ - zfcp_unit.o + zfcp_unit.o zfcp_diag.o obj-$(CONFIG_ZFCP) += zfcp.o diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index e390f8c6d5f3..09ec846fe01d 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -4,7 +4,7 @@ * * Module interface and handling of zfcp data structures. * - * Copyright IBM Corp. 2002, 2017 + * Copyright IBM Corp. 
2002, 2018 */ /* @@ -25,6 +25,7 @@ * Martin Petermann * Sven Schuetz * Steffen Maier + * Benjamin Block */ #define KMSG_COMPONENT "zfcp" @@ -36,6 +37,7 @@ #include "zfcp_ext.h" #include "zfcp_fc.h" #include "zfcp_reqlist.h" +#include "zfcp_diag.h" #define ZFCP_BUS_ID_SIZE 20 @@ -356,6 +358,9 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) adapter->erp_action.adapter = adapter; + if (zfcp_diag_adapter_setup(adapter)) + goto failed; + if (zfcp_qdio_setup(adapter)) goto failed; @@ -402,6 +407,9 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) &zfcp_sysfs_adapter_attrs)) goto failed; + if (zfcp_diag_sysfs_setup(adapter)) + goto failed; + /* report size limit per scatter-gather segment */ adapter->ccw_device->dev.dma_parms = &adapter->dma_parms; @@ -426,6 +434,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter) zfcp_fc_wka_ports_force_offline(adapter->gs); zfcp_scsi_adapter_unregister(adapter); + zfcp_diag_sysfs_destroy(adapter); sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs); zfcp_erp_thread_kill(adapter); @@ -449,6 +458,7 @@ void zfcp_adapter_release(struct kref *ref) dev_set_drvdata(&adapter->ccw_device->dev, NULL); zfcp_fc_gs_destroy(adapter); zfcp_free_low_mem_buffers(adapter); + zfcp_diag_adapter_free(adapter); kfree(adapter->req_list); kfree(adapter->fc_stats); kfree(adapter->stats_reset_data); diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index dccdb41bed8c..1234294700c4 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -95,11 +95,9 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req) memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE); - if (q_head->fsf_command != FSF_QTCB_FCP_CMND) { - rec->pl_len = q_head->log_length; - zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start, - rec->pl_len, "fsf_res", req->req_id); - } + rec->pl_len = q_head->log_length; + zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start, + rec->pl_len, "fsf_res", req->req_id); debug_event(dbf->hba, level, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->hba_lock, flags); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 87d2f47a6990..8cc0eefe4ccc 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -4,7 +4,7 @@ * * Global definitions for the zfcp device driver. * - * Copyright IBM Corp. 2002, 2017 + * Copyright IBM Corp. 2002, 2018 */ #ifndef ZFCP_DEF_H @@ -86,6 +86,7 @@ #define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080 #define ZFCP_STATUS_FSFREQ_TMFUNCFAILED 0x00000200 #define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000 +#define ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE 0x00020000 /************************* STRUCTURE DEFINITIONS *****************************/ @@ -197,6 +198,7 @@ struct zfcp_adapter { struct device_dma_parameters dma_parms; struct zfcp_fc_events events; unsigned long next_port_scan; + struct zfcp_diag_adapter *diagnostics; }; struct zfcp_port { diff --git a/drivers/s390/scsi/zfcp_diag.c b/drivers/s390/scsi/zfcp_diag.c new file mode 100644 index 000000000000..67a8f4e57db1 --- /dev/null +++ b/drivers/s390/scsi/zfcp_diag.c @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * zfcp device driver + * + * Functions to handle diagnostics. + * + * Copyright IBM Corp. 
2018
+ */
+
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/kernfs.h>
+#include <linux/sysfs.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "zfcp_diag.h"
+#include "zfcp_ext.h"
+#include "zfcp_def.h"
+
+static DECLARE_WAIT_QUEUE_HEAD(__zfcp_diag_publish_wait);
+
+/**
+ * zfcp_diag_adapter_setup() - Set up storage for adapter diagnostics.
+ * @adapter: the adapter to set up diagnostics for.
+ *
+ * Creates the data-structures to store the diagnostics for an adapter. This
+ * overwrites whatever was stored before at &zfcp_adapter->diagnostics!
+ *
+ * Return:
+ * * 0       - Everything is OK
+ * * -ENOMEM - Could not allocate all/parts of the data-structures;
+ *             &zfcp_adapter->diagnostics remains unchanged
+ */
+int zfcp_diag_adapter_setup(struct zfcp_adapter *const adapter)
+{
+	struct zfcp_diag_adapter *diag;
+	struct zfcp_diag_header *hdr;
+
+	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
+	if (diag == NULL)
+		return -ENOMEM;
+
+	diag->max_age = (5 * 1000); /* default value: 5 s */
+
+	/* setup header for port_data */
+	hdr = &diag->port_data.header;
+
+	spin_lock_init(&hdr->access_lock);
+	hdr->buffer = &diag->port_data.data;
+	hdr->buffer_size = sizeof(diag->port_data.data);
+	/* set the timestamp so that the first test on age will always fail */
+	hdr->timestamp = jiffies - msecs_to_jiffies(diag->max_age);
+
+	/* setup header for config_data */
+	hdr = &diag->config_data.header;
+
+	spin_lock_init(&hdr->access_lock);
+	hdr->buffer = &diag->config_data.data;
+	hdr->buffer_size = sizeof(diag->config_data.data);
+	/* set the timestamp so that the first test on age will always fail */
+	hdr->timestamp = jiffies - msecs_to_jiffies(diag->max_age);
+
+	adapter->diagnostics = diag;
+	return 0;
+}
+
+/**
+ * zfcp_diag_adapter_free() - Frees all adapter diagnostics allocations.
+ * @adapter: the adapter whose diagnostic structures should be freed.
+ *
+ * Frees all data-structures in the given adapter that store diagnostics
+ * information. Can safely be called with partially set-up diagnostics.
+ */
+void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter)
+{
+	kfree(adapter->diagnostics);
+	adapter->diagnostics = NULL;
+}
+
+/**
+ * zfcp_diag_sysfs_setup() - Set up the sysfs-group for adapter-diagnostics.
+ * @adapter: target adapter to which the group should be added.
+ *
+ * Return: 0 on success; something else otherwise (see sysfs_create_group()).
+ */
+int zfcp_diag_sysfs_setup(struct zfcp_adapter *const adapter)
+{
+	int rc = sysfs_create_group(&adapter->ccw_device->dev.kobj,
+				    &zfcp_sysfs_diag_attr_group);
+	if (rc == 0)
+		adapter->diagnostics->sysfs_established = 1;
+
+	return rc;
+}
+
+/**
+ * zfcp_diag_sysfs_destroy() - Remove the sysfs-group for adapter-diagnostics.
+ * @adapter: target adapter from which the group should be removed.
+ */
+void zfcp_diag_sysfs_destroy(struct zfcp_adapter *const adapter)
+{
+	if (adapter->diagnostics == NULL ||
+	    !adapter->diagnostics->sysfs_established)
+		return;
+
+	/*
+	 * We need this state-handling so we can prevent warnings being printed
+	 * on the kernel-console in case we have to abort a halfway done
+	 * zfcp_adapter_enqueue(), in which the sysfs-group was not yet
+	 * established.
sysfs_remove_group() does this checking as well, but
+	 * still prints a warning in case we try to remove a group that has not
+	 * been established before.
+	 */
+	adapter->diagnostics->sysfs_established = 0;
+	sysfs_remove_group(&adapter->ccw_device->dev.kobj,
+			   &zfcp_sysfs_diag_attr_group);
+}
+
+
+/**
+ * zfcp_diag_update_xdata() - Update a diagnostics buffer.
+ * @hdr: the meta data to update.
+ * @data: data to use for the update.
+ * @incomplete: flag stating whether the data in @data is incomplete.
+ */
+void zfcp_diag_update_xdata(struct zfcp_diag_header *const hdr,
+			    const void *const data, const bool incomplete)
+{
+	const unsigned long capture_timestamp = jiffies;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdr->access_lock, flags);
+
+	/* make sure we never go into the past with an update */
+	if (!time_after_eq(capture_timestamp, hdr->timestamp))
+		goto out;
+
+	hdr->timestamp = capture_timestamp;
+	hdr->incomplete = incomplete;
+	memcpy(hdr->buffer, data, hdr->buffer_size);
+out:
+	spin_unlock_irqrestore(&hdr->access_lock, flags);
+}
+
+/**
+ * zfcp_diag_update_port_data_buffer() - Implementation of
+ *					  &typedef zfcp_diag_update_buffer_func
+ *					  to collect and update Port Data.
+ * @adapter: Adapter to collect Port Data from.
+ *
+ * This call is SYNCHRONOUS! It blocks till the respective command has
+ * finished completely, or has failed in some way.
+ *
+ * Return:
+ * * 0 - Successfully retrieved new diagnostics and updated the buffer;
+ *       this also includes cases where data was retrieved, but
+ *       incomplete; you'll have to check the flag ``incomplete``
+ *       of &struct zfcp_diag_header.
+ * * see zfcp_fsf_exchange_port_data_sync() for possible error-codes
+ *   (excluding -EAGAIN)
+ */
+int zfcp_diag_update_port_data_buffer(struct zfcp_adapter *const adapter)
+{
+	int rc;
+
+	rc = zfcp_fsf_exchange_port_data_sync(adapter->qdio, NULL);
+	if (rc == -EAGAIN)
+		rc = 0; /* signaling incomplete via struct zfcp_diag_header */
+
+	/* buffer-data was updated in zfcp_fsf_exchange_port_data_handler() */
+
+	return rc;
+}
+
+/**
+ * zfcp_diag_update_config_data_buffer() - Implementation of
+ *					    &typedef zfcp_diag_update_buffer_func
+ *					    to collect and update Config Data.
+ * @adapter: Adapter to collect Config Data from.
+ *
+ * This call is SYNCHRONOUS! It blocks till the respective command has
+ * finished completely, or has failed in some way.
+ *
+ * Return:
+ * * 0 - Successfully retrieved new diagnostics and updated the buffer;
+ *       this also includes cases where data was retrieved, but
+ *       incomplete; you'll have to check the flag ``incomplete``
+ *       of &struct zfcp_diag_header.
+ * * see zfcp_fsf_exchange_config_data_sync() for possible error-codes
+ *   (excluding -EAGAIN)
+ */
+int zfcp_diag_update_config_data_buffer(struct zfcp_adapter *const adapter)
+{
+	int rc;
+
+	rc = zfcp_fsf_exchange_config_data_sync(adapter->qdio, NULL);
+	if (rc == -EAGAIN)
+		rc = 0; /* signaling incomplete via struct zfcp_diag_header */
+
+	/* buffer-data was updated in zfcp_fsf_exchange_config_data_handler() */
+
+	return rc;
+}
+
+static int __zfcp_diag_update_buffer(struct zfcp_adapter *const adapter,
+				     struct zfcp_diag_header *const hdr,
+				     zfcp_diag_update_buffer_func buffer_update,
+				     unsigned long *const flags)
+	__must_hold(hdr->access_lock)
+{
+	int rc;
+
+	if (hdr->updating == 1) {
+		rc = wait_event_interruptible_lock_irq(__zfcp_diag_publish_wait,
+						       hdr->updating == 0,
+						       hdr->access_lock);
+		rc = (rc == 0 ?
-EAGAIN : -EINTR);
+	} else {
+		hdr->updating = 1;
+		spin_unlock_irqrestore(&hdr->access_lock, *flags);
+
+		/* unlocked, because update function sleeps */
+		rc = buffer_update(adapter);
+
+		spin_lock_irqsave(&hdr->access_lock, *flags);
+		hdr->updating = 0;
+
+		/*
+		 * every thread waiting here went via an interruptible wait,
+		 * so it's fine to only wake those
+		 */
+		wake_up_interruptible_all(&__zfcp_diag_publish_wait);
+	}
+
+	return rc;
+}
+
+static bool
+__zfcp_diag_test_buffer_age_isfresh(const struct zfcp_diag_adapter *const diag,
+				    const struct zfcp_diag_header *const hdr)
+	__must_hold(hdr->access_lock)
+{
+	const unsigned long now = jiffies;
+
+	/*
+	 * Should not happen (data is from the future); if it does, still
+	 * signal that it needs refresh
+	 */
+	if (!time_after_eq(now, hdr->timestamp))
+		return false;
+
+	if (jiffies_to_msecs(now - hdr->timestamp) >= diag->max_age)
+		return false;
+
+	return true;
+}
+
+/**
+ * zfcp_diag_update_buffer_limited() - Collect diagnostics and update a
+ *				       diagnostics buffer rate limited.
+ * @adapter: Adapter to collect the diagnostics from.
+ * @hdr: buffer-header for which to update with the collected diagnostics.
+ * @buffer_update: Specific implementation for collecting and updating.
+ *
+ * This function will cause an update of the given @hdr by calling the also
+ * given @buffer_update function. If called by multiple sources at the same
+ * time, it will synchronize the update by only allowing one source to call
+ * @buffer_update and the others to wait for that source to complete instead
+ * (the wait is interruptible).
+ *
+ * Additionally, this version is rate-limited and will only exit if either the
+ * buffer is fresh enough (within the limit; it will do nothing if the buffer
+ * is fresh enough to begin with), or if the source/thread that started this
+ * update is the one that made the update (to prevent endless loops).
+ *
+ * Return:
+ * * 0      - If the update was successfully published and/or the buffer is
+ *            fresh enough
+ * * -EINTR - If the thread went into the wait-state and was interrupted
+ * * whatever @buffer_update returns
+ */
+int zfcp_diag_update_buffer_limited(struct zfcp_adapter *const adapter,
+				    struct zfcp_diag_header *const hdr,
+				    zfcp_diag_update_buffer_func buffer_update)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&hdr->access_lock, flags);
+
+	for (rc = 0;
+	     !__zfcp_diag_test_buffer_age_isfresh(adapter->diagnostics, hdr);
+	     rc = 0) {
+		rc = __zfcp_diag_update_buffer(adapter, hdr, buffer_update,
+					       &flags);
+		if (rc != -EAGAIN)
+			break;
+	}
+
+	spin_unlock_irqrestore(&hdr->access_lock, flags);
+
+	return rc;
+}
diff --git a/drivers/s390/scsi/zfcp_diag.h b/drivers/s390/scsi/zfcp_diag.h
new file mode 100644
index 000000000000..b9c93d15f67c
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_diag.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * zfcp device driver
+ *
+ * Definitions for handling diagnostics in the zfcp device driver.
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#ifndef ZFCP_DIAG_H
+#define ZFCP_DIAG_H
+
+#include <linux/spinlock.h>
+
+#include "zfcp_fsf.h"
+#include "zfcp_def.h"
+
+/**
+ * struct zfcp_diag_header - general part of a diagnostic buffer.
+ * @access_lock: lock protecting all the data in this buffer.
+ * @updating: flag showing that an update for this buffer is currently running.
+ * @incomplete: flag showing that the data in @buffer is incomplete.
+ * @timestamp: time in jiffies when the data of this buffer was last captured.
+ * @buffer: implementation-dependent data of this buffer
+ * @buffer_size: size of @buffer
+ */
+struct zfcp_diag_header {
+	spinlock_t	access_lock;
+
+	/* Flags */
+	u64		updating	:1;
+	u64		incomplete	:1;
+
+	unsigned long	timestamp;
+
+	void		*buffer;
+	size_t		buffer_size;
+};
+
+/**
+ * struct zfcp_diag_adapter - central storage for all diagnostics concerning an
+ *			      adapter.
+ * @sysfs_established: flag showing that the associated sysfs-group was created
+ *		       during run of zfcp_adapter_enqueue().
+ * @max_age: maximum age of data in diagnostic buffers before they need to be
+ *	     refreshed (in ms).
+ * @port_data: data retrieved using exchange port data.
+ * @port_data.header: header with metadata for the cache in @port_data.data.
+ * @port_data.data: cached QTCB Bottom of command exchange port data.
+ * @config_data: data retrieved using exchange config data.
+ * @config_data.header: header with metadata for the cache in @config_data.data.
+ * @config_data.data: cached QTCB Bottom of command exchange config data.
+ */
+struct zfcp_diag_adapter {
+	u64	sysfs_established	:1;
+
+	unsigned long	max_age;
+
+	struct {
+		struct zfcp_diag_header		header;
+		struct fsf_qtcb_bottom_port	data;
+	} port_data;
+	struct {
+		struct zfcp_diag_header		header;
+		struct fsf_qtcb_bottom_config	data;
+	} config_data;
+};
+
+int zfcp_diag_adapter_setup(struct zfcp_adapter *const adapter);
+void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter);
+
+int zfcp_diag_sysfs_setup(struct zfcp_adapter *const adapter);
+void zfcp_diag_sysfs_destroy(struct zfcp_adapter *const adapter);
+
+void zfcp_diag_update_xdata(struct zfcp_diag_header *const hdr,
+			    const void *const data, const bool incomplete);
+
+/*
+ * Function-Type used in zfcp_diag_update_buffer_limited() for the function
+ * that does the buffer-implementation dependent work.
+ */
+typedef int (*zfcp_diag_update_buffer_func)(struct zfcp_adapter *const adapter);
+
+int zfcp_diag_update_config_data_buffer(struct zfcp_adapter *const adapter);
+int zfcp_diag_update_port_data_buffer(struct zfcp_adapter *const adapter);
+int zfcp_diag_update_buffer_limited(struct zfcp_adapter *const adapter,
+				    struct zfcp_diag_header *const hdr,
+				    zfcp_diag_update_buffer_func buffer_update);
+
+/**
+ * zfcp_diag_support_sfp() - Return %true if the @adapter supports reporting
+ *			     SFP Data.
+ * @adapter: adapter to test the availability of SFP Data reporting for.
+ */ +static inline bool +zfcp_diag_support_sfp(const struct zfcp_adapter *const adapter) +{ + return !!(adapter->adapter_features & FSF_FEATURE_REPORT_SFP_DATA); +} + +#endif /* ZFCP_DIAG_H */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 96f0d34e9459..93655b85b73f 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -174,7 +174,7 @@ static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want, return 0; p_status = atomic_read(&port->status); if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) || - p_status & ZFCP_STATUS_COMMON_ERP_FAILED) + p_status & ZFCP_STATUS_COMMON_ERP_FAILED) return 0; if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED)) need = ZFCP_ERP_ACTION_REOPEN_PORT; @@ -190,7 +190,7 @@ static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want, return 0; a_status = atomic_read(&adapter->status); if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) || - a_status & ZFCP_STATUS_COMMON_ERP_FAILED) + a_status & ZFCP_STATUS_COMMON_ERP_FAILED) return 0; if (p_status & ZFCP_STATUS_COMMON_NOESC) return need; diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 31e8a7240fd7..c8556787cfdc 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -167,6 +167,7 @@ extern const struct attribute_group *zfcp_port_attr_groups[]; extern struct mutex zfcp_sysfs_port_units_mutex; extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; extern struct device_attribute *zfcp_sysfs_shost_attrs[]; +extern const struct attribute_group zfcp_sysfs_diag_attr_group; bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port); /* zfcp_unit.c */ diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 296bbc3c4606..223a805f0b0b 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -11,6 +11,7 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/blktrace_api.h> +#include <linux/jiffies.h> #include <linux/types.h> #include <linux/slab.h> #include <scsi/fc/fc_els.h> @@ -19,6 +20,7 @@ #include "zfcp_dbf.h" #include "zfcp_qdio.h" #include "zfcp_reqlist.h" +#include "zfcp_diag.h" /* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */ #define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ) @@ -27,6 +29,11 @@ struct kmem_cache *zfcp_fsf_qtcb_cache; +static bool ber_stop = true; +module_param(ber_stop, bool, 0600); +MODULE_PARM_DESC(ber_stop, + "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)"); + static void zfcp_fsf_request_timeout_handler(struct timer_list *t) { struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer); @@ -236,10 +243,15 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) case FSF_STATUS_READ_SENSE_DATA_AVAIL: break; case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: - dev_warn(&adapter->ccw_device->dev, - "The error threshold for checksum statistics " - "has been exceeded\n"); zfcp_dbf_hba_bit_err("fssrh_3", req); + if (ber_stop) { + dev_warn(&adapter->ccw_device->dev, + "All paths over this FCP device are disused because of excessive bit errors\n"); + zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b"); + } else { + dev_warn(&adapter->ccw_device->dev, + "The error threshold for checksum statistics has been exceeded\n"); + } break; case FSF_STATUS_READ_LINK_DOWN: zfcp_fsf_status_read_link_down(req); @@ -544,6 +556,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) static void 
zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) { struct zfcp_adapter *adapter = req->adapter; + struct zfcp_diag_header *const diag_hdr = + &adapter->diagnostics->config_data.header; struct fsf_qtcb *qtcb = req->qtcb; struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config; struct Scsi_Host *shost = adapter->scsi_host; @@ -560,6 +574,12 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) switch (qtcb->header.fsf_status) { case FSF_GOOD: + /* + * usually we wait with an update till the cache is too old, + * but because we have the data available, update it anyway + */ + zfcp_diag_update_xdata(diag_hdr, bottom, false); + if (zfcp_fsf_exchange_config_evaluate(req)) return; @@ -575,6 +595,9 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) &adapter->status); break; case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: + zfcp_diag_update_xdata(diag_hdr, bottom, true); + req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE; + fc_host_node_name(shost) = 0; fc_host_port_name(shost) = 0; fc_host_port_id(shost) = 0; @@ -643,16 +666,28 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req) static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) { + struct zfcp_diag_header *const diag_hdr = + &req->adapter->diagnostics->port_data.header; struct fsf_qtcb *qtcb = req->qtcb; + struct fsf_qtcb_bottom_port *bottom = &qtcb->bottom.port; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) return; switch (qtcb->header.fsf_status) { case FSF_GOOD: + /* + * usually we wait with an update till the cache is too old, + * but because we have the data available, update it anyway + */ + zfcp_diag_update_xdata(diag_hdr, bottom, false); + zfcp_fsf_exchange_port_evaluate(req); break; case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: + zfcp_diag_update_xdata(diag_hdr, bottom, true); + req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE; + zfcp_fsf_exchange_port_evaluate(req); zfcp_fsf_link_down_info_eval(req, &qtcb->header.fsf_status_qual.link_down_info); @@ -1251,7 +1286,8 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) req->qtcb->bottom.config.feature_selection = FSF_FEATURE_NOTIFICATION_LOST | - FSF_FEATURE_UPDATE_ALERT; + FSF_FEATURE_UPDATE_ALERT | + FSF_FEATURE_REQUEST_SFP_DATA; req->erp_action = erp_action; req->handler = zfcp_fsf_exchange_config_data_handler; erp_action->fsf_req_id = req->req_id; @@ -1268,6 +1304,19 @@ out: return retval; } + +/** + * zfcp_fsf_exchange_config_data_sync() - Request information about FCP channel. + * @qdio: pointer to the QDIO-Queue to use for sending the command. + * @data: pointer to the QTCB-Bottom for storing the result of the command, + * might be %NULL. 
+ * + * Returns: + * * 0 - Exchange Config Data was successful, @data is complete + * * -EIO - Exchange Config Data was not successful, @data is invalid + * * -EAGAIN - @data contains incomplete data + * * -ENOMEM - Some memory allocation failed along the way + */ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, struct fsf_qtcb_bottom_config *data) { @@ -1291,7 +1340,8 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, req->qtcb->bottom.config.feature_selection = FSF_FEATURE_NOTIFICATION_LOST | - FSF_FEATURE_UPDATE_ALERT; + FSF_FEATURE_UPDATE_ALERT | + FSF_FEATURE_REQUEST_SFP_DATA; if (data) req->data = data; @@ -1299,9 +1349,16 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); spin_unlock_irq(&qdio->req_q_lock); + if (!retval) { /* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */ wait_for_completion(&req->completion); + + if (req->status & + (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED)) + retval = -EIO; + else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE) + retval = -EAGAIN; } zfcp_fsf_req_free(req); @@ -1359,10 +1416,17 @@ out: } /** - * zfcp_fsf_exchange_port_data_sync - request information about local port - * @qdio: pointer to struct zfcp_qdio - * @data: pointer to struct fsf_qtcb_bottom_port - * Returns: 0 on success, error otherwise + * zfcp_fsf_exchange_port_data_sync() - Request information about local port. + * @qdio: pointer to the QDIO-Queue to use for sending the command. + * @data: pointer to the QTCB-Bottom for storing the result of the command, + * might be %NULL. + * + * Returns: + * * 0 - Exchange Port Data was successful, @data is complete + * * -EIO - Exchange Port Data was not successful, @data is invalid + * * -EAGAIN - @data contains incomplete data + * * -ENOMEM - Some memory allocation failed along the way + * * -EOPNOTSUPP - This operation is not supported */ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio, struct fsf_qtcb_bottom_port *data) @@ -1398,10 +1462,15 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio, if (!retval) { /* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. 
*/ wait_for_completion(&req->completion); + + if (req->status & + (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED)) + retval = -EIO; + else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE) + retval = -EAGAIN; } zfcp_fsf_req_free(req); - return retval; out_unlock: diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h index 2c658b66318c..2b1e4da1944f 100644 --- a/drivers/s390/scsi/zfcp_fsf.h +++ b/drivers/s390/scsi/zfcp_fsf.h @@ -163,6 +163,8 @@ #define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020 #define FSF_FEATURE_UPDATE_ALERT 0x00000100 #define FSF_FEATURE_MEASUREMENT_DATA 0x00000200 +#define FSF_FEATURE_REQUEST_SFP_DATA 0x00000200 +#define FSF_FEATURE_REPORT_SFP_DATA 0x00000800 #define FSF_FEATURE_DIF_PROT_TYPE1 0x00010000 #define FSF_FEATURE_DIX_PROT_TCPIP 0x00020000 @@ -407,7 +409,24 @@ struct fsf_qtcb_bottom_port { u8 cp_util; u8 cb_util; u8 a_util; - u8 res2[253]; + u8 res2; + u16 temperature; + u16 vcc; + u16 tx_bias; + u16 tx_power; + u16 rx_power; + union { + u16 raw; + struct { + u16 fec_active :1; + u16:7; + u16 connector_type :2; + u16 sfp_invalid :1; + u16 optical_port :1; + u16 port_tx_type :4; + }; + } sfp_flags; + u8 res3[240]; } __attribute__ ((packed)); union fsf_qtcb_bottom { diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index e9ded2befa0d..3910d529c15a 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -605,7 +605,7 @@ zfcp_scsi_get_fc_host_stats(struct Scsi_Host *host) return NULL; ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data); - if (ret) { + if (ret != 0 && ret != -EAGAIN) { kfree(data); return NULL; } @@ -634,7 +634,7 @@ static void zfcp_scsi_reset_fc_host_stats(struct Scsi_Host *shost) return; ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data); - if (ret) + if (ret != 0 && ret != -EAGAIN) kfree(data); else { adapter->stats_reset = jiffies/HZ; diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index af197e2b3e69..494b9fe9cc94 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -11,6 +11,7 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/slab.h> +#include "zfcp_diag.h" #include "zfcp_ext.h" #define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \ @@ -325,6 +326,50 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL, zfcp_sysfs_port_remove_store); +static ssize_t +zfcp_sysfs_adapter_diag_max_age_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev)); + ssize_t rc; + + if (!adapter) + return -ENODEV; + + /* ceil(log(2^64 - 1) / log(10)) = 20 */ + rc = scnprintf(buf, 20 + 2, "%lu\n", adapter->diagnostics->max_age); + + zfcp_ccw_adapter_put(adapter); + return rc; +} + +static ssize_t +zfcp_sysfs_adapter_diag_max_age_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev)); + unsigned long max_age; + ssize_t rc; + + if (!adapter) + return -ENODEV; + + rc = kstrtoul(buf, 10, &max_age); + if (rc != 0) + goto out; + + adapter->diagnostics->max_age = max_age; + + rc = count; +out: + zfcp_ccw_adapter_put(adapter); + return rc; +} +static ZFCP_DEV_ATTR(adapter, diag_max_age, 0644, + zfcp_sysfs_adapter_diag_max_age_show, + zfcp_sysfs_adapter_diag_max_age_store); + static struct attribute *zfcp_adapter_attrs[] = { 
&dev_attr_adapter_failed.attr, &dev_attr_adapter_in_recovery.attr, @@ -337,6 +382,7 @@ static struct attribute *zfcp_adapter_attrs[] = { &dev_attr_adapter_lic_version.attr, &dev_attr_adapter_status.attr, &dev_attr_adapter_hardware_version.attr, + &dev_attr_adapter_diag_max_age.attr, NULL }; @@ -577,7 +623,7 @@ static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev, return -ENOMEM; retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port); - if (!retval) + if (retval == 0 || retval == -EAGAIN) retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util, qtcb_port->cb_util, qtcb_port->a_util); kfree(qtcb_port); @@ -603,7 +649,7 @@ static int zfcp_sysfs_adapter_ex_config(struct device *dev, return -ENOMEM; retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config); - if (!retval) + if (retval == 0 || retval == -EAGAIN) *stat_inf = qtcb_config->stat_info; kfree(qtcb_config); @@ -664,3 +710,123 @@ struct device_attribute *zfcp_sysfs_shost_attrs[] = { &dev_attr_queue_full, NULL }; + +static ssize_t zfcp_sysfs_adapter_diag_b2b_credit_show( + struct device *dev, struct device_attribute *attr, char *buf) +{ + struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev)); + struct zfcp_diag_header *diag_hdr; + struct fc_els_flogi *nsp; + ssize_t rc = -ENOLINK; + unsigned long flags; + unsigned int status; + + if (!adapter) + return -ENODEV; + + status = atomic_read(&adapter->status); + if (0 == (status & ZFCP_STATUS_COMMON_OPEN) || + 0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) || + 0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED)) + goto out; + + diag_hdr = &adapter->diagnostics->config_data.header; + + rc = zfcp_diag_update_buffer_limited( + adapter, diag_hdr, zfcp_diag_update_config_data_buffer); + if (rc != 0) + goto out; + + spin_lock_irqsave(&diag_hdr->access_lock, flags); + /* nport_serv_param doesn't contain the ELS_Command code */ + nsp = (struct fc_els_flogi *)((unsigned long) + adapter->diagnostics->config_data + .data.nport_serv_param - + sizeof(u32)); + + rc = scnprintf(buf, 5 + 2, "%hu\n", + be16_to_cpu(nsp->fl_csp.sp_bb_cred)); + spin_unlock_irqrestore(&diag_hdr->access_lock, flags); + +out: + zfcp_ccw_adapter_put(adapter); + return rc; +} +static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400, + zfcp_sysfs_adapter_diag_b2b_credit_show, NULL); + +#define ZFCP_DEFINE_DIAG_SFP_ATTR(_name, _qtcb_member, _prtsize, _prtfmt) \ + static ssize_t zfcp_sysfs_adapter_diag_sfp_##_name##_show( \ + struct device *dev, struct device_attribute *attr, char *buf) \ + { \ + struct zfcp_adapter *const adapter = \ + zfcp_ccw_adapter_by_cdev(to_ccwdev(dev)); \ + struct zfcp_diag_header *diag_hdr; \ + ssize_t rc = -ENOLINK; \ + unsigned long flags; \ + unsigned int status; \ + \ + if (!adapter) \ + return -ENODEV; \ + \ + status = atomic_read(&adapter->status); \ + if (0 == (status & ZFCP_STATUS_COMMON_OPEN) || \ + 0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) || \ + 0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED)) \ + goto out; \ + \ + if (!zfcp_diag_support_sfp(adapter)) { \ + rc = -EOPNOTSUPP; \ + goto out; \ + } \ + \ + diag_hdr = &adapter->diagnostics->port_data.header; \ + \ + rc = zfcp_diag_update_buffer_limited( \ + adapter, diag_hdr, zfcp_diag_update_port_data_buffer); \ + if (rc != 0) \ + goto out; \ + \ + spin_lock_irqsave(&diag_hdr->access_lock, flags); \ + rc = scnprintf( \ + buf, (_prtsize) + 2, _prtfmt "\n", \ + adapter->diagnostics->port_data.data._qtcb_member); \ + spin_unlock_irqrestore(&diag_hdr->access_lock, flags); \ + \ + out: \ + 
zfcp_ccw_adapter_put(adapter);				       \
+		return rc;					       \
+	}							       \
+	static ZFCP_DEV_ATTR(adapter_diag_sfp, _name, 0400,	       \
+			     zfcp_sysfs_adapter_diag_sfp_##_name##_show, NULL)
+
+ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(rx_power, rx_power, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(port_tx_type, sfp_flags.port_tx_type, 2, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(optical_port, sfp_flags.optical_port, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(sfp_invalid, sfp_flags.sfp_invalid, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(connector_type, sfp_flags.connector_type, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(fec_active, sfp_flags.fec_active, 1, "%hu");
+
+static struct attribute *zfcp_sysfs_diag_attrs[] = {
+	&dev_attr_adapter_diag_sfp_temperature.attr,
+	&dev_attr_adapter_diag_sfp_vcc.attr,
+	&dev_attr_adapter_diag_sfp_tx_bias.attr,
+	&dev_attr_adapter_diag_sfp_tx_power.attr,
+	&dev_attr_adapter_diag_sfp_rx_power.attr,
+	&dev_attr_adapter_diag_sfp_port_tx_type.attr,
+	&dev_attr_adapter_diag_sfp_optical_port.attr,
+	&dev_attr_adapter_diag_sfp_sfp_invalid.attr,
+	&dev_attr_adapter_diag_sfp_connector_type.attr,
+	&dev_attr_adapter_diag_sfp_fec_active.attr,
+	&dev_attr_adapter_diag_b2b_credit.attr,
+	NULL,
+};
+
+const struct attribute_group zfcp_sysfs_diag_attr_group = {
+	.name = "diagnostics",
+	.attrs = zfcp_sysfs_diag_attrs,
+};
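
Note on consuming the new interface: the attribute group above is registered on the adapter's ccw device with the group name "diagnostics", so the files should appear as <device>/diagnostics/<attribute> in sysfs. The minimal userspace sketch below reads all of them for one FCP device; the sysfs base path and the bus-ID 0.0.1900 are illustrative assumptions, not guaranteed by this patch, and reads may fail (for example with EOPNOTSUPP on hardware without SFP reporting, see zfcp_diag_support_sfp()).

/*
 * Minimal sketch, not part of the patch: dump the zfcp "diagnostics"
 * sysfs attributes added above. Base path and bus-ID are assumptions;
 * adjust them to the FCP device on the target system.
 */
#include <stdio.h>

int main(void)
{
	const char *base = "/sys/bus/ccw/devices/0.0.1900/diagnostics";
	const char *attrs[] = {
		"b2b_credit", "temperature", "vcc", "tx_bias", "tx_power",
		"rx_power", "port_tx_type", "optical_port", "sfp_invalid",
		"connector_type", "fec_active",
	};
	unsigned int i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		char path[256], value[32];
		FILE *f;

		snprintf(path, sizeof(path), "%s/%s", base, attrs[i]);
		f = fopen(path, "r");
		if (!f) {
			perror(path); /* e.g. no SFP data support */
			continue;
		}
		if (fgets(value, sizeof(value), f))
			printf("%-14s %s", attrs[i], value);
		fclose(f);
	}
	return 0;
}

Each read can trigger a synchronous Exchange Port/Config Data round trip; the driver rate-limits this through the cached buffers and the per-adapter diag_max_age attribute (milliseconds, default 5000) that this patch also adds.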