Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd_diag.c        38
-rw-r--r--  drivers/s390/block/dasd_eckd.c        28
-rw-r--r--  drivers/s390/block/dasd_fba.c         28
-rw-r--r--  drivers/s390/block/dasd_int.h          3
-rw-r--r--  drivers/s390/block/dcssblk.c           4
-rw-r--r--  drivers/s390/block/xpram.c             6
-rw-r--r--  drivers/s390/char/con3215.c            3
-rw-r--r--  drivers/s390/char/con3270.c            3
-rw-r--r--  drivers/s390/char/raw3270.c            1
-rw-r--r--  drivers/s390/char/sclp.c               5
-rw-r--r--  drivers/s390/char/tape_34xx.c         32
-rw-r--r--  drivers/s390/char/tape_3590.c         63
-rw-r--r--  drivers/s390/char/tty3270.c            9
-rw-r--r--  drivers/s390/char/tty3270.h           16
-rw-r--r--  drivers/s390/char/vmur.c             250
-rw-r--r--  drivers/s390/char/vmur.h               1
-rw-r--r--  drivers/s390/char/vmwatchdog.c         4
-rw-r--r--  drivers/s390/char/zcore.c              7
-rw-r--r--  drivers/s390/cio/ccwgroup.c           70
-rw-r--r--  drivers/s390/cio/chp.c                30
-rw-r--r--  drivers/s390/cio/cio.c                 5
-rw-r--r--  drivers/s390/cio/cmf.c               242
-rw-r--r--  drivers/s390/cio/css.c                98
-rw-r--r--  drivers/s390/cio/css.h                 3
-rw-r--r--  drivers/s390/cio/device.c            100
-rw-r--r--  drivers/s390/cio/device.h              1
-rw-r--r--  drivers/s390/cio/device_fsm.c        147
-rw-r--r--  drivers/s390/cio/device_id.c          48
-rw-r--r--  drivers/s390/cio/device_ops.c        241
-rw-r--r--  drivers/s390/cio/qdio.c               44
-rw-r--r--  drivers/s390/crypto/ap_bus.c          19
-rw-r--r--  drivers/s390/crypto/zcrypt_mono.c      4
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c    9
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.h   45
-rw-r--r--  drivers/s390/net/claw.c                1
-rw-r--r--  drivers/s390/net/ctcmain.c             1
-rw-r--r--  drivers/s390/net/lcs.c                12
-rw-r--r--  drivers/s390/net/lcs.h                 1
-rw-r--r--  drivers/s390/net/netiucv.c             1
-rw-r--r--  drivers/s390/net/qeth.h                7
-rw-r--r--  drivers/s390/net/qeth_eddp.c          16
-rw-r--r--  drivers/s390/net/qeth_main.c         227
-rw-r--r--  drivers/s390/net/qeth_mpc.h            1
-rw-r--r--  drivers/s390/net/qeth_sys.c            8
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c          10
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c          10
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c          18
47 files changed, 1140 insertions(+), 780 deletions(-)
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index eccac1c3b71b..571320ab9e1a 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -24,6 +24,7 @@
#include <asm/s390_ext.h>
#include <asm/todclk.h>
#include <asm/vtoc.h>
+#include <asm/diag.h>
#include "dasd_int.h"
#include "dasd_diag.h"
@@ -471,14 +472,13 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
struct dasd_ccw_req *cqr;
struct dasd_diag_req *dreq;
struct dasd_diag_bio *dbio;
- struct bio *bio;
+ struct req_iterator iter;
struct bio_vec *bv;
char *dst;
unsigned int count, datasize;
sector_t recid, first_rec, last_rec;
unsigned int blksize, off;
unsigned char rw_cmd;
- int i;
if (rq_data_dir(req) == READ)
rw_cmd = MDSK_READ_REQ;
@@ -492,13 +492,11 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
/* Check struct bio and count the number of blocks for the request. */
count = 0;
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
- if (bv->bv_len & (blksize - 1))
- /* Fba can only do full blocks. */
- return ERR_PTR(-EINVAL);
- count += bv->bv_len >> (device->s2b_shift + 9);
- }
+ rq_for_each_segment(bv, req, iter) {
+ if (bv->bv_len & (blksize - 1))
+ /* Fba can only do full blocks. */
+ return ERR_PTR(-EINVAL);
+ count += bv->bv_len >> (device->s2b_shift + 9);
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
@@ -515,18 +513,16 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
dreq->block_count = count;
dbio = dreq->bio;
recid = first_rec;
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
- for (off = 0; off < bv->bv_len; off += blksize) {
- memset(dbio, 0, sizeof (struct dasd_diag_bio));
- dbio->type = rw_cmd;
- dbio->block_number = recid + 1;
- dbio->buffer = dst;
- dbio++;
- dst += blksize;
- recid++;
- }
+ rq_for_each_segment(bv, req, iter) {
+ dst = page_address(bv->bv_page) + bv->bv_offset;
+ for (off = 0; off < bv->bv_len; off += blksize) {
+ memset(dbio, 0, sizeof (struct dasd_diag_bio));
+ dbio->type = rw_cmd;
+ dbio->block_number = recid + 1;
+ dbio->buffer = dst;
+ dbio++;
+ dst += blksize;
+ recid++;
}
}
cqr->retries = DIAG_MAX_RETRIES;
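
The dasd_diag, dasd_eckd, dasd_fba and tape hunks all apply the same mechanical conversion: the nested rq_for_each_bio()/bio_for_each_segment() walk over a request is replaced by the single rq_for_each_segment() iterator. A minimal sketch of the two forms, assuming the block-layer interfaces used in this patch (struct req_iterator, rq_for_each_segment) and a hypothetical count_blocks() helper:

#include <linux/blkdev.h>
#include <linux/bio.h>

static unsigned int count_blocks(struct request *req, int s2b_shift)
{
	struct req_iterator iter;
	struct bio_vec *bv;
	unsigned int count = 0;

	/*
	 * Old form, as removed above:
	 *
	 *	struct bio *bio;
	 *	int i;
	 *
	 *	rq_for_each_bio(bio, req)
	 *		bio_for_each_segment(bv, bio, i)
	 *			count += bv->bv_len >> (s2b_shift + 9);
	 *
	 * New form: one iterator visits every segment of every bio.
	 */
	rq_for_each_segment(bv, req, iter)
		count += bv->bv_len >> (s2b_shift + 9);
	return count;
}

The behaviour is unchanged; only the bookkeeping (bio pointer and segment index) moves into struct req_iterator.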
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index ea63ba7828f9..44adf8496bda 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1176,7 +1176,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
struct LO_eckd_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
- struct bio *bio;
+ struct req_iterator iter;
struct bio_vec *bv;
char *dst;
unsigned int blksize, blk_per_trk, off;
@@ -1185,7 +1185,6 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
sector_t first_trk, last_trk;
unsigned int first_offs, last_offs;
unsigned char cmd, rcmd;
- int i;
private = (struct dasd_eckd_private *) device->private;
if (rq_data_dir(req) == READ)
@@ -1206,18 +1205,15 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
/* Check struct bio and count the number of blocks for the request. */
count = 0;
cidaw = 0;
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
- if (bv->bv_len & (blksize - 1))
- /* Eckd can only do full blocks. */
- return ERR_PTR(-EINVAL);
- count += bv->bv_len >> (device->s2b_shift + 9);
+ rq_for_each_segment(bv, req, iter) {
+ if (bv->bv_len & (blksize - 1))
+ /* Eckd can only do full blocks. */
+ return ERR_PTR(-EINVAL);
+ count += bv->bv_len >> (device->s2b_shift + 9);
#if defined(CONFIG_64BIT)
- if (idal_is_needed (page_address(bv->bv_page),
- bv->bv_len))
- cidaw += bv->bv_len >> (device->s2b_shift + 9);
+ if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
+ cidaw += bv->bv_len >> (device->s2b_shift + 9);
#endif
- }
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
@@ -1257,7 +1253,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
last_rec - recid + 1, cmd, device, blksize);
}
- rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
@@ -1328,12 +1324,12 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
struct dasd_eckd_private *private;
struct ccw1 *ccw;
- struct bio *bio;
+ struct req_iterator iter;
struct bio_vec *bv;
char *dst, *cda;
unsigned int blksize, blk_per_trk, off;
sector_t recid;
- int i, status;
+ int status;
if (!dasd_page_cache)
goto out;
@@ -1346,7 +1342,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
ccw++;
if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
ccw++;
- rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
for (off = 0; off < bv->bv_len; off += blksize) {
/* Skip locate record. */
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index da16ead8aff2..1d95822e0b8e 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -234,14 +234,13 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
struct LO_fba_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
- struct bio *bio;
+ struct req_iterator iter;
struct bio_vec *bv;
char *dst;
int count, cidaw, cplength, datasize;
sector_t recid, first_rec, last_rec;
unsigned int blksize, off;
unsigned char cmd;
- int i;
private = (struct dasd_fba_private *) device->private;
if (rq_data_dir(req) == READ) {
@@ -257,18 +256,15 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
/* Check struct bio and count the number of blocks for the request. */
count = 0;
cidaw = 0;
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
- if (bv->bv_len & (blksize - 1))
- /* Fba can only do full blocks. */
- return ERR_PTR(-EINVAL);
- count += bv->bv_len >> (device->s2b_shift + 9);
+ rq_for_each_segment(bv, req, iter) {
+ if (bv->bv_len & (blksize - 1))
+ /* Fba can only do full blocks. */
+ return ERR_PTR(-EINVAL);
+ count += bv->bv_len >> (device->s2b_shift + 9);
#if defined(CONFIG_64BIT)
- if (idal_is_needed (page_address(bv->bv_page),
- bv->bv_len))
- cidaw += bv->bv_len / blksize;
+ if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
+ cidaw += bv->bv_len / blksize;
#endif
- }
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
@@ -304,7 +300,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
}
recid = first_rec;
- rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
@@ -359,11 +355,11 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
struct dasd_fba_private *private;
struct ccw1 *ccw;
- struct bio *bio;
+ struct req_iterator iter;
struct bio_vec *bv;
char *dst, *cda;
unsigned int blksize, off;
- int i, status;
+ int status;
if (!dasd_page_cache)
goto out;
@@ -374,7 +370,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
ccw++;
if (private->rdc_data.mode.bits.data_chain != 0)
ccw++;
- rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
for (off = 0; off < bv->bv_len; off += blksize) {
/* Skip locate record. */
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index aeda52682446..d427daeef511 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -53,6 +53,7 @@
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/interrupt.h>
+#include <linux/log2.h>
#include <asm/ccwdev.h>
#include <linux/workqueue.h>
#include <asm/debug.h>
@@ -456,7 +457,7 @@ dasd_free_chunk(struct list_head *chunk_list, void *mem)
static inline int
dasd_check_blocksize(int bsize)
{
- if (bsize < 512 || bsize > 4096 || (bsize & (bsize - 1)) != 0)
+ if (bsize < 512 || bsize > 4096 || !is_power_of_2(bsize))
return -EMEDIUMTYPE;
return 0;
}
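
The dasd_int.h change swaps the open-coded power-of-two test for is_power_of_2() from the newly included <linux/log2.h>. Both forms accept exactly the block sizes 512, 1024, 2048 and 4096; a purely illustrative sketch (the _sketch suffix is not in the driver):

#include <linux/log2.h>
#include <linux/errno.h>

/* (bsize & (bsize - 1)) == 0 holds exactly for powers of two (and for 0,
 * which the range check already rules out), so the two tests agree here. */
static inline int dasd_check_blocksize_sketch(int bsize)
{
	if (bsize < 512 || bsize > 4096 || !is_power_of_2(bsize))
		return -EMEDIUMTYPE;	/* e.g. 1536 or 3000 are rejected */
	return 0;
}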
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 4d8798bacf97..859f870552e3 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -674,10 +674,10 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
}
bytes_done += bvec->bv_len;
}
- bio_endio(bio, bytes_done, 0);
+ bio_endio(bio, 0);
return 0;
fail:
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 354a060e5bec..f231bc21b1ca 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -230,12 +230,10 @@ static int xpram_make_request(struct request_queue *q, struct bio *bio)
}
}
set_bit(BIO_UPTODATE, &bio->bi_flags);
- bytes = bio->bi_size;
- bio->bi_size = 0;
- bio->bi_end_io(bio, bytes, 0);
+ bio_endio(bio, 0);
return 0;
fail:
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
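
The dcssblk and xpram hunks adapt to the block-layer completion API of this kernel generation: bio_endio() now takes only the bio and an error code (it completes the whole bio, so no byte count is passed), and bio_io_error() likewise takes just the bio. A hedged sketch of the resulting pattern in a make_request function; handle_bio() is a hypothetical stand-in for the per-segment copy loop:

#include <linux/blkdev.h>
#include <linux/bio.h>

static int handle_bio(struct bio *bio)
{
	return 0;	/* stub for the sketch; real drivers copy the data here */
}

static int example_make_request(struct request_queue *q, struct bio *bio)
{
	if (handle_bio(bio))
		goto fail;
	bio_endio(bio, 0);	/* success: completes the whole bio */
	return 0;
fail:
	bio_io_error(bio);	/* equivalent to bio_endio(bio, -EIO) */
	return 0;
}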
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 6000bdee4082..0e1f35c9ed9d 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -667,6 +667,9 @@ raw3215_probe (struct ccw_device *cdev)
struct raw3215_info *raw;
int line;
+ /* Console is special. */
+ if (raw3215[0] && (cdev->dev.driver_data == raw3215[0]))
+ return 0;
raw = kmalloc(sizeof(struct raw3215_info) +
RAW3215_INBUF_SIZE, GFP_KERNEL|GFP_DMA);
if (raw == NULL)
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index fd3479119eb4..0b040557db02 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -22,6 +22,7 @@
#include <asm/ebcdic.h>
#include "raw3270.h"
+#include "tty3270.h"
#include "ctrlchar.h"
#define CON3270_OUTPUT_BUFFER_SIZE 1024
@@ -507,8 +508,6 @@ con3270_write(struct console *co, const char *str, unsigned int count)
spin_unlock_irqrestore(&cp->view.lock,flags);
}
-extern struct tty_driver *tty3270_driver;
-
static struct tty_driver *
con3270_device(struct console *c, int *index)
{
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 4f2f81b16cfa..2edd5fb6d3dc 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -21,6 +21,7 @@
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
+#include <asm/diag.h>
#include "raw3270.h"
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index fa62e6944057..25629b92dec3 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -93,6 +93,7 @@ static volatile enum sclp_mask_state_t {
#define SCLP_RETRY_INTERVAL 30
static void sclp_process_queue(void);
+static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);
@@ -115,7 +116,6 @@ sclp_service_call(sclp_cmdw_t command, void *sccb)
return 0;
}
-static inline void __sclp_make_read_req(void);
static void
__sclp_queue_read_req(void)
@@ -318,8 +318,7 @@ sclp_read_cb(struct sclp_req *req, void *data)
}
/* Prepare read event data request. Called while sclp_lock is locked. */
-static inline void
-__sclp_make_read_req(void)
+static void __sclp_make_read_req(void)
{
struct sccb_header *sccb;
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 80e7a537e7d2..5b47e9cce75f 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1134,21 +1134,18 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
{
struct tape_request *request;
struct ccw1 *ccw;
- int count = 0, i;
+ int count = 0;
unsigned off;
char *dst;
struct bio_vec *bv;
- struct bio *bio;
+ struct req_iterator iter;
struct tape_34xx_block_id * start_block;
DBF_EVENT(6, "xBREDid:");
/* Count the number of blocks for the request. */
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
- count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
- }
- }
+ rq_for_each_segment(bv, req, iter)
+ count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
/* Allocate the ccw request. */
request = tape_alloc_request(3+count+1, 8);
@@ -1175,18 +1172,15 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
- dst = kmap(bv->bv_page) + bv->bv_offset;
- for (off = 0; off < bv->bv_len;
- off += TAPEBLOCK_HSEC_SIZE) {
- ccw->flags = CCW_FLAG_CC;
- ccw->cmd_code = READ_FORWARD;
- ccw->count = TAPEBLOCK_HSEC_SIZE;
- set_normalized_cda(ccw, (void*) __pa(dst));
- ccw++;
- dst += TAPEBLOCK_HSEC_SIZE;
- }
+ rq_for_each_segment(bv, req, iter) {
+ dst = kmap(bv->bv_page) + bv->bv_offset;
+ for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
+ ccw->flags = CCW_FLAG_CC;
+ ccw->cmd_code = READ_FORWARD;
+ ccw->count = TAPEBLOCK_HSEC_SIZE;
+ set_normalized_cda(ccw, (void*) __pa(dst));
+ ccw++;
+ dst += TAPEBLOCK_HSEC_SIZE;
}
}
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 7e2b2ab49264..da25f8e24152 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -623,21 +623,19 @@ tape_3590_bread(struct tape_device *device, struct request *req)
{
struct tape_request *request;
struct ccw1 *ccw;
- int count = 0, start_block, i;
+ int count = 0, start_block;
unsigned off;
char *dst;
struct bio_vec *bv;
- struct bio *bio;
+ struct req_iterator iter;
DBF_EVENT(6, "xBREDid:");
start_block = req->sector >> TAPEBLOCK_HSEC_S2B;
DBF_EVENT(6, "start_block = %i\n", start_block);
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
- count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
- }
- }
+ rq_for_each_segment(bv, req, iter)
+ count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
+
request = tape_alloc_request(2 + count + 1, 4);
if (IS_ERR(request))
return request;
@@ -653,21 +651,18 @@ tape_3590_bread(struct tape_device *device, struct request *req)
*/
ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
- for (off = 0; off < bv->bv_len;
- off += TAPEBLOCK_HSEC_SIZE) {
- ccw->flags = CCW_FLAG_CC;
- ccw->cmd_code = READ_FORWARD;
- ccw->count = TAPEBLOCK_HSEC_SIZE;
- set_normalized_cda(ccw, (void *) __pa(dst));
- ccw++;
- dst += TAPEBLOCK_HSEC_SIZE;
- }
- if (off > bv->bv_len)
- BUG();
+ rq_for_each_segment(bv, req, iter) {
+ dst = page_address(bv->bv_page) + bv->bv_offset;
+ for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
+ ccw->flags = CCW_FLAG_CC;
+ ccw->cmd_code = READ_FORWARD;
+ ccw->count = TAPEBLOCK_HSEC_SIZE;
+ set_normalized_cda(ccw, (void *) __pa(dst));
+ ccw++;
+ dst += TAPEBLOCK_HSEC_SIZE;
}
+ if (off > bv->bv_len)
+ BUG();
}
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
DBF_EVENT(6, "xBREDccwg\n");
@@ -713,16 +708,22 @@ static void tape_3590_med_state_set(struct tape_device *device,
c_info = &TAPE_3590_CRYPT_INFO(device);
- if (sense->masst == MSENSE_UNASSOCIATED) {
+ DBF_EVENT(6, "medium state: %x:%x\n", sense->macst, sense->masst);
+ switch (sense->macst) {
+ case 0x04:
+ case 0x05:
+ case 0x06:
tape_med_state_set(device, MS_UNLOADED);
TAPE_3590_CRYPT_INFO(device).medium_status = 0;
return;
- }
- if (sense->masst != MSENSE_ASSOCIATED_MOUNT) {
- PRINT_ERR("Unknown medium state: %x\n", sense->masst);
+ case 0x08:
+ case 0x09:
+ tape_med_state_set(device, MS_LOADED);
+ break;
+ default:
+ tape_med_state_set(device, MS_UNKNOWN);
return;
}
- tape_med_state_set(device, MS_LOADED);
c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK;
if (sense->flags & MSENSE_CRYPT_MASK) {
PRINT_INFO("Medium is encrypted (%04x)\n", sense->flags);
@@ -840,15 +841,17 @@ tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb)
/* Probably result of halt ssch */
return TAPE_IO_PENDING;
else if (irb->scsw.dstat == 0x85)
- /* Device Ready -> check medium state */
- tape_3590_schedule_work(device, TO_MSEN);
- else if (irb->scsw.dstat & DEV_STAT_ATTENTION)
+ /* Device Ready */
+ DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id);
+ else if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
tape_3590_schedule_work(device, TO_READ_ATTMSG);
- else {
+ } else {
DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
PRINT_WARN("Unsolicited IRQ (Device End) caught.\n");
tape_dump_sense(device, NULL, irb);
}
+ /* check medium state */
+ tape_3590_schedule_work(device, TO_MSEN);
return TAPE_IO_SUCCESS;
}
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index bc33068b9ce2..70b1980a08b6 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -25,8 +25,8 @@
#include <asm/ebcdic.h>
#include <asm/uaccess.h>
-
#include "raw3270.h"
+#include "tty3270.h"
#include "keyboard.h"
#define TTY3270_CHAR_BUF_SIZE 256
@@ -1338,8 +1338,11 @@ tty3270_getpar(struct tty3270 *tp, int ix)
static void
tty3270_goto_xy(struct tty3270 *tp, int cx, int cy)
{
- tp->cx = min_t(int, tp->view.cols - 1, max_t(int, 0, cx));
- cy = min_t(int, tp->view.rows - 3, max_t(int, 0, cy));
+ int max_cx = max(0, cx);
+ int max_cy = max(0, cy);
+
+ tp->cx = min_t(int, tp->view.cols - 1, max_cx);
+ cy = min_t(int, tp->view.rows - 3, max_cy);
if (cy != tp->cy) {
tty3270_convert_line(tp, tp->cy);
tp->cy = cy;
diff --git a/drivers/s390/char/tty3270.h b/drivers/s390/char/tty3270.h
new file mode 100644
index 000000000000..799da57f0390
--- /dev/null
+++ b/drivers/s390/char/tty3270.h
@@ -0,0 +1,16 @@
+/*
+ * drivers/s390/char/tty3270.h
+ *
+ * Copyright IBM Corp. 2007
+ *
+ */
+
+#ifndef __DRIVERS_S390_CHAR_TTY3270_H
+#define __DRIVERS_S390_CHAR_TTY3270_H
+
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+
+extern struct tty_driver *tty3270_driver;
+
+#endif /* __DRIVERS_S390_CHAR_TTY3270_H */
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 04b19bdc09da..d70a6e65bf14 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -14,6 +14,7 @@
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
+#include <asm/diag.h>
#include "vmur.h"
@@ -68,8 +69,26 @@ static struct ccw_driver ur_driver = {
.set_offline = ur_set_offline,
};
+static DEFINE_MUTEX(vmur_mutex);
+
/*
* Allocation, freeing, getting and putting of urdev structures
+ *
+ * Each ur device (urd) contains a reference to its corresponding ccw device
+ * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
+ * ur device using the cdev->dev.driver_data pointer.
+ *
+ * urd references:
+ * - ur_probe gets a urd reference, ur_remove drops the reference
+ * (cdev->dev.driver_data)
+ * - ur_open gets a urd reference, ur_release drops the reference
+ * (urf->urd)
+ *
+ * cdev references:
+ * - urdev_alloc gets a cdev reference (urd->cdev)
+ * - urdev_free drops the cdev reference (urd->cdev)
+ *
+ * Setting and clearing of cdev->dev.driver_data is protected by the ccwdev lock
*/
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
@@ -78,42 +97,61 @@ static struct urdev *urdev_alloc(struct ccw_device *cdev)
urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
if (!urd)
return NULL;
- urd->cdev = cdev;
urd->reclen = cdev->id.driver_info;
ccw_device_get_id(cdev, &urd->dev_id);
mutex_init(&urd->io_mutex);
mutex_init(&urd->open_mutex);
+ atomic_set(&urd->ref_count, 1);
+ urd->cdev = cdev;
+ get_device(&cdev->dev);
return urd;
}
static void urdev_free(struct urdev *urd)
{
+ TRACE("urdev_free: %p\n", urd);
+ if (urd->cdev)
+ put_device(&urd->cdev->dev);
kfree(urd);
}
-/*
- * This is how the character device driver gets a reference to a
- * ur device. When this call returns successfully, a reference has
- * been taken (by get_device) on the underlying kobject. The recipient
- * of this urdev pointer must eventually drop it with urdev_put(urd)
- * which does the corresponding put_device().
- */
+static void urdev_get(struct urdev *urd)
+{
+ atomic_inc(&urd->ref_count);
+}
+
+static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
+{
+ struct urdev *urd;
+ unsigned long flags;
+
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ urd = cdev->dev.driver_data;
+ if (urd)
+ urdev_get(urd);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ return urd;
+}
+
static struct urdev *urdev_get_from_devno(u16 devno)
{
char bus_id[16];
struct ccw_device *cdev;
+ struct urdev *urd;
sprintf(bus_id, "0.0.%04x", devno);
cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
if (!cdev)
return NULL;
-
- return cdev->dev.driver_data;
+ urd = urdev_get_from_cdev(cdev);
+ put_device(&cdev->dev);
+ return urd;
}
static void urdev_put(struct urdev *urd)
{
- put_device(&urd->cdev->dev);
+ if (atomic_dec_and_test(&urd->ref_count))
+ urdev_free(urd);
}
/*
@@ -245,6 +283,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
return;
}
urd = cdev->dev.driver_data;
+ BUG_ON(!urd);
/* On special conditions irb is an error pointer */
if (IS_ERR(irb))
urd->io_request_rc = PTR_ERR(irb);
@@ -262,9 +301,15 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
static ssize_t ur_attr_reclen_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct urdev *urd = dev->driver_data;
+ struct urdev *urd;
+ int rc;
- return sprintf(buf, "%zu\n", urd->reclen);
+ urd = urdev_get_from_cdev(to_ccwdev(dev));
+ if (!urd)
+ return -ENODEV;
+ rc = sprintf(buf, "%zu\n", urd->reclen);
+ urdev_put(urd);
+ return rc;
}
static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);
@@ -379,31 +424,6 @@ static ssize_t ur_write(struct file *file, const char __user *udata,
return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
}
-static int do_diag_14(unsigned long rx, unsigned long ry1,
- unsigned long subcode)
-{
- register unsigned long _ry1 asm("2") = ry1;
- register unsigned long _ry2 asm("3") = subcode;
- int rc = 0;
-
- asm volatile(
-#ifdef CONFIG_64BIT
- " sam31\n"
- " diag %2,2,0x14\n"
- " sam64\n"
-#else
- " diag %2,2,0x14\n"
-#endif
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (rc), "+d" (_ry2)
- : "d" (rx), "d" (_ry1)
- : "cc");
-
- TRACE("diag 14: subcode=0x%lx, cc=%i\n", subcode, rc);
- return rc;
-}
-
/*
* diagnose code 0x14 subcode 0x0028 - position spool file to designated
* record
@@ -415,7 +435,7 @@ static int diag_position_to_record(int devno, int record)
{
int cc;
- cc = do_diag_14(record, devno, 0x28);
+ cc = diag14(record, devno, 0x28);
switch (cc) {
case 0:
return 0;
@@ -440,7 +460,7 @@ static int diag_read_file(int devno, char *buf)
{
int cc;
- cc = do_diag_14((unsigned long) buf, devno, 0x00);
+ cc = diag14((unsigned long) buf, devno, 0x00);
switch (cc) {
case 0:
return 0;
@@ -533,7 +553,7 @@ static int diag_read_next_file_info(struct file_control_block *buf, int spid)
{
int cc;
- cc = do_diag_14((unsigned long) buf, spid, 0xfff);
+ cc = diag14((unsigned long) buf, spid, 0xfff);
switch (cc) {
case 0:
return 0;
@@ -750,64 +770,63 @@ static struct file_operations ur_fops = {
/*
* ccw_device infrastructure:
- * ur_probe gets its own ref to the device (i.e. get_device),
- * creates the struct urdev, the device attributes, sets up
- * the interrupt handler and validates the virtual unit record device.
- * ur_remove removes the device attributes, frees the struct urdev
- * and drops (put_device) the ref to the device we got in ur_probe.
+ * ur_probe creates the struct urdev (with refcount = 1), the device
+ * attributes, sets up the interrupt handler and validates the virtual
+ * unit record device.
+ * ur_remove removes the device attributes and drops the reference to
+ * struct urdev.
+ *
+ * ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
+ * by the vmur_mutex lock.
+ *
+ * urd->char_device is used as indication that the online function has
+ * been completed successfully.
*/
static int ur_probe(struct ccw_device *cdev)
{
struct urdev *urd;
int rc;
- TRACE("ur_probe: cdev=%p state=%d\n", cdev, *(int *) cdev->private);
-
- if (!get_device(&cdev->dev))
- return -ENODEV;
+ TRACE("ur_probe: cdev=%p\n", cdev);
+ mutex_lock(&vmur_mutex);
urd = urdev_alloc(cdev);
if (!urd) {
rc = -ENOMEM;
- goto fail;
+ goto fail_unlock;
}
+
rc = ur_create_attributes(&cdev->dev);
if (rc) {
rc = -ENOMEM;
- goto fail;
+ goto fail_urdev_put;
}
- cdev->dev.driver_data = urd;
cdev->handler = ur_int_handler;
/* validate virtual unit record device */
urd->class = get_urd_class(urd);
if (urd->class < 0) {
rc = urd->class;
- goto fail;
+ goto fail_remove_attr;
}
if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
rc = -ENOTSUPP;
- goto fail;
+ goto fail_remove_attr;
}
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ cdev->dev.driver_data = urd;
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+ mutex_unlock(&vmur_mutex);
return 0;
-fail:
- urdev_free(urd);
- put_device(&cdev->dev);
- return rc;
-}
-
-static void ur_remove(struct ccw_device *cdev)
-{
- struct urdev *urd = cdev->dev.driver_data;
-
- TRACE("ur_remove\n");
- if (cdev->online)
- ur_set_offline(cdev);
+fail_remove_attr:
ur_remove_attributes(&cdev->dev);
- urdev_free(urd);
- put_device(&cdev->dev);
+fail_urdev_put:
+ urdev_put(urd);
+fail_unlock:
+ mutex_unlock(&vmur_mutex);
+ return rc;
}
static int ur_set_online(struct ccw_device *cdev)
@@ -816,20 +835,29 @@ static int ur_set_online(struct ccw_device *cdev)
int minor, major, rc;
char node_id[16];
- TRACE("ur_set_online: cdev=%p state=%d\n", cdev,
- *(int *) cdev->private);
+ TRACE("ur_set_online: cdev=%p\n", cdev);
- if (!try_module_get(ur_driver.owner))
- return -EINVAL;
+ mutex_lock(&vmur_mutex);
+ urd = urdev_get_from_cdev(cdev);
+ if (!urd) {
+ /* ur_remove already deleted our urd */
+ rc = -ENODEV;
+ goto fail_unlock;
+ }
+
+ if (urd->char_device) {
+ /* Another ur_set_online was faster */
+ rc = -EBUSY;
+ goto fail_urdev_put;
+ }
- urd = (struct urdev *) cdev->dev.driver_data;
minor = urd->dev_id.devno;
major = MAJOR(ur_first_dev_maj_min);
urd->char_device = cdev_alloc();
if (!urd->char_device) {
rc = -ENOMEM;
- goto fail_module_put;
+ goto fail_urdev_put;
}
cdev_init(urd->char_device, &ur_fops);
@@ -858,29 +886,79 @@ static int ur_set_online(struct ccw_device *cdev)
TRACE("ur_set_online: device_create rc=%d\n", rc);
goto fail_free_cdev;
}
-
+ urdev_put(urd);
+ mutex_unlock(&vmur_mutex);
return 0;
fail_free_cdev:
cdev_del(urd->char_device);
-fail_module_put:
- module_put(ur_driver.owner);
-
+ urd->char_device = NULL;
+fail_urdev_put:
+ urdev_put(urd);
+fail_unlock:
+ mutex_unlock(&vmur_mutex);
return rc;
}
-static int ur_set_offline(struct ccw_device *cdev)
+static int ur_set_offline_force(struct ccw_device *cdev, int force)
{
struct urdev *urd;
+ int rc;
- TRACE("ur_set_offline: cdev=%p cdev->private=%p state=%d\n",
- cdev, cdev->private, *(int *) cdev->private);
- urd = (struct urdev *) cdev->dev.driver_data;
+ TRACE("ur_set_offline: cdev=%p\n", cdev);
+ urd = urdev_get_from_cdev(cdev);
+ if (!urd)
+ /* ur_remove already deleted our urd */
+ return -ENODEV;
+ if (!urd->char_device) {
+ /* Another ur_set_offline was faster */
+ rc = -EBUSY;
+ goto fail_urdev_put;
+ }
+ if (!force && (atomic_read(&urd->ref_count) > 2)) {
+ /* There is still a user of urd (e.g. ur_open) */
+ TRACE("ur_set_offline: BUSY\n");
+ rc = -EBUSY;
+ goto fail_urdev_put;
+ }
device_destroy(vmur_class, urd->char_device->dev);
cdev_del(urd->char_device);
- module_put(ur_driver.owner);
+ urd->char_device = NULL;
+ rc = 0;
- return 0;
+fail_urdev_put:
+ urdev_put(urd);
+ return rc;
+}
+
+static int ur_set_offline(struct ccw_device *cdev)
+{
+ int rc;
+
+ mutex_lock(&vmur_mutex);
+ rc = ur_set_offline_force(cdev, 0);
+ mutex_unlock(&vmur_mutex);
+ return rc;
+}
+
+static void ur_remove(struct ccw_device *cdev)
+{
+ unsigned long flags;
+
+ TRACE("ur_remove\n");
+
+ mutex_lock(&vmur_mutex);
+
+ if (cdev->online)
+ ur_set_offline_force(cdev, 1);
+ ur_remove_attributes(&cdev->dev);
+
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ urdev_put(cdev->dev.driver_data);
+ cdev->dev.driver_data = NULL;
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+ mutex_unlock(&vmur_mutex);
}
/*
diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h
index 2b3c564e0472..fa959644735a 100644
--- a/drivers/s390/char/vmur.h
+++ b/drivers/s390/char/vmur.h
@@ -70,6 +70,7 @@ struct urdev {
size_t reclen; /* Record length for *write* CCWs */
int class; /* VM device class */
int io_request_rc; /* return code from I/O request */
+ atomic_t ref_count; /* reference counter */
};
/*
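
Together, the vmur.c and vmur.h changes move from relying on get_device()/put_device() alone to an explicit reference count in struct urdev (the new ref_count member above). A simplified, hypothetical sketch of the lifetime rules the new helpers implement; only the counting logic is shown and the _sketch names are not in the driver:

#include <linux/slab.h>
#include <asm/atomic.h>

struct urdev_sketch {
	atomic_t ref_count;
	/* ... cdev pointer, mutexes, etc. as in struct urdev ... */
};

static struct urdev_sketch *urdev_alloc_sketch(void)
{
	struct urdev_sketch *urd = kzalloc(sizeof(*urd), GFP_KERNEL);

	if (urd)
		atomic_set(&urd->ref_count, 1);	/* initial ref owned by ur_probe */
	return urd;
}

static void urdev_get_sketch(struct urdev_sketch *urd)
{
	atomic_inc(&urd->ref_count);		/* e.g. ur_open, sysfs show, set_online */
}

static void urdev_put_sketch(struct urdev_sketch *urd)
{
	if (atomic_dec_and_test(&urd->ref_count))
		kfree(urd);			/* urdev_free() in the real driver */
}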
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 680b9b58b80e..6f40facb1c4d 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -66,8 +66,8 @@ static int __diag288(enum vmwdt_func func, unsigned int timeout,
"0: la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
- : "=d" (err) : "d"(__func), "d"(__timeout),
- "d"(__cmdp), "d"(__cmdl), "0" (-EINVAL) : "1", "cc");
+ : "+d" (err) : "d"(__func), "d"(__timeout),
+ "d"(__cmdp), "d"(__cmdl) : "1", "cc");
return err;
}
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 3712ede16723..7073daf77981 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -141,15 +141,16 @@ static int memcpy_real(void *dest, unsigned long src, size_t count)
if (count == 0)
return 0;
- flags = __raw_local_irq_stnsm(0xf8); /* switch to real mode */
+ flags = __raw_local_irq_stnsm(0xf8UL); /* switch to real mode */
asm volatile (
"0: mvcle %1,%2,0x0\n"
"1: jo 0b\n"
" lhi %0,0x0\n"
"2:\n"
EX_TABLE(1b,2b)
- : "+d" (rc)
- : "d" (_dest), "d" (_src), "d" (_len1), "d" (_len2)
+ : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
+ "+d" (_len2), "=m" (*((long*)dest))
+ : "m" (*((long*)src))
: "cc", "memory");
__raw_local_irq_ssm(flags);
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index b0a18f5176aa..5baa517c3b66 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -44,8 +44,7 @@ ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
return 0;
}
static int
-ccwgroup_uevent (struct device *dev, char **envp, int num_envp, char *buffer,
- int buffer_size)
+ccwgroup_uevent (struct device *dev, struct kobj_uevent_env *env)
{
/* TODO */
return 0;
@@ -152,16 +151,24 @@ __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
return 0;
}
-/*
- * try to add a new ccwgroup device for one driver
- * argc and argv[] are a list of bus_id's of devices
- * belonging to the driver.
+/**
+ * ccwgroup_create() - create and register a ccw group device
+ * @root: parent device for the new device
+ * @creator_id: identifier of creating driver
+ * @cdrv: ccw driver of slave devices
+ * @argc: number of slave devices
+ * @argv: bus ids of slave devices
+ *
+ * Create and register a new ccw group device as a child of @root. Slave
+ * devices are obtained from the list of bus ids given in @argv[] and must all
+ * belong to @cdrv.
+ * Returns:
+ * %0 on success and an error code on failure.
+ * Context:
+ * non-atomic
*/
-int
-ccwgroup_create(struct device *root,
- unsigned int creator_id,
- struct ccw_driver *cdrv,
- int argc, char *argv[])
+int ccwgroup_create(struct device *root, unsigned int creator_id,
+ struct ccw_driver *cdrv, int argc, char *argv[])
{
struct ccwgroup_device *gdev;
int i;
@@ -390,8 +397,13 @@ static struct bus_type ccwgroup_bus_type = {
.remove = ccwgroup_remove,
};
-int
-ccwgroup_driver_register (struct ccwgroup_driver *cdriver)
+/**
+ * ccwgroup_driver_register() - register a ccw group driver
+ * @cdriver: driver to be registered
+ *
+ * This function is mainly a wrapper around driver_register().
+ */
+int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
{
/* register our new driver with the core */
cdriver->driver.bus = &ccwgroup_bus_type;
@@ -406,8 +418,13 @@ __ccwgroup_match_all(struct device *dev, void *data)
return 1;
}
-void
-ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver)
+/**
+ * ccwgroup_driver_unregister() - deregister a ccw group driver
+ * @cdriver: driver to be deregistered
+ *
+ * This function is mainly a wrapper around driver_unregister().
+ */
+void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
{
struct device *dev;
@@ -427,8 +444,16 @@ ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver)
driver_unregister(&cdriver->driver);
}
-int
-ccwgroup_probe_ccwdev(struct ccw_device *cdev)
+/**
+ * ccwgroup_probe_ccwdev() - probe function for slave devices
+ * @cdev: ccw device to be probed
+ *
+ * This is a dummy probe function for ccw devices that are slave devices in
+ * a ccw group device.
+ * Returns:
+ * always %0
+ */
+int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
{
return 0;
}
@@ -452,8 +477,15 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
return NULL;
}
-void
-ccwgroup_remove_ccwdev(struct ccw_device *cdev)
+/**
+ * ccwgroup_remove_ccwdev() - remove function for slave devices
+ * @cdev: ccw device to be removed
+ *
+ * This is a remove function for ccw devices that are slave devices in a ccw
+ * group device. It sets the ccw device offline and also deregisters the
+ * embedding ccw group device.
+ */
+void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
{
struct ccwgroup_device *gdev;
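
The ccwgroup.c hunk mostly converts the existing comments into kerneldoc for the exported interface. As a usage illustration only, a caller of ccwgroup_create() as documented above might look like the following; the driver, creator id and bus ids are made up, and the declaration is assumed to come from <asm/ccwgroup.h> as in this tree:

#include <linux/kernel.h>
#include <asm/ccwgroup.h>

static int foo_group_devices(struct device *root, struct ccw_driver *cdrv)
{
	char *ids[] = { "0.0.1000", "0.0.1001", "0.0.1002" };

	/* 0x42 is an arbitrary example creator_id; the bus ids must all
	 * belong to devices bound to @cdrv. */
	return ccwgroup_create(root, 0x42, cdrv, ARRAY_SIZE(ids), ids);
}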
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 920dd71e6434..42c1f4659adb 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -14,7 +14,7 @@
#include <linux/jiffies.h>
#include <linux/wait.h>
#include <linux/mutex.h>
-#include <asm/errno.h>
+#include <linux/errno.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
@@ -55,7 +55,7 @@ static wait_queue_head_t cfg_wait_queue;
/* Return channel_path struct for given chpid. */
static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
{
- return css[chpid.cssid]->chps[chpid.id];
+ return channel_subsystems[chpid.cssid]->chps[chpid.id];
}
/* Set vary state for given chpid. */
@@ -86,7 +86,7 @@ u8 chp_get_sch_opm(struct subchannel *sch)
opm = 0;
chp_id_init(&chpid);
- for (i=0; i < 8; i++) {
+ for (i = 0; i < 8; i++) {
opm <<= 1;
chpid.id = sch->schib.pmcw.chpid[i];
if (chp_get_status(chpid) != 0)
@@ -118,7 +118,7 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
chpid.id);
- CIO_TRACE_EVENT( 2, dbf_text);
+ CIO_TRACE_EVENT(2, dbf_text);
status = chp_get_status(chpid);
if (!on && !status) {
@@ -140,9 +140,11 @@ static ssize_t chp_measurement_chars_read(struct kobject *kobj,
char *buf, loff_t off, size_t count)
{
struct channel_path *chp;
+ struct device *device;
unsigned int size;
- chp = to_channelpath(container_of(kobj, struct device, kobj));
+ device = container_of(kobj, struct device, kobj);
+ chp = to_channelpath(device);
if (!chp->cmg_chars)
return 0;
@@ -193,9 +195,11 @@ static ssize_t chp_measurement_read(struct kobject *kobj,
{
struct channel_path *chp;
struct channel_subsystem *css;
+ struct device *device;
unsigned int size;
- chp = to_channelpath(container_of(kobj, struct device, kobj));
+ device = container_of(kobj, struct device, kobj);
+ chp = to_channelpath(device);
css = to_css(chp->dev.parent);
size = sizeof(struct cmg_entry);
@@ -353,7 +357,7 @@ static ssize_t chp_shared_show(struct device *dev,
static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
-static struct attribute * chp_attrs[] = {
+static struct attribute *chp_attrs[] = {
&dev_attr_status.attr,
&dev_attr_configure.attr,
&dev_attr_type.attr,
@@ -395,7 +399,7 @@ int chp_new(struct chp_id chpid)
/* fill in status, etc. */
chp->chpid = chpid;
chp->state = 1;
- chp->dev.parent = &css[chpid.cssid]->device;
+ chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
chp->dev.release = chp_release;
snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp%x.%02x", chpid.cssid,
chpid.id);
@@ -430,18 +434,18 @@ int chp_new(struct chp_id chpid)
device_unregister(&chp->dev);
goto out_free;
}
- mutex_lock(&css[chpid.cssid]->mutex);
- if (css[chpid.cssid]->cm_enabled) {
+ mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
+ if (channel_subsystems[chpid.cssid]->cm_enabled) {
ret = chp_add_cmg_attr(chp);
if (ret) {
sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
device_unregister(&chp->dev);
- mutex_unlock(&css[chpid.cssid]->mutex);
+ mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
goto out_free;
}
}
- css[chpid.cssid]->chps[chpid.id] = chp;
- mutex_unlock(&css[chpid.cssid]->mutex);
+ channel_subsystems[chpid.cssid]->chps[chpid.id] = chp;
+ mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
return ret;
out_free:
kfree(chp);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index f2708d65be5a..46905345159e 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -619,6 +619,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
sch->schib.pmcw.ena = 0;
if ((sch->lpm & (sch->lpm - 1)) != 0)
sch->schib.pmcw.mp = 1; /* multipath mode */
+ /* clean up possible residual cmf stuff */
+ sch->schib.pmcw.mme = 0;
+ sch->schib.pmcw.mbfc = 0;
+ sch->schib.pmcw.mbi = 0;
+ sch->schib.mba = 0;
return 0;
out:
if (!cio_is_console(schid))
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 02fd00b55e1b..b960f66843e4 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -45,7 +45,8 @@
#include "ioasm.h"
#include "chsc.h"
-/* parameter to enable cmf during boot, possible uses are:
+/*
+ * parameter to enable cmf during boot, possible uses are:
* "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
* used on any subchannel
* "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
@@ -73,18 +74,20 @@ enum cmb_index {
* enum cmb_format - types of supported measurement block formats
*
* @CMF_BASIC: traditional channel measurement blocks supported
- * by all machines that we run on
+ * by all machines that we run on
* @CMF_EXTENDED: improved format that was introduced with the z990
- * machine
- * @CMF_AUTODETECT: default: use extended format when running on a z990
- * or later machine, otherwise fall back to basic format
- **/
+ * machine
+ * @CMF_AUTODETECT: default: use extended format when running on a machine
+ * supporting extended format, otherwise fall back to
+ * basic format
+ */
enum cmb_format {
CMF_BASIC,
CMF_EXTENDED,
CMF_AUTODETECT = -1,
};
-/**
+
+/*
* format - actual format for all measurement blocks
*
* The format module parameter can be set to a value of 0 (zero)
@@ -105,20 +108,21 @@ module_param(format, bool, 0444);
* either with the help of a special pool or with kmalloc
* @free: free memory allocated with @alloc
* @set: enable or disable measurement
+ * @read: read a measurement entry at an index
* @readall: read a measurement block in a common format
* @reset: clear the data in the associated measurement block and
* reset its time stamp
* @align: align an allocated block so that the hardware can use it
*/
struct cmb_operations {
- int (*alloc) (struct ccw_device*);
- void(*free) (struct ccw_device*);
- int (*set) (struct ccw_device*, u32);
- u64 (*read) (struct ccw_device*, int);
- int (*readall)(struct ccw_device*, struct cmbdata *);
- void (*reset) (struct ccw_device*);
- void * (*align) (void *);
-
+ int (*alloc) (struct ccw_device *);
+ void (*free) (struct ccw_device *);
+ int (*set) (struct ccw_device *, u32);
+ u64 (*read) (struct ccw_device *, int);
+ int (*readall)(struct ccw_device *, struct cmbdata *);
+ void (*reset) (struct ccw_device *);
+ void *(*align) (void *);
+/* private: */
struct attribute_group *attr_group;
};
static struct cmb_operations *cmbops;
@@ -130,9 +134,11 @@ struct cmb_data {
unsigned long long last_update; /* when last_block was updated */
};
-/* our user interface is designed in terms of nanoseconds,
+/*
+ * Our user interface is designed in terms of nanoseconds,
* while the hardware measures total times in its own
- * unit.*/
+ * unit.
+ */
static inline u64 time_to_nsec(u32 value)
{
return ((u64)value) * 128000ull;
@@ -159,12 +165,13 @@ static inline u64 time_to_avg_nsec(u32 value, u32 count)
return ret;
}
-/* activate or deactivate the channel monitor. When area is NULL,
+/*
+ * Activate or deactivate the channel monitor. When area is NULL,
* the monitor is deactivated. The channel monitor needs to
* be active in order to measure subchannels, which also need
- * to be enabled. */
-static inline void
-cmf_activate(void *area, unsigned int onoff)
+ * to be enabled.
+ */
+static inline void cmf_activate(void *area, unsigned int onoff)
{
register void * __gpr2 asm("2");
register long __gpr1 asm("1");
@@ -175,8 +182,8 @@ cmf_activate(void *area, unsigned int onoff)
asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
}
-static int
-set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
+static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
+ unsigned long address)
{
int ret;
int retry;
@@ -466,6 +473,7 @@ static void cmf_generic_reset(struct ccw_device *cdev)
*
* @mem: pointer to CMBs (only in basic measurement mode)
* @list: contains a linked list of all subchannels
+ * @num_channels: number of channels to be measured
* @lock: protect concurrent access to @mem and @list
*/
struct cmb_area {
@@ -481,28 +489,36 @@ static struct cmb_area cmb_area = {
.num_channels = 1024,
};
-
/* ****** old style CMB handling ********/
-/** int maxchannels
- *
+/*
* Basic channel measurement blocks are allocated in one contiguous
* block of memory, which can not be moved as long as any channel
* is active. Therefore, a maximum number of subchannels needs to
* be defined somewhere. This is a module parameter, defaulting to
 * a reasonable value of 1024, or 32 kb of memory.
* Current kernels don't allow kmalloc with more than 128kb, so the
- * maximum is 4096
+ * maximum is 4096.
*/
module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
/**
* struct cmb - basic channel measurement block
+ * @ssch_rsch_count: number of ssch and rsch
+ * @sample_count: number of samples
+ * @device_connect_time: time of device connect
+ * @function_pending_time: time of function pending
+ * @device_disconnect_time: time of device disconnect
+ * @control_unit_queuing_time: time of control unit queuing
+ * @device_active_only_time: time of device active only
+ * @reserved: unused in basic measurement mode
*
- * cmb as used by the hardware the fields are described in z/Architecture
- * Principles of Operation, chapter 17.
- * The area to be a contiguous array and may not be reallocated or freed.
+ * The measurement block as used by the hardware. The fields are described
+ * further in z/Architecture Principles of Operation, chapter 17.
+ *
+ * The cmb area made up from these blocks must be a contiguous array and may
+ * not be reallocated or freed.
* Only one cmb area can be present in the system.
*/
struct cmb {
@@ -516,8 +532,9 @@ struct cmb {
u32 reserved[2];
};
-/* insert a single device into the cmb_area list
- * called with cmb_area.lock held from alloc_cmb
+/*
+ * Insert a single device into the cmb_area list.
+ * Called with cmb_area.lock held from alloc_cmb.
*/
static int alloc_cmb_single(struct ccw_device *cdev,
struct cmb_data *cmb_data)
@@ -532,9 +549,11 @@ static int alloc_cmb_single(struct ccw_device *cdev,
goto out;
}
- /* find first unused cmb in cmb_area.mem.
- * this is a little tricky: cmb_area.list
- * remains sorted by ->cmb->hw_data pointers */
+ /*
+ * Find first unused cmb in cmb_area.mem.
+ * This is a little tricky: cmb_area.list
+ * remains sorted by ->cmb->hw_data pointers.
+ */
cmb = cmb_area.mem;
list_for_each_entry(node, &cmb_area.list, cmb_list) {
struct cmb_data *data;
@@ -558,8 +577,7 @@ out:
return ret;
}
-static int
-alloc_cmb (struct ccw_device *cdev)
+static int alloc_cmb(struct ccw_device *cdev)
{
int ret;
struct cmb *mem;
@@ -594,6 +612,9 @@ alloc_cmb (struct ccw_device *cdev)
free_pages((unsigned long)mem, get_order(size));
} else if (!mem) {
/* no luck */
+ printk(KERN_WARNING "cio: failed to allocate area "
+ "for measuring %d subchannels\n",
+ cmb_area.num_channels);
ret = -ENOMEM;
goto out;
} else {
@@ -667,7 +688,7 @@ static int set_cmb(struct ccw_device *cdev, u32 mme)
return set_schib_wait(cdev, mme, 0, offset);
}
-static u64 read_cmb (struct ccw_device *cdev, int index)
+static u64 read_cmb(struct ccw_device *cdev, int index)
{
struct cmb *cmb;
u32 val;
@@ -717,7 +738,7 @@ out:
return ret;
}
-static int readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
+static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
{
struct cmb *cmb;
struct cmb_data *cmb_data;
@@ -790,14 +811,25 @@ static struct cmb_operations cmbops_basic = {
.align = align_cmb,
.attr_group = &cmf_attr_group,
};
-
+
/* ******** extended cmb handling ********/
/**
* struct cmbe - extended channel measurement block
+ * @ssch_rsch_count: number of ssch and rsch
+ * @sample_count: number of samples
+ * @device_connect_time: time of device connect
+ * @function_pending_time: time of function pending
+ * @device_disconnect_time: time of device disconnect
+ * @control_unit_queuing_time: time of control unit queuing
+ * @device_active_only_time: time of device active only
+ * @device_busy_time: time of device busy
+ * @initial_command_response_time: initial command response time
+ * @reserved: unused
*
- * cmb as used by the hardware, may be in any 64 bit physical location,
- * the fields are described in z/Architecture Principles of Operation,
+ * The measurement block as used by the hardware. May be in any 64 bit physical
+ * location.
+ * The fields are described further in z/Architecture Principles of Operation,
* third edition, chapter 17.
*/
struct cmbe {
@@ -813,10 +845,12 @@ struct cmbe {
u32 reserved[7];
};
-/* kmalloc only guarantees 8 byte alignment, but we need cmbe
+/*
+ * kmalloc only guarantees 8 byte alignment, but we need cmbe
* pointers to be naturally aligned. Make sure to allocate
- * enough space for two cmbes */
-static inline struct cmbe* cmbe_align(struct cmbe *c)
+ * enough space for two cmbes.
+ */
+static inline struct cmbe *cmbe_align(struct cmbe *c)
{
unsigned long addr;
addr = ((unsigned long)c + sizeof (struct cmbe) - sizeof(long)) &
@@ -824,7 +858,7 @@ static inline struct cmbe* cmbe_align(struct cmbe *c)
return (struct cmbe*)addr;
}
-static int alloc_cmbe (struct ccw_device *cdev)
+static int alloc_cmbe(struct ccw_device *cdev)
{
struct cmbe *cmbe;
struct cmb_data *cmb_data;
@@ -870,7 +904,7 @@ out_free:
return ret;
}
-static void free_cmbe (struct ccw_device *cdev)
+static void free_cmbe(struct ccw_device *cdev)
{
struct cmb_data *cmb_data;
@@ -909,7 +943,7 @@ static int set_cmbe(struct ccw_device *cdev, u32 mme)
}
-static u64 read_cmbe (struct ccw_device *cdev, int index)
+static u64 read_cmbe(struct ccw_device *cdev, int index)
{
struct cmbe *cmb;
struct cmb_data *cmb_data;
@@ -967,7 +1001,7 @@ out:
return ret;
}
-static int readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
+static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
{
struct cmbe *cmb;
struct cmb_data *cmb_data;
@@ -1044,17 +1078,16 @@ static struct cmb_operations cmbops_extended = {
.align = align_cmbe,
.attr_group = &cmf_attr_group_ext,
};
-
-static ssize_t
-cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
+static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
{
return sprintf(buf, "%lld\n",
(unsigned long long) cmf_read(to_ccwdev(dev), idx));
}
-static ssize_t
-cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t cmb_show_avg_sample_interval(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct ccw_device *cdev;
long interval;
@@ -1076,8 +1109,9 @@ cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%ld\n", interval);
}
-static ssize_t
-cmb_show_avg_utilization(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t cmb_show_avg_utilization(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct cmbdata data;
u64 utilization;
@@ -1109,14 +1143,16 @@ cmb_show_avg_utilization(struct device *dev, struct device_attribute *attr, char
}
#define cmf_attr(name) \
-static ssize_t show_ ## name (struct device * dev, struct device_attribute *attr, char * buf) \
-{ return cmb_show_attr((dev), buf, cmb_ ## name); } \
-static DEVICE_ATTR(name, 0444, show_ ## name, NULL);
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ return cmb_show_attr((dev), buf, cmb_##name); } \
+static DEVICE_ATTR(name, 0444, show_##name, NULL);
#define cmf_attr_avg(name) \
-static ssize_t show_avg_ ## name (struct device * dev, struct device_attribute *attr, char * buf) \
-{ return cmb_show_attr((dev), buf, cmb_ ## name); } \
-static DEVICE_ATTR(avg_ ## name, 0444, show_avg_ ## name, NULL);
+static ssize_t show_avg_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ return cmb_show_attr((dev), buf, cmb_##name); } \
+static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL);
cmf_attr(ssch_rsch_count);
cmf_attr(sample_count);
@@ -1128,7 +1164,8 @@ cmf_attr_avg(device_active_only_time);
cmf_attr_avg(device_busy_time);
cmf_attr_avg(initial_command_response_time);
-static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval, NULL);
+static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval,
+ NULL);
static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
static struct attribute *cmf_attributes[] = {
@@ -1169,12 +1206,16 @@ static struct attribute_group cmf_attr_group_ext = {
.attrs = cmf_attributes_ext,
};
-static ssize_t cmb_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t cmb_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", to_ccwdev(dev)->private->cmb ? 1 : 0);
}
-static ssize_t cmb_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t c)
+static ssize_t cmb_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t c)
{
struct ccw_device *cdev;
int ret;
@@ -1199,9 +1240,16 @@ static ssize_t cmb_enable_store(struct device *dev, struct device_attribute *att
DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store);
-/* enable_cmf/disable_cmf: module interface for cmf (de)activation */
-int
-enable_cmf(struct ccw_device *cdev)
+/**
+ * enable_cmf() - switch on the channel measurement for a specific device
+ * @cdev: The ccw device to be enabled
+ *
+ * Returns %0 for success or a negative error value.
+ *
+ * Context:
+ * non-atomic
+ */
+int enable_cmf(struct ccw_device *cdev)
{
int ret;
@@ -1222,8 +1270,16 @@ enable_cmf(struct ccw_device *cdev)
return ret;
}
-int
-disable_cmf(struct ccw_device *cdev)
+/**
+ * disable_cmf() - switch off the channel measurement for a specific device
+ * @cdev: The ccw device to be disabled
+ *
+ * Returns %0 for success or a negative error value.
+ *
+ * Context:
+ * non-atomic
+ */
+int disable_cmf(struct ccw_device *cdev)
{
int ret;
@@ -1235,14 +1291,32 @@ disable_cmf(struct ccw_device *cdev)
return ret;
}
-u64
-cmf_read(struct ccw_device *cdev, int index)
+/**
+ * cmf_read() - read one value from the current channel measurement block
+ * @cdev: the channel to be read
+ * @index: the index of the value to be read
+ *
+ * Returns the value read or %0 if the value cannot be read.
+ *
+ * Context:
+ * any
+ */
+u64 cmf_read(struct ccw_device *cdev, int index)
{
return cmbops->read(cdev, index);
}
-int
-cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
+/**
+ * cmf_readall() - read the current channel measurement block
+ * @cdev: the channel to be read
+ * @data: a pointer to a data block that will be filled
+ *
+ * Returns %0 on success, a negative error value otherwise.
+ *
+ * Context:
+ * any
+ */
+int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
{
return cmbops->readall(cdev, data);
}
@@ -1254,15 +1328,16 @@ int cmf_reenable(struct ccw_device *cdev)
return cmbops->set(cdev, 2);
}
-static int __init
-init_cmf(void)
+static int __init init_cmf(void)
{
char *format_string;
char *detect_string = "parameter";
- /* We cannot really autoprobe this. If the user did not give a parameter,
- see if we are running on z990 or up, otherwise fall back to basic mode. */
-
+ /*
+ * If the user did not give a parameter, see if we are running on a
+ * machine supporting extended measurement blocks, otherwise fall back
+ * to basic mode.
+ */
if (format == CMF_AUTODETECT) {
if (!css_characteristics_avail ||
!css_general_characteristics.ext_mb) {
@@ -1279,16 +1354,9 @@ init_cmf(void)
case CMF_BASIC:
format_string = "basic";
cmbops = &cmbops_basic;
- if (cmb_area.num_channels > 4096 || cmb_area.num_channels < 1) {
- printk(KERN_ERR "cio: Basic channel measurement "
- "facility can only use 1 to 4096 devices\n"
- KERN_ERR "when the cmf driver is built"
- " as a loadable module\n");
- return 1;
- }
break;
case CMF_EXTENDED:
- format_string = "extended";
+ format_string = "extended";
cmbops = &cmbops_extended;
break;
default:
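
The cmf.c hunk adds kerneldoc for the exported channel-measurement entry points (enable_cmf(), disable_cmf(), cmf_read(), cmf_readall()). A hypothetical consumer, sketched under the assumption that the declarations live in <asm/cmb.h> as in this tree; foo_sample_channel() is not part of the patch:

#include <asm/ccwdev.h>
#include <asm/cmb.h>

static int foo_sample_channel(struct ccw_device *cdev)
{
	struct cmbdata data;
	int rc;

	rc = enable_cmf(cdev);		/* non-atomic context required */
	if (rc)
		return rc;
	rc = cmf_readall(cdev, &data);	/* fills the common-format cmbdata */
	disable_cmf(cdev);
	return rc;
}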
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 5635e656c1a3..5d83dd471461 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
+#include <linux/reboot.h>
#include "css.h"
#include "cio.h"
@@ -27,7 +28,7 @@ int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;
-struct channel_subsystem *css[__MAX_CSSID + 1];
+struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
int css_characteristics_avail = 0;
@@ -177,7 +178,7 @@ static int css_register_subchannel(struct subchannel *sch)
int ret;
/* Initialize the subchannel structure */
- sch->dev.parent = &css[0]->device;
+ sch->dev.parent = &channel_subsystems[0]->device;
sch->dev.bus = &css_bus_type;
sch->dev.release = &css_subchannel_release;
sch->dev.groups = subch_attr_groups;
@@ -606,30 +607,55 @@ static int __init setup_css(int nr)
{
u32 tod_high;
int ret;
+ struct channel_subsystem *css;
- memset(css[nr], 0, sizeof(struct channel_subsystem));
- css[nr]->pseudo_subchannel =
- kzalloc(sizeof(*css[nr]->pseudo_subchannel), GFP_KERNEL);
- if (!css[nr]->pseudo_subchannel)
+ css = channel_subsystems[nr];
+ memset(css, 0, sizeof(struct channel_subsystem));
+ css->pseudo_subchannel =
+ kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
+ if (!css->pseudo_subchannel)
return -ENOMEM;
- css[nr]->pseudo_subchannel->dev.parent = &css[nr]->device;
- css[nr]->pseudo_subchannel->dev.release = css_subchannel_release;
- sprintf(css[nr]->pseudo_subchannel->dev.bus_id, "defunct");
- ret = cio_create_sch_lock(css[nr]->pseudo_subchannel);
+ css->pseudo_subchannel->dev.parent = &css->device;
+ css->pseudo_subchannel->dev.release = css_subchannel_release;
+ sprintf(css->pseudo_subchannel->dev.bus_id, "defunct");
+ ret = cio_create_sch_lock(css->pseudo_subchannel);
if (ret) {
- kfree(css[nr]->pseudo_subchannel);
+ kfree(css->pseudo_subchannel);
return ret;
}
- mutex_init(&css[nr]->mutex);
- css[nr]->valid = 1;
- css[nr]->cssid = nr;
- sprintf(css[nr]->device.bus_id, "css%x", nr);
- css[nr]->device.release = channel_subsystem_release;
+ mutex_init(&css->mutex);
+ css->valid = 1;
+ css->cssid = nr;
+ sprintf(css->device.bus_id, "css%x", nr);
+ css->device.release = channel_subsystem_release;
tod_high = (u32) (get_clock() >> 32);
- css_generate_pgid(css[nr], tod_high);
+ css_generate_pgid(css, tod_high);
return 0;
}
+static int css_reboot_event(struct notifier_block *this,
+ unsigned long event,
+ void *ptr)
+{
+ int ret, i;
+
+ ret = NOTIFY_DONE;
+ for (i = 0; i <= __MAX_CSSID; i++) {
+ struct channel_subsystem *css;
+
+ css = channel_subsystems[i];
+ if (css->cm_enabled)
+ if (chsc_secm(css, 0))
+ ret = NOTIFY_BAD;
+ }
+
+ return ret;
+}
+
+static struct notifier_block css_reboot_notifier = {
+ .notifier_call = css_reboot_event,
+};
+
/*
* Now that the driver core is running, we can setup our channel subsystem.
* The struct subchannel's are created during probing (except for the
@@ -670,51 +696,63 @@ init_channel_subsystem (void)
}
/* Setup css structure. */
for (i = 0; i <= __MAX_CSSID; i++) {
- css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
- if (!css[i]) {
+ struct channel_subsystem *css;
+
+ css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
+ if (!css) {
ret = -ENOMEM;
goto out_unregister;
}
+ channel_subsystems[i] = css;
ret = setup_css(i);
if (ret)
goto out_free;
- ret = device_register(&css[i]->device);
+ ret = device_register(&css->device);
if (ret)
goto out_free_all;
if (css_characteristics_avail &&
css_chsc_characteristics.secm) {
- ret = device_create_file(&css[i]->device,
+ ret = device_create_file(&css->device,
&dev_attr_cm_enable);
if (ret)
goto out_device;
}
- ret = device_register(&css[i]->pseudo_subchannel->dev);
+ ret = device_register(&css->pseudo_subchannel->dev);
if (ret)
goto out_file;
}
+ ret = register_reboot_notifier(&css_reboot_notifier);
+ if (ret)
+ goto out_pseudo;
css_init_done = 1;
ctl_set_bit(6, 28);
for_each_subchannel(__init_channel_subsystem, NULL);
return 0;
+out_pseudo:
+ device_unregister(&channel_subsystems[i]->pseudo_subchannel->dev);
out_file:
- device_remove_file(&css[i]->device, &dev_attr_cm_enable);
+ device_remove_file(&channel_subsystems[i]->device,
+ &dev_attr_cm_enable);
out_device:
- device_unregister(&css[i]->device);
+ device_unregister(&channel_subsystems[i]->device);
out_free_all:
- kfree(css[i]->pseudo_subchannel->lock);
- kfree(css[i]->pseudo_subchannel);
+ kfree(channel_subsystems[i]->pseudo_subchannel->lock);
+ kfree(channel_subsystems[i]->pseudo_subchannel);
out_free:
- kfree(css[i]);
+ kfree(channel_subsystems[i]);
out_unregister:
while (i > 0) {
+ struct channel_subsystem *css;
+
i--;
- device_unregister(&css[i]->pseudo_subchannel->dev);
+ css = channel_subsystems[i];
+ device_unregister(&css->pseudo_subchannel->dev);
if (css_characteristics_avail && css_chsc_characteristics.secm)
- device_remove_file(&css[i]->device,
+ device_remove_file(&css->device,
&dev_attr_cm_enable);
- device_unregister(&css[i]->device);
+ device_unregister(&css->device);
}
out_bus:
bus_unregister(&css_bus_type);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 5d65e83ca66e..81215ef32435 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -167,7 +167,7 @@ struct channel_subsystem {
#define to_css(dev) container_of(dev, struct channel_subsystem, device)
extern struct bus_type css_bus_type;
-extern struct channel_subsystem *css[];
+extern struct channel_subsystem *channel_subsystems[];
/* Some helper functions for disconnected state. */
int device_is_disconnected(struct subchannel *);
@@ -191,6 +191,5 @@ int sch_is_pseudo_sch(struct subchannel *);
extern struct workqueue_struct *slow_path_wq;
-int subchannel_add_files (struct device *);
extern struct attribute_group *subch_attr_groups[];
#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 297659fa0e26..7ee57f084a89 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -21,6 +21,7 @@
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h> /* HZ */
+#include <asm/cmb.h>
#include "cio.h"
#include "cio_debug.h"
@@ -78,45 +79,37 @@ static int snprint_alias(char *buf, size_t size,
/* Set up environment variables for ccw device uevent. Return 0 on success,
* non-zero otherwise. */
-static int ccw_uevent(struct device *dev, char **envp, int num_envp,
- char *buffer, int buffer_size)
+static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_device_id *id = &(cdev->id);
- int i = 0;
- int len = 0;
int ret;
char modalias_buf[30];
/* CU_TYPE= */
- ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
- "CU_TYPE=%04X", id->cu_type);
+ ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
if (ret)
return ret;
/* CU_MODEL= */
- ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
- "CU_MODEL=%02X", id->cu_model);
+ ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
if (ret)
return ret;
/* The next two can be zero, that's ok for us */
/* DEV_TYPE= */
- ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
- "DEV_TYPE=%04X", id->dev_type);
+ ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
if (ret)
return ret;
/* DEV_MODEL= */
- ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
- "DEV_MODEL=%02X", id->dev_model);
+ ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
if (ret)
return ret;
/* MODALIAS= */
snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
- ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
- "MODALIAS=%s", modalias_buf);
+ ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
return ret;
}
@@ -354,8 +347,18 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
cdev->private->dev_id.devno);
}
-int
-ccw_device_set_offline(struct ccw_device *cdev)
+/**
+ * ccw_device_set_offline() - disable a ccw device for I/O
+ * @cdev: target ccw device
+ *
+ * This function calls the driver's set_offline() function for @cdev, if
+ * given, and then disables @cdev.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ * Context:
+ * enabled, ccw device lock not held
+ */
+int ccw_device_set_offline(struct ccw_device *cdev)
{
int ret;
@@ -393,8 +396,19 @@ ccw_device_set_offline(struct ccw_device *cdev)
return ret;
}
-int
-ccw_device_set_online(struct ccw_device *cdev)
+/**
+ * ccw_device_set_online() - enable a ccw device for I/O
+ * @cdev: target ccw device
+ *
+ * This function first enables @cdev and then calls the driver's set_online()
+ * function for @cdev, if given. If set_online() returns an error, @cdev is
+ * disabled again.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ * Context:
+ * enabled, ccw device lock not held
+ */
+int ccw_device_set_online(struct ccw_device *cdev)
{
int ret;
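
A hedged sketch of how a driver-side helper might use the two functions documented above; any error handling beyond the returned codes is assumed to happen in the caller:

/* Sketch, not part of the patch: bring a device online, use it, take it offline. */
static int example_cycle_device(struct ccw_device *cdev)
{
        int rc;

        rc = ccw_device_set_online(cdev);
        if (rc)
                return rc;
        /* ... perform I/O through the driver's normal paths ... */
        return ccw_device_set_offline(cdev);
}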
@@ -944,8 +958,7 @@ out:
wake_up(&ccw_device_init_wq);
}
-void
-ccw_device_call_sch_unregister(struct work_struct *work)
+static void ccw_device_call_sch_unregister(struct work_struct *work)
{
struct ccw_device_private *priv;
struct ccw_device *cdev;
@@ -1098,6 +1111,7 @@ io_subchannel_probe (struct subchannel *sch)
* device, e.g. the console.
*/
cdev = sch->dev.driver_data;
+ cdev->dev.groups = ccwdev_attr_groups;
device_initialize(&cdev->dev);
ccw_device_register(cdev);
/*
@@ -1323,8 +1337,19 @@ __ccwdev_check_busid(struct device *dev, void *id)
}
-struct ccw_device *
-get_ccwdev_by_busid(struct ccw_driver *cdrv, const char *bus_id)
+/**
+ * get_ccwdev_by_busid() - obtain device from a bus id
+ * @cdrv: driver the device is owned by
+ * @bus_id: bus id of the device to be searched
+ *
+ * This function searches all devices owned by @cdrv for a device with a bus
+ * id matching @bus_id.
+ * Returns:
+ * If a match is found, the reference count of the found device is increased
+ * and the device is returned; else %NULL is returned.
+ */
+struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
+ const char *bus_id)
{
struct device *dev;
struct device_driver *drv;
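
A usage sketch for the lookup documented above; the bus id "0.0.4711" is a made-up example, and the reference obtained on a match has to be dropped again with put_device():

/* Sketch, not part of the patch: find a device by bus id and release it. */
static void example_find_device(struct ccw_driver *drv)
{
        struct ccw_device *cdev;

        cdev = get_ccwdev_by_busid(drv, "0.0.4711");
        if (!cdev)
                return;
        /* ... inspect cdev ... */
        put_device(&cdev->dev);
}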
@@ -1398,16 +1423,34 @@ ccw_device_remove (struct device *dev)
return 0;
}
+static void ccw_device_shutdown(struct device *dev)
+{
+ struct ccw_device *cdev;
+
+ cdev = to_ccwdev(dev);
+ if (cdev->drv && cdev->drv->shutdown)
+ cdev->drv->shutdown(cdev);
+ disable_cmf(cdev);
+}
+
struct bus_type ccw_bus_type = {
.name = "ccw",
.match = ccw_bus_match,
.uevent = ccw_uevent,
.probe = ccw_device_probe,
.remove = ccw_device_remove,
+ .shutdown = ccw_device_shutdown,
};
-int
-ccw_driver_register (struct ccw_driver *cdriver)
+/**
+ * ccw_driver_register() - register a ccw driver
+ * @cdriver: driver to be registered
+ *
+ * This function is mainly a wrapper around driver_register().
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int ccw_driver_register(struct ccw_driver *cdriver)
{
struct device_driver *drv = &cdriver->driver;
@@ -1417,8 +1460,13 @@ ccw_driver_register (struct ccw_driver *cdriver)
return driver_register(drv);
}
-void
-ccw_driver_unregister (struct ccw_driver *cdriver)
+/**
+ * ccw_driver_unregister() - deregister a ccw driver
+ * @cdriver: driver to be deregistered
+ *
+ * This function is mainly a wrapper around driver_unregister().
+ */
+void ccw_driver_unregister(struct ccw_driver *cdriver)
{
driver_unregister(&cdriver->driver);
}
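
A minimal module skeleton built around the two wrappers documented above; example_driver stands in for a driver-defined struct ccw_driver whose ids, probe and remove callbacks are assumed to be filled in elsewhere:

#include <linux/module.h>
#include <asm/ccwdev.h>

static struct ccw_driver example_driver;        /* assumed to be initialized */

/* Sketch, not part of the patch: register/deregister on module load/unload. */
static int __init example_init(void)
{
        return ccw_driver_register(&example_driver);
}

static void __exit example_exit(void)
{
        ccw_driver_unregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);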
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index b66338b76579..0d4089600439 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -80,7 +80,6 @@ void io_subchannel_recog_done(struct ccw_device *cdev);
int ccw_device_cancel_halt_clear(struct ccw_device *);
void ccw_device_do_unreg_rereg(struct work_struct *);
-void ccw_device_call_sch_unregister(struct work_struct *);
void ccw_device_move_to_orphanage(struct work_struct *);
int ccw_device_is_orphan(struct ccw_device *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 8633dc537695..8867443b8060 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -446,7 +446,8 @@ static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
if (cdev->private->pgid[last].inf.ps.state1 ==
SNID_STATE1_RESET)
/* No previous pgid found */
- memcpy(&cdev->private->pgid[0], &css[0]->global_pgid,
+ memcpy(&cdev->private->pgid[0],
+ &channel_subsystems[0]->global_pgid,
sizeof(struct pgid));
else
/* Use existing pgid */
@@ -543,51 +544,6 @@ ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
}
-static void
-ccw_device_nopath_notify(struct work_struct *work)
-{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
- struct subchannel *sch;
- int ret;
- unsigned long flags;
-
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
- spin_lock_irqsave(cdev->ccwlock, flags);
- sch = to_subchannel(cdev->dev.parent);
- /* Extra sanity. */
- if (sch->lpm)
- goto out_unlock;
- if (sch->driver && sch->driver->notify) {
- spin_unlock_irqrestore(cdev->ccwlock, flags);
- ret = sch->driver->notify(&sch->dev, CIO_NO_PATH);
- spin_lock_irqsave(cdev->ccwlock, flags);
- } else
- ret = 0;
- if (!ret) {
- if (get_device(&sch->dev)) {
- /* Driver doesn't want to keep device. */
- cio_disable_subchannel(sch);
- if (get_device(&cdev->dev)) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister);
- queue_work(ccw_device_work,
- &cdev->private->kick_work);
- } else
- put_device(&sch->dev);
- }
- } else {
- cio_disable_subchannel(sch);
- ccw_device_set_timeout(cdev, 0);
- cdev->private->flags.fake_irb = 0;
- cdev->private->state = DEV_STATE_DISCONNECTED;
- wake_up(&cdev->private->wait_q);
- }
-out_unlock:
- spin_unlock_irqrestore(cdev->ccwlock, flags);
-}
-
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
@@ -631,12 +587,9 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
default:
/* Reset oper notify indication after verify error. */
cdev->private->flags.donotify = 0;
- if (cdev->online) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify);
- queue_work(ccw_device_notify_work,
- &cdev->private->kick_work);
- } else
+ if (cdev->online)
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+ else
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
}
@@ -690,11 +643,7 @@ ccw_device_disband_done(struct ccw_device *cdev, int err)
break;
default:
cdev->private->flags.donotify = 0;
- if (get_device(&cdev->dev)) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister);
- queue_work(ccw_device_work, &cdev->private->kick_work);
- }
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
}
@@ -765,59 +714,16 @@ ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
}
/*
- * Handle not operational event while offline.
+ * Handle not operational event in non-special state.
*/
-static void
-ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
+static void ccw_device_generic_notoper(struct ccw_device *cdev,
+ enum dev_event dev_event)
{
struct subchannel *sch;
cdev->private->state = DEV_STATE_NOT_OPER;
sch = to_subchannel(cdev->dev.parent);
- if (get_device(&cdev->dev)) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister);
- queue_work(ccw_device_work, &cdev->private->kick_work);
- }
- wake_up(&cdev->private->wait_q);
-}
-
-/*
- * Handle not operational event while online.
- */
-static void
-ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
-{
- struct subchannel *sch;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
- if (sch->driver->notify) {
- spin_unlock_irq(cdev->ccwlock);
- ret = sch->driver->notify(&sch->dev,
- sch->lpm ? CIO_GONE : CIO_NO_PATH);
- spin_lock_irq(cdev->ccwlock);
- } else
- ret = 0;
- if (ret) {
- ccw_device_set_timeout(cdev, 0);
- cdev->private->flags.fake_irb = 0;
- cdev->private->state = DEV_STATE_DISCONNECTED;
- wake_up(&cdev->private->wait_q);
- return;
- }
- cdev->private->state = DEV_STATE_NOT_OPER;
- cio_disable_subchannel(sch);
- if (sch->schib.scsw.actl != 0) {
- // FIXME: not-oper indication to device driver ?
- ccw_device_call_handler(cdev);
- }
- if (get_device(&cdev->dev)) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister);
- queue_work(ccw_device_work, &cdev->private->kick_work);
- }
- wake_up(&cdev->private->wait_q);
+ css_schedule_eval(sch->schid);
}
/*
@@ -915,18 +821,9 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
cdev->private->state = DEV_STATE_TIMEOUT_KILL;
return;
}
- if (ret == -ENODEV) {
- struct subchannel *sch;
-
- sch = to_subchannel(cdev->dev.parent);
- if (!sch->lpm) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify);
- queue_work(ccw_device_notify_work,
- &cdev->private->kick_work);
- } else
- dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
- } else if (cdev->handler)
+ if (ret == -ENODEV)
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+ else if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-ETIMEDOUT));
}
@@ -1233,7 +1130,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_SENSE_PGID] = {
- [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_sense_pgid_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
[DEV_EVENT_VERIFY] = ccw_device_nop,
@@ -1245,50 +1142,50 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_OFFLINE] = {
- [DEV_EVENT_NOTOPER] = ccw_device_offline_notoper,
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_offline_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_VERIFY] = {
- [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_verify_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
[DEV_EVENT_VERIFY] = ccw_device_delay_verify,
},
[DEV_STATE_ONLINE] = {
- [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_online_timeout,
[DEV_EVENT_VERIFY] = ccw_device_online_verify,
},
[DEV_STATE_W4SENSE] = {
- [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_w4sense,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_online_verify,
},
[DEV_STATE_DISBAND_PGID] = {
- [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_disband_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_BOXED] = {
- [DEV_EVENT_NOTOPER] = ccw_device_offline_notoper,
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_stlck_done,
[DEV_EVENT_TIMEOUT] = ccw_device_stlck_done,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
/* states to wait for i/o completion before doing something */
[DEV_STATE_CLEAR_VERIFY] = {
- [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_clear_verify,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_TIMEOUT_KILL] = {
- [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout,
[DEV_EVENT_VERIFY] = ccw_device_nop, //FIXME
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 60b9347f7c92..f232832f2b22 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -17,6 +17,7 @@
#include <asm/delay.h>
#include <asm/cio.h>
#include <asm/lowcore.h>
+#include <asm/diag.h>
#include "cio.h"
#include "cio_debug.h"
@@ -25,51 +26,6 @@
#include "ioasm.h"
/*
- * diag210 is used under VM to get information about a virtual device
- */
-int
-diag210(struct diag210 * addr)
-{
- /*
- * diag 210 needs its data below the 2GB border, so we
- * use a static data area to be sure
- */
- static struct diag210 diag210_tmp;
- static DEFINE_SPINLOCK(diag210_lock);
- unsigned long flags;
- int ccode;
-
- spin_lock_irqsave(&diag210_lock, flags);
- diag210_tmp = *addr;
-
-#ifdef CONFIG_64BIT
- asm volatile(
- " lhi %0,-1\n"
- " sam31\n"
- " diag %1,0,0x210\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1: sam64\n"
- EX_TABLE(0b,1b)
- : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");
-#else
- asm volatile(
- " lhi %0,-1\n"
- " diag %1,0,0x210\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");
-#endif
-
- *addr = diag210_tmp;
- spin_unlock_irqrestore(&diag210_lock, flags);
-
- return ccode;
-}
-
-/*
* Input :
* devno - device number
* ps - pointer to sense ID data area
@@ -349,5 +305,3 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
break;
}
}
-
-EXPORT_SYMBOL(diag210);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 14eba854b155..7fd2dadc3297 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -25,6 +25,16 @@
#include "device.h"
#include "chp.h"
+/**
+ * ccw_device_set_options_mask() - set some options and unset the rest
+ * @cdev: device for which the options are to be set
+ * @flags: options to be set
+ *
+ * All flags specified in @flags are set; all flags not specified in @flags
+ * are cleared.
+ * Returns:
+ * %0 on success, -%EINVAL on an invalid flag combination.
+ */
int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
{
/*
@@ -40,6 +50,15 @@ int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
return 0;
}
+/**
+ * ccw_device_set_options() - set some options
+ * @cdev: device for which the options are to be set
+ * @flags: options to be set
+ *
+ * All flags specified in @flags are set; the remainder is left untouched.
+ * Returns:
+ * %0 on success, -%EINVAL if an invalid flag combination would ensue.
+ */
int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
/*
@@ -59,6 +78,13 @@ int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
return 0;
}
+/**
+ * ccw_device_clear_options() - clear some options
+ * @cdev: device for which the options are to be cleared
+ * @flags: options to be cleared
+ *
+ * All flags specified in @flags are cleared; the remainder is left untouched.
+ */
void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
{
cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
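
A sketch combining the three option helpers documented above; CCWDEV_DO_PATHGROUP and CCWDEV_ALLOW_FORCE are existing flags from <asm/ccwdev.h>, and whether to set them is a per-driver decision:

/* Sketch, not part of the patch: group paths, then drop the force flag again. */
static int example_set_options(struct ccw_device *cdev)
{
        int rc;

        rc = ccw_device_set_options_mask(cdev, CCWDEV_DO_PATHGROUP |
                                               CCWDEV_ALLOW_FORCE);
        if (rc)
                return rc;
        ccw_device_clear_options(cdev, CCWDEV_ALLOW_FORCE);
        return 0;
}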
@@ -67,8 +93,22 @@ void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
}
-int
-ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
+/**
+ * ccw_device_clear() - terminate I/O request processing
+ * @cdev: target ccw device
+ * @intparm: interruption parameter; value is only used if no I/O is
+ * outstanding, otherwise the intparm associated with the I/O request
+ * is returned
+ *
+ * ccw_device_clear() calls csch on @cdev's subchannel.
+ * Returns:
+ * %0 on success,
+ * -%ENODEV on device not operational,
+ * -%EINVAL on invalid device state.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
struct subchannel *sch;
int ret;
@@ -89,10 +129,33 @@ ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
return ret;
}
-int
-ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
- unsigned long intparm, __u8 lpm, __u8 key,
- unsigned long flags)
+/**
+ * ccw_device_start_key() - start an s390 channel program with key
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ * @cdev's interrupt handler. Allows a device driver to associate
+ * the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ * value of 0 will make cio use the opm.
+ * @key: storage key to be used for the I/O
+ * @flags: additional flags; defines the action to be performed for I/O
+ * processing.
+ *
+ * Start an S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * Returns:
+ * %0, if the operation was successful;
+ * -%EBUSY, if the device is busy, or status pending;
+ * -%EACCES, if no path specified in @lpm is operational;
+ * -%ENODEV, if the device is not operational.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm, __u8 key,
+ unsigned long flags)
{
struct subchannel *sch;
int ret;
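
A sketch of issuing a channel program via the function documented above; my_ccw is an assumed, fully initialized struct ccw1, and the ccw device lock is assumed to be held with interrupts disabled, as the kernel-doc requires:

/* Sketch, not part of the patch: start one CCW on any operational path. */
static int example_start_io(struct ccw_device *cdev, struct ccw1 *my_ccw)
{
        return ccw_device_start_key(cdev, my_ccw, (unsigned long) my_ccw,
                                    0 /* lpm: use opm */,
                                    PAGE_DEFAULT_KEY, 0 /* flags */);
}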
@@ -135,11 +198,38 @@ ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
return ret;
}
-
-int
-ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
- unsigned long intparm, __u8 lpm, __u8 key,
- unsigned long flags, int expires)
+/**
+ * ccw_device_start_timeout_key() - start an s390 channel program with timeout and key
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ * @cdev's interrupt handler. Allows a device driver to associate
+ * the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ * value of 0 will make cio use the opm.
+ * @key: storage key to be used for the I/O
+ * @flags: additional flags; defines the action to be performed for I/O
+ * processing.
+ * @expires: timeout value in jiffies
+ *
+ * Start an S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * This function notifies the device driver if the channel program has not
+ * completed during the time specified by @expires. If a timeout occurs, the
+ * channel program is terminated via xsch, hsch or csch, and the device's
+ * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
+ * Returns:
+ * %0, if the operation was successful;
+ * -%EBUSY, if the device is busy, or status pending;
+ * -%EACCES, if no path specified in @lpm is operational;
+ * -%ENODEV, if the device is not operational.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm, __u8 key,
+ unsigned long flags, int expires)
{
int ret;
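
The same sketch with a deadline, using the timeout variant documented above; the five second value is an arbitrary example:

/* Sketch, not part of the patch: as before, but give up after five seconds. */
static int example_start_io_timeout(struct ccw_device *cdev, struct ccw1 *cp)
{
        return ccw_device_start_timeout_key(cdev, cp, (unsigned long) cp,
                                            0, PAGE_DEFAULT_KEY, 0, 5 * HZ);
}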
@@ -152,18 +242,67 @@ ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
return ret;
}
-int
-ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
- unsigned long intparm, __u8 lpm, unsigned long flags)
+/**
+ * ccw_device_start() - start an s390 channel program
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ * @cdev's interrupt handler. Allows a device driver to associate
+ * the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ * value of 0 will make cio use the opm.
+ * @flags: additional flags; defines the action to be performed for I/O
+ * processing.
+ *
+ * Start an S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * Returns:
+ * %0, if the operation was successful;
+ * -%EBUSY, if the device is busy, or status pending;
+ * -%EACCES, if no path specified in @lpm is operational;
+ * -%ENODEV, if the device is not operational.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm, unsigned long flags)
{
return ccw_device_start_key(cdev, cpa, intparm, lpm,
PAGE_DEFAULT_KEY, flags);
}
-int
-ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
- unsigned long intparm, __u8 lpm, unsigned long flags,
- int expires)
+/**
+ * ccw_device_start_timeout() - start an s390 channel program with timeout
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ * @cdev's interrupt handler. Allows a device driver to associate
+ * the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ * value of 0 will make cio use the opm.
+ * @flags: additional flags; defines the action to be performed for I/O
+ * processing.
+ * @expires: timeout value in jiffies
+ *
+ * Start an S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * This function notifies the device driver if the channel program has not
+ * completed during the time specified by @expires. If a timeout occurs, the
+ * channel program is terminated via xsch, hsch or csch, and the device's
+ * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
+ * Returns:
+ * %0, if the operation was successful;
+ * -%EBUSY, if the device is busy, or status pending;
+ * -%EACCES, if no path specified in @lpm is operational;
+ * -%ENODEV, if the device is not operational.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm,
+ unsigned long flags, int expires)
{
return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
PAGE_DEFAULT_KEY, flags,
@@ -171,8 +310,23 @@ ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
}
-int
-ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
+/**
+ * ccw_device_halt() - halt I/O request processing
+ * @cdev: target ccw device
+ * @intparm: interruption parameter; value is only used if no I/O is
+ * outstanding, otherwise the intparm associated with the I/O request
+ * is returned
+ *
+ * ccw_device_halt() calls hsch on @cdev's subchannel.
+ * Returns:
+ * %0 on success,
+ * -%ENODEV on device not operational,
+ * -%EINVAL on invalid device state,
+ * -%EBUSY on device busy or interrupt pending.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
struct subchannel *sch;
int ret;
@@ -193,8 +347,20 @@ ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
return ret;
}
-int
-ccw_device_resume(struct ccw_device *cdev)
+/**
+ * ccw_device_resume() - resume channel program execution
+ * @cdev: target ccw device
+ *
+ * ccw_device_resume() calls rsch on @cdev's subchannel.
+ * Returns:
+ * %0 on success,
+ * -%ENODEV on device not operational,
+ * -%EINVAL on invalid device state,
+ * -%EBUSY on device busy or interrupt pending.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_resume(struct ccw_device *cdev)
{
struct subchannel *sch;
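
A sketch combining ccw_device_halt() and ccw_device_clear() as documented above: try hsch first and escalate to csch if the subchannel reports busy; the same lock and interrupt context rules apply:

/* Sketch, not part of the patch: terminate an outstanding request. */
static int example_terminate_io(struct ccw_device *cdev, unsigned long intparm)
{
        int rc;

        rc = ccw_device_halt(cdev, intparm);
        if (rc == -EBUSY)
                rc = ccw_device_clear(cdev, intparm);
        return rc;
}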
@@ -260,11 +426,21 @@ ccw_device_call_handler(struct ccw_device *cdev)
return 1;
}
-/*
- * Search for CIW command in extended sense data.
+/**
+ * ccw_device_get_ciw() - Search for CIW command in extended sense data.
+ * @cdev: ccw device to inspect
+ * @ct: command type to look for
+ *
+ * During SenseID, command information words (CIWs) describing special
+ * commands available to the device may have been stored in the extended
+ * sense data. This function searches for CIWs of a specified command
+ * type in the extended sense data.
+ * Returns:
+ * %NULL if no extended sense data has been stored or if no CIW of the
+ * specified command type could be found,
+ * else a pointer to the CIW of the specified command type.
*/
-struct ciw *
-ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
+struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
int ciw_cnt;
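
A sketch of a CIW lookup as documented above; CIW_TYPE_RCD (read configuration data) is one of the command types defined in <asm/cio.h>:

/* Sketch, not part of the patch: fetch the RCD command code, if any. */
static int example_get_rcd_opcode(struct ccw_device *cdev)
{
        struct ciw *ciw;

        ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
        if (!ciw)
                return -EOPNOTSUPP;
        return ciw->cmd;
}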
@@ -276,8 +452,14 @@ ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
return NULL;
}
-__u8
-ccw_device_get_path_mask(struct ccw_device *cdev)
+/**
+ * ccw_device_get_path_mask() - get currently available paths
+ * @cdev: ccw device to be queried
+ * Returns:
+ * %0 if no subchannel for the device is available,
+ * else the mask of currently available paths for the ccw device's subchannel.
+ */
+__u8 ccw_device_get_path_mask(struct ccw_device *cdev)
{
struct subchannel *sch;
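
A tiny sketch using the path mask accessor documented above to guard I/O submission:

/* Sketch, not part of the patch: submit I/O only while a path is available. */
static int example_path_available(struct ccw_device *cdev)
{
        return ccw_device_get_path_mask(cdev) != 0;
}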
@@ -357,8 +539,7 @@ out_unlock:
return ret;
}
-void *
-ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
+void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
{
struct subchannel *sch;
struct chp_id chpid;
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 03347aed2b3e..40a3208c7cf3 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -195,6 +195,8 @@ qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
again:
ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
rc = qdio_check_ccq(q, ccq);
+ if ((ccq == 96) && (tmp_cnt != *cnt))
+ rc = 0;
if (rc == 1) {
QDIO_DBF_TEXT5(1,trace,"eqAGAIN");
goto again;
@@ -740,7 +742,8 @@ qdio_get_outbound_buffer_frontier(struct qdio_q *q)
first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
(QDIO_MAX_BUFFERS_PER_Q-1));
- if ((!q->is_iqdio_q)&&(!q->hydra_gives_outbound_pcis))
+ if (((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis)) ||
+ (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH))
SYNC_MEMORY;
check_next:
@@ -1021,9 +1024,9 @@ __qdio_outbound_processing(struct qdio_q *q)
}
static void
-qdio_outbound_processing(struct qdio_q *q)
+qdio_outbound_processing(unsigned long q)
{
- __qdio_outbound_processing(q);
+ __qdio_outbound_processing((struct qdio_q *) q);
}
/************************* INBOUND ROUTINES *******************************/
@@ -1446,9 +1449,10 @@ out:
}
static void
-tiqdio_inbound_processing(struct qdio_q *q)
+tiqdio_inbound_processing(unsigned long q)
{
- __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount));
+ __tiqdio_inbound_processing((struct qdio_q *) q,
+ atomic_read(&spare_indicator_usecount));
}
static void
@@ -1491,9 +1495,9 @@ again:
}
static void
-qdio_inbound_processing(struct qdio_q *q)
+qdio_inbound_processing(unsigned long q)
{
- __qdio_inbound_processing(q);
+ __qdio_inbound_processing((struct qdio_q *) q);
}
/************************* MAIN ROUTINES *******************************/
@@ -1757,12 +1761,15 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
q->handler=input_handler;
q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind;
- q->tasklet.data=(unsigned long)q;
/* q->is_thinint_q isn't valid at this time, but
- * irq_ptr->is_thinint_irq is */
- q->tasklet.func=(void(*)(unsigned long))
- ((irq_ptr->is_thinint_irq)?&tiqdio_inbound_processing:
- &qdio_inbound_processing);
+ * irq_ptr->is_thinint_irq is
+ */
+ if (irq_ptr->is_thinint_irq)
+ tasklet_init(&q->tasklet, tiqdio_inbound_processing,
+ (unsigned long) q);
+ else
+ tasklet_init(&q->tasklet, qdio_inbound_processing,
+ (unsigned long) q);
/* actually this is not used for inbound queues. yet. */
atomic_set(&q->busy_siga_counter,0);
@@ -1833,13 +1840,10 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
q->last_move_ftc=0;
q->handler=output_handler;
- q->tasklet.data=(unsigned long)q;
- q->tasklet.func=(void(*)(unsigned long))
- &qdio_outbound_processing;
- q->timer.function=(void(*)(unsigned long))
- &qdio_outbound_processing;
- q->timer.data = (long)q;
- init_timer(&q->timer);
+ tasklet_init(&q->tasklet, qdio_outbound_processing,
+ (unsigned long) q);
+ setup_timer(&q->timer, qdio_outbound_processing,
+ (unsigned long) q);
atomic_set(&q->busy_siga_counter,0);
q->timing.busy_start=0;
@@ -3723,7 +3727,7 @@ qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count
#endif /* CONFIG_64BIT */
}
} else {
- QDIO_PRINT_WARN("QDIO performance_stats: write 0 or 1 to this file!\n");
+ QDIO_PRINT_ERR("QDIO performance_stats: write 0 or 1 to this file!\n");
return -EINVAL;
}
return count;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 90bd22014513..67aaff3e668d 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -458,28 +458,22 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
* uevent function for AP devices. It sets up a single environment
* variable DEV_TYPE which contains the hardware device type.
*/
-static int ap_uevent (struct device *dev, char **envp, int num_envp,
- char *buffer, int buffer_size)
+static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
{
struct ap_device *ap_dev = to_ap_dev(dev);
- int retval = 0, length = 0, i = 0;
+ int retval = 0;
if (!ap_dev)
return -ENODEV;
/* Set up DEV_TYPE environment variable. */
- retval = add_uevent_var(envp, num_envp, &i,
- buffer, buffer_size, &length,
- "DEV_TYPE=%04X", ap_dev->device_type);
+ retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
if (retval)
return retval;
/* Add MODALIAS= */
- retval = add_uevent_var(envp, num_envp, &i,
- buffer, buffer_size, &length,
- "MODALIAS=ap:t%02X", ap_dev->device_type);
+ retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
- envp[i] = NULL;
return retval;
}
@@ -1231,8 +1225,9 @@ static void ap_reset_domain(void)
{
int i;
- for (i = 0; i < AP_DEVICES; i++)
- ap_reset_queue(AP_MKQID(i, ap_domain_index));
+ if (ap_domain_index != -1)
+ for (i = 0; i < AP_DEVICES; i++)
+ ap_reset_queue(AP_MKQID(i, ap_domain_index));
}
static void ap_reset_all(void)
diff --git a/drivers/s390/crypto/zcrypt_mono.c b/drivers/s390/crypto/zcrypt_mono.c
index 2a9349ad68b7..44253fdd4136 100644
--- a/drivers/s390/crypto/zcrypt_mono.c
+++ b/drivers/s390/crypto/zcrypt_mono.c
@@ -45,7 +45,7 @@
/**
* The module initialization code.
*/
-int __init zcrypt_init(void)
+static int __init zcrypt_init(void)
{
int rc;
@@ -86,7 +86,7 @@ out:
/**
* The module termination code.
*/
-void __exit zcrypt_exit(void)
+static void __exit zcrypt_exit(void)
{
zcrypt_cex2a_exit();
zcrypt_pcixcc_exit();
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 64948788d301..70b9ddc8cf9d 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -277,7 +277,7 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
};
struct {
struct type6_hdr hdr;
- struct ica_CPRBX cprbx;
+ struct CPRBX cprbx;
} __attribute__((packed)) *msg = ap_msg->message;
int rcblen = CEIL4(xcRB->request_control_blk_length);
@@ -432,14 +432,17 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
}
if (service_rc == 8 && service_rs == 770) {
PDEBUG("Invalid key length on PCIXCC/CEX2C\n");
- zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
- return -EAGAIN;
+ return -EINVAL;
}
if (service_rc == 8 && service_rs == 783) {
PDEBUG("Extended bitlengths not enabled on PCIXCC/CEX2C\n");
zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
return -EAGAIN;
}
+ if (service_rc == 12 && service_rs == 769) {
+ PDEBUG("Invalid key on PCIXCC/CEX2C\n");
+ return -EINVAL;
+ }
PRINTK("Unknown service rc/rs (PCIXCC/CEX2C): %d/%d\n",
service_rc, service_rs);
zdev->online = 0;
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_pcixcc.h
index a78ff307fd19..8cb7d7a6973b 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.h
+++ b/drivers/s390/crypto/zcrypt_pcixcc.h
@@ -28,51 +28,6 @@
#ifndef _ZCRYPT_PCIXCC_H_
#define _ZCRYPT_PCIXCC_H_
-/**
- * CPRBX
- * Note that all shorts and ints are big-endian.
- * All pointer fields are 16 bytes long, and mean nothing.
- *
- * A request CPRB is followed by a request_parameter_block.
- *
- * The request (or reply) parameter block is organized thus:
- * function code
- * VUD block
- * key block
- */
-struct CPRBX {
- unsigned short cprb_len; /* CPRB length 220 */
- unsigned char cprb_ver_id; /* CPRB version id. 0x02 */
- unsigned char pad_000[3]; /* Alignment pad bytes */
- unsigned char func_id[2]; /* function id 0x5432 */
- unsigned char cprb_flags[4]; /* Flags */
- unsigned int req_parml; /* request parameter buffer len */
- unsigned int req_datal; /* request data buffer */
- unsigned int rpl_msgbl; /* reply message block length */
- unsigned int rpld_parml; /* replied parameter block len */
- unsigned int rpl_datal; /* reply data block len */
- unsigned int rpld_datal; /* replied data block len */
- unsigned int req_extbl; /* request extension block len */
- unsigned char pad_001[4]; /* reserved */
- unsigned int rpld_extbl; /* replied extension block len */
- unsigned char req_parmb[16]; /* request parm block 'address' */
- unsigned char req_datab[16]; /* request data block 'address' */
- unsigned char rpl_parmb[16]; /* reply parm block 'address' */
- unsigned char rpl_datab[16]; /* reply data block 'address' */
- unsigned char req_extb[16]; /* request extension block 'addr'*/
- unsigned char rpl_extb[16]; /* reply extension block 'addres'*/
- unsigned short ccp_rtcode; /* server return code */
- unsigned short ccp_rscode; /* server reason code */
- unsigned int mac_data_len; /* Mac Data Length */
- unsigned char logon_id[8]; /* Logon Identifier */
- unsigned char mac_value[8]; /* Mac Value */
- unsigned char mac_content_flgs;/* Mac content flag byte */
- unsigned char pad_002; /* Alignment */
- unsigned short domain; /* Domain */
- unsigned char pad_003[12]; /* Domain masks */
- unsigned char pad_004[36]; /* reserved */
-} __attribute__((packed));
-
int zcrypt_pcixcc_init(void);
void zcrypt_pcixcc_exit(void);
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 023455a0b34a..399695f7b1af 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -3891,7 +3891,6 @@ claw_init_netdevice(struct net_device * dev)
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 1300;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
- SET_MODULE_OWNER(dev);
#ifdef FUNCTRACE
printk(KERN_INFO "%s:%s Exit\n",dev->name,__FUNCTION__);
#endif
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 92e8a37b5022..449937233732 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -2823,7 +2823,6 @@ ctc_init_netdevice(struct net_device * dev, int alloc_device,
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 100;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
- SET_MODULE_OWNER(dev);
return dev;
}
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 08a994fdd1a4..0fd663b23d76 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1400,11 +1400,14 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
PRINT_WARN("check on device %s, dstat=0x%X, cstat=0x%X \n",
cdev->dev.bus_id, dstat, cstat);
if (rc) {
- lcs_schedule_recovery(card);
- wake_up(&card->wait_q);
- return;
+ channel->state = LCS_CH_STATE_ERROR;
}
}
+ if (channel->state == LCS_CH_STATE_ERROR) {
+ lcs_schedule_recovery(card);
+ wake_up(&card->wait_q);
+ return;
+ }
/* How far in the ccw chain have we processed? */
if ((channel->state != LCS_CH_STATE_INIT) &&
(irb->scsw.fctl & SCSW_FCTL_START_FUNC)) {
@@ -1708,6 +1711,8 @@ lcs_stopcard(struct lcs_card *card)
if (card->read.state != LCS_CH_STATE_STOPPED &&
card->write.state != LCS_CH_STATE_STOPPED &&
+ card->read.state != LCS_CH_STATE_ERROR &&
+ card->write.state != LCS_CH_STATE_ERROR &&
card->state == DEV_STATE_UP) {
lcs_clear_multicast_list(card);
rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP);
@@ -2145,7 +2150,6 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
card->dev->stop = lcs_stop_device;
card->dev->hard_start_xmit = lcs_start_xmit;
card->dev->get_stats = lcs_getstats;
- SET_MODULE_OWNER(dev);
memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
#ifdef CONFIG_IP_MULTICAST
if (!lcs_check_multicast_support(card))
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 0e1e4a0a88f0..8976fb0b070a 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -138,6 +138,7 @@ enum lcs_channel_states {
LCS_CH_STATE_RUNNING,
LCS_CH_STATE_SUSPENDED,
LCS_CH_STATE_CLEARED,
+ LCS_CH_STATE_ERROR,
};
/**
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 268889474339..4d18d6419ddc 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1904,7 +1904,6 @@ static void netiucv_setup_netdevice(struct net_device *dev)
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
- SET_MODULE_OWNER(dev);
}
/**
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index ec18bae05df0..8c6b72d05b1d 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -833,8 +833,7 @@ struct qeth_card {
struct qeth_qdio_info qdio;
struct qeth_perf_stats perf_stats;
int use_hard_stop;
- int (*orig_hard_header)(struct sk_buff *,struct net_device *,
- unsigned short,void *,void *,unsigned);
+ const struct header_ops *orig_header_ops;
struct qeth_osn_info osn_info;
atomic_t force_alloc_skb;
};
@@ -1178,9 +1177,9 @@ qeth_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
char *buf)
{
if (proto == QETH_PROT_IPV4)
- return qeth_ipaddr4_to_string(addr, buf);
+ qeth_ipaddr4_to_string(addr, buf);
else if (proto == QETH_PROT_IPV6)
- return qeth_ipaddr6_to_string(addr, buf);
+ qeth_ipaddr6_to_string(addr, buf);
}
static inline int
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
index 70108fb16906..e3c268cfbffe 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_eddp.c
@@ -159,13 +159,15 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
buffer = buf->buffer;
/* fill one skb into buffer */
for (i = 0; i < ctx->elements_per_skb; ++i){
- buffer->element[buf->next_element_to_fill].addr =
- ctx->elements[element].addr;
- buffer->element[buf->next_element_to_fill].length =
- ctx->elements[element].length;
- buffer->element[buf->next_element_to_fill].flags =
- ctx->elements[element].flags;
- buf->next_element_to_fill++;
+ if (ctx->elements[element].length != 0) {
+ buffer->element[buf->next_element_to_fill].
+ addr = ctx->elements[element].addr;
+ buffer->element[buf->next_element_to_fill].
+ length = ctx->elements[element].length;
+ buffer->element[buf->next_element_to_fill].
+ flags = ctx->elements[element].flags;
+ buf->next_element_to_fill++;
+ }
element++;
elements--;
}
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 57f69434fbf9..a2d08c9ba3c4 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -160,6 +160,9 @@ qeth_set_multicast_list(struct net_device *);
static void
qeth_setadp_promisc_mode(struct qeth_card *);
+static int
+qeth_hard_header_parse(const struct sk_buff *skb, unsigned char *haddr);
+
static void
qeth_notify_processes(void)
{
@@ -561,7 +564,7 @@ qeth_set_offline(struct ccwgroup_device *cgdev)
}
static int
-qeth_wait_for_threads(struct qeth_card *card, unsigned long threads);
+qeth_threads_running(struct qeth_card *card, unsigned long threads);
static void
@@ -576,8 +579,7 @@ qeth_remove_device(struct ccwgroup_device *cgdev)
if (!card)
return;
- if (qeth_wait_for_threads(card, 0xffffffff))
- return;
+ wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE){
card->use_hard_stop = 1;
@@ -821,14 +823,15 @@ __qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags)
again:
list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
if (addr->is_multicast) {
+ list_del(&addr->entry);
spin_unlock_irqrestore(&card->ip_lock, *flags);
rc = qeth_deregister_addr_entry(card, addr);
spin_lock_irqsave(&card->ip_lock, *flags);
if (!rc) {
- list_del(&addr->entry);
kfree(addr);
goto again;
- }
+ } else
+ list_add(&addr->entry, &card->ip_list);
}
}
}
@@ -1542,16 +1545,21 @@ qeth_idx_write_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
card = CARD_FROM_CDEV(channel->ccwdev);
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
- PRINT_ERR("IDX_ACTIVATE on write channel device %s: negative "
- "reply\n", CARD_WDEV_ID(card));
+ if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
+ PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
+ "adapter exclusively used by another host\n",
+ CARD_WDEV_ID(card));
+ else
+ PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
+ "negative reply\n", CARD_WDEV_ID(card));
goto out;
}
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
- "function level mismatch "
- "(sent: 0x%x, received: 0x%x)\n",
- CARD_WDEV_ID(card), card->info.func_level, temp);
+ "function level mismatch "
+ "(sent: 0x%x, received: 0x%x)\n",
+ CARD_WDEV_ID(card), card->info.func_level, temp);
goto out;
}
channel->state = CH_STATE_UP;
@@ -1597,8 +1605,13 @@ qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
goto out;
}
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
- PRINT_ERR("IDX_ACTIVATE on read channel device %s: negative "
- "reply\n", CARD_RDEV_ID(card));
+ if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
+ PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
+ "adapter exclusively used by another host\n",
+ CARD_RDEV_ID(card));
+ else
+ PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
+ "negative reply\n", CARD_RDEV_ID(card));
goto out;
}
@@ -1613,8 +1626,8 @@ qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if (temp != qeth_peer_func_level(card->info.func_level)) {
PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
- "level mismatch (sent: 0x%x, received: 0x%x)\n",
- CARD_RDEV_ID(card), card->info.func_level, temp);
+ "level mismatch (sent: 0x%x, received: 0x%x)\n",
+ CARD_RDEV_ID(card), card->info.func_level, temp);
goto out;
}
memcpy(&card->token.issuer_rm_r,
@@ -2496,7 +2509,7 @@ qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
struct iphdr *ip_hdr;
QETH_DBF_TEXT(trace,5,"skbfktr");
- skb_set_mac_header(skb, -QETH_FAKE_LL_LEN_TR);
+ skb_set_mac_header(skb, (int)-QETH_FAKE_LL_LEN_TR);
/* this is a fake ethernet header */
fake_hdr = tr_hdr(skb);
@@ -2689,10 +2702,15 @@ qeth_process_inbound_buffer(struct qeth_card *card,
qeth_layer2_rebuild_skb(card, skb, hdr);
else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
vlan_tag = qeth_rebuild_skb(card, skb, hdr);
- else { /*in case of OSN*/
+ else if (hdr->hdr.osn.id == QETH_HEADER_TYPE_OSN) {
skb_push(skb, sizeof(struct qeth_hdr));
skb_copy_to_linear_data(skb, hdr,
sizeof(struct qeth_hdr));
+ } else { /* unknown header type */
+ dev_kfree_skb_any(skb);
+ QETH_DBF_TEXT(trace, 3, "inbunkno");
+ QETH_DBF_HEX(control, 3, hdr, QETH_DBF_CONTROL_LEN);
+ continue;
}
/* is device UP ? */
if (!(card->dev->flags & IFF_UP)){
@@ -2804,13 +2822,16 @@ qeth_queue_input_buffer(struct qeth_card *card, int index)
if (newcount < count) {
/* we are in memory shortage so we switch back to
traditional skb allocation and drop packets */
- if (atomic_cmpxchg(&card->force_alloc_skb, 0, 1))
- printk(KERN_WARNING
- "qeth: switch to alloc skb\n");
+ if (!atomic_read(&card->force_alloc_skb) &&
+ net_ratelimit())
+ PRINT_WARN("Switch to alloc skb\n");
+ atomic_set(&card->force_alloc_skb, 3);
count = newcount;
} else {
- if (atomic_cmpxchg(&card->force_alloc_skb, 1, 0))
- printk(KERN_WARNING "qeth: switch to sg\n");
+ if ((atomic_read(&card->force_alloc_skb) == 1) &&
+ net_ratelimit())
+ PRINT_WARN("Switch to sg\n");
+ atomic_add_unless(&card->force_alloc_skb, -1, 0);
}
/*
@@ -3354,10 +3375,12 @@ out_freeoutq:
while (i > 0)
kfree(card->qdio.out_qs[--i]);
kfree(card->qdio.out_qs);
+ card->qdio.out_qs = NULL;
out_freepool:
qeth_free_buffer_pool(card);
out_freeinq:
kfree(card->qdio.in_q);
+ card->qdio.in_q = NULL;
out_nomem:
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
return -ENOMEM;
@@ -3373,16 +3396,20 @@ qeth_free_qdio_buffers(struct qeth_card *card)
QETH_QDIO_UNINITIALIZED)
return;
kfree(card->qdio.in_q);
+ card->qdio.in_q = NULL;
/* inbound buffer pool */
qeth_free_buffer_pool(card);
/* free outbound qdio_qs */
- for (i = 0; i < card->qdio.no_out_queues; ++i){
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
- qeth_clear_output_buffer(card->qdio.out_qs[i],
- &card->qdio.out_qs[i]->bufs[j]);
- kfree(card->qdio.out_qs[i]);
+ if (card->qdio.out_qs) {
+ for (i = 0; i < card->qdio.no_out_queues; ++i) {
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
+ qeth_clear_output_buffer(card->qdio.out_qs[i],
+ &card->qdio.out_qs[i]->bufs[j]);
+ kfree(card->qdio.out_qs[i]);
+ }
+ kfree(card->qdio.out_qs);
+ card->qdio.out_qs = NULL;
}
- kfree(card->qdio.out_qs);
}
static void
@@ -3393,7 +3420,7 @@ qeth_clear_qdio_buffers(struct qeth_card *card)
QETH_DBF_TEXT(trace, 2, "clearqdbf");
/* clear outbound buffers to free skbs */
for (i = 0; i < card->qdio.no_out_queues; ++i)
- if (card->qdio.out_qs[i]){
+ if (card->qdio.out_qs && card->qdio.out_qs[i]) {
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
qeth_clear_output_buffer(card->qdio.out_qs[i],
&card->qdio.out_qs[i]->bufs[j]);
@@ -3769,8 +3796,8 @@ qeth_get_netdevice(enum qeth_card_types type, enum qeth_link_types linktype)
/*hard_header fake function; used in case fake_ll is set */
static int
qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type, void *daddr, void *saddr,
- unsigned len)
+ unsigned short type, const void *daddr, const void *saddr,
+ unsigned len)
{
if(dev->type == ARPHRD_IEEE802_TR){
struct trh_hdr *hdr;
@@ -3793,6 +3820,11 @@ qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
}
}
+static const struct header_ops qeth_fake_ops = {
+ .create = qeth_fake_header,
+ .parse = qeth_hard_header_parse,
+};
+
static int
qeth_send_packet(struct qeth_card *, struct sk_buff *);
@@ -4482,7 +4514,8 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
/* check if we have enough elements (including following
* free buffers) to handle eddp context */
if (qeth_eddp_check_buffers_for_context(queue,ctx) < 0){
- printk("eddp tx_dropped 1\n");
+ if (net_ratelimit())
+ PRINT_WARN("eddp tx_dropped 1\n");
rc = -EBUSY;
goto out;
}
@@ -4553,6 +4586,53 @@ qeth_get_elements_no(struct qeth_card *card, void *hdr,
return elements_needed;
}
+static void qeth_tx_csum(struct sk_buff *skb)
+{
+ int tlen;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ tlen = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2);
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_TCP:
+ tcp_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check = csum_tcpudp_magic(
+ ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+ tlen, ip_hdr(skb)->protocol,
+ skb_checksum(skb, skb_transport_offset(skb),
+ tlen, 0));
+ break;
+ case IPPROTO_UDP:
+ udp_hdr(skb)->check = 0;
+ udp_hdr(skb)->check = csum_tcpudp_magic(
+ ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+ tlen, ip_hdr(skb)->protocol,
+ skb_checksum(skb, skb_transport_offset(skb),
+ tlen, 0));
+ break;
+ }
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ switch (ipv6_hdr(skb)->nexthdr) {
+ case IPPROTO_TCP:
+ tcp_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check = csum_ipv6_magic(
+ &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
+ ipv6_hdr(skb)->payload_len,
+ ipv6_hdr(skb)->nexthdr,
+ skb_checksum(skb, skb_transport_offset(skb),
+ ipv6_hdr(skb)->payload_len, 0));
+ break;
+ case IPPROTO_UDP:
+ udp_hdr(skb)->check = 0;
+ udp_hdr(skb)->check = csum_ipv6_magic(
+ &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
+ ipv6_hdr(skb)->payload_len,
+ ipv6_hdr(skb)->nexthdr,
+ skb_checksum(skb, skb_transport_offset(skb),
+ ipv6_hdr(skb)->payload_len, 0));
+ break;
+ }
+ }
+}
static int
qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
@@ -4584,7 +4664,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
[qeth_get_priority_queue(card, skb, ipv, cast_type)];
if (!card->options.layer2) {
ipv = qeth_get_ip_version(skb);
- if ((card->dev->hard_header == qeth_fake_header) && ipv) {
+ if ((card->dev->header_ops == &qeth_fake_ops) && ipv) {
new_skb = qeth_pskb_unshare(skb, GFP_ATOMIC);
if (!new_skb)
return -ENOMEM;
@@ -4638,12 +4718,22 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
elements_needed += elems;
}
+ if ((large_send == QETH_LARGE_SEND_NO) &&
+ (skb->ip_summed == CHECKSUM_PARTIAL))
+ qeth_tx_csum(new_skb);
+
if (card->info.type != QETH_CARD_TYPE_IQD)
rc = qeth_do_send_packet(card, queue, new_skb, hdr,
elements_needed, ctx);
- else
+ else {
+ if ((!card->options.layer2) &&
+ (ipv == 0)) {
+ __qeth_free_new_skb(skb, new_skb);
+ return -EPERM;
+ }
rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
elements_needed, ctx);
+ }
if (!rc) {
card->stats.tx_packets++;
card->stats.tx_bytes += tx_bytes;
@@ -6385,20 +6475,18 @@ qeth_deregister_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
static u32
qeth_ethtool_get_tx_csum(struct net_device *dev)
{
- /* We may need to say that we support tx csum offload if
- * we do EDDP or TSO. There are discussions going on to
- * enforce rules in the stack and in ethtool that make
- * SG and TSO depend on HW_CSUM. At the moment there are
- * no such rules....
- * If we say yes here, we have to checksum outbound packets
- * any time. */
- return 0;
+ return (dev->features & NETIF_F_HW_CSUM) != 0;
}
static int
qeth_ethtool_set_tx_csum(struct net_device *dev, u32 data)
{
- return -EINVAL;
+ if (data)
+ dev->features |= NETIF_F_HW_CSUM;
+ else
+ dev->features &= ~NETIF_F_HW_CSUM;
+
+ return 0;
}
static u32
@@ -6488,12 +6576,16 @@ static struct ethtool_ops qeth_ethtool_ops = {
};
static int
-qeth_hard_header_parse(struct sk_buff *skb, unsigned char *haddr)
+qeth_hard_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
- struct qeth_card *card;
- struct ethhdr *eth;
+ const struct qeth_card *card;
+ const struct ethhdr *eth;
+ struct net_device *dev = skb->dev;
- card = qeth_get_card_from_dev(skb->dev);
+ if (dev->type != ARPHRD_IEEE802_TR)
+ return 0;
+
+ card = qeth_get_card_from_dev(dev);
if (card->options.layer2)
goto haveheader;
#ifdef CONFIG_QETH_IPV6
@@ -6523,6 +6615,10 @@ haveheader:
return ETH_ALEN;
}
+static const struct header_ops qeth_null_ops = {
+ .parse = qeth_hard_header_parse,
+};
+
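The qeth_null_ops table exists because, as of this kernel generation, the per-netdev callbacks dev->hard_header, dev->hard_header_parse, dev->rebuild_header, dev->hard_header_cache and dev->header_cache_update are folded into a single const struct header_ops table; that is also why the IFF_NOARP special-casing below disappears and the Token Ring check moves into the parse routine itself. A rough sketch (not part of the patch) of how the old pointers map onto the new fields:

/* Illustrative sketch only -- not part of this patch. */
#include <linux/netdevice.h>

static const struct header_ops example_header_ops = {
	.create = NULL,                   /* formerly dev->hard_header       */
	.parse  = qeth_hard_header_parse, /* formerly dev->hard_header_parse */
	/* .rebuild, .cache and .cache_update replace the remaining pointers. */
};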
static int
qeth_netdev_init(struct net_device *dev)
{
@@ -6547,12 +6643,8 @@ qeth_netdev_init(struct net_device *dev)
dev->vlan_rx_kill_vid = qeth_vlan_rx_kill_vid;
dev->vlan_rx_add_vid = qeth_vlan_rx_add_vid;
#endif
- if (qeth_get_netdev_flags(card) & IFF_NOARP) {
- dev->rebuild_header = NULL;
- dev->hard_header = NULL;
- dev->header_cache_update = NULL;
- dev->hard_header_cache = NULL;
- }
+ dev->header_ops = &qeth_null_ops;
+
#ifdef CONFIG_QETH_IPV6
/*IPv6 address autoconfiguration stuff*/
if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
@@ -6560,11 +6652,8 @@ qeth_netdev_init(struct net_device *dev)
#endif
if (card->options.fake_ll &&
(qeth_get_netdev_flags(card) & IFF_NOARP))
- dev->hard_header = qeth_fake_header;
- if (dev->type == ARPHRD_IEEE802_TR)
- dev->hard_header_parse = NULL;
- else
- dev->hard_header_parse = qeth_hard_header_parse;
+ dev->header_ops = &qeth_fake_ops;
+
dev->set_mac_address = qeth_layer2_set_mac_address;
dev->flags |= qeth_get_netdev_flags(card);
if ((card->options.fake_broadcast) ||
@@ -6576,7 +6665,6 @@ qeth_netdev_init(struct net_device *dev)
dev->mtu = card->info.initial_mtu;
if (card->info.type != QETH_CARD_TYPE_OSN)
SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops);
- SET_MODULE_OWNER(dev);
return 0;
}
@@ -6668,10 +6756,10 @@ retry:
}
/*network device will be recovered*/
if (card->dev) {
- card->dev->hard_header = card->orig_hard_header;
+ card->dev->header_ops = card->orig_header_ops;
if (card->options.fake_ll &&
(qeth_get_netdev_flags(card) & IFF_NOARP))
- card->dev->hard_header = qeth_fake_header;
+ card->dev->header_ops = &qeth_fake_ops;
return 0;
}
/* at first set_online allocate netdev */
@@ -6685,7 +6773,7 @@ retry:
goto out;
}
card->dev->priv = card;
- card->orig_hard_header = card->dev->hard_header;
+ card->orig_header_ops = card->dev->header_ops;
card->dev->type = qeth_get_arphdr_type(card->info.type,
card->info.link_type);
card->dev->init = qeth_netdev_init;
@@ -7412,7 +7500,8 @@ qeth_start_ipa_tso(struct qeth_card *card)
}
if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)){
card->options.large_send = QETH_LARGE_SEND_NO;
- card->dev->features &= ~ (NETIF_F_TSO | NETIF_F_SG);
+ card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
+ NETIF_F_HW_CSUM);
}
return rc;
}
@@ -7552,22 +7641,26 @@ qeth_set_large_send(struct qeth_card *card, enum qeth_large_send_types type)
card->options.large_send = type;
switch (card->options.large_send) {
case QETH_LARGE_SEND_EDDP:
- card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
+ card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
+ NETIF_F_HW_CSUM;
break;
case QETH_LARGE_SEND_TSO:
if (qeth_is_supported(card, IPA_OUTBOUND_TSO)){
- card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
+ card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
+ NETIF_F_HW_CSUM;
} else {
PRINT_WARN("TSO not supported on %s. "
"large_send set to 'no'.\n",
card->dev->name);
- card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
+ card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
+ NETIF_F_HW_CSUM);
card->options.large_send = QETH_LARGE_SEND_NO;
rc = -EOPNOTSUPP;
}
break;
default: /* includes QETH_LARGE_SEND_NO */
- card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
+ card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
+ NETIF_F_HW_CSUM);
break;
}
if (card->state == CARD_STATE_UP)
@@ -8231,7 +8324,7 @@ qeth_arp_constructor(struct neighbour *neigh)
if (card == NULL)
goto out;
if((card->options.layer2) ||
- (card->dev->hard_header == qeth_fake_header))
+ (card->dev->header_ops == &qeth_fake_ops))
goto out;
rcu_read_lock();
diff --git a/drivers/s390/net/qeth_mpc.h b/drivers/s390/net/qeth_mpc.h
index 1d8083c91765..6de2da5ed5fd 100644
--- a/drivers/s390/net/qeth_mpc.h
+++ b/drivers/s390/net/qeth_mpc.h
@@ -565,6 +565,7 @@ extern unsigned char IDX_ACTIVATE_WRITE[];
#define QETH_IDX_ACT_QDIO_DEV_REALADDR(buffer) (buffer+0x20)
#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08]&3)==2)
#define QETH_IDX_REPLY_LEVEL(buffer) (buffer+0x12)
+#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09]
#define PDU_ENCAPSULATION(buffer) \
(buffer + *(buffer + (*(buffer+0x0b)) + \
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c
index bb0287ad1aac..2cc3f3a0e393 100644
--- a/drivers/s390/net/qeth_sys.c
+++ b/drivers/s390/net/qeth_sys.c
@@ -1760,10 +1760,10 @@ qeth_remove_device_attributes(struct device *dev)
{
struct qeth_card *card = dev->driver_data;
- if (card->info.type == QETH_CARD_TYPE_OSN)
- return sysfs_remove_group(&dev->kobj,
- &qeth_osn_device_attr_group);
-
+ if (card->info.type == QETH_CARD_TYPE_OSN) {
+ sysfs_remove_group(&dev->kobj, &qeth_osn_device_attr_group);
+ return;
+ }
sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 2773724c5c77..e01cbf152a81 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -28,7 +28,7 @@ static void zfcp_ccw_remove(struct ccw_device *);
static int zfcp_ccw_set_online(struct ccw_device *);
static int zfcp_ccw_set_offline(struct ccw_device *);
static int zfcp_ccw_notify(struct ccw_device *, int);
-static void zfcp_ccw_shutdown(struct device *);
+static void zfcp_ccw_shutdown(struct ccw_device *);
static struct ccw_device_id zfcp_ccw_device_id[] = {
{CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
@@ -51,9 +51,7 @@ static struct ccw_driver zfcp_ccw_driver = {
.set_online = zfcp_ccw_set_online,
.set_offline = zfcp_ccw_set_offline,
.notify = zfcp_ccw_notify,
- .driver = {
- .shutdown = zfcp_ccw_shutdown,
- },
+ .shutdown = zfcp_ccw_shutdown,
};
MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
@@ -271,12 +269,12 @@ zfcp_ccw_register(void)
* Makes sure that QDIO queues are down when the system gets stopped.
*/
static void
-zfcp_ccw_shutdown(struct device *dev)
+zfcp_ccw_shutdown(struct ccw_device *cdev)
{
struct zfcp_adapter *adapter;
down(&zfcp_data.config_sema);
- adapter = dev_get_drvdata(dev);
+ adapter = dev_get_drvdata(&cdev->dev);
zfcp_erp_adapter_shutdown(adapter, 0);
zfcp_erp_wait(adapter);
up(&zfcp_data.config_sema);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 5f3212440f68..ffa3bf756943 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -19,8 +19,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <asm/debug.h>
#include <linux/ctype.h>
+#include <asm/debug.h>
#include "zfcp_ext.h"
static u32 dbfsize = 4;
@@ -35,17 +35,17 @@ static int
zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck)
{
unsigned long long sec;
- struct timespec xtime;
+ struct timespec dbftime;
int len = 0;
stck -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
sec = stck >> 12;
do_div(sec, 1000000);
- xtime.tv_sec = sec;
+ dbftime.tv_sec = sec;
stck -= (sec * 1000000) << 12;
- xtime.tv_nsec = ((stck * 1000) >> 12);
+ dbftime.tv_nsec = ((stck * 1000) >> 12);
len += sprintf(out_buf + len, "%-24s%011lu:%06lu\n",
- label, xtime.tv_sec, xtime.tv_nsec);
+ label, dbftime.tv_sec, dbftime.tv_nsec);
return len;
}
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 16bfc66ac2e8..a6475a2bb8a7 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -54,7 +54,7 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *, int);
static int zfcp_erp_strategy_statechange(int, u32, struct zfcp_adapter *,
struct zfcp_port *,
struct zfcp_unit *, int);
-static inline int zfcp_erp_strategy_statechange_detected(atomic_t *, u32);
+static int zfcp_erp_strategy_statechange_detected(atomic_t *, u32);
static int zfcp_erp_strategy_followup_actions(int, struct zfcp_adapter *,
struct zfcp_port *,
struct zfcp_unit *, int);
@@ -106,8 +106,8 @@ static void zfcp_erp_action_cleanup(int, struct zfcp_adapter *,
static void zfcp_erp_action_ready(struct zfcp_erp_action *);
static int zfcp_erp_action_exists(struct zfcp_erp_action *);
-static inline void zfcp_erp_action_to_ready(struct zfcp_erp_action *);
-static inline void zfcp_erp_action_to_running(struct zfcp_erp_action *);
+static void zfcp_erp_action_to_ready(struct zfcp_erp_action *);
+static void zfcp_erp_action_to_running(struct zfcp_erp_action *);
static void zfcp_erp_memwait_handler(unsigned long);
@@ -952,7 +952,7 @@ zfcp_erp_memwait_handler(unsigned long data)
* action gets an appropriate flag and will be processed
* accordingly
*/
-void zfcp_erp_timeout_handler(unsigned long data)
+static void zfcp_erp_timeout_handler(unsigned long data)
{
struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;
struct zfcp_adapter *adapter = erp_action->adapter;
@@ -1491,7 +1491,7 @@ zfcp_erp_strategy_statechange(int action,
return retval;
}
-static inline int
+static int
zfcp_erp_strategy_statechange_detected(atomic_t * target_status, u32 erp_status)
{
return
@@ -2001,7 +2001,7 @@ zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *erp_action, int close)
* returns: 0 - successful setup
* !0 - failed setup
*/
-int
+static int
zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
{
int retval;
@@ -3248,8 +3248,7 @@ static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
zfcp_erp_action_dismiss(&unit->erp_action);
}
-static inline void
-zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
+static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
{
struct zfcp_adapter *adapter = erp_action->adapter;
@@ -3258,8 +3257,7 @@ zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
}
-static inline void
-zfcp_erp_action_to_ready(struct zfcp_erp_action *erp_action)
+static void zfcp_erp_action_to_ready(struct zfcp_erp_action *erp_action)
{
struct zfcp_adapter *adapter = erp_action->adapter;