From 6fdc03709433ccc2005f0f593ae9d9dd04f7b485 Mon Sep 17 00:00:00 2001
From: Stefan Richter
Date: Sat, 20 Jun 2009 13:23:59 +0200
Subject: firewire: core: do not DMA-map stack addresses

The DMA mapping API cannot map on-stack addresses, as explained in
Documentation/DMA-mapping.txt.

Convert the two cases of on-stack packet payload buffers in firewire-core
(payload of lock requests in the bus manager work and in iso resource
management) to slab-allocated memory.

There are a number of on-stack buffers for quadlet write or quadlet read
requests in firewire-core and firewire-sbp2.  These are harmless; they are
copied to/from card driver internal DMA buffers since quadlet payloads are
inlined with packet headers.

Signed-off-by: Stefan Richter
---
 drivers/firewire/core-card.c | 14 +++++++-------
 drivers/firewire/core-cdev.c |  4 +++-
 drivers/firewire/core-iso.c  | 24 +++++++++++++-----------
 drivers/firewire/core.h      |  3 ++-
 include/linux/firewire.h     |  1 +
 5 files changed, 26 insertions(+), 20 deletions(-)

diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 543fccac81bb..f74edae5cb4c 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -196,8 +196,8 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation)
 {
 	int channel, bandwidth = 0;
 
-	fw_iso_resource_manage(card, generation, 1ULL << 31,
-			       &channel, &bandwidth, true);
+	fw_iso_resource_manage(card, generation, 1ULL << 31, &channel,
+			       &bandwidth, true, card->bm_transaction_data);
 	if (channel == 31) {
 		card->broadcast_channel_allocated = true;
 		device_for_each_child(card->device, (void *)(long)generation,
@@ -230,7 +230,6 @@ static void fw_card_bm_work(struct work_struct *work)
 	bool do_reset = false;
 	bool root_device_is_running;
 	bool root_device_is_cmc;
-	__be32 lock_data[2];
 
 	spin_lock_irqsave(&card->lock, flags);
 
@@ -273,22 +272,23 @@ static void fw_card_bm_work(struct work_struct *work)
 			goto pick_me;
 		}
 
-		lock_data[0] = cpu_to_be32(0x3f);
-		lock_data[1] = cpu_to_be32(local_id);
+		card->bm_transaction_data[0] = cpu_to_be32(0x3f);
+		card->bm_transaction_data[1] = cpu_to_be32(local_id);
 
 		spin_unlock_irqrestore(&card->lock, flags);
 
 		rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
 				irm_id, generation, SCODE_100,
 				CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
-				lock_data, sizeof(lock_data));
+				card->bm_transaction_data,
+				sizeof(card->bm_transaction_data));
 
 		if (rcode == RCODE_GENERATION)
 			/* Another bus reset, BM work has been rescheduled. */
 			goto out;
 
 		if (rcode == RCODE_COMPLETE &&
-		    lock_data[0] != cpu_to_be32(0x3f)) {
+		    card->bm_transaction_data[0] != cpu_to_be32(0x3f)) {
 
 			/* Somebody else is BM.  Only act as IRM. */
 			if (local_id == irm_id)
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index d1d30c615b0f..ced186d7e9a9 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -125,6 +125,7 @@ struct iso_resource {
 	int generation;
 	u64 channels;
 	s32 bandwidth;
+	__be32 transaction_data[2];
 	struct iso_resource_event *e_alloc, *e_dealloc;
 };
 
@@ -1049,7 +1050,8 @@ static void iso_resource_work(struct work_struct *work)
 			r->channels, &channel, &bandwidth,
 			todo == ISO_RES_ALLOC ||
 			todo == ISO_RES_REALLOC ||
-			todo == ISO_RES_ALLOC_ONCE);
+			todo == ISO_RES_ALLOC_ONCE,
+			r->transaction_data);
 	/*
 	 * Is this generation outdated already?  As long as this resource sticks
 	 * in the idr, it will be scheduled again for a newer generation or at
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 166f19c6d38d..110e731f5574 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -177,9 +177,8 @@ EXPORT_SYMBOL(fw_iso_context_stop);
  */
 
 static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
-			    int bandwidth, bool allocate)
+			    int bandwidth, bool allocate, __be32 data[2])
 {
-	__be32 data[2];
 	int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
 
 	/*
@@ -215,9 +214,9 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
 }
 
 static int manage_channel(struct fw_card *card, int irm_id, int generation,
-			  u32 channels_mask, u64 offset, bool allocate)
+			  u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
 {
-	__be32 data[2], c, all, old;
+	__be32 c, all, old;
 	int i, retry = 5;
 
 	old = all = allocate ? cpu_to_be32(~0) : 0;
@@ -260,7 +259,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 }
 
 static void deallocate_channel(struct fw_card *card, int irm_id,
-			       int generation, int channel)
+			       int generation, int channel, __be32 buffer[2])
 {
 	u32 mask;
 	u64 offset;
@@ -269,7 +268,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
 	offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
 				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
 
-	manage_channel(card, irm_id, generation, mask, offset, false);
+	manage_channel(card, irm_id, generation, mask, offset, false, buffer);
 }
 
 /**
@@ -298,7 +297,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
  */
 void fw_iso_resource_manage(struct fw_card *card, int generation,
 			    u64 channels_mask, int *channel, int *bandwidth,
-			    bool allocate)
+			    bool allocate, __be32 buffer[2])
 {
 	u32 channels_hi = channels_mask;	/* channels 31...0 */
 	u32 channels_lo = channels_mask >> 32;	/* channels 63...32 */
@@ -310,10 +309,12 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
 
 	if (channels_hi)
 		c = manage_channel(card, irm_id, generation, channels_hi,
-		    CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
+				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
+				allocate, buffer);
 	if (channels_lo && c < 0) {
 		c = manage_channel(card, irm_id, generation, channels_lo,
-		    CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
+				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
+				allocate, buffer);
 		if (c >= 0)
 			c += 32;
 	}
@@ -325,12 +326,13 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
 	if (*bandwidth == 0)
 		return;
 
-	ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
+	ret = manage_bandwidth(card, irm_id, generation, *bandwidth,
+			       allocate, buffer);
 	if (ret < 0)
 		*bandwidth = 0;
 
 	if (allocate && ret < 0 && c >= 0) {
-		deallocate_channel(card, irm_id, generation, c);
+		deallocate_channel(card, irm_id, generation, c, buffer);
 		*channel = ret;
 	}
 }
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index c3cfc647e5e3..6052816be353 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -120,7 +120,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
 int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
 
 void fw_iso_resource_manage(struct fw_card *card, int generation,
-		u64 channels_mask, int *channel, int *bandwidth, bool allocate);
+		u64 channels_mask, int *channel, int *bandwidth,
+		bool allocate, __be32 buffer[2]);
 
 
 /* -topology */
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 9823946adbc5..192d1e43c43c 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -127,6 +127,7 @@ struct fw_card {
 	struct delayed_work work;
 	int bm_retries;
 	int bm_generation;
+	__be32 bm_transaction_data[2];
 
 	bool broadcast_channel_allocated;
 	u32 broadcast_channel;
--
cgit v1.2.1

From af2719415a5ceae06f2a6d33e78b555e64697fc8 Mon Sep 17 00:00:00 2001
From: Stefan Richter
Date: Tue, 30 Jun 2009 20:27:59 +0200
Subject: firewire: sbp2: add support for disks >2 TB (and 16 bytes long CDBs)

Increase the command ORB data structure to transport up to 16 bytes long
CDBs (instead of 12 bytes), and tell the SCSI mid layer about it.  This is
notably necessary for READ CAPACITY(16) and friends, i.e. support of large
disks.

Signed-off-by: Stefan Richter
---
 drivers/firewire/sbp2.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 2353643721c1..d27cb058da82 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -200,6 +200,12 @@ static struct fw_device *target_device(struct sbp2_target *tgt)
 #define SBP2_RETRY_LIMIT	0xf		/* 15 retries */
 #define SBP2_CYCLE_LIMIT	(0xc8 << 12)	/* 200 125us cycles */
 
+/*
+ * There is no transport protocol limit to the CDB length, but we implement
+ * a fixed length only.  16 bytes is enough for disks larger than 2 TB.
+ */
+#define SBP2_MAX_CDB_SIZE 16
+
 /*
  * The default maximum s/g segment size of a FireWire controller is
  * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
@@ -312,7 +318,7 @@ struct sbp2_command_orb {
 		struct sbp2_pointer next;
 		struct sbp2_pointer data_descriptor;
 		__be32 misc;
-		u8 command_block[12];
+		u8 command_block[SBP2_MAX_CDB_SIZE];
 	} request;
 	struct scsi_cmnd *cmd;
 	scsi_done_fn_t done;
@@ -1146,6 +1152,8 @@ static int sbp2_probe(struct device *dev)
 	if (fw_device_enable_phys_dma(device) < 0)
 		goto fail_shost_put;
 
+	shost->max_cmd_len = SBP2_MAX_CDB_SIZE;
+
 	if (scsi_add_host(shost, &unit->device) < 0)
 		goto fail_shost_put;
 
--
cgit v1.2.1

From ebbb16bffa646f853899ef3fdc0ac7abab888703 Mon Sep 17 00:00:00 2001
From: Stefan Richter
Date: Tue, 30 Jun 2009 20:28:31 +0200
Subject: ieee1394: sbp2: add support for disks >2 TB (and 16 bytes long CDBs)

Increase the command ORB data structure to transport up to 16 bytes long
CDBs (instead of 12 bytes), and tell the SCSI mid layer about it.  This is
notably necessary for READ CAPACITY(16) and friends, i.e. support of large
disks.

Signed-off-by: Stefan Richter
---
 drivers/ieee1394/sbp2.c | 1 +
 drivers/ieee1394/sbp2.h | 8 +++++++-
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index a51ab233342d..f599f4934739 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -880,6 +880,7 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
 	}
 
 	shost->hostdata[0] = (unsigned long)lu;
+	shost->max_cmd_len = SBP2_MAX_CDB_SIZE;
 
 	if (!scsi_add_host(shost, &ud->device)) {
 		lu->shost = shost;
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index c5036f1cc5b0..64a3a66a8a39 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -24,6 +24,12 @@
 
 #define SBP2_DEVICE_NAME	"sbp2"
 
+/*
+ * There is no transport protocol limit to the CDB length, but we implement
+ * a fixed length only.  16 bytes is enough for disks larger than 2 TB.
+ */
+#define SBP2_MAX_CDB_SIZE 16
+
 /*
  * SBP-2 specific definitions
  */
@@ -51,7 +57,7 @@ struct sbp2_command_orb {
 	u32 data_descriptor_hi;
 	u32 data_descriptor_lo;
 	u32 misc;
-	u8 cdb[12];
+	u8 cdb[SBP2_MAX_CDB_SIZE];
 } __attribute__((packed));
 
 #define SBP2_LOGIN_REQUEST	0x0
--
cgit v1.2.1
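A minimal background sketch for the first patch above ("firewire: core: do not DMA-map stack addresses"), illustrating the pattern it applies: a lock-request payload is DMA-mapped by the card driver, so it must live in long-lived, slab-allocated memory rather than in an on-stack array. The container struct bm_state, the helper my_lock_irm(), and the value 0x42 below are hypothetical and only for illustration; the actual patch embeds the buffers in struct fw_card (bm_transaction_data) and struct iso_resource (transaction_data). Only the fw_run_transaction() call mirrors the usage visible in the diff.

/* Sketch only; assumes a firewire-core kernel build context. */
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <asm/byteorder.h>

struct bm_state {			/* hypothetical container object */
	struct fw_card *card;
	__be32 lock_data[2];		/* lives as long as the object, never on the stack */
};

static int my_lock_irm(struct bm_state *s, int irm_id, int generation)
{
	/*
	 * The variant the patch removes would declare "__be32 lock_data[2];"
	 * right here on the stack -- an address the DMA mapping API may not map.
	 */
	s->lock_data[0] = cpu_to_be32(0x3f);	/* expected old value */
	s->lock_data[1] = cpu_to_be32(0x42);	/* hypothetical new value */

	return fw_run_transaction(s->card, TCODE_LOCK_COMPARE_SWAP,
				  irm_id, generation, SCODE_100,
				  CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
				  s->lock_data, sizeof(s->lock_data));
}

Embedding the buffer in an object that is already slab-allocated, as the patch does with struct fw_card and struct iso_resource, avoids an extra allocation per transaction and ties the buffer's lifetime to its only user.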