author    | Linus Torvalds <torvalds@linux-foundation.org> | 2008-01-25 08:34:42 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-01-25 08:35:13 -0800
commit    | df8dc74e8a383eaf2d9b44b80a71ec6f0e52b42e (patch)
tree      | bc3799a43e8b94fa84b32e37b1c124d5e4868f50 /drivers/char
parent    | 556a169dab38b5100df6f4a45b655dddd3db94c1 (diff)
parent    | 4a3ad20ccd8f4d2a0535cf98fa83f7b561ba59a9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-2.6
This can be broken down into these major areas:
 - Documentation updates (language translations and fixes, as
   well as kobject and kset documentation updates).
 - Major kset/kobject/ktype rework and fixes.  This cleans up the
   kset, kobject and ktype relationship and architecture,
   making sense of things now, and good documentation and samples
   are provided for others to use.  Also the attributes for
   kobjects are much easier to handle now.  This cleaned up a LOT
   of code all through the kernel, making kobjects easier to use
   if you want to.  (A minimal sketch of the kref-based lifetime
   pattern that several of the conversions in this pull adopt
   follows this list.)
 - struct bus_type has been reworked to now handle the lifetime
   rules properly, as the kobject is properly dynamic.
 - struct driver has also been reworked, and the lifetime
   issues are now resolved.
 - The block subsystem has been converted to use struct device
   now, and not "raw" kobjects.  This patch has been in the -mm
   tree for over a year now, and finally all the issues are
   worked out with it.  Older distros now work properly with new
   kernels, and no userspace updates are needed at all.
 - The nozomi driver is added.  This has also been in -mm for a
   long time, and many people have asked for it to go in.  It is
   now in good enough shape to do so.
 - Lots of class_device conversions to use struct device instead.
   The tree is almost all cleaned up now; only SCSI and IB remain
   to be fixed up...
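In the drivers/char part of this pull (hvc_console.c and hvcs.c in the diffstat below), an embedded struct kobject that was used purely for reference counting is replaced by a struct kref with an explicit release callback. The following is a minimal sketch of that kref lifetime pattern, assuming a made-up struct foo_dev and foo_dev_* helpers for illustration; it is not code from the series itself.

```c
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical structure whose lifetime is governed by a kref,
 * mirroring what hvc_struct/hvcs_struct do after this series. */
struct foo_dev {
	spinlock_t lock;
	struct kref kref;	/* ref count & foo_dev lifetime */
};

/* Release callback: runs when the last kref_put() drops the count to zero. */
static void foo_dev_release(struct kref *kref)
{
	struct foo_dev *fd = container_of(kref, struct foo_dev, kref);

	kfree(fd);
}

static struct foo_dev *foo_dev_alloc(void)
{
	struct foo_dev *fd = kzalloc(sizeof(*fd), GFP_KERNEL);

	if (!fd)
		return NULL;
	spin_lock_init(&fd->lock);
	kref_init(&fd->kref);	/* count starts at 1 for the creator */
	return fd;
}

/* Every additional user takes a reference... */
static void foo_dev_get(struct foo_dev *fd)
{
	kref_get(&fd->kref);
}

/* ...and drops it when done; the last put frees the object. */
static void foo_dev_put(struct foo_dev *fd)
{
	kref_put(&fd->kref, foo_dev_release);
}
```

Unlike a kobject, a kref carries no ktype and no sysfs representation, which is why it is the better fit for objects such as hvc_struct that only ever needed the reference count.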
* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-2.6: (196 commits)
Driver core: coding style fixes
Kobject: fix coding style issues in kobject c files
Kobject: fix coding style issues in kobject.h
Driver core: fix coding style issues in device.h
spi: use class iteration api
scsi: use class iteration api
rtc: use class iteration api
power supply : use class iteration api
ieee1394: use class iteration api
Driver Core: add class iteration api
Driver core: Cleanup get_device_parent() in device_add() and device_move()
UIO: constify function pointer tables
Driver Core: constify the name passed to platform_device_register_simple
driver core: fix build with SYSFS=n
sysfs: make SYSFS_DEPRECATED depend on SYSFS
Driver core: use LIST_HEAD instead of call to INIT_LIST_HEAD in __init
kobject: add sample code for how to use ksets/ktypes/kobjects
kobject: add sample code for how to use kobjects in a simple manner.
kobject: update the kobject/kset documentation
kobject: remove old, outdated documentation.
...
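Several of the commits above (spi, scsi, rtc, power supply, ieee1394) convert subsystems to the new class iteration API added by "Driver Core: add class iteration api". The sketch below shows roughly how a caller uses class_for_each_device(); mydrv_check_device and mydrv_count_devices are invented names, and the signature shown is the one introduced by this series, as far as I can tell; later kernels extended it with an additional start argument.

```c
#include <linux/device.h>

/* Hypothetical per-device callback: the iteration helper calls this once
 * for every device registered on the class, so the driver no longer has
 * to walk the class's device list and take its lock by hand. */
static int mydrv_check_device(struct device *dev, void *data)
{
	int *count = data;

	(*count)++;
	dev_info(dev, "found device #%d on the class\n", *count);
	return 0;	/* a non-zero return would stop the iteration */
}

static int mydrv_count_devices(struct class *cls)
{
	int count = 0;

	/* Signature as added in this series; newer kernels also take a
	 * 'start' device argument. */
	class_for_each_device(cls, &count, mydrv_check_device);
	return count;
}
```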
Diffstat (limited to 'drivers/char')
-rw-r--r-- | drivers/char/Kconfig        |   10
-rw-r--r-- | drivers/char/Makefile       |    1
-rw-r--r-- | drivers/char/hvc_console.c  |   80
-rw-r--r-- | drivers/char/hvcs.c         |   78
-rw-r--r-- | drivers/char/nozomi.c       | 1993
5 files changed, 2062 insertions(+), 100 deletions(-)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 2e3a0d4bc4c2..466629594776 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -373,6 +373,16 @@ config ISTALLION To compile this driver as a module, choose M here: the module will be called istallion. +config NOZOMI + tristate "HSDPA Broadband Wireless Data Card - Globe Trotter" + depends on PCI && EXPERIMENTAL + help + If you have a HSDPA driver Broadband Wireless Data Card - + Globe Trotter PCMCIA card, say Y here. + + To compile this driver as a module, choose M here, the module + will be called nozomi. + config A2232 tristate "Commodore A2232 serial support (EXPERIMENTAL)" depends on EXPERIMENTAL && ZORRO && BROKEN_ON_SMP diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 07304d50e0cb..96fc01eddefe 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -26,6 +26,7 @@ obj-$(CONFIG_SERIAL167) += serial167.o obj-$(CONFIG_CYCLADES) += cyclades.o obj-$(CONFIG_STALLION) += stallion.o obj-$(CONFIG_ISTALLION) += istallion.o +obj-$(CONFIG_NOZOMI) += nozomi.o obj-$(CONFIG_DIGIEPCA) += epca.o obj-$(CONFIG_SPECIALIX) += specialix.o obj-$(CONFIG_MOXA_INTELLIO) += moxa.o diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c index 8252f8668538..480fae29c9b2 100644 --- a/drivers/char/hvc_console.c +++ b/drivers/char/hvc_console.c @@ -27,7 +27,7 @@ #include <linux/init.h> #include <linux/kbd_kern.h> #include <linux/kernel.h> -#include <linux/kobject.h> +#include <linux/kref.h> #include <linux/kthread.h> #include <linux/list.h> #include <linux/module.h> @@ -89,7 +89,7 @@ struct hvc_struct { int irq_requested; int irq; struct list_head next; - struct kobject kobj; /* ref count & hvc_struct lifetime */ + struct kref kref; /* ref count & hvc_struct lifetime */ }; /* dynamic list of hvc_struct instances */ @@ -110,7 +110,7 @@ static int last_hvc = -1; /* * Do not call this function with either the hvc_structs_lock or the hvc_struct - * lock held. If successful, this function increments the kobject reference + * lock held. If successful, this function increments the kref reference * count against the target hvc_struct so it should be released when finished. */ static struct hvc_struct *hvc_get_by_index(int index) @@ -123,7 +123,7 @@ static struct hvc_struct *hvc_get_by_index(int index) list_for_each_entry(hp, &hvc_structs, next) { spin_lock_irqsave(&hp->lock, flags); if (hp->index == index) { - kobject_get(&hp->kobj); + kref_get(&hp->kref); spin_unlock_irqrestore(&hp->lock, flags); spin_unlock(&hvc_structs_lock); return hp; @@ -242,6 +242,23 @@ static int __init hvc_console_init(void) } console_initcall(hvc_console_init); +/* callback when the kboject ref count reaches zero. */ +static void destroy_hvc_struct(struct kref *kref) +{ + struct hvc_struct *hp = container_of(kref, struct hvc_struct, kref); + unsigned long flags; + + spin_lock(&hvc_structs_lock); + + spin_lock_irqsave(&hp->lock, flags); + list_del(&(hp->next)); + spin_unlock_irqrestore(&hp->lock, flags); + + spin_unlock(&hvc_structs_lock); + + kfree(hp); +} + /* * hvc_instantiate() is an early console discovery method which locates * consoles * prior to the vio subsystem discovering them. 
Hotplugged @@ -261,7 +278,7 @@ int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops) /* make sure no no tty has been registered in this index */ hp = hvc_get_by_index(index); if (hp) { - kobject_put(&hp->kobj); + kref_put(&hp->kref, destroy_hvc_struct); return -1; } @@ -318,9 +335,8 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) unsigned long flags; int irq = 0; int rc = 0; - struct kobject *kobjp; - /* Auto increments kobject reference if found. */ + /* Auto increments kref reference if found. */ if (!(hp = hvc_get_by_index(tty->index))) return -ENODEV; @@ -341,8 +357,6 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) if (irq) hp->irq_requested = 1; - kobjp = &hp->kobj; - spin_unlock_irqrestore(&hp->lock, flags); /* check error, fallback to non-irq */ if (irq) @@ -352,7 +366,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) * If the request_irq() fails and we return an error. The tty layer * will call hvc_close() after a failed open but we don't want to clean * up there so we'll clean up here and clear out the previously set - * tty fields and return the kobject reference. + * tty fields and return the kref reference. */ if (rc) { spin_lock_irqsave(&hp->lock, flags); @@ -360,7 +374,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) hp->irq_requested = 0; spin_unlock_irqrestore(&hp->lock, flags); tty->driver_data = NULL; - kobject_put(kobjp); + kref_put(&hp->kref, destroy_hvc_struct); printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc); } /* Force wakeup of the polling thread */ @@ -372,7 +386,6 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) static void hvc_close(struct tty_struct *tty, struct file * filp) { struct hvc_struct *hp; - struct kobject *kobjp; int irq = 0; unsigned long flags; @@ -382,7 +395,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) /* * No driver_data means that this close was issued after a failed * hvc_open by the tty layer's release_dev() function and we can just - * exit cleanly because the kobject reference wasn't made. + * exit cleanly because the kref reference wasn't made. */ if (!tty->driver_data) return; @@ -390,7 +403,6 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) hp = tty->driver_data; spin_lock_irqsave(&hp->lock, flags); - kobjp = &hp->kobj; if (--hp->count == 0) { if (hp->irq_requested) irq = hp->irq; @@ -417,7 +429,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) spin_unlock_irqrestore(&hp->lock, flags); } - kobject_put(kobjp); + kref_put(&hp->kref, destroy_hvc_struct); } static void hvc_hangup(struct tty_struct *tty) @@ -426,7 +438,6 @@ static void hvc_hangup(struct tty_struct *tty) unsigned long flags; int irq = 0; int temp_open_count; - struct kobject *kobjp; if (!hp) return; @@ -443,7 +454,6 @@ static void hvc_hangup(struct tty_struct *tty) return; } - kobjp = &hp->kobj; temp_open_count = hp->count; hp->count = 0; hp->n_outbuf = 0; @@ -457,7 +467,7 @@ static void hvc_hangup(struct tty_struct *tty) free_irq(irq, hp); while(temp_open_count) { --temp_open_count; - kobject_put(kobjp); + kref_put(&hp->kref, destroy_hvc_struct); } } @@ -729,27 +739,6 @@ static const struct tty_operations hvc_ops = { .chars_in_buffer = hvc_chars_in_buffer, }; -/* callback when the kboject ref count reaches zero. 
*/ -static void destroy_hvc_struct(struct kobject *kobj) -{ - struct hvc_struct *hp = container_of(kobj, struct hvc_struct, kobj); - unsigned long flags; - - spin_lock(&hvc_structs_lock); - - spin_lock_irqsave(&hp->lock, flags); - list_del(&(hp->next)); - spin_unlock_irqrestore(&hp->lock, flags); - - spin_unlock(&hvc_structs_lock); - - kfree(hp); -} - -static struct kobj_type hvc_kobj_type = { - .release = destroy_hvc_struct, -}; - struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq, struct hv_ops *ops, int outbuf_size) { @@ -776,8 +765,7 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq, hp->outbuf_size = outbuf_size; hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))]; - kobject_init(&hp->kobj); - hp->kobj.ktype = &hvc_kobj_type; + kref_init(&hp->kref); spin_lock_init(&hp->lock); spin_lock(&hvc_structs_lock); @@ -806,12 +794,10 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq, int __devexit hvc_remove(struct hvc_struct *hp) { unsigned long flags; - struct kobject *kobjp; struct tty_struct *tty; spin_lock_irqsave(&hp->lock, flags); tty = hp->tty; - kobjp = &hp->kobj; if (hp->index < MAX_NR_HVC_CONSOLES) vtermnos[hp->index] = -1; @@ -821,12 +807,12 @@ int __devexit hvc_remove(struct hvc_struct *hp) spin_unlock_irqrestore(&hp->lock, flags); /* - * We 'put' the instance that was grabbed when the kobject instance - * was initialized using kobject_init(). Let the last holder of this - * kobject cause it to be removed, which will probably be the tty_hangup + * We 'put' the instance that was grabbed when the kref instance + * was initialized using kref_init(). Let the last holder of this + * kref cause it to be removed, which will probably be the tty_hangup * below. */ - kobject_put(kobjp); + kref_put(&hp->kref, destroy_hvc_struct); /* * This function call will auto chain call hvc_hangup. The tty should diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c index 69d8866de783..fd7559084b82 100644 --- a/drivers/char/hvcs.c +++ b/drivers/char/hvcs.c @@ -57,11 +57,7 @@ * rescanning partner information upon a user's request. * * Each vty-server, prior to being exposed to this driver is reference counted - * using the 2.6 Linux kernel kobject construct. This kobject is also used by - * the vio bus to provide a vio device sysfs entry that this driver attaches - * device specific attributes to, including partner information. The vio bus - * framework also provides a sysfs entry for each vio driver. The hvcs driver - * provides driver attributes in this entry. + * using the 2.6 Linux kernel kref construct. * * For direction on installation and usage of this driver please reference * Documentation/powerpc/hvcs.txt. @@ -71,7 +67,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> -#include <linux/kobject.h> +#include <linux/kref.h> #include <linux/kthread.h> #include <linux/list.h> #include <linux/major.h> @@ -293,12 +289,12 @@ struct hvcs_struct { int chars_in_buffer; /* - * Any variable below the kobject is valid before a tty is connected and + * Any variable below the kref is valid before a tty is connected and * stays valid after the tty is disconnected. These shouldn't be * whacked until the koject refcount reaches zero though some entries * may be changed via sysfs initiatives. */ - struct kobject kobj; /* ref count & hvcs_struct lifetime */ + struct kref kref; /* ref count & hvcs_struct lifetime */ int connected; /* is the vty-server currently connected to a vty? 
*/ uint32_t p_unit_address; /* partner unit address */ uint32_t p_partition_ID; /* partner partition ID */ @@ -307,8 +303,8 @@ struct hvcs_struct { struct vio_dev *vdev; }; -/* Required to back map a kobject to its containing object */ -#define from_kobj(kobj) container_of(kobj, struct hvcs_struct, kobj) +/* Required to back map a kref to its containing object */ +#define from_kref(k) container_of(k, struct hvcs_struct, kref) static struct list_head hvcs_structs = LIST_HEAD_INIT(hvcs_structs); static DEFINE_SPINLOCK(hvcs_structs_lock); @@ -334,7 +330,6 @@ static void hvcs_partner_free(struct hvcs_struct *hvcsd); static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address, unsigned int irq, struct vio_dev *dev); -static void destroy_hvcs_struct(struct kobject *kobj); static int hvcs_open(struct tty_struct *tty, struct file *filp); static void hvcs_close(struct tty_struct *tty, struct file *filp); static void hvcs_hangup(struct tty_struct * tty); @@ -703,10 +698,10 @@ static void hvcs_return_index(int index) hvcs_index_list[index] = -1; } -/* callback when the kboject ref count reaches zero */ -static void destroy_hvcs_struct(struct kobject *kobj) +/* callback when the kref ref count reaches zero */ +static void destroy_hvcs_struct(struct kref *kref) { - struct hvcs_struct *hvcsd = from_kobj(kobj); + struct hvcs_struct *hvcsd = from_kref(kref); struct vio_dev *vdev; unsigned long flags; @@ -743,10 +738,6 @@ static void destroy_hvcs_struct(struct kobject *kobj) kfree(hvcsd); } -static struct kobj_type hvcs_kobj_type = { - .release = destroy_hvcs_struct, -}; - static int hvcs_get_index(void) { int i; @@ -791,9 +782,7 @@ static int __devinit hvcs_probe( spin_lock_init(&hvcsd->lock); /* Automatically incs the refcount the first time */ - kobject_init(&hvcsd->kobj); - /* Set up the callback for terminating the hvcs_struct's life */ - hvcsd->kobj.ktype = &hvcs_kobj_type; + kref_init(&hvcsd->kref); hvcsd->vdev = dev; dev->dev.driver_data = hvcsd; @@ -844,7 +833,6 @@ static int __devexit hvcs_remove(struct vio_dev *dev) { struct hvcs_struct *hvcsd = dev->dev.driver_data; unsigned long flags; - struct kobject *kobjp; struct tty_struct *tty; if (!hvcsd) @@ -856,15 +844,13 @@ static int __devexit hvcs_remove(struct vio_dev *dev) tty = hvcsd->tty; - kobjp = &hvcsd->kobj; - spin_unlock_irqrestore(&hvcsd->lock, flags); /* * Let the last holder of this object cause it to be removed, which * would probably be tty_hangup below. */ - kobject_put (kobjp); + kref_put(&hvcsd->kref, destroy_hvcs_struct); /* * The hangup is a scheduled function which will auto chain call @@ -1086,7 +1072,7 @@ static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address, } /* - * This always increments the kobject ref count if the call is successful. + * This always increments the kref ref count if the call is successful. * Please remember to dec when you are done with the instance. 
* * NOTICE: Do NOT hold either the hvcs_struct.lock or hvcs_structs_lock when @@ -1103,7 +1089,7 @@ static struct hvcs_struct *hvcs_get_by_index(int index) list_for_each_entry(hvcsd, &hvcs_structs, next) { spin_lock_irqsave(&hvcsd->lock, flags); if (hvcsd->index == index) { - kobject_get(&hvcsd->kobj); + kref_get(&hvcsd->kref); spin_unlock_irqrestore(&hvcsd->lock, flags); spin_unlock(&hvcs_structs_lock); return hvcsd; @@ -1129,14 +1115,13 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp) unsigned int irq; struct vio_dev *vdev; unsigned long unit_address; - struct kobject *kobjp; if (tty->driver_data) goto fast_open; /* * Is there a vty-server that shares the same index? - * This function increments the kobject index. + * This function increments the kref index. */ if (!(hvcsd = hvcs_get_by_index(tty->index))) { printk(KERN_WARNING "HVCS: open failed, no device associated" @@ -1181,7 +1166,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp) * and will grab the spinlock and free the connection if it fails. */ if (((rc = hvcs_enable_device(hvcsd, unit_address, irq, vdev)))) { - kobject_put(&hvcsd->kobj); + kref_put(&hvcsd->kref, destroy_hvcs_struct); printk(KERN_WARNING "HVCS: enable device failed.\n"); return rc; } @@ -1192,17 +1177,11 @@ fast_open: hvcsd = tty->driver_data; spin_lock_irqsave(&hvcsd->lock, flags); - if (!kobject_get(&hvcsd->kobj)) { - spin_unlock_irqrestore(&hvcsd->lock, flags); - printk(KERN_ERR "HVCS: Kobject of open" - " hvcs doesn't exist.\n"); - return -EFAULT; /* Is this the right return value? */ - } - + kref_get(&hvcsd->kref); hvcsd->open_count++; - hvcsd->todo_mask |= HVCS_SCHED_READ; spin_unlock_irqrestore(&hvcsd->lock, flags); + open_success: hvcs_kick(); @@ -1212,9 +1191,8 @@ open_success: return 0; error_release: - kobjp = &hvcsd->kobj; spin_unlock_irqrestore(&hvcsd->lock, flags); - kobject_put(&hvcsd->kobj); + kref_put(&hvcsd->kref, destroy_hvcs_struct); printk(KERN_WARNING "HVCS: partner connect failed.\n"); return retval; @@ -1224,7 +1202,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp) { struct hvcs_struct *hvcsd; unsigned long flags; - struct kobject *kobjp; int irq = NO_IRQ; /* @@ -1245,7 +1222,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp) hvcsd = tty->driver_data; spin_lock_irqsave(&hvcsd->lock, flags); - kobjp = &hvcsd->kobj; if (--hvcsd->open_count == 0) { vio_disable_interrupts(hvcsd->vdev); @@ -1270,7 +1246,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp) tty->driver_data = NULL; free_irq(irq, hvcsd); - kobject_put(kobjp); + kref_put(&hvcsd->kref, destroy_hvcs_struct); return; } else if (hvcsd->open_count < 0) { printk(KERN_ERR "HVCS: vty-server@%X open_count: %d" @@ -1279,7 +1255,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp) } spin_unlock_irqrestore(&hvcsd->lock, flags); - kobject_put(kobjp); + kref_put(&hvcsd->kref, destroy_hvcs_struct); } static void hvcs_hangup(struct tty_struct * tty) @@ -1287,21 +1263,17 @@ static void hvcs_hangup(struct tty_struct * tty) struct hvcs_struct *hvcsd = tty->driver_data; unsigned long flags; int temp_open_count; - struct kobject *kobjp; int irq = NO_IRQ; spin_lock_irqsave(&hvcsd->lock, flags); - /* Preserve this so that we know how many kobject refs to put */ + /* Preserve this so that we know how many kref refs to put */ temp_open_count = hvcsd->open_count; /* - * Don't kobject put inside the spinlock because the destruction + * Don't kref put inside the spinlock because the destruction * 
callback may use the spinlock and it may get called before the - * spinlock has been released. Get a pointer to the kobject and - * kobject_put on that after releasing the spinlock. + * spinlock has been released. */ - kobjp = &hvcsd->kobj; - vio_disable_interrupts(hvcsd->vdev); hvcsd->todo_mask = 0; @@ -1324,7 +1296,7 @@ static void hvcs_hangup(struct tty_struct * tty) free_irq(irq, hvcsd); /* - * We need to kobject_put() for every open_count we have since the + * We need to kref_put() for every open_count we have since the * tty_hangup() function doesn't invoke a close per open connection on a * non-console device. */ @@ -1335,7 +1307,7 @@ static void hvcs_hangup(struct tty_struct * tty) * NOTE: If this hangup was signaled from user space then the * final put will never happen. */ - kobject_put(kobjp); + kref_put(&hvcsd->kref, destroy_hvcs_struct); } } diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c new file mode 100644 index 000000000000..6076e662886a --- /dev/null +++ b/drivers/char/nozomi.c @@ -0,0 +1,1993 @@ +/* + * nozomi.c -- HSDPA driver Broadband Wireless Data Card - Globe Trotter + * + * Written by: Ulf Jakobsson, + * Jan �erfeldt, + * Stefan Thomasson, + * + * Maintained by: Paul Hardwick (p.hardwick@option.com) + * + * Patches: + * Locking code changes for Vodafone by Sphere Systems Ltd, + * Andrew Bird (ajb@spheresystems.co.uk ) + * & Phil Sanderson + * + * Source has been ported from an implementation made by Filip Aben @ Option + * + * -------------------------------------------------------------------------- + * + * Copyright (c) 2005,2006 Option Wireless Sweden AB + * Copyright (c) 2006 Sphere Systems Ltd + * Copyright (c) 2006 Option Wireless n/v + * All rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * -------------------------------------------------------------------------- + */ + +/* + * CHANGELOG + * Version 2.1d + * 11-November-2007 Jiri Slaby, Frank Seidel + * - Big rework of multicard support by Jiri + * - Major cleanups (semaphore to mutex, endianess, no major reservation) + * - Optimizations + * + * Version 2.1c + * 30-October-2007 Frank Seidel + * - Completed multicard support + * - Minor cleanups + * + * Version 2.1b + * 07-August-2007 Frank Seidel + * - Minor cleanups + * - theoretical multicard support + * + * Version 2.1 + * 03-July-2006 Paul Hardwick + * + * - Stability Improvements. Incorporated spinlock wraps patch. + * - Updated for newer 2.6.14+ kernels (tty_buffer_request_room) + * - using __devexit macro for tty + * + * + * Version 2.0 + * 08-feb-2006 15:34:10:Ulf + * + * -Fixed issue when not waking up line disipine layer, could probably result + * in better uplink performance for 2.4. + * + * -Fixed issue with big endian during initalization, now proper toggle flags + * are handled between preloader and maincode. + * + * -Fixed flow control issue. 
+ * + * -Added support for setting DTR. + * + * -For 2.4 kernels, removing temporary buffer that's not needed. + * + * -Reading CTS only for modem port (only port that supports it). + * + * -Return 0 in write_room instead of netative value, it's not handled in + * upper layer. + * + * -------------------------------------------------------------------------- + * Version 1.0 + * + * First version of driver, only tested with card of type F32_2. + * Works fine with 2.4 and 2.6 kernels. + * Driver also support big endian architecture. + */ + +/* Enable this to have a lot of debug printouts */ +#define DEBUG + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/ioport.h> +#include <linux/tty.h> +#include <linux/tty_driver.h> +#include <linux/tty_flip.h> +#include <linux/serial.h> +#include <linux/interrupt.h> +#include <linux/kmod.h> +#include <linux/init.h> +#include <linux/kfifo.h> +#include <linux/uaccess.h> +#include <asm/byteorder.h> + +#include <linux/delay.h> + + +#define VERSION_STRING DRIVER_DESC " 2.1d (build date: " \ + __DATE__ " " __TIME__ ")" + +/* Macros definitions */ + +/* Default debug printout level */ +#define NOZOMI_DEBUG_LEVEL 0x00 + +#define P_BUF_SIZE 128 +#define NFO(_err_flag_, args...) \ +do { \ + char tmp[P_BUF_SIZE]; \ + snprintf(tmp, sizeof(tmp), ##args); \ + printk(_err_flag_ "[%d] %s(): %s\n", __LINE__, \ + __FUNCTION__, tmp); \ +} while (0) + +#define DBG1(args...) D_(0x01, ##args) +#define DBG2(args...) D_(0x02, ##args) +#define DBG3(args...) D_(0x04, ##args) +#define DBG4(args...) D_(0x08, ##args) +#define DBG5(args...) D_(0x10, ##args) +#define DBG6(args...) D_(0x20, ##args) +#define DBG7(args...) D_(0x40, ##args) +#define DBG8(args...) D_(0x80, ##args) + +#ifdef DEBUG +/* Do we need this settable at runtime? */ +static int debug = NOZOMI_DEBUG_LEVEL; + +#define D(lvl, args...) do {if (lvl & debug) NFO(KERN_DEBUG, ##args); } \ + while (0) +#define D_(lvl, args...) D(lvl, ##args) + +/* These printouts are always printed */ + +#else +static int debug; +#define D_(lvl, args...) +#endif + +/* TODO: rewrite to optimize macros... */ + +#define TMP_BUF_MAX 256 + +#define DUMP(buf__,len__) \ + do { \ + char tbuf[TMP_BUF_MAX] = {0};\ + if (len__ > 1) {\ + snprintf(tbuf, len__ > TMP_BUF_MAX ? 
TMP_BUF_MAX : len__, "%s", buf__);\ + if (tbuf[len__-2] == '\r') {\ + tbuf[len__-2] = 'r';\ + } \ + DBG1("SENDING: '%s' (%d+n)", tbuf, len__);\ + } else {\ + DBG1("SENDING: '%s' (%d)", tbuf, len__);\ + } \ +} while (0) + +/* Defines */ +#define NOZOMI_NAME "nozomi" +#define NOZOMI_NAME_TTY "nozomi_tty" +#define DRIVER_DESC "Nozomi driver" + +#define NTTY_TTY_MAXMINORS 256 +#define NTTY_FIFO_BUFFER_SIZE 8192 + +/* Must be power of 2 */ +#define FIFO_BUFFER_SIZE_UL 8192 + +/* Size of tmp send buffer to card */ +#define SEND_BUF_MAX 1024 +#define RECEIVE_BUF_MAX 4 + + +/* Define all types of vendors and devices to support */ +#define VENDOR1 0x1931 /* Vendor Option */ +#define DEVICE1 0x000c /* HSDPA card */ + +#define R_IIR 0x0000 /* Interrupt Identity Register */ +#define R_FCR 0x0000 /* Flow Control Register */ +#define R_IER 0x0004 /* Interrupt Enable Register */ + +#define CONFIG_MAGIC 0xEFEFFEFE +#define TOGGLE_VALID 0x0000 + +/* Definition of interrupt tokens */ +#define MDM_DL1 0x0001 +#define MDM_UL1 0x0002 +#define MDM_DL2 0x0004 +#define MDM_UL2 0x0008 +#define DIAG_DL1 0x0010 +#define DIAG_DL2 0x0020 +#define DIAG_UL 0x0040 +#define APP1_DL 0x0080 +#define APP1_UL 0x0100 +#define APP2_DL 0x0200 +#define APP2_UL 0x0400 +#define CTRL_DL 0x0800 +#define CTRL_UL 0x1000 +#define RESET 0x8000 + +#define MDM_DL (MDM_DL1 | MDM_DL2) +#define MDM_UL (MDM_UL1 | MDM_UL2) +#define DIAG_DL (DIAG_DL1 | DIAG_DL2) + +/* modem signal definition */ +#define CTRL_DSR 0x0001 +#define CTRL_DCD 0x0002 +#define CTRL_RI 0x0004 +#define CTRL_CTS 0x0008 + +#define CTRL_DTR 0x0001 +#define CTRL_RTS 0x0002 + +#define MAX_PORT 4 +#define NOZOMI_MAX_PORTS 5 +#define NOZOMI_MAX_CARDS (NTTY_TTY_MAXMINORS / MAX_PORT) + +/* Type definitions */ + +/* + * There are two types of nozomi cards, + * one with 2048 memory and with 8192 memory + */ +enum card_type { + F32_2 = 2048, /* 512 bytes downlink + uplink * 2 -> 2048 */ + F32_8 = 8192, /* 3072 bytes downl. + 1024 bytes uplink * 2 -> 8192 */ +}; + +/* Two different toggle channels exist */ +enum channel_type { + CH_A = 0, + CH_B = 1, +}; + +/* Port definition for the card regarding flow control */ +enum ctrl_port_type { + CTRL_CMD = 0, + CTRL_MDM = 1, + CTRL_DIAG = 2, + CTRL_APP1 = 3, + CTRL_APP2 = 4, + CTRL_ERROR = -1, +}; + +/* Ports that the nozomi has */ +enum port_type { + PORT_MDM = 0, + PORT_DIAG = 1, + PORT_APP1 = 2, + PORT_APP2 = 3, + PORT_CTRL = 4, + PORT_ERROR = -1, +}; + +#ifdef __BIG_ENDIAN +/* Big endian */ + +struct toggles { + unsigned enabled:5; /* + * Toggle fields are valid if enabled is 0, + * else A-channels must always be used. 
+ */ + unsigned diag_dl:1; + unsigned mdm_dl:1; + unsigned mdm_ul:1; +} __attribute__ ((packed)); + +/* Configuration table to read at startup of card */ +/* Is for now only needed during initialization phase */ +struct config_table { + u32 signature; + u16 product_information; + u16 version; + u8 pad3[3]; + struct toggles toggle; + u8 pad1[4]; + u16 dl_mdm_len1; /* + * If this is 64, it can hold + * 60 bytes + 4 that is length field + */ + u16 dl_start; + + u16 dl_diag_len1; + u16 dl_mdm_len2; /* + * If this is 64, it can hold + * 60 bytes + 4 that is length field + */ + u16 dl_app1_len; + + u16 dl_diag_len2; + u16 dl_ctrl_len; + u16 dl_app2_len; + u8 pad2[16]; + u16 ul_mdm_len1; + u16 ul_start; + u16 ul_diag_len; + u16 ul_mdm_len2; + u16 ul_app1_len; + u16 ul_app2_len; + u16 ul_ctrl_len; +} __attribute__ ((packed)); + +/* This stores all control downlink flags */ +struct ctrl_dl { + u8 port; + unsigned reserved:4; + unsigned CTS:1; + unsigned RI:1; + unsigned DCD:1; + unsigned DSR:1; +} __attribute__ ((packed)); + +/* This stores all control uplink flags */ +struct ctrl_ul { + u8 port; + unsigned reserved:6; + unsigned RTS:1; + unsigned DTR:1; +} __attribute__ ((packed)); + +#else +/* Little endian */ + +/* This represents the toggle information */ +struct toggles { + unsigned mdm_ul:1; + unsigned mdm_dl:1; + unsigned diag_dl:1; + unsigned enabled:5; /* + * Toggle fields are valid if enabled is 0, + * else A-channels must always be used. + */ +} __attribute__ ((packed)); + +/* Configuration table to read at startup of card */ +struct config_table { + u32 signature; + u16 version; + u16 product_information; + struct toggles toggle; + u8 pad1[7]; + u16 dl_start; + u16 dl_mdm_len1; /* + * If this is 64, it can hold + * 60 bytes + 4 that is length field + */ + u16 dl_mdm_len2; + u16 dl_diag_len1; + u16 dl_diag_len2; + u16 dl_app1_len; + u16 dl_app2_len; + u16 dl_ctrl_len; + u8 pad2[16]; + u16 ul_start; + u16 ul_mdm_len2; + u16 ul_mdm_len1; + u16 ul_diag_len; + u16 ul_app1_len; + u16 ul_app2_len; + u16 ul_ctrl_len; +} __attribute__ ((packed)); + +/* This stores all control downlink flags */ +struct ctrl_dl { + unsigned DSR:1; + unsigned DCD:1; + unsigned RI:1; + unsigned CTS:1; + unsigned reserverd:4; + u8 port; +} __attribute__ ((packed)); + +/* This stores all control uplink flags */ +struct ctrl_ul { + unsigned DTR:1; + unsigned RTS:1; + unsigned reserved:6; + u8 port; +} __attribute__ ((packed)); +#endif + +/* This holds all information that is needed regarding a port */ +struct port { + u8 update_flow_control; + struct ctrl_ul ctrl_ul; + struct ctrl_dl ctrl_dl; + struct kfifo *fifo_ul; + void __iomem *dl_addr[2]; + u32 dl_size[2]; + u8 toggle_dl; + void __iomem *ul_addr[2]; + u32 ul_size[2]; + u8 toggle_ul; + u16 token_dl; + + struct tty_struct *tty; + int tty_open_count; + /* mutex to ensure one access patch to this port */ + struct mutex tty_sem; + wait_queue_head_t tty_wait; + struct async_icount tty_icount; +}; + +/* Private data one for each card in the system */ +struct nozomi { + void __iomem *base_addr; + unsigned long flip; + + /* Pointers to registers */ + void __iomem *reg_iir; + void __iomem *reg_fcr; + void __iomem *reg_ier; + + u16 last_ier; + enum card_type card_type; + struct config_table config_table; /* Configuration table */ + struct pci_dev *pdev; + struct port port[NOZOMI_MAX_PORTS]; + u8 *send_buf; + + spinlock_t spin_mutex; /* secures access to registers and tty */ + + unsigned int index_start; + u32 open_ttys; +}; + +/* This is a data packet that is read or 
written to/from card */ +struct buffer { + u32 size; /* size is the length of the data buffer */ + u8 *data; +} __attribute__ ((packed)); + +/* Global variables */ +static struct pci_device_id nozomi_pci_tbl[] = { + {PCI_DEVICE(VENDOR1, DEVICE1)}, + {}, +}; + +MODULE_DEVICE_TABLE(pci, nozomi_pci_tbl); + +static struct nozomi *ndevs[NOZOMI_MAX_CARDS]; +static struct tty_driver *ntty_driver; + +/* + * find card by tty_index + */ +static inline struct nozomi *get_dc_by_tty(const struct tty_struct *tty) +{ + return tty ? ndevs[tty->index / MAX_PORT] : NULL; +} + +static inline struct port *get_port_by_tty(const struct tty_struct *tty) +{ + struct nozomi *ndev = get_dc_by_tty(tty); + return ndev ? &ndev->port[tty->index % MAX_PORT] : NULL; +} + +/* + * TODO: + * -Optimize + * -Rewrite cleaner + */ + +static void read_mem32(u32 *buf, const void __iomem *mem_addr_start, + u32 size_bytes) +{ + u32 i = 0; + const u32 *ptr = (__force u32 *) mem_addr_start; + u16 *buf16; + + if (unlikely(!ptr || !buf)) + goto out; + + /* shortcut for extremely often used cases */ + switch (size_bytes) { + case 2: /* 2 bytes */ + buf16 = (u16 *) buf; + *buf16 = __le16_to_cpu(readw((void __iomem *)ptr)); + goto out; + break; + case 4: /* 4 bytes */ + *(buf) = __le32_to_cpu(readl((void __iomem *)ptr)); + goto out; + break; + } + + while (i < size_bytes) { + if (size_bytes - i == 2) { + /* Handle 2 bytes in the end */ + buf16 = (u16 *) buf; + *(buf16) = __le16_to_cpu(readw((void __iomem *)ptr)); + i += 2; + } else { + /* Read 4 bytes */ + *(buf) = __le32_to_cpu(readl((void __iomem *)ptr)); + i += 4; + } + buf++; + ptr++; + } +out: + return; +} + +/* + * TODO: + * -Optimize + * -Rewrite cleaner + */ +static u32 write_mem32(void __iomem *mem_addr_start, u32 *buf, + u32 size_bytes) +{ + u32 i = 0; + u32 *ptr = (__force u32 *) mem_addr_start; + u16 *buf16; + + if (unlikely(!ptr || !buf)) + return 0; + + /* shortcut for extremely often used cases */ + switch (size_bytes) { + case 2: /* 2 bytes */ + buf16 = (u16 *) buf; + writew(__cpu_to_le16(*buf16), (void __iomem *)ptr); + return 2; + break; + case 1: /* + * also needs to write 4 bytes in this case + * so falling through.. + */ + case 4: /* 4 bytes */ + writel(__cpu_to_le32(*buf), (void __iomem *)ptr); + return 4; + break; + } + + while (i < size_bytes) { + if (size_bytes - i == 2) { + /* 2 bytes */ + buf16 = (u16 *) buf; + writew(__cpu_to_le16(*buf16), (void __iomem *)ptr); + i += 2; + } else { + /* 4 bytes */ + writel(__cpu_to_le32(*buf), (void __iomem *)ptr); + i += 4; + } + buf++; + ptr++; + } + return i; +} + +/* Setup pointers to different channels and also setup buffer sizes. */ +static void setup_memory(struct nozomi *dc) +{ + void __iomem *offset = dc->base_addr + dc->config_table.dl_start; + /* The length reported is including the length field of 4 bytes, + * hence subtract with 4. 
+ */ + const u16 buff_offset = 4; + + /* Modem port dl configuration */ + dc->port[PORT_MDM].dl_addr[CH_A] = offset; + dc->port[PORT_MDM].dl_addr[CH_B] = + (offset += dc->config_table.dl_mdm_len1); + dc->port[PORT_MDM].dl_size[CH_A] = + dc->config_table.dl_mdm_len1 - buff_offset; + dc->port[PORT_MDM].dl_size[CH_B] = + dc->config_table.dl_mdm_len2 - buff_offset; + + /* Diag port dl configuration */ + dc->port[PORT_DIAG].dl_addr[CH_A] = + (offset += dc->config_table.dl_mdm_len2); + dc->port[PORT_DIAG].dl_size[CH_A] = + dc->config_table.dl_diag_len1 - buff_offset; + dc->port[PORT_DIAG].dl_addr[CH_B] = + (offset += dc->config_table.dl_diag_len1); + dc->port[PORT_DIAG].dl_size[CH_B] = + dc->config_table.dl_diag_len2 - buff_offset; + + /* App1 port dl configuration */ + dc->port[PORT_APP1].dl_addr[CH_A] = + (offset += dc->config_table.dl_diag_len2); + dc->port[PORT_APP1].dl_size[CH_A] = + dc->config_table.dl_app1_len - buff_offset; + + /* App2 port dl configuration */ + dc->port[PORT_APP2].dl_addr[CH_A] = + (offset += dc->config_table.dl_app1_len); + dc->port[PORT_APP2].dl_size[CH_A] = + dc->config_table.dl_app2_len - buff_offset; + + /* Ctrl dl configuration */ + dc->port[PORT_CTRL].dl_addr[CH_A] = + (offset += dc->config_table.dl_app2_len); + dc->port[PORT_CTRL].dl_size[CH_A] = + dc->config_table.dl_ctrl_len - buff_offset; + + offset = dc->base_addr + dc->config_table.ul_start; + + /* Modem Port ul configuration */ + dc->port[PORT_MDM].ul_addr[CH_A] = offset; + dc->port[PORT_MDM].ul_size[CH_A] = + dc->config_table.ul_mdm_len1 - buff_offset; + dc->port[PORT_MDM].ul_addr[CH_B] = + (offset += dc->config_table.ul_mdm_len1); + dc->port[PORT_MDM].ul_size[CH_B] = + dc->config_table.ul_mdm_len2 - buff_offset; + + /* Diag port ul configuration */ + dc->port[PORT_DIAG].ul_addr[CH_A] = + (offset += dc->config_table.ul_mdm_len2); + dc->port[PORT_DIAG].ul_size[CH_A] = + dc->config_table.ul_diag_len - buff_offset; + + /* App1 port ul configuration */ + dc->port[PORT_APP1].ul_addr[CH_A] = + (offset += dc->config_table.ul_diag_len); + dc->port[PORT_APP1].ul_size[CH_A] = + dc->config_table.ul_app1_len - buff_offset; + + /* App2 port ul configuration */ + dc->port[PORT_APP2].ul_addr[CH_A] = + (offset += dc->config_table.ul_app1_len); + dc->port[PORT_APP2].ul_size[CH_A] = + dc->config_table.ul_app2_len - buff_offset; + + /* Ctrl ul configuration */ + dc->port[PORT_CTRL].ul_addr[CH_A] = + (offset += dc->config_table.ul_app2_len); + dc->port[PORT_CTRL].ul_size[CH_A] = + dc->config_table.ul_ctrl_len - buff_offset; +} + +/* Dump config table under initalization phase */ +#ifdef DEBUG +static void dump_table(const struct nozomi *dc) +{ + DBG3("signature: 0x%08X", dc->config_table.signature); + DBG3("version: 0x%04X", dc->config_table.version); + DBG3("product_information: 0x%04X", \ + dc->config_table.product_information); + DBG3("toggle enabled: %d", dc->config_table.toggle.enabled); + DBG3("toggle up_mdm: %d", dc->config_table.toggle.mdm_ul); + DBG3("toggle dl_mdm: %d", dc->config_table.toggle.mdm_dl); + DBG3("toggle dl_dbg: %d", dc->config_table.toggle.diag_dl); + + DBG3("dl_start: 0x%04X", dc->config_table.dl_start); + DBG3("dl_mdm_len0: 0x%04X, %d", dc->config_table.dl_mdm_len1, + dc->config_table.dl_mdm_len1); + DBG3("dl_mdm_len1: 0x%04X, %d", dc->config_table.dl_mdm_len2, + dc->config_table.dl_mdm_len2); + DBG3("dl_diag_len0: 0x%04X, %d", dc->config_table.dl_diag_len1, + dc->config_table.dl_diag_len1); + DBG3("dl_diag_len1: 0x%04X, %d", dc->config_table.dl_diag_len2, + dc->config_table.dl_diag_len2); + 
DBG3("dl_app1_len: 0x%04X, %d", dc->config_table.dl_app1_len, + dc->config_table.dl_app1_len); + DBG3("dl_app2_len: 0x%04X, %d", dc->config_table.dl_app2_len, + dc->config_table.dl_app2_len); + DBG3("dl_ctrl_len: 0x%04X, %d", dc->config_table.dl_ctrl_len, + dc->config_table.dl_ctrl_len); + DBG3("ul_start: 0x%04X, %d", dc->config_table.ul_start, + dc->config_table.ul_start); + DBG3("ul_mdm_len[0]: 0x%04X, %d", dc->config_table.ul_mdm_len1, + dc->config_table.ul_mdm_len1); + DBG3("ul_mdm_len[1]: 0x%04X, %d", dc->config_table.ul_mdm_len2, + dc->config_table.ul_mdm_len2); + DBG3("ul_diag_len: 0x%04X, %d", dc->config_table.ul_diag_len, + dc->config_table.ul_diag_len); + DBG3("ul_app1_len: 0x%04X, %d", dc->config_table.ul_app1_len, + dc->config_table.ul_app1_len); + DBG3("ul_app2_len: 0x%04X, %d", dc->config_table.ul_app2_len, + dc->config_table.ul_app2_len); + DBG3("ul_ctrl_len: 0x%04X, %d", dc->config_table.ul_ctrl_len, + dc->config_table.ul_ctrl_len); +} +#else +static __inline__ void dump_table(const struct nozomi *dc) { } +#endif + +/* + * Read configuration table from card under intalization phase + * Returns 1 if ok, else 0 + */ +static int nozomi_read_config_table(struct nozomi *dc) +{ + read_mem32((u32 *) &dc->config_table, dc->base_addr + 0, + sizeof(struct config_table)); + + if (dc->config_table.signature != CONFIG_MAGIC) { + dev_err(&dc->pdev->dev, "ConfigTable Bad! 0x%08X != 0x%08X\n", + dc->config_table.signature, CONFIG_MAGIC); + return 0; + } + + if ((dc->config_table.version == 0) + || (dc->config_table.toggle.enabled == TOGGLE_VALID)) { + int i; + DBG1("Second phase, configuring card"); + + setup_memory(dc); + + dc->port[PORT_MDM].toggle_ul = dc->config_table.toggle.mdm_ul; + dc->port[PORT_MDM].toggle_dl = dc->config_table.toggle.mdm_dl; + dc->port[PORT_DIAG].toggle_dl = dc->config_table.toggle.diag_dl; + DBG1("toggle ports: MDM UL:%d MDM DL:%d, DIAG DL:%d", + dc->port[PORT_MDM].toggle_ul, + dc->port[PORT_MDM].toggle_dl, dc->port[PORT_DIAG].toggle_dl); + + dump_table(dc); + + for (i = PORT_MDM; i < MAX_PORT; i++) { + dc->port[i].fifo_ul = + kfifo_alloc(FIFO_BUFFER_SIZE_UL, GFP_ATOMIC, NULL); + memset(&dc->port[i].ctrl_dl, 0, sizeof(struct ctrl_dl)); + memset(&dc->port[i].ctrl_ul, 0, sizeof(struct ctrl_ul)); + } + + /* Enable control channel */ + dc->last_ier = dc->last_ier | CTRL_DL; + writew(dc->last_ier, dc->reg_ier); + + dev_info(&dc->pdev->dev, "Initialization OK!\n"); + return 1; + } + + if ((dc->config_table.version > 0) + && (dc->config_table.toggle.enabled != TOGGLE_VALID)) { + u32 offset = 0; + DBG1("First phase: pushing upload buffers, clearing download"); + + dev_info(&dc->pdev->dev, "Version of card: %d\n", + dc->config_table.version); + + /* Here we should disable all I/O over F32. 
*/ + setup_memory(dc); + + /* + * We should send ALL channel pair tokens back along + * with reset token + */ + + /* push upload modem buffers */ + write_mem32(dc->port[PORT_MDM].ul_addr[CH_A], + (u32 *) &offset, 4); + write_mem32(dc->port[PORT_MDM].ul_addr[CH_B], + (u32 *) &offset, 4); + + writew(MDM_UL | DIAG_DL | MDM_DL, dc->reg_fcr); + + DBG1("First phase done"); + } + + return 1; +} + +/* Enable uplink interrupts */ +static void enable_transmit_ul(enum port_type port, struct nozomi *dc) +{ + u16 mask[NOZOMI_MAX_PORTS] = \ + {MDM_UL, DIAG_UL, APP1_UL, APP2_UL, CTRL_UL}; + + if (port < NOZOMI_MAX_PORTS) { + dc->last_ier |= mask[port]; + writew(dc->last_ier, dc->reg_ier); + } else { + dev_err(&dc->pdev->dev, "Called with wrong port?\n"); + } +} + +/* Disable uplink interrupts */ +static void disable_transmit_ul(enum port_type port, struct nozomi *dc) +{ + u16 mask[NOZOMI_MAX_PORTS] = \ + {~MDM_UL, ~DIAG_UL, ~APP1_UL, ~APP2_UL, ~CTRL_UL}; + + if (port < NOZOMI_MAX_PORTS) { + dc->last_ier &= mask[port]; + writew(dc->last_ier, dc->reg_ier); + } else { + dev_err(&dc->pdev->dev, "Called with wrong port?\n"); + } +} + +/* Enable downlink interrupts */ +static void enable_transmit_dl(enum port_type port, struct nozomi *dc) +{ + u16 mask[NOZOMI_MAX_PORTS] = \ + {MDM_DL, DIAG_DL, APP1_DL, APP2_DL, CTRL_DL}; + + if (port < NOZOMI_MAX_PORTS) { + dc->last_ier |= mask[port]; + writew(dc->last_ier, dc->reg_ier); + } else { + dev_err(&dc->pdev->dev, "Called with wrong port?\n"); + } +} + +/* Disable downlink interrupts */ +static void disable_transmit_dl(enum port_type port, struct nozomi *dc) +{ + u16 mask[NOZOMI_MAX_PORTS] = \ + {~MDM_DL, ~DIAG_DL, ~APP1_DL, ~APP2_DL, ~CTRL_DL}; + + if (port < NOZOMI_MAX_PORTS) { + dc->last_ier &= mask[port]; + writew(dc->last_ier, dc->reg_ier); + } else { + dev_err(&dc->pdev->dev, "Called with wrong port?\n"); + } +} + +/* + * Return 1 - send buffer to card and ack. + * Return 0 - don't ack, don't send buffer to card. + */ +static int send_data(enum port_type index, struct nozomi *dc) +{ + u32 size = 0; + struct port *port = &dc->port[index]; + u8 toggle = port->toggle_ul; + void __iomem *addr = port->ul_addr[toggle]; + u32 ul_size = port->ul_size[toggle]; + struct tty_struct *tty = port->tty; + + /* Get data from tty and place in buf for now */ + size = __kfifo_get(port->fifo_ul, dc->send_buf, + ul_size < SEND_BUF_MAX ? ul_size : SEND_BUF_MAX); + + if (size == 0) { + DBG4("No more data to send, disable link:"); + return 0; + } + + /* DUMP(buf, size); */ + + /* Write length + data */ + write_mem32(addr, (u32 *) &size, 4); + write_mem32(addr + 4, (u32 *) dc->send_buf, size); + + if (tty) + tty_wakeup(tty); + + return 1; +} + +/* If all data has been read, return 1, else 0 */ +static int receive_data(enum port_type index, struct nozomi *dc) +{ + u8 buf[RECEIVE_BUF_MAX] = { 0 }; + int size; + u32 offset = 4; + struct port *port = &dc->port[index]; + void __iomem *addr = port->dl_addr[port->toggle_dl]; + struct tty_struct *tty = port->tty; + int i; + + if (unlikely(!tty)) { + DBG1("tty not open for port: %d?", index); + return 1; + } + + read_mem32((u32 *) &size, addr, 4); + /* DBG1( "%d bytes port: %d", size, index); */ + + if (test_bit(TTY_THROTTLED, &tty->flags)) { + DBG1("No room in tty, don't read data, don't ack interrupt, " + "disable interrupt"); + + /* disable interrupt in downlink... 
*/ + disable_transmit_dl(index, dc); + return 0; + } + + if (unlikely(size == 0)) { + dev_err(&dc->pdev->dev, "size == 0?\n"); + return 1; + } + + tty_buffer_request_room(tty, size); + + while (size > 0) { + read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX); + + if (size == 1) { + tty_insert_flip_char(tty, buf[0], TTY_NORMAL); + size = 0; + } else if (size < RECEIVE_BUF_MAX) { + size -= tty_insert_flip_string(tty, (char *) buf, size); + } else { + i = tty_insert_flip_string(tty, \ + (char *) buf, RECEIVE_BUF_MAX); + size -= i; + offset += i; + } + } + + set_bit(index, &dc->flip); + + return 1; +} + +/* Debug for interrupts */ +#ifdef DEBUG +static char *interrupt2str(u16 interrupt) +{ + static char buf[TMP_BUF_MAX]; + char *p = buf; + + interrupt & MDM_DL1 ? p += snprintf(p, TMP_BUF_MAX, "MDM_DL1 ") : NULL; + interrupt & MDM_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "MDM_DL2 ") : NULL; + + interrupt & MDM_UL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "MDM_UL1 ") : NULL; + interrupt & MDM_UL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "MDM_UL2 ") : NULL; + + interrupt & DIAG_DL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "DIAG_DL1 ") : NULL; + interrupt & DIAG_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "DIAG_DL2 ") : NULL; + + interrupt & DIAG_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "DIAG_UL ") : NULL; + + interrupt & APP1_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "APP1_DL ") : NULL; + interrupt & APP2_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "APP2_DL ") : NULL; + + interrupt & APP1_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "APP1_UL ") : NULL; + interrupt & APP2_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "APP2_UL ") : NULL; + + interrupt & CTRL_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "CTRL_DL ") : NULL; + interrupt & CTRL_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "CTRL_UL ") : NULL; + + interrupt & RESET ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "RESET ") : NULL; + + return buf; +} +#endif + +/* + * Receive flow control + * Return 1 - If ok, else 0 + */ +static int receive_flow_control(struct nozomi *dc) +{ + enum port_type port = PORT_MDM; + struct ctrl_dl ctrl_dl; + struct ctrl_dl old_ctrl; + u16 enable_ier = 0; + + read_mem32((u32 *) &ctrl_dl, dc->port[PORT_CTRL].dl_addr[CH_A], 2); + + switch (ctrl_dl.port) { + case CTRL_CMD: + DBG1("The Base Band sends this value as a response to a " + "request for IMSI detach sent over the control " + "channel uplink (see section 7.6.1)."); + break; + case CTRL_MDM: + port = PORT_MDM; + enable_ier = MDM_DL; + break; + case CTRL_DIAG: + port = PORT_DIAG; + enable_ier = DIAG_DL; + break; + case CTRL_APP1: + port = PORT_APP1; + enable_ier = APP1_DL; + break; + case CTRL_APP2: + port = PORT_APP2; + enable_ier = APP2_DL; + break; + default: + dev_err(&dc->pdev->dev, + "ERROR: flow control received for non-existing port\n"); + return 0; + }; + + DBG1("0x%04X->0x%04X", *((u16 *)&dc->port[port].ctrl_dl), + *((u16 *)&ctrl_dl)); + + old_ctrl = dc->port[port].ctrl_dl; + dc->port[port].ctrl_dl = ctrl_dl; + + if (old_ctrl.CTS == 1 && ctrl_dl.CTS == 0) { + DBG1("Disable interrupt (0x%04X) on port: %d", + enable_ier, port); + disable_transmit_ul(port, dc); + + } else if (old_ctrl.CTS == 0 && ctrl_dl.CTS == 1) { + + if (__kfifo_len(dc->port[port].fifo_ul)) { + DBG1("Enable interrupt (0x%04X) on port: %d", + enable_ier, port); + DBG1("Data in buffer [%d], enable transmit! 
", + __kfifo_len(dc->port[port].fifo_ul)); + enable_transmit_ul(port, dc); + } else { + DBG1("No data in buffer..."); + } + } + + if (*(u16 *)&old_ctrl == *(u16 *)&ctrl_dl) { + DBG1(" No change in mctrl"); + return 1; + } + /* Update statistics */ + if (old_ctrl.CTS != ctrl_dl.CTS) + dc->port[port].tty_icount.cts++; + if (old_ctrl.DSR != ctrl_dl.DSR) + dc->port[port].tty_icount.dsr++; + if (old_ctrl.RI != ctrl_dl.RI) + dc->port[port].tty_icount.rng++; + if (old_ctrl.DCD != ctrl_dl.DCD) + dc->port[port].tty_icount.dcd++; + + wake_up_interruptible(&dc->port[port].tty_wait); + + DBG1("port: %d DCD(%d), CTS(%d), RI(%d), DSR(%d)", + port, + dc->port[port].tty_icount.dcd, dc->port[port].tty_icount.cts, + dc->port[port].tty_icount.rng, dc->port[port].tty_icount.dsr); + + return 1; +} + +static enum ctrl_port_type port2ctrl(enum port_type port, + const struct nozomi *dc) +{ + switch (port) { + case PORT_MDM: + return CTRL_MDM; + case PORT_DIAG: + return CTRL_DIAG; + case PORT_APP1: + return CTRL_APP1; + case PORT_APP2: + return CTRL_APP2; + default: + dev_err(&dc->pdev->dev, + "ERROR: send flow control " \ + "received for non-existing port\n"); + }; + return CTRL_ERROR; +} + +/* + * Send flow control, can only update one channel at a time + * Return 0 - If we have updated all flow control + * Return 1 - If we need to update more flow control, ack current enable more + */ +static int send_flow_control(struct nozomi *dc) +{ + u32 i, more_flow_control_to_be_updated = 0; + u16 *ctrl; + + for (i = PORT_MDM; i < MAX_PORT; i++) { + if (dc->port[i].update_flow_control) { + if (more_flow_control_to_be_updated) { + /* We have more flow control to be updated */ + return 1; + } + dc->port[i].ctrl_ul.port = port2ctrl(i, dc); + ctrl = (u16 *)&dc->port[i].ctrl_ul; + write_mem32(dc->port[PORT_CTRL].ul_addr[0], \ + (u32 *) ctrl, 2); + dc->port[i].update_flow_control = 0; + more_flow_control_to_be_updated = 1; + } + } + return 0; +} + +/* + * Handle donlink data, ports that are handled are modem and diagnostics + * Return 1 - ok + * Return 0 - toggle fields are out of sync + */ +static int handle_data_dl(struct nozomi *dc, enum port_type port, u8 *toggle, + u16 read_iir, u16 mask1, u16 mask2) +{ + if (*toggle == 0 && read_iir & mask1) { + if (receive_data(port, dc)) { + writew(mask1, dc->reg_fcr); + *toggle = !(*toggle); + } + + if (read_iir & mask2) { + if (receive_data(port, dc)) { + writew(mask2, dc->reg_fcr); + *toggle = !(*toggle); + } + } + } else if (*toggle == 1 && read_iir & mask2) { + if (receive_data(port, dc)) { + writew(mask2, dc->reg_fcr); + *toggle = !(*toggle); + } + + if (read_iir & mask1) { + if (receive_data(port, dc)) { + writew(mask1, dc->reg_fcr); + *toggle = !(*toggle); + } + } + } else { + dev_err(&dc->pdev->dev, "port out of sync!, toggle:%d\n", + *toggle); + return 0; + } + return 1; +} + +/* + * Handle uplink data, this is currently for the modem port + * Return 1 - ok + * Return 0 - toggle field are out of sync + */ +static int handle_data_ul(struct nozomi *dc, enum port_type port, u16 read_iir) +{ + u8 *toggle = &(dc->port[port].toggle_ul); + + if (*toggle == 0 && read_iir & MDM_UL1) { + dc->last_ier &= ~MDM_UL; + writew(dc->last_ier, dc->reg_ier); + if (send_data(port, dc)) { + writew(MDM_UL1, dc->reg_fcr); + dc->last_ier = dc->last_ier | MDM_UL; + writew(dc->last_ier, dc->reg_ier); + *toggle = !*toggle; + } + + if (read_iir & MDM_UL2) { + dc->last_ier &= ~MDM_UL; + writew(dc->last_ier, dc->reg_ier); + if (send_data(port, dc)) { + writew(MDM_UL2, dc->reg_fcr); + dc->last_ier = 
dc->last_ier | MDM_UL; + writew(dc->last_ier, dc->reg_ier); + *toggle = !*toggle; + } + } + + } else if (*toggle == 1 && read_iir & MDM_UL2) { + dc->last_ier &= ~MDM_UL; + writew(dc->last_ier, dc->reg_ier); + if (send_data(port, dc)) { + writew(MDM_UL2, dc->reg_fcr); + dc->last_ier = dc->last_ier | MDM_UL; + writew(dc->last_ier, dc->reg_ier); + *toggle = !*toggle; + } + + if (read_iir & MDM_UL1) { + dc->last_ier &= ~MDM_UL; + writew(dc->last_ier, dc->reg_ier); + if (send_data(port, dc)) { + writew(MDM_UL1, dc->reg_fcr); + dc->last_ier = dc->last_ier | MDM_UL; + writew(dc->last_ier, dc->reg_ier); + *toggle = !*toggle; + } + } + } else { + writew(read_iir & MDM_UL, dc->reg_fcr); + dev_err(&dc->pdev->dev, "port out of sync!\n"); + return 0; + } + return 1; +} + +static irqreturn_t interrupt_handler(int irq, void *dev_id) +{ + struct nozomi *dc = dev_id; + unsigned int a; + u16 read_iir; + + if (!dc) + return IRQ_NONE; + + spin_lock(&dc->spin_mutex); + read_iir = readw(dc->reg_iir); + + /* Card removed */ + if (read_iir == (u16)-1) + goto none; + /* + * Just handle interrupt enabled in IER + * (by masking with dc->last_ier) + */ + read_iir &= dc->last_ier; + + if (read_iir == 0) + goto none; + + + DBG4("%s irq:0x%04X, prev:0x%04X", interrupt2str(read_iir), read_iir, + dc->last_ier); + + if (read_iir & RESET) { + if (unlikely(!nozomi_read_config_table(dc))) { + dc->last_ier = 0x0; + writew(dc->last_ier, dc->reg_ier); + dev_err(&dc->pdev->dev, "Could not read status from " + "card, we should disable interface\n"); + } else { + writew(RESET, dc->reg_fcr); + } + /* No more useful info if this was the reset interrupt. */ + goto exit_handler; + } + if (read_iir & CTRL_UL) { + DBG1("CTRL_UL"); + dc->last_ier &= ~CTRL_UL; + writew(dc->last_ier, dc->reg_ier); + if (send_flow_control(dc)) { + writew(CTRL_UL, dc->reg_fcr); + dc->last_ier = dc->last_ier | CTRL_UL; + writew(dc->last_ier, dc->reg_ier); + } + } + if (read_iir & CTRL_DL) { + receive_flow_control(dc); + writew(CTRL_DL, dc->reg_fcr); + } + if (read_iir & MDM_DL) { + if (!handle_data_dl(dc, PORT_MDM, + &(dc->port[PORT_MDM].toggle_dl), read_iir, + MDM_DL1, MDM_DL2)) { + dev_err(&dc->pdev->dev, "MDM_DL out of sync!\n"); + goto exit_handler; + } + } + if (read_iir & MDM_UL) { + if (!handle_data_ul(dc, PORT_MDM, read_iir)) { + dev_err(&dc->pdev->dev, "MDM_UL out of sync!\n"); + goto exit_handler; + } + } + if (read_iir & DIAG_DL) { + if (!handle_data_dl(dc, PORT_DIAG, + &(dc->port[PORT_DIAG].toggle_dl), read_iir, + DIAG_DL1, DIAG_DL2)) { + dev_err(&dc->pdev->dev, "DIAG_DL out of sync!\n"); + goto exit_handler; + } + } + if (read_iir & DIAG_UL) { + dc->last_ier &= ~DIAG_UL; + writew(dc->last_ier, dc->reg_ier); + if (send_data(PORT_DIAG, dc)) { + writew(DIAG_UL, dc->reg_fcr); + dc->last_ier = dc->last_ier | DIAG_UL; + writew(dc->last_ier, dc->reg_ier); + } + } + if (read_iir & APP1_DL) { + if (receive_data(PORT_APP1, dc)) + writew(APP1_DL, dc->reg_fcr); + } + if (read_iir & APP1_UL) { + dc->last_ier &= ~APP1_UL; + writew(dc->last_ier, dc->reg_ier); + if (send_data(PORT_APP1, dc)) { + writew(APP1_UL, dc->reg_fcr); + dc->last_ier = dc->last_ier | APP1_UL; + writew(dc->last_ier, dc->reg_ier); + } + } + if (read_iir & APP2_DL) { + if (receive_data(PORT_APP2, dc)) + writew(APP2_DL, dc->reg_fcr); + } + if (read_iir & APP2_UL) { + dc->last_ier &= ~APP2_UL; + writew(dc->last_ier, dc->reg_ier); + if (send_data(PORT_APP2, dc)) { + writew(APP2_UL, dc->reg_fcr); + dc->last_ier = dc->last_ier | APP2_UL; + writew(dc->last_ier, dc->reg_ier); + } + } + +exit_handler: 
+ spin_unlock(&dc->spin_mutex); + for (a = 0; a < NOZOMI_MAX_PORTS; a++) + if (test_and_clear_bit(a, &dc->flip)) + tty_flip_buffer_push(dc->port[a].tty); + return IRQ_HANDLED; +none: + spin_unlock(&dc->spin_mutex); + return IRQ_NONE; +} + +static void nozomi_get_card_type(struct nozomi *dc) +{ + int i; + u32 size = 0; + + for (i = 0; i < 6; i++) + size += pci_resource_len(dc->pdev, i); + + /* Assume card type F32_8 if no match */ + dc->card_type = size == 2048 ? F32_2 : F32_8; + + dev_info(&dc->pdev->dev, "Card type is: %d\n", dc->card_type); +} + +static void nozomi_setup_private_data(struct nozomi *dc) +{ + void __iomem *offset = dc->base_addr + dc->card_type / 2; + unsigned int i; + + dc->reg_fcr = (void __iomem *)(offset + R_FCR); + dc->reg_iir = (void __iomem *)(offset + R_IIR); + dc->reg_ier = (void __iomem *)(offset + R_IER); + dc->last_ier = 0; + dc->flip = 0; + + dc->port[PORT_MDM].token_dl = MDM_DL; + dc->port[PORT_DIAG].token_dl = DIAG_DL; + dc->port[PORT_APP1].token_dl = APP1_DL; + dc->port[PORT_APP2].token_dl = APP2_DL; + + for (i = 0; i < MAX_PORT; i++) + init_waitqueue_head(&dc->port[i].tty_wait); +} + +static ssize_t card_type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev)); + + return sprintf(buf, "%d\n", dc->card_type); +} +static DEVICE_ATTR(card_type, 0444, card_type_show, NULL); + +static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev)); + + return sprintf(buf, "%u\n", dc->open_ttys); +} +static DEVICE_ATTR(open_ttys, 0444, open_ttys_show, NULL); + +static void make_sysfs_files(struct nozomi *dc) +{ + if (device_create_file(&dc->pdev->dev, &dev_attr_card_type)) + dev_err(&dc->pdev->dev, + "Could not create sysfs file for card_type\n"); + if (device_create_file(&dc->pdev->dev, &dev_attr_open_ttys)) + dev_err(&dc->pdev->dev, + "Could not create sysfs file for open_ttys\n"); +} + +static void remove_sysfs_files(struct nozomi *dc) +{ + device_remove_file(&dc->pdev->dev, &dev_attr_card_type); + device_remove_file(&dc->pdev->dev, &dev_attr_open_ttys); +} + +/* Allocate memory for one device */ +static int __devinit nozomi_card_init(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + resource_size_t start; + int ret; + struct nozomi *dc = NULL; + int ndev_idx; + int i; + + dev_dbg(&pdev->dev, "Init, new card found\n"); + + for (ndev_idx = 0; ndev_idx < ARRAY_SIZE(ndevs); ndev_idx++) + if (!ndevs[ndev_idx]) + break; + + if (ndev_idx >= ARRAY_SIZE(ndevs)) { + dev_err(&pdev->dev, "no free tty range for this card left\n"); + ret = -EIO; + goto err; + } + + dc = kzalloc(sizeof(struct nozomi), GFP_KERNEL); + if (unlikely(!dc)) { + dev_err(&pdev->dev, "Could not allocate memory\n"); + ret = -ENOMEM; + goto err_free; + } + + dc->pdev = pdev; + + /* Find out what card type it is */ + nozomi_get_card_type(dc); + + ret = pci_enable_device(dc->pdev); + if (ret) { + dev_err(&pdev->dev, "Failed to enable PCI Device\n"); + goto err_free; + } + + start = pci_resource_start(dc->pdev, 0); + if (start == 0) { + dev_err(&pdev->dev, "No I/O address for card detected\n"); + ret = -ENODEV; + goto err_disable_device; + } + + ret = pci_request_regions(dc->pdev, NOZOMI_NAME); + if (ret) { + dev_err(&pdev->dev, "I/O address 0x%04x already in use\n", + (int) /* nozomi_private.io_addr */ 0); + goto err_disable_device; + } + + dc->base_addr = ioremap(start, dc->card_type); + if (!dc->base_addr) { + dev_err(&pdev->dev, 
"Unable to map card MMIO\n"); + ret = -ENODEV; + goto err_rel_regs; + } + + dc->send_buf = kmalloc(SEND_BUF_MAX, GFP_KERNEL); + if (!dc->send_buf) { + dev_err(&pdev->dev, "Could not allocate send buffer?\n"); + ret = -ENOMEM; + goto err_free_sbuf; + } + + spin_lock_init(&dc->spin_mutex); + + nozomi_setup_private_data(dc); + + /* Disable all interrupts */ + dc->last_ier = 0; + writew(dc->last_ier, dc->reg_ier); + + ret = request_irq(pdev->irq, &interrupt_handler, IRQF_SHARED, + NOZOMI_NAME, dc); + if (unlikely(ret)) { + dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq); + goto err_free_sbuf; + } + + DBG1("base_addr: %p", dc->base_addr); + + make_sysfs_files(dc); + + dc->index_start = ndev_idx * MAX_PORT; + ndevs[ndev_idx] = dc; + + for (i = 0; i < MAX_PORT; i++) { + mutex_init(&dc->port[i].tty_sem); + dc->port[i].tty_open_count = 0; + dc->port[i].tty = NULL; + tty_register_device(ntty_driver, dc->index_start + i, + &pdev->dev); + } + + /* Enable RESET interrupt. */ + dc->last_ier = RESET; + writew(dc->last_ier, dc->reg_ier); + + pci_set_drvdata(pdev, dc); + + return 0; + +err_free_sbuf: + kfree(dc->send_buf); + iounmap(dc->base_addr); +err_rel_regs: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); +err_free: + kfree(dc); +err: + return ret; +} + +static void __devexit tty_exit(struct nozomi *dc) +{ + unsigned int i; + + DBG1(" "); + + flush_scheduled_work(); + + for (i = 0; i < MAX_PORT; ++i) + if (dc->port[i].tty && \ + list_empty(&dc->port[i].tty->hangup_work.entry)) + tty_hangup(dc->port[i].tty); + + while (dc->open_ttys) + msleep(1); + + for (i = dc->index_start; i < dc->index_start + MAX_PORT; ++i) + tty_unregister_device(ntty_driver, i); +} + +/* Deallocate memory for one device */ +static void __devexit nozomi_card_exit(struct pci_dev *pdev) +{ + int i; + struct ctrl_ul ctrl; + struct nozomi *dc = pci_get_drvdata(pdev); + + /* Disable all interrupts */ + dc->last_ier = 0; + writew(dc->last_ier, dc->reg_ier); + + tty_exit(dc); + + /* Send 0x0001, command card to resend the reset token. */ + /* This is to get the reset when the module is reloaded. */ + ctrl.port = 0x00; + ctrl.reserved = 0; + ctrl.RTS = 0; + ctrl.DTR = 1; + DBG1("sending flow control 0x%04X", *((u16 *)&ctrl)); + + /* Setup dc->reg addresses to we can use defines here */ + write_mem32(dc->port[PORT_CTRL].ul_addr[0], (u32 *)&ctrl, 2); + writew(CTRL_UL, dc->reg_fcr); /* push the token to the card. */ + + remove_sysfs_files(dc); + + free_irq(pdev->irq, dc); + + for (i = 0; i < MAX_PORT; i++) + if (dc->port[i].fifo_ul) + kfifo_free(dc->port[i].fifo_ul); + + kfree(dc->send_buf); + + iounmap(dc->base_addr); + + pci_release_regions(pdev); + + pci_disable_device(pdev); + + ndevs[dc->index_start / MAX_PORT] = NULL; + + kfree(dc); +} + +static void set_rts(const struct tty_struct *tty, int rts) +{ + struct port *port = get_port_by_tty(tty); + + port->ctrl_ul.RTS = rts; + port->update_flow_control = 1; + enable_transmit_ul(PORT_CTRL, get_dc_by_tty(tty)); +} + +static void set_dtr(const struct tty_struct *tty, int dtr) +{ + struct port *port = get_port_by_tty(tty); + + DBG1("SETTING DTR index: %d, dtr: %d", tty->index, dtr); + + port->ctrl_ul.DTR = dtr; + port->update_flow_control = 1; + enable_transmit_ul(PORT_CTRL, get_dc_by_tty(tty)); +} + +/* + * ---------------------------------------------------------------------------- + * TTY code + * ---------------------------------------------------------------------------- + */ + +/* Called when the userspace process opens the tty, /dev/noz*. 
*/ +static int ntty_open(struct tty_struct *tty, struct file *file) +{ + struct port *port = get_port_by_tty(tty); + struct nozomi *dc = get_dc_by_tty(tty); + unsigned long flags; + + if (!port || !dc) + return -ENODEV; + + if (mutex_lock_interruptible(&port->tty_sem)) + return -ERESTARTSYS; + + port->tty_open_count++; + dc->open_ttys++; + + /* Enable interrupt downlink for channel */ + if (port->tty_open_count == 1) { + tty->low_latency = 1; + tty->driver_data = port; + port->tty = tty; + DBG1("open: %d", port->token_dl); + spin_lock_irqsave(&dc->spin_mutex, flags); + dc->last_ier = dc->last_ier | port->token_dl; + writew(dc->last_ier, dc->reg_ier); + spin_unlock_irqrestore(&dc->spin_mutex, flags); + } + + mutex_unlock(&port->tty_sem); + + return 0; +} + +/* Called when the userspace process close the tty, /dev/noz*. */ +static void ntty_close(struct tty_struct *tty, struct file *file) +{ + struct nozomi *dc = get_dc_by_tty(tty); + struct port *port = tty->driver_data; + unsigned long flags; + + if (!dc || !port) + return; + + if (mutex_lock_interruptible(&port->tty_sem)) + return; + + if (!port->tty_open_count) + goto exit; + + dc->open_ttys--; + port->tty_open_count--; + + if (port->tty_open_count == 0) { + DBG1("close: %d", port->token_dl); + spin_lock_irqsave(&dc->spin_mutex, flags); + dc->last_ier &= ~(port->token_dl); + writew(dc->last_ier, dc->reg_ier); + spin_unlock_irqrestore(&dc->spin_mutex, flags); + } + +exit: + mutex_unlock(&port->tty_sem); +} + +/* + * called when the userspace process writes to the tty (/dev/noz*). + * Data is inserted into a fifo, which is then read and transfered to the modem. + */ +static int ntty_write(struct tty_struct *tty, const unsigned char *buffer, + int count) +{ + int rval = -EINVAL; + struct nozomi *dc = get_dc_by_tty(tty); + struct port *port = tty->driver_data; + unsigned long flags; + + /* DBG1( "WRITEx: %d, index = %d", count, index); */ + + if (!dc || !port) + return -ENODEV; + + if (unlikely(!mutex_trylock(&port->tty_sem))) { + /* + * must test lock as tty layer wraps calls + * to this function with BKL + */ + dev_err(&dc->pdev->dev, "Would have deadlocked - " + "return EAGAIN\n"); + return -EAGAIN; + } + + if (unlikely(!port->tty_open_count)) { + DBG1(" "); + goto exit; + } + + rval = __kfifo_put(port->fifo_ul, (unsigned char *)buffer, count); + + /* notify card */ + if (unlikely(dc == NULL)) { + DBG1("No device context?"); + goto exit; + } + + spin_lock_irqsave(&dc->spin_mutex, flags); + /* CTS is only valid on the modem channel */ + if (port == &(dc->port[PORT_MDM])) { + if (port->ctrl_dl.CTS) { + DBG4("Enable interrupt"); + enable_transmit_ul(tty->index % MAX_PORT, dc); + } else { + dev_err(&dc->pdev->dev, + "CTS not active on modem port?\n"); + } + } else { + enable_transmit_ul(tty->index % MAX_PORT, dc); + } + spin_unlock_irqrestore(&dc->spin_mutex, flags); + +exit: + mutex_unlock(&port->tty_sem); + return rval; +} + +/* + * Calculate how much is left in device + * This method is called by the upper tty layer. + * #according to sources N_TTY.c it expects a value >= 0 and + * does not check for negative values. 
+ */ +static int ntty_write_room(struct tty_struct *tty) +{ + struct port *port = tty->driver_data; + int room = 0; + struct nozomi *dc = get_dc_by_tty(tty); + + if (!dc || !port) + return 0; + if (!mutex_trylock(&port->tty_sem)) + return 0; + + if (!port->tty_open_count) + goto exit; + + room = port->fifo_ul->size - __kfifo_len(port->fifo_ul); + +exit: + mutex_unlock(&port->tty_sem); + return room; +} + +/* Gets io control parameters */ +static int ntty_tiocmget(struct tty_struct *tty, struct file *file) +{ + struct port *port = tty->driver_data; + struct ctrl_dl *ctrl_dl = &port->ctrl_dl; + struct ctrl_ul *ctrl_ul = &port->ctrl_ul; + + return (ctrl_ul->RTS ? TIOCM_RTS : 0) | + (ctrl_ul->DTR ? TIOCM_DTR : 0) | + (ctrl_dl->DCD ? TIOCM_CAR : 0) | + (ctrl_dl->RI ? TIOCM_RNG : 0) | + (ctrl_dl->DSR ? TIOCM_DSR : 0) | + (ctrl_dl->CTS ? TIOCM_CTS : 0); +} + +/* Sets io controls parameters */ +static int ntty_tiocmset(struct tty_struct *tty, struct file *file, + unsigned int set, unsigned int clear) +{ + if (set & TIOCM_RTS) + set_rts(tty, 1); + else if (clear & TIOCM_RTS) + set_rts(tty, 0); + + if (set & TIOCM_DTR) + set_dtr(tty, 1); + else if (clear & TIOCM_DTR) + set_dtr(tty, 0); + + return 0; +} + +static int ntty_cflags_changed(struct port *port, unsigned long flags, + struct async_icount *cprev) +{ + struct async_icount cnow = port->tty_icount; + int ret; + + ret = ((flags & TIOCM_RNG) && (cnow.rng != cprev->rng)) || + ((flags & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) || + ((flags & TIOCM_CD) && (cnow.dcd != cprev->dcd)) || + ((flags & TIOCM_CTS) && (cnow.cts != cprev->cts)); + + *cprev = cnow; + + return ret; +} + +static int ntty_ioctl_tiocgicount(struct port *port, void __user *argp) +{ + struct async_icount cnow = port->tty_icount; + struct serial_icounter_struct icount; + + icount.cts = cnow.cts; + icount.dsr = cnow.dsr; + icount.rng = cnow.rng; + icount.dcd = cnow.dcd; + icount.rx = cnow.rx; + icount.tx = cnow.tx; + icount.frame = cnow.frame; + icount.overrun = cnow.overrun; + icount.parity = cnow.parity; + icount.brk = cnow.brk; + icount.buf_overrun = cnow.buf_overrun; + + return copy_to_user(argp, &icount, sizeof(icount)); +} + +static int ntty_ioctl(struct tty_struct *tty, struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct port *port = tty->driver_data; + void __user *argp = (void __user *)arg; + int rval = -ENOIOCTLCMD; + + DBG1("******** IOCTL, cmd: %d", cmd); + + switch (cmd) { + case TIOCMIWAIT: { + struct async_icount cprev = port->tty_icount; + + rval = wait_event_interruptible(port->tty_wait, + ntty_cflags_changed(port, arg, &cprev)); + break; + } case TIOCGICOUNT: + rval = ntty_ioctl_tiocgicount(port, argp); + break; + default: + DBG1("ERR: 0x%08X, %d", cmd, cmd); + break; + }; + + return rval; +} + +/* + * Called by the upper tty layer when tty buffers are ready + * to receive data again after a call to throttle. + */ +static void ntty_unthrottle(struct tty_struct *tty) +{ + struct nozomi *dc = get_dc_by_tty(tty); + unsigned long flags; + + DBG1("UNTHROTTLE"); + spin_lock_irqsave(&dc->spin_mutex, flags); + enable_transmit_dl(tty->index % MAX_PORT, dc); + set_rts(tty, 1); + + spin_unlock_irqrestore(&dc->spin_mutex, flags); +} + +/* + * Called by the upper tty layer when the tty buffers are almost full. + * The driver should stop send more data. 
+ */ +static void ntty_throttle(struct tty_struct *tty) +{ + struct nozomi *dc = get_dc_by_tty(tty); + unsigned long flags; + + DBG1("THROTTLE"); + spin_lock_irqsave(&dc->spin_mutex, flags); + set_rts(tty, 0); + spin_unlock_irqrestore(&dc->spin_mutex, flags); +} + +/* just to discard single character writes */ +static void ntty_put_char(struct tty_struct *tty, unsigned char c) +{ + /* FIXME !!! */ + DBG2("PUT CHAR Function: %c", c); +} + +/* Returns number of chars in buffer, called by tty layer */ +static s32 ntty_chars_in_buffer(struct tty_struct *tty) +{ + struct port *port = tty->driver_data; + struct nozomi *dc = get_dc_by_tty(tty); + s32 rval; + + if (unlikely(!dc || !port)) { + rval = -ENODEV; + goto exit_in_buffer; + } + + if (unlikely(!port->tty_open_count)) { + dev_err(&dc->pdev->dev, "No tty open?\n"); + rval = -ENODEV; + goto exit_in_buffer; + } + + rval = __kfifo_len(port->fifo_ul); + +exit_in_buffer: + return rval; +} + +static struct tty_operations tty_ops = { + .ioctl = ntty_ioctl, + .open = ntty_open, + .close = ntty_close, + .write = ntty_write, + .write_room = ntty_write_room, + .unthrottle = ntty_unthrottle, + .throttle = ntty_throttle, + .chars_in_buffer = ntty_chars_in_buffer, + .put_char = ntty_put_char, + .tiocmget = ntty_tiocmget, + .tiocmset = ntty_tiocmset, +}; + +/* Module initialization */ +static struct pci_driver nozomi_driver = { + .name = NOZOMI_NAME, + .id_table = nozomi_pci_tbl, + .probe = nozomi_card_init, + .remove = __devexit_p(nozomi_card_exit), +}; + +static __init int nozomi_init(void) +{ + int ret; + + printk(KERN_INFO "Initializing %s\n", VERSION_STRING); + + ntty_driver = alloc_tty_driver(NTTY_TTY_MAXMINORS); + if (!ntty_driver) + return -ENOMEM; + + ntty_driver->owner = THIS_MODULE; + ntty_driver->driver_name = NOZOMI_NAME_TTY; + ntty_driver->name = "noz"; + ntty_driver->major = 0; + ntty_driver->type = TTY_DRIVER_TYPE_SERIAL; + ntty_driver->subtype = SERIAL_TYPE_NORMAL; + ntty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; + ntty_driver->init_termios = tty_std_termios; + ntty_driver->init_termios.c_cflag = B115200 | CS8 | CREAD | \ + HUPCL | CLOCAL; + ntty_driver->init_termios.c_ispeed = 115200; + ntty_driver->init_termios.c_ospeed = 115200; + tty_set_operations(ntty_driver, &tty_ops); + + ret = tty_register_driver(ntty_driver); + if (ret) { + printk(KERN_ERR "Nozomi: failed to register ntty driver\n"); + goto free_tty; + } + + ret = pci_register_driver(&nozomi_driver); + if (ret) { + printk(KERN_ERR "Nozomi: can't register pci driver\n"); + goto unr_tty; + } + + return 0; +unr_tty: + tty_unregister_driver(ntty_driver); +free_tty: + put_tty_driver(ntty_driver); + return ret; +} + +static __exit void nozomi_exit(void) +{ + printk(KERN_INFO "Unloading %s\n", DRIVER_DESC); + pci_unregister_driver(&nozomi_driver); + tty_unregister_driver(ntty_driver); + put_tty_driver(ntty_driver); +} + +module_init(nozomi_init); +module_exit(nozomi_exit); + +module_param(debug, int, S_IRUGO | S_IWUSR); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION(DRIVER_DESC); |
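The interrupt handler services every uplink token (CTRL_UL, DIAG_UL, APP1_UL, APP2_UL) with the same mask/service/re-enable sequence: clear the token from last_ier, try to refill the card's TX buffer, and only acknowledge the token in FCR and restore the IER bit if send_data() actually queued something. The modem port follows the same shape, but masks the combined MDM_UL bit while acknowledging the specific half (MDM_UL1 or MDM_UL2) in FCR. The helper below is a minimal sketch of that sequence for the simple ports; the register and function names mirror the driver, but service_ul_token() itself is hypothetical and not part of the patch.

/* Hypothetical helper illustrating the uplink-token sequence the handler
 * repeats inline for each port; names mirror the driver, the helper does not
 * exist in it. */
static void service_ul_token(struct nozomi *dc, enum port_type port, u16 token)
{
	/* Mask the uplink interrupt while trying to refill the TX buffer. */
	dc->last_ier &= ~token;
	writew(dc->last_ier, dc->reg_ier);

	if (send_data(port, dc)) {
		/* Data queued: acknowledge the token and re-enable the IRQ. */
		writew(token, dc->reg_fcr);
		dc->last_ier |= token;
		writew(dc->last_ier, dc->reg_ier);
	}
	/* On failure the token stays masked; enable_transmit_ul() restores it
	 * once the tty layer queues more data, so an empty fifo cannot cause
	 * an interrupt storm. */
}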
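make_sysfs_files() and remove_sysfs_files() create and tear down the two read-only attributes one device_create_file() call at a time. A possible consolidation, assuming nothing else needs the files registered individually, is a single attribute group; this is only a sketch, not what the patch does:

/* Sketch only: the same two files exposed through one attribute group. */
static struct attribute *nozomi_attrs[] = {
	&dev_attr_card_type.attr,
	&dev_attr_open_ttys.attr,
	NULL,
};

static const struct attribute_group nozomi_attr_group = {
	.attrs = nozomi_attrs,
};

/* probe:  if (sysfs_create_group(&pdev->dev.kobj, &nozomi_attr_group)) ...  */
/* remove: sysfs_remove_group(&pdev->dev.kobj, &nozomi_attr_group);          */

Either way the files appear under the PCI device's sysfs directory, alongside the rest of the device attributes.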
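ntty_write(), ntty_write_room() and ntty_chars_in_buffer() all account against the same uplink fifo through the old kfifo API (caller-supplied locking, __kfifo_* accessors): the write path may accept fewer bytes than requested and returns the short count, write_room reports size minus fill level, and chars_in_buffer reports the fill level. A small illustration of that accounting, assuming the same struct port layout; these helpers are not in the driver, which open-codes the expressions:

/* Illustrative helpers only. */
static inline unsigned int ul_bytes_queued(struct port *port)
{
	return __kfifo_len(port->fifo_ul);			/* chars_in_buffer() */
}

static inline unsigned int ul_room_left(struct port *port)
{
	return port->fifo_ul->size - __kfifo_len(port->fifo_ul);	/* write_room() */
}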
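One wart worth flagging in ntty_ioctl_tiocgicount(): copy_to_user() returns the number of bytes it could not copy, not a negative errno, so returning its result directly hands userspace a small positive value when the buffer faults. A safer ending would be:

	if (copy_to_user(argp, &icount, sizeof(icount)))
		return -EFAULT;
	return 0;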
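The TIOCMIWAIT branch sleeps on port->tty_wait until ntty_cflags_changed() sees one of the requested modem-status counters move, and TIOCGICOUNT then exposes the counters the driver keeps in tty_icount. A userspace sketch of how the two ioctls are typically paired; the device node name and the minimal error handling are illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/serial.h>	/* struct serial_icounter_struct */

int main(void)
{
	struct serial_icounter_struct ic;
	int fd = open("/dev/noz0", O_RDWR | O_NOCTTY);

	if (fd < 0)
		return 1;
	/* Blocks in ntty_ioctl() until DCD toggles on the modem channel. */
	if (ioctl(fd, TIOCMIWAIT, TIOCM_CD) == 0 &&
	    ioctl(fd, TIOCGICOUNT, &ic) == 0)
		printf("DCD transitions so far: %d\n", ic.dcd);
	return 0;
}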