author     Ingo Molnar <mingo@elte.hu>  2008-07-28 21:14:43 +0200
committer  Ingo Molnar <mingo@elte.hu>  2008-07-28 21:14:43 +0200
commit     414f746d232d41ed6ae8632c4495ae795373c44b (patch)
tree       167f9bc8f139c6e82e6732b38c7a938b8a9d31cd /drivers
parent     5a7a201c51c324876d00a54e7208af6af12d1ca4 (diff)
parent     c9272c4f9fbe2087beb3392f526dc5b19efaa56b (diff)
Merge branch 'linus' into cpus4096
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/processor_idle.c6
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c2
-rw-r--r--drivers/base/core.c6
-rw-r--r--drivers/base/isa.c4
-rw-r--r--drivers/base/memory.c3
-rw-r--r--drivers/base/sys.c12
-rw-r--r--drivers/char/dsp56k.c4
-rw-r--r--drivers/char/nwflash.c6
-rw-r--r--drivers/char/rtc.c5
-rw-r--r--drivers/char/ser_a2232.c52
-rw-r--r--drivers/char/tpm/tpm.c128
-rw-r--r--drivers/char/tpm/tpm_bios.c4
-rw-r--r--drivers/char/tpm/tpm_tis.c1
-rw-r--r--drivers/char/vme_scc.c59
-rw-r--r--drivers/crypto/talitos.c49
-rw-r--r--drivers/firewire/Kconfig9
-rw-r--r--drivers/firewire/fw-card.c2
-rw-r--r--drivers/firewire/fw-cdev.c6
-rw-r--r--drivers/firewire/fw-iso.c2
-rw-r--r--drivers/firewire/fw-ohci.c39
-rw-r--r--drivers/firewire/fw-sbp2.c8
-rw-r--r--drivers/firewire/fw-topology.c2
-rw-r--r--drivers/firewire/fw-transaction.c79
-rw-r--r--drivers/firmware/memmap.c6
-rw-r--r--drivers/gpu/drm/drm_drv.c2
-rw-r--r--drivers/infiniband/core/ucm.c10
-rw-r--r--drivers/infiniband/core/ucma.c11
-rw-r--r--drivers/infiniband/hw/ipath/ipath_sdma.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_user_sdma.c6
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c1
-rw-r--r--drivers/infiniband/hw/mlx4/main.c1
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c1
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c1
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c1
-rw-r--r--drivers/infiniband/hw/mlx4/user.h1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c2
-rw-r--r--drivers/infiniband/hw/nes/nes.c4
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c2034
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.h23
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c9
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c15
-rw-r--r--drivers/infiniband/ulp/ipoib/Kconfig22
-rw-r--r--drivers/input/touchscreen/corgi_ts.c8
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c2
-rw-r--r--drivers/isdn/Kconfig4
-rw-r--r--drivers/isdn/Makefile1
-rw-r--r--drivers/isdn/hardware/Makefile1
-rw-r--r--drivers/isdn/hardware/mISDN/Kconfig26
-rw-r--r--drivers/isdn/hardware/mISDN/Makefile7
-rw-r--r--drivers/isdn/hardware/mISDN/hfc_multi.h1204
-rw-r--r--drivers/isdn/hardware/mISDN/hfc_pci.h228
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c5320
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c2256
-rw-r--r--drivers/isdn/hisax/hisax_fcpcipnp.c2
-rw-r--r--drivers/isdn/mISDN/Kconfig44
-rw-r--r--drivers/isdn/mISDN/Makefile13
-rw-r--r--drivers/isdn/mISDN/core.c244
-rw-r--r--drivers/isdn/mISDN/core.h77
-rw-r--r--drivers/isdn/mISDN/dsp.h263
-rw-r--r--drivers/isdn/mISDN/dsp_audio.c434
-rw-r--r--drivers/isdn/mISDN/dsp_biquad.h65
-rw-r--r--drivers/isdn/mISDN/dsp_blowfish.c672
-rw-r--r--drivers/isdn/mISDN/dsp_cmx.c1886
-rw-r--r--drivers/isdn/mISDN/dsp_core.c1191
-rw-r--r--drivers/isdn/mISDN/dsp_dtmf.c303
-rw-r--r--drivers/isdn/mISDN/dsp_ecdis.h110
-rw-r--r--drivers/isdn/mISDN/dsp_hwec.c138
-rw-r--r--drivers/isdn/mISDN/dsp_hwec.h10
-rw-r--r--drivers/isdn/mISDN/dsp_pipeline.c348
-rw-r--r--drivers/isdn/mISDN/dsp_tones.c551
-rw-r--r--drivers/isdn/mISDN/fsm.c183
-rw-r--r--drivers/isdn/mISDN/fsm.h67
-rw-r--r--drivers/isdn/mISDN/hwchannel.c365
-rw-r--r--drivers/isdn/mISDN/l1oip.h91
-rw-r--r--drivers/isdn/mISDN/l1oip_codec.c374
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c1518
-rw-r--r--drivers/isdn/mISDN/layer1.c403
-rw-r--r--drivers/isdn/mISDN/layer1.h26
-rw-r--r--drivers/isdn/mISDN/layer2.c2216
-rw-r--r--drivers/isdn/mISDN/layer2.h140
-rw-r--r--drivers/isdn/mISDN/socket.c781
-rw-r--r--drivers/isdn/mISDN/stack.c674
-rw-r--r--drivers/isdn/mISDN/tei.c1340
-rw-r--r--drivers/isdn/mISDN/timerdev.c301
-rw-r--r--drivers/md/dm-mpath.c13
-rw-r--r--drivers/media/dvb/pluto2/pluto2.c2
-rw-r--r--drivers/memstick/core/memstick.c36
-rw-r--r--drivers/memstick/core/mspro_block.c365
-rw-r--r--drivers/memstick/host/jmb38x_ms.c102
-rw-r--r--drivers/memstick/host/tifm_ms.c66
-rw-r--r--drivers/message/fusion/lsi/mpi_history.txt6
-rw-r--r--drivers/message/fusion/mptbase.c24
-rw-r--r--drivers/message/fusion/mptctl.c4
-rw-r--r--drivers/message/fusion/mptfc.c8
-rw-r--r--drivers/message/fusion/mptlan.c26
-rw-r--r--drivers/message/fusion/mptsas.c54
-rw-r--r--drivers/message/fusion/mptscsih.c4
-rw-r--r--drivers/mfd/asic3.c22
-rw-r--r--drivers/mfd/tc6393xb.c2
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/misc/atmel-ssc.c1
-rw-r--r--drivers/mmc/core/Makefile1
-rw-r--r--drivers/mmc/core/bus.c8
-rw-r--r--drivers/mmc/core/core.h7
-rw-r--r--drivers/mmc/core/debugfs.c225
-rw-r--r--drivers/mmc/core/host.c8
-rw-r--r--drivers/mmc/host/atmel-mci-regs.h2
-rw-r--r--drivers/mmc/host/atmel-mci.c206
-rw-r--r--drivers/mmc/host/imxmmc.c50
-rw-r--r--drivers/mmc/host/mmc_spi.c3
-rw-r--r--drivers/mmc/host/sdhci.c4
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mtd/Kconfig2
-rw-r--r--drivers/mtd/afs.c2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c17
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c3
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c2
-rw-r--r--drivers/mtd/chips/cfi_probe.c1
-rw-r--r--drivers/mtd/chips/cfi_util.c3
-rw-r--r--drivers/mtd/chips/chipreg.c2
-rw-r--r--drivers/mtd/chips/gen_probe.c5
-rw-r--r--drivers/mtd/chips/jedec_probe.c133
-rw-r--r--drivers/mtd/chips/map_absent.c1
-rw-r--r--drivers/mtd/chips/map_ram.c1
-rw-r--r--drivers/mtd/chips/map_rom.c1
-rw-r--r--drivers/mtd/cmdlinepart.c4
-rw-r--r--drivers/mtd/devices/Kconfig1
-rw-r--r--drivers/mtd/devices/Makefile1
-rw-r--r--drivers/mtd/devices/block2mtd.c6
-rw-r--r--drivers/mtd/devices/doc2000.c2
-rw-r--r--drivers/mtd/devices/doc2001.c2
-rw-r--r--drivers/mtd/devices/doc2001plus.c2
-rw-r--r--drivers/mtd/devices/docecc.c2
-rw-r--r--drivers/mtd/devices/docprobe.c5
-rw-r--r--drivers/mtd/devices/lart.c2
-rw-r--r--drivers/mtd/devices/m25p80.c22
-rw-r--r--drivers/mtd/devices/ms02-nv.c2
-rw-r--r--drivers/mtd/devices/ms02-nv.h2
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c135
-rw-r--r--drivers/mtd/devices/mtdram.c1
-rw-r--r--drivers/mtd/devices/phram.c2
-rw-r--r--drivers/mtd/devices/pmc551.c2
-rw-r--r--drivers/mtd/devices/slram.c2
-rw-r--r--drivers/mtd/ftl.c3
-rw-r--r--drivers/mtd/inftlcore.c5
-rw-r--r--drivers/mtd/inftlmount.c4
-rw-r--r--drivers/mtd/maps/Kconfig30
-rw-r--r--drivers/mtd/maps/Makefile3
-rw-r--r--drivers/mtd/maps/amd76xrom.c1
-rw-r--r--drivers/mtd/maps/autcpu12-nvram.c2
-rw-r--r--drivers/mtd/maps/bast-flash.c226
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c219
-rw-r--r--drivers/mtd/maps/cdb89712.c1
-rw-r--r--drivers/mtd/maps/ceiva.c1
-rw-r--r--drivers/mtd/maps/cfi_flagadm.c2
-rw-r--r--drivers/mtd/maps/dbox2-flash.c2
-rw-r--r--drivers/mtd/maps/dc21285.c2
-rw-r--r--drivers/mtd/maps/dilnetpc.c2
-rw-r--r--drivers/mtd/maps/dmv182.c2
-rw-r--r--drivers/mtd/maps/ebony.c2
-rw-r--r--drivers/mtd/maps/edb7312.c2
-rw-r--r--drivers/mtd/maps/fortunet.c1
-rw-r--r--drivers/mtd/maps/h720x-flash.c2
-rw-r--r--drivers/mtd/maps/ichxrom.c1
-rw-r--r--drivers/mtd/maps/impa7.c2
-rw-r--r--drivers/mtd/maps/integrator-flash.c2
-rw-r--r--drivers/mtd/maps/ipaq-flash.c2
-rw-r--r--drivers/mtd/maps/ixp2000.c2
-rw-r--r--drivers/mtd/maps/ixp4xx.c2
-rw-r--r--drivers/mtd/maps/l440gx.c2
-rw-r--r--drivers/mtd/maps/map_funcs.c2
-rw-r--r--drivers/mtd/maps/mbx860.c2
-rw-r--r--drivers/mtd/maps/netsc520.c2
-rw-r--r--drivers/mtd/maps/nettel.c2
-rw-r--r--drivers/mtd/maps/octagon-5066.c1
-rw-r--r--drivers/mtd/maps/omap-toto-flash.c2
-rw-r--r--drivers/mtd/maps/pci.c2
-rw-r--r--drivers/mtd/maps/pcmciamtd.c5
-rw-r--r--drivers/mtd/maps/physmap.c24
-rw-r--r--drivers/mtd/maps/plat-ram.c2
-rw-r--r--drivers/mtd/maps/redwood.c2
-rw-r--r--drivers/mtd/maps/rpxlite.c2
-rw-r--r--drivers/mtd/maps/sa1100-flash.c2
-rw-r--r--drivers/mtd/maps/sbc8240.c3
-rw-r--r--drivers/mtd/maps/sbc_gxx.c2
-rw-r--r--drivers/mtd/maps/sc520cdp.c2
-rw-r--r--drivers/mtd/maps/scb2_flash.c1
-rw-r--r--drivers/mtd/maps/scx200_docflash.c2
-rw-r--r--drivers/mtd/maps/sharpsl-flash.c2
-rw-r--r--drivers/mtd/maps/solutionengine.c2
-rw-r--r--drivers/mtd/maps/sun_uflash.c2
-rw-r--r--drivers/mtd/maps/tqm8xxl.c2
-rw-r--r--drivers/mtd/maps/ts5500_flash.c2
-rw-r--r--drivers/mtd/maps/tsunami_flash.c1
-rw-r--r--drivers/mtd/maps/uclinux.c2
-rw-r--r--drivers/mtd/maps/vmax301.c1
-rw-r--r--drivers/mtd/maps/walnut.c2
-rw-r--r--drivers/mtd/maps/wr_sbc82xx_flash.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c34
-rw-r--r--drivers/mtd/mtdblock.c2
-rw-r--r--drivers/mtd/mtdblock_ro.c2
-rw-r--r--drivers/mtd/mtdchar.c24
-rw-r--r--drivers/mtd/mtdconcat.c2
-rw-r--r--drivers/mtd/mtdcore.c14
-rw-r--r--drivers/mtd/mtdpart.c448
-rw-r--r--drivers/mtd/nand/Kconfig28
-rw-r--r--drivers/mtd/nand/Makefile3
-rw-r--r--drivers/mtd/nand/atmel_nand.c (renamed from drivers/mtd/nand/at91_nand.c)278
-rw-r--r--drivers/mtd/nand/atmel_nand_ecc.h36
-rw-r--r--drivers/mtd/nand/au1550nd.c4
-rw-r--r--drivers/mtd/nand/autcpu12.c2
-rw-r--r--drivers/mtd/nand/cafe_nand.c6
-rw-r--r--drivers/mtd/nand/diskonchip.c4
-rw-r--r--drivers/mtd/nand/edb7312.c2
-rw-r--r--drivers/mtd/nand/excite_nandflash.c2
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c63
-rw-r--r--drivers/mtd/nand/h1910.c2
-rw-r--r--drivers/mtd/nand/nand_base.c87
-rw-r--r--drivers/mtd/nand/nand_bbt.c2
-rw-r--r--drivers/mtd/nand/nand_ecc.c2
-rw-r--r--drivers/mtd/nand/nand_ids.c2
-rw-r--r--drivers/mtd/nand/nandsim.c41
-rw-r--r--drivers/mtd/nand/ppchameleonevb.c2
-rw-r--r--drivers/mtd/nand/rtc_from4.c2
-rw-r--r--drivers/mtd/nand/s3c2410.c168
-rw-r--r--drivers/mtd/nand/sharpsl.c2
-rw-r--r--drivers/mtd/nand/spia.c2
-rw-r--r--drivers/mtd/nand/toto.c2
-rw-r--r--drivers/mtd/nand/ts7250.c2
-rw-r--r--drivers/mtd/nftlcore.c5
-rw-r--r--drivers/mtd/nftlmount.c4
-rw-r--r--drivers/mtd/onenand/onenand_base.c54
-rw-r--r--drivers/mtd/redboot.c2
-rw-r--r--drivers/mtd/rfd_ftl.c2
-rw-r--r--drivers/net/arm/ep93xx_eth.c4
-rw-r--r--drivers/net/bnx2x_main.c14
-rw-r--r--drivers/net/cassini.c12
-rw-r--r--drivers/net/cxgb3/sge.c2
-rw-r--r--drivers/net/e100.c2
-rw-r--r--drivers/net/e1000e/ethtool.c4
-rw-r--r--drivers/net/e1000e/netdev.c11
-rw-r--r--drivers/net/ibmveth.c38
-rw-r--r--drivers/net/iseries_veth.c4
-rw-r--r--drivers/net/mlx4/alloc.c1
-rw-r--r--drivers/net/mlx4/catas.c1
-rw-r--r--drivers/net/mlx4/cmd.c2
-rw-r--r--drivers/net/mlx4/cq.c2
-rw-r--r--drivers/net/mlx4/eq.c4
-rw-r--r--drivers/net/mlx4/fw.c2
-rw-r--r--drivers/net/mlx4/fw.h2
-rw-r--r--drivers/net/mlx4/icm.c2
-rw-r--r--drivers/net/mlx4/icm.h2
-rw-r--r--drivers/net/mlx4/intf.c1
-rw-r--r--drivers/net/mlx4/main.c2
-rw-r--r--drivers/net/mlx4/mcg.c1
-rw-r--r--drivers/net/mlx4/mlx4.h2
-rw-r--r--drivers/net/mlx4/mr.c2
-rw-r--r--drivers/net/mlx4/qp.c2
-rw-r--r--drivers/net/mlx4/reset.c1
-rw-r--r--drivers/net/mlx4/srq.c1
-rw-r--r--drivers/net/pasemi_mac.c6
-rw-r--r--drivers/net/ppp_generic.c6
-rw-r--r--drivers/net/qla3xxx.c12
-rw-r--r--drivers/net/s2io.c48
-rw-r--r--drivers/net/sfc/rx.c4
-rw-r--r--drivers/net/sfc/tx.c7
-rw-r--r--drivers/net/spider_net.c4
-rw-r--r--drivers/net/tc35815.c4
-rw-r--r--drivers/net/wireless/ath5k/base.c4
-rw-r--r--drivers/parport/ieee1284.c2
-rw-r--r--drivers/parport/parport_cs.c2
-rw-r--r--drivers/parport/parport_pc.c2
-rw-r--r--drivers/parport/procfs.c3
-rw-r--r--drivers/pcmcia/soc_common.c12
-rw-r--r--drivers/pnp/base.h1
-rw-r--r--drivers/pnp/card.c6
-rw-r--r--drivers/pnp/quirks.c13
-rw-r--r--drivers/s390/kvm/Makefile2
-rw-r--r--drivers/s390/net/qeth_core_main.c14
-rw-r--r--drivers/s390/net/qeth_l2_main.c26
-rw-r--r--drivers/s390/net/qeth_l3_main.c30
-rw-r--r--drivers/scsi/3w-9xxx.c40
-rw-r--r--drivers/scsi/3w-9xxx.h9
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/advansys.c2
-rw-r--r--drivers/scsi/aha152x.c12
-rw-r--r--drivers/scsi/aic94xx/aic94xx.h4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c46
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_tmf.c18
-rw-r--r--drivers/scsi/arm/fas216.c4
-rw-r--r--drivers/scsi/ch.c1
-rw-r--r--drivers/scsi/device_handler/Kconfig8
-rw-r--r--drivers/scsi/device_handler/Makefile1
-rw-r--r--drivers/scsi/device_handler/scsi_dh.c446
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c802
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c644
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c348
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c262
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c208
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h44
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c4
-rw-r--r--drivers/scsi/ibmvscsi/ibmvstgt.c4
-rw-r--r--drivers/scsi/ibmvscsi/rpa_vscsi.c2
-rw-r--r--drivers/scsi/imm.c2
-rw-r--r--drivers/scsi/ipr.h6
-rw-r--r--drivers/scsi/libsas/sas_ata.c16
-rw-r--r--drivers/scsi/libsas/sas_expander.c12
-rw-r--r--drivers/scsi/libsas/sas_port.c4
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c30
-rw-r--r--drivers/scsi/libsrp.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c6
-rw-r--r--drivers/scsi/megaraid/mega_common.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c16
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c4
-rw-r--r--drivers/scsi/nsp32.c4
-rw-r--r--drivers/scsi/nsp32_debug.c2
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c4
-rw-r--r--drivers/scsi/pcmcia/nsp_debug.c2
-rw-r--r--drivers/scsi/ppa.c2
-rw-r--r--drivers/scsi/qla1280.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c118
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h12
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c133
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c94
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c48
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c4
-rw-r--r--drivers/scsi/scsi.c55
-rw-r--r--drivers/scsi/scsi_debug.c12
-rw-r--r--drivers/scsi/scsi_devinfo.c6
-rw-r--r--drivers/scsi/scsi_error.c34
-rw-r--r--drivers/scsi/scsi_lib.c55
-rw-r--r--drivers/scsi/scsi_netlink.c8
-rw-r--r--drivers/scsi/scsi_priv.h1
-rw-r--r--drivers/scsi/scsi_proc.c4
-rw-r--r--drivers/scsi/scsi_scan.c13
-rw-r--r--drivers/scsi/scsi_sysfs.c4
-rw-r--r--drivers/scsi/scsi_tgt_priv.h2
-rw-r--r--drivers/scsi/scsi_transport_fc.c12
-rw-r--r--drivers/scsi/scsi_transport_sas.c4
-rw-r--r--drivers/scsi/sd.c291
-rw-r--r--drivers/scsi/sd.h54
-rw-r--r--drivers/scsi/sd_dif.c538
-rw-r--r--drivers/scsi/st.c11
-rw-r--r--drivers/scsi/stex.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c2
-rw-r--r--drivers/scsi/tmscsim.c8
-rw-r--r--drivers/scsi/wd7000.c8
-rw-r--r--drivers/scsi/zalon.c8
-rw-r--r--drivers/spi/atmel_spi.c4
-rw-r--r--drivers/spi/au1550_spi.c6
-rw-r--r--drivers/spi/omap2_mcspi.c4
-rw-r--r--drivers/spi/pxa2xx_spi.c4
-rw-r--r--drivers/spi/spi_imx.c6
-rw-r--r--drivers/usb/mon/mon_text.c4
-rw-r--r--drivers/usb/serial/ipaq.c10
-rw-r--r--drivers/video/am200epd.c2
-rw-r--r--drivers/video/console/sticon.c2
-rw-r--r--drivers/video/console/sticore.c33
-rw-r--r--drivers/video/fbmem.c1
-rw-r--r--drivers/video/macfb.c2
-rw-r--r--drivers/video/omap/sossi.c2
-rw-r--r--drivers/video/pxafb.c2
-rw-r--r--drivers/video/sticore.h2
-rw-r--r--drivers/video/stifb.c6
378 files changed, 31470 insertions, 3685 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d592dbb1d12a..b7f2963693a7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -272,6 +272,8 @@ static atomic_t c3_cpu_count;
/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
+ /* Don't trace irqs off for idle */
+ stop_critical_timings();
if (cstate->entry_method == ACPI_CSTATE_FFH) {
/* Call into architectural FFH based C-state */
acpi_processor_ffh_cstate_enter(cstate);
@@ -284,6 +286,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
gets asserted in time to freeze execution properly. */
unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
+ start_critical_timings();
}
#endif /* !CONFIG_CPU_IDLE */
@@ -1418,6 +1421,8 @@ static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
*/
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
+ /* Don't trace irqs off for idle */
+ stop_critical_timings();
if (cx->entry_method == ACPI_CSTATE_FFH) {
/* Call into architectural FFH based C-state */
acpi_processor_ffh_cstate_enter(cx);
@@ -1432,6 +1437,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
gets asserted in time to freeze execution properly. */
unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
+ start_critical_timings();
}
/**
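The two processor_idle.c hunks above bracket C-state entry with stop_critical_timings()/start_critical_timings() so the irqsoff latency tracer does not account time spent idle as an interrupts-off critical section. A minimal sketch of the same bracketing in a hypothetical idle helper (my_cpu_idle() is an illustrative name, not part of the patch):

#include <linux/irqflags.h>     /* start/stop_critical_timings(); no-ops unless a latency tracer is built in */

static void my_cpu_idle(void)
{
        stop_critical_timings();        /* don't trace irqs-off time spent idle */
        /* ... architectural C-state entry would go here (FFH/MWAIT or an I/O port read) ... */
        start_critical_timings();
}
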
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index de8d186f5abf..2014253f6c88 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -169,7 +169,7 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq)
- set_irq_type(irq, IRQT_RISING);
+ set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
/* Setup expansion bus chip selects */
*data->cs0_cfg = data->cs0_bits;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 7d5c63c81a59..068aa1c9538c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -116,12 +116,10 @@ static void device_release(struct kobject *kobj)
dev->type->release(dev);
else if (dev->class && dev->class->dev_release)
dev->class->dev_release(dev);
- else {
- printk(KERN_ERR "Device '%s' does not have a release() "
+ else
+ WARN(1, KERN_ERR "Device '%s' does not have a release() "
"function, it is broken and must be fixed.\n",
dev->bus_id);
- WARN_ON(1);
- }
}
static struct kobj_type device_ktype = {
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index d2222397a401..efd577574948 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/dma-mapping.h>
#include <linux/isa.h>
static struct device isa_bus = {
@@ -141,6 +142,9 @@ int isa_register_driver(struct isa_driver *isa_driver, unsigned int ndev)
isa_dev->dev.release = isa_dev_release;
isa_dev->id = id;
+ isa_dev->dev.coherent_dma_mask = DMA_24BIT_MASK;
+ isa_dev->dev.dma_mask = &isa_dev->dev.coherent_dma_mask;
+
error = device_register(&isa_dev->dev);
if (error) {
put_device(&isa_dev->dev);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 855ed1a9f97b..3ad49a00029f 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -204,9 +204,8 @@ memory_block_action(struct memory_block *mem, unsigned long action)
}
break;
default:
- printk(KERN_WARNING "%s(%p, %ld) unknown action: %ld\n",
+ WARN(1, KERN_WARNING "%s(%p, %ld) unknown action: %ld\n",
__func__, mem, action, action);
- WARN_ON(1);
ret = -EINVAL;
}
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 40fc14f03540..75dd6e22faff 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -168,19 +168,16 @@ int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv)
int err = 0;
if (!cls) {
- printk(KERN_WARNING "sysdev: invalid class passed to "
+ WARN(1, KERN_WARNING "sysdev: invalid class passed to "
"sysdev_driver_register!\n");
- WARN_ON(1);
return -EINVAL;
}
/* Check whether this driver has already been added to a class. */
- if (drv->entry.next && !list_empty(&drv->entry)) {
- printk(KERN_WARNING "sysdev: class %s: driver (%p) has already"
+ if (drv->entry.next && !list_empty(&drv->entry))
+ WARN(1, KERN_WARNING "sysdev: class %s: driver (%p) has already"
" been registered to a class, something is wrong, but "
"will forge on!\n", cls->name, drv);
- WARN_ON(1);
- }
mutex_lock(&sysdev_drivers_lock);
if (cls && kset_get(&cls->kset)) {
@@ -194,8 +191,7 @@ int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv)
}
} else {
err = -EINVAL;
- printk(KERN_ERR "%s: invalid device class\n", __func__);
- WARN_ON(1);
+ WARN(1, KERN_ERR "%s: invalid device class\n", __func__);
}
mutex_unlock(&sysdev_drivers_lock);
return err;
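The drivers/base hunks above fold the open-coded printk() + WARN_ON(1) pairs into a single WARN() call, which emits the formatted message together with the backtrace. A minimal before/after sketch on a made-up error path (register_thing() is illustrative only):

#include <linux/kernel.h>
#include <linux/bug.h>          /* WARN(), WARN_ON() */
#include <linux/errno.h>

static int register_thing(const char *name)
{
        if (!name) {
                /* old style:
                 *      printk(KERN_WARNING "thing has no name!\n");
                 *      WARN_ON(1);
                 * new style: one call, message embedded in the warning itself
                 */
                WARN(1, KERN_WARNING "thing has no name!\n");
                return -EINVAL;
        }
        return 0;
}
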
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 19b88504e960..ca7c72a486b2 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -304,9 +304,9 @@ static ssize_t dsp56k_write(struct file *file, const char __user *buf, size_t co
}
static long dsp56k_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg)
{
- int dev = iminor(inode) & 0x0f;
+ int dev = iminor(file->f_path.dentry->d_inode) & 0x0f;
void __user *argp = (void __user *)arg;
switch(dev)
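The dsp56k hunk is part of moving the driver to unlocked_ioctl(): the handler no longer receives the inode as a parameter, so it recovers it from the struct file to get the minor number. A one-function sketch of that lookup (my_ioctl() is a placeholder; recent kernels would use file_inode(file) instead):

#include <linux/fs.h>           /* struct file, iminor() */

static long my_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct inode *inode = file->f_path.dentry->d_inode;    /* as in the patch */

        /* cmd/arg unused in this sketch; dispatch on the sub-device number only */
        return iminor(inode) & 0x0f;
}
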
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index f9f72a211292..006be92ee3f3 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -125,15 +125,15 @@ static ssize_t flash_read(struct file *file, char __user *buf, size_t size,
ssize_t ret;
if (flashdebug)
- printk(KERN_DEBUG "flash_read: flash_read: offset=0x%lX, "
- "buffer=%p, count=0x%X.\n", p, buf, count);
+ printk(KERN_DEBUG "flash_read: flash_read: offset=0x%llx, "
+ "buffer=%p, count=0x%zx.\n", *ppos, buf, size);
/*
* We now lock against reads and writes. --rmk
*/
if (mutex_lock_interruptible(&nwflash_mutex))
return -ERESTARTSYS;
- ret = simple_read_from_buffer(buf, size, ppos, FLASH_BASE, gbFlashSize);
+ ret = simple_read_from_buffer(buf, size, ppos, (void *)FLASH_BASE, gbFlashSize);
mutex_unlock(&nwflash_mutex);
return ret;
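The nwflash hunks correct the debug printk format specifiers (%llx for the loff_t offset, %zx for the size) and route the copy-out through simple_read_from_buffer(), which does the offset clamping and copy_to_user() in one helper. A small sketch of a read() method built on it (my_read()/my_data are placeholder names):

#include <linux/fs.h>           /* simple_read_from_buffer() */

static char my_data[4096];      /* the in-kernel buffer being exposed */

static ssize_t my_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        /* clamps count to what is left, copies to user space, advances *ppos */
        return simple_read_from_buffer(buf, count, ppos, my_data, sizeof(my_data));
}
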
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index dbefbb30ed44..d9799e2bcfbf 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -144,6 +144,7 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+static void rtc_get_rtc_time(struct rtc_time *rtc_tm);
#ifdef RTC_IRQ
static unsigned int rtc_poll(struct file *file, poll_table *wait);
@@ -235,7 +236,7 @@ static inline unsigned char rtc_is_updating(void)
* (See ./arch/XXXX/kernel/time.c for the set_rtc_mmss() function.)
*/
-irqreturn_t rtc_interrupt(int irq, void *dev_id)
+static irqreturn_t rtc_interrupt(int irq, void *dev_id)
{
/*
* Can be an alarm interrupt, update complete interrupt,
@@ -1303,7 +1304,7 @@ static int rtc_proc_open(struct inode *inode, struct file *file)
}
#endif
-void rtc_get_rtc_time(struct rtc_time *rtc_tm)
+static void rtc_get_rtc_time(struct rtc_time *rtc_tm)
{
unsigned long uip_watchdog = jiffies, flags;
unsigned char ctrl;
diff --git a/drivers/char/ser_a2232.c b/drivers/char/ser_a2232.c
index 4ba3aec9e1cd..7b0c35207d9b 100644
--- a/drivers/char/ser_a2232.c
+++ b/drivers/char/ser_a2232.c
@@ -192,7 +192,7 @@ static inline void a2232_receive_char(struct a2232_port *port, int ch, int err)
Maybe one could implement a more efficient version by not only
transferring one character at a time.
*/
- struct tty_struct *tty = port->gs.tty;
+ struct tty_struct *tty = port->gs.port.tty;
#if 0
switch(err) {
@@ -226,7 +226,7 @@ static void a2232_disable_tx_interrupts(void *ptr)
/* Does this here really have to be? */
local_irq_save(flags);
- port->gs.flags &= ~GS_TX_INTEN;
+ port->gs.port.flags &= ~GS_TX_INTEN;
local_irq_restore(flags);
}
@@ -242,7 +242,7 @@ static void a2232_enable_tx_interrupts(void *ptr)
/* Does this here really have to be? */
local_irq_save(flags);
- port->gs.flags |= GS_TX_INTEN;
+ port->gs.port.flags |= GS_TX_INTEN;
local_irq_restore(flags);
}
@@ -276,9 +276,9 @@ static void a2232_shutdown_port(void *ptr)
local_irq_save(flags);
- port->gs.flags &= ~GS_ACTIVE;
+ port->gs.port.flags &= ~GS_ACTIVE;
- if (port->gs.tty && port->gs.tty->termios->c_cflag & HUPCL) {
+ if (port->gs.port.tty && port->gs.port.tty->termios->c_cflag & HUPCL) {
/* Set DTR and RTS to Low, flush output.
The NetBSD driver "msc.c" does it this way. */
stat->Command = ( (stat->Command & ~A2232CMD_CMask) |
@@ -309,7 +309,7 @@ static int a2232_set_real_termios(void *ptr)
volatile struct a2232status *status;
volatile struct a2232memory *mem;
- if (!port->gs.tty || !port->gs.tty->termios) return 0;
+ if (!port->gs.port.tty || !port->gs.port.tty->termios) return 0;
status = a2232stat(port->which_a2232, port->which_port_on_a2232);
mem = a2232mem(port->which_a2232);
@@ -345,7 +345,7 @@ static int a2232_set_real_termios(void *ptr)
}
a2232_param |= rate;
- cflag = port->gs.tty->termios->c_cflag;
+ cflag = port->gs.port.tty->termios->c_cflag;
// get character size
chsize = cflag & CSIZE;
@@ -382,7 +382,7 @@ static int a2232_set_real_termios(void *ptr)
the conventional way of inserting START/STOP characters
by hand in throttle()/unthrottle().
*/
- softflow = !!( port->gs.tty->termios->c_iflag & IXOFF );
+ softflow = !!( port->gs.port.tty->termios->c_iflag & IXOFF );
// get Parity (Enabled/Disabled? If Enabled, Odd or Even?)
parity = cflag & (PARENB | PARODD);
@@ -400,9 +400,9 @@ static int a2232_set_real_termios(void *ptr)
/* Hmm. Maybe an own a2232_port structure
member would be cleaner? */
if (cflag & CLOCAL)
- port->gs.flags &= ~ASYNC_CHECK_CD;
+ port->gs.port.flags &= ~ASYNC_CHECK_CD;
else
- port->gs.flags |= ASYNC_CHECK_CD;
+ port->gs.port.flags |= ASYNC_CHECK_CD;
/* Now we have all parameters and can go to set them: */
@@ -482,18 +482,18 @@ static int a2232_open(struct tty_struct * tty, struct file * filp)
port = &a2232_ports[line];
tty->driver_data = port;
- port->gs.tty = tty;
- port->gs.count++;
+ port->gs.port.tty = tty;
+ port->gs.port.count++;
retval = gs_init_port(&port->gs);
if (retval) {
- port->gs.count--;
+ port->gs.port.count--;
return retval;
}
- port->gs.flags |= GS_ACTIVE;
+ port->gs.port.flags |= GS_ACTIVE;
retval = gs_block_til_ready(port, filp);
if (retval) {
- port->gs.count--;
+ port->gs.port.count--;
return retval;
}
@@ -522,7 +522,7 @@ int ch, err, n, p;
for (p = 0; p < NUMLINES; p++){ /* for every port on this board */
err = 0;
port = &a2232_ports[n*NUMLINES+p];
- if ( port->gs.flags & GS_ACTIVE ){ /* if the port is used */
+ if ( port->gs.port.flags & GS_ACTIVE ){ /* if the port is used */
status = a2232stat(n,p);
@@ -577,8 +577,8 @@ int ch, err, n, p;
obuf = mem->OutBuf[p];
bufpos = status->OutHead;
while ( (port->gs.xmit_cnt > 0) &&
- (!port->gs.tty->stopped) &&
- (!port->gs.tty->hw_stopped) ){ /* While there are chars to transmit */
+ (!port->gs.port.tty->stopped) &&
+ (!port->gs.port.tty->hw_stopped) ){ /* While there are chars to transmit */
if (((bufpos+1) & A2232_IOBUFLENMASK) != status->OutTail) { /* If the A2232 buffer is not full */
ch = port->gs.xmit_buf[port->gs.xmit_tail]; /* get the next char to transmit */
port->gs.xmit_tail = (port->gs.xmit_tail+1) & (SERIAL_XMIT_SIZE-1); /* modulo-addition for the gs.xmit_buf ring-buffer */
@@ -592,8 +592,8 @@ int ch, err, n, p;
status->OutHead = bufpos;
/* WakeUp if output buffer runs low */
- if ((port->gs.xmit_cnt <= port->gs.wakeup_chars) && port->gs.tty) {
- tty_wakeup(port->gs.tty);
+ if ((port->gs.xmit_cnt <= port->gs.wakeup_chars) && port->gs.port.tty) {
+ tty_wakeup(port->gs.port.tty);
}
} // if the port is used
} // for every port on the board
@@ -613,16 +613,16 @@ int ch, err, n, p;
struct a2232_port *port = &a2232_ports[n*7+p];
port->cd_status = !(ncd & 1); /* ncd&1 <=> CD is now off */
- if (!(port->gs.flags & ASYNC_CHECK_CD))
+ if (!(port->gs.port.flags & ASYNC_CHECK_CD))
; /* Don't report DCD changes */
else if (port->cd_status) { // if DCD on: DCD went UP!
/* Are we blocking in open?*/
- wake_up_interruptible(&port->gs.open_wait);
+ wake_up_interruptible(&port->gs.port.open_wait);
}
else { // if DCD off: DCD went DOWN!
- if (port->gs.tty)
- tty_hangup (port->gs.tty);
+ if (port->gs.port.tty)
+ tty_hangup (port->gs.port.tty);
}
} // if CD changed for this port
@@ -655,8 +655,8 @@ static void a2232_init_portstructs(void)
#ifdef NEW_WRITE_LOCKING
mutex_init(&(port->gs.port_write_mutex));
#endif
- init_waitqueue_head(&port->gs.open_wait);
- init_waitqueue_head(&port->gs.close_wait);
+ init_waitqueue_head(&port->gs.port.open_wait);
+ init_waitqueue_head(&port->gs.port.close_wait);
}
}
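The ser_a2232 hunks (and the vme_scc ones further down) track the generic_serial conversion to the shared struct tty_port: the tty pointer, open count, flags and wait queues now live in a tty_port embedded in struct gs_port, hence the gs.X -> gs.port.X renames. A rough sketch of that embedding with a made-up driver structure:

#include <linux/tty.h>          /* struct tty_struct, struct tty_port */
#include <linux/wait.h>

struct my_serial_port {
        struct tty_port port;   /* common state: tty, count, flags, open_wait, ... */
        int my_private_state;   /* driver-specific fields follow */
};

static void my_port_init(struct my_serial_port *p)
{
        init_waitqueue_head(&p->port.open_wait);        /* was &p->open_wait before the conversion */
        init_waitqueue_head(&p->port.close_wait);
}

static int my_port_open(struct my_serial_port *p, struct tty_struct *tty)
{
        p->port.tty = tty;      /* bookkeeping moved into the embedded tty_port */
        p->port.count++;
        return 0;
}
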
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index e1fc193d9396..ae766d868454 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -580,91 +580,133 @@ void tpm_continue_selftest(struct tpm_chip *chip)
}
EXPORT_SYMBOL_GPL(tpm_continue_selftest);
+#define TPM_INTERNAL_RESULT_SIZE 200
+
ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr,
char *buf)
{
- u8 data[max_t(int, ARRAY_SIZE(tpm_cap), 35)];
+ u8 *data;
ssize_t rc;
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL)
return -ENODEV;
+ data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
memcpy(data, tpm_cap, sizeof(tpm_cap));
data[TPM_CAP_IDX] = TPM_CAP_FLAG;
data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_FLAG_PERM;
- rc = transmit_cmd(chip, data, sizeof(data),
- "attemtping to determine the permanent state");
- if (rc)
+ rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
+ "attemtping to determine the permanent enabled state");
+ if (rc) {
+ kfree(data);
return 0;
- return sprintf(buf, "%d\n", !data[TPM_GET_CAP_PERM_DISABLE_IDX]);
+ }
+
+ rc = sprintf(buf, "%d\n", !data[TPM_GET_CAP_PERM_DISABLE_IDX]);
+
+ kfree(data);
+ return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_enabled);
ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr,
char *buf)
{
- u8 data[max_t(int, ARRAY_SIZE(tpm_cap), 35)];
+ u8 *data;
ssize_t rc;
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL)
return -ENODEV;
+ data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
memcpy(data, tpm_cap, sizeof(tpm_cap));
data[TPM_CAP_IDX] = TPM_CAP_FLAG;
data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_FLAG_PERM;
- rc = transmit_cmd(chip, data, sizeof(data),
- "attemtping to determine the permanent state");
- if (rc)
+ rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
+ "attemtping to determine the permanent active state");
+ if (rc) {
+ kfree(data);
return 0;
- return sprintf(buf, "%d\n", !data[TPM_GET_CAP_PERM_INACTIVE_IDX]);
+ }
+
+ rc = sprintf(buf, "%d\n", !data[TPM_GET_CAP_PERM_INACTIVE_IDX]);
+
+ kfree(data);
+ return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_active);
ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr,
char *buf)
{
- u8 data[sizeof(tpm_cap)];
+ u8 *data;
ssize_t rc;
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL)
return -ENODEV;
+ data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
memcpy(data, tpm_cap, sizeof(tpm_cap));
data[TPM_CAP_IDX] = TPM_CAP_PROP;
data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_OWNER;
- rc = transmit_cmd(chip, data, sizeof(data),
+ rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the owner state");
- if (rc)
+ if (rc) {
+ kfree(data);
return 0;
- return sprintf(buf, "%d\n", data[TPM_GET_CAP_RET_BOOL_1_IDX]);
+ }
+
+ rc = sprintf(buf, "%d\n", data[TPM_GET_CAP_RET_BOOL_1_IDX]);
+
+ kfree(data);
+ return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_owned);
ssize_t tpm_show_temp_deactivated(struct device * dev,
struct device_attribute * attr, char *buf)
{
- u8 data[sizeof(tpm_cap)];
+ u8 *data;
ssize_t rc;
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL)
return -ENODEV;
+ data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
memcpy(data, tpm_cap, sizeof(tpm_cap));
data[TPM_CAP_IDX] = TPM_CAP_FLAG;
data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_FLAG_VOL;
- rc = transmit_cmd(chip, data, sizeof(data),
+ rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the temporary state");
- if (rc)
+ if (rc) {
+ kfree(data);
return 0;
- return sprintf(buf, "%d\n", data[TPM_GET_CAP_TEMP_INACTIVE_IDX]);
+ }
+
+ rc = sprintf(buf, "%d\n", data[TPM_GET_CAP_TEMP_INACTIVE_IDX]);
+
+ kfree(data);
+ return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_temp_deactivated);
@@ -678,7 +720,7 @@ static const u8 pcrread[] = {
ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr,
char *buf)
{
- u8 data[max_t(int, max(ARRAY_SIZE(tpm_cap), ARRAY_SIZE(pcrread)), 30)];
+ u8 *data;
ssize_t rc;
int i, j, num_pcrs;
__be32 index;
@@ -688,21 +730,27 @@ ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr,
if (chip == NULL)
return -ENODEV;
+ data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
memcpy(data, tpm_cap, sizeof(tpm_cap));
data[TPM_CAP_IDX] = TPM_CAP_PROP;
data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_PCR;
- rc = transmit_cmd(chip, data, sizeof(data),
+ rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the number of PCRS");
- if (rc)
+ if (rc) {
+ kfree(data);
return 0;
+ }
num_pcrs = be32_to_cpu(*((__be32 *) (data + 14)));
for (i = 0; i < num_pcrs; i++) {
memcpy(data, pcrread, sizeof(pcrread));
index = cpu_to_be32(i);
memcpy(data + 10, &index, 4);
- rc = transmit_cmd(chip, data, sizeof(data),
+ rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
"attempting to read a PCR");
if (rc)
goto out;
@@ -712,6 +760,7 @@ ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr,
str += sprintf(str, "\n");
}
out:
+ kfree(data);
return str - buf;
}
EXPORT_SYMBOL_GPL(tpm_show_pcrs);
@@ -795,7 +844,7 @@ static const u8 cap_version[] = {
ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr,
char *buf)
{
- u8 data[max_t(int, max(ARRAY_SIZE(tpm_cap), ARRAY_SIZE(cap_version)), 30)];
+ u8 *data;
ssize_t rc;
char *str = buf;
@@ -803,21 +852,27 @@ ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr,
if (chip == NULL)
return -ENODEV;
+ data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
memcpy(data, tpm_cap, sizeof(tpm_cap));
data[TPM_CAP_IDX] = TPM_CAP_PROP;
data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_MANUFACTURER;
- rc = transmit_cmd(chip, data, sizeof(data),
+ rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the manufacturer");
- if (rc)
+ if (rc) {
+ kfree(data);
return 0;
+ }
str += sprintf(str, "Manufacturer: 0x%x\n",
be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_UINT32_1_IDX))));
memcpy(data, cap_version, sizeof(cap_version));
data[CAP_VERSION_IDX] = CAP_VERSION_1_1;
- rc = transmit_cmd(chip, data, sizeof(data),
+ rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the 1.1 version");
if (rc)
goto out;
@@ -828,6 +883,7 @@ ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr,
(int) data[17]);
out:
+ kfree(data);
return str - buf;
}
EXPORT_SYMBOL_GPL(tpm_show_caps);
@@ -835,7 +891,7 @@ EXPORT_SYMBOL_GPL(tpm_show_caps);
ssize_t tpm_show_caps_1_2(struct device * dev,
struct device_attribute * attr, char *buf)
{
- u8 data[max_t(int, max(ARRAY_SIZE(tpm_cap), ARRAY_SIZE(cap_version)), 30)];
+ u8 *data;
ssize_t len;
char *str = buf;
@@ -843,15 +899,20 @@ ssize_t tpm_show_caps_1_2(struct device * dev,
if (chip == NULL)
return -ENODEV;
+ data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
memcpy(data, tpm_cap, sizeof(tpm_cap));
data[TPM_CAP_IDX] = TPM_CAP_PROP;
data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_MANUFACTURER;
- if ((len = tpm_transmit(chip, data, sizeof(data))) <=
- TPM_ERROR_SIZE) {
+ len = tpm_transmit(chip, data, TPM_INTERNAL_RESULT_SIZE);
+ if (len <= TPM_ERROR_SIZE) {
dev_dbg(chip->dev, "A TPM error (%d) occurred "
"attempting to determine the manufacturer\n",
be32_to_cpu(*((__be32 *) (data + TPM_RET_CODE_IDX))));
+ kfree(data);
return 0;
}
@@ -861,8 +922,8 @@ ssize_t tpm_show_caps_1_2(struct device * dev,
memcpy(data, cap_version, sizeof(cap_version));
data[CAP_VERSION_IDX] = CAP_VERSION_1_2;
- if ((len = tpm_transmit(chip, data, sizeof(data))) <=
- TPM_ERROR_SIZE) {
+ len = tpm_transmit(chip, data, TPM_INTERNAL_RESULT_SIZE);
+ if (len <= TPM_ERROR_SIZE) {
dev_err(chip->dev, "A TPM error (%d) occurred "
"attempting to determine the 1.2 version\n",
be32_to_cpu(*((__be32 *) (data + TPM_RET_CODE_IDX))));
@@ -874,6 +935,7 @@ ssize_t tpm_show_caps_1_2(struct device * dev,
(int) data[19]);
out:
+ kfree(data);
return str - buf;
}
EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
@@ -966,7 +1028,7 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
size_t size, loff_t *off)
{
struct tpm_chip *chip = file->private_data;
- int in_size = size, out_size;
+ size_t in_size = size, out_size;
/* cannot perform a write until the read has cleared
either via tpm_read or a user_read_timer timeout */
@@ -1001,7 +1063,7 @@ ssize_t tpm_read(struct file *file, char __user *buf,
size_t size, loff_t *off)
{
struct tpm_chip *chip = file->private_data;
- int ret_size;
+ ssize_t ret_size;
del_singleshot_timer_sync(&chip->user_read_timer);
flush_scheduled_work();
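The tpm.c hunks replace the variably sized on-stack command buffers in the sysfs show routines with a kzalloc'd buffer of TPM_INTERNAL_RESULT_SIZE bytes that is freed on every exit path. A condensed sketch of that shape (my_show() is a stand-in name; tpm_cap[], transmit_cmd() and the capability index constants are the driver's own internals from tpm.c/tpm.h):

#include <linux/slab.h>         /* kzalloc(), kfree() */
#include <linux/device.h>
#include <linux/string.h>
/* assumes the driver-internal symbols from tpm.c: struct tpm_chip, tpm_cap[],
 * transmit_cmd(), TPM_GET_CAP_RET_BOOL_1_IDX */

#define TPM_INTERNAL_RESULT_SIZE 200

static ssize_t my_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct tpm_chip *chip = dev_get_drvdata(dev);
        u8 *data;
        ssize_t rc;

        if (chip == NULL)
                return -ENODEV;

        data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);  /* was a sized-on-the-stack array */
        if (!data)
                return -ENOMEM;

        memcpy(data, tpm_cap, sizeof(tpm_cap));
        rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
                          "attempting to query a capability");
        if (rc) {
                kfree(data);            /* every early return now releases the buffer */
                return 0;
        }

        rc = sprintf(buf, "%d\n", data[TPM_GET_CAP_RET_BOOL_1_IDX]);
        kfree(data);
        return rc;
}
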
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
index 60a2d2630e36..68f052b42ed7 100644
--- a/drivers/char/tpm/tpm_bios.c
+++ b/drivers/char/tpm/tpm_bios.c
@@ -448,7 +448,7 @@ out_free:
goto out;
}
-const struct file_operations tpm_ascii_bios_measurements_ops = {
+static const struct file_operations tpm_ascii_bios_measurements_ops = {
.open = tpm_ascii_bios_measurements_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -486,7 +486,7 @@ out_free:
goto out;
}
-const struct file_operations tpm_binary_bios_measurements_ops = {
+static const struct file_operations tpm_binary_bios_measurements_ops = {
.open = tpm_binary_bios_measurements_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index c7a977bc03e8..ed1879c0dd8d 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -622,6 +622,7 @@ static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
{"ATM1200", 0}, /* Atmel */
{"IFX0102", 0}, /* Infineon */
{"BCM0101", 0}, /* Broadcom */
+ {"BCM0102", 0}, /* Broadcom */
{"NSC1200", 0}, /* National */
{"ICO0102", 0}, /* Intel */
/* Add new here */
diff --git a/drivers/char/vme_scc.c b/drivers/char/vme_scc.c
index 69c5afe97f19..1718b3c481db 100644
--- a/drivers/char/vme_scc.c
+++ b/drivers/char/vme_scc.c
@@ -183,8 +183,8 @@ static void scc_init_portstructs(void)
#ifdef NEW_WRITE_LOCKING
port->gs.port_write_mutex = MUTEX;
#endif
- init_waitqueue_head(&port->gs.open_wait);
- init_waitqueue_head(&port->gs.close_wait);
+ init_waitqueue_head(&port->gs.port.open_wait);
+ init_waitqueue_head(&port->gs.port.close_wait);
}
}
@@ -422,7 +422,7 @@ static irqreturn_t scc_rx_int(int irq, void *data)
{
unsigned char ch;
struct scc_port *port = data;
- struct tty_struct *tty = port->gs.tty;
+ struct tty_struct *tty = port->gs.port.tty;
SCC_ACCESS_INIT(port);
ch = SCCread_NB(RX_DATA_REG);
@@ -453,7 +453,7 @@ static irqreturn_t scc_rx_int(int irq, void *data)
static irqreturn_t scc_spcond_int(int irq, void *data)
{
struct scc_port *port = data;
- struct tty_struct *tty = port->gs.tty;
+ struct tty_struct *tty = port->gs.port.tty;
unsigned char stat, ch, err;
int int_pending_mask = port->channel == CHANNEL_A ?
IPR_A_RX : IPR_B_RX;
@@ -500,7 +500,7 @@ static irqreturn_t scc_tx_int(int irq, void *data)
struct scc_port *port = data;
SCC_ACCESS_INIT(port);
- if (!port->gs.tty) {
+ if (!port->gs.port.tty) {
printk(KERN_WARNING "scc_tx_int with NULL tty!\n");
SCCmod (INT_AND_DMA_REG, ~IDR_TX_INT_ENAB, 0);
SCCwrite(COMMAND_REG, CR_TX_PENDING_RESET);
@@ -512,8 +512,9 @@ static irqreturn_t scc_tx_int(int irq, void *data)
SCCwrite(TX_DATA_REG, port->x_char);
port->x_char = 0;
}
- else if ((port->gs.xmit_cnt <= 0) || port->gs.tty->stopped ||
- port->gs.tty->hw_stopped)
+ else if ((port->gs.xmit_cnt <= 0) ||
+ port->gs.port.tty->stopped ||
+ port->gs.port.tty->hw_stopped)
break;
else {
SCCwrite(TX_DATA_REG, port->gs.xmit_buf[port->gs.xmit_tail++]);
@@ -522,15 +523,15 @@ static irqreturn_t scc_tx_int(int irq, void *data)
break;
}
}
- if ((port->gs.xmit_cnt <= 0) || port->gs.tty->stopped ||
- port->gs.tty->hw_stopped) {
+ if ((port->gs.xmit_cnt <= 0) || port->gs.port.tty->stopped ||
+ port->gs.port.tty->hw_stopped) {
/* disable tx interrupts */
SCCmod (INT_AND_DMA_REG, ~IDR_TX_INT_ENAB, 0);
SCCwrite(COMMAND_REG, CR_TX_PENDING_RESET); /* disable tx_int on next tx underrun? */
- port->gs.flags &= ~GS_TX_INTEN;
+ port->gs.port.flags &= ~GS_TX_INTEN;
}
- if (port->gs.tty && port->gs.xmit_cnt <= port->gs.wakeup_chars)
- tty_wakeup(port->gs.tty);
+ if (port->gs.port.tty && port->gs.xmit_cnt <= port->gs.wakeup_chars)
+ tty_wakeup(port->gs.port.tty);
SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET);
return IRQ_HANDLED;
@@ -550,14 +551,14 @@ static irqreturn_t scc_stat_int(int irq, void *data)
if (changed & SR_DCD) {
port->c_dcd = !!(sr & SR_DCD);
- if (!(port->gs.flags & ASYNC_CHECK_CD))
+ if (!(port->gs.port.flags & ASYNC_CHECK_CD))
; /* Don't report DCD changes */
else if (port->c_dcd) {
- wake_up_interruptible(&port->gs.open_wait);
+ wake_up_interruptible(&port->gs.port.open_wait);
}
else {
- if (port->gs.tty)
- tty_hangup (port->gs.tty);
+ if (port->gs.port.tty)
+ tty_hangup (port->gs.port.tty);
}
}
SCCwrite(COMMAND_REG, CR_EXTSTAT_RESET);
@@ -578,7 +579,7 @@ static void scc_disable_tx_interrupts(void *ptr)
local_irq_save(flags);
SCCmod(INT_AND_DMA_REG, ~IDR_TX_INT_ENAB, 0);
- port->gs.flags &= ~GS_TX_INTEN;
+ port->gs.port.flags &= ~GS_TX_INTEN;
local_irq_restore(flags);
}
@@ -636,8 +637,8 @@ static void scc_shutdown_port(void *ptr)
{
struct scc_port *port = ptr;
- port->gs.flags &= ~ GS_ACTIVE;
- if (port->gs.tty && port->gs.tty->termios->c_cflag & HUPCL) {
+ port->gs.port.flags &= ~ GS_ACTIVE;
+ if (port->gs.port.tty && port->gs.port.tty->termios->c_cflag & HUPCL) {
scc_setsignals (port, 0, 0);
}
}
@@ -652,14 +653,14 @@ static int scc_set_real_termios (void *ptr)
struct scc_port *port = ptr;
SCC_ACCESS_INIT(port);
- if (!port->gs.tty || !port->gs.tty->termios) return 0;
+ if (!port->gs.port.tty || !port->gs.port.tty->termios) return 0;
channel = port->channel;
if (channel == CHANNEL_A)
return 0; /* Settings controlled by boot PROM */
- cflag = port->gs.tty->termios->c_cflag;
+ cflag = port->gs.port.tty->termios->c_cflag;
baud = port->gs.baud;
chsize = (cflag & CSIZE) >> 4;
@@ -678,9 +679,9 @@ static int scc_set_real_termios (void *ptr)
}
if (cflag & CLOCAL)
- port->gs.flags &= ~ASYNC_CHECK_CD;
+ port->gs.port.flags &= ~ASYNC_CHECK_CD;
else
- port->gs.flags |= ASYNC_CHECK_CD;
+ port->gs.port.flags |= ASYNC_CHECK_CD;
#ifdef CONFIG_MVME147_SCC
if (MACH_IS_MVME147)
@@ -856,7 +857,7 @@ static int scc_open (struct tty_struct * tty, struct file * filp)
{ COMMAND_REG, CR_EXTSTAT_RESET },
};
#endif
- if (!(port->gs.flags & ASYNC_INITIALIZED)) {
+ if (!(port->gs.port.flags & ASYNC_INITIALIZED)) {
local_irq_save(flags);
#if defined(CONFIG_MVME147_SCC) || defined(CONFIG_MVME162_SCC)
if (MACH_IS_MVME147 || MACH_IS_MVME16x) {
@@ -880,18 +881,18 @@ static int scc_open (struct tty_struct * tty, struct file * filp)
}
tty->driver_data = port;
- port->gs.tty = tty;
- port->gs.count++;
+ port->gs.port.tty = tty;
+ port->gs.port.count++;
retval = gs_init_port(&port->gs);
if (retval) {
- port->gs.count--;
+ port->gs.port.count--;
return retval;
}
- port->gs.flags |= GS_ACTIVE;
+ port->gs.port.flags |= GS_ACTIVE;
retval = gs_block_til_ready(port, filp);
if (retval) {
- port->gs.count--;
+ port->gs.port.count--;
return retval;
}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index b11943dadefd..681c15f42083 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -99,6 +99,9 @@ struct talitos_private {
/* next channel to be assigned next incoming descriptor */
atomic_t last_chan;
+ /* per-channel number of requests pending in channel h/w fifo */
+ atomic_t *submit_count;
+
/* per-channel request fifo */
struct talitos_request **fifo;
@@ -263,15 +266,15 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
spin_lock_irqsave(&priv->head_lock[ch], flags);
- head = priv->head[ch];
- request = &priv->fifo[ch][head];
-
- if (request->desc) {
- /* request queue is full */
+ if (!atomic_inc_not_zero(&priv->submit_count[ch])) {
+ /* h/w fifo is full */
spin_unlock_irqrestore(&priv->head_lock[ch], flags);
return -EAGAIN;
}
+ head = priv->head[ch];
+ request = &priv->fifo[ch][head];
+
/* map descriptor and save caller data */
request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
DMA_BIDIRECTIONAL);
@@ -335,6 +338,9 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1);
spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
+
+ atomic_dec(&priv->submit_count[ch]);
+
saved_req.callback(dev, saved_req.desc, saved_req.context,
status);
/* channel may resume processing in single desc error case */
@@ -842,7 +848,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
/* adjust (decrease) last one (or two) entry's len to cryptlen */
link_tbl_ptr--;
- while (link_tbl_ptr->len <= (-cryptlen)) {
+ while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
/* Empty this entry, and move to previous one */
cryptlen += be16_to_cpu(link_tbl_ptr->len);
link_tbl_ptr->len = 0;
@@ -874,7 +880,7 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
unsigned int cryptlen = areq->cryptlen;
unsigned int authsize = ctx->authsize;
unsigned int ivsize;
- int sg_count;
+ int sg_count, ret;
/* hmac key */
map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
@@ -978,7 +984,12 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
DMA_FROM_DEVICE);
- return talitos_submit(dev, desc, callback, areq);
+ ret = talitos_submit(dev, desc, callback, areq);
+ if (ret != -EINPROGRESS) {
+ ipsec_esp_unmap(dev, edesc, areq);
+ kfree(edesc);
+ }
+ return ret;
}
@@ -1009,6 +1020,8 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
struct ipsec_esp_edesc *edesc;
int src_nents, dst_nents, alloc_len, dma_len;
+ gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) {
dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n");
@@ -1022,7 +1035,7 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
dst_nents = src_nents;
} else {
dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize);
- dst_nents = (dst_nents == 1) ? 0 : src_nents;
+ dst_nents = (dst_nents == 1) ? 0 : dst_nents;
}
/*
@@ -1040,7 +1053,7 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
alloc_len += icv_stashing ? ctx->authsize : 0;
}
- edesc = kmalloc(alloc_len, GFP_DMA);
+ edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
dev_err(ctx->dev, "could not allocate edescriptor\n");
return ERR_PTR(-ENOMEM);
@@ -1337,6 +1350,7 @@ static int __devexit talitos_remove(struct of_device *ofdev)
if (hw_supports(dev, DESC_HDR_SEL0_RNG))
talitos_unregister_rng(dev);
+ kfree(priv->submit_count);
kfree(priv->tail);
kfree(priv->head);
@@ -1466,9 +1480,6 @@ static int talitos_probe(struct of_device *ofdev,
goto err_out;
}
- of_node_put(np);
- np = NULL;
-
priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
GFP_KERNEL);
priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
@@ -1504,6 +1515,16 @@ static int talitos_probe(struct of_device *ofdev,
}
}
+ priv->submit_count = kmalloc(sizeof(atomic_t) * priv->num_channels,
+ GFP_KERNEL);
+ if (!priv->submit_count) {
+ dev_err(dev, "failed to allocate fifo submit count space\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+ for (i = 0; i < priv->num_channels; i++)
+ atomic_set(&priv->submit_count[i], -priv->chfifo_len);
+
priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
if (!priv->head || !priv->tail) {
@@ -1559,8 +1580,6 @@ static int talitos_probe(struct of_device *ofdev,
err_out:
talitos_remove(ofdev);
- if (np)
- of_node_put(np);
return err;
}
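The talitos hunks account for hardware FIFO occupancy with one atomic_t per channel, preset to -chfifo_len: atomic_inc_not_zero() succeeds while slots remain and fails exactly when the counter has climbed back to zero, and each completed request decrements it again; they also unmap and free the descriptor when talitos_submit() returns anything other than -EINPROGRESS. A stripped-down sketch of the counter logic alone (names and the FIFO depth are illustrative):

#include <linux/atomic.h>       /* <asm/atomic.h> in kernels of this vintage */
#include <linux/errno.h>

#define MY_FIFO_DEPTH 24        /* illustrative h/w descriptor fifo depth */

static atomic_t my_submit_count;

static void my_channel_init(void)
{
        /* runs from -MY_FIFO_DEPTH up to 0; 0 means "fifo full" */
        atomic_set(&my_submit_count, -MY_FIFO_DEPTH);
}

static int my_submit(void)
{
        if (!atomic_inc_not_zero(&my_submit_count))
                return -EAGAIN;         /* counter hit 0: no free slot */
        /* ... hand the descriptor to the hardware here ... */
        return -EINPROGRESS;
}

static void my_complete(void)
{
        atomic_dec(&my_submit_count);   /* completion frees a slot */
}
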
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 76f26710fc16..fa6d6abefd4d 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -16,8 +16,13 @@ config FIREWIRE
enable the new stack.
To compile this driver as a module, say M here: the module will be
- called firewire-core. It functionally replaces ieee1394, raw1394,
- and video1394.
+ called firewire-core.
+
+ This module functionally replaces ieee1394, raw1394, and video1394.
+ To access it from application programs, you generally need at least
+ libraw1394 version 2. IIDC/DCAM applications also need libdc1394
+ version 2. No libraries are required to access storage devices
+ through the firewire-sbp2 driver.
config FIREWIRE_OHCI
tristate "OHCI-1394 controllers"
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index da873d795aad..bbd73a406e53 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -539,7 +539,7 @@ fw_core_remove_card(struct fw_card *card)
wait_for_completion(&card->done);
cancel_delayed_work_sync(&card->work);
- fw_flush_transactions(card);
+ WARN_ON(!list_empty(&card->transaction_list));
del_timer_sync(&card->flush_timer);
}
EXPORT_SYMBOL(fw_core_remove_card);
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index c639915fc3cb..bc81d6fcd2fd 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -382,9 +382,9 @@ complete_transaction(struct fw_card *card, int rcode,
response->response.type = FW_CDEV_EVENT_RESPONSE;
response->response.rcode = rcode;
- queue_event(client, &response->event,
- &response->response, sizeof(response->response),
- response->response.data, response->response.length);
+ queue_event(client, &response->event, &response->response,
+ sizeof(response->response) + response->response.length,
+ NULL, 0);
}
static int ioctl_send_request(struct client *client, void *buffer)
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index bcbe794a3ea5..e14c03dc0065 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c
@@ -50,7 +50,7 @@ fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
address = dma_map_page(card->device, buffer->pages[i],
0, PAGE_SIZE, direction);
- if (dma_mapping_error(address)) {
+ if (dma_mapping_error(card->device, address)) {
__free_page(buffer->pages[i]);
goto out_pages;
}
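The firewire hunks here and below adapt to the dma_mapping_error() API change that added a struct device argument, so the check can be routed through that device's DMA ops. A minimal sketch of the map-then-check pattern (my_map_buffer() and its arguments are placeholders):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int my_map_buffer(struct device *dev, void *buf, size_t len, dma_addr_t *out)
{
        dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        /* older kernels: dma_mapping_error(addr); the device pointer is now required */
        if (dma_mapping_error(dev, addr))
                return -ENOMEM;

        *out = addr;
        return 0;
}
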
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 333b12544dd1..251416f2148f 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -171,7 +171,6 @@ struct iso_context {
struct fw_ohci {
struct fw_card card;
- u32 version;
__iomem char *registers;
dma_addr_t self_id_bus;
__le32 *self_id_cpu;
@@ -180,6 +179,8 @@ struct fw_ohci {
int generation;
int request_generation; /* for timestamping incoming requests */
u32 bus_seconds;
+
+ bool use_dualbuffer;
bool old_uninorth;
bool bus_reset_packet_quirk;
@@ -953,7 +954,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
payload_bus =
dma_map_single(ohci->card.device, packet->payload,
packet->payload_length, DMA_TO_DEVICE);
- if (dma_mapping_error(payload_bus)) {
+ if (dma_mapping_error(ohci->card.device, payload_bus)) {
packet->ack = RCODE_SEND_ERROR;
return -1;
}
@@ -1885,7 +1886,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
} else {
mask = &ohci->ir_context_mask;
list = ohci->ir_context_list;
- if (ohci->version >= OHCI_VERSION_1_1)
+ if (ohci->use_dualbuffer)
callback = handle_ir_dualbuffer_packet;
else
callback = handle_ir_packet_per_buffer;
@@ -1949,7 +1950,7 @@ static int ohci_start_iso(struct fw_iso_context *base,
} else {
index = ctx - ohci->ir_context_list;
control = IR_CONTEXT_ISOCH_HEADER;
- if (ohci->version >= OHCI_VERSION_1_1)
+ if (ohci->use_dualbuffer)
control |= IR_CONTEXT_DUAL_BUFFER_MODE;
match = (tags << 28) | (sync << 8) | ctx->base.channel;
if (cycle >= 0) {
@@ -2279,7 +2280,7 @@ ohci_queue_iso(struct fw_iso_context *base,
spin_lock_irqsave(&ctx->context.ohci->lock, flags);
if (base->type == FW_ISO_CONTEXT_TRANSMIT)
retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
- else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
+ else if (ctx->context.ohci->use_dualbuffer)
retval = ohci_queue_iso_receive_dualbuffer(base, packet,
buffer, payload);
else
@@ -2341,7 +2342,7 @@ static int __devinit
pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
struct fw_ohci *ohci;
- u32 bus_options, max_receive, link_speed;
+ u32 bus_options, max_receive, link_speed, version;
u64 guid;
int err;
size_t size;
@@ -2366,12 +2367,6 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
pci_set_drvdata(dev, ohci);
-#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
- ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
- dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
-#endif
- ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
-
spin_lock_init(&ohci->lock);
tasklet_init(&ohci->bus_reset_tasklet,
@@ -2390,6 +2385,23 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
goto fail_iomem;
}
+ version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
+ ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
+
+/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
+#if !defined(CONFIG_X86_32)
+ /* dual-buffer mode is broken with descriptor addresses above 2G */
+ if (dev->vendor == PCI_VENDOR_ID_TI &&
+ dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
+ ohci->use_dualbuffer = false;
+#endif
+
+#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
+ ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
+ dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
+#endif
+ ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
+
ar_context_init(&ohci->ar_request_ctx, ohci,
OHCI1394_AsReqRcvContextControlSet);
@@ -2441,9 +2453,8 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
if (err < 0)
goto fail_self_id;
- ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
- dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff);
+ dev->dev.bus_id, version >> 16, version & 0xff);
return 0;
fail_self_id:
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 53fc5a641e6d..aaff50ebba1d 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -543,7 +543,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
orb->response_bus =
dma_map_single(device->card->device, &orb->response,
sizeof(orb->response), DMA_FROM_DEVICE);
- if (dma_mapping_error(orb->response_bus))
+ if (dma_mapping_error(device->card->device, orb->response_bus))
goto fail_mapping_response;
orb->request.response.high = 0;
@@ -577,7 +577,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
orb->base.request_bus =
dma_map_single(device->card->device, &orb->request,
sizeof(orb->request), DMA_TO_DEVICE);
- if (dma_mapping_error(orb->base.request_bus))
+ if (dma_mapping_error(device->card->device, orb->base.request_bus))
goto fail_mapping_request;
sbp2_send_orb(&orb->base, lu, node_id, generation,
@@ -1424,7 +1424,7 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
orb->page_table_bus =
dma_map_single(device->card->device, orb->page_table,
sizeof(orb->page_table), DMA_TO_DEVICE);
- if (dma_mapping_error(orb->page_table_bus))
+ if (dma_mapping_error(device->card->device, orb->page_table_bus))
goto fail_page_table;
/*
@@ -1509,7 +1509,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
orb->base.request_bus =
dma_map_single(device->card->device, &orb->request,
sizeof(orb->request), DMA_TO_DEVICE);
- if (dma_mapping_error(orb->base.request_bus))
+ if (dma_mapping_error(device->card->device, orb->base.request_bus))
goto out;
sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation,
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index 213b0ff8f3d6..c1b81077c4a8 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -510,8 +510,6 @@ fw_core_handle_bus_reset(struct fw_card *card,
struct fw_node *local_node;
unsigned long flags;
- fw_flush_transactions(card);
-
spin_lock_irqsave(&card->lock, flags);
/*
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 40db80752272..e5d1a0b64fcf 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
@@ -151,7 +152,7 @@ transmit_complete_callback(struct fw_packet *packet,
static void
fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
- int node_id, int source_id, int generation, int speed,
+ int destination_id, int source_id, int generation, int speed,
unsigned long long offset, void *payload, size_t length)
{
int ext_tcode;
@@ -166,7 +167,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
HEADER_RETRY(RETRY_X) |
HEADER_TLABEL(tlabel) |
HEADER_TCODE(tcode) |
- HEADER_DESTINATION(node_id);
+ HEADER_DESTINATION(destination_id);
packet->header[1] =
HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
packet->header[2] =
@@ -252,7 +253,7 @@ fw_send_request(struct fw_card *card, struct fw_transaction *t,
fw_transaction_callback_t callback, void *callback_data)
{
unsigned long flags;
- int tlabel, source;
+ int tlabel;
/*
* Bump the flush timer up 100ms first of all so we
@@ -268,7 +269,6 @@ fw_send_request(struct fw_card *card, struct fw_transaction *t,
spin_lock_irqsave(&card->lock, flags);
- source = card->node_id;
tlabel = card->current_tlabel;
if (card->tlabel_mask & (1 << tlabel)) {
spin_unlock_irqrestore(&card->lock, flags);
@@ -279,77 +279,58 @@ fw_send_request(struct fw_card *card, struct fw_transaction *t,
card->current_tlabel = (card->current_tlabel + 1) & 0x1f;
card->tlabel_mask |= (1 << tlabel);
- list_add_tail(&t->link, &card->transaction_list);
-
- spin_unlock_irqrestore(&card->lock, flags);
-
- /* Initialize rest of transaction, fill out packet and send it. */
t->node_id = node_id;
t->tlabel = tlabel;
t->callback = callback;
t->callback_data = callback_data;
- fw_fill_request(&t->packet, tcode, t->tlabel,
- node_id, source, generation,
- speed, offset, payload, length);
+ fw_fill_request(&t->packet, tcode, t->tlabel, node_id, card->node_id,
+ generation, speed, offset, payload, length);
t->packet.callback = transmit_complete_callback;
+ list_add_tail(&t->link, &card->transaction_list);
+
+ spin_unlock_irqrestore(&card->lock, flags);
+
card->driver->send_request(card, &t->packet);
}
EXPORT_SYMBOL(fw_send_request);
-struct fw_phy_packet {
- struct fw_packet packet;
- struct completion done;
- struct kref kref;
-};
-
-static void phy_packet_release(struct kref *kref)
-{
- struct fw_phy_packet *p =
- container_of(kref, struct fw_phy_packet, kref);
- kfree(p);
-}
+static DEFINE_MUTEX(phy_config_mutex);
+static DECLARE_COMPLETION(phy_config_done);
static void transmit_phy_packet_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
- struct fw_phy_packet *p =
- container_of(packet, struct fw_phy_packet, packet);
-
- complete(&p->done);
- kref_put(&p->kref, phy_packet_release);
+ complete(&phy_config_done);
}
+static struct fw_packet phy_config_packet = {
+ .header_length = 8,
+ .payload_length = 0,
+ .speed = SCODE_100,
+ .callback = transmit_phy_packet_callback,
+};
+
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count)
{
- struct fw_phy_packet *p;
long timeout = DIV_ROUND_UP(HZ, 10);
u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
PHY_CONFIG_ROOT_ID(node_id) |
PHY_CONFIG_GAP_COUNT(gap_count);
- p = kmalloc(sizeof(*p), GFP_KERNEL);
- if (p == NULL)
- return;
+ mutex_lock(&phy_config_mutex);
+
+ phy_config_packet.header[0] = data;
+ phy_config_packet.header[1] = ~data;
+ phy_config_packet.generation = generation;
+ INIT_COMPLETION(phy_config_done);
+
+ card->driver->send_request(card, &phy_config_packet);
+ wait_for_completion_timeout(&phy_config_done, timeout);
- p->packet.header[0] = data;
- p->packet.header[1] = ~data;
- p->packet.header_length = 8;
- p->packet.payload_length = 0;
- p->packet.speed = SCODE_100;
- p->packet.generation = generation;
- p->packet.callback = transmit_phy_packet_callback;
- init_completion(&p->done);
- kref_set(&p->kref, 2);
-
- card->driver->send_request(card, &p->packet);
- timeout = wait_for_completion_timeout(&p->done, timeout);
- kref_put(&p->kref, phy_packet_release);
-
- /* will leak p if the callback is never executed */
- WARN_ON(timeout == 0);
+ mutex_unlock(&phy_config_mutex);
}
void fw_flush_transactions(struct fw_card *card)
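
[Editor's note] The fw-transaction.c hunk above replaces the per-call kmalloc()/kref lifetime handling of PHY config packets with one statically allocated packet: a mutex serializes callers, the shared completion is re-armed with INIT_COMPLETION() before each send (the re-init helper of this era, later renamed reinit_completion()), and the transmit callback simply completes it. Concurrent fw_send_phy_config() callers now queue behind the mutex, but there is no allocation-failure path and nothing can leak if the callback never runs. A generic sketch of that idiom, with the request type and submit hook invented for illustration:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct my_request {                      /* hypothetical request type */
        u32 payload;
};

static DEFINE_MUTEX(req_mutex);
static DECLARE_COMPLETION(req_done);
static struct my_request shared_req;

/* Hypothetical submit hook, implemented elsewhere by the driver. */
static void hw_submit(struct my_request *req);

/* Wired up by hw_submit() as the request's completion callback. */
static void my_done_callback(struct my_request *req)
{
        complete(&req_done);
}

static void submit_serialized(u32 payload)
{
        mutex_lock(&req_mutex);          /* one request in flight at a time */
        shared_req.payload = payload;
        INIT_COMPLETION(req_done);       /* re-arm for this caller */
        hw_submit(&shared_req);
        wait_for_completion_timeout(&req_done, DIV_ROUND_UP(HZ, 10));
        mutex_unlock(&req_mutex);
}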
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index e23399c7f773..001622eb86f9 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -153,12 +153,14 @@ int __init firmware_map_add_early(resource_size_t start, resource_size_t end,
static ssize_t start_show(struct firmware_map_entry *entry, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "0x%llx\n", entry->start);
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ (unsigned long long)entry->start);
}
static ssize_t end_show(struct firmware_map_entry *entry, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "0x%llx\n", entry->end);
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ (unsigned long long)entry->end);
}
static ssize_t type_show(struct firmware_map_entry *entry, char *buf)
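
[Editor's note] The memmap.c hunks only add casts: entry->start and entry->end are resource_size_t, which is 32 bits on some configurations and 64 on others, so handing them directly to a %llx conversion is a format mismatch on the 32-bit builds. Casting to unsigned long long keeps the format portable. A trivial user-space analogue of the same rule:

#include <stdio.h>
#include <stdint.h>

/* resource_size_t stand-in; could equally be uint64_t depending on config */
typedef uint32_t resource_like_t;

int main(void)
{
        resource_like_t start = 0x9f000;

        /* the cast keeps %llx correct whether the typedef is 32 or 64 bits */
        printf("0x%llx\n", (unsigned long long)start);
        return 0;
}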
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 564138714bb5..452c2d866ec5 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -318,7 +318,7 @@ static void drm_cleanup(struct drm_device * dev)
DRM_ERROR("Cannot unload module\n");
}
-int drm_minors_cleanup(int id, void *ptr, void *data)
+static int drm_minors_cleanup(int id, void *ptr, void *data)
{
struct drm_minor *minor = ptr;
struct drm_device *dev;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 9494005d1c9a..e603736682bf 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -43,7 +43,6 @@
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/mutex.h>
-#include <linux/smp_lock.h>
#include <asm/uaccess.h>
@@ -1154,11 +1153,18 @@ static unsigned int ib_ucm_poll(struct file *filp,
return mask;
}
+/*
+ * ib_ucm_open() does not need the BKL:
+ *
+ * - no global state is referred to;
+ * - there is no ioctl method to race against;
+ * - no further module initialization is required for open to work
+ * after the device is registered.
+ */
static int ib_ucm_open(struct inode *inode, struct file *filp)
{
struct ib_ucm_file *file;
- cycle_kernel_lock();
file = kmalloc(sizeof(*file), GFP_KERNEL);
if (!file)
return -ENOMEM;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 195f97302fe5..b41dd26bbfa1 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -38,7 +38,6 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
-#include <linux/smp_lock.h>
#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
@@ -1149,6 +1148,14 @@ static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
return mask;
}
+/*
+ * ucma_open() does not need the BKL:
+ *
+ * - no global state is referred to;
+ * - there is no ioctl method to race against;
+ * - no further module initialization is required for open to work
+ * after the device is registered.
+ */
static int ucma_open(struct inode *inode, struct file *filp)
{
struct ucma_file *file;
@@ -1157,7 +1164,6 @@ static int ucma_open(struct inode *inode, struct file *filp)
if (!file)
return -ENOMEM;
- lock_kernel();
INIT_LIST_HEAD(&file->event_list);
INIT_LIST_HEAD(&file->ctx_list);
init_waitqueue_head(&file->poll_wait);
@@ -1165,7 +1171,6 @@ static int ucma_open(struct inode *inode, struct file *filp)
filp->private_data = file;
file->filp = filp;
- unlock_kernel();
return 0;
}
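
[Editor's note] The ucm.c and ucma.c hunks drop the big kernel lock from the open() methods; the new comments spell out why it was never needed there: open() only touches memory it allocates itself and the file pointer it is handed. A hedged sketch (all names hypothetical) of what such a BKL-free open() typically looks like, with a private mutex standing in for any later serialization:

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_file {                    /* hypothetical per-open state */
        struct mutex mut;
        struct list_head event_list;
};

static int example_open(struct inode *inode, struct file *filp)
{
        struct example_file *file;

        file = kzalloc(sizeof(*file), GFP_KERNEL);
        if (!file)
                return -ENOMEM;

        mutex_init(&file->mut);          /* private lock instead of the BKL */
        INIT_LIST_HEAD(&file->event_list);
        filp->private_data = file;       /* no global state touched */
        return 0;
}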
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
index eaba03273e4f..284c9bca517e 100644
--- a/drivers/infiniband/hw/ipath/ipath_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -698,7 +698,7 @@ retry:
addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
tx->map_len, DMA_TO_DEVICE);
- if (dma_mapping_error(addr)) {
+ if (dma_mapping_error(&dd->pcidev->dev, addr)) {
ret = -EIO;
goto unlock;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
index 86e016916cd1..82d9a0b5ca2f 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
@@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
DMA_TO_DEVICE);
- if (dma_mapping_error(dma_addr)) {
+ if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
ret = -ENOMEM;
goto free_unmap;
}
@@ -301,7 +301,7 @@ static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
pages[j], 0, flen, DMA_TO_DEVICE);
unsigned long fofs = addr & ~PAGE_MASK;
- if (dma_mapping_error(dma_addr)) {
+ if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
ret = -ENOMEM;
goto done;
}
@@ -508,7 +508,7 @@ static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
if (page) {
dma_addr = dma_map_page(&dd->pcidev->dev,
page, 0, len, DMA_TO_DEVICE);
- if (dma_mapping_error(dma_addr)) {
+ if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
ret = -ENOMEM;
goto free_pbc;
}
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 0b191a4842ce..a1464574bfdd 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 38d6907ab521..a3c2851c0545 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index d26a91317d4d..6e2b0dc21b61 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index db2086faa4ed..a4cdb465cd1d 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 02a99bc4442e..f7bc7dd8578a 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 12d6bc6f8007..d42565258fb7 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx4/user.h b/drivers/infiniband/hw/mlx4/user.h
index e2d11be4525c..13beedeeef9f 100644
--- a/drivers/infiniband/hw/mlx4/user.h
+++ b/drivers/infiniband/hw/mlx4/user.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 4e36aa7cb3d2..cc6858f0b65b 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -780,7 +780,7 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
return -ENOMEM;
dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
+ if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
__free_page(dev->eq_table.icm_page);
return -ENOMEM;
}
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index d2884e778098..b0cab64e5e3d 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -276,6 +276,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
}
nes_free_resource(nesadapter, nesadapter->allocated_qps, nesqp->hwqp.qp_id);
+ nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = NULL;
kfree(nesqp->allocated_buffer);
}
@@ -289,7 +290,6 @@ void nes_rem_ref(struct ib_qp *ibqp)
struct nes_qp *nesqp;
struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
struct nes_hw_cqp_wqe *cqp_wqe;
struct nes_cqp_request *cqp_request;
u32 opcode;
@@ -303,8 +303,6 @@ void nes_rem_ref(struct ib_qp *ibqp)
}
if (atomic_dec_and_test(&nesqp->refcount)) {
- nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = NULL;
-
/* Destroy the QP */
cqp_request = nes_get_cqp_request(nesdev);
if (cqp_request == NULL) {
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 6aa531d5276d..9f0b964b2c99 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -74,36 +74,59 @@ atomic_t cm_nodes_destroyed;
atomic_t cm_accel_dropped_pkts;
atomic_t cm_resets_recvd;
-static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
+static inline int mini_cm_accelerated(struct nes_cm_core *,
+ struct nes_cm_node *);
static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *,
- struct nes_vnic *, struct nes_cm_info *);
-static int add_ref_cm_node(struct nes_cm_node *);
-static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *);
+ struct nes_vnic *, struct nes_cm_info *);
static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *);
-static struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *,
- void *, u32, void *, u32, u8);
-static struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node);
-
static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *,
- struct nes_vnic *,
- struct ietf_mpa_frame *,
- struct nes_cm_info *);
+ struct nes_vnic *, u16, void *, struct nes_cm_info *);
+static int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *);
static int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *,
- struct nes_cm_node *);
+ struct nes_cm_node *);
static int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *,
- struct nes_cm_node *);
-static int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *);
-static int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *,
- struct sk_buff *);
+ struct nes_cm_node *);
+static void mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *,
+ struct sk_buff *);
static int mini_cm_dealloc_core(struct nes_cm_core *);
static int mini_cm_get(struct nes_cm_core *);
static int mini_cm_set(struct nes_cm_core *, u32, u32);
+
+static struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *,
+ void *, u32, void *, u32, u8);
+static struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node);
+static int add_ref_cm_node(struct nes_cm_node *);
+static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *);
+
static int nes_cm_disconn_true(struct nes_qp *);
static int nes_cm_post_event(struct nes_cm_event *event);
static int nes_disconnect(struct nes_qp *nesqp, int abrupt);
static void nes_disconnect_worker(struct work_struct *work);
-static int send_ack(struct nes_cm_node *cm_node);
+
+static int send_mpa_request(struct nes_cm_node *, struct sk_buff *);
+static int send_syn(struct nes_cm_node *, u32, struct sk_buff *);
+static int send_reset(struct nes_cm_node *, struct sk_buff *);
+static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb);
static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb);
+static void process_packet(struct nes_cm_node *, struct sk_buff *,
+ struct nes_cm_core *);
+
+static void active_open_err(struct nes_cm_node *, struct sk_buff *, int);
+static void passive_open_err(struct nes_cm_node *, struct sk_buff *, int);
+static void cleanup_retrans_entry(struct nes_cm_node *);
+static void handle_rcv_mpa(struct nes_cm_node *, struct sk_buff *,
+ enum nes_cm_event_type);
+static void free_retrans_entry(struct nes_cm_node *cm_node);
+static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph,
+ struct sk_buff *skb, int optionsize, int passive);
+
+/* CM event handler functions */
+static void cm_event_connected(struct nes_cm_event *);
+static void cm_event_connect_error(struct nes_cm_event *);
+static void cm_event_reset(struct nes_cm_event *);
+static void cm_event_mpa_req(struct nes_cm_event *);
+
+static void print_core(struct nes_cm_core *core);
/* External CM API Interface */
/* instance of function pointers for client API */
@@ -158,11 +181,11 @@ static struct nes_cm_event *create_event(struct nes_cm_node *cm_node,
event->cm_info.loc_port = cm_node->loc_port;
event->cm_info.cm_id = cm_node->cm_id;
- nes_debug(NES_DBG_CM, "Created event=%p, type=%u, dst_addr=%08x[%x],"
- " src_addr=%08x[%x]\n",
- event, type,
- event->cm_info.loc_addr, event->cm_info.loc_port,
- event->cm_info.rem_addr, event->cm_info.rem_port);
+ nes_debug(NES_DBG_CM, "cm_node=%p Created event=%p, type=%u, "
+ "dst_addr=%08x[%x], src_addr=%08x[%x]\n",
+ cm_node, event, type, event->cm_info.loc_addr,
+ event->cm_info.loc_port, event->cm_info.rem_addr,
+ event->cm_info.rem_port);
nes_cm_post_event(event);
return event;
@@ -172,14 +195,11 @@ static struct nes_cm_event *create_event(struct nes_cm_node *cm_node,
/**
* send_mpa_request
*/
-static int send_mpa_request(struct nes_cm_node *cm_node)
+static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb)
{
- struct sk_buff *skb;
int ret;
-
- skb = get_free_pkt(cm_node);
if (!skb) {
- nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+ nes_debug(NES_DBG_CM, "skb set to NULL\n");
return -1;
}
@@ -188,9 +208,8 @@ static int send_mpa_request(struct nes_cm_node *cm_node)
cm_node->mpa_frame_size, SET_ACK);
ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
- if (ret < 0) {
+ if (ret < 0)
return ret;
- }
return 0;
}
@@ -229,46 +248,12 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 len)
/**
- * handle_exception_pkt - process an exception packet.
- * We have been in a TSA state, and we have now received SW
- * TCP/IP traffic should be a FIN request or IP pkt with options
- */
-static int handle_exception_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb)
-{
- int ret = 0;
- struct tcphdr *tcph = tcp_hdr(skb);
-
- /* first check to see if this a FIN pkt */
- if (tcph->fin) {
- /* we need to ACK the FIN request */
- send_ack(cm_node);
-
- /* check which side we are (client/server) and set next state accordingly */
- if (cm_node->tcp_cntxt.client)
- cm_node->state = NES_CM_STATE_CLOSING;
- else {
- /* we are the server side */
- cm_node->state = NES_CM_STATE_CLOSE_WAIT;
- /* since this is a self contained CM we don't wait for */
- /* an APP to close us, just send final FIN immediately */
- ret = send_fin(cm_node, NULL);
- cm_node->state = NES_CM_STATE_LAST_ACK;
- }
- } else {
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-
-/**
* form_cm_frame - get a free packet and build empty frame Use
* node info to build.
*/
-static struct sk_buff *form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm_node,
- void *options, u32 optionsize, void *data,
- u32 datasize, u8 flags)
+static struct sk_buff *form_cm_frame(struct sk_buff *skb,
+ struct nes_cm_node *cm_node, void *options, u32 optionsize,
+ void *data, u32 datasize, u8 flags)
{
struct tcphdr *tcph;
struct iphdr *iph;
@@ -332,10 +317,12 @@ static struct sk_buff *form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm
cm_node->tcp_cntxt.loc_seq_num++;
tcph->syn = 1;
} else
- cm_node->tcp_cntxt.loc_seq_num += datasize; /* data (no headers) */
+ cm_node->tcp_cntxt.loc_seq_num += datasize;
- if (flags & SET_FIN)
+ if (flags & SET_FIN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
tcph->fin = 1;
+ }
if (flags & SET_RST)
tcph->rst = 1;
@@ -389,7 +376,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
int close_when_complete)
{
unsigned long flags;
- struct nes_cm_core *cm_core;
+ struct nes_cm_core *cm_core = cm_node->cm_core;
struct nes_timer_entry *new_send;
int ret = 0;
u32 was_timer_set;
@@ -411,7 +398,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
new_send->close_when_complete = close_when_complete;
if (type == NES_TIMER_TYPE_CLOSE) {
- new_send->timetosend += (HZ/2); /* TODO: decide on the correct value here */
+ new_send->timetosend += (HZ/10);
spin_lock_irqsave(&cm_node->recv_list_lock, flags);
list_add_tail(&new_send->list, &cm_node->recv_list);
spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
@@ -420,36 +407,28 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
if (type == NES_TIMER_TYPE_SEND) {
new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
atomic_inc(&new_send->skb->users);
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ cm_node->send_entry = new_send;
+ add_ref_cm_node(cm_node);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ new_send->timetosend = jiffies + NES_RETRY_TIMEOUT;
ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev);
if (ret != NETDEV_TX_OK) {
- nes_debug(NES_DBG_CM, "Error sending packet %p (jiffies = %lu)\n",
- new_send, jiffies);
+ nes_debug(NES_DBG_CM, "Error sending packet %p "
+ "(jiffies = %lu)\n", new_send, jiffies);
atomic_dec(&new_send->skb->users);
new_send->timetosend = jiffies;
} else {
cm_packets_sent++;
if (!send_retrans) {
+ cleanup_retrans_entry(cm_node);
if (close_when_complete)
- rem_ref_cm_node(cm_node->cm_core, cm_node);
- dev_kfree_skb_any(new_send->skb);
- kfree(new_send);
+ rem_ref_cm_node(cm_core, cm_node);
return ret;
}
- new_send->timetosend = jiffies + NES_RETRY_TIMEOUT;
}
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
- list_add_tail(&new_send->list, &cm_node->retrans_list);
- spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
- }
- if (type == NES_TIMER_TYPE_RECV) {
- new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
- new_send->timetosend = jiffies;
- spin_lock_irqsave(&cm_node->recv_list_lock, flags);
- list_add_tail(&new_send->list, &cm_node->recv_list);
- spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
}
- cm_core = cm_node->cm_core;
was_timer_set = timer_pending(&cm_core->tcp_timer);
@@ -476,23 +455,27 @@ static void nes_cm_timer_tick(unsigned long pass)
struct list_head *list_node, *list_node_temp;
struct nes_cm_core *cm_core = g_cm_core;
struct nes_qp *nesqp;
- struct sk_buff *skb;
u32 settimer = 0;
int ret = NETDEV_TX_OK;
- int node_done;
+ enum nes_cm_node_state last_state;
spin_lock_irqsave(&cm_core->ht_lock, flags);
- list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
+ list_for_each_safe(list_node, list_core_temp,
+ &cm_core->connected_nodes) {
cm_node = container_of(list_node, struct nes_cm_node, list);
add_ref_cm_node(cm_node);
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
spin_lock_irqsave(&cm_node->recv_list_lock, flags);
- list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) {
- recv_entry = container_of(list_core, struct nes_timer_entry, list);
- if ((time_after(recv_entry->timetosend, jiffies)) &&
- (recv_entry->type == NES_TIMER_TYPE_CLOSE)) {
- if (nexttimeout > recv_entry->timetosend || !settimer) {
+ list_for_each_safe(list_core, list_node_temp,
+ &cm_node->recv_list) {
+ recv_entry = container_of(list_core,
+ struct nes_timer_entry, list);
+ if (!recv_entry)
+ break;
+ if (time_after(recv_entry->timetosend, jiffies)) {
+ if (nexttimeout > recv_entry->timetosend ||
+ !settimer) {
nexttimeout = recv_entry->timetosend;
settimer = 1;
}
@@ -501,157 +484,143 @@ static void nes_cm_timer_tick(unsigned long pass)
list_del(&recv_entry->list);
cm_id = cm_node->cm_id;
spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
- if (recv_entry->type == NES_TIMER_TYPE_CLOSE) {
- nesqp = (struct nes_qp *)recv_entry->skb;
- spin_lock_irqsave(&nesqp->lock, qplockflags);
- if (nesqp->cm_id) {
- nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, refcount = %d: "
- "****** HIT A NES_TIMER_TYPE_CLOSE"
- " with something to do!!! ******\n",
- nesqp->hwqp.qp_id, cm_id,
- atomic_read(&nesqp->refcount));
- nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
- nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
- nesqp->ibqp_state = IB_QPS_ERR;
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- nes_cm_disconn(nesqp);
- } else {
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, refcount = %d:"
- " ****** HIT A NES_TIMER_TYPE_CLOSE"
- " with nothing to do!!! ******\n",
- nesqp->hwqp.qp_id, cm_id,
- atomic_read(&nesqp->refcount));
- nes_rem_ref(&nesqp->ibqp);
- }
- if (cm_id)
- cm_id->rem_ref(cm_id);
+ nesqp = (struct nes_qp *)recv_entry->skb;
+ spin_lock_irqsave(&nesqp->lock, qplockflags);
+ if (nesqp->cm_id) {
+ nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
+ "refcount = %d: HIT A "
+ "NES_TIMER_TYPE_CLOSE with something "
+ "to do!!!\n", nesqp->hwqp.qp_id, cm_id,
+ atomic_read(&nesqp->refcount));
+ nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+ nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
+ nesqp->ibqp_state = IB_QPS_ERR;
+ spin_unlock_irqrestore(&nesqp->lock,
+ qplockflags);
+ nes_cm_disconn(nesqp);
+ } else {
+ spin_unlock_irqrestore(&nesqp->lock,
+ qplockflags);
+ nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
+ "refcount = %d: HIT A "
+ "NES_TIMER_TYPE_CLOSE with nothing "
+ "to do!!!\n", nesqp->hwqp.qp_id, cm_id,
+ atomic_read(&nesqp->refcount));
}
+ if (cm_id)
+ cm_id->rem_ref(cm_id);
+
kfree(recv_entry);
spin_lock_irqsave(&cm_node->recv_list_lock, flags);
}
spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
- node_done = 0;
- list_for_each_safe(list_core, list_node_temp, &cm_node->retrans_list) {
- if (node_done) {
- break;
- }
- send_entry = container_of(list_core, struct nes_timer_entry, list);
+ do {
+ send_entry = cm_node->send_entry;
+ if (!send_entry)
+ continue;
if (time_after(send_entry->timetosend, jiffies)) {
if (cm_node->state != NES_CM_STATE_TSA) {
- if ((nexttimeout > send_entry->timetosend) || !settimer) {
- nexttimeout = send_entry->timetosend;
+ if ((nexttimeout >
+ send_entry->timetosend) ||
+ !settimer) {
+ nexttimeout =
+ send_entry->timetosend;
settimer = 1;
+ continue;
}
- node_done = 1;
- continue;
} else {
- list_del(&send_entry->list);
- skb = send_entry->skb;
- spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
- dev_kfree_skb_any(skb);
- kfree(send_entry);
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ free_retrans_entry(cm_node);
continue;
}
}
- if (send_entry->type == NES_TIMER_NODE_CLEANUP) {
- list_del(&send_entry->list);
- spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
- kfree(send_entry);
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
- continue;
- }
- if ((send_entry->seq_num < cm_node->tcp_cntxt.rem_ack_num) ||
- (cm_node->state == NES_CM_STATE_TSA) ||
- (cm_node->state == NES_CM_STATE_CLOSED)) {
- skb = send_entry->skb;
- list_del(&send_entry->list);
- spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
- kfree(send_entry);
- dev_kfree_skb_any(skb);
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+
+ if ((cm_node->state == NES_CM_STATE_TSA) ||
+ (cm_node->state == NES_CM_STATE_CLOSED)) {
+ free_retrans_entry(cm_node);
continue;
}
- if (!send_entry->retranscount || !send_entry->retrycount) {
+ if (!send_entry->retranscount ||
+ !send_entry->retrycount) {
cm_packets_dropped++;
- skb = send_entry->skb;
- list_del(&send_entry->list);
- spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
- dev_kfree_skb_any(skb);
- kfree(send_entry);
- if (cm_node->state == NES_CM_STATE_SYN_RCVD) {
- /* this node never even generated an indication up to the cm */
+ last_state = cm_node->state;
+ cm_node->state = NES_CM_STATE_CLOSED;
+ free_retrans_entry(cm_node);
+ spin_unlock_irqrestore(
+ &cm_node->retrans_list_lock, flags);
+ if (last_state == NES_CM_STATE_SYN_RCVD)
rem_ref_cm_node(cm_core, cm_node);
- } else {
- cm_node->state = NES_CM_STATE_CLOSED;
- create_event(cm_node, NES_CM_EVENT_ABORTED);
- }
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ else
+ create_event(cm_node,
+ NES_CM_EVENT_ABORTED);
+ spin_lock_irqsave(&cm_node->retrans_list_lock,
+ flags);
continue;
}
- /* this seems like the correct place, but leave send entry unprotected */
- /* spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); */
atomic_inc(&send_entry->skb->users);
cm_packets_retrans++;
- nes_debug(NES_DBG_CM, "Retransmitting send_entry %p for node %p,"
- " jiffies = %lu, time to send = %lu, retranscount = %u, "
- "send_entry->seq_num = 0x%08X, cm_node->tcp_cntxt.rem_ack_num = 0x%08X\n",
- send_entry, cm_node, jiffies, send_entry->timetosend, send_entry->retranscount,
- send_entry->seq_num, cm_node->tcp_cntxt.rem_ack_num);
-
- spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ nes_debug(NES_DBG_CM, "Retransmitting send_entry %p "
+ "for node %p, jiffies = %lu, time to send = "
+ "%lu, retranscount = %u, send_entry->seq_num = "
+ "0x%08X, cm_node->tcp_cntxt.rem_ack_num = "
+ "0x%08X\n", send_entry, cm_node, jiffies,
+ send_entry->timetosend,
+ send_entry->retranscount,
+ send_entry->seq_num,
+ cm_node->tcp_cntxt.rem_ack_num);
+
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock,
+ flags);
ret = nes_nic_cm_xmit(send_entry->skb, cm_node->netdev);
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
if (ret != NETDEV_TX_OK) {
+ nes_debug(NES_DBG_CM, "rexmit failed for "
+ "node=%p\n", cm_node);
cm_packets_bounced++;
atomic_dec(&send_entry->skb->users);
send_entry->retrycount--;
nexttimeout = jiffies + NES_SHORT_TIME;
settimer = 1;
- node_done = 1;
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
continue;
} else {
cm_packets_sent++;
}
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
- list_del(&send_entry->list);
- nes_debug(NES_DBG_CM, "Packet Sent: retrans count = %u, retry count = %u.\n",
- send_entry->retranscount, send_entry->retrycount);
+ nes_debug(NES_DBG_CM, "Packet Sent: retrans count = "
+ "%u, retry count = %u.\n",
+ send_entry->retranscount,
+ send_entry->retrycount);
if (send_entry->send_retrans) {
send_entry->retranscount--;
- send_entry->timetosend = jiffies + NES_RETRY_TIMEOUT;
- if (nexttimeout > send_entry->timetosend || !settimer) {
+ send_entry->timetosend = jiffies +
+ NES_RETRY_TIMEOUT;
+ if (nexttimeout > send_entry->timetosend ||
+ !settimer) {
nexttimeout = send_entry->timetosend;
settimer = 1;
}
- list_add(&send_entry->list, &cm_node->retrans_list);
- continue;
} else {
int close_when_complete;
- skb = send_entry->skb;
- close_when_complete = send_entry->close_when_complete;
- spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
- if (close_when_complete) {
- BUG_ON(atomic_read(&cm_node->ref_count) == 1);
- rem_ref_cm_node(cm_core, cm_node);
- }
- dev_kfree_skb_any(skb);
- kfree(send_entry);
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
- continue;
+ close_when_complete =
+ send_entry->close_when_complete;
+ nes_debug(NES_DBG_CM, "cm_node=%p state=%d\n",
+ cm_node, cm_node->state);
+ free_retrans_entry(cm_node);
+ if (close_when_complete)
+ rem_ref_cm_node(cm_node->cm_core,
+ cm_node);
}
- }
- spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
-
- rem_ref_cm_node(cm_core, cm_node);
+ } while (0);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ rem_ref_cm_node(cm_node->cm_core, cm_node);
spin_lock_irqsave(&cm_core->ht_lock, flags);
- if (ret != NETDEV_TX_OK)
+ if (ret != NETDEV_TX_OK) {
+ nes_debug(NES_DBG_CM, "rexmit failed for cm_node=%p\n",
+ cm_node);
break;
+ }
}
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
@@ -667,14 +636,14 @@ static void nes_cm_timer_tick(unsigned long pass)
/**
* send_syn
*/
-static int send_syn(struct nes_cm_node *cm_node, u32 sendack)
+static int send_syn(struct nes_cm_node *cm_node, u32 sendack,
+ struct sk_buff *skb)
{
int ret;
int flags = SET_SYN;
- struct sk_buff *skb;
char optionsbuffer[sizeof(struct option_mss) +
- sizeof(struct option_windowscale) +
- sizeof(struct option_base) + 1];
+ sizeof(struct option_windowscale) + sizeof(struct option_base) +
+ TCP_OPTIONS_PADDING];
int optionssize = 0;
/* Sending MSS option */
@@ -695,8 +664,7 @@ static int send_syn(struct nes_cm_node *cm_node, u32 sendack)
options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
optionssize += sizeof(struct option_windowscale);
- if (sendack && !(NES_DRV_OPT_SUPRESS_OPTION_BC & nes_drv_opt)
- ) {
+ if (sendack && !(NES_DRV_OPT_SUPRESS_OPTION_BC & nes_drv_opt)) {
options = (union all_known_options *)&optionsbuffer[optionssize];
options->as_base.optionnum = OPTION_NUMBER_WRITE0;
options->as_base.length = sizeof(struct option_base);
@@ -714,7 +682,8 @@ static int send_syn(struct nes_cm_node *cm_node, u32 sendack)
options->as_end = OPTION_NUMBER_END;
optionssize += 1;
- skb = get_free_pkt(cm_node);
+ if (!skb)
+ skb = get_free_pkt(cm_node);
if (!skb) {
nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
return -1;
@@ -733,18 +702,18 @@ static int send_syn(struct nes_cm_node *cm_node, u32 sendack)
/**
* send_reset
*/
-static int send_reset(struct nes_cm_node *cm_node)
+static int send_reset(struct nes_cm_node *cm_node, struct sk_buff *skb)
{
int ret;
- struct sk_buff *skb = get_free_pkt(cm_node);
int flags = SET_RST | SET_ACK;
+ if (!skb)
+ skb = get_free_pkt(cm_node);
if (!skb) {
nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
return -1;
}
- add_ref_cm_node(cm_node);
form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, flags);
ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 0, 1);
@@ -755,10 +724,12 @@ static int send_reset(struct nes_cm_node *cm_node)
/**
* send_ack
*/
-static int send_ack(struct nes_cm_node *cm_node)
+static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb)
{
int ret;
- struct sk_buff *skb = get_free_pkt(cm_node);
+
+ if (!skb)
+ skb = get_free_pkt(cm_node);
if (!skb) {
nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
@@ -922,7 +893,8 @@ static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node
if (!cm_node || !cm_core)
return -EINVAL;
- nes_debug(NES_DBG_CM, "Adding Node to Active Connection HT\n");
+ nes_debug(NES_DBG_CM, "Adding Node %p to Active Connection HT\n",
+ cm_node);
/* first, make an index into our hash table */
hashkey = make_hashkey(cm_node->loc_port, cm_node->loc_addr,
@@ -946,10 +918,35 @@ static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node
* mini_cm_dec_refcnt_listen
*/
static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
- struct nes_cm_listener *listener, int free_hanging_nodes)
+ struct nes_cm_listener *listener, int free_hanging_nodes)
{
int ret = 1;
unsigned long flags;
+ struct list_head *list_pos = NULL;
+ struct list_head *list_temp = NULL;
+ struct nes_cm_node *cm_node = NULL;
+
+ nes_debug(NES_DBG_CM, "attempting listener= %p free_nodes= %d, "
+ "refcnt=%d\n", listener, free_hanging_nodes,
+ atomic_read(&listener->ref_count));
+ /* free non-accelerated child nodes for this listener */
+ if (free_hanging_nodes) {
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ list_for_each_safe(list_pos, list_temp,
+ &g_cm_core->connected_nodes) {
+ cm_node = container_of(list_pos, struct nes_cm_node,
+ list);
+ if ((cm_node->listener == listener) &&
+ (!cm_node->accelerated)) {
+ cleanup_retrans_entry(cm_node);
+ spin_unlock_irqrestore(&cm_core->ht_lock,
+ flags);
+ send_reset(cm_node, NULL);
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ }
spin_lock_irqsave(&cm_core->listen_list_lock, flags);
if (!atomic_dec_return(&listener->ref_count)) {
list_del(&listener->list);
@@ -1067,18 +1064,18 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
cm_node->loc_port = cm_info->loc_port;
cm_node->rem_port = cm_info->rem_port;
cm_node->send_write0 = send_first;
- nes_debug(NES_DBG_CM, "Make node addresses : loc = " NIPQUAD_FMT ":%x, rem = " NIPQUAD_FMT ":%x\n",
- HIPQUAD(cm_node->loc_addr), cm_node->loc_port,
- HIPQUAD(cm_node->rem_addr), cm_node->rem_port);
+ nes_debug(NES_DBG_CM, "Make node addresses : loc = " NIPQUAD_FMT
+ ":%x, rem = " NIPQUAD_FMT ":%x\n",
+ HIPQUAD(cm_node->loc_addr), cm_node->loc_port,
+ HIPQUAD(cm_node->rem_addr), cm_node->rem_port);
cm_node->listener = listener;
cm_node->netdev = nesvnic->netdev;
cm_node->cm_id = cm_info->cm_id;
memcpy(cm_node->loc_mac, nesvnic->netdev->dev_addr, ETH_ALEN);
- nes_debug(NES_DBG_CM, "listener=%p, cm_id=%p\n",
- cm_node->listener, cm_node->cm_id);
+ nes_debug(NES_DBG_CM, "listener=%p, cm_id=%p\n", cm_node->listener,
+ cm_node->cm_id);
- INIT_LIST_HEAD(&cm_node->retrans_list);
spin_lock_init(&cm_node->retrans_list_lock);
INIT_LIST_HEAD(&cm_node->recv_list);
spin_lock_init(&cm_node->recv_list_lock);
@@ -1142,10 +1139,9 @@ static int add_ref_cm_node(struct nes_cm_node *cm_node)
* rem_ref_cm_node - destroy an instance of a cm node
*/
static int rem_ref_cm_node(struct nes_cm_core *cm_core,
- struct nes_cm_node *cm_node)
+ struct nes_cm_node *cm_node)
{
unsigned long flags, qplockflags;
- struct nes_timer_entry *send_entry;
struct nes_timer_entry *recv_entry;
struct iw_cm_id *cm_id;
struct list_head *list_core, *list_node_temp;
@@ -1169,48 +1165,33 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
atomic_dec(&cm_node->listener->pend_accepts_cnt);
BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
}
-
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
- list_for_each_safe(list_core, list_node_temp, &cm_node->retrans_list) {
- send_entry = container_of(list_core, struct nes_timer_entry, list);
- list_del(&send_entry->list);
- spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
- dev_kfree_skb_any(send_entry->skb);
- kfree(send_entry);
- spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
- continue;
- }
- spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
-
+ BUG_ON(cm_node->send_entry);
spin_lock_irqsave(&cm_node->recv_list_lock, flags);
list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) {
- recv_entry = container_of(list_core, struct nes_timer_entry, list);
+ recv_entry = container_of(list_core, struct nes_timer_entry,
+ list);
list_del(&recv_entry->list);
cm_id = cm_node->cm_id;
spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
- if (recv_entry->type == NES_TIMER_TYPE_CLOSE) {
- nesqp = (struct nes_qp *)recv_entry->skb;
- spin_lock_irqsave(&nesqp->lock, qplockflags);
- if (nesqp->cm_id) {
- nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: ****** HIT A NES_TIMER_TYPE_CLOSE"
- " with something to do!!! ******\n",
- nesqp->hwqp.qp_id, cm_id);
- nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
- nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
- nesqp->ibqp_state = IB_QPS_ERR;
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- nes_cm_disconn(nesqp);
- } else {
- spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: ****** HIT A NES_TIMER_TYPE_CLOSE"
- " with nothing to do!!! ******\n",
- nesqp->hwqp.qp_id, cm_id);
- nes_rem_ref(&nesqp->ibqp);
- }
- cm_id->rem_ref(cm_id);
- } else if (recv_entry->type == NES_TIMER_TYPE_RECV) {
- dev_kfree_skb_any(recv_entry->skb);
+ nesqp = (struct nes_qp *)recv_entry->skb;
+ spin_lock_irqsave(&nesqp->lock, qplockflags);
+ if (nesqp->cm_id) {
+ nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: HIT A "
+ "NES_TIMER_TYPE_CLOSE with something to do!\n",
+ nesqp->hwqp.qp_id, cm_id);
+ nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+ nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
+ nesqp->ibqp_state = IB_QPS_ERR;
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_cm_disconn(nesqp);
+ } else {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: HIT A "
+ "NES_TIMER_TYPE_CLOSE with nothing to do!\n",
+ nesqp->hwqp.qp_id, cm_id);
}
+ cm_id->rem_ref(cm_id);
+
kfree(recv_entry);
spin_lock_irqsave(&cm_node->recv_list_lock, flags);
}
@@ -1221,23 +1202,31 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
} else {
if (cm_node->apbvt_set && cm_node->nesvnic) {
nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port,
- PCI_FUNC(cm_node->nesvnic->nesdev->pcidev->devfn),
- NES_MANAGE_APBVT_DEL);
+ PCI_FUNC(
+ cm_node->nesvnic->nesdev->pcidev->devfn),
+ NES_MANAGE_APBVT_DEL);
}
}
- kfree(cm_node);
atomic_dec(&cm_core->node_cnt);
atomic_inc(&cm_nodes_destroyed);
+ nesqp = cm_node->nesqp;
+ if (nesqp) {
+ nesqp->cm_node = NULL;
+ nes_rem_ref(&nesqp->ibqp);
+ cm_node->nesqp = NULL;
+ }
+ cm_node->freed = 1;
+ kfree(cm_node);
return 0;
}
-
/**
* process_options
*/
-static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, u32 optionsize, u32 syn_packet)
+static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
+ u32 optionsize, u32 syn_packet)
{
u32 tmp;
u32 offset = 0;
@@ -1247,35 +1236,37 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, u32 opti
while (offset < optionsize) {
all_options = (union all_known_options *)(optionsloc + offset);
switch (all_options->as_base.optionnum) {
- case OPTION_NUMBER_END:
- offset = optionsize;
- break;
- case OPTION_NUMBER_NONE:
- offset += 1;
- continue;
- case OPTION_NUMBER_MSS:
- nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d Size: %d\n",
- __func__,
- all_options->as_mss.length, offset, optionsize);
- got_mss_option = 1;
- if (all_options->as_mss.length != 4) {
- return 1;
- } else {
- tmp = ntohs(all_options->as_mss.mss);
- if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss)
- cm_node->tcp_cntxt.mss = tmp;
- }
- break;
- case OPTION_NUMBER_WINDOW_SCALE:
- cm_node->tcp_cntxt.snd_wscale = all_options->as_windowscale.shiftcount;
- break;
- case OPTION_NUMBER_WRITE0:
- cm_node->send_write0 = 1;
- break;
- default:
- nes_debug(NES_DBG_CM, "TCP Option not understood: %x\n",
- all_options->as_base.optionnum);
- break;
+ case OPTION_NUMBER_END:
+ offset = optionsize;
+ break;
+ case OPTION_NUMBER_NONE:
+ offset += 1;
+ continue;
+ case OPTION_NUMBER_MSS:
+ nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d "
+ "Size: %d\n", __func__,
+ all_options->as_mss.length, offset, optionsize);
+ got_mss_option = 1;
+ if (all_options->as_mss.length != 4) {
+ return 1;
+ } else {
+ tmp = ntohs(all_options->as_mss.mss);
+ if (tmp > 0 && tmp <
+ cm_node->tcp_cntxt.mss)
+ cm_node->tcp_cntxt.mss = tmp;
+ }
+ break;
+ case OPTION_NUMBER_WINDOW_SCALE:
+ cm_node->tcp_cntxt.snd_wscale =
+ all_options->as_windowscale.shiftcount;
+ break;
+ case OPTION_NUMBER_WRITE0:
+ cm_node->send_write0 = 1;
+ break;
+ default:
+ nes_debug(NES_DBG_CM, "TCP Option not understood: %x\n",
+ all_options->as_base.optionnum);
+ break;
}
offset += all_options->as_base.length;
}
@@ -1284,300 +1275,491 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, u32 opti
return 0;
}
+static void drop_packet(struct sk_buff *skb)
+{
+ atomic_inc(&cm_accel_dropped_pkts);
+ dev_kfree_skb_any(skb);
+}
-/**
- * process_packet
- */
-static int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
- struct nes_cm_core *cm_core)
+static void handle_fin_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ struct tcphdr *tcph)
{
- int optionsize;
- int datasize;
- int ret = 0;
- struct tcphdr *tcph = tcp_hdr(skb);
- u32 inc_sequence;
- if (cm_node->state == NES_CM_STATE_SYN_SENT && tcph->syn) {
- inc_sequence = ntohl(tcph->seq);
- cm_node->tcp_cntxt.rcv_nxt = inc_sequence;
+ atomic_inc(&cm_resets_recvd);
+ nes_debug(NES_DBG_CM, "Received FIN, cm_node = %p, state = %u. "
+ "refcnt=%d\n", cm_node, cm_node->state,
+ atomic_read(&cm_node->ref_count));
+ cm_node->tcp_cntxt.rcv_nxt++;
+ cleanup_retrans_entry(cm_node);
+ switch (cm_node->state) {
+ case NES_CM_STATE_SYN_RCVD:
+ case NES_CM_STATE_SYN_SENT:
+ case NES_CM_STATE_ESTABLISHED:
+ case NES_CM_STATE_MPAREQ_SENT:
+ cm_node->state = NES_CM_STATE_LAST_ACK;
+ send_fin(cm_node, skb);
+ break;
+ case NES_CM_STATE_FIN_WAIT1:
+ cm_node->state = NES_CM_STATE_CLOSING;
+ send_ack(cm_node, skb);
+ break;
+ case NES_CM_STATE_FIN_WAIT2:
+ cm_node->state = NES_CM_STATE_TIME_WAIT;
+ send_ack(cm_node, skb);
+ cm_node->state = NES_CM_STATE_CLOSED;
+ break;
+ case NES_CM_STATE_TSA:
+ default:
+ nes_debug(NES_DBG_CM, "Error Rcvd FIN for node-%p state = %d\n",
+ cm_node, cm_node->state);
+ drop_packet(skb);
+ break;
}
+}
- if ((!tcph) || (cm_node->state == NES_CM_STATE_TSA)) {
- BUG_ON(!tcph);
- atomic_inc(&cm_accel_dropped_pkts);
- return -1;
- }
- if (tcph->rst) {
- atomic_inc(&cm_resets_recvd);
- nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u. refcnt=%d\n",
- cm_node, cm_node->state, atomic_read(&cm_node->ref_count));
- switch (cm_node->state) {
- case NES_CM_STATE_LISTENING:
- rem_ref_cm_node(cm_core, cm_node);
- break;
- case NES_CM_STATE_TSA:
- case NES_CM_STATE_CLOSED:
- break;
- case NES_CM_STATE_SYN_RCVD:
- nes_debug(NES_DBG_CM, "Received a reset for local 0x%08X:%04X,"
- " remote 0x%08X:%04X, node state = %u\n",
- cm_node->loc_addr, cm_node->loc_port,
- cm_node->rem_addr, cm_node->rem_port,
- cm_node->state);
- rem_ref_cm_node(cm_core, cm_node);
- break;
- case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
- case NES_CM_STATE_ESTABLISHED:
- case NES_CM_STATE_MPAREQ_SENT:
- default:
- nes_debug(NES_DBG_CM, "Received a reset for local 0x%08X:%04X,"
- " remote 0x%08X:%04X, node state = %u refcnt=%d\n",
- cm_node->loc_addr, cm_node->loc_port,
- cm_node->rem_addr, cm_node->rem_port,
- cm_node->state, atomic_read(&cm_node->ref_count));
- /* create event */
- cm_node->state = NES_CM_STATE_CLOSED;
+static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ struct tcphdr *tcph)
+{
- create_event(cm_node, NES_CM_EVENT_ABORTED);
- break;
+ int reset = 0; /* whether to send reset in case of err.. */
+ atomic_inc(&cm_resets_recvd);
+ nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
+ " refcnt=%d\n", cm_node, cm_node->state,
+ atomic_read(&cm_node->ref_count));
+ cleanup_retrans_entry(cm_node);
+ switch (cm_node->state) {
+ case NES_CM_STATE_SYN_SENT:
+ case NES_CM_STATE_MPAREQ_SENT:
+ nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p "
+ "listener=%p state=%d\n", __func__, __LINE__, cm_node,
+ cm_node->listener, cm_node->state);
+ active_open_err(cm_node, skb, reset);
+ break;
+ /* For PASSIVE open states, remove the cm_node event */
+ case NES_CM_STATE_ESTABLISHED:
+ case NES_CM_STATE_SYN_RCVD:
+ case NES_CM_STATE_LISTENING:
+ nes_debug(NES_DBG_CM, "Bad state %s[%u]\n", __func__, __LINE__);
+ passive_open_err(cm_node, skb, reset);
+ break;
+ case NES_CM_STATE_TSA:
+ default:
+ break;
+ }
+}
+static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ enum nes_cm_event_type type)
+{
+
+ int ret;
+ int datasize = skb->len;
+ u8 *dataloc = skb->data;
+ ret = parse_mpa(cm_node, dataloc, datasize);
+ if (ret < 0) {
+ nes_debug(NES_DBG_CM, "didn't like MPA Request\n");
+ if (type == NES_CM_EVENT_CONNECTED) {
+ nes_debug(NES_DBG_CM, "%s[%u] create abort for "
+ "cm_node=%p listener=%p state=%d\n", __func__,
+ __LINE__, cm_node, cm_node->listener,
+ cm_node->state);
+ active_open_err(cm_node, skb, 1);
+ } else {
+ passive_open_err(cm_node, skb, 1);
}
- return -1;
+ } else {
+ cleanup_retrans_entry(cm_node);
+ dev_kfree_skb_any(skb);
+ if (type == NES_CM_EVENT_CONNECTED)
+ cm_node->state = NES_CM_STATE_TSA;
+ create_event(cm_node, type);
+
+ }
+ return ;
+}
+
+static void indicate_pkt_err(struct nes_cm_node *cm_node, struct sk_buff *skb)
+{
+ switch (cm_node->state) {
+ case NES_CM_STATE_SYN_SENT:
+ case NES_CM_STATE_MPAREQ_SENT:
+ nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p "
+ "listener=%p state=%d\n", __func__, __LINE__, cm_node,
+ cm_node->listener, cm_node->state);
+ active_open_err(cm_node, skb, 1);
+ break;
+ case NES_CM_STATE_ESTABLISHED:
+ case NES_CM_STATE_SYN_RCVD:
+ passive_open_err(cm_node, skb, 1);
+ break;
+ case NES_CM_STATE_TSA:
+ default:
+ drop_packet(skb);
}
+}
+
+static int check_syn(struct nes_cm_node *cm_node, struct tcphdr *tcph,
+ struct sk_buff *skb)
+{
+ int err;
+
+ err = ((ntohl(tcph->ack_seq) == cm_node->tcp_cntxt.loc_seq_num))? 0 : 1;
+ if (err)
+ active_open_err(cm_node, skb, 1);
+
+ return err;
+}
+
+static int check_seq(struct nes_cm_node *cm_node, struct tcphdr *tcph,
+ struct sk_buff *skb)
+{
+ int err = 0;
+ u32 seq;
+ u32 ack_seq;
+ u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
+ u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
+ u32 rcv_wnd;
+ seq = ntohl(tcph->seq);
+ ack_seq = ntohl(tcph->ack_seq);
+ rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
+ if (ack_seq != loc_seq_num)
+ err = 1;
+ else if ((seq + rcv_wnd) < rcv_nxt)
+ err = 1;
+ if (err) {
+ nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p "
+ "listener=%p state=%d\n", __func__, __LINE__, cm_node,
+ cm_node->listener, cm_node->state);
+ indicate_pkt_err(cm_node, skb);
+ nes_debug(NES_DBG_CM, "seq ERROR cm_node =%p seq=0x%08X "
+ "rcv_nxt=0x%08X rcv_wnd=0x%x\n", cm_node, seq, rcv_nxt,
+ rcv_wnd);
+ }
+ return err;
+}
+
+/*
+ * handle_syn_pkt() is for the passive node. The SYN packet is received when a
+ * node is created with a listener, or it may come in as a retransmitted
+ * packet, in which case it is simply dropped.
+ */
+
+static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ struct tcphdr *tcph)
+{
+ int ret;
+ u32 inc_sequence;
+ int optionsize;
optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+ skb_pull(skb, tcph->doff << 2);
+ inc_sequence = ntohl(tcph->seq);
- skb_pull(skb, ip_hdr(skb)->ihl << 2);
+ switch (cm_node->state) {
+ case NES_CM_STATE_SYN_SENT:
+ case NES_CM_STATE_MPAREQ_SENT:
+ /* Rcvd syn on active open connection*/
+ active_open_err(cm_node, skb, 1);
+ break;
+ case NES_CM_STATE_LISTENING:
+ /* Passive OPEN */
+ cm_node->accept_pend = 1;
+ atomic_inc(&cm_node->listener->pend_accepts_cnt);
+ if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
+ cm_node->listener->backlog) {
+ nes_debug(NES_DBG_CM, "drop syn due to backlog "
+ "pressure \n");
+ cm_backlog_drops++;
+ passive_open_err(cm_node, skb, 0);
+ break;
+ }
+ ret = handle_tcp_options(cm_node, tcph, skb, optionsize,
+ 1);
+ if (ret) {
+ passive_open_err(cm_node, skb, 0);
+ /* drop pkt */
+ break;
+ }
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
+ BUG_ON(cm_node->send_entry);
+ cm_node->state = NES_CM_STATE_SYN_RCVD;
+ send_syn(cm_node, 1, skb);
+ break;
+ case NES_CM_STATE_TSA:
+ case NES_CM_STATE_ESTABLISHED:
+ case NES_CM_STATE_FIN_WAIT1:
+ case NES_CM_STATE_FIN_WAIT2:
+ case NES_CM_STATE_MPAREQ_RCVD:
+ case NES_CM_STATE_LAST_ACK:
+ case NES_CM_STATE_CLOSING:
+ case NES_CM_STATE_UNKNOWN:
+ case NES_CM_STATE_CLOSED:
+ default:
+ drop_packet(skb);
+ break;
+ }
+}
+
+static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ struct tcphdr *tcph)
+{
+
+ int ret;
+ u32 inc_sequence;
+ int optionsize;
+
+ optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
skb_pull(skb, tcph->doff << 2);
+ inc_sequence = ntohl(tcph->seq);
+ switch (cm_node->state) {
+ case NES_CM_STATE_SYN_SENT:
+ /* active open */
+ if (check_syn(cm_node, tcph, skb))
+ return;
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ /* setup options */
+ ret = handle_tcp_options(cm_node, tcph, skb, optionsize, 0);
+ if (ret) {
+ nes_debug(NES_DBG_CM, "cm_node=%p tcp_options failed\n",
+ cm_node);
+ break;
+ }
+ cleanup_retrans_entry(cm_node);
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
+ send_mpa_request(cm_node, skb);
+ cm_node->state = NES_CM_STATE_MPAREQ_SENT;
+ break;
+ case NES_CM_STATE_MPAREQ_RCVD:
+ /* passive open, so should not be here */
+ passive_open_err(cm_node, skb, 1);
+ break;
+ case NES_CM_STATE_ESTABLISHED:
+ case NES_CM_STATE_FIN_WAIT1:
+ case NES_CM_STATE_FIN_WAIT2:
+ case NES_CM_STATE_LAST_ACK:
+ case NES_CM_STATE_TSA:
+ case NES_CM_STATE_CLOSING:
+ case NES_CM_STATE_UNKNOWN:
+ case NES_CM_STATE_CLOSED:
+ case NES_CM_STATE_MPAREQ_SENT:
+ default:
+ drop_packet(skb);
+ break;
+ }
+}
- datasize = skb->len;
+static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ struct tcphdr *tcph)
+{
+ int datasize = 0;
+ u32 inc_sequence;
+ u32 rem_seq_ack;
+ u32 rem_seq;
+ if (check_seq(cm_node, tcph, skb))
+ return;
+
+ skb_pull(skb, tcph->doff << 2);
inc_sequence = ntohl(tcph->seq);
- nes_debug(NES_DBG_CM, "datasize = %u, sequence = 0x%08X, ack_seq = 0x%08X,"
- " rcv_nxt = 0x%08X Flags: %s %s.\n",
- datasize, inc_sequence, ntohl(tcph->ack_seq),
- cm_node->tcp_cntxt.rcv_nxt, (tcph->syn ? "SYN":""),
- (tcph->ack ? "ACK":""));
-
- if (!tcph->syn && (inc_sequence != cm_node->tcp_cntxt.rcv_nxt)
- ) {
- nes_debug(NES_DBG_CM, "dropping packet, datasize = %u, sequence = 0x%08X,"
- " ack_seq = 0x%08X, rcv_nxt = 0x%08X Flags: %s.\n",
- datasize, inc_sequence, ntohl(tcph->ack_seq),
- cm_node->tcp_cntxt.rcv_nxt, (tcph->ack ? "ACK":""));
- if (cm_node->state == NES_CM_STATE_LISTENING) {
- rem_ref_cm_node(cm_core, cm_node);
+ rem_seq = ntohl(tcph->seq);
+ rem_seq_ack = ntohl(tcph->ack_seq);
+ datasize = skb->len;
+
+ switch (cm_node->state) {
+ case NES_CM_STATE_SYN_RCVD:
+ /* Passive OPEN */
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ cm_node->state = NES_CM_STATE_ESTABLISHED;
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ cm_node->state = NES_CM_STATE_MPAREQ_RCVD;
+ handle_rcv_mpa(cm_node, skb, NES_CM_EVENT_MPA_REQ);
+ } else { /* rcvd ACK only */
+ dev_kfree_skb_any(skb);
+ cleanup_retrans_entry(cm_node);
+ }
+ break;
+ case NES_CM_STATE_ESTABLISHED:
+ /* Passive OPEN */
+ /* We expect mpa frame to be received only */
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ cm_node->state = NES_CM_STATE_MPAREQ_RCVD;
+ handle_rcv_mpa(cm_node, skb,
+ NES_CM_EVENT_MPA_REQ);
+ } else
+ drop_packet(skb);
+ break;
+ case NES_CM_STATE_MPAREQ_SENT:
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ handle_rcv_mpa(cm_node, skb, NES_CM_EVENT_CONNECTED);
+ } else { /* Could be just an ack pkt.. */
+ cleanup_retrans_entry(cm_node);
+ dev_kfree_skb_any(skb);
}
- return -1;
+ break;
+ case NES_CM_STATE_FIN_WAIT1:
+ case NES_CM_STATE_SYN_SENT:
+ case NES_CM_STATE_FIN_WAIT2:
+ case NES_CM_STATE_TSA:
+ case NES_CM_STATE_CLOSED:
+ case NES_CM_STATE_MPAREQ_RCVD:
+ case NES_CM_STATE_LAST_ACK:
+ case NES_CM_STATE_CLOSING:
+ case NES_CM_STATE_UNKNOWN:
+ default:
+ drop_packet(skb);
+ break;
}
+}
- cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph,
+ struct sk_buff *skb, int optionsize, int passive)
+{
+ u8 *optionsloc = (u8 *)&tcph[1];
if (optionsize) {
- u8 *optionsloc = (u8 *)&tcph[1];
- if (process_options(cm_node, optionsloc, optionsize, (u32)tcph->syn)) {
- nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", __func__, cm_node);
- send_reset(cm_node);
- if (cm_node->state != NES_CM_STATE_SYN_SENT)
- rem_ref_cm_node(cm_core, cm_node);
- return 0;
+ if (process_options(cm_node, optionsloc, optionsize,
+ (u32)tcph->syn)) {
+ nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n",
+ __func__, cm_node);
+ if (passive)
+ passive_open_err(cm_node, skb, 0);
+ else
+ active_open_err(cm_node, skb, 0);
+ return 1;
}
- } else if (tcph->syn)
- cm_node->tcp_cntxt.mss = NES_CM_DEFAULT_MSS;
+ }
cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
cm_node->tcp_cntxt.snd_wscale;
- if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd) {
+ if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
- }
+ return 0;
+}
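handle_tcp_options() derives the usable send window by shifting the advertised 16-bit window left by the send window scale negotiated during option processing, keeping the largest value seen in max_snd_wnd. A small sketch of that computation, assuming a previously negotiated scale factor (the numbers are illustrative only):

#include <stdint.h>
#include <stdio.h>

struct tcp_ctx {
        uint32_t snd_wnd;
        uint32_t max_snd_wnd;
        uint8_t  snd_wscale;    /* scale factor taken from the peer's SYN options */
};

/* advertised 16-bit window shifted by the negotiated scale, largest value kept */
static void update_send_window(struct tcp_ctx *ctx, uint16_t advertised)
{
        ctx->snd_wnd = (uint32_t)advertised << ctx->snd_wscale;
        if (ctx->snd_wnd > ctx->max_snd_wnd)
                ctx->max_snd_wnd = ctx->snd_wnd;
}

int main(void)
{
        struct tcp_ctx ctx = { .snd_wscale = 7 };       /* hypothetical scale */

        update_send_window(&ctx, 4096);
        printf("snd_wnd=%u, max_snd_wnd=%u\n",
               (unsigned)ctx.snd_wnd, (unsigned)ctx.max_snd_wnd);
        return 0;
}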
- if (tcph->ack) {
- cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
- switch (cm_node->state) {
- case NES_CM_STATE_SYN_RCVD:
- case NES_CM_STATE_SYN_SENT:
- /* read and stash current sequence number */
- if (cm_node->tcp_cntxt.rem_ack_num != cm_node->tcp_cntxt.loc_seq_num) {
- nes_debug(NES_DBG_CM, "ERROR - cm_node->tcp_cntxt.rem_ack_num !="
- " cm_node->tcp_cntxt.loc_seq_num\n");
- send_reset(cm_node);
- return 0;
- }
- if (cm_node->state == NES_CM_STATE_SYN_SENT)
- cm_node->state = NES_CM_STATE_ONE_SIDE_ESTABLISHED;
- else {
- cm_node->state = NES_CM_STATE_ESTABLISHED;
- }
- break;
- case NES_CM_STATE_LAST_ACK:
- cm_node->state = NES_CM_STATE_CLOSED;
- break;
- case NES_CM_STATE_FIN_WAIT1:
- cm_node->state = NES_CM_STATE_FIN_WAIT2;
- break;
- case NES_CM_STATE_CLOSING:
- cm_node->state = NES_CM_STATE_TIME_WAIT;
- /* need to schedule this to happen in 2MSL timeouts */
- cm_node->state = NES_CM_STATE_CLOSED;
- break;
- case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
- case NES_CM_STATE_ESTABLISHED:
- case NES_CM_STATE_MPAREQ_SENT:
- case NES_CM_STATE_CLOSE_WAIT:
- case NES_CM_STATE_TIME_WAIT:
- case NES_CM_STATE_CLOSED:
- break;
- case NES_CM_STATE_LISTENING:
- nes_debug(NES_DBG_CM, "Received an ACK on a listening port (SYN %d)\n", tcph->syn);
- cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
- send_reset(cm_node);
- /* send_reset bumps refcount, this should have been a new node */
- rem_ref_cm_node(cm_core, cm_node);
- return -1;
- break;
- case NES_CM_STATE_TSA:
- nes_debug(NES_DBG_CM, "Received a packet with the ack bit set while in TSA state\n");
- break;
- case NES_CM_STATE_UNKNOWN:
- case NES_CM_STATE_INITED:
- case NES_CM_STATE_ACCEPTING:
- case NES_CM_STATE_FIN_WAIT2:
- default:
- nes_debug(NES_DBG_CM, "Received ack from unknown state: %x\n",
- cm_node->state);
- send_reset(cm_node);
- break;
- }
- }
+/*
+ * active_open_err() sends a reset when the reset flag is set and always
+ * generates an ABORTED event.
+ */
- if (tcph->syn) {
- if (cm_node->state == NES_CM_STATE_LISTENING) {
- /* do not exceed backlog */
- atomic_inc(&cm_node->listener->pend_accepts_cnt);
- if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
- cm_node->listener->backlog) {
- nes_debug(NES_DBG_CM, "drop syn due to backlog pressure \n");
- cm_backlog_drops++;
- atomic_dec(&cm_node->listener->pend_accepts_cnt);
- rem_ref_cm_node(cm_core, cm_node);
- return 0;
- }
- cm_node->accept_pend = 1;
+static void active_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ int reset)
+{
+ cleanup_retrans_entry(cm_node);
+ if (reset) {
+ nes_debug(NES_DBG_CM, "ERROR active err called for cm_node=%p, "
+ "state=%d\n", cm_node, cm_node->state);
+ add_ref_cm_node(cm_node);
+ send_reset(cm_node, skb);
+ } else
+ dev_kfree_skb_any(skb);
- }
- if (datasize == 0)
- cm_node->tcp_cntxt.rcv_nxt ++;
+ cm_node->state = NES_CM_STATE_CLOSED;
+ create_event(cm_node, NES_CM_EVENT_ABORTED);
+}
- if (cm_node->state == NES_CM_STATE_LISTENING) {
- cm_node->state = NES_CM_STATE_SYN_RCVD;
- send_syn(cm_node, 1);
- }
- if (cm_node->state == NES_CM_STATE_ONE_SIDE_ESTABLISHED) {
- cm_node->state = NES_CM_STATE_ESTABLISHED;
- /* send final handshake ACK */
- ret = send_ack(cm_node);
- if (ret < 0)
- return ret;
+/*
+ * passive_open_err() will either do a reset() or will free up the skb and
+ * remove the cm_node.
+ */
- cm_node->state = NES_CM_STATE_MPAREQ_SENT;
- ret = send_mpa_request(cm_node);
- if (ret < 0)
- return ret;
- }
+static void passive_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ int reset)
+{
+ cleanup_retrans_entry(cm_node);
+ cm_node->state = NES_CM_STATE_CLOSED;
+ if (reset) {
+ nes_debug(NES_DBG_CM, "passive_open_err sending RST for "
+ "cm_node=%p state =%d\n", cm_node, cm_node->state);
+ send_reset(cm_node, skb);
+ } else {
+ dev_kfree_skb_any(skb);
+ rem_ref_cm_node(cm_node->cm_core, cm_node);
}
+}
- if (tcph->fin) {
- cm_node->tcp_cntxt.rcv_nxt++;
- switch (cm_node->state) {
- case NES_CM_STATE_SYN_RCVD:
- case NES_CM_STATE_SYN_SENT:
- case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
- case NES_CM_STATE_ESTABLISHED:
- case NES_CM_STATE_ACCEPTING:
- case NES_CM_STATE_MPAREQ_SENT:
- cm_node->state = NES_CM_STATE_CLOSE_WAIT;
- cm_node->state = NES_CM_STATE_LAST_ACK;
- ret = send_fin(cm_node, NULL);
- break;
- case NES_CM_STATE_FIN_WAIT1:
- cm_node->state = NES_CM_STATE_CLOSING;
- ret = send_ack(cm_node);
- break;
- case NES_CM_STATE_FIN_WAIT2:
- cm_node->state = NES_CM_STATE_TIME_WAIT;
- cm_node->tcp_cntxt.loc_seq_num ++;
- ret = send_ack(cm_node);
- /* need to schedule this to happen in 2MSL timeouts */
- cm_node->state = NES_CM_STATE_CLOSED;
- break;
- case NES_CM_STATE_CLOSE_WAIT:
- case NES_CM_STATE_LAST_ACK:
- case NES_CM_STATE_CLOSING:
- case NES_CM_STATE_TSA:
- default:
- nes_debug(NES_DBG_CM, "Received a fin while in %x state\n",
- cm_node->state);
- ret = -EINVAL;
- break;
- }
+/*
+ * free_retrans_entry() assumes that the retrans_list_lock has been
+ * acquired before it is called.
+ */
+static void free_retrans_entry(struct nes_cm_node *cm_node)
+{
+ struct nes_timer_entry *send_entry;
+ send_entry = cm_node->send_entry;
+ if (send_entry) {
+ cm_node->send_entry = NULL;
+ dev_kfree_skb_any(send_entry->skb);
+ kfree(send_entry);
+ rem_ref_cm_node(cm_node->cm_core, cm_node);
}
+}
- if (datasize) {
- u8 *dataloc = skb->data;
- /* figure out what state we are in and handle transition to next state */
- switch (cm_node->state) {
- case NES_CM_STATE_LISTENING:
- case NES_CM_STATE_SYN_RCVD:
- case NES_CM_STATE_SYN_SENT:
- case NES_CM_STATE_FIN_WAIT1:
- case NES_CM_STATE_FIN_WAIT2:
- case NES_CM_STATE_CLOSE_WAIT:
- case NES_CM_STATE_LAST_ACK:
- case NES_CM_STATE_CLOSING:
- break;
- case NES_CM_STATE_MPAREQ_SENT:
- /* recv the mpa res frame, ret=frame len (incl priv data) */
- ret = parse_mpa(cm_node, dataloc, datasize);
- if (ret < 0)
- break;
- /* set the req frame payload len in skb */
- /* we are done handling this state, set node to a TSA state */
- cm_node->state = NES_CM_STATE_TSA;
- send_ack(cm_node);
- create_event(cm_node, NES_CM_EVENT_CONNECTED);
- break;
-
- case NES_CM_STATE_ESTABLISHED:
- /* we are expecting an MPA req frame */
- ret = parse_mpa(cm_node, dataloc, datasize);
- if (ret < 0) {
- break;
- }
- cm_node->state = NES_CM_STATE_TSA;
- send_ack(cm_node);
- /* we got a valid MPA request, create an event */
- create_event(cm_node, NES_CM_EVENT_MPA_REQ);
- break;
- case NES_CM_STATE_TSA:
- handle_exception_pkt(cm_node, skb);
- break;
- case NES_CM_STATE_UNKNOWN:
- case NES_CM_STATE_INITED:
- default:
- ret = -1;
- }
- }
+static void cleanup_retrans_entry(struct nes_cm_node *cm_node)
+{
+ unsigned long flags;
- return ret;
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ free_retrans_entry(cm_node);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
}
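cleanup_retrans_entry() is the locked entry point around free_retrans_entry(), which may only run with retrans_list_lock held. The same wrapper idiom, sketched with a POSIX mutex in place of the spinlock; the struct and field names below are placeholders, not the driver's types.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        pthread_mutex_t lock;   /* stands in for retrans_list_lock */
        void *send_entry;       /* stands in for struct nes_timer_entry */
};

/* caller must hold node->lock, like free_retrans_entry() */
static void free_entry_locked(struct node *n)
{
        if (n->send_entry) {
                free(n->send_entry);
                n->send_entry = NULL;
        }
}

/* locked wrapper, like cleanup_retrans_entry() */
static void cleanup_entry(struct node *n)
{
        pthread_mutex_lock(&n->lock);
        free_entry_locked(n);
        pthread_mutex_unlock(&n->lock);
}

int main(void)
{
        struct node n = { .lock = PTHREAD_MUTEX_INITIALIZER };

        n.send_entry = malloc(16);
        cleanup_entry(&n);
        printf("entry freed: %s\n", n.send_entry ? "no" : "yes");
        return 0;
}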
+/**
+ * process_packet
+ * Classifies the TCP segment by its flag bits and dispatches it to the
+ * handler for the current connection state; the skb is freed or reused
+ * by the selected handler.
+ */
+static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ struct nes_cm_core *cm_core)
+{
+ enum nes_tcpip_pkt_type pkt_type = NES_PKT_TYPE_UNKNOWN;
+ struct tcphdr *tcph = tcp_hdr(skb);
+ skb_pull(skb, ip_hdr(skb)->ihl << 2);
+
+ nes_debug(NES_DBG_CM, "process_packet: cm_node=%p state =%d syn=%d "
+ "ack=%d rst=%d fin=%d\n", cm_node, cm_node->state, tcph->syn,
+ tcph->ack, tcph->rst, tcph->fin);
+
+ if (tcph->rst)
+ pkt_type = NES_PKT_TYPE_RST;
+ else if (tcph->syn) {
+ pkt_type = NES_PKT_TYPE_SYN;
+ if (tcph->ack)
+ pkt_type = NES_PKT_TYPE_SYNACK;
+ } else if (tcph->fin)
+ pkt_type = NES_PKT_TYPE_FIN;
+ else if (tcph->ack)
+ pkt_type = NES_PKT_TYPE_ACK;
+
+ switch (pkt_type) {
+ case NES_PKT_TYPE_SYN:
+ handle_syn_pkt(cm_node, skb, tcph);
+ break;
+ case NES_PKT_TYPE_SYNACK:
+ handle_synack_pkt(cm_node, skb, tcph);
+ break;
+ case NES_PKT_TYPE_ACK:
+ handle_ack_pkt(cm_node, skb, tcph);
+ break;
+ case NES_PKT_TYPE_RST:
+ handle_rst_pkt(cm_node, skb, tcph);
+ break;
+ case NES_PKT_TYPE_FIN:
+ handle_fin_pkt(cm_node, skb, tcph);
+ break;
+ default:
+ drop_packet(skb);
+ break;
+ }
+}
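process_packet() collapses the TCP flag combination into a single packet type before dispatching, with RST taking precedence over SYN/SYN-ACK, then FIN, then a plain ACK. A compact userspace sketch of that classification order; the enum values mirror the nes_tcpip_pkt_type names added in nes_cm.h, but the boolean flag parameters are an invention of this sketch.

#include <stdio.h>

enum pkt_type { PKT_UNKNOWN, PKT_SYN, PKT_SYNACK, PKT_ACK, PKT_FIN, PKT_RST };

/* same precedence as the dispatcher: RST, then SYN/SYN-ACK, then FIN, then ACK */
static enum pkt_type classify(int syn, int ack, int rst, int fin)
{
        if (rst)
                return PKT_RST;
        if (syn)
                return ack ? PKT_SYNACK : PKT_SYN;
        if (fin)
                return PKT_FIN;
        if (ack)
                return PKT_ACK;
        return PKT_UNKNOWN;
}

int main(void)
{
        /* a SYN-ACK segment: syn=1, ack=1 */
        printf("type=%d\n", classify(1, 1, 0, 0));      /* prints 2 (PKT_SYNACK) */
        return 0;
}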
/**
* mini_cm_listen - create a listen node with params
*/
static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
- struct nes_vnic *nesvnic, struct nes_cm_info *cm_info)
+ struct nes_vnic *nesvnic, struct nes_cm_info *cm_info)
{
struct nes_cm_listener *listener;
unsigned long flags;
@@ -1644,37 +1826,36 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
/**
* mini_cm_connect - make a connection node with params
*/
-static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
- struct nes_vnic *nesvnic,
- struct ietf_mpa_frame *mpa_frame,
- struct nes_cm_info *cm_info)
+struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
+ struct nes_vnic *nesvnic, u16 private_data_len,
+ void *private_data, struct nes_cm_info *cm_info)
{
int ret = 0;
struct nes_cm_node *cm_node;
struct nes_cm_listener *loopbackremotelistener;
struct nes_cm_node *loopbackremotenode;
struct nes_cm_info loopback_cm_info;
-
- u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) +
- ntohs(mpa_frame->priv_data_len);
-
- cm_info->loc_addr = htonl(cm_info->loc_addr);
- cm_info->rem_addr = htonl(cm_info->rem_addr);
- cm_info->loc_port = htons(cm_info->loc_port);
- cm_info->rem_port = htons(cm_info->rem_port);
+ u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) + private_data_len;
+ struct ietf_mpa_frame *mpa_frame = NULL;
/* create a CM connection node */
cm_node = make_cm_node(cm_core, nesvnic, cm_info, NULL);
if (!cm_node)
return NULL;
+ mpa_frame = &cm_node->mpa_frame;
+ strcpy(mpa_frame->key, IEFT_MPA_KEY_REQ);
+ mpa_frame->flags = IETF_MPA_FLAGS_CRC;
+ mpa_frame->rev = IETF_MPA_VERSION;
+ mpa_frame->priv_data_len = htons(private_data_len);
/* set our node side to client (active) side */
cm_node->tcp_cntxt.client = 1;
cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
if (cm_info->loc_addr == cm_info->rem_addr) {
- loopbackremotelistener = find_listener(cm_core, cm_node->rem_addr,
- cm_node->rem_port, NES_CM_LISTENER_ACTIVE_STATE);
+ loopbackremotelistener = find_listener(cm_core,
+ ntohl(nesvnic->local_ipaddr), cm_node->rem_port,
+ NES_CM_LISTENER_ACTIVE_STATE);
if (loopbackremotelistener == NULL) {
create_event(cm_node, NES_CM_EVENT_ABORTED);
} else {
@@ -1683,26 +1864,35 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
loopback_cm_info.loc_port = cm_info->rem_port;
loopback_cm_info.rem_port = cm_info->loc_port;
loopback_cm_info.cm_id = loopbackremotelistener->cm_id;
- loopbackremotenode = make_cm_node(cm_core, nesvnic, &loopback_cm_info,
- loopbackremotelistener);
+ loopbackremotenode = make_cm_node(cm_core, nesvnic,
+ &loopback_cm_info, loopbackremotelistener);
loopbackremotenode->loopbackpartner = cm_node;
- loopbackremotenode->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
+ loopbackremotenode->tcp_cntxt.rcv_wscale =
+ NES_CM_DEFAULT_RCV_WND_SCALE;
cm_node->loopbackpartner = loopbackremotenode;
- memcpy(loopbackremotenode->mpa_frame_buf, &mpa_frame->priv_data,
- mpa_frame_size);
- loopbackremotenode->mpa_frame_size = mpa_frame_size -
- sizeof(struct ietf_mpa_frame);
+ memcpy(loopbackremotenode->mpa_frame_buf, private_data,
+ private_data_len);
+ loopbackremotenode->mpa_frame_size = private_data_len;
- /* we are done handling this state, set node to a TSA state */
+ /* we are done handling this state. */
+ /* set node to a TSA state */
cm_node->state = NES_CM_STATE_TSA;
- cm_node->tcp_cntxt.rcv_nxt = loopbackremotenode->tcp_cntxt.loc_seq_num;
- loopbackremotenode->tcp_cntxt.rcv_nxt = cm_node->tcp_cntxt.loc_seq_num;
- cm_node->tcp_cntxt.max_snd_wnd = loopbackremotenode->tcp_cntxt.rcv_wnd;
- loopbackremotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
- cm_node->tcp_cntxt.snd_wnd = loopbackremotenode->tcp_cntxt.rcv_wnd;
- loopbackremotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
- cm_node->tcp_cntxt.snd_wscale = loopbackremotenode->tcp_cntxt.rcv_wscale;
- loopbackremotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
+ cm_node->tcp_cntxt.rcv_nxt =
+ loopbackremotenode->tcp_cntxt.loc_seq_num;
+ loopbackremotenode->tcp_cntxt.rcv_nxt =
+ cm_node->tcp_cntxt.loc_seq_num;
+ cm_node->tcp_cntxt.max_snd_wnd =
+ loopbackremotenode->tcp_cntxt.rcv_wnd;
+ loopbackremotenode->tcp_cntxt.max_snd_wnd =
+ cm_node->tcp_cntxt.rcv_wnd;
+ cm_node->tcp_cntxt.snd_wnd =
+ loopbackremotenode->tcp_cntxt.rcv_wnd;
+ loopbackremotenode->tcp_cntxt.snd_wnd =
+ cm_node->tcp_cntxt.rcv_wnd;
+ cm_node->tcp_cntxt.snd_wscale =
+ loopbackremotenode->tcp_cntxt.rcv_wscale;
+ loopbackremotenode->tcp_cntxt.snd_wscale =
+ cm_node->tcp_cntxt.rcv_wscale;
create_event(loopbackremotenode, NES_CM_EVENT_MPA_REQ);
}
@@ -1712,16 +1902,29 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
/* set our node side to client (active) side */
cm_node->tcp_cntxt.client = 1;
/* init our MPA frame ptr */
- memcpy(&cm_node->mpa_frame, mpa_frame, mpa_frame_size);
+ memcpy(mpa_frame->priv_data, private_data, private_data_len);
+
cm_node->mpa_frame_size = mpa_frame_size;
/* send a syn and goto syn sent state */
cm_node->state = NES_CM_STATE_SYN_SENT;
- ret = send_syn(cm_node, 0);
+ ret = send_syn(cm_node, 0, NULL);
+
+ if (ret) {
+ /* error sending the SYN; free up the cm_node struct */
+ nes_debug(NES_DBG_CM, "Api - connect() FAILED: dest "
+ "addr=0x%08X, port=0x%04x, cm_node=%p, cm_id = %p.\n",
+ cm_node->rem_addr, cm_node->rem_port, cm_node,
+ cm_node->cm_id);
+ rem_ref_cm_node(cm_node->cm_core, cm_node);
+ cm_node = NULL;
+ }
- nes_debug(NES_DBG_CM, "Api - connect(): dest addr=0x%08X, port=0x%04x,"
- " cm_node=%p, cm_id = %p.\n",
- cm_node->rem_addr, cm_node->rem_port, cm_node, cm_node->cm_id);
+ if (cm_node)
+ nes_debug(NES_DBG_CM, "Api - connect(): dest addr=0x%08X,"
+ "port=0x%04x, cm_node=%p, cm_id = %p.\n",
+ cm_node->rem_addr, cm_node->rem_port, cm_node,
+ cm_node->cm_id);
return cm_node;
}
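For a loopback connection the two cm_nodes never exchange real segments, so mini_cm_connect() cross-wires their TCP contexts directly: each side's rcv_nxt is seeded from the peer's local sequence number and its send window and scale from the peer's receive side. A stripped-down sketch of that mirroring, using a placeholder struct rather than the driver's tcp context:

#include <stdint.h>
#include <stdio.h>

struct tcp_side {
        uint32_t loc_seq_num;
        uint32_t rcv_nxt;
        uint32_t rcv_wnd, snd_wnd, max_snd_wnd;
        uint8_t  rcv_wscale, snd_wscale;
};

/* seed our receive/send state from the peer's local state */
static void mirror(struct tcp_side *self, const struct tcp_side *peer)
{
        self->rcv_nxt     = peer->loc_seq_num;
        self->snd_wnd     = peer->rcv_wnd;
        self->max_snd_wnd = peer->rcv_wnd;
        self->snd_wscale  = peer->rcv_wscale;
}

int main(void)
{
        struct tcp_side active  = { .loc_seq_num = 100, .rcv_wnd = 65535, .rcv_wscale = 2 };
        struct tcp_side passive = { .loc_seq_num = 200, .rcv_wnd = 32768, .rcv_wscale = 1 };

        mirror(&active, &passive);      /* as done for cm_node */
        mirror(&passive, &active);      /* and for loopbackremotenode */
        printf("active: rcv_nxt=%u snd_wnd=%u\n",
               (unsigned)active.rcv_nxt, (unsigned)active.snd_wnd);
        return 0;
}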
@@ -1731,8 +1934,8 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
* mini_cm_accept - accept a connection
* This function is never called
*/
-static int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame,
- struct nes_cm_node *cm_node)
+static int mini_cm_accept(struct nes_cm_core *cm_core,
+ struct ietf_mpa_frame *mpa_frame, struct nes_cm_node *cm_node)
{
return 0;
}
@@ -1742,32 +1945,26 @@ static int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mp
* mini_cm_reject - reject and teardown a connection
*/
static int mini_cm_reject(struct nes_cm_core *cm_core,
- struct ietf_mpa_frame *mpa_frame,
- struct nes_cm_node *cm_node)
+ struct ietf_mpa_frame *mpa_frame, struct nes_cm_node *cm_node)
{
int ret = 0;
- struct sk_buff *skb;
- u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) +
- ntohs(mpa_frame->priv_data_len);
- skb = get_free_pkt(cm_node);
- if (!skb) {
- nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
- return -1;
- }
-
- /* send an MPA Request frame */
- form_cm_frame(skb, cm_node, NULL, 0, mpa_frame, mpa_frame_size, SET_ACK | SET_FIN);
- ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
+ nes_debug(NES_DBG_CM, "%s cm_node=%p type=%d state=%d\n",
+ __func__, cm_node, cm_node->tcp_cntxt.client, cm_node->state);
+ if (cm_node->tcp_cntxt.client)
+ return ret;
+ cleanup_retrans_entry(cm_node);
cm_node->state = NES_CM_STATE_CLOSED;
ret = send_fin(cm_node, NULL);
- if (ret < 0) {
- printk(KERN_INFO PFX "failed to send MPA Reply (reject)\n");
- return ret;
+ if (cm_node->accept_pend) {
+ BUG_ON(!cm_node->listener);
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
}
+ ret = send_reset(cm_node, NULL);
return ret;
}
@@ -1783,35 +1980,39 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod
return -EINVAL;
switch (cm_node->state) {
- /* if passed in node is null, create a reference key node for node search */
- /* check if we found an owner node for this pkt */
- case NES_CM_STATE_SYN_RCVD:
- case NES_CM_STATE_SYN_SENT:
- case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
- case NES_CM_STATE_ESTABLISHED:
- case NES_CM_STATE_ACCEPTING:
- case NES_CM_STATE_MPAREQ_SENT:
- cm_node->state = NES_CM_STATE_FIN_WAIT1;
- send_fin(cm_node, NULL);
- break;
- case NES_CM_STATE_CLOSE_WAIT:
- cm_node->state = NES_CM_STATE_LAST_ACK;
- send_fin(cm_node, NULL);
- break;
- case NES_CM_STATE_FIN_WAIT1:
- case NES_CM_STATE_FIN_WAIT2:
- case NES_CM_STATE_LAST_ACK:
- case NES_CM_STATE_TIME_WAIT:
- case NES_CM_STATE_CLOSING:
- ret = -1;
- break;
- case NES_CM_STATE_LISTENING:
- case NES_CM_STATE_UNKNOWN:
- case NES_CM_STATE_INITED:
- case NES_CM_STATE_CLOSED:
- case NES_CM_STATE_TSA:
- ret = rem_ref_cm_node(cm_core, cm_node);
- break;
+ case NES_CM_STATE_SYN_RCVD:
+ case NES_CM_STATE_SYN_SENT:
+ case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
+ case NES_CM_STATE_ESTABLISHED:
+ case NES_CM_STATE_ACCEPTING:
+ case NES_CM_STATE_MPAREQ_SENT:
+ case NES_CM_STATE_MPAREQ_RCVD:
+ cleanup_retrans_entry(cm_node);
+ send_reset(cm_node, NULL);
+ break;
+ case NES_CM_STATE_CLOSE_WAIT:
+ cm_node->state = NES_CM_STATE_LAST_ACK;
+ send_fin(cm_node, NULL);
+ break;
+ case NES_CM_STATE_FIN_WAIT1:
+ case NES_CM_STATE_FIN_WAIT2:
+ case NES_CM_STATE_LAST_ACK:
+ case NES_CM_STATE_TIME_WAIT:
+ case NES_CM_STATE_CLOSING:
+ ret = -1;
+ break;
+ case NES_CM_STATE_LISTENING:
+ case NES_CM_STATE_UNKNOWN:
+ case NES_CM_STATE_INITED:
+ case NES_CM_STATE_CLOSED:
+ ret = rem_ref_cm_node(cm_core, cm_node);
+ break;
+ case NES_CM_STATE_TSA:
+ if (cm_node->send_entry)
+ printk(KERN_ERR "ERROR Close got called from STATE_TSA "
+ "send_entry=%p\n", cm_node->send_entry);
+ ret = rem_ref_cm_node(cm_core, cm_node);
+ break;
}
cm_node->cm_id = NULL;
return ret;
@@ -1822,25 +2023,30 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod
* recv_pkt - recv an ETHERNET packet, and process it through CM
* node state machine
*/
-static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic,
- struct sk_buff *skb)
+static void mini_cm_recv_pkt(struct nes_cm_core *cm_core,
+ struct nes_vnic *nesvnic, struct sk_buff *skb)
{
struct nes_cm_node *cm_node = NULL;
struct nes_cm_listener *listener = NULL;
struct iphdr *iph;
struct tcphdr *tcph;
struct nes_cm_info nfo;
- int ret = 0;
- if (!skb || skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr)) {
- ret = -EINVAL;
- goto out;
+ if (!skb)
+ return;
+ if (skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr)) {
+ dev_kfree_skb_any(skb);
+ return;
}
iph = (struct iphdr *)skb->data;
tcph = (struct tcphdr *)(skb->data + sizeof(struct iphdr));
skb_reset_network_header(skb);
skb_set_transport_header(skb, sizeof(*tcph));
+ if (!tcph) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
skb->len = ntohs(iph->tot_len);
nfo.loc_addr = ntohl(iph->daddr);
@@ -1853,61 +2059,60 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvni
NIPQUAD(iph->daddr), tcph->dest,
NIPQUAD(iph->saddr), tcph->source);
- /* note: this call is going to increment cm_node ref count */
- cm_node = find_node(cm_core,
+ do {
+ cm_node = find_node(cm_core,
nfo.rem_port, nfo.rem_addr,
nfo.loc_port, nfo.loc_addr);
- if (!cm_node) {
- listener = find_listener(cm_core, nfo.loc_addr, nfo.loc_port,
- NES_CM_LISTENER_ACTIVE_STATE);
- if (listener) {
- nfo.cm_id = listener->cm_id;
- nfo.conn_type = listener->conn_type;
- } else {
- nfo.cm_id = NULL;
- nfo.conn_type = 0;
- }
-
- cm_node = make_cm_node(cm_core, nesvnic, &nfo, listener);
if (!cm_node) {
- nes_debug(NES_DBG_CM, "Unable to allocate node\n");
+ /* only packets for the PASSIVE open */
+ /* (SYN only) are accepted here */
+ if ((!tcph->syn) || (tcph->ack)) {
+ cm_packets_dropped++;
+ break;
+ }
+ listener = find_listener(cm_core, nfo.loc_addr,
+ nfo.loc_port,
+ NES_CM_LISTENER_ACTIVE_STATE);
if (listener) {
- nes_debug(NES_DBG_CM, "unable to allocate node and decrementing listener refcount\n");
+ nfo.cm_id = listener->cm_id;
+ nfo.conn_type = listener->conn_type;
+ } else {
+ nes_debug(NES_DBG_CM, "Unable to find listener "
+ "for the pkt\n");
+ cm_packets_dropped++;
+ dev_kfree_skb_any(skb);
+ break;
+ }
+
+ cm_node = make_cm_node(cm_core, nesvnic, &nfo,
+ listener);
+ if (!cm_node) {
+ nes_debug(NES_DBG_CM, "Unable to allocate "
+ "node\n");
+ cm_packets_dropped++;
atomic_dec(&listener->ref_count);
+ dev_kfree_skb_any(skb);
+ break;
}
- ret = -1;
- goto out;
- }
- if (!listener) {
- nes_debug(NES_DBG_CM, "Packet found for unknown port %x refcnt=%d\n",
- nfo.loc_port, atomic_read(&cm_node->ref_count));
- if (!tcph->rst) {
- nes_debug(NES_DBG_CM, "Packet found for unknown port=%d"
- " rem_port=%d refcnt=%d\n",
- nfo.loc_port, nfo.rem_port, atomic_read(&cm_node->ref_count));
-
- cm_node->tcp_cntxt.rcv_nxt = ntohl(tcph->seq);
- cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
- send_reset(cm_node);
+ if (!tcph->rst && !tcph->fin) {
+ cm_node->state = NES_CM_STATE_LISTENING;
+ } else {
+ cm_packets_dropped++;
+ rem_ref_cm_node(cm_core, cm_node);
+ dev_kfree_skb_any(skb);
+ break;
}
+ add_ref_cm_node(cm_node);
+ } else if (cm_node->state == NES_CM_STATE_TSA) {
rem_ref_cm_node(cm_core, cm_node);
- ret = -1;
- goto out;
+ atomic_inc(&cm_accel_dropped_pkts);
+ dev_kfree_skb_any(skb);
+ break;
}
- add_ref_cm_node(cm_node);
- cm_node->state = NES_CM_STATE_LISTENING;
- }
-
- nes_debug(NES_DBG_CM, "Processing Packet for node %p, data = (%p):\n",
- cm_node, skb->data);
- process_packet(cm_node, skb, cm_core);
-
- rem_ref_cm_node(cm_core, cm_node);
- out:
- if (skb)
- dev_kfree_skb_any(skb);
- return ret;
+ process_packet(cm_node, skb, cm_core);
+ rem_ref_cm_node(cm_core, cm_node);
+ } while (0);
}
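mini_cm_recv_pkt() drops any frame shorter than a bare IPv4 header plus a bare TCP header before touching either header, and only then trims skb->len to the IP total length. A small sketch of that length sanity check on raw buffer sizes; the 20-byte minimums are the option-less header sizes and stand in for sizeof(struct iphdr) and sizeof(struct tcphdr).

#include <stddef.h>
#include <stdio.h>

#define MIN_IPV4_HDR 20 /* sizeof(struct iphdr) without options */
#define MIN_TCP_HDR  20 /* sizeof(struct tcphdr) without options */

/* returns 0 if the buffer is large enough to hold bare IPv4 + TCP headers */
static int validate_len(size_t len)
{
        return len >= MIN_IPV4_HDR + MIN_TCP_HDR ? 0 : -1;
}

int main(void)
{
        size_t frames[] = { 14, 39, 40, 1500 };
        size_t i;

        for (i = 0; i < sizeof(frames) / sizeof(frames[0]); i++)
                printf("len=%zu -> %s\n", frames[i],
                       validate_len(frames[i]) ? "drop" : "process");
        return 0;
}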
@@ -2107,15 +2312,12 @@ int nes_cm_disconn(struct nes_qp *nesqp)
if (nesqp->disconn_pending == 0) {
nesqp->disconn_pending++;
spin_unlock_irqrestore(&nesqp->lock, flags);
- /* nes_add_ref(&nesqp->ibqp); */
/* init our disconnect work element, to */
INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker);
queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work);
- } else {
+ } else
spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_rem_ref(&nesqp->ibqp);
- }
return 0;
}
@@ -2161,7 +2363,6 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
nes_debug(NES_DBG_CM, "QP%u disconnect_worker cmid is NULL\n",
nesqp->hwqp.qp_id);
spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_rem_ref(&nesqp->ibqp);
return -1;
}
@@ -2182,30 +2383,31 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
atomic_inc(&cm_disconnects);
cm_event.event = IW_CM_EVENT_DISCONNECT;
if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) {
- issued_disconnect_reset = 1;
cm_event.status = IW_CM_EVENT_STATUS_RESET;
- nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event (status reset) for "
- " QP%u, cm_id = %p. \n",
- nesqp->hwqp.qp_id, cm_id);
- } else {
+ nes_debug(NES_DBG_CM, "Generating a CM "
+ "Disconnect Event (status reset) for "
+ "QP%u, cm_id = %p. \n",
+ nesqp->hwqp.qp_id, cm_id);
+ } else
cm_event.status = IW_CM_EVENT_STATUS_OK;
- }
cm_event.local_addr = cm_id->local_addr;
cm_event.remote_addr = cm_id->remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
- nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event for "
- " QP%u, SQ Head = %u, SQ Tail = %u. cm_id = %p, refcount = %u.\n",
- nesqp->hwqp.qp_id,
- nesqp->hwqp.sq_head, nesqp->hwqp.sq_tail, cm_id,
- atomic_read(&nesqp->refcount));
+ nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event"
+ " for QP%u, SQ Head = %u, SQ Tail = %u. "
+ "cm_id = %p, refcount = %u.\n",
+ nesqp->hwqp.qp_id, nesqp->hwqp.sq_head,
+ nesqp->hwqp.sq_tail, cm_id,
+ atomic_read(&nesqp->refcount));
spin_unlock_irqrestore(&nesqp->lock, flags);
ret = cm_id->event_handler(cm_id, &cm_event);
if (ret)
- nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
+ nes_debug(NES_DBG_CM, "OFA CM event_handler "
+ "returned, ret=%d\n", ret);
spin_lock_irqsave(&nesqp->lock, flags);
}
@@ -2247,31 +2449,24 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
if (nesqp->flush_issued == 0) {
nesqp->flush_issued = 1;
spin_unlock_irqrestore(&nesqp->lock, flags);
- flush_wqes(nesvnic->nesdev, nesqp, NES_CQP_FLUSH_RQ, 1);
- } else {
+ flush_wqes(nesvnic->nesdev, nesqp,
+ NES_CQP_FLUSH_RQ, 1);
+ } else
spin_unlock_irqrestore(&nesqp->lock, flags);
- }
-
- /* This reference is from either ModifyQP or the AE processing,
- there is still a race here with modifyqp */
- nes_rem_ref(&nesqp->ibqp);
-
} else {
cm_id = nesqp->cm_id;
spin_unlock_irqrestore(&nesqp->lock, flags);
/* check to see if the inbound reset beat the outbound reset */
if ((!cm_id) && (last_ae==NES_AEQE_AEID_RESET_SENT)) {
- nes_debug(NES_DBG_CM, "QP%u: Decing refcount due to inbound reset"
- " beating the outbound reset.\n",
- nesqp->hwqp.qp_id);
- nes_rem_ref(&nesqp->ibqp);
+ nes_debug(NES_DBG_CM, "QP%u: Decing refcount "
+ "due to inbound reset beating the "
+ "outbound reset.\n", nesqp->hwqp.qp_id);
}
}
} else {
nesqp->disconn_pending = 0;
spin_unlock_irqrestore(&nesqp->lock, flags);
}
- nes_rem_ref(&nesqp->ibqp);
return 0;
}
@@ -2349,71 +2544,82 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
nesdev = nesvnic->nesdev;
adapter = nesdev->nesadapter;
- nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n",
- nesvnic, nesvnic->netdev, nesvnic->netdev->name);
-
- /* since this is from a listen, we were able to put node handle into cm_id */
cm_node = (struct nes_cm_node *)cm_id->provider_data;
+ nes_debug(NES_DBG_CM, "nes_accept: cm_node= %p nesvnic=%p, netdev=%p,"
+ "%s\n", cm_node, nesvnic, nesvnic->netdev,
+ nesvnic->netdev->name);
/* associate the node with the QP */
nesqp->cm_node = (void *)cm_node;
+ cm_node->nesqp = nesqp;
+ nes_add_ref(&nesqp->ibqp);
- nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu\n",
- nesqp->hwqp.qp_id, cm_node, jiffies);
+ nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
+ nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
atomic_inc(&cm_accepts);
nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
atomic_read(&nesvnic->netdev->refcnt));
- /* allocate the ietf frame and space for private data */
- nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev,
- sizeof(struct ietf_mpa_frame) + conn_param->private_data_len,
- &nesqp->ietf_frame_pbase);
-
- if (!nesqp->ietf_frame) {
- nes_debug(NES_DBG_CM, "Unable to allocate memory for private data\n");
- return -ENOMEM;
- }
+ /* allocate the ietf frame and space for private data */
+ nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev,
+ sizeof(struct ietf_mpa_frame) + conn_param->private_data_len,
+ &nesqp->ietf_frame_pbase);
+ if (!nesqp->ietf_frame) {
+ nes_debug(NES_DBG_CM, "Unable to allocate memory for private "
+ "data\n");
+ return -ENOMEM;
+ }
- /* setup the MPA frame */
- nesqp->private_data_len = conn_param->private_data_len;
- memcpy(nesqp->ietf_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
- memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data,
- conn_param->private_data_len);
+ /* setup the MPA frame */
+ nesqp->private_data_len = conn_param->private_data_len;
+ memcpy(nesqp->ietf_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
- nesqp->ietf_frame->priv_data_len = cpu_to_be16(conn_param->private_data_len);
- nesqp->ietf_frame->rev = mpa_version;
- nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC;
+ memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data,
+ conn_param->private_data_len);
- /* setup our first outgoing iWarp send WQE (the IETF frame response) */
- wqe = &nesqp->hwqp.sq_vbase[0];
+ nesqp->ietf_frame->priv_data_len =
+ cpu_to_be16(conn_param->private_data_len);
+ nesqp->ietf_frame->rev = mpa_version;
+ nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC;
- if (cm_id->remote_addr.sin_addr.s_addr != cm_id->local_addr.sin_addr.s_addr) {
- u64temp = (unsigned long)nesqp;
- u64temp |= NES_SW_CONTEXT_ALIGN>>1;
- set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
- u64temp);
- wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
- cpu_to_le32(NES_IWARP_SQ_WQE_STREAMING | NES_IWARP_SQ_WQE_WRPDU);
- wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] =
- cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame));
- wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] =
- cpu_to_le32((u32)nesqp->ietf_frame_pbase);
- wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] =
- cpu_to_le32((u32)((u64)nesqp->ietf_frame_pbase >> 32));
- wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] =
- cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame));
- wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
-
- nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(
- NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | NES_QPCONTEXT_ORDIRD_WRPDU);
- } else {
- nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
- NES_QPCONTEXT_ORDIRD_WRPDU | NES_QPCONTEXT_ORDIRD_ALSMM));
- }
- nesqp->skip_lsmm = 1;
+ /* setup our first outgoing iWarp send WQE (the IETF frame response) */
+ wqe = &nesqp->hwqp.sq_vbase[0];
+
+ if (cm_id->remote_addr.sin_addr.s_addr !=
+ cm_id->local_addr.sin_addr.s_addr) {
+ u64temp = (unsigned long)nesqp;
+ u64temp |= NES_SW_CONTEXT_ALIGN>>1;
+ set_wqe_64bit_value(wqe->wqe_words,
+ NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
+ u64temp);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
+ cpu_to_le32(NES_IWARP_SQ_WQE_STREAMING |
+ NES_IWARP_SQ_WQE_WRPDU);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] =
+ cpu_to_le32(conn_param->private_data_len +
+ sizeof(struct ietf_mpa_frame));
+ wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] =
+ cpu_to_le32((u32)nesqp->ietf_frame_pbase);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] =
+ cpu_to_le32((u32)((u64)nesqp->ietf_frame_pbase >> 32));
+ wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] =
+ cpu_to_le32(conn_param->private_data_len +
+ sizeof(struct ietf_mpa_frame));
+ wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
+
+ nesqp->nesqp_context->ird_ord_sizes |=
+ cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
+ NES_QPCONTEXT_ORDIRD_WRPDU);
+ } else {
+ nesqp->nesqp_context->ird_ord_sizes |=
+ cpu_to_le32((NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
+ NES_QPCONTEXT_ORDIRD_WRPDU |
+ NES_QPCONTEXT_ORDIRD_ALSMM));
+ }
+ nesqp->skip_lsmm = 1;
/* Cache the cm_id in the qp */
@@ -2424,55 +2630,75 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_id->provider_data = nesqp;
nesqp->active_conn = 0;
+ if (cm_node->state == NES_CM_STATE_TSA)
+ nes_debug(NES_DBG_CM, "Already state = TSA for cm_node=%p\n",
+ cm_node);
+
nes_cm_init_tsa_conn(nesqp, cm_node);
- nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
- nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
- nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
+ nesqp->nesqp_context->tcpPorts[0] =
+ cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
+ nesqp->nesqp_context->tcpPorts[1] =
+ cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
+
+ if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
+ nesqp->nesqp_context->ip0 =
+ cpu_to_le32(ntohl(nesvnic->local_ipaddr));
+ else
+ nesqp->nesqp_context->ip0 =
+ cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
nesqp->nesqp_context->misc2 |= cpu_to_le32(
- (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);
+ (u32)PCI_FUNC(nesdev->pcidev->devfn) <<
+ NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);
- nesqp->nesqp_context->arp_index_vlan |= cpu_to_le32(
- nes_arp_table(nesdev, le32_to_cpu(nesqp->nesqp_context->ip0), NULL,
+ nesqp->nesqp_context->arp_index_vlan |=
+ cpu_to_le32(nes_arp_table(nesdev,
+ le32_to_cpu(nesqp->nesqp_context->ip0), NULL,
NES_ARP_RESOLVE) << 16);
nesqp->nesqp_context->ts_val_delta = cpu_to_le32(
- jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW));
+ jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW));
nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id);
nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(
- ((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT));
- nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord);
+ ((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT));
+ nesqp->nesqp_context->ird_ord_sizes |=
+ cpu_to_le32((u32)conn_param->ord);
memset(&nes_quad, 0, sizeof(nes_quad));
- nes_quad.DstIpAdrIndex = cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
- nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
- nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
- nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
+ nes_quad.DstIpAdrIndex =
+ cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
+ if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
+ nes_quad.SrcIpadr = nesvnic->local_ipaddr;
+ else
+ nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
+ nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
+ nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
/* Produce hash key */
crc_value = get_crc_value(&nes_quad);
nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff);
nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n",
- nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask);
+ nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask);
nesqp->hte_index &= adapter->hte_index_mask;
nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node);
- nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = 0x%08X:0x%04X,"
- " rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + private data length=%zu.\n",
- nesqp->hwqp.qp_id,
+ nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = "
+ "0x%08X:0x%04X, rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + "
+ "private data length=%zu.\n", nesqp->hwqp.qp_id,
ntohl(cm_id->remote_addr.sin_addr.s_addr),
ntohs(cm_id->remote_addr.sin_port),
ntohl(cm_id->local_addr.sin_addr.s_addr),
ntohs(cm_id->local_addr.sin_port),
le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
le32_to_cpu(nesqp->nesqp_context->snd_nxt),
- conn_param->private_data_len+sizeof(struct ietf_mpa_frame));
+ conn_param->private_data_len +
+ sizeof(struct ietf_mpa_frame));
attr.qp_state = IB_QPS_RTS;
nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
@@ -2489,15 +2715,16 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_event.private_data_len = 0;
ret = cm_id->event_handler(cm_id, &cm_event);
if (cm_node->loopbackpartner) {
- cm_node->loopbackpartner->mpa_frame_size = nesqp->private_data_len;
+ cm_node->loopbackpartner->mpa_frame_size =
+ nesqp->private_data_len;
/* copy entire MPA frame to our cm_node's frame */
- memcpy(cm_node->loopbackpartner->mpa_frame_buf, nesqp->ietf_frame->priv_data,
- nesqp->private_data_len);
+ memcpy(cm_node->loopbackpartner->mpa_frame_buf,
+ nesqp->ietf_frame->priv_data, nesqp->private_data_len);
create_event(cm_node->loopbackpartner, NES_CM_EVENT_CONNECTED);
}
if (ret)
- printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
- __func__, __LINE__, ret);
+ printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
+ "ret=%d\n", __func__, __LINE__, ret);
return 0;
}
@@ -2555,74 +2782,61 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (!nesdev)
return -EINVAL;
- atomic_inc(&cm_connects);
-
- nesqp->ietf_frame = kzalloc(sizeof(struct ietf_mpa_frame) +
- conn_param->private_data_len, GFP_KERNEL);
- if (!nesqp->ietf_frame)
- return -ENOMEM;
+ nes_debug(NES_DBG_CM, "QP%u, current IP = 0x%08X, Destination IP = "
+ "0x%08X:0x%04X, local = 0x%08X:0x%04X.\n", nesqp->hwqp.qp_id,
+ ntohl(nesvnic->local_ipaddr),
+ ntohl(cm_id->remote_addr.sin_addr.s_addr),
+ ntohs(cm_id->remote_addr.sin_port),
+ ntohl(cm_id->local_addr.sin_addr.s_addr),
+ ntohs(cm_id->local_addr.sin_port));
- /* set qp as having an active connection */
+ atomic_inc(&cm_connects);
nesqp->active_conn = 1;
- nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = 0x%08X:0x%04X.\n",
- nesqp->hwqp.qp_id,
- ntohl(cm_id->remote_addr.sin_addr.s_addr),
- ntohs(cm_id->remote_addr.sin_port),
- ntohl(cm_id->local_addr.sin_addr.s_addr),
- ntohs(cm_id->local_addr.sin_port));
-
/* cache the cm_id in the qp */
nesqp->cm_id = cm_id;
cm_id->provider_data = nesqp;
- /* copy the private data */
- if (conn_param->private_data_len) {
- memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data,
- conn_param->private_data_len);
- }
-
nesqp->private_data_len = conn_param->private_data_len;
nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord);
nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord);
- nes_debug(NES_DBG_CM, "mpa private data len =%u\n", conn_param->private_data_len);
-
- strcpy(&nesqp->ietf_frame->key[0], IEFT_MPA_KEY_REQ);
- nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC;
- nesqp->ietf_frame->rev = IETF_MPA_VERSION;
- nesqp->ietf_frame->priv_data_len = htons(conn_param->private_data_len);
+ nes_debug(NES_DBG_CM, "mpa private data len =%u\n",
+ conn_param->private_data_len);
- if (cm_id->local_addr.sin_addr.s_addr != cm_id->remote_addr.sin_addr.s_addr)
+ if (cm_id->local_addr.sin_addr.s_addr !=
+ cm_id->remote_addr.sin_addr.s_addr)
nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
- PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
+ PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
/* set up the connection params for the node */
- cm_info.loc_addr = (cm_id->local_addr.sin_addr.s_addr);
- cm_info.loc_port = (cm_id->local_addr.sin_port);
- cm_info.rem_addr = (cm_id->remote_addr.sin_addr.s_addr);
- cm_info.rem_port = (cm_id->remote_addr.sin_port);
+ cm_info.loc_addr = htonl(cm_id->local_addr.sin_addr.s_addr);
+ cm_info.loc_port = htons(cm_id->local_addr.sin_port);
+ cm_info.rem_addr = htonl(cm_id->remote_addr.sin_addr.s_addr);
+ cm_info.rem_port = htons(cm_id->remote_addr.sin_port);
cm_info.cm_id = cm_id;
cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
cm_id->add_ref(cm_id);
- nes_add_ref(&nesqp->ibqp);
/* create a connect CM node connection */
- cm_node = g_cm_core->api->connect(g_cm_core, nesvnic, nesqp->ietf_frame, &cm_info);
+ cm_node = g_cm_core->api->connect(g_cm_core, nesvnic,
+ conn_param->private_data_len, (void *)conn_param->private_data,
+ &cm_info);
if (!cm_node) {
- if (cm_id->local_addr.sin_addr.s_addr != cm_id->remote_addr.sin_addr.s_addr)
+ if (cm_id->local_addr.sin_addr.s_addr !=
+ cm_id->remote_addr.sin_addr.s_addr)
nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
- PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL);
- nes_rem_ref(&nesqp->ibqp);
- kfree(nesqp->ietf_frame);
- nesqp->ietf_frame = NULL;
+ PCI_FUNC(nesdev->pcidev->devfn),
+ NES_MANAGE_APBVT_DEL);
+
cm_id->rem_ref(cm_id);
return -ENOMEM;
}
cm_node->apbvt_set = 1;
nesqp->cm_node = cm_node;
+ cm_node->nesqp = nesqp;
return 0;
}
@@ -2664,7 +2878,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info);
if (!cm_node) {
- printk("%s[%u] Error returned from listen API call\n",
+ printk(KERN_ERR "%s[%u] Error returned from listen API call\n",
__func__, __LINE__);
return -ENOMEM;
}
@@ -2672,10 +2886,13 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->provider_data = cm_node;
if (!cm_node->reused_node) {
- err = nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
- PCI_FUNC(nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
+ err = nes_manage_apbvt(nesvnic,
+ ntohs(cm_id->local_addr.sin_port),
+ PCI_FUNC(nesvnic->nesdev->pcidev->devfn),
+ NES_MANAGE_APBVT_ADD);
if (err) {
- printk("nes_manage_apbvt call returned %d.\n", err);
+ printk(KERN_ERR "nes_manage_apbvt call returned %d.\n",
+ err);
g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
return err;
}
@@ -2795,53 +3012,70 @@ static void cm_event_connected(struct nes_cm_event *event)
nes_cm_init_tsa_conn(nesqp, cm_node);
/* set the QP tsa context */
- nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
- nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
- nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
+ nesqp->nesqp_context->tcpPorts[0] =
+ cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
+ nesqp->nesqp_context->tcpPorts[1] =
+ cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
+ if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
+ nesqp->nesqp_context->ip0 =
+ cpu_to_le32(ntohl(nesvnic->local_ipaddr));
+ else
+ nesqp->nesqp_context->ip0 =
+ cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
nesqp->nesqp_context->misc2 |= cpu_to_le32(
- (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);
+ (u32)PCI_FUNC(nesdev->pcidev->devfn) <<
+ NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);
nesqp->nesqp_context->arp_index_vlan |= cpu_to_le32(
- nes_arp_table(nesdev, le32_to_cpu(nesqp->nesqp_context->ip0),
+ nes_arp_table(nesdev,
+ le32_to_cpu(nesqp->nesqp_context->ip0),
NULL, NES_ARP_RESOLVE) << 16);
nesqp->nesqp_context->ts_val_delta = cpu_to_le32(
jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW));
nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id);
nesqp->nesqp_context->ird_ord_sizes |=
- cpu_to_le32((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT);
+ cpu_to_le32((u32)1 <<
+ NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT);
/* Adjust tail for not having a LSMM */
nesqp->hwqp.sq_tail = 1;
#if defined(NES_SEND_FIRST_WRITE)
- if (cm_node->send_write0) {
- nes_debug(NES_DBG_CM, "Sending first write.\n");
- wqe = &nesqp->hwqp.sq_vbase[0];
- u64temp = (unsigned long)nesqp;
- u64temp |= NES_SW_CONTEXT_ALIGN>>1;
- set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
- u64temp);
- wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(NES_IWARP_SQ_OP_RDMAW);
- wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0;
- wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0;
- wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0;
- wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0;
- wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
-
- /* use the reserved spot on the WQ for the extra first WQE */
- nesqp->nesqp_context->ird_ord_sizes &= cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
- NES_QPCONTEXT_ORDIRD_WRPDU | NES_QPCONTEXT_ORDIRD_ALSMM));
- nesqp->skip_lsmm = 1;
- nesqp->hwqp.sq_tail = 0;
- nes_write32(nesdev->regs + NES_WQE_ALLOC,
- (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
- }
+ if (cm_node->send_write0) {
+ nes_debug(NES_DBG_CM, "Sending first write.\n");
+ wqe = &nesqp->hwqp.sq_vbase[0];
+ u64temp = (unsigned long)nesqp;
+ u64temp |= NES_SW_CONTEXT_ALIGN>>1;
+ set_wqe_64bit_value(wqe->wqe_words,
+ NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, u64temp);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
+ cpu_to_le32(NES_IWARP_SQ_OP_RDMAW);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
+
+ /* use the reserved spot on the WQ for the extra first WQE */
+ nesqp->nesqp_context->ird_ord_sizes &=
+ cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
+ NES_QPCONTEXT_ORDIRD_WRPDU |
+ NES_QPCONTEXT_ORDIRD_ALSMM));
+ nesqp->skip_lsmm = 1;
+ nesqp->hwqp.sq_tail = 0;
+ nes_write32(nesdev->regs + NES_WQE_ALLOC,
+ (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
+ }
#endif
memset(&nes_quad, 0, sizeof(nes_quad));
- nes_quad.DstIpAdrIndex = cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
- nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
+ nes_quad.DstIpAdrIndex =
+ cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
+ if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
+ nes_quad.SrcIpadr = nesvnic->local_ipaddr;
+ else
+ nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
@@ -2858,10 +3092,6 @@ static void cm_event_connected(struct nes_cm_event *event)
nesqp->private_data_len = (u8) cm_node->mpa_frame_size;
cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node);
- /* modify QP state to rts */
- attr.qp_state = IB_QPS_RTS;
- nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
-
/* notify OF layer we successfully created the requested connection */
cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED;
@@ -2870,20 +3100,21 @@ static void cm_event_connected(struct nes_cm_event *event)
cm_event.local_addr.sin_port = cm_id->local_addr.sin_port;
cm_event.remote_addr = cm_id->remote_addr;
- cm_event.private_data = (void *)event->cm_node->mpa_frame_buf;
- cm_event.private_data_len = (u8) event->cm_node->mpa_frame_size;
+ cm_event.private_data = (void *)event->cm_node->mpa_frame_buf;
+ cm_event.private_data_len = (u8) event->cm_node->mpa_frame_size;
cm_event.local_addr.sin_addr.s_addr = event->cm_info.rem_addr;
ret = cm_id->event_handler(cm_id, &cm_event);
nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
if (ret)
- printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
- __func__, __LINE__, ret);
- nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = %lu\n",
- nesqp->hwqp.qp_id, jiffies );
+ printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
+ "ret=%d\n", __func__, __LINE__, ret);
+ attr.qp_state = IB_QPS_RTS;
+ nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
- nes_rem_ref(&nesqp->ibqp);
+ nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = "
+ "%lu\n", nesqp->hwqp.qp_id, jiffies);
return;
}
@@ -2927,17 +3158,19 @@ static void cm_event_connect_error(struct nes_cm_event *event)
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
- nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, remove_addr=%08x\n",
- cm_event.local_addr.sin_addr.s_addr, cm_event.remote_addr.sin_addr.s_addr);
+ nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, "
+ "remove_addr=%08x\n", cm_event.local_addr.sin_addr.s_addr,
+ cm_event.remote_addr.sin_addr.s_addr);
ret = cm_id->event_handler(cm_id, &cm_event);
nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
if (ret)
- printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
- __func__, __LINE__, ret);
+ printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
+ "ret=%d\n", __func__, __LINE__, ret);
nes_rem_ref(&nesqp->ibqp);
- cm_id->rem_ref(cm_id);
+ cm_id->rem_ref(cm_id);
+ rem_ref_cm_node(event->cm_node->cm_core, event->cm_node);
return;
}
@@ -3040,7 +3273,8 @@ static int nes_cm_post_event(struct nes_cm_event *event)
add_ref_cm_node(event->cm_node);
event->cm_info.cm_id->add_ref(event->cm_info.cm_id);
INIT_WORK(&event->event_work, nes_cm_event_handler);
- nes_debug(NES_DBG_CM, "queue_work, event=%p\n", event);
+ nes_debug(NES_DBG_CM, "cm_node=%p queue_work, event=%p\n",
+ event->cm_node, event);
queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
@@ -3056,46 +3290,48 @@ static int nes_cm_post_event(struct nes_cm_event *event)
*/
static void nes_cm_event_handler(struct work_struct *work)
{
- struct nes_cm_event *event = container_of(work, struct nes_cm_event, event_work);
+ struct nes_cm_event *event = container_of(work, struct nes_cm_event,
+ event_work);
struct nes_cm_core *cm_core;
- if ((!event) || (!event->cm_node) || (!event->cm_node->cm_core)) {
+ if ((!event) || (!event->cm_node) || (!event->cm_node->cm_core))
return;
- }
+
cm_core = event->cm_node->cm_core;
nes_debug(NES_DBG_CM, "event=%p, event->type=%u, events posted=%u\n",
- event, event->type, atomic_read(&cm_core->events_posted));
+ event, event->type, atomic_read(&cm_core->events_posted));
switch (event->type) {
- case NES_CM_EVENT_MPA_REQ:
- cm_event_mpa_req(event);
- nes_debug(NES_DBG_CM, "CM Event: MPA REQUEST\n");
- break;
- case NES_CM_EVENT_RESET:
- nes_debug(NES_DBG_CM, "CM Event: RESET\n");
- cm_event_reset(event);
- break;
- case NES_CM_EVENT_CONNECTED:
- if ((!event->cm_node->cm_id) ||
- (event->cm_node->state != NES_CM_STATE_TSA)) {
- break;
- }
- cm_event_connected(event);
- nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n");
+ case NES_CM_EVENT_MPA_REQ:
+ cm_event_mpa_req(event);
+ nes_debug(NES_DBG_CM, "cm_node=%p CM Event: MPA REQUEST\n",
+ event->cm_node);
+ break;
+ case NES_CM_EVENT_RESET:
+ nes_debug(NES_DBG_CM, "cm_node = %p CM Event: RESET\n",
+ event->cm_node);
+ cm_event_reset(event);
+ break;
+ case NES_CM_EVENT_CONNECTED:
+ if ((!event->cm_node->cm_id) ||
+ (event->cm_node->state != NES_CM_STATE_TSA))
break;
- case NES_CM_EVENT_ABORTED:
- if ((!event->cm_node->cm_id) || (event->cm_node->state == NES_CM_STATE_TSA)) {
- break;
- }
- cm_event_connect_error(event);
- nes_debug(NES_DBG_CM, "CM Event: ABORTED\n");
- break;
- case NES_CM_EVENT_DROPPED_PKT:
- nes_debug(NES_DBG_CM, "CM Event: DROPPED PKT\n");
- break;
- default:
- nes_debug(NES_DBG_CM, "CM Event: UNKNOWN EVENT TYPE\n");
+ cm_event_connected(event);
+ nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n");
+ break;
+ case NES_CM_EVENT_ABORTED:
+ if ((!event->cm_node->cm_id) ||
+ (event->cm_node->state == NES_CM_STATE_TSA))
break;
+ cm_event_connect_error(event);
+ nes_debug(NES_DBG_CM, "CM Event: ABORTED\n");
+ break;
+ case NES_CM_EVENT_DROPPED_PKT:
+ nes_debug(NES_DBG_CM, "CM Event: DROPPED PKT\n");
+ break;
+ default:
+ nes_debug(NES_DBG_CM, "CM Event: UNKNOWN EVENT TYPE\n");
+ break;
}
atomic_dec(&cm_core->events_posted);
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index 7717cb2ab500..367b3d290140 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -83,6 +83,8 @@ enum nes_timer_type {
#define SET_FIN 4
#define SET_RST 8
+#define TCP_OPTIONS_PADDING 3
+
struct option_base {
u8 optionnum;
u8 length;
@@ -177,6 +179,7 @@ enum nes_cm_node_state {
NES_CM_STATE_ESTABLISHED,
NES_CM_STATE_ACCEPTING,
NES_CM_STATE_MPAREQ_SENT,
+ NES_CM_STATE_MPAREQ_RCVD,
NES_CM_STATE_TSA,
NES_CM_STATE_FIN_WAIT1,
NES_CM_STATE_FIN_WAIT2,
@@ -187,6 +190,16 @@ enum nes_cm_node_state {
NES_CM_STATE_CLOSED
};
+enum nes_tcpip_pkt_type {
+ NES_PKT_TYPE_UNKNOWN,
+ NES_PKT_TYPE_SYN,
+ NES_PKT_TYPE_SYNACK,
+ NES_PKT_TYPE_ACK,
+ NES_PKT_TYPE_FIN,
+ NES_PKT_TYPE_RST
+};
+
+
/* type of nes connection */
enum nes_cm_conn_type {
NES_CM_IWARP_CONN_TYPE,
@@ -257,7 +270,9 @@ struct nes_cm_node {
struct net_device *netdev;
struct nes_cm_node *loopbackpartner;
- struct list_head retrans_list;
+
+ struct nes_timer_entry *send_entry;
+
spinlock_t retrans_list_lock;
struct list_head recv_list;
spinlock_t recv_list_lock;
@@ -276,6 +291,8 @@ struct nes_cm_node {
struct nes_vnic *nesvnic;
int apbvt_set;
int accept_pend;
+ int freed;
+ struct nes_qp *nesqp;
};
/* structure for client or CM to fill when making CM api calls. */
@@ -366,14 +383,14 @@ struct nes_cm_ops {
struct nes_cm_info *);
int (*stop_listener)(struct nes_cm_core *, struct nes_cm_listener *);
struct nes_cm_node * (*connect)(struct nes_cm_core *,
- struct nes_vnic *, struct ietf_mpa_frame *,
+ struct nes_vnic *, u16, void *,
struct nes_cm_info *);
int (*close)(struct nes_cm_core *, struct nes_cm_node *);
int (*accept)(struct nes_cm_core *, struct ietf_mpa_frame *,
struct nes_cm_node *);
int (*reject)(struct nes_cm_core *, struct ietf_mpa_frame *,
struct nes_cm_node *);
- int (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *,
+ void (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *,
struct sk_buff *);
int (*destroy_cm_core)(struct nes_cm_core *);
int (*get)(struct nes_cm_core *);
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 85f26d19a32b..1513d4066f1b 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2814,7 +2814,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
nesqp = *((struct nes_qp **)&context);
if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
nesqp->cm_id->add_ref(nesqp->cm_id);
- nes_add_ref(&nesqp->ibqp);
schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
NES_TIMER_TYPE_CLOSE, 1, 0);
nes_debug(NES_DBG_AEQ, "QP%u Not decrementing QP refcount (%d),"
@@ -2838,7 +2837,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
if (async_event_id == NES_AEQE_AEID_RESET_SENT) {
tcp_state = NES_AEQE_TCP_STATE_CLOSED;
}
- nes_add_ref(&nesqp->ibqp);
spin_lock_irqsave(&nesqp->lock, flags);
nesqp->hw_iwarp_state = iwarp_state;
nesqp->hw_tcp_state = tcp_state;
@@ -2876,7 +2874,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
}
spin_unlock_irqrestore(&nesqp->lock, flags);
if (next_iwarp_state) {
- nes_add_ref(&nesqp->ibqp);
nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
" also added another reference\n",
nesqp->hwqp.qp_id, next_iwarp_state);
@@ -2888,7 +2885,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
/* FIN Received but ib state not RTS,
close complete will be on its way */
spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_rem_ref(&nesqp->ibqp);
return;
}
spin_unlock_irqrestore(&nesqp->lock, flags);
@@ -2922,7 +2918,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
((nesqp->ibqp_state == IB_QPS_RTS)&&
(async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
- nes_add_ref(&nesqp->ibqp);
nes_cm_disconn(nesqp);
} else {
nesqp->in_disconnect = 0;
@@ -2931,7 +2926,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
break;
case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
nesqp = *((struct nes_qp **)&context);
- nes_add_ref(&nesqp->ibqp);
spin_lock_irqsave(&nesqp->lock, flags);
nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR;
nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
@@ -3042,7 +3036,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
}
/* tell cm to disconnect, cm will queue work to thread */
- nes_add_ref(&nesqp->ibqp);
nes_cm_disconn(nesqp);
break;
case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
@@ -3062,7 +3055,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
}
/* tell cm to disconnect, cm will queue work to thread */
- nes_add_ref(&nesqp->ibqp);
nes_cm_disconn(nesqp);
break;
case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
@@ -3082,7 +3074,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
}
/* tell cm to disconnect, cm will queue work to thread */
- nes_add_ref(&nesqp->ibqp);
nes_cm_disconn(nesqp);
break;
/* TODO: additional AEs need to be here */
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index e3939d13484e..d79942e84979 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -2867,7 +2867,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
nesqp->hwqp.qp_id, attr->qp_state, nesqp->ibqp_state,
nesqp->iwarp_state, atomic_read(&nesqp->refcount));
- nes_add_ref(&nesqp->ibqp);
spin_lock_irqsave(&nesqp->lock, qplockflags);
nes_debug(NES_DBG_MOD_QP, "QP%u: hw_iwarp_state=0x%X, hw_tcp_state=0x%X,"
@@ -2882,7 +2881,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
nesqp->hwqp.qp_id);
if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_IDLE) {
spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- nes_rem_ref(&nesqp->ibqp);
return -EINVAL;
}
next_iwarp_state = NES_CQP_QP_IWARP_STATE_IDLE;
@@ -2893,7 +2891,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
nesqp->hwqp.qp_id);
if (nesqp->iwarp_state>(u32)NES_CQP_QP_IWARP_STATE_IDLE) {
spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- nes_rem_ref(&nesqp->ibqp);
return -EINVAL;
}
next_iwarp_state = NES_CQP_QP_IWARP_STATE_IDLE;
@@ -2904,14 +2901,12 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
nesqp->hwqp.qp_id);
if (nesqp->iwarp_state>(u32)NES_CQP_QP_IWARP_STATE_RTS) {
spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- nes_rem_ref(&nesqp->ibqp);
return -EINVAL;
}
if (nesqp->cm_id == NULL) {
nes_debug(NES_DBG_MOD_QP, "QP%u: Failing attempt to move QP to RTS without a CM_ID. \n",
nesqp->hwqp.qp_id );
spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- nes_rem_ref(&nesqp->ibqp);
return -EINVAL;
}
next_iwarp_state = NES_CQP_QP_IWARP_STATE_RTS;
@@ -2929,7 +2924,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
nesqp->hwqp.qp_id, nesqp->hwqp.sq_head, nesqp->hwqp.sq_tail);
if (nesqp->iwarp_state == (u32)NES_CQP_QP_IWARP_STATE_CLOSING) {
spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- nes_rem_ref(&nesqp->ibqp);
return 0;
} else {
if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_CLOSING) {
@@ -2937,7 +2931,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
" ignored due to current iWARP state\n",
nesqp->hwqp.qp_id);
spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- nes_rem_ref(&nesqp->ibqp);
return -EINVAL;
}
if (nesqp->hw_iwarp_state != NES_AEQE_IWARP_STATE_RTS) {
@@ -2969,7 +2962,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
nesqp->hwqp.qp_id);
if (nesqp->iwarp_state>=(u32)NES_CQP_QP_IWARP_STATE_TERMINATE) {
spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- nes_rem_ref(&nesqp->ibqp);
return -EINVAL;
}
/* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
@@ -2982,7 +2974,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
case IB_QPS_RESET:
if (nesqp->iwarp_state == (u32)NES_CQP_QP_IWARP_STATE_ERROR) {
spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- nes_rem_ref(&nesqp->ibqp);
return -EINVAL;
}
nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n",
@@ -3008,7 +2999,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
break;
default:
spin_unlock_irqrestore(&nesqp->lock, qplockflags);
- nes_rem_ref(&nesqp->ibqp);
return -EINVAL;
break;
}
@@ -3088,7 +3078,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
original_last_aeq, nesqp->last_aeq);
/* this one is for the cm_disconnect thread */
- nes_add_ref(&nesqp->ibqp);
spin_lock_irqsave(&nesqp->lock, qplockflags);
nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
@@ -3097,14 +3086,12 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
} else {
nes_debug(NES_DBG_MOD_QP, "QP%u No fake disconnect, QP refcount=%d\n",
nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount));
- nes_rem_ref(&nesqp->ibqp);
}
} else {
spin_lock_irqsave(&nesqp->lock, qplockflags);
if (nesqp->cm_id) {
/* These two are for the timer thread */
if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
- nes_add_ref(&nesqp->ibqp);
nesqp->cm_id->add_ref(nesqp->cm_id);
nes_debug(NES_DBG_MOD_QP, "QP%u Not decrementing QP refcount (%d),"
" need ae to finish up, original_last_aeq = 0x%04X."
@@ -3128,14 +3115,12 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
" original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
original_last_aeq, nesqp->last_aeq);
- nes_rem_ref(&nesqp->ibqp);
}
} else {
nes_debug(NES_DBG_MOD_QP, "QP%u Decrementing QP refcount (%d), No ae to finish up,"
" original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
original_last_aeq, nesqp->last_aeq);
- nes_rem_ref(&nesqp->ibqp);
}
err = 0;
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index 691525cf394a..9d9a9dc51f18 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -11,16 +11,17 @@ config INFINIBAND_IPOIB
config INFINIBAND_IPOIB_CM
bool "IP-over-InfiniBand Connected Mode support"
- depends on INFINIBAND_IPOIB && EXPERIMENTAL
+ depends on INFINIBAND_IPOIB
default n
---help---
- This option enables experimental support for IPoIB connected mode.
- After enabling this option, you need to switch to connected mode through
- /sys/class/net/ibXXX/mode to actually create connections, and then increase
- the interface MTU with e.g. ifconfig ib0 mtu 65520.
+ This option enables support for IPoIB connected mode. After
+ enabling this option, you need to switch to connected mode
+ through /sys/class/net/ibXXX/mode to actually create
+ connections, and then increase the interface MTU with
+ e.g. ifconfig ib0 mtu 65520.
- WARNING: Enabling connected mode will trigger some
- packet drops for multicast and UD mode traffic from this interface,
+ WARNING: Enabling connected mode will trigger some packet
+ drops for multicast and UD mode traffic from this interface,
unless you limit mtu for these destinations to 2044.
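For reference, a minimal user-space sketch of the two steps described in the help text above, assuming the interface is named ib0; this is illustration only and not part of the patch. The shell equivalent is simply writing "connected" to /sys/class/net/ib0/mode and then running "ifconfig ib0 mtu 65520".

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

int main(void)
{
	struct ifreq ifr;
	int fd, sock;

	/* switch ib0 to connected mode via sysfs */
	fd = open("/sys/class/net/ib0/mode", O_WRONLY);
	if (fd < 0)
		return 1;
	write(fd, "connected\n", 10);
	close(fd);

	/* raise the MTU to 65520, as "ifconfig ib0 mtu 65520" would */
	sock = socket(AF_INET, SOCK_DGRAM, 0);
	if (sock < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "ib0", IFNAMSIZ - 1);
	ifr.ifr_mtu = 65520;
	ioctl(sock, SIOCSIFMTU, &ifr);
	close(sock);
	return 0;
}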
config INFINIBAND_IPOIB_DEBUG
@@ -33,9 +34,10 @@ config INFINIBAND_IPOIB_DEBUG
debug_level and mcast_debug_level module parameters (which
can also be set after the driver is loaded through sysfs).
- This option also creates an "ipoib_debugfs," which can be
- mounted to expose debugging information about IB multicast
- groups used by the IPoIB driver.
+ This option also creates a directory tree under ipoib/ in
+ debugfs, which contains files that expose debugging
+ information about IB multicast groups used by the IPoIB
+ driver.
config INFINIBAND_IPOIB_DEBUG_DATA
bool "IP-over-InfiniBand data path debugging"
diff --git a/drivers/input/touchscreen/corgi_ts.c b/drivers/input/touchscreen/corgi_ts.c
index 4e9d8eece2e0..d0e13fc4a88c 100644
--- a/drivers/input/touchscreen/corgi_ts.c
+++ b/drivers/input/touchscreen/corgi_ts.c
@@ -195,7 +195,7 @@ static void ts_interrupt_main(struct corgi_ts *corgi_ts, int isTimer)
{
if ((GPLR(IRQ_TO_GPIO(corgi_ts->irq_gpio)) & GPIO_bit(IRQ_TO_GPIO(corgi_ts->irq_gpio))) == 0) {
/* Disable Interrupt */
- set_irq_type(corgi_ts->irq_gpio, IRQT_NOEDGE);
+ set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_NONE);
if (read_xydata(corgi_ts)) {
corgi_ts->pendown = 1;
new_data(corgi_ts);
@@ -214,7 +214,7 @@ static void ts_interrupt_main(struct corgi_ts *corgi_ts, int isTimer)
}
/* Enable Falling Edge */
- set_irq_type(corgi_ts->irq_gpio, IRQT_FALLING);
+ set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
corgi_ts->pendown = 0;
}
}
@@ -258,7 +258,7 @@ static int corgits_resume(struct platform_device *dev)
corgi_ssp_ads7846_putget((4u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
/* Enable Falling Edge */
- set_irq_type(corgi_ts->irq_gpio, IRQT_FALLING);
+ set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
corgi_ts->power_mode = PWR_MODE_ACTIVE;
return 0;
@@ -333,7 +333,7 @@ static int __init corgits_probe(struct platform_device *pdev)
corgi_ts->power_mode = PWR_MODE_ACTIVE;
/* Enable Falling Edge */
- set_irq_type(corgi_ts->irq_gpio, IRQT_FALLING);
+ set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
return 0;
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index a79f029b91c0..590a1379aa32 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -198,7 +198,7 @@ static int wm97xx_acc_startup(struct wm97xx *wm)
switch (wm->id) {
case WM9705_ID2:
wm->pen_irq = IRQ_GPIO(4);
- set_irq_type(IRQ_GPIO(4), IRQT_BOTHEDGE);
+ set_irq_type(IRQ_GPIO(4), IRQ_TYPE_EDGE_BOTH);
break;
case WM9712_ID2:
case WM9713_ID2:
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index 66f946aa30b3..3d113c6e4a70 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig ISDN
- tristate "ISDN support"
+ bool "ISDN support"
depends on NET
depends on !S390
---help---
@@ -21,6 +21,8 @@ menuconfig ISDN
if ISDN
+source "drivers/isdn/mISDN/Kconfig"
+
menuconfig ISDN_I4L
tristate "Old ISDN4Linux (deprecated)"
---help---
diff --git a/drivers/isdn/Makefile b/drivers/isdn/Makefile
index 988142c30a6d..8380a4568d11 100644
--- a/drivers/isdn/Makefile
+++ b/drivers/isdn/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_ISDN_I4L) += i4l/
obj-$(CONFIG_ISDN_CAPI) += capi/
+obj-$(CONFIG_MISDN) += mISDN/
obj-$(CONFIG_ISDN_CAPI) += hardware/
obj-$(CONFIG_ISDN_DIVERSION) += divert/
obj-$(CONFIG_ISDN_DRV_HISAX) += hisax/
diff --git a/drivers/isdn/hardware/Makefile b/drivers/isdn/hardware/Makefile
index 11c8a183948c..a5d8fce4c4c4 100644
--- a/drivers/isdn/hardware/Makefile
+++ b/drivers/isdn/hardware/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_CAPI_AVM) += avm/
obj-$(CONFIG_CAPI_EICON) += eicon/
+obj-$(CONFIG_MISDN) += mISDN/
diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig
new file mode 100644
index 000000000000..9cd5f5f62280
--- /dev/null
+++ b/drivers/isdn/hardware/mISDN/Kconfig
@@ -0,0 +1,26 @@
+#
+# Hardware for mISDN
+#
+comment "mISDN hardware drivers"
+
+config MISDN_HFCPCI
+ tristate "Support for HFC PCI cards"
+ depends on MISDN
+ depends on PCI
+ depends on VIRT_TO_BUS
+ help
+ Enable support for cards with Cologne Chip AG's
+ HFC PCI chip.
+
+config MISDN_HFCMULTI
+ tristate "Support for HFC multiport cards (HFC-4S/8S/E1)"
+ depends on PCI
+ depends on MISDN
+ help
+ Enable support for cards with Cologne Chip AG's HFC multiport
+ chip. There are three types of chips that are quite similar,
+ but the interface is different:
+ * HFC-4S (4 S/T interfaces on one chip)
+ * HFC-8S (8 S/T interfaces on one chip)
+ * HFC-E1 (E1 interface for 2Mbit ISDN)
+
diff --git a/drivers/isdn/hardware/mISDN/Makefile b/drivers/isdn/hardware/mISDN/Makefile
new file mode 100644
index 000000000000..1e7ca5332ad7
--- /dev/null
+++ b/drivers/isdn/hardware/mISDN/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the modular ISDN hardware drivers
+#
+#
+
+obj-$(CONFIG_MISDN_HFCPCI) += hfcpci.o
+obj-$(CONFIG_MISDN_HFCMULTI) += hfcmulti.o
diff --git a/drivers/isdn/hardware/mISDN/hfc_multi.h b/drivers/isdn/hardware/mISDN/hfc_multi.h
new file mode 100644
index 000000000000..a33d87afc843
--- /dev/null
+++ b/drivers/isdn/hardware/mISDN/hfc_multi.h
@@ -0,0 +1,1204 @@
+/*
+ * see notice in hfc_multi.c
+ */
+
+extern void ztdummy_extern_interrupt(void);
+extern void ztdummy_register_interrupt(void);
+extern int ztdummy_unregister_interrupt(void);
+
+#define DEBUG_HFCMULTI_FIFO 0x00010000
+#define DEBUG_HFCMULTI_CRC 0x00020000
+#define DEBUG_HFCMULTI_INIT 0x00040000
+#define DEBUG_HFCMULTI_PLXSD 0x00080000
+#define DEBUG_HFCMULTI_MODE 0x00100000
+#define DEBUG_HFCMULTI_MSG 0x00200000
+#define DEBUG_HFCMULTI_STATE 0x00400000
+#define DEBUG_HFCMULTI_SYNC 0x01000000
+#define DEBUG_HFCMULTI_DTMF 0x02000000
+#define DEBUG_HFCMULTI_LOCK 0x80000000
+
+#define PCI_ENA_REGIO 0x01
+#define PCI_ENA_MEMIO 0x02
+
+/*
+ * NOTE: some registers are assigned multiple times due to different modes
+ * also registers are assigned differently for HFC-4s/8s and HFC-E1
+ */
+
+/*
+#define MAX_FRAME_SIZE 2048
+*/
+
+struct hfc_chan {
+ struct dchannel *dch; /* link if channel is a D-channel */
+ struct bchannel *bch; /* link if channel is a B-channel */
+ int port; /* the interface port this */
+ /* channel is associated with */
+ int nt_timer; /* -1 if off, 0 if elapsed, >0 if running */
+ int los, ais, slip_tx, slip_rx, rdi; /* current alarms */
+ int jitter;
+ u_long cfg; /* port configuration */
+ int sync; /* sync state (used by E1) */
+ u_int protocol; /* current protocol */
+ int slot_tx; /* current pcm slot */
+ int bank_tx; /* current pcm bank */
+ int slot_rx;
+ int bank_rx;
+ int conf; /* conference setting of TX slot */
+ int txpending; /* if there is currently data in */
+ /* the FIFO 0=no, 1=yes, 2=splloop */
+ int rx_off; /* set to turn fifo receive off */
+	int		coeff_count;	/* current coeff block */
+ s32 *coeff; /* memory pointer to 8 coeff blocks */
+};
+
+
+struct hfcm_hw {
+ u_char r_ctrl;
+ u_char r_irq_ctrl;
+ u_char r_cirm;
+ u_char r_ram_sz;
+ u_char r_pcm_md0;
+ u_char r_irqmsk_misc;
+ u_char r_dtmf;
+ u_char r_st_sync;
+ u_char r_sci_msk;
+ u_char r_tx0, r_tx1;
+ u_char a_st_ctrl0[8];
+ timer_t timer;
+};
+
+
+/* for each stack these flags are used (cfg) */
+#define HFC_CFG_NONCAP_TX 1 /* S/T TX interface has less capacity */
+#define HFC_CFG_DIS_ECHANNEL 2 /* disable E-channel processing */
+#define HFC_CFG_REG_ECHANNEL 3 /* register E-channel */
+#define HFC_CFG_OPTICAL 4 /* the E1 interface is optical */
+#define HFC_CFG_REPORT_LOS 5 /* the card should report loss of signal */
+#define HFC_CFG_REPORT_AIS 6 /* the card should report alarm ind. sign. */
+#define HFC_CFG_REPORT_SLIP 7 /* the card should report bit slips */
+#define HFC_CFG_REPORT_RDI 8 /* the card should report remote alarm */
+#define HFC_CFG_DTMF 9 /* enable DTMF-detection */
+#define HFC_CFG_CRC4 10 /* disable CRC-4 Multiframe mode, */
+ /* use double frame instead. */
+
+#define HFC_CHIP_EXRAM_128 0 /* external ram 128k */
+#define HFC_CHIP_EXRAM_512	1 /* external ram 512k */
+#define HFC_CHIP_REVISION0 2 /* old fifo handling */
+#define HFC_CHIP_PCM_SLAVE 3 /* PCM is slave */
+#define HFC_CHIP_PCM_MASTER 4 /* PCM is master */
+#define HFC_CHIP_RX_SYNC 5 /* disable pll sync for pcm */
+#define HFC_CHIP_DTMF 6 /* DTMF decoding is enabled */
+#define HFC_CHIP_ULAW 7 /* ULAW mode */
+#define HFC_CHIP_CLOCK2 8 /* double clock mode */
+#define HFC_CHIP_E1CLOCK_GET 9 /* always get clock from E1 interface */
+#define HFC_CHIP_E1CLOCK_PUT 10 /* always put clock from E1 interface */
+#define HFC_CHIP_WATCHDOG 11 /* whether we should send signals */
+ /* to the watchdog */
+#define HFC_CHIP_B410P 12 /* whether we have a b410p with echocan in */
+ /* hw */
+#define HFC_CHIP_PLXSD 13 /* whether we have a Speech-Design PLX */
+
+#define HFC_IO_MODE_PCIMEM 0x00 /* normal memory mapped IO */
+#define HFC_IO_MODE_REGIO 0x01 /* PCI io access */
+#define HFC_IO_MODE_PLXSD 0x02 /* access HFC via PLX9030 */
+
+/* table entry in the PCI devices list */
+struct hm_map {
+ char *vendor_name;
+ char *card_name;
+ int type;
+ int ports;
+ int clock2;
+ int leds;
+ int opticalsupport;
+ int dip_type;
+ int io_mode;
+};
+
+struct hfc_multi {
+ struct list_head list;
+ struct hm_map *mtyp;
+ int id;
+ int pcm; /* id of pcm bus */
+ int type;
+ int ports;
+
+ u_int irq; /* irq used by card */
+ u_int irqcnt;
+ struct pci_dev *pci_dev;
+ int io_mode; /* selects mode */
+#ifdef HFC_REGISTER_DEBUG
+ void (*HFC_outb)(struct hfc_multi *hc, u_char reg,
+ u_char val, const char *function, int line);
+ void (*HFC_outb_nodebug)(struct hfc_multi *hc, u_char reg,
+ u_char val, const char *function, int line);
+ u_char (*HFC_inb)(struct hfc_multi *hc, u_char reg,
+ const char *function, int line);
+ u_char (*HFC_inb_nodebug)(struct hfc_multi *hc, u_char reg,
+ const char *function, int line);
+ u_short (*HFC_inw)(struct hfc_multi *hc, u_char reg,
+ const char *function, int line);
+ u_short (*HFC_inw_nodebug)(struct hfc_multi *hc, u_char reg,
+ const char *function, int line);
+ void (*HFC_wait)(struct hfc_multi *hc,
+ const char *function, int line);
+ void (*HFC_wait_nodebug)(struct hfc_multi *hc,
+ const char *function, int line);
+#else
+ void (*HFC_outb)(struct hfc_multi *hc, u_char reg,
+ u_char val);
+ void (*HFC_outb_nodebug)(struct hfc_multi *hc, u_char reg,
+ u_char val);
+ u_char (*HFC_inb)(struct hfc_multi *hc, u_char reg);
+ u_char (*HFC_inb_nodebug)(struct hfc_multi *hc, u_char reg);
+ u_short (*HFC_inw)(struct hfc_multi *hc, u_char reg);
+ u_short (*HFC_inw_nodebug)(struct hfc_multi *hc, u_char reg);
+ void (*HFC_wait)(struct hfc_multi *hc);
+ void (*HFC_wait_nodebug)(struct hfc_multi *hc);
+#endif
+ void (*read_fifo)(struct hfc_multi *hc, u_char *data,
+ int len);
+ void (*write_fifo)(struct hfc_multi *hc, u_char *data,
+ int len);
+ u_long pci_origmembase, plx_origmembase, dsp_origmembase;
+ u_char *pci_membase; /* PCI memory (MUST BE BYTE POINTER) */
+ u_char *plx_membase; /* PLX memory */
+ u_char *dsp_membase; /* DSP on PLX */
+ u_long pci_iobase; /* PCI IO */
+ struct hfcm_hw hw; /* remember data of write-only-registers */
+
+ u_long chip; /* chip configuration */
+ int masterclk; /* port that provides master clock -1=off */
+ int dtmf; /* flag that dtmf is currently in process */
+ int Flen; /* F-buffer size */
+ int Zlen; /* Z-buffer size (must be int for calculation)*/
+ int max_trans; /* maximum transparent fifo fill */
+ int Zmin; /* Z-buffer offset */
+ int DTMFbase; /* base address of DTMF coefficients */
+
+ u_int slots; /* number of PCM slots */
+ u_int leds; /* type of leds */
+ u_int ledcount; /* used to animate leds */
+ u_long ledstate; /* save last state of leds */
+	int		opticalsupport;	/* whether the E1 board has */
+					/* an optical interface */
+ int dslot; /* channel # of d-channel (E1) default 16 */
+
+ u_long wdcount; /* every 500 ms we need to */
+ /* send the watchdog a signal */
+ u_char wdbyte; /* watchdog toggle byte */
+ u_int activity[8]; /* if there is any action on this */
+ /* port (will be cleared after */
+ /* showing led-states) */
+ int e1_state; /* keep track of last state */
+ int e1_getclock; /* if sync is retrieved from interface */
+ int syncronized; /* keep track of existing sync interface */
+ int e1_resync; /* resync jobs */
+
+ spinlock_t lock; /* the lock */
+
+ /*
+	 * the channel index is counted from 0, regardless of where the
+	 * channel is located on the hfc-channel.
+	 * the bch->channel is equivalent to the hfc-channel
+ */
+ struct hfc_chan chan[32];
+ u_char created[8]; /* what port is created */
+ signed char slot_owner[256]; /* owner channel of slot */
+};
+
+/* PLX GPIOs */
+#define PLX_GPIO4_DIR_BIT 13
+#define PLX_GPIO4_BIT 14
+#define PLX_GPIO5_DIR_BIT 16
+#define PLX_GPIO5_BIT 17
+#define PLX_GPIO6_DIR_BIT 19
+#define PLX_GPIO6_BIT 20
+#define PLX_GPIO7_DIR_BIT 22
+#define PLX_GPIO7_BIT 23
+#define PLX_GPIO8_DIR_BIT 25
+#define PLX_GPIO8_BIT 26
+
+#define PLX_GPIO4 (1 << PLX_GPIO4_BIT)
+#define PLX_GPIO5 (1 << PLX_GPIO5_BIT)
+#define PLX_GPIO6 (1 << PLX_GPIO6_BIT)
+#define PLX_GPIO7 (1 << PLX_GPIO7_BIT)
+#define PLX_GPIO8 (1 << PLX_GPIO8_BIT)
+
+#define PLX_GPIO4_DIR (1 << PLX_GPIO4_DIR_BIT)
+#define PLX_GPIO5_DIR (1 << PLX_GPIO5_DIR_BIT)
+#define PLX_GPIO6_DIR (1 << PLX_GPIO6_DIR_BIT)
+#define PLX_GPIO7_DIR (1 << PLX_GPIO7_DIR_BIT)
+#define PLX_GPIO8_DIR (1 << PLX_GPIO8_DIR_BIT)
+
+#define PLX_TERM_ON PLX_GPIO7
+#define PLX_SLAVE_EN_N PLX_GPIO5
+#define PLX_MASTER_EN PLX_GPIO6
+#define PLX_SYNC_O_EN PLX_GPIO4
+#define PLX_DSP_RES_N PLX_GPIO8
+/* GPIO4..8 Enable & Set to OUT, SLAVE_EN_N = 1 */
+#define PLX_GPIOC_INIT (PLX_GPIO4_DIR | PLX_GPIO5_DIR | PLX_GPIO6_DIR \
+ | PLX_GPIO7_DIR | PLX_GPIO8_DIR | PLX_SLAVE_EN_N)
+
+/* PLX Interrupt Control/STATUS */
+#define PLX_INTCSR_LINTI1_ENABLE 0x01
+#define PLX_INTCSR_LINTI1_STATUS 0x04
+#define PLX_INTCSR_LINTI2_ENABLE 0x08
+#define PLX_INTCSR_LINTI2_STATUS 0x20
+#define PLX_INTCSR_PCIINT_ENABLE 0x40
+
+/* PLX Registers */
+#define PLX_INTCSR 0x4c
+#define PLX_CNTRL 0x50
+#define PLX_GPIOC 0x54
+
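A short sketch of how the grouped GPIO values above are meant to be used: PLX_GPIOC_INIT is written as one 32-bit word to the PLX_GPIOC register of the mapped PLX9030 window, and individual lines such as PLX_DSP_RES_N are then toggled with read-modify-write cycles. This is illustration only; it assumes plx_membase has been ioremapped as described in struct hfc_multi above and that the usual readl()/writel() accessors are available.

/* sketch only: program GPIO4..8 as outputs with SLAVE_EN_N high, then
 * release the on-board DSP from reset by raising DSP_RES_N */
static void example_plx_gpio_init(struct hfc_multi *hc)
{
	void __iomem *plx_gpioc = hc->plx_membase + PLX_GPIOC;
	u_int pv;

	writel(PLX_GPIOC_INIT, plx_gpioc);
	pv = readl(plx_gpioc);
	writel(pv | PLX_DSP_RES_N, plx_gpioc);
}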
+
+/*
+ * REGISTER SETTING FOR HFC-4S/8S AND HFC-E1
+ */
+
+/* write only registers */
+#define R_CIRM 0x00
+#define R_CTRL 0x01
+#define R_BRG_PCM_CFG 0x02
+#define R_RAM_ADDR0 0x08
+#define R_RAM_ADDR1 0x09
+#define R_RAM_ADDR2 0x0A
+#define R_FIRST_FIFO 0x0B
+#define R_RAM_SZ 0x0C
+#define R_FIFO_MD 0x0D
+#define R_INC_RES_FIFO 0x0E
+#define R_FSM_IDX 0x0F
+#define R_FIFO 0x0F
+#define R_SLOT 0x10
+#define R_IRQMSK_MISC 0x11
+#define R_SCI_MSK 0x12
+#define R_IRQ_CTRL 0x13
+#define R_PCM_MD0 0x14
+#define R_PCM_MD1 0x15
+#define R_PCM_MD2 0x15
+#define R_SH0H 0x15
+#define R_SH1H 0x15
+#define R_SH0L 0x15
+#define R_SH1L 0x15
+#define R_SL_SEL0 0x15
+#define R_SL_SEL1 0x15
+#define R_SL_SEL2 0x15
+#define R_SL_SEL3 0x15
+#define R_SL_SEL4 0x15
+#define R_SL_SEL5 0x15
+#define R_SL_SEL6 0x15
+#define R_SL_SEL7 0x15
+#define R_ST_SEL 0x16
+#define R_ST_SYNC 0x17
+#define R_CONF_EN 0x18
+#define R_TI_WD 0x1A
+#define R_BERT_WD_MD 0x1B
+#define R_DTMF 0x1C
+#define R_DTMF_N 0x1D
+#define R_E1_WR_STA 0x20
+#define R_E1_RD_STA 0x20
+#define R_LOS0 0x22
+#define R_LOS1 0x23
+#define R_RX0 0x24
+#define R_RX_FR0 0x25
+#define R_RX_FR1 0x26
+#define R_TX0 0x28
+#define R_TX1 0x29
+#define R_TX_FR0 0x2C
+
+#define R_TX_FR1 0x2D
+#define R_TX_FR2 0x2E
+#define R_JATT_ATT 0x2F /* undocumented */
+#define A_ST_RD_STATE 0x30
+#define A_ST_WR_STATE 0x30
+#define R_RX_OFF 0x30
+#define A_ST_CTRL0 0x31
+#define R_SYNC_OUT 0x31
+#define A_ST_CTRL1 0x32
+#define A_ST_CTRL2 0x33
+#define A_ST_SQ_WR 0x34
+#define R_TX_OFF 0x34
+#define R_SYNC_CTRL 0x35
+#define A_ST_CLK_DLY 0x37
+#define R_PWM0 0x38
+#define R_PWM1 0x39
+#define A_ST_B1_TX 0x3C
+#define A_ST_B2_TX 0x3D
+#define A_ST_D_TX 0x3E
+#define R_GPIO_OUT0 0x40
+#define R_GPIO_OUT1 0x41
+#define R_GPIO_EN0 0x42
+#define R_GPIO_EN1 0x43
+#define R_GPIO_SEL 0x44
+#define R_BRG_CTRL 0x45
+#define R_PWM_MD 0x46
+#define R_BRG_MD 0x47
+#define R_BRG_TIM0 0x48
+#define R_BRG_TIM1 0x49
+#define R_BRG_TIM2 0x4A
+#define R_BRG_TIM3 0x4B
+#define R_BRG_TIM_SEL01 0x4C
+#define R_BRG_TIM_SEL23 0x4D
+#define R_BRG_TIM_SEL45 0x4E
+#define R_BRG_TIM_SEL67 0x4F
+#define A_SL_CFG 0xD0
+#define A_CONF 0xD1
+#define A_CH_MSK 0xF4
+#define A_CON_HDLC 0xFA
+#define A_SUBCH_CFG 0xFB
+#define A_CHANNEL 0xFC
+#define A_FIFO_SEQ 0xFD
+#define A_IRQ_MSK 0xFF
+
+/* read only registers */
+#define A_Z12 0x04
+#define A_Z1L 0x04
+#define A_Z1 0x04
+#define A_Z1H 0x05
+#define A_Z2L 0x06
+#define A_Z2 0x06
+#define A_Z2H 0x07
+#define A_F1 0x0C
+#define A_F12 0x0C
+#define A_F2 0x0D
+#define R_IRQ_OVIEW 0x10
+#define R_IRQ_MISC 0x11
+#define R_IRQ_STATECH 0x12
+#define R_CONF_OFLOW 0x14
+#define R_RAM_USE 0x15
+#define R_CHIP_ID 0x16
+#define R_BERT_STA 0x17
+#define R_F0_CNTL 0x18
+#define R_F0_CNTH 0x19
+#define R_BERT_EC 0x1A
+#define R_BERT_ECL 0x1A
+#define R_BERT_ECH 0x1B
+#define R_STATUS 0x1C
+#define R_CHIP_RV 0x1F
+#define R_STATE 0x20
+#define R_SYNC_STA 0x24
+#define R_RX_SL0_0 0x25
+#define R_RX_SL0_1 0x26
+#define R_RX_SL0_2 0x27
+#define R_JATT_DIR 0x2b /* undocumented */
+#define R_SLIP 0x2c
+#define A_ST_RD_STA 0x30
+#define R_FAS_EC 0x30
+#define R_FAS_ECL 0x30
+#define R_FAS_ECH 0x31
+#define R_VIO_EC 0x32
+#define R_VIO_ECL 0x32
+#define R_VIO_ECH 0x33
+#define A_ST_SQ_RD 0x34
+#define R_CRC_EC 0x34
+#define R_CRC_ECL 0x34
+#define R_CRC_ECH 0x35
+#define R_E_EC 0x36
+#define R_E_ECL 0x36
+#define R_E_ECH 0x37
+#define R_SA6_SA13_EC 0x38
+#define R_SA6_SA13_ECL 0x38
+#define R_SA6_SA13_ECH 0x39
+#define R_SA6_SA23_EC 0x3A
+#define R_SA6_SA23_ECL 0x3A
+#define R_SA6_SA23_ECH 0x3B
+#define A_ST_B1_RX 0x3C
+#define A_ST_B2_RX 0x3D
+#define A_ST_D_RX 0x3E
+#define A_ST_E_RX 0x3F
+#define R_GPIO_IN0 0x40
+#define R_GPIO_IN1 0x41
+#define R_GPI_IN0 0x44
+#define R_GPI_IN1 0x45
+#define R_GPI_IN2 0x46
+#define R_GPI_IN3 0x47
+#define R_INT_DATA 0x88
+#define R_IRQ_FIFO_BL0 0xC8
+#define R_IRQ_FIFO_BL1 0xC9
+#define R_IRQ_FIFO_BL2 0xCA
+#define R_IRQ_FIFO_BL3 0xCB
+#define R_IRQ_FIFO_BL4 0xCC
+#define R_IRQ_FIFO_BL5 0xCD
+#define R_IRQ_FIFO_BL6 0xCE
+#define R_IRQ_FIFO_BL7 0xCF
+
+/* read and write registers */
+#define A_FIFO_DATA0 0x80
+#define A_FIFO_DATA1 0x80
+#define A_FIFO_DATA2 0x80
+#define A_FIFO_DATA0_NOINC 0x84
+#define A_FIFO_DATA1_NOINC 0x84
+#define A_FIFO_DATA2_NOINC 0x84
+#define R_RAM_DATA 0xC0
+
+
+/*
+ * BIT SETTING FOR HFC-4S/8S AND HFC-E1
+ */
+
+/* chapter 2: universal bus interface */
+/* R_CIRM */
+#define V_IRQ_SEL 0x01
+#define V_SRES 0x08
+#define V_HFCRES 0x10
+#define V_PCMRES 0x20
+#define V_STRES 0x40
+#define V_ETRES 0x40
+#define V_RLD_EPR 0x80
+/* R_CTRL */
+#define V_FIFO_LPRIO 0x02
+#define V_SLOW_RD 0x04
+#define V_EXT_RAM 0x08
+#define V_CLK_OFF 0x20
+#define V_ST_CLK 0x40
+/* R_RAM_ADDR0 */
+#define V_RAM_ADDR2 0x01
+#define V_ADDR_RES 0x40
+#define V_ADDR_INC 0x80
+/* R_RAM_SZ */
+#define V_RAM_SZ 0x01
+#define V_PWM0_16KHZ 0x10
+#define V_PWM1_16KHZ 0x20
+#define V_FZ_MD 0x80
+/* R_CHIP_ID */
+#define V_PNP_IRQ 0x01
+#define V_CHIP_ID 0x10
+
+/* chapter 3: data flow */
+/* R_FIRST_FIFO */
+#define V_FIRST_FIRO_DIR 0x01
+#define V_FIRST_FIFO_NUM 0x02
+/* R_FIFO_MD */
+#define V_FIFO_MD 0x01
+#define V_CSM_MD 0x04
+#define V_FSM_MD 0x08
+#define V_FIFO_SZ 0x10
+/* R_FIFO */
+#define V_FIFO_DIR 0x01
+#define V_FIFO_NUM 0x02
+#define V_REV 0x80
+/* R_SLOT */
+#define V_SL_DIR 0x01
+#define V_SL_NUM 0x02
+/* A_SL_CFG */
+#define V_CH_DIR 0x01
+#define V_CH_SEL 0x02
+#define V_ROUTING 0x40
+/* A_CON_HDLC */
+#define V_IFF 0x01
+#define V_HDLC_TRP 0x02
+#define V_TRP_IRQ 0x04
+#define V_DATA_FLOW 0x20
+/* A_SUBCH_CFG */
+#define V_BIT_CNT 0x01
+#define V_START_BIT 0x08
+#define V_LOOP_FIFO 0x40
+#define V_INV_DATA 0x80
+/* A_CHANNEL */
+#define V_CH_DIR0 0x01
+#define V_CH_NUM0 0x02
+/* A_FIFO_SEQ */
+#define V_NEXT_FIFO_DIR 0x01
+#define V_NEXT_FIFO_NUM 0x02
+#define V_SEQ_END 0x40
+
+/* chapter 4: FIFO handling and HDLC controller */
+/* R_INC_RES_FIFO */
+#define V_INC_F 0x01
+#define V_RES_F 0x02
+#define V_RES_LOST 0x04
+
+/* chapter 5: S/T interface */
+/* R_SCI_MSK */
+#define V_SCI_MSK_ST0 0x01
+#define V_SCI_MSK_ST1 0x02
+#define V_SCI_MSK_ST2 0x04
+#define V_SCI_MSK_ST3 0x08
+#define V_SCI_MSK_ST4 0x10
+#define V_SCI_MSK_ST5 0x20
+#define V_SCI_MSK_ST6 0x40
+#define V_SCI_MSK_ST7 0x80
+/* R_ST_SEL */
+#define V_ST_SEL 0x01
+#define V_MULT_ST 0x08
+/* R_ST_SYNC */
+#define V_SYNC_SEL 0x01
+#define V_AUTO_SYNC 0x08
+/* A_ST_WR_STA */
+#define V_ST_SET_STA 0x01
+#define V_ST_LD_STA 0x10
+#define V_ST_ACT 0x20
+#define V_SET_G2_G3 0x80
+/* A_ST_CTRL0 */
+#define V_B1_EN 0x01
+#define V_B2_EN 0x02
+#define V_ST_MD 0x04
+#define V_D_PRIO 0x08
+#define V_SQ_EN 0x10
+#define V_96KHZ 0x20
+#define V_TX_LI 0x40
+#define V_ST_STOP 0x80
+/* A_ST_CTRL1 */
+#define V_G2_G3_EN 0x01
+#define V_D_HI 0x04
+#define V_E_IGNO 0x08
+#define V_E_LO 0x10
+#define V_B12_SWAP 0x80
+/* A_ST_CTRL2 */
+#define V_B1_RX_EN 0x01
+#define V_B2_RX_EN 0x02
+#define V_ST_TRIS 0x40
+/* A_ST_CLK_DLY */
+#define V_ST_CK_DLY 0x01
+#define V_ST_SMPL 0x10
+/* A_ST_D_TX */
+#define V_ST_D_TX 0x40
+/* R_IRQ_STATECH */
+#define V_SCI_ST0 0x01
+#define V_SCI_ST1 0x02
+#define V_SCI_ST2 0x04
+#define V_SCI_ST3 0x08
+#define V_SCI_ST4 0x10
+#define V_SCI_ST5 0x20
+#define V_SCI_ST6 0x40
+#define V_SCI_ST7 0x80
+/* A_ST_RD_STA */
+#define V_ST_STA 0x01
+#define V_FR_SYNC_ST 0x10
+#define V_TI2_EXP 0x20
+#define V_INFO0 0x40
+#define V_G2_G3 0x80
+/* A_ST_SQ_RD */
+#define V_ST_SQ 0x01
+#define V_MF_RX_RDY 0x10
+#define V_MF_TX_RDY 0x80
+/* A_ST_D_RX */
+#define V_ST_D_RX 0x40
+/* A_ST_E_RX */
+#define V_ST_E_RX 0x40
+
+/* chapter 5: E1 interface */
+/* R_E1_WR_STA */
+/* R_E1_RD_STA */
+#define V_E1_SET_STA 0x01
+#define V_E1_LD_STA 0x10
+/* R_RX0 */
+#define V_RX_CODE 0x01
+#define V_RX_FBAUD 0x04
+#define V_RX_CMI 0x08
+#define V_RX_INV_CMI 0x10
+#define V_RX_INV_CLK 0x20
+#define V_RX_INV_DATA 0x40
+#define V_AIS_ITU 0x80
+/* R_RX_FR0 */
+#define V_NO_INSYNC 0x01
+#define V_AUTO_RESYNC 0x02
+#define V_AUTO_RECO 0x04
+#define V_SWORD_COND 0x08
+#define V_SYNC_LOSS 0x10
+#define V_XCRC_SYNC 0x20
+#define V_MF_RESYNC 0x40
+#define V_RESYNC 0x80
+/* R_RX_FR1 */
+#define V_RX_MF 0x01
+#define V_RX_MF_SYNC 0x02
+#define V_RX_SL0_RAM 0x04
+#define V_ERR_SIM 0x20
+#define V_RES_NMF 0x40
+/* R_TX0 */
+#define V_TX_CODE 0x01
+#define V_TX_FBAUD 0x04
+#define V_TX_CMI_CODE 0x08
+#define V_TX_INV_CMI_CODE 0x10
+#define V_TX_INV_CLK 0x20
+#define V_TX_INV_DATA 0x40
+#define V_OUT_EN 0x80
+/* R_TX1 */
+#define V_INV_CLK 0x01
+#define V_EXCHG_DATA_LI 0x02
+#define V_AIS_OUT 0x04
+#define V_ATX 0x20
+#define V_NTRI 0x40
+#define V_AUTO_ERR_RES 0x80
+/* R_TX_FR0 */
+#define V_TRP_FAS 0x01
+#define V_TRP_NFAS 0x02
+#define V_TRP_RAL 0x04
+#define V_TRP_SA 0x08
+/* R_TX_FR1 */
+#define V_TX_FAS 0x01
+#define V_TX_NFAS 0x02
+#define V_TX_RAL 0x04
+#define V_TX_SA 0x08
+/* R_TX_FR2 */
+#define V_TX_MF 0x01
+#define V_TRP_SL0 0x02
+#define V_TX_SL0_RAM 0x04
+#define V_TX_E 0x10
+#define V_NEG_E 0x20
+#define V_XS12_ON 0x40
+#define V_XS15_ON 0x80
+/* R_RX_OFF */
+#define V_RX_SZ 0x01
+#define V_RX_INIT 0x04
+/* R_SYNC_OUT */
+#define V_SYNC_E1_RX 0x01
+#define V_IPATS0 0x20
+#define V_IPATS1 0x40
+#define V_IPATS2 0x80
+/* R_TX_OFF */
+#define V_TX_SZ 0x01
+#define V_TX_INIT 0x04
+/* R_SYNC_CTRL */
+#define V_EXT_CLK_SYNC 0x01
+#define V_SYNC_OFFS 0x02
+#define V_PCM_SYNC 0x04
+#define V_NEG_CLK 0x08
+#define V_HCLK 0x10
+/*
+#define V_JATT_AUTO_DEL 0x20
+#define V_JATT_AUTO 0x40
+*/
+#define V_JATT_OFF 0x80
+/* R_STATE */
+#define V_E1_STA 0x01
+#define V_ALT_FR_RX 0x40
+#define V_ALT_FR_TX 0x80
+/* R_SYNC_STA */
+#define V_RX_STA 0x01
+#define V_FR_SYNC_E1 0x04
+#define V_SIG_LOS 0x08
+#define V_MFA_STA 0x10
+#define V_AIS 0x40
+#define V_NO_MF_SYNC 0x80
+/* R_RX_SL0_0 */
+#define V_SI_FAS 0x01
+#define V_SI_NFAS 0x02
+#define V_A 0x04
+#define V_CRC_OK 0x08
+#define V_TX_E1 0x10
+#define V_TX_E2 0x20
+#define V_RX_E1 0x40
+#define V_RX_E2 0x80
+/* R_SLIP */
+#define V_SLIP_RX 0x01
+#define V_FOSLIP_RX 0x08
+#define V_SLIP_TX 0x10
+#define V_FOSLIP_TX 0x80
+
+/* chapter 6: PCM interface */
+/* R_PCM_MD0 */
+#define V_PCM_MD 0x01
+#define V_C4_POL 0x02
+#define V_F0_NEG 0x04
+#define V_F0_LEN 0x08
+#define V_PCM_ADDR 0x10
+/* R_SL_SEL0 */
+#define V_SL_SEL0 0x01
+#define V_SH_SEL0 0x80
+/* R_SL_SEL1 */
+#define V_SL_SEL1 0x01
+#define V_SH_SEL1 0x80
+/* R_SL_SEL2 */
+#define V_SL_SEL2 0x01
+#define V_SH_SEL2 0x80
+/* R_SL_SEL3 */
+#define V_SL_SEL3 0x01
+#define V_SH_SEL3 0x80
+/* R_SL_SEL4 */
+#define V_SL_SEL4 0x01
+#define V_SH_SEL4 0x80
+/* R_SL_SEL5 */
+#define V_SL_SEL5 0x01
+#define V_SH_SEL5 0x80
+/* R_SL_SEL6 */
+#define V_SL_SEL6 0x01
+#define V_SH_SEL6 0x80
+/* R_SL_SEL7 */
+#define V_SL_SEL7 0x01
+#define V_SH_SEL7 0x80
+/* R_PCM_MD1 */
+#define V_ODEC_CON 0x01
+#define V_PLL_ADJ 0x04
+#define V_PCM_DR 0x10
+#define V_PCM_LOOP 0x40
+/* R_PCM_MD2 */
+#define V_SYNC_PLL 0x02
+#define V_SYNC_SRC 0x04
+#define V_SYNC_OUT 0x08
+#define V_ICR_FR_TIME 0x40
+#define V_EN_PLL 0x80
+
+/* chapter 7: pulse width modulation */
+/* R_PWM_MD */
+#define V_EXT_IRQ_EN 0x08
+#define V_PWM0_MD 0x10
+#define V_PWM1_MD 0x40
+
+/* chapter 8: multiparty audio conferences */
+/* R_CONF_EN */
+#define V_CONF_EN 0x01
+#define V_ULAW 0x80
+/* A_CONF */
+#define V_CONF_NUM 0x01
+#define V_NOISE_SUPPR 0x08
+#define V_ATT_LEV 0x20
+#define V_CONF_SL 0x80
+/* R_CONF_OFLOW */
+#define V_CONF_OFLOW0 0x01
+#define V_CONF_OFLOW1 0x02
+#define V_CONF_OFLOW2 0x04
+#define V_CONF_OFLOW3 0x08
+#define V_CONF_OFLOW4 0x10
+#define V_CONF_OFLOW5 0x20
+#define V_CONF_OFLOW6 0x40
+#define V_CONF_OFLOW7 0x80
+
+/* chapter 9: DTMF controller */
+/* R_DTMF0 */
+#define V_DTMF_EN 0x01
+#define V_HARM_SEL 0x02
+#define V_DTMF_RX_CH 0x04
+#define V_DTMF_STOP 0x08
+#define V_CHBL_SEL 0x10
+#define V_RST_DTMF 0x40
+#define V_ULAW_SEL 0x80
+
+/* chapter 10: BERT */
+/* R_BERT_WD_MD */
+#define V_PAT_SEQ 0x01
+#define V_BERT_ERR 0x08
+#define V_AUTO_WD_RES 0x20
+#define V_WD_RES 0x80
+/* R_BERT_STA */
+#define V_BERT_SYNC_SRC 0x01
+#define V_BERT_SYNC 0x10
+#define V_BERT_INV_DATA 0x20
+
+/* chapter 11: auxiliary interface */
+/* R_BRG_PCM_CFG */
+#define V_BRG_EN 0x01
+#define V_BRG_MD 0x02
+#define V_PCM_CLK 0x20
+#define V_ADDR_WRDLY 0x40
+/* R_BRG_CTRL */
+#define V_BRG_CS 0x01
+#define V_BRG_ADDR 0x08
+#define V_BRG_CS_SRC 0x80
+/* R_BRG_MD */
+#define V_BRG_MD0 0x01
+#define V_BRG_MD1 0x02
+#define V_BRG_MD2 0x04
+#define V_BRG_MD3 0x08
+#define V_BRG_MD4 0x10
+#define V_BRG_MD5 0x20
+#define V_BRG_MD6 0x40
+#define V_BRG_MD7 0x80
+/* R_BRG_TIM0 */
+#define V_BRG_TIM0_IDLE 0x01
+#define V_BRG_TIM0_CLK 0x10
+/* R_BRG_TIM1 */
+#define V_BRG_TIM1_IDLE 0x01
+#define V_BRG_TIM1_CLK 0x10
+/* R_BRG_TIM2 */
+#define V_BRG_TIM2_IDLE 0x01
+#define V_BRG_TIM2_CLK 0x10
+/* R_BRG_TIM3 */
+#define V_BRG_TIM3_IDLE 0x01
+#define V_BRG_TIM3_CLK 0x10
+/* R_BRG_TIM_SEL01 */
+#define V_BRG_WR_SEL0 0x01
+#define V_BRG_RD_SEL0 0x04
+#define V_BRG_WR_SEL1 0x10
+#define V_BRG_RD_SEL1 0x40
+/* R_BRG_TIM_SEL23 */
+#define V_BRG_WR_SEL2 0x01
+#define V_BRG_RD_SEL2 0x04
+#define V_BRG_WR_SEL3 0x10
+#define V_BRG_RD_SEL3 0x40
+/* R_BRG_TIM_SEL45 */
+#define V_BRG_WR_SEL4 0x01
+#define V_BRG_RD_SEL4 0x04
+#define V_BRG_WR_SEL5 0x10
+#define V_BRG_RD_SEL5 0x40
+/* R_BRG_TIM_SEL67 */
+#define V_BRG_WR_SEL6 0x01
+#define V_BRG_RD_SEL6 0x04
+#define V_BRG_WR_SEL7 0x10
+#define V_BRG_RD_SEL7 0x40
+
+/* chapter 12: clock, reset, interrupt, timer and watchdog */
+/* R_IRQMSK_MISC */
+#define V_STA_IRQMSK 0x01
+#define V_TI_IRQMSK 0x02
+#define V_PROC_IRQMSK 0x04
+#define V_DTMF_IRQMSK 0x08
+#define V_IRQ1S_MSK 0x10
+#define V_SA6_IRQMSK 0x20
+#define V_RX_EOMF_MSK 0x40
+#define V_TX_EOMF_MSK 0x80
+/* R_IRQ_CTRL */
+#define V_FIFO_IRQ 0x01
+#define V_GLOB_IRQ_EN 0x08
+#define V_IRQ_POL 0x10
+/* R_TI_WD */
+#define V_EV_TS 0x01
+#define V_WD_TS 0x10
+/* A_IRQ_MSK */
+#define V_IRQ 0x01
+#define V_BERT_EN 0x02
+#define V_MIX_IRQ 0x04
+/* R_IRQ_OVIEW */
+#define V_IRQ_FIFO_BL0 0x01
+#define V_IRQ_FIFO_BL1 0x02
+#define V_IRQ_FIFO_BL2 0x04
+#define V_IRQ_FIFO_BL3 0x08
+#define V_IRQ_FIFO_BL4 0x10
+#define V_IRQ_FIFO_BL5 0x20
+#define V_IRQ_FIFO_BL6 0x40
+#define V_IRQ_FIFO_BL7 0x80
+/* R_IRQ_MISC */
+#define V_STA_IRQ 0x01
+#define V_TI_IRQ 0x02
+#define V_IRQ_PROC 0x04
+#define V_DTMF_IRQ 0x08
+#define V_IRQ1S 0x10
+#define V_SA6_IRQ 0x20
+#define V_RX_EOMF 0x40
+#define V_TX_EOMF 0x80
+/* R_STATUS */
+#define V_BUSY 0x01
+#define V_PROC 0x02
+#define V_DTMF_STA 0x04
+#define V_LOST_STA 0x08
+#define V_SYNC_IN 0x10
+#define V_EXT_IRQSTA 0x20
+#define V_MISC_IRQSTA 0x40
+#define V_FR_IRQSTA 0x80
+/* R_IRQ_FIFO_BL0 */
+#define V_IRQ_FIFO0_TX 0x01
+#define V_IRQ_FIFO0_RX 0x02
+#define V_IRQ_FIFO1_TX 0x04
+#define V_IRQ_FIFO1_RX 0x08
+#define V_IRQ_FIFO2_TX 0x10
+#define V_IRQ_FIFO2_RX 0x20
+#define V_IRQ_FIFO3_TX 0x40
+#define V_IRQ_FIFO3_RX 0x80
+/* R_IRQ_FIFO_BL1 */
+#define V_IRQ_FIFO4_TX 0x01
+#define V_IRQ_FIFO4_RX 0x02
+#define V_IRQ_FIFO5_TX 0x04
+#define V_IRQ_FIFO5_RX 0x08
+#define V_IRQ_FIFO6_TX 0x10
+#define V_IRQ_FIFO6_RX 0x20
+#define V_IRQ_FIFO7_TX 0x40
+#define V_IRQ_FIFO7_RX 0x80
+/* R_IRQ_FIFO_BL2 */
+#define V_IRQ_FIFO8_TX 0x01
+#define V_IRQ_FIFO8_RX 0x02
+#define V_IRQ_FIFO9_TX 0x04
+#define V_IRQ_FIFO9_RX 0x08
+#define V_IRQ_FIFO10_TX 0x10
+#define V_IRQ_FIFO10_RX 0x20
+#define V_IRQ_FIFO11_TX 0x40
+#define V_IRQ_FIFO11_RX 0x80
+/* R_IRQ_FIFO_BL3 */
+#define V_IRQ_FIFO12_TX 0x01
+#define V_IRQ_FIFO12_RX 0x02
+#define V_IRQ_FIFO13_TX 0x04
+#define V_IRQ_FIFO13_RX 0x08
+#define V_IRQ_FIFO14_TX 0x10
+#define V_IRQ_FIFO14_RX 0x20
+#define V_IRQ_FIFO15_TX 0x40
+#define V_IRQ_FIFO15_RX 0x80
+/* R_IRQ_FIFO_BL4 */
+#define V_IRQ_FIFO16_TX 0x01
+#define V_IRQ_FIFO16_RX 0x02
+#define V_IRQ_FIFO17_TX 0x04
+#define V_IRQ_FIFO17_RX 0x08
+#define V_IRQ_FIFO18_TX 0x10
+#define V_IRQ_FIFO18_RX 0x20
+#define V_IRQ_FIFO19_TX 0x40
+#define V_IRQ_FIFO19_RX 0x80
+/* R_IRQ_FIFO_BL5 */
+#define V_IRQ_FIFO20_TX 0x01
+#define V_IRQ_FIFO20_RX 0x02
+#define V_IRQ_FIFO21_TX 0x04
+#define V_IRQ_FIFO21_RX 0x08
+#define V_IRQ_FIFO22_TX 0x10
+#define V_IRQ_FIFO22_RX 0x20
+#define V_IRQ_FIFO23_TX 0x40
+#define V_IRQ_FIFO23_RX 0x80
+/* R_IRQ_FIFO_BL6 */
+#define V_IRQ_FIFO24_TX 0x01
+#define V_IRQ_FIFO24_RX 0x02
+#define V_IRQ_FIFO25_TX 0x04
+#define V_IRQ_FIFO25_RX 0x08
+#define V_IRQ_FIFO26_TX 0x10
+#define V_IRQ_FIFO26_RX 0x20
+#define V_IRQ_FIFO27_TX 0x40
+#define V_IRQ_FIFO27_RX 0x80
+/* R_IRQ_FIFO_BL7 */
+#define V_IRQ_FIFO28_TX 0x01
+#define V_IRQ_FIFO28_RX 0x02
+#define V_IRQ_FIFO29_TX 0x04
+#define V_IRQ_FIFO29_RX 0x08
+#define V_IRQ_FIFO30_TX 0x10
+#define V_IRQ_FIFO30_RX 0x20
+#define V_IRQ_FIFO31_TX 0x40
+#define V_IRQ_FIFO31_RX 0x80
+
+/* chapter 13: general purpose I/O pins (GPIO) and input pins (GPI) */
+/* R_GPIO_OUT0 */
+#define V_GPIO_OUT0 0x01
+#define V_GPIO_OUT1 0x02
+#define V_GPIO_OUT2 0x04
+#define V_GPIO_OUT3 0x08
+#define V_GPIO_OUT4 0x10
+#define V_GPIO_OUT5 0x20
+#define V_GPIO_OUT6 0x40
+#define V_GPIO_OUT7 0x80
+/* R_GPIO_OUT1 */
+#define V_GPIO_OUT8 0x01
+#define V_GPIO_OUT9 0x02
+#define V_GPIO_OUT10 0x04
+#define V_GPIO_OUT11 0x08
+#define V_GPIO_OUT12 0x10
+#define V_GPIO_OUT13 0x20
+#define V_GPIO_OUT14 0x40
+#define V_GPIO_OUT15 0x80
+/* R_GPIO_EN0 */
+#define V_GPIO_EN0 0x01
+#define V_GPIO_EN1 0x02
+#define V_GPIO_EN2 0x04
+#define V_GPIO_EN3 0x08
+#define V_GPIO_EN4 0x10
+#define V_GPIO_EN5 0x20
+#define V_GPIO_EN6 0x40
+#define V_GPIO_EN7 0x80
+/* R_GPIO_EN1 */
+#define V_GPIO_EN8 0x01
+#define V_GPIO_EN9 0x02
+#define V_GPIO_EN10 0x04
+#define V_GPIO_EN11 0x08
+#define V_GPIO_EN12 0x10
+#define V_GPIO_EN13 0x20
+#define V_GPIO_EN14 0x40
+#define V_GPIO_EN15 0x80
+/* R_GPIO_SEL */
+#define V_GPIO_SEL0 0x01
+#define V_GPIO_SEL1 0x02
+#define V_GPIO_SEL2 0x04
+#define V_GPIO_SEL3 0x08
+#define V_GPIO_SEL4 0x10
+#define V_GPIO_SEL5 0x20
+#define V_GPIO_SEL6 0x40
+#define V_GPIO_SEL7 0x80
+/* R_GPIO_IN0 */
+#define V_GPIO_IN0 0x01
+#define V_GPIO_IN1 0x02
+#define V_GPIO_IN2 0x04
+#define V_GPIO_IN3 0x08
+#define V_GPIO_IN4 0x10
+#define V_GPIO_IN5 0x20
+#define V_GPIO_IN6 0x40
+#define V_GPIO_IN7 0x80
+/* R_GPIO_IN1 */
+#define V_GPIO_IN8 0x01
+#define V_GPIO_IN9 0x02
+#define V_GPIO_IN10 0x04
+#define V_GPIO_IN11 0x08
+#define V_GPIO_IN12 0x10
+#define V_GPIO_IN13 0x20
+#define V_GPIO_IN14 0x40
+#define V_GPIO_IN15 0x80
+/* R_GPI_IN0 */
+#define V_GPI_IN0 0x01
+#define V_GPI_IN1 0x02
+#define V_GPI_IN2 0x04
+#define V_GPI_IN3 0x08
+#define V_GPI_IN4 0x10
+#define V_GPI_IN5 0x20
+#define V_GPI_IN6 0x40
+#define V_GPI_IN7 0x80
+/* R_GPI_IN1 */
+#define V_GPI_IN8 0x01
+#define V_GPI_IN9 0x02
+#define V_GPI_IN10 0x04
+#define V_GPI_IN11 0x08
+#define V_GPI_IN12 0x10
+#define V_GPI_IN13 0x20
+#define V_GPI_IN14 0x40
+#define V_GPI_IN15 0x80
+/* R_GPI_IN2 */
+#define V_GPI_IN16 0x01
+#define V_GPI_IN17 0x02
+#define V_GPI_IN18 0x04
+#define V_GPI_IN19 0x08
+#define V_GPI_IN20 0x10
+#define V_GPI_IN21 0x20
+#define V_GPI_IN22 0x40
+#define V_GPI_IN23 0x80
+/* R_GPI_IN3 */
+#define V_GPI_IN24 0x01
+#define V_GPI_IN25 0x02
+#define V_GPI_IN26 0x04
+#define V_GPI_IN27 0x08
+#define V_GPI_IN28 0x10
+#define V_GPI_IN29 0x20
+#define V_GPI_IN30 0x40
+#define V_GPI_IN31 0x80
+
+/* map of all registers, used for debugging */
+
+#ifdef HFC_REGISTER_DEBUG
+struct hfc_register_names {
+ char *name;
+ u_char reg;
+} hfc_register_names[] = {
+ /* write registers */
+ {"R_CIRM", 0x00},
+ {"R_CTRL", 0x01},
+ {"R_BRG_PCM_CFG ", 0x02},
+ {"R_RAM_ADDR0", 0x08},
+ {"R_RAM_ADDR1", 0x09},
+ {"R_RAM_ADDR2", 0x0A},
+ {"R_FIRST_FIFO", 0x0B},
+ {"R_RAM_SZ", 0x0C},
+ {"R_FIFO_MD", 0x0D},
+ {"R_INC_RES_FIFO", 0x0E},
+ {"R_FIFO / R_FSM_IDX", 0x0F},
+ {"R_SLOT", 0x10},
+ {"R_IRQMSK_MISC", 0x11},
+ {"R_SCI_MSK", 0x12},
+ {"R_IRQ_CTRL", 0x13},
+ {"R_PCM_MD0", 0x14},
+ {"R_0x15", 0x15},
+ {"R_ST_SEL", 0x16},
+ {"R_ST_SYNC", 0x17},
+ {"R_CONF_EN", 0x18},
+ {"R_TI_WD", 0x1A},
+ {"R_BERT_WD_MD", 0x1B},
+ {"R_DTMF", 0x1C},
+ {"R_DTMF_N", 0x1D},
+ {"R_E1_XX_STA", 0x20},
+ {"R_LOS0", 0x22},
+ {"R_LOS1", 0x23},
+ {"R_RX0", 0x24},
+ {"R_RX_FR0", 0x25},
+ {"R_RX_FR1", 0x26},
+ {"R_TX0", 0x28},
+ {"R_TX1", 0x29},
+ {"R_TX_FR0", 0x2C},
+ {"R_TX_FR1", 0x2D},
+ {"R_TX_FR2", 0x2E},
+ {"R_JATT_ATT", 0x2F},
+ {"A_ST_xx_STA/R_RX_OFF", 0x30},
+ {"A_ST_CTRL0/R_SYNC_OUT", 0x31},
+ {"A_ST_CTRL1", 0x32},
+ {"A_ST_CTRL2", 0x33},
+ {"A_ST_SQ_WR", 0x34},
+ {"R_TX_OFF", 0x34},
+ {"R_SYNC_CTRL", 0x35},
+ {"A_ST_CLK_DLY", 0x37},
+ {"R_PWM0", 0x38},
+ {"R_PWM1", 0x39},
+ {"A_ST_B1_TX", 0x3C},
+ {"A_ST_B2_TX", 0x3D},
+ {"A_ST_D_TX", 0x3E},
+ {"R_GPIO_OUT0", 0x40},
+ {"R_GPIO_OUT1", 0x41},
+ {"R_GPIO_EN0", 0x42},
+ {"R_GPIO_EN1", 0x43},
+ {"R_GPIO_SEL", 0x44},
+ {"R_BRG_CTRL", 0x45},
+ {"R_PWM_MD", 0x46},
+ {"R_BRG_MD", 0x47},
+ {"R_BRG_TIM0", 0x48},
+ {"R_BRG_TIM1", 0x49},
+ {"R_BRG_TIM2", 0x4A},
+ {"R_BRG_TIM3", 0x4B},
+ {"R_BRG_TIM_SEL01", 0x4C},
+ {"R_BRG_TIM_SEL23", 0x4D},
+ {"R_BRG_TIM_SEL45", 0x4E},
+ {"R_BRG_TIM_SEL67", 0x4F},
+ {"A_FIFO_DATA0-2", 0x80},
+ {"A_FIFO_DATA0-2_NOINC", 0x84},
+ {"R_RAM_DATA", 0xC0},
+ {"A_SL_CFG", 0xD0},
+ {"A_CONF", 0xD1},
+ {"A_CH_MSK", 0xF4},
+ {"A_CON_HDLC", 0xFA},
+ {"A_SUBCH_CFG", 0xFB},
+ {"A_CHANNEL", 0xFC},
+ {"A_FIFO_SEQ", 0xFD},
+ {"A_IRQ_MSK", 0xFF},
+ {NULL, 0},
+
+ /* read registers */
+ {"A_Z1", 0x04},
+ {"A_Z1H", 0x05},
+ {"A_Z2", 0x06},
+ {"A_Z2H", 0x07},
+ {"A_F1", 0x0C},
+ {"A_F2", 0x0D},
+ {"R_IRQ_OVIEW", 0x10},
+ {"R_IRQ_MISC", 0x11},
+ {"R_IRQ_STATECH", 0x12},
+ {"R_CONF_OFLOW", 0x14},
+ {"R_RAM_USE", 0x15},
+ {"R_CHIP_ID", 0x16},
+ {"R_BERT_STA", 0x17},
+ {"R_F0_CNTL", 0x18},
+ {"R_F0_CNTH", 0x19},
+ {"R_BERT_ECL", 0x1A},
+ {"R_BERT_ECH", 0x1B},
+ {"R_STATUS", 0x1C},
+ {"R_CHIP_RV", 0x1F},
+ {"R_STATE", 0x20},
+ {"R_SYNC_STA", 0x24},
+ {"R_RX_SL0_0", 0x25},
+ {"R_RX_SL0_1", 0x26},
+ {"R_RX_SL0_2", 0x27},
+ {"R_JATT_DIR", 0x2b},
+ {"R_SLIP", 0x2c},
+ {"A_ST_RD_STA", 0x30},
+ {"R_FAS_ECL", 0x30},
+ {"R_FAS_ECH", 0x31},
+ {"R_VIO_ECL", 0x32},
+ {"R_VIO_ECH", 0x33},
+ {"R_CRC_ECL / A_ST_SQ_RD", 0x34},
+ {"R_CRC_ECH", 0x35},
+ {"R_E_ECL", 0x36},
+ {"R_E_ECH", 0x37},
+ {"R_SA6_SA13_ECL", 0x38},
+ {"R_SA6_SA13_ECH", 0x39},
+ {"R_SA6_SA23_ECL", 0x3A},
+ {"R_SA6_SA23_ECH", 0x3B},
+ {"A_ST_B1_RX", 0x3C},
+ {"A_ST_B2_RX", 0x3D},
+ {"A_ST_D_RX", 0x3E},
+ {"A_ST_E_RX", 0x3F},
+ {"R_GPIO_IN0", 0x40},
+ {"R_GPIO_IN1", 0x41},
+ {"R_GPI_IN0", 0x44},
+ {"R_GPI_IN1", 0x45},
+ {"R_GPI_IN2", 0x46},
+ {"R_GPI_IN3", 0x47},
+ {"A_FIFO_DATA0-2", 0x80},
+ {"A_FIFO_DATA0-2_NOINC", 0x84},
+ {"R_INT_DATA", 0x88},
+ {"R_RAM_DATA", 0xC0},
+ {"R_IRQ_FIFO_BL0", 0xC8},
+ {"R_IRQ_FIFO_BL1", 0xC9},
+ {"R_IRQ_FIFO_BL2", 0xCA},
+ {"R_IRQ_FIFO_BL3", 0xCB},
+ {"R_IRQ_FIFO_BL4", 0xCC},
+ {"R_IRQ_FIFO_BL5", 0xCD},
+ {"R_IRQ_FIFO_BL6", 0xCE},
+ {"R_IRQ_FIFO_BL7", 0xCF},
+};
+#endif /* HFC_REGISTER_DEBUG */
+
diff --git a/drivers/isdn/hardware/mISDN/hfc_pci.h b/drivers/isdn/hardware/mISDN/hfc_pci.h
new file mode 100644
index 000000000000..fd2c9be6d849
--- /dev/null
+++ b/drivers/isdn/hardware/mISDN/hfc_pci.h
@@ -0,0 +1,228 @@
+/*
+ * specific defines for CCD's HFC 2BDS0 PCI chips
+ *
+ * Author Werner Cornelius (werner@isdn4linux.de)
+ *
+ * Copyright 1999 by Werner Cornelius (werner@isdn4linux.de)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+/*
+ * thresholds for transparent B-channel mode
+ * change mask and threshold simultaneously
+ */
+#define HFCPCI_BTRANS_THRESHOLD 128
+#define HFCPCI_BTRANS_MAX 256
+#define HFCPCI_BTRANS_THRESMASK 0x00
+
+/* defines for PCI config */
+#define PCI_ENA_MEMIO 0x02
+#define PCI_ENA_MASTER 0x04
+
+/* GCI/IOM bus monitor registers */
+#define HCFPCI_C_I 0x08
+#define HFCPCI_TRxR 0x0C
+#define HFCPCI_MON1_D 0x28
+#define HFCPCI_MON2_D 0x2C
+
+/* GCI/IOM bus timeslot registers */
+#define HFCPCI_B1_SSL 0x80
+#define HFCPCI_B2_SSL 0x84
+#define HFCPCI_AUX1_SSL 0x88
+#define HFCPCI_AUX2_SSL 0x8C
+#define HFCPCI_B1_RSL 0x90
+#define HFCPCI_B2_RSL 0x94
+#define HFCPCI_AUX1_RSL 0x98
+#define HFCPCI_AUX2_RSL 0x9C
+
+/* GCI/IOM bus data registers */
+#define HFCPCI_B1_D 0xA0
+#define HFCPCI_B2_D 0xA4
+#define HFCPCI_AUX1_D 0xA8
+#define HFCPCI_AUX2_D 0xAC
+
+/* GCI/IOM bus configuration registers */
+#define HFCPCI_MST_EMOD 0xB4
+#define HFCPCI_MST_MODE 0xB8
+#define HFCPCI_CONNECT 0xBC
+
+
+/* Interrupt and status registers */
+#define HFCPCI_FIFO_EN 0x44
+#define HFCPCI_TRM 0x48
+#define HFCPCI_B_MODE 0x4C
+#define HFCPCI_CHIP_ID 0x58
+#define HFCPCI_CIRM 0x60
+#define HFCPCI_CTMT 0x64
+#define HFCPCI_INT_M1 0x68
+#define HFCPCI_INT_M2 0x6C
+#define HFCPCI_INT_S1 0x78
+#define HFCPCI_INT_S2 0x7C
+#define HFCPCI_STATUS 0x70
+
+/* S/T section registers */
+#define HFCPCI_STATES 0xC0
+#define HFCPCI_SCTRL 0xC4
+#define HFCPCI_SCTRL_E 0xC8
+#define HFCPCI_SCTRL_R 0xCC
+#define HFCPCI_SQ 0xD0
+#define HFCPCI_CLKDEL 0xDC
+#define HFCPCI_B1_REC 0xF0
+#define HFCPCI_B1_SEND 0xF0
+#define HFCPCI_B2_REC 0xF4
+#define HFCPCI_B2_SEND 0xF4
+#define HFCPCI_D_REC 0xF8
+#define HFCPCI_D_SEND 0xF8
+#define HFCPCI_E_REC 0xFC
+
+
+/* bits in status register (READ) */
+#define HFCPCI_PCI_PROC 0x02
+#define HFCPCI_NBUSY 0x04
+#define HFCPCI_TIMER_ELAP 0x10
+#define HFCPCI_STATINT 0x20
+#define HFCPCI_FRAMEINT 0x40
+#define HFCPCI_ANYINT 0x80
+
+/* bits in CTMT (Write) */
+#define HFCPCI_CLTIMER 0x80
+#define HFCPCI_TIM3_125 0x04
+#define HFCPCI_TIM25 0x10
+#define HFCPCI_TIM50 0x14
+#define HFCPCI_TIM400 0x18
+#define HFCPCI_TIM800 0x1C
+#define HFCPCI_AUTO_TIMER 0x20
+#define HFCPCI_TRANSB2 0x02
+#define HFCPCI_TRANSB1 0x01
+
+/* bits in CIRM (Write) */
+#define HFCPCI_AUX_MSK 0x07
+#define HFCPCI_RESET 0x08
+#define HFCPCI_B1_REV 0x40
+#define HFCPCI_B2_REV 0x80
+
+/* bits in INT_M1 and INT_S1 */
+#define HFCPCI_INTS_B1TRANS 0x01
+#define HFCPCI_INTS_B2TRANS 0x02
+#define HFCPCI_INTS_DTRANS 0x04
+#define HFCPCI_INTS_B1REC 0x08
+#define HFCPCI_INTS_B2REC 0x10
+#define HFCPCI_INTS_DREC 0x20
+#define HFCPCI_INTS_L1STATE 0x40
+#define HFCPCI_INTS_TIMER 0x80
+
+/* bits in INT_M2 */
+#define HFCPCI_PROC_TRANS 0x01
+#define HFCPCI_GCI_I_CHG 0x02
+#define HFCPCI_GCI_MON_REC 0x04
+#define HFCPCI_IRQ_ENABLE 0x08
+#define HFCPCI_PMESEL 0x80
+
+/* bits in STATES */
+#define HFCPCI_STATE_MSK 0x0F
+#define HFCPCI_LOAD_STATE 0x10
+#define HFCPCI_ACTIVATE 0x20
+#define HFCPCI_DO_ACTION 0x40
+#define HFCPCI_NT_G2_G3 0x80
+
+/* bits in HFCD_MST_MODE */
+#define HFCPCI_MASTER 0x01
+#define HFCPCI_SLAVE 0x00
+#define HFCPCI_F0IO_POSITIV 0x02
+#define HFCPCI_F0_NEGATIV 0x04
+#define HFCPCI_F0_2C4 0x08
+/* remaining bits are for codecs control */
+
+/* bits in HFCD_SCTRL */
+#define SCTRL_B1_ENA 0x01
+#define SCTRL_B2_ENA 0x02
+#define SCTRL_MODE_TE 0x00
+#define SCTRL_MODE_NT 0x04
+#define SCTRL_LOW_PRIO 0x08
+#define SCTRL_SQ_ENA 0x10
+#define SCTRL_TEST 0x20
+#define SCTRL_NONE_CAP 0x40
+#define SCTRL_PWR_DOWN 0x80
+
+/* bits in SCTRL_E */
+#define HFCPCI_AUTO_AWAKE 0x01
+#define HFCPCI_DBIT_1 0x04
+#define HFCPCI_IGNORE_COL 0x08
+#define HFCPCI_CHG_B1_B2 0x80
+
+/* bits in FIFO_EN register */
+#define HFCPCI_FIFOEN_B1 0x03
+#define HFCPCI_FIFOEN_B2 0x0C
+#define HFCPCI_FIFOEN_DTX 0x10
+#define HFCPCI_FIFOEN_B1TX 0x01
+#define HFCPCI_FIFOEN_B1RX 0x02
+#define HFCPCI_FIFOEN_B2TX 0x04
+#define HFCPCI_FIFOEN_B2RX 0x08
+
+
+/* definitions of fifo memory area */
+#define MAX_D_FRAMES 15
+#define MAX_B_FRAMES 31
+#define B_SUB_VAL 0x200
+#define B_FIFO_SIZE (0x2000 - B_SUB_VAL)
+#define D_FIFO_SIZE 512
+#define D_FREG_MASK 0xF
+
+struct zt {
+ unsigned short z1; /* Z1 pointer 16 Bit */
+ unsigned short z2; /* Z2 pointer 16 Bit */
+};
+
+struct dfifo {
+ u_char data[D_FIFO_SIZE]; /* FIFO data space */
+ u_char fill1[0x20A0-D_FIFO_SIZE]; /* reserved, do not use */
+ u_char f1, f2; /* f pointers */
+ u_char fill2[0x20C0-0x20A2]; /* reserved, do not use */
+ /* mask index with D_FREG_MASK for access */
+ struct zt za[MAX_D_FRAMES+1];
+ u_char fill3[0x4000-0x2100]; /* align 16K */
+};
+
+struct bzfifo {
+ struct zt za[MAX_B_FRAMES+1]; /* only range 0x0..0x1F allowed */
+ u_char f1, f2; /* f pointers */
+ u_char fill[0x2100-0x2082]; /* alignment */
+};
+
+
+union fifo_area {
+ struct {
+ struct dfifo d_tx; /* D-send channel */
+ struct dfifo d_rx; /* D-receive channel */
+ } d_chan;
+ struct {
+ u_char fill1[0x200];
+ u_char txdat_b1[B_FIFO_SIZE];
+ struct bzfifo txbz_b1;
+ struct bzfifo txbz_b2;
+ u_char txdat_b2[B_FIFO_SIZE];
+ u_char fill2[D_FIFO_SIZE];
+ u_char rxdat_b1[B_FIFO_SIZE];
+ struct bzfifo rxbz_b1;
+ struct bzfifo rxbz_b2;
+ u_char rxdat_b2[B_FIFO_SIZE];
+ } b_chans;
+ u_char fill[32768];
+};
+
+#define Write_hfc(a, b, c) (writeb(c, (a->hw.pci_io)+b))
+#define Read_hfc(a, b) (readb((a->hw.pci_io)+b))
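To make the FIFO layout above a little more concrete, here is a sketch of how the Z pointers are typically evaluated; the helper name is invented for illustration and relies only on struct zt, struct bzfifo, MAX_B_FRAMES and B_FIFO_SIZE as defined in this header. The access macros at the end work the same way, e.g. Read_hfc(hc, HFCPCI_STATES) & HFCPCI_STATE_MSK reads the current S/T state, assuming hc is the driver's per-card structure with hw.pci_io mapped.

/* sketch only: bytes pending in a B FIFO, derived from the circular
 * Z1 (write) and Z2 (read) pointers; a negative difference means the
 * write pointer has already wrapped around the FIFO */
static inline int example_bfifo_fill(struct bzfifo *bz, int idx)
{
	struct zt *z = &bz->za[idx & MAX_B_FRAMES];
	int fill = z->z1 - z->z2;

	if (fill < 0)
		fill += B_FIFO_SIZE;
	return fill;
}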
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
new file mode 100644
index 000000000000..2649ea55a9e8
--- /dev/null
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -0,0 +1,5320 @@
+/*
+ * hfcmulti.c low level driver for hfc-4s/hfc-8s/hfc-e1 based cards
+ *
+ * Author Andreas Eversberg (jolly@eversberg.eu)
+ * ported to mqueue mechanism:
+ * Peter Sprenger (sprengermoving-bytes.de)
+ *
+ * inspired by existing hfc-pci driver:
+ * Copyright 1999 by Werner Cornelius (werner@isdn-development.de)
+ * Copyright 2008 by Karsten Keil (kkeil@suse.de)
+ * Copyright 2008 by Andreas Eversberg (jolly@eversberg.eu)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * Thanks to Cologne Chip AG for this great controller!
+ */
+
+/*
+ * module parameters:
+ * type:
+ * By default (0), the card is automatically detected.
+ * Or use the following combinations:
+ * Bit 0-7 = 0x00001 = HFC-E1 (1 port)
+ * or Bit 0-7 = 0x00004 = HFC-4S (4 ports)
+ * or Bit 0-7 = 0x00008 = HFC-8S (8 ports)
+ * Bit 8 = 0x00100 = uLaw (instead of aLaw)
+ * Bit 9 = 0x00200 = Disable DTMF detect on all B-channels via hardware
+ * Bit 10 = spare
+ *	Bit 11 = 0x00800 = Force PCM bus into slave mode. (otherwise auto)
+ *	or Bit 12 = 0x01000 = Force PCM bus into master mode. (otherwise auto)
+ * Bit 13 = spare
+ * Bit 14 = 0x04000 = Use external ram (128K)
+ * Bit 15 = 0x08000 = Use external ram (512K)
+ * Bit 16 = 0x10000 = Use 64 timeslots instead of 32
+ * or Bit 17 = 0x20000 = Use 128 timeslots instead of anything else
+ * Bit 18 = spare
+ * Bit 19 = 0x80000 = Send the Watchdog a Signal (Dual E1 with Watchdog)
+ * (all other bits are reserved and shall be 0)
+ * example: 0x20204 one HFC-4S with dtmf detection and 128 timeslots on PCM
+ * bus (PCM master)
+ *
+ * port: (optional or required for all ports on all installed cards)
+ * HFC-4S/HFC-8S only bits:
+ * Bit 0 = 0x001 = Use master clock for this S/T interface
+ *		(only once per chip).
+ * Bit 1 = 0x002 = transmitter line setup (non capacitive mode)
+ * Don't use this unless you know what you are doing!
+ * Bit 2 = 0x004 = Disable E-channel. (No E-channel processing)
+ * example: 0x0001,0x0000,0x0000,0x0000 one HFC-4S with master clock
+ * received from port 1
+ *
+ * HFC-E1 only bits:
+ * Bit 0 = 0x0001 = interface: 0=copper, 1=optical
+ * Bit 1 = 0x0002 = reserved (later for 32 B-channels transparent mode)
+ * Bit 2 = 0x0004 = Report LOS
+ * Bit 3 = 0x0008 = Report AIS
+ * Bit 4 = 0x0010 = Report SLIP
+ * Bit 5 = 0x0020 = Report RDI
+ * Bit 8 = 0x0100 = Turn off CRC-4 Multiframe Mode, use double frame
+ * mode instead.
+ * Bit 9 = 0x0200 = Force get clock from interface, even in NT mode.
+ * or Bit 10 = 0x0400 = Force put clock to interface, even in TE mode.
+ * Bit 11 = 0x0800 = Use direct RX clock for PCM sync rather than PLL.
+ * (E1 only)
+ * Bit 12-13 = 0xX000 = elastic jitter buffer (1-3), Set both bits to 0
+ * for default.
+ * (all other bits are reserved and shall be 0)
+ *
+ * debug:
+ * NOTE: only one debug value must be given for all cards
+ * enable debugging (see hfc_multi.h for debug options)
+ *
+ * poll:
+ * NOTE: only one poll value must be given for all cards
+ * Give the number of samples for each fifo process.
+ * By default 128 is used. Decrease to reduce delay, increase to
+ * reduce cpu load. If unsure, don't mess with it!
+ *	Valid values are 8, 16, 32, 64, 128 and 256.
+ *
+ * pcm:
+ * NOTE: only one pcm value must be given for every card.
+ * The PCM bus id tells the mISDNdsp module about the connected PCM bus.
+ * By default (0), the PCM bus id is 100 for the card that is PCM master.
+ * If multiple cards are PCM master (because they are not interconnected),
+ * each card with PCM master will have increasing PCM id.
+ * All PCM busses with the same ID are expected to be connected and have
+ *	common time slots.
+ * Only one chip of the PCM bus must be master, the others slave.
+ *	-1 means no support of the PCM bus at all.
+ * Omit this value, if all cards are interconnected or none is connected.
+ * If unsure, don't give this parameter.
+ *
+ * dslot:
+ *	NOTE: only one dslot value must be given for every card.
+ * Also this value must be given for non-E1 cards. If omitted, the E1
+ * card has D-channel on time slot 16, which is default.
+ * If 1..15 or 17..31, an alternate time slot is used for D-channel.
+ * In this case, the application must be able to handle this.
+ * If -1 is given, the D-channel is disabled and all 31 slots can be used
+ * for B-channel. (only for specific applications)
+ * If you don't know how to use it, you don't need it!
+ *
+ * iomode:
+ * NOTE: only one mode value must be given for every card.
+ * -> See hfc_multi.h for HFC_IO_MODE_* values
+ * By default, the IO mode is pci memory IO (MEMIO).
+ *	Some cards require a specific IO mode, so it cannot be changed.
+ *	It may be useful to set the IO mode to register IO (REGIO) to solve
+ * PCI bridge problems.
+ * If unsure, don't give this parameter.
+ *
+ * clockdelay_nt:
+ * NOTE: only one clockdelay_nt value must be given once for all cards.
+ * Give the value of the clock control register (A_ST_CLK_DLY)
+ * of the S/T interfaces in NT mode.
+ * This register is needed for the TBR3 certification, so don't change it.
+ *
+ * clockdelay_te:
+ * NOTE: only one clockdelay_te value must be given once
+ * Give the value of the clock control register (A_ST_CLK_DLY)
+ * of the S/T interfaces in TE mode.
+ * This register is needed for the TBR3 certification, so don't change it.
+ */
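As a concrete illustration of the type bit layout documented at the top of the comment above, the following sketch composes one parameter value; the EXAMPLE_* names are invented for readability and are not part of the driver.

/* sketch only: one HFC-4S, uLaw coding, PCM bus forced into master mode.
 * 0x00004 | 0x00100 | 0x01000 == 0x1104, i.e. "modprobe hfcmulti type=0x1104" */
#define EXAMPLE_TYPE_HFC4S	0x00004	/* bits 0-7: chip type */
#define EXAMPLE_TYPE_ULAW	0x00100	/* bit 8: uLaw instead of aLaw */
#define EXAMPLE_TYPE_PCM_MASTER	0x01000	/* bit 12: force PCM master */

static const unsigned int example_type =
	EXAMPLE_TYPE_HFC4S | EXAMPLE_TYPE_ULAW | EXAMPLE_TYPE_PCM_MASTER;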
+
+/*
+ * debug register access (never use this, it will flood your system log)
+ * #define HFC_REGISTER_DEBUG
+ */
+
+static const char *hfcmulti_revision = "2.00";
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/mISDNhw.h>
+#include <linux/mISDNdsp.h>
+
+/*
+#define IRQCOUNT_DEBUG
+#define IRQ_DEBUG
+*/
+
+#include "hfc_multi.h"
+#ifdef ECHOPREP
+#include "gaintab.h"
+#endif
+
+#define MAX_CARDS 8
+#define MAX_PORTS (8 * MAX_CARDS)
+
+static LIST_HEAD(HFClist);
+static spinlock_t HFClock; /* global hfc list lock */
+
+static void ph_state_change(struct dchannel *);
+static void (*hfc_interrupt)(void);
+static void (*register_interrupt)(void);
+static int (*unregister_interrupt)(void);
+static int interrupt_registered;
+
+static struct hfc_multi *syncmaster;
+int plxsd_master; /* if we have a master card (yet) */
+static spinlock_t plx_lock; /* may not acquire other lock inside */
+EXPORT_SYMBOL(plx_lock);
+
+#define TYP_E1 1
+#define TYP_4S 4
+#define TYP_8S 8
+
+static int poll_timer = 6; /* default = 128 samples = 16ms */
+/* number of POLL_TIMER interrupts for G2 timeout (ca 1s) */
+static int nt_t1_count[] = { 3840, 1920, 960, 480, 240, 120, 60, 30 };
+#define CLKDEL_TE 0x0f /* CLKDEL in TE mode */
+#define CLKDEL_NT 0x6c /* CLKDEL in NT mode
+ (0x60 MUST be included!) */
+static u_char silence = 0xff; /* silence by LAW */
+
+#define DIP_4S 0x1 /* DIP Switches for Beronet 1S/2S/4S cards */
+#define DIP_8S 0x2 /* DIP Switches for Beronet 8S+ cards */
+#define DIP_E1 0x3 /* DIP Switches for Beronet E1 cards */
+
+/*
+ * module stuff
+ */
+
+static uint type[MAX_CARDS];
+static uint pcm[MAX_CARDS];
+static uint dslot[MAX_CARDS];
+static uint iomode[MAX_CARDS];
+static uint port[MAX_PORTS];
+static uint debug;
+static uint poll;
+static uint timer;
+static uint clockdelay_te = CLKDEL_TE;
+static uint clockdelay_nt = CLKDEL_NT;
+
+static int HFC_cnt, Port_cnt, PCM_cnt = 99;
+
+MODULE_AUTHOR("Andreas Eversberg");
+MODULE_LICENSE("GPL");
+module_param(debug, uint, S_IRUGO | S_IWUSR);
+module_param(poll, uint, S_IRUGO | S_IWUSR);
+module_param(timer, uint, S_IRUGO | S_IWUSR);
+module_param(clockdelay_te, uint, S_IRUGO | S_IWUSR);
+module_param(clockdelay_nt, uint, S_IRUGO | S_IWUSR);
+module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR);
+module_param_array(pcm, uint, NULL, S_IRUGO | S_IWUSR);
+module_param_array(dslot, uint, NULL, S_IRUGO | S_IWUSR);
+module_param_array(iomode, uint, NULL, S_IRUGO | S_IWUSR);
+module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR);
+
+#ifdef HFC_REGISTER_DEBUG
+#define HFC_outb(hc, reg, val) \
+ (hc->HFC_outb(hc, reg, val, __func__, __LINE__))
+#define HFC_outb_nodebug(hc, reg, val) \
+ (hc->HFC_outb_nodebug(hc, reg, val, __func__, __LINE__))
+#define HFC_inb(hc, reg) \
+ (hc->HFC_inb(hc, reg, __func__, __LINE__))
+#define HFC_inb_nodebug(hc, reg) \
+ (hc->HFC_inb_nodebug(hc, reg, __func__, __LINE__))
+#define HFC_inw(hc, reg) \
+ (hc->HFC_inw(hc, reg, __func__, __LINE__))
+#define HFC_inw_nodebug(hc, reg) \
+ (hc->HFC_inw_nodebug(hc, reg, __func__, __LINE__))
+#define HFC_wait(hc) \
+ (hc->HFC_wait(hc, __func__, __LINE__))
+#define HFC_wait_nodebug(hc) \
+ (hc->HFC_wait_nodebug(hc, __func__, __LINE__))
+#else
+#define HFC_outb(hc, reg, val) (hc->HFC_outb(hc, reg, val))
+#define HFC_outb_nodebug(hc, reg, val) (hc->HFC_outb_nodebug(hc, reg, val))
+#define HFC_inb(hc, reg) (hc->HFC_inb(hc, reg))
+#define HFC_inb_nodebug(hc, reg) (hc->HFC_inb_nodebug(hc, reg))
+#define HFC_inw(hc, reg) (hc->HFC_inw(hc, reg))
+#define HFC_inw_nodebug(hc, reg) (hc->HFC_inw_nodebug(hc, reg))
+#define HFC_wait(hc) (hc->HFC_wait(hc))
+#define HFC_wait_nodebug(hc) (hc->HFC_wait_nodebug(hc))
+#endif
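+
+/*
+ * Note (added for clarity): hc->HFC_outb, hc->HFC_inb, hc->HFC_inw and
+ * hc->HFC_wait are function pointers, presumably assigned during card
+ * setup to either the *_pcimem or the *_regio accessors below, depending
+ * on the board's I/O mode. The macros above only add caller information
+ * for HFC_REGISTER_DEBUG builds.
+ */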
+
+/* HFC_IO_MODE_PCIMEM */
+static void
+#ifdef HFC_REGISTER_DEBUG
+HFC_outb_pcimem(struct hfc_multi *hc, u_char reg, u_char val,
+ const char *function, int line)
+#else
+HFC_outb_pcimem(struct hfc_multi *hc, u_char reg, u_char val)
+#endif
+{
+ writeb(val, (hc->pci_membase)+reg);
+}
+static u_char
+#ifdef HFC_REGISTER_DEBUG
+HFC_inb_pcimem(struct hfc_multi *hc, u_char reg, const char *function, int line)
+#else
+HFC_inb_pcimem(struct hfc_multi *hc, u_char reg)
+#endif
+{
+ return readb((hc->pci_membase)+reg);
+}
+static u_short
+#ifdef HFC_REGISTER_DEBUG
+HFC_inw_pcimem(struct hfc_multi *hc, u_char reg, const char *function, int line)
+#else
+HFC_inw_pcimem(struct hfc_multi *hc, u_char reg)
+#endif
+{
+ return readw((hc->pci_membase)+reg);
+}
+static void
+#ifdef HFC_REGISTER_DEBUG
+HFC_wait_pcimem(struct hfc_multi *hc, const char *function, int line)
+#else
+HFC_wait_pcimem(struct hfc_multi *hc)
+#endif
+{
+	while (readb((hc->pci_membase)+R_STATUS) & V_BUSY)
+		;
+}
+
+/* HFC_IO_MODE_REGIO */
+static void
+#ifdef HFC_REGISTER_DEBUG
+HFC_outb_regio(struct hfc_multi *hc, u_char reg, u_char val,
+ const char *function, int line)
+#else
+HFC_outb_regio(struct hfc_multi *hc, u_char reg, u_char val)
+#endif
+{
+ outb(reg, (hc->pci_iobase)+4);
+ outb(val, hc->pci_iobase);
+}
+static u_char
+#ifdef HFC_REGISTER_DEBUG
+HFC_inb_regio(struct hfc_multi *hc, u_char reg, const char *function, int line)
+#else
+HFC_inb_regio(struct hfc_multi *hc, u_char reg)
+#endif
+{
+ outb(reg, (hc->pci_iobase)+4);
+ return inb(hc->pci_iobase);
+}
+static u_short
+#ifdef HFC_REGISTER_DEBUG
+HFC_inw_regio(struct hfc_multi *hc, u_char reg, const char *function, int line)
+#else
+HFC_inw_regio(struct hfc_multi *hc, u_char reg)
+#endif
+{
+ outb(reg, (hc->pci_iobase)+4);
+ return inw(hc->pci_iobase);
+}
+static void
+#ifdef HFC_REGISTER_DEBUG
+HFC_wait_regio(struct hfc_multi *hc, const char *function, int line)
+#else
+HFC_wait_regio(struct hfc_multi *hc)
+#endif
+{
+ outb(R_STATUS, (hc->pci_iobase)+4);
+	while (inb(hc->pci_iobase) & V_BUSY)
+		;
+}
+
+#ifdef HFC_REGISTER_DEBUG
+static void
+HFC_outb_debug(struct hfc_multi *hc, u_char reg, u_char val,
+ const char *function, int line)
+{
+ char regname[256] = "", bits[9] = "xxxxxxxx";
+ int i;
+
+ i = -1;
+ while (hfc_register_names[++i].name) {
+ if (hfc_register_names[i].reg == reg)
+ strcat(regname, hfc_register_names[i].name);
+ }
+ if (regname[0] == '\0')
+ strcpy(regname, "register");
+
+ bits[7] = '0'+(!!(val&1));
+ bits[6] = '0'+(!!(val&2));
+ bits[5] = '0'+(!!(val&4));
+ bits[4] = '0'+(!!(val&8));
+ bits[3] = '0'+(!!(val&16));
+ bits[2] = '0'+(!!(val&32));
+ bits[1] = '0'+(!!(val&64));
+ bits[0] = '0'+(!!(val&128));
+ printk(KERN_DEBUG
+ "HFC_outb(chip %d, %02x=%s, 0x%02x=%s); in %s() line %d\n",
+ hc->id, reg, regname, val, bits, function, line);
+ HFC_outb_nodebug(hc, reg, val);
+}
+static u_char
+HFC_inb_debug(struct hfc_multi *hc, u_char reg, const char *function, int line)
+{
+ char regname[256] = "", bits[9] = "xxxxxxxx";
+ u_char val = HFC_inb_nodebug(hc, reg);
+ int i;
+
+ i = 0;
+ while (hfc_register_names[i++].name)
+ ;
+ while (hfc_register_names[++i].name) {
+ if (hfc_register_names[i].reg == reg)
+ strcat(regname, hfc_register_names[i].name);
+ }
+ if (regname[0] == '\0')
+ strcpy(regname, "register");
+
+ bits[7] = '0'+(!!(val&1));
+ bits[6] = '0'+(!!(val&2));
+ bits[5] = '0'+(!!(val&4));
+ bits[4] = '0'+(!!(val&8));
+ bits[3] = '0'+(!!(val&16));
+ bits[2] = '0'+(!!(val&32));
+ bits[1] = '0'+(!!(val&64));
+ bits[0] = '0'+(!!(val&128));
+ printk(KERN_DEBUG
+ "HFC_inb(chip %d, %02x=%s) = 0x%02x=%s; in %s() line %d\n",
+ hc->id, reg, regname, val, bits, function, line);
+ return val;
+}
+static u_short
+HFC_inw_debug(struct hfc_multi *hc, u_char reg, const char *function, int line)
+{
+ char regname[256] = "";
+ u_short val = HFC_inw_nodebug(hc, reg);
+ int i;
+
+ i = 0;
+ while (hfc_register_names[i++].name)
+ ;
+ while (hfc_register_names[++i].name) {
+ if (hfc_register_names[i].reg == reg)
+ strcat(regname, hfc_register_names[i].name);
+ }
+ if (regname[0] == '\0')
+ strcpy(regname, "register");
+
+ printk(KERN_DEBUG
+ "HFC_inw(chip %d, %02x=%s) = 0x%04x; in %s() line %d\n",
+ hc->id, reg, regname, val, function, line);
+ return val;
+}
+static void
+HFC_wait_debug(struct hfc_multi *hc, const char *function, int line)
+{
+ printk(KERN_DEBUG "HFC_wait(chip %d); in %s() line %d\n",
+ hc->id, function, line);
+ HFC_wait_nodebug(hc);
+}
+#endif
+
+/* write fifo data (REGIO) */
+void
+write_fifo_regio(struct hfc_multi *hc, u_char *data, int len)
+{
+ outb(A_FIFO_DATA0, (hc->pci_iobase)+4);
+ while (len>>2) {
+ outl(*(u32 *)data, hc->pci_iobase);
+ data += 4;
+ len -= 4;
+ }
+ while (len>>1) {
+ outw(*(u16 *)data, hc->pci_iobase);
+ data += 2;
+ len -= 2;
+ }
+ while (len) {
+ outb(*data, hc->pci_iobase);
+ data++;
+ len--;
+ }
+}
+/* write fifo data (PCIMEM) */
+void
+write_fifo_pcimem(struct hfc_multi *hc, u_char *data, int len)
+{
+ while (len>>2) {
+ writel(*(u32 *)data, (hc->pci_membase)+A_FIFO_DATA0);
+ data += 4;
+ len -= 4;
+ }
+ while (len>>1) {
+ writew(*(u16 *)data, (hc->pci_membase)+A_FIFO_DATA0);
+ data += 2;
+ len -= 2;
+ }
+ while (len) {
+ writeb(*data, (hc->pci_membase)+A_FIFO_DATA0);
+ data++;
+ len--;
+ }
+}
+/* read fifo data (REGIO) */
+void
+read_fifo_regio(struct hfc_multi *hc, u_char *data, int len)
+{
+ outb(A_FIFO_DATA0, (hc->pci_iobase)+4);
+ while (len>>2) {
+ *(u32 *)data = inl(hc->pci_iobase);
+ data += 4;
+ len -= 4;
+ }
+ while (len>>1) {
+ *(u16 *)data = inw(hc->pci_iobase);
+ data += 2;
+ len -= 2;
+ }
+ while (len) {
+ *data = inb(hc->pci_iobase);
+ data++;
+ len--;
+ }
+}
+
+/* read fifo data (PCIMEM) */
+void
+read_fifo_pcimem(struct hfc_multi *hc, u_char *data, int len)
+{
+ while (len>>2) {
+ *(u32 *)data =
+ readl((hc->pci_membase)+A_FIFO_DATA0);
+ data += 4;
+ len -= 4;
+ }
+ while (len>>1) {
+ *(u16 *)data =
+ readw((hc->pci_membase)+A_FIFO_DATA0);
+ data += 2;
+ len -= 2;
+ }
+ while (len) {
+ *data = readb((hc->pci_membase)+A_FIFO_DATA0);
+ data++;
+ len--;
+ }
+}
+
+
+static void
+enable_hwirq(struct hfc_multi *hc)
+{
+ hc->hw.r_irq_ctrl |= V_GLOB_IRQ_EN;
+ HFC_outb(hc, R_IRQ_CTRL, hc->hw.r_irq_ctrl);
+}
+
+static void
+disable_hwirq(struct hfc_multi *hc)
+{
+ hc->hw.r_irq_ctrl &= ~((u_char)V_GLOB_IRQ_EN);
+ HFC_outb(hc, R_IRQ_CTRL, hc->hw.r_irq_ctrl);
+}
+
+#define NUM_EC 2
+#define MAX_TDM_CHAN 32
+
+
+inline void
+enablepcibridge(struct hfc_multi *c)
+{
+ HFC_outb(c, R_BRG_PCM_CFG, (0x0 << 6) | 0x3); /* was _io before */
+}
+
+inline void
+disablepcibridge(struct hfc_multi *c)
+{
+ HFC_outb(c, R_BRG_PCM_CFG, (0x0 << 6) | 0x2); /* was _io before */
+}
+
+inline unsigned char
+readpcibridge(struct hfc_multi *hc, unsigned char address)
+{
+ unsigned short cipv;
+ unsigned char data;
+
+ if (!hc->pci_iobase)
+ return 0;
+
+ /* slow down a PCI read access by 1 PCI clock cycle */
+ HFC_outb(hc, R_CTRL, 0x4); /*was _io before*/
+
+ if (address == 0)
+ cipv = 0x4000;
+ else
+ cipv = 0x5800;
+
+ /* select local bridge port address by writing to CIP port */
+ /* data = HFC_inb(c, cipv); * was _io before */
+ outw(cipv, hc->pci_iobase + 4);
+ data = inb(hc->pci_iobase);
+
+ /* restore R_CTRL for normal PCI read cycle speed */
+ HFC_outb(hc, R_CTRL, 0x0); /* was _io before */
+
+ return data;
+}
+
+inline void
+writepcibridge(struct hfc_multi *hc, unsigned char address, unsigned char data)
+{
+ unsigned short cipv;
+ unsigned int datav;
+
+ if (!hc->pci_iobase)
+ return;
+
+ if (address == 0)
+ cipv = 0x4000;
+ else
+ cipv = 0x5800;
+
+ /* select local bridge port address by writing to CIP port */
+ outw(cipv, hc->pci_iobase + 4);
+ /* define a 32 bit dword with 4 identical bytes for write sequence */
+ datav = data | ((__u32) data << 8) | ((__u32) data << 16) |
+ ((__u32) data << 24);
+
+	/*
+	 * Write this 32 bit dword to the bridge data port. This initiates a
+	 * write sequence of up to 4 writes to the same address on the local
+	 * bus interface. The number of write accesses is undefined but >= 1
+	 * and depends on the next PCI transaction during the write sequence
+	 * on the local bus.
+	 */
+ outl(datav, hc->pci_iobase);
+}
+
+inline void
+cpld_set_reg(struct hfc_multi *hc, unsigned char reg)
+{
+ /* Do data pin read low byte */
+ HFC_outb(hc, R_GPIO_OUT1, reg);
+}
+
+inline void
+cpld_write_reg(struct hfc_multi *hc, unsigned char reg, unsigned char val)
+{
+ cpld_set_reg(hc, reg);
+
+ enablepcibridge(hc);
+ writepcibridge(hc, 1, val);
+ disablepcibridge(hc);
+
+ return;
+}
+
+inline unsigned char
+cpld_read_reg(struct hfc_multi *hc, unsigned char reg)
+{
+ unsigned char bytein;
+
+ cpld_set_reg(hc, reg);
+
+ /* Do data pin read low byte */
+ HFC_outb(hc, R_GPIO_OUT1, reg);
+
+ enablepcibridge(hc);
+ bytein = readpcibridge(hc, 1);
+ disablepcibridge(hc);
+
+ return bytein;
+}
+
+inline void
+vpm_write_address(struct hfc_multi *hc, unsigned short addr)
+{
+ cpld_write_reg(hc, 0, 0xff & addr);
+ cpld_write_reg(hc, 1, 0x01 & (addr >> 8));
+}
+
+inline unsigned short
+vpm_read_address(struct hfc_multi *c)
+{
+ unsigned short addr;
+ unsigned short highbit;
+
+ addr = cpld_read_reg(c, 0);
+ highbit = cpld_read_reg(c, 1);
+
+ addr = addr | (highbit << 8);
+
+ return addr & 0x1ff;
+}
+
+inline unsigned char
+vpm_in(struct hfc_multi *c, int which, unsigned short addr)
+{
+ unsigned char res;
+
+ vpm_write_address(c, addr);
+
+ if (!which)
+ cpld_set_reg(c, 2);
+ else
+ cpld_set_reg(c, 3);
+
+ enablepcibridge(c);
+ res = readpcibridge(c, 1);
+ disablepcibridge(c);
+
+ cpld_set_reg(c, 0);
+
+ return res;
+}
+
+inline void
+vpm_out(struct hfc_multi *c, int which, unsigned short addr,
+ unsigned char data)
+{
+ vpm_write_address(c, addr);
+
+ enablepcibridge(c);
+
+ if (!which)
+ cpld_set_reg(c, 2);
+ else
+ cpld_set_reg(c, 3);
+
+ writepcibridge(c, 1, data);
+
+ cpld_set_reg(c, 0);
+
+ disablepcibridge(c);
+
+	{
+		/* read back the value and verify the write */
+		unsigned char regin;
+ regin = vpm_in(c, which, addr);
+ if (regin != data)
+ printk(KERN_DEBUG "Wrote 0x%x to register 0x%x but got back "
+ "0x%x\n", data, addr, regin);
+ }
+
+}
+
+
+void
+vpm_init(struct hfc_multi *wc)
+{
+ unsigned char reg;
+ unsigned int mask;
+ unsigned int i, x, y;
+ unsigned int ver;
+
+ for (x = 0; x < NUM_EC; x++) {
+ /* Setup GPIO's */
+ if (!x) {
+ ver = vpm_in(wc, x, 0x1a0);
+ printk(KERN_DEBUG "VPM: Chip %d: ver %02x\n", x, ver);
+ }
+
+ for (y = 0; y < 4; y++) {
+ vpm_out(wc, x, 0x1a8 + y, 0x00); /* GPIO out */
+ vpm_out(wc, x, 0x1ac + y, 0x00); /* GPIO dir */
+ vpm_out(wc, x, 0x1b0 + y, 0x00); /* GPIO sel */
+ }
+
+ /* Setup TDM path - sets fsync and tdm_clk as inputs */
+ reg = vpm_in(wc, x, 0x1a3); /* misc_con */
+ vpm_out(wc, x, 0x1a3, reg & ~2);
+
+ /* Setup Echo length (256 taps) */
+ vpm_out(wc, x, 0x022, 1);
+ vpm_out(wc, x, 0x023, 0xff);
+
+ /* Setup timeslots */
+ vpm_out(wc, x, 0x02f, 0x00);
+ mask = 0x02020202 << (x * 4);
+
+ /* Setup the tdm channel masks for all chips */
+ for (i = 0; i < 4; i++)
+ vpm_out(wc, x, 0x33 - i, (mask >> (i << 3)) & 0xff);
+
+ /* Setup convergence rate */
+ printk(KERN_DEBUG "VPM: A-law mode\n");
+ reg = 0x00 | 0x10 | 0x01;
+ vpm_out(wc, x, 0x20, reg);
+ printk(KERN_DEBUG "VPM reg 0x20 is %x\n", reg);
+ /*vpm_out(wc, x, 0x20, (0x00 | 0x08 | 0x20 | 0x10)); */
+
+ vpm_out(wc, x, 0x24, 0x02);
+ reg = vpm_in(wc, x, 0x24);
+ printk(KERN_DEBUG "NLP Thresh is set to %d (0x%x)\n", reg, reg);
+
+ /* Initialize echo cans */
+ for (i = 0; i < MAX_TDM_CHAN; i++) {
+ if (mask & (0x00000001 << i))
+ vpm_out(wc, x, i, 0x00);
+ }
+
+ /*
+		 * The ARM architecture (at least) disallows a udelay of
+		 * more than 2 ms: it emits a "__bad_udelay" reference that
+		 * fails at link time.
+		 * Long busy-wait delays in kernel code are undesirable anyway,
+		 * so for now work around it using 5 x 2 ms instead of 1 x 10 ms.
+ */
+
+ udelay(2000);
+ udelay(2000);
+ udelay(2000);
+ udelay(2000);
+ udelay(2000);
+
+ /* Put in bypass mode */
+ for (i = 0; i < MAX_TDM_CHAN; i++) {
+ if (mask & (0x00000001 << i))
+ vpm_out(wc, x, i, 0x01);
+ }
+
+ /* Enable bypass */
+ for (i = 0; i < MAX_TDM_CHAN; i++) {
+ if (mask & (0x00000001 << i))
+ vpm_out(wc, x, 0x78 + i, 0x01);
+ }
+
+ }
+}
+
+void
+vpm_check(struct hfc_multi *hctmp)
+{
+ unsigned char gpi2;
+
+ gpi2 = HFC_inb(hctmp, R_GPI_IN2);
+
+ if ((gpi2 & 0x3) != 0x3)
+ printk(KERN_DEBUG "Got interrupt 0x%x from VPM!\n", gpi2);
+}
+
+
+/*
+ * Interface to enable/disable the HW Echocan
+ *
+ * these functions are called within a spin_lock_irqsave on
+ * the channel instance lock, so we are not disturbed by irqs
+ *
+ * we can later easily change the interface to make other
+ * things configurable, for now we configure the taps
+ *
+ */
+
+void
+vpm_echocan_on(struct hfc_multi *hc, int ch, int taps)
+{
+ unsigned int timeslot;
+ unsigned int unit;
+ struct bchannel *bch = hc->chan[ch].bch;
+#ifdef TXADJ
+ int txadj = -4;
+ struct sk_buff *skb;
+#endif
+ if (hc->chan[ch].protocol != ISDN_P_B_RAW)
+ return;
+
+ if (!bch)
+ return;
+
+#ifdef TXADJ
+ skb = _alloc_mISDN_skb(PH_CONTROL_IND, HFC_VOL_CHANGE_TX,
+ sizeof(int), &txadj, GFP_ATOMIC);
+ if (skb)
+ recv_Bchannel_skb(bch, skb);
+#endif
+
+ timeslot = ((ch/4)*8) + ((ch%4)*4) + 1;
+ unit = ch % 4;
+
+ printk(KERN_NOTICE "vpm_echocan_on called taps [%d] on timeslot %d\n",
+ taps, timeslot);
+
+ vpm_out(hc, unit, timeslot, 0x7e);
+}
+
+void
+vpm_echocan_off(struct hfc_multi *hc, int ch)
+{
+ unsigned int timeslot;
+ unsigned int unit;
+ struct bchannel *bch = hc->chan[ch].bch;
+#ifdef TXADJ
+ int txadj = 0;
+ struct sk_buff *skb;
+#endif
+
+ if (hc->chan[ch].protocol != ISDN_P_B_RAW)
+ return;
+
+ if (!bch)
+ return;
+
+#ifdef TXADJ
+ skb = _alloc_mISDN_skb(PH_CONTROL_IND, HFC_VOL_CHANGE_TX,
+ sizeof(int), &txadj, GFP_ATOMIC);
+ if (skb)
+ recv_Bchannel_skb(bch, skb);
+#endif
+
+ timeslot = ((ch/4)*8) + ((ch%4)*4) + 1;
+ unit = ch % 4;
+
+ printk(KERN_NOTICE "vpm_echocan_off called on timeslot %d\n",
+ timeslot);
+ /* FILLME */
+ vpm_out(hc, unit, timeslot, 0x01);
+}
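+
+/*
+ * Worked example (added for clarity) of the mapping used above:
+ *   timeslot = ((ch/4)*8) + ((ch%4)*4) + 1, unit = ch % 4
+ * e.g. ch = 5  -> unit 1, timeslot (1*8)+(1*4)+1 = 13
+ *      ch = 10 -> unit 2, timeslot (2*8)+(2*4)+1 = 25
+ */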
+
+
+/*
+ * Speech Design resync feature
+ * NOTE: This is sometimes called outside the interrupt handler.
+ * We must lock irqsave, so that no interrupt (from another card) can occur!
+ * Also interrupts may nest, so each access (lists, card) must be locked!
+ */
+static inline void
+hfcmulti_resync(struct hfc_multi *locked, struct hfc_multi *newmaster, int rm)
+{
+	struct hfc_multi *hc, *next, *pcmmaster = NULL;
+ u_int *plx_acc_32, pv;
+ u_long flags;
+
+ spin_lock_irqsave(&HFClock, flags);
+ spin_lock(&plx_lock); /* must be locked inside other locks */
+
+ if (debug & DEBUG_HFCMULTI_PLXSD)
+ printk(KERN_DEBUG "%s: RESYNC(syncmaster=0x%p)\n",
+ __func__, syncmaster);
+
+ /* select new master */
+ if (newmaster) {
+ if (debug & DEBUG_HFCMULTI_PLXSD)
+ printk(KERN_DEBUG "using provided controller\n");
+ } else {
+ list_for_each_entry_safe(hc, next, &HFClist, list) {
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
+ if (hc->syncronized) {
+ newmaster = hc;
+ break;
+ }
+ }
+ }
+ }
+
+ /* Disable sync of all cards */
+ list_for_each_entry_safe(hc, next, &HFClist, list) {
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
+ plx_acc_32 = (u_int *)(hc->plx_membase+PLX_GPIOC);
+ pv = readl(plx_acc_32);
+ pv &= ~PLX_SYNC_O_EN;
+ writel(pv, plx_acc_32);
+ if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip)) {
+ pcmmaster = hc;
+ if (hc->type == 1) {
+ if (debug & DEBUG_HFCMULTI_PLXSD)
+ printk(KERN_DEBUG
+ "Schedule SYNC_I\n");
+ hc->e1_resync |= 1; /* get SYNC_I */
+ }
+ }
+ }
+ }
+
+ if (newmaster) {
+ hc = newmaster;
+ if (debug & DEBUG_HFCMULTI_PLXSD)
+			printk(KERN_DEBUG "id=%d (0x%p) = synchronized with "
+				"interface.\n", hc->id, hc);
+ /* Enable new sync master */
+ plx_acc_32 = (u_int *)(hc->plx_membase+PLX_GPIOC);
+ pv = readl(plx_acc_32);
+ pv |= PLX_SYNC_O_EN;
+ writel(pv, plx_acc_32);
+ /* switch to jatt PLL, if not disabled by RX_SYNC */
+ if (hc->type == 1 && !test_bit(HFC_CHIP_RX_SYNC, &hc->chip)) {
+ if (debug & DEBUG_HFCMULTI_PLXSD)
+ printk(KERN_DEBUG "Schedule jatt PLL\n");
+ hc->e1_resync |= 2; /* switch to jatt */
+ }
+ } else {
+ if (pcmmaster) {
+ hc = pcmmaster;
+ if (debug & DEBUG_HFCMULTI_PLXSD)
+ printk(KERN_DEBUG
+					"id=%d (0x%p) = PCM master synchronized "
+ "with QUARTZ\n", hc->id, hc);
+ if (hc->type == 1) {
+ /* Use the crystal clock for the PCM
+ master card */
+ if (debug & DEBUG_HFCMULTI_PLXSD)
+ printk(KERN_DEBUG
+ "Schedule QUARTZ for HFC-E1\n");
+ hc->e1_resync |= 4; /* switch quartz */
+ } else {
+ if (debug & DEBUG_HFCMULTI_PLXSD)
+ printk(KERN_DEBUG
+ "QUARTZ is automatically "
+ "enabled by HFC-%dS\n", hc->type);
+ }
+ plx_acc_32 = (u_int *)(hc->plx_membase+PLX_GPIOC);
+ pv = readl(plx_acc_32);
+ pv |= PLX_SYNC_O_EN;
+ writel(pv, plx_acc_32);
+ } else
+ if (!rm)
+ printk(KERN_ERR "%s no pcm master, this MUST "
+ "not happen!\n", __func__);
+ }
+ syncmaster = newmaster;
+
+ spin_unlock(&plx_lock);
+ spin_unlock_irqrestore(&HFClock, flags);
+}
+
+/* This must be called AND hc must be locked irqsave!!! */
+inline void
+plxsd_checksync(struct hfc_multi *hc, int rm)
+{
+ if (hc->syncronized) {
+ if (syncmaster == NULL) {
+ if (debug & DEBUG_HFCMULTI_PLXSD)
+ printk(KERN_WARNING "%s: GOT sync on card %d"
+ " (id=%d)\n", __func__, hc->id + 1,
+ hc->id);
+ hfcmulti_resync(hc, hc, rm);
+ }
+ } else {
+ if (syncmaster == hc) {
+ if (debug & DEBUG_HFCMULTI_PLXSD)
+ printk(KERN_WARNING "%s: LOST sync on card %d"
+ " (id=%d)\n", __func__, hc->id + 1,
+ hc->id);
+ hfcmulti_resync(hc, NULL, rm);
+ }
+ }
+}
+
+
+/*
+ * free hardware resources used by driver
+ */
+static void
+release_io_hfcmulti(struct hfc_multi *hc)
+{
+ u_int *plx_acc_32, pv;
+ u_long plx_flags;
+
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: entered\n", __func__);
+
+ /* soft reset also masks all interrupts */
+ hc->hw.r_cirm |= V_SRES;
+ HFC_outb(hc, R_CIRM, hc->hw.r_cirm);
+ udelay(1000);
+ hc->hw.r_cirm &= ~V_SRES;
+ HFC_outb(hc, R_CIRM, hc->hw.r_cirm);
+ udelay(1000); /* instead of 'wait' that may cause locking */
+
+ /* release Speech Design card, if PLX was initialized */
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip) && hc->plx_membase) {
+ if (debug & DEBUG_HFCMULTI_PLXSD)
+ printk(KERN_DEBUG "%s: release PLXSD card %d\n",
+ __func__, hc->id + 1);
+ spin_lock_irqsave(&plx_lock, plx_flags);
+ plx_acc_32 = (u_int *)(hc->plx_membase+PLX_GPIOC);
+ writel(PLX_GPIOC_INIT, plx_acc_32);
+ pv = readl(plx_acc_32);
+ /* Termination off */
+ pv &= ~PLX_TERM_ON;
+ /* Disconnect the PCM */
+ pv |= PLX_SLAVE_EN_N;
+ pv &= ~PLX_MASTER_EN;
+ pv &= ~PLX_SYNC_O_EN;
+ /* Put the DSP in Reset */
+ pv &= ~PLX_DSP_RES_N;
+ writel(pv, plx_acc_32);
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_WARNING "%s: PCM off: PLX_GPIO=%x\n",
+ __func__, pv);
+ spin_unlock_irqrestore(&plx_lock, plx_flags);
+ }
+
+ /* disable memory mapped ports / io ports */
+ test_and_clear_bit(HFC_CHIP_PLXSD, &hc->chip); /* prevent resync */
+ pci_write_config_word(hc->pci_dev, PCI_COMMAND, 0);
+ if (hc->pci_membase)
+ iounmap((void *)hc->pci_membase);
+ if (hc->plx_membase)
+ iounmap((void *)hc->plx_membase);
+ if (hc->pci_iobase)
+ release_region(hc->pci_iobase, 8);
+
+ if (hc->pci_dev) {
+ pci_disable_device(hc->pci_dev);
+ pci_set_drvdata(hc->pci_dev, NULL);
+ }
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: done\n", __func__);
+}
+
+/*
+ * Reset the HFC chip: a complete software reset of the chip and its FIFOs
+ * is performed, and the full chip configuration is applied.
+ */
+
+static int
+init_chip(struct hfc_multi *hc)
+{
+ u_long flags, val, val2 = 0, rev;
+ int i, err = 0;
+ u_char r_conf_en, rval;
+ u_int *plx_acc_32, pv;
+ u_long plx_flags, hfc_flags;
+ int plx_count;
+ struct hfc_multi *pos, *next, *plx_last_hc;
+
+ spin_lock_irqsave(&hc->lock, flags);
+ /* reset all registers */
+ memset(&hc->hw, 0, sizeof(struct hfcm_hw));
+
+ /* revision check */
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: entered\n", __func__);
+ val = HFC_inb(hc, R_CHIP_ID)>>4;
+ if (val != 0x8 && val != 0xc && val != 0xe) {
+ printk(KERN_INFO "HFC_multi: unknown CHIP_ID:%x\n", (u_int)val);
+ err = -EIO;
+ goto out;
+ }
+ rev = HFC_inb(hc, R_CHIP_RV);
+ printk(KERN_INFO
+ "HFC_multi: detected HFC with chip ID=0x%lx revision=%ld%s\n",
+ val, rev, (rev == 0) ? " (old FIFO handling)" : "");
+ if (rev == 0) {
+ test_and_set_bit(HFC_CHIP_REVISION0, &hc->chip);
+ printk(KERN_WARNING
+ "HFC_multi: NOTE: Your chip is revision 0, "
+ "ask Cologne Chip for update. Newer chips "
+ "have a better FIFO handling. Old chips "
+ "still work but may have slightly lower "
+ "HDLC transmit performance.\n");
+ }
+ if (rev > 1) {
+ printk(KERN_WARNING "HFC_multi: WARNING: This driver doesn't "
+ "consider chip revision = %ld. The chip / "
+ "bridge may not work.\n", rev);
+ }
+
+ /* set s-ram size */
+ hc->Flen = 0x10;
+ hc->Zmin = 0x80;
+ hc->Zlen = 384;
+ hc->DTMFbase = 0x1000;
+ if (test_bit(HFC_CHIP_EXRAM_128, &hc->chip)) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+			printk(KERN_DEBUG "%s: changing to 128K external RAM\n",
+ __func__);
+ hc->hw.r_ctrl |= V_EXT_RAM;
+ hc->hw.r_ram_sz = 1;
+ hc->Flen = 0x20;
+ hc->Zmin = 0xc0;
+ hc->Zlen = 1856;
+ hc->DTMFbase = 0x2000;
+ }
+ if (test_bit(HFC_CHIP_EXRAM_512, &hc->chip)) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+			printk(KERN_DEBUG "%s: changing to 512K external RAM\n",
+ __func__);
+ hc->hw.r_ctrl |= V_EXT_RAM;
+ hc->hw.r_ram_sz = 2;
+ hc->Flen = 0x20;
+ hc->Zmin = 0xc0;
+ hc->Zlen = 8000;
+ hc->DTMFbase = 0x2000;
+ }
+ hc->max_trans = poll << 1;
+ if (hc->max_trans > hc->Zlen)
+ hc->max_trans = hc->Zlen;
+
+ /* Speech Design PLX bridge */
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
+ if (debug & DEBUG_HFCMULTI_PLXSD)
+ printk(KERN_DEBUG "%s: initializing PLXSD card %d\n",
+ __func__, hc->id + 1);
+ spin_lock_irqsave(&plx_lock, plx_flags);
+ plx_acc_32 = (u_int *)(hc->plx_membase+PLX_GPIOC);
+ writel(PLX_GPIOC_INIT, plx_acc_32);
+ pv = readl(plx_acc_32);
+ /* The first and the last cards are terminating the PCM bus */
+ pv |= PLX_TERM_ON; /* hc is currently the last */
+ /* Disconnect the PCM */
+ pv |= PLX_SLAVE_EN_N;
+ pv &= ~PLX_MASTER_EN;
+ pv &= ~PLX_SYNC_O_EN;
+ /* Put the DSP in Reset */
+ pv &= ~PLX_DSP_RES_N;
+ writel(pv, plx_acc_32);
+ spin_unlock_irqrestore(&plx_lock, plx_flags);
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_WARNING "%s: slave/term: PLX_GPIO=%x\n",
+ __func__, pv);
+ /*
+ * If we are the 3rd PLXSD card or higher, we must turn
+ * termination of last PLXSD card off.
+ */
+ spin_lock_irqsave(&HFClock, hfc_flags);
+ plx_count = 0;
+ plx_last_hc = NULL;
+ list_for_each_entry_safe(pos, next, &HFClist, list) {
+ if (test_bit(HFC_CHIP_PLXSD, &pos->chip)) {
+ plx_count++;
+ if (pos != hc)
+ plx_last_hc = pos;
+ }
+ }
+ if (plx_count >= 3) {
+ if (debug & DEBUG_HFCMULTI_PLXSD)
+ printk(KERN_DEBUG "%s: card %d is between, so "
+ "we disable termination\n",
+ __func__, plx_last_hc->id + 1);
+ spin_lock_irqsave(&plx_lock, plx_flags);
+ plx_acc_32 = (u_int *)(plx_last_hc->plx_membase
+ + PLX_GPIOC);
+ pv = readl(plx_acc_32);
+ pv &= ~PLX_TERM_ON;
+ writel(pv, plx_acc_32);
+ spin_unlock_irqrestore(&plx_lock, plx_flags);
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_WARNING "%s: term off: PLX_GPIO=%x\n",
+ __func__, pv);
+ }
+ spin_unlock_irqrestore(&HFClock, hfc_flags);
+ hc->hw.r_pcm_md0 = V_F0_LEN; /* shift clock for DSP */
+ }
+
+ /* we only want the real Z2 read-pointer for revision > 0 */
+ if (!test_bit(HFC_CHIP_REVISION0, &hc->chip))
+ hc->hw.r_ram_sz |= V_FZ_MD;
+
+ /* select pcm mode */
+ if (test_bit(HFC_CHIP_PCM_SLAVE, &hc->chip)) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: setting PCM into slave mode\n",
+ __func__);
+ } else
+ if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip) && !plxsd_master) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: setting PCM into master mode\n",
+ __func__);
+ hc->hw.r_pcm_md0 |= V_PCM_MD;
+ } else {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: performing PCM auto detect\n",
+ __func__);
+ }
+
+ /* soft reset */
+ HFC_outb(hc, R_CTRL, hc->hw.r_ctrl);
+ HFC_outb(hc, R_RAM_SZ, hc->hw.r_ram_sz);
+ HFC_outb(hc, R_FIFO_MD, 0);
+ hc->hw.r_cirm = V_SRES | V_HFCRES | V_PCMRES | V_STRES | V_RLD_EPR;
+ HFC_outb(hc, R_CIRM, hc->hw.r_cirm);
+ udelay(100);
+ hc->hw.r_cirm = 0;
+ HFC_outb(hc, R_CIRM, hc->hw.r_cirm);
+ udelay(100);
+ HFC_outb(hc, R_RAM_SZ, hc->hw.r_ram_sz);
+
+ /* Speech Design PLX bridge pcm and sync mode */
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
+ spin_lock_irqsave(&plx_lock, plx_flags);
+ plx_acc_32 = (u_int *)(hc->plx_membase+PLX_GPIOC);
+ pv = readl(plx_acc_32);
+ /* Connect PCM */
+ if (hc->hw.r_pcm_md0 & V_PCM_MD) {
+ pv |= PLX_MASTER_EN | PLX_SLAVE_EN_N;
+ pv |= PLX_SYNC_O_EN;
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_WARNING "%s: master: PLX_GPIO=%x\n",
+ __func__, pv);
+ } else {
+ pv &= ~(PLX_MASTER_EN | PLX_SLAVE_EN_N);
+ pv &= ~PLX_SYNC_O_EN;
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_WARNING "%s: slave: PLX_GPIO=%x\n",
+ __func__, pv);
+ }
+ writel(pv, plx_acc_32);
+ spin_unlock_irqrestore(&plx_lock, plx_flags);
+ }
+
+ /* PCM setup */
+ HFC_outb(hc, R_PCM_MD0, hc->hw.r_pcm_md0 | 0x90);
+ if (hc->slots == 32)
+ HFC_outb(hc, R_PCM_MD1, 0x00);
+ if (hc->slots == 64)
+ HFC_outb(hc, R_PCM_MD1, 0x10);
+ if (hc->slots == 128)
+ HFC_outb(hc, R_PCM_MD1, 0x20);
+ HFC_outb(hc, R_PCM_MD0, hc->hw.r_pcm_md0 | 0xa0);
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip))
+ HFC_outb(hc, R_PCM_MD2, V_SYNC_SRC); /* sync via SYNC_I / O */
+ else
+ HFC_outb(hc, R_PCM_MD2, 0x00); /* sync from interface */
+ HFC_outb(hc, R_PCM_MD0, hc->hw.r_pcm_md0 | 0x00);
+ for (i = 0; i < 256; i++) {
+ HFC_outb_nodebug(hc, R_SLOT, i);
+ HFC_outb_nodebug(hc, A_SL_CFG, 0);
+ HFC_outb_nodebug(hc, A_CONF, 0);
+ hc->slot_owner[i] = -1;
+ }
+
+ /* set clock speed */
+ if (test_bit(HFC_CHIP_CLOCK2, &hc->chip)) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG
+ "%s: setting double clock\n", __func__);
+ HFC_outb(hc, R_BRG_PCM_CFG, V_PCM_CLK);
+ }
+
+ /* B410P GPIO */
+ if (test_bit(HFC_CHIP_B410P, &hc->chip)) {
+ printk(KERN_NOTICE "Setting GPIOs\n");
+ HFC_outb(hc, R_GPIO_SEL, 0x30);
+ HFC_outb(hc, R_GPIO_EN1, 0x3);
+ udelay(1000);
+ printk(KERN_NOTICE "calling vpm_init\n");
+ vpm_init(hc);
+ }
+
+ /* check if R_F0_CNT counts (8 kHz frame count) */
+ val = HFC_inb(hc, R_F0_CNTL);
+ val += HFC_inb(hc, R_F0_CNTH) << 8;
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG
+ "HFC_multi F0_CNT %ld after reset\n", val);
+ spin_unlock_irqrestore(&hc->lock, flags);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout((HZ/100)?:1); /* Timeout minimum 10ms */
+ spin_lock_irqsave(&hc->lock, flags);
+ val2 = HFC_inb(hc, R_F0_CNTL);
+ val2 += HFC_inb(hc, R_F0_CNTH) << 8;
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG
+ "HFC_multi F0_CNT %ld after 10 ms (1st try)\n",
+ val2);
+ if (val2 >= val+8) { /* 1 ms */
+ /* it counts, so we keep the pcm mode */
+ if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip))
+ printk(KERN_INFO "controller is PCM bus MASTER\n");
+ else
+ if (test_bit(HFC_CHIP_PCM_SLAVE, &hc->chip))
+ printk(KERN_INFO "controller is PCM bus SLAVE\n");
+ else {
+ test_and_set_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
+ printk(KERN_INFO "controller is PCM bus SLAVE "
+ "(auto detected)\n");
+ }
+ } else {
+ /* does not count */
+ if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip)) {
+controller_fail:
+ printk(KERN_ERR "HFC_multi ERROR, getting no 125us "
+ "pulse. Seems that controller fails.\n");
+ err = -EIO;
+ goto out;
+ }
+ if (test_bit(HFC_CHIP_PCM_SLAVE, &hc->chip)) {
+ printk(KERN_INFO "controller is PCM bus SLAVE "
+ "(ignoring missing PCM clock)\n");
+ } else {
+ /* only one pcm master */
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip)
+ && plxsd_master) {
+ printk(KERN_ERR "HFC_multi ERROR, no clock "
+ "on another Speech Design card found. "
+ "Please be sure to connect PCM cable.\n");
+ err = -EIO;
+ goto out;
+ }
+ /* retry with master clock */
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
+ spin_lock_irqsave(&plx_lock, plx_flags);
+ plx_acc_32 = (u_int *)(hc->plx_membase +
+ PLX_GPIOC);
+ pv = readl(plx_acc_32);
+ pv |= PLX_MASTER_EN | PLX_SLAVE_EN_N;
+ pv |= PLX_SYNC_O_EN;
+ writel(pv, plx_acc_32);
+ spin_unlock_irqrestore(&plx_lock, plx_flags);
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_WARNING "%s: master: PLX_GPIO"
+ "=%x\n", __func__, pv);
+ }
+ hc->hw.r_pcm_md0 |= V_PCM_MD;
+ HFC_outb(hc, R_PCM_MD0, hc->hw.r_pcm_md0 | 0x00);
+ spin_unlock_irqrestore(&hc->lock, flags);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout((HZ/100)?:1); /* Timeout min. 10ms */
+ spin_lock_irqsave(&hc->lock, flags);
+ val2 = HFC_inb(hc, R_F0_CNTL);
+ val2 += HFC_inb(hc, R_F0_CNTH) << 8;
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "HFC_multi F0_CNT %ld after "
+ "10 ms (2nd try)\n", val2);
+ if (val2 >= val+8) { /* 1 ms */
+ test_and_set_bit(HFC_CHIP_PCM_MASTER,
+ &hc->chip);
+ printk(KERN_INFO "controller is PCM bus MASTER "
+ "(auto detected)\n");
+ } else
+ goto controller_fail;
+ }
+ }
+
+ /* Release the DSP Reset */
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
+ if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip))
+ plxsd_master = 1;
+ spin_lock_irqsave(&plx_lock, plx_flags);
+ plx_acc_32 = (u_int *)(hc->plx_membase+PLX_GPIOC);
+ pv = readl(plx_acc_32);
+ pv |= PLX_DSP_RES_N;
+ writel(pv, plx_acc_32);
+ spin_unlock_irqrestore(&plx_lock, plx_flags);
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_WARNING "%s: reset off: PLX_GPIO=%x\n",
+ __func__, pv);
+ }
+
+ /* pcm id */
+ if (hc->pcm)
+ printk(KERN_INFO "controller has given PCM BUS ID %d\n",
+ hc->pcm);
+ else {
+ if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip)
+ || test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
+ PCM_cnt++; /* SD has proprietary bridging */
+ }
+ hc->pcm = PCM_cnt;
+ printk(KERN_INFO "controller has PCM BUS ID %d "
+ "(auto selected)\n", hc->pcm);
+ }
+
+ /* set up timer */
+ HFC_outb(hc, R_TI_WD, poll_timer);
+ hc->hw.r_irqmsk_misc |= V_TI_IRQMSK;
+
+ /*
+ * set up 125us interrupt, only if function pointer is available
+ * and module parameter timer is set
+ */
+ if (timer && hfc_interrupt && register_interrupt) {
+ /* only one chip should use this interrupt */
+ timer = 0;
+ interrupt_registered = 1;
+ hc->hw.r_irqmsk_misc |= V_PROC_IRQMSK;
+ /* deactivate other interrupts in ztdummy */
+ register_interrupt();
+ }
+
+ /* set E1 state machine IRQ */
+ if (hc->type == 1)
+ hc->hw.r_irqmsk_misc |= V_STA_IRQMSK;
+
+ /* set DTMF detection */
+ if (test_bit(HFC_CHIP_DTMF, &hc->chip)) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: enabling DTMF detection "
+ "for all B-channel\n", __func__);
+ hc->hw.r_dtmf = V_DTMF_EN | V_DTMF_STOP;
+ if (test_bit(HFC_CHIP_ULAW, &hc->chip))
+ hc->hw.r_dtmf |= V_ULAW_SEL;
+ HFC_outb(hc, R_DTMF_N, 102 - 1);
+ hc->hw.r_irqmsk_misc |= V_DTMF_IRQMSK;
+ }
+
+ /* conference engine */
+ if (test_bit(HFC_CHIP_ULAW, &hc->chip))
+ r_conf_en = V_CONF_EN | V_ULAW;
+ else
+ r_conf_en = V_CONF_EN;
+ HFC_outb(hc, R_CONF_EN, r_conf_en);
+
+ /* setting leds */
+ switch (hc->leds) {
+ case 1: /* HFC-E1 OEM */
+ if (test_bit(HFC_CHIP_WATCHDOG, &hc->chip))
+ HFC_outb(hc, R_GPIO_SEL, 0x32);
+ else
+ HFC_outb(hc, R_GPIO_SEL, 0x30);
+
+ HFC_outb(hc, R_GPIO_EN1, 0x0f);
+ HFC_outb(hc, R_GPIO_OUT1, 0x00);
+
+ HFC_outb(hc, R_GPIO_EN0, V_GPIO_EN2 | V_GPIO_EN3);
+ break;
+
+ case 2: /* HFC-4S OEM */
+ case 3:
+ HFC_outb(hc, R_GPIO_SEL, 0xf0);
+ HFC_outb(hc, R_GPIO_EN1, 0xff);
+ HFC_outb(hc, R_GPIO_OUT1, 0x00);
+ break;
+ }
+
+ /* set master clock */
+ if (hc->masterclk >= 0) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: setting ST master clock "
+ "to port %d (0..%d)\n",
+ __func__, hc->masterclk, hc->ports-1);
+ hc->hw.r_st_sync = hc->masterclk | V_AUTO_SYNC;
+ HFC_outb(hc, R_ST_SYNC, hc->hw.r_st_sync);
+ }
+
+ /* setting misc irq */
+ HFC_outb(hc, R_IRQMSK_MISC, hc->hw.r_irqmsk_misc);
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "r_irqmsk_misc.2: 0x%x\n",
+ hc->hw.r_irqmsk_misc);
+
+ /* RAM access test */
+ HFC_outb(hc, R_RAM_ADDR0, 0);
+ HFC_outb(hc, R_RAM_ADDR1, 0);
+ HFC_outb(hc, R_RAM_ADDR2, 0);
+ for (i = 0; i < 256; i++) {
+ HFC_outb_nodebug(hc, R_RAM_ADDR0, i);
+ HFC_outb_nodebug(hc, R_RAM_DATA, ((i*3)&0xff));
+ }
+ for (i = 0; i < 256; i++) {
+ HFC_outb_nodebug(hc, R_RAM_ADDR0, i);
+ HFC_inb_nodebug(hc, R_RAM_DATA);
+ rval = HFC_inb_nodebug(hc, R_INT_DATA);
+ if (rval != ((i * 3) & 0xff)) {
+ printk(KERN_DEBUG
+ "addr:%x val:%x should:%x\n", i, rval,
+ (i * 3) & 0xff);
+ err++;
+ }
+ }
+ if (err) {
+ printk(KERN_DEBUG "aborting - %d RAM access errors\n", err);
+ err = -EIO;
+ goto out;
+ }
+
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: done\n", __func__);
+out:
+ spin_unlock_irqrestore(&hc->lock, flags);
+ return err;
+}
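+
+/*
+ * Note on the PCM auto detection above (added for clarity): R_F0_CNT counts
+ * 125 us PCM frames, i.e. 8 per millisecond. After sleeping for at least
+ * 10 ms the counter should have advanced by roughly 80; the code only
+ * requires an increase of 8 (one millisecond worth of frames) to accept
+ * that a PCM clock is present, otherwise it retries as bus master and
+ * finally gives up.
+ */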
+
+
+/*
+ * control the watchdog
+ */
+static void
+hfcmulti_watchdog(struct hfc_multi *hc)
+{
+ hc->wdcount++;
+
+ if (hc->wdcount > 10) {
+ hc->wdcount = 0;
+ hc->wdbyte = hc->wdbyte == V_GPIO_OUT2 ?
+ V_GPIO_OUT3 : V_GPIO_OUT2;
+
+ /* printk("Sending Watchdog Kill %x\n",hc->wdbyte); */
+ HFC_outb(hc, R_GPIO_EN0, V_GPIO_EN2 | V_GPIO_EN3);
+ HFC_outb(hc, R_GPIO_OUT0, hc->wdbyte);
+ }
+}
+
+
+
+/*
+ * output leds
+ */
+static void
+hfcmulti_leds(struct hfc_multi *hc)
+{
+ unsigned long lled;
+ unsigned long leddw;
+ int i, state, active, leds;
+ struct dchannel *dch;
+ int led[4];
+
+ hc->ledcount += poll;
+ if (hc->ledcount > 4096) {
+ hc->ledcount -= 4096;
+ hc->ledstate = 0xAFFEAFFE;
+ }
+
+ switch (hc->leds) {
+ case 1: /* HFC-E1 OEM */
+ /* 2 red blinking: NT mode deactivate
+ * 2 red steady: TE mode deactivate
+ * left green: L1 active
+ * left red: frame sync, but no L1
+ * right green: L2 active
+ */
+ if (hc->chan[hc->dslot].sync != 2) { /* no frame sync */
+ if (hc->chan[hc->dslot].dch->dev.D.protocol
+ != ISDN_P_NT_E1) {
+ led[0] = 1;
+ led[1] = 1;
+ } else if (hc->ledcount>>11) {
+ led[0] = 1;
+ led[1] = 1;
+ } else {
+ led[0] = 0;
+ led[1] = 0;
+ }
+ led[2] = 0;
+ led[3] = 0;
+ } else { /* with frame sync */
+ /* TODO make it work */
+ led[0] = 0;
+ led[1] = 0;
+ led[2] = 0;
+ led[3] = 1;
+ }
+ leds = (led[0] | (led[1]<<2) | (led[2]<<1) | (led[3]<<3))^0xF;
+ /* leds are inverted */
+ if (leds != (int)hc->ledstate) {
+ HFC_outb_nodebug(hc, R_GPIO_OUT1, leds);
+ hc->ledstate = leds;
+ }
+ break;
+
+ case 2: /* HFC-4S OEM */
+ /* red blinking = PH_DEACTIVATE NT Mode
+ * red steady = PH_DEACTIVATE TE Mode
+ * green steady = PH_ACTIVATE
+ */
+ for (i = 0; i < 4; i++) {
+ state = 0;
+ active = -1;
+ dch = hc->chan[(i << 2) | 2].dch;
+ if (dch) {
+ state = dch->state;
+ if (dch->dev.D.protocol == ISDN_P_NT_S0)
+ active = 3;
+ else
+ active = 7;
+ }
+ if (state) {
+ if (state == active) {
+ led[i] = 1; /* led green */
+ } else
+ if (dch->dev.D.protocol == ISDN_P_TE_S0)
+ /* TE mode: led red */
+ led[i] = 2;
+ else
+ if (hc->ledcount>>11)
+ /* led red */
+ led[i] = 2;
+ else
+ /* led off */
+ led[i] = 0;
+ } else
+ led[i] = 0; /* led off */
+ }
+ if (test_bit(HFC_CHIP_B410P, &hc->chip)) {
+ leds = 0;
+ for (i = 0; i < 4; i++) {
+ if (led[i] == 1) {
+ /*green*/
+ leds |= (0x2 << (i * 2));
+ } else if (led[i] == 2) {
+ /*red*/
+ leds |= (0x1 << (i * 2));
+ }
+ }
+ if (leds != (int)hc->ledstate) {
+ vpm_out(hc, 0, 0x1a8 + 3, leds);
+ hc->ledstate = leds;
+ }
+ } else {
+ leds = ((led[3] > 0) << 0) | ((led[1] > 0) << 1) |
+ ((led[0] > 0) << 2) | ((led[2] > 0) << 3) |
+ ((led[3] & 1) << 4) | ((led[1] & 1) << 5) |
+ ((led[0] & 1) << 6) | ((led[2] & 1) << 7);
+ if (leds != (int)hc->ledstate) {
+ HFC_outb_nodebug(hc, R_GPIO_EN1, leds & 0x0F);
+ HFC_outb_nodebug(hc, R_GPIO_OUT1, leds >> 4);
+ hc->ledstate = leds;
+ }
+ }
+ break;
+
+ case 3: /* HFC 1S/2S Beronet */
+ /* red blinking = PH_DEACTIVATE NT Mode
+ * red steady = PH_DEACTIVATE TE Mode
+ * green steady = PH_ACTIVATE
+ */
+ for (i = 0; i < 2; i++) {
+ state = 0;
+ active = -1;
+ dch = hc->chan[(i << 2) | 2].dch;
+ if (dch) {
+ state = dch->state;
+ if (dch->dev.D.protocol == ISDN_P_NT_S0)
+ active = 3;
+ else
+ active = 7;
+ }
+ if (state) {
+ if (state == active) {
+ led[i] = 1; /* led green */
+ } else
+ if (dch->dev.D.protocol == ISDN_P_TE_S0)
+ /* TE mode: led red */
+ led[i] = 2;
+ else
+ if (hc->ledcount >> 11)
+ /* led red */
+ led[i] = 2;
+ else
+ /* led off */
+ led[i] = 0;
+ } else
+ led[i] = 0; /* led off */
+ }
+
+
+ leds = (led[0] > 0) | ((led[1] > 0)<<1) | ((led[0]&1)<<2)
+ | ((led[1]&1)<<3);
+ if (leds != (int)hc->ledstate) {
+ HFC_outb_nodebug(hc, R_GPIO_EN1,
+ ((led[0] > 0) << 2) | ((led[1] > 0) << 3));
+ HFC_outb_nodebug(hc, R_GPIO_OUT1,
+ ((led[0] & 1) << 2) | ((led[1] & 1) << 3));
+ hc->ledstate = leds;
+ }
+ break;
+ case 8: /* HFC 8S+ Beronet */
+ lled = 0;
+
+ for (i = 0; i < 8; i++) {
+ state = 0;
+ active = -1;
+ dch = hc->chan[(i << 2) | 2].dch;
+ if (dch) {
+ state = dch->state;
+ if (dch->dev.D.protocol == ISDN_P_NT_S0)
+ active = 3;
+ else
+ active = 7;
+ }
+ if (state) {
+ if (state == active) {
+ lled |= 0 << i;
+ } else
+ if (hc->ledcount >> 11)
+ lled |= 0 << i;
+ else
+ lled |= 1 << i;
+ } else
+ lled |= 1 << i;
+ }
+ leddw = lled << 24 | lled << 16 | lled << 8 | lled;
+ if (leddw != hc->ledstate) {
+ /* HFC_outb(hc, R_BRG_PCM_CFG, 1);
+ HFC_outb(c, R_BRG_PCM_CFG, (0x0 << 6) | 0x3); */
+ /* was _io before */
+ HFC_outb_nodebug(hc, R_BRG_PCM_CFG, 1 | V_PCM_CLK);
+ outw(0x4000, hc->pci_iobase + 4);
+ outl(leddw, hc->pci_iobase);
+ HFC_outb_nodebug(hc, R_BRG_PCM_CFG, V_PCM_CLK);
+ hc->ledstate = leddw;
+ }
+ break;
+ }
+}
+/*
+ * read dtmf coefficients
+ */
+
+static void
+hfcmulti_dtmf(struct hfc_multi *hc)
+{
+ s32 *coeff;
+ u_int mantissa;
+ int co, ch;
+ struct bchannel *bch = NULL;
+ u8 exponent;
+ int dtmf = 0;
+ int addr;
+ u16 w_float;
+ struct sk_buff *skb;
+ struct mISDNhead *hh;
+
+ if (debug & DEBUG_HFCMULTI_DTMF)
+ printk(KERN_DEBUG "%s: dtmf detection irq\n", __func__);
+ for (ch = 0; ch <= 31; ch++) {
+ /* only process enabled B-channels */
+ bch = hc->chan[ch].bch;
+ if (!bch)
+ continue;
+ if (!hc->created[hc->chan[ch].port])
+ continue;
+ if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
+ continue;
+ if (debug & DEBUG_HFCMULTI_DTMF)
+ printk(KERN_DEBUG "%s: dtmf channel %d:",
+ __func__, ch);
+ coeff = &(hc->chan[ch].coeff[hc->chan[ch].coeff_count * 16]);
+ dtmf = 1;
+ for (co = 0; co < 8; co++) {
+ /* read W(n-1) coefficient */
+ addr = hc->DTMFbase + ((co<<7) | (ch<<2));
+ HFC_outb_nodebug(hc, R_RAM_ADDR0, addr);
+ HFC_outb_nodebug(hc, R_RAM_ADDR1, addr>>8);
+ HFC_outb_nodebug(hc, R_RAM_ADDR2, (addr>>16)
+ | V_ADDR_INC);
+ w_float = HFC_inb_nodebug(hc, R_RAM_DATA);
+ w_float |= (HFC_inb_nodebug(hc, R_RAM_DATA) << 8);
+ if (debug & DEBUG_HFCMULTI_DTMF)
+ printk(" %04x", w_float);
+
+ /* decode float (see chip doc) */
+ mantissa = w_float & 0x0fff;
+ if (w_float & 0x8000)
+ mantissa |= 0xfffff000;
+ exponent = (w_float>>12) & 0x7;
+ if (exponent) {
+ mantissa ^= 0x1000;
+ mantissa <<= (exponent-1);
+ }
+
+ /* store coefficient */
+ coeff[co<<1] = mantissa;
+
+ /* read W(n) coefficient */
+ w_float = HFC_inb_nodebug(hc, R_RAM_DATA);
+ w_float |= (HFC_inb_nodebug(hc, R_RAM_DATA) << 8);
+ if (debug & DEBUG_HFCMULTI_DTMF)
+ printk(" %04x", w_float);
+
+ /* decode float (see chip doc) */
+ mantissa = w_float & 0x0fff;
+ if (w_float & 0x8000)
+ mantissa |= 0xfffff000;
+ exponent = (w_float>>12) & 0x7;
+ if (exponent) {
+ mantissa ^= 0x1000;
+ mantissa <<= (exponent-1);
+ }
+
+ /* store coefficient */
+ coeff[(co<<1)|1] = mantissa;
+ }
+ if (debug & DEBUG_HFCMULTI_DTMF)
+ printk("%s: DTMF ready %08x %08x %08x %08x "
+ "%08x %08x %08x %08x\n", __func__,
+ coeff[0], coeff[1], coeff[2], coeff[3],
+ coeff[4], coeff[5], coeff[6], coeff[7]);
+ hc->chan[ch].coeff_count++;
+ if (hc->chan[ch].coeff_count == 8) {
+ hc->chan[ch].coeff_count = 0;
+ skb = mI_alloc_skb(512, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_WARNING "%s: No memory for skb\n",
+ __func__);
+ continue;
+ }
+ hh = mISDN_HEAD_P(skb);
+ hh->prim = PH_CONTROL_IND;
+ hh->id = DTMF_HFC_COEF;
+ memcpy(skb_put(skb, 512), hc->chan[ch].coeff, 512);
+ recv_Bchannel_skb(bch, skb);
+ }
+ }
+
+ /* restart DTMF processing */
+ hc->dtmf = dtmf;
+ if (dtmf)
+ HFC_outb_nodebug(hc, R_DTMF, hc->hw.r_dtmf | V_RST_DTMF);
+}
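+
+/*
+ * Worked example of the coefficient decoding above (added for clarity,
+ * derived from the code rather than the chip documentation):
+ *   w_float = 0x3123 -> mantissa = 0x123, exponent = 3
+ *   exponent != 0: mantissa ^= 0x1000 -> 0x1123, mantissa <<= 2 -> 0x448c
+ * Values with bit 15 set are sign-extended to 32 bit before this step.
+ */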
+
+
+/*
+ * fill fifo as much as possible
+ */
+
+static void
+hfcmulti_tx(struct hfc_multi *hc, int ch)
+{
+ int i, ii, temp, len = 0;
+ int Zspace, z1, z2; /* must be int for calculation */
+ int Fspace, f1, f2;
+ u_char *d;
+ int *txpending, slot_tx;
+ struct bchannel *bch;
+ struct dchannel *dch;
+ struct sk_buff **sp = NULL;
+ int *idxp;
+
+ bch = hc->chan[ch].bch;
+ dch = hc->chan[ch].dch;
+ if ((!dch) && (!bch))
+ return;
+
+ txpending = &hc->chan[ch].txpending;
+ slot_tx = hc->chan[ch].slot_tx;
+ if (dch) {
+ if (!test_bit(FLG_ACTIVE, &dch->Flags))
+ return;
+ sp = &dch->tx_skb;
+ idxp = &dch->tx_idx;
+ } else {
+ if (!test_bit(FLG_ACTIVE, &bch->Flags))
+ return;
+ sp = &bch->tx_skb;
+ idxp = &bch->tx_idx;
+ }
+ if (*sp)
+ len = (*sp)->len;
+
+ if ((!len) && *txpending != 1)
+ return; /* no data */
+
+ if (test_bit(HFC_CHIP_B410P, &hc->chip) &&
+ (hc->chan[ch].protocol == ISDN_P_B_RAW) &&
+ (hc->chan[ch].slot_rx < 0) &&
+ (hc->chan[ch].slot_tx < 0))
+ HFC_outb_nodebug(hc, R_FIFO, 0x20 | (ch << 1));
+ else
+ HFC_outb_nodebug(hc, R_FIFO, ch << 1);
+ HFC_wait_nodebug(hc);
+
+ if (*txpending == 2) {
+ /* reset fifo */
+ HFC_outb_nodebug(hc, R_INC_RES_FIFO, V_RES_F);
+ HFC_wait_nodebug(hc);
+ HFC_outb(hc, A_SUBCH_CFG, 0);
+ *txpending = 1;
+ }
+next_frame:
+ if (dch || test_bit(FLG_HDLC, &bch->Flags)) {
+ f1 = HFC_inb_nodebug(hc, A_F1);
+ f2 = HFC_inb_nodebug(hc, A_F2);
+ while (f2 != (temp = HFC_inb_nodebug(hc, A_F2))) {
+ if (debug & DEBUG_HFCMULTI_FIFO)
+ printk(KERN_DEBUG
+ "%s(card %d): reread f2 because %d!=%d\n",
+ __func__, hc->id + 1, temp, f2);
+ f2 = temp; /* repeat until F2 is equal */
+ }
+ Fspace = f2 - f1 - 1;
+ if (Fspace < 0)
+ Fspace += hc->Flen;
+ /*
+ * Old FIFO handling doesn't give us the current Z2 read
+ * pointer, so we cannot send the next frame before the fifo
+ * is empty. It makes no difference except for a slightly
+ * lower performance.
+ */
+ if (test_bit(HFC_CHIP_REVISION0, &hc->chip)) {
+ if (f1 != f2)
+ Fspace = 0;
+ else
+ Fspace = 1;
+ }
+ /* one frame only for ST D-channels, to allow resending */
+ if (hc->type != 1 && dch) {
+ if (f1 != f2)
+ Fspace = 0;
+ }
+ /* F-counter full condition */
+ if (Fspace == 0)
+ return;
+ }
+ z1 = HFC_inw_nodebug(hc, A_Z1) - hc->Zmin;
+ z2 = HFC_inw_nodebug(hc, A_Z2) - hc->Zmin;
+ while (z2 != (temp = (HFC_inw_nodebug(hc, A_Z2) - hc->Zmin))) {
+ if (debug & DEBUG_HFCMULTI_FIFO)
+ printk(KERN_DEBUG "%s(card %d): reread z2 because "
+ "%d!=%d\n", __func__, hc->id + 1, temp, z2);
+		z2 = temp; /* repeat until Z2 is equal */
+ }
+ Zspace = z2 - z1;
+ if (Zspace <= 0)
+ Zspace += hc->Zlen;
+ Zspace -= 4; /* keep not too full, so pointers will not overrun */
+	/* fill transparent data only to maximum transparent load (minus 4) */
+ if (bch && test_bit(FLG_TRANSPARENT, &bch->Flags))
+ Zspace = Zspace - hc->Zlen + hc->max_trans;
+ if (Zspace <= 0) /* no space of 4 bytes */
+ return;
+
+ /* if no data */
+ if (!len) {
+ if (z1 == z2) { /* empty */
+ /* if done with FIFO audio data during PCM connection */
+ if (bch && (!test_bit(FLG_HDLC, &bch->Flags)) &&
+ *txpending && slot_tx >= 0) {
+ if (debug & DEBUG_HFCMULTI_MODE)
+ printk(KERN_DEBUG
+ "%s: reconnecting PCM due to no "
+ "more FIFO data: channel %d "
+ "slot_tx %d\n",
+ __func__, ch, slot_tx);
+ /* connect slot */
+ HFC_outb(hc, A_CON_HDLC, 0xc0 | 0x00 |
+ V_HDLC_TRP | V_IFF);
+ HFC_outb_nodebug(hc, R_FIFO, ch<<1 | 1);
+ HFC_wait_nodebug(hc);
+ HFC_outb(hc, A_CON_HDLC, 0xc0 | 0x00 |
+ V_HDLC_TRP | V_IFF);
+ HFC_outb_nodebug(hc, R_FIFO, ch<<1);
+ HFC_wait_nodebug(hc);
+ }
+ *txpending = 0;
+ }
+ return; /* no data */
+ }
+
+ /* if audio data and connected slot */
+ if (bch && (!test_bit(FLG_HDLC, &bch->Flags)) && (!*txpending)
+ && slot_tx >= 0) {
+ if (debug & DEBUG_HFCMULTI_MODE)
+ printk(KERN_DEBUG "%s: disconnecting PCM due to "
+ "FIFO data: channel %d slot_tx %d\n",
+ __func__, ch, slot_tx);
+ /* disconnect slot */
+ HFC_outb(hc, A_CON_HDLC, 0x80 | 0x00 | V_HDLC_TRP | V_IFF);
+ HFC_outb_nodebug(hc, R_FIFO, ch<<1 | 1);
+ HFC_wait_nodebug(hc);
+ HFC_outb(hc, A_CON_HDLC, 0x80 | 0x00 | V_HDLC_TRP | V_IFF);
+ HFC_outb_nodebug(hc, R_FIFO, ch<<1);
+ HFC_wait_nodebug(hc);
+ }
+ *txpending = 1;
+
+ /* show activity */
+ hc->activity[hc->chan[ch].port] = 1;
+
+ /* fill fifo to what we have left */
+ ii = len;
+ if (dch || test_bit(FLG_HDLC, &bch->Flags))
+ temp = 1;
+ else
+ temp = 0;
+ i = *idxp;
+ d = (*sp)->data + i;
+ if (ii - i > Zspace)
+ ii = Zspace + i;
+ if (debug & DEBUG_HFCMULTI_FIFO)
+ printk(KERN_DEBUG "%s(card %d): fifo(%d) has %d bytes space "
+ "left (z1=%04x, z2=%04x) sending %d of %d bytes %s\n",
+ __func__, hc->id + 1, ch, Zspace, z1, z2, ii-i, len-i,
+ temp ? "HDLC":"TRANS");
+
+
+ /* Have to prep the audio data */
+ hc->write_fifo(hc, d, ii - i);
+ *idxp = ii;
+
+ /* if not all data has been written */
+ if (ii != len) {
+ /* NOTE: fifo is started by the calling function */
+ return;
+ }
+
+ /* if all data has been written, terminate frame */
+ if (dch || test_bit(FLG_HDLC, &bch->Flags)) {
+ /* increment f-counter */
+ HFC_outb_nodebug(hc, R_INC_RES_FIFO, V_INC_F);
+ HFC_wait_nodebug(hc);
+ }
+
+ /* send confirm, since get_net_bframe will not do it with trans */
+ if (bch && test_bit(FLG_TRANSPARENT, &bch->Flags))
+ confirm_Bsend(bch);
+
+ /* check for next frame */
+ dev_kfree_skb(*sp);
+ if (bch && get_next_bframe(bch)) { /* hdlc is confirmed here */
+ len = (*sp)->len;
+ goto next_frame;
+ }
+ if (dch && get_next_dframe(dch)) {
+ len = (*sp)->len;
+ goto next_frame;
+ }
+
+ /*
+ * now we have no more data, so in case of transparent,
+ * we set the last byte in fifo to 'silence' in case we will get
+ * no more data at all. this prevents sending an undefined value.
+ */
+ if (bch && test_bit(FLG_TRANSPARENT, &bch->Flags))
+ HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, silence);
+}
+
+
+/* NOTE: only called if E1 card is in active state */
+static void
+hfcmulti_rx(struct hfc_multi *hc, int ch)
+{
+ int temp;
+ int Zsize, z1, z2 = 0; /* = 0, to make GCC happy */
+ int f1 = 0, f2 = 0; /* = 0, to make GCC happy */
+ int again = 0;
+ struct bchannel *bch;
+ struct dchannel *dch;
+ struct sk_buff *skb, **sp = NULL;
+ int maxlen;
+
+ bch = hc->chan[ch].bch;
+ dch = hc->chan[ch].dch;
+ if ((!dch) && (!bch))
+ return;
+ if (dch) {
+ if (!test_bit(FLG_ACTIVE, &dch->Flags))
+ return;
+ sp = &dch->rx_skb;
+ maxlen = dch->maxlen;
+ } else {
+ if (!test_bit(FLG_ACTIVE, &bch->Flags))
+ return;
+ sp = &bch->rx_skb;
+ maxlen = bch->maxlen;
+ }
+next_frame:
+	/*
+	 * R_FIFO must be written before the first frame and before fetching
+	 * each next valid frame.
+	 */
+ if (test_bit(HFC_CHIP_B410P, &hc->chip) &&
+ (hc->chan[ch].protocol == ISDN_P_B_RAW) &&
+ (hc->chan[ch].slot_rx < 0) &&
+ (hc->chan[ch].slot_tx < 0))
+ HFC_outb_nodebug(hc, R_FIFO, 0x20 | (ch<<1) | 1);
+ else
+ HFC_outb_nodebug(hc, R_FIFO, (ch<<1)|1);
+ HFC_wait_nodebug(hc);
+
+ /* ignore if rx is off BUT change fifo (above) to start pending TX */
+ if (hc->chan[ch].rx_off)
+ return;
+
+ if (dch || test_bit(FLG_HDLC, &bch->Flags)) {
+ f1 = HFC_inb_nodebug(hc, A_F1);
+ while (f1 != (temp = HFC_inb_nodebug(hc, A_F1))) {
+ if (debug & DEBUG_HFCMULTI_FIFO)
+ printk(KERN_DEBUG
+ "%s(card %d): reread f1 because %d!=%d\n",
+ __func__, hc->id + 1, temp, f1);
+ f1 = temp; /* repeat until F1 is equal */
+ }
+ f2 = HFC_inb_nodebug(hc, A_F2);
+ }
+ z1 = HFC_inw_nodebug(hc, A_Z1) - hc->Zmin;
+ while (z1 != (temp = (HFC_inw_nodebug(hc, A_Z1) - hc->Zmin))) {
+ if (debug & DEBUG_HFCMULTI_FIFO)
+			printk(KERN_DEBUG "%s(card %d): reread z1 because "
+				"%d!=%d\n", __func__, hc->id + 1, temp, z1);
+ z1 = temp; /* repeat until Z1 is equal */
+ }
+ z2 = HFC_inw_nodebug(hc, A_Z2) - hc->Zmin;
+ Zsize = z1 - z2;
+ if ((dch || test_bit(FLG_HDLC, &bch->Flags)) && f1 != f2)
+ /* complete hdlc frame */
+ Zsize++;
+ if (Zsize < 0)
+ Zsize += hc->Zlen;
+ /* if buffer is empty */
+ if (Zsize <= 0)
+ return;
+
+ if (*sp == NULL) {
+ *sp = mI_alloc_skb(maxlen + 3, GFP_ATOMIC);
+ if (*sp == NULL) {
+ printk(KERN_DEBUG "%s: No mem for rx_skb\n",
+ __func__);
+ return;
+ }
+ }
+ /* show activity */
+ hc->activity[hc->chan[ch].port] = 1;
+
+ /* empty fifo with what we have */
+ if (dch || test_bit(FLG_HDLC, &bch->Flags)) {
+ if (debug & DEBUG_HFCMULTI_FIFO)
+ printk(KERN_DEBUG "%s(card %d): fifo(%d) reading %d "
+ "bytes (z1=%04x, z2=%04x) HDLC %s (f1=%d, f2=%d) "
+ "got=%d (again %d)\n", __func__, hc->id + 1, ch,
+ Zsize, z1, z2, (f1 == f2) ? "fragment" : "COMPLETE",
+ f1, f2, Zsize + (*sp)->len, again);
+ /* HDLC */
+ if ((Zsize + (*sp)->len) > (maxlen + 3)) {
+ if (debug & DEBUG_HFCMULTI_FIFO)
+ printk(KERN_DEBUG
+ "%s(card %d): hdlc-frame too large.\n",
+ __func__, hc->id + 1);
+ skb_trim(*sp, 0);
+ HFC_outb_nodebug(hc, R_INC_RES_FIFO, V_RES_F);
+ HFC_wait_nodebug(hc);
+ return;
+ }
+
+ hc->read_fifo(hc, skb_put(*sp, Zsize), Zsize);
+
+ if (f1 != f2) {
+ /* increment Z2,F2-counter */
+ HFC_outb_nodebug(hc, R_INC_RES_FIFO, V_INC_F);
+ HFC_wait_nodebug(hc);
+ /* check size */
+ if ((*sp)->len < 4) {
+ if (debug & DEBUG_HFCMULTI_FIFO)
+ printk(KERN_DEBUG
+ "%s(card %d): Frame below minimum "
+ "size\n", __func__, hc->id + 1);
+ skb_trim(*sp, 0);
+ goto next_frame;
+ }
+ /* there is at least one complete frame, check crc */
+ if ((*sp)->data[(*sp)->len - 1]) {
+ if (debug & DEBUG_HFCMULTI_CRC)
+ printk(KERN_DEBUG
+ "%s: CRC-error\n", __func__);
+ skb_trim(*sp, 0);
+ goto next_frame;
+ }
+ skb_trim(*sp, (*sp)->len - 3);
+ if ((*sp)->len < MISDN_COPY_SIZE) {
+ skb = *sp;
+ *sp = mI_alloc_skb(skb->len, GFP_ATOMIC);
+ if (*sp) {
+ memcpy(skb_put(*sp, skb->len),
+ skb->data, skb->len);
+ skb_trim(skb, 0);
+ } else {
+ printk(KERN_DEBUG "%s: No mem\n",
+ __func__);
+ *sp = skb;
+ skb = NULL;
+ }
+ } else {
+ skb = NULL;
+ }
+ if (debug & DEBUG_HFCMULTI_FIFO) {
+ printk(KERN_DEBUG "%s(card %d):",
+ __func__, hc->id + 1);
+ temp = 0;
+ while (temp < (*sp)->len)
+ printk(" %02x", (*sp)->data[temp++]);
+ printk("\n");
+ }
+ if (dch)
+ recv_Dchannel(dch);
+ else
+ recv_Bchannel(bch);
+ *sp = skb;
+ again++;
+ goto next_frame;
+ }
+ /* there is an incomplete frame */
+ } else {
+ /* transparent */
+ if (Zsize > skb_tailroom(*sp))
+ Zsize = skb_tailroom(*sp);
+ hc->read_fifo(hc, skb_put(*sp, Zsize), Zsize);
+ if (((*sp)->len) < MISDN_COPY_SIZE) {
+ skb = *sp;
+ *sp = mI_alloc_skb(skb->len, GFP_ATOMIC);
+ if (*sp) {
+ memcpy(skb_put(*sp, skb->len),
+ skb->data, skb->len);
+ skb_trim(skb, 0);
+ } else {
+ printk(KERN_DEBUG "%s: No mem\n", __func__);
+ *sp = skb;
+ skb = NULL;
+ }
+ } else {
+ skb = NULL;
+ }
+ if (debug & DEBUG_HFCMULTI_FIFO)
+ printk(KERN_DEBUG
+ "%s(card %d): fifo(%d) reading %d bytes "
+ "(z1=%04x, z2=%04x) TRANS\n",
+ __func__, hc->id + 1, ch, Zsize, z1, z2);
+ /* only bch is transparent */
+ recv_Bchannel(bch);
+ *sp = skb;
+ }
+}
+
+
+/*
+ * Interrupt handler
+ */
+static void
+signal_state_up(struct dchannel *dch, int info, char *msg)
+{
+ struct sk_buff *skb;
+ int id, data = info;
+
+ if (debug & DEBUG_HFCMULTI_STATE)
+ printk(KERN_DEBUG "%s: %s\n", __func__, msg);
+
+ id = TEI_SAPI | (GROUP_TEI << 8); /* manager address */
+
+ skb = _alloc_mISDN_skb(MPH_INFORMATION_IND, id, sizeof(data), &data,
+ GFP_ATOMIC);
+ if (!skb)
+ return;
+ recv_Dchannel_skb(dch, skb);
+}
+
+static inline void
+handle_timer_irq(struct hfc_multi *hc)
+{
+ int ch, temp;
+ struct dchannel *dch;
+ u_long flags;
+
+ /* process queued resync jobs */
+ if (hc->e1_resync) {
+		/* lock, so e1_resync does not change under us */
+ spin_lock_irqsave(&HFClock, flags);
+ if (hc->e1_resync & 1) {
+ if (debug & DEBUG_HFCMULTI_PLXSD)
+ printk(KERN_DEBUG "Enable SYNC_I\n");
+ HFC_outb(hc, R_SYNC_CTRL, V_EXT_CLK_SYNC);
+ /* disable JATT, if RX_SYNC is set */
+ if (test_bit(HFC_CHIP_RX_SYNC, &hc->chip))
+ HFC_outb(hc, R_SYNC_OUT, V_SYNC_E1_RX);
+ }
+ if (hc->e1_resync & 2) {
+ if (debug & DEBUG_HFCMULTI_PLXSD)
+ printk(KERN_DEBUG "Enable jatt PLL\n");
+ HFC_outb(hc, R_SYNC_CTRL, V_SYNC_OFFS);
+ }
+ if (hc->e1_resync & 4) {
+ if (debug & DEBUG_HFCMULTI_PLXSD)
+ printk(KERN_DEBUG
+ "Enable QUARTZ for HFC-E1\n");
+ /* set jatt to quartz */
+ HFC_outb(hc, R_SYNC_CTRL, V_EXT_CLK_SYNC
+ | V_JATT_OFF);
+ /* switch to JATT, in case it is not already */
+ HFC_outb(hc, R_SYNC_OUT, 0);
+ }
+ hc->e1_resync = 0;
+ spin_unlock_irqrestore(&HFClock, flags);
+ }
+
+ if (hc->type != 1 || hc->e1_state == 1)
+ for (ch = 0; ch <= 31; ch++) {
+ if (hc->created[hc->chan[ch].port]) {
+ hfcmulti_tx(hc, ch);
+ /* fifo is started when switching to rx-fifo */
+ hfcmulti_rx(hc, ch);
+ if (hc->chan[ch].dch &&
+ hc->chan[ch].nt_timer > -1) {
+ dch = hc->chan[ch].dch;
+ if (!(--hc->chan[ch].nt_timer)) {
+ schedule_event(dch,
+ FLG_PHCHANGE);
+ if (debug &
+ DEBUG_HFCMULTI_STATE)
+ printk(KERN_DEBUG
+ "%s: nt_timer at "
+ "state %x\n",
+ __func__,
+ dch->state);
+ }
+ }
+ }
+ }
+ if (hc->type == 1 && hc->created[0]) {
+ dch = hc->chan[hc->dslot].dch;
+ if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dslot].cfg)) {
+ /* LOS */
+ temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_SIG_LOS;
+ if (!temp && hc->chan[hc->dslot].los)
+ signal_state_up(dch, L1_SIGNAL_LOS_ON,
+ "LOS detected");
+ if (temp && !hc->chan[hc->dslot].los)
+ signal_state_up(dch, L1_SIGNAL_LOS_OFF,
+ "LOS gone");
+ hc->chan[hc->dslot].los = temp;
+ }
+ if (test_bit(HFC_CFG_REPORT_AIS, &hc->chan[hc->dslot].cfg)) {
+ /* AIS */
+ temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_AIS;
+ if (!temp && hc->chan[hc->dslot].ais)
+ signal_state_up(dch, L1_SIGNAL_AIS_ON,
+ "AIS detected");
+ if (temp && !hc->chan[hc->dslot].ais)
+ signal_state_up(dch, L1_SIGNAL_AIS_OFF,
+ "AIS gone");
+ hc->chan[hc->dslot].ais = temp;
+ }
+ if (test_bit(HFC_CFG_REPORT_SLIP, &hc->chan[hc->dslot].cfg)) {
+ /* SLIP */
+ temp = HFC_inb_nodebug(hc, R_SLIP) & V_FOSLIP_RX;
+ if (!temp && hc->chan[hc->dslot].slip_rx)
+ signal_state_up(dch, L1_SIGNAL_SLIP_RX,
+ " bit SLIP detected RX");
+ hc->chan[hc->dslot].slip_rx = temp;
+ temp = HFC_inb_nodebug(hc, R_SLIP) & V_FOSLIP_TX;
+ if (!temp && hc->chan[hc->dslot].slip_tx)
+ signal_state_up(dch, L1_SIGNAL_SLIP_TX,
+ " bit SLIP detected TX");
+ hc->chan[hc->dslot].slip_tx = temp;
+ }
+ if (test_bit(HFC_CFG_REPORT_RDI, &hc->chan[hc->dslot].cfg)) {
+ /* RDI */
+ temp = HFC_inb_nodebug(hc, R_RX_SL0_0) & V_A;
+ if (!temp && hc->chan[hc->dslot].rdi)
+ signal_state_up(dch, L1_SIGNAL_RDI_ON,
+ "RDI detected");
+ if (temp && !hc->chan[hc->dslot].rdi)
+ signal_state_up(dch, L1_SIGNAL_RDI_OFF,
+ "RDI gone");
+ hc->chan[hc->dslot].rdi = temp;
+ }
+ temp = HFC_inb_nodebug(hc, R_JATT_DIR);
+ switch (hc->chan[hc->dslot].sync) {
+ case 0:
+ if ((temp & 0x60) == 0x60) {
+ if (debug & DEBUG_HFCMULTI_SYNC)
+ printk(KERN_DEBUG
+ "%s: (id=%d) E1 now "
+ "in clock sync\n",
+ __func__, hc->id);
+ HFC_outb(hc, R_RX_OFF,
+ hc->chan[hc->dslot].jitter | V_RX_INIT);
+ HFC_outb(hc, R_TX_OFF,
+ hc->chan[hc->dslot].jitter | V_RX_INIT);
+ hc->chan[hc->dslot].sync = 1;
+ goto check_framesync;
+ }
+ break;
+ case 1:
+ if ((temp & 0x60) != 0x60) {
+ if (debug & DEBUG_HFCMULTI_SYNC)
+ printk(KERN_DEBUG
+ "%s: (id=%d) E1 "
+ "lost clock sync\n",
+ __func__, hc->id);
+ hc->chan[hc->dslot].sync = 0;
+ break;
+ }
+check_framesync:
+ temp = HFC_inb_nodebug(hc, R_SYNC_STA);
+ if (temp == 0x27) {
+ if (debug & DEBUG_HFCMULTI_SYNC)
+ printk(KERN_DEBUG
+ "%s: (id=%d) E1 "
+ "now in frame sync\n",
+ __func__, hc->id);
+ hc->chan[hc->dslot].sync = 2;
+ }
+ break;
+ case 2:
+ if ((temp & 0x60) != 0x60) {
+ if (debug & DEBUG_HFCMULTI_SYNC)
+ printk(KERN_DEBUG
+ "%s: (id=%d) E1 lost "
+ "clock & frame sync\n",
+ __func__, hc->id);
+ hc->chan[hc->dslot].sync = 0;
+ break;
+ }
+ temp = HFC_inb_nodebug(hc, R_SYNC_STA);
+ if (temp != 0x27) {
+ if (debug & DEBUG_HFCMULTI_SYNC)
+ printk(KERN_DEBUG
+ "%s: (id=%d) E1 "
+ "lost frame sync\n",
+ __func__, hc->id);
+ hc->chan[hc->dslot].sync = 1;
+ }
+ break;
+ }
+ }
+
+ if (test_bit(HFC_CHIP_WATCHDOG, &hc->chip))
+ hfcmulti_watchdog(hc);
+
+ if (hc->leds)
+ hfcmulti_leds(hc);
+}
+
+static void
+ph_state_irq(struct hfc_multi *hc, u_char r_irq_statech)
+{
+ struct dchannel *dch;
+ int ch;
+ int active;
+ u_char st_status, temp;
+
+ /* state machine */
+ for (ch = 0; ch <= 31; ch++) {
+ if (hc->chan[ch].dch) {
+ dch = hc->chan[ch].dch;
+ if (r_irq_statech & 1) {
+ HFC_outb_nodebug(hc, R_ST_SEL,
+ hc->chan[ch].port);
+ /* undocumented: delay after R_ST_SEL */
+ udelay(1);
+ /* undocumented: status changes during read */
+ st_status = HFC_inb_nodebug(hc, A_ST_RD_STATE);
+ while (st_status != (temp =
+ HFC_inb_nodebug(hc, A_ST_RD_STATE))) {
+ if (debug & DEBUG_HFCMULTI_STATE)
+ printk(KERN_DEBUG "%s: reread "
+ "STATE because %d!=%d\n",
+ __func__, temp,
+ st_status);
+ st_status = temp; /* repeat */
+ }
+
+ /* Speech Design TE-sync indication */
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip) &&
+ dch->dev.D.protocol == ISDN_P_TE_S0) {
+ if (st_status & V_FR_SYNC_ST)
+ hc->syncronized |=
+ (1 << hc->chan[ch].port);
+ else
+ hc->syncronized &=
+ ~(1 << hc->chan[ch].port);
+ }
+ dch->state = st_status & 0x0f;
+ if (dch->dev.D.protocol == ISDN_P_NT_S0)
+ active = 3;
+ else
+ active = 7;
+ if (dch->state == active) {
+ HFC_outb_nodebug(hc, R_FIFO,
+ (ch << 1) | 1);
+ HFC_wait_nodebug(hc);
+ HFC_outb_nodebug(hc,
+ R_INC_RES_FIFO, V_RES_F);
+ HFC_wait_nodebug(hc);
+ dch->tx_idx = 0;
+ }
+ schedule_event(dch, FLG_PHCHANGE);
+ if (debug & DEBUG_HFCMULTI_STATE)
+ printk(KERN_DEBUG
+ "%s: S/T newstate %x port %d\n",
+ __func__, dch->state,
+ hc->chan[ch].port);
+ }
+ r_irq_statech >>= 1;
+ }
+ }
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip))
+ plxsd_checksync(hc, 0);
+}
+
+static void
+fifo_irq(struct hfc_multi *hc, int block)
+{
+ int ch, j;
+ struct dchannel *dch;
+ struct bchannel *bch;
+ u_char r_irq_fifo_bl;
+
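+ /*
+ * Each FIFO block register carries 8 interrupt bits for 4 channels:
+ * as the loop below shows, even bit positions are handled as TX and
+ * odd positions as RX for channel (block << 2) + (j >> 1).
+ */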
+ r_irq_fifo_bl = HFC_inb_nodebug(hc, R_IRQ_FIFO_BL0 + block);
+ j = 0;
+ while (j < 8) {
+ ch = (block << 2) + (j >> 1);
+ dch = hc->chan[ch].dch;
+ bch = hc->chan[ch].bch;
+ if (((!dch) && (!bch)) || (!hc->created[hc->chan[ch].port])) {
+ j += 2;
+ continue;
+ }
+ if (dch && (r_irq_fifo_bl & (1 << j)) &&
+ test_bit(FLG_ACTIVE, &dch->Flags)) {
+ hfcmulti_tx(hc, ch);
+ /* start fifo */
+ HFC_outb_nodebug(hc, R_FIFO, 0);
+ HFC_wait_nodebug(hc);
+ }
+ if (bch && (r_irq_fifo_bl & (1 << j)) &&
+ test_bit(FLG_ACTIVE, &bch->Flags)) {
+ hfcmulti_tx(hc, ch);
+ /* start fifo */
+ HFC_outb_nodebug(hc, R_FIFO, 0);
+ HFC_wait_nodebug(hc);
+ }
+ j++;
+ if (dch && (r_irq_fifo_bl & (1 << j)) &&
+ test_bit(FLG_ACTIVE, &dch->Flags)) {
+ hfcmulti_rx(hc, ch);
+ }
+ if (bch && (r_irq_fifo_bl & (1 << j)) &&
+ test_bit(FLG_ACTIVE, &bch->Flags)) {
+ hfcmulti_rx(hc, ch);
+ }
+ j++;
+ }
+}
+
+#ifdef IRQ_DEBUG
+int irqsem;
+#endif
+static irqreturn_t
+hfcmulti_interrupt(int intno, void *dev_id)
+{
+#ifdef IRQCOUNT_DEBUG
+ static int iq1 = 0, iq2 = 0, iq3 = 0, iq4 = 0,
+ iq5 = 0, iq6 = 0, iqcnt = 0;
+#endif
+ static int count;
+ struct hfc_multi *hc = dev_id;
+ struct dchannel *dch;
+ u_char r_irq_statech, status, r_irq_misc, r_irq_oview;
+ int i;
+ u_short *plx_acc, wval;
+ u_char e1_syncsta, temp;
+ u_long flags;
+
+ if (!hc) {
+ printk(KERN_ERR "HFC-multi: Spurious interrupt!\n");
+ return IRQ_NONE;
+ }
+
+ spin_lock(&hc->lock);
+
+#ifdef IRQ_DEBUG
+ if (irqsem)
+ printk(KERN_ERR "irq for card %d during irq from "
+ "card %d, this is no bug.\n", hc->id + 1, irqsem);
+ irqsem = hc->id + 1;
+#endif
+
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
+ spin_lock_irqsave(&plx_lock, flags);
+ plx_acc = (u_short *)(hc->plx_membase + PLX_INTCSR);
+ wval = readw(plx_acc);
+ spin_unlock_irqrestore(&plx_lock, flags);
+ if (!(wval & PLX_INTCSR_LINTI1_STATUS))
+ goto irq_notforus;
+ }
+
+ status = HFC_inb_nodebug(hc, R_STATUS);
+ r_irq_statech = HFC_inb_nodebug(hc, R_IRQ_STATECH);
+#ifdef IRQCOUNT_DEBUG
+ if (r_irq_statech)
+ iq1++;
+ if (status & V_DTMF_STA)
+ iq2++;
+ if (status & V_LOST_STA)
+ iq3++;
+ if (status & V_EXT_IRQSTA)
+ iq4++;
+ if (status & V_MISC_IRQSTA)
+ iq5++;
+ if (status & V_FR_IRQSTA)
+ iq6++;
+ if (iqcnt++ > 5000) {
+ printk(KERN_ERR "iq1:%x iq2:%x iq3:%x iq4:%x iq5:%x iq6:%x\n",
+ iq1, iq2, iq3, iq4, iq5, iq6);
+ iqcnt = 0;
+ }
+#endif
+ if (!r_irq_statech &&
+ !(status & (V_DTMF_STA | V_LOST_STA | V_EXT_IRQSTA |
+ V_MISC_IRQSTA | V_FR_IRQSTA))) {
+ /* irq is not for us */
+ goto irq_notforus;
+ }
+ hc->irqcnt++;
+ if (r_irq_statech) {
+ if (hc->type != 1)
+ ph_state_irq(hc, r_irq_statech);
+ }
+ if (status & V_EXT_IRQSTA)
+ ; /* external IRQ */
+ if (status & V_LOST_STA) {
+ /* LOST IRQ */
+ HFC_outb(hc, R_INC_RES_FIFO, V_RES_LOST); /* clear irq! */
+ }
+ if (status & V_MISC_IRQSTA) {
+ /* misc IRQ */
+ r_irq_misc = HFC_inb_nodebug(hc, R_IRQ_MISC);
+ if (r_irq_misc & V_STA_IRQ) {
+ if (hc->type == 1) {
+ /* state machine */
+ dch = hc->chan[hc->dslot].dch;
+ e1_syncsta = HFC_inb_nodebug(hc, R_SYNC_STA);
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip)
+ && hc->e1_getclock) {
+ if (e1_syncsta & V_FR_SYNC_E1)
+ hc->syncronized = 1;
+ else
+ hc->syncronized = 0;
+ }
+ /* undocumented: status changes during read */
+ dch->state = HFC_inb_nodebug(hc, R_E1_RD_STA);
+ while (dch->state != (temp =
+ HFC_inb_nodebug(hc, R_E1_RD_STA))) {
+ if (debug & DEBUG_HFCMULTI_STATE)
+ printk(KERN_DEBUG "%s: reread "
+ "STATE because %d!=%d\n",
+ __func__, temp,
+ dch->state);
+ dch->state = temp; /* repeat */
+ }
+ dch->state = HFC_inb_nodebug(hc, R_E1_RD_STA)
+ & 0x7;
+ schedule_event(dch, FLG_PHCHANGE);
+ if (debug & DEBUG_HFCMULTI_STATE)
+ printk(KERN_DEBUG
+ "%s: E1 (id=%d) newstate %x\n",
+ __func__, hc->id, dch->state);
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip))
+ plxsd_checksync(hc, 0);
+ }
+ }
+ if (r_irq_misc & V_TI_IRQ)
+ handle_timer_irq(hc);
+
+ if (r_irq_misc & V_DTMF_IRQ) {
+ /* -> DTMF IRQ */
+ hfcmulti_dtmf(hc);
+ }
+ /* TODO: REPLACE !!!! 125 us Interrupts are not acceptable */
+ if (r_irq_misc & V_IRQ_PROC) {
+ /* IRQ every 125us */
+ count++;
+ /* generate 1kHz signal */
+ if (count == 8) {
+ if (hfc_interrupt)
+ hfc_interrupt();
+ count = 0;
+ }
+ }
+
+ }
+ if (status & V_FR_IRQSTA) {
+ /* FIFO IRQ */
+ r_irq_oview = HFC_inb_nodebug(hc, R_IRQ_OVIEW);
+ for (i = 0; i < 8; i++) {
+ if (r_irq_oview & (1 << i))
+ fifo_irq(hc, i);
+ }
+ }
+
+#ifdef IRQ_DEBUG
+ irqsem = 0;
+#endif
+ spin_unlock(&hc->lock);
+ return IRQ_HANDLED;
+
+irq_notforus:
+#ifdef IRQ_DEBUG
+ irqsem = 0;
+#endif
+ spin_unlock(&hc->lock);
+ return IRQ_NONE;
+}
+
+
+/*
+ * timer callback for D-chan busy resolution. Currently no function
+ */
+
+static void
+hfcmulti_dbusy_timer(struct hfc_multi *hc)
+{
+}
+
+
+/*
+ * activate/deactivate hardware for selected channels and mode
+ *
+ * configure B-channel with the given protocol
+ * ch equals the HFC channel (0-31)
+ * ch is the channel number (0-3, 4-7, 8-11, 12-15, 16-19, 20-23, 24-27,
+ * 28-31 for S/T, 1-31 for E1)
+ * the hdlc interrupts will be set/unset
+ */
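+/*
+ * Typical calls (see the helpers further below): pass -1 for slot_tx
+ * and slot_rx to detach the channel from the PCM bus, e.g.
+ * mode_hfcmulti(hc, ch, ISDN_P_NONE, -1, 0, -1, 0), or non-negative
+ * slot/bank values to route it onto a PCM timeslot as hfcmulti_pcm()
+ * does.
+ */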
+static int
+mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
+ int bank_tx, int slot_rx, int bank_rx)
+{
+ int flow_tx = 0, flow_rx = 0, routing = 0;
+ int oslot_tx, oslot_rx;
+ int conf;
+
+ if (ch < 0 || ch > 31)
+ return -EINVAL;
+ oslot_tx = hc->chan[ch].slot_tx;
+ oslot_rx = hc->chan[ch].slot_rx;
+ conf = hc->chan[ch].conf;
+
+ if (debug & DEBUG_HFCMULTI_MODE)
+ printk(KERN_DEBUG
+ "%s: card %d channel %d protocol %x slot old=%d new=%d "
+ "bank new=%d (TX) slot old=%d new=%d bank new=%d (RX)\n",
+ __func__, hc->id, ch, protocol, oslot_tx, slot_tx,
+ bank_tx, oslot_rx, slot_rx, bank_rx);
+
+ if (oslot_tx >= 0 && slot_tx != oslot_tx) {
+ /* remove from slot */
+ if (debug & DEBUG_HFCMULTI_MODE)
+ printk(KERN_DEBUG "%s: remove from slot %d (TX)\n",
+ __func__, oslot_tx);
+ if (hc->slot_owner[oslot_tx<<1] == ch) {
+ HFC_outb(hc, R_SLOT, oslot_tx << 1);
+ HFC_outb(hc, A_SL_CFG, 0);
+ HFC_outb(hc, A_CONF, 0);
+ hc->slot_owner[oslot_tx<<1] = -1;
+ } else {
+ if (debug & DEBUG_HFCMULTI_MODE)
+ printk(KERN_DEBUG
+ "%s: we are not owner of this tx slot "
+ "anymore, channel %d is.\n",
+ __func__, hc->slot_owner[oslot_tx<<1]);
+ }
+ }
+
+ if (oslot_rx >= 0 && slot_rx != oslot_rx) {
+ /* remove from slot */
+ if (debug & DEBUG_HFCMULTI_MODE)
+ printk(KERN_DEBUG
+ "%s: remove from slot %d (RX)\n",
+ __func__, oslot_rx);
+ if (hc->slot_owner[(oslot_rx << 1) | 1] == ch) {
+ HFC_outb(hc, R_SLOT, (oslot_rx << 1) | V_SL_DIR);
+ HFC_outb(hc, A_SL_CFG, 0);
+ hc->slot_owner[(oslot_rx << 1) | 1] = -1;
+ } else {
+ if (debug & DEBUG_HFCMULTI_MODE)
+ printk(KERN_DEBUG
+ "%s: we are not owner of this rx slot "
+ "anymore, channel %d is.\n",
+ __func__,
+ hc->slot_owner[(oslot_rx << 1) | 1]);
+ }
+ }
+
+ if (slot_tx < 0) {
+ flow_tx = 0x80; /* FIFO->ST */
+ /* disable pcm slot */
+ hc->chan[ch].slot_tx = -1;
+ hc->chan[ch].bank_tx = 0;
+ } else {
+ /* set pcm slot */
+ if (hc->chan[ch].txpending)
+ flow_tx = 0x80; /* FIFO->ST */
+ else
+ flow_tx = 0xc0; /* PCM->ST */
+ /* put on slot */
+ routing = bank_tx ? 0xc0 : 0x80;
+ if (conf >= 0 || bank_tx > 1)
+ routing = 0x40; /* loop */
+ if (debug & DEBUG_HFCMULTI_MODE)
+ printk(KERN_DEBUG "%s: put channel %d to slot %d bank"
+ " %d flow %02x routing %02x conf %d (TX)\n",
+ __func__, ch, slot_tx, bank_tx,
+ flow_tx, routing, conf);
+ HFC_outb(hc, R_SLOT, slot_tx << 1);
+ HFC_outb(hc, A_SL_CFG, (ch<<1) | routing);
+ HFC_outb(hc, A_CONF, (conf < 0) ? 0 : (conf | V_CONF_SL));
+ hc->slot_owner[slot_tx << 1] = ch;
+ hc->chan[ch].slot_tx = slot_tx;
+ hc->chan[ch].bank_tx = bank_tx;
+ }
+ if (slot_rx < 0) {
+ /* disable pcm slot */
+ flow_rx = 0x80; /* ST->FIFO */
+ hc->chan[ch].slot_rx = -1;
+ hc->chan[ch].bank_rx = 0;
+ } else {
+ /* set pcm slot */
+ if (hc->chan[ch].txpending)
+ flow_rx = 0x80; /* ST->FIFO */
+ else
+ flow_rx = 0xc0; /* ST->(FIFO,PCM) */
+ /* put on slot */
+ routing = bank_rx ? 0x80 : 0xc0; /* reversed */
+ if (conf >= 0 || bank_rx > 1)
+ routing = 0x40; /* loop */
+ if (debug & DEBUG_HFCMULTI_MODE)
+ printk(KERN_DEBUG "%s: put channel %d to slot %d bank"
+ " %d flow %02x routing %02x conf %d (RX)\n",
+ __func__, ch, slot_rx, bank_rx,
+ flow_rx, routing, conf);
+ HFC_outb(hc, R_SLOT, (slot_rx<<1) | V_SL_DIR);
+ HFC_outb(hc, A_SL_CFG, (ch<<1) | V_CH_DIR | routing);
+ hc->slot_owner[(slot_rx<<1)|1] = ch;
+ hc->chan[ch].slot_rx = slot_rx;
+ hc->chan[ch].bank_rx = bank_rx;
+ }
+
+ switch (protocol) {
+ case (ISDN_P_NONE):
+ /* disable TX fifo */
+ HFC_outb(hc, R_FIFO, ch << 1);
+ HFC_wait(hc);
+ HFC_outb(hc, A_CON_HDLC, flow_tx | 0x00 | V_IFF);
+ HFC_outb(hc, A_SUBCH_CFG, 0);
+ HFC_outb(hc, A_IRQ_MSK, 0);
+ HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
+ HFC_wait(hc);
+ /* disable RX fifo */
+ HFC_outb(hc, R_FIFO, (ch<<1)|1);
+ HFC_wait(hc);
+ HFC_outb(hc, A_CON_HDLC, flow_rx | 0x00);
+ HFC_outb(hc, A_SUBCH_CFG, 0);
+ HFC_outb(hc, A_IRQ_MSK, 0);
+ HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
+ HFC_wait(hc);
+ if (hc->chan[ch].bch && hc->type != 1) {
+ hc->hw.a_st_ctrl0[hc->chan[ch].port] &=
+ ((ch & 0x3) == 0) ? ~V_B1_EN : ~V_B2_EN;
+ HFC_outb(hc, R_ST_SEL, hc->chan[ch].port);
+ /* undocumented: delay after R_ST_SEL */
+ udelay(1);
+ HFC_outb(hc, A_ST_CTRL0,
+ hc->hw.a_st_ctrl0[hc->chan[ch].port]);
+ }
+ if (hc->chan[ch].bch) {
+ test_and_clear_bit(FLG_HDLC, &hc->chan[ch].bch->Flags);
+ test_and_clear_bit(FLG_TRANSPARENT,
+ &hc->chan[ch].bch->Flags);
+ }
+ break;
+ case (ISDN_P_B_RAW): /* B-channel */
+
+ if (test_bit(HFC_CHIP_B410P, &hc->chip) &&
+ (hc->chan[ch].slot_rx < 0) &&
+ (hc->chan[ch].slot_tx < 0)) {
+
+ printk(KERN_DEBUG
+ "Setting B-channel %d to echo cancelable "
+ "state on PCM slot %d\n", ch,
+ ((ch / 4) * 8) + ((ch % 4) * 4) + 1);
+ printk(KERN_DEBUG
+ "Enabling pass through for channel\n");
+ vpm_out(hc, ch, ((ch / 4) * 8) +
+ ((ch % 4) * 4) + 1, 0x01);
+ /* rx path */
+ /* S/T -> PCM */
+ HFC_outb(hc, R_FIFO, (ch << 1));
+ HFC_wait(hc);
+ HFC_outb(hc, A_CON_HDLC, 0xc0 | V_HDLC_TRP | V_IFF);
+ HFC_outb(hc, R_SLOT, (((ch / 4) * 8) +
+ ((ch % 4) * 4) + 1) << 1);
+ HFC_outb(hc, A_SL_CFG, 0x80 | (ch << 1));
+
+ /* PCM -> FIFO */
+ HFC_outb(hc, R_FIFO, 0x20 | (ch << 1) | 1);
+ HFC_wait(hc);
+ HFC_outb(hc, A_CON_HDLC, 0x20 | V_HDLC_TRP | V_IFF);
+ HFC_outb(hc, A_SUBCH_CFG, 0);
+ HFC_outb(hc, A_IRQ_MSK, 0);
+ HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
+ HFC_wait(hc);
+ HFC_outb(hc, R_SLOT, ((((ch / 4) * 8) +
+ ((ch % 4) * 4) + 1) << 1) | 1);
+ HFC_outb(hc, A_SL_CFG, 0x80 | 0x20 | (ch << 1) | 1);
+
+ /* tx path */
+ /* PCM -> S/T */
+ HFC_outb(hc, R_FIFO, (ch << 1) | 1);
+ HFC_wait(hc);
+ HFC_outb(hc, A_CON_HDLC, 0xc0 | V_HDLC_TRP | V_IFF);
+ HFC_outb(hc, R_SLOT, ((((ch / 4) * 8) +
+ ((ch % 4) * 4)) << 1) | 1);
+ HFC_outb(hc, A_SL_CFG, 0x80 | 0x40 | (ch << 1) | 1);
+
+ /* FIFO -> PCM */
+ HFC_outb(hc, R_FIFO, 0x20 | (ch << 1));
+ HFC_wait(hc);
+ HFC_outb(hc, A_CON_HDLC, 0x20 | V_HDLC_TRP | V_IFF);
+ HFC_outb(hc, A_SUBCH_CFG, 0);
+ HFC_outb(hc, A_IRQ_MSK, 0);
+ HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
+ HFC_wait(hc);
+ /* tx silence */
+ HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, silence);
+ HFC_outb(hc, R_SLOT, (((ch / 4) * 8) +
+ ((ch % 4) * 4)) << 1);
+ HFC_outb(hc, A_SL_CFG, 0x80 | 0x20 | (ch << 1));
+ } else {
+ /* enable TX fifo */
+ HFC_outb(hc, R_FIFO, ch << 1);
+ HFC_wait(hc);
+ HFC_outb(hc, A_CON_HDLC, flow_tx | 0x00 |
+ V_HDLC_TRP | V_IFF);
+ HFC_outb(hc, A_SUBCH_CFG, 0);
+ HFC_outb(hc, A_IRQ_MSK, 0);
+ HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
+ HFC_wait(hc);
+ /* tx silence */
+ HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, silence);
+ /* enable RX fifo */
+ HFC_outb(hc, R_FIFO, (ch<<1)|1);
+ HFC_wait(hc);
+ HFC_outb(hc, A_CON_HDLC, flow_rx | 0x00 | V_HDLC_TRP);
+ HFC_outb(hc, A_SUBCH_CFG, 0);
+ HFC_outb(hc, A_IRQ_MSK, 0);
+ HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
+ HFC_wait(hc);
+ }
+ if (hc->type != 1) {
+ hc->hw.a_st_ctrl0[hc->chan[ch].port] |=
+ ((ch & 0x3) == 0) ? V_B1_EN : V_B2_EN;
+ HFC_outb(hc, R_ST_SEL, hc->chan[ch].port);
+ /* undocumented: delay after R_ST_SEL */
+ udelay(1);
+ HFC_outb(hc, A_ST_CTRL0,
+ hc->hw.a_st_ctrl0[hc->chan[ch].port]);
+ }
+ if (hc->chan[ch].bch)
+ test_and_set_bit(FLG_TRANSPARENT,
+ &hc->chan[ch].bch->Flags);
+ break;
+ case (ISDN_P_B_HDLC): /* B-channel */
+ case (ISDN_P_TE_S0): /* D-channel */
+ case (ISDN_P_NT_S0):
+ case (ISDN_P_TE_E1):
+ case (ISDN_P_NT_E1):
+ /* enable TX fifo */
+ HFC_outb(hc, R_FIFO, ch<<1);
+ HFC_wait(hc);
+ if (hc->type == 1 || hc->chan[ch].bch) {
+ /* E1 or B-channel */
+ HFC_outb(hc, A_CON_HDLC, flow_tx | 0x04);
+ HFC_outb(hc, A_SUBCH_CFG, 0);
+ } else {
+ /* D-Channel without HDLC fill flags */
+ HFC_outb(hc, A_CON_HDLC, flow_tx | 0x04 | V_IFF);
+ HFC_outb(hc, A_SUBCH_CFG, 2);
+ }
+ HFC_outb(hc, A_IRQ_MSK, V_IRQ);
+ HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
+ HFC_wait(hc);
+ /* enable RX fifo */
+ HFC_outb(hc, R_FIFO, (ch<<1)|1);
+ HFC_wait(hc);
+ HFC_outb(hc, A_CON_HDLC, flow_rx | 0x04);
+ if (hc->type == 1 || hc->chan[ch].bch)
+ HFC_outb(hc, A_SUBCH_CFG, 0); /* full 8 bits */
+ else
+ HFC_outb(hc, A_SUBCH_CFG, 2); /* 2 bits dchannel */
+ HFC_outb(hc, A_IRQ_MSK, V_IRQ);
+ HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
+ HFC_wait(hc);
+ if (hc->chan[ch].bch) {
+ test_and_set_bit(FLG_HDLC, &hc->chan[ch].bch->Flags);
+ if (hc->type != 1) {
+ hc->hw.a_st_ctrl0[hc->chan[ch].port] |=
+ ((ch&0x3) == 0) ? V_B1_EN : V_B2_EN;
+ HFC_outb(hc, R_ST_SEL, hc->chan[ch].port);
+ /* undocumented: delay after R_ST_SEL */
+ udelay(1);
+ HFC_outb(hc, A_ST_CTRL0,
+ hc->hw.a_st_ctrl0[hc->chan[ch].port]);
+ }
+ }
+ break;
+ default:
+ printk(KERN_DEBUG "%s: protocol not known %x\n",
+ __func__, protocol);
+ hc->chan[ch].protocol = ISDN_P_NONE;
+ return -ENOPROTOOPT;
+ }
+ hc->chan[ch].protocol = protocol;
+ return 0;
+}
+
+
+/*
+ * connect/disconnect PCM
+ */
+
+static void
+hfcmulti_pcm(struct hfc_multi *hc, int ch, int slot_tx, int bank_tx,
+ int slot_rx, int bank_rx)
+{
+ if (slot_tx < 0 || slot_rx < 0 || bank_tx < 0 || bank_rx < 0) {
+ /* disable PCM */
+ mode_hfcmulti(hc, ch, hc->chan[ch].protocol, -1, 0, -1, 0);
+ return;
+ }
+
+ /* enable pcm */
+ mode_hfcmulti(hc, ch, hc->chan[ch].protocol, slot_tx, bank_tx,
+ slot_rx, bank_rx);
+}
+
+/*
+ * set/disable conference
+ */
+
+static void
+hfcmulti_conf(struct hfc_multi *hc, int ch, int num)
+{
+ if (num >= 0 && num <= 7)
+ hc->chan[ch].conf = num;
+ else
+ hc->chan[ch].conf = -1;
+ mode_hfcmulti(hc, ch, hc->chan[ch].protocol, hc->chan[ch].slot_tx,
+ hc->chan[ch].bank_tx, hc->chan[ch].slot_rx,
+ hc->chan[ch].bank_rx);
+}
+
+
+/*
+ * set/disable sample loop
+ */
+
+/* NOTE: this function is experimental and therefore disabled */
+
+/*
+ * Layer 1 callback function
+ */
+static int
+hfcm_l1callback(struct dchannel *dch, u_int cmd)
+{
+ struct hfc_multi *hc = dch->hw;
+ u_long flags;
+
+ switch (cmd) {
+ case INFO3_P8:
+ case INFO3_P10:
+ break;
+ case HW_RESET_REQ:
+ /* start activation */
+ spin_lock_irqsave(&hc->lock, flags);
+ if (hc->type == 1) {
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG
+ "%s: HW_RESET_REQ no BRI\n",
+ __func__);
+ } else {
+ HFC_outb(hc, R_ST_SEL, hc->chan[dch->slot].port);
+ /* undocumented: delay after R_ST_SEL */
+ udelay(1);
+ HFC_outb(hc, A_ST_WR_STATE, V_ST_LD_STA | 3); /* F3 */
+ udelay(6); /* wait at least 5.21us */
+ HFC_outb(hc, A_ST_WR_STATE, 3);
+ HFC_outb(hc, A_ST_WR_STATE, 3 | (V_ST_ACT*3));
+ /* activate */
+ }
+ spin_unlock_irqrestore(&hc->lock, flags);
+ l1_event(dch->l1, HW_POWERUP_IND);
+ break;
+ case HW_DEACT_REQ:
+ /* start deactivation */
+ spin_lock_irqsave(&hc->lock, flags);
+ if (hc->type == 1) {
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG
+ "%s: HW_DEACT_REQ no BRI\n",
+ __func__);
+ } else {
+ HFC_outb(hc, R_ST_SEL, hc->chan[dch->slot].port);
+ /* undocumented: delay after R_ST_SEL */
+ udelay(1);
+ HFC_outb(hc, A_ST_WR_STATE, V_ST_ACT*2);
+ /* deactivate */
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
+ hc->syncronized &=
+ ~(1 << hc->chan[dch->slot].port);
+ plxsd_checksync(hc, 0);
+ }
+ }
+ skb_queue_purge(&dch->squeue);
+ if (dch->tx_skb) {
+ dev_kfree_skb(dch->tx_skb);
+ dch->tx_skb = NULL;
+ }
+ dch->tx_idx = 0;
+ if (dch->rx_skb) {
+ dev_kfree_skb(dch->rx_skb);
+ dch->rx_skb = NULL;
+ }
+ test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+ if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
+ del_timer(&dch->timer);
+ spin_unlock_irqrestore(&hc->lock, flags);
+ break;
+ case HW_POWERUP_REQ:
+ spin_lock_irqsave(&hc->lock, flags);
+ if (hc->type == 1) {
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG
+ "%s: HW_POWERUP_REQ no BRI\n",
+ __func__);
+ } else {
+ HFC_outb(hc, R_ST_SEL, hc->chan[dch->slot].port);
+ /* undocumented: delay after R_ST_SEL */
+ udelay(1);
+ HFC_outb(hc, A_ST_WR_STATE, 3 | 0x10); /* activate */
+ udelay(6); /* wait at least 5.21us */
+ HFC_outb(hc, A_ST_WR_STATE, 3); /* activate */
+ }
+ spin_unlock_irqrestore(&hc->lock, flags);
+ break;
+ case PH_ACTIVATE_IND:
+ test_and_set_bit(FLG_ACTIVE, &dch->Flags);
+ _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
+ GFP_ATOMIC);
+ break;
+ case PH_DEACTIVATE_IND:
+ test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
+ _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
+ GFP_ATOMIC);
+ break;
+ default:
+ if (dch->debug & DEBUG_HW)
+ printk(KERN_DEBUG "%s: unknown command %x\n",
+ __func__, cmd);
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Layer2 -> Layer 1 Transfer
+ */
+
+static int
+handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
+{
+ struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
+ struct dchannel *dch = container_of(dev, struct dchannel, dev);
+ struct hfc_multi *hc = dch->hw;
+ struct mISDNhead *hh = mISDN_HEAD_P(skb);
+ int ret = -EINVAL;
+ unsigned int id;
+ u_long flags;
+
+ switch (hh->prim) {
+ case PH_DATA_REQ:
+ if (skb->len < 1)
+ break;
+ spin_lock_irqsave(&hc->lock, flags);
+ ret = dchannel_senddata(dch, skb);
+ if (ret > 0) { /* direct TX */
+ id = hh->id; /* skb can be freed */
+ hfcmulti_tx(hc, dch->slot);
+ ret = 0;
+ /* start fifo */
+ HFC_outb(hc, R_FIFO, 0);
+ HFC_wait(hc);
+ spin_unlock_irqrestore(&hc->lock, flags);
+ queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
+ } else
+ spin_unlock_irqrestore(&hc->lock, flags);
+ return ret;
+ case PH_ACTIVATE_REQ:
+ if (dch->dev.D.protocol != ISDN_P_TE_S0) {
+ spin_lock_irqsave(&hc->lock, flags);
+ ret = 0;
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG
+ "%s: PH_ACTIVATE port %d (0..%d)\n",
+ __func__, hc->chan[dch->slot].port,
+ hc->ports-1);
+ /* start activation */
+ if (hc->type == 1) {
+ ph_state_change(dch);
+ if (debug & DEBUG_HFCMULTI_STATE)
+ printk(KERN_DEBUG
+ "%s: E1 report state %x \n",
+ __func__, dch->state);
+ } else {
+ HFC_outb(hc, R_ST_SEL,
+ hc->chan[dch->slot].port);
+ /* undocumented: delay after R_ST_SEL */
+ udelay(1);
+ HFC_outb(hc, A_ST_WR_STATE, V_ST_LD_STA | 1);
+ /* G1 */
+ udelay(6); /* wait at least 5.21us */
+ HFC_outb(hc, A_ST_WR_STATE, 1);
+ HFC_outb(hc, A_ST_WR_STATE, 1 |
+ (V_ST_ACT*3)); /* activate */
+ dch->state = 1;
+ }
+ spin_unlock_irqrestore(&hc->lock, flags);
+ } else
+ ret = l1_event(dch->l1, hh->prim);
+ break;
+ case PH_DEACTIVATE_REQ:
+ test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
+ if (dch->dev.D.protocol != ISDN_P_TE_S0) {
+ spin_lock_irqsave(&hc->lock, flags);
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG
+ "%s: PH_DEACTIVATE port %d (0..%d)\n",
+ __func__, hc->chan[dch->slot].port,
+ hc->ports-1);
+ /* start deactivation */
+ if (hc->type == 1) {
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG
+ "%s: PH_DEACTIVATE no BRI\n",
+ __func__);
+ } else {
+ HFC_outb(hc, R_ST_SEL,
+ hc->chan[dch->slot].port);
+ /* undocumented: delay after R_ST_SEL */
+ udelay(1);
+ HFC_outb(hc, A_ST_WR_STATE, V_ST_ACT * 2);
+ /* deactivate */
+ dch->state = 1;
+ }
+ skb_queue_purge(&dch->squeue);
+ if (dch->tx_skb) {
+ dev_kfree_skb(dch->tx_skb);
+ dch->tx_skb = NULL;
+ }
+ dch->tx_idx = 0;
+ if (dch->rx_skb) {
+ dev_kfree_skb(dch->rx_skb);
+ dch->rx_skb = NULL;
+ }
+ test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+ if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
+ del_timer(&dch->timer);
+#ifdef FIXME
+ if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
+ dchannel_sched_event(&hc->dch, D_CLEARBUSY);
+#endif
+ ret = 0;
+ spin_unlock_irqrestore(&hc->lock, flags);
+ } else
+ ret = l1_event(dch->l1, hh->prim);
+ break;
+ }
+ if (!ret)
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static void
+deactivate_bchannel(struct bchannel *bch)
+{
+ struct hfc_multi *hc = bch->hw;
+ u_long flags;
+
+ spin_lock_irqsave(&hc->lock, flags);
+ if (test_and_clear_bit(FLG_TX_NEXT, &bch->Flags)) {
+ dev_kfree_skb(bch->next_skb);
+ bch->next_skb = NULL;
+ }
+ if (bch->tx_skb) {
+ dev_kfree_skb(bch->tx_skb);
+ bch->tx_skb = NULL;
+ }
+ bch->tx_idx = 0;
+ if (bch->rx_skb) {
+ dev_kfree_skb(bch->rx_skb);
+ bch->rx_skb = NULL;
+ }
+ hc->chan[bch->slot].coeff_count = 0;
+ test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
+ test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
+ hc->chan[bch->slot].rx_off = 0;
+ hc->chan[bch->slot].conf = -1;
+ mode_hfcmulti(hc, bch->slot, ISDN_P_NONE, -1, 0, -1, 0);
+ spin_unlock_irqrestore(&hc->lock, flags);
+}
+
+static int
+handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
+{
+ struct bchannel *bch = container_of(ch, struct bchannel, ch);
+ struct hfc_multi *hc = bch->hw;
+ int ret = -EINVAL;
+ struct mISDNhead *hh = mISDN_HEAD_P(skb);
+ unsigned int id;
+ u_long flags;
+
+ switch (hh->prim) {
+ case PH_DATA_REQ:
+ if (!skb->len)
+ break;
+ spin_lock_irqsave(&hc->lock, flags);
+ ret = bchannel_senddata(bch, skb);
+ if (ret > 0) { /* direct TX */
+ id = hh->id; /* skb can be freed */
+ hfcmulti_tx(hc, bch->slot);
+ ret = 0;
+ /* start fifo */
+ HFC_outb_nodebug(hc, R_FIFO, 0);
+ HFC_wait_nodebug(hc);
+ if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) {
+ spin_unlock_irqrestore(&hc->lock, flags);
+ queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
+ } else
+ spin_unlock_irqrestore(&hc->lock, flags);
+ } else
+ spin_unlock_irqrestore(&hc->lock, flags);
+ return ret;
+ case PH_ACTIVATE_REQ:
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG "%s: PH_ACTIVATE ch %d (0..32)\n",
+ __func__, bch->slot);
+ spin_lock_irqsave(&hc->lock, flags);
+ /* activate B-channel if not already activated */
+ if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags)) {
+ hc->chan[bch->slot].txpending = 0;
+ ret = mode_hfcmulti(hc, bch->slot,
+ ch->protocol,
+ hc->chan[bch->slot].slot_tx,
+ hc->chan[bch->slot].bank_tx,
+ hc->chan[bch->slot].slot_rx,
+ hc->chan[bch->slot].bank_rx);
+ if (!ret) {
+ if (ch->protocol == ISDN_P_B_RAW && !hc->dtmf
+ && test_bit(HFC_CHIP_DTMF, &hc->chip)) {
+ /* start decoder */
+ hc->dtmf = 1;
+ if (debug & DEBUG_HFCMULTI_DTMF)
+ printk(KERN_DEBUG
+ "%s: start dtmf decoder\n",
+ __func__);
+ HFC_outb(hc, R_DTMF, hc->hw.r_dtmf |
+ V_RST_DTMF);
+ }
+ }
+ } else
+ ret = 0;
+ spin_unlock_irqrestore(&hc->lock, flags);
+ if (!ret)
+ _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL,
+ GFP_KERNEL);
+ break;
+ case PH_CONTROL_REQ:
+ spin_lock_irqsave(&hc->lock, flags);
+ switch (hh->id) {
+ case HFC_SPL_LOOP_ON: /* set sample loop */
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG
+ "%s: HFC_SPL_LOOP_ON (len = %d)\n",
+ __func__, skb->len);
+ ret = 0;
+ break;
+ case HFC_SPL_LOOP_OFF: /* set silence */
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG "%s: HFC_SPL_LOOP_OFF\n",
+ __func__);
+ ret = 0;
+ break;
+ default:
+ printk(KERN_ERR
+ "%s: unknown PH_CONTROL_REQ info %x\n",
+ __func__, hh->id);
+ ret = -EINVAL;
+ }
+ spin_unlock_irqrestore(&hc->lock, flags);
+ break;
+ case PH_DEACTIVATE_REQ:
+ deactivate_bchannel(bch); /* locked there */
+ _queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0, NULL,
+ GFP_KERNEL);
+ ret = 0;
+ break;
+ }
+ if (!ret)
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+/*
+ * bchannel control function
+ */
+static int
+channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
+{
+ int ret = 0;
+ struct dsp_features *features =
+ (struct dsp_features *)(*((u_long *)&cq->p1));
+ struct hfc_multi *hc = bch->hw;
+ int slot_tx;
+ int bank_tx;
+ int slot_rx;
+ int bank_rx;
+ int num;
+
+ switch (cq->op) {
+ case MISDN_CTRL_GETOP:
+ cq->op = MISDN_CTRL_HFC_OP | MISDN_CTRL_HW_FEATURES_OP
+ | MISDN_CTRL_RX_OFF;
+ break;
+ case MISDN_CTRL_RX_OFF: /* turn off / on rx stream */
+ hc->chan[bch->slot].rx_off = !!cq->p1;
+ if (!hc->chan[bch->slot].rx_off) {
+ /* reset fifo on rx on */
+ HFC_outb_nodebug(hc, R_FIFO, (bch->slot << 1) | 1);
+ HFC_wait_nodebug(hc);
+ HFC_outb_nodebug(hc, R_INC_RES_FIFO, V_RES_F);
+ HFC_wait_nodebug(hc);
+ }
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG "%s: RX_OFF request (nr=%d off=%d)\n",
+ __func__, bch->nr, hc->chan[bch->slot].rx_off);
+ break;
+ case MISDN_CTRL_HW_FEATURES: /* fill features structure */
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG "%s: HW_FEATURE request\n",
+ __func__);
+ /* create confirm */
+ features->hfc_id = hc->id;
+ if (test_bit(HFC_CHIP_DTMF, &hc->chip))
+ features->hfc_dtmf = 1;
+ features->hfc_loops = 0;
+ if (test_bit(HFC_CHIP_B410P, &hc->chip)) {
+ features->hfc_echocanhw = 1;
+ } else {
+ features->pcm_id = hc->pcm;
+ features->pcm_slots = hc->slots;
+ features->pcm_banks = 2;
+ }
+ break;
+ case MISDN_CTRL_HFC_PCM_CONN: /* connect to pcm timeslot (0..N) */
+ slot_tx = cq->p1 & 0xff;
+ bank_tx = cq->p1 >> 8;
+ slot_rx = cq->p2 & 0xff;
+ bank_rx = cq->p2 >> 8;
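+ /*
+ * Note: the decoding above implies the encoding
+ * cq->p1 = slot_tx | (bank_tx << 8) and
+ * cq->p2 = slot_rx | (bank_rx << 8).
+ */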
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG
+ "%s: HFC_PCM_CONN slot %d bank %d (TX) "
+ "slot %d bank %d (RX)\n",
+ __func__, slot_tx, bank_tx,
+ slot_rx, bank_rx);
+ if (slot_tx < hc->slots && bank_tx <= 2 &&
+ slot_rx < hc->slots && bank_rx <= 2)
+ hfcmulti_pcm(hc, bch->slot,
+ slot_tx, bank_tx, slot_rx, bank_rx);
+ else {
+ printk(KERN_WARNING
+ "%s: HFC_PCM_CONN slot %d bank %d (TX) "
+ "slot %d bank %d (RX) out of range\n",
+ __func__, slot_tx, bank_tx,
+ slot_rx, bank_rx);
+ ret = -EINVAL;
+ }
+ break;
+ case MISDN_CTRL_HFC_PCM_DISC: /* release interface from pcm timeslot */
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG "%s: HFC_PCM_DISC\n",
+ __func__);
+ hfcmulti_pcm(hc, bch->slot, -1, 0, -1, 0);
+ break;
+ case MISDN_CTRL_HFC_CONF_JOIN: /* join conference (0..7) */
+ num = cq->p1 & 0xff;
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG "%s: HFC_CONF_JOIN conf %d\n",
+ __func__, num);
+ if (num <= 7)
+ hfcmulti_conf(hc, bch->slot, num);
+ else {
+ printk(KERN_WARNING
+ "%s: HW_CONF_JOIN conf %d out of range\n",
+ __func__, num);
+ ret = -EINVAL;
+ }
+ break;
+ case MISDN_CTRL_HFC_CONF_SPLIT: /* split conference */
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG "%s: HFC_CONF_SPLIT\n", __func__);
+ hfcmulti_conf(hc, bch->slot, -1);
+ break;
+ case MISDN_CTRL_HFC_ECHOCAN_ON:
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG "%s: HFC_ECHOCAN_ON\n", __func__);
+ if (test_bit(HFC_CHIP_B410P, &hc->chip))
+ vpm_echocan_on(hc, bch->slot, cq->p1);
+ else
+ ret = -EINVAL;
+ break;
+
+ case MISDN_CTRL_HFC_ECHOCAN_OFF:
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG "%s: HFC_ECHOCAN_OFF\n",
+ __func__);
+ if (test_bit(HFC_CHIP_B410P, &hc->chip))
+ vpm_echocan_off(hc, bch->slot);
+ else
+ ret = -EINVAL;
+ break;
+ default:
+ printk(KERN_WARNING "%s: unknown Op %x\n",
+ __func__, cq->op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int
+hfcm_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
+{
+ struct bchannel *bch = container_of(ch, struct bchannel, ch);
+ struct hfc_multi *hc = bch->hw;
+ int err = -EINVAL;
+ u_long flags;
+
+ if (bch->debug & DEBUG_HW)
+ printk(KERN_DEBUG "%s: cmd:%x %p\n",
+ __func__, cmd, arg);
+ switch (cmd) {
+ case CLOSE_CHANNEL:
+ test_and_clear_bit(FLG_OPEN, &bch->Flags);
+ if (test_bit(FLG_ACTIVE, &bch->Flags))
+ deactivate_bchannel(bch); /* locked there */
+ ch->protocol = ISDN_P_NONE;
+ ch->peer = NULL;
+ module_put(THIS_MODULE);
+ err = 0;
+ break;
+ case CONTROL_CHANNEL:
+ spin_lock_irqsave(&hc->lock, flags);
+ err = channel_bctrl(bch, arg);
+ spin_unlock_irqrestore(&hc->lock, flags);
+ break;
+ default:
+ printk(KERN_WARNING "%s: unknown prim(%x)\n",
+ __func__, cmd);
+ }
+ return err;
+}
+
+/*
+ * handle D-channel events
+ *
+ * handle state change event
+ */
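+/*
+ * The numeric states below come from the chip's layer-1 state machines:
+ * E1 reports state 1 when the line is up, S/T TE ports report F-states
+ * (F3 deactivated ... F7 activated) and S/T NT ports G-states (G1-G4),
+ * which the switch statements below translate into mISDN events.
+ */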
+static void
+ph_state_change(struct dchannel *dch)
+{
+ struct hfc_multi *hc;
+ int ch, i;
+
+ if (!dch) {
+ printk(KERN_WARNING "%s: ERROR given dch is NULL\n",
+ __func__);
+ return;
+ }
+ hc = dch->hw;
+ ch = dch->slot;
+
+ if (hc->type == 1) {
+ if (dch->dev.D.protocol == ISDN_P_TE_E1) {
+ if (debug & DEBUG_HFCMULTI_STATE)
+ printk(KERN_DEBUG
+ "%s: E1 TE (id=%d) newstate %x\n",
+ __func__, hc->id, dch->state);
+ } else {
+ if (debug & DEBUG_HFCMULTI_STATE)
+ printk(KERN_DEBUG
+ "%s: E1 NT (id=%d) newstate %x\n",
+ __func__, hc->id, dch->state);
+ }
+ switch (dch->state) {
+ case (1):
+ if (hc->e1_state != 1) {
+ for (i = 1; i <= 31; i++) {
+ /* reset fifos on e1 activation */
+ HFC_outb_nodebug(hc, R_FIFO, (i << 1) | 1);
+ HFC_wait_nodebug(hc);
+ HFC_outb_nodebug(hc,
+ R_INC_RES_FIFO, V_RES_F);
+ HFC_wait_nodebug(hc);
+ }
+ }
+ test_and_set_bit(FLG_ACTIVE, &dch->Flags);
+ _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
+ MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
+ break;
+
+ default:
+ if (hc->e1_state != 1)
+ return;
+ test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
+ _queue_data(&dch->dev.D, PH_DEACTIVATE_IND,
+ MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
+ }
+ hc->e1_state = dch->state;
+ } else {
+ if (dch->dev.D.protocol == ISDN_P_TE_S0) {
+ if (debug & DEBUG_HFCMULTI_STATE)
+ printk(KERN_DEBUG
+ "%s: S/T TE newstate %x\n",
+ __func__, dch->state);
+ switch (dch->state) {
+ case (0):
+ l1_event(dch->l1, HW_RESET_IND);
+ break;
+ case (3):
+ l1_event(dch->l1, HW_DEACT_IND);
+ break;
+ case (5):
+ case (8):
+ l1_event(dch->l1, ANYSIGNAL);
+ break;
+ case (6):
+ l1_event(dch->l1, INFO2);
+ break;
+ case (7):
+ l1_event(dch->l1, INFO4_P8);
+ break;
+ }
+ } else {
+ if (debug & DEBUG_HFCMULTI_STATE)
+ printk(KERN_DEBUG "%s: S/T NT newstate %x\n",
+ __func__, dch->state);
+ switch (dch->state) {
+ case (2):
+ if (hc->chan[ch].nt_timer == 0) {
+ hc->chan[ch].nt_timer = -1;
+ HFC_outb(hc, R_ST_SEL,
+ hc->chan[ch].port);
+ /* undocumented: delay after R_ST_SEL */
+ udelay(1);
+ HFC_outb(hc, A_ST_WR_STATE, 4 |
+ V_ST_LD_STA); /* G4 */
+ udelay(6); /* wait at least 5.21us */
+ HFC_outb(hc, A_ST_WR_STATE, 4);
+ dch->state = 4;
+ } else {
+ /* one extra count for the next event */
+ hc->chan[ch].nt_timer =
+ nt_t1_count[poll_timer] + 1;
+ HFC_outb(hc, R_ST_SEL,
+ hc->chan[ch].port);
+ /* undocumented: delay after R_ST_SEL */
+ udelay(1);
+ /* allow G2 -> G3 transition */
+ HFC_outb(hc, A_ST_WR_STATE, 2 |
+ V_SET_G2_G3);
+ }
+ break;
+ case (1):
+ hc->chan[ch].nt_timer = -1;
+ test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
+ _queue_data(&dch->dev.D, PH_DEACTIVATE_IND,
+ MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
+ break;
+ case (4):
+ hc->chan[ch].nt_timer = -1;
+ break;
+ case (3):
+ hc->chan[ch].nt_timer = -1;
+ test_and_set_bit(FLG_ACTIVE, &dch->Flags);
+ _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
+ MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
+ break;
+ }
+ }
+ }
+}
+
+/*
+ * called for card mode init message
+ */
+
+static void
+hfcmulti_initmode(struct dchannel *dch)
+{
+ struct hfc_multi *hc = dch->hw;
+ u_char a_st_wr_state, r_e1_wr_sta;
+ int i, pt;
+
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: entered\n", __func__);
+
+ if (hc->type == 1) {
+ hc->chan[hc->dslot].slot_tx = -1;
+ hc->chan[hc->dslot].slot_rx = -1;
+ hc->chan[hc->dslot].conf = -1;
+ if (hc->dslot) {
+ mode_hfcmulti(hc, hc->dslot, dch->dev.D.protocol,
+ -1, 0, -1, 0);
+ dch->timer.function = (void *) hfcmulti_dbusy_timer;
+ dch->timer.data = (long) dch;
+ init_timer(&dch->timer);
+ }
+ for (i = 1; i <= 31; i++) {
+ if (i == hc->dslot)
+ continue;
+ hc->chan[i].slot_tx = -1;
+ hc->chan[i].slot_rx = -1;
+ hc->chan[i].conf = -1;
+ mode_hfcmulti(hc, i, ISDN_P_NONE, -1, 0, -1, 0);
+ }
+ /* E1 */
+ if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dslot].cfg)) {
+ HFC_outb(hc, R_LOS0, 255); /* 2 ms */
+ HFC_outb(hc, R_LOS1, 255); /* 512 ms */
+ }
+ if (test_bit(HFC_CFG_OPTICAL, &hc->chan[hc->dslot].cfg)) {
+ HFC_outb(hc, R_RX0, 0);
+ hc->hw.r_tx0 = 0 | V_OUT_EN;
+ } else {
+ HFC_outb(hc, R_RX0, 1);
+ hc->hw.r_tx0 = 1 | V_OUT_EN;
+ }
+ hc->hw.r_tx1 = V_ATX | V_NTRI;
+ HFC_outb(hc, R_TX0, hc->hw.r_tx0);
+ HFC_outb(hc, R_TX1, hc->hw.r_tx1);
+ HFC_outb(hc, R_TX_FR0, 0x00);
+ HFC_outb(hc, R_TX_FR1, 0xf8);
+
+ if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dslot].cfg))
+ HFC_outb(hc, R_TX_FR2, V_TX_MF | V_TX_E | V_NEG_E);
+
+ HFC_outb(hc, R_RX_FR0, V_AUTO_RESYNC | V_AUTO_RECO | 0);
+
+ if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dslot].cfg))
+ HFC_outb(hc, R_RX_FR1, V_RX_MF | V_RX_MF_SYNC);
+
+ if (dch->dev.D.protocol == ISDN_P_NT_E1) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: E1 port is NT-mode\n",
+ __func__);
+ r_e1_wr_sta = 0; /* G0 */
+ hc->e1_getclock = 0;
+ } else {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: E1 port is TE-mode\n",
+ __func__);
+ r_e1_wr_sta = 0; /* F0 */
+ hc->e1_getclock = 1;
+ }
+ if (test_bit(HFC_CHIP_RX_SYNC, &hc->chip))
+ HFC_outb(hc, R_SYNC_OUT, V_SYNC_E1_RX);
+ else
+ HFC_outb(hc, R_SYNC_OUT, 0);
+ if (test_bit(HFC_CHIP_E1CLOCK_GET, &hc->chip))
+ hc->e1_getclock = 1;
+ if (test_bit(HFC_CHIP_E1CLOCK_PUT, &hc->chip))
+ hc->e1_getclock = 0;
+ if (test_bit(HFC_CHIP_PCM_SLAVE, &hc->chip)) {
+ /* SLAVE (clock master) */
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG
+ "%s: E1 port is clock master "
+ "(clock from PCM)\n", __func__);
+ HFC_outb(hc, R_SYNC_CTRL, V_EXT_CLK_SYNC | V_PCM_SYNC);
+ } else {
+ if (hc->e1_getclock) {
+ /* MASTER (clock slave) */
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG
+ "%s: E1 port is clock slave "
+ "(clock to PCM)\n", __func__);
+ HFC_outb(hc, R_SYNC_CTRL, V_SYNC_OFFS);
+ } else {
+ /* MASTER (clock master) */
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: E1 port is "
+ "clock master "
+ "(clock from QUARTZ)\n",
+ __func__);
+ HFC_outb(hc, R_SYNC_CTRL, V_EXT_CLK_SYNC |
+ V_PCM_SYNC | V_JATT_OFF);
+ HFC_outb(hc, R_SYNC_OUT, 0);
+ }
+ }
+ HFC_outb(hc, R_JATT_ATT, 0x9c); /* undoc register */
+ HFC_outb(hc, R_PWM_MD, V_PWM0_MD);
+ HFC_outb(hc, R_PWM0, 0x50);
+ HFC_outb(hc, R_PWM1, 0xff);
+ /* state machine setup */
+ HFC_outb(hc, R_E1_WR_STA, r_e1_wr_sta | V_E1_LD_STA);
+ udelay(6); /* wait at least 5.21us */
+ HFC_outb(hc, R_E1_WR_STA, r_e1_wr_sta);
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
+ hc->syncronized = 0;
+ plxsd_checksync(hc, 0);
+ }
+ } else {
+ i = dch->slot;
+ hc->chan[i].slot_tx = -1;
+ hc->chan[i].slot_rx = -1;
+ hc->chan[i].conf = -1;
+ mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0);
+ dch->timer.function = (void *)hfcmulti_dbusy_timer;
+ dch->timer.data = (long) dch;
+ init_timer(&dch->timer);
+ hc->chan[i - 2].slot_tx = -1;
+ hc->chan[i - 2].slot_rx = -1;
+ hc->chan[i - 2].conf = -1;
+ mode_hfcmulti(hc, i - 2, ISDN_P_NONE, -1, 0, -1, 0);
+ hc->chan[i - 1].slot_tx = -1;
+ hc->chan[i - 1].slot_rx = -1;
+ hc->chan[i - 1].conf = -1;
+ mode_hfcmulti(hc, i - 1, ISDN_P_NONE, -1, 0, -1, 0);
+ /* ST */
+ pt = hc->chan[i].port;
+ /* select interface */
+ HFC_outb(hc, R_ST_SEL, pt);
+ /* undocumented: delay after R_ST_SEL */
+ udelay(1);
+ if (dch->dev.D.protocol == ISDN_P_NT_S0) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG
+ "%s: ST port %d is NT-mode\n",
+ __func__, pt);
+ /* clock delay */
+ HFC_outb(hc, A_ST_CLK_DLY, clockdelay_nt);
+ a_st_wr_state = 1; /* G1 */
+ hc->hw.a_st_ctrl0[pt] = V_ST_MD;
+ } else {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG
+ "%s: ST port %d is TE-mode\n",
+ __func__, pt);
+ /* clock delay */
+ HFC_outb(hc, A_ST_CLK_DLY, clockdelay_te);
+ a_st_wr_state = 2; /* F2 */
+ hc->hw.a_st_ctrl0[pt] = 0;
+ }
+ if (!test_bit(HFC_CFG_NONCAP_TX, &hc->chan[i].cfg))
+ hc->hw.a_st_ctrl0[pt] |= V_TX_LI;
+ /* line setup */
+ HFC_outb(hc, A_ST_CTRL0, hc->hw.a_st_ctrl0[pt]);
+ /* disable E-channel */
+ if ((dch->dev.D.protocol == ISDN_P_NT_S0) ||
+ test_bit(HFC_CFG_DIS_ECHANNEL, &hc->chan[i].cfg))
+ HFC_outb(hc, A_ST_CTRL1, V_E_IGNO);
+ else
+ HFC_outb(hc, A_ST_CTRL1, 0);
+ /* enable B-channel receive */
+ HFC_outb(hc, A_ST_CTRL2, V_B1_RX_EN | V_B2_RX_EN);
+ /* state machine setup */
+ HFC_outb(hc, A_ST_WR_STATE, a_st_wr_state | V_ST_LD_STA);
+ udelay(6); /* wait at least 5.21us */
+ HFC_outb(hc, A_ST_WR_STATE, a_st_wr_state);
+ hc->hw.r_sci_msk |= 1 << pt;
+ /* state machine interrupts */
+ HFC_outb(hc, R_SCI_MSK, hc->hw.r_sci_msk);
+ /* unset sync on port */
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
+ hc->syncronized &=
+ ~(1 << hc->chan[dch->slot].port);
+ plxsd_checksync(hc, 0);
+ }
+ }
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk("%s: done\n", __func__);
+}
+
+
+static int
+open_dchannel(struct hfc_multi *hc, struct dchannel *dch,
+ struct channel_req *rq)
+{
+ int err = 0;
+ u_long flags;
+
+ if (debug & DEBUG_HW_OPEN)
+ printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__,
+ dch->dev.id, __builtin_return_address(0));
+ if (rq->protocol == ISDN_P_NONE)
+ return -EINVAL;
+ if ((dch->dev.D.protocol != ISDN_P_NONE) &&
+ (dch->dev.D.protocol != rq->protocol)) {
+ if (debug & DEBUG_HFCMULTI_MODE)
+ printk(KERN_WARNING "%s: change protocol %x to %x\n",
+ __func__, dch->dev.D.protocol, rq->protocol);
+ }
+ if ((dch->dev.D.protocol == ISDN_P_TE_S0)
+ && (rq->protocol != ISDN_P_TE_S0))
+ l1_event(dch->l1, CLOSE_CHANNEL);
+ if (dch->dev.D.protocol != rq->protocol) {
+ if (rq->protocol == ISDN_P_TE_S0) {
+ err = create_l1(dch, hfcm_l1callback);
+ if (err)
+ return err;
+ }
+ dch->dev.D.protocol = rq->protocol;
+ spin_lock_irqsave(&hc->lock, flags);
+ hfcmulti_initmode(dch);
+ spin_unlock_irqrestore(&hc->lock, flags);
+ }
+
+ if (((rq->protocol == ISDN_P_NT_S0) && (dch->state == 3)) ||
+ ((rq->protocol == ISDN_P_TE_S0) && (dch->state == 7)) ||
+ ((rq->protocol == ISDN_P_NT_E1) && (dch->state == 1)) ||
+ ((rq->protocol == ISDN_P_TE_E1) && (dch->state == 1))) {
+ _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY,
+ 0, NULL, GFP_KERNEL);
+ }
+ rq->ch = &dch->dev.D;
+ if (!try_module_get(THIS_MODULE))
+ printk(KERN_WARNING "%s:cannot get module\n", __func__);
+ return 0;
+}
+
+static int
+open_bchannel(struct hfc_multi *hc, struct dchannel *dch,
+ struct channel_req *rq)
+{
+ struct bchannel *bch;
+ int ch;
+
+ if (!test_bit(rq->adr.channel, &dch->dev.channelmap[0]))
+ return -EINVAL;
+ if (rq->protocol == ISDN_P_NONE)
+ return -EINVAL;
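+ /*
+ * Channel mapping (matching init_e1_port/init_multi_port below): on
+ * E1 the requested B-channel number is the HFC channel itself; on
+ * S/T the two B-channels occupy the slots directly below the port's
+ * D-channel slot (dch->slot - 2 and dch->slot - 1).
+ */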
+ if (hc->type == 1)
+ ch = rq->adr.channel;
+ else
+ ch = (rq->adr.channel - 1) + (dch->slot - 2);
+ bch = hc->chan[ch].bch;
+ if (!bch) {
+ printk(KERN_ERR "%s:internal error ch %d has no bch\n",
+ __func__, ch);
+ return -EINVAL;
+ }
+ if (test_and_set_bit(FLG_OPEN, &bch->Flags))
+ return -EBUSY; /* b-channel can be only open once */
+ bch->ch.protocol = rq->protocol;
+ hc->chan[ch].rx_off = 0;
+ rq->ch = &bch->ch;
+ if (!try_module_get(THIS_MODULE))
+ printk(KERN_WARNING "%s:cannot get module\n", __func__);
+ return 0;
+}
+
+/*
+ * device control function
+ */
+static int
+channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
+{
+ int ret = 0;
+
+ switch (cq->op) {
+ case MISDN_CTRL_GETOP:
+ cq->op = 0;
+ break;
+ default:
+ printk(KERN_WARNING "%s: unknown Op %x\n",
+ __func__, cq->op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int
+hfcm_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
+{
+ struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
+ struct dchannel *dch = container_of(dev, struct dchannel, dev);
+ struct hfc_multi *hc = dch->hw;
+ struct channel_req *rq;
+ int err = 0;
+ u_long flags;
+
+ if (dch->debug & DEBUG_HW)
+ printk(KERN_DEBUG "%s: cmd:%x %p\n",
+ __func__, cmd, arg);
+ switch (cmd) {
+ case OPEN_CHANNEL:
+ rq = arg;
+ switch (rq->protocol) {
+ case ISDN_P_TE_S0:
+ case ISDN_P_NT_S0:
+ if (hc->type == 1) {
+ err = -EINVAL;
+ break;
+ }
+ err = open_dchannel(hc, dch, rq); /* locked there */
+ break;
+ case ISDN_P_TE_E1:
+ case ISDN_P_NT_E1:
+ if (hc->type != 1) {
+ err = -EINVAL;
+ break;
+ }
+ err = open_dchannel(hc, dch, rq); /* locked there */
+ break;
+ default:
+ spin_lock_irqsave(&hc->lock, flags);
+ err = open_bchannel(hc, dch, rq);
+ spin_unlock_irqrestore(&hc->lock, flags);
+ }
+ break;
+ case CLOSE_CHANNEL:
+ if (debug & DEBUG_HW_OPEN)
+ printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
+ __func__, dch->dev.id,
+ __builtin_return_address(0));
+ module_put(THIS_MODULE);
+ break;
+ case CONTROL_CHANNEL:
+ spin_lock_irqsave(&hc->lock, flags);
+ err = channel_dctrl(dch, arg);
+ spin_unlock_irqrestore(&hc->lock, flags);
+ break;
+ default:
+ if (dch->debug & DEBUG_HW)
+ printk(KERN_DEBUG "%s: unknown command %x\n",
+ __func__, cmd);
+ err = -EINVAL;
+ }
+ return err;
+}
+
+/*
+ * initialize the card
+ */
+
+/*
+ * start timer irq, wait some time and check if we have interrupts.
+ * if not, reset chip and try again.
+ */
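+/*
+ * In practice (see below) this means: request the shared IRQ, run
+ * init_chip(), enable the IRQ output for roughly 100ms and check
+ * hc->irqcnt; if no timer interrupt arrived the init fails with -EIO,
+ * except in PCM slave mode where missing interrupts are tolerated.
+ */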
+static int
+init_card(struct hfc_multi *hc)
+{
+ int err = -EIO;
+ u_long flags;
+ u_short *plx_acc;
+ u_long plx_flags;
+
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: entered\n", __func__);
+
+ spin_lock_irqsave(&hc->lock, flags);
+ /* set interrupts but leave global interrupt disabled */
+ hc->hw.r_irq_ctrl = V_FIFO_IRQ;
+ disable_hwirq(hc);
+ spin_unlock_irqrestore(&hc->lock, flags);
+
+ if (request_irq(hc->pci_dev->irq, hfcmulti_interrupt, IRQF_SHARED,
+ "HFC-multi", hc)) {
+ printk(KERN_WARNING "mISDN: Could not get interrupt %d.\n",
+ hc->pci_dev->irq);
+ return -EIO;
+ }
+ hc->irq = hc->pci_dev->irq;
+
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
+ spin_lock_irqsave(&plx_lock, plx_flags);
+ plx_acc = (u_short *)(hc->plx_membase+PLX_INTCSR);
+ writew((PLX_INTCSR_PCIINT_ENABLE | PLX_INTCSR_LINTI1_ENABLE),
+ plx_acc); /* enable PCI & LINT1 irq */
+ spin_unlock_irqrestore(&plx_lock, plx_flags);
+ }
+
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: IRQ %d count %d\n",
+ __func__, hc->irq, hc->irqcnt);
+ err = init_chip(hc);
+ if (err)
+ goto error;
+ /*
+ * Finally enable IRQ output
+ * this is only allowed if an IRQ routine is already
+ * established for this HFC, so don't do that earlier
+ */
+ spin_lock_irqsave(&hc->lock, flags);
+ enable_hwirq(hc);
+ spin_unlock_irqrestore(&hc->lock, flags);
+ /* printk(KERN_DEBUG "no master irq set!!!\n"); */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout((100*HZ)/1000); /* Timeout 100ms */
+ /* turn IRQ off until chip is completely initialized */
+ spin_lock_irqsave(&hc->lock, flags);
+ disable_hwirq(hc);
+ spin_unlock_irqrestore(&hc->lock, flags);
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: IRQ %d count %d\n",
+ __func__, hc->irq, hc->irqcnt);
+ if (hc->irqcnt) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: done\n", __func__);
+
+ return 0;
+ }
+ if (test_bit(HFC_CHIP_PCM_SLAVE, &hc->chip)) {
+ printk(KERN_INFO "ignoring missing interrupts\n");
+ return 0;
+ }
+
+ printk(KERN_ERR "HFC PCI: IRQ(%d) getting no interrupts during init.\n",
+ hc->irq);
+
+ err = -EIO;
+
+error:
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
+ spin_lock_irqsave(&plx_lock, plx_flags);
+ plx_acc = (u_short *)(hc->plx_membase+PLX_INTCSR);
+ writew(0x00, plx_acc); /*disable IRQs*/
+ spin_unlock_irqrestore(&plx_lock, plx_flags);
+ }
+
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_WARNING "%s: free irq %d\n", __func__, hc->irq);
+ if (hc->irq) {
+ free_irq(hc->irq, hc);
+ hc->irq = 0;
+ }
+
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: done (err=%d)\n", __func__, err);
+ return err;
+}
+
+/*
+ * find pci device and set it up
+ */
+
+static int
+setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct hm_map *m = (struct hm_map *)ent->driver_data;
+
+ printk(KERN_INFO
+ "HFC-multi: card manufacturer: '%s' card name: '%s' clock: %s\n",
+ m->vendor_name, m->card_name, m->clock2 ? "double" : "normal");
+
+ hc->pci_dev = pdev;
+ if (m->clock2)
+ test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
+
+ if (ent->device == 0xB410) {
+ test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
+ test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
+ test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
+ hc->slots = 32;
+ }
+
+ if (hc->pci_dev->irq <= 0) {
+ printk(KERN_WARNING "HFC-multi: No IRQ for PCI card found.\n");
+ return -EIO;
+ }
+ if (pci_enable_device(hc->pci_dev)) {
+ printk(KERN_WARNING "HFC-multi: Error enabling PCI card.\n");
+ return -EIO;
+ }
+ hc->leds = m->leds;
+ hc->ledstate = 0xAFFEAFFE;
+ hc->opticalsupport = m->opticalsupport;
+
+ /* set memory access methods */
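+ /*
+ * Three access methods are wired up below: HFC_IO_MODE_PCIMEM uses
+ * memory-mapped registers, HFC_IO_MODE_REGIO uses port I/O, and
+ * HFC_IO_MODE_PLXSD is the PCIMEM variant behind a PLX PCI bridge
+ * (it falls through to the PCIMEM accessors).
+ */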
+ if (m->io_mode) /* use mode from card config */
+ hc->io_mode = m->io_mode;
+ switch (hc->io_mode) {
+ case HFC_IO_MODE_PLXSD:
+ test_and_set_bit(HFC_CHIP_PLXSD, &hc->chip);
+ hc->slots = 128; /* required */
+ /* fall through */
+ case HFC_IO_MODE_PCIMEM:
+ hc->HFC_outb = HFC_outb_pcimem;
+ hc->HFC_inb = HFC_inb_pcimem;
+ hc->HFC_inw = HFC_inw_pcimem;
+ hc->HFC_wait = HFC_wait_pcimem;
+ hc->read_fifo = read_fifo_pcimem;
+ hc->write_fifo = write_fifo_pcimem;
+ break;
+ case HFC_IO_MODE_REGIO:
+ hc->HFC_outb = HFC_outb_regio;
+ hc->HFC_inb = HFC_inb_regio;
+ hc->HFC_inw = HFC_inw_regio;
+ hc->HFC_wait = HFC_wait_regio;
+ hc->read_fifo = read_fifo_regio;
+ hc->write_fifo = write_fifo_regio;
+ break;
+ default:
+ printk(KERN_WARNING "HFC-multi: Invalid IO mode.\n");
+ pci_disable_device(hc->pci_dev);
+ return -EIO;
+ }
+ hc->HFC_outb_nodebug = hc->HFC_outb;
+ hc->HFC_inb_nodebug = hc->HFC_inb;
+ hc->HFC_inw_nodebug = hc->HFC_inw;
+ hc->HFC_wait_nodebug = hc->HFC_wait;
+#ifdef HFC_REGISTER_DEBUG
+ hc->HFC_outb = HFC_outb_debug;
+ hc->HFC_inb = HFC_inb_debug;
+ hc->HFC_inw = HFC_inw_debug;
+ hc->HFC_wait = HFC_wait_debug;
+#endif
+ hc->pci_iobase = 0;
+ hc->pci_membase = NULL;
+ hc->plx_membase = NULL;
+
+ switch (hc->io_mode) {
+ case HFC_IO_MODE_PLXSD:
+ hc->plx_origmembase = hc->pci_dev->resource[0].start;
+ /* MEMBASE 1 is PLX PCI Bridge */
+
+ if (!hc->plx_origmembase) {
+ printk(KERN_WARNING
+ "HFC-multi: No IO-Memory for PCI PLX bridge found\n");
+ pci_disable_device(hc->pci_dev);
+ return -EIO;
+ }
+
+ hc->plx_membase = ioremap(hc->plx_origmembase, 0x80);
+ if (!hc->plx_membase) {
+ printk(KERN_WARNING
+ "HFC-multi: failed to remap plx address space. "
+ "(internal error)\n");
+ pci_disable_device(hc->pci_dev);
+ return -EIO;
+ }
+ printk(KERN_INFO
+ "HFC-multi: plx_membase:%#lx plx_origmembase:%#lx\n",
+ (u_long)hc->plx_membase, hc->plx_origmembase);
+
+ hc->pci_origmembase = hc->pci_dev->resource[2].start;
+ /* MEMBASE 1 is PLX PCI Bridge */
+ if (!hc->pci_origmembase) {
+ printk(KERN_WARNING
+ "HFC-multi: No IO-Memory for PCI card found\n");
+ pci_disable_device(hc->pci_dev);
+ return -EIO;
+ }
+
+ hc->pci_membase = ioremap(hc->pci_origmembase, 0x400);
+ if (!hc->pci_membase) {
+ printk(KERN_WARNING "HFC-multi: failed to remap io "
+ "address space. (internal error)\n");
+ pci_disable_device(hc->pci_dev);
+ return -EIO;
+ }
+
+ printk(KERN_INFO
+ "card %d: defined at MEMBASE %#lx (%#lx) IRQ %d HZ %d "
+ "leds-type %d\n",
+ hc->id, (u_long)hc->pci_membase, hc->pci_origmembase,
+ hc->pci_dev->irq, HZ, hc->leds);
+ pci_write_config_word(hc->pci_dev, PCI_COMMAND, PCI_ENA_MEMIO);
+ break;
+ case HFC_IO_MODE_PCIMEM:
+ hc->pci_origmembase = hc->pci_dev->resource[1].start;
+ if (!hc->pci_origmembase) {
+ printk(KERN_WARNING
+ "HFC-multi: No IO-Memory for PCI card found\n");
+ pci_disable_device(hc->pci_dev);
+ return -EIO;
+ }
+
+ hc->pci_membase = ioremap(hc->pci_origmembase, 256);
+ if (!hc->pci_membase) {
+ printk(KERN_WARNING
+ "HFC-multi: failed to remap io address space. "
+ "(internal error)\n");
+ pci_disable_device(hc->pci_dev);
+ return -EIO;
+ }
+ printk(KERN_INFO "card %d: defined at MEMBASE %#lx (%#lx) IRQ %d "
+ "HZ %d leds-type %d\n", hc->id, (u_long)hc->pci_membase,
+ hc->pci_origmembase, hc->pci_dev->irq, HZ, hc->leds);
+ pci_write_config_word(hc->pci_dev, PCI_COMMAND, PCI_ENA_MEMIO);
+ break;
+ case HFC_IO_MODE_REGIO:
+ hc->pci_iobase = (u_int) hc->pci_dev->resource[0].start;
+ if (!hc->pci_iobase) {
+ printk(KERN_WARNING
+ "HFC-multi: No IO for PCI card found\n");
+ pci_disable_device(hc->pci_dev);
+ return -EIO;
+ }
+
+ if (!request_region(hc->pci_iobase, 8, "hfcmulti")) {
+ printk(KERN_WARNING "HFC-multi: failed to request "
+ "address space at 0x%08lx (internal error)\n",
+ hc->pci_iobase);
+ pci_disable_device(hc->pci_dev);
+ return -EIO;
+ }
+
+ printk(KERN_INFO
+ "%s %s: defined at IOBASE %#x IRQ %d HZ %d leds-type %d\n",
+ m->vendor_name, m->card_name, (u_int) hc->pci_iobase,
+ hc->pci_dev->irq, HZ, hc->leds);
+ pci_write_config_word(hc->pci_dev, PCI_COMMAND, PCI_ENA_REGIO);
+ break;
+ default:
+ printk(KERN_WARNING "HFC-multi: Invalid IO mode.\n");
+ pci_disable_device(hc->pci_dev);
+ return -EIO;
+ }
+
+ pci_set_drvdata(hc->pci_dev, hc);
+
+ /* At this point the needed PCI config is done */
+ /* fifos are still not enabled */
+ return 0;
+}
+
+
+/*
+ * remove port
+ */
+
+static void
+release_port(struct hfc_multi *hc, struct dchannel *dch)
+{
+ int pt, ci, i = 0;
+ u_long flags;
+ struct bchannel *pb;
+
+ ci = dch->slot;
+ pt = hc->chan[ci].port;
+
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: entered for port %d\n",
+ __func__, pt + 1);
+
+ if (pt >= hc->ports) {
+ printk(KERN_WARNING "%s: ERROR port out of range (%d).\n",
+ __func__, pt + 1);
+ return;
+ }
+
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: releasing port=%d\n",
+ __func__, pt + 1);
+
+ if (dch->dev.D.protocol == ISDN_P_TE_S0)
+ l1_event(dch->l1, CLOSE_CHANNEL);
+
+ hc->chan[ci].dch = NULL;
+
+ if (hc->created[pt]) {
+ hc->created[pt] = 0;
+ mISDN_unregister_device(&dch->dev);
+ }
+
+ spin_lock_irqsave(&hc->lock, flags);
+
+ if (dch->timer.function) {
+ del_timer(&dch->timer);
+ dch->timer.function = NULL;
+ }
+
+ if (hc->type == 1) { /* E1 */
+ /* remove sync */
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
+ hc->syncronized = 0;
+ plxsd_checksync(hc, 1);
+ }
+ /* free channels */
+ for (i = 0; i <= 31; i++) {
+ if (hc->chan[i].bch) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG
+ "%s: free port %d channel %d\n",
+ __func__, hc->chan[i].port+1, i);
+ pb = hc->chan[i].bch;
+ hc->chan[i].bch = NULL;
+ spin_unlock_irqrestore(&hc->lock, flags);
+ mISDN_freebchannel(pb);
+ kfree(pb);
+ kfree(hc->chan[i].coeff);
+ spin_lock_irqsave(&hc->lock, flags);
+ }
+ }
+ } else {
+ /* remove sync */
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
+ hc->syncronized &=
+ ~(1 << hc->chan[ci].port);
+ plxsd_checksync(hc, 1);
+ }
+ /* free channels */
+ if (hc->chan[ci - 2].bch) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG
+ "%s: free port %d channel %d\n",
+ __func__, hc->chan[ci - 2].port+1,
+ ci - 2);
+ pb = hc->chan[ci - 2].bch;
+ hc->chan[ci - 2].bch = NULL;
+ spin_unlock_irqrestore(&hc->lock, flags);
+ mISDN_freebchannel(pb);
+ kfree(pb);
+ kfree(hc->chan[ci - 2].coeff);
+ spin_lock_irqsave(&hc->lock, flags);
+ }
+ if (hc->chan[ci - 1].bch) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG
+ "%s: free port %d channel %d\n",
+ __func__, hc->chan[ci - 1].port+1,
+ ci - 1);
+ pb = hc->chan[ci - 1].bch;
+ hc->chan[ci - 1].bch = NULL;
+ spin_unlock_irqrestore(&hc->lock, flags);
+ mISDN_freebchannel(pb);
+ kfree(pb);
+ kfree(hc->chan[ci - 1].coeff);
+ spin_lock_irqsave(&hc->lock, flags);
+ }
+ }
+
+ spin_unlock_irqrestore(&hc->lock, flags);
+
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: free port %d channel D\n", __func__, pt);
+ mISDN_freedchannel(dch);
+ kfree(dch);
+
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: done!\n", __func__);
+}
+
+static void
+release_card(struct hfc_multi *hc)
+{
+ u_long flags;
+ int ch;
+
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_WARNING "%s: release card (%d) entered\n",
+ __func__, hc->id);
+
+ spin_lock_irqsave(&hc->lock, flags);
+ disable_hwirq(hc);
+ spin_unlock_irqrestore(&hc->lock, flags);
+
+ udelay(1000);
+
+ /* dimm leds */
+ if (hc->leds)
+ hfcmulti_leds(hc);
+
+ /* disable D-channels & B-channels */
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: disable all channels (d and b)\n",
+ __func__);
+ for (ch = 0; ch <= 31; ch++) {
+ if (hc->chan[ch].dch)
+ release_port(hc, hc->chan[ch].dch);
+ }
+
+ /* release hardware & irq */
+ if (hc->irq) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_WARNING "%s: free irq %d\n",
+ __func__, hc->irq);
+ free_irq(hc->irq, hc);
+ hc->irq = 0;
+
+ }
+ release_io_hfcmulti(hc);
+
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_WARNING "%s: remove instance from list\n",
+ __func__);
+ list_del(&hc->list);
+
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_WARNING "%s: delete instance\n", __func__);
+ if (hc == syncmaster)
+ syncmaster = NULL;
+ kfree(hc);
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_WARNING "%s: card successfully removed\n",
+ __func__);
+}
+
+static int
+init_e1_port(struct hfc_multi *hc, struct hm_map *m)
+{
+ struct dchannel *dch;
+ struct bchannel *bch;
+ int ch, ret = 0;
+ char name[MISDN_MAX_IDLEN];
+
+ dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL);
+ if (!dch)
+ return -ENOMEM;
+ dch->debug = debug;
+ mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, ph_state_change);
+ dch->hw = hc;
+ dch->dev.Dprotocols = (1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1);
+ dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
+ (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
+ dch->dev.D.send = handle_dmsg;
+ dch->dev.D.ctrl = hfcm_dctrl;
+ dch->dev.nrbchan = (hc->dslot) ? 30 : 31;
+ dch->slot = hc->dslot;
+ hc->chan[hc->dslot].dch = dch;
+ hc->chan[hc->dslot].port = 0;
+ hc->chan[hc->dslot].nt_timer = -1;
+ for (ch = 1; ch <= 31; ch++) {
+ if (ch == hc->dslot) /* skip dchannel */
+ continue;
+ bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL);
+ if (!bch) {
+ printk(KERN_ERR "%s: no memory for bchannel\n",
+ __func__);
+ ret = -ENOMEM;
+ goto free_chan;
+ }
+ hc->chan[ch].coeff = kzalloc(512, GFP_KERNEL);
+ if (!hc->chan[ch].coeff) {
+ printk(KERN_ERR "%s: no memory for coeffs\n",
+ __func__);
+ ret = -ENOMEM;
+ goto free_chan;
+ }
+ bch->nr = ch;
+ bch->slot = ch;
+ bch->debug = debug;
+ mISDN_initbchannel(bch, MAX_DATA_MEM);
+ bch->hw = hc;
+ bch->ch.send = handle_bmsg;
+ bch->ch.ctrl = hfcm_bctrl;
+ bch->ch.nr = ch;
+ list_add(&bch->ch.list, &dch->dev.bchannels);
+ hc->chan[ch].bch = bch;
+ hc->chan[ch].port = 0;
+ test_and_set_bit(bch->nr, &dch->dev.channelmap[0]);
+ }
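+ /*
+ * Per-port option bits (port[Port_cnt]) evaluated below: 0x001
+ * optical interface, 0x004 LOS report, 0x008 AIS report, 0x010 SLIP
+ * report, 0x020 RDI report, 0x100 disable CRC4, 0x200/0x400 force
+ * getting/putting the E1 clock, 0x800 disable the JATT PLL, 0x3000
+ * elastic jitter buffer size.
+ */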
+ /* set optical line type */
+ if (port[Port_cnt] & 0x001) {
+ if (!m->opticalsupport) {
+ printk(KERN_INFO
+ "This board has no optical "
+ "support\n");
+ } else {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG
+ "%s: PORT set optical "
+ "interfacs: card(%d) "
+ "port(%d)\n",
+ __func__,
+ HFC_cnt + 1, 1);
+ test_and_set_bit(HFC_CFG_OPTICAL,
+ &hc->chan[hc->dslot].cfg);
+ }
+ }
+ /* set LOS report */
+ if (port[Port_cnt] & 0x004) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: PORT set "
+ "LOS report: card(%d) port(%d)\n",
+ __func__, HFC_cnt + 1, 1);
+ test_and_set_bit(HFC_CFG_REPORT_LOS,
+ &hc->chan[hc->dslot].cfg);
+ }
+ /* set AIS report */
+ if (port[Port_cnt] & 0x008) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: PORT set "
+ "AIS report: card(%d) port(%d)\n",
+ __func__, HFC_cnt + 1, 1);
+ test_and_set_bit(HFC_CFG_REPORT_AIS,
+ &hc->chan[hc->dslot].cfg);
+ }
+ /* set SLIP report */
+ if (port[Port_cnt] & 0x010) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG
+ "%s: PORT set SLIP report: "
+ "card(%d) port(%d)\n",
+ __func__, HFC_cnt + 1, 1);
+ test_and_set_bit(HFC_CFG_REPORT_SLIP,
+ &hc->chan[hc->dslot].cfg);
+ }
+ /* set RDI report */
+ if (port[Port_cnt] & 0x020) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG
+ "%s: PORT set RDI report: "
+ "card(%d) port(%d)\n",
+ __func__, HFC_cnt + 1, 1);
+ test_and_set_bit(HFC_CFG_REPORT_RDI,
+ &hc->chan[hc->dslot].cfg);
+ }
+ /* set CRC-4 Mode */
+ if (!(port[Port_cnt] & 0x100)) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: PORT turn on CRC4 report:"
+ " card(%d) port(%d)\n",
+ __func__, HFC_cnt + 1, 1);
+ test_and_set_bit(HFC_CFG_CRC4,
+ &hc->chan[hc->dslot].cfg);
+ } else {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: PORT turn off CRC4"
+ " report: card(%d) port(%d)\n",
+ __func__, HFC_cnt + 1, 1);
+ }
+ /* set forced clock */
+ if (port[Port_cnt] & 0x0200) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: PORT force getting clock from "
+ "E1: card(%d) port(%d)\n",
+ __func__, HFC_cnt + 1, 1);
+ test_and_set_bit(HFC_CHIP_E1CLOCK_GET, &hc->chip);
+ } else
+ if (port[Port_cnt] & 0x0400) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: PORT force putting clock to "
+ "E1: card(%d) port(%d)\n",
+ __func__, HFC_cnt + 1, 1);
+ test_and_set_bit(HFC_CHIP_E1CLOCK_PUT, &hc->chip);
+ }
+ /* set JATT PLL */
+ if (port[Port_cnt] & 0x0800) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: PORT disable JATT PLL on "
+ "E1: card(%d) port(%d)\n",
+ __func__, HFC_cnt + 1, 1);
+ test_and_set_bit(HFC_CHIP_RX_SYNC, &hc->chip);
+ }
+ /* set elastic jitter buffer */
+ if (port[Port_cnt] & 0x3000) {
+ hc->chan[hc->dslot].jitter = (port[Port_cnt]>>12) & 0x3;
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG
+ "%s: PORT set elastic "
+ "buffer to %d: card(%d) port(%d)\n",
+ __func__, hc->chan[hc->dslot].jitter,
+ HFC_cnt + 1, 1);
+ } else
+ hc->chan[hc->dslot].jitter = 2; /* default */
+ snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d", HFC_cnt + 1);
+ ret = mISDN_register_device(&dch->dev, name);
+ if (ret)
+ goto free_chan;
+ hc->created[0] = 1;
+ return ret;
+free_chan:
+ release_port(hc, dch);
+ return ret;
+}
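+
+/*
+ * For reference, the port[] option bits evaluated in init_e1_port()
+ * above (all taken from the checks in that function):
+ *   0x0001  use the optical interface (if the board supports it)
+ *   0x0004  report LOS              0x0008  report AIS
+ *   0x0010  report SLIP             0x0020  report RDI
+ *   0x0100  turn CRC-4 mode off (it is on by default)
+ *   0x0200  force getting clock from E1
+ *   0x0400  force putting clock to E1
+ *   0x0800  disable JATT PLL, use RX sync instead
+ *   0x3000  elastic jitter buffer setting (0-3, default 2)
+ */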
+
+static int
+init_multi_port(struct hfc_multi *hc, int pt)
+{
+ struct dchannel *dch;
+ struct bchannel *bch;
+ int ch, i, ret = 0;
+ char name[MISDN_MAX_IDLEN];
+
+ dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL);
+ if (!dch)
+ return -ENOMEM;
+ dch->debug = debug;
+ mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, ph_state_change);
+ dch->hw = hc;
+ dch->dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
+ dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
+ (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
+ dch->dev.D.send = handle_dmsg;
+ dch->dev.D.ctrl = hfcm_dctrl;
+ dch->dev.nrbchan = 2;
+ i = pt << 2;
+ dch->slot = i + 2;
+ hc->chan[i + 2].dch = dch;
+ hc->chan[i + 2].port = pt;
+ hc->chan[i + 2].nt_timer = -1;
+ for (ch = 0; ch < dch->dev.nrbchan; ch++) {
+ bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL);
+ if (!bch) {
+ printk(KERN_ERR "%s: no memory for bchannel\n",
+ __func__);
+ ret = -ENOMEM;
+ goto free_chan;
+ }
+ hc->chan[i + ch].coeff = kzalloc(512, GFP_KERNEL);
+ if (!hc->chan[i + ch].coeff) {
+ printk(KERN_ERR "%s: no memory for coeffs\n",
+ __func__);
+ ret = -ENOMEM;
+ goto free_chan;
+ }
+ bch->nr = ch + 1;
+ bch->slot = i + ch;
+ bch->debug = debug;
+ mISDN_initbchannel(bch, MAX_DATA_MEM);
+ bch->hw = hc;
+ bch->ch.send = handle_bmsg;
+ bch->ch.ctrl = hfcm_bctrl;
+ bch->ch.nr = ch + 1;
+ list_add(&bch->ch.list, &dch->dev.bchannels);
+ hc->chan[i + ch].bch = bch;
+ hc->chan[i + ch].port = pt;
+ test_and_set_bit(bch->nr, &dch->dev.channelmap[0]);
+ }
+ /* set master clock */
+ if (port[Port_cnt] & 0x001) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG
+ "%s: PROTOCOL set master clock: "
+ "card(%d) port(%d)\n",
+ __func__, HFC_cnt + 1, pt + 1);
+ if (dch->dev.D.protocol != ISDN_P_TE_S0) {
+ printk(KERN_ERR "Error: Master clock "
+ "for port(%d) of card(%d) is only"
+ " possible with TE-mode\n",
+ pt + 1, HFC_cnt + 1);
+ ret = -EINVAL;
+ goto free_chan;
+ }
+ if (hc->masterclk >= 0) {
+ printk(KERN_ERR "Error: Master clock "
+ "for port(%d) of card(%d) already "
+ "defined for port(%d)\n",
+ pt + 1, HFC_cnt + 1, hc->masterclk+1);
+ ret = -EINVAL;
+ goto free_chan;
+ }
+ hc->masterclk = pt;
+ }
+ /* set transmitter line to non capacitive */
+ if (port[Port_cnt] & 0x002) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG
+ "%s: PROTOCOL set non capacitive "
+ "transmitter: card(%d) port(%d)\n",
+ __func__, HFC_cnt + 1, pt + 1);
+ test_and_set_bit(HFC_CFG_NONCAP_TX,
+ &hc->chan[i + 2].cfg);
+ }
+ /* disable E-channel */
+ if (port[Port_cnt] & 0x004) {
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG
+ "%s: PROTOCOL disable E-channel: "
+ "card(%d) port(%d)\n",
+ __func__, HFC_cnt + 1, pt + 1);
+ test_and_set_bit(HFC_CFG_DIS_ECHANNEL,
+ &hc->chan[i + 2].cfg);
+ }
+ snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-%ds.%d/%d",
+ hc->type, HFC_cnt + 1, pt + 1);
+ ret = mISDN_register_device(&dch->dev, name);
+ if (ret)
+ goto free_chan;
+ hc->created[pt] = 1;
+ return ret;
+free_chan:
+ release_port(hc, dch);
+ return ret;
+}
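+
+/*
+ * For reference, the port[] option bits evaluated in init_multi_port()
+ * above:
+ *   0x001  use this port as master clock source (TE mode only,
+ *          at most one port per card)
+ *   0x002  set the transmitter line to non-capacitive mode
+ *   0x004  disable the E-channel
+ */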
+
+static int
+hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct hm_map *m = (struct hm_map *)ent->driver_data;
+ int ret_err = 0;
+ int pt;
+ struct hfc_multi *hc;
+ u_long flags;
+ u_char dips = 0, pmj = 0; /* dip settings, port mode Jumpers */
+
+ if (HFC_cnt >= MAX_CARDS) {
+ printk(KERN_ERR "too many cards (max=%d).\n",
+ MAX_CARDS);
+ return -EINVAL;
+ }
+ if ((type[HFC_cnt] & 0xff) && (type[HFC_cnt] & 0xff) != m->type) {
+ printk(KERN_WARNING "HFC-MULTI: Card '%s:%s' type %d found but "
+ "type[%d] %d was supplied as module parameter\n",
+ m->vendor_name, m->card_name, m->type, HFC_cnt,
+ type[HFC_cnt] & 0xff);
+		printk(KERN_WARNING "HFC-MULTI: Load the module without "
+			"parameters first to see the cards and their types.\n");
+ return -EINVAL;
+ }
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: Registering %s:%s chip type %d (0x%x)\n",
+ __func__, m->vendor_name, m->card_name, m->type,
+ type[HFC_cnt]);
+
+ /* allocate card+fifo structure */
+ hc = kzalloc(sizeof(struct hfc_multi), GFP_KERNEL);
+ if (!hc) {
+ printk(KERN_ERR "No kmem for HFC-Multi card\n");
+ return -ENOMEM;
+ }
+ spin_lock_init(&hc->lock);
+ hc->mtyp = m;
+ hc->type = m->type;
+ hc->ports = m->ports;
+ hc->id = HFC_cnt;
+ hc->pcm = pcm[HFC_cnt];
+ hc->io_mode = iomode[HFC_cnt];
+ if (dslot[HFC_cnt] < 0) {
+ hc->dslot = 0;
+ printk(KERN_INFO "HFC-E1 card has disabled D-channel, but "
+ "31 B-channels\n");
+	} else if (dslot[HFC_cnt] > 0 && dslot[HFC_cnt] < 32) {
+		hc->dslot = dslot[HFC_cnt];
+		printk(KERN_INFO "HFC-E1 card has alternative D-channel on "
+			"time slot %d\n", dslot[HFC_cnt]);
+ } else
+ hc->dslot = 16;
+
+ /* set chip specific features */
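+	/*
+	 * type[] option bits handled below (the low byte is the expected
+	 * card type and was already checked above):
+	 *   0x00100  uLaw coding (default is aLaw)
+	 *   0x00200  disable DTMF detection
+	 *   0x00800  PCM bus slave       0x01000  PCM bus master
+	 *   0x04000  external RAM (EXRAM_128)
+	 *   0x08000  external RAM (EXRAM_512)
+	 *   0x10000  64 timeslots        0x20000  128 timeslots
+	 *   0x80000  enable the watchdog
+	 */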
+ hc->masterclk = -1;
+ if (type[HFC_cnt] & 0x100) {
+ test_and_set_bit(HFC_CHIP_ULAW, &hc->chip);
+ silence = 0xff; /* ulaw silence */
+ } else
+ silence = 0x2a; /* alaw silence */
+ if (!(type[HFC_cnt] & 0x200))
+ test_and_set_bit(HFC_CHIP_DTMF, &hc->chip);
+
+ if (type[HFC_cnt] & 0x800)
+ test_and_set_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
+ if (type[HFC_cnt] & 0x1000) {
+ test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
+ test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
+ }
+ if (type[HFC_cnt] & 0x4000)
+ test_and_set_bit(HFC_CHIP_EXRAM_128, &hc->chip);
+ if (type[HFC_cnt] & 0x8000)
+ test_and_set_bit(HFC_CHIP_EXRAM_512, &hc->chip);
+ hc->slots = 32;
+ if (type[HFC_cnt] & 0x10000)
+ hc->slots = 64;
+ if (type[HFC_cnt] & 0x20000)
+ hc->slots = 128;
+ if (type[HFC_cnt] & 0x80000) {
+ test_and_set_bit(HFC_CHIP_WATCHDOG, &hc->chip);
+ hc->wdcount = 0;
+ hc->wdbyte = V_GPIO_OUT2;
+ printk(KERN_NOTICE "Watchdog enabled\n");
+ }
+
+ /* setup pci, hc->slots may change due to PLXSD */
+ ret_err = setup_pci(hc, pdev, ent);
+ if (ret_err) {
+ if (hc == syncmaster)
+ syncmaster = NULL;
+ kfree(hc);
+ return ret_err;
+ }
+
+	/* create channels */
+ for (pt = 0; pt < hc->ports; pt++) {
+ if (Port_cnt >= MAX_PORTS) {
+ printk(KERN_ERR "too many ports (max=%d).\n",
+ MAX_PORTS);
+ ret_err = -EINVAL;
+ goto free_card;
+ }
+ if (hc->type == 1)
+ ret_err = init_e1_port(hc, m);
+ else
+ ret_err = init_multi_port(hc, pt);
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG
+ "%s: Registering D-channel, card(%d) port(%d)"
+			    " result %d\n",
+ __func__, HFC_cnt + 1, pt, ret_err);
+
+ if (ret_err) {
+ while (pt) { /* release already registered ports */
+ pt--;
+ release_port(hc, hc->chan[(pt << 2) + 2].dch);
+ }
+ goto free_card;
+ }
+ Port_cnt++;
+ }
+
+	/* dip switches */
+ switch (m->dip_type) {
+ case DIP_4S:
+ /*
+ * get DIP Setting for beroNet 1S/2S/4S cards
+ * check if Port Jumper config matches
+ * module param 'protocol'
+ * DIP Setting: (collect GPIO 13/14/15 (R_GPIO_IN1) +
+ * GPI 19/23 (R_GPI_IN2))
+ */
+ dips = ((~HFC_inb(hc, R_GPIO_IN1) & 0xE0) >> 5) |
+ ((~HFC_inb(hc, R_GPI_IN2) & 0x80) >> 3) |
+ (~HFC_inb(hc, R_GPI_IN2) & 0x08);
+
+ /* Port mode (TE/NT) jumpers */
+ pmj = ((HFC_inb(hc, R_GPI_IN3) >> 4) & 0xf);
+
+ if (test_bit(HFC_CHIP_B410P, &hc->chip))
+ pmj = ~pmj & 0xf;
+
+ printk(KERN_INFO "%s: %s DIPs(0x%x) jumpers(0x%x)\n",
+ m->vendor_name, m->card_name, dips, pmj);
+ break;
+ case DIP_8S:
+ /*
+ * get DIP Setting for beroNet 8S0+ cards
+ *
+ * enable PCI auxbridge function
+ */
+ HFC_outb(hc, R_BRG_PCM_CFG, 1 | V_PCM_CLK);
+ /* prepare access to auxport */
+ outw(0x4000, hc->pci_iobase + 4);
+ /*
+ * some dummy reads are required to
+ * read valid DIP switch data
+ */
+ dips = inb(hc->pci_iobase);
+ dips = inb(hc->pci_iobase);
+ dips = inb(hc->pci_iobase);
+ dips = ~inb(hc->pci_iobase) & 0x3F;
+ outw(0x0, hc->pci_iobase + 4);
+ /* disable PCI auxbridge function */
+ HFC_outb(hc, R_BRG_PCM_CFG, V_PCM_CLK);
+ printk(KERN_INFO "%s: %s DIPs(0x%x)\n",
+ m->vendor_name, m->card_name, dips);
+ break;
+ case DIP_E1:
+ /*
+ * get DIP Setting for beroNet E1 cards
+ * DIP Setting: collect GPI 4/5/6/7 (R_GPI_IN0)
+ */
+ dips = (~HFC_inb(hc, R_GPI_IN0) & 0xF0)>>4;
+ printk(KERN_INFO "%s: %s DIPs(0x%x)\n",
+ m->vendor_name, m->card_name, dips);
+ break;
+ }
+
+ /* add to list */
+ spin_lock_irqsave(&HFClock, flags);
+ list_add_tail(&hc->list, &HFClist);
+ spin_unlock_irqrestore(&HFClock, flags);
+
+ /* initialize hardware */
+ ret_err = init_card(hc);
+ if (ret_err) {
+ printk(KERN_ERR "init card returns %d\n", ret_err);
+ release_card(hc);
+ return ret_err;
+ }
+
+ /* start IRQ and return */
+ spin_lock_irqsave(&hc->lock, flags);
+ enable_hwirq(hc);
+ spin_unlock_irqrestore(&hc->lock, flags);
+ return 0;
+
+free_card:
+ release_io_hfcmulti(hc);
+ if (hc == syncmaster)
+ syncmaster = NULL;
+ kfree(hc);
+ return ret_err;
+}
+
+static void __devexit hfc_remove_pci(struct pci_dev *pdev)
+{
+ struct hfc_multi *card = pci_get_drvdata(pdev);
+ u_long flags;
+
+ if (debug)
+ printk(KERN_INFO "removing hfc_multi card vendor:%x "
+ "device:%x subvendor:%x subdevice:%x\n",
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
+
+ if (card) {
+ spin_lock_irqsave(&HFClock, flags);
+ release_card(card);
+ spin_unlock_irqrestore(&HFClock, flags);
+ } else {
+ if (debug)
+			printk(KERN_WARNING "%s: drvdata already removed\n",
+ __func__);
+ }
+}
+
+#define VENDOR_CCD "Cologne Chip AG"
+#define VENDOR_BN "beroNet GmbH"
+#define VENDOR_DIG "Digium Inc."
+#define VENDOR_JH "Junghanns.NET GmbH"
+#define VENDOR_PRIM "PrimuX"
+
+static const struct hm_map hfcm_map[] = {
+/*0*/ {VENDOR_BN, "HFC-1S Card (mini PCI)", 4, 1, 1, 3, 0, DIP_4S, 0},
+/*1*/ {VENDOR_BN, "HFC-2S Card", 4, 2, 1, 3, 0, DIP_4S},
+/*2*/ {VENDOR_BN, "HFC-2S Card (mini PCI)", 4, 2, 1, 3, 0, DIP_4S, 0},
+/*3*/ {VENDOR_BN, "HFC-4S Card", 4, 4, 1, 2, 0, DIP_4S, 0},
+/*4*/ {VENDOR_BN, "HFC-4S Card (mini PCI)", 4, 4, 1, 2, 0, 0, 0},
+/*5*/ {VENDOR_CCD, "HFC-4S Eval (old)", 4, 4, 0, 0, 0, 0, 0},
+/*6*/ {VENDOR_CCD, "HFC-4S IOB4ST", 4, 4, 1, 2, 0, 0, 0},
+/*7*/ {VENDOR_CCD, "HFC-4S", 4, 4, 1, 2, 0, 0, 0},
+/*8*/ {VENDOR_DIG, "HFC-4S Card", 4, 4, 0, 2, 0, 0, HFC_IO_MODE_REGIO},
+/*9*/ {VENDOR_CCD, "HFC-4S Swyx 4xS0 SX2 QuadBri", 4, 4, 1, 2, 0, 0, 0},
+/*10*/ {VENDOR_JH, "HFC-4S (junghanns 2.0)", 4, 4, 1, 2, 0, 0, 0},
+/*11*/ {VENDOR_PRIM, "HFC-2S Primux Card", 4, 2, 0, 0, 0, 0, 0},
+
+/*12*/ {VENDOR_BN, "HFC-8S Card", 8, 8, 1, 0, 0, 0, 0},
+/*13*/ {VENDOR_BN, "HFC-8S Card (+)", 8, 8, 1, 8, 0, DIP_8S,
+ HFC_IO_MODE_REGIO},
+/*14*/ {VENDOR_CCD, "HFC-8S Eval (old)", 8, 8, 0, 0, 0, 0, 0},
+/*15*/ {VENDOR_CCD, "HFC-8S IOB4ST Recording", 8, 8, 1, 0, 0, 0, 0},
+
+/*16*/ {VENDOR_CCD, "HFC-8S IOB8ST", 8, 8, 1, 0, 0, 0, 0},
+/*17*/ {VENDOR_CCD, "HFC-8S", 8, 8, 1, 0, 0, 0, 0},
+/*18*/ {VENDOR_CCD, "HFC-8S", 8, 8, 1, 0, 0, 0, 0},
+
+/*19*/ {VENDOR_BN, "HFC-E1 Card", 1, 1, 0, 1, 0, DIP_E1, 0},
+/*20*/ {VENDOR_BN, "HFC-E1 Card (mini PCI)", 1, 1, 0, 1, 0, 0, 0},
+/*21*/ {VENDOR_BN, "HFC-E1+ Card (Dual)", 1, 1, 0, 1, 0, DIP_E1, 0},
+/*22*/ {VENDOR_BN, "HFC-E1 Card (Dual)", 1, 1, 0, 1, 0, DIP_E1, 0},
+
+/*23*/ {VENDOR_CCD, "HFC-E1 Eval (old)", 1, 1, 0, 0, 0, 0, 0},
+/*24*/ {VENDOR_CCD, "HFC-E1 IOB1E1", 1, 1, 0, 1, 0, 0, 0},
+/*25*/ {VENDOR_CCD, "HFC-E1", 1, 1, 0, 1, 0, 0, 0},
+
+/*26*/ {VENDOR_CCD, "HFC-4S Speech Design", 4, 4, 0, 0, 0, 0,
+ HFC_IO_MODE_PLXSD},
+/*27*/ {VENDOR_CCD, "HFC-E1 Speech Design", 1, 1, 0, 0, 0, 0,
+ HFC_IO_MODE_PLXSD},
+/*28*/ {VENDOR_CCD, "HFC-4S OpenVox", 4, 4, 1, 0, 0, 0, 0},
+/*29*/ {VENDOR_CCD, "HFC-2S OpenVox", 4, 2, 1, 0, 0, 0, 0},
+/*30*/ {VENDOR_CCD, "HFC-8S OpenVox", 8, 8, 1, 0, 0, 0, 0},
+};
+
+#undef H
+#define H(x) ((unsigned long)&hfcm_map[x])
+static struct pci_device_id hfmultipci_ids[] __devinitdata = {
+
+ /* Cards with HFC-4S Chip */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_BN1SM, 0, 0, H(0)}, /* BN1S mini PCI */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_BN2S, 0, 0, H(1)}, /* BN2S */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_BN2SM, 0, 0, H(2)}, /* BN2S mini PCI */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_BN4S, 0, 0, H(3)}, /* BN4S */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_BN4SM, 0, 0, H(4)}, /* BN4S mini PCI */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
+ PCI_DEVICE_ID_CCD_HFC4S, 0, 0, H(5)}, /* Old Eval */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_IOB4ST, 0, 0, H(6)}, /* IOB4ST */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_HFC4S, 0, 0, H(7)}, /* 4S */
+ { PCI_VENDOR_ID_DIGIUM, PCI_DEVICE_ID_DIGIUM_HFC4S,
+ PCI_VENDOR_ID_DIGIUM, PCI_DEVICE_ID_DIGIUM_HFC4S, 0, 0, H(8)},
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_SWYX4S, 0, 0, H(9)}, /* 4S Swyx */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_JH4S20, 0, 0, H(10)},
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_PMX2S, 0, 0, H(11)}, /* Primux */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_OV4S, 0, 0, H(28)}, /* OpenVox 4 */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_OV2S, 0, 0, H(29)}, /* OpenVox 2 */
+
+ /* Cards with HFC-8S Chip */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_BN8S, 0, 0, H(12)}, /* BN8S */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_BN8SP, 0, 0, H(13)}, /* BN8S+ */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
+ PCI_DEVICE_ID_CCD_HFC8S, 0, 0, H(14)}, /* old Eval */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_IOB8STR, 0, 0, H(15)},
+ /* IOB8ST Recording */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_IOB8ST, 0, 0, H(16)}, /* IOB8ST */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_IOB8ST_1, 0, 0, H(17)}, /* IOB8ST */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_HFC8S, 0, 0, H(18)}, /* 8S */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_OV8S, 0, 0, H(30)}, /* OpenVox 8 */
+
+
+ /* Cards with HFC-E1 Chip */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_BNE1, 0, 0, H(19)}, /* BNE1 */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_BNE1M, 0, 0, H(20)}, /* BNE1 mini PCI */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_BNE1DP, 0, 0, H(21)}, /* BNE1 + (Dual) */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_BNE1D, 0, 0, H(22)}, /* BNE1 (Dual) */
+
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
+ PCI_DEVICE_ID_CCD_HFCE1, 0, 0, H(23)}, /* Old Eval */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_IOB1E1, 0, 0, H(24)}, /* IOB1E1 */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_HFCE1, 0, 0, H(25)}, /* E1 */
+
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_SPD4S, 0, 0, H(26)}, /* PLX PCI Bridge */
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_SPDE1, 0, 0, H(27)}, /* PLX PCI Bridge */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, 0},
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, 0},
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, 0},
+ {0, }
+};
+#undef H
+
+MODULE_DEVICE_TABLE(pci, hfmultipci_ids);
+
+static int
+hfcmulti_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct hm_map *m = (struct hm_map *)ent->driver_data;
+ int ret;
+
+ if (m == NULL) {
+ if (ent->vendor == PCI_VENDOR_ID_CCD)
+ if (ent->device == PCI_DEVICE_ID_CCD_HFC4S ||
+ ent->device == PCI_DEVICE_ID_CCD_HFC8S ||
+ ent->device == PCI_DEVICE_ID_CCD_HFCE1)
+ printk(KERN_ERR
+ "unknown HFC multiport controller "
+ "(vendor:%x device:%x subvendor:%x "
+ "subdevice:%x) Please contact the "
+ "driver maintainer for support.\n",
+ ent->vendor, ent->device,
+ ent->subvendor, ent->subdevice);
+ return -ENODEV;
+ }
+ ret = hfcmulti_init(pdev, ent);
+ if (ret)
+ return ret;
+ HFC_cnt++;
+ printk(KERN_INFO "%d devices registered\n", HFC_cnt);
+ return 0;
+}
+
+static struct pci_driver hfcmultipci_driver = {
+ .name = "hfc_multi",
+ .probe = hfcmulti_probe,
+ .remove = __devexit_p(hfc_remove_pci),
+ .id_table = hfmultipci_ids,
+};
+
+static void __exit
+HFCmulti_cleanup(void)
+{
+ struct hfc_multi *card, *next;
+
+ /* unload interrupt function symbol */
+ if (hfc_interrupt)
+ symbol_put(ztdummy_extern_interrupt);
+ if (register_interrupt)
+ symbol_put(ztdummy_register_interrupt);
+ if (unregister_interrupt) {
+ if (interrupt_registered) {
+ interrupt_registered = 0;
+ unregister_interrupt();
+ }
+ symbol_put(ztdummy_unregister_interrupt);
+ }
+
+ list_for_each_entry_safe(card, next, &HFClist, list)
+ release_card(card);
+ /* get rid of all devices of this driver */
+ pci_unregister_driver(&hfcmultipci_driver);
+}
+
+static int __init
+HFCmulti_init(void)
+{
+ int err;
+
+#ifdef IRQ_DEBUG
+ printk(KERN_ERR "%s: IRQ_DEBUG IS ENABLED!\n", __func__);
+#endif
+
+ spin_lock_init(&HFClock);
+ spin_lock_init(&plx_lock);
+
+ if (debug & DEBUG_HFCMULTI_INIT)
+ printk(KERN_DEBUG "%s: init entered\n", __func__);
+
+#ifdef __BIG_ENDIAN
+#error "this driver does not support big endian machines yet"
+#endif
+ hfc_interrupt = symbol_get(ztdummy_extern_interrupt);
+ register_interrupt = symbol_get(ztdummy_register_interrupt);
+ unregister_interrupt = symbol_get(ztdummy_unregister_interrupt);
+ printk(KERN_INFO "mISDN: HFC-multi driver %s\n",
+ hfcmulti_revision);
+
+ switch (poll) {
+ case 0:
+ poll_timer = 6;
+ poll = 128;
+ break;
+ /*
+	 * if this break disappears again, somebody is
+	 * going to get an earful :-)
+ */
+ case 8:
+ poll_timer = 2;
+ break;
+ case 16:
+ poll_timer = 3;
+ break;
+ case 32:
+ poll_timer = 4;
+ break;
+ case 64:
+ poll_timer = 5;
+ break;
+ case 128:
+ poll_timer = 6;
+ break;
+ case 256:
+ poll_timer = 7;
+ break;
+ default:
+ printk(KERN_ERR
+ "%s: Wrong poll value (%d).\n", __func__, poll);
+ err = -EINVAL;
+ return err;
+
+ }
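+	/*
+	 * Note: the mapping above follows poll = 2 ^ (poll_timer + 1),
+	 * i.e. poll_timer = log2(poll) - 1 for the supported values
+	 * 8..256; poll = 0 selects the default of 128 (poll_timer 6).
+	 */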
+
+ err = pci_register_driver(&hfcmultipci_driver);
+ if (err < 0) {
+ printk(KERN_ERR "error registering pci driver: %x\n", err);
+ if (hfc_interrupt)
+ symbol_put(ztdummy_extern_interrupt);
+ if (register_interrupt)
+ symbol_put(ztdummy_register_interrupt);
+ if (unregister_interrupt) {
+ if (interrupt_registered) {
+ interrupt_registered = 0;
+ unregister_interrupt();
+ }
+ symbol_put(ztdummy_unregister_interrupt);
+ }
+ return err;
+ }
+ return 0;
+}
+
+
+module_init(HFCmulti_init);
+module_exit(HFCmulti_cleanup);
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
new file mode 100644
index 000000000000..917968530e1e
--- /dev/null
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -0,0 +1,2256 @@
+/*
+ *
+ * hfcpci.c low level driver for CCD's hfc-pci based cards
+ *
+ * Author Werner Cornelius (werner@isdn4linux.de)
+ * based on existing driver for CCD hfc ISA cards
+ * type approval valid for HFC-S PCI A based card
+ *
+ * Copyright 1999 by Werner Cornelius (werner@isdn-development.de)
+ * Copyright 2008 by Karsten Keil <kkeil@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/mISDNhw.h>
+
+#include "hfc_pci.h"
+
+static const char *hfcpci_revision = "2.0";
+
+#define MAX_CARDS 8
+static int HFC_cnt;
+static uint debug;
+
+MODULE_AUTHOR("Karsten Keil");
+MODULE_LICENSE("GPL");
+module_param(debug, uint, 0);
+
+static LIST_HEAD(HFClist);
+DEFINE_RWLOCK(HFClock);
+
+enum {
+ HFC_CCD_2BD0,
+ HFC_CCD_B000,
+ HFC_CCD_B006,
+ HFC_CCD_B007,
+ HFC_CCD_B008,
+ HFC_CCD_B009,
+ HFC_CCD_B00A,
+ HFC_CCD_B00B,
+ HFC_CCD_B00C,
+ HFC_CCD_B100,
+ HFC_CCD_B700,
+ HFC_CCD_B701,
+ HFC_ASUS_0675,
+ HFC_BERKOM_A1T,
+ HFC_BERKOM_TCONCEPT,
+ HFC_ANIGMA_MC145575,
+ HFC_ZOLTRIX_2BD0,
+ HFC_DIGI_DF_M_IOM2_E,
+ HFC_DIGI_DF_M_E,
+ HFC_DIGI_DF_M_IOM2_A,
+ HFC_DIGI_DF_M_A,
+ HFC_ABOCOM_2BD1,
+ HFC_SITECOM_DC105V2,
+};
+
+struct hfcPCI_hw {
+ unsigned char cirm;
+ unsigned char ctmt;
+ unsigned char clkdel;
+ unsigned char states;
+ unsigned char conn;
+ unsigned char mst_m;
+ unsigned char int_m1;
+ unsigned char int_m2;
+ unsigned char sctrl;
+ unsigned char sctrl_r;
+ unsigned char sctrl_e;
+ unsigned char trm;
+ unsigned char fifo_en;
+ unsigned char bswapped;
+ unsigned char protocol;
+ int nt_timer;
+ unsigned char *pci_io; /* start of PCI IO memory */
+ dma_addr_t dmahandle;
+ void *fifos; /* FIFO memory */
+ int last_bfifo_cnt[2];
+ /* marker saving last b-fifo frame count */
+ struct timer_list timer;
+};
+
+#define HFC_CFG_MASTER 1
+#define HFC_CFG_SLAVE 2
+#define HFC_CFG_PCM 3
+#define HFC_CFG_2HFC 4
+#define HFC_CFG_SLAVEHFC 5
+#define HFC_CFG_NEG_F0 6
+#define HFC_CFG_SW_DD_DU 7
+
+#define FLG_HFC_TIMER_T1 16
+#define FLG_HFC_TIMER_T3 17
+
+#define NT_T1_COUNT 1120 /* number of 3.125ms interrupts (3.5s) */
+#define NT_T3_COUNT 31 /* number of 3.125ms interrupts (97 ms) */
+#define CLKDEL_TE 0x0e /* CLKDEL in TE mode */
+#define CLKDEL_NT 0x6c /* CLKDEL in NT mode */
+
+
+struct hfc_pci {
+ struct list_head list;
+ u_char subtype;
+ u_char chanlimit;
+ u_char initdone;
+ u_long cfg;
+ u_int irq;
+ u_int irqcnt;
+ struct pci_dev *pdev;
+ struct hfcPCI_hw hw;
+ spinlock_t lock; /* card lock */
+ struct dchannel dch;
+ struct bchannel bch[2];
+};
+
+/* Interface functions */
+static void
+enable_hwirq(struct hfc_pci *hc)
+{
+ hc->hw.int_m2 |= HFCPCI_IRQ_ENABLE;
+ Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
+}
+
+static void
+disable_hwirq(struct hfc_pci *hc)
+{
+ hc->hw.int_m2 &= ~((u_char)HFCPCI_IRQ_ENABLE);
+ Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
+}
+
+/*
+ * free hardware resources used by driver
+ */
+static void
+release_io_hfcpci(struct hfc_pci *hc)
+{
+ /* disable memory mapped ports + busmaster */
+ pci_write_config_word(hc->pdev, PCI_COMMAND, 0);
+ del_timer(&hc->hw.timer);
+ pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos, hc->hw.dmahandle);
+ iounmap((void *)hc->hw.pci_io);
+}
+
+/*
+ * set mode (NT or TE)
+ */
+static void
+hfcpci_setmode(struct hfc_pci *hc)
+{
+ if (hc->hw.protocol == ISDN_P_NT_S0) {
+ hc->hw.clkdel = CLKDEL_NT; /* ST-Bit delay for NT-Mode */
+ hc->hw.sctrl |= SCTRL_MODE_NT; /* NT-MODE */
+ hc->hw.states = 1; /* G1 */
+ } else {
+ hc->hw.clkdel = CLKDEL_TE; /* ST-Bit delay for TE-Mode */
+ hc->hw.sctrl &= ~SCTRL_MODE_NT; /* TE-MODE */
+ hc->hw.states = 2; /* F2 */
+ }
+ Write_hfc(hc, HFCPCI_CLKDEL, hc->hw.clkdel);
+ Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | hc->hw.states);
+ udelay(10);
+ Write_hfc(hc, HFCPCI_STATES, hc->hw.states | 0x40); /* Deactivate */
+ Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
+}
+
+/*
+ * function called to reset the HFC PCI chip. A complete software reset of chip
+ * and fifos is done.
+ */
+static void
+reset_hfcpci(struct hfc_pci *hc)
+{
+ u_char val;
+ int cnt = 0;
+
+ printk(KERN_DEBUG "reset_hfcpci: entered\n");
+ val = Read_hfc(hc, HFCPCI_CHIP_ID);
+ printk(KERN_INFO "HFC_PCI: resetting HFC ChipId(%x)\n", val);
+ /* enable memory mapped ports, disable busmaster */
+ pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
+ disable_hwirq(hc);
+ /* enable memory ports + busmaster */
+ pci_write_config_word(hc->pdev, PCI_COMMAND,
+ PCI_ENA_MEMIO + PCI_ENA_MASTER);
+ val = Read_hfc(hc, HFCPCI_STATUS);
+ printk(KERN_DEBUG "HFC-PCI status(%x) before reset\n", val);
+ hc->hw.cirm = HFCPCI_RESET; /* Reset On */
+ Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ mdelay(10); /* Timeout 10ms */
+ hc->hw.cirm = 0; /* Reset Off */
+ Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
+ val = Read_hfc(hc, HFCPCI_STATUS);
+ printk(KERN_DEBUG "HFC-PCI status(%x) after reset\n", val);
+ while (cnt < 50000) { /* max 50000 us */
+ udelay(5);
+ cnt += 5;
+ val = Read_hfc(hc, HFCPCI_STATUS);
+ if (!(val & 2))
+ break;
+ }
+ printk(KERN_DEBUG "HFC-PCI status(%x) after %dus\n", val, cnt);
+
+ hc->hw.fifo_en = 0x30; /* only D fifos enabled */
+
+ hc->hw.bswapped = 0; /* no exchange */
+ hc->hw.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
+ hc->hw.trm = HFCPCI_BTRANS_THRESMASK; /* no echo connect , threshold */
+ hc->hw.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! */
+ hc->hw.sctrl_r = 0;
+ hc->hw.sctrl_e = HFCPCI_AUTO_AWAKE; /* S/T Auto awake */
+ hc->hw.mst_m = 0;
+ if (test_bit(HFC_CFG_MASTER, &hc->cfg))
+ hc->hw.mst_m |= HFCPCI_MASTER; /* HFC Master Mode */
+ if (test_bit(HFC_CFG_NEG_F0, &hc->cfg))
+ hc->hw.mst_m |= HFCPCI_F0_NEGATIV;
+ Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
+ Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
+ Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
+ Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
+
+ hc->hw.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
+ HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
+ Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
+
+ /* Clear already pending ints */
+ if (Read_hfc(hc, HFCPCI_INT_S1));
+
+ /* set NT/TE mode */
+ hfcpci_setmode(hc);
+
+ Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
+ Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
+
+ /*
+ * Init GCI/IOM2 in master mode
+ * Slots 0 and 1 are set for B-chan 1 and 2
+ * D- and monitor/CI channel are not enabled
+ * STIO1 is used as output for data, B1+B2 from ST->IOM+HFC
+ * STIO2 is used as data input, B1+B2 from IOM->ST
+ * ST B-channel send disabled -> continuous 1s
+ * The IOM slots are always enabled
+ */
+ if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
+ /* set data flow directions: connect B1,B2: HFC to/from PCM */
+ hc->hw.conn = 0x09;
+ } else {
+ hc->hw.conn = 0x36; /* set data flow directions */
+ if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
+ Write_hfc(hc, HFCPCI_B1_SSL, 0xC0);
+ Write_hfc(hc, HFCPCI_B2_SSL, 0xC1);
+ Write_hfc(hc, HFCPCI_B1_RSL, 0xC0);
+ Write_hfc(hc, HFCPCI_B2_RSL, 0xC1);
+ } else {
+ Write_hfc(hc, HFCPCI_B1_SSL, 0x80);
+ Write_hfc(hc, HFCPCI_B2_SSL, 0x81);
+ Write_hfc(hc, HFCPCI_B1_RSL, 0x80);
+ Write_hfc(hc, HFCPCI_B2_RSL, 0x81);
+ }
+ }
+ Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
+ val = Read_hfc(hc, HFCPCI_INT_S2);
+}
+
+/*
+ * Timer function called when kernel timer expires
+ */
+static void
+hfcpci_Timer(struct hfc_pci *hc)
+{
+ hc->hw.timer.expires = jiffies + 75;
+ /* WD RESET */
+/*
+ * WriteReg(hc, HFCD_DATA, HFCD_CTMT, hc->hw.ctmt | 0x80);
+ * add_timer(&hc->hw.timer);
+ */
+}
+
+
+/*
+ * select the b-channel entry that matches the channel and is active
+ */
+static struct bchannel *
+Sel_BCS(struct hfc_pci *hc, int channel)
+{
+ if (test_bit(FLG_ACTIVE, &hc->bch[0].Flags) &&
+ (hc->bch[0].nr & channel))
+ return &hc->bch[0];
+ else if (test_bit(FLG_ACTIVE, &hc->bch[1].Flags) &&
+ (hc->bch[1].nr & channel))
+ return &hc->bch[1];
+ else
+ return NULL;
+}
+
+/*
+ * clear the desired B-channel rx fifo
+ */
+static void
+hfcpci_clear_fifo_rx(struct hfc_pci *hc, int fifo)
+{
+ u_char fifo_state;
+ struct bzfifo *bzr;
+
+ if (fifo) {
+ bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
+ fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2RX;
+ } else {
+ bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
+ fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1RX;
+ }
+ if (fifo_state)
+ hc->hw.fifo_en ^= fifo_state;
+ Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
+ hc->hw.last_bfifo_cnt[fifo] = 0;
+ bzr->f1 = MAX_B_FRAMES;
+ bzr->f2 = bzr->f1; /* init F pointers to remain constant */
+ bzr->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
+ bzr->za[MAX_B_FRAMES].z2 = cpu_to_le16(
+ le16_to_cpu(bzr->za[MAX_B_FRAMES].z1));
+ if (fifo_state)
+ hc->hw.fifo_en |= fifo_state;
+ Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
+}
+
+/*
+ * clear the desired B-channel tx fifo
+ */
+static void hfcpci_clear_fifo_tx(struct hfc_pci *hc, int fifo)
+{
+ u_char fifo_state;
+ struct bzfifo *bzt;
+
+ if (fifo) {
+ bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
+ fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2TX;
+ } else {
+ bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
+ fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1TX;
+ }
+ if (fifo_state)
+ hc->hw.fifo_en ^= fifo_state;
+ Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
+ if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
+ printk(KERN_DEBUG "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) "
+ "z1(%x) z2(%x) state(%x)\n",
+ fifo, bzt->f1, bzt->f2,
+ le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
+ le16_to_cpu(bzt->za[MAX_B_FRAMES].z2),
+ fifo_state);
+ bzt->f2 = MAX_B_FRAMES;
+ bzt->f1 = bzt->f2; /* init F pointers to remain constant */
+ bzt->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
+	bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16(
+		le16_to_cpu(bzt->za[MAX_B_FRAMES].z1) - 1);
+ if (fifo_state)
+ hc->hw.fifo_en |= fifo_state;
+ Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
+ if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
+ printk(KERN_DEBUG
+ "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) z1(%x) z2(%x)\n",
+ fifo, bzt->f1, bzt->f2,
+ le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
+ le16_to_cpu(bzt->za[MAX_B_FRAMES].z2));
+}
+
+/*
+ * read a complete B-frame out of the buffer
+ */
+static void
+hfcpci_empty_bfifo(struct bchannel *bch, struct bzfifo *bz,
+ u_char *bdata, int count)
+{
+ u_char *ptr, *ptr1, new_f2;
+ int total, maxlen, new_z2;
+ struct zt *zp;
+
+ if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
+ printk(KERN_DEBUG "hfcpci_empty_fifo\n");
+ zp = &bz->za[bz->f2]; /* point to Z-Regs */
+ new_z2 = le16_to_cpu(zp->z2) + count; /* new position in fifo */
+ if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
+ new_z2 -= B_FIFO_SIZE; /* buffer wrap */
+ new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
+ if ((count > MAX_DATA_SIZE + 3) || (count < 4) ||
+ (*(bdata + (le16_to_cpu(zp->z1) - B_SUB_VAL)))) {
+ if (bch->debug & DEBUG_HW)
+ printk(KERN_DEBUG "hfcpci_empty_fifo: incoming packet "
+ "invalid length %d or crc\n", count);
+#ifdef ERROR_STATISTIC
+ bch->err_inv++;
+#endif
+ bz->za[new_f2].z2 = cpu_to_le16(new_z2);
+ bz->f2 = new_f2; /* next buffer */
+ } else {
+ bch->rx_skb = mI_alloc_skb(count - 3, GFP_ATOMIC);
+ if (!bch->rx_skb) {
+ printk(KERN_WARNING "HFCPCI: receive out of memory\n");
+ return;
+ }
+ total = count;
+ count -= 3;
+ ptr = skb_put(bch->rx_skb, count);
+
+ if (le16_to_cpu(zp->z2) + count <= B_FIFO_SIZE + B_SUB_VAL)
+ maxlen = count; /* complete transfer */
+ else
+ maxlen = B_FIFO_SIZE + B_SUB_VAL -
+ le16_to_cpu(zp->z2); /* maximum */
+
+ ptr1 = bdata + (le16_to_cpu(zp->z2) - B_SUB_VAL);
+ /* start of data */
+ memcpy(ptr, ptr1, maxlen); /* copy data */
+ count -= maxlen;
+
+ if (count) { /* rest remaining */
+ ptr += maxlen;
+ ptr1 = bdata; /* start of buffer */
+ memcpy(ptr, ptr1, count); /* rest */
+ }
+ bz->za[new_f2].z2 = cpu_to_le16(new_z2);
+ bz->f2 = new_f2; /* next buffer */
+ recv_Bchannel(bch);
+ }
+}
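+
+/*
+ * Note on the Z counter arithmetic above: the B FIFO is a ring of
+ * B_FIFO_SIZE bytes addressed by Z values in the range
+ * B_SUB_VAL .. B_SUB_VAL + B_FIFO_SIZE - 1.  That is why a pointer
+ * advanced past B_FIFO_SIZE + B_SUB_VAL wraps by subtracting
+ * B_FIFO_SIZE, and why the data itself is read at
+ * bdata + (z - B_SUB_VAL).
+ */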
+
+/*
+ * D-channel receive procedure
+ */
+static int
+receive_dmsg(struct hfc_pci *hc)
+{
+ struct dchannel *dch = &hc->dch;
+ int maxlen;
+ int rcnt, total;
+ int count = 5;
+ u_char *ptr, *ptr1;
+ struct dfifo *df;
+ struct zt *zp;
+
+ df = &((union fifo_area *)(hc->hw.fifos))->d_chan.d_rx;
+ while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
+ zp = &df->za[df->f2 & D_FREG_MASK];
+ rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
+ if (rcnt < 0)
+ rcnt += D_FIFO_SIZE;
+ rcnt++;
+ if (dch->debug & DEBUG_HW_DCHANNEL)
+ printk(KERN_DEBUG
+ "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)\n",
+ df->f1, df->f2,
+ le16_to_cpu(zp->z1),
+ le16_to_cpu(zp->z2),
+ rcnt);
+
+ if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
+ (df->data[le16_to_cpu(zp->z1)])) {
+ if (dch->debug & DEBUG_HW)
+ printk(KERN_DEBUG
+				    "empty_fifo hfcpci packet invalid len "
+ "%d or crc %d\n",
+ rcnt,
+ df->data[le16_to_cpu(zp->z1)]);
+#ifdef ERROR_STATISTIC
+ cs->err_rx++;
+#endif
+ df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
+ (MAX_D_FRAMES + 1); /* next buffer */
+ df->za[df->f2 & D_FREG_MASK].z2 =
+ cpu_to_le16((zp->z2 + rcnt) & (D_FIFO_SIZE - 1));
+ } else {
+ dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC);
+ if (!dch->rx_skb) {
+ printk(KERN_WARNING
+ "HFC-PCI: D receive out of memory\n");
+ break;
+ }
+ total = rcnt;
+ rcnt -= 3;
+ ptr = skb_put(dch->rx_skb, rcnt);
+
+ if (le16_to_cpu(zp->z2) + rcnt <= D_FIFO_SIZE)
+ maxlen = rcnt; /* complete transfer */
+ else
+ maxlen = D_FIFO_SIZE - le16_to_cpu(zp->z2);
+ /* maximum */
+
+ ptr1 = df->data + le16_to_cpu(zp->z2);
+ /* start of data */
+ memcpy(ptr, ptr1, maxlen); /* copy data */
+ rcnt -= maxlen;
+
+ if (rcnt) { /* rest remaining */
+ ptr += maxlen;
+ ptr1 = df->data; /* start of buffer */
+ memcpy(ptr, ptr1, rcnt); /* rest */
+ }
+ df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
+ (MAX_D_FRAMES + 1); /* next buffer */
+ df->za[df->f2 & D_FREG_MASK].z2 = cpu_to_le16((
+ le16_to_cpu(zp->z2) + total) & (D_FIFO_SIZE - 1));
+ recv_Dchannel(dch);
+ }
+ }
+ return 1;
+}
+
+/*
+ * check for transparent receive data and read at most one threshold size, if available
+ */
+int
+hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata)
+{
+ unsigned short *z1r, *z2r;
+ int new_z2, fcnt, maxlen;
+ u_char *ptr, *ptr1;
+
+ z1r = &bz->za[MAX_B_FRAMES].z1; /* pointer to z reg */
+ z2r = z1r + 1;
+
+ fcnt = le16_to_cpu(*z1r) - le16_to_cpu(*z2r);
+ if (!fcnt)
+ return 0; /* no data avail */
+
+ if (fcnt <= 0)
+ fcnt += B_FIFO_SIZE; /* bytes actually buffered */
+ if (fcnt > HFCPCI_BTRANS_THRESHOLD)
+ fcnt = HFCPCI_BTRANS_THRESHOLD; /* limit size */
+
+ new_z2 = le16_to_cpu(*z2r) + fcnt; /* new position in fifo */
+ if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
+ new_z2 -= B_FIFO_SIZE; /* buffer wrap */
+
+ bch->rx_skb = mI_alloc_skb(fcnt, GFP_ATOMIC);
+ if (bch->rx_skb) {
+ ptr = skb_put(bch->rx_skb, fcnt);
+ if (le16_to_cpu(*z2r) + fcnt <= B_FIFO_SIZE + B_SUB_VAL)
+ maxlen = fcnt; /* complete transfer */
+ else
+ maxlen = B_FIFO_SIZE + B_SUB_VAL - le16_to_cpu(*z2r);
+ /* maximum */
+
+ ptr1 = bdata + (le16_to_cpu(*z2r) - B_SUB_VAL);
+ /* start of data */
+ memcpy(ptr, ptr1, maxlen); /* copy data */
+ fcnt -= maxlen;
+
+ if (fcnt) { /* rest remaining */
+ ptr += maxlen;
+ ptr1 = bdata; /* start of buffer */
+ memcpy(ptr, ptr1, fcnt); /* rest */
+ }
+ recv_Bchannel(bch);
+ } else
+ printk(KERN_WARNING "HFCPCI: receive out of memory\n");
+
+ *z2r = cpu_to_le16(new_z2); /* new position */
+ return 1;
+}
+
+/*
+ * B-channel main receive routine
+ */
+void
+main_rec_hfcpci(struct bchannel *bch)
+{
+ struct hfc_pci *hc = bch->hw;
+ int rcnt, real_fifo;
+ int receive, count = 5;
+ struct bzfifo *bz;
+ u_char *bdata;
+ struct zt *zp;
+
+
+ if ((bch->nr & 2) && (!hc->hw.bswapped)) {
+ bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
+ bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b2;
+ real_fifo = 1;
+ } else {
+ bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
+ bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b1;
+ real_fifo = 0;
+ }
+Begin:
+ count--;
+ if (bz->f1 != bz->f2) {
+ if (bch->debug & DEBUG_HW_BCHANNEL)
+ printk(KERN_DEBUG "hfcpci rec ch(%x) f1(%d) f2(%d)\n",
+ bch->nr, bz->f1, bz->f2);
+ zp = &bz->za[bz->f2];
+
+ rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
+ if (rcnt < 0)
+ rcnt += B_FIFO_SIZE;
+ rcnt++;
+ if (bch->debug & DEBUG_HW_BCHANNEL)
+ printk(KERN_DEBUG
+ "hfcpci rec ch(%x) z1(%x) z2(%x) cnt(%d)\n",
+ bch->nr, le16_to_cpu(zp->z1),
+ le16_to_cpu(zp->z2), rcnt);
+ hfcpci_empty_bfifo(bch, bz, bdata, rcnt);
+ rcnt = bz->f1 - bz->f2;
+ if (rcnt < 0)
+ rcnt += MAX_B_FRAMES + 1;
+ if (hc->hw.last_bfifo_cnt[real_fifo] > rcnt + 1) {
+ rcnt = 0;
+ hfcpci_clear_fifo_rx(hc, real_fifo);
+ }
+ hc->hw.last_bfifo_cnt[real_fifo] = rcnt;
+ if (rcnt > 1)
+ receive = 1;
+ else
+ receive = 0;
+ } else if (test_bit(FLG_TRANSPARENT, &bch->Flags))
+ receive = hfcpci_empty_fifo_trans(bch, bz, bdata);
+ else
+ receive = 0;
+ if (count && receive)
+ goto Begin;
+
+}
+
+/*
+ * D-channel send routine
+ */
+static void
+hfcpci_fill_dfifo(struct hfc_pci *hc)
+{
+ struct dchannel *dch = &hc->dch;
+ int fcnt;
+ int count, new_z1, maxlen;
+ struct dfifo *df;
+ u_char *src, *dst, new_f1;
+
+ if ((dch->debug & DEBUG_HW_DCHANNEL) && !(dch->debug & DEBUG_HW_DFIFO))
+ printk(KERN_DEBUG "%s\n", __func__);
+
+ if (!dch->tx_skb)
+ return;
+ count = dch->tx_skb->len - dch->tx_idx;
+ if (count <= 0)
+ return;
+ df = &((union fifo_area *) (hc->hw.fifos))->d_chan.d_tx;
+
+ if (dch->debug & DEBUG_HW_DFIFO)
+ printk(KERN_DEBUG "%s:f1(%d) f2(%d) z1(f1)(%x)\n", __func__,
+ df->f1, df->f2,
+ le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1));
+ fcnt = df->f1 - df->f2; /* frame count actually buffered */
+ if (fcnt < 0)
+ fcnt += (MAX_D_FRAMES + 1); /* if wrap around */
+ if (fcnt > (MAX_D_FRAMES - 1)) {
+ if (dch->debug & DEBUG_HW_DCHANNEL)
+ printk(KERN_DEBUG
+			       "hfcpci_fill_Dfifo more than 14 frames\n");
+#ifdef ERROR_STATISTIC
+ cs->err_tx++;
+#endif
+ return;
+ }
+ /* now determine free bytes in FIFO buffer */
+ maxlen = le16_to_cpu(df->za[df->f2 & D_FREG_MASK].z2) -
+ le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) - 1;
+ if (maxlen <= 0)
+		maxlen += D_FIFO_SIZE;	/* maxlen now contains free bytes */
+
+ if (dch->debug & DEBUG_HW_DCHANNEL)
+ printk(KERN_DEBUG "hfcpci_fill_Dfifo count(%d/%d)\n",
+ count, maxlen);
+ if (count > maxlen) {
+ if (dch->debug & DEBUG_HW_DCHANNEL)
+ printk(KERN_DEBUG "hfcpci_fill_Dfifo no fifo mem\n");
+ return;
+ }
+ new_z1 = (le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) + count) &
+ (D_FIFO_SIZE - 1);
+ new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
+ src = dch->tx_skb->data + dch->tx_idx; /* source pointer */
+ dst = df->data + le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
+ maxlen = D_FIFO_SIZE - le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
+ /* end fifo */
+ if (maxlen > count)
+ maxlen = count; /* limit size */
+ memcpy(dst, src, maxlen); /* first copy */
+
+ count -= maxlen; /* remaining bytes */
+ if (count) {
+ dst = df->data; /* start of buffer */
+ src += maxlen; /* new position */
+ memcpy(dst, src, count);
+ }
+ df->za[new_f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
+ /* for next buffer */
+ df->za[df->f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
+ /* new pos actual buffer */
+ df->f1 = new_f1; /* next frame */
+ dch->tx_idx = dch->tx_skb->len;
+}
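+
+/*
+ * The free-space calculation above is the usual ring buffer rule
+ * (z2 - z1 - 1) mod D_FIFO_SIZE, with z1 the position the driver
+ * writes to and z2 presumably the position the chip has read up to.
+ * An empty FIFO (z1 == z2) therefore reports D_FIFO_SIZE - 1 free
+ * bytes; one byte always stays unused so a full FIFO can be told
+ * apart from an empty one.
+ */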
+
+/*
+ * B-channel send routine
+ */
+static void
+hfcpci_fill_fifo(struct bchannel *bch)
+{
+ struct hfc_pci *hc = bch->hw;
+ int maxlen, fcnt;
+ int count, new_z1;
+ struct bzfifo *bz;
+ u_char *bdata;
+ u_char new_f1, *src, *dst;
+ unsigned short *z1t, *z2t;
+
+ if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
+ printk(KERN_DEBUG "%s\n", __func__);
+ if ((!bch->tx_skb) || bch->tx_skb->len <= 0)
+ return;
+ count = bch->tx_skb->len - bch->tx_idx;
+ if ((bch->nr & 2) && (!hc->hw.bswapped)) {
+ bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
+ bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b2;
+ } else {
+ bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
+ bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b1;
+ }
+
+ if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
+ z1t = &bz->za[MAX_B_FRAMES].z1;
+ z2t = z1t + 1;
+ if (bch->debug & DEBUG_HW_BCHANNEL)
+ printk(KERN_DEBUG "hfcpci_fill_fifo_trans ch(%x) "
+ "cnt(%d) z1(%x) z2(%x)\n", bch->nr, count,
+ le16_to_cpu(*z1t), le16_to_cpu(*z2t));
+ fcnt = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
+ if (fcnt <= 0)
+ fcnt += B_FIFO_SIZE;
+ /* fcnt contains available bytes in fifo */
+ fcnt = B_FIFO_SIZE - fcnt;
+ /* remaining bytes to send (bytes in fifo) */
+next_t_frame:
+ count = bch->tx_skb->len - bch->tx_idx;
+ /* maximum fill shall be HFCPCI_BTRANS_MAX */
+ if (count > HFCPCI_BTRANS_MAX - fcnt)
+ count = HFCPCI_BTRANS_MAX - fcnt;
+ if (count <= 0)
+ return;
+ /* data is suitable for fifo */
+ new_z1 = le16_to_cpu(*z1t) + count;
+ /* new buffer Position */
+ if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
+ new_z1 -= B_FIFO_SIZE; /* buffer wrap */
+ src = bch->tx_skb->data + bch->tx_idx;
+ /* source pointer */
+ dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
+ maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
+ /* end of fifo */
+ if (bch->debug & DEBUG_HW_BFIFO)
+ printk(KERN_DEBUG "hfcpci_FFt fcnt(%d) "
+ "maxl(%d) nz1(%x) dst(%p)\n",
+ fcnt, maxlen, new_z1, dst);
+ fcnt += count;
+ bch->tx_idx += count;
+ if (maxlen > count)
+ maxlen = count; /* limit size */
+ memcpy(dst, src, maxlen); /* first copy */
+ count -= maxlen; /* remaining bytes */
+ if (count) {
+ dst = bdata; /* start of buffer */
+ src += maxlen; /* new position */
+ memcpy(dst, src, count);
+ }
+ *z1t = cpu_to_le16(new_z1); /* now send data */
+ if (bch->tx_idx < bch->tx_skb->len)
+ return;
+		/* send confirm on transparent mode, just free the skb on HDLC */
+ if (test_bit(FLG_TRANSPARENT, &bch->Flags))
+ confirm_Bsend(bch);
+ dev_kfree_skb(bch->tx_skb);
+ if (get_next_bframe(bch))
+ goto next_t_frame;
+ return;
+ }
+ if (bch->debug & DEBUG_HW_BCHANNEL)
+ printk(KERN_DEBUG
+ "%s: ch(%x) f1(%d) f2(%d) z1(f1)(%x)\n",
+ __func__, bch->nr, bz->f1, bz->f2,
+ bz->za[bz->f1].z1);
+ fcnt = bz->f1 - bz->f2; /* frame count actually buffered */
+ if (fcnt < 0)
+ fcnt += (MAX_B_FRAMES + 1); /* if wrap around */
+ if (fcnt > (MAX_B_FRAMES - 1)) {
+ if (bch->debug & DEBUG_HW_BCHANNEL)
+ printk(KERN_DEBUG
+			       "hfcpci_fill_Bfifo more than 14 frames\n");
+ return;
+ }
+ /* now determine free bytes in FIFO buffer */
+ maxlen = le16_to_cpu(bz->za[bz->f2].z2) -
+ le16_to_cpu(bz->za[bz->f1].z1) - 1;
+ if (maxlen <= 0)
+		maxlen += B_FIFO_SIZE;	/* maxlen now contains free bytes */
+
+ if (bch->debug & DEBUG_HW_BCHANNEL)
+ printk(KERN_DEBUG "hfcpci_fill_fifo ch(%x) count(%d/%d)\n",
+ bch->nr, count, maxlen);
+
+ if (maxlen < count) {
+ if (bch->debug & DEBUG_HW_BCHANNEL)
+ printk(KERN_DEBUG "hfcpci_fill_fifo no fifo mem\n");
+ return;
+ }
+ new_z1 = le16_to_cpu(bz->za[bz->f1].z1) + count;
+ /* new buffer Position */
+ if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
+ new_z1 -= B_FIFO_SIZE; /* buffer wrap */
+
+ new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
+ src = bch->tx_skb->data + bch->tx_idx; /* source pointer */
+ dst = bdata + (le16_to_cpu(bz->za[bz->f1].z1) - B_SUB_VAL);
+ maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(bz->za[bz->f1].z1);
+ /* end fifo */
+ if (maxlen > count)
+ maxlen = count; /* limit size */
+ memcpy(dst, src, maxlen); /* first copy */
+
+ count -= maxlen; /* remaining bytes */
+ if (count) {
+ dst = bdata; /* start of buffer */
+ src += maxlen; /* new position */
+ memcpy(dst, src, count);
+ }
+ bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */
+ bz->f1 = new_f1; /* next frame */
+ dev_kfree_skb(bch->tx_skb);
+ get_next_bframe(bch);
+}
+
+
+
+/*
+ * handle L1 state changes TE
+ */
+
+static void
+ph_state_te(struct dchannel *dch)
+{
+ if (dch->debug)
+ printk(KERN_DEBUG "%s: TE newstate %x\n",
+ __func__, dch->state);
+ switch (dch->state) {
+ case 0:
+ l1_event(dch->l1, HW_RESET_IND);
+ break;
+ case 3:
+ l1_event(dch->l1, HW_DEACT_IND);
+ break;
+ case 5:
+ case 8:
+ l1_event(dch->l1, ANYSIGNAL);
+ break;
+ case 6:
+ l1_event(dch->l1, INFO2);
+ break;
+ case 7:
+ l1_event(dch->l1, INFO4_P8);
+ break;
+ }
+}
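+
+/*
+ * The numeric values above are the chip's S/T layer 1 state machine
+ * states, which presumably correspond to the I.430 TE states:
+ * 3 = F3 deactivated, 5 = F5 identifying input, 6 = F6 synchronized
+ * (INFO2), 7 = F7 activated (INFO4), 8 = F8 lost framing; state 0 is
+ * the reset state.
+ */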
+
+/*
+ * handle L1 state changes NT
+ */
+
+static void
+handle_nt_timer3(struct dchannel *dch) {
+ struct hfc_pci *hc = dch->hw;
+
+ test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
+ hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
+ Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
+ hc->hw.nt_timer = 0;
+ test_and_set_bit(FLG_ACTIVE, &dch->Flags);
+ if (test_bit(HFC_CFG_MASTER, &hc->cfg))
+ hc->hw.mst_m |= HFCPCI_MASTER;
+ Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
+ _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
+ MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
+}
+
+static void
+ph_state_nt(struct dchannel *dch)
+{
+ struct hfc_pci *hc = dch->hw;
+
+ if (dch->debug)
+ printk(KERN_DEBUG "%s: NT newstate %x\n",
+ __func__, dch->state);
+ switch (dch->state) {
+ case 2:
+ if (hc->hw.nt_timer < 0) {
+ hc->hw.nt_timer = 0;
+ test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
+ test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
+ hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
+ Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
+ /* Clear already pending ints */
+ if (Read_hfc(hc, HFCPCI_INT_S1));
+ Write_hfc(hc, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
+ udelay(10);
+ Write_hfc(hc, HFCPCI_STATES, 4);
+ dch->state = 4;
+ } else if (hc->hw.nt_timer == 0) {
+ hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
+ Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
+ hc->hw.nt_timer = NT_T1_COUNT;
+ hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
+ hc->hw.ctmt |= HFCPCI_TIM3_125;
+ Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
+ HFCPCI_CLTIMER);
+ test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
+ test_and_set_bit(FLG_HFC_TIMER_T1, &dch->Flags);
+ /* allow G2 -> G3 transition */
+ Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
+ } else {
+ Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
+ }
+ break;
+ case 1:
+ hc->hw.nt_timer = 0;
+ test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
+ test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
+ hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
+ Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
+ test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
+ hc->hw.mst_m &= ~HFCPCI_MASTER;
+ Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
+ test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
+ _queue_data(&dch->dev.D, PH_DEACTIVATE_IND,
+ MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
+ break;
+ case 4:
+ hc->hw.nt_timer = 0;
+ test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
+ test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
+ hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
+ Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
+ break;
+ case 3:
+ if (!test_and_set_bit(FLG_HFC_TIMER_T3, &dch->Flags)) {
+ if (!test_and_clear_bit(FLG_L2_ACTIVATED,
+ &dch->Flags)) {
+ handle_nt_timer3(dch);
+ break;
+ }
+ test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
+ hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
+ Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
+ hc->hw.nt_timer = NT_T3_COUNT;
+ hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
+ hc->hw.ctmt |= HFCPCI_TIM3_125;
+ Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
+ HFCPCI_CLTIMER);
+ }
+ break;
+ }
+}
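+
+/*
+ * For NT mode the states above presumably follow the I.430 G states:
+ * 1 = G1 deactivated, 2 = G2 pending activation (this is where the
+ * T1 supervision timer is started), 3 = G3 activated, 4 = G4 pending
+ * deactivation.
+ */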
+
+static void
+ph_state(struct dchannel *dch)
+{
+ struct hfc_pci *hc = dch->hw;
+
+ if (hc->hw.protocol == ISDN_P_NT_S0) {
+ if (test_bit(FLG_HFC_TIMER_T3, &dch->Flags) &&
+ hc->hw.nt_timer < 0)
+ handle_nt_timer3(dch);
+ else
+ ph_state_nt(dch);
+ } else
+ ph_state_te(dch);
+}
+
+/*
+ * Layer 1 callback function
+ */
+static int
+hfc_l1callback(struct dchannel *dch, u_int cmd)
+{
+ struct hfc_pci *hc = dch->hw;
+
+ switch (cmd) {
+ case INFO3_P8:
+ case INFO3_P10:
+ if (test_bit(HFC_CFG_MASTER, &hc->cfg))
+ hc->hw.mst_m |= HFCPCI_MASTER;
+ Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
+ break;
+ case HW_RESET_REQ:
+ Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3);
+ /* HFC ST 3 */
+ udelay(6);
+ Write_hfc(hc, HFCPCI_STATES, 3); /* HFC ST 2 */
+ if (test_bit(HFC_CFG_MASTER, &hc->cfg))
+ hc->hw.mst_m |= HFCPCI_MASTER;
+ Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
+ Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
+ HFCPCI_DO_ACTION);
+ l1_event(dch->l1, HW_POWERUP_IND);
+ break;
+ case HW_DEACT_REQ:
+ hc->hw.mst_m &= ~HFCPCI_MASTER;
+ Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
+ skb_queue_purge(&dch->squeue);
+ if (dch->tx_skb) {
+ dev_kfree_skb(dch->tx_skb);
+ dch->tx_skb = NULL;
+ }
+ dch->tx_idx = 0;
+ if (dch->rx_skb) {
+ dev_kfree_skb(dch->rx_skb);
+ dch->rx_skb = NULL;
+ }
+ test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+ if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
+ del_timer(&dch->timer);
+ break;
+ case HW_POWERUP_REQ:
+ Write_hfc(hc, HFCPCI_STATES, HFCPCI_DO_ACTION);
+ break;
+ case PH_ACTIVATE_IND:
+ test_and_set_bit(FLG_ACTIVE, &dch->Flags);
+ _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
+ GFP_ATOMIC);
+ break;
+ case PH_DEACTIVATE_IND:
+ test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
+ _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
+ GFP_ATOMIC);
+ break;
+ default:
+ if (dch->debug & DEBUG_HW)
+ printk(KERN_DEBUG "%s: unknown command %x\n",
+ __func__, cmd);
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Interrupt handler
+ */
+static inline void
+tx_birq(struct bchannel *bch)
+{
+ if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
+ hfcpci_fill_fifo(bch);
+ else {
+ if (bch->tx_skb)
+ dev_kfree_skb(bch->tx_skb);
+ if (get_next_bframe(bch))
+ hfcpci_fill_fifo(bch);
+ }
+}
+
+static inline void
+tx_dirq(struct dchannel *dch)
+{
+ if (dch->tx_skb && dch->tx_idx < dch->tx_skb->len)
+ hfcpci_fill_dfifo(dch->hw);
+ else {
+ if (dch->tx_skb)
+ dev_kfree_skb(dch->tx_skb);
+ if (get_next_dframe(dch))
+ hfcpci_fill_dfifo(dch->hw);
+ }
+}
+
+static irqreturn_t
+hfcpci_int(int intno, void *dev_id)
+{
+ struct hfc_pci *hc = dev_id;
+ u_char exval;
+ struct bchannel *bch;
+ u_char val, stat;
+
+ spin_lock(&hc->lock);
+ if (!(hc->hw.int_m2 & 0x08)) {
+ spin_unlock(&hc->lock);
+ return IRQ_NONE; /* not initialised */
+ }
+ stat = Read_hfc(hc, HFCPCI_STATUS);
+ if (HFCPCI_ANYINT & stat) {
+ val = Read_hfc(hc, HFCPCI_INT_S1);
+ if (hc->dch.debug & DEBUG_HW_DCHANNEL)
+ printk(KERN_DEBUG
+ "HFC-PCI: stat(%02x) s1(%02x)\n", stat, val);
+ } else {
+ /* shared */
+ spin_unlock(&hc->lock);
+ return IRQ_NONE;
+ }
+ hc->irqcnt++;
+
+ if (hc->dch.debug & DEBUG_HW_DCHANNEL)
+ printk(KERN_DEBUG "HFC-PCI irq %x\n", val);
+ val &= hc->hw.int_m1;
+ if (val & 0x40) { /* state machine irq */
+ exval = Read_hfc(hc, HFCPCI_STATES) & 0xf;
+ if (hc->dch.debug & DEBUG_HW_DCHANNEL)
+ printk(KERN_DEBUG "ph_state chg %d->%d\n",
+ hc->dch.state, exval);
+ hc->dch.state = exval;
+ schedule_event(&hc->dch, FLG_PHCHANGE);
+ val &= ~0x40;
+ }
+ if (val & 0x80) { /* timer irq */
+ if (hc->hw.protocol == ISDN_P_NT_S0) {
+ if ((--hc->hw.nt_timer) < 0)
+ schedule_event(&hc->dch, FLG_PHCHANGE);
+ }
+ val &= ~0x80;
+ Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER);
+ }
+ if (val & 0x08) {
+ bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
+ if (bch)
+ main_rec_hfcpci(bch);
+ else if (hc->dch.debug)
+ printk(KERN_DEBUG "hfcpci spurious 0x08 IRQ\n");
+ }
+ if (val & 0x10) {
+ bch = Sel_BCS(hc, 2);
+ if (bch)
+ main_rec_hfcpci(bch);
+ else if (hc->dch.debug)
+ printk(KERN_DEBUG "hfcpci spurious 0x10 IRQ\n");
+ }
+ if (val & 0x01) {
+ bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
+ if (bch)
+ tx_birq(bch);
+ else if (hc->dch.debug)
+ printk(KERN_DEBUG "hfcpci spurious 0x01 IRQ\n");
+ }
+ if (val & 0x02) {
+ bch = Sel_BCS(hc, 2);
+ if (bch)
+ tx_birq(bch);
+ else if (hc->dch.debug)
+ printk(KERN_DEBUG "hfcpci spurious 0x02 IRQ\n");
+ }
+ if (val & 0x20)
+ receive_dmsg(hc);
+ if (val & 0x04) { /* dframe transmitted */
+ if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags))
+ del_timer(&hc->dch.timer);
+ tx_dirq(&hc->dch);
+ }
+ spin_unlock(&hc->lock);
+ return IRQ_HANDLED;
+}
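+
+/*
+ * Interrupt status bits handled above (after masking with int_m1):
+ *   0x01 / 0x02   B1 / B2 transmit done
+ *   0x04          D-channel frame transmitted
+ *   0x08 / 0x10   B1 / B2 receive
+ *   0x20          D-channel receive
+ *   0x40          layer 1 state change
+ *   0x80          timer
+ * B1 events are redirected to B2 when the channels are swapped
+ * (hc->hw.bswapped).
+ */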
+
+/*
+ * timer callback for D-channel busy resolution; currently it does nothing
+ */
+static void
+hfcpci_dbusy_timer(struct hfc_pci *hc)
+{
+}
+
+/*
+ * activate/deactivate hardware for selected channels and mode
+ */
+static int
+mode_hfcpci(struct bchannel *bch, int bc, int protocol)
+{
+ struct hfc_pci *hc = bch->hw;
+ int fifo2;
+ u_char rx_slot = 0, tx_slot = 0, pcm_mode;
+
+ if (bch->debug & DEBUG_HW_BCHANNEL)
+ printk(KERN_DEBUG
+ "HFCPCI bchannel protocol %x-->%x ch %x-->%x\n",
+ bch->state, protocol, bch->nr, bc);
+
+ fifo2 = bc;
+ pcm_mode = (bc>>24) & 0xff;
+ if (pcm_mode) { /* PCM SLOT USE */
+ if (!test_bit(HFC_CFG_PCM, &hc->cfg))
+ printk(KERN_WARNING
+ "%s: pcm channel id without HFC_CFG_PCM\n",
+ __func__);
+ rx_slot = (bc>>8) & 0xff;
+ tx_slot = (bc>>16) & 0xff;
+ bc = bc & 0xff;
+ } else if (test_bit(HFC_CFG_PCM, &hc->cfg) &&
+ (protocol > ISDN_P_NONE))
+ printk(KERN_WARNING "%s: no pcm channel id but HFC_CFG_PCM\n",
+ __func__);
+ if (hc->chanlimit > 1) {
+ hc->hw.bswapped = 0; /* B1 and B2 normal mode */
+ hc->hw.sctrl_e &= ~0x80;
+ } else {
+ if (bc & 2) {
+ if (protocol != ISDN_P_NONE) {
+ hc->hw.bswapped = 1; /* B1 and B2 exchanged */
+ hc->hw.sctrl_e |= 0x80;
+ } else {
+ hc->hw.bswapped = 0; /* B1 and B2 normal mode */
+ hc->hw.sctrl_e &= ~0x80;
+ }
+ fifo2 = 1;
+ } else {
+ hc->hw.bswapped = 0; /* B1 and B2 normal mode */
+ hc->hw.sctrl_e &= ~0x80;
+ }
+ }
+ switch (protocol) {
+ case (-1): /* used for init */
+ bch->state = -1;
+ bch->nr = bc;
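+		/* fall through: continue like ISDN_P_NONE to disable the channel */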
+ case (ISDN_P_NONE):
+ if (bch->state == ISDN_P_NONE)
+ return 0;
+ if (bc & 2) {
+ hc->hw.sctrl &= ~SCTRL_B2_ENA;
+ hc->hw.sctrl_r &= ~SCTRL_B2_ENA;
+ } else {
+ hc->hw.sctrl &= ~SCTRL_B1_ENA;
+ hc->hw.sctrl_r &= ~SCTRL_B1_ENA;
+ }
+ if (fifo2 & 2) {
+ hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2;
+ hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS +
+ HFCPCI_INTS_B2REC);
+ } else {
+ hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1;
+ hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS +
+ HFCPCI_INTS_B1REC);
+ }
+#ifdef REVERSE_BITORDER
+ if (bch->nr & 2)
+ hc->hw.cirm &= 0x7f;
+ else
+ hc->hw.cirm &= 0xbf;
+#endif
+ bch->state = ISDN_P_NONE;
+ bch->nr = bc;
+ test_and_clear_bit(FLG_HDLC, &bch->Flags);
+ test_and_clear_bit(FLG_TRANSPARENT, &bch->Flags);
+ break;
+ case (ISDN_P_B_RAW):
+ bch->state = protocol;
+ bch->nr = bc;
+ hfcpci_clear_fifo_rx(hc, (fifo2 & 2)?1:0);
+ hfcpci_clear_fifo_tx(hc, (fifo2 & 2)?1:0);
+ if (bc & 2) {
+ hc->hw.sctrl |= SCTRL_B2_ENA;
+ hc->hw.sctrl_r |= SCTRL_B2_ENA;
+#ifdef REVERSE_BITORDER
+ hc->hw.cirm |= 0x80;
+#endif
+ } else {
+ hc->hw.sctrl |= SCTRL_B1_ENA;
+ hc->hw.sctrl_r |= SCTRL_B1_ENA;
+#ifdef REVERSE_BITORDER
+ hc->hw.cirm |= 0x40;
+#endif
+ }
+ if (fifo2 & 2) {
+ hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
+ hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
+ HFCPCI_INTS_B2REC);
+ hc->hw.ctmt |= 2;
+ hc->hw.conn &= ~0x18;
+ } else {
+ hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
+ hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
+ HFCPCI_INTS_B1REC);
+ hc->hw.ctmt |= 1;
+ hc->hw.conn &= ~0x03;
+ }
+ test_and_set_bit(FLG_TRANSPARENT, &bch->Flags);
+ break;
+ case (ISDN_P_B_HDLC):
+ bch->state = protocol;
+ bch->nr = bc;
+ hfcpci_clear_fifo_rx(hc, (fifo2 & 2)?1:0);
+ hfcpci_clear_fifo_tx(hc, (fifo2 & 2)?1:0);
+ if (bc & 2) {
+ hc->hw.sctrl |= SCTRL_B2_ENA;
+ hc->hw.sctrl_r |= SCTRL_B2_ENA;
+ } else {
+ hc->hw.sctrl |= SCTRL_B1_ENA;
+ hc->hw.sctrl_r |= SCTRL_B1_ENA;
+ }
+ if (fifo2 & 2) {
+ hc->hw.last_bfifo_cnt[1] = 0;
+ hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
+ hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
+ HFCPCI_INTS_B2REC);
+ hc->hw.ctmt &= ~2;
+ hc->hw.conn &= ~0x18;
+ } else {
+ hc->hw.last_bfifo_cnt[0] = 0;
+ hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
+ hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
+ HFCPCI_INTS_B1REC);
+ hc->hw.ctmt &= ~1;
+ hc->hw.conn &= ~0x03;
+ }
+ test_and_set_bit(FLG_HDLC, &bch->Flags);
+ break;
+ default:
+ printk(KERN_DEBUG "prot not known %x\n", protocol);
+ return -ENOPROTOOPT;
+ }
+ if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
+ if ((protocol == ISDN_P_NONE) ||
+ (protocol == -1)) { /* init case */
+ rx_slot = 0;
+ tx_slot = 0;
+ } else {
+ if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
+ rx_slot |= 0xC0;
+ tx_slot |= 0xC0;
+ } else {
+ rx_slot |= 0x80;
+ tx_slot |= 0x80;
+ }
+ }
+ if (bc & 2) {
+ hc->hw.conn &= 0xc7;
+ hc->hw.conn |= 0x08;
+ printk(KERN_DEBUG "%s: Write_hfc: B2_SSL 0x%x\n",
+ __func__, tx_slot);
+ printk(KERN_DEBUG "%s: Write_hfc: B2_RSL 0x%x\n",
+ __func__, rx_slot);
+ Write_hfc(hc, HFCPCI_B2_SSL, tx_slot);
+ Write_hfc(hc, HFCPCI_B2_RSL, rx_slot);
+ } else {
+ hc->hw.conn &= 0xf8;
+ hc->hw.conn |= 0x01;
+ printk(KERN_DEBUG "%s: Write_hfc: B1_SSL 0x%x\n",
+ __func__, tx_slot);
+ printk(KERN_DEBUG "%s: Write_hfc: B1_RSL 0x%x\n",
+ __func__, rx_slot);
+ Write_hfc(hc, HFCPCI_B1_SSL, tx_slot);
+ Write_hfc(hc, HFCPCI_B1_RSL, rx_slot);
+ }
+ }
+ Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
+ Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
+ Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
+ Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
+ Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
+ Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
+ Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
+#ifdef REVERSE_BITORDER
+ Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
+#endif
+ return 0;
+}
+
+static int
+set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan)
+{
+ struct hfc_pci *hc = bch->hw;
+
+ if (bch->debug & DEBUG_HW_BCHANNEL)
+ printk(KERN_DEBUG
+ "HFCPCI bchannel test rx protocol %x-->%x ch %x-->%x\n",
+ bch->state, protocol, bch->nr, chan);
+ if (bch->nr != chan) {
+ printk(KERN_DEBUG
+ "HFCPCI rxtest wrong channel parameter %x/%x\n",
+ bch->nr, chan);
+ return -EINVAL;
+ }
+ switch (protocol) {
+ case (ISDN_P_B_RAW):
+ bch->state = protocol;
+ hfcpci_clear_fifo_rx(hc, (chan & 2)?1:0);
+ if (chan & 2) {
+ hc->hw.sctrl_r |= SCTRL_B2_ENA;
+ hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
+ hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
+ hc->hw.ctmt |= 2;
+ hc->hw.conn &= ~0x18;
+#ifdef REVERSE_BITORDER
+ hc->hw.cirm |= 0x80;
+#endif
+ } else {
+ hc->hw.sctrl_r |= SCTRL_B1_ENA;
+ hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
+ hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
+ hc->hw.ctmt |= 1;
+ hc->hw.conn &= ~0x03;
+#ifdef REVERSE_BITORDER
+ hc->hw.cirm |= 0x40;
+#endif
+ }
+ break;
+ case (ISDN_P_B_HDLC):
+ bch->state = protocol;
+ hfcpci_clear_fifo_rx(hc, (chan & 2)?1:0);
+ if (chan & 2) {
+ hc->hw.sctrl_r |= SCTRL_B2_ENA;
+ hc->hw.last_bfifo_cnt[1] = 0;
+ hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
+ hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
+ hc->hw.ctmt &= ~2;
+ hc->hw.conn &= ~0x18;
+ } else {
+ hc->hw.sctrl_r |= SCTRL_B1_ENA;
+ hc->hw.last_bfifo_cnt[0] = 0;
+ hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
+ hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
+ hc->hw.ctmt &= ~1;
+ hc->hw.conn &= ~0x03;
+ }
+ break;
+ default:
+ printk(KERN_DEBUG "prot not known %x\n", protocol);
+ return -ENOPROTOOPT;
+ }
+ Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
+ Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
+ Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
+ Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
+ Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
+#ifdef REVERSE_BITORDER
+ Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
+#endif
+ return 0;
+}
+
+static void
+deactivate_bchannel(struct bchannel *bch)
+{
+ struct hfc_pci *hc = bch->hw;
+ u_long flags;
+
+ spin_lock_irqsave(&hc->lock, flags);
+ if (test_and_clear_bit(FLG_TX_NEXT, &bch->Flags)) {
+ dev_kfree_skb(bch->next_skb);
+ bch->next_skb = NULL;
+ }
+ if (bch->tx_skb) {
+ dev_kfree_skb(bch->tx_skb);
+ bch->tx_skb = NULL;
+ }
+ bch->tx_idx = 0;
+ if (bch->rx_skb) {
+ dev_kfree_skb(bch->rx_skb);
+ bch->rx_skb = NULL;
+ }
+ mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
+ test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
+ test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
+ spin_unlock_irqrestore(&hc->lock, flags);
+}
+
+/*
+ * Layer 1 B-channel hardware access
+ */
+static int
+channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
+{
+ int ret = 0;
+
+ switch (cq->op) {
+ case MISDN_CTRL_GETOP:
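+ /* no B-channel control operations are supported */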
+ cq->op = 0;
+ break;
+ default:
+ printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+static int
+hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
+{
+ struct bchannel *bch = container_of(ch, struct bchannel, ch);
+ struct hfc_pci *hc = bch->hw;
+ int ret = -EINVAL;
+ u_long flags;
+
+ if (bch->debug & DEBUG_HW)
+ printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg);
+ switch (cmd) {
+ case HW_TESTRX_RAW:
+ spin_lock_irqsave(&hc->lock, flags);
+ ret = set_hfcpci_rxtest(bch, ISDN_P_B_RAW, (int)(long)arg);
+ spin_unlock_irqrestore(&hc->lock, flags);
+ break;
+ case HW_TESTRX_HDLC:
+ spin_lock_irqsave(&hc->lock, flags);
+ ret = set_hfcpci_rxtest(bch, ISDN_P_B_HDLC, (int)(long)arg);
+ spin_unlock_irqrestore(&hc->lock, flags);
+ break;
+ case HW_TESTRX_OFF:
+ spin_lock_irqsave(&hc->lock, flags);
+ mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
+ spin_unlock_irqrestore(&hc->lock, flags);
+ ret = 0;
+ break;
+ case CLOSE_CHANNEL:
+ test_and_clear_bit(FLG_OPEN, &bch->Flags);
+ if (test_bit(FLG_ACTIVE, &bch->Flags))
+ deactivate_bchannel(bch);
+ ch->protocol = ISDN_P_NONE;
+ ch->peer = NULL;
+ module_put(THIS_MODULE);
+ ret = 0;
+ break;
+ case CONTROL_CHANNEL:
+ ret = channel_bctrl(bch, arg);
+ break;
+ default:
+ printk(KERN_WARNING "%s: unknown prim(%x)\n",
+ __func__, cmd);
+ }
+ return ret;
+}
+
+/*
+ * Layer2 -> Layer 1 Dchannel data
+ */
+static int
+hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
+{
+ struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
+ struct dchannel *dch = container_of(dev, struct dchannel, dev);
+ struct hfc_pci *hc = dch->hw;
+ int ret = -EINVAL;
+ struct mISDNhead *hh = mISDN_HEAD_P(skb);
+ unsigned int id;
+ u_long flags;
+
+ switch (hh->prim) {
+ case PH_DATA_REQ:
+ spin_lock_irqsave(&hc->lock, flags);
+ ret = dchannel_senddata(dch, skb);
+ if (ret > 0) { /* direct TX */
+ id = hh->id; /* skb can be freed */
+ hfcpci_fill_dfifo(dch->hw);
+ ret = 0;
+ spin_unlock_irqrestore(&hc->lock, flags);
+ queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
+ } else
+ spin_unlock_irqrestore(&hc->lock, flags);
+ return ret;
+ case PH_ACTIVATE_REQ:
+ spin_lock_irqsave(&hc->lock, flags);
+ if (hc->hw.protocol == ISDN_P_NT_S0) {
+ ret = 0;
+ if (test_bit(HFC_CFG_MASTER, &hc->cfg))
+ hc->hw.mst_m |= HFCPCI_MASTER;
+ Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
+ if (test_bit(FLG_ACTIVE, &dch->Flags)) {
+ spin_unlock_irqrestore(&hc->lock, flags);
+ _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
+ MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
+ break;
+ }
+ test_and_set_bit(FLG_L2_ACTIVATED, &dch->Flags);
+ Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
+ HFCPCI_DO_ACTION | 1);
+ } else
+ ret = l1_event(dch->l1, hh->prim);
+ spin_unlock_irqrestore(&hc->lock, flags);
+ break;
+ case PH_DEACTIVATE_REQ:
+ test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
+ spin_lock_irqsave(&hc->lock, flags);
+ if (hc->hw.protocol == ISDN_P_NT_S0) {
+ /* prepare deactivation */
+ Write_hfc(hc, HFCPCI_STATES, 0x40);
+ skb_queue_purge(&dch->squeue);
+ if (dch->tx_skb) {
+ dev_kfree_skb(dch->tx_skb);
+ dch->tx_skb = NULL;
+ }
+ dch->tx_idx = 0;
+ if (dch->rx_skb) {
+ dev_kfree_skb(dch->rx_skb);
+ dch->rx_skb = NULL;
+ }
+ test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+ if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
+ del_timer(&dch->timer);
+#ifdef FIXME
+ if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
+ dchannel_sched_event(&hc->dch, D_CLEARBUSY);
+#endif
+ hc->hw.mst_m &= ~HFCPCI_MASTER;
+ Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
+ ret = 0;
+ } else {
+ ret = l1_event(dch->l1, hh->prim);
+ }
+ spin_unlock_irqrestore(&hc->lock, flags);
+ break;
+ }
+ if (!ret)
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+/*
+ * Layer2 -> Layer 1 Bchannel data
+ */
+static int
+hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
+{
+ struct bchannel *bch = container_of(ch, struct bchannel, ch);
+ struct hfc_pci *hc = bch->hw;
+ int ret = -EINVAL;
+ struct mISDNhead *hh = mISDN_HEAD_P(skb);
+ unsigned int id;
+ u_long flags;
+
+ switch (hh->prim) {
+ case PH_DATA_REQ:
+ spin_lock_irqsave(&hc->lock, flags);
+ ret = bchannel_senddata(bch, skb);
+ if (ret > 0) { /* direct TX */
+ id = hh->id; /* skb can be freed */
+ hfcpci_fill_fifo(bch);
+ ret = 0;
+ spin_unlock_irqrestore(&hc->lock, flags);
+ if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
+ queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
+ } else
+ spin_unlock_irqrestore(&hc->lock, flags);
+ return ret;
+ case PH_ACTIVATE_REQ:
+ spin_lock_irqsave(&hc->lock, flags);
+ if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
+ ret = mode_hfcpci(bch, bch->nr, ch->protocol);
+ else
+ ret = 0;
+ spin_unlock_irqrestore(&hc->lock, flags);
+ if (!ret)
+ _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
+ NULL, GFP_KERNEL);
+ break;
+ case PH_DEACTIVATE_REQ:
+ deactivate_bchannel(bch);
+ _queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
+ NULL, GFP_KERNEL);
+ ret = 0;
+ break;
+ }
+ if (!ret)
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+/*
+ * called for card init message
+ */
+
+void
+inithfcpci(struct hfc_pci *hc)
+{
+ printk(KERN_DEBUG "inithfcpci: entered\n");
+ hc->dch.timer.function = (void *) hfcpci_dbusy_timer;
+ hc->dch.timer.data = (long) &hc->dch;
+ init_timer(&hc->dch.timer);
+ hc->chanlimit = 2;
+ mode_hfcpci(&hc->bch[0], 1, -1);
+ mode_hfcpci(&hc->bch[1], 2, -1);
+}
+
+
+static int
+init_card(struct hfc_pci *hc)
+{
+ int cnt = 3;
+ u_long flags;
+
+ printk(KERN_DEBUG "init_card: entered\n");
+
+
+ spin_lock_irqsave(&hc->lock, flags);
+ disable_hwirq(hc);
+ spin_unlock_irqrestore(&hc->lock, flags);
+ if (request_irq(hc->irq, hfcpci_int, IRQF_SHARED, "HFC PCI", hc)) {
+ printk(KERN_WARNING
+ "mISDN: couldn't get interrupt %d\n", hc->irq);
+ return -EIO;
+ }
+ spin_lock_irqsave(&hc->lock, flags);
+ reset_hfcpci(hc);
+ while (cnt) {
+ inithfcpci(hc);
+ /*
+ * Finally enable IRQ output; this is only allowed if an
+ * IRQ handler is already established for this HFC, so
+ * don't do it earlier.
+ */
+ enable_hwirq(hc);
+ spin_unlock_irqrestore(&hc->lock, flags);
+ /* Timeout 80ms */
+ current->state = TASK_UNINTERRUPTIBLE;
+ schedule_timeout((80*HZ)/1000);
+ printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
+ hc->irq, hc->irqcnt);
+ /* now switch timer interrupt off */
+ spin_lock_irqsave(&hc->lock, flags);
+ hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
+ Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
+ /* reinit mode reg */
+ Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
+ if (!hc->irqcnt) {
+ printk(KERN_WARNING
+ "HFC PCI: IRQ(%d) getting no interrupts "
+ "during init %d\n", hc->irq, 4 - cnt);
+ if (cnt == 1) {
+ spin_unlock_irqrestore(&hc->lock, flags);
+ return -EIO;
+ } else {
+ reset_hfcpci(hc);
+ cnt--;
+ }
+ } else {
+ spin_unlock_irqrestore(&hc->lock, flags);
+ hc->initdone = 1;
+ return 0;
+ }
+ }
+ disable_hwirq(hc);
+ spin_unlock_irqrestore(&hc->lock, flags);
+ free_irq(hc->irq, hc);
+ return -EIO;
+}
+
+static int
+channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq)
+{
+ int ret = 0;
+ u_char slot;
+
+ switch (cq->op) {
+ case MISDN_CTRL_GETOP:
+ cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
+ MISDN_CTRL_DISCONNECT;
+ break;
+ case MISDN_CTRL_LOOP:
+ /* cq->channel: 0 disables the loop, 1 = loop B1, 2 = loop B2 */
+ if (cq->channel < 0 || cq->channel > 2) {
+ ret = -EINVAL;
+ break;
+ }
+ if (cq->channel & 1) {
+ if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
+ slot = 0xC0;
+ else
+ slot = 0x80;
+ printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
+ __func__, slot);
+ Write_hfc(hc, HFCPCI_B1_SSL, slot);
+ Write_hfc(hc, HFCPCI_B1_RSL, slot);
+ hc->hw.conn = (hc->hw.conn & ~7) | 6;
+ Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
+ }
+ if (cq->channel & 2) {
+ if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
+ slot = 0xC1;
+ else
+ slot = 0x81;
+ printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
+ __func__, slot);
+ Write_hfc(hc, HFCPCI_B2_SSL, slot);
+ Write_hfc(hc, HFCPCI_B2_RSL, slot);
+ hc->hw.conn = (hc->hw.conn & ~0x38) | 0x30;
+ Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
+ }
+ if (cq->channel & 3)
+ hc->hw.trm |= 0x80; /* enable IOM-loop */
+ else {
+ hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
+ Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
+ hc->hw.trm &= 0x7f; /* disable IOM-loop */
+ }
+ Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
+ break;
+ case MISDN_CTRL_CONNECT:
+ if (cq->channel == cq->p1) {
+ ret = -EINVAL;
+ break;
+ }
+ if (cq->channel < 1 || cq->channel > 2 ||
+ cq->p1 < 1 || cq->p1 > 2) {
+ ret = -EINVAL;
+ break;
+ }
+ if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
+ slot = 0xC0;
+ else
+ slot = 0x80;
+ printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
+ __func__, slot);
+ Write_hfc(hc, HFCPCI_B1_SSL, slot);
+ Write_hfc(hc, HFCPCI_B2_RSL, slot);
+ if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
+ slot = 0xC1;
+ else
+ slot = 0x81;
+ printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
+ __func__, slot);
+ Write_hfc(hc, HFCPCI_B2_SSL, slot);
+ Write_hfc(hc, HFCPCI_B1_RSL, slot);
+ hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x36;
+ Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
+ hc->hw.trm |= 0x80;
+ Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
+ break;
+ case MISDN_CTRL_DISCONNECT:
+ hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
+ Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
+ hc->hw.trm &= 0x7f; /* disable IOM-loop */
+ break;
+ default:
+ printk(KERN_WARNING "%s: unknown Op %x\n",
+ __func__, cq->op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int
+open_dchannel(struct hfc_pci *hc, struct mISDNchannel *ch,
+ struct channel_req *rq)
+{
+ int err = 0;
+
+ if (debug & DEBUG_HW_OPEN)
+ printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__,
+ hc->dch.dev.id, __builtin_return_address(0));
+ if (rq->protocol == ISDN_P_NONE)
+ return -EINVAL;
+ if (!hc->initdone) {
+ if (rq->protocol == ISDN_P_TE_S0) {
+ err = create_l1(&hc->dch, hfc_l1callback);
+ if (err)
+ return err;
+ }
+ hc->hw.protocol = rq->protocol;
+ ch->protocol = rq->protocol;
+ err = init_card(hc);
+ if (err)
+ return err;
+ } else {
+ if (rq->protocol != ch->protocol) {
+ if (hc->hw.protocol == ISDN_P_TE_S0)
+ l1_event(hc->dch.l1, CLOSE_CHANNEL);
+ hc->hw.protocol = rq->protocol;
+ ch->protocol = rq->protocol;
+ hfcpci_setmode(hc);
+ }
+ }
+
+ if (((ch->protocol == ISDN_P_NT_S0) && (hc->dch.state == 3)) ||
+ ((ch->protocol == ISDN_P_TE_S0) && (hc->dch.state == 7))) {
+ _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
+ 0, NULL, GFP_KERNEL);
+ }
+ rq->ch = ch;
+ if (!try_module_get(THIS_MODULE))
+ printk(KERN_WARNING "%s:cannot get module\n", __func__);
+ return 0;
+}
+
+static int
+open_bchannel(struct hfc_pci *hc, struct channel_req *rq)
+{
+ struct bchannel *bch;
+
+ if (rq->adr.channel > 2)
+ return -EINVAL;
+ if (rq->protocol == ISDN_P_NONE)
+ return -EINVAL;
+ bch = &hc->bch[rq->adr.channel - 1];
+ if (test_and_set_bit(FLG_OPEN, &bch->Flags))
+ return -EBUSY; /* b-channel can only be opened once */
+ bch->ch.protocol = rq->protocol;
+ rq->ch = &bch->ch; /* TODO: E-channel */
+ if (!try_module_get(THIS_MODULE))
+ printk(KERN_WARNING "%s:cannot get module\n", __func__);
+ return 0;
+}
+
+/*
+ * device control function
+ */
+static int
+hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
+{
+ struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
+ struct dchannel *dch = container_of(dev, struct dchannel, dev);
+ struct hfc_pci *hc = dch->hw;
+ struct channel_req *rq;
+ int err = 0;
+
+ if (dch->debug & DEBUG_HW)
+ printk(KERN_DEBUG "%s: cmd:%x %p\n",
+ __func__, cmd, arg);
+ switch (cmd) {
+ case OPEN_CHANNEL:
+ rq = arg;
+ if (rq->adr.channel == 0)
+ err = open_dchannel(hc, ch, rq);
+ else
+ err = open_bchannel(hc, rq);
+ break;
+ case CLOSE_CHANNEL:
+ if (debug & DEBUG_HW_OPEN)
+ printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
+ __func__, hc->dch.dev.id,
+ __builtin_return_address(0));
+ module_put(THIS_MODULE);
+ break;
+ case CONTROL_CHANNEL:
+ err = channel_ctrl(hc, arg);
+ break;
+ default:
+ if (dch->debug & DEBUG_HW)
+ printk(KERN_DEBUG "%s: unknown command %x\n",
+ __func__, cmd);
+ return -EINVAL;
+ }
+ return err;
+}
+
+static int
+setup_hw(struct hfc_pci *hc)
+{
+ void *buffer;
+
+ printk(KERN_INFO "mISDN: HFC-PCI driver %s\n", hfcpci_revision);
+ hc->hw.cirm = 0;
+ hc->dch.state = 0;
+ pci_set_master(hc->pdev);
+ if (!hc->irq) {
+ printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
+ return 1;
+ }
+ hc->hw.pci_io = (char *)(ulong)hc->pdev->resource[1].start;
+
+ if (!hc->hw.pci_io) {
+ printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
+ return 1;
+ }
+ /* Allocate memory for FIFOS */
+ /* the memory needs to be on a 32k boundary within the first 4G */
+ pci_set_dma_mask(hc->pdev, 0xFFFF8000);
+ buffer = pci_alloc_consistent(hc->pdev, 0x8000, &hc->hw.dmahandle);
+ /* We silently assume the address is okay if nonzero */
+ if (!buffer) {
+ printk(KERN_WARNING
+ "HFC-PCI: Error allocating memory for FIFO!\n");
+ return 1;
+ }
+ hc->hw.fifos = buffer;
+ pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
+ hc->hw.pci_io = ioremap((ulong) hc->hw.pci_io, 256);
+ printk(KERN_INFO
+ "HFC-PCI: defined at mem %#lx fifo %#lx(%#lx) IRQ %d HZ %d\n",
+ (u_long) hc->hw.pci_io, (u_long) hc->hw.fifos,
+ (u_long) virt_to_bus(hc->hw.fifos),
+ hc->irq, HZ);
+ /* enable memory mapped ports, disable busmaster */
+ pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
+ hc->hw.int_m2 = 0;
+ disable_hwirq(hc);
+ hc->hw.int_m1 = 0;
+ Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
+ /* At this point the needed PCI config is done */
+ /* fifos are still not enabled */
+ hc->hw.timer.function = (void *) hfcpci_Timer;
+ hc->hw.timer.data = (long) hc;
+ init_timer(&hc->hw.timer);
+ /* default PCM master */
+ test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
+ return 0;
+}
+
+static void
+release_card(struct hfc_pci *hc) {
+ u_long flags;
+
+ spin_lock_irqsave(&hc->lock, flags);
+ hc->hw.int_m2 = 0; /* interrupt output off ! */
+ disable_hwirq(hc);
+ mode_hfcpci(&hc->bch[0], 1, ISDN_P_NONE);
+ mode_hfcpci(&hc->bch[1], 2, ISDN_P_NONE);
+ if (hc->dch.timer.function != NULL) {
+ del_timer(&hc->dch.timer);
+ hc->dch.timer.function = NULL;
+ }
+ spin_unlock_irqrestore(&hc->lock, flags);
+ if (hc->hw.protocol == ISDN_P_TE_S0)
+ l1_event(hc->dch.l1, CLOSE_CHANNEL);
+ if (hc->initdone)
+ free_irq(hc->irq, hc);
+ release_io_hfcpci(hc); /* must release after free_irq! */
+ mISDN_unregister_device(&hc->dch.dev);
+ mISDN_freebchannel(&hc->bch[1]);
+ mISDN_freebchannel(&hc->bch[0]);
+ mISDN_freedchannel(&hc->dch);
+ list_del(&hc->list);
+ pci_set_drvdata(hc->pdev, NULL);
+ kfree(hc);
+}
+
+static int
+setup_card(struct hfc_pci *card)
+{
+ int err = -EINVAL;
+ u_int i;
+ u_long flags;
+ char name[MISDN_MAX_IDLEN];
+
+ if (HFC_cnt >= MAX_CARDS)
+ return -EINVAL; /* maybe better value */
+
+ card->dch.debug = debug;
+ spin_lock_init(&card->lock);
+ mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1, ph_state);
+ card->dch.hw = card;
+ card->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
+ card->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
+ (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
+ card->dch.dev.D.send = hfcpci_l2l1D;
+ card->dch.dev.D.ctrl = hfc_dctrl;
+ card->dch.dev.nrbchan = 2;
+ for (i = 0; i < 2; i++) {
+ card->bch[i].nr = i + 1;
+ test_and_set_bit(i + 1, &card->dch.dev.channelmap[0]);
+ card->bch[i].debug = debug;
+ mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM);
+ card->bch[i].hw = card;
+ card->bch[i].ch.send = hfcpci_l2l1B;
+ card->bch[i].ch.ctrl = hfc_bctrl;
+ card->bch[i].ch.nr = i + 1;
+ list_add(&card->bch[i].ch.list, &card->dch.dev.bchannels);
+ }
+ err = setup_hw(card);
+ if (err)
+ goto error;
+ snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-pci.%d", HFC_cnt + 1);
+ err = mISDN_register_device(&card->dch.dev, name);
+ if (err)
+ goto error;
+ HFC_cnt++;
+ write_lock_irqsave(&HFClock, flags);
+ list_add_tail(&card->list, &HFClist);
+ write_unlock_irqrestore(&HFClock, flags);
+ printk(KERN_INFO "HFC %d cards installed\n", HFC_cnt);
+ return 0;
+error:
+ mISDN_freebchannel(&card->bch[1]);
+ mISDN_freebchannel(&card->bch[0]);
+ mISDN_freedchannel(&card->dch);
+ kfree(card);
+ return err;
+}
+
+/* private data in the PCI devices list */
+struct _hfc_map {
+ u_int subtype;
+ u_int flag;
+ char *name;
+};
+
+static const struct _hfc_map hfc_map[] =
+{
+ {HFC_CCD_2BD0, 0, "CCD/Billion/Asuscom 2BD0"},
+ {HFC_CCD_B000, 0, "Billion B000"},
+ {HFC_CCD_B006, 0, "Billion B006"},
+ {HFC_CCD_B007, 0, "Billion B007"},
+ {HFC_CCD_B008, 0, "Billion B008"},
+ {HFC_CCD_B009, 0, "Billion B009"},
+ {HFC_CCD_B00A, 0, "Billion B00A"},
+ {HFC_CCD_B00B, 0, "Billion B00B"},
+ {HFC_CCD_B00C, 0, "Billion B00C"},
+ {HFC_CCD_B100, 0, "Seyeon B100"},
+ {HFC_CCD_B700, 0, "Primux II S0 B700"},
+ {HFC_CCD_B701, 0, "Primux II S0 NT B701"},
+ {HFC_ABOCOM_2BD1, 0, "Abocom/Magitek 2BD1"},
+ {HFC_ASUS_0675, 0, "Asuscom/Askey 675"},
+ {HFC_BERKOM_TCONCEPT, 0, "German telekom T-Concept"},
+ {HFC_BERKOM_A1T, 0, "German telekom A1T"},
+ {HFC_ANIGMA_MC145575, 0, "Motorola MC145575"},
+ {HFC_ZOLTRIX_2BD0, 0, "Zoltrix 2BD0"},
+ {HFC_DIGI_DF_M_IOM2_E, 0,
+ "Digi International DataFire Micro V IOM2 (Europe)"},
+ {HFC_DIGI_DF_M_E, 0,
+ "Digi International DataFire Micro V (Europe)"},
+ {HFC_DIGI_DF_M_IOM2_A, 0,
+ "Digi International DataFire Micro V IOM2 (North America)"},
+ {HFC_DIGI_DF_M_A, 0,
+ "Digi International DataFire Micro V (North America)"},
+ {HFC_SITECOM_DC105V2, 0, "Sitecom Connectivity DC-105 ISDN TA"},
+ {},
+};
+
+static struct pci_device_id hfc_ids[] =
+{
+ {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_2BD0,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[0]},
+ {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B000,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[1]},
+ {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B006,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[2]},
+ {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B007,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[3]},
+ {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B008,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[4]},
+ {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B009,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[5]},
+ {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00A,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[6]},
+ {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[7]},
+ {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[8]},
+ {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[9]},
+ {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B700,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[10]},
+ {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B701,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[11]},
+ {PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[12]},
+ {PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[13]},
+ {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[14]},
+ {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_A1T,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[15]},
+ {PCI_VENDOR_ID_ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[16]},
+ {PCI_VENDOR_ID_ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[17]},
+ {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[18]},
+ {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[19]},
+ {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[20]},
+ {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[21]},
+ {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[22]},
+ {},
+};
+
+static int __devinit
+hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err = -ENOMEM;
+ struct hfc_pci *card;
+ struct _hfc_map *m = (struct _hfc_map *)ent->driver_data;
+
+ card = kzalloc(sizeof(struct hfc_pci), GFP_ATOMIC);
+ if (!card) {
+ printk(KERN_ERR "No kmem for HFC card\n");
+ return err;
+ }
+ card->pdev = pdev;
+ card->subtype = m->subtype;
+ err = pci_enable_device(pdev);
+ if (err) {
+ kfree(card);
+ return err;
+ }
+
+ printk(KERN_INFO "mISDN_hfcpci: found adapter %s at %s\n",
+ m->name, pci_name(pdev));
+
+ card->irq = pdev->irq;
+ pci_set_drvdata(pdev, card);
+ err = setup_card(card);
+ if (err)
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void __devexit
+hfc_remove_pci(struct pci_dev *pdev)
+{
+ struct hfc_pci *card = pci_get_drvdata(pdev);
+ u_long flags;
+
+ if (card) {
+ write_lock_irqsave(&HFClock, flags);
+ release_card(card);
+ write_unlock_irqrestore(&HFClock, flags);
+ } else
+ if (debug)
+ printk(KERN_WARNING "%s: drvdata allready removed\n",
+ __func__);
+}
+
+
+static struct pci_driver hfc_driver = {
+ .name = "hfcpci",
+ .probe = hfc_probe,
+ .remove = __devexit_p(hfc_remove_pci),
+ .id_table = hfc_ids,
+};
+
+static int __init
+HFC_init(void)
+{
+ int err;
+
+ err = pci_register_driver(&hfc_driver);
+ return err;
+}
+
+static void __exit
+HFC_cleanup(void)
+{
+ struct hfc_pci *card, *next;
+
+ list_for_each_entry_safe(card, next, &HFClist, list) {
+ release_card(card);
+ }
+ pci_unregister_driver(&hfc_driver);
+}
+
+module_init(HFC_init);
+module_exit(HFC_cleanup);
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index c0b4db2f8364..1925118122f8 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -974,8 +974,6 @@ static struct pnp_driver fcpnp_driver = {
.remove = __devexit_p(fcpnp_remove),
.id_table = fcpnp_ids,
};
-#else
-static struct pnp_driver fcpnp_driver;
#endif
static void __devexit fcpci_remove(struct pci_dev *pdev)
diff --git a/drivers/isdn/mISDN/Kconfig b/drivers/isdn/mISDN/Kconfig
new file mode 100644
index 000000000000..4938355c4072
--- /dev/null
+++ b/drivers/isdn/mISDN/Kconfig
@@ -0,0 +1,44 @@
+#
+# Modular ISDN driver
+#
+
+menuconfig MISDN
+ tristate "Modular ISDN driver"
+ help
+ Enable support for the modular ISDN driver.
+
+if MISDN != n
+
+config MISDN_DSP
+ tristate "Digital Audio Processing of transparent data"
+ depends on MISDN
+ help
+ Enable support for digital audio processing capability.
+ This module may be used for special applications that require
+ cross connecting of bchannels, conferencing, dtmf decoding,
+ echo cancellation, tone generation, and Blowfish encryption and
+ decryption.
+ It may use hardware features if available.
+ E.g. it is required for PBX4Linux. Go to http://isdn.eversberg.eu
+ for more information about this module and its usage.
+ If unsure, say 'N'.
+
+config MISDN_L1OIP
+ tristate "ISDN over IP tunnel"
+ depends on MISDN
+ help
+ Enable support for ISDN over IP tunnel.
+
+ It features:
+ - dynamic IP exchange, if one or both peers have dynamic IPs
+ - BRI (S0) and PRI (S2M) interface
+ - layer 1 control via network keepalive frames
+ - direct tunneling of physical interface via IP
+
+ NOTE: This protocol is called 'Layer 1 over IP' and is not
+ compatible with ISDNoIP (Agfeo) or TDMoIP. Protocol description is
+ provided in the source code.
+
+source "drivers/isdn/hardware/mISDN/Kconfig"
+
+endif #MISDN
diff --git a/drivers/isdn/mISDN/Makefile b/drivers/isdn/mISDN/Makefile
new file mode 100644
index 000000000000..1cb5e633cf75
--- /dev/null
+++ b/drivers/isdn/mISDN/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for the modular ISDN driver
+#
+
+obj-$(CONFIG_MISDN) += mISDN_core.o
+obj-$(CONFIG_MISDN_DSP) += mISDN_dsp.o
+obj-$(CONFIG_MISDN_L1OIP) += l1oip.o
+
+# multi objects
+
+mISDN_core-objs := core.o fsm.o socket.o hwchannel.o stack.o layer1.o layer2.o tei.o timerdev.o
+mISDN_dsp-objs := dsp_core.o dsp_cmx.o dsp_tones.o dsp_dtmf.o dsp_audio.o dsp_blowfish.o dsp_pipeline.o dsp_hwec.o
+l1oip-objs := l1oip_core.o l1oip_codec.o
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
new file mode 100644
index 000000000000..33068177b7c9
--- /dev/null
+++ b/drivers/isdn/mISDN/core.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2008 by Karsten Keil <kkeil@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/mISDNif.h>
+#include "core.h"
+
+static u_int debug;
+
+MODULE_AUTHOR("Karsten Keil");
+MODULE_LICENSE("GPL");
+module_param(debug, uint, S_IRUGO | S_IWUSR);
+
+static LIST_HEAD(devices);
+DEFINE_RWLOCK(device_lock);
+static u64 device_ids;
+#define MAX_DEVICE_ID 63
+
+static LIST_HEAD(Bprotocols);
+DEFINE_RWLOCK(bp_lock);
+
+struct mISDNdevice
+*get_mdevice(u_int id)
+{
+ struct mISDNdevice *dev;
+
+ read_lock(&device_lock);
+ list_for_each_entry(dev, &devices, D.list)
+ if (dev->id == id) {
+ read_unlock(&device_lock);
+ return dev;
+ }
+ read_unlock(&device_lock);
+ return NULL;
+}
+
+int
+get_mdevice_count(void)
+{
+ struct mISDNdevice *dev;
+ int cnt = 0;
+
+ read_lock(&device_lock);
+ list_for_each_entry(dev, &devices, D.list)
+ cnt++;
+ read_unlock(&device_lock);
+ return cnt;
+}
+
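+/*
+ * Device ids are allocated from a single 64-bit bitmap, which limits
+ * the number of simultaneously registered devices to MAX_DEVICE_ID + 1.
+ */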
+static int
+get_free_devid(void)
+{
+ u_int i;
+
+ for (i = 0; i <= MAX_DEVICE_ID; i++)
+ if (!test_and_set_bit(i, (u_long *)&device_ids))
+ return i;
+ return -1;
+}
+
+int
+mISDN_register_device(struct mISDNdevice *dev, char *name)
+{
+ u_long flags;
+ int err;
+
+ err = get_free_devid();
+ if (err < 0)
+ return -EBUSY;
+ dev->id = err;
+ if (name && name[0])
+ strcpy(dev->name, name);
+ else
+ sprintf(dev->name, "mISDN%d", dev->id);
+ if (debug & DEBUG_CORE)
+ printk(KERN_DEBUG "mISDN_register %s %d\n",
+ dev->name, dev->id);
+ err = create_stack(dev);
+ if (err)
+ return err;
+ write_lock_irqsave(&device_lock, flags);
+ list_add_tail(&dev->D.list, &devices);
+ write_unlock_irqrestore(&device_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(mISDN_register_device);
+
+void
+mISDN_unregister_device(struct mISDNdevice *dev) {
+ u_long flags;
+
+ if (debug & DEBUG_CORE)
+ printk(KERN_DEBUG "mISDN_unregister %s %d\n",
+ dev->name, dev->id);
+ write_lock_irqsave(&device_lock, flags);
+ list_del(&dev->D.list);
+ write_unlock_irqrestore(&device_lock, flags);
+ test_and_clear_bit(dev->id, (u_long *)&device_ids);
+ delete_stack(dev);
+}
+EXPORT_SYMBOL(mISDN_unregister_device);
+
+u_int
+get_all_Bprotocols(void)
+{
+ struct Bprotocol *bp;
+ u_int m = 0;
+
+ read_lock(&bp_lock);
+ list_for_each_entry(bp, &Bprotocols, list)
+ m |= bp->Bprotocols;
+ read_unlock(&bp_lock);
+ return m;
+}
+
+struct Bprotocol *
+get_Bprotocol4mask(u_int m)
+{
+ struct Bprotocol *bp;
+
+ read_lock(&bp_lock);
+ list_for_each_entry(bp, &Bprotocols, list)
+ if (bp->Bprotocols & m) {
+ read_unlock(&bp_lock);
+ return bp;
+ }
+ read_unlock(&bp_lock);
+ return NULL;
+}
+
+struct Bprotocol *
+get_Bprotocol4id(u_int id)
+{
+ u_int m;
+
+ if (id < ISDN_P_B_START || id > 63) {
+ printk(KERN_WARNING "%s id not in range %d\n",
+ __func__, id);
+ return NULL;
+ }
+ m = 1 << (id & ISDN_P_B_MASK);
+ return get_Bprotocol4mask(m);
+}
+
+int
+mISDN_register_Bprotocol(struct Bprotocol *bp)
+{
+ u_long flags;
+ struct Bprotocol *old;
+
+ if (debug & DEBUG_CORE)
+ printk(KERN_DEBUG "%s: %s/%x\n", __func__,
+ bp->name, bp->Bprotocols);
+ old = get_Bprotocol4mask(bp->Bprotocols);
+ if (old) {
+ printk(KERN_WARNING
+ "register duplicate protocol old %s/%x new %s/%x\n",
+ old->name, old->Bprotocols, bp->name, bp->Bprotocols);
+ return -EBUSY;
+ }
+ write_lock_irqsave(&bp_lock, flags);
+ list_add_tail(&bp->list, &Bprotocols);
+ write_unlock_irqrestore(&bp_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(mISDN_register_Bprotocol);
+
+void
+mISDN_unregister_Bprotocol(struct Bprotocol *bp)
+{
+ u_long flags;
+
+ if (debug & DEBUG_CORE)
+ printk(KERN_DEBUG "%s: %s/%x\n", __func__, bp->name,
+ bp->Bprotocols);
+ write_lock_irqsave(&bp_lock, flags);
+ list_del(&bp->list);
+ write_unlock_irqrestore(&bp_lock, flags);
+}
+EXPORT_SYMBOL(mISDN_unregister_Bprotocol);
+
+int
+mISDNInit(void)
+{
+ int err;
+
+ printk(KERN_INFO "Modular ISDN core version %d.%d.%d\n",
+ MISDN_MAJOR_VERSION, MISDN_MINOR_VERSION, MISDN_RELEASE);
+ mISDN_initstack(&debug);
+ err = mISDN_inittimer(&debug);
+ if (err)
+ goto error;
+ err = l1_init(&debug);
+ if (err) {
+ mISDN_timer_cleanup();
+ goto error;
+ }
+ err = Isdnl2_Init(&debug);
+ if (err) {
+ mISDN_timer_cleanup();
+ l1_cleanup();
+ goto error;
+ }
+ err = misdn_sock_init(&debug);
+ if (err) {
+ mISDN_timer_cleanup();
+ l1_cleanup();
+ Isdnl2_cleanup();
+ }
+error:
+ return err;
+}
+
+void mISDN_cleanup(void)
+{
+ misdn_sock_cleanup();
+ mISDN_timer_cleanup();
+ l1_cleanup();
+ Isdnl2_cleanup();
+
+ if (!list_empty(&devices))
+ printk(KERN_ERR "%s devices still registered\n", __func__);
+
+ if (!list_empty(&Bprotocols))
+ printk(KERN_ERR "%s Bprotocols still registered\n", __func__);
+ printk(KERN_DEBUG "mISDNcore unloaded\n");
+}
+
+module_init(mISDNInit);
+module_exit(mISDN_cleanup);
+
diff --git a/drivers/isdn/mISDN/core.h b/drivers/isdn/mISDN/core.h
new file mode 100644
index 000000000000..7da7233b4c1a
--- /dev/null
+++ b/drivers/isdn/mISDN/core.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2008 by Karsten Keil <kkeil@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef mISDN_CORE_H
+#define mISDN_CORE_H
+
+extern struct mISDNdevice *get_mdevice(u_int);
+extern int get_mdevice_count(void);
+
+/* stack status flag */
+#define mISDN_STACK_ACTION_MASK 0x0000ffff
+#define mISDN_STACK_COMMAND_MASK 0x000f0000
+#define mISDN_STACK_STATUS_MASK 0xfff00000
+/* action bits 0-15 */
+#define mISDN_STACK_WORK 0
+#define mISDN_STACK_SETUP 1
+#define mISDN_STACK_CLEARING 2
+#define mISDN_STACK_RESTART 3
+#define mISDN_STACK_WAKEUP 4
+#define mISDN_STACK_ABORT 15
+/* command bits 16-19 */
+#define mISDN_STACK_STOPPED 16
+#define mISDN_STACK_INIT 17
+#define mISDN_STACK_THREADSTART 18
+/* status bits 20-31 */
+#define mISDN_STACK_BCHANNEL 20
+#define mISDN_STACK_ACTIVE 29
+#define mISDN_STACK_RUNNING 30
+#define mISDN_STACK_KILLED 31
+
+
+/* manager options */
+#define MGR_OPT_USER 24
+#define MGR_OPT_NETWORK 25
+
+extern int connect_Bstack(struct mISDNdevice *, struct mISDNchannel *,
+ u_int, struct sockaddr_mISDN *);
+extern int connect_layer1(struct mISDNdevice *, struct mISDNchannel *,
+ u_int, struct sockaddr_mISDN *);
+extern int create_l2entity(struct mISDNdevice *, struct mISDNchannel *,
+ u_int, struct sockaddr_mISDN *);
+
+extern int create_stack(struct mISDNdevice *);
+extern int create_teimanager(struct mISDNdevice *);
+extern void delete_teimanager(struct mISDNchannel *);
+extern void delete_channel(struct mISDNchannel *);
+extern void delete_stack(struct mISDNdevice *);
+extern void mISDN_initstack(u_int *);
+extern int misdn_sock_init(u_int *);
+extern void misdn_sock_cleanup(void);
+extern void add_layer2(struct mISDNchannel *, struct mISDNstack *);
+extern void __add_layer2(struct mISDNchannel *, struct mISDNstack *);
+
+extern u_int get_all_Bprotocols(void);
+struct Bprotocol *get_Bprotocol4mask(u_int);
+struct Bprotocol *get_Bprotocol4id(u_int);
+
+extern int mISDN_inittimer(u_int *);
+extern void mISDN_timer_cleanup(void);
+
+extern int l1_init(u_int *);
+extern void l1_cleanup(void);
+extern int Isdnl2_Init(u_int *);
+extern void Isdnl2_cleanup(void);
+
+#endif
diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h
new file mode 100644
index 000000000000..6c3fed6b8d4f
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp.h
@@ -0,0 +1,263 @@
+/*
+ * Audio support data for ISDN4Linux.
+ *
+ * Copyright 2002/2003 by Andreas Eversberg (jolly@eversberg.eu)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#define DEBUG_DSP_CTRL 0x0001
+#define DEBUG_DSP_CORE 0x0002
+#define DEBUG_DSP_DTMF 0x0004
+#define DEBUG_DSP_CMX 0x0010
+#define DEBUG_DSP_TONE 0x0020
+#define DEBUG_DSP_BLOWFISH 0x0040
+#define DEBUG_DSP_DELAY 0x0100
+#define DEBUG_DSP_DTMFCOEFF 0x8000 /* heavy output */
+
+/* options may be:
+ *
+ * bit 0 = use ulaw instead of alaw
+ * bit 1 = disable hfc hardware acceleration for all channels
+ *
+ */
+#define DSP_OPT_ULAW (1<<0)
+#define DSP_OPT_NOHARDWARE (1<<1)
+
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+#include "dsp_ecdis.h"
+
+extern int dsp_options;
+extern int dsp_debug;
+extern int dsp_poll;
+extern int dsp_tics;
+extern spinlock_t dsp_lock;
+extern struct work_struct dsp_workq;
+extern u32 dsp_poll_diff; /* calculated fix-comma corrected poll value */
+
+/***************
+ * audio stuff *
+ ***************/
+
+extern s32 dsp_audio_alaw_to_s32[256];
+extern s32 dsp_audio_ulaw_to_s32[256];
+extern s32 *dsp_audio_law_to_s32;
+extern u8 dsp_audio_s16_to_law[65536];
+extern u8 dsp_audio_alaw_to_ulaw[256];
+extern u8 dsp_audio_mix_law[65536];
+extern u8 dsp_audio_seven2law[128];
+extern u8 dsp_audio_law2seven[256];
+extern void dsp_audio_generate_law_tables(void);
+extern void dsp_audio_generate_s2law_table(void);
+extern void dsp_audio_generate_seven(void);
+extern void dsp_audio_generate_mix_table(void);
+extern void dsp_audio_generate_ulaw_samples(void);
+extern void dsp_audio_generate_volume_changes(void);
+extern u8 dsp_silence;
+
+
+/*************
+ * cmx stuff *
+ *************/
+
+#define MAX_POLL 256 /* maximum number of send-chunks */
+
+#define CMX_BUFF_SIZE 0x8000 /* must be 2**n (0x1000 about 1/2 second) */
+#define CMX_BUFF_HALF 0x4000 /* CMX_BUFF_SIZE / 2 */
+#define CMX_BUFF_MASK 0x7fff /* CMX_BUFF_SIZE - 1 */
+
+/* number of seconds over which the lowest delay is tracked before the
+ * jitter buffer is reduced by that delay */
+#define MAX_SECONDS_JITTER_CHECK 5
+
+extern struct timer_list dsp_spl_tl;
+extern u32 dsp_spl_jiffies;
+
+/* the structure of conferences:
+ *
+ * each conference has a unique number, given by user space.
+ * the conferences are linked in a chain.
+ * each conference has members linked in a chain.
+ * each dsp instance points to a member, each member points back to its dsp.
+ */
+
+/* all members within a conference (this is linked 1:1 with the dsp) */
+struct dsp;
+struct dsp_conf_member {
+ struct list_head list;
+ struct dsp *dsp;
+};
+
+/* the list of all conferences */
+struct dsp_conf {
+ struct list_head list;
+ u32 id;
+ /* all cmx stacks with the same ID are
+ connected */
+ struct list_head mlist;
+ int software; /* conf is processed by software */
+ int hardware; /* conf is processed by hardware */
+ /* note: if both unset, has only one member */
+};
+
+
+/**************
+ * DTMF stuff *
+ **************/
+
+#define DSP_DTMF_NPOINTS 102
+
+#define ECHOCAN_BUFLEN (4*128)
+
+struct dsp_dtmf {
+ int treshold; /* above this is dtmf (square of) */
+ int software; /* dtmf uses software decoding */
+ int hardware; /* dtmf uses hardware decoding */
+ int size; /* number of bytes in buffer */
+ signed short buffer[DSP_DTMF_NPOINTS];
+ /* buffers one full dtmf frame */
+ u8 lastwhat, lastdigit;
+ int count;
+ u8 digits[16]; /* just the dtmf result */
+};
+
+
+/******************
+ * pipeline stuff *
+ ******************/
+struct dsp_pipeline {
+ rwlock_t lock;
+ struct list_head list;
+ int inuse;
+};
+
+/***************
+ * tones stuff *
+ ***************/
+
+struct dsp_tone {
+ int software; /* tones are generated by software */
+ int hardware; /* tones are generated by hardware */
+ int tone;
+ void *pattern;
+ int count;
+ int index;
+ struct timer_list tl;
+};
+
+/*****************
+ * general stuff *
+ *****************/
+
+struct dsp {
+ struct list_head list;
+ struct mISDNchannel ch;
+ struct mISDNchannel *up;
+ unsigned char name[64];
+ int b_active;
+ int echo; /* echo is enabled */
+ int rx_disabled; /* what the user wants */
+ int rx_is_off; /* what the card is */
+ int tx_mix;
+ struct dsp_tone tone;
+ struct dsp_dtmf dtmf;
+ int tx_volume, rx_volume;
+
+ /* queue for sending frames */
+ struct work_struct workq;
+ struct sk_buff_head sendq;
+ int hdlc; /* if mode is hdlc */
+ int data_pending; /* currently an unconfirmed frame */
+
+ /* conference stuff */
+ u32 conf_id;
+ struct dsp_conf *conf;
+ struct dsp_conf_member
+ *member;
+
+ /* buffer stuff */
+ int rx_W; /* current write pos for data without timestamp */
+ int rx_R; /* current read pos for transmit clock */
+ int rx_init; /* if set, pointers will be adjusted first */
+ int tx_W; /* current write pos for transmit data */
+ int tx_R; /* current read pos for transmit clock */
+ int rx_delay[MAX_SECONDS_JITTER_CHECK];
+ int tx_delay[MAX_SECONDS_JITTER_CHECK];
+ u8 tx_buff[CMX_BUFF_SIZE];
+ u8 rx_buff[CMX_BUFF_SIZE];
+ int last_tx; /* if set, we transmitted last poll interval */
+ int cmx_delay; /* initial delay of buffers,
+ or 0 for dynamic jitter buffer */
+ int tx_dejitter; /* if set, dejitter tx buffer */
+ int tx_data; /* enables tx-data of CMX to upper layer */
+
+ /* hardware stuff */
+ struct dsp_features features;
+ int features_rx_off; /* set if rx_off is featured */
+ int pcm_slot_rx; /* current PCM slot (or -1) */
+ int pcm_bank_rx;
+ int pcm_slot_tx;
+ int pcm_bank_tx;
+ int hfc_conf; /* unique id of current conference (or -1) */
+
+ /* encryption stuff */
+ int bf_enable;
+ u32 bf_p[18];
+ u32 bf_s[1024];
+ int bf_crypt_pos;
+ u8 bf_data_in[9];
+ u8 bf_crypt_out[9];
+ int bf_decrypt_in_pos;
+ int bf_decrypt_out_pos;
+ u8 bf_crypt_inring[16];
+ u8 bf_data_out[9];
+ int bf_sync;
+
+ struct dsp_pipeline
+ pipeline;
+};
+
+/* functions */
+
+extern void dsp_change_volume(struct sk_buff *skb, int volume);
+
+extern struct list_head dsp_ilist;
+extern struct list_head conf_ilist;
+extern void dsp_cmx_debug(struct dsp *dsp);
+extern void dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp);
+extern int dsp_cmx_conf(struct dsp *dsp, u32 conf_id);
+extern void dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb);
+extern void dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb);
+extern void dsp_cmx_send(void *arg);
+extern void dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb);
+extern int dsp_cmx_del_conf_member(struct dsp *dsp);
+extern int dsp_cmx_del_conf(struct dsp_conf *conf);
+
+extern void dsp_dtmf_goertzel_init(struct dsp *dsp);
+extern void dsp_dtmf_hardware(struct dsp *dsp);
+extern u8 *dsp_dtmf_goertzel_decode(struct dsp *dsp, u8 *data, int len,
+ int fmt);
+
+extern int dsp_tone(struct dsp *dsp, int tone);
+extern void dsp_tone_copy(struct dsp *dsp, u8 *data, int len);
+extern void dsp_tone_timeout(void *arg);
+
+extern void dsp_bf_encrypt(struct dsp *dsp, u8 *data, int len);
+extern void dsp_bf_decrypt(struct dsp *dsp, u8 *data, int len);
+extern int dsp_bf_init(struct dsp *dsp, const u8 *key, unsigned int keylen);
+extern void dsp_bf_cleanup(struct dsp *dsp);
+
+extern int dsp_pipeline_module_init(void);
+extern void dsp_pipeline_module_exit(void);
+extern int dsp_pipeline_init(struct dsp_pipeline *pipeline);
+extern void dsp_pipeline_destroy(struct dsp_pipeline *pipeline);
+extern int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg);
+extern void dsp_pipeline_process_tx(struct dsp_pipeline *pipeline, u8 *data,
+ int len);
+extern void dsp_pipeline_process_rx(struct dsp_pipeline *pipeline, u8 *data,
+ int len);
+
diff --git a/drivers/isdn/mISDN/dsp_audio.c b/drivers/isdn/mISDN/dsp_audio.c
new file mode 100644
index 000000000000..1c2dd5694773
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_audio.c
@@ -0,0 +1,434 @@
+/*
+ * Audio support data for mISDN_dsp.
+ *
+ * Copyright 2002/2003 by Andreas Eversberg (jolly@eversberg.eu)
+ * Rewritten by Peter
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/mISDNif.h>
+#include <linux/mISDNdsp.h>
+#include "core.h"
+#include "dsp.h"
+
+/* ulaw[unsigned char] -> signed 16-bit */
+s32 dsp_audio_ulaw_to_s32[256];
+/* alaw[unsigned char] -> signed 16-bit */
+s32 dsp_audio_alaw_to_s32[256];
+
+s32 *dsp_audio_law_to_s32;
+EXPORT_SYMBOL(dsp_audio_law_to_s32);
+
+/* signed 16-bit -> law */
+u8 dsp_audio_s16_to_law[65536];
+EXPORT_SYMBOL(dsp_audio_s16_to_law);
+
+/* alaw -> ulaw */
+u8 dsp_audio_alaw_to_ulaw[256];
+/* ulaw -> alaw */
+u8 dsp_audio_ulaw_to_alaw[256];
+u8 dsp_silence;
+
+
+/*****************************************************
+ * generate table for conversion of s16 to alaw/ulaw *
+ *****************************************************/
+
+#define AMI_MASK 0x55
+
+static inline unsigned char linear2alaw(short int linear)
+{
+ int mask;
+ int seg;
+ int pcm_val;
+ static int seg_end[8] = {
+ 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF
+ };
+
+ pcm_val = linear;
+ if (pcm_val >= 0) {
+ /* Sign (7th) bit = 1 */
+ mask = AMI_MASK | 0x80;
+ } else {
+ /* Sign bit = 0 */
+ mask = AMI_MASK;
+ pcm_val = -pcm_val;
+ }
+
+ /* Convert the scaled magnitude to segment number. */
+ for (seg = 0; seg < 8; seg++) {
+ if (pcm_val <= seg_end[seg])
+ break;
+ }
+ /* Combine the sign, segment, and quantization bits. */
+ return ((seg << 4) |
+ ((pcm_val >> ((seg) ? (seg + 3) : 4)) & 0x0F)) ^ mask;
+}
+
+
+static inline short int alaw2linear(unsigned char alaw)
+{
+ int i;
+ int seg;
+
+ alaw ^= AMI_MASK;
+ i = ((alaw & 0x0F) << 4) + 8 /* rounding error */;
+ seg = (((int) alaw & 0x70) >> 4);
+ if (seg)
+ i = (i + 0x100) << (seg - 1);
+ return (short int) ((alaw & 0x80) ? i : -i);
+}
+
+static inline short int ulaw2linear(unsigned char ulaw)
+{
+ short mu, e, f, y;
+ static short etab[] = {0, 132, 396, 924, 1980, 4092, 8316, 16764};
+
+ mu = 255 - ulaw;
+ e = (mu & 0x70) / 16;
+ f = mu & 0x0f;
+ y = f * (1 << (e + 3));
+ y += etab[e];
+ if (mu & 0x80)
+ y = -y;
+ return y;
+}
+
+#define BIAS 0x84 /*!< define the add-in bias for 16 bit samples */
+
+static unsigned char linear2ulaw(short sample)
+{
+ static int exp_lut[256] = {
+ 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
+ int sign, exponent, mantissa;
+ unsigned char ulawbyte;
+
+ /* Get the sample into sign-magnitude. */
+ sign = (sample >> 8) & 0x80; /* set aside the sign */
+ if (sign != 0)
+ sample = -sample; /* get magnitude */
+
+ /* Convert from 16 bit linear to ulaw. */
+ sample = sample + BIAS;
+ exponent = exp_lut[(sample >> 7) & 0xFF];
+ mantissa = (sample >> (exponent + 3)) & 0x0F;
+ ulawbyte = ~(sign | (exponent << 4) | mantissa);
+
+ return ulawbyte;
+}
+
+static int reverse_bits(int i)
+{
+ int z, j;
+ z = 0;
+
+ for (j = 0; j < 8; j++) {
+ if ((i & (1 << j)) != 0)
+ z |= 1 << (7 - j);
+ }
+ return z;
+}
+
+
+void dsp_audio_generate_law_tables(void)
+{
+ int i;
+ for (i = 0; i < 256; i++)
+ dsp_audio_alaw_to_s32[i] = alaw2linear(reverse_bits(i));
+
+ for (i = 0; i < 256; i++)
+ dsp_audio_ulaw_to_s32[i] = ulaw2linear(reverse_bits(i));
+
+ for (i = 0; i < 256; i++) {
+ dsp_audio_alaw_to_ulaw[i] =
+ linear2ulaw(dsp_audio_alaw_to_s32[i]);
+ dsp_audio_ulaw_to_alaw[i] =
+ linear2alaw(dsp_audio_ulaw_to_s32[i]);
+ }
+}
+
+void
+dsp_audio_generate_s2law_table(void)
+{
+ int i;
+
+ if (dsp_options & DSP_OPT_ULAW) {
+ /* generating ulaw-table */
+ for (i = -32768; i < 32768; i++) {
+ dsp_audio_s16_to_law[i & 0xffff] =
+ reverse_bits(linear2ulaw(i));
+ }
+ } else {
+ /* generating alaw-table */
+ for (i = -32768; i < 32768; i++) {
+ dsp_audio_s16_to_law[i & 0xffff] =
+ reverse_bits(linear2alaw(i));
+ }
+ }
+}
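+
+/*
+ * Illustrative sketch (not part of the driver): once the tables above are
+ * generated, converting a received law byte to linear and back is just a
+ * pair of table lookups:
+ *
+ *   s32 lin = dsp_audio_law_to_s32[byte];
+ *   u8 law = dsp_audio_s16_to_law[lin & 0xffff];
+ */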
+
+
+/*
+ * the seven-bit sample is the index of every second alaw sample, ordered by
+ * amplitude. 0x00 is the most negative, 0x7f the most positive amplitude.
+ */
+u8 dsp_audio_seven2law[128];
+u8 dsp_audio_law2seven[256];
+
+/********************************************************************
+ * generate table for conversion law from/to 7-bit alaw-like sample *
+ ********************************************************************/
+
+void
+dsp_audio_generate_seven(void)
+{
+ int i, j, k;
+ u8 spl;
+ u8 sorted_alaw[256];
+
+ /* generate alaw table, sorted by the linear value */
+ for (i = 0; i < 256; i++) {
+ j = 0;
+ for (k = 0; k < 256; k++) {
+ if (dsp_audio_alaw_to_s32[k]
+ < dsp_audio_alaw_to_s32[i]) {
+ j++;
+ }
+ }
+ sorted_alaw[j] = i;
+ }
+
+ /* generate tables */
+ for (i = 0; i < 256; i++) {
+ /* spl is the source: the law-sample (converted to alaw) */
+ spl = i;
+ if (dsp_options & DSP_OPT_ULAW)
+ spl = dsp_audio_ulaw_to_alaw[i];
+ /* find the 7-bit-sample */
+ for (j = 0; j < 256; j++) {
+ if (sorted_alaw[j] == spl)
+ break;
+ }
+ /* write 7-bit audio value */
+ dsp_audio_law2seven[i] = j >> 1;
+ }
+ for (i = 0; i < 128; i++) {
+ spl = sorted_alaw[i << 1];
+ if (dsp_options & DSP_OPT_ULAW)
+ spl = dsp_audio_alaw_to_ulaw[spl];
+ dsp_audio_seven2law[i] = spl;
+ }
+}
+
+
+/* mix 2*law -> law */
+u8 dsp_audio_mix_law[65536];
+
+/******************************************************
+ * generate mix table to mix two law samples into one *
+ ******************************************************/
+
+void
+dsp_audio_generate_mix_table(void)
+{
+ int i, j;
+ s32 sample;
+
+ i = 0;
+ while (i < 256) {
+ j = 0;
+ while (j < 256) {
+ sample = dsp_audio_law_to_s32[i];
+ sample += dsp_audio_law_to_s32[j];
+ if (sample > 32767)
+ sample = 32767;
+ if (sample < -32768)
+ sample = -32768;
+ dsp_audio_mix_law[(i<<8)|j] =
+ dsp_audio_s16_to_law[sample & 0xffff];
+ j++;
+ }
+ i++;
+ }
+}
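+
+/*
+ * Illustrative sketch (not part of the driver): mixing two law-encoded
+ * samples a and b then becomes a single lookup:
+ *
+ *   mixed = dsp_audio_mix_law[(a << 8) | b];
+ */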
+
+
+/*************************************
+ * generate different volume changes *
+ *************************************/
+
+static u8 dsp_audio_reduce8[256];
+static u8 dsp_audio_reduce7[256];
+static u8 dsp_audio_reduce6[256];
+static u8 dsp_audio_reduce5[256];
+static u8 dsp_audio_reduce4[256];
+static u8 dsp_audio_reduce3[256];
+static u8 dsp_audio_reduce2[256];
+static u8 dsp_audio_reduce1[256];
+static u8 dsp_audio_increase1[256];
+static u8 dsp_audio_increase2[256];
+static u8 dsp_audio_increase3[256];
+static u8 dsp_audio_increase4[256];
+static u8 dsp_audio_increase5[256];
+static u8 dsp_audio_increase6[256];
+static u8 dsp_audio_increase7[256];
+static u8 dsp_audio_increase8[256];
+
+static u8 *dsp_audio_volume_change[16] = {
+ dsp_audio_reduce8,
+ dsp_audio_reduce7,
+ dsp_audio_reduce6,
+ dsp_audio_reduce5,
+ dsp_audio_reduce4,
+ dsp_audio_reduce3,
+ dsp_audio_reduce2,
+ dsp_audio_reduce1,
+ dsp_audio_increase1,
+ dsp_audio_increase2,
+ dsp_audio_increase3,
+ dsp_audio_increase4,
+ dsp_audio_increase5,
+ dsp_audio_increase6,
+ dsp_audio_increase7,
+ dsp_audio_increase8,
+};
+
+void
+dsp_audio_generate_volume_changes(void)
+{
+ register s32 sample;
+ int i;
+ int num[] = { 110, 125, 150, 175, 200, 300, 400, 500 };
+ int denum[] = { 100, 100, 100, 100, 100, 100, 100, 100 };
+
+ i = 0;
+ while (i < 256) {
+ dsp_audio_reduce8[i] = dsp_audio_s16_to_law[
+ (dsp_audio_law_to_s32[i] * denum[7] / num[7]) & 0xffff];
+ dsp_audio_reduce7[i] = dsp_audio_s16_to_law[
+ (dsp_audio_law_to_s32[i] * denum[6] / num[6]) & 0xffff];
+ dsp_audio_reduce6[i] = dsp_audio_s16_to_law[
+ (dsp_audio_law_to_s32[i] * denum[5] / num[5]) & 0xffff];
+ dsp_audio_reduce5[i] = dsp_audio_s16_to_law[
+ (dsp_audio_law_to_s32[i] * denum[4] / num[4]) & 0xffff];
+ dsp_audio_reduce4[i] = dsp_audio_s16_to_law[
+ (dsp_audio_law_to_s32[i] * denum[3] / num[3]) & 0xffff];
+ dsp_audio_reduce3[i] = dsp_audio_s16_to_law[
+ (dsp_audio_law_to_s32[i] * denum[2] / num[2]) & 0xffff];
+ dsp_audio_reduce2[i] = dsp_audio_s16_to_law[
+ (dsp_audio_law_to_s32[i] * denum[1] / num[1]) & 0xffff];
+ dsp_audio_reduce1[i] = dsp_audio_s16_to_law[
+ (dsp_audio_law_to_s32[i] * denum[0] / num[0]) & 0xffff];
+ sample = dsp_audio_law_to_s32[i] * num[0] / denum[0];
+ if (sample < -32768)
+ sample = -32768;
+ else if (sample > 32767)
+ sample = 32767;
+ dsp_audio_increase1[i] = dsp_audio_s16_to_law[sample & 0xffff];
+ sample = dsp_audio_law_to_s32[i] * num[1] / denum[1];
+ if (sample < -32768)
+ sample = -32768;
+ else if (sample > 32767)
+ sample = 32767;
+ dsp_audio_increase2[i] = dsp_audio_s16_to_law[sample & 0xffff];
+ sample = dsp_audio_law_to_s32[i] * num[2] / denum[2];
+ if (sample < -32768)
+ sample = -32768;
+ else if (sample > 32767)
+ sample = 32767;
+ dsp_audio_increase3[i] = dsp_audio_s16_to_law[sample & 0xffff];
+ sample = dsp_audio_law_to_s32[i] * num[3] / denum[3];
+ if (sample < -32768)
+ sample = -32768;
+ else if (sample > 32767)
+ sample = 32767;
+ dsp_audio_increase4[i] = dsp_audio_s16_to_law[sample & 0xffff];
+ sample = dsp_audio_law_to_s32[i] * num[4] / denum[4];
+ if (sample < -32768)
+ sample = -32768;
+ else if (sample > 32767)
+ sample = 32767;
+ dsp_audio_increase5[i] = dsp_audio_s16_to_law[sample & 0xffff];
+ sample = dsp_audio_law_to_s32[i] * num[5] / denum[5];
+ if (sample < -32768)
+ sample = -32768;
+ else if (sample > 32767)
+ sample = 32767;
+ dsp_audio_increase6[i] = dsp_audio_s16_to_law[sample & 0xffff];
+ sample = dsp_audio_law_to_s32[i] * num[6] / denum[6];
+ if (sample < -32768)
+ sample = -32768;
+ else if (sample > 32767)
+ sample = 32767;
+ dsp_audio_increase7[i] = dsp_audio_s16_to_law[sample & 0xffff];
+ sample = dsp_audio_law_to_s32[i] * num[7] / denum[7];
+ if (sample < -32768)
+ sample = -32768;
+ else if (sample > 32767)
+ sample = 32767;
+ dsp_audio_increase8[i] = dsp_audio_s16_to_law[sample & 0xffff];
+
+ i++;
+ }
+}
+
+
+/**************************************
+ * change the volume of the given skb *
+ **************************************/
+
+/* this is a helper function for changing the volume of an skb. the range is
+ * -8 to 8; each step selects one of the gain tables generated above (factors
+ * from roughly 1.1x up to 5x, or their inverses for negative values).
+ * 0 == no change.
+ */
+void
+dsp_change_volume(struct sk_buff *skb, int volume)
+{
+ u8 *volume_change;
+ int i, ii;
+ u8 *p;
+ int shift;
+
+ if (volume == 0)
+ return;
+
+ /* get correct conversion table */
+ if (volume < 0) {
+ shift = volume + 8;
+ if (shift < 0)
+ shift = 0;
+ } else {
+ shift = volume + 7;
+ if (shift > 15)
+ shift = 15;
+ }
+ volume_change = dsp_audio_volume_change[shift];
+ i = 0;
+ ii = skb->len;
+ p = skb->data;
+ /* change volume */
+ while (i < ii) {
+ *p = volume_change[*p];
+ p++;
+ i++;
+ }
+}
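+
+/*
+ * Illustrative sketch (not part of the driver): a caller applying the
+ * configured transmit gain to a frame would do something like
+ *
+ *   if (dsp->tx_volume)
+ *       dsp_change_volume(skb, dsp->tx_volume);
+ *
+ * with tx_volume in the range -8..8 as described above.
+ */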
+
diff --git a/drivers/isdn/mISDN/dsp_biquad.h b/drivers/isdn/mISDN/dsp_biquad.h
new file mode 100644
index 000000000000..038191bc45f5
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_biquad.h
@@ -0,0 +1,65 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * biquad.h - General telephony bi-quad section routines (currently this just
+ * handles canonic/type 2 form)
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2001 Steve Underwood
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+struct biquad2_state {
+ int32_t gain;
+ int32_t a1;
+ int32_t a2;
+ int32_t b1;
+ int32_t b2;
+
+ int32_t z1;
+ int32_t z2;
+};
+
+static inline void biquad2_init(struct biquad2_state *bq,
+ int32_t gain, int32_t a1, int32_t a2, int32_t b1, int32_t b2)
+{
+ bq->gain = gain;
+ bq->a1 = a1;
+ bq->a2 = a2;
+ bq->b1 = b1;
+ bq->b2 = b2;
+
+ bq->z1 = 0;
+ bq->z2 = 0;
+}
+
+static inline int16_t biquad2(struct biquad2_state *bq, int16_t sample)
+{
+ int32_t y;
+ int32_t z0;
+
+ z0 = sample*bq->gain + bq->z1*bq->a1 + bq->z2*bq->a2;
+ y = z0 + bq->z1*bq->b1 + bq->z2*bq->b2;
+
+ bq->z2 = bq->z1;
+ bq->z1 = z0 >> 15;
+ y >>= 15;
+ return y;
+}
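A user-space sketch of how the helpers above might be driven; the Q15 coefficient values are arbitrary placeholders, not taken from any real filter design:

#include <stdint.h>
#include <stdio.h>
/* assumes the struct and inline functions above are available,
 * e.g. by including dsp_biquad.h after stdint.h */

int main(void)
{
	struct biquad2_state bq;
	int16_t in[4] = { 1000, 2000, -1500, 0 };
	int i;

	/* arbitrary Q15 placeholder coefficients */
	biquad2_init(&bq, 16384, -12000, 4000, 8000, 2000);

	for (i = 0; i < 4; i++)
		printf("%d -> %d\n", in[i], biquad2(&bq, in[i]));
	return 0;
}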
diff --git a/drivers/isdn/mISDN/dsp_blowfish.c b/drivers/isdn/mISDN/dsp_blowfish.c
new file mode 100644
index 000000000000..18e411e95bba
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_blowfish.c
@@ -0,0 +1,672 @@
+/*
+ * Blowfish encryption/decryption for mISDN_dsp.
+ *
+ * Copyright Andreas Eversberg (jolly@eversberg.eu)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/mISDNif.h>
+#include <linux/mISDNdsp.h>
+#include "core.h"
+#include "dsp.h"
+
+/*
+ * how to encode a sample stream to 64-bit blocks that will be encrypted
+ *
+ * first of all, data is collected until a block of 9 samples is received.
+ * of course, a packet may have many more than 9 samples, but it may not
+ * have exactly a multiple of 9 samples. if there is a rest, the next
+ * received data will complete the block.
+ *
+ * the block is then converted to 9 uLAW samples without the least significant
+ * bit. the result is a 7-bit encoded sample.
+ *
+ * the samples will be reorganised to form 8 bytes of data:
+ * (5(6) means: encoded sample no. 5, bit 6)
+ *
+ * 0(6) 0(5) 0(4) 0(3) 0(2) 0(1) 0(0) 1(6)
+ * 1(5) 1(4) 1(3) 1(2) 1(1) 1(0) 2(6) 2(5)
+ * 2(4) 2(3) 2(2) 2(1) 2(0) 3(6) 3(5) 3(4)
+ * 3(3) 3(2) 3(1) 3(0) 4(6) 4(5) 4(4) 4(3)
+ * 4(2) 4(1) 4(0) 5(6) 5(5) 5(4) 5(3) 5(2)
+ * 5(1) 5(0) 6(6) 6(5) 6(4) 6(3) 6(2) 6(1)
+ * 6(0) 7(6) 7(5) 7(4) 7(3) 7(2) 7(1) 7(0)
+ * 8(6) 8(5) 8(4) 8(3) 8(2) 8(1) 8(0)
+ *
+ * the missing bit 0 of the last byte is filled with some
+ * random noise, to fill all 8 bytes.
+ *
+ * the 8 bytes will be encrypted using blowfish.
+ *
+ * the result will be converted into 9 bytes. bit 7 is used for the
+ * checksum (CS), for sync (0, 1) and for the last bit:
+ * (5(6) means: crypted byte 5, bit 6)
+ *
+ * 1 0(7) 0(6) 0(5) 0(4) 0(3) 0(2) 0(1)
+ * 0 0(0) 1(7) 1(6) 1(5) 1(4) 1(3) 1(2)
+ * 0 1(1) 1(0) 2(7) 2(6) 2(5) 2(4) 2(3)
+ * 0 2(2) 2(1) 2(0) 3(7) 3(6) 3(5) 3(4)
+ * 0 3(3) 3(2) 3(1) 3(0) 4(7) 4(6) 4(5)
+ * CS 4(4) 4(3) 4(2) 4(1) 4(0) 5(7) 5(6)
+ * CS 5(5) 5(4) 5(3) 5(2) 5(1) 5(0) 6(7)
+ * CS 6(6) 6(5) 6(4) 6(3) 6(2) 6(1) 6(0)
+ * 7(0) 7(6) 7(5) 7(4) 7(3) 7(2) 7(1) 7(0)
+ *
+ * the checksum is used to detect transmission errors and frame drops.
+ *
+ * synchronisation of a received block is done by shifting the upper bit of
+ * each byte (bit 7) into a shift register. if the register shows the five-bit
+ * pattern (10000), sync has been found. only then is the current block of 9
+ * received bytes decrypted. before that, the checksum is calculated; if it is
+ * incorrect, the block is dropped.
+ * this avoids loud noise due to corrupt encrypted data.
+ *
+ * if the last block is corrupt, the current decoded block is repeated
+ * until a valid block has been received.
+ */
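The 9-samples-to-8-bytes packing described above can be reproduced with a few lines of standalone C; this sketch only mirrors the bit layout of the tables (sample 0 in the most significant bits, the filler bit in bit 0) and splits the result into the two 32-bit halves the code below feeds into the cipher. It is not driver code:

#include <stdint.h>
#include <stdio.h>

/* pack nine 7-bit samples plus one filler bit into a 64-bit block */
static uint64_t pack_block(const uint8_t s[9], int filler_bit)
{
	uint64_t block = 0;
	int i;

	for (i = 0; i < 9; i++)
		block = (block << 7) | (s[i] & 0x7f);
	return (block << 1) | (filler_bit & 1);
}

int main(void)
{
	uint8_t s[9] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
	uint64_t b = pack_block(s, 1);

	printf("yl=%08x yr=%08x\n",
	       (unsigned)(b >> 32), (unsigned)(b & 0xffffffff));
	return 0;
}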
+
+/*
+ * some blowfish parts are taken from the
+ * crypto-api for faster implementation
+ */
+
+struct bf_ctx {
+ u32 p[18];
+ u32 s[1024];
+};
+
+static const u32 bf_pbox[16 + 2] = {
+ 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344,
+ 0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89,
+ 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
+ 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917,
+ 0x9216d5d9, 0x8979fb1b,
+};
+
+static const u32 bf_sbox[256 * 4] = {
+ 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7,
+ 0xb8e1afed, 0x6a267e96, 0xba7c9045, 0xf12c7f99,
+ 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
+ 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e,
+ 0x0d95748f, 0x728eb658, 0x718bcd58, 0x82154aee,
+ 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
+ 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef,
+ 0x8e79dcb0, 0x603a180e, 0x6c9e0e8b, 0xb01e8a3e,
+ 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
+ 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440,
+ 0x55ca396a, 0x2aab10b6, 0xb4cc5c34, 0x1141e8ce,
+ 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
+ 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e,
+ 0xafd6ba33, 0x6c24cf5c, 0x7a325381, 0x28958677,
+ 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
+ 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032,
+ 0xef845d5d, 0xe98575b1, 0xdc262302, 0xeb651b88,
+ 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
+ 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e,
+ 0x21c66842, 0xf6e96c9a, 0x670c9c61, 0xabd388f0,
+ 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
+ 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98,
+ 0xa1f1651d, 0x39af0176, 0x66ca593e, 0x82430e88,
+ 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
+ 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6,
+ 0x4ed3aa62, 0x363f7706, 0x1bfedf72, 0x429b023d,
+ 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
+ 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7,
+ 0xe3fe501a, 0xb6794c3b, 0x976ce0bd, 0x04c006ba,
+ 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
+ 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f,
+ 0x6dfc511f, 0x9b30952c, 0xcc814544, 0xaf5ebd09,
+ 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
+ 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb,
+ 0x5579c0bd, 0x1a60320a, 0xd6a100c6, 0x402c7279,
+ 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
+ 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab,
+ 0x323db5fa, 0xfd238760, 0x53317b48, 0x3e00df82,
+ 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
+ 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573,
+ 0x695b27b0, 0xbbca58c8, 0xe1ffa35d, 0xb8f011a0,
+ 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
+ 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790,
+ 0xe1ddf2da, 0xa4cb7e33, 0x62fb1341, 0xcee4c6e8,
+ 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
+ 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0,
+ 0xd08ed1d0, 0xafc725e0, 0x8e3c5b2f, 0x8e7594b7,
+ 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
+ 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad,
+ 0x2f2f2218, 0xbe0e1777, 0xea752dfe, 0x8b021fa1,
+ 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
+ 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9,
+ 0x165fa266, 0x80957705, 0x93cc7314, 0x211a1477,
+ 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
+ 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49,
+ 0x00250e2d, 0x2071b35e, 0x226800bb, 0x57b8e0af,
+ 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
+ 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5,
+ 0x83260376, 0x6295cfa9, 0x11c81968, 0x4e734a41,
+ 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
+ 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400,
+ 0x08ba6fb5, 0x571be91f, 0xf296ec6b, 0x2a0dd915,
+ 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
+ 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
+ 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623,
+ 0xad6ea6b0, 0x49a7df7d, 0x9cee60b8, 0x8fedb266,
+ 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
+ 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e,
+ 0x3f54989a, 0x5b429d65, 0x6b8fe4d6, 0x99f73fd6,
+ 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
+ 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e,
+ 0x09686b3f, 0x3ebaefc9, 0x3c971814, 0x6b6a70a1,
+ 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
+ 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8,
+ 0xb03ada37, 0xf0500c0d, 0xf01c1f04, 0x0200b3ff,
+ 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
+ 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701,
+ 0x3ae5e581, 0x37c2dadc, 0xc8b57634, 0x9af3dda7,
+ 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
+ 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331,
+ 0x4e548b38, 0x4f6db908, 0x6f420d03, 0xf60a04bf,
+ 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
+ 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e,
+ 0x5512721f, 0x2e6b7124, 0x501adde6, 0x9f84cd87,
+ 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
+ 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2,
+ 0xef1c1847, 0x3215d908, 0xdd433b37, 0x24c2ba16,
+ 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
+ 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b,
+ 0x043556f1, 0xd7a3c76b, 0x3c11183b, 0x5924a509,
+ 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
+ 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3,
+ 0x771fe71c, 0x4e3d06fa, 0x2965dcb9, 0x99e71d0f,
+ 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
+ 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4,
+ 0xf2f74ea7, 0x361d2b3d, 0x1939260f, 0x19c27960,
+ 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
+ 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28,
+ 0xc332ddef, 0xbe6c5aa5, 0x65582185, 0x68ab9802,
+ 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
+ 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510,
+ 0x13cca830, 0xeb61bd96, 0x0334fe1e, 0xaa0363cf,
+ 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
+ 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e,
+ 0x648b1eaf, 0x19bdf0ca, 0xa02369b9, 0x655abb50,
+ 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
+ 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8,
+ 0xf837889a, 0x97e32d77, 0x11ed935f, 0x16681281,
+ 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
+ 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696,
+ 0xcdb30aeb, 0x532e3054, 0x8fd948e4, 0x6dbc3128,
+ 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
+ 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0,
+ 0x45eee2b6, 0xa3aaabea, 0xdb6c4f15, 0xfacb4fd0,
+ 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
+ 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250,
+ 0xcf62a1f2, 0x5b8d2646, 0xfc8883a0, 0xc1c7b6a3,
+ 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
+ 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00,
+ 0x58428d2a, 0x0c55f5ea, 0x1dadf43e, 0x233f7061,
+ 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
+ 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e,
+ 0xa6078084, 0x19f8509e, 0xe8efd855, 0x61d99735,
+ 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
+ 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9,
+ 0xdb73dbd3, 0x105588cd, 0x675fda79, 0xe3674340,
+ 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
+ 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
+ 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934,
+ 0x411520f7, 0x7602d4f7, 0xbcf46b2e, 0xd4a20068,
+ 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
+ 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840,
+ 0x4d95fc1d, 0x96b591af, 0x70f4ddd3, 0x66a02f45,
+ 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
+ 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a,
+ 0x28507825, 0x530429f4, 0x0a2c86da, 0xe9b66dfb,
+ 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
+ 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6,
+ 0xaace1e7c, 0xd3375fec, 0xce78a399, 0x406b2a42,
+ 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
+ 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2,
+ 0x3a6efa74, 0xdd5b4332, 0x6841e7f7, 0xca7820fb,
+ 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
+ 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b,
+ 0x55a867bc, 0xa1159a58, 0xcca92963, 0x99e1db33,
+ 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
+ 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3,
+ 0x95c11548, 0xe4c66d22, 0x48c1133f, 0xc70f86dc,
+ 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
+ 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564,
+ 0x257b7834, 0x602a9c60, 0xdff8e8a3, 0x1f636c1b,
+ 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
+ 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922,
+ 0x85b2a20e, 0xe6ba0d99, 0xde720c8c, 0x2da2f728,
+ 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
+ 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e,
+ 0x0a476341, 0x992eff74, 0x3a6f6eab, 0xf4f8fd37,
+ 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
+ 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804,
+ 0xf1290dc7, 0xcc00ffa3, 0xb5390f92, 0x690fed0b,
+ 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
+ 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb,
+ 0x37392eb3, 0xcc115979, 0x8026e297, 0xf42e312d,
+ 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
+ 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350,
+ 0x1a6b1018, 0x11caedfa, 0x3d25bdd8, 0xe2e1c3c9,
+ 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
+ 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe,
+ 0x9dbc8057, 0xf0f7c086, 0x60787bf8, 0x6003604d,
+ 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
+ 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f,
+ 0x77a057be, 0xbde8ae24, 0x55464299, 0xbf582e61,
+ 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
+ 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9,
+ 0x7aeb2661, 0x8b1ddf84, 0x846a0e79, 0x915f95e2,
+ 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
+ 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e,
+ 0xb77f19b6, 0xe0a9dc09, 0x662d09a1, 0xc4324633,
+ 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
+ 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169,
+ 0xdcb7da83, 0x573906fe, 0xa1e2ce9b, 0x4fcd7f52,
+ 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
+ 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5,
+ 0xf0177a28, 0xc0f586e0, 0x006058aa, 0x30dc7d62,
+ 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
+ 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76,
+ 0x6f05e409, 0x4b7c0188, 0x39720a3d, 0x7c927c24,
+ 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
+ 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4,
+ 0x1e50ef5e, 0xb161e6f8, 0xa28514d9, 0x6c51133c,
+ 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
+ 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
+ 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b,
+ 0x5cb0679e, 0x4fa33742, 0xd3822740, 0x99bc9bbe,
+ 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
+ 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4,
+ 0x5748ab2f, 0xbc946e79, 0xc6a376d2, 0x6549c2c8,
+ 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
+ 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304,
+ 0xa1fad5f0, 0x6a2d519a, 0x63ef8ce2, 0x9a86ee22,
+ 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
+ 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6,
+ 0x2826a2f9, 0xa73a3ae1, 0x4ba99586, 0xef5562e9,
+ 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
+ 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593,
+ 0xe990fd5a, 0x9e34d797, 0x2cf0b7d9, 0x022b8b51,
+ 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
+ 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c,
+ 0xe029ac71, 0xe019a5e6, 0x47b0acfd, 0xed93fa9b,
+ 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
+ 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c,
+ 0x15056dd4, 0x88f46dba, 0x03a16125, 0x0564f0bd,
+ 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
+ 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319,
+ 0x7533d928, 0xb155fdf5, 0x03563482, 0x8aba3cbb,
+ 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
+ 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991,
+ 0xea7a90c2, 0xfb3e7bce, 0x5121ce64, 0x774fbe32,
+ 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
+ 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166,
+ 0xb39a460a, 0x6445c0dd, 0x586cdecf, 0x1c20c8ae,
+ 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
+ 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5,
+ 0x72eacea8, 0xfa6484bb, 0x8d6612ae, 0xbf3c6f47,
+ 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
+ 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d,
+ 0x4040cb08, 0x4eb4e2cc, 0x34d2466a, 0x0115af84,
+ 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
+ 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8,
+ 0x611560b1, 0xe7933fdc, 0xbb3a792b, 0x344525bd,
+ 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
+ 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7,
+ 0x1a908749, 0xd44fbd9a, 0xd0dadecb, 0xd50ada38,
+ 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
+ 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c,
+ 0xbf97222c, 0x15e6fc2a, 0x0f91fc71, 0x9b941525,
+ 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
+ 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442,
+ 0xe0ec6e0e, 0x1698db3b, 0x4c98a0be, 0x3278e964,
+ 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
+ 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8,
+ 0xdf359f8d, 0x9b992f2e, 0xe60b6f47, 0x0fe3f11d,
+ 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
+ 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299,
+ 0xf523f357, 0xa6327623, 0x93a83531, 0x56cccd02,
+ 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
+ 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614,
+ 0xe6c6c7bd, 0x327a140a, 0x45e1d006, 0xc3f27b9a,
+ 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
+ 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b,
+ 0x53113ec0, 0x1640e3d3, 0x38abbd60, 0x2547adf0,
+ 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
+ 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e,
+ 0x1948c25c, 0x02fb8a8c, 0x01c36ae4, 0xd6ebe1f9,
+ 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
+ 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
+};
+
+/*
+ * Round loop unrolling macros, S is a pointer to a S-Box array
+ * organized in 4 unsigned longs at a row.
+ */
+#define GET32_3(x) (((x) & 0xff))
+#define GET32_2(x) (((x) >> (8)) & (0xff))
+#define GET32_1(x) (((x) >> (16)) & (0xff))
+#define GET32_0(x) (((x) >> (24)) & (0xff))
+
+#define bf_F(x) (((S[GET32_0(x)] + S[256 + GET32_1(x)]) ^ \
+ S[512 + GET32_2(x)]) + S[768 + GET32_3(x)])
+
+#define EROUND(a, b, n) do { b ^= P[n]; a ^= bf_F(b); } while (0)
+#define DROUND(a, b, n) do { a ^= bf_F(b); b ^= P[n]; } while (0)
+
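The macros above index one flat 1024-entry array rather than four separate 256-entry S-boxes. A standalone sketch of the same lookup, using arbitrary filler values instead of real Blowfish constants:

#include <stdint.h>
#include <stdio.h>

static uint32_t S[1024];	/* box n occupies S[n * 256 .. n * 256 + 255] */

/* same structure as bf_F above: the four bytes of x, MSB first,
 * each select one entry of their box */
static uint32_t bf_f(uint32_t x)
{
	return ((S[(x >> 24) & 0xff] + S[256 + ((x >> 16) & 0xff)])
		^ S[512 + ((x >> 8) & 0xff)]) + S[768 + (x & 0xff)];
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 1024; i++)
		S[i] = i * 0x9e3779b9u;	/* arbitrary filler, not real S-box data */
	printf("%08x\n", bf_f(0x01020304));
	return 0;
}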
+
+/*
+ * encrypt isdn data frame
+ * every block with 9 samples is encrypted
+ */
+void
+dsp_bf_encrypt(struct dsp *dsp, u8 *data, int len)
+{
+ int i = 0, j = dsp->bf_crypt_pos;
+ u8 *bf_data_in = dsp->bf_data_in;
+ u8 *bf_crypt_out = dsp->bf_crypt_out;
+ u32 *P = dsp->bf_p;
+ u32 *S = dsp->bf_s;
+ u32 yl, yr;
+ u32 cs;
+ u8 nibble;
+
+ while (i < len) {
+ /* collect a block of 9 samples */
+ if (j < 9) {
+ bf_data_in[j] = *data;
+ *data++ = bf_crypt_out[j++];
+ i++;
+ continue;
+ }
+ j = 0;
+ /* transcode 9 samples xlaw to 8 bytes */
+ yl = dsp_audio_law2seven[bf_data_in[0]];
+ yl = (yl<<7) | dsp_audio_law2seven[bf_data_in[1]];
+ yl = (yl<<7) | dsp_audio_law2seven[bf_data_in[2]];
+ yl = (yl<<7) | dsp_audio_law2seven[bf_data_in[3]];
+ nibble = dsp_audio_law2seven[bf_data_in[4]];
+ yr = nibble;
+ yl = (yl<<4) | (nibble>>3);
+ yr = (yr<<7) | dsp_audio_law2seven[bf_data_in[5]];
+ yr = (yr<<7) | dsp_audio_law2seven[bf_data_in[6]];
+ yr = (yr<<7) | dsp_audio_law2seven[bf_data_in[7]];
+ yr = (yr<<7) | dsp_audio_law2seven[bf_data_in[8]];
+ yr = (yr<<1) | (bf_data_in[0] & 1);
+
+ /* fill unused bit with random noise of audio input */
+ /* encrypt */
+
+ EROUND(yr, yl, 0);
+ EROUND(yl, yr, 1);
+ EROUND(yr, yl, 2);
+ EROUND(yl, yr, 3);
+ EROUND(yr, yl, 4);
+ EROUND(yl, yr, 5);
+ EROUND(yr, yl, 6);
+ EROUND(yl, yr, 7);
+ EROUND(yr, yl, 8);
+ EROUND(yl, yr, 9);
+ EROUND(yr, yl, 10);
+ EROUND(yl, yr, 11);
+ EROUND(yr, yl, 12);
+ EROUND(yl, yr, 13);
+ EROUND(yr, yl, 14);
+ EROUND(yl, yr, 15);
+ yl ^= P[16];
+ yr ^= P[17];
+
+ /* calculate 3-bit checksum */
+ cs = yl ^ (yl>>3) ^ (yl>>6) ^ (yl>>9) ^ (yl>>12) ^ (yl>>15)
+ ^ (yl>>18) ^ (yl>>21) ^ (yl>>24) ^ (yl>>27) ^ (yl>>30)
+ ^ (yr<<2) ^ (yr>>1) ^ (yr>>4) ^ (yr>>7) ^ (yr>>10)
+ ^ (yr>>13) ^ (yr>>16) ^ (yr>>19) ^ (yr>>22) ^ (yr>>25)
+ ^ (yr>>28) ^ (yr>>31);
+
+ /*
+ * transcode 8 crypted bytes to 9 data bytes with sync
+ * and checksum information
+ */
+ bf_crypt_out[0] = (yl>>25) | 0x80;
+ bf_crypt_out[1] = (yl>>18) & 0x7f;
+ bf_crypt_out[2] = (yl>>11) & 0x7f;
+ bf_crypt_out[3] = (yl>>4) & 0x7f;
+ bf_crypt_out[4] = ((yl<<3) & 0x78) | ((yr>>29) & 0x07);
+ bf_crypt_out[5] = ((yr>>22) & 0x7f) | ((cs<<5) & 0x80);
+ bf_crypt_out[6] = ((yr>>15) & 0x7f) | ((cs<<6) & 0x80);
+ bf_crypt_out[7] = ((yr>>8) & 0x7f) | (cs<<7);
+ bf_crypt_out[8] = yr;
+ }
+
+ /* write current count */
+ dsp->bf_crypt_pos = j;
+
+}
+
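For reference, the 3-bit checksum computed above (and re-checked in dsp_bf_decrypt() below) can be exercised on its own; this standalone sketch simply lifts the folding expression out of the function:

#include <stdint.h>
#include <stdio.h>

/* fold the 64-bit block (yl:yr) down to the 3-bit checksum */
static uint8_t fold_checksum(uint32_t yl, uint32_t yr)
{
	uint32_t cs;

	cs = yl ^ (yl >> 3) ^ (yl >> 6) ^ (yl >> 9) ^ (yl >> 12) ^ (yl >> 15)
	    ^ (yl >> 18) ^ (yl >> 21) ^ (yl >> 24) ^ (yl >> 27) ^ (yl >> 30)
	    ^ (yr << 2) ^ (yr >> 1) ^ (yr >> 4) ^ (yr >> 7) ^ (yr >> 10)
	    ^ (yr >> 13) ^ (yr >> 16) ^ (yr >> 19) ^ (yr >> 22) ^ (yr >> 25)
	    ^ (yr >> 28) ^ (yr >> 31);
	return cs & 0x7;
}

int main(void)
{
	printf("cs=%u\n", (unsigned)fold_checksum(0x12345678, 0x9abcdef0));
	return 0;
}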
+
+/*
+ * decrypt isdn data frame
+ * every block with 9 bytes is decrypted
+ */
+void
+dsp_bf_decrypt(struct dsp *dsp, u8 *data, int len)
+{
+ int i = 0;
+ u8 j = dsp->bf_decrypt_in_pos;
+ u8 k = dsp->bf_decrypt_out_pos;
+ u8 *bf_crypt_inring = dsp->bf_crypt_inring;
+ u8 *bf_data_out = dsp->bf_data_out;
+ u16 sync = dsp->bf_sync;
+ u32 *P = dsp->bf_p;
+ u32 *S = dsp->bf_s;
+ u32 yl, yr;
+ u8 nibble;
+ u8 cs, cs0, cs1, cs2;
+
+ while (i < len) {
+ /*
+ * shift upper bit and rotate data to buffer ring
+ * send current decrypted data
+ */
+ sync = (sync<<1) | ((*data)>>7);
+ bf_crypt_inring[j++ & 15] = *data;
+ *data++ = bf_data_out[k++];
+ i++;
+ if (k == 9)
+ k = 0; /* repeat if no sync has been found */
+ /* check if not in sync */
+ if ((sync&0x1f0) != 0x100)
+ continue;
+ j -= 9;
+ /* transcode receive data to 64 bit block of encrypted data */
+ yl = bf_crypt_inring[j++ & 15];
+ yl = (yl<<7) | bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
+ yl = (yl<<7) | bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
+ yl = (yl<<7) | bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
+ nibble = bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
+ yr = nibble;
+ yl = (yl<<4) | (nibble>>3);
+ cs2 = bf_crypt_inring[j++ & 15];
+ yr = (yr<<7) | (cs2 & 0x7f);
+ cs1 = bf_crypt_inring[j++ & 15];
+ yr = (yr<<7) | (cs1 & 0x7f);
+ cs0 = bf_crypt_inring[j++ & 15];
+ yr = (yr<<7) | (cs0 & 0x7f);
+ yr = (yr<<8) | bf_crypt_inring[j++ & 15];
+
+ /* calculate 3-bit checksum */
+ cs = yl ^ (yl>>3) ^ (yl>>6) ^ (yl>>9) ^ (yl>>12) ^ (yl>>15)
+ ^ (yl>>18) ^ (yl>>21) ^ (yl>>24) ^ (yl>>27) ^ (yl>>30)
+ ^ (yr<<2) ^ (yr>>1) ^ (yr>>4) ^ (yr>>7) ^ (yr>>10)
+ ^ (yr>>13) ^ (yr>>16) ^ (yr>>19) ^ (yr>>22) ^ (yr>>25)
+ ^ (yr>>28) ^ (yr>>31);
+
+ /* check if frame is valid */
+ if ((cs&0x7) != (((cs2>>5)&4) | ((cs1>>6)&2) | (cs0 >> 7))) {
+ if (dsp_debug & DEBUG_DSP_BLOWFISH)
+ printk(KERN_DEBUG
+ "DSP BLOWFISH: received corrupt frame, "
+ "checksum is not correct\n");
+ continue;
+ }
+
+ /* decrypt */
+ yr ^= P[17];
+ yl ^= P[16];
+ DROUND(yl, yr, 15);
+ DROUND(yr, yl, 14);
+ DROUND(yl, yr, 13);
+ DROUND(yr, yl, 12);
+ DROUND(yl, yr, 11);
+ DROUND(yr, yl, 10);
+ DROUND(yl, yr, 9);
+ DROUND(yr, yl, 8);
+ DROUND(yl, yr, 7);
+ DROUND(yr, yl, 6);
+ DROUND(yl, yr, 5);
+ DROUND(yr, yl, 4);
+ DROUND(yl, yr, 3);
+ DROUND(yr, yl, 2);
+ DROUND(yl, yr, 1);
+ DROUND(yr, yl, 0);
+
+ /* transcode 8 crypted bytes to 9 sample bytes */
+ bf_data_out[0] = dsp_audio_seven2law[(yl>>25) & 0x7f];
+ bf_data_out[1] = dsp_audio_seven2law[(yl>>18) & 0x7f];
+ bf_data_out[2] = dsp_audio_seven2law[(yl>>11) & 0x7f];
+ bf_data_out[3] = dsp_audio_seven2law[(yl>>4) & 0x7f];
+ bf_data_out[4] = dsp_audio_seven2law[((yl<<3) & 0x78) |
+ ((yr>>29) & 0x07)];
+
+ bf_data_out[5] = dsp_audio_seven2law[(yr>>22) & 0x7f];
+ bf_data_out[6] = dsp_audio_seven2law[(yr>>15) & 0x7f];
+ bf_data_out[7] = dsp_audio_seven2law[(yr>>8) & 0x7f];
+ bf_data_out[8] = dsp_audio_seven2law[(yr>>1) & 0x7f];
+ k = 0; /* start with new decoded frame */
+ }
+
+ /* write current count and sync */
+ dsp->bf_decrypt_in_pos = j;
+ dsp->bf_decrypt_out_pos = k;
+ dsp->bf_sync = sync;
+}
+
+
+/* used to encrypt S and P boxes */
+static inline void
+encrypt_block(const u32 *P, const u32 *S, u32 *dst, u32 *src)
+{
+ u32 yl = src[0];
+ u32 yr = src[1];
+
+ EROUND(yr, yl, 0);
+ EROUND(yl, yr, 1);
+ EROUND(yr, yl, 2);
+ EROUND(yl, yr, 3);
+ EROUND(yr, yl, 4);
+ EROUND(yl, yr, 5);
+ EROUND(yr, yl, 6);
+ EROUND(yl, yr, 7);
+ EROUND(yr, yl, 8);
+ EROUND(yl, yr, 9);
+ EROUND(yr, yl, 10);
+ EROUND(yl, yr, 11);
+ EROUND(yr, yl, 12);
+ EROUND(yl, yr, 13);
+ EROUND(yr, yl, 14);
+ EROUND(yl, yr, 15);
+
+ yl ^= P[16];
+ yr ^= P[17];
+
+ dst[0] = yr;
+ dst[1] = yl;
+}
+
+/*
+ * initialize the dsp for encryption and decryption using the same key
+ * Calculates the blowfish S and P boxes for encryption and decryption.
+ * The keylen must be in the range of 4-56 bytes.
+ * returns 0 if ok.
+ */
+int
+dsp_bf_init(struct dsp *dsp, const u8 *key, uint keylen)
+{
+ short i, j, count;
+ u32 data[2], temp;
+ u32 *P = (u32 *)dsp->bf_p;
+ u32 *S = (u32 *)dsp->bf_s;
+
+ if (keylen < 4 || keylen > 56)
+ return 1;
+
+ /* Set dsp states */
+ i = 0;
+ while (i < 9) {
+ dsp->bf_crypt_out[i] = 0xff;
+ dsp->bf_data_out[i] = dsp_silence;
+ i++;
+ }
+ dsp->bf_crypt_pos = 0;
+ dsp->bf_decrypt_in_pos = 0;
+ dsp->bf_decrypt_out_pos = 0;
+ dsp->bf_sync = 0x1ff;
+ dsp->bf_enable = 1;
+
+ /* Copy the initialization s-boxes */
+ for (i = 0, count = 0; i < 256; i++)
+ for (j = 0; j < 4; j++, count++)
+ S[count] = bf_sbox[count];
+
+ /* Set the p-boxes */
+ for (i = 0; i < 16 + 2; i++)
+ P[i] = bf_pbox[i];
+
+ /* Actual subkey generation */
+ for (j = 0, i = 0; i < 16 + 2; i++) {
+ temp = (((u32)key[j] << 24) |
+ ((u32)key[(j + 1) % keylen] << 16) |
+ ((u32)key[(j + 2) % keylen] << 8) |
+ ((u32)key[(j + 3) % keylen]));
+
+ P[i] = P[i] ^ temp;
+ j = (j + 4) % keylen;
+ }
+
+ data[0] = 0x00000000;
+ data[1] = 0x00000000;
+
+ for (i = 0; i < 16 + 2; i += 2) {
+ encrypt_block(P, S, data, data);
+
+ P[i] = data[0];
+ P[i + 1] = data[1];
+ }
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0, count = i * 256; j < 256; j += 2, count += 2) {
+ encrypt_block(P, S, data, data);
+
+ S[count] = data[0];
+ S[count + 1] = data[1];
+ }
+ }
+
+ return 0;
+}
+
+
+/*
+ * turn encryption off
+ */
+void
+dsp_bf_cleanup(struct dsp *dsp)
+{
+ dsp->bf_enable = 0;
+}
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
new file mode 100644
index 000000000000..e92b1ba4b45e
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -0,0 +1,1886 @@
+/*
+ * Audio crossconnecting/conferencing (hardware level).
+ *
+ * Copyright 2002 by Andreas Eversberg (jolly@eversberg.eu)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+/*
+ * The process of adding and removing parties to/from a conference:
+ *
+ * There is a chain of struct dsp_conf which has one or more members in a chain
+ * of struct dsp_conf_member.
+ *
+ * After a party is added, the conference is checked for hardware capability.
+ * Also if a party is removed, the conference is checked again.
+ *
+ * There are 3 different solutions: -1 = software, 0 = hardware-crossconnect,
+ * 1-n = hardware-conference. The n gives the conference number.
+ *
+ * Depending on the change after removal or insertion of a party, hardware
+ * commands are given.
+ *
+ * The current solution is stored within the struct dsp_conf entry.
+ */
+
+/*
+ * HOW THE CMX WORKS:
+ *
+ * There are 3 types of interaction: One member is alone, in this case only
+ * data flow from upper to lower layer is done.
+ * Two members will also exchange their data so they are crossconnected.
+ * Three or more members will be added in a conference and will hear each
+ * other but will not receive their own speech (echo) if not enabled.
+ *
+ * Features of CMX are:
+ * - Crossconnecting or even conference, if more than two members are together.
+ * - Force mixing of transmit data with other crossconnect/conference members.
+ * - Echo generation to benchmark the delay of audio processing.
+ * - Use hardware to minimize cpu load, disable FIFO load and minimize delay.
+ * - Dejittering and clock generation.
+ *
+ * There are 2 buffers:
+ *
+ *
+ * RX-Buffer
+ * R W
+ * | |
+ * ----------------+-------------+-------------------
+ *
+ * The rx-buffer is a ring buffer used to store the received data for each
+ * individual member. This is only the case if data needs to be dejittered
+ * or in case of a conference where different clocks require reclocking.
+ * The transmit-clock (R) will read the buffer.
+ * If the clock overruns the write-pointer, we will have a buffer underrun.
+ * If the write pointer always has a certain distance from the transmit-
+ * clock, we will have a delay. The delay will dynamically be increased and
+ * reduced.
+ *
+ *
+ * TX-Buffer
+ * R W
+ * | |
+ * -----------------+--------+-----------------------
+ *
+ * The tx-buffer is a ring buffer to queue the transmit data from user space
+ * until it will be mixed or sent. There are two pointers, R and W. If the write
+ * pointer W would reach or overrun R, the buffer would overrun. In this case
+ * (some) data is dropped so that it will not overrun.
+ * Additionally, dynamic dejittering can be enabled. This allows data from
+ * user space that has jitter or a different clock source.
+ *
+ *
+ * Clock:
+ *
+ * A clock is not required if the data source has exactly one clock. In this
+ * case the data source is forwarded to the destination.
+ *
+ * A clock is required if the data source
+ * - has multiple clocks.
+ * - has no usable clock due to jitter or packet loss (VoIP).
+ * In these cases the system's clock is used. The clock resolution depends on
+ * the jiffies resolution.
+ *
+ * If a member joins a conference:
+ *
+ * - If a member joins, its rx_buff is set to silence and the read pointer
+ * is set to the transmit clock.
+ *
+ * The procedure of received data from card is explained in cmx_receive.
+ * The procedure of received data from user space is explained in cmx_transmit.
+ * The procedure of transmit data to card is cmx_send.
+ *
+ *
+ * Interaction with other features:
+ *
+ * DTMF:
+ * DTMF decoding is done before the data is crossconnected.
+ *
+ * Volume change:
+ * Changing rx-volume is done before the data is crossconnected. The tx-volume
+ * must be changed whenever data is transmitted to the card by the cmx.
+ *
+ * Tones:
+ * If a tone is enabled, it will be processed whenever data is transmitted to
+ * the card. It will replace the tx-data from the user space.
+ * If tones are generated by hardware, this conference member is removed for
+ * this time.
+ *
+ * Disable rx-data:
+ * If cmx is realized in hardware, rx data will be disabled if requested by
+ * the upper layer. If dtmf decoding is done by software and enabled, rx data
+ * will not be disabled but will be blocked from reaching the upper layer.
+ *
+ * HFC conference engine:
+ * If it is possible to realize all features using hardware, hardware will be
+ * used if not forbidden by control command. Disabling rx-data provides
+ * completely traffic-free audio processing (except for the quick 1-frame
+ * upload of a tone loop, only once for a new tone).
+ *
+ */
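The R/W pointer arithmetic sketched above relies on the ring size being a power of two. A standalone illustration, with a hypothetical stand-in for the driver's CMX_BUFF_SIZE/CMX_BUFF_MASK/CMX_BUFF_HALF:

#include <stdio.h>

#define BUFF_SIZE 0x1000		/* hypothetical; the driver uses CMX_BUFF_SIZE */
#define BUFF_MASK (BUFF_SIZE - 1)
#define BUFF_HALF (BUFF_SIZE / 2)

int main(void)
{
	unsigned int r = 0x0ff0, w = 0x0010;	/* write pointer has wrapped */
	unsigned int fill = (w - r) & BUFF_MASK;

	/* distance from read to write pointer, modulo the ring size */
	printf("fill=%u -> %s\n", fill,
	       fill >= BUFF_HALF ? "underrun/overrun, resync" : "ok");
	return 0;
}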
+
+/* delay.h is required for hw_lock.h */
+
+#include <linux/delay.h>
+#include <linux/mISDNif.h>
+#include <linux/mISDNdsp.h>
+#include "core.h"
+#include "dsp.h"
+/*
+ * debugging of multi party conference,
+ * by using conference even with two members
+ */
+
+/* #define CMX_CONF_DEBUG */
+
+/*#define CMX_DEBUG * massive read/write pointer output */
+/*#define CMX_TX_DEBUG * massive read/write on tx-buffer with content */
+
+static inline int
+count_list_member(struct list_head *head)
+{
+ int cnt = 0;
+ struct list_head *m;
+
+ list_for_each(m, head)
+ cnt++;
+ return cnt;
+}
+
+/*
+ * debug cmx memory structure
+ */
+void
+dsp_cmx_debug(struct dsp *dsp)
+{
+ struct dsp_conf *conf;
+ struct dsp_conf_member *member;
+ struct dsp *odsp;
+
+ printk(KERN_DEBUG "-----Current DSP\n");
+ list_for_each_entry(odsp, &dsp_ilist, list) {
+ printk(KERN_DEBUG "* %s echo=%d txmix=%d",
+ odsp->name, odsp->echo, odsp->tx_mix);
+ if (odsp->conf)
+ printk(" (Conf %d)", odsp->conf->id);
+ if (dsp == odsp)
+ printk(" *this*");
+ printk("\n");
+ }
+ printk(KERN_DEBUG "-----Current Conf:\n");
+ list_for_each_entry(conf, &conf_ilist, list) {
+ printk(KERN_DEBUG "* Conf %d (%p)\n", conf->id, conf);
+ list_for_each_entry(member, &conf->mlist, list) {
+ printk(KERN_DEBUG
+ " - member = %s (slot_tx %d, bank_tx %d, "
+ "slot_rx %d, bank_rx %d hfc_conf %d)%s\n",
+ member->dsp->name, member->dsp->pcm_slot_tx,
+ member->dsp->pcm_bank_tx, member->dsp->pcm_slot_rx,
+ member->dsp->pcm_bank_rx, member->dsp->hfc_conf,
+ (member->dsp == dsp) ? " *this*" : "");
+ }
+ }
+ printk(KERN_DEBUG "-----end\n");
+}
+
+/*
+ * search conference
+ */
+static struct dsp_conf *
+dsp_cmx_search_conf(u32 id)
+{
+ struct dsp_conf *conf;
+
+ if (!id) {
+ printk(KERN_WARNING "%s: conference ID is 0.\n", __func__);
+ return NULL;
+ }
+
+ /* search conference */
+ list_for_each_entry(conf, &conf_ilist, list)
+ if (conf->id == id)
+ return conf;
+
+ return NULL;
+}
+
+
+/*
+ * add member to conference
+ */
+static int
+dsp_cmx_add_conf_member(struct dsp *dsp, struct dsp_conf *conf)
+{
+ struct dsp_conf_member *member;
+
+ if (!conf || !dsp) {
+ printk(KERN_WARNING "%s: conf or dsp is 0.\n", __func__);
+ return -EINVAL;
+ }
+ if (dsp->member) {
+ printk(KERN_WARNING "%s: dsp is already member in a conf.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (dsp->conf) {
+ printk(KERN_WARNING "%s: dsp is already in a conf.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ member = kzalloc(sizeof(struct dsp_conf_member), GFP_ATOMIC);
+ if (!member) {
+ printk(KERN_ERR "kmalloc struct dsp_conf_member failed\n");
+ return -ENOMEM;
+ }
+ member->dsp = dsp;
+ /* clear rx buffer */
+ memset(dsp->rx_buff, dsp_silence, sizeof(dsp->rx_buff));
+ dsp->rx_init = 1; /* rx_W and rx_R will be adjusted on first frame */
+ dsp->rx_W = 0;
+ dsp->rx_R = 0;
+
+ list_add_tail(&member->list, &conf->mlist);
+
+ dsp->conf = conf;
+ dsp->member = member;
+
+ return 0;
+}
+
+
+/*
+ * del member from conference
+ */
+int
+dsp_cmx_del_conf_member(struct dsp *dsp)
+{
+ struct dsp_conf_member *member;
+
+ if (!dsp) {
+ printk(KERN_WARNING "%s: dsp is 0.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (!dsp->conf) {
+ printk(KERN_WARNING "%s: dsp is not in a conf.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (list_empty(&dsp->conf->mlist)) {
+ printk(KERN_WARNING "%s: dsp has linked an empty conf.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* find us in conf */
+ list_for_each_entry(member, &dsp->conf->mlist, list) {
+ if (member->dsp == dsp) {
+ list_del(&member->list);
+ dsp->conf = NULL;
+ dsp->member = NULL;
+ kfree(member);
+ return 0;
+ }
+ }
+ printk(KERN_WARNING
+ "%s: dsp is not present in its own conf_member list.\n",
+ __func__);
+
+ return -EINVAL;
+}
+
+
+/*
+ * new conference
+ */
+static struct dsp_conf
+*dsp_cmx_new_conf(u32 id)
+{
+ struct dsp_conf *conf;
+
+ if (!id) {
+ printk(KERN_WARNING "%s: id is 0.\n",
+ __func__);
+ return NULL;
+ }
+
+ conf = kzalloc(sizeof(struct dsp_conf), GFP_ATOMIC);
+ if (!conf) {
+ printk(KERN_ERR "kmalloc struct dsp_conf failed\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&conf->mlist);
+ conf->id = id;
+
+ list_add_tail(&conf->list, &conf_ilist);
+
+ return conf;
+}
+
+
+/*
+ * del conference
+ */
+int
+dsp_cmx_del_conf(struct dsp_conf *conf)
+{
+ if (!conf) {
+ printk(KERN_WARNING "%s: conf is null.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (!list_empty(&conf->mlist)) {
+ printk(KERN_WARNING "%s: conf not empty.\n",
+ __func__);
+ return -EINVAL;
+ }
+ list_del(&conf->list);
+ kfree(conf);
+
+ return 0;
+}
+
+
+/*
+ * send HW message to hfc card
+ */
+static void
+dsp_cmx_hw_message(struct dsp *dsp, u32 message, u32 param1, u32 param2,
+ u32 param3, u32 param4)
+{
+ struct mISDN_ctrl_req cq;
+
+ memset(&cq, 0, sizeof(cq));
+ cq.op = message;
+ cq.p1 = param1 | (param2 << 8);
+ cq.p2 = param3 | (param4 << 8);
+ if (dsp->ch.peer)
+ dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq);
+}
+
+
+/*
+ * do hardware update and set the software/hardware flag
+ *
+ * either a conference or a dsp instance can be given
+ * if only dsp instance is given, the instance is not associated with a conf
+ * and therefore removed. if a conference is given, the dsp is expected to
+ * be member of that conference.
+ */
+void
+dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp)
+{
+ struct dsp_conf_member *member, *nextm;
+ struct dsp *finddsp;
+ int memb = 0, i, ii, i1, i2;
+ int freeunits[8];
+ u_char freeslots[256];
+ int same_hfc = -1, same_pcm = -1, current_conf = -1,
+ all_conf = 1;
+
+ /* dsp gets updated (no conf) */
+ if (!conf) {
+ if (!dsp)
+ return;
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG "%s checking dsp %s\n",
+ __func__, dsp->name);
+one_member:
+ /* remove HFC conference if enabled */
+ if (dsp->hfc_conf >= 0) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s removing %s from HFC conf %d "
+ "because dsp is split\n", __func__,
+ dsp->name, dsp->hfc_conf);
+ dsp_cmx_hw_message(dsp, MISDN_CTRL_HFC_CONF_SPLIT,
+ 0, 0, 0, 0);
+ dsp->hfc_conf = -1;
+ }
+ /* process hw echo */
+ if (dsp->features.pcm_banks < 1)
+ return;
+ if (!dsp->echo) {
+ /* NO ECHO: remove PCM slot if assigned */
+ if (dsp->pcm_slot_tx >= 0 || dsp->pcm_slot_rx >= 0) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG "%s removing %s from"
+ " PCM slot %d (TX) %d (RX) because"
+ " dsp is split (no echo)\n",
+ __func__, dsp->name,
+ dsp->pcm_slot_tx, dsp->pcm_slot_rx);
+ dsp_cmx_hw_message(dsp, MISDN_CTRL_HFC_PCM_DISC,
+ 0, 0, 0, 0);
+ dsp->pcm_slot_tx = -1;
+ dsp->pcm_bank_tx = -1;
+ dsp->pcm_slot_rx = -1;
+ dsp->pcm_bank_rx = -1;
+ }
+ return;
+ }
+ /* ECHO: already echo */
+ if (dsp->pcm_slot_tx >= 0 && dsp->pcm_slot_rx < 0 &&
+ dsp->pcm_bank_tx == 2 && dsp->pcm_bank_rx == 2)
+ return;
+ /* ECHO: if slot already assigned */
+ if (dsp->pcm_slot_tx >= 0) {
+ dsp->pcm_slot_rx = dsp->pcm_slot_tx;
+ dsp->pcm_bank_tx = 2; /* 2 means loop */
+ dsp->pcm_bank_rx = 2;
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s refresh %s for echo using slot %d\n",
+ __func__, dsp->name,
+ dsp->pcm_slot_tx);
+ dsp_cmx_hw_message(dsp, MISDN_CTRL_HFC_PCM_CONN,
+ dsp->pcm_slot_tx, 2, dsp->pcm_slot_rx, 2);
+ return;
+ }
+ /* ECHO: find slot */
+ dsp->pcm_slot_tx = -1;
+ dsp->pcm_slot_rx = -1;
+ memset(freeslots, 1, sizeof(freeslots));
+ list_for_each_entry(finddsp, &dsp_ilist, list) {
+ if (finddsp->features.pcm_id == dsp->features.pcm_id) {
+ if (finddsp->pcm_slot_rx >= 0 &&
+ finddsp->pcm_slot_rx < sizeof(freeslots))
+ freeslots[finddsp->pcm_slot_tx] = 0;
+ if (finddsp->pcm_slot_tx >= 0 &&
+ finddsp->pcm_slot_tx < sizeof(freeslots))
+ freeslots[finddsp->pcm_slot_rx] = 0;
+ }
+ }
+ i = 0;
+ ii = dsp->features.pcm_slots;
+ while (i < ii) {
+ if (freeslots[i])
+ break;
+ i++;
+ }
+ if (i == ii) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s no slot available for echo\n",
+ __func__);
+ /* no more slots available */
+ return;
+ }
+ /* assign free slot */
+ dsp->pcm_slot_tx = i;
+ dsp->pcm_slot_rx = i;
+ dsp->pcm_bank_tx = 2; /* loop */
+ dsp->pcm_bank_rx = 2;
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s assign echo for %s using slot %d\n",
+ __func__, dsp->name, dsp->pcm_slot_tx);
+ dsp_cmx_hw_message(dsp, MISDN_CTRL_HFC_PCM_CONN,
+ dsp->pcm_slot_tx, 2, dsp->pcm_slot_rx, 2);
+ return;
+ }
+
+ /* conf gets updated (all members) */
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG "%s checking conference %d\n",
+ __func__, conf->id);
+
+ if (list_empty(&conf->mlist)) {
+ printk(KERN_ERR "%s: conference without members\n",
+ __func__);
+ return;
+ }
+ member = list_entry(conf->mlist.next, struct dsp_conf_member, list);
+ same_hfc = member->dsp->features.hfc_id;
+ same_pcm = member->dsp->features.pcm_id;
+ /* check all members in our conference */
+ list_for_each_entry(member, &conf->mlist, list) {
+ /* check if member uses mixing */
+ if (member->dsp->tx_mix) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s dsp %s cannot form a conf, because "
+ "tx_mix is turned on\n", __func__,
+ member->dsp->name);
+conf_software:
+ list_for_each_entry(member, &conf->mlist, list) {
+ dsp = member->dsp;
+ /* remove HFC conference if enabled */
+ if (dsp->hfc_conf >= 0) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s removing %s from HFC "
+ "conf %d because not "
+ "possible with hardware\n",
+ __func__,
+ dsp->name,
+ dsp->hfc_conf);
+ dsp_cmx_hw_message(dsp,
+ MISDN_CTRL_HFC_CONF_SPLIT,
+ 0, 0, 0, 0);
+ dsp->hfc_conf = -1;
+ }
+ /* remove PCM slot if assigned */
+ if (dsp->pcm_slot_tx >= 0 ||
+ dsp->pcm_slot_rx >= 0) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG "%s removing "
+ "%s from PCM slot %d (TX)"
+ " slot %d (RX) because not"
+ " possible with hardware\n",
+ __func__,
+ dsp->name,
+ dsp->pcm_slot_tx,
+ dsp->pcm_slot_rx);
+ dsp_cmx_hw_message(dsp,
+ MISDN_CTRL_HFC_PCM_DISC,
+ 0, 0, 0, 0);
+ dsp->pcm_slot_tx = -1;
+ dsp->pcm_bank_tx = -1;
+ dsp->pcm_slot_rx = -1;
+ dsp->pcm_bank_rx = -1;
+ }
+ }
+ conf->hardware = 0;
+ conf->software = 1;
+ return;
+ }
+ /* check if member has echo turned on */
+ if (member->dsp->echo) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s dsp %s cannot form a conf, because "
+ "echo is turned on\n", __func__,
+ member->dsp->name);
+ goto conf_software;
+ }
+ /* check if member has tx_mix turned on */
+ if (member->dsp->tx_mix) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s dsp %s cannot form a conf, because "
+ "tx_mix is turned on\n",
+ __func__, member->dsp->name);
+ goto conf_software;
+ }
+ /* check if member changes volume at an unsupported level */
+ if (member->dsp->tx_volume) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s dsp %s cannot form a conf, because "
+ "tx_volume is changed\n",
+ __func__, member->dsp->name);
+ goto conf_software;
+ }
+ if (member->dsp->rx_volume) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s dsp %s cannot form a conf, because "
+ "rx_volume is changed\n",
+ __func__, member->dsp->name);
+ goto conf_software;
+ }
+ /* check if tx-data turned on */
+ if (member->dsp->tx_data) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s dsp %s cannot form a conf, because "
+ "tx_data is turned on\n",
+ __func__, member->dsp->name);
+ goto conf_software;
+ }
+ /* check if pipeline exists */
+ if (member->dsp->pipeline.inuse) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s dsp %s cannot form a conf, because "
+ "pipeline exists\n", __func__,
+ member->dsp->name);
+ goto conf_software;
+ }
+ /* check if encryption is enabled */
+ if (member->dsp->bf_enable) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG "%s dsp %s cannot form a "
+ "conf, because encryption is enabled\n",
+ __func__, member->dsp->name);
+ goto conf_software;
+ }
+ /* check if member is on a card with PCM support */
+ if (member->dsp->features.pcm_id < 0) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s dsp %s cannot form a conf, because "
+ "dsp has no PCM bus\n",
+ __func__, member->dsp->name);
+ goto conf_software;
+ }
+ /* check if relations are on the same PCM bus */
+ if (member->dsp->features.pcm_id != same_pcm) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s dsp %s cannot form a conf, because "
+ "dsp is on a different PCM bus than the "
+ "first dsp\n",
+ __func__, member->dsp->name);
+ goto conf_software;
+ }
+ /* determine if members are on the same hfc chip */
+ if (same_hfc != member->dsp->features.hfc_id)
+ same_hfc = -1;
+ /* if there are members already in a conference */
+ if (current_conf < 0 && member->dsp->hfc_conf >= 0)
+ current_conf = member->dsp->hfc_conf;
+ /* if any member is not in a conference */
+ if (member->dsp->hfc_conf < 0)
+ all_conf = 0;
+
+ memb++;
+ }
+
+ /* if no member, this is an error */
+ if (memb < 1)
+ return;
+
+ /* one member */
+ if (memb == 1) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s conf %d cannot form a HW conference, "
+ "because dsp is alone\n", __func__, conf->id);
+ conf->hardware = 0;
+ conf->software = 0;
+ member = list_entry(conf->mlist.next, struct dsp_conf_member,
+ list);
+ dsp = member->dsp;
+ goto one_member;
+ }
+
+ /*
+ * ok, now we are sure that all members are on the same pcm.
+ * now we will see if we have only two members, so we can do
+ * crossconnections, which don't have any limitations.
+ */
+
+ /* if we have only two members */
+ if (memb == 2) {
+ member = list_entry(conf->mlist.next, struct dsp_conf_member,
+ list);
+ nextm = list_entry(member->list.next, struct dsp_conf_member,
+ list);
+ /* remove HFC conference if enabled */
+ if (member->dsp->hfc_conf >= 0) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s removing %s from HFC conf %d because "
+ "two parties require only a PCM slot\n",
+ __func__, member->dsp->name,
+ member->dsp->hfc_conf);
+ dsp_cmx_hw_message(member->dsp,
+ MISDN_CTRL_HFC_CONF_SPLIT, 0, 0, 0, 0);
+ member->dsp->hfc_conf = -1;
+ }
+ if (nextm->dsp->hfc_conf >= 0) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s removing %s from HFC conf %d because "
+ "two parties require only a PCM slot\n",
+ __func__, nextm->dsp->name,
+ nextm->dsp->hfc_conf);
+ dsp_cmx_hw_message(nextm->dsp,
+ MISDN_CTRL_HFC_CONF_SPLIT, 0, 0, 0, 0);
+ nextm->dsp->hfc_conf = -1;
+ }
+ /* if members have two banks (and not on the same chip) */
+ if (member->dsp->features.pcm_banks > 1 &&
+ nextm->dsp->features.pcm_banks > 1 &&
+ member->dsp->features.hfc_id !=
+ nextm->dsp->features.hfc_id) {
+ /* if both members have same slots with crossed banks */
+ if (member->dsp->pcm_slot_tx >= 0 &&
+ member->dsp->pcm_slot_rx >= 0 &&
+ nextm->dsp->pcm_slot_tx >= 0 &&
+ nextm->dsp->pcm_slot_rx >= 0 &&
+ nextm->dsp->pcm_slot_tx ==
+ member->dsp->pcm_slot_rx &&
+ nextm->dsp->pcm_slot_rx ==
+ member->dsp->pcm_slot_tx &&
+ nextm->dsp->pcm_slot_tx ==
+ member->dsp->pcm_slot_tx &&
+ member->dsp->pcm_bank_tx !=
+ member->dsp->pcm_bank_rx &&
+ nextm->dsp->pcm_bank_tx !=
+ nextm->dsp->pcm_bank_rx) {
+ /* all members have same slot */
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s dsp %s & %s stay joined on "
+ "PCM slot %d bank %d (TX) bank %d "
+ "(RX) (on different chips)\n",
+ __func__,
+ member->dsp->name,
+ nextm->dsp->name,
+ member->dsp->pcm_slot_tx,
+ member->dsp->pcm_bank_tx,
+ member->dsp->pcm_bank_rx);
+ conf->hardware = 0;
+ conf->software = 1;
+ return;
+ }
+ /* find a new slot */
+ memset(freeslots, 1, sizeof(freeslots));
+ list_for_each_entry(dsp, &dsp_ilist, list) {
+ if (dsp != member->dsp &&
+ dsp != nextm->dsp &&
+ member->dsp->features.pcm_id ==
+ dsp->features.pcm_id) {
+ if (dsp->pcm_slot_rx >= 0 &&
+ dsp->pcm_slot_rx <
+ sizeof(freeslots))
+ freeslots[dsp->pcm_slot_tx] = 0;
+ if (dsp->pcm_slot_tx >= 0 &&
+ dsp->pcm_slot_tx <
+ sizeof(freeslots))
+ freeslots[dsp->pcm_slot_rx] = 0;
+ }
+ }
+ i = 0;
+ ii = member->dsp->features.pcm_slots;
+ while (i < ii) {
+ if (freeslots[i])
+ break;
+ i++;
+ }
+ if (i == ii) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s no slot available for "
+ "%s & %s\n", __func__,
+ member->dsp->name,
+ nextm->dsp->name);
+ /* no more slots available */
+ goto conf_software;
+ }
+ /* assign free slot */
+ member->dsp->pcm_slot_tx = i;
+ member->dsp->pcm_slot_rx = i;
+ nextm->dsp->pcm_slot_tx = i;
+ nextm->dsp->pcm_slot_rx = i;
+ member->dsp->pcm_bank_rx = 0;
+ member->dsp->pcm_bank_tx = 1;
+ nextm->dsp->pcm_bank_rx = 1;
+ nextm->dsp->pcm_bank_tx = 0;
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s adding %s & %s to new PCM slot %d "
+ "(TX and RX on different chips) because "
+ "both members have not same slots\n",
+ __func__,
+ member->dsp->name,
+ nextm->dsp->name,
+ member->dsp->pcm_slot_tx);
+ dsp_cmx_hw_message(member->dsp, MISDN_CTRL_HFC_PCM_CONN,
+ member->dsp->pcm_slot_tx, member->dsp->pcm_bank_tx,
+ member->dsp->pcm_slot_rx, member->dsp->pcm_bank_rx);
+ dsp_cmx_hw_message(nextm->dsp, MISDN_CTRL_HFC_PCM_CONN,
+ nextm->dsp->pcm_slot_tx, nextm->dsp->pcm_bank_tx,
+ nextm->dsp->pcm_slot_rx, nextm->dsp->pcm_bank_rx);
+ conf->hardware = 1;
+ conf->software = 0;
+ return;
+ /* if members have one bank (or on the same chip) */
+ } else {
+ /* if both members have different crossed slots */
+ if (member->dsp->pcm_slot_tx >= 0 &&
+ member->dsp->pcm_slot_rx >= 0 &&
+ nextm->dsp->pcm_slot_tx >= 0 &&
+ nextm->dsp->pcm_slot_rx >= 0 &&
+ nextm->dsp->pcm_slot_tx ==
+ member->dsp->pcm_slot_rx &&
+ nextm->dsp->pcm_slot_rx ==
+ member->dsp->pcm_slot_tx &&
+ member->dsp->pcm_slot_tx !=
+ member->dsp->pcm_slot_rx &&
+ member->dsp->pcm_bank_tx == 0 &&
+ member->dsp->pcm_bank_rx == 0 &&
+ nextm->dsp->pcm_bank_tx == 0 &&
+ nextm->dsp->pcm_bank_rx == 0) {
+ /* all members have same slot */
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s dsp %s & %s stay joined on PCM "
+ "slot %d (TX) %d (RX) on same chip "
+ "or one bank PCM)\n", __func__,
+ member->dsp->name,
+ nextm->dsp->name,
+ member->dsp->pcm_slot_tx,
+ member->dsp->pcm_slot_rx);
+ conf->hardware = 0;
+ conf->software = 1;
+ return;
+ }
+ /* find two new slot */
+ memset(freeslots, 1, sizeof(freeslots));
+ list_for_each_entry(dsp, &dsp_ilist, list) {
+ if (dsp != member->dsp &&
+ dsp != nextm->dsp &&
+ member->dsp->features.pcm_id ==
+ dsp->features.pcm_id) {
+ if (dsp->pcm_slot_rx >= 0 &&
+ dsp->pcm_slot_rx <
+ sizeof(freeslots))
+ freeslots[dsp->pcm_slot_tx] = 0;
+ if (dsp->pcm_slot_tx >= 0 &&
+ dsp->pcm_slot_tx <
+ sizeof(freeslots))
+ freeslots[dsp->pcm_slot_rx] = 0;
+ }
+ }
+ i1 = 0;
+ ii = member->dsp->features.pcm_slots;
+ while (i1 < ii) {
+ if (freeslots[i1])
+ break;
+ i1++;
+ }
+ if (i1 == ii) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s no slot available "
+ "for %s & %s\n", __func__,
+ member->dsp->name,
+ nextm->dsp->name);
+ /* no more slots available */
+ goto conf_software;
+ }
+ i2 = i1+1;
+ while (i2 < ii) {
+ if (freeslots[i2])
+ break;
+ i2++;
+ }
+ if (i2 == ii) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s no slot available "
+ "for %s & %s\n",
+ __func__,
+ member->dsp->name,
+ nextm->dsp->name);
+ /* no more slots available */
+ goto conf_software;
+ }
+ /* assign free slots */
+ member->dsp->pcm_slot_tx = i1;
+ member->dsp->pcm_slot_rx = i2;
+ nextm->dsp->pcm_slot_tx = i2;
+ nextm->dsp->pcm_slot_rx = i1;
+ member->dsp->pcm_bank_rx = 0;
+ member->dsp->pcm_bank_tx = 0;
+ nextm->dsp->pcm_bank_rx = 0;
+ nextm->dsp->pcm_bank_tx = 0;
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s adding %s & %s to new PCM slot %d "
+ "(TX) %d (RX) on same chip or one bank "
+ "PCM, because both members have not "
+ "crossed slots\n", __func__,
+ member->dsp->name,
+ nextm->dsp->name,
+ member->dsp->pcm_slot_tx,
+ member->dsp->pcm_slot_rx);
+ dsp_cmx_hw_message(member->dsp, MISDN_CTRL_HFC_PCM_CONN,
+ member->dsp->pcm_slot_tx, member->dsp->pcm_bank_tx,
+ member->dsp->pcm_slot_rx, member->dsp->pcm_bank_rx);
+ dsp_cmx_hw_message(nextm->dsp, MISDN_CTRL_HFC_PCM_CONN,
+ nextm->dsp->pcm_slot_tx, nextm->dsp->pcm_bank_tx,
+ nextm->dsp->pcm_slot_rx, nextm->dsp->pcm_bank_rx);
+ conf->hardware = 1;
+ conf->software = 0;
+ return;
+ }
+ }
+
+ /*
+ * if we have more than two members, we may check if we have a conference
+ * unit available on the chip. also all members must be on the same chip.
+ */
+
+ /* if not the same HFC chip */
+ if (same_hfc < 0) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s conference %d cannot be formed, because "
+ "members are on different chips or not "
+ "on HFC chip\n",
+ __func__, conf->id);
+ goto conf_software;
+ }
+
+ /* for more than two members.. */
+
+ /* in case of hdlc, we change to software */
+ if (dsp->hdlc)
+ goto conf_software;
+
+ /* if all members already have the same conference */
+ if (all_conf)
+ return;
+
+ /*
+ * if there is an existing conference, but not all members have joined
+ */
+ if (current_conf >= 0) {
+join_members:
+ list_for_each_entry(member, &conf->mlist, list) {
+ /* join to current conference */
+ if (member->dsp->hfc_conf == current_conf)
+ continue;
+ /* get a free timeslot first */
+ memset(freeslots, 1, sizeof(freeslots));
+ list_for_each_entry(dsp, &dsp_ilist, list) {
+ /*
+ * not checking current member, because
+ * slot will be overwritten.
+ */
+ if (
+ dsp != member->dsp &&
+ /* dsp must be on the same PCM */
+ member->dsp->features.pcm_id ==
+ dsp->features.pcm_id) {
+ /* dsp must be on a slot */
+ if (dsp->pcm_slot_tx >= 0 &&
+ dsp->pcm_slot_tx <
+ sizeof(freeslots))
+ freeslots[dsp->pcm_slot_tx] = 0;
+ if (dsp->pcm_slot_rx >= 0 &&
+ dsp->pcm_slot_rx <
+ sizeof(freeslots))
+ freeslots[dsp->pcm_slot_rx] = 0;
+ }
+ }
+ i = 0;
+ ii = member->dsp->features.pcm_slots;
+ while (i < ii) {
+ if (freeslots[i])
+ break;
+ i++;
+ }
+ if (i == ii) {
+ /* no more slots available */
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s conference %d cannot be formed,"
+ " because no slot free\n",
+ __func__, conf->id);
+ goto conf_software;
+ }
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s changing dsp %s to HW conference "
+ "%d slot %d\n", __func__,
+ member->dsp->name, current_conf, i);
+ /* assign free slot & set PCM & join conf */
+ member->dsp->pcm_slot_tx = i;
+ member->dsp->pcm_slot_rx = i;
+ member->dsp->pcm_bank_tx = 2; /* loop */
+ member->dsp->pcm_bank_rx = 2;
+ member->dsp->hfc_conf = current_conf;
+ dsp_cmx_hw_message(member->dsp, MISDN_CTRL_HFC_PCM_CONN,
+ i, 2, i, 2);
+ dsp_cmx_hw_message(member->dsp,
+ MISDN_CTRL_HFC_CONF_JOIN, current_conf, 0, 0, 0);
+ }
+ return;
+ }
+
+ /*
+ * no member is in a conference yet, so we find a free one
+ */
+ memset(freeunits, 1, sizeof(freeunits));
+ list_for_each_entry(dsp, &dsp_ilist, list) {
+ /* dsp must be on the same chip */
+ if (dsp->features.hfc_id == same_hfc &&
+ /* dsp must have joined a HW conference */
+ dsp->hfc_conf >= 0 &&
+ /* slot must be within range */
+ dsp->hfc_conf < 8)
+ freeunits[dsp->hfc_conf] = 0;
+ }
+ i = 0;
+ ii = 8;
+ while (i < ii) {
+ if (freeunits[i])
+ break;
+ i++;
+ }
+ if (i == ii) {
+ /* no more conferences available */
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s conference %d cannot be formed, because "
+ "no conference number free\n",
+ __func__, conf->id);
+ goto conf_software;
+ }
+ /* join all members */
+ current_conf = i;
+ goto join_members;
+}
+
+
+/*
+ * conf_id != 0: join or change conference
+ * conf_id == 0: split from conference if not already
+ */
+int
+dsp_cmx_conf(struct dsp *dsp, u32 conf_id)
+{
+ int err;
+ struct dsp_conf *conf;
+ struct dsp_conf_member *member;
+
+ /* if conference doesn't change */
+ if (dsp->conf_id == conf_id)
+ return 0;
+
+ /* first remove us from current conf */
+ if (dsp->conf_id) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG "removing us from conference %d\n",
+ dsp->conf->id);
+ /* remove us from conf */
+ conf = dsp->conf;
+ err = dsp_cmx_del_conf_member(dsp);
+ if (err)
+ return err;
+ dsp->conf_id = 0;
+
+ /* update hardware */
+ dsp_cmx_hardware(NULL, dsp);
+
+ /* conf now empty? */
+ if (list_empty(&conf->mlist)) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "conference is empty, so we remove it.\n");
+ err = dsp_cmx_del_conf(conf);
+ if (err)
+ return err;
+ } else {
+ /* update members left on conf */
+ dsp_cmx_hardware(conf, NULL);
+ }
+ }
+
+ /* if split */
+ if (!conf_id)
+ return 0;
+
+ /* now add us to conf */
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG "searching conference %d\n",
+ conf_id);
+ conf = dsp_cmx_search_conf(conf_id);
+ if (!conf) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "conference doesn't exist yet, creating.\n");
+ /* the conference doesn't exist, so we create */
+ conf = dsp_cmx_new_conf(conf_id);
+ if (!conf)
+ return -EINVAL;
+ } else if (!list_empty(&conf->mlist)) {
+ member = list_entry(conf->mlist.next, struct dsp_conf_member,
+ list);
+ if (dsp->hdlc && !member->dsp->hdlc) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "cannot join transparent conference.\n");
+ return -EINVAL;
+ }
+ if (!dsp->hdlc && member->dsp->hdlc) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "cannot join hdlc conference.\n");
+ return -EINVAL;
+ }
+ }
+ /* add conference member */
+ err = dsp_cmx_add_conf_member(dsp, conf);
+ if (err)
+ return err;
+ dsp->conf_id = conf_id;
+
+ /* if we are alone, we do nothing! */
+ if (list_empty(&conf->mlist)) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "we are alone in this conference, so exit.\n");
+ /* update hardware */
+ dsp_cmx_hardware(NULL, dsp);
+ return 0;
+ }
+
+ /* update members on conf */
+ dsp_cmx_hardware(conf, NULL);
+
+ return 0;
+}
+
+
+/*
+ * audio data is received from card
+ */
+void
+dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb)
+{
+ u8 *d, *p;
+ int len = skb->len;
+ struct mISDNhead *hh = mISDN_HEAD_P(skb);
+ int w, i, ii;
+
+ /* check if we have something */
+ if (len < 1)
+ return;
+
+ /* half of the buffer should be larger than maximum packet size */
+ if (len >= CMX_BUFF_HALF) {
+ printk(KERN_ERR
+ "%s line %d: packet from card is too large (%d bytes). "
+ "please make card send smaller packets OR increase "
+ "CMX_BUFF_SIZE\n", __FILE__, __LINE__, len);
+ return;
+ }
+
+ /*
+ * initialize pointers if not already -
+ * also add delay if requested by PH_SIGNAL
+ */
+ if (dsp->rx_init) {
+ dsp->rx_init = 0;
+ if (dsp->features.unordered) {
+ dsp->rx_R = (hh->id & CMX_BUFF_MASK);
+ dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
+ & CMX_BUFF_MASK;
+ } else {
+ dsp->rx_R = 0;
+ dsp->rx_W = dsp->cmx_delay;
+ }
+ }
+ /* if frame contains time code, write directly */
+ if (dsp->features.unordered) {
+ dsp->rx_W = (hh->id & CMX_BUFF_MASK);
+ /* printk(KERN_DEBUG "%s %08x\n", dsp->name, hh->id); */
+ }
+ /*
+ * if we underrun (or maybe overrun),
+ * we set our new read pointer, and write silence to buffer
+ */
+ if (((dsp->rx_W-dsp->rx_R) & CMX_BUFF_MASK) >= CMX_BUFF_HALF) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "cmx_receive(dsp=%lx): UNDERRUN (or overrun the "
+ "maximum delay), adjusting read pointer! "
+ "(inst %s)\n", (u_long)dsp, dsp->name);
+ /* flush buffer */
+ if (dsp->features.unordered) {
+ dsp->rx_R = (hh->id & CMX_BUFF_MASK);
+ dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
+ & CMX_BUFF_MASK;
+ } else {
+ dsp->rx_R = 0;
+ dsp->rx_W = dsp->cmx_delay;
+ }
+ memset(dsp->rx_buff, dsp_silence, sizeof(dsp->rx_buff));
+ }
+ /* if we have reached double delay, jump back to middle */
+ if (dsp->cmx_delay)
+ if (((dsp->rx_W - dsp->rx_R) & CMX_BUFF_MASK) >=
+ (dsp->cmx_delay << 1)) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "cmx_receive(dsp=%lx): OVERRUN (because "
+ "twice the delay is reached), adjusting "
+ "read pointer! (inst %s)\n",
+ (u_long)dsp, dsp->name);
+ /* flush buffer */
+ if (dsp->features.unordered) {
+ dsp->rx_R = (hh->id & CMX_BUFF_MASK);
+ dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
+ & CMX_BUFF_MASK;
+ } else {
+ dsp->rx_R = 0;
+ dsp->rx_W = dsp->cmx_delay;
+ }
+ memset(dsp->rx_buff, dsp_silence, sizeof(dsp->rx_buff));
+ }
+
+ /* show where to write */
+#ifdef CMX_DEBUG
+ printk(KERN_DEBUG
+ "cmx_receive(dsp=%lx): rx_R(dsp)=%05x rx_W(dsp)=%05x len=%d %s\n",
+ (u_long)dsp, dsp->rx_R, dsp->rx_W, len, dsp->name);
+#endif
+
+ /* write data into rx_buffer */
+ p = skb->data;
+ d = dsp->rx_buff;
+ w = dsp->rx_W;
+ i = 0;
+ ii = len;
+ while (i < ii) {
+ d[w++ & CMX_BUFF_MASK] = *p++;
+ i++;
+ }
+
+ /* increase write-pointer */
+ dsp->rx_W = ((dsp->rx_W+len) & CMX_BUFF_MASK);
+}
+
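+ /*
+ * Illustration (not part of the driver): the receive path above writes
+ * into a circular buffer and wraps the index with CMX_BUFF_MASK, which
+ * only works because CMX_BUFF_SIZE is a power of two. A minimal userspace
+ * sketch of the same wrap-around write, using a hypothetical 16-byte
+ * buffer and made-up EX_* names:
+ */
+ #if 0 /* example only, never compiled */
+ #include <stdio.h>
+
+ #define EX_BUFF_SIZE 16 /* must be a power of two */
+ #define EX_BUFF_MASK (EX_BUFF_SIZE - 1)
+
+ static unsigned char ex_buff[EX_BUFF_SIZE];
+
+ /* write len bytes at write pointer w, wrapping the index via the mask */
+ static unsigned int ex_ring_write(unsigned int w, const unsigned char *p, int len)
+ {
+     int i;
+
+     for (i = 0; i < len; i++)
+         ex_buff[w++ & EX_BUFF_MASK] = p[i];
+     return w & EX_BUFF_MASK; /* new write pointer */
+ }
+
+ int main(void)
+ {
+     unsigned char data[5] = { 1, 2, 3, 4, 5 };
+
+     /* start near the end of the buffer; the write wraps to index 3 */
+     printf("new write pointer: %u\n", ex_ring_write(14, data, 5));
+     return 0;
+ }
+ #endif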
+
+/*
+ * send (mixed) audio data to card and control jitter
+ */
+static void
+dsp_cmx_send_member(struct dsp *dsp, int len, s32 *c, int members)
+{
+ struct dsp_conf *conf = dsp->conf;
+ struct dsp *member, *other;
+ register s32 sample;
+ u8 *d, *p, *q, *o_q;
+ struct sk_buff *nskb, *txskb;
+ int r, rr, t, tt, o_r, o_rr;
+ int preload = 0;
+ struct mISDNhead *hh, *thh;
+
+ /* don't process if: */
+ if (!dsp->b_active) { /* if not active */
+ dsp->last_tx = 0;
+ return;
+ }
+ if (dsp->pcm_slot_tx >= 0 && /* connected to pcm slot */
+ dsp->tx_R == dsp->tx_W && /* AND no tx-data */
+ !(dsp->tone.tone && dsp->tone.software)) { /* AND not soft tones */
+ dsp->last_tx = 0;
+ return;
+ }
+
+#ifdef CMX_DEBUG
+ printk(KERN_DEBUG
+ "SEND members=%d dsp=%s, conf=%p, rx_R=%05x rx_W=%05x\n",
+ members, dsp->name, conf, dsp->rx_R, dsp->rx_W);
+#endif
+
+ /* preload if we have delay set */
+ if (dsp->cmx_delay && !dsp->last_tx) {
+ preload = len;
+ if (preload < 128)
+ preload = 128;
+ }
+
+ /* PREPARE RESULT */
+ nskb = mI_alloc_skb(len + preload, GFP_ATOMIC);
+ if (!nskb) {
+ printk(KERN_ERR
+ "FATAL ERROR in mISDN_dsp.o: cannot alloc %d bytes\n",
+ len + preload);
+ return;
+ }
+ hh = mISDN_HEAD_P(nskb);
+ hh->prim = PH_DATA_REQ;
+ hh->id = 0;
+ dsp->last_tx = 1;
+
+ /* set pointers, indexes and stuff */
+ member = dsp;
+ p = dsp->tx_buff; /* transmit data */
+ q = dsp->rx_buff; /* received data */
+ d = skb_put(nskb, preload + len); /* result */
+ t = dsp->tx_R; /* tx-pointers */
+ tt = dsp->tx_W;
+ r = dsp->rx_R; /* rx-pointers */
+ rr = (r + len) & CMX_BUFF_MASK;
+
+ /* preload with silence, if required */
+ if (preload) {
+ memset(d, dsp_silence, preload);
+ d += preload;
+ }
+
+ /* PROCESS TONES/TX-DATA ONLY */
+ if (dsp->tone.tone && dsp->tone.software) {
+ /* -> copy tone */
+ dsp_tone_copy(dsp, d, len);
+ dsp->tx_R = 0; /* clear tx buffer */
+ dsp->tx_W = 0;
+ goto send_packet;
+ }
+ /* if we have tx-data but do not use mixing */
+ if (!dsp->tx_mix && t != tt) {
+ /* -> send tx-data and continue when not enough */
+#ifdef CMX_TX_DEBUG
+ sprintf(debugbuf, "TX sending (%04x-%04x)%p: ", t, tt, p);
+#endif
+ while (r != rr && t != tt) {
+#ifdef CMX_TX_DEBUG
+ if (strlen(debugbuf) < 48)
+ sprintf(debugbuf+strlen(debugbuf), " %02x", p[t]);
+#endif
+ *d++ = p[t]; /* write tx_buff */
+ t = (t+1) & CMX_BUFF_MASK;
+ r = (r+1) & CMX_BUFF_MASK;
+ }
+ if (r == rr) {
+ dsp->tx_R = t;
+#ifdef CMX_TX_DEBUG
+ printk(KERN_DEBUG "%s\n", debugbuf);
+#endif
+ goto send_packet;
+ }
+ }
+#ifdef CMX_TX_DEBUG
+ printk(KERN_DEBUG "%s\n", debugbuf);
+#endif
+
+ /* PROCESS DATA (one member / no conf) */
+ if (!conf || members <= 1) {
+ /* -> if echo is NOT enabled */
+ if (!dsp->echo) {
+ /* -> send tx-data if available or use 0-volume */
+ while (r != rr && t != tt) {
+ *d++ = p[t]; /* write tx_buff */
+ t = (t+1) & CMX_BUFF_MASK;
+ r = (r+1) & CMX_BUFF_MASK;
+ }
+ if (r != rr)
+ memset(d, dsp_silence, (rr-r)&CMX_BUFF_MASK);
+ /* -> if echo is enabled */
+ } else {
+ /*
+ * -> mix tx-data with echo if available,
+ * or use echo only
+ */
+ while (r != rr && t != tt) {
+ *d++ = dsp_audio_mix_law[(p[t]<<8)|q[r]];
+ t = (t+1) & CMX_BUFF_MASK;
+ r = (r+1) & CMX_BUFF_MASK;
+ }
+ while (r != rr) {
+ *d++ = q[r]; /* echo */
+ r = (r+1) & CMX_BUFF_MASK;
+ }
+ }
+ dsp->tx_R = t;
+ goto send_packet;
+ }
+ /* PROCESS DATA (two members) */
+#ifdef CMX_CONF_DEBUG
+ if (0) {
+#else
+ if (members == 2) {
+#endif
+ /* "other" becomes other party */
+ other = (list_entry(conf->mlist.next,
+ struct dsp_conf_member, list))->dsp;
+ if (other == member)
+ other = (list_entry(conf->mlist.prev,
+ struct dsp_conf_member, list))->dsp;
+ o_q = other->rx_buff; /* received data */
+ o_rr = (other->rx_R + len) & CMX_BUFF_MASK;
+ /* end of rx-pointer */
+ o_r = (o_rr - rr + r) & CMX_BUFF_MASK;
+ /* start rx-pointer at current read position*/
+ /* -> if echo is NOT enabled */
+ if (!dsp->echo) {
+ /*
+ * -> copy other member's rx-data,
+ * if tx-data is available, mix
+ */
+ while (o_r != o_rr && t != tt) {
+ *d++ = dsp_audio_mix_law[(p[t]<<8)|o_q[o_r]];
+ t = (t+1) & CMX_BUFF_MASK;
+ o_r = (o_r+1) & CMX_BUFF_MASK;
+ }
+ while (o_r != o_rr) {
+ *d++ = o_q[o_r];
+ o_r = (o_r+1) & CMX_BUFF_MASK;
+ }
+ /* -> if echo is enabled */
+ } else {
+ /*
+ * -> mix other member's rx-data with echo,
+ * if tx-data is available, mix
+ */
+ while (r != rr && t != tt) {
+ sample = dsp_audio_law_to_s32[p[t]] +
+ dsp_audio_law_to_s32[q[r]] +
+ dsp_audio_law_to_s32[o_q[o_r]];
+ if (sample < -32768)
+ sample = -32768;
+ else if (sample > 32767)
+ sample = 32767;
+ *d++ = dsp_audio_s16_to_law[sample & 0xffff];
+ /* tx-data + rx_data + echo */
+ t = (t+1) & CMX_BUFF_MASK;
+ r = (r+1) & CMX_BUFF_MASK;
+ o_r = (o_r+1) & CMX_BUFF_MASK;
+ }
+ while (r != rr) {
+ *d++ = dsp_audio_mix_law[(q[r]<<8)|o_q[o_r]];
+ r = (r+1) & CMX_BUFF_MASK;
+ o_r = (o_r+1) & CMX_BUFF_MASK;
+ }
+ }
+ dsp->tx_R = t;
+ goto send_packet;
+ }
+#ifdef DSP_NEVER_DEFINED
+ }
+#endif
+ /* PROCESS DATA (three or more members) */
+ /* -> if echo is NOT enabled */
+ if (!dsp->echo) {
+ /*
+ * -> subtract rx-data from conf-data,
+ * if tx-data is available, mix
+ */
+ while (r != rr && t != tt) {
+ sample = dsp_audio_law_to_s32[p[t]] + *c++ -
+ dsp_audio_law_to_s32[q[r]];
+ if (sample < -32768)
+ sample = -32768;
+ else if (sample > 32767)
+ sample = 32767;
+ *d++ = dsp_audio_s16_to_law[sample & 0xffff];
+ /* conf-rx+tx */
+ r = (r+1) & CMX_BUFF_MASK;
+ t = (t+1) & CMX_BUFF_MASK;
+ }
+ while (r != rr) {
+ sample = *c++ - dsp_audio_law_to_s32[q[r]];
+ if (sample < -32768)
+ sample = -32768;
+ else if (sample > 32767)
+ sample = 32767;
+ *d++ = dsp_audio_s16_to_law[sample & 0xffff];
+ /* conf-rx */
+ r = (r+1) & CMX_BUFF_MASK;
+ }
+ /* -> if echo is enabled */
+ } else {
+ /*
+ * -> encode conf-data, if tx-data
+ * is available, mix
+ */
+ while (r != rr && t != tt) {
+ sample = dsp_audio_law_to_s32[p[t]] + *c++;
+ if (sample < -32768)
+ sample = -32768;
+ else if (sample > 32767)
+ sample = 32767;
+ *d++ = dsp_audio_s16_to_law[sample & 0xffff];
+ /* conf(echo)+tx */
+ t = (t+1) & CMX_BUFF_MASK;
+ r = (r+1) & CMX_BUFF_MASK;
+ }
+ while (r != rr) {
+ sample = *c++;
+ if (sample < -32768)
+ sample = -32768;
+ else if (sample > 32767)
+ sample = 32767;
+ *d++ = dsp_audio_s16_to_law[sample & 0xffff];
+ /* conf(echo) */
+ r = (r+1) & CMX_BUFF_MASK;
+ }
+ }
+ dsp->tx_R = t;
+ goto send_packet;
+
+send_packet:
+ /*
+ * send tx-data if enabled - don't filter,
+ * because we want what we send, not what we filtered
+ */
+ if (dsp->tx_data) {
+ /* PREPARE RESULT */
+ txskb = mI_alloc_skb(len, GFP_ATOMIC);
+ if (!txskb) {
+ printk(KERN_ERR
+ "FATAL ERROR in mISDN_dsp.o: "
+ "cannot alloc %d bytes\n", len);
+ } else {
+ thh = mISDN_HEAD_P(txskb);
+ thh->prim = DL_DATA_REQ;
+ thh->id = 0;
+ memcpy(skb_put(txskb, len), nskb->data+preload, len);
+ /* queue (trigger later) */
+ skb_queue_tail(&dsp->sendq, txskb);
+ }
+ }
+ /* adjust volume */
+ if (dsp->tx_volume)
+ dsp_change_volume(nskb, dsp->tx_volume);
+ /* pipeline */
+ if (dsp->pipeline.inuse)
+ dsp_pipeline_process_tx(&dsp->pipeline, nskb->data, nskb->len);
+ /* crypt */
+ if (dsp->bf_enable)
+ dsp_bf_encrypt(dsp, nskb->data, nskb->len);
+ /* queue and trigger */
+ skb_queue_tail(&dsp->sendq, nskb);
+ schedule_work(&dsp->workq);
+}
+
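+ /*
+ * Illustration (not part of the driver): a minimal sketch of the
+ * saturation step used by the conference paths above. Linear samples are
+ * summed, the member's own contribution is subtracted, and the result is
+ * clipped to 16 bit before re-encoding. The lookup tables are passed as
+ * parameters here; the driver uses dsp_audio_law_to_s32 and
+ * dsp_audio_s16_to_law. The ex_* names are made up for the example.
+ */
+ #if 0 /* example only, never compiled */
+ /*
+ * conf_sum: sum of all members' linear samples (one slot of mixbuffer)
+ * own: this member's received law-encoded sample (removed, unless echo)
+ * tx: this member's transmit law-encoded sample (mixed in, if any)
+ */
+ static unsigned char ex_mix_member(int conf_sum, unsigned char own,
+     unsigned char tx, const int *law_to_s32, const unsigned char *s16_to_law)
+ {
+     int sample = law_to_s32[tx] + conf_sum - law_to_s32[own];
+
+     if (sample < -32768) /* clip instead of wrapping around */
+         sample = -32768;
+     else if (sample > 32767)
+         sample = 32767;
+     return s16_to_law[sample & 0xffff];
+ }
+ #endif
+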
+u32 samplecount;
+struct timer_list dsp_spl_tl;
+u32 dsp_spl_jiffies; /* calculate the next time to fire */
+u32 dsp_start_jiffies; /* jiffies at the time, the calculation begins */
+struct timeval dsp_start_tv; /* time at start of calculation */
+
+void
+dsp_cmx_send(void *arg)
+{
+ struct dsp_conf *conf;
+ struct dsp_conf_member *member;
+ struct dsp *dsp;
+ int mustmix, members;
+ s32 mixbuffer[MAX_POLL+100], *c;
+ u8 *p, *q;
+ int r, rr;
+ int jittercheck = 0, delay, i;
+ u_long flags;
+ struct timeval tv;
+ u32 elapsed;
+ s16 length;
+
+ /* lock */
+ spin_lock_irqsave(&dsp_lock, flags);
+
+ if (!dsp_start_tv.tv_sec) {
+ do_gettimeofday(&dsp_start_tv);
+ length = dsp_poll;
+ } else {
+ do_gettimeofday(&tv);
+ elapsed = ((tv.tv_sec - dsp_start_tv.tv_sec) * 8000)
+ + ((s32)(tv.tv_usec / 125) - (dsp_start_tv.tv_usec / 125));
+ dsp_start_tv.tv_sec = tv.tv_sec;
+ dsp_start_tv.tv_usec = tv.tv_usec;
+ length = elapsed;
+ }
+ if (length > MAX_POLL + 100)
+ length = MAX_POLL + 100;
+/* printk(KERN_DEBUG "len=%d dsp_count=0x%x.%04x dsp_poll_diff=0x%x.%04x\n",
+ length, dsp_count >> 16, dsp_count & 0xffff, dsp_poll_diff >> 16,
+ dsp_poll_diff & 0xffff);
+ */
+
+ /*
+ * check if jitter needs to be checked
+ * (this is about every second = 8192 samples)
+ */
+ samplecount += length;
+ if ((samplecount & 8191) < length)
+ jittercheck = 1;
+
+ /* loop all members that do not require conference mixing */
+ list_for_each_entry(dsp, &dsp_ilist, list) {
+ if (dsp->hdlc)
+ continue;
+ conf = dsp->conf;
+ mustmix = 0;
+ members = 0;
+ if (conf) {
+ members = count_list_member(&conf->mlist);
+#ifdef CMX_CONF_DEBUG
+ if (conf->software && members > 1)
+#else
+ if (conf->software && members > 2)
+#endif
+ mustmix = 1;
+ }
+
+ /* transmission required */
+ if (!mustmix) {
+ dsp_cmx_send_member(dsp, length, mixbuffer, members);
+
+ /*
+ * unused mixbuffer is given to prevent a
+ * potential null-pointer-bug
+ */
+ }
+ }
+
+ /* loop all members that require conference mixing */
+ list_for_each_entry(conf, &conf_ilist, list) {
+ /* count members and check hardware */
+ members = count_list_member(&conf->mlist);
+#ifdef CMX_CONF_DEBUG
+ if (conf->software && members > 1) {
+#else
+ if (conf->software && members > 2) {
+#endif
+ /* check for hdlc conf */
+ member = list_entry(conf->mlist.next,
+ struct dsp_conf_member, list);
+ if (member->dsp->hdlc)
+ continue;
+ /* mix all data */
+ memset(mixbuffer, 0, length*sizeof(s32));
+ list_for_each_entry(member, &conf->mlist, list) {
+ dsp = member->dsp;
+ /* get range of data to mix */
+ c = mixbuffer;
+ q = dsp->rx_buff;
+ r = dsp->rx_R;
+ rr = (r + length) & CMX_BUFF_MASK;
+ /* add member's data */
+ while (r != rr) {
+ *c++ += dsp_audio_law_to_s32[q[r]];
+ r = (r+1) & CMX_BUFF_MASK;
+ }
+ }
+
+ /* process each member */
+ list_for_each_entry(member, &conf->mlist, list) {
+ /* transmission */
+ dsp_cmx_send_member(member->dsp, length,
+ mixbuffer, members);
+ }
+ }
+ }
+
+ /* delete rx-data, increment buffers, change pointers */
+ list_for_each_entry(dsp, &dsp_ilist, list) {
+ if (dsp->hdlc)
+ continue;
+ p = dsp->rx_buff;
+ q = dsp->tx_buff;
+ r = dsp->rx_R;
+ /* move receive pointer when receiving */
+ if (!dsp->rx_is_off) {
+ rr = (r + length) & CMX_BUFF_MASK;
+ /* delete rx-data */
+ while (r != rr) {
+ p[r] = dsp_silence;
+ r = (r+1) & CMX_BUFF_MASK;
+ }
+ /* increment rx-buffer pointer */
+ dsp->rx_R = r; /* write incremented read pointer */
+ }
+
+ /* check current rx_delay */
+ delay = (dsp->rx_W-dsp->rx_R) & CMX_BUFF_MASK;
+ if (delay >= CMX_BUFF_HALF)
+ delay = 0; /* will be the delay before next write */
+ /* check for lower delay */
+ if (delay < dsp->rx_delay[0])
+ dsp->rx_delay[0] = delay;
+ /* check current tx_delay */
+ delay = (dsp->tx_W-dsp->tx_R) & CMX_BUFF_MASK;
+ if (delay >= CMX_BUFF_HALF)
+ delay = 0; /* will be the delay before next write */
+ /* check for lower delay */
+ if (delay < dsp->tx_delay[0])
+ dsp->tx_delay[0] = delay;
+ if (jittercheck) {
+ /* find the lowest of all rx_delays */
+ delay = dsp->rx_delay[0];
+ i = 1;
+ while (i < MAX_SECONDS_JITTER_CHECK) {
+ if (delay > dsp->rx_delay[i])
+ delay = dsp->rx_delay[i];
+ i++;
+ }
+ /*
+ * remove rx_delay only if we have delay AND we
+ * have not preset cmx_delay
+ */
+ if (delay && !dsp->cmx_delay) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s lowest rx_delay of %d bytes for"
+ " dsp %s are now removed.\n",
+ __func__, delay,
+ dsp->name);
+ r = dsp->rx_R;
+ rr = (r + delay) & CMX_BUFF_MASK;
+ /* delete rx-data */
+ while (r != rr) {
+ p[r] = dsp_silence;
+ r = (r+1) & CMX_BUFF_MASK;
+ }
+ /* increment rx-buffer pointer */
+ dsp->rx_R = r;
+ /* write incremented read pointer */
+ }
+ /* find the lowest of all tx_delays */
+ delay = dsp->tx_delay[0];
+ i = 1;
+ while (i < MAX_SECONDS_JITTER_CHECK) {
+ if (delay > dsp->tx_delay[i])
+ delay = dsp->tx_delay[i];
+ i++;
+ }
+ /*
+ * remove delay only if we have delay AND we
+ * have enabled tx_dejitter
+ */
+ if (delay && dsp->tx_dejitter) {
+ if (dsp_debug & DEBUG_DSP_CMX)
+ printk(KERN_DEBUG
+ "%s lowest tx_delay of %d bytes for"
+ " dsp %s are now removed.\n",
+ __func__, delay,
+ dsp->name);
+ r = dsp->tx_R;
+ rr = (r + delay) & CMX_BUFF_MASK;
+ /* delete tx-data */
+ while (r != rr) {
+ q[r] = dsp_silence;
+ r = (r+1) & CMX_BUFF_MASK;
+ }
+ /* increment tx-buffer pointer */
+ dsp->tx_R = r;
+ /* write incremented read pointer */
+ }
+ /* scroll up delays */
+ i = MAX_SECONDS_JITTER_CHECK - 1;
+ while (i) {
+ dsp->rx_delay[i] = dsp->rx_delay[i-1];
+ dsp->tx_delay[i] = dsp->tx_delay[i-1];
+ i--;
+ }
+ dsp->tx_delay[0] = CMX_BUFF_HALF; /* (infinite) delay */
+ dsp->rx_delay[0] = CMX_BUFF_HALF; /* (infinite) delay */
+ }
+ }
+
+ /* if next event would be in the past ... */
+ if ((s32)(dsp_spl_jiffies+dsp_tics-jiffies) <= 0)
+ dsp_spl_jiffies = jiffies + 1;
+ else
+ dsp_spl_jiffies += dsp_tics;
+
+ dsp_spl_tl.expires = dsp_spl_jiffies;
+ add_timer(&dsp_spl_tl);
+
+ /* unlock */
+ spin_unlock_irqrestore(&dsp_lock, flags);
+}
+
+/*
+ * audio data is transmitted from upper layer to the dsp
+ */
+void
+dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb)
+{
+ u_int w, ww;
+ u8 *d, *p;
+ int space; /* todo: , l = skb->len; */
+#ifdef CMX_TX_DEBUG
+ char debugbuf[256] = "";
+#endif
+
+ /* check if there is enough space, and then copy */
+ w = dsp->tx_W;
+ ww = dsp->tx_R;
+ p = dsp->tx_buff;
+ d = skb->data;
+ space = ww-w;
+ if (space <= 0)
+ space += CMX_BUFF_SIZE;
+ /* write-pointer should not overrun nor reach read pointer */
+ if (space-1 < skb->len)
+ /* write to the space we have left */
+ ww = (ww - 1) & CMX_BUFF_MASK;
+ else
+ /* write until all bytes are copied */
+ ww = (w + skb->len) & CMX_BUFF_MASK;
+ dsp->tx_W = ww;
+
+ /* show current buffer */
+#ifdef CMX_DEBUG
+ printk(KERN_DEBUG
+ "cmx_transmit(dsp=%lx) %d bytes to 0x%x-0x%x. %s\n",
+ (u_long)dsp, (ww-w)&CMX_BUFF_MASK, w, ww, dsp->name);
+#endif
+
+ /* copy transmit data to tx-buffer */
+#ifdef CMX_TX_DEBUG
+ sprintf(debugbuf, "TX getting (%04x-%04x)%p: ", w, ww, p);
+#endif
+ while (w != ww) {
+#ifdef CMX_TX_DEBUG
+ if (strlen(debugbuf) < 48)
+ sprintf(debugbuf+strlen(debugbuf), " %02x", *d);
+#endif
+ p[w] = *d++;
+ w = (w+1) & CMX_BUFF_MASK;
+ }
+#ifdef CMX_TX_DEBUG
+ printk(KERN_DEBUG "%s\n", debugbuf);
+#endif
+
+}
+
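+ /*
+ * Worked sketch (not part of the driver) of the free-space check above:
+ * the distance from the write pointer to the read pointer, minus one byte,
+ * is what may be written without the write pointer catching up with the
+ * read pointer. ex_tx_space is a made-up name for the example.
+ */
+ #if 0 /* example only, never compiled */
+ static int ex_tx_space(unsigned int w /* tx_W */, unsigned int r /* tx_R */,
+     unsigned int size /* CMX_BUFF_SIZE */)
+ {
+     int space = r - w;
+
+     if (space <= 0)
+         space += size;
+     return space - 1; /* keep a one-byte gap, so w never reaches r */
+ }
+ /* e.g. size=0x1000, w=0xff0, r=0x010: space is 0x20, usable is 0x1f */
+ #endif
+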
+/*
+ * hdlc data is received from card and sent to all members.
+ */
+void
+dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb)
+{
+ struct sk_buff *nskb = NULL;
+ struct dsp_conf_member *member;
+ struct mISDNhead *hh;
+
+ /* not if not active */
+ if (!dsp->b_active)
+ return;
+
+ /* check if we have something */
+ if (skb->len < 1)
+ return;
+
+ /* no conf */
+ if (!dsp->conf) {
+ /* in case of hardware (echo) */
+ if (dsp->pcm_slot_tx >= 0)
+ return;
+ if (dsp->echo)
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (nskb) {
+ hh = mISDN_HEAD_P(nskb);
+ hh->prim = PH_DATA_REQ;
+ hh->id = 0;
+ skb_queue_tail(&dsp->sendq, nskb);
+ schedule_work(&dsp->workq);
+ }
+ return;
+ }
+ /* in case of hardware conference */
+ if (dsp->conf->hardware)
+ return;
+ list_for_each_entry(member, &dsp->conf->mlist, list) {
+ if (dsp->echo || member->dsp != dsp) {
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (nskb) {
+ hh = mISDN_HEAD_P(nskb);
+ hh->prim = PH_DATA_REQ;
+ hh->id = 0;
+ skb_queue_tail(&member->dsp->sendq, nskb);
+ schedule_work(&member->dsp->workq);
+ }
+ }
+ }
+}
+
+
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
new file mode 100644
index 000000000000..2f10ed82c0db
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -0,0 +1,1191 @@
+/*
+ * Author Andreas Eversberg (jolly@eversberg.eu)
+ * Based on source code structure by
+ * Karsten Keil (keil@isdn4linux.de)
+ *
+ * This file is (c) under GNU PUBLIC LICENSE
+ * For changes and modifications please read
+ * ../../../Documentation/isdn/mISDN.cert
+ *
+ * Thanks to Karsten Keil (great drivers)
+ * Cologne Chip (great chips)
+ *
+ * This module does:
+ * Real-time tone generation
+ * DTMF detection
+ * Real-time cross-connection and conference
+ * Compensation of jitter due to system load and hardware faults.
+ * All features are done in kernel space and will be realized
+ * using hardware, if available and supported by chip set.
+ * Blowfish encryption/decryption
+ */
+
+/* STRUCTURE:
+ *
+ * The dsp module provides layer 2 for b-channels (64kbit). It provides
+ * transparent audio forwarding with special digital signal processing:
+ *
+ * - (1) generation of tones
+ * - (2) detection of dtmf tones
+ * - (3) crossconnecting and conferences (clocking)
+ * - (4) echo generation for delay test
+ * - (5) volume control
+ * - (6) disable receive data
+ * - (7) pipeline
+ * - (8) encryption/decryption
+ *
+ * Look:
+ * TX RX
+ * ------upper layer------
+ * | ^
+ * | |(6)
+ * v |
+ * +-----+-------------+-----+
+ * |(3)(4) |
+ * | CMX |
+ * | |
+ * | +-------------+
+ * | | ^
+ * | | |
+ * |+---------+| +----+----+
+ * ||(1) || |(2) |
+ * || || | |
+ * || Tones || | DTMF |
+ * || || | |
+ * || || | |
+ * |+----+----+| +----+----+
+ * +-----+-----+ ^
+ * | |
+ * v |
+ * +----+----+ +----+----+
+ * |(5) | |(5) |
+ * | | | |
+ * |TX Volume| |RX Volume|
+ * | | | |
+ * | | | |
+ * +----+----+ +----+----+
+ * | ^
+ * | |
+ * v |
+ * +----+-------------+----+
+ * |(7) |
+ * | |
+ * | Pipeline Processing |
+ * | |
+ * | |
+ * +----+-------------+----+
+ * | ^
+ * | |
+ * v |
+ * +----+----+ +----+----+
+ * |(8) | |(8) |
+ * | | | |
+ * | Encrypt | | Decrypt |
+ * | | | |
+ * | | | |
+ * +----+----+ +----+----+
+ * | ^
+ * | |
+ * v |
+ * ------card layer------
+ * TX RX
+ *
+ * Above you can see the logical data flow. If software is used to do the
+ * processing, this is also the real data flow. If hardware is used, the data
+ * may not actually flow, but commands are sent to the card to provide the
+ * data flow as shown.
+ *
+ * NOTE: The channel must be activated in order to make dsp work, even if
+ * no data flow to the upper layer is intended. Activation can be done
+ * before or after controlling the settings using PH_CONTROL requests.
+ *
+ * DTMF: Will be detected by hardware if possible. It is done before CMX
+ * processing.
+ *
+ * Tones: Will be generated via software if endless looped audio fifos are
+ * not supported by hardware. Tones will override all data from CMX.
+ * It is not required to join a conference to use tones at any time.
+ *
+ * CMX: Is transparent when not used. When it is used, it will do
+ * crossconnections and conferences via software if not possible through
+ * hardware. If hardware capability is available, hardware is used.
+ *
+ * Echo: Is generated by CMX and is used to check performance of hardware
+ * and software CMX.
+ *
+ * The CMX has special functions for conferences with one, two and more
+ * members. It will allow different types of data flow. Receive and transmit
+ * data to/from the upper layer may be switched on/off individually without losing
+ * features of CMX, Tones and DTMF.
+ *
+ * Echo Cancellation: Sometimes we like to cancel echo from the interface.
+ * Note that a VoIP call may not have echo caused by the IP phone. The echo
+ * is generated by the telephone line connected to it. Because the delay
+ * is high, it becomes an echo. RESULT: Echo Cancellation is required if
+ * both echo AND delay are applied to an interface.
+ * Remember that software CMX always adds some delay.
+ *
+ * If all used features can be realized in hardware, and if transmit and/or
+ * receive data is disabled, the card may not send/receive any data at all.
+ * Not receiving is useful if only announcements are played. Not sending is
+ * useful if an answering machine records audio. Not sending and receiving is
+ * useful during most states of the call. If supported by hardware, tones
+ * will be played without CPU load. Small PBXs and NT-Mode applications will
+ * not need expensive hardware when processing calls.
+ *
+ *
+ * LOCKING:
+ *
+ * When data is received from upper or lower layer (card), the complete dsp
+ * module is locked by a global lock. This lock MUST disable interrupts,
+ * because it must also protect against the DSP poll timer events.
+ * When data is ready to be transmitted down, the data is queued and sent
+ * outside lock and timer event.
+ * PH_CONTROL must not change any settings, join or split conference members
+ * during process of data.
+ *
+ * HDLC:
+ *
+ * It works quite the same as transparent, except that HDLC data is forwarded
+ * to all other conference members if no hardware bridging is possible.
+ * Data to be sent is written to the sendq; the sendq is sent once a confirm
+ * is received. A conference cannot be joined if one of its members is not
+ * hdlc.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/mISDNif.h>
+#include <linux/mISDNdsp.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include "core.h"
+#include "dsp.h"
+
+const char *mISDN_dsp_revision = "2.0";
+
+static int debug;
+static int options;
+static int poll;
+static int dtmfthreshold = 100;
+
+MODULE_AUTHOR("Andreas Eversberg");
+module_param(debug, uint, S_IRUGO | S_IWUSR);
+module_param(options, uint, S_IRUGO | S_IWUSR);
+module_param(poll, uint, S_IRUGO | S_IWUSR);
+module_param(dtmfthreshold, uint, S_IRUGO | S_IWUSR);
+MODULE_LICENSE("GPL");
+
+/*int spinnest = 0;*/
+
+spinlock_t dsp_lock; /* global dsp lock */
+struct list_head dsp_ilist;
+struct list_head conf_ilist;
+int dsp_debug;
+int dsp_options;
+int dsp_poll, dsp_tics;
+
+/* check if rx may be turned off or must be turned on */
+static void
+dsp_rx_off_member(struct dsp *dsp)
+{
+ struct mISDN_ctrl_req cq;
+ int rx_off = 1;
+
+ if (!dsp->features_rx_off)
+ return;
+
+ /* not disabled */
+ if (!dsp->rx_disabled)
+ rx_off = 0;
+ /* software dtmf */
+ else if (dsp->dtmf.software)
+ rx_off = 0;
+ /* echo in software */
+ else if (dsp->echo && dsp->pcm_slot_tx < 0)
+ rx_off = 0;
+ /* bridge in software */
+ else if (dsp->conf) {
+ if (dsp->conf->software)
+ rx_off = 0;
+ }
+
+ if (rx_off == dsp->rx_is_off)
+ return;
+
+ if (!dsp->ch.peer) {
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: no peer, no rx_off\n",
+ __func__);
+ return;
+ }
+ cq.op = MISDN_CTRL_RX_OFF;
+ cq.p1 = rx_off;
+ if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) {
+ printk(KERN_DEBUG "%s: 2nd CONTROL_CHANNEL failed\n",
+ __func__);
+ return;
+ }
+ dsp->rx_is_off = rx_off;
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: %s set rx_off = %d\n",
+ __func__, dsp->name, rx_off);
+}
+static void
+dsp_rx_off(struct dsp *dsp)
+{
+ struct dsp_conf_member *member;
+
+ if (dsp_options & DSP_OPT_NOHARDWARE)
+ return;
+
+ /* no conf */
+ if (!dsp->conf) {
+ dsp_rx_off_member(dsp);
+ return;
+ }
+ /* check all members in conf */
+ list_for_each_entry(member, &dsp->conf->mlist, list) {
+ dsp_rx_off_member(member->dsp);
+ }
+}
+
+static int
+dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+ int ret = 0;
+ int cont;
+ u8 *data;
+ int len;
+
+ if (skb->len < sizeof(int)) {
+ printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__);
+ return -EINVAL;
+ }
+ cont = *((int *)skb->data);
+ len = skb->len - sizeof(int);
+ data = skb->data + sizeof(int);
+
+ switch (cont) {
+ case DTMF_TONE_START: /* turn on DTMF */
+ if (dsp->hdlc) {
+ ret = -EINVAL;
+ break;
+ }
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: start dtmf\n", __func__);
+ if (len == sizeof(int)) {
+ printk(KERN_NOTICE "changing DTMF Threshold "
+ "to %d\n", *((int *)data));
+ dsp->dtmf.treshold = (*(int *)data) * 10000;
+ }
+ /* init goertzel */
+ dsp_dtmf_goertzel_init(dsp);
+
+ /* check dtmf hardware */
+ dsp_dtmf_hardware(dsp);
+ break;
+ case DTMF_TONE_STOP: /* turn off DTMF */
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: stop dtmf\n", __func__);
+ dsp->dtmf.hardware = 0;
+ dsp->dtmf.software = 0;
+ break;
+ case DSP_CONF_JOIN: /* join / update conference */
+ if (len < sizeof(int)) {
+ ret = -EINVAL;
+ break;
+ }
+ if (*((u32 *)data) == 0)
+ goto conf_split;
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: join conference %d\n",
+ __func__, *((u32 *)data));
+ ret = dsp_cmx_conf(dsp, *((u32 *)data));
+ /* dsp_cmx_hardware will also be called here */
+ dsp_rx_off(dsp);
+ if (dsp_debug & DEBUG_DSP_CMX)
+ dsp_cmx_debug(dsp);
+ break;
+ case DSP_CONF_SPLIT: /* remove from conference */
+conf_split:
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: release conference\n", __func__);
+ ret = dsp_cmx_conf(dsp, 0);
+ /* dsp_cmx_hardware will also be called here */
+ if (dsp_debug & DEBUG_DSP_CMX)
+ dsp_cmx_debug(dsp);
+ dsp_rx_off(dsp);
+ break;
+ case DSP_TONE_PATT_ON: /* play tone */
+ if (dsp->hdlc) {
+ ret = -EINVAL;
+ break;
+ }
+ if (len < sizeof(int)) {
+ ret = -EINVAL;
+ break;
+ }
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: turn tone 0x%x on\n",
+ __func__, *((int *)data));
+ ret = dsp_tone(dsp, *((int *)data));
+ if (!ret) {
+ dsp_cmx_hardware(dsp->conf, dsp);
+ dsp_rx_off(dsp);
+ }
+ if (!dsp->tone.tone)
+ goto tone_off;
+ break;
+ case DSP_TONE_PATT_OFF: /* stop tone */
+ if (dsp->hdlc) {
+ ret = -EINVAL;
+ break;
+ }
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: turn tone off\n", __func__);
+ dsp_tone(dsp, 0);
+ dsp_cmx_hardware(dsp->conf, dsp);
+ dsp_rx_off(dsp);
+ /* reset tx buffers (user space data) */
+tone_off:
+ dsp->rx_W = 0;
+ dsp->rx_R = 0;
+ break;
+ case DSP_VOL_CHANGE_TX: /* change volume */
+ if (dsp->hdlc) {
+ ret = -EINVAL;
+ break;
+ }
+ if (len < sizeof(int)) {
+ ret = -EINVAL;
+ break;
+ }
+ dsp->tx_volume = *((int *)data);
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: change tx vol to %d\n",
+ __func__, dsp->tx_volume);
+ dsp_cmx_hardware(dsp->conf, dsp);
+ dsp_dtmf_hardware(dsp);
+ dsp_rx_off(dsp);
+ break;
+ case DSP_VOL_CHANGE_RX: /* change volume */
+ if (dsp->hdlc) {
+ ret = -EINVAL;
+ break;
+ }
+ if (len < sizeof(int)) {
+ ret = -EINVAL;
+ break;
+ }
+ dsp->rx_volume = *((int *)data);
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: change rx vol to %d\n",
+ __func__, dsp->rx_volume);
+ dsp_cmx_hardware(dsp->conf, dsp);
+ dsp_dtmf_hardware(dsp);
+ dsp_rx_off(dsp);
+ break;
+ case DSP_ECHO_ON: /* enable echo */
+ dsp->echo = 1; /* soft echo */
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: enable cmx-echo\n", __func__);
+ dsp_cmx_hardware(dsp->conf, dsp);
+ dsp_rx_off(dsp);
+ if (dsp_debug & DEBUG_DSP_CMX)
+ dsp_cmx_debug(dsp);
+ break;
+ case DSP_ECHO_OFF: /* disable echo */
+ dsp->echo = 0;
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: disable cmx-echo\n", __func__);
+ dsp_cmx_hardware(dsp->conf, dsp);
+ dsp_rx_off(dsp);
+ if (dsp_debug & DEBUG_DSP_CMX)
+ dsp_cmx_debug(dsp);
+ break;
+ case DSP_RECEIVE_ON: /* enable receive to user space */
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: enable receive to user "
+ "space\n", __func__);
+ dsp->rx_disabled = 0;
+ dsp_rx_off(dsp);
+ break;
+ case DSP_RECEIVE_OFF: /* disable receive to user space */
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: disable receive to "
+ "user space\n", __func__);
+ dsp->rx_disabled = 1;
+ dsp_rx_off(dsp);
+ break;
+ case DSP_MIX_ON: /* enable mixing of tx data */
+ if (dsp->hdlc) {
+ ret = -EINVAL;
+ break;
+ }
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: enable mixing of "
+ "tx-data with conf mebers\n", __func__);
+ dsp->tx_mix = 1;
+ dsp_cmx_hardware(dsp->conf, dsp);
+ dsp_rx_off(dsp);
+ if (dsp_debug & DEBUG_DSP_CMX)
+ dsp_cmx_debug(dsp);
+ break;
+ case DSP_MIX_OFF: /* disable mixing of tx data */
+ if (dsp->hdlc) {
+ ret = -EINVAL;
+ break;
+ }
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: disable mixing of "
+ "tx-data with conf mebers\n", __func__);
+ dsp->tx_mix = 0;
+ dsp_cmx_hardware(dsp->conf, dsp);
+ dsp_rx_off(dsp);
+ if (dsp_debug & DEBUG_DSP_CMX)
+ dsp_cmx_debug(dsp);
+ break;
+ case DSP_TXDATA_ON: /* enable txdata */
+ dsp->tx_data = 1;
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: enable tx-data\n", __func__);
+ dsp_cmx_hardware(dsp->conf, dsp);
+ dsp_rx_off(dsp);
+ if (dsp_debug & DEBUG_DSP_CMX)
+ dsp_cmx_debug(dsp);
+ break;
+ case DSP_TXDATA_OFF: /* disable txdata */
+ dsp->tx_data = 0;
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: disable tx-data\n", __func__);
+ dsp_cmx_hardware(dsp->conf, dsp);
+ dsp_rx_off(dsp);
+ if (dsp_debug & DEBUG_DSP_CMX)
+ dsp_cmx_debug(dsp);
+ break;
+ case DSP_DELAY: /* use delay algorithm instead of dynamic
+ jitter algorithm */
+ if (dsp->hdlc) {
+ ret = -EINVAL;
+ break;
+ }
+ if (len < sizeof(int)) {
+ ret = -EINVAL;
+ break;
+ }
+ dsp->cmx_delay = (*((int *)data)) << 3;
+ /* milliseconds to samples */
+ if (dsp->cmx_delay >= (CMX_BUFF_HALF>>1))
+ /* clip to half of maximum usable buffer
+ (half of half buffer) */
+ dsp->cmx_delay = (CMX_BUFF_HALF>>1) - 1;
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: use delay algorithm to "
+ "compensate jitter (%d samples)\n",
+ __func__, dsp->cmx_delay);
+ break;
+ case DSP_JITTER: /* use dynamic jitter algorithm instead of
+ delay algorithm */
+ if (dsp->hdlc) {
+ ret = -EINVAL;
+ break;
+ }
+ dsp->cmx_delay = 0;
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: use jitter algorithm to "
+ "compensate jitter\n", __func__);
+ break;
+ case DSP_TX_DEJITTER: /* use dynamic jitter algorithm for tx-buffer */
+ if (dsp->hdlc) {
+ ret = -EINVAL;
+ break;
+ }
+ dsp->tx_dejitter = 1;
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: use dejitter on TX "
+ "buffer\n", __func__);
+ break;
+ case DSP_TX_DEJ_OFF: /* use tx-buffer without dejittering*/
+ if (dsp->hdlc) {
+ ret = -EINVAL;
+ break;
+ }
+ dsp->tx_dejitter = 0;
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: use TX buffer without "
+ "dejittering\n", __func__);
+ break;
+ case DSP_PIPELINE_CFG:
+ if (dsp->hdlc) {
+ ret = -EINVAL;
+ break;
+ }
+ if (len > 0 && ((char *)data)[len - 1]) {
+ printk(KERN_DEBUG "%s: pipeline config string "
+ "is not NULL terminated!\n", __func__);
+ ret = -EINVAL;
+ } else {
+ dsp->pipeline.inuse = 1;
+ dsp_cmx_hardware(dsp->conf, dsp);
+ ret = dsp_pipeline_build(&dsp->pipeline,
+ len > 0 ? (char *)data : NULL);
+ dsp_cmx_hardware(dsp->conf, dsp);
+ dsp_rx_off(dsp);
+ }
+ break;
+ case DSP_BF_ENABLE_KEY: /* turn blowfish on */
+ if (dsp->hdlc) {
+ ret = -EINVAL;
+ break;
+ }
+ if (len < 4 || len > 56) {
+ ret = -EINVAL;
+ break;
+ }
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: turn blowfish on (key "
+ "not shown)\n", __func__);
+ ret = dsp_bf_init(dsp, (u8 *)data, len);
+ /* set new cont */
+ if (!ret)
+ cont = DSP_BF_ACCEPT;
+ else
+ cont = DSP_BF_REJECT;
+ /* send indication if it worked to set it */
+ nskb = _alloc_mISDN_skb(PH_CONTROL_IND, MISDN_ID_ANY,
+ sizeof(int), &cont, GFP_ATOMIC);
+ if (nskb) {
+ if (dsp->up) {
+ if (dsp->up->send(dsp->up, nskb))
+ dev_kfree_skb(nskb);
+ } else
+ dev_kfree_skb(nskb);
+ }
+ if (!ret) {
+ dsp_cmx_hardware(dsp->conf, dsp);
+ dsp_dtmf_hardware(dsp);
+ dsp_rx_off(dsp);
+ }
+ break;
+ case DSP_BF_DISABLE: /* turn blowfish off */
+ if (dsp->hdlc) {
+ ret = -EINVAL;
+ break;
+ }
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: turn blowfish off\n", __func__);
+ dsp_bf_cleanup(dsp);
+ dsp_cmx_hardware(dsp->conf, dsp);
+ dsp_dtmf_hardware(dsp);
+ dsp_rx_off(dsp);
+ break;
+ default:
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: ctrl req %x unhandled\n",
+ __func__, cont);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static void
+get_features(struct mISDNchannel *ch)
+{
+ struct dsp *dsp = container_of(ch, struct dsp, ch);
+ struct mISDN_ctrl_req cq;
+
+ if (dsp_options & DSP_OPT_NOHARDWARE)
+ return;
+ if (!ch->peer) {
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: no peer, no features\n",
+ __func__);
+ return;
+ }
+ memset(&cq, 0, sizeof(cq));
+ cq.op = MISDN_CTRL_GETOP;
+ if (ch->peer->ctrl(ch->peer, CONTROL_CHANNEL, &cq) < 0) {
+ printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n",
+ __func__);
+ return;
+ }
+ if (cq.op & MISDN_CTRL_RX_OFF)
+ dsp->features_rx_off = 1;
+ if ((cq.op & MISDN_CTRL_HW_FEATURES_OP)) {
+ cq.op = MISDN_CTRL_HW_FEATURES;
+ *((u_long *)&cq.p1) = (u_long)&dsp->features;
+ if (ch->peer->ctrl(ch->peer, CONTROL_CHANNEL, &cq)) {
+ printk(KERN_DEBUG "%s: 2nd CONTROL_CHANNEL failed\n",
+ __func__);
+ }
+ } else
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: features not supported for %s\n",
+ __func__, dsp->name);
+}
+
+static int
+dsp_function(struct mISDNchannel *ch, struct sk_buff *skb)
+{
+ struct dsp *dsp = container_of(ch, struct dsp, ch);
+ struct mISDNhead *hh;
+ int ret = 0;
+ u8 *digits;
+ int cont;
+ struct sk_buff *nskb;
+ u_long flags;
+
+ hh = mISDN_HEAD_P(skb);
+ switch (hh->prim) {
+ /* FROM DOWN */
+ case (PH_DATA_CNF):
+ dsp->data_pending = 0;
+ /* trigger next hdlc frame, if any */
+ if (dsp->hdlc) {
+ spin_lock_irqsave(&dsp_lock, flags);
+ if (dsp->b_active)
+ schedule_work(&dsp->workq);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+ }
+ break;
+ case (PH_DATA_IND):
+ case (DL_DATA_IND):
+ if (skb->len < 1) {
+ ret = -EINVAL;
+ break;
+ }
+ if (dsp->rx_is_off) {
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: rx-data during rx_off"
+ " for %s\n",
+ __func__, dsp->name);
+ }
+ if (dsp->hdlc) {
+ /* hdlc */
+ spin_lock_irqsave(&dsp_lock, flags);
+ dsp_cmx_hdlc(dsp, skb);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+ if (dsp->rx_disabled) {
+ /* if receive is not allowed */
+ break;
+ }
+ hh->prim = DL_DATA_IND;
+ if (dsp->up)
+ return dsp->up->send(dsp->up, skb);
+ break;
+ }
+
+ /* decrypt if enabled */
+ if (dsp->bf_enable)
+ dsp_bf_decrypt(dsp, skb->data, skb->len);
+ /* pipeline */
+ if (dsp->pipeline.inuse)
+ dsp_pipeline_process_rx(&dsp->pipeline, skb->data,
+ skb->len);
+ /* change volume if requested */
+ if (dsp->rx_volume)
+ dsp_change_volume(skb, dsp->rx_volume);
+
+ /* check if dtmf soft decoding is turned on */
+ if (dsp->dtmf.software) {
+ digits = dsp_dtmf_goertzel_decode(dsp, skb->data,
+ skb->len, (dsp_options&DSP_OPT_ULAW)?1:0);
+ while (*digits) {
+ if (dsp_debug & DEBUG_DSP_DTMF)
+ printk(KERN_DEBUG "%s: digit"
+ "(%c) to layer %s\n",
+ __func__, *digits, dsp->name);
+ cont = DTMF_TONE_VAL | *digits;
+ nskb = _alloc_mISDN_skb(PH_CONTROL_IND,
+ MISDN_ID_ANY, sizeof(int), &cont,
+ GFP_ATOMIC);
+ if (nskb) {
+ if (dsp->up) {
+ if (dsp->up->send(
+ dsp->up, nskb))
+ dev_kfree_skb(nskb);
+ } else
+ dev_kfree_skb(nskb);
+ }
+ digits++;
+ }
+ }
+ /* we need to process receive data if software */
+ spin_lock_irqsave(&dsp_lock, flags);
+ if (dsp->pcm_slot_tx < 0 && dsp->pcm_slot_rx < 0) {
+ /* process data from card at cmx */
+ dsp_cmx_receive(dsp, skb);
+ }
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ if (dsp->rx_disabled) {
+ /* if receive is not allowed */
+ break;
+ }
+ hh->prim = DL_DATA_IND;
+ if (dsp->up)
+ return dsp->up->send(dsp->up, skb);
+ break;
+ case (PH_CONTROL_IND):
+ if (dsp_debug & DEBUG_DSP_DTMFCOEFF)
+ printk(KERN_DEBUG "%s: PH_CONTROL INDICATION "
+ "received: %x (len %d) %s\n", __func__,
+ hh->id, skb->len, dsp->name);
+ switch (hh->id) {
+ case (DTMF_HFC_COEF): /* getting coefficients */
+ if (!dsp->dtmf.hardware) {
+ if (dsp_debug & DEBUG_DSP_DTMFCOEFF)
+ printk(KERN_DEBUG "%s: ignoring DTMF "
+ "coefficients from HFC\n",
+ __func__);
+ break;
+ }
+ digits = dsp_dtmf_goertzel_decode(dsp, skb->data,
+ skb->len, 2);
+ while (*digits) {
+ int k;
+ struct sk_buff *nskb;
+ if (dsp_debug & DEBUG_DSP_DTMF)
+ printk(KERN_DEBUG "%s: digit"
+ "(%c) to layer %s\n",
+ __func__, *digits, dsp->name);
+ k = *digits | DTMF_TONE_VAL;
+ nskb = _alloc_mISDN_skb(PH_CONTROL_IND,
+ MISDN_ID_ANY, sizeof(int), &k,
+ GFP_ATOMIC);
+ if (nskb) {
+ if (dsp->up) {
+ if (dsp->up->send(
+ dsp->up, nskb))
+ dev_kfree_skb(nskb);
+ } else
+ dev_kfree_skb(nskb);
+ }
+ digits++;
+ }
+ break;
+ case (HFC_VOL_CHANGE_TX): /* change volume */
+ if (skb->len != sizeof(int)) {
+ ret = -EINVAL;
+ break;
+ }
+ spin_lock_irqsave(&dsp_lock, flags);
+ dsp->tx_volume = *((int *)skb->data);
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: change tx volume to "
+ "%d\n", __func__, dsp->tx_volume);
+ dsp_cmx_hardware(dsp->conf, dsp);
+ dsp_dtmf_hardware(dsp);
+ dsp_rx_off(dsp);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+ break;
+ default:
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: ctrl ind %x unhandled "
+ "%s\n", __func__, hh->id, dsp->name);
+ ret = -EINVAL;
+ }
+ break;
+ case (PH_ACTIVATE_IND):
+ case (PH_ACTIVATE_CNF):
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: b_channel is now active %s\n",
+ __func__, dsp->name);
+ /* bchannel now active */
+ spin_lock_irqsave(&dsp_lock, flags);
+ dsp->b_active = 1;
+ dsp->data_pending = 0;
+ dsp->rx_init = 1;
+ /* rx_W and rx_R will be adjusted on first frame */
+ dsp->rx_W = 0;
+ dsp->rx_R = 0;
+ memset(dsp->rx_buff, 0, sizeof(dsp->rx_buff));
+ dsp_cmx_hardware(dsp->conf, dsp);
+ dsp_dtmf_hardware(dsp);
+ dsp_rx_off(dsp);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: done with activation, sending "
+ "confirm to user space. %s\n", __func__,
+ dsp->name);
+ /* send activation to upper layer */
+ hh->prim = DL_ESTABLISH_CNF;
+ if (dsp->up)
+ return dsp->up->send(dsp->up, skb);
+ break;
+ case (PH_DEACTIVATE_IND):
+ case (PH_DEACTIVATE_CNF):
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: b_channel is now inactive %s\n",
+ __func__, dsp->name);
+ /* bchannel now inactive */
+ spin_lock_irqsave(&dsp_lock, flags);
+ dsp->b_active = 0;
+ dsp->data_pending = 0;
+ dsp_cmx_hardware(dsp->conf, dsp);
+ dsp_rx_off(dsp);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+ hh->prim = DL_RELEASE_CNF;
+ if (dsp->up)
+ return dsp->up->send(dsp->up, skb);
+ break;
+ /* FROM UP */
+ case (DL_DATA_REQ):
+ case (PH_DATA_REQ):
+ if (skb->len < 1) {
+ ret = -EINVAL;
+ break;
+ }
+ if (dsp->hdlc) {
+ /* hdlc */
+ spin_lock_irqsave(&dsp_lock, flags);
+ if (dsp->b_active) {
+ skb_queue_tail(&dsp->sendq, skb);
+ schedule_work(&dsp->workq);
+ }
+ spin_unlock_irqrestore(&dsp_lock, flags);
+ return 0;
+ }
+ /* send data to tx-buffer (if no tone is played) */
+ if (!dsp->tone.tone) {
+ spin_lock_irqsave(&dsp_lock, flags);
+ dsp_cmx_transmit(dsp, skb);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+ }
+ break;
+ case (PH_CONTROL_REQ):
+ spin_lock_irqsave(&dsp_lock, flags);
+ ret = dsp_control_req(dsp, hh, skb);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+ break;
+ case (DL_ESTABLISH_REQ):
+ case (PH_ACTIVATE_REQ):
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: activating b_channel %s\n",
+ __func__, dsp->name);
+ if (dsp->dtmf.hardware || dsp->dtmf.software)
+ dsp_dtmf_goertzel_init(dsp);
+ get_features(ch);
+ /* send ph_activate */
+ hh->prim = PH_ACTIVATE_REQ;
+ if (ch->peer)
+ return ch->recv(ch->peer, skb);
+ break;
+ case (DL_RELEASE_REQ):
+ case (PH_DEACTIVATE_REQ):
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: releasing b_channel %s\n",
+ __func__, dsp->name);
+ spin_lock_irqsave(&dsp_lock, flags);
+ dsp->tone.tone = 0;
+ dsp->tone.hardware = 0;
+ dsp->tone.software = 0;
+ if (timer_pending(&dsp->tone.tl))
+ del_timer(&dsp->tone.tl);
+ if (dsp->conf)
+ dsp_cmx_conf(dsp, 0); /* dsp_cmx_hardware will also be
+ called here */
+ skb_queue_purge(&dsp->sendq);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+ hh->prim = PH_DEACTIVATE_REQ;
+ if (ch->peer)
+ return ch->recv(ch->peer, skb);
+ break;
+ default:
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: msg %x unhandled %s\n",
+ __func__, hh->prim, dsp->name);
+ ret = -EINVAL;
+ }
+ if (!ret)
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int
+dsp_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
+{
+ struct dsp *dsp = container_of(ch, struct dsp, ch);
+ u_long flags;
+ int err = 0;
+
+ if (debug & DEBUG_DSP_CTRL)
+ printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);
+
+ switch (cmd) {
+ case OPEN_CHANNEL:
+ break;
+ case CLOSE_CHANNEL:
+ if (dsp->ch.peer)
+ dsp->ch.peer->ctrl(dsp->ch.peer, CLOSE_CHANNEL, NULL);
+
+ /* wait until workqueue has finished,
+ * must lock here, or we may hit send-process currently
+ * queueing. */
+ spin_lock_irqsave(&dsp_lock, flags);
+ dsp->b_active = 0;
+ spin_unlock_irqrestore(&dsp_lock, flags);
+ /* MUST not be locked, because it waits until queue is done. */
+ cancel_work_sync(&dsp->workq);
+ spin_lock_irqsave(&dsp_lock, flags);
+ if (timer_pending(&dsp->tone.tl))
+ del_timer(&dsp->tone.tl);
+ skb_queue_purge(&dsp->sendq);
+ if (dsp_debug & DEBUG_DSP_CTRL)
+ printk(KERN_DEBUG "%s: releasing member %s\n",
+ __func__, dsp->name);
+ dsp->b_active = 0;
+ dsp_cmx_conf(dsp, 0); /* dsp_cmx_hardware will also be called
+ here */
+ dsp_pipeline_destroy(&dsp->pipeline);
+
+ if (dsp_debug & DEBUG_DSP_CTRL)
+ printk(KERN_DEBUG "%s: remove & destroy object %s\n",
+ __func__, dsp->name);
+ list_del(&dsp->list);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ if (dsp_debug & DEBUG_DSP_CTRL)
+ printk(KERN_DEBUG "%s: dsp instance released\n",
+ __func__);
+ vfree(dsp);
+ module_put(THIS_MODULE);
+ break;
+ }
+ return err;
+}
+
+static void
+dsp_send_bh(struct work_struct *work)
+{
+ struct dsp *dsp = container_of(work, struct dsp, workq);
+ struct sk_buff *skb;
+ struct mISDNhead *hh;
+
+ if (dsp->hdlc && dsp->data_pending)
+ return; /* wait until data has been acknowledged */
+
+ /* send queued data */
+ while ((skb = skb_dequeue(&dsp->sendq))) {
+ /* if previous data is still pending, we must drop this frame */
+ if (dsp->data_pending) {
+ if (dsp_debug & DEBUG_DSP_CORE)
+ printk(KERN_DEBUG "%s: fifo full %s, this is "
+ "no bug!\n", __func__, dsp->name);
+ /* flush transparent data, if not acked */
+ dev_kfree_skb(skb);
+ continue;
+ }
+ hh = mISDN_HEAD_P(skb);
+ if (hh->prim == DL_DATA_REQ) {
+ /* send packet up */
+ if (dsp->up) {
+ if (dsp->up->send(dsp->up, skb))
+ dev_kfree_skb(skb);
+ } else
+ dev_kfree_skb(skb);
+ } else {
+ /* send packet down */
+ if (dsp->ch.peer) {
+ dsp->data_pending = 1;
+ if (dsp->ch.recv(dsp->ch.peer, skb)) {
+ dev_kfree_skb(skb);
+ dsp->data_pending = 0;
+ }
+ } else
+ dev_kfree_skb(skb);
+ }
+ }
+}
+
+static int
+dspcreate(struct channel_req *crq)
+{
+ struct dsp *ndsp;
+ u_long flags;
+
+ if (crq->protocol != ISDN_P_B_L2DSP
+ && crq->protocol != ISDN_P_B_L2DSPHDLC)
+ return -EPROTONOSUPPORT;
+ ndsp = vmalloc(sizeof(struct dsp));
+ if (!ndsp) {
+ printk(KERN_ERR "%s: vmalloc struct dsp failed\n", __func__);
+ return -ENOMEM;
+ }
+ memset(ndsp, 0, sizeof(struct dsp));
+ if (dsp_debug & DEBUG_DSP_CTRL)
+ printk(KERN_DEBUG "%s: creating new dsp instance\n", __func__);
+
+ /* default enabled */
+ INIT_WORK(&ndsp->workq, (void *)dsp_send_bh);
+ skb_queue_head_init(&ndsp->sendq);
+ ndsp->ch.send = dsp_function;
+ ndsp->ch.ctrl = dsp_ctrl;
+ ndsp->up = crq->ch;
+ crq->ch = &ndsp->ch;
+ if (crq->protocol == ISDN_P_B_L2DSP) {
+ crq->protocol = ISDN_P_B_RAW;
+ ndsp->hdlc = 0;
+ } else {
+ crq->protocol = ISDN_P_B_HDLC;
+ ndsp->hdlc = 1;
+ }
+ if (!try_module_get(THIS_MODULE))
+ printk(KERN_WARNING "%s:cannot get module\n",
+ __func__);
+
+ sprintf(ndsp->name, "DSP_C%x(0x%p)",
+ ndsp->up->st->dev->id + 1, ndsp);
+ /* set frame size to start */
+ ndsp->features.hfc_id = -1; /* current HFC id */
+ ndsp->features.pcm_id = -1; /* current PCM id */
+ ndsp->pcm_slot_rx = -1; /* current PCM slot */
+ ndsp->pcm_slot_tx = -1;
+ ndsp->pcm_bank_rx = -1;
+ ndsp->pcm_bank_tx = -1;
+ ndsp->hfc_conf = -1; /* current conference number */
+ /* set tone timer */
+ ndsp->tone.tl.function = (void *)dsp_tone_timeout;
+ ndsp->tone.tl.data = (long) ndsp;
+ init_timer(&ndsp->tone.tl);
+
+ if (dtmfthreshold < 20 || dtmfthreshold > 500)
+ dtmfthreshold = 200;
+ ndsp->dtmf.treshold = dtmfthreshold*10000;
+
+ /* init pipeline and append to list */
+ spin_lock_irqsave(&dsp_lock, flags);
+ dsp_pipeline_init(&ndsp->pipeline);
+ list_add_tail(&ndsp->list, &dsp_ilist);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ return 0;
+}
+
+
+static struct Bprotocol DSP = {
+ .Bprotocols = (1 << (ISDN_P_B_L2DSP & ISDN_P_B_MASK))
+ | (1 << (ISDN_P_B_L2DSPHDLC & ISDN_P_B_MASK)),
+ .name = "dsp",
+ .create = dspcreate
+};
+
+static int dsp_init(void)
+{
+ int err;
+ int tics;
+
+ printk(KERN_INFO "DSP modul %s\n", mISDN_dsp_revision);
+
+ dsp_options = options;
+ dsp_debug = debug;
+
+ /* set packet size */
+ dsp_poll = poll;
+ if (dsp_poll) {
+ if (dsp_poll > MAX_POLL) {
+ printk(KERN_ERR "%s: Wrong poll value (%d), use %d "
+ "maximum.\n", __func__, poll, MAX_POLL);
+ err = -EINVAL;
+ return err;
+ }
+ if (dsp_poll < 8) {
+ printk(KERN_ERR "%s: Wrong poll value (%d), use 8 "
+ "minimum.\n", __func__, dsp_poll);
+ err = -EINVAL;
+ return err;
+ }
+ dsp_tics = poll * HZ / 8000;
+ if (dsp_tics * 8000 != poll * HZ) {
+ printk(KERN_INFO "mISDN_dsp: Cannot clock every %d "
+ "samples (0,125 ms). It is not a multiple of "
+ "%d HZ.\n", poll, HZ);
+ err = -EINVAL;
+ return err;
+ }
+ } else {
+ poll = 8;
+ while (poll <= MAX_POLL) {
+ tics = poll * HZ / 8000;
+ if (tics * 8000 == poll * HZ) {
+ dsp_tics = tics;
+ dsp_poll = poll;
+ if (poll >= 64)
+ break;
+ }
+ poll++;
+ }
+ }
+ if (dsp_poll == 0) {
+ printk(KERN_INFO "mISDN_dsp: There is no multiple of kernel "
+ "clock that equals exactly the duration of 8-256 "
+ "samples. (Choose kernel clock speed like 100, 250, "
+ "300, 1000)\n");
+ err = -EINVAL;
+ return err;
+ }
+ printk(KERN_INFO "mISDN_dsp: DSP clocks every %d samples. This equals "
+ "%d jiffies.\n", dsp_poll, dsp_tics);
+
+ spin_lock_init(&dsp_lock);
+ INIT_LIST_HEAD(&dsp_ilist);
+ INIT_LIST_HEAD(&conf_ilist);
+
+ /* init conversion tables */
+ dsp_audio_generate_law_tables();
+ dsp_silence = (dsp_options&DSP_OPT_ULAW)?0xff:0x2a;
+ dsp_audio_law_to_s32 = (dsp_options&DSP_OPT_ULAW)?dsp_audio_ulaw_to_s32:
+ dsp_audio_alaw_to_s32;
+ dsp_audio_generate_s2law_table();
+ dsp_audio_generate_seven();
+ dsp_audio_generate_mix_table();
+ if (dsp_options & DSP_OPT_ULAW)
+ dsp_audio_generate_ulaw_samples();
+ dsp_audio_generate_volume_changes();
+
+ err = dsp_pipeline_module_init();
+ if (err) {
+ printk(KERN_ERR "mISDN_dsp: Can't initialize pipeline, "
+ "error(%d)\n", err);
+ return err;
+ }
+
+ err = mISDN_register_Bprotocol(&DSP);
+ if (err) {
+ printk(KERN_ERR "Can't register %s error(%d)\n", DSP.name, err);
+ return err;
+ }
+
+ /* set sample timer */
+ dsp_spl_tl.function = (void *)dsp_cmx_send;
+ dsp_spl_tl.data = 0;
+ init_timer(&dsp_spl_tl);
+ dsp_spl_tl.expires = jiffies + dsp_tics;
+ dsp_spl_jiffies = dsp_spl_tl.expires;
+ add_timer(&dsp_spl_tl);
+
+ return 0;
+}
+
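+ /*
+ * Worked example (not part of the driver) for the search in dsp_init()
+ * above, assuming HZ=250: a poll value is only accepted when it describes
+ * a whole number of jiffies, i.e. poll * HZ is divisible by 8000 (the
+ * driver keeps searching until poll reaches at least 64).
+ */
+ #if 0 /* example only, never compiled */
+ #include <stdio.h>
+
+ int main(void)
+ {
+     int hz = 250; /* hypothetical kernel HZ */
+     int poll, tics;
+
+     for (poll = 8; poll <= 256; poll++) {
+         tics = poll * hz / 8000;
+         if (tics * 8000 == poll * hz) {
+             /* first hit: poll=32 samples (4 ms) == 1 jiffy at HZ=250 */
+             printf("poll=%d tics=%d\n", poll, tics);
+             break;
+         }
+     }
+     return 0;
+ }
+ #endif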
+
+static void dsp_cleanup(void)
+{
+ mISDN_unregister_Bprotocol(&DSP);
+
+ if (timer_pending(&dsp_spl_tl))
+ del_timer(&dsp_spl_tl);
+
+ if (!list_empty(&dsp_ilist)) {
+ printk(KERN_ERR "mISDN_dsp: Audio DSP object inst list not "
+ "empty.\n");
+ }
+ if (!list_empty(&conf_ilist)) {
+ printk(KERN_ERR "mISDN_dsp: Conference list not empty. Not "
+ "all memory freed.\n");
+ }
+
+ dsp_pipeline_module_exit();
+}
+
+module_init(dsp_init);
+module_exit(dsp_cleanup);
+
diff --git a/drivers/isdn/mISDN/dsp_dtmf.c b/drivers/isdn/mISDN/dsp_dtmf.c
new file mode 100644
index 000000000000..efc371c1f0dc
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_dtmf.c
@@ -0,0 +1,303 @@
+/*
+ * DTMF decoder.
+ *
+ * Copyright by Andreas Eversberg (jolly@eversberg.eu)
+ * based on different decoders such as ISDN4Linux
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/mISDNif.h>
+#include <linux/mISDNdsp.h>
+#include "core.h"
+#include "dsp.h"
+
+#define NCOEFF 8 /* number of frequencies to be analyzed */
+
+/* For DTMF recognition:
+ * 2 * cos(2 * PI * k / N) precalculated for all k
+ */
+static u64 cos2pik[NCOEFF] =
+{
+ /* k << 15 (source: hfc-4s/8s documentation (www.colognechip.de)) */
+ 55960, 53912, 51402, 48438, 38146, 32650, 26170, 18630
+};
+
+/* digit matrix */
+static char dtmf_matrix[4][4] =
+{
+ {'1', '2', '3', 'A'},
+ {'4', '5', '6', 'B'},
+ {'7', '8', '9', 'C'},
+ {'*', '0', '#', 'D'}
+};
+
+/* dtmf detection using goertzel algorithm
+ * init function
+ */
+void dsp_dtmf_goertzel_init(struct dsp *dsp)
+{
+ dsp->dtmf.size = 0;
+ dsp->dtmf.lastwhat = '\0';
+ dsp->dtmf.lastdigit = '\0';
+ dsp->dtmf.count = 0;
+}
+
+/* check for hardware or software features
+ */
+void dsp_dtmf_hardware(struct dsp *dsp)
+{
+ int hardware = 1;
+
+ if (!dsp->features.hfc_dtmf)
+ hardware = 0;
+
+ /* check for volume change */
+ if (dsp->tx_volume) {
+ if (dsp_debug & DEBUG_DSP_DTMF)
+ printk(KERN_DEBUG "%s dsp %s cannot do hardware DTMF, "
+ "because tx_volume is changed\n",
+ __func__, dsp->name);
+ hardware = 0;
+ }
+ if (dsp->rx_volume) {
+ if (dsp_debug & DEBUG_DSP_DTMF)
+ printk(KERN_DEBUG "%s dsp %s cannot do hardware DTMF, "
+ "because rx_volume is changed\n",
+ __func__, dsp->name);
+ hardware = 0;
+ }
+ /* check if encryption is enabled */
+ if (dsp->bf_enable) {
+ if (dsp_debug & DEBUG_DSP_DTMF)
+ printk(KERN_DEBUG "%s dsp %s cannot do hardware DTMF, "
+ "because encryption is enabled\n",
+ __func__, dsp->name);
+ hardware = 0;
+ }
+ /* check if pipeline exists */
+ if (dsp->pipeline.inuse) {
+ if (dsp_debug & DEBUG_DSP_DTMF)
+ printk(KERN_DEBUG "%s dsp %s cannot do hardware DTMF, "
+ "because pipeline exists.\n",
+ __func__, dsp->name);
+ hardware = 0;
+ }
+
+ dsp->dtmf.hardware = hardware;
+ dsp->dtmf.software = !hardware;
+}
+
+
+/*************************************************************
+ * calculate the coefficients of the given sample and decode *
+ *************************************************************/
+
+/* the given sample is decoded. if the sample is not long enough for a
+ * complete frame, the decoding is finished and continued with the next
+ * call of this function.
+ *
+ * the algorithm is very good for detection with a minimum of errors. i
+ * tested it a lot. it even works with very short tones (40ms). the only
+ * disadvantage is that it doesn't work well with different volumes of both
+ * tones. this will happen if acoustically coupled dialers are used.
+ * it sometimes detects tones during speech, which is normal for decoders.
+ * use sequences to give commands during calls.
+ *
+ * dtmf - points to a structure of the current dtmf state
+ * spl and len - the sample
+ * fmt - 0 = alaw, 1 = ulaw, 2 = coefficients from HFC DTMF hw-decoder
+ */
+
+u8
+*dsp_dtmf_goertzel_decode(struct dsp *dsp, u8 *data, int len, int fmt)
+{
+ u8 what;
+ int size;
+ signed short *buf;
+ s32 sk, sk1, sk2;
+ int k, n, i;
+ s32 *hfccoeff;
+ s32 result[NCOEFF], tresh, treshl;
+ int lowgroup, highgroup;
+ s64 cos2pik_;
+
+ dsp->dtmf.digits[0] = '\0';
+
+ /* Note: The function will loop until the buffer has not enough samples
+ * left to decode a full frame.
+ */
+again:
+ /* convert samples */
+ size = dsp->dtmf.size;
+ buf = dsp->dtmf.buffer;
+ switch (fmt) {
+ case 0: /* alaw */
+ case 1: /* ulaw */
+ while (size < DSP_DTMF_NPOINTS && len) {
+ buf[size++] = dsp_audio_law_to_s32[*data++];
+ len--;
+ }
+ break;
+
+ case 2: /* HFC coefficients */
+ default:
+ if (len < 64) {
+ if (len > 0)
+ printk(KERN_ERR "%s: coefficients have invalid "
+ "size. (is=%d < must=%d)\n",
+ __func__, len, 64);
+ return dsp->dtmf.digits;
+ }
+ hfccoeff = (s32 *)data;
+ for (k = 0; k < NCOEFF; k++) {
+ sk2 = (*hfccoeff++)>>4;
+ sk = (*hfccoeff++)>>4;
+ if (sk > 32767 || sk < -32767 || sk2 > 32767
+ || sk2 < -32767)
+ printk(KERN_WARNING
+ "DTMF-Detection overflow\n");
+ /* compute |X(k)|**2 */
+ result[k] =
+ (sk * sk) -
+ (((cos2pik[k] * sk) >> 15) * sk2) +
+ (sk2 * sk2);
+ }
+ data += 64;
+ len -= 64;
+ goto coefficients;
+ break;
+ }
+ dsp->dtmf.size = size;
+
+ if (size < DSP_DTMF_NPOINTS)
+ return dsp->dtmf.digits;
+
+ dsp->dtmf.size = 0;
+
+ /* now we have a full buffer of signed long samples - we do goertzel */
+ for (k = 0; k < NCOEFF; k++) {
+ sk = 0;
+ sk1 = 0;
+ sk2 = 0;
+ buf = dsp->dtmf.buffer;
+ cos2pik_ = cos2pik[k];
+ for (n = 0; n < DSP_DTMF_NPOINTS; n++) {
+ sk = ((cos2pik_*sk1)>>15) - sk2 + (*buf++);
+ sk2 = sk1;
+ sk1 = sk;
+ }
+ sk >>= 8;
+ sk2 >>= 8;
+ if (sk > 32767 || sk < -32767 || sk2 > 32767 || sk2 < -32767)
+ printk(KERN_WARNING "DTMF-Detection overflow\n");
+ /* compute |X(k)|**2 */
+ result[k] =
+ (sk * sk) -
+ (((cos2pik[k] * sk) >> 15) * sk2) +
+ (sk2 * sk2);
+ }
+
+ /* our (squared) coefficients have been calculated, we need to process
+ * them.
+ */
+coefficients:
+ tresh = 0;
+ for (i = 0; i < NCOEFF; i++) {
+ if (result[i] < 0)
+ result[i] = 0;
+ if (result[i] > dsp->dtmf.treshold) {
+ if (result[i] > tresh)
+ tresh = result[i];
+ }
+ }
+
+ if (tresh == 0) {
+ what = 0;
+ goto storedigit;
+ }
+
+ if (dsp_debug & DEBUG_DSP_DTMFCOEFF)
+ printk(KERN_DEBUG "a %3d %3d %3d %3d %3d %3d %3d %3d"
+ " tr:%3d r %3d %3d %3d %3d %3d %3d %3d %3d\n",
+ result[0]/10000, result[1]/10000, result[2]/10000,
+ result[3]/10000, result[4]/10000, result[5]/10000,
+ result[6]/10000, result[7]/10000, tresh/10000,
+ result[0]/(tresh/100), result[1]/(tresh/100),
+ result[2]/(tresh/100), result[3]/(tresh/100),
+ result[4]/(tresh/100), result[5]/(tresh/100),
+ result[6]/(tresh/100), result[7]/(tresh/100));
+
+ /* calc digit (lowgroup/highgroup) */
+ lowgroup = -1;
+ highgroup = -1;
+ treshl = tresh >> 3; /* tones which are not on, must be below 9 dB */
+ tresh = tresh >> 2; /* touchtones must match within 6 dB */
+ for (i = 0; i < NCOEFF; i++) {
+ if (result[i] < treshl)
+ continue; /* ignore */
+ if (result[i] < tresh) {
+ lowgroup = -1;
+ highgroup = -1;
+ break; /* noise in between */
+ }
+ /* good level found. This is allowed only one time per group */
+ if (i < NCOEFF/2) {
+ /* lowgroup */
+ if (lowgroup >= 0) {
+ /* Bad. Another tone found. */
+ lowgroup = -1;
+ break;
+ } else
+ lowgroup = i;
+ } else {
+ /* higroup */
+ if (highgroup >= 0) {
+ /* Bad. Another tone found. */
+ highgroup = -1;
+ break;
+ } else
+ highgroup = i-(NCOEFF/2);
+ }
+ }
+
+ /* get digit or null */
+ what = 0;
+ if (lowgroup >= 0 && highgroup >= 0)
+ what = dtmf_matrix[lowgroup][highgroup];
+
+storedigit:
+ if (what && (dsp_debug & DEBUG_DSP_DTMF))
+ printk(KERN_DEBUG "DTMF what: %c\n", what);
+
+ if (dsp->dtmf.lastwhat != what)
+ dsp->dtmf.count = 0;
+
+ /* the tone (or no tone) must remain 3 times without change */
+ if (dsp->dtmf.count == 2) {
+ if (dsp->dtmf.lastdigit != what) {
+ dsp->dtmf.lastdigit = what;
+ if (what) {
+ if (dsp_debug & DEBUG_DSP_DTMF)
+ printk(KERN_DEBUG "DTMF digit: %c\n",
+ what);
+ if ((strlen(dsp->dtmf.digits)+1)
+ < sizeof(dsp->dtmf.digits)) {
+ dsp->dtmf.digits[strlen(
+ dsp->dtmf.digits)+1] = '\0';
+ dsp->dtmf.digits[strlen(
+ dsp->dtmf.digits)] = what;
+ }
+ }
+ }
+ } else
+ dsp->dtmf.count++;
+
+ dsp->dtmf.lastwhat = what;
+
+ goto again;
+}
+
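+ /*
+ * Illustration (not part of the driver): the Goertzel recurrence used
+ * above, for one frequency bin, in floating point for clarity. coeff
+ * corresponds to 2*cos(2*PI*k/N), i.e. cos2pik[k] / 32768.0; the driver
+ * does the same in fixed point. ex_goertzel_power is a made-up name.
+ */
+ #if 0 /* example only, never compiled */
+ /* squared magnitude of one DFT bin over n samples */
+ static double ex_goertzel_power(const double *x, int n, double coeff)
+ {
+     double s, s1 = 0.0, s2 = 0.0;
+     int i;
+
+     for (i = 0; i < n; i++) {
+         s = coeff * s1 - s2 + x[i]; /* s[i] = coeff*s[i-1] - s[i-2] + x[i] */
+         s2 = s1;
+         s1 = s;
+     }
+     /* |X(k)|^2 = s1^2 + s2^2 - coeff*s1*s2 */
+     return s1 * s1 + s2 * s2 - coeff * s1 * s2;
+ }
+ #endif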
+
diff --git a/drivers/isdn/mISDN/dsp_ecdis.h b/drivers/isdn/mISDN/dsp_ecdis.h
new file mode 100644
index 000000000000..8a20af43308b
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_ecdis.h
@@ -0,0 +1,110 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * ec_disable_detector.h - A detector which should eventually meet the
+ * G.164/G.165 requirements for detecting the
+ * 2100Hz echo cancellor disable tone.
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2001 Steve Underwood
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include "dsp_biquad.h"
+
+struct ec_disable_detector_state {
+ struct biquad2_state notch;
+ int notch_level;
+ int channel_level;
+ int tone_present;
+ int tone_cycle_duration;
+ int good_cycles;
+ int hit;
+};
+
+
+#define FALSE 0
+#define TRUE (!FALSE)
+
+static inline void
+echo_can_disable_detector_init(struct ec_disable_detector_state *det)
+{
+ /* Elliptic notch */
+ /* This is actually centred at 2095Hz, but gets the balance we want, due
+ to the asymmetric walls of the notch */
+ biquad2_init(&det->notch,
+ (int32_t) (-0.7600000*32768.0),
+ (int32_t) (-0.1183852*32768.0),
+ (int32_t) (-0.5104039*32768.0),
+ (int32_t) (0.1567596*32768.0),
+ (int32_t) (1.0000000*32768.0));
+
+ det->channel_level = 0;
+ det->notch_level = 0;
+ det->tone_present = FALSE;
+ det->tone_cycle_duration = 0;
+ det->good_cycles = 0;
+ det->hit = 0;
+}
+/*- End of function --------------------------------------------------------*/
+
+static inline int
+echo_can_disable_detector_update(struct ec_disable_detector_state *det,
+int16_t amp)
+{
+ int16_t notched;
+
+ notched = biquad2(&det->notch, amp);
+ /* Estimate the overall energy in the channel, and the energy in
+ the notch (i.e. overall channel energy - tone energy => noise).
+ Use abs instead of multiply for speed (is it really faster?).
+ Damp the overall energy a little more for a stable result.
+ Damp the notch energy a little less, so we don't damp out the
+ blip every time the phase reverses */
+ det->channel_level += ((abs(amp) - det->channel_level) >> 5);
+ det->notch_level += ((abs(notched) - det->notch_level) >> 4);
+ if (det->channel_level > 280) {
+ /* There is adequate energy in the channel.
+ Is it mostly at 2100Hz? */
+ if (det->notch_level*6 < det->channel_level) {
+ /* The notch says yes, so we have the tone. */
+ if (!det->tone_present) {
+ /* Do we get a kick every 450+-25ms? */
+ if (det->tone_cycle_duration >= 425*8
+ && det->tone_cycle_duration <= 475*8) {
+ det->good_cycles++;
+ if (det->good_cycles > 2)
+ det->hit = TRUE;
+ }
+ det->tone_cycle_duration = 0;
+ }
+ det->tone_present = TRUE;
+ } else
+ det->tone_present = FALSE;
+ det->tone_cycle_duration++;
+ } else {
+ det->tone_present = FALSE;
+ det->tone_cycle_duration = 0;
+ det->good_cycles = 0;
+ }
+ return det->hit;
+}
+/*- End of function --------------------------------------------------------*/
+/*- End of file ------------------------------------------------------------*/
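
The update function above tracks two signal levels with cheap leaky integrators, level += (|x| - level) >> shift, and treats the 2100 Hz disable tone as present when the energy left after the notch is less than one sixth of the overall channel energy (phase reversals every 450±25 ms are counted separately). The userspace sketch below reproduces just that energy-ratio decision; its textbook biquad notch is only a stand-in for the elliptic notch supplied by dsp_biquad.h, and the amplitude, shift values and thresholds are copied from the code above purely for illustration.

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* leaky integrator as used by the detector:
 * level += (|x| - level) >> shift   (bigger shift = heavier damping) */
static int leaky(int level, int x, int shift)
{
	return level + ((abs(x) - level) >> shift);
}

int main(void)
{
	const double fs = 8000.0, f0 = 2100.0, r = 0.95;
	const double c = 2.0 * cos(2.0 * M_PI * f0 / fs);
	double x1 = 0, x2 = 0, y1 = 0, y2 = 0;
	int channel_level = 0, notch_level = 0;
	int i;

	for (i = 0; i < 8000; i++) {
		/* test input: one second of a pure 2100 Hz tone */
		double x = 5000.0 * sin(2.0 * M_PI * f0 * i / fs);
		/* textbook biquad notch at 2100 Hz (stand-in for the
		 * driver's elliptic notch, illustration only) */
		double y = x - c * x1 + x2 + r * c * y1 - r * r * y2;

		x2 = x1; x1 = x;
		y2 = y1; y1 = y;

		channel_level = leaky(channel_level, (int)x, 5);
		notch_level = leaky(notch_level, (int)y, 4);
	}
	printf("channel=%d notch=%d -> 2100 Hz tone %s\n",
	       channel_level, notch_level,
	       (channel_level > 280 && notch_level * 6 < channel_level) ?
	       "present" : "absent");
	return 0;
}

Feeding a tone away from 2100 Hz through the same loop leaves the notch level close to the channel level, so the ratio test should fail and no "hit" would be reported.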
diff --git a/drivers/isdn/mISDN/dsp_hwec.c b/drivers/isdn/mISDN/dsp_hwec.c
new file mode 100644
index 000000000000..eb892d9dd5c6
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_hwec.c
@@ -0,0 +1,138 @@
+/*
+ * dsp_hwec.c:
+ * builtin mISDN dsp pipeline element for enabling the hw echocanceller
+ *
+ * Copyright (C) 2007, Nadi Sarrar
+ *
+ * Nadi Sarrar <nadi@beronet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mISDNdsp.h>
+#include <linux/mISDNif.h>
+#include "core.h"
+#include "dsp.h"
+#include "dsp_hwec.h"
+
+static struct mISDN_dsp_element_arg args[] = {
+	{ "deftaps", "128", "Set the number of echo canceller taps." },
+};
+
+static struct mISDN_dsp_element dsp_hwec_p = {
+ .name = "hwec",
+ .new = NULL,
+ .free = NULL,
+ .process_tx = NULL,
+ .process_rx = NULL,
+	.num_args = ARRAY_SIZE(args),
+ .args = args,
+};
+struct mISDN_dsp_element *dsp_hwec = &dsp_hwec_p;
+
+void dsp_hwec_enable(struct dsp *dsp, const char *arg)
+{
+ int deftaps = 128,
+ len;
+ struct mISDN_ctrl_req cq;
+
+ if (!dsp) {
+ printk(KERN_ERR "%s: failed to enable hwec: dsp is NULL\n",
+ __func__);
+ return;
+ }
+
+ if (!arg)
+ goto _do;
+
+ len = strlen(arg);
+ if (!len)
+ goto _do;
+
+ {
+ char _dup[len + 1];
+ char *dup, *tok, *name, *val;
+ int tmp;
+
+ strcpy(_dup, arg);
+ dup = _dup;
+
+ while ((tok = strsep(&dup, ","))) {
+ if (!strlen(tok))
+ continue;
+ name = strsep(&tok, "=");
+ val = tok;
+
+ if (!val)
+ continue;
+
+ if (!strcmp(name, "deftaps")) {
+ if (sscanf(val, "%d", &tmp) == 1)
+ deftaps = tmp;
+ }
+ }
+ }
+
+_do:
+ printk(KERN_DEBUG "%s: enabling hwec with deftaps=%d\n",
+ __func__, deftaps);
+ memset(&cq, 0, sizeof(cq));
+ cq.op = MISDN_CTRL_HFC_ECHOCAN_ON;
+ cq.p1 = deftaps;
+ if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) {
+ printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n",
+ __func__);
+ return;
+ }
+}
+
+void dsp_hwec_disable(struct dsp *dsp)
+{
+ struct mISDN_ctrl_req cq;
+
+ if (!dsp) {
+ printk(KERN_ERR "%s: failed to disable hwec: dsp is NULL\n",
+ __func__);
+ return;
+ }
+
+ printk(KERN_DEBUG "%s: disabling hwec\n", __func__);
+ memset(&cq, 0, sizeof(cq));
+ cq.op = MISDN_CTRL_HFC_ECHOCAN_OFF;
+ if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) {
+ printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n",
+ __func__);
+ return;
+ }
+}
+
+int dsp_hwec_init(void)
+{
+ mISDN_dsp_element_register(dsp_hwec);
+
+ return 0;
+}
+
+void dsp_hwec_exit(void)
+{
+ mISDN_dsp_element_unregister(dsp_hwec);
+}
+
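
dsp_hwec_enable() above accepts a comma-separated name=value argument string and currently understands a single key, deftaps. The userspace sketch below parses such a string with the same strsep() pattern; parse_deftaps() and the sample strings are hypothetical and exist only for the example. Note how the pointer returned by strdup() is kept aside so that free() is not handed the cursor strsep() has already advanced.

#define _DEFAULT_SOURCE		/* for strsep() in glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* parse "name=value,name=value,..." the way dsp_hwec_enable() does,
 * picking out the only argument it knows about: deftaps */
static int parse_deftaps(const char *arg)
{
	int deftaps = 128;	/* default used by the element */
	char *dup, *p, *tok, *name, *val;

	dup = strdup(arg);
	if (!dup)
		return deftaps;
	p = dup;		/* strsep() advances p; dup stays for free() */
	while ((tok = strsep(&p, ","))) {
		if (!*tok)
			continue;
		name = strsep(&tok, "=");
		val = tok;
		if (val && !strcmp(name, "deftaps"))
			deftaps = atoi(val);
	}
	free(dup);
	return deftaps;
}

int main(void)
{
	/* hypothetical pipeline argument strings */
	printf("deftaps=%d\n", parse_deftaps("deftaps=256"));
	printf("deftaps=%d\n", parse_deftaps(""));
	return 0;
}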
diff --git a/drivers/isdn/mISDN/dsp_hwec.h b/drivers/isdn/mISDN/dsp_hwec.h
new file mode 100644
index 000000000000..eebe80c3f713
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_hwec.h
@@ -0,0 +1,10 @@
+/*
+ * dsp_hwec.h
+ */
+
+extern struct mISDN_dsp_element *dsp_hwec;
+extern void dsp_hwec_enable(struct dsp *dsp, const char *arg);
+extern void dsp_hwec_disable(struct dsp *dsp);
+extern int dsp_hwec_init(void);
+extern void dsp_hwec_exit(void);
+
diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c
new file mode 100644
index 000000000000..850260ab57d0
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_pipeline.c
@@ -0,0 +1,348 @@
+/*
+ * dsp_pipeline.c: pipelined audio processing
+ *
+ * Copyright (C) 2007, Nadi Sarrar
+ *
+ * Nadi Sarrar <nadi@beronet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/mISDNif.h>
+#include <linux/mISDNdsp.h>
+#include "dsp.h"
+#include "dsp_hwec.h"
+
+/* uncomment for debugging */
+/*#define PIPELINE_DEBUG*/
+
+struct dsp_pipeline_entry {
+ struct mISDN_dsp_element *elem;
+ void *p;
+ struct list_head list;
+};
+struct dsp_element_entry {
+ struct mISDN_dsp_element *elem;
+ struct device dev;
+ struct list_head list;
+};
+
+static LIST_HEAD(dsp_elements);
+
+/* sysfs */
+static struct class *elements_class;
+
+static ssize_t
+attr_show_args(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct mISDN_dsp_element *elem = dev_get_drvdata(dev);
+	char *p = buf;
+	int i;
+
+	*buf = 0;
+	/* write through p so buf is never passed to sprintf() as both
+	 * the destination and a "%s" source */
+	for (i = 0; i < elem->num_args; ++i)
+		p += sprintf(p, "Name: %s\n%s%s%sDescription: %s\n\n",
+			elem->args[i].name,
+			elem->args[i].def ? "Default: " : "",
+			elem->args[i].def ? elem->args[i].def : "",
+			elem->args[i].def ? "\n" : "",
+			elem->args[i].desc);
+
+	return p - buf;
+}
+
+static struct device_attribute element_attributes[] = {
+ __ATTR(args, 0444, attr_show_args, NULL),
+};
+
+int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
+{
+ struct dsp_element_entry *entry;
+ int ret, i;
+
+ if (!elem)
+ return -EINVAL;
+
+ entry = kzalloc(sizeof(struct dsp_element_entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->elem = elem;
+
+ entry->dev.class = elements_class;
+ dev_set_drvdata(&entry->dev, elem);
+	snprintf(entry->dev.bus_id, BUS_ID_SIZE, "%s", elem->name);
+ ret = device_register(&entry->dev);
+ if (ret) {
+ printk(KERN_ERR "%s: failed to register %s\n",
+ __func__, elem->name);
+ goto err1;
+ }
+
+	for (i = 0; i < ARRAY_SIZE(element_attributes); ++i) {
+		ret = device_create_file(&entry->dev,
+			&element_attributes[i]);
+		if (ret) {
+			printk(KERN_ERR "%s: failed to create device file\n",
+				__func__);
+			goto err2;
+		}
+	}
+
+ list_add_tail(&entry->list, &dsp_elements);
+
+ printk(KERN_DEBUG "%s: %s registered\n", __func__, elem->name);
+
+ return 0;
+
+err2:
+ device_unregister(&entry->dev);
+err1:
+ kfree(entry);
+ return ret;
+}
+EXPORT_SYMBOL(mISDN_dsp_element_register);
+
+void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem)
+{
+ struct dsp_element_entry *entry, *n;
+
+ if (!elem)
+ return;
+
+ list_for_each_entry_safe(entry, n, &dsp_elements, list)
+ if (entry->elem == elem) {
+ list_del(&entry->list);
+ device_unregister(&entry->dev);
+ kfree(entry);
+ printk(KERN_DEBUG "%s: %s unregistered\n",
+ __func__, elem->name);
+ return;
+ }
+ printk(KERN_ERR "%s: element %s not in list.\n", __func__, elem->name);
+}
+EXPORT_SYMBOL(mISDN_dsp_element_unregister);
+
+int dsp_pipeline_module_init(void)
+{
+ elements_class = class_create(THIS_MODULE, "dsp_pipeline");
+ if (IS_ERR(elements_class))
+ return PTR_ERR(elements_class);
+
+#ifdef PIPELINE_DEBUG
+ printk(KERN_DEBUG "%s: dsp pipeline module initialized\n", __func__);
+#endif
+
+ dsp_hwec_init();
+
+ return 0;
+}
+
+void dsp_pipeline_module_exit(void)
+{
+ struct dsp_element_entry *entry, *n;
+
+ dsp_hwec_exit();
+
+ class_destroy(elements_class);
+
+ list_for_each_entry_safe(entry, n, &dsp_elements, list) {
+ list_del(&entry->list);
+ printk(KERN_WARNING "%s: element was still registered: %s\n",
+ __func__, entry->elem->name);
+ kfree(entry);
+ }
+
+ printk(KERN_DEBUG "%s: dsp pipeline module exited\n", __func__);
+}
+
+int dsp_pipeline_init(struct dsp_pipeline *pipeline)
+{
+ if (!pipeline)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&pipeline->list);
+
+#ifdef PIPELINE_DEBUG
+ printk(KERN_DEBUG "%s: dsp pipeline ready\n", __func__);
+#endif
+
+ return 0;
+}
+
+static inline void _dsp_pipeline_destroy(struct dsp_pipeline *pipeline)
+{
+ struct dsp_pipeline_entry *entry, *n;
+
+ list_for_each_entry_safe(entry, n, &pipeline->list, list) {
+ list_del(&entry->list);
+ if (entry->elem == dsp_hwec)
+ dsp_hwec_disable(container_of(pipeline, struct dsp,
+ pipeline));
+ else
+ entry->elem->free(entry->p);
+ kfree(entry);
+ }
+}
+
+void dsp_pipeline_destroy(struct dsp_pipeline *pipeline)
+{
+
+ if (!pipeline)
+ return;
+
+ _dsp_pipeline_destroy(pipeline);
+
+#ifdef PIPELINE_DEBUG
+ printk(KERN_DEBUG "%s: dsp pipeline destroyed\n", __func__);
+#endif
+}
+
+int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
+{
+ int len, incomplete = 0, found = 0;
+	char *dup, *next, *tok, *name, *args;
+ struct dsp_element_entry *entry, *n;
+ struct dsp_pipeline_entry *pipeline_entry;
+ struct mISDN_dsp_element *elem;
+
+ if (!pipeline)
+ return -EINVAL;
+
+ if (!list_empty(&pipeline->list))
+ _dsp_pipeline_destroy(pipeline);
+
+ if (!cfg)
+ return 0;
+
+ len = strlen(cfg);
+ if (!len)
+ return 0;
+
+ dup = kmalloc(len + 1, GFP_KERNEL);
+ if (!dup)
+ return 0;
+	strcpy(dup, cfg);
+	next = dup;	/* strsep() advances next; dup is kept for kfree() */
+	while ((tok = strsep(&next, "|"))) {
+ if (!strlen(tok))
+ continue;
+ name = strsep(&tok, "(");
+ args = strsep(&tok, ")");
+ if (args && !*args)
+ args = 0;
+
+ list_for_each_entry_safe(entry, n, &dsp_elements, list)
+ if (!strcmp(entry->elem->name, name)) {
+ elem = entry->elem;
+
+ pipeline_entry = kmalloc(sizeof(struct
+ dsp_pipeline_entry), GFP_KERNEL);
+ if (!pipeline_entry) {
+ printk(KERN_DEBUG "%s: failed to add "
+ "entry to pipeline: %s (out of "
+ "memory)\n", __func__, elem->name);
+ incomplete = 1;
+ goto _out;
+ }
+ pipeline_entry->elem = elem;
+
+ if (elem == dsp_hwec) {
+ /* This is a hack to make the hwec
+ available as a pipeline module */
+ dsp_hwec_enable(container_of(pipeline,
+ struct dsp, pipeline), args);
+ list_add_tail(&pipeline_entry->list,
+ &pipeline->list);
+ } else {
+ pipeline_entry->p = elem->new(args);
+ if (pipeline_entry->p) {
+ list_add_tail(&pipeline_entry->
+ list, &pipeline->list);
+#ifdef PIPELINE_DEBUG
+ printk(KERN_DEBUG "%s: created "
+ "instance of %s%s%s\n",
+ __func__, name, args ?
+ " with args " : "", args ?
+ args : "");
+#endif
+ } else {
+ printk(KERN_DEBUG "%s: failed "
+ "to add entry to pipeline: "
+ "%s (new() returned NULL)\n",
+ __func__, elem->name);
+ kfree(pipeline_entry);
+ incomplete = 1;
+ }
+ }
+ found = 1;
+ break;
+ }
+
+ if (found)
+ found = 0;
+ else {
+ printk(KERN_DEBUG "%s: element not found, skipping: "
+ "%s\n", __func__, name);
+ incomplete = 1;
+ }
+ }
+
+_out:
+ if (!list_empty(&pipeline->list))
+ pipeline->inuse = 1;
+ else
+ pipeline->inuse = 0;
+
+#ifdef PIPELINE_DEBUG
+ printk(KERN_DEBUG "%s: dsp pipeline built%s: %s\n",
+ __func__, incomplete ? " incomplete" : "", cfg);
+#endif
+ kfree(dup);
+ return 0;
+}
+
+void dsp_pipeline_process_tx(struct dsp_pipeline *pipeline, u8 *data, int len)
+{
+ struct dsp_pipeline_entry *entry;
+
+ if (!pipeline)
+ return;
+
+ list_for_each_entry(entry, &pipeline->list, list)
+ if (entry->elem->process_tx)
+ entry->elem->process_tx(entry->p, data, len);
+}
+
+void dsp_pipeline_process_rx(struct dsp_pipeline *pipeline, u8 *data, int len)
+{
+ struct dsp_pipeline_entry *entry;
+
+ if (!pipeline)
+ return;
+
+ list_for_each_entry_reverse(entry, &pipeline->list, list)
+ if (entry->elem->process_rx)
+ entry->elem->process_rx(entry->p, data, len);
+}
+
+
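
dsp_pipeline_build() above splits the configuration string on '|' and then splits each element into a name and an optional parenthesised argument string, e.g. "hwec(deftaps=256)|dtmf". The small program below shows the same strsep()-based tokenizing outside the kernel; the element names in the test string are hypothetical and nothing here touches the real element registry.

#define _DEFAULT_SOURCE		/* for strsep() in glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* split a pipeline config string of the form
 *	"elem1(arg1,arg2)|elem2|elem3(arg)"
 * into (name, args) pairs, mirroring dsp_pipeline_build() */
static void parse_pipeline(const char *cfg)
{
	char *dup, *next, *tok, *name, *args;

	dup = strdup(cfg);
	if (!dup)
		return;
	next = dup;		/* keep dup itself for free() */
	while ((tok = strsep(&next, "|"))) {
		if (!*tok)
			continue;
		name = strsep(&tok, "(");
		args = strsep(&tok, ")");
		if (args && !*args)
			args = NULL;
		printf("element '%s' args '%s'\n",
		       name, args ? args : "(none)");
	}
	free(dup);
}

int main(void)
{
	/* hypothetical configuration string */
	parse_pipeline("hwec(deftaps=256)|dtmf");
	return 0;
}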
diff --git a/drivers/isdn/mISDN/dsp_tones.c b/drivers/isdn/mISDN/dsp_tones.c
new file mode 100644
index 000000000000..23dd0dd21524
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_tones.c
@@ -0,0 +1,551 @@
+/*
+ * Audio support data for ISDN4Linux.
+ *
+ * Copyright Andreas Eversberg (jolly@eversberg.eu)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#include <linux/mISDNif.h>
+#include <linux/mISDNdsp.h>
+#include "core.h"
+#include "dsp.h"
+
+
+#define DATA_S sample_silence
+#define SIZE_S (&sizeof_silence)
+#define DATA_GA sample_german_all
+#define SIZE_GA (&sizeof_german_all)
+#define DATA_GO sample_german_old
+#define SIZE_GO (&sizeof_german_old)
+#define DATA_DT sample_american_dialtone
+#define SIZE_DT (&sizeof_american_dialtone)
+#define DATA_RI sample_american_ringing
+#define SIZE_RI (&sizeof_american_ringing)
+#define DATA_BU sample_american_busy
+#define SIZE_BU (&sizeof_american_busy)
+#define DATA_S1 sample_special1
+#define SIZE_S1 (&sizeof_special1)
+#define DATA_S2 sample_special2
+#define SIZE_S2 (&sizeof_special2)
+#define DATA_S3 sample_special3
+#define SIZE_S3 (&sizeof_special3)
+
+/***************/
+/* tones loops */
+/***************/
+
+/* all tones are alaw encoded */
+/* the last sample+1 is in phase with the first sample. the error is low */
+
+static u8 sample_german_all[] = {
+ 0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
+ 0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
+ 0xdc, 0xfc, 0x6c,
+ 0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
+ 0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
+ 0xdc, 0xfc, 0x6c,
+ 0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
+ 0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
+ 0xdc, 0xfc, 0x6c,
+ 0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
+ 0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
+ 0xdc, 0xfc, 0x6c,
+};
+static u32 sizeof_german_all = sizeof(sample_german_all);
+
+static u8 sample_german_old[] = {
+ 0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
+ 0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
+ 0x8c,
+ 0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
+ 0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
+ 0x8c,
+ 0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
+ 0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
+ 0x8c,
+ 0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
+ 0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
+ 0x8c,
+};
+static u32 sizeof_german_old = sizeof(sample_german_old);
+
+static u8 sample_american_dialtone[] = {
+ 0x2a, 0x18, 0x90, 0x6c, 0x4c, 0xbc, 0x4c, 0x6c,
+ 0x10, 0x58, 0x32, 0xb9, 0x31, 0x2d, 0x8d, 0x0d,
+ 0x8d, 0x2d, 0x31, 0x99, 0x0f, 0x28, 0x60, 0xf0,
+ 0xd0, 0x50, 0xd0, 0x30, 0x60, 0x08, 0x8e, 0x67,
+ 0x09, 0x19, 0x21, 0xe1, 0xd9, 0xb9, 0x29, 0x67,
+ 0x83, 0x02, 0xce, 0xbe, 0xee, 0x1a, 0x1b, 0xef,
+ 0xbf, 0xcf, 0x03, 0x82, 0x66, 0x28, 0xb8, 0xd8,
+ 0xe0, 0x20, 0x18, 0x08, 0x66, 0x8f, 0x09, 0x61,
+ 0x31, 0xd1, 0x51, 0xd1, 0xf1, 0x61, 0x29, 0x0e,
+ 0x98, 0x30, 0x2c, 0x8c, 0x0c, 0x8c, 0x2c, 0x30,
+ 0xb8, 0x33, 0x59, 0x11, 0x6d, 0x4d, 0xbd, 0x4d,
+ 0x6d, 0x91, 0x19,
+};
+static u32 sizeof_american_dialtone = sizeof(sample_american_dialtone);
+
+static u8 sample_american_ringing[] = {
+ 0x2a, 0xe0, 0xac, 0x0c, 0xbc, 0x4c, 0x8c, 0x90,
+ 0x48, 0xc7, 0xc1, 0xed, 0xcd, 0x4d, 0xcd, 0xed,
+ 0xc1, 0xb7, 0x08, 0x30, 0xec, 0xcc, 0xcc, 0x8c,
+ 0x10, 0x58, 0x1a, 0x99, 0x71, 0xed, 0x8d, 0x8d,
+ 0x2d, 0x41, 0x89, 0x9e, 0x20, 0x70, 0x2c, 0xec,
+ 0x2c, 0x70, 0x20, 0x86, 0x77, 0xe1, 0x31, 0x11,
+ 0xd1, 0xf1, 0x81, 0x09, 0xa3, 0x56, 0x58, 0x00,
+ 0x40, 0xc0, 0x60, 0x38, 0x46, 0x43, 0x57, 0x39,
+ 0xd9, 0x59, 0x99, 0xc9, 0x77, 0x2f, 0x2e, 0xc6,
+ 0xd6, 0x28, 0xd6, 0x36, 0x26, 0x2e, 0x8a, 0xa3,
+ 0x43, 0x63, 0x4b, 0x4a, 0x62, 0x42, 0xa2, 0x8b,
+ 0x2f, 0x27, 0x37, 0xd7, 0x29, 0xd7, 0xc7, 0x2f,
+ 0x2e, 0x76, 0xc8, 0x98, 0x58, 0xd8, 0x38, 0x56,
+ 0x42, 0x47, 0x39, 0x61, 0xc1, 0x41, 0x01, 0x59,
+ 0x57, 0xa2, 0x08, 0x80, 0xf0, 0xd0, 0x10, 0x30,
+ 0xe0, 0x76, 0x87, 0x21, 0x71, 0x2d, 0xed, 0x2d,
+ 0x71, 0x21, 0x9f, 0x88, 0x40, 0x2c, 0x8c, 0x8c,
+ 0xec, 0x70, 0x98, 0x1b, 0x59, 0x11, 0x8d, 0xcd,
+ 0xcd, 0xed, 0x31, 0x09, 0xb6, 0xc0, 0xec, 0xcc,
+ 0x4c, 0xcc, 0xec, 0xc0, 0xc6, 0x49, 0x91, 0x8d,
+ 0x4d, 0xbd, 0x0d, 0xad, 0xe1,
+};
+static u32 sizeof_american_ringing = sizeof(sample_american_ringing);
+
+static u8 sample_american_busy[] = {
+ 0x2a, 0x00, 0x6c, 0x4c, 0x4c, 0x6c, 0xb0, 0x66,
+ 0x99, 0x11, 0x6d, 0x8d, 0x2d, 0x41, 0xd7, 0x96,
+ 0x60, 0xf0, 0x70, 0x40, 0x58, 0xf6, 0x53, 0x57,
+ 0x09, 0x89, 0xd7, 0x5f, 0xe3, 0x2a, 0xe3, 0x5f,
+ 0xd7, 0x89, 0x09, 0x57, 0x53, 0xf6, 0x58, 0x40,
+ 0x70, 0xf0, 0x60, 0x96, 0xd7, 0x41, 0x2d, 0x8d,
+ 0x6d, 0x11, 0x99, 0x66, 0xb0, 0x6c, 0x4c, 0x4c,
+ 0x6c, 0x00, 0x2a, 0x01, 0x6d, 0x4d, 0x4d, 0x6d,
+ 0xb1, 0x67, 0x98, 0x10, 0x6c, 0x8c, 0x2c, 0x40,
+ 0xd6, 0x97, 0x61, 0xf1, 0x71, 0x41, 0x59, 0xf7,
+ 0x52, 0x56, 0x08, 0x88, 0xd6, 0x5e, 0xe2, 0x2a,
+ 0xe2, 0x5e, 0xd6, 0x88, 0x08, 0x56, 0x52, 0xf7,
+ 0x59, 0x41, 0x71, 0xf1, 0x61, 0x97, 0xd6, 0x40,
+ 0x2c, 0x8c, 0x6c, 0x10, 0x98, 0x67, 0xb1, 0x6d,
+ 0x4d, 0x4d, 0x6d, 0x01,
+};
+static u32 sizeof_american_busy = sizeof(sample_american_busy);
+
+static u8 sample_special1[] = {
+ 0x2a, 0x2c, 0xbc, 0x6c, 0xd6, 0x71, 0xbd, 0x0d,
+ 0xd9, 0x80, 0xcc, 0x4c, 0x40, 0x39, 0x0d, 0xbd,
+ 0x11, 0x86, 0xec, 0xbc, 0xec, 0x0e, 0x51, 0xbd,
+ 0x8d, 0x89, 0x30, 0x4c, 0xcc, 0xe0, 0xe1, 0xcd,
+ 0x4d, 0x31, 0x88, 0x8c, 0xbc, 0x50, 0x0f, 0xed,
+ 0xbd, 0xed, 0x87, 0x10, 0xbc, 0x0c, 0x38, 0x41,
+ 0x4d, 0xcd, 0x81, 0xd8, 0x0c, 0xbc, 0x70, 0xd7,
+ 0x6d, 0xbd, 0x2d,
+};
+static u32 sizeof_special1 = sizeof(sample_special1);
+
+static u8 sample_special2[] = {
+ 0x2a, 0xcc, 0x8c, 0xd7, 0x4d, 0x2d, 0x18, 0xbc,
+ 0x10, 0xc1, 0xbd, 0xc1, 0x10, 0xbc, 0x18, 0x2d,
+ 0x4d, 0xd7, 0x8c, 0xcc, 0x2a, 0xcd, 0x8d, 0xd6,
+ 0x4c, 0x2c, 0x19, 0xbd, 0x11, 0xc0, 0xbc, 0xc0,
+ 0x11, 0xbd, 0x19, 0x2c, 0x4c, 0xd6, 0x8d, 0xcd,
+ 0x2a, 0xcc, 0x8c, 0xd7, 0x4d, 0x2d, 0x18, 0xbc,
+ 0x10, 0xc1, 0xbd, 0xc1, 0x10, 0xbc, 0x18, 0x2d,
+ 0x4d, 0xd7, 0x8c, 0xcc, 0x2a, 0xcd, 0x8d, 0xd6,
+ 0x4c, 0x2c, 0x19, 0xbd, 0x11, 0xc0, 0xbc, 0xc0,
+ 0x11, 0xbd, 0x19, 0x2c, 0x4c, 0xd6, 0x8d, 0xcd,
+};
+static u32 sizeof_special2 = sizeof(sample_special2);
+
+static u8 sample_special3[] = {
+ 0x2a, 0xbc, 0x18, 0xcd, 0x11, 0x2c, 0x8c, 0xc1,
+ 0x4d, 0xd6, 0xbc, 0xd6, 0x4d, 0xc1, 0x8c, 0x2c,
+ 0x11, 0xcd, 0x18, 0xbc, 0x2a, 0xbd, 0x19, 0xcc,
+ 0x10, 0x2d, 0x8d, 0xc0, 0x4c, 0xd7, 0xbd, 0xd7,
+ 0x4c, 0xc0, 0x8d, 0x2d, 0x10, 0xcc, 0x19, 0xbd,
+ 0x2a, 0xbc, 0x18, 0xcd, 0x11, 0x2c, 0x8c, 0xc1,
+ 0x4d, 0xd6, 0xbc, 0xd6, 0x4d, 0xc1, 0x8c, 0x2c,
+ 0x11, 0xcd, 0x18, 0xbc, 0x2a, 0xbd, 0x19, 0xcc,
+ 0x10, 0x2d, 0x8d, 0xc0, 0x4c, 0xd7, 0xbd, 0xd7,
+ 0x4c, 0xc0, 0x8d, 0x2d, 0x10, 0xcc, 0x19, 0xbd,
+};
+static u32 sizeof_special3 = sizeof(sample_special3);
+
+static u8 sample_silence[] = {
+ 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
+ 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
+ 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
+ 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
+ 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
+ 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
+ 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
+ 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
+ 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
+ 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
+ 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
+ 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
+};
+static u32 sizeof_silence = sizeof(sample_silence);
+
+struct tones_samples {
+ u32 *len;
+ u8 *data;
+};
+static struct
+tones_samples samples[] = {
+ {&sizeof_german_all, sample_german_all},
+ {&sizeof_german_old, sample_german_old},
+ {&sizeof_american_dialtone, sample_american_dialtone},
+ {&sizeof_american_ringing, sample_american_ringing},
+ {&sizeof_american_busy, sample_american_busy},
+ {&sizeof_special1, sample_special1},
+ {&sizeof_special2, sample_special2},
+ {&sizeof_special3, sample_special3},
+ {NULL, NULL},
+};
+
+/***********************************
+ * generate ulaw from alaw samples *
+ ***********************************/
+
+void
+dsp_audio_generate_ulaw_samples(void)
+{
+ int i, j;
+
+ i = 0;
+ while (samples[i].len) {
+ j = 0;
+ while (j < (*samples[i].len)) {
+ samples[i].data[j] =
+ dsp_audio_alaw_to_ulaw[samples[i].data[j]];
+ j++;
+ }
+ i++;
+ }
+}
+
+
+/****************************
+ * tone sequence definition *
+ ****************************/
+
+struct pattern {
+ int tone;
+ u8 *data[10];
+ u32 *siz[10];
+ u32 seq[10];
+} pattern[] = {
+ {TONE_GERMAN_DIALTONE,
+ {DATA_GA, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {SIZE_GA, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {1900, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+
+ {TONE_GERMAN_OLDDIALTONE,
+ {DATA_GO, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {SIZE_GO, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {1998, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+
+ {TONE_AMERICAN_DIALTONE,
+ {DATA_DT, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {SIZE_DT, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {8000, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+
+ {TONE_GERMAN_DIALPBX,
+ {DATA_GA, DATA_S, DATA_GA, DATA_S, DATA_GA, DATA_S, 0, 0, 0, 0},
+ {SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, 0, 0, 0, 0},
+ {2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} },
+
+ {TONE_GERMAN_OLDDIALPBX,
+ {DATA_GO, DATA_S, DATA_GO, DATA_S, DATA_GO, DATA_S, 0, 0, 0, 0},
+ {SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, 0, 0, 0, 0},
+ {2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} },
+
+ {TONE_AMERICAN_DIALPBX,
+ {DATA_DT, DATA_S, DATA_DT, DATA_S, DATA_DT, DATA_S, 0, 0, 0, 0},
+ {SIZE_DT, SIZE_S, SIZE_DT, SIZE_S, SIZE_DT, SIZE_S, 0, 0, 0, 0},
+ {2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} },
+
+ {TONE_GERMAN_RINGING,
+ {DATA_GA, DATA_S, 0, 0, 0, 0, 0, 0, 0, 0},
+ {SIZE_GA, SIZE_S, 0, 0, 0, 0, 0, 0, 0, 0},
+ {8000, 32000, 0, 0, 0, 0, 0, 0, 0, 0} },
+
+ {TONE_GERMAN_OLDRINGING,
+ {DATA_GO, DATA_S, 0, 0, 0, 0, 0, 0, 0, 0},
+ {SIZE_GO, SIZE_S, 0, 0, 0, 0, 0, 0, 0, 0},
+ {8000, 40000, 0, 0, 0, 0, 0, 0, 0, 0} },
+
+ {TONE_AMERICAN_RINGING,
+ {DATA_RI, DATA_S, 0, 0, 0, 0, 0, 0, 0, 0},
+ {SIZE_RI, SIZE_S, 0, 0, 0, 0, 0, 0, 0, 0},
+ {8000, 32000, 0, 0, 0, 0, 0, 0, 0, 0} },
+
+ {TONE_GERMAN_RINGPBX,
+ {DATA_GA, DATA_S, DATA_GA, DATA_S, 0, 0, 0, 0, 0, 0},
+ {SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, 0, 0, 0, 0, 0, 0},
+ {4000, 4000, 4000, 28000, 0, 0, 0, 0, 0, 0} },
+
+ {TONE_GERMAN_OLDRINGPBX,
+ {DATA_GO, DATA_S, DATA_GO, DATA_S, 0, 0, 0, 0, 0, 0},
+ {SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, 0, 0, 0, 0, 0, 0},
+ {4000, 4000, 4000, 28000, 0, 0, 0, 0, 0, 0} },
+
+ {TONE_AMERICAN_RINGPBX,
+ {DATA_RI, DATA_S, DATA_RI, DATA_S, 0, 0, 0, 0, 0, 0},
+ {SIZE_RI, SIZE_S, SIZE_RI, SIZE_S, 0, 0, 0, 0, 0, 0},
+ {4000, 4000, 4000, 28000, 0, 0, 0, 0, 0, 0} },
+
+ {TONE_GERMAN_BUSY,
+ {DATA_GA, DATA_S, 0, 0, 0, 0, 0, 0, 0, 0},
+ {SIZE_GA, SIZE_S, 0, 0, 0, 0, 0, 0, 0, 0},
+ {4000, 4000, 0, 0, 0, 0, 0, 0, 0, 0} },
+
+ {TONE_GERMAN_OLDBUSY,
+ {DATA_GO, DATA_S, 0, 0, 0, 0, 0, 0, 0, 0},
+ {SIZE_GO, SIZE_S, 0, 0, 0, 0, 0, 0, 0, 0},
+ {1000, 5000, 0, 0, 0, 0, 0, 0, 0, 0} },
+
+ {TONE_AMERICAN_BUSY,
+ {DATA_BU, DATA_S, 0, 0, 0, 0, 0, 0, 0, 0},
+ {SIZE_BU, SIZE_S, 0, 0, 0, 0, 0, 0, 0, 0},
+ {4000, 4000, 0, 0, 0, 0, 0, 0, 0, 0} },
+
+ {TONE_GERMAN_HANGUP,
+ {DATA_GA, DATA_S, 0, 0, 0, 0, 0, 0, 0, 0},
+ {SIZE_GA, SIZE_S, 0, 0, 0, 0, 0, 0, 0, 0},
+ {4000, 4000, 0, 0, 0, 0, 0, 0, 0, 0} },
+
+ {TONE_GERMAN_OLDHANGUP,
+ {DATA_GO, DATA_S, 0, 0, 0, 0, 0, 0, 0, 0},
+ {SIZE_GO, SIZE_S, 0, 0, 0, 0, 0, 0, 0, 0},
+ {1000, 5000, 0, 0, 0, 0, 0, 0, 0, 0} },
+
+ {TONE_AMERICAN_HANGUP,
+ {DATA_DT, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {SIZE_DT, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {8000, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+
+ {TONE_SPECIAL_INFO,
+ {DATA_S1, DATA_S2, DATA_S3, DATA_S, 0, 0, 0, 0, 0, 0},
+ {SIZE_S1, SIZE_S2, SIZE_S3, SIZE_S, 0, 0, 0, 0, 0, 0},
+ {2666, 2666, 2666, 8002, 0, 0, 0, 0, 0, 0} },
+
+ {TONE_GERMAN_GASSENBESETZT,
+ {DATA_GA, DATA_S, 0, 0, 0, 0, 0, 0, 0, 0},
+ {SIZE_GA, SIZE_S, 0, 0, 0, 0, 0, 0, 0, 0},
+ {2000, 2000, 0, 0, 0, 0, 0, 0, 0, 0} },
+
+ {TONE_GERMAN_AUFSCHALTTON,
+ {DATA_GO, DATA_S, DATA_GO, DATA_S, 0, 0, 0, 0, 0, 0},
+ {SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, 0, 0, 0, 0, 0, 0},
+ {1000, 5000, 1000, 17000, 0, 0, 0, 0, 0, 0} },
+
+ {0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+};
+
+/******************
+ * copy tone data *
+ ******************/
+
+/* the given buffer is filled with the requested number of tone bytes.
+ * the count will be changed and may begin from 0 each pattern period.
+ * the idea is to precalculate the pointers and lengths, so only one
+ * memcpy is needed per call, or two if the tone sequence changes.
+ *
+ * pattern - the type of the pattern
+ * count - the sample from the beginning of the pattern (phase)
+ * len - the number of bytes
+ *
+ * if the tone has finished (e.g. knocking tone), dsp->tones is turned off
+ */
+void dsp_tone_copy(struct dsp *dsp, u8 *data, int len)
+{
+ int index, count, start, num;
+ struct pattern *pat;
+ struct dsp_tone *tone = &dsp->tone;
+
+ /* if we have no tone, we copy silence */
+ if (!tone->tone) {
+ memset(data, dsp_silence, len);
+ return;
+ }
+
+ /* process pattern */
+ pat = (struct pattern *)tone->pattern;
+ /* points to the current pattern */
+ index = tone->index; /* gives current sequence index */
+ count = tone->count; /* gives current sample */
+
+ /* copy sample */
+ while (len) {
+ /* find sample to start with */
+ while (42) {
+			/* wrap around */
+ if (!pat->seq[index]) {
+ count = 0;
+ index = 0;
+ }
+ /* check if we are currently playing this tone */
+ if (count < pat->seq[index])
+ break;
+ if (dsp_debug & DEBUG_DSP_TONE)
+ printk(KERN_DEBUG "%s: reaching next sequence "
+ "(index=%d)\n", __func__, index);
+ count -= pat->seq[index];
+ index++;
+ }
+ /* calculate start and number of samples */
+ start = count % (*(pat->siz[index]));
+ num = len;
+ if (num+count > pat->seq[index])
+ num = pat->seq[index] - count;
+ if (num+start > (*(pat->siz[index])))
+ num = (*(pat->siz[index])) - start;
+ /* copy memory */
+ memcpy(data, pat->data[index]+start, num);
+ /* reduce length */
+ data += num;
+ count += num;
+ len -= num;
+ }
+ tone->index = index;
+ tone->count = count;
+
+	return;
+}
+
+
+/*******************************
+ * send HW message to hfc card *
+ *******************************/
+
+static void
+dsp_tone_hw_message(struct dsp *dsp, u8 *sample, int len)
+{
+ struct sk_buff *nskb;
+
+ /* unlocking is not required, because we don't expect a response */
+ nskb = _alloc_mISDN_skb(PH_CONTROL_REQ,
+ (len)?HFC_SPL_LOOP_ON:HFC_SPL_LOOP_OFF, len, sample,
+ GFP_ATOMIC);
+ if (nskb) {
+ if (dsp->ch.peer) {
+ if (dsp->ch.recv(dsp->ch.peer, nskb))
+ dev_kfree_skb(nskb);
+ } else
+ dev_kfree_skb(nskb);
+ }
+}
+
+
+/*****************
+ * timer expires *
+ *****************/
+void
+dsp_tone_timeout(void *arg)
+{
+ struct dsp *dsp = arg;
+ struct dsp_tone *tone = &dsp->tone;
+ struct pattern *pat = (struct pattern *)tone->pattern;
+ int index = tone->index;
+
+ if (!tone->tone)
+ return;
+
+ index++;
+ if (!pat->seq[index])
+ index = 0;
+ tone->index = index;
+
+ /* set next tone */
+ if (pat->data[index] == DATA_S)
+ dsp_tone_hw_message(dsp, 0, 0);
+ else
+ dsp_tone_hw_message(dsp, pat->data[index], *(pat->siz[index]));
+ /* set timer */
+ init_timer(&tone->tl);
+ tone->tl.expires = jiffies + (pat->seq[index] * HZ) / 8000;
+ add_timer(&tone->tl);
+}
+
+
+/********************
+ * set/release tone *
+ ********************/
+
+/*
+ * tones are realized by streaming or by special loop commands if supported
+ * by hardware. when hardware is used, the patterns will be controlled by
+ * timers.
+ */
+int
+dsp_tone(struct dsp *dsp, int tone)
+{
+ struct pattern *pat;
+ int i;
+ struct dsp_tone *tonet = &dsp->tone;
+
+ tonet->software = 0;
+ tonet->hardware = 0;
+
+ /* we turn off the tone */
+ if (!tone) {
+		if (dsp->features.hfc_loops) {
+			if (timer_pending(&tonet->tl))
+				del_timer(&tonet->tl);
+			dsp_tone_hw_message(dsp, NULL, 0);
+		}
+ tonet->tone = 0;
+ return 0;
+ }
+
+ pat = NULL;
+ i = 0;
+ while (pattern[i].tone) {
+ if (pattern[i].tone == tone) {
+ pat = &pattern[i];
+ break;
+ }
+ i++;
+ }
+ if (!pat) {
+ printk(KERN_WARNING "dsp: given tone 0x%x is invalid\n", tone);
+ return -EINVAL;
+ }
+ if (dsp_debug & DEBUG_DSP_TONE)
+ printk(KERN_DEBUG "%s: now starting tone %d (index=%d)\n",
+ __func__, tone, 0);
+ tonet->tone = tone;
+ tonet->pattern = pat;
+ tonet->index = 0;
+ tonet->count = 0;
+
+ if (dsp->features.hfc_loops) {
+ tonet->hardware = 1;
+ /* set first tone */
+ dsp_tone_hw_message(dsp, pat->data[0], *(pat->siz[0]));
+ /* set timer */
+ if (timer_pending(&tonet->tl))
+ del_timer(&tonet->tl);
+ init_timer(&tonet->tl);
+ tonet->tl.expires = jiffies + (pat->seq[0] * HZ) / 8000;
+ add_timer(&tonet->tl);
+ } else {
+ tonet->software = 1;
+ }
+
+ return 0;
+}
+
+
+
+
+
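
dsp_tone_copy() above treats a tone pattern as a list of (sample buffer, buffer size, segment length) triples and, from the running byte count, works out which segment is currently playing and the offset into that segment's looping sample buffer before each memcpy. The sketch below isolates just that arithmetic; the two-segment "ring" pattern, its buffer sizes and the test counts are invented for the example, and the seq values stand in for pat->seq[].

#include <stdio.h>

/* one entry of a tone pattern: which sample buffer to play and for how
 * many bytes before moving to the next entry (seq == 0 terminates) */
struct seg {
	const char *name;	/* stands in for pat->data[i] */
	unsigned int size;	/* stands in for *pat->siz[i]  */
	unsigned int seq;	/* stands in for pat->seq[i]   */
};

/* Given a phase 'count' (bytes since pattern start), find the segment
 * index and the offset inside that segment's sample buffer -- the same
 * arithmetic dsp_tone_copy() uses before its memcpy. */
static void locate(const struct seg *pat, unsigned int count)
{
	unsigned int index = 0, start;

	for (;;) {
		if (!pat[index].seq) {		/* wrap around, phase resets */
			index = 0;
			count = 0;
		}
		if (count < pat[index].seq)	/* inside this segment */
			break;
		count -= pat[index].seq;
		index++;
	}
	start = count % pat[index].size;	/* offset into sample loop */
	printf("segment %u (%s), offset %u\n",
	       index, pat[index].name, start);
}

int main(void)
{
	/* hypothetical ringing pattern: 1 s of tone, 4 s of silence */
	static const struct seg ring[] = {
		{ "tone",    76, 8000 },
		{ "silence", 96, 32000 },
		{ NULL, 0, 0 },
	};

	locate(ring, 0);	/* start of the tone segment      */
	locate(ring, 12000);	/* somewhere inside the silence   */
	locate(ring, 41000);	/* past one period: wraps to 0    */
	return 0;
}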
diff --git a/drivers/isdn/mISDN/fsm.c b/drivers/isdn/mISDN/fsm.c
new file mode 100644
index 000000000000..b5d6553f2dc8
--- /dev/null
+++ b/drivers/isdn/mISDN/fsm.c
@@ -0,0 +1,183 @@
+/*
+ * finite state machine implementation
+ *
+ * Author Karsten Keil <kkeil@novell.com>
+ *
+ * Thanks to Jan den Ouden
+ * Fritz Elfert
+ * Copyright 2008 by Karsten Keil <kkeil@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include "fsm.h"
+
+#define FSM_TIMER_DEBUG 0
+
+void
+mISDN_FsmNew(struct Fsm *fsm,
+ struct FsmNode *fnlist, int fncount)
+{
+ int i;
+
+ fsm->jumpmatrix = kzalloc(sizeof(FSMFNPTR) * fsm->state_count *
+ fsm->event_count, GFP_KERNEL);
+
+ for (i = 0; i < fncount; i++)
+ if ((fnlist[i].state >= fsm->state_count) ||
+ (fnlist[i].event >= fsm->event_count)) {
+ printk(KERN_ERR
+ "mISDN_FsmNew Error: %d st(%ld/%ld) ev(%ld/%ld)\n",
+ i, (long)fnlist[i].state, (long)fsm->state_count,
+ (long)fnlist[i].event, (long)fsm->event_count);
+ } else
+ fsm->jumpmatrix[fsm->state_count * fnlist[i].event +
+ fnlist[i].state] = (FSMFNPTR) fnlist[i].routine;
+}
+EXPORT_SYMBOL(mISDN_FsmNew);
+
+void
+mISDN_FsmFree(struct Fsm *fsm)
+{
+ kfree((void *) fsm->jumpmatrix);
+}
+EXPORT_SYMBOL(mISDN_FsmFree);
+
+int
+mISDN_FsmEvent(struct FsmInst *fi, int event, void *arg)
+{
+ FSMFNPTR r;
+
+ if ((fi->state >= fi->fsm->state_count) ||
+ (event >= fi->fsm->event_count)) {
+ printk(KERN_ERR
+ "mISDN_FsmEvent Error st(%ld/%ld) ev(%d/%ld)\n",
+ (long)fi->state, (long)fi->fsm->state_count, event,
+ (long)fi->fsm->event_count);
+ return 1;
+ }
+ r = fi->fsm->jumpmatrix[fi->fsm->state_count * event + fi->state];
+ if (r) {
+ if (fi->debug)
+ fi->printdebug(fi, "State %s Event %s",
+ fi->fsm->strState[fi->state],
+ fi->fsm->strEvent[event]);
+ r(fi, event, arg);
+ return 0;
+ } else {
+ if (fi->debug)
+ fi->printdebug(fi, "State %s Event %s no action",
+ fi->fsm->strState[fi->state],
+ fi->fsm->strEvent[event]);
+ return 1;
+ }
+}
+EXPORT_SYMBOL(mISDN_FsmEvent);
+
+void
+mISDN_FsmChangeState(struct FsmInst *fi, int newstate)
+{
+ fi->state = newstate;
+ if (fi->debug)
+ fi->printdebug(fi, "ChangeState %s",
+ fi->fsm->strState[newstate]);
+}
+EXPORT_SYMBOL(mISDN_FsmChangeState);
+
+static void
+FsmExpireTimer(struct FsmTimer *ft)
+{
+#if FSM_TIMER_DEBUG
+ if (ft->fi->debug)
+ ft->fi->printdebug(ft->fi, "FsmExpireTimer %lx", (long) ft);
+#endif
+ mISDN_FsmEvent(ft->fi, ft->event, ft->arg);
+}
+
+void
+mISDN_FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft)
+{
+ ft->fi = fi;
+ ft->tl.function = (void *) FsmExpireTimer;
+ ft->tl.data = (long) ft;
+#if FSM_TIMER_DEBUG
+ if (ft->fi->debug)
+ ft->fi->printdebug(ft->fi, "mISDN_FsmInitTimer %lx", (long) ft);
+#endif
+ init_timer(&ft->tl);
+}
+EXPORT_SYMBOL(mISDN_FsmInitTimer);
+
+void
+mISDN_FsmDelTimer(struct FsmTimer *ft, int where)
+{
+#if FSM_TIMER_DEBUG
+ if (ft->fi->debug)
+ ft->fi->printdebug(ft->fi, "mISDN_FsmDelTimer %lx %d",
+ (long) ft, where);
+#endif
+ del_timer(&ft->tl);
+}
+EXPORT_SYMBOL(mISDN_FsmDelTimer);
+
+int
+mISDN_FsmAddTimer(struct FsmTimer *ft,
+ int millisec, int event, void *arg, int where)
+{
+
+#if FSM_TIMER_DEBUG
+ if (ft->fi->debug)
+ ft->fi->printdebug(ft->fi, "mISDN_FsmAddTimer %lx %d %d",
+ (long) ft, millisec, where);
+#endif
+
+ if (timer_pending(&ft->tl)) {
+ if (ft->fi->debug) {
+ printk(KERN_WARNING
+ "mISDN_FsmAddTimer: timer already active!\n");
+ ft->fi->printdebug(ft->fi,
+ "mISDN_FsmAddTimer already active!");
+ }
+ return -1;
+ }
+ init_timer(&ft->tl);
+ ft->event = event;
+ ft->arg = arg;
+ ft->tl.expires = jiffies + (millisec * HZ) / 1000;
+ add_timer(&ft->tl);
+ return 0;
+}
+EXPORT_SYMBOL(mISDN_FsmAddTimer);
+
+void
+mISDN_FsmRestartTimer(struct FsmTimer *ft,
+ int millisec, int event, void *arg, int where)
+{
+
+#if FSM_TIMER_DEBUG
+ if (ft->fi->debug)
+ ft->fi->printdebug(ft->fi, "mISDN_FsmRestartTimer %lx %d %d",
+ (long) ft, millisec, where);
+#endif
+
+ if (timer_pending(&ft->tl))
+ del_timer(&ft->tl);
+ init_timer(&ft->tl);
+ ft->event = event;
+ ft->arg = arg;
+ ft->tl.expires = jiffies + (millisec * HZ) / 1000;
+ add_timer(&ft->tl);
+}
+EXPORT_SYMBOL(mISDN_FsmRestartTimer);
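
mISDN_FsmEvent() above dispatches through a flat jump matrix indexed as state_count * event + state, with a NULL entry meaning "no action for this state/event pair". The toy program below builds such a matrix for a hypothetical two-state machine just to show the indexing; the states, events and handlers are invented for the example.

#include <stdio.h>
#include <stdlib.h>

enum { ST_IDLE, ST_ACTIVE, ST_COUNT };
enum { EV_START, EV_STOP, EV_COUNT };

typedef void (*fsmfn)(int *state, int event);

static void go_active(int *state, int event)
{
	(void)event;
	*state = ST_ACTIVE;
	puts("-> active");
}

static void go_idle(int *state, int event)
{
	(void)event;
	*state = ST_IDLE;
	puts("-> idle");
}

int main(void)
{
	fsmfn *jumpmatrix = calloc(ST_COUNT * EV_COUNT, sizeof(*jumpmatrix));
	const int events[] = { EV_START, EV_START, EV_STOP };
	int state = ST_IDLE, i;

	if (!jumpmatrix)
		return 1;
	/* fill only the defined transitions, as mISDN_FsmNew() does */
	jumpmatrix[ST_COUNT * EV_START + ST_IDLE] = go_active;
	jumpmatrix[ST_COUNT * EV_STOP + ST_ACTIVE] = go_idle;

	for (i = 0; i < 3; i++) {
		fsmfn fn = jumpmatrix[ST_COUNT * events[i] + state];

		if (fn)
			fn(&state, events[i]);
		else	/* like mISDN_FsmEvent() returning 1 */
			puts("no action");
	}
	free(jumpmatrix);
	return 0;
}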
diff --git a/drivers/isdn/mISDN/fsm.h b/drivers/isdn/mISDN/fsm.h
new file mode 100644
index 000000000000..928f5be192c1
--- /dev/null
+++ b/drivers/isdn/mISDN/fsm.h
@@ -0,0 +1,67 @@
+/*
+ *
+ * Author Karsten Keil <kkeil@novell.com>
+ *
+ * Thanks to Jan den Ouden
+ * Fritz Elfert
+ * Copyright 2008 by Karsten Keil <kkeil@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MISDN_FSM_H
+#define _MISDN_FSM_H
+
+#include <linux/timer.h>
+
+/* Statemachine */
+
+struct FsmInst;
+
+typedef void (*FSMFNPTR)(struct FsmInst *, int, void *);
+
+struct Fsm {
+ FSMFNPTR *jumpmatrix;
+ int state_count, event_count;
+ char **strEvent, **strState;
+};
+
+struct FsmInst {
+ struct Fsm *fsm;
+ int state;
+ int debug;
+ void *userdata;
+ int userint;
+ void (*printdebug) (struct FsmInst *, char *, ...);
+};
+
+struct FsmNode {
+ int state, event;
+ void (*routine) (struct FsmInst *, int, void *);
+};
+
+struct FsmTimer {
+ struct FsmInst *fi;
+ struct timer_list tl;
+ int event;
+ void *arg;
+};
+
+extern void mISDN_FsmNew(struct Fsm *, struct FsmNode *, int);
+extern void mISDN_FsmFree(struct Fsm *);
+extern int mISDN_FsmEvent(struct FsmInst *, int , void *);
+extern void mISDN_FsmChangeState(struct FsmInst *, int);
+extern void mISDN_FsmInitTimer(struct FsmInst *, struct FsmTimer *);
+extern int mISDN_FsmAddTimer(struct FsmTimer *, int, int, void *, int);
+extern void mISDN_FsmRestartTimer(struct FsmTimer *, int, int, void *, int);
+extern void mISDN_FsmDelTimer(struct FsmTimer *, int);
+
+#endif
diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c
new file mode 100644
index 000000000000..2596fba4e614
--- /dev/null
+++ b/drivers/isdn/mISDN/hwchannel.c
@@ -0,0 +1,365 @@
+/*
+ *
+ * Author Karsten Keil <kkeil@novell.com>
+ *
+ * Copyright 2008 by Karsten Keil <kkeil@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/mISDNhw.h>
+
+static void
+dchannel_bh(struct work_struct *ws)
+{
+ struct dchannel *dch = container_of(ws, struct dchannel, workq);
+ struct sk_buff *skb;
+ int err;
+
+ if (test_and_clear_bit(FLG_RECVQUEUE, &dch->Flags)) {
+ while ((skb = skb_dequeue(&dch->rqueue))) {
+ if (likely(dch->dev.D.peer)) {
+ err = dch->dev.D.recv(dch->dev.D.peer, skb);
+ if (err)
+ dev_kfree_skb(skb);
+ } else
+ dev_kfree_skb(skb);
+ }
+ }
+ if (test_and_clear_bit(FLG_PHCHANGE, &dch->Flags)) {
+ if (dch->phfunc)
+ dch->phfunc(dch);
+ }
+}
+
+static void
+bchannel_bh(struct work_struct *ws)
+{
+ struct bchannel *bch = container_of(ws, struct bchannel, workq);
+ struct sk_buff *skb;
+ int err;
+
+ if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
+ while ((skb = skb_dequeue(&bch->rqueue))) {
+ if (bch->rcount >= 64)
+				printk(KERN_WARNING "B-channel %p receive "
+					"queue is full, but empties...\n", bch);
+ bch->rcount--;
+ if (likely(bch->ch.peer)) {
+ err = bch->ch.recv(bch->ch.peer, skb);
+ if (err)
+ dev_kfree_skb(skb);
+ } else
+ dev_kfree_skb(skb);
+ }
+ }
+}
+
+int
+mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
+{
+ test_and_set_bit(FLG_HDLC, &ch->Flags);
+ ch->maxlen = maxlen;
+ ch->hw = NULL;
+ ch->rx_skb = NULL;
+ ch->tx_skb = NULL;
+ ch->tx_idx = 0;
+ ch->phfunc = phf;
+ skb_queue_head_init(&ch->squeue);
+ skb_queue_head_init(&ch->rqueue);
+ INIT_LIST_HEAD(&ch->dev.bchannels);
+ INIT_WORK(&ch->workq, dchannel_bh);
+ return 0;
+}
+EXPORT_SYMBOL(mISDN_initdchannel);
+
+int
+mISDN_initbchannel(struct bchannel *ch, int maxlen)
+{
+ ch->Flags = 0;
+ ch->maxlen = maxlen;
+ ch->hw = NULL;
+ ch->rx_skb = NULL;
+ ch->tx_skb = NULL;
+ ch->tx_idx = 0;
+ skb_queue_head_init(&ch->rqueue);
+ ch->rcount = 0;
+ ch->next_skb = NULL;
+ INIT_WORK(&ch->workq, bchannel_bh);
+ return 0;
+}
+EXPORT_SYMBOL(mISDN_initbchannel);
+
+int
+mISDN_freedchannel(struct dchannel *ch)
+{
+ if (ch->tx_skb) {
+ dev_kfree_skb(ch->tx_skb);
+ ch->tx_skb = NULL;
+ }
+ if (ch->rx_skb) {
+ dev_kfree_skb(ch->rx_skb);
+ ch->rx_skb = NULL;
+ }
+ skb_queue_purge(&ch->squeue);
+ skb_queue_purge(&ch->rqueue);
+ flush_scheduled_work();
+ return 0;
+}
+EXPORT_SYMBOL(mISDN_freedchannel);
+
+int
+mISDN_freebchannel(struct bchannel *ch)
+{
+ if (ch->tx_skb) {
+ dev_kfree_skb(ch->tx_skb);
+ ch->tx_skb = NULL;
+ }
+ if (ch->rx_skb) {
+ dev_kfree_skb(ch->rx_skb);
+ ch->rx_skb = NULL;
+ }
+ if (ch->next_skb) {
+ dev_kfree_skb(ch->next_skb);
+ ch->next_skb = NULL;
+ }
+ skb_queue_purge(&ch->rqueue);
+ ch->rcount = 0;
+ flush_scheduled_work();
+ return 0;
+}
+EXPORT_SYMBOL(mISDN_freebchannel);
+
+static inline u_int
+get_sapi_tei(u_char *p)
+{
+ u_int sapi, tei;
+
+ sapi = *p >> 2;
+ tei = p[1] >> 1;
+ return sapi | (tei << 8);
+}
+
+void
+recv_Dchannel(struct dchannel *dch)
+{
+ struct mISDNhead *hh;
+
+ if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
+ dev_kfree_skb(dch->rx_skb);
+ dch->rx_skb = NULL;
+ return;
+ }
+ hh = mISDN_HEAD_P(dch->rx_skb);
+ hh->prim = PH_DATA_IND;
+ hh->id = get_sapi_tei(dch->rx_skb->data);
+ skb_queue_tail(&dch->rqueue, dch->rx_skb);
+ dch->rx_skb = NULL;
+ schedule_event(dch, FLG_RECVQUEUE);
+}
+EXPORT_SYMBOL(recv_Dchannel);
+
+void
+recv_Bchannel(struct bchannel *bch)
+{
+ struct mISDNhead *hh;
+
+ hh = mISDN_HEAD_P(bch->rx_skb);
+ hh->prim = PH_DATA_IND;
+ hh->id = MISDN_ID_ANY;
+ if (bch->rcount >= 64) {
+ dev_kfree_skb(bch->rx_skb);
+ bch->rx_skb = NULL;
+ return;
+ }
+ bch->rcount++;
+ skb_queue_tail(&bch->rqueue, bch->rx_skb);
+ bch->rx_skb = NULL;
+ schedule_event(bch, FLG_RECVQUEUE);
+}
+EXPORT_SYMBOL(recv_Bchannel);
+
+void
+recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
+{
+ skb_queue_tail(&dch->rqueue, skb);
+ schedule_event(dch, FLG_RECVQUEUE);
+}
+EXPORT_SYMBOL(recv_Dchannel_skb);
+
+void
+recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
+{
+ if (bch->rcount >= 64) {
+ dev_kfree_skb(skb);
+ return;
+ }
+ bch->rcount++;
+ skb_queue_tail(&bch->rqueue, skb);
+ schedule_event(bch, FLG_RECVQUEUE);
+}
+EXPORT_SYMBOL(recv_Bchannel_skb);
+
+static void
+confirm_Dsend(struct dchannel *dch)
+{
+ struct sk_buff *skb;
+
+ skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
+ 0, NULL, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_ERR "%s: no skb id %x\n", __func__,
+ mISDN_HEAD_ID(dch->tx_skb));
+ return;
+ }
+ skb_queue_tail(&dch->rqueue, skb);
+ schedule_event(dch, FLG_RECVQUEUE);
+}
+
+int
+get_next_dframe(struct dchannel *dch)
+{
+ dch->tx_idx = 0;
+ dch->tx_skb = skb_dequeue(&dch->squeue);
+ if (dch->tx_skb) {
+ confirm_Dsend(dch);
+ return 1;
+ }
+ dch->tx_skb = NULL;
+ test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+ return 0;
+}
+EXPORT_SYMBOL(get_next_dframe);
+
+void
+confirm_Bsend(struct bchannel *bch)
+{
+ struct sk_buff *skb;
+
+ if (bch->rcount >= 64)
+ return;
+ skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
+ 0, NULL, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_ERR "%s: no skb id %x\n", __func__,
+ mISDN_HEAD_ID(bch->tx_skb));
+ return;
+ }
+ bch->rcount++;
+ skb_queue_tail(&bch->rqueue, skb);
+ schedule_event(bch, FLG_RECVQUEUE);
+}
+EXPORT_SYMBOL(confirm_Bsend);
+
+int
+get_next_bframe(struct bchannel *bch)
+{
+ bch->tx_idx = 0;
+ if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
+ bch->tx_skb = bch->next_skb;
+ if (bch->tx_skb) {
+ bch->next_skb = NULL;
+ test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
+ if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
+ confirm_Bsend(bch); /* not for transparent */
+ return 1;
+ } else {
+ test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
+ printk(KERN_WARNING "B TX_NEXT without skb\n");
+ }
+ }
+ bch->tx_skb = NULL;
+ test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
+ return 0;
+}
+EXPORT_SYMBOL(get_next_bframe);
+
+void
+queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
+{
+ struct mISDNhead *hh;
+
+ if (!skb) {
+ _queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
+ } else {
+ if (ch->peer) {
+ hh = mISDN_HEAD_P(skb);
+ hh->prim = pr;
+ hh->id = id;
+ if (!ch->recv(ch->peer, skb))
+ return;
+ }
+ dev_kfree_skb(skb);
+ }
+}
+EXPORT_SYMBOL(queue_ch_frame);
+
+int
+dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
+{
+	/* check frame size */
+ if (skb->len <= 0) {
+ printk(KERN_WARNING "%s: skb too small\n", __func__);
+ return -EINVAL;
+ }
+ if (skb->len > ch->maxlen) {
+ printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
+ __func__, skb->len, ch->maxlen);
+ return -EINVAL;
+ }
+ /* HW lock must be obtained */
+ if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
+ skb_queue_tail(&ch->squeue, skb);
+ return 0;
+ } else {
+ /* write to fifo */
+ ch->tx_skb = skb;
+ ch->tx_idx = 0;
+ return 1;
+ }
+}
+EXPORT_SYMBOL(dchannel_senddata);
+
+int
+bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
+{
+
+	/* check frame size */
+ if (skb->len <= 0) {
+ printk(KERN_WARNING "%s: skb too small\n", __func__);
+ return -EINVAL;
+ }
+ if (skb->len > ch->maxlen) {
+ printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
+ __func__, skb->len, ch->maxlen);
+ return -EINVAL;
+ }
+ /* HW lock must be obtained */
+ /* check for pending next_skb */
+ if (ch->next_skb) {
+ printk(KERN_WARNING
+ "%s: next_skb exist ERROR (skb->len=%d next_skb->len=%d)\n",
+ __func__, skb->len, ch->next_skb->len);
+ return -EBUSY;
+ }
+ if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
+ test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
+ ch->next_skb = skb;
+ return 0;
+ } else {
+ /* write to fifo */
+ ch->tx_skb = skb;
+ ch->tx_idx = 0;
+ return 1;
+ }
+}
+EXPORT_SYMBOL(bchannel_senddata);
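
bchannel_senddata() and get_next_bframe() above implement a two-deep transmit handoff: one frame owned by the hardware FIFO (FLG_TX_BUSY), at most one further frame parked in next_skb (FLG_TX_NEXT), and -EBUSY beyond that. The model below mimics only that control flow, using plain booleans and strings instead of sk_buffs and atomic bit operations, so it says nothing about the locking the real driver relies on.

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* toy model of the B-channel transmit handoff */
struct bch_model {
	bool tx_busy, tx_next;
	const char *tx, *next;
};

static int senddata(struct bch_model *b, const char *frame)
{
	if (b->next)
		return -1;		/* next frame exists -> -EBUSY */
	if (b->tx_busy) {
		b->tx_next = true;	/* park as the pending frame */
		b->next = frame;
		return 0;
	}
	b->tx_busy = true;		/* FIFO idle: transmit directly */
	b->tx = frame;
	return 1;
}

static void tx_complete(struct bch_model *b)	/* like get_next_bframe() */
{
	if (b->tx_next) {
		b->tx = b->next;
		b->next = NULL;
		b->tx_next = false;
		printf("now sending %s\n", b->tx);
	} else {
		b->tx = NULL;
		b->tx_busy = false;
		printf("transmitter idle\n");
	}
}

int main(void)
{
	struct bch_model b = { 0 };

	printf("A: %d\n", senddata(&b, "A"));	/* 1: written to FIFO  */
	printf("B: %d\n", senddata(&b, "B"));	/* 0: parked as next   */
	printf("C: %d\n", senddata(&b, "C"));	/* -1: busy, rejected  */
	tx_complete(&b);			/* B moves to the FIFO */
	tx_complete(&b);			/* idle again          */
	return 0;
}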
diff --git a/drivers/isdn/mISDN/l1oip.h b/drivers/isdn/mISDN/l1oip.h
new file mode 100644
index 000000000000..a23d575449f6
--- /dev/null
+++ b/drivers/isdn/mISDN/l1oip.h
@@ -0,0 +1,91 @@
+/*
+ * see notice in l1oip.c
+ */
+
+/* debugging */
+#define DEBUG_L1OIP_INIT 0x00010000
+#define DEBUG_L1OIP_SOCKET 0x00020000
+#define DEBUG_L1OIP_MGR 0x00040000
+#define DEBUG_L1OIP_MSG 0x00080000
+
+/* enable to deliberately disorder received bchannel packets for testing
+ * (delivery sequence becomes 2143658798...) */
+/*
+#define REORDER_DEBUG
+*/
+
+/* frames */
+#define L1OIP_MAX_LEN		2048	/* max packet size from l2 */
+#define L1OIP_MAX_PERFRAME 1400 /* max data size in one frame */
+
+
+/* timers */
+#define L1OIP_KEEPALIVE 15
+#define L1OIP_TIMEOUT 65
+
+
+/* socket */
+#define L1OIP_DEFAULTPORT 931
+
+
+/* channel structure */
+struct l1oip_chan {
+ struct dchannel *dch;
+ struct bchannel *bch;
+ u32 tx_counter; /* counts xmit bytes/packets */
+ u32 rx_counter; /* counts recv bytes/packets */
+ u32 codecstate; /* used by codec to save data */
+#ifdef REORDER_DEBUG
+ int disorder_flag;
+ struct sk_buff *disorder_skb;
+ u32 disorder_cnt;
+#endif
+};
+
+
+/* card structure */
+struct l1oip {
+ struct list_head list;
+
+ /* card */
+ int registered; /* if registered with mISDN */
+ char name[MISDN_MAX_IDLEN];
+ int idx; /* card index */
+ int pri; /* 1=pri, 0=bri */
+ int d_idx; /* current dchannel number */
+ int b_num; /* number of bchannels */
+ u32 id; /* id of connection */
+ int ondemand; /* if transmis. is on demand */
+ int bundle; /* bundle channels in one frm */
+ int codec; /* codec to use for transmis. */
+ int limit; /* limit number of bchannels */
+
+ /* timer */
+ struct timer_list keep_tl;
+ struct timer_list timeout_tl;
+ int timeout_on;
+ struct work_struct workq;
+
+ /* socket */
+ struct socket *socket; /* if set, socket is created */
+ struct completion socket_complete;/* completion of sock thread */
+ struct task_struct *socket_thread;
+ spinlock_t socket_lock; /* access sock outside thread */
+ u32 remoteip; /* if all set, ip is assigned */
+ u16 localport; /* must always be set */
+ u16 remoteport; /* must always be set */
+ struct sockaddr_in sin_local; /* local socket name */
+ struct sockaddr_in sin_remote; /* remote socket name */
+ struct msghdr sendmsg; /* ip message to send */
+ struct iovec sendiov; /* iov for message */
+
+ /* frame */
+ struct l1oip_chan chan[128]; /* channel instances */
+};
+
+extern int l1oip_law_to_4bit(u8 *data, int len, u8 *result, u32 *state);
+extern int l1oip_4bit_to_law(u8 *data, int len, u8 *result);
+extern int l1oip_alaw_to_ulaw(u8 *data, int len, u8 *result);
+extern int l1oip_ulaw_to_alaw(u8 *data, int len, u8 *result);
+extern void l1oip_4bit_free(void);
+extern int l1oip_4bit_alloc(int ulaw);
+
diff --git a/drivers/isdn/mISDN/l1oip_codec.c b/drivers/isdn/mISDN/l1oip_codec.c
new file mode 100644
index 000000000000..a2dc4570ef43
--- /dev/null
+++ b/drivers/isdn/mISDN/l1oip_codec.c
@@ -0,0 +1,374 @@
+/*
+
+ * l1oip_codec.c generic codec using lookup table
+ * -> conversion from a-Law to u-Law
+ * -> conversion from u-Law to a-Law
+ * -> compression by reducing the sample resolution to 4 bits
+ *
+ * NOTE: It is not compatible with any standard codec like ADPCM.
+ *
+ * Author Andreas Eversberg (jolly@eversberg.eu)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ */
+
+/*
+
+How the codec works:
+--------------------
+
+The volume is increased to make better use of the dynamic range of the
+audio signal. Each sample is converted to a-LAW with only 16 steps of
+level resolution. A pair of two samples is stored in one byte.
+
+The first sample is stored in the upper four bits, the second sample is
+stored in the lower four bits.
+
+To speed up compression and decompression, two lookup tables are formed:
+
+- a 16 bit index for two law-encoded samples, giving the 8 bit compressed result.
+- an 8 bit index for one compressed byte, giving the 16 bit decompressed result.
+
+NOTE: The bytes are handled as they are law-encoded.
+
+*/
+
+#include <linux/vmalloc.h>
+#include <linux/mISDNif.h>
+#include "core.h"
+
+/* definitions of codec. don't use calculations, code may run slower. */
+
+static u8 *table_com;
+static u16 *table_dec;
+
+
+/* alaw -> ulaw */
+static u8 alaw_to_ulaw[256] =
+{
+ 0xab, 0x2b, 0xe3, 0x63, 0x8b, 0x0b, 0xc9, 0x49,
+ 0xba, 0x3a, 0xf6, 0x76, 0x9b, 0x1b, 0xd7, 0x57,
+ 0xa3, 0x23, 0xdd, 0x5d, 0x83, 0x03, 0xc1, 0x41,
+ 0xb2, 0x32, 0xeb, 0x6b, 0x93, 0x13, 0xcf, 0x4f,
+ 0xaf, 0x2f, 0xe7, 0x67, 0x8f, 0x0f, 0xcd, 0x4d,
+ 0xbe, 0x3e, 0xfe, 0x7e, 0x9f, 0x1f, 0xdb, 0x5b,
+ 0xa7, 0x27, 0xdf, 0x5f, 0x87, 0x07, 0xc5, 0x45,
+ 0xb6, 0x36, 0xef, 0x6f, 0x97, 0x17, 0xd3, 0x53,
+ 0xa9, 0x29, 0xe1, 0x61, 0x89, 0x09, 0xc7, 0x47,
+ 0xb8, 0x38, 0xf2, 0x72, 0x99, 0x19, 0xd5, 0x55,
+ 0xa1, 0x21, 0xdc, 0x5c, 0x81, 0x01, 0xbf, 0x3f,
+ 0xb0, 0x30, 0xe9, 0x69, 0x91, 0x11, 0xce, 0x4e,
+ 0xad, 0x2d, 0xe5, 0x65, 0x8d, 0x0d, 0xcb, 0x4b,
+ 0xbc, 0x3c, 0xfa, 0x7a, 0x9d, 0x1d, 0xd9, 0x59,
+ 0xa5, 0x25, 0xde, 0x5e, 0x85, 0x05, 0xc3, 0x43,
+ 0xb4, 0x34, 0xed, 0x6d, 0x95, 0x15, 0xd1, 0x51,
+ 0xac, 0x2c, 0xe4, 0x64, 0x8c, 0x0c, 0xca, 0x4a,
+ 0xbb, 0x3b, 0xf8, 0x78, 0x9c, 0x1c, 0xd8, 0x58,
+ 0xa4, 0x24, 0xde, 0x5e, 0x84, 0x04, 0xc2, 0x42,
+ 0xb3, 0x33, 0xec, 0x6c, 0x94, 0x14, 0xd0, 0x50,
+ 0xb0, 0x30, 0xe8, 0x68, 0x90, 0x10, 0xce, 0x4e,
+ 0xbf, 0x3f, 0xfe, 0x7e, 0xa0, 0x20, 0xdc, 0x5c,
+ 0xa8, 0x28, 0xe0, 0x60, 0x88, 0x08, 0xc6, 0x46,
+ 0xb7, 0x37, 0xf0, 0x70, 0x98, 0x18, 0xd4, 0x54,
+ 0xaa, 0x2a, 0xe2, 0x62, 0x8a, 0x0a, 0xc8, 0x48,
+ 0xb9, 0x39, 0xf4, 0x74, 0x9a, 0x1a, 0xd6, 0x56,
+ 0xa2, 0x22, 0xdd, 0x5d, 0x82, 0x02, 0xc0, 0x40,
+ 0xb1, 0x31, 0xea, 0x6a, 0x92, 0x12, 0xcf, 0x4f,
+ 0xae, 0x2e, 0xe6, 0x66, 0x8e, 0x0e, 0xcc, 0x4c,
+ 0xbd, 0x3d, 0xfc, 0x7c, 0x9e, 0x1e, 0xda, 0x5a,
+ 0xa6, 0x26, 0xdf, 0x5f, 0x86, 0x06, 0xc4, 0x44,
+ 0xb5, 0x35, 0xee, 0x6e, 0x96, 0x16, 0xd2, 0x52
+};
+
+/* ulaw -> alaw */
+static u8 ulaw_to_alaw[256] =
+{
+ 0xab, 0x55, 0xd5, 0x15, 0x95, 0x75, 0xf5, 0x35,
+ 0xb5, 0x45, 0xc5, 0x05, 0x85, 0x65, 0xe5, 0x25,
+ 0xa5, 0x5d, 0xdd, 0x1d, 0x9d, 0x7d, 0xfd, 0x3d,
+ 0xbd, 0x4d, 0xcd, 0x0d, 0x8d, 0x6d, 0xed, 0x2d,
+ 0xad, 0x51, 0xd1, 0x11, 0x91, 0x71, 0xf1, 0x31,
+ 0xb1, 0x41, 0xc1, 0x01, 0x81, 0x61, 0xe1, 0x21,
+ 0x59, 0xd9, 0x19, 0x99, 0x79, 0xf9, 0x39, 0xb9,
+ 0x49, 0xc9, 0x09, 0x89, 0x69, 0xe9, 0x29, 0xa9,
+ 0xd7, 0x17, 0x97, 0x77, 0xf7, 0x37, 0xb7, 0x47,
+ 0xc7, 0x07, 0x87, 0x67, 0xe7, 0x27, 0xa7, 0xdf,
+ 0x9f, 0x7f, 0xff, 0x3f, 0xbf, 0x4f, 0xcf, 0x0f,
+ 0x8f, 0x6f, 0xef, 0x2f, 0x53, 0x13, 0x73, 0x33,
+ 0xb3, 0x43, 0xc3, 0x03, 0x83, 0x63, 0xe3, 0x23,
+ 0xa3, 0x5b, 0xdb, 0x1b, 0x9b, 0x7b, 0xfb, 0x3b,
+ 0xbb, 0xbb, 0x4b, 0x4b, 0xcb, 0xcb, 0x0b, 0x0b,
+ 0x8b, 0x8b, 0x6b, 0x6b, 0xeb, 0xeb, 0x2b, 0x2b,
+ 0xab, 0x54, 0xd4, 0x14, 0x94, 0x74, 0xf4, 0x34,
+ 0xb4, 0x44, 0xc4, 0x04, 0x84, 0x64, 0xe4, 0x24,
+ 0xa4, 0x5c, 0xdc, 0x1c, 0x9c, 0x7c, 0xfc, 0x3c,
+ 0xbc, 0x4c, 0xcc, 0x0c, 0x8c, 0x6c, 0xec, 0x2c,
+ 0xac, 0x50, 0xd0, 0x10, 0x90, 0x70, 0xf0, 0x30,
+ 0xb0, 0x40, 0xc0, 0x00, 0x80, 0x60, 0xe0, 0x20,
+ 0x58, 0xd8, 0x18, 0x98, 0x78, 0xf8, 0x38, 0xb8,
+ 0x48, 0xc8, 0x08, 0x88, 0x68, 0xe8, 0x28, 0xa8,
+ 0xd6, 0x16, 0x96, 0x76, 0xf6, 0x36, 0xb6, 0x46,
+ 0xc6, 0x06, 0x86, 0x66, 0xe6, 0x26, 0xa6, 0xde,
+ 0x9e, 0x7e, 0xfe, 0x3e, 0xbe, 0x4e, 0xce, 0x0e,
+ 0x8e, 0x6e, 0xee, 0x2e, 0x52, 0x12, 0x72, 0x32,
+ 0xb2, 0x42, 0xc2, 0x02, 0x82, 0x62, 0xe2, 0x22,
+ 0xa2, 0x5a, 0xda, 0x1a, 0x9a, 0x7a, 0xfa, 0x3a,
+ 0xba, 0xba, 0x4a, 0x4a, 0xca, 0xca, 0x0a, 0x0a,
+ 0x8a, 0x8a, 0x6a, 0x6a, 0xea, 0xea, 0x2a, 0x2a
+};
+
+/* alaw -> 4bit compression */
+static u8 alaw_to_4bit[256] = {
+ 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
+ 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
+ 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
+ 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
+ 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
+ 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
+ 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
+ 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
+ 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
+ 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
+ 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0d, 0x02,
+ 0x0e, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
+ 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
+ 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
+ 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
+ 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
+ 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
+ 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
+ 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
+ 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
+ 0x0e, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
+ 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x01, 0x0a, 0x05,
+ 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
+ 0x0d, 0x02, 0x09, 0x07, 0x0f, 0x00, 0x0b, 0x04,
+ 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
+ 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
+ 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
+ 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
+ 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
+ 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
+ 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
+ 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
+};
+
+/* 4bit -> alaw decompression */
+static u8 _4bit_to_alaw[16] = {
+ 0x5d, 0x51, 0xd9, 0xd7, 0x5f, 0x53, 0xa3, 0x4b,
+ 0x2a, 0x3a, 0x22, 0x2e, 0x26, 0x56, 0x20, 0x2c,
+};
+
+/* ulaw -> 4bit compression */
+static u8 ulaw_to_4bit[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
+ 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x08,
+ 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
+ 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
+ 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
+ 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
+ 0x0f, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
+ 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
+ 0x0e, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d,
+ 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0a, 0x0a, 0x0a, 0x0a,
+ 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
+ 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,
+ 0x09, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+};
+
+/* 4bit -> ulaw decompression */
+static u8 _4bit_to_ulaw[16] = {
+ 0x11, 0x21, 0x31, 0x40, 0x4e, 0x5c, 0x68, 0x71,
+ 0xfe, 0xef, 0xe7, 0xdb, 0xcd, 0xbf, 0xaf, 0x9f,
+};
+
+
+/*
+ * Compresses data into the result buffer.
+ * The result buffer must be at least half the size of the input buffer.
+ * The number of samples must also be even!
+ */
+int
+l1oip_law_to_4bit(u8 *data, int len, u8 *result, u32 *state)
+{
+ int ii, i = 0, o = 0;
+
+ if (!len)
+ return 0;
+
+ /* send saved byte and first input byte */
+ if (*state) {
+ *result++ = table_com[(((*state)<<8)&0xff00) | (*data++)];
+ len--;
+ o++;
+ }
+
+ ii = len >> 1;
+
+ while (i < ii) {
+ *result++ = table_com[(data[0]<<8) | (data[1])];
+ data += 2;
+ i++;
+ o++;
+ }
+
+ /* if len is odd, we save the last byte for the next call */
+ if (len & 1)
+ *state = 0x100 + *data;
+ else
+ *state = 0;
+
+ return o;
+}
+
+/* Decompresses data into the result buffer
+ * The result buffer must hold the number of samples in the packet
+ * (2 * input length). The number of samples in the result is even!
+ */
+int
+l1oip_4bit_to_law(u8 *data, int len, u8 *result)
+{
+ int i = 0;
+ u16 r;
+
+ while (i < len) {
+ r = table_dec[*data++];
+ *result++ = r>>8;
+ *result++ = r;
+ i++;
+ }
+
+ return len << 1;
+}
+
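+/*
+ * Minimal usage sketch (illustrative only, assuming l1oip_4bit_alloc()
+ * has been called): two law samples are packed into one byte and
+ * expanded back; the caller keeps a per-channel state word so an odd
+ * trailing sample can be carried over to the next call.
+ *
+ *	u8 law[2] = { 0x2a, 0x2a }, packed[1], out[2];
+ *	u32 state = 0;
+ *	l1oip_law_to_4bit(law, 2, packed, &state);	returns 1
+ *	l1oip_4bit_to_law(packed, 1, out);		returns 2
+ */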
+
+/*
+ * law conversion
+ */
+int
+l1oip_alaw_to_ulaw(u8 *data, int len, u8 *result)
+{
+ int i = 0;
+
+ while (i < len) {
+ *result++ = alaw_to_ulaw[*data++];
+ i++;
+ }
+
+ return len;
+}
+
+int
+l1oip_ulaw_to_alaw(u8 *data, int len, u8 *result)
+{
+ int i = 0;
+
+ while (i < len) {
+ *result++ = ulaw_to_alaw[*data++];
+ i++;
+ }
+
+ return len;
+}
+
+
+/*
+ * generate/free compression and decompression table
+ */
+void
+l1oip_4bit_free(void)
+{
+ if (table_dec)
+ vfree(table_dec);
+ if (table_com)
+ vfree(table_com);
+ table_com = NULL;
+ table_dec = NULL;
+}
+
+int
+l1oip_4bit_alloc(int ulaw)
+{
+ int i1, i2, c, sample;
+
+ /* in case it is called again */
+ if (table_dec)
+ return 0;
+
+ /* alloc conversion tables */
+ table_com = vmalloc(65536);
+ table_dec = vmalloc(512);
+ if (!table_com || !table_dec) {
+ l1oip_4bit_free();
+ return -ENOMEM;
+ }
+ memset(table_com, 0, 65536);
+ memset(table_dec, 0, 512);
+ /* generate compression table */
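+ /* each 16 bit index holds two law samples (first sample in the high
+  * byte); the table packs both 4 bit codes into one byte, first sample
+  * in the high nibble */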
+ i1 = 0;
+ while (i1 < 256) {
+ if (ulaw)
+ c = ulaw_to_4bit[i1];
+ else
+ c = alaw_to_4bit[i1];
+ i2 = 0;
+ while (i2 < 256) {
+ table_com[(i1<<8) | i2] |= (c<<4);
+ table_com[(i2<<8) | i1] |= c;
+ i2++;
+ }
+ i1++;
+ }
+
+ /* generate decompression table */
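+ /* each byte holding two 4 bit codes expands to two law samples; the
+  * first sample ends up in the high byte of the 16 bit table entry */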
+ i1 = 0;
+ while (i1 < 16) {
+ if (ulaw)
+ sample = _4bit_to_ulaw[i1];
+ else
+ sample = _4bit_to_alaw[i1];
+ i2 = 0;
+ while (i2 < 16) {
+ table_dec[(i1<<4) | i2] |= (sample<<8);
+ table_dec[(i2<<4) | i1] |= sample;
+ i2++;
+ }
+ i1++;
+ }
+
+ return 0;
+}
+
+
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
new file mode 100644
index 000000000000..155b99780c4f
--- /dev/null
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -0,0 +1,1518 @@
+/*
+ *
+ * l1oip.c low level driver for tunneling layer 1 over IP
+ *
+ * NOTE: It is compatible with neither TDMoIP nor "ISDN over IP".
+ *
+ * Author Andreas Eversberg (jolly@eversberg.eu)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+/* module parameters:
+ * type:
+ Value 1 = BRI
+ Value 2 = PRI
+ Value 3 = BRI (multi channel frame, not supported yet)
+ Value 4 = PRI (multi channel frame, not supported yet)
+ A multi channel frame reduces overhead to a single frame for all
+ b-channels, but increases delay.
+ (NOTE: Multi channel frames are not implemented yet.)
+
+ * codec:
+ Value 0 = transparent (default)
+ Value 1 = transfer ALAW
+ Value 2 = transfer ULAW
+ Value 3 = transfer generic 4 bit compression.
+
+ * ulaw:
+ 0 = we use a-Law (default)
+ 1 = we use u-Law
+
+ * limit:
+ limitation of B-channels to control bandwidth (1...126)
+ BRI: 1 or 2
+ PRI: 1-30, 31-126 (126, because the D-channel is not counted here)
+ Also, limited resources are used for the stack, resulting in fewer channels.
+ It is possible to have more than 30 channels in PRI mode, but this must
+ be supported by the application.
+
+ * ip:
+ byte representation of remote ip address (127.0.0.1 -> 127,0,0,1)
+ If not given, or given as four zeros, no remote address is set.
+ For multiple interfaces, concatenate IP addresses. (127,0,0,1,127,0,0,1)
+
+ * port:
+ port number (local interface)
+ If not given or 0, port 931 is used for the first instance, 932 for the next...
+ For multiple interfaces, different ports must be given.
+
+ * remoteport:
+ port number (remote interface)
+ If not given or 0, the remote port equals the local port.
+ For multiple interfaces on the same site, different ports must be given.
+
+ * ondemand:
+ 0 = fixed (always transmit packets, even when the remote side timed out)
+ 1 = on demand (only transmit packets when the remote side is detected)
+ the default is 0
+ NOTE: ID must also be set for on demand.
+
+ * id:
+ optional value to identify frames. This value must be equal on both
+ peers and should be random. If omitted or 0, no ID is transmitted.
+
+ * debug:
+ NOTE: only one debug value must be given for all cards
+ enable debugging (see l1oip.h for debug options)
+
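+ Example (illustrative values): loading one BRI interface with the
+ default codec, the default port and a random id could look like:
+	modprobe l1oip type=1 codec=0 ip=192,168,0,2 id=0x2a9f3e71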
+
+Special mISDN controls:
+
+ op = MISDN_CTRL_SETPEER*
+ p1 = bytes 0-3 : remote IP address in network order (left element first)
+ p2 = bytes 1-2 : remote port in network order (high byte first)
+ optional:
+ p2 = bytes 3-4 : local port in network order (high byte first)
+
+ op = MISDN_CTRL_UNSETPEER*
+
+ * Use l1oipctrl for conveniently setting or removing the IP address.
+ (Layer 1 Over IP CTRL)
+
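+ A minimal sketch of how such a control request is filled (example
+ values, matching channel_dctrl() below):
+	cq.op = MISDN_CTRL_SETPEER;
+	cq.p1 = (192 << 24) | (168 << 16) | (0 << 8) | 2;	remote 192.168.0.2
+	cq.p2 = 931 | (932 << 16);	remote port 931, local port 932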
+
+L1oIP-Protocol
+--------------
+
+Frame Header:
+
+ 7 6 5 4 3 2 1 0
++---------------+
+|Ver|T|I|Coding |
++---------------+
+| ID byte 3 * |
++---------------+
+| ID byte 2 * |
++---------------+
+| ID byte 1 * |
++---------------+
+| ID byte 0 * |
++---------------+
+|M| Channel |
++---------------+
+| Length * |
++---------------+
+| Time Base MSB |
++---------------+
+| Time Base LSB |
++---------------+
+| Data.... |
+
+...
+
+| |
++---------------+
+|M| Channel |
++---------------+
+| Length * |
++---------------+
+| Time Base MSB |
++---------------+
+| Time Base LSB |
++---------------+
+| Data.... |
+
+...
+
+
+* Only included in some cases.
+
+- Ver = Version
+If the version does not match, the frame must be ignored.
+
+- T = Type of interface
+Must be 0 for S0 or 1 for E1.
+
+- I = Id present
+If bit is set, four ID bytes are included in frame.
+
+- ID = Connection ID
+Additional ID to prevent Denial of Service attacks. It also prevents hijacking
+of connections with dynamic IP. The ID should be random and must not be 0.
+
+- Coding = Type of codec
+Must be 0 for no transcoding. Also for D-channel and other HDLC frames.
+ 1 and 2 are reserved for explicit use of the a-LAW or u-LAW codec.
+ 3 is used for generic table compressor.
+
+- M = More channels to come. If this flag is 1, the following byte contains
+the length of the channel data. After the data block, the next channel will
+be defined. The flag for the last channel block (or if only one channel is
+transmitted) must be 0, and no length is given.
+
+- Channel = Channel number
+0 reserved
+1-3 channel data for S0 (3 is D-channel)
+1-31 channel data for E1 (16 is D-channel)
+32-127 channel data for extended E1 (16 is D-channel)
+
+- The length is used if the M-flag is 1. It is used to find the next channel
+inside the frame.
+NOTE: A value of 0 equals 256 bytes of data.
+ -> For larger data blocks, a single frame must be used.
+ -> For larger streams, a single frame or multiple blocks with the same
+    channel ID must be used.
+
+- Time Base = Timestamp of first sample in frame
+The "Time Base" is used to rearange packets and to detect packet loss.
+The 16 bits are sent in network order (MSB first) and count 1/8000 th of a
+second. This causes a wrap arround each 8,192 seconds. There is no requirement
+for the initial "Time Base", but 0 should be used for the first packet.
+In case of HDLC data, this timestamp counts the packet or byte number.
+
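+As a minimal illustration (matching l1oip_socket_parse() below), the first
+header byte is decoded as:
+	version = buf[0] >> 6;		Ver
+	pri     = (buf[0] & 0x20) != 0;	T (0 = S0, 1 = E1)
+	has_id  = (buf[0] & 0x10) != 0;	I
+	coding  = buf[0] & 0x0f;	Coding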
+
+Two Timers:
+
+After initialisation, a timer of 15 seconds is started. Whenever a packet is
+transmitted, the timer is reset to 15 seconds again. If the timer expires, an
+empty packet is transmitted. This keeps the connection alive.
+
+When a valid packet is received, a timer of 65 seconds is started. The
+interface becomes ACTIVE. If the timer expires, the interface becomes INACTIVE.
+
+
+Dynamic IP handling:
+
+To allow dynamic IP, the ID must be non-zero. In this case, any packet with the
+correct port number and ID will be accepted. If the remote side changes its IP,
+the new IP is used for all transmitted packets until it changes again.
+
+
+On Demand:
+
+If the ondemand parameter is given, the remote IP is set to 0 on timeout.
+This will stop keepalive traffic to the remote. If the remote is online again,
+traffic will continue to the remote address. This is useful for road warriors.
+This feature only works with an ID set, otherwise it is highly insecure.
+
+
+Socket and Thread
+-----------------
+
+The complete socket opening and closing is done by a thread.
+Once the thread has opened a socket, the hc->socket descriptor is set. Whenever
+a packet shall be sent to the socket, hc->socket must be checked for not being
+NULL. To prevent changes of the socket descriptor, hc->socket_lock must be used.
+To change the socket, a recall of l1oip_socket_open() will safely kill the
+socket process and create a new one.
+
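+In short, the send path follows this pattern (sketch of l1oip_socket_send()):
+
+	spin_lock(&hc->socket_lock);
+	socket = hc->socket;		NULL means closed or currently in use
+	hc->socket = NULL;		seize it
+	spin_unlock(&hc->socket_lock);
+	sock_sendmsg(socket, ...);
+	hc->socket = socket;		give it back, no locking required
+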
+*/
+
+#define L1OIP_VERSION 0 /* 0...3 */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mISDNif.h>
+#include <linux/mISDNhw.h>
+#include <linux/mISDNdsp.h>
+#include <linux/init.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <net/sock.h>
+#include "core.h"
+#include "l1oip.h"
+
+static const char *l1oip_revision = "2.00";
+
+static int l1oip_cnt;
+static spinlock_t l1oip_lock;
+static struct list_head l1oip_ilist;
+
+#define MAX_CARDS 16
+static u_int type[MAX_CARDS];
+static u_int codec[MAX_CARDS];
+static u_int ip[MAX_CARDS*4];
+static u_int port[MAX_CARDS];
+static u_int remoteport[MAX_CARDS];
+static u_int ondemand[MAX_CARDS];
+static u_int limit[MAX_CARDS];
+static u_int id[MAX_CARDS];
+static int debug;
+static int ulaw;
+
+MODULE_AUTHOR("Andreas Eversberg");
+MODULE_LICENSE("GPL");
+module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR);
+module_param_array(codec, uint, NULL, S_IRUGO | S_IWUSR);
+module_param_array(ip, uint, NULL, S_IRUGO | S_IWUSR);
+module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR);
+module_param_array(remoteport, uint, NULL, S_IRUGO | S_IWUSR);
+module_param_array(ondemand, uint, NULL, S_IRUGO | S_IWUSR);
+module_param_array(limit, uint, NULL, S_IRUGO | S_IWUSR);
+module_param_array(id, uint, NULL, S_IRUGO | S_IWUSR);
+module_param(ulaw, uint, S_IRUGO | S_IWUSR);
+module_param(debug, uint, S_IRUGO | S_IWUSR);
+
+/*
+ * send a frame via socket, if open and restart timer
+ */
+static int
+l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
+ u16 timebase, u8 *buf, int len)
+{
+ u8 *p;
+ int multi = 0;
+ u8 frame[len+32];
+ struct socket *socket = NULL;
+ mm_segment_t oldfs;
+
+ if (debug & DEBUG_L1OIP_MSG)
+ printk(KERN_DEBUG "%s: sending data to socket (len = %d)\n",
+ __func__, len);
+
+ p = frame;
+
+ /* restart timer */
+ if ((int)(hc->keep_tl.expires-jiffies) < 5*HZ) {
+ del_timer(&hc->keep_tl);
+ hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE*HZ;
+ add_timer(&hc->keep_tl);
+ } else
+ hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE*HZ;
+
+ if (debug & DEBUG_L1OIP_MSG)
+ printk(KERN_DEBUG "%s: resetting timer\n", __func__);
+
+ /* drop if we have no remote ip or port */
+ if (!hc->sin_remote.sin_addr.s_addr || !hc->sin_remote.sin_port) {
+ if (debug & DEBUG_L1OIP_MSG)
+ printk(KERN_DEBUG "%s: dropping frame, because remote "
+ "IP is not set.\n", __func__);
+ return len;
+ }
+
+ /* assemble frame */
+ *p++ = (L1OIP_VERSION<<6) /* version and coding */
+ | (hc->pri?0x20:0x00) /* type */
+ | (hc->id?0x10:0x00) /* id */
+ | localcodec;
+ if (hc->id) {
+ *p++ = hc->id>>24; /* id */
+ *p++ = hc->id>>16;
+ *p++ = hc->id>>8;
+ *p++ = hc->id;
+ }
+ *p++ = ((multi == 1)?0x80:0x00) + channel; /* m-flag, channel */
+ if (multi == 1)
+ *p++ = len; /* length */
+ *p++ = timebase>>8; /* time base */
+ *p++ = timebase;
+
+ if (buf && len) { /* add data to frame */
+ if (localcodec == 1 && ulaw)
+ l1oip_ulaw_to_alaw(buf, len, p);
+ else if (localcodec == 2 && !ulaw)
+ l1oip_alaw_to_ulaw(buf, len, p);
+ else if (localcodec == 3)
+ len = l1oip_law_to_4bit(buf, len, p,
+ &hc->chan[channel].codecstate);
+ else
+ memcpy(p, buf, len);
+ }
+ len += p - frame;
+
+ /* check for socket in safe condition */
+ spin_lock(&hc->socket_lock);
+ if (!hc->socket) {
+ spin_unlock(&hc->socket_lock);
+ return 0;
+ }
+ /* seize socket */
+ socket = hc->socket;
+ hc->socket = NULL;
+ spin_unlock(&hc->socket_lock);
+ /* send packet */
+ if (debug & DEBUG_L1OIP_MSG)
+ printk(KERN_DEBUG "%s: sending packet to socket (len "
+ "= %d)\n", __func__, len);
+ hc->sendiov.iov_base = frame;
+ hc->sendiov.iov_len = len;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ len = sock_sendmsg(socket, &hc->sendmsg, len);
+ set_fs(oldfs);
+ /* give socket back */
+ hc->socket = socket; /* no locking required */
+
+ return len;
+}
+
+
+/*
+ * receive channel data from socket
+ */
+static void
+l1oip_socket_recv(struct l1oip *hc, u8 remotecodec, u8 channel, u16 timebase,
+ u8 *buf, int len)
+{
+ struct sk_buff *nskb;
+ struct bchannel *bch;
+ struct dchannel *dch;
+ u8 *p;
+ u32 rx_counter;
+
+ if (len == 0) {
+ if (debug & DEBUG_L1OIP_MSG)
+ printk(KERN_DEBUG "%s: received empty keepalive data, "
+ "ignoring\n", __func__);
+ return;
+ }
+
+ if (debug & DEBUG_L1OIP_MSG)
+ printk(KERN_DEBUG "%s: received data, sending to mISDN (%d)\n",
+ __func__, len);
+
+ if (channel < 1 || channel > 127) {
+ printk(KERN_WARNING "%s: packet error - channel %d out of "
+ "range\n", __func__, channel);
+ return;
+ }
+ dch = hc->chan[channel].dch;
+ bch = hc->chan[channel].bch;
+ if (!dch && !bch) {
+ printk(KERN_WARNING "%s: packet error - channel %d not in "
+ "stack\n", __func__, channel);
+ return;
+ }
+
+ /* prepare message */
+ nskb = mI_alloc_skb((remotecodec == 3)?(len<<1):len, GFP_ATOMIC);
+ if (!nskb) {
+ printk(KERN_ERR "%s: No mem for skb.\n", __func__);
+ return;
+ }
+ p = skb_put(nskb, (remotecodec == 3)?(len<<1):len);
+
+ if (remotecodec == 1 && ulaw)
+ l1oip_alaw_to_ulaw(buf, len, p);
+ else if (remotecodec == 2 && !ulaw)
+ l1oip_ulaw_to_alaw(buf, len, p);
+ else if (remotecodec == 3)
+ len = l1oip_4bit_to_law(buf, len, p);
+ else
+ memcpy(p, buf, len);
+
+ /* send message up */
+ if (dch && len >= 2) {
+ dch->rx_skb = nskb;
+ recv_Dchannel(dch);
+ }
+ if (bch) {
+ /* expand 16 bit sequence number to 32 bit sequence number */
+ rx_counter = hc->chan[channel].rx_counter;
+ if (((s16)(timebase - rx_counter)) >= 0) {
+ /* time has changed forward */
+ if (timebase >= (rx_counter & 0xffff))
+ rx_counter =
+ (rx_counter & 0xffff0000) | timebase;
+ else
+ rx_counter = ((rx_counter & 0xffff0000)+0x10000)
+ | timebase;
+ } else {
+ /* time has changed backwards */
+ if (timebase < (rx_counter & 0xffff))
+ rx_counter =
+ (rx_counter & 0xffff0000) | timebase;
+ else
+ rx_counter = ((rx_counter & 0xffff0000)-0x10000)
+ | timebase;
+ }
+ hc->chan[channel].rx_counter = rx_counter;
+
+#ifdef REORDER_DEBUG
+ if (hc->chan[channel].disorder_flag) {
+ struct sk_buff *skb;
+ int cnt;
+ skb = hc->chan[channel].disorder_skb;
+ hc->chan[channel].disorder_skb = nskb;
+ nskb = skb;
+ cnt = hc->chan[channel].disorder_cnt;
+ hc->chan[channel].disorder_cnt = rx_counter;
+ rx_counter = cnt;
+ }
+ hc->chan[channel].disorder_flag ^= 1;
+ if (nskb)
+#endif
+ queue_ch_frame(&bch->ch, PH_DATA_IND, rx_counter, nskb);
+ }
+}
+
+
+/*
+ * parse frame and extract channel data
+ */
+static void
+l1oip_socket_parse(struct l1oip *hc, struct sockaddr_in *sin, u8 *buf, int len)
+{
+ u32 id;
+ u8 channel;
+ u8 remotecodec;
+ u16 timebase;
+ int m, mlen;
+ int len_start = len; /* initial frame length */
+ struct dchannel *dch = hc->chan[hc->d_idx].dch;
+
+ if (debug & DEBUG_L1OIP_MSG)
+ printk(KERN_DEBUG "%s: received frame, parsing... (%d)\n",
+ __func__, len);
+
+ /* check length */
+ if (len < 1+1+2) {
+ printk(KERN_WARNING "%s: packet error - length %d below "
+ "4 bytes\n", __func__, len);
+ return;
+ }
+
+ /* check version */
+ if (((*buf)>>6) != L1OIP_VERSION) {
+ printk(KERN_WARNING "%s: packet error - unknown version %d\n",
+ __func__, buf[0]>>6);
+ return;
+ }
+
+ /* check type */
+ if (((*buf)&0x20) && !hc->pri) {
+ printk(KERN_WARNING "%s: packet error - received E1 packet "
+ "on S0 interface\n", __func__);
+ return;
+ }
+ if (!((*buf)&0x20) && hc->pri) {
+ printk(KERN_WARNING "%s: packet error - received S0 packet "
+ "on E1 interface\n", __func__);
+ return;
+ }
+
+ /* get id flag */
+ id = (*buf>>4)&1;
+
+ /* check coding */
+ remotecodec = (*buf) & 0x0f;
+ if (remotecodec > 3) {
+ printk(KERN_WARNING "%s: packet error - remotecodec %d "
+ "unsupported\n", __func__, remotecodec);
+ return;
+ }
+ buf++;
+ len--;
+
+ /* check id */
+ if (id) {
+ if (!hc->id) {
+ printk(KERN_WARNING "%s: packet error - packet has id "
+ "0x%x, but we have not\n", __func__, id);
+ return;
+ }
+ if (len < 4) {
+ printk(KERN_WARNING "%s: packet error - packet too "
+ "short for ID value\n", __func__);
+ return;
+ }
+ id = (*buf++) << 24;
+ id += (*buf++) << 16;
+ id += (*buf++) << 8;
+ id += (*buf++);
+ len -= 4;
+
+ if (id != hc->id) {
+ printk(KERN_WARNING "%s: packet error - ID mismatch, "
+ "got 0x%x, we 0x%x\n",
+ __func__, id, hc->id);
+ return;
+ }
+ } else {
+ if (hc->id) {
+ printk(KERN_WARNING "%s: packet error - packet has no "
+ "ID, but we have\n", __func__);
+ return;
+ }
+ }
+
+multiframe:
+ if (len < 1) {
+ printk(KERN_WARNING "%s: packet error - packet too short, "
+ "channel expected at position %d.\n",
+ __func__, len-len_start+1);
+ return;
+ }
+
+ /* get channel and multiframe flag */
+ channel = *buf&0x7f;
+ m = *buf >> 7;
+ buf++;
+ len--;
+
+ /* check length on multiframe */
+ if (m) {
+ if (len < 1) {
+ printk(KERN_WARNING "%s: packet error - packet too "
+ "short, length expected at position %d.\n",
+ __func__, len_start-len-1);
+ return;
+ }
+
+ mlen = *buf++;
+ len--;
+ if (mlen == 0)
+ mlen = 256;
+ if (len < mlen+3) {
+ printk(KERN_WARNING "%s: packet error - length %d at "
+ "position %d exceeds total length %d.\n",
+ __func__, mlen, len_start-len-1, len_start);
+ return;
+ }
+ if (len == mlen+3) {
+ printk(KERN_WARNING "%s: packet error - length %d at "
+ "position %d will not allow additional "
+ "packet.\n",
+ __func__, mlen, len_start-len+1);
+ return;
+ }
+ } else
+ mlen = len-2; /* single frame, subtract timebase */
+
+ if (len < 2) {
+ printk(KERN_WARNING "%s: packet error - packet too short, time "
+ "base expected at position %d.\n",
+ __func__, len-len_start+1);
+ return;
+ }
+
+ /* get time base */
+ timebase = (*buf++) << 8;
+ timebase |= (*buf++);
+ len -= 2;
+
+ /* if inactive, we send up a PH_ACTIVATE and activate */
+ if (!test_bit(FLG_ACTIVE, &dch->Flags)) {
+ if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET))
+ printk(KERN_DEBUG "%s: interface become active due to "
+ "received packet\n", __func__);
+ test_and_set_bit(FLG_ACTIVE, &dch->Flags);
+ _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
+ NULL, GFP_ATOMIC);
+ }
+
+ /* distribute packet */
+ l1oip_socket_recv(hc, remotecodec, channel, timebase, buf, mlen);
+ buf += mlen;
+ len -= mlen;
+
+ /* multiframe */
+ if (m)
+ goto multiframe;
+
+ /* restart timer */
+ if ((int)(hc->timeout_tl.expires-jiffies) < 5*HZ || !hc->timeout_on) {
+ hc->timeout_on = 1;
+ del_timer(&hc->timeout_tl);
+ hc->timeout_tl.expires = jiffies + L1OIP_TIMEOUT*HZ;
+ add_timer(&hc->timeout_tl);
+ } else /* only adjust timer */
+ hc->timeout_tl.expires = jiffies + L1OIP_TIMEOUT*HZ;
+
+ /* if ip or source port changes */
+ if ((hc->sin_remote.sin_addr.s_addr != sin->sin_addr.s_addr)
+ || (hc->sin_remote.sin_port != sin->sin_port)) {
+ if (debug & DEBUG_L1OIP_SOCKET)
+ printk(KERN_DEBUG "%s: remote address changes from "
+ "0x%08x to 0x%08x (port %d to %d)\n", __func__,
+ ntohl(hc->sin_remote.sin_addr.s_addr),
+ ntohl(sin->sin_addr.s_addr),
+ ntohs(hc->sin_remote.sin_port),
+ ntohs(sin->sin_port));
+ hc->sin_remote.sin_addr.s_addr = sin->sin_addr.s_addr;
+ hc->sin_remote.sin_port = sin->sin_port;
+ }
+}
+
+
+/*
+ * socket stuff
+ */
+static int
+l1oip_socket_thread(void *data)
+{
+ struct l1oip *hc = (struct l1oip *)data;
+ int ret = 0;
+ struct msghdr msg;
+ struct iovec iov;
+ mm_segment_t oldfs;
+ struct sockaddr_in sin_rx;
+ unsigned char recvbuf[1500];
+ int recvlen;
+ struct socket *socket = NULL;
+ DECLARE_COMPLETION(wait);
+
+ /* make daemon */
+ allow_signal(SIGTERM);
+
+ /* create socket */
+ if (sock_create(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &socket)) {
+ printk(KERN_ERR "%s: Failed to create socket.\n", __func__);
+ return -EIO;
+ }
+
+ /* set incoming address */
+ hc->sin_local.sin_family = AF_INET;
+ hc->sin_local.sin_addr.s_addr = INADDR_ANY;
+ hc->sin_local.sin_port = htons((unsigned short)hc->localport);
+
+ /* set outgoing address */
+ hc->sin_remote.sin_family = AF_INET;
+ hc->sin_remote.sin_addr.s_addr = htonl(hc->remoteip);
+ hc->sin_remote.sin_port = htons((unsigned short)hc->remoteport);
+
+ /* bind to incoming port */
+ if (socket->ops->bind(socket, (struct sockaddr *)&hc->sin_local,
+ sizeof(hc->sin_local))) {
+ printk(KERN_ERR "%s: Failed to bind socket to port %d.\n",
+ __func__, hc->localport);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* check sk */
+ if (socket->sk == NULL) {
+ printk(KERN_ERR "%s: socket->sk == NULL\n", __func__);
+ ret = -EIO;
+ goto fail;
+ }
+
+ /* build receive message */
+ msg.msg_name = &sin_rx;
+ msg.msg_namelen = sizeof(sin_rx);
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+ /* build send message */
+ hc->sendmsg.msg_name = &hc->sin_remote;
+ hc->sendmsg.msg_namelen = sizeof(hc->sin_remote);
+ hc->sendmsg.msg_control = NULL;
+ hc->sendmsg.msg_controllen = 0;
+ hc->sendmsg.msg_iov = &hc->sendiov;
+ hc->sendmsg.msg_iovlen = 1;
+
+ /* give away socket */
+ spin_lock(&hc->socket_lock);
+ hc->socket = socket;
+ spin_unlock(&hc->socket_lock);
+
+ /* read loop */
+ if (debug & DEBUG_L1OIP_SOCKET)
+ printk(KERN_DEBUG "%s: socket created and open\n",
+ __func__);
+ while (!signal_pending(current)) {
+ iov.iov_base = recvbuf;
+ iov.iov_len = sizeof(recvbuf);
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ recvlen = sock_recvmsg(socket, &msg, sizeof(recvbuf), 0);
+ set_fs(oldfs);
+ if (recvlen > 0) {
+ l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen);
+ } else {
+ if (debug & DEBUG_L1OIP_SOCKET)
+ printk(KERN_WARNING "%s: broken pipe on socket\n",
+ __func__);
+ }
+ }
+
+ /* get socket back, check first if in use, maybe by send function */
+ spin_lock(&hc->socket_lock);
+ /* if hc->socket is NULL, it is in use until it is given back */
+ while (!hc->socket) {
+ spin_unlock(&hc->socket_lock);
+ schedule_timeout(HZ/10);
+ spin_lock(&hc->socket_lock);
+ }
+ hc->socket = NULL;
+ spin_unlock(&hc->socket_lock);
+
+ if (debug & DEBUG_L1OIP_SOCKET)
+ printk(KERN_DEBUG "%s: socket thread terminating\n",
+ __func__);
+
+fail:
+ /* close socket */
+ if (socket)
+ sock_release(socket);
+
+ /* if we got killed, signal completion */
+ complete(&hc->socket_complete);
+ hc->socket_thread = NULL; /* show termination of thread */
+
+ if (debug & DEBUG_L1OIP_SOCKET)
+ printk(KERN_DEBUG "%s: socket thread terminated\n",
+ __func__);
+ return ret;
+}
+
+static void
+l1oip_socket_close(struct l1oip *hc)
+{
+ /* kill thread */
+ if (hc->socket_thread) {
+ if (debug & DEBUG_L1OIP_SOCKET)
+ printk(KERN_DEBUG "%s: socket thread exists, "
+ "killing...\n", __func__);
+ send_sig(SIGTERM, hc->socket_thread, 0);
+ wait_for_completion(&hc->socket_complete);
+ }
+}
+
+static int
+l1oip_socket_open(struct l1oip *hc)
+{
+ /* in case of reopen, we need to close first */
+ l1oip_socket_close(hc);
+
+ init_completion(&hc->socket_complete);
+
+ /* create receive process */
+ hc->socket_thread = kthread_run(l1oip_socket_thread, hc, "l1oip_%s",
+ hc->name);
+ if (IS_ERR(hc->socket_thread)) {
+ int err = PTR_ERR(hc->socket_thread);
+ printk(KERN_ERR "%s: Failed (%d) to create socket process.\n",
+ __func__, err);
+ hc->socket_thread = NULL;
+ sock_release(hc->socket);
+ return err;
+ }
+ if (debug & DEBUG_L1OIP_SOCKET)
+ printk(KERN_DEBUG "%s: socket thread created\n", __func__);
+
+ return 0;
+}
+
+
+static void
+l1oip_send_bh(struct work_struct *work)
+{
+ struct l1oip *hc = container_of(work, struct l1oip, workq);
+
+ if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET))
+ printk(KERN_DEBUG "%s: keepalive timer expired, sending empty "
+ "frame on dchannel\n", __func__);
+
+ /* send an empty l1oip frame at D-channel */
+ l1oip_socket_send(hc, 0, hc->d_idx, 0, 0, NULL, 0);
+}
+
+
+/*
+ * timer stuff
+ */
+static void
+l1oip_keepalive(void *data)
+{
+ struct l1oip *hc = (struct l1oip *)data;
+
+ schedule_work(&hc->workq);
+}
+
+static void
+l1oip_timeout(void *data)
+{
+ struct l1oip *hc = (struct l1oip *)data;
+ struct dchannel *dch = hc->chan[hc->d_idx].dch;
+
+ if (debug & DEBUG_L1OIP_MSG)
+ printk(KERN_DEBUG "%s: timeout timer expired, turn layer one "
+ "down.\n", __func__);
+
+ hc->timeout_on = 0; /* state that timer must be initialized next time */
+
+ /* if timeout, we send up a PH_DEACTIVATE and deactivate */
+ if (test_bit(FLG_ACTIVE, &dch->Flags)) {
+ if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET))
+ printk(KERN_DEBUG "%s: interface become deactivated "
+ "due to timeout\n", __func__);
+ test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
+ _queue_data(&dch->dev.D, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
+ NULL, GFP_ATOMIC);
+ }
+
+ /* if we have ondemand set, we remove ip address */
+ if (hc->ondemand) {
+ if (debug & DEBUG_L1OIP_MSG)
+ printk(KERN_DEBUG "%s: on demand causes ip address to "
+ "be removed\n", __func__);
+ hc->sin_remote.sin_addr.s_addr = 0;
+ }
+}
+
+
+/*
+ * message handling
+ */
+static int
+handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
+{
+ struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
+ struct dchannel *dch = container_of(dev, struct dchannel, dev);
+ struct l1oip *hc = dch->hw;
+ struct mISDNhead *hh = mISDN_HEAD_P(skb);
+ int ret = -EINVAL;
+ int l, ll;
+ unsigned char *p;
+
+ switch (hh->prim) {
+ case PH_DATA_REQ:
+ if (skb->len < 1) {
+ printk(KERN_WARNING "%s: skb too small\n",
+ __func__);
+ break;
+ }
+ if (skb->len > MAX_DFRAME_LEN_L1 || skb->len > L1OIP_MAX_LEN) {
+ printk(KERN_WARNING "%s: skb too large\n",
+ __func__);
+ break;
+ }
+ /* send frame */
+ p = skb->data;
+ l = skb->len;
+ while (l) {
+ ll = (l < L1OIP_MAX_PERFRAME)?l:L1OIP_MAX_PERFRAME;
+ l1oip_socket_send(hc, 0, dch->slot, 0,
+ hc->chan[dch->slot].tx_counter++, p, ll);
+ p += ll;
+ l -= ll;
+ }
+ skb_trim(skb, 0);
+ queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
+ return 0;
+ case PH_ACTIVATE_REQ:
+ if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET))
+ printk(KERN_DEBUG "%s: PH_ACTIVATE channel %d (1..%d)\n"
+ , __func__, dch->slot, hc->b_num+1);
+ skb_trim(skb, 0);
+ if (test_bit(FLG_ACTIVE, &dch->Flags))
+ queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb);
+ else
+ queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb);
+ return 0;
+ case PH_DEACTIVATE_REQ:
+ if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET))
+ printk(KERN_DEBUG "%s: PH_DEACTIVATE channel %d "
+ "(1..%d)\n", __func__, dch->slot,
+ hc->b_num+1);
+ skb_trim(skb, 0);
+ if (test_bit(FLG_ACTIVE, &dch->Flags))
+ queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb);
+ else
+ queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb);
+ return 0;
+ }
+ if (!ret)
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int
+channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
+{
+ int ret = 0;
+ struct l1oip *hc = dch->hw;
+
+ switch (cq->op) {
+ case MISDN_CTRL_GETOP:
+ cq->op = MISDN_CTRL_SETPEER | MISDN_CTRL_UNSETPEER;
+ break;
+ case MISDN_CTRL_SETPEER:
+ hc->remoteip = (u32)cq->p1;
+ hc->remoteport = cq->p2 & 0xffff;
+ hc->localport = cq->p2 >> 16;
+ if (!hc->remoteport)
+ hc->remoteport = hc->localport;
+ if (debug & DEBUG_L1OIP_SOCKET)
+ printk(KERN_DEBUG "%s: got new ip address from user "
+ "space.\n", __func__);
+ l1oip_socket_open(hc);
+ break;
+ case MISDN_CTRL_UNSETPEER:
+ if (debug & DEBUG_L1OIP_SOCKET)
+ printk(KERN_DEBUG "%s: removing ip address.\n",
+ __func__);
+ hc->remoteip = 0;
+ l1oip_socket_open(hc);
+ break;
+ default:
+ printk(KERN_WARNING "%s: unknown Op %x\n",
+ __func__, cq->op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int
+open_dchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq)
+{
+ if (debug & DEBUG_HW_OPEN)
+ printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__,
+ dch->dev.id, __builtin_return_address(0));
+ if (rq->protocol == ISDN_P_NONE)
+ return -EINVAL;
+ if ((dch->dev.D.protocol != ISDN_P_NONE) &&
+ (dch->dev.D.protocol != rq->protocol)) {
+ if (debug & DEBUG_HW_OPEN)
+ printk(KERN_WARNING "%s: change protocol %x to %x\n",
+ __func__, dch->dev.D.protocol, rq->protocol);
+ }
+ if (dch->dev.D.protocol != rq->protocol)
+ dch->dev.D.protocol = rq->protocol;
+
+ if (test_bit(FLG_ACTIVE, &dch->Flags)) {
+ _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY,
+ 0, NULL, GFP_KERNEL);
+ }
+ rq->ch = &dch->dev.D;
+ if (!try_module_get(THIS_MODULE))
+ printk(KERN_WARNING "%s:cannot get module\n", __func__);
+ return 0;
+}
+
+static int
+open_bchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq)
+{
+ struct bchannel *bch;
+ int ch;
+
+ if (!test_bit(rq->adr.channel & 0x1f,
+ &dch->dev.channelmap[rq->adr.channel >> 5]))
+ return -EINVAL;
+ if (rq->protocol == ISDN_P_NONE)
+ return -EINVAL;
+ ch = rq->adr.channel; /* BRI: 1=B1 2=B2 PRI: 1..15,17.. */
+ bch = hc->chan[ch].bch;
+ if (!bch) {
+ printk(KERN_ERR "%s:internal error ch %d has no bch\n",
+ __func__, ch);
+ return -EINVAL;
+ }
+ if (test_and_set_bit(FLG_OPEN, &bch->Flags))
+ return -EBUSY; /* b-channel can only be opened once */
+ bch->ch.protocol = rq->protocol;
+ rq->ch = &bch->ch;
+ if (!try_module_get(THIS_MODULE))
+ printk(KERN_WARNING "%s:cannot get module\n", __func__);
+ return 0;
+}
+
+static int
+l1oip_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
+{
+ struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
+ struct dchannel *dch = container_of(dev, struct dchannel, dev);
+ struct l1oip *hc = dch->hw;
+ struct channel_req *rq;
+ int err = 0;
+
+ if (dch->debug & DEBUG_HW)
+ printk(KERN_DEBUG "%s: cmd:%x %p\n",
+ __func__, cmd, arg);
+ switch (cmd) {
+ case OPEN_CHANNEL:
+ rq = arg;
+ switch (rq->protocol) {
+ case ISDN_P_TE_S0:
+ case ISDN_P_NT_S0:
+ if (hc->pri) {
+ err = -EINVAL;
+ break;
+ }
+ err = open_dchannel(hc, dch, rq);
+ break;
+ case ISDN_P_TE_E1:
+ case ISDN_P_NT_E1:
+ if (!hc->pri) {
+ err = -EINVAL;
+ break;
+ }
+ err = open_dchannel(hc, dch, rq);
+ break;
+ default:
+ err = open_bchannel(hc, dch, rq);
+ }
+ break;
+ case CLOSE_CHANNEL:
+ if (debug & DEBUG_HW_OPEN)
+ printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
+ __func__, dch->dev.id,
+ __builtin_return_address(0));
+ module_put(THIS_MODULE);
+ break;
+ case CONTROL_CHANNEL:
+ err = channel_dctrl(dch, arg);
+ break;
+ default:
+ if (dch->debug & DEBUG_HW)
+ printk(KERN_DEBUG "%s: unknown command %x\n",
+ __func__, cmd);
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int
+handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
+{
+ struct bchannel *bch = container_of(ch, struct bchannel, ch);
+ struct l1oip *hc = bch->hw;
+ int ret = -EINVAL;
+ struct mISDNhead *hh = mISDN_HEAD_P(skb);
+ int l, ll, i;
+ unsigned char *p;
+
+ switch (hh->prim) {
+ case PH_DATA_REQ:
+ if (skb->len <= 0) {
+ printk(KERN_WARNING "%s: skb too small\n",
+ __func__);
+ break;
+ }
+ if (skb->len > MAX_DFRAME_LEN_L1 || skb->len > L1OIP_MAX_LEN) {
+ printk(KERN_WARNING "%s: skb too large\n",
+ __func__);
+ break;
+ }
+ /* check for AIS / ulaw-silence */
+ p = skb->data;
+ l = skb->len;
+ for (i = 0; i < l; i++) {
+ if (*p++ != 0xff)
+ break;
+ }
+ if (i == l) {
+ if (debug & DEBUG_L1OIP_MSG)
+ printk(KERN_DEBUG "%s: got AIS, not sending, "
+ "but counting\n", __func__);
+ hc->chan[bch->slot].tx_counter += l;
+ skb_trim(skb, 0);
+ queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
+ return 0;
+ }
+ /* check for silence */
+ p = skb->data;
+ l = skb->len;
+ for (i = 0; i < l; i++) {
+ if (*p++ != 0x2a)
+ break;
+ }
+ if (i == l) {
+ if (debug & DEBUG_L1OIP_MSG)
+ printk(KERN_DEBUG "%s: got silence, not sending"
+ ", but counting\n", __func__);
+ hc->chan[bch->slot].tx_counter += l;
+ skb_trim(skb, 0);
+ queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
+ return 0;
+ }
+
+ /* send frame */
+ p = skb->data;
+ l = skb->len;
+ while (l) {
+ ll = (l < L1OIP_MAX_PERFRAME)?l:L1OIP_MAX_PERFRAME;
+ l1oip_socket_send(hc, hc->codec, bch->slot, 0,
+ hc->chan[bch->slot].tx_counter, p, ll);
+ hc->chan[bch->slot].tx_counter += ll;
+ p += ll;
+ l -= ll;
+ }
+ skb_trim(skb, 0);
+ queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
+ return 0;
+ case PH_ACTIVATE_REQ:
+ if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET))
+ printk(KERN_DEBUG "%s: PH_ACTIVATE channel %d (1..%d)\n"
+ , __func__, bch->slot, hc->b_num+1);
+ hc->chan[bch->slot].codecstate = 0;
+ test_and_set_bit(FLG_ACTIVE, &bch->Flags);
+ skb_trim(skb, 0);
+ queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb);
+ return 0;
+ case PH_DEACTIVATE_REQ:
+ if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET))
+ printk(KERN_DEBUG "%s: PH_DEACTIVATE channel %d "
+ "(1..%d)\n", __func__, bch->slot,
+ hc->b_num+1);
+ test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
+ skb_trim(skb, 0);
+ queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb);
+ return 0;
+ }
+ if (!ret)
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int
+channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
+{
+ int ret = 0;
+ struct dsp_features *features =
+ (struct dsp_features *)(*((u_long *)&cq->p1));
+
+ switch (cq->op) {
+ case MISDN_CTRL_GETOP:
+ cq->op = MISDN_CTRL_HW_FEATURES_OP;
+ break;
+ case MISDN_CTRL_HW_FEATURES: /* fill features structure */
+ if (debug & DEBUG_L1OIP_MSG)
+ printk(KERN_DEBUG "%s: HW_FEATURE request\n",
+ __func__);
+ /* create confirm */
+ features->unclocked = 1;
+ features->unordered = 1;
+ break;
+ default:
+ printk(KERN_WARNING "%s: unknown Op %x\n",
+ __func__, cq->op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int
+l1oip_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
+{
+ struct bchannel *bch = container_of(ch, struct bchannel, ch);
+ int err = -EINVAL;
+
+ if (bch->debug & DEBUG_HW)
+ printk(KERN_DEBUG "%s: cmd:%x %p\n",
+ __func__, cmd, arg);
+ switch (cmd) {
+ case CLOSE_CHANNEL:
+ test_and_clear_bit(FLG_OPEN, &bch->Flags);
+ test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
+ ch->protocol = ISDN_P_NONE;
+ ch->peer = NULL;
+ module_put(THIS_MODULE);
+ err = 0;
+ break;
+ case CONTROL_CHANNEL:
+ err = channel_bctrl(bch, arg);
+ break;
+ default:
+ printk(KERN_WARNING "%s: unknown prim(%x)\n",
+ __func__, cmd);
+ }
+ return err;
+}
+
+
+/*
+ * cleanup module and stack
+ */
+static void
+release_card(struct l1oip *hc)
+{
+ int ch;
+
+ if (timer_pending(&hc->keep_tl))
+ del_timer(&hc->keep_tl);
+
+ if (timer_pending(&hc->timeout_tl))
+ del_timer(&hc->timeout_tl);
+
+ if (hc->socket_thread)
+ l1oip_socket_close(hc);
+
+ if (hc->registered && hc->chan[hc->d_idx].dch)
+ mISDN_unregister_device(&hc->chan[hc->d_idx].dch->dev);
+ for (ch = 0; ch < 128; ch++) {
+ if (hc->chan[ch].dch) {
+ mISDN_freedchannel(hc->chan[ch].dch);
+ kfree(hc->chan[ch].dch);
+ }
+ if (hc->chan[ch].bch) {
+ mISDN_freebchannel(hc->chan[ch].bch);
+ kfree(hc->chan[ch].bch);
+#ifdef REORDER_DEBUG
+ if (hc->chan[ch].disorder_skb)
+ dev_kfree_skb(hc->chan[ch].disorder_skb);
+#endif
+ }
+ }
+
+ spin_lock(&l1oip_lock);
+ list_del(&hc->list);
+ spin_unlock(&l1oip_lock);
+
+ kfree(hc);
+}
+
+static void
+l1oip_cleanup(void)
+{
+ struct l1oip *hc, *next;
+
+ list_for_each_entry_safe(hc, next, &l1oip_ilist, list)
+ release_card(hc);
+
+ l1oip_4bit_free();
+}
+
+
+/*
+ * module and stack init
+ */
+static int
+init_card(struct l1oip *hc, int pri, int bundle)
+{
+ struct dchannel *dch;
+ struct bchannel *bch;
+ int ret;
+ int i, ch;
+
+ spin_lock_init(&hc->socket_lock);
+ hc->idx = l1oip_cnt;
+ hc->pri = pri;
+ hc->d_idx = pri?16:3;
+ hc->b_num = pri?30:2;
+ hc->bundle = bundle;
+ if (hc->pri)
+ sprintf(hc->name, "l1oip-e1.%d", l1oip_cnt + 1);
+ else
+ sprintf(hc->name, "l1oip-s0.%d", l1oip_cnt + 1);
+
+ switch (codec[l1oip_cnt]) {
+ case 0: /* as is */
+ case 1: /* alaw */
+ case 2: /* ulaw */
+ case 3: /* 4bit */
+ break;
+ default:
+ printk(KERN_ERR "Codec(%d) not supported.\n",
+ codec[l1oip_cnt]);
+ return -EINVAL;
+ }
+ hc->codec = codec[l1oip_cnt];
+ if (debug & DEBUG_L1OIP_INIT)
+ printk(KERN_DEBUG "%s: using codec %d\n",
+ __func__, hc->codec);
+
+ if (id[l1oip_cnt] == 0) {
+ printk(KERN_WARNING "Warning: No 'id' value given or "
+ "0, this is highly unsecure. Please use 32 "
+ "bit randmom number 0x...\n");
+ }
+ hc->id = id[l1oip_cnt];
+ if (debug & DEBUG_L1OIP_INIT)
+ printk(KERN_DEBUG "%s: using id 0x%x\n", __func__, hc->id);
+
+ hc->ondemand = ondemand[l1oip_cnt];
+ if (hc->ondemand && !hc->id) {
+ printk(KERN_ERR "%s: ondemand option only allowed in "
+ "conjunction with non 0 ID\n", __func__);
+ return -EINVAL;
+ }
+
+ if (limit[l1oip_cnt])
+ hc->b_num = limit[l1oip_cnt];
+ if (!pri && hc->b_num > 2) {
+ printk(KERN_ERR "Maximum limit for BRI interface is 2 "
+ "channels.\n");
+ return -EINVAL;
+ }
+ if (pri && hc->b_num > 126) {
+ printk(KERN_ERR "Maximum limit for PRI interface is 126 "
+ "channels.\n");
+ return -EINVAL;
+ }
+ if (pri && hc->b_num > 30) {
+ printk(KERN_WARNING "Maximum limit for BRI interface is 30 "
+ "channels.\n");
+ printk(KERN_WARNING "Your selection of %d channels must be "
+ "supported by application.\n", hc->limit);
+ }
+
+ hc->remoteip = ip[l1oip_cnt<<2] << 24
+ | ip[(l1oip_cnt<<2)+1] << 16
+ | ip[(l1oip_cnt<<2)+2] << 8
+ | ip[(l1oip_cnt<<2)+3];
+ hc->localport = port[l1oip_cnt]?:(L1OIP_DEFAULTPORT+l1oip_cnt);
+ if (remoteport[l1oip_cnt])
+ hc->remoteport = remoteport[l1oip_cnt];
+ else
+ hc->remoteport = hc->localport;
+ if (debug & DEBUG_L1OIP_INIT)
+ printk(KERN_DEBUG "%s: using local port %d remote ip "
+ "%d.%d.%d.%d port %d ondemand %d\n", __func__,
+ hc->localport, hc->remoteip >> 24,
+ (hc->remoteip >> 16) & 0xff,
+ (hc->remoteip >> 8) & 0xff, hc->remoteip & 0xff,
+ hc->remoteport, hc->ondemand);
+
+ dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL);
+ if (!dch)
+ return -ENOMEM;
+ dch->debug = debug;
+ mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, NULL);
+ dch->hw = hc;
+ if (pri)
+ dch->dev.Dprotocols = (1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1);
+ else
+ dch->dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
+ dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
+ (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
+ dch->dev.D.send = handle_dmsg;
+ dch->dev.D.ctrl = l1oip_dctrl;
+ dch->dev.nrbchan = hc->b_num;
+ dch->slot = hc->d_idx;
+ hc->chan[hc->d_idx].dch = dch;
+ i = 1;
+ for (ch = 0; ch < dch->dev.nrbchan; ch++) {
+ if (ch == 15)
+ i++;
+ bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL);
+ if (!bch) {
+ printk(KERN_ERR "%s: no memory for bchannel\n",
+ __func__);
+ return -ENOMEM;
+ }
+ bch->nr = i + ch;
+ bch->slot = i + ch;
+ bch->debug = debug;
+ mISDN_initbchannel(bch, MAX_DATA_MEM);
+ bch->hw = hc;
+ bch->ch.send = handle_bmsg;
+ bch->ch.ctrl = l1oip_bctrl;
+ bch->ch.nr = i + ch;
+ list_add(&bch->ch.list, &dch->dev.bchannels);
+ hc->chan[i + ch].bch = bch;
+ test_and_set_bit(bch->nr & 0x1f,
+ &dch->dev.channelmap[bch->nr >> 5]);
+ }
+ ret = mISDN_register_device(&dch->dev, hc->name);
+ if (ret)
+ return ret;
+ hc->registered = 1;
+
+ if (debug & DEBUG_L1OIP_INIT)
+ printk(KERN_DEBUG "%s: Setting up network card(%d)\n",
+ __func__, l1oip_cnt + 1);
+ ret = l1oip_socket_open(hc);
+ if (ret)
+ return ret;
+
+ hc->keep_tl.function = (void *)l1oip_keepalive;
+ hc->keep_tl.data = (ulong)hc;
+ init_timer(&hc->keep_tl);
+ hc->keep_tl.expires = jiffies + 2*HZ; /* two seconds first time */
+ add_timer(&hc->keep_tl);
+
+ hc->timeout_tl.function = (void *)l1oip_timeout;
+ hc->timeout_tl.data = (ulong)hc;
+ init_timer(&hc->timeout_tl);
+ hc->timeout_on = 0; /* state that we have timer off */
+
+ return 0;
+}
+
+static int __init
+l1oip_init(void)
+{
+ int pri, bundle;
+ struct l1oip *hc;
+ int ret;
+
+ printk(KERN_INFO "mISDN: Layer-1-over-IP driver Rev. %s\n",
+ l1oip_revision);
+
+ INIT_LIST_HEAD(&l1oip_ilist);
+ spin_lock_init(&l1oip_lock);
+
+ if (l1oip_4bit_alloc(ulaw))
+ return -ENOMEM;
+
+ l1oip_cnt = 0;
+ while (type[l1oip_cnt] && l1oip_cnt < MAX_CARDS) {
+ switch (type[l1oip_cnt] & 0xff) {
+ case 1:
+ pri = 0;
+ bundle = 0;
+ break;
+ case 2:
+ pri = 1;
+ bundle = 0;
+ break;
+ case 3:
+ pri = 0;
+ bundle = 1;
+ break;
+ case 4:
+ pri = 1;
+ bundle = 1;
+ break;
+ default:
+ printk(KERN_ERR "Card type(%d) not supported.\n",
+ type[l1oip_cnt] & 0xff);
+ l1oip_cleanup();
+ return -EINVAL;
+ }
+
+ if (debug & DEBUG_L1OIP_INIT)
+ printk(KERN_DEBUG "%s: interface %d is %s with %s.\n",
+ __func__, l1oip_cnt, pri?"PRI":"BRI",
+ bundle?"bundled IP packet for all B-channels"
+ :"seperate IP packets for every B-channel");
+
+ hc = kzalloc(sizeof(struct l1oip), GFP_ATOMIC);
+ if (!hc) {
+ printk(KERN_ERR "No kmem for L1-over-IP driver.\n");
+ l1oip_cleanup();
+ return -ENOMEM;
+ }
+ INIT_WORK(&hc->workq, (void *)l1oip_send_bh);
+
+ spin_lock(&l1oip_lock);
+ list_add_tail(&hc->list, &l1oip_ilist);
+ spin_unlock(&l1oip_lock);
+
+ ret = init_card(hc, pri, bundle);
+ if (ret) {
+ l1oip_cleanup();
+ return ret;
+ }
+
+ l1oip_cnt++;
+ }
+ printk(KERN_INFO "%d virtual devices registered\n", l1oip_cnt);
+ return 0;
+}
+
+module_init(l1oip_init);
+module_exit(l1oip_cleanup);
+
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c
new file mode 100644
index 000000000000..fced1a2755f8
--- /dev/null
+++ b/drivers/isdn/mISDN/layer1.c
@@ -0,0 +1,403 @@
+/*
+ *
+ * Author Karsten Keil <kkeil@novell.com>
+ *
+ * Copyright 2008 by Karsten Keil <kkeil@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/mISDNhw.h>
+#include "layer1.h"
+#include "fsm.h"
+
+static u_int *debug;
+
+struct layer1 {
+ u_long Flags;
+ struct FsmInst l1m;
+ struct FsmTimer timer;
+ int delay;
+ struct dchannel *dch;
+ dchannel_l1callback *dcb;
+};
+
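+/* T3: layer 1 activation supervision timer (see l1_activate_s/l1_timer3) */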
+#define TIMER3_VALUE 7000
+
+static
+struct Fsm l1fsm_s = {NULL, 0, 0, NULL, NULL};
+
+enum {
+ ST_L1_F2,
+ ST_L1_F3,
+ ST_L1_F4,
+ ST_L1_F5,
+ ST_L1_F6,
+ ST_L1_F7,
+ ST_L1_F8,
+};
+
+#define L1S_STATE_COUNT (ST_L1_F8+1)
+
+static char *strL1SState[] =
+{
+ "ST_L1_F2",
+ "ST_L1_F3",
+ "ST_L1_F4",
+ "ST_L1_F5",
+ "ST_L1_F6",
+ "ST_L1_F7",
+ "ST_L1_F8",
+};
+
+enum {
+ EV_PH_ACTIVATE,
+ EV_PH_DEACTIVATE,
+ EV_RESET_IND,
+ EV_DEACT_CNF,
+ EV_DEACT_IND,
+ EV_POWER_UP,
+ EV_ANYSIG_IND,
+ EV_INFO2_IND,
+ EV_INFO4_IND,
+ EV_TIMER_DEACT,
+ EV_TIMER_ACT,
+ EV_TIMER3,
+};
+
+#define L1_EVENT_COUNT (EV_TIMER3 + 1)
+
+static char *strL1Event[] =
+{
+ "EV_PH_ACTIVATE",
+ "EV_PH_DEACTIVATE",
+ "EV_RESET_IND",
+ "EV_DEACT_CNF",
+ "EV_DEACT_IND",
+ "EV_POWER_UP",
+ "EV_ANYSIG_IND",
+ "EV_INFO2_IND",
+ "EV_INFO4_IND",
+ "EV_TIMER_DEACT",
+ "EV_TIMER_ACT",
+ "EV_TIMER3",
+};
+
+static void
+l1m_debug(struct FsmInst *fi, char *fmt, ...)
+{
+ struct layer1 *l1 = fi->userdata;
+ va_list va;
+
+ va_start(va, fmt);
+ printk(KERN_DEBUG "%s: ", l1->dch->dev.name);
+ vprintk(fmt, va);
+ printk("\n");
+ va_end(va);
+}
+
+static void
+l1_reset(struct FsmInst *fi, int event, void *arg)
+{
+ mISDN_FsmChangeState(fi, ST_L1_F3);
+}
+
+static void
+l1_deact_cnf(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer1 *l1 = fi->userdata;
+
+ mISDN_FsmChangeState(fi, ST_L1_F3);
+ if (test_bit(FLG_L1_ACTIVATING, &l1->Flags))
+ l1->dcb(l1->dch, HW_POWERUP_REQ);
+}
+
+static void
+l1_deact_req_s(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer1 *l1 = fi->userdata;
+
+ mISDN_FsmChangeState(fi, ST_L1_F3);
+ mISDN_FsmRestartTimer(&l1->timer, 550, EV_TIMER_DEACT, NULL, 2);
+ test_and_set_bit(FLG_L1_DEACTTIMER, &l1->Flags);
+}
+
+static void
+l1_power_up_s(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer1 *l1 = fi->userdata;
+
+ if (test_bit(FLG_L1_ACTIVATING, &l1->Flags)) {
+ mISDN_FsmChangeState(fi, ST_L1_F4);
+ l1->dcb(l1->dch, INFO3_P8);
+ } else
+ mISDN_FsmChangeState(fi, ST_L1_F3);
+}
+
+static void
+l1_go_F5(struct FsmInst *fi, int event, void *arg)
+{
+ mISDN_FsmChangeState(fi, ST_L1_F5);
+}
+
+static void
+l1_go_F8(struct FsmInst *fi, int event, void *arg)
+{
+ mISDN_FsmChangeState(fi, ST_L1_F8);
+}
+
+static void
+l1_info2_ind(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer1 *l1 = fi->userdata;
+
+ mISDN_FsmChangeState(fi, ST_L1_F6);
+ l1->dcb(l1->dch, INFO3_P8);
+}
+
+static void
+l1_info4_ind(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer1 *l1 = fi->userdata;
+
+ mISDN_FsmChangeState(fi, ST_L1_F7);
+ l1->dcb(l1->dch, INFO3_P8);
+ if (test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags))
+ mISDN_FsmDelTimer(&l1->timer, 4);
+ if (!test_bit(FLG_L1_ACTIVATED, &l1->Flags)) {
+ if (test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags))
+ mISDN_FsmDelTimer(&l1->timer, 3);
+ mISDN_FsmRestartTimer(&l1->timer, 110, EV_TIMER_ACT, NULL, 2);
+ test_and_set_bit(FLG_L1_ACTTIMER, &l1->Flags);
+ }
+}
+
+static void
+l1_timer3(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer1 *l1 = fi->userdata;
+
+ test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags);
+ if (test_and_clear_bit(FLG_L1_ACTIVATING, &l1->Flags)) {
+ if (test_and_clear_bit(FLG_L1_DBLOCKED, &l1->Flags))
+ l1->dcb(l1->dch, HW_D_NOBLOCKED);
+ l1->dcb(l1->dch, PH_DEACTIVATE_IND);
+ }
+ if (l1->l1m.state != ST_L1_F6) {
+ mISDN_FsmChangeState(fi, ST_L1_F3);
+ l1->dcb(l1->dch, HW_POWERUP_REQ);
+ }
+}
+
+static void
+l1_timer_act(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer1 *l1 = fi->userdata;
+
+ test_and_clear_bit(FLG_L1_ACTTIMER, &l1->Flags);
+ test_and_set_bit(FLG_L1_ACTIVATED, &l1->Flags);
+ l1->dcb(l1->dch, PH_ACTIVATE_IND);
+}
+
+static void
+l1_timer_deact(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer1 *l1 = fi->userdata;
+
+ test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags);
+ test_and_clear_bit(FLG_L1_ACTIVATED, &l1->Flags);
+ if (test_and_clear_bit(FLG_L1_DBLOCKED, &l1->Flags))
+ l1->dcb(l1->dch, HW_D_NOBLOCKED);
+ l1->dcb(l1->dch, PH_DEACTIVATE_IND);
+ l1->dcb(l1->dch, HW_DEACT_REQ);
+}
+
+static void
+l1_activate_s(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer1 *l1 = fi->userdata;
+
+ mISDN_FsmRestartTimer(&l1->timer, TIMER3_VALUE, EV_TIMER3, NULL, 2);
+ test_and_set_bit(FLG_L1_T3RUN, &l1->Flags);
+ l1->dcb(l1->dch, HW_RESET_REQ);
+}
+
+static void
+l1_activate_no(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer1 *l1 = fi->userdata;
+
+ if ((!test_bit(FLG_L1_DEACTTIMER, &l1->Flags)) &&
+ (!test_bit(FLG_L1_T3RUN, &l1->Flags))) {
+ test_and_clear_bit(FLG_L1_ACTIVATING, &l1->Flags);
+ if (test_and_clear_bit(FLG_L1_DBLOCKED, &l1->Flags))
+ l1->dcb(l1->dch, HW_D_NOBLOCKED);
+ l1->dcb(l1->dch, PH_DEACTIVATE_IND);
+ }
+}
+
+static struct FsmNode L1SFnList[] =
+{
+ {ST_L1_F3, EV_PH_ACTIVATE, l1_activate_s},
+ {ST_L1_F6, EV_PH_ACTIVATE, l1_activate_no},
+ {ST_L1_F8, EV_PH_ACTIVATE, l1_activate_no},
+ {ST_L1_F3, EV_RESET_IND, l1_reset},
+ {ST_L1_F4, EV_RESET_IND, l1_reset},
+ {ST_L1_F5, EV_RESET_IND, l1_reset},
+ {ST_L1_F6, EV_RESET_IND, l1_reset},
+ {ST_L1_F7, EV_RESET_IND, l1_reset},
+ {ST_L1_F8, EV_RESET_IND, l1_reset},
+ {ST_L1_F3, EV_DEACT_CNF, l1_deact_cnf},
+ {ST_L1_F4, EV_DEACT_CNF, l1_deact_cnf},
+ {ST_L1_F5, EV_DEACT_CNF, l1_deact_cnf},
+ {ST_L1_F6, EV_DEACT_CNF, l1_deact_cnf},
+ {ST_L1_F7, EV_DEACT_CNF, l1_deact_cnf},
+ {ST_L1_F8, EV_DEACT_CNF, l1_deact_cnf},
+ {ST_L1_F6, EV_DEACT_IND, l1_deact_req_s},
+ {ST_L1_F7, EV_DEACT_IND, l1_deact_req_s},
+ {ST_L1_F8, EV_DEACT_IND, l1_deact_req_s},
+ {ST_L1_F3, EV_POWER_UP, l1_power_up_s},
+ {ST_L1_F4, EV_ANYSIG_IND, l1_go_F5},
+ {ST_L1_F6, EV_ANYSIG_IND, l1_go_F8},
+ {ST_L1_F7, EV_ANYSIG_IND, l1_go_F8},
+ {ST_L1_F3, EV_INFO2_IND, l1_info2_ind},
+ {ST_L1_F4, EV_INFO2_IND, l1_info2_ind},
+ {ST_L1_F5, EV_INFO2_IND, l1_info2_ind},
+ {ST_L1_F7, EV_INFO2_IND, l1_info2_ind},
+ {ST_L1_F8, EV_INFO2_IND, l1_info2_ind},
+ {ST_L1_F3, EV_INFO4_IND, l1_info4_ind},
+ {ST_L1_F4, EV_INFO4_IND, l1_info4_ind},
+ {ST_L1_F5, EV_INFO4_IND, l1_info4_ind},
+ {ST_L1_F6, EV_INFO4_IND, l1_info4_ind},
+ {ST_L1_F8, EV_INFO4_IND, l1_info4_ind},
+ {ST_L1_F3, EV_TIMER3, l1_timer3},
+ {ST_L1_F4, EV_TIMER3, l1_timer3},
+ {ST_L1_F5, EV_TIMER3, l1_timer3},
+ {ST_L1_F6, EV_TIMER3, l1_timer3},
+ {ST_L1_F8, EV_TIMER3, l1_timer3},
+ {ST_L1_F7, EV_TIMER_ACT, l1_timer_act},
+ {ST_L1_F3, EV_TIMER_DEACT, l1_timer_deact},
+ {ST_L1_F4, EV_TIMER_DEACT, l1_timer_deact},
+ {ST_L1_F5, EV_TIMER_DEACT, l1_timer_deact},
+ {ST_L1_F6, EV_TIMER_DEACT, l1_timer_deact},
+ {ST_L1_F7, EV_TIMER_DEACT, l1_timer_deact},
+ {ST_L1_F8, EV_TIMER_DEACT, l1_timer_deact},
+};
+
+static void
+release_l1(struct layer1 *l1) {
+ mISDN_FsmDelTimer(&l1->timer, 0);
+ if (l1->dch)
+ l1->dch->l1 = NULL;
+ module_put(THIS_MODULE);
+ kfree(l1);
+}
+
+int
+l1_event(struct layer1 *l1, u_int event)
+{
+ int err = 0;
+
+ if (!l1)
+ return -EINVAL;
+ switch (event) {
+ case HW_RESET_IND:
+ mISDN_FsmEvent(&l1->l1m, EV_RESET_IND, NULL);
+ break;
+ case HW_DEACT_IND:
+ mISDN_FsmEvent(&l1->l1m, EV_DEACT_IND, NULL);
+ break;
+ case HW_POWERUP_IND:
+ mISDN_FsmEvent(&l1->l1m, EV_POWER_UP, NULL);
+ break;
+ case HW_DEACT_CNF:
+ mISDN_FsmEvent(&l1->l1m, EV_DEACT_CNF, NULL);
+ break;
+ case ANYSIGNAL:
+ mISDN_FsmEvent(&l1->l1m, EV_ANYSIG_IND, NULL);
+ break;
+ case LOSTFRAMING:
+ mISDN_FsmEvent(&l1->l1m, EV_ANYSIG_IND, NULL);
+ break;
+ case INFO2:
+ mISDN_FsmEvent(&l1->l1m, EV_INFO2_IND, NULL);
+ break;
+ case INFO4_P8:
+ mISDN_FsmEvent(&l1->l1m, EV_INFO4_IND, NULL);
+ break;
+ case INFO4_P10:
+ mISDN_FsmEvent(&l1->l1m, EV_INFO4_IND, NULL);
+ break;
+ case PH_ACTIVATE_REQ:
+ if (test_bit(FLG_L1_ACTIVATED, &l1->Flags))
+ l1->dcb(l1->dch, PH_ACTIVATE_IND);
+ else {
+ test_and_set_bit(FLG_L1_ACTIVATING, &l1->Flags);
+ mISDN_FsmEvent(&l1->l1m, EV_PH_ACTIVATE, NULL);
+ }
+ break;
+ case CLOSE_CHANNEL:
+ release_l1(l1);
+ break;
+ default:
+ if (*debug & DEBUG_L1)
+ printk(KERN_DEBUG "%s %x unhandled\n",
+ __func__, event);
+ err = -EINVAL;
+ }
+ return err;
+}
+EXPORT_SYMBOL(l1_event);
+
+int
+create_l1(struct dchannel *dch, dchannel_l1callback *dcb) {
+ struct layer1 *nl1;
+
+ nl1 = kzalloc(sizeof(struct layer1), GFP_ATOMIC);
+ if (!nl1) {
+ printk(KERN_ERR "kmalloc struct layer1 failed\n");
+ return -ENOMEM;
+ }
+ nl1->l1m.fsm = &l1fsm_s;
+ nl1->l1m.state = ST_L1_F3;
+ nl1->Flags = 0;
+ nl1->l1m.debug = *debug & DEBUG_L1_FSM;
+ nl1->l1m.userdata = nl1;
+ nl1->l1m.userint = 0;
+ nl1->l1m.printdebug = l1m_debug;
+ nl1->dch = dch;
+ nl1->dcb = dcb;
+ mISDN_FsmInitTimer(&nl1->l1m, &nl1->timer);
+ __module_get(THIS_MODULE);
+ dch->l1 = nl1;
+ return 0;
+}
+EXPORT_SYMBOL(create_l1);
+
+int
+l1_init(u_int *deb)
+{
+ debug = deb;
+ l1fsm_s.state_count = L1S_STATE_COUNT;
+ l1fsm_s.event_count = L1_EVENT_COUNT;
+ l1fsm_s.strEvent = strL1Event;
+ l1fsm_s.strState = strL1SState;
+ mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
+ return 0;
+}
+
+void
+l1_cleanup(void)
+{
+ mISDN_FsmFree(&l1fsm_s);
+}
diff --git a/drivers/isdn/mISDN/layer1.h b/drivers/isdn/mISDN/layer1.h
new file mode 100644
index 000000000000..9c8125fd89af
--- /dev/null
+++ b/drivers/isdn/mISDN/layer1.h
@@ -0,0 +1,26 @@
+/*
+ *
+ * Layer 1 defines
+ *
+ * Copyright 2008 by Karsten Keil <kkeil@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define FLG_L1_ACTIVATING 1
+#define FLG_L1_ACTIVATED 2
+#define FLG_L1_DEACTTIMER 3
+#define FLG_L1_ACTTIMER 4
+#define FLG_L1_T3RUN 5
+#define FLG_L1_PULL_REQ 6
+#define FLG_L1_UINT 7
+#define FLG_L1_DBLOCKED 8
+
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
new file mode 100644
index 000000000000..a7915a156c04
--- /dev/null
+++ b/drivers/isdn/mISDN/layer2.c
@@ -0,0 +1,2216 @@
+/*
+ *
+ * Author Karsten Keil <kkeil@novell.com>
+ *
+ * Copyright 2008 by Karsten Keil <kkeil@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "fsm.h"
+#include "layer2.h"
+
+static int *debug;
+
+static
+struct Fsm l2fsm = {NULL, 0, 0, NULL, NULL};
+
+static char *strL2State[] =
+{
+ "ST_L2_1",
+ "ST_L2_2",
+ "ST_L2_3",
+ "ST_L2_4",
+ "ST_L2_5",
+ "ST_L2_6",
+ "ST_L2_7",
+ "ST_L2_8",
+};
+
+enum {
+ EV_L2_UI,
+ EV_L2_SABME,
+ EV_L2_DISC,
+ EV_L2_DM,
+ EV_L2_UA,
+ EV_L2_FRMR,
+ EV_L2_SUPER,
+ EV_L2_I,
+ EV_L2_DL_DATA,
+ EV_L2_ACK_PULL,
+ EV_L2_DL_UNITDATA,
+ EV_L2_DL_ESTABLISH_REQ,
+ EV_L2_DL_RELEASE_REQ,
+ EV_L2_MDL_ASSIGN,
+ EV_L2_MDL_REMOVE,
+ EV_L2_MDL_ERROR,
+ EV_L1_DEACTIVATE,
+ EV_L2_T200,
+ EV_L2_T203,
+ EV_L2_SET_OWN_BUSY,
+ EV_L2_CLEAR_OWN_BUSY,
+ EV_L2_FRAME_ERROR,
+};
+
+#define L2_EVENT_COUNT (EV_L2_FRAME_ERROR+1)
+
+static char *strL2Event[] =
+{
+ "EV_L2_UI",
+ "EV_L2_SABME",
+ "EV_L2_DISC",
+ "EV_L2_DM",
+ "EV_L2_UA",
+ "EV_L2_FRMR",
+ "EV_L2_SUPER",
+ "EV_L2_I",
+ "EV_L2_DL_DATA",
+ "EV_L2_ACK_PULL",
+ "EV_L2_DL_UNITDATA",
+ "EV_L2_DL_ESTABLISH_REQ",
+ "EV_L2_DL_RELEASE_REQ",
+ "EV_L2_MDL_ASSIGN",
+ "EV_L2_MDL_REMOVE",
+ "EV_L2_MDL_ERROR",
+ "EV_L1_DEACTIVATE",
+ "EV_L2_T200",
+ "EV_L2_T203",
+ "EV_L2_SET_OWN_BUSY",
+ "EV_L2_CLEAR_OWN_BUSY",
+ "EV_L2_FRAME_ERROR",
+};
+
+static void
+l2m_debug(struct FsmInst *fi, char *fmt, ...)
+{
+ struct layer2 *l2 = fi->userdata;
+ va_list va;
+
+ if (!(*debug & DEBUG_L2_FSM))
+ return;
+ va_start(va, fmt);
+ printk(KERN_DEBUG "l2 (tei %d): ", l2->tei);
+ vprintk(fmt, va);
+ printk("\n");
+ va_end(va);
+}
+
+inline u_int
+l2headersize(struct layer2 *l2, int ui)
+{
+ return ((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
+ (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
+}
+
+inline u_int
+l2addrsize(struct layer2 *l2)
+{
+ return test_bit(FLG_LAPD, &l2->flag) ? 2 : 1;
+}
+
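+/* build a frame id: sequence number (wraps at 0x7fff) in the upper 16 bits,
+ * tei in bits 8..15 and sapi in the low byte */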
+static u_int
+l2_newid(struct layer2 *l2)
+{
+ u_int id;
+
+ id = l2->next_id++;
+ if (id == 0x7fff)
+ l2->next_id = 1;
+ id <<= 16;
+ id |= l2->tei << 8;
+ id |= l2->sapi;
+ return id;
+}
+
+static void
+l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
+{
+ int err;
+
+ if (!l2->up)
+ return;
+ mISDN_HEAD_PRIM(skb) = prim;
+ mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
+ err = l2->up->send(l2->up, skb);
+ if (err) {
+ printk(KERN_WARNING "%s: err=%d\n", __func__, err);
+ dev_kfree_skb(skb);
+ }
+}
+
+static void
+l2up_create(struct layer2 *l2, u_int prim, int len, void *arg)
+{
+ struct sk_buff *skb;
+ struct mISDNhead *hh;
+ int err;
+
+ if (!l2->up)
+ return;
+ skb = mI_alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ return;
+ hh = mISDN_HEAD_P(skb);
+ hh->prim = prim;
+ hh->id = (l2->ch.nr << 16) | l2->ch.addr;
+ if (len)
+ memcpy(skb_put(skb, len), arg, len);
+ err = l2->up->send(l2->up, skb);
+ if (err) {
+ printk(KERN_WARNING "%s: err=%d\n", __func__, err);
+ dev_kfree_skb(skb);
+ }
+}
+
+static int
+l2down_skb(struct layer2 *l2, struct sk_buff *skb) {
+ int ret;
+
+ ret = l2->ch.recv(l2->ch.peer, skb);
+ if (ret && (*debug & DEBUG_L2_RECV))
+ printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret);
+ return ret;
+}
+
+static int
+l2down_raw(struct layer2 *l2, struct sk_buff *skb)
+{
+ struct mISDNhead *hh = mISDN_HEAD_P(skb);
+
+ if (hh->prim == PH_DATA_REQ) {
+ if (test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
+ skb_queue_tail(&l2->down_queue, skb);
+ return 0;
+ }
+ l2->down_id = mISDN_HEAD_ID(skb);
+ }
+ return l2down_skb(l2, skb);
+}
+
+static int
+l2down(struct layer2 *l2, u_int prim, u_int id, struct sk_buff *skb)
+{
+ struct mISDNhead *hh = mISDN_HEAD_P(skb);
+
+ hh->prim = prim;
+ hh->id = id;
+ return l2down_raw(l2, skb);
+}
+
+static int
+l2down_create(struct layer2 *l2, u_int prim, u_int id, int len, void *arg)
+{
+ struct sk_buff *skb;
+ int err;
+ struct mISDNhead *hh;
+
+ skb = mI_alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+ hh = mISDN_HEAD_P(skb);
+ hh->prim = prim;
+ hh->id = id;
+ if (len)
+ memcpy(skb_put(skb, len), arg, len);
+ err = l2down_raw(l2, skb);
+ if (err)
+ dev_kfree_skb(skb);
+ return err;
+}
+
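+/* PH_DATA_CNF from layer 1: the previous PH_DATA_REQ has been sent, so push
+ * the next frame waiting in down_queue or clear FLG_L1_NOTREADY if none */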
+static int
+ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
+ struct sk_buff *nskb = skb;
+ int ret = -EAGAIN;
+
+ if (test_bit(FLG_L1_NOTREADY, &l2->flag)) {
+ if (hh->id == l2->down_id) {
+ nskb = skb_dequeue(&l2->down_queue);
+ if (nskb) {
+ l2->down_id = mISDN_HEAD_ID(nskb);
+ if (l2down_skb(l2, nskb)) {
+ dev_kfree_skb(nskb);
+ l2->down_id = MISDN_ID_NONE;
+ }
+ } else
+ l2->down_id = MISDN_ID_NONE;
+ if (ret) {
+ dev_kfree_skb(skb);
+ ret = 0;
+ }
+ if (l2->down_id == MISDN_ID_NONE) {
+ test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
+ mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
+ }
+ }
+ }
+ if (!test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
+ nskb = skb_dequeue(&l2->down_queue);
+ if (nskb) {
+ l2->down_id = mISDN_HEAD_ID(nskb);
+ if (l2down_skb(l2, nskb)) {
+ dev_kfree_skb(nskb);
+ l2->down_id = MISDN_ID_NONE;
+ test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
+ }
+ } else
+ test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
+ }
+ return ret;
+}
+
+static int
+l2mgr(struct layer2 *l2, u_int prim, void *arg) {
+ long c = (long)arg;
+
+ printk(KERN_WARNING
+ "l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c);
+ if (test_bit(FLG_LAPD, &l2->flag) &&
+ !test_bit(FLG_FIXED_TEI, &l2->flag)) {
+ switch (c) {
+ case 'C':
+ case 'D':
+ case 'G':
+ case 'H':
+ l2_tei(l2, prim, (u_long)arg);
+ break;
+ }
+ }
+ return 0;
+}
+
+static void
+set_peer_busy(struct layer2 *l2) {
+ test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
+ if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue))
+ test_and_set_bit(FLG_L2BLOCK, &l2->flag);
+}
+
+static void
+clear_peer_busy(struct layer2 *l2) {
+ if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
+ test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
+}
+
+static void
+InitWin(struct layer2 *l2)
+{
+ int i;
+
+ for (i = 0; i < MAX_WINDOW; i++)
+ l2->windowar[i] = NULL;
+}
+
+static int
+freewin(struct layer2 *l2)
+{
+ int i, cnt = 0;
+
+ for (i = 0; i < MAX_WINDOW; i++) {
+ if (l2->windowar[i]) {
+ cnt++;
+ dev_kfree_skb(l2->windowar[i]);
+ l2->windowar[i] = NULL;
+ }
+ }
+ return cnt;
+}
+
+static void
+ReleaseWin(struct layer2 *l2)
+{
+ int cnt = freewin(l2);
+
+ if (cnt)
+ printk(KERN_WARNING
+ "isdnl2 freed %d skbuffs in release\n", cnt);
+}
+
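+/* a new I-frame may be sent while fewer than 'window' frames are
+ * unacknowledged and the peer has not signalled receiver busy */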
+inline unsigned int
+cansend(struct layer2 *l2)
+{
+ unsigned int p1;
+
+ if (test_bit(FLG_MOD128, &l2->flag))
+ p1 = (l2->vs - l2->va) % 128;
+ else
+ p1 = (l2->vs - l2->va) % 8;
+ return (p1 < l2->window) && !test_bit(FLG_PEER_BUSY, &l2->flag);
+}
+
+inline void
+clear_exception(struct layer2 *l2)
+{
+ test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
+ test_and_clear_bit(FLG_REJEXC, &l2->flag);
+ test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
+ clear_peer_busy(l2);
+}
+
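+/* write the address field: sapi/tei (2 octets) for LAPD, the A or B address
+ * octet for LAPB; returns the number of octets written */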
+static int
+sethdraddr(struct layer2 *l2, u_char *header, int rsp)
+{
+ u_char *ptr = header;
+ int crbit = rsp;
+
+ if (test_bit(FLG_LAPD, &l2->flag)) {
+ if (test_bit(FLG_LAPD_NET, &l2->flag))
+ crbit = !crbit;
+ *ptr++ = (l2->sapi << 2) | (crbit ? 2 : 0);
+ *ptr++ = (l2->tei << 1) | 1;
+ return 2;
+ } else {
+ if (test_bit(FLG_ORIG, &l2->flag))
+ crbit = !crbit;
+ if (crbit)
+ *ptr++ = l2->addr.B;
+ else
+ *ptr++ = l2->addr.A;
+ return 1;
+ }
+}
+
+static inline void
+enqueue_super(struct layer2 *l2, struct sk_buff *skb)
+{
+ if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
+ dev_kfree_skb(skb);
+}
+
+static inline void
+enqueue_ui(struct layer2 *l2, struct sk_buff *skb)
+{
+ if (l2->tm)
+ l2_tei(l2, MDL_STATUS_UI_IND, 0);
+ if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
+ dev_kfree_skb(skb);
+}
+
+inline int
+IsUI(u_char *data)
+{
+ return (data[0] & 0xef) == UI;
+}
+
+inline int
+IsUA(u_char *data)
+{
+ return (data[0] & 0xef) == UA;
+}
+
+inline int
+IsDM(u_char *data)
+{
+ return (data[0] & 0xef) == DM;
+}
+
+inline int
+IsDISC(u_char *data)
+{
+ return (data[0] & 0xef) == DISC;
+}
+
+inline int
+IsRR(u_char *data, struct layer2 *l2)
+{
+ if (test_bit(FLG_MOD128, &l2->flag))
+ return data[0] == RR;
+ else
+ return (data[0] & 0xf) == 1;
+}
+
+inline int
+IsSFrame(u_char *data, struct layer2 *l2)
+{
+ register u_char d = *data;
+
+ if (!test_bit(FLG_MOD128, &l2->flag))
+ d &= 0xf;
+ return ((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c);
+}
+
+inline int
+IsSABME(u_char *data, struct layer2 *l2)
+{
+ u_char d = data[0] & ~0x10;
+
+ return test_bit(FLG_MOD128, &l2->flag) ? d == SABME : d == SABM;
+}
+
+inline int
+IsREJ(u_char *data, struct layer2 *l2)
+{
+ return test_bit(FLG_MOD128, &l2->flag) ?
+ data[0] == REJ : (data[0] & 0xf) == REJ;
+}
+
+inline int
+IsFRMR(u_char *data)
+{
+ return (data[0] & 0xef) == FRMR;
+}
+
+inline int
+IsRNR(u_char *data, struct layer2 *l2)
+{
+ return test_bit(FLG_MOD128, &l2->flag) ?
+ data[0] == RNR : (data[0] & 0xf) == RNR;
+}
+
+int
+iframe_error(struct layer2 *l2, struct sk_buff *skb)
+{
+ u_int i;
+ int rsp = *skb->data & 0x2;
+
+ i = l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1);
+ if (test_bit(FLG_ORIG, &l2->flag))
+ rsp = !rsp;
+ if (rsp)
+ return 'L';
+ if (skb->len < i)
+ return 'N';
+ if ((skb->len - i) > l2->maxlen)
+ return 'O';
+ return 0;
+}
+
+int
+super_error(struct layer2 *l2, struct sk_buff *skb)
+{
+ if (skb->len != l2addrsize(l2) +
+ (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1))
+ return 'N';
+ return 0;
+}
+
+int
+unnum_error(struct layer2 *l2, struct sk_buff *skb, int wantrsp)
+{
+ int rsp = (*skb->data & 0x2) >> 1;
+ if (test_bit(FLG_ORIG, &l2->flag))
+ rsp = !rsp;
+ if (rsp != wantrsp)
+ return 'L';
+ if (skb->len != l2addrsize(l2) + 1)
+ return 'N';
+ return 0;
+}
+
+int
+UI_error(struct layer2 *l2, struct sk_buff *skb)
+{
+ int rsp = *skb->data & 0x2;
+ if (test_bit(FLG_ORIG, &l2->flag))
+ rsp = !rsp;
+ if (rsp)
+ return 'L';
+ if (skb->len > l2->maxlen + l2addrsize(l2) + 1)
+ return 'O';
+ return 0;
+}
+
+int
+FRMR_error(struct layer2 *l2, struct sk_buff *skb)
+{
+ u_int headers = l2addrsize(l2) + 1;
+ u_char *datap = skb->data + headers;
+ int rsp = *skb->data & 0x2;
+
+ if (test_bit(FLG_ORIG, &l2->flag))
+ rsp = !rsp;
+ if (!rsp)
+ return 'L';
+ if (test_bit(FLG_MOD128, &l2->flag)) {
+ if (skb->len < headers + 5)
+ return 'N';
+ else if (*debug & DEBUG_L2)
+ l2m_debug(&l2->l2m,
+ "FRMR information %2x %2x %2x %2x %2x",
+ datap[0], datap[1], datap[2], datap[3], datap[4]);
+ } else {
+ if (skb->len < headers + 3)
+ return 'N';
+ else if (*debug & DEBUG_L2)
+ l2m_debug(&l2->l2m,
+ "FRMR information %2x %2x %2x",
+ datap[0], datap[1], datap[2]);
+ }
+ return 0;
+}
+
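+/* a received N(R) is valid if it lies between V(A) and V(S) in the
+ * modulo 8/128 sequence space */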
+static unsigned int
+legalnr(struct layer2 *l2, unsigned int nr)
+{
+ if (test_bit(FLG_MOD128, &l2->flag))
+ return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
+ else
+ return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
+}
+
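+/* advance V(A) to the acknowledged N(R) and release the acknowledged
+ * I-frames from the transmit window */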
+static void
+setva(struct layer2 *l2, unsigned int nr)
+{
+ struct sk_buff *skb;
+
+ while (l2->va != nr) {
+ l2->va++;
+ if (test_bit(FLG_MOD128, &l2->flag))
+ l2->va %= 128;
+ else
+ l2->va %= 8;
+ if (l2->windowar[l2->sow]) {
+ skb_trim(l2->windowar[l2->sow], 0);
+ skb_queue_tail(&l2->tmp_queue, l2->windowar[l2->sow]);
+ l2->windowar[l2->sow] = NULL;
+ }
+ l2->sow = (l2->sow + 1) % l2->window;
+ }
+ skb = skb_dequeue(&l2->tmp_queue);
+ while (skb) {
+ dev_kfree_skb(skb);
+ skb = skb_dequeue(&l2->tmp_queue);
+ }
+}
+
+static void
+send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
+{
+ u_char tmp[MAX_L2HEADER_LEN];
+ int i;
+
+ i = sethdraddr(l2, tmp, cr);
+ tmp[i++] = cmd;
+ if (skb)
+ skb_trim(skb, 0);
+ else {
+ skb = mI_alloc_skb(i, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_WARNING "%s: can't alloc skbuff\n",
+ __func__);
+ return;
+ }
+ }
+ memcpy(skb_put(skb, i), tmp, i);
+ enqueue_super(l2, skb);
+}
+
+
+inline u_char
+get_PollFlag(struct layer2 *l2, struct sk_buff *skb)
+{
+ return skb->data[l2addrsize(l2)] & 0x10;
+}
+
+inline u_char
+get_PollFlagFree(struct layer2 *l2, struct sk_buff *skb)
+{
+ u_char PF;
+
+ PF = get_PollFlag(l2, skb);
+ dev_kfree_skb(skb);
+ return PF;
+}
+
+inline void
+start_t200(struct layer2 *l2, int i)
+{
+ mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
+ test_and_set_bit(FLG_T200_RUN, &l2->flag);
+}
+
+inline void
+restart_t200(struct layer2 *l2, int i)
+{
+ mISDN_FsmRestartTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
+ test_and_set_bit(FLG_T200_RUN, &l2->flag);
+}
+
+inline void
+stop_t200(struct layer2 *l2, int i)
+{
+ if (test_and_clear_bit(FLG_T200_RUN, &l2->flag))
+ mISDN_FsmDelTimer(&l2->t200, i);
+}
+
+inline void
+st5_dl_release_l2l3(struct layer2 *l2)
+{
+ int pr;
+
+ if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
+ pr = DL_RELEASE_CNF;
+ else
+ pr = DL_RELEASE_IND;
+ l2up_create(l2, pr, 0, NULL);
+}
+
+inline void
+lapb_dl_release_l2l3(struct layer2 *l2, int f)
+{
+ if (test_bit(FLG_LAPB, &l2->flag))
+ l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL);
+ l2up_create(l2, f, 0, NULL);
+}
+
+static void
+establishlink(struct FsmInst *fi)
+{
+ struct layer2 *l2 = fi->userdata;
+ u_char cmd;
+
+ clear_exception(l2);
+ l2->rc = 0;
+ cmd = (test_bit(FLG_MOD128, &l2->flag) ? SABME : SABM) | 0x10;
+ send_uframe(l2, NULL, cmd, CMD);
+ mISDN_FsmDelTimer(&l2->t203, 1);
+ restart_t200(l2, 1);
+ test_and_clear_bit(FLG_PEND_REL, &l2->flag);
+ freewin(l2);
+ mISDN_FsmChangeState(fi, ST_L2_5);
+}
+
+static void
+l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
+{
+ struct sk_buff *skb = arg;
+ struct layer2 *l2 = fi->userdata;
+
+ if (get_PollFlagFree(l2, skb))
+ l2mgr(l2, MDL_ERROR_IND, (void *) 'C');
+ else
+ l2mgr(l2, MDL_ERROR_IND, (void *) 'D');
+
+}
+
+static void
+l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
+{
+ struct sk_buff *skb = arg;
+ struct layer2 *l2 = fi->userdata;
+
+ if (get_PollFlagFree(l2, skb))
+ l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
+ else {
+ l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
+ establishlink(fi);
+ test_and_clear_bit(FLG_L3_INIT, &l2->flag);
+ }
+}
+
+static void
+l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
+{
+ struct sk_buff *skb = arg;
+ struct layer2 *l2 = fi->userdata;
+
+ if (get_PollFlagFree(l2, skb))
+ l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
+ else
+ l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
+ establishlink(fi);
+ test_and_clear_bit(FLG_L3_INIT, &l2->flag);
+}
+
+static void
+l2_go_st3(struct FsmInst *fi, int event, void *arg)
+{
+ dev_kfree_skb((struct sk_buff *)arg);
+ mISDN_FsmChangeState(fi, ST_L2_3);
+}
+
+static void
+l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+
+ mISDN_FsmChangeState(fi, ST_L2_3);
+ dev_kfree_skb((struct sk_buff *)arg);
+ l2_tei(l2, MDL_ASSIGN_IND, 0);
+}
+
+static void
+l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ skb_queue_tail(&l2->ui_queue, skb);
+ mISDN_FsmChangeState(fi, ST_L2_2);
+ l2_tei(l2, MDL_ASSIGN_IND, 0);
+}
+
+static void
+l2_queue_ui(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ skb_queue_tail(&l2->ui_queue, skb);
+}
+
+static void
+tx_ui(struct layer2 *l2)
+{
+ struct sk_buff *skb;
+ u_char header[MAX_L2HEADER_LEN];
+ int i;
+
+ i = sethdraddr(l2, header, CMD);
+ if (test_bit(FLG_LAPD_NET, &l2->flag))
+ header[1] = 0xff; /* tei 127 */
+ header[i++] = UI;
+ while ((skb = skb_dequeue(&l2->ui_queue))) {
+ memcpy(skb_push(skb, i), header, i);
+ enqueue_ui(l2, skb);
+ }
+}
+
+static void
+l2_send_ui(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ skb_queue_tail(&l2->ui_queue, skb);
+ tx_ui(l2);
+}
+
+static void
+l2_got_ui(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ skb_pull(skb, l2headersize(l2, 1));
+/*
+ * in states 1-3 for broadcast
+ */
+
+ if (l2->tm)
+ l2_tei(l2, MDL_STATUS_UI_IND, 0);
+ l2up(l2, DL_UNITDATA_IND, skb);
+}
+
+static void
+l2_establish(struct FsmInst *fi, int event, void *arg)
+{
+ struct sk_buff *skb = arg;
+ struct layer2 *l2 = fi->userdata;
+
+ establishlink(fi);
+ test_and_set_bit(FLG_L3_INIT, &l2->flag);
+ dev_kfree_skb(skb);
+}
+
+static void
+l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
+{
+ struct sk_buff *skb = arg;
+ struct layer2 *l2 = fi->userdata;
+
+ skb_queue_purge(&l2->i_queue);
+ test_and_set_bit(FLG_L3_INIT, &l2->flag);
+ test_and_clear_bit(FLG_PEND_REL, &l2->flag);
+ dev_kfree_skb(skb);
+}
+
+static void
+l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
+{
+ struct sk_buff *skb = arg;
+ struct layer2 *l2 = fi->userdata;
+
+ skb_queue_purge(&l2->i_queue);
+ establishlink(fi);
+ test_and_set_bit(FLG_L3_INIT, &l2->flag);
+ dev_kfree_skb(skb);
+}
+
+static void
+l2_release(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ skb_trim(skb, 0);
+ l2up(l2, DL_RELEASE_CNF, skb);
+}
+
+static void
+l2_pend_rel(struct FsmInst *fi, int event, void *arg)
+{
+ struct sk_buff *skb = arg;
+ struct layer2 *l2 = fi->userdata;
+
+ test_and_set_bit(FLG_PEND_REL, &l2->flag);
+ dev_kfree_skb(skb);
+}
+
+static void
+l2_disconnect(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ skb_queue_purge(&l2->i_queue);
+ freewin(l2);
+ mISDN_FsmChangeState(fi, ST_L2_6);
+ l2->rc = 0;
+ send_uframe(l2, NULL, DISC | 0x10, CMD);
+ mISDN_FsmDelTimer(&l2->t203, 1);
+ restart_t200(l2, 2);
+ if (skb)
+ dev_kfree_skb(skb);
+}
+
+static void
+l2_start_multi(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ l2->vs = 0;
+ l2->va = 0;
+ l2->vr = 0;
+ l2->sow = 0;
+ clear_exception(l2);
+ send_uframe(l2, NULL, UA | get_PollFlag(l2, skb), RSP);
+ mISDN_FsmChangeState(fi, ST_L2_7);
+ mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
+ skb_trim(skb, 0);
+ l2up(l2, DL_ESTABLISH_IND, skb);
+ if (l2->tm)
+ l2_tei(l2, MDL_STATUS_UP_IND, 0);
+}
+
+static void
+l2_send_UA(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
+}
+
+static void
+l2_send_DM(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ send_uframe(l2, skb, DM | get_PollFlag(l2, skb), RSP);
+}
+
+static void
+l2_restart_multi(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+ int est = 0;
+
+ send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
+
+ l2mgr(l2, MDL_ERROR_IND, (void *) 'F');
+
+ if (l2->vs != l2->va) {
+ skb_queue_purge(&l2->i_queue);
+ est = 1;
+ }
+
+ clear_exception(l2);
+ l2->vs = 0;
+ l2->va = 0;
+ l2->vr = 0;
+ l2->sow = 0;
+ mISDN_FsmChangeState(fi, ST_L2_7);
+ stop_t200(l2, 3);
+ mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
+
+ if (est)
+ l2up_create(l2, DL_ESTABLISH_IND, 0, NULL);
+/* mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
+ * MGR_SHORTSTATUS | INDICATION, SSTATUS_L2_ESTABLISHED,
+ * 0, NULL, 0);
+ */
+ if (skb_queue_len(&l2->i_queue) && cansend(l2))
+ mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
+}
+
+static void
+l2_stop_multi(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ mISDN_FsmChangeState(fi, ST_L2_4);
+ mISDN_FsmDelTimer(&l2->t203, 3);
+ stop_t200(l2, 4);
+
+ send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
+ skb_queue_purge(&l2->i_queue);
+ freewin(l2);
+ lapb_dl_release_l2l3(l2, DL_RELEASE_IND);
+ if (l2->tm)
+ l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
+}
+
+static void
+l2_connected(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+ int pr = -1;
+
+ if (!get_PollFlag(l2, skb)) {
+ l2_mdl_error_ua(fi, event, arg);
+ return;
+ }
+ dev_kfree_skb(skb);
+ if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
+ l2_disconnect(fi, event, NULL);
+ if (test_and_clear_bit(FLG_L3_INIT, &l2->flag)) {
+ pr = DL_ESTABLISH_CNF;
+ } else if (l2->vs != l2->va) {
+ skb_queue_purge(&l2->i_queue);
+ pr = DL_ESTABLISH_IND;
+ }
+ stop_t200(l2, 5);
+ l2->vr = 0;
+ l2->vs = 0;
+ l2->va = 0;
+ l2->sow = 0;
+ mISDN_FsmChangeState(fi, ST_L2_7);
+ mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 4);
+ if (pr != -1)
+ l2up_create(l2, pr, 0, NULL);
+
+ if (skb_queue_len(&l2->i_queue) && cansend(l2))
+ mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
+
+ if (l2->tm)
+ l2_tei(l2, MDL_STATUS_UP_IND, 0);
+}
+
+static void
+l2_released(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ if (!get_PollFlag(l2, skb)) {
+ l2_mdl_error_ua(fi, event, arg);
+ return;
+ }
+ dev_kfree_skb(skb);
+ stop_t200(l2, 6);
+ lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
+ mISDN_FsmChangeState(fi, ST_L2_4);
+ if (l2->tm)
+ l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
+}
+
+static void
+l2_reestablish(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ if (!get_PollFlagFree(l2, skb)) {
+ establishlink(fi);
+ test_and_set_bit(FLG_L3_INIT, &l2->flag);
+ }
+}
+
+static void
+l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ if (get_PollFlagFree(l2, skb)) {
+ stop_t200(l2, 7);
+ if (!test_bit(FLG_L3_INIT, &l2->flag))
+ skb_queue_purge(&l2->i_queue);
+ if (test_bit(FLG_LAPB, &l2->flag))
+ l2down_create(l2, PH_DEACTIVATE_REQ,
+ l2_newid(l2), 0, NULL);
+ st5_dl_release_l2l3(l2);
+ mISDN_FsmChangeState(fi, ST_L2_4);
+ if (l2->tm)
+ l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
+ }
+}
+
+static void
+l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ if (get_PollFlagFree(l2, skb)) {
+ stop_t200(l2, 8);
+ lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
+ mISDN_FsmChangeState(fi, ST_L2_4);
+ if (l2->tm)
+ l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
+ }
+}
+
+void
+enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
+{
+ struct sk_buff *skb;
+ u_char tmp[MAX_L2HEADER_LEN];
+ int i;
+
+ i = sethdraddr(l2, tmp, cr);
+ if (test_bit(FLG_MOD128, &l2->flag)) {
+ tmp[i++] = typ;
+ tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
+ } else
+ tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
+ skb = mI_alloc_skb(i, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_WARNING
+		    "isdnl2 can't alloc skbuff for enquiry_cr\n");
+ return;
+ }
+ memcpy(skb_put(skb, i), tmp, i);
+ enqueue_super(l2, skb);
+}
+
+inline void
+enquiry_response(struct layer2 *l2)
+{
+ if (test_bit(FLG_OWN_BUSY, &l2->flag))
+ enquiry_cr(l2, RNR, RSP, 1);
+ else
+ enquiry_cr(l2, RR, RSP, 1);
+ test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
+}
+
+inline void
+transmit_enquiry(struct layer2 *l2)
+{
+ if (test_bit(FLG_OWN_BUSY, &l2->flag))
+ enquiry_cr(l2, RNR, CMD, 1);
+ else
+ enquiry_cr(l2, RR, CMD, 1);
+ test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
+ start_t200(l2, 9);
+}
+
+
+static void
+nrerrorrecovery(struct FsmInst *fi)
+{
+ struct layer2 *l2 = fi->userdata;
+
+ l2mgr(l2, MDL_ERROR_IND, (void *) 'J');
+ establishlink(fi);
+ test_and_clear_bit(FLG_L3_INIT, &l2->flag);
+}
+
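+/* roll V(S) back to the acknowledged N(R) and requeue the unacknowledged
+ * I-frames from the transmit window for retransmission */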
+static void
+invoke_retransmission(struct layer2 *l2, unsigned int nr)
+{
+ u_int p1;
+
+ if (l2->vs != nr) {
+ while (l2->vs != nr) {
+ (l2->vs)--;
+ if (test_bit(FLG_MOD128, &l2->flag)) {
+ l2->vs %= 128;
+ p1 = (l2->vs - l2->va) % 128;
+ } else {
+ l2->vs %= 8;
+ p1 = (l2->vs - l2->va) % 8;
+ }
+ p1 = (p1 + l2->sow) % l2->window;
+ if (l2->windowar[p1])
+ skb_queue_head(&l2->i_queue, l2->windowar[p1]);
+ else
+ printk(KERN_WARNING
+ "%s: windowar[%d] is NULL\n",
+ __func__, p1);
+ l2->windowar[p1] = NULL;
+ }
+ mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
+ }
+}
+
+static void
+l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+ int PollFlag, rsp, typ = RR;
+ unsigned int nr;
+
+ rsp = *skb->data & 0x2;
+ if (test_bit(FLG_ORIG, &l2->flag))
+ rsp = !rsp;
+
+ skb_pull(skb, l2addrsize(l2));
+ if (IsRNR(skb->data, l2)) {
+ set_peer_busy(l2);
+ typ = RNR;
+ } else
+ clear_peer_busy(l2);
+ if (IsREJ(skb->data, l2))
+ typ = REJ;
+
+ if (test_bit(FLG_MOD128, &l2->flag)) {
+ PollFlag = (skb->data[1] & 0x1) == 0x1;
+ nr = skb->data[1] >> 1;
+ } else {
+ PollFlag = (skb->data[0] & 0x10);
+ nr = (skb->data[0] >> 5) & 0x7;
+ }
+ dev_kfree_skb(skb);
+
+ if (PollFlag) {
+ if (rsp)
+ l2mgr(l2, MDL_ERROR_IND, (void *) 'A');
+ else
+ enquiry_response(l2);
+ }
+ if (legalnr(l2, nr)) {
+ if (typ == REJ) {
+ setva(l2, nr);
+ invoke_retransmission(l2, nr);
+ stop_t200(l2, 10);
+ if (mISDN_FsmAddTimer(&l2->t203, l2->T203,
+ EV_L2_T203, NULL, 6))
+ l2m_debug(&l2->l2m, "Restart T203 ST7 REJ");
+ } else if ((nr == l2->vs) && (typ == RR)) {
+ setva(l2, nr);
+ stop_t200(l2, 11);
+ mISDN_FsmRestartTimer(&l2->t203, l2->T203,
+ EV_L2_T203, NULL, 7);
+ } else if ((l2->va != nr) || (typ == RNR)) {
+ setva(l2, nr);
+ if (typ != RR)
+ mISDN_FsmDelTimer(&l2->t203, 9);
+ restart_t200(l2, 12);
+ }
+ if (skb_queue_len(&l2->i_queue) && (typ == RR))
+ mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
+ } else
+ nrerrorrecovery(fi);
+}
+
+static void
+l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ if (!test_bit(FLG_L3_INIT, &l2->flag))
+ skb_queue_tail(&l2->i_queue, skb);
+ else
+ dev_kfree_skb(skb);
+}
+
+static void
+l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ skb_queue_tail(&l2->i_queue, skb);
+ mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
+}
+
+static void
+l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ skb_queue_tail(&l2->i_queue, skb);
+}
+
+static void
+l2_got_iframe(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+ int PollFlag, i;
+ u_int ns, nr;
+
+ i = l2addrsize(l2);
+ if (test_bit(FLG_MOD128, &l2->flag)) {
+ PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
+ ns = skb->data[i] >> 1;
+ nr = (skb->data[i + 1] >> 1) & 0x7f;
+ } else {
+ PollFlag = (skb->data[i] & 0x10);
+ ns = (skb->data[i] >> 1) & 0x7;
+ nr = (skb->data[i] >> 5) & 0x7;
+ }
+ if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
+ dev_kfree_skb(skb);
+ if (PollFlag)
+ enquiry_response(l2);
+ } else {
+ if (l2->vr == ns) {
+ l2->vr++;
+ if (test_bit(FLG_MOD128, &l2->flag))
+ l2->vr %= 128;
+ else
+ l2->vr %= 8;
+ test_and_clear_bit(FLG_REJEXC, &l2->flag);
+ if (PollFlag)
+ enquiry_response(l2);
+ else
+ test_and_set_bit(FLG_ACK_PEND, &l2->flag);
+ skb_pull(skb, l2headersize(l2, 0));
+ l2up(l2, DL_DATA_IND, skb);
+ } else {
+ /* n(s)!=v(r) */
+ dev_kfree_skb(skb);
+ if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
+ if (PollFlag)
+ enquiry_response(l2);
+ } else {
+ enquiry_cr(l2, REJ, RSP, PollFlag);
+ test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
+ }
+ }
+ }
+ if (legalnr(l2, nr)) {
+ if (!test_bit(FLG_PEER_BUSY, &l2->flag) &&
+ (fi->state == ST_L2_7)) {
+ if (nr == l2->vs) {
+ stop_t200(l2, 13);
+ mISDN_FsmRestartTimer(&l2->t203, l2->T203,
+ EV_L2_T203, NULL, 7);
+ } else if (nr != l2->va)
+ restart_t200(l2, 14);
+ }
+ setva(l2, nr);
+ } else {
+ nrerrorrecovery(fi);
+ return;
+ }
+ if (skb_queue_len(&l2->i_queue) && (fi->state == ST_L2_7))
+ mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
+ if (test_and_clear_bit(FLG_ACK_PEND, &l2->flag))
+ enquiry_cr(l2, RR, RSP, 0);
+}
+
+static void
+l2_got_tei(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ u_int info;
+
+ l2->tei = (signed char)(long)arg;
+ set_channel_address(&l2->ch, l2->sapi, l2->tei);
+ info = DL_INFO_L2_CONNECT;
+ l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info);
+ if (fi->state == ST_L2_3) {
+ establishlink(fi);
+ test_and_set_bit(FLG_L3_INIT, &l2->flag);
+ } else
+ mISDN_FsmChangeState(fi, ST_L2_4);
+ if (skb_queue_len(&l2->ui_queue))
+ tx_ui(l2);
+}
+
+static void
+l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+
+ if (test_bit(FLG_LAPD, &l2->flag) &&
+ test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
+ mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
+ } else if (l2->rc == l2->N200) {
+ mISDN_FsmChangeState(fi, ST_L2_4);
+ test_and_clear_bit(FLG_T200_RUN, &l2->flag);
+ skb_queue_purge(&l2->i_queue);
+ l2mgr(l2, MDL_ERROR_IND, (void *) 'G');
+ if (test_bit(FLG_LAPB, &l2->flag))
+ l2down_create(l2, PH_DEACTIVATE_REQ,
+ l2_newid(l2), 0, NULL);
+ st5_dl_release_l2l3(l2);
+ if (l2->tm)
+ l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
+ } else {
+ l2->rc++;
+ mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
+ send_uframe(l2, NULL, (test_bit(FLG_MOD128, &l2->flag) ?
+ SABME : SABM) | 0x10, CMD);
+ }
+}
+
+static void
+l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+
+ if (test_bit(FLG_LAPD, &l2->flag) &&
+ test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
+ mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
+ } else if (l2->rc == l2->N200) {
+ mISDN_FsmChangeState(fi, ST_L2_4);
+ test_and_clear_bit(FLG_T200_RUN, &l2->flag);
+ l2mgr(l2, MDL_ERROR_IND, (void *) 'H');
+ lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
+ if (l2->tm)
+ l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
+ } else {
+ l2->rc++;
+ mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200,
+ NULL, 9);
+ send_uframe(l2, NULL, DISC | 0x10, CMD);
+ }
+}
+
+static void
+l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+
+ if (test_bit(FLG_LAPD, &l2->flag) &&
+ test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
+ mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
+ return;
+ }
+ test_and_clear_bit(FLG_T200_RUN, &l2->flag);
+ l2->rc = 0;
+ mISDN_FsmChangeState(fi, ST_L2_8);
+ transmit_enquiry(l2);
+ l2->rc++;
+}
+
+static void
+l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+
+ if (test_bit(FLG_LAPD, &l2->flag) &&
+ test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
+ mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
+ return;
+ }
+ test_and_clear_bit(FLG_T200_RUN, &l2->flag);
+ if (l2->rc == l2->N200) {
+ l2mgr(l2, MDL_ERROR_IND, (void *) 'I');
+ establishlink(fi);
+ test_and_clear_bit(FLG_L3_INIT, &l2->flag);
+ } else {
+ transmit_enquiry(l2);
+ l2->rc++;
+ }
+}
+
+static void
+l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+
+ if (test_bit(FLG_LAPD, &l2->flag) &&
+ test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
+ mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 9);
+ return;
+ }
+ mISDN_FsmChangeState(fi, ST_L2_8);
+ transmit_enquiry(l2);
+ l2->rc = 0;
+}
+
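+/* take the next I-frame from i_queue, keep the original in the transmit
+ * window and send a copy with the current V(S)/V(R) header down to layer 1 */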
+static void
+l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb, *nskb, *oskb;
+ u_char header[MAX_L2HEADER_LEN];
+ u_int i, p1;
+
+ if (!cansend(l2))
+ return;
+
+ skb = skb_dequeue(&l2->i_queue);
+ if (!skb)
+ return;
+
+ if (test_bit(FLG_MOD128, &l2->flag))
+ p1 = (l2->vs - l2->va) % 128;
+ else
+ p1 = (l2->vs - l2->va) % 8;
+ p1 = (p1 + l2->sow) % l2->window;
+ if (l2->windowar[p1]) {
+		printk(KERN_WARNING "isdnl2 trying to overwrite ack queue entry %d\n",
+ p1);
+ dev_kfree_skb(l2->windowar[p1]);
+ }
+ l2->windowar[p1] = skb;
+ i = sethdraddr(l2, header, CMD);
+ if (test_bit(FLG_MOD128, &l2->flag)) {
+ header[i++] = l2->vs << 1;
+ header[i++] = l2->vr << 1;
+ l2->vs = (l2->vs + 1) % 128;
+ } else {
+ header[i++] = (l2->vr << 5) | (l2->vs << 1);
+ l2->vs = (l2->vs + 1) % 8;
+ }
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ p1 = skb_headroom(nskb);
+ if (p1 >= i)
+ memcpy(skb_push(nskb, i), header, i);
+ else {
+ printk(KERN_WARNING
+ "isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
+ oskb = nskb;
+ nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
+ if (!nskb) {
+ dev_kfree_skb(oskb);
+ printk(KERN_WARNING "%s: no skb mem\n", __func__);
+ return;
+ }
+ memcpy(skb_put(nskb, i), header, i);
+ memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
+ dev_kfree_skb(oskb);
+ }
+ l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
+ test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
+ if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
+ mISDN_FsmDelTimer(&l2->t203, 13);
+ mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 11);
+ }
+}
+
+static void
+l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+ int PollFlag, rsp, rnr = 0;
+ unsigned int nr;
+
+ rsp = *skb->data & 0x2;
+ if (test_bit(FLG_ORIG, &l2->flag))
+ rsp = !rsp;
+
+ skb_pull(skb, l2addrsize(l2));
+
+ if (IsRNR(skb->data, l2)) {
+ set_peer_busy(l2);
+ rnr = 1;
+ } else
+ clear_peer_busy(l2);
+
+ if (test_bit(FLG_MOD128, &l2->flag)) {
+ PollFlag = (skb->data[1] & 0x1) == 0x1;
+ nr = skb->data[1] >> 1;
+ } else {
+ PollFlag = (skb->data[0] & 0x10);
+ nr = (skb->data[0] >> 5) & 0x7;
+ }
+ dev_kfree_skb(skb);
+ if (rsp && PollFlag) {
+ if (legalnr(l2, nr)) {
+ if (rnr) {
+ restart_t200(l2, 15);
+ } else {
+ stop_t200(l2, 16);
+ mISDN_FsmAddTimer(&l2->t203, l2->T203,
+ EV_L2_T203, NULL, 5);
+ setva(l2, nr);
+ }
+ invoke_retransmission(l2, nr);
+ mISDN_FsmChangeState(fi, ST_L2_7);
+ if (skb_queue_len(&l2->i_queue) && cansend(l2))
+ mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
+ } else
+ nrerrorrecovery(fi);
+ } else {
+ if (!rsp && PollFlag)
+ enquiry_response(l2);
+ if (legalnr(l2, nr))
+ setva(l2, nr);
+ else
+ nrerrorrecovery(fi);
+ }
+}
+
+static void
+l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ skb_pull(skb, l2addrsize(l2) + 1);
+
+ if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */
+ (IsUA(skb->data) && (fi->state == ST_L2_7))) {
+ l2mgr(l2, MDL_ERROR_IND, (void *) 'K');
+ establishlink(fi);
+ test_and_clear_bit(FLG_L3_INIT, &l2->flag);
+ }
+ dev_kfree_skb(skb);
+}
+
+static void
+l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+
+ skb_queue_purge(&l2->ui_queue);
+ l2->tei = GROUP_TEI;
+ mISDN_FsmChangeState(fi, ST_L2_1);
+}
+
+static void
+l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+
+ skb_queue_purge(&l2->ui_queue);
+ l2->tei = GROUP_TEI;
+ l2up_create(l2, DL_RELEASE_IND, 0, NULL);
+ mISDN_FsmChangeState(fi, ST_L2_1);
+}
+
+static void
+l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+
+ skb_queue_purge(&l2->i_queue);
+ skb_queue_purge(&l2->ui_queue);
+ freewin(l2);
+ l2->tei = GROUP_TEI;
+ stop_t200(l2, 17);
+ st5_dl_release_l2l3(l2);
+ mISDN_FsmChangeState(fi, ST_L2_1);
+}
+
+static void
+l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+
+ skb_queue_purge(&l2->ui_queue);
+ l2->tei = GROUP_TEI;
+ stop_t200(l2, 18);
+ l2up_create(l2, DL_RELEASE_IND, 0, NULL);
+ mISDN_FsmChangeState(fi, ST_L2_1);
+}
+
+static void
+l2_tei_remove(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+
+ skb_queue_purge(&l2->i_queue);
+ skb_queue_purge(&l2->ui_queue);
+ freewin(l2);
+ l2->tei = GROUP_TEI;
+ stop_t200(l2, 17);
+ mISDN_FsmDelTimer(&l2->t203, 19);
+ l2up_create(l2, DL_RELEASE_IND, 0, NULL);
+/* mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
+ * MGR_SHORTSTATUS_IND, SSTATUS_L2_RELEASED,
+ * 0, NULL, 0);
+ */
+ mISDN_FsmChangeState(fi, ST_L2_1);
+}
+
+static void
+l2_st14_persistant_da(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ skb_queue_purge(&l2->i_queue);
+ skb_queue_purge(&l2->ui_queue);
+ if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
+ l2up(l2, DL_RELEASE_IND, skb);
+ else
+ dev_kfree_skb(skb);
+}
+
+static void
+l2_st5_persistant_da(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ skb_queue_purge(&l2->i_queue);
+ skb_queue_purge(&l2->ui_queue);
+ freewin(l2);
+ stop_t200(l2, 19);
+ st5_dl_release_l2l3(l2);
+ mISDN_FsmChangeState(fi, ST_L2_4);
+ if (l2->tm)
+ l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
+ dev_kfree_skb(skb);
+}
+
+static void
+l2_st6_persistant_da(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ skb_queue_purge(&l2->ui_queue);
+ stop_t200(l2, 20);
+ l2up(l2, DL_RELEASE_CNF, skb);
+ mISDN_FsmChangeState(fi, ST_L2_4);
+ if (l2->tm)
+ l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
+}
+
+static void
+l2_persistant_da(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ skb_queue_purge(&l2->i_queue);
+ skb_queue_purge(&l2->ui_queue);
+ freewin(l2);
+ stop_t200(l2, 19);
+ mISDN_FsmDelTimer(&l2->t203, 19);
+ l2up(l2, DL_RELEASE_IND, skb);
+ mISDN_FsmChangeState(fi, ST_L2_4);
+ if (l2->tm)
+ l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
+}
+
+static void
+l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ if (!test_and_set_bit(FLG_OWN_BUSY, &l2->flag)) {
+ enquiry_cr(l2, RNR, RSP, 0);
+ test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+}
+
+static void
+l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+ struct sk_buff *skb = arg;
+
+ if (!test_and_clear_bit(FLG_OWN_BUSY, &l2->flag)) {
+ enquiry_cr(l2, RR, RSP, 0);
+ test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+}
+
+static void
+l2_frame_error(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+
+ l2mgr(l2, MDL_ERROR_IND, arg);
+}
+
+static void
+l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
+{
+ struct layer2 *l2 = fi->userdata;
+
+ l2mgr(l2, MDL_ERROR_IND, arg);
+ establishlink(fi);
+ test_and_clear_bit(FLG_L3_INIT, &l2->flag);
+}
+
+static struct FsmNode L2FnList[] =
+{
+ {ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
+ {ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
+ {ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
+ {ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
+ {ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
+ {ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
+ {ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
+ {ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
+ {ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
+ {ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
+ {ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
+ {ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
+ {ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
+ {ST_L2_1, EV_L2_DL_UNITDATA, l2_queue_ui_assign},
+ {ST_L2_2, EV_L2_DL_UNITDATA, l2_queue_ui},
+ {ST_L2_3, EV_L2_DL_UNITDATA, l2_queue_ui},
+ {ST_L2_4, EV_L2_DL_UNITDATA, l2_send_ui},
+ {ST_L2_5, EV_L2_DL_UNITDATA, l2_send_ui},
+ {ST_L2_6, EV_L2_DL_UNITDATA, l2_send_ui},
+ {ST_L2_7, EV_L2_DL_UNITDATA, l2_send_ui},
+ {ST_L2_8, EV_L2_DL_UNITDATA, l2_send_ui},
+ {ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
+ {ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
+ {ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
+ {ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
+ {ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
+ {ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
+ {ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
+ {ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
+ {ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
+ {ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
+ {ST_L2_4, EV_L2_SABME, l2_start_multi},
+ {ST_L2_5, EV_L2_SABME, l2_send_UA},
+ {ST_L2_6, EV_L2_SABME, l2_send_DM},
+ {ST_L2_7, EV_L2_SABME, l2_restart_multi},
+ {ST_L2_8, EV_L2_SABME, l2_restart_multi},
+ {ST_L2_4, EV_L2_DISC, l2_send_DM},
+ {ST_L2_5, EV_L2_DISC, l2_send_DM},
+ {ST_L2_6, EV_L2_DISC, l2_send_UA},
+ {ST_L2_7, EV_L2_DISC, l2_stop_multi},
+ {ST_L2_8, EV_L2_DISC, l2_stop_multi},
+ {ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
+ {ST_L2_5, EV_L2_UA, l2_connected},
+ {ST_L2_6, EV_L2_UA, l2_released},
+ {ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
+ {ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
+ {ST_L2_4, EV_L2_DM, l2_reestablish},
+ {ST_L2_5, EV_L2_DM, l2_st5_dm_release},
+ {ST_L2_6, EV_L2_DM, l2_st6_dm_release},
+ {ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
+ {ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
+ {ST_L2_1, EV_L2_UI, l2_got_ui},
+ {ST_L2_2, EV_L2_UI, l2_got_ui},
+ {ST_L2_3, EV_L2_UI, l2_got_ui},
+ {ST_L2_4, EV_L2_UI, l2_got_ui},
+ {ST_L2_5, EV_L2_UI, l2_got_ui},
+ {ST_L2_6, EV_L2_UI, l2_got_ui},
+ {ST_L2_7, EV_L2_UI, l2_got_ui},
+ {ST_L2_8, EV_L2_UI, l2_got_ui},
+ {ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
+ {ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
+ {ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
+ {ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
+ {ST_L2_7, EV_L2_I, l2_got_iframe},
+ {ST_L2_8, EV_L2_I, l2_got_iframe},
+ {ST_L2_5, EV_L2_T200, l2_st5_tout_200},
+ {ST_L2_6, EV_L2_T200, l2_st6_tout_200},
+ {ST_L2_7, EV_L2_T200, l2_st7_tout_200},
+ {ST_L2_8, EV_L2_T200, l2_st8_tout_200},
+ {ST_L2_7, EV_L2_T203, l2_st7_tout_203},
+ {ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
+ {ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
+ {ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
+ {ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
+ {ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
+ {ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
+ {ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
+ {ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
+ {ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
+ {ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
+ {ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistant_da},
+ {ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
+ {ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
+ {ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistant_da},
+ {ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistant_da},
+ {ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistant_da},
+ {ST_L2_7, EV_L1_DEACTIVATE, l2_persistant_da},
+ {ST_L2_8, EV_L1_DEACTIVATE, l2_persistant_da},
+};
+
+#define L2_FN_COUNT (sizeof(L2FnList)/sizeof(struct FsmNode))
+
+static int
+ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
+{
+ u_char *datap = skb->data;
+ int ret = -EINVAL;
+ int psapi, ptei;
+ u_int l;
+ int c = 0;
+
+ l = l2addrsize(l2);
+ if (skb->len <= l) {
+ mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *) 'N');
+ return ret;
+ }
+ if (test_bit(FLG_LAPD, &l2->flag)) { /* Maybe not needed */
+ psapi = *datap++;
+ ptei = *datap++;
+ if ((psapi & 1) || !(ptei & 1)) {
+ printk(KERN_WARNING
+ "l2 D-channel frame wrong EA0/EA1\n");
+ return ret;
+ }
+ psapi >>= 2;
+ ptei >>= 1;
+ if (psapi != l2->sapi) {
+			/* not our business
+ * printk(KERN_DEBUG "%s: sapi %d/%d sapi mismatch\n",
+ * __func__,
+ * psapi, l2->sapi);
+ */
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ if ((ptei != l2->tei) && (ptei != GROUP_TEI)) {
+			/* not our business
+ * printk(KERN_DEBUG "%s: tei %d/%d sapi %d mismatch\n",
+ * __func__,
+ * ptei, l2->tei, psapi);
+ */
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ } else
+ datap += l;
+ if (!(*datap & 1)) { /* I-Frame */
+ c = iframe_error(l2, skb);
+ if (!c)
+ ret = mISDN_FsmEvent(&l2->l2m, EV_L2_I, skb);
+ } else if (IsSFrame(datap, l2)) { /* S-Frame */
+ c = super_error(l2, skb);
+ if (!c)
+ ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SUPER, skb);
+ } else if (IsUI(datap)) {
+ c = UI_error(l2, skb);
+ if (!c)
+ ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UI, skb);
+ } else if (IsSABME(datap, l2)) {
+ c = unnum_error(l2, skb, CMD);
+ if (!c)
+ ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SABME, skb);
+ } else if (IsUA(datap)) {
+ c = unnum_error(l2, skb, RSP);
+ if (!c)
+ ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UA, skb);
+ } else if (IsDISC(datap)) {
+ c = unnum_error(l2, skb, CMD);
+ if (!c)
+ ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DISC, skb);
+ } else if (IsDM(datap)) {
+ c = unnum_error(l2, skb, RSP);
+ if (!c)
+ ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DM, skb);
+ } else if (IsFRMR(datap)) {
+ c = FRMR_error(l2, skb);
+ if (!c)
+ ret = mISDN_FsmEvent(&l2->l2m, EV_L2_FRMR, skb);
+ } else
+ c = 'L';
+ if (c) {
+ printk(KERN_WARNING "l2 D-channel frame error %c\n", c);
+ mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
+ }
+ return ret;
+}
+
+static int
+l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
+{
+ struct layer2 *l2 = container_of(ch, struct layer2, ch);
+ struct mISDNhead *hh = mISDN_HEAD_P(skb);
+ int ret = -EINVAL;
+
+ if (*debug & DEBUG_L2_RECV)
+ printk(KERN_DEBUG "%s: prim(%x) id(%x) tei(%d)\n",
+ __func__, hh->prim, hh->id, l2->tei);
+ switch (hh->prim) {
+ case PH_DATA_IND:
+ ret = ph_data_indication(l2, hh, skb);
+ break;
+ case PH_DATA_CNF:
+ ret = ph_data_confirm(l2, hh, skb);
+ break;
+ case PH_ACTIVATE_IND:
+ test_and_set_bit(FLG_L1_ACTIV, &l2->flag);
+ l2up_create(l2, MPH_ACTIVATE_IND, 0, NULL);
+ if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
+ ret = mISDN_FsmEvent(&l2->l2m,
+ EV_L2_DL_ESTABLISH_REQ, skb);
+ break;
+ case PH_DEACTIVATE_IND:
+ test_and_clear_bit(FLG_L1_ACTIV, &l2->flag);
+ l2up_create(l2, MPH_DEACTIVATE_IND, 0, NULL);
+ ret = mISDN_FsmEvent(&l2->l2m, EV_L1_DEACTIVATE, skb);
+ break;
+ case MPH_INFORMATION_IND:
+ if (!l2->up)
+ break;
+ ret = l2->up->send(l2->up, skb);
+ break;
+ case DL_DATA_REQ:
+ ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_DATA, skb);
+ break;
+ case DL_UNITDATA_REQ:
+ ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_UNITDATA, skb);
+ break;
+ case DL_ESTABLISH_REQ:
+ if (test_bit(FLG_LAPB, &l2->flag))
+ test_and_set_bit(FLG_ORIG, &l2->flag);
+ if (test_bit(FLG_L1_ACTIV, &l2->flag)) {
+ if (test_bit(FLG_LAPD, &l2->flag) ||
+ test_bit(FLG_ORIG, &l2->flag))
+ ret = mISDN_FsmEvent(&l2->l2m,
+ EV_L2_DL_ESTABLISH_REQ, skb);
+ } else {
+ if (test_bit(FLG_LAPD, &l2->flag) ||
+ test_bit(FLG_ORIG, &l2->flag)) {
+ test_and_set_bit(FLG_ESTAB_PEND,
+ &l2->flag);
+ }
+ ret = l2down(l2, PH_ACTIVATE_REQ, l2_newid(l2),
+ skb);
+ }
+ break;
+ case DL_RELEASE_REQ:
+ if (test_bit(FLG_LAPB, &l2->flag))
+ l2down_create(l2, PH_DEACTIVATE_REQ,
+ l2_newid(l2), 0, NULL);
+ ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
+ skb);
+ break;
+ default:
+ if (*debug & DEBUG_L2)
+ l2m_debug(&l2->l2m, "l2 unknown pr %04x",
+ hh->prim);
+ }
+ if (ret) {
+ dev_kfree_skb(skb);
+ ret = 0;
+ }
+ return ret;
+}
+
+int
+tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
+{
+ int ret = -EINVAL;
+
+ if (*debug & DEBUG_L2_TEI)
+ printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd);
+ switch (cmd) {
+ case (MDL_ASSIGN_REQ):
+ ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
+ break;
+ case (MDL_REMOVE_REQ):
+ ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_REMOVE, NULL);
+ break;
+ case (MDL_ERROR_IND):
+ ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
+ break;
+ case (MDL_ERROR_RSP):
+ /* ETS 300-125 5.3.2.1 Test: TC13010 */
+ printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n");
+ ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
+ break;
+ }
+ return ret;
+}
+
+static void
+release_l2(struct layer2 *l2)
+{
+ mISDN_FsmDelTimer(&l2->t200, 21);
+ mISDN_FsmDelTimer(&l2->t203, 16);
+ skb_queue_purge(&l2->i_queue);
+ skb_queue_purge(&l2->ui_queue);
+ skb_queue_purge(&l2->down_queue);
+ ReleaseWin(l2);
+ if (test_bit(FLG_LAPD, &l2->flag)) {
+ TEIrelease(l2);
+ if (l2->ch.st)
+ l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D,
+ CLOSE_CHANNEL, NULL);
+ }
+ kfree(l2);
+}
+
+static int
+l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
+{
+ struct layer2 *l2 = container_of(ch, struct layer2, ch);
+ u_int info;
+
+ if (*debug & DEBUG_L2_CTRL)
+ printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);
+
+ switch (cmd) {
+ case OPEN_CHANNEL:
+ if (test_bit(FLG_LAPD, &l2->flag)) {
+ set_channel_address(&l2->ch, l2->sapi, l2->tei);
+ info = DL_INFO_L2_CONNECT;
+ l2up_create(l2, DL_INFORMATION_IND,
+ sizeof(info), &info);
+ }
+ break;
+ case CLOSE_CHANNEL:
+ if (l2->ch.peer)
+ l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL);
+ release_l2(l2);
+ break;
+ }
+ return 0;
+}
+
+struct layer2 *
+create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, u_long arg)
+{
+ struct layer2 *l2;
+ struct channel_req rq;
+
+ l2 = kzalloc(sizeof(struct layer2), GFP_KERNEL);
+ if (!l2) {
+ printk(KERN_ERR "kzalloc layer2 failed\n");
+ return NULL;
+ }
+ l2->next_id = 1;
+ l2->down_id = MISDN_ID_NONE;
+ l2->up = ch;
+ l2->ch.st = ch->st;
+ l2->ch.send = l2_send;
+ l2->ch.ctrl = l2_ctrl;
+ switch (protocol) {
+ case ISDN_P_LAPD_NT:
+ test_and_set_bit(FLG_LAPD, &l2->flag);
+ test_and_set_bit(FLG_LAPD_NET, &l2->flag);
+ test_and_set_bit(FLG_MOD128, &l2->flag);
+ l2->sapi = 0;
+ l2->maxlen = MAX_DFRAME_LEN;
+ if (test_bit(OPTION_L2_PMX, &options))
+ l2->window = 7;
+ else
+ l2->window = 1;
+ if (test_bit(OPTION_L2_PTP, &options))
+ test_and_set_bit(FLG_PTP, &l2->flag);
+ if (test_bit(OPTION_L2_FIXEDTEI, &options))
+ test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
+ l2->tei = (u_int)arg;
+ l2->T200 = 1000;
+ l2->N200 = 3;
+ l2->T203 = 10000;
+ if (test_bit(OPTION_L2_PMX, &options))
+ rq.protocol = ISDN_P_NT_E1;
+ else
+ rq.protocol = ISDN_P_NT_S0;
+ rq.adr.channel = 0;
+ l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
+ break;
+ case ISDN_P_LAPD_TE:
+ test_and_set_bit(FLG_LAPD, &l2->flag);
+ test_and_set_bit(FLG_MOD128, &l2->flag);
+ test_and_set_bit(FLG_ORIG, &l2->flag);
+ l2->sapi = 0;
+ l2->maxlen = MAX_DFRAME_LEN;
+ if (test_bit(OPTION_L2_PMX, &options))
+ l2->window = 7;
+ else
+ l2->window = 1;
+ if (test_bit(OPTION_L2_PTP, &options))
+ test_and_set_bit(FLG_PTP, &l2->flag);
+ if (test_bit(OPTION_L2_FIXEDTEI, &options))
+ test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
+ l2->tei = (u_int)arg;
+ l2->T200 = 1000;
+ l2->N200 = 3;
+ l2->T203 = 10000;
+ if (test_bit(OPTION_L2_PMX, &options))
+ rq.protocol = ISDN_P_TE_E1;
+ else
+ rq.protocol = ISDN_P_TE_S0;
+ rq.adr.channel = 0;
+ l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
+ break;
+ case ISDN_P_B_X75SLP:
+ test_and_set_bit(FLG_LAPB, &l2->flag);
+ l2->window = 7;
+ l2->maxlen = MAX_DATA_SIZE;
+ l2->T200 = 1000;
+ l2->N200 = 4;
+ l2->T203 = 5000;
+ l2->addr.A = 3;
+ l2->addr.B = 1;
+ break;
+ default:
+ printk(KERN_ERR "layer2 create failed prt %x\n",
+ protocol);
+ kfree(l2);
+ return NULL;
+ }
+ skb_queue_head_init(&l2->i_queue);
+ skb_queue_head_init(&l2->ui_queue);
+ skb_queue_head_init(&l2->down_queue);
+ skb_queue_head_init(&l2->tmp_queue);
+ InitWin(l2);
+ l2->l2m.fsm = &l2fsm;
+ if (test_bit(FLG_LAPB, &l2->flag) ||
+ test_bit(FLG_PTP, &l2->flag) ||
+ test_bit(FLG_LAPD_NET, &l2->flag))
+ l2->l2m.state = ST_L2_4;
+ else
+ l2->l2m.state = ST_L2_1;
+ l2->l2m.debug = *debug;
+ l2->l2m.userdata = l2;
+ l2->l2m.userint = 0;
+ l2->l2m.printdebug = l2m_debug;
+
+ mISDN_FsmInitTimer(&l2->l2m, &l2->t200);
+ mISDN_FsmInitTimer(&l2->l2m, &l2->t203);
+ return l2;
+}
+
+static int
+x75create(struct channel_req *crq)
+{
+ struct layer2 *l2;
+
+ if (crq->protocol != ISDN_P_B_X75SLP)
+ return -EPROTONOSUPPORT;
+ l2 = create_l2(crq->ch, crq->protocol, 0, 0);
+ if (!l2)
+ return -ENOMEM;
+ crq->ch = &l2->ch;
+ crq->protocol = ISDN_P_B_HDLC;
+ return 0;
+}
+
+static struct Bprotocol X75SLP = {
+ .Bprotocols = (1 << (ISDN_P_B_X75SLP & ISDN_P_B_MASK)),
+ .name = "X75SLP",
+ .create = x75create
+};
+
+int
+Isdnl2_Init(u_int *deb)
+{
+ debug = deb;
+ mISDN_register_Bprotocol(&X75SLP);
+ l2fsm.state_count = L2_STATE_COUNT;
+ l2fsm.event_count = L2_EVENT_COUNT;
+ l2fsm.strEvent = strL2Event;
+ l2fsm.strState = strL2State;
+ mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
+ TEIInit(deb);
+ return 0;
+}
+
+void
+Isdnl2_cleanup(void)
+{
+ mISDN_unregister_Bprotocol(&X75SLP);
+ TEIFree();
+ mISDN_FsmFree(&l2fsm);
+}
+
diff --git a/drivers/isdn/mISDN/layer2.h b/drivers/isdn/mISDN/layer2.h
new file mode 100644
index 000000000000..6293f80dc2d3
--- /dev/null
+++ b/drivers/isdn/mISDN/layer2.h
@@ -0,0 +1,140 @@
+/*
+ * Layer 2 defines
+ *
+ * Copyright 2008 by Karsten Keil <kkeil@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/mISDNif.h>
+#include <linux/skbuff.h>
+#include "fsm.h"
+
+#define MAX_WINDOW 8
+
+struct manager {
+ struct mISDNchannel ch;
+ struct mISDNchannel bcast;
+ u_long options;
+ struct list_head layer2;
+ rwlock_t lock;
+ struct FsmInst deact;
+ struct FsmTimer datimer;
+ struct sk_buff_head sendq;
+ struct mISDNchannel *up;
+ u_int nextid;
+ u_int lastid;
+};
+
+struct teimgr {
+ int ri;
+ int rcnt;
+ struct FsmInst tei_m;
+ struct FsmTimer timer;
+ int tval, nval;
+ struct layer2 *l2;
+ struct manager *mgr;
+};
+
+struct laddr {
+ u_char A;
+ u_char B;
+};
+
+struct layer2 {
+ struct list_head list;
+ struct mISDNchannel ch;
+ u_long flag;
+ int id;
+ struct mISDNchannel *up;
+ signed char sapi;
+ signed char tei;
+ struct laddr addr;
+ u_int maxlen;
+ struct teimgr *tm;
+ u_int vs, va, vr;
+ int rc;
+ u_int window;
+ u_int sow;
+ struct FsmInst l2m;
+ struct FsmTimer t200, t203;
+ int T200, N200, T203;
+ u_int next_id;
+ u_int down_id;
+ struct sk_buff *windowar[MAX_WINDOW];
+ struct sk_buff_head i_queue;
+ struct sk_buff_head ui_queue;
+ struct sk_buff_head down_queue;
+ struct sk_buff_head tmp_queue;
+};
+
+enum {
+ ST_L2_1,
+ ST_L2_2,
+ ST_L2_3,
+ ST_L2_4,
+ ST_L2_5,
+ ST_L2_6,
+ ST_L2_7,
+ ST_L2_8,
+};
+
+#define L2_STATE_COUNT (ST_L2_8+1)
+
+extern struct layer2 *create_l2(struct mISDNchannel *, u_int,
+ u_long, u_long);
+extern int tei_l2(struct layer2 *, u_int, u_long arg);
+
+
+/* from tei.c */
+extern int l2_tei(struct layer2 *, u_int, u_long arg);
+extern void TEIrelease(struct layer2 *);
+extern int TEIInit(u_int *);
+extern void TEIFree(void);
+
+#define MAX_L2HEADER_LEN 4
+
+#define RR 0x01
+#define RNR 0x05
+#define REJ 0x09
+#define SABME 0x6f
+#define SABM 0x2f
+#define DM 0x0f
+#define UI 0x03
+#define DISC 0x43
+#define UA 0x63
+#define FRMR 0x87
+#define XID 0xaf
+
+#define CMD 0
+#define RSP 1
+
+#define LC_FLUSH_WAIT 1
+
+#define FLG_LAPB 0
+#define FLG_LAPD 1
+#define FLG_ORIG 2
+#define FLG_MOD128 3
+#define FLG_PEND_REL 4
+#define FLG_L3_INIT 5
+#define FLG_T200_RUN 6
+#define FLG_ACK_PEND 7
+#define FLG_REJEXC 8
+#define FLG_OWN_BUSY 9
+#define FLG_PEER_BUSY 10
+#define FLG_DCHAN_BUSY 11
+#define FLG_L1_ACTIV 12
+#define FLG_ESTAB_PEND 13
+#define FLG_PTP 14
+#define FLG_FIXED_TEI 15
+#define FLG_L2BLOCK 16
+#define FLG_L1_NOTREADY 17
+#define FLG_LAPD_NET 18
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
new file mode 100644
index 000000000000..4ba4cc364c9e
--- /dev/null
+++ b/drivers/isdn/mISDN/socket.c
@@ -0,0 +1,781 @@
+/*
+ *
+ * Author Karsten Keil <kkeil@novell.com>
+ *
+ * Copyright 2008 by Karsten Keil <kkeil@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/mISDNif.h>
+#include "core.h"
+
+static int *debug;
+
+static struct proto mISDN_proto = {
+ .name = "misdn",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct mISDN_sock)
+};
+
+#define _pms(sk) ((struct mISDN_sock *)sk)
+
+static struct mISDN_sock_list data_sockets = {
+ .lock = __RW_LOCK_UNLOCKED(data_sockets.lock)
+};
+
+static struct mISDN_sock_list base_sockets = {
+ .lock = __RW_LOCK_UNLOCKED(base_sockets.lock)
+};
+
+#define L2_HEADER_LEN 4
+
+static inline struct sk_buff *
+_l2_alloc_skb(unsigned int len, gfp_t gfp_mask)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len + L2_HEADER_LEN, gfp_mask);
+ if (likely(skb))
+ skb_reserve(skb, L2_HEADER_LEN);
+ return skb;
+}
+
+static void
+mISDN_sock_link(struct mISDN_sock_list *l, struct sock *sk)
+{
+ write_lock_bh(&l->lock);
+ sk_add_node(sk, &l->head);
+ write_unlock_bh(&l->lock);
+}
+
+static void mISDN_sock_unlink(struct mISDN_sock_list *l, struct sock *sk)
+{
+ write_lock_bh(&l->lock);
+ sk_del_node_init(sk);
+ write_unlock_bh(&l->lock);
+}
+
+static int
+mISDN_send(struct mISDNchannel *ch, struct sk_buff *skb)
+{
+ struct mISDN_sock *msk;
+ int err;
+
+ msk = container_of(ch, struct mISDN_sock, ch);
+ if (*debug & DEBUG_SOCKET)
+ printk(KERN_DEBUG "%s len %d %p\n", __func__, skb->len, skb);
+ if (msk->sk.sk_state == MISDN_CLOSED)
+ return -EUNATCH;
+ __net_timestamp(skb);
+ err = sock_queue_rcv_skb(&msk->sk, skb);
+ if (err)
+ printk(KERN_WARNING "%s: error %d\n", __func__, err);
+ return err;
+}
+
+static int
+mISDN_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
+{
+ struct mISDN_sock *msk;
+
+ msk = container_of(ch, struct mISDN_sock, ch);
+ if (*debug & DEBUG_SOCKET)
+ printk(KERN_DEBUG "%s(%p, %x, %p)\n", __func__, ch, cmd, arg);
+ switch (cmd) {
+ case CLOSE_CHANNEL:
+ msk->sk.sk_state = MISDN_CLOSED;
+ break;
+ }
+ return 0;
+}
+
+static inline void
+mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
+{
+ struct timeval tv;
+
+ if (_pms(sk)->cmask & MISDN_TIME_STAMP) {
+ skb_get_timestamp(skb, &tv);
+ put_cmsg(msg, SOL_MISDN, MISDN_TIME_STAMP, sizeof(tv), &tv);
+ }
+}
+
+static int
+mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len, int flags)
+{
+ struct sk_buff *skb;
+ struct sock *sk = sock->sk;
+ struct sockaddr_mISDN *maddr;
+
+ int copied, err;
+
+ if (*debug & DEBUG_SOCKET)
+ printk(KERN_DEBUG "%s: len %d, flags %x ch.nr %d, proto %x\n",
+ __func__, (int)len, flags, _pms(sk)->ch.nr,
+ sk->sk_protocol);
+ if (flags & (MSG_OOB))
+ return -EOPNOTSUPP;
+
+ if (sk->sk_state == MISDN_CLOSED)
+ return 0;
+
+ skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ return err;
+
+ if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) {
+ msg->msg_namelen = sizeof(struct sockaddr_mISDN);
+ maddr = (struct sockaddr_mISDN *)msg->msg_name;
+ maddr->family = AF_ISDN;
+ maddr->dev = _pms(sk)->dev->id;
+ if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
+ (sk->sk_protocol == ISDN_P_LAPD_NT)) {
+ maddr->channel = (mISDN_HEAD_ID(skb) >> 16) & 0xff;
+ maddr->tei = (mISDN_HEAD_ID(skb) >> 8) & 0xff;
+ maddr->sapi = mISDN_HEAD_ID(skb) & 0xff;
+ } else {
+ maddr->channel = _pms(sk)->ch.nr;
+ maddr->sapi = _pms(sk)->ch.addr & 0xFF;
+ maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF;
+ }
+ } else {
+ if (msg->msg_namelen)
+ printk(KERN_WARNING "%s: too small namelen %d\n",
+ __func__, msg->msg_namelen);
+ msg->msg_namelen = 0;
+ }
+
+ copied = skb->len + MISDN_HEADER_LEN;
+ if (len < copied) {
+ if (flags & MSG_PEEK)
+ atomic_dec(&skb->users);
+ else
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ return -ENOSPC;
+ }
+ memcpy(skb_push(skb, MISDN_HEADER_LEN), mISDN_HEAD_P(skb),
+ MISDN_HEADER_LEN);
+
+ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+
+ mISDN_sock_cmsg(sk, msg, skb);
+
+ skb_free_datagram(sk, skb);
+
+ return err ? : copied;
+}
+
+static int
+mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len)
+{
+ struct sock *sk = sock->sk;
+ struct sk_buff *skb;
+ int err = -ENOMEM;
+ struct sockaddr_mISDN *maddr;
+
+ if (*debug & DEBUG_SOCKET)
+ printk(KERN_DEBUG "%s: len %d flags %x ch %d proto %x\n",
+ __func__, (int)len, msg->msg_flags, _pms(sk)->ch.nr,
+ sk->sk_protocol);
+
+ if (msg->msg_flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+ if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
+ return -EINVAL;
+
+ if (len < MISDN_HEADER_LEN)
+ return -EINVAL;
+
+ if (sk->sk_state != MISDN_BOUND)
+ return -EBADFD;
+
+ lock_sock(sk);
+
+ skb = _l2_alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ goto done;
+
+ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+ err = -EFAULT;
+ goto drop;
+ }
+
+ memcpy(mISDN_HEAD_P(skb), skb->data, MISDN_HEADER_LEN);
+ skb_pull(skb, MISDN_HEADER_LEN);
+
+ if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) {
+ /* if we have an address, we use it */
+ maddr = (struct sockaddr_mISDN *)msg->msg_name;
+ mISDN_HEAD_ID(skb) = maddr->channel;
+ } else { /* use default for L2 messages */
+ if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
+ (sk->sk_protocol == ISDN_P_LAPD_NT))
+ mISDN_HEAD_ID(skb) = _pms(sk)->ch.nr;
+ }
+
+ if (*debug & DEBUG_SOCKET)
+ printk(KERN_DEBUG "%s: ID:%x\n",
+ __func__, mISDN_HEAD_ID(skb));
+
+ err = -ENODEV;
+ if (!_pms(sk)->ch.peer ||
+ (err = _pms(sk)->ch.recv(_pms(sk)->ch.peer, skb)))
+ goto drop;
+
+ err = len;
+
+done:
+ release_sock(sk);
+ return err;
+
+drop:
+ kfree_skb(skb);
+ goto done;
+}
+
+static int
+data_sock_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ if (*debug & DEBUG_SOCKET)
+ printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk);
+ if (!sk)
+ return 0;
+ switch (sk->sk_protocol) {
+ case ISDN_P_TE_S0:
+ case ISDN_P_NT_S0:
+ case ISDN_P_TE_E1:
+ case ISDN_P_NT_E1:
+ if (sk->sk_state == MISDN_BOUND)
+ delete_channel(&_pms(sk)->ch);
+ else
+ mISDN_sock_unlink(&data_sockets, sk);
+ break;
+ case ISDN_P_LAPD_TE:
+ case ISDN_P_LAPD_NT:
+ case ISDN_P_B_RAW:
+ case ISDN_P_B_HDLC:
+ case ISDN_P_B_X75SLP:
+ case ISDN_P_B_L2DTMF:
+ case ISDN_P_B_L2DSP:
+ case ISDN_P_B_L2DSPHDLC:
+ delete_channel(&_pms(sk)->ch);
+ mISDN_sock_unlink(&data_sockets, sk);
+ break;
+ }
+
+ lock_sock(sk);
+
+ sock_orphan(sk);
+ skb_queue_purge(&sk->sk_receive_queue);
+
+ release_sock(sk);
+ sock_put(sk);
+
+ return 0;
+}
+
+static int
+data_sock_ioctl_bound(struct sock *sk, unsigned int cmd, void __user *p)
+{
+ struct mISDN_ctrl_req cq;
+ int err = -EINVAL, val;
+ struct mISDNchannel *bchan, *next;
+
+ lock_sock(sk);
+ if (!_pms(sk)->dev) {
+ err = -ENODEV;
+ goto done;
+ }
+ switch (cmd) {
+ case IMCTRLREQ:
+ if (copy_from_user(&cq, p, sizeof(cq))) {
+ err = -EFAULT;
+ break;
+ }
+ if ((sk->sk_protocol & ~ISDN_P_B_MASK) == ISDN_P_B_START) {
+ list_for_each_entry_safe(bchan, next,
+ &_pms(sk)->dev->bchannels, list) {
+ if (bchan->nr == cq.channel) {
+ err = bchan->ctrl(bchan,
+ CONTROL_CHANNEL, &cq);
+ break;
+ }
+ }
+ } else
+ err = _pms(sk)->dev->D.ctrl(&_pms(sk)->dev->D,
+ CONTROL_CHANNEL, &cq);
+ if (err)
+ break;
+ if (copy_to_user(p, &cq, sizeof(cq)))
+ err = -EFAULT;
+ break;
+ case IMCLEAR_L2:
+ if (sk->sk_protocol != ISDN_P_LAPD_NT) {
+ err = -EINVAL;
+ break;
+ }
+ if (get_user(val, (int __user *)p)) {
+ err = -EFAULT;
+ break;
+ }
+ err = _pms(sk)->dev->teimgr->ctrl(_pms(sk)->dev->teimgr,
+ CONTROL_CHANNEL, &val);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+done:
+ release_sock(sk);
+ return err;
+}
+
+static int
+data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ int err = 0, id;
+ struct sock *sk = sock->sk;
+ struct mISDNdevice *dev;
+ struct mISDNversion ver;
+
+ switch (cmd) {
+ case IMGETVERSION:
+ ver.major = MISDN_MAJOR_VERSION;
+ ver.minor = MISDN_MINOR_VERSION;
+ ver.release = MISDN_RELEASE;
+ if (copy_to_user((void __user *)arg, &ver, sizeof(ver)))
+ err = -EFAULT;
+ break;
+ case IMGETCOUNT:
+ id = get_mdevice_count();
+ if (put_user(id, (int __user *)arg))
+ err = -EFAULT;
+ break;
+ case IMGETDEVINFO:
+ if (get_user(id, (int __user *)arg)) {
+ err = -EFAULT;
+ break;
+ }
+ dev = get_mdevice(id);
+ if (dev) {
+ struct mISDN_devinfo di;
+
+ di.id = dev->id;
+ di.Dprotocols = dev->Dprotocols;
+ di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
+ di.protocol = dev->D.protocol;
+ memcpy(di.channelmap, dev->channelmap,
+ MISDN_CHMAP_SIZE * 4);
+ di.nrbchan = dev->nrbchan;
+ strcpy(di.name, dev->name);
+ if (copy_to_user((void __user *)arg, &di, sizeof(di)))
+ err = -EFAULT;
+ } else
+ err = -ENODEV;
+ break;
+ default:
+ if (sk->sk_state == MISDN_BOUND)
+ err = data_sock_ioctl_bound(sk, cmd,
+ (void __user *)arg);
+ else
+ err = -ENOTCONN;
+ }
+ return err;
+}
+
+static int data_sock_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int len)
+{
+ struct sock *sk = sock->sk;
+ int err = 0, opt = 0;
+
+ if (*debug & DEBUG_SOCKET)
+ printk(KERN_DEBUG "%s(%p, %d, %x, %p, %d)\n", __func__, sock,
+ level, optname, optval, len);
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case MISDN_TIME_STAMP:
+ if (get_user(opt, (int __user *)optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opt)
+ _pms(sk)->cmask |= MISDN_TIME_STAMP;
+ else
+ _pms(sk)->cmask &= ~MISDN_TIME_STAMP;
+ break;
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+ release_sock(sk);
+ return err;
+}
+
+static int data_sock_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ int len, opt;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ switch (optname) {
+ case MISDN_TIME_STAMP:
+ if (_pms(sk)->cmask & MISDN_TIME_STAMP)
+ opt = 1;
+ else
+ opt = 0;
+
+ if (put_user(opt, optval))
+ return -EFAULT;
+ break;
+ default:
+ return -ENOPROTOOPT;
+ }
+
+ return 0;
+}
+
+static int
+data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+{
+ struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
+ struct sock *sk = sock->sk;
+ int err = 0;
+
+ if (*debug & DEBUG_SOCKET)
+ printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk);
+ if (addr_len != sizeof(struct sockaddr_mISDN))
+ return -EINVAL;
+ if (!maddr || maddr->family != AF_ISDN)
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ if (_pms(sk)->dev) {
+ err = -EALREADY;
+ goto done;
+ }
+ _pms(sk)->dev = get_mdevice(maddr->dev);
+ if (!_pms(sk)->dev) {
+ err = -ENODEV;
+ goto done;
+ }
+ _pms(sk)->ch.send = mISDN_send;
+ _pms(sk)->ch.ctrl = mISDN_ctrl;
+
+ switch (sk->sk_protocol) {
+ case ISDN_P_TE_S0:
+ case ISDN_P_NT_S0:
+ case ISDN_P_TE_E1:
+ case ISDN_P_NT_E1:
+ mISDN_sock_unlink(&data_sockets, sk);
+ err = connect_layer1(_pms(sk)->dev, &_pms(sk)->ch,
+ sk->sk_protocol, maddr);
+ if (err)
+ mISDN_sock_link(&data_sockets, sk);
+ break;
+ case ISDN_P_LAPD_TE:
+ case ISDN_P_LAPD_NT:
+ err = create_l2entity(_pms(sk)->dev, &_pms(sk)->ch,
+ sk->sk_protocol, maddr);
+ break;
+ case ISDN_P_B_RAW:
+ case ISDN_P_B_HDLC:
+ case ISDN_P_B_X75SLP:
+ case ISDN_P_B_L2DTMF:
+ case ISDN_P_B_L2DSP:
+ case ISDN_P_B_L2DSPHDLC:
+ err = connect_Bstack(_pms(sk)->dev, &_pms(sk)->ch,
+ sk->sk_protocol, maddr);
+ break;
+ default:
+ err = -EPROTONOSUPPORT;
+ }
+ if (err)
+ goto done;
+ sk->sk_state = MISDN_BOUND;
+ _pms(sk)->ch.protocol = sk->sk_protocol;
+
+done:
+ release_sock(sk);
+ return err;
+}
+
+static int
+data_sock_getname(struct socket *sock, struct sockaddr *addr,
+ int *addr_len, int peer)
+{
+ struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
+ struct sock *sk = sock->sk;
+
+ if (!_pms(sk)->dev)
+ return -EBADFD;
+
+ lock_sock(sk);
+
+ *addr_len = sizeof(*maddr);
+ maddr->dev = _pms(sk)->dev->id;
+ maddr->channel = _pms(sk)->ch.nr;
+ maddr->sapi = _pms(sk)->ch.addr & 0xff;
+ maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xff;
+ release_sock(sk);
+ return 0;
+}
+
+static const struct proto_ops data_sock_ops = {
+ .family = PF_ISDN,
+ .owner = THIS_MODULE,
+ .release = data_sock_release,
+ .ioctl = data_sock_ioctl,
+ .bind = data_sock_bind,
+ .getname = data_sock_getname,
+ .sendmsg = mISDN_sock_sendmsg,
+ .recvmsg = mISDN_sock_recvmsg,
+ .poll = datagram_poll,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = data_sock_setsockopt,
+ .getsockopt = data_sock_getsockopt,
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .mmap = sock_no_mmap
+};
+
+static int
+data_sock_create(struct net *net, struct socket *sock, int protocol)
+{
+ struct sock *sk;
+
+ if (sock->type != SOCK_DGRAM)
+ return -ESOCKTNOSUPPORT;
+
+ sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto);
+ if (!sk)
+ return -ENOMEM;
+
+ sock_init_data(sock, sk);
+
+ sock->ops = &data_sock_ops;
+ sock->state = SS_UNCONNECTED;
+ sock_reset_flag(sk, SOCK_ZAPPED);
+
+ sk->sk_protocol = protocol;
+ sk->sk_state = MISDN_OPEN;
+ mISDN_sock_link(&data_sockets, sk);
+
+ return 0;
+}
+
+static int
+base_sock_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk);
+ if (!sk)
+ return 0;
+
+ mISDN_sock_unlink(&base_sockets, sk);
+ sock_orphan(sk);
+ sock_put(sk);
+
+ return 0;
+}
+
+static int
+base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ int err = 0, id;
+ struct mISDNdevice *dev;
+ struct mISDNversion ver;
+
+ switch (cmd) {
+ case IMGETVERSION:
+ ver.major = MISDN_MAJOR_VERSION;
+ ver.minor = MISDN_MINOR_VERSION;
+ ver.release = MISDN_RELEASE;
+ if (copy_to_user((void __user *)arg, &ver, sizeof(ver)))
+ err = -EFAULT;
+ break;
+ case IMGETCOUNT:
+ id = get_mdevice_count();
+ if (put_user(id, (int __user *)arg))
+ err = -EFAULT;
+ break;
+ case IMGETDEVINFO:
+ if (get_user(id, (int __user *)arg)) {
+ err = -EFAULT;
+ break;
+ }
+ dev = get_mdevice(id);
+ if (dev) {
+ struct mISDN_devinfo di;
+
+ di.id = dev->id;
+ di.Dprotocols = dev->Dprotocols;
+ di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
+ di.protocol = dev->D.protocol;
+ memcpy(di.channelmap, dev->channelmap,
+ MISDN_CHMAP_SIZE * 4);
+ di.nrbchan = dev->nrbchan;
+ strcpy(di.name, dev->name);
+ if (copy_to_user((void __user *)arg, &di, sizeof(di)))
+ err = -EFAULT;
+ } else
+ err = -ENODEV;
+ break;
+ default:
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int
+base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+{
+ struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
+ struct sock *sk = sock->sk;
+ int err = 0;
+
+ if (!maddr || maddr->family != AF_ISDN)
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ if (_pms(sk)->dev) {
+ err = -EALREADY;
+ goto done;
+ }
+
+ _pms(sk)->dev = get_mdevice(maddr->dev);
+ if (!_pms(sk)->dev) {
+ err = -ENODEV;
+ goto done;
+ }
+ sk->sk_state = MISDN_BOUND;
+
+done:
+ release_sock(sk);
+ return err;
+}
+
+static const struct proto_ops base_sock_ops = {
+ .family = PF_ISDN,
+ .owner = THIS_MODULE,
+ .release = base_sock_release,
+ .ioctl = base_sock_ioctl,
+ .bind = base_sock_bind,
+ .getname = sock_no_getname,
+ .sendmsg = sock_no_sendmsg,
+ .recvmsg = sock_no_recvmsg,
+ .poll = sock_no_poll,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_no_setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .mmap = sock_no_mmap
+};
+
+static int
+base_sock_create(struct net *net, struct socket *sock, int protocol)
+{
+ struct sock *sk;
+
+ if (sock->type != SOCK_RAW)
+ return -ESOCKTNOSUPPORT;
+
+ sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto);
+ if (!sk)
+ return -ENOMEM;
+
+ sock_init_data(sock, sk);
+ sock->ops = &base_sock_ops;
+ sock->state = SS_UNCONNECTED;
+ sock_reset_flag(sk, SOCK_ZAPPED);
+ sk->sk_protocol = protocol;
+ sk->sk_state = MISDN_OPEN;
+ mISDN_sock_link(&base_sockets, sk);
+
+ return 0;
+}
+
+static int
+mISDN_sock_create(struct net *net, struct socket *sock, int proto)
+{
+ int err = -EPROTONOSUPPORT;
+
+ switch (proto) {
+ case ISDN_P_BASE:
+ err = base_sock_create(net, sock, proto);
+ break;
+ case ISDN_P_TE_S0:
+ case ISDN_P_NT_S0:
+ case ISDN_P_TE_E1:
+ case ISDN_P_NT_E1:
+ case ISDN_P_LAPD_TE:
+ case ISDN_P_LAPD_NT:
+ case ISDN_P_B_RAW:
+ case ISDN_P_B_HDLC:
+ case ISDN_P_B_X75SLP:
+ case ISDN_P_B_L2DTMF:
+ case ISDN_P_B_L2DSP:
+ case ISDN_P_B_L2DSPHDLC:
+ err = data_sock_create(net, sock, proto);
+ break;
+ default:
+ return err;
+ }
+
+ return err;
+}
+
+static struct
+net_proto_family mISDN_sock_family_ops = {
+ .owner = THIS_MODULE,
+ .family = PF_ISDN,
+ .create = mISDN_sock_create,
+};
+
+int
+misdn_sock_init(u_int *deb)
+{
+ int err;
+
+ debug = deb;
+ err = sock_register(&mISDN_sock_family_ops);
+ if (err)
+ printk(KERN_ERR "%s: error(%d)\n", __func__, err);
+ return err;
+}
+
+void
+misdn_sock_cleanup(void)
+{
+ sock_unregister(PF_ISDN);
+}
+
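For reference, a minimal userspace sketch of the socket family implemented above: a SOCK_RAW/ISDN_P_BASE socket for the management ioctls, and a SOCK_DGRAM data socket bound through struct sockaddr_mISDN. It assumes PF_ISDN/AF_ISDN, the IMGET* ioctls, struct mISDNversion and struct mISDN_devinfo are exported to userspace through <linux/mISDNif.h>, as this series intends; error handling is trimmed. Note that every payload sent or received on a data socket starts with a struct mISDNhead of at least MISDN_HEADER_LEN bytes, which the sendmsg/recvmsg handlers above copy into and out of the skb control block.

/* userspace sketch under the assumptions stated above */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/mISDNif.h>

int main(void)
{
	struct mISDNversion ver;
	struct mISDN_devinfo di;
	struct sockaddr_mISDN addr;
	int base, data, cnt;

	/* management socket: base_sock_create() only accepts SOCK_RAW */
	base = socket(PF_ISDN, SOCK_RAW, ISDN_P_BASE);
	if (base < 0)
		return 1;
	ioctl(base, IMGETVERSION, &ver);
	ioctl(base, IMGETCOUNT, &cnt);
	memset(&di, 0, sizeof(di));
	di.id = 0;	/* IMGETDEVINFO reads the id, then fills in the rest */
	if (!ioctl(base, IMGETDEVINFO, &di))
		printf("device 0: %s\n", di.name);

	/* data socket: data_sock_create() only accepts SOCK_DGRAM */
	data = socket(PF_ISDN, SOCK_DGRAM, ISDN_P_LAPD_TE);
	memset(&addr, 0, sizeof(addr));
	addr.family = AF_ISDN;
	addr.dev = 0;		/* device id as reported by IMGETDEVINFO */
	addr.channel = 0;
	addr.sapi = 0;
	addr.tei = 127;		/* group TEI, request dynamic assignment */
	bind(data, (struct sockaddr *)&addr, sizeof(addr));
	return 0;
}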
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
new file mode 100644
index 000000000000..54cfddcc4784
--- /dev/null
+++ b/drivers/isdn/mISDN/stack.c
@@ -0,0 +1,674 @@
+/*
+ *
+ * Author Karsten Keil <kkeil@novell.com>
+ *
+ * Copyright 2008 by Karsten Keil <kkeil@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/mISDNif.h>
+#include <linux/kthread.h>
+#include "core.h"
+
+static u_int *debug;
+
+static inline void
+_queue_message(struct mISDNstack *st, struct sk_buff *skb)
+{
+ struct mISDNhead *hh = mISDN_HEAD_P(skb);
+
+ if (*debug & DEBUG_QUEUE_FUNC)
+ printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
+ __func__, hh->prim, hh->id, skb);
+ skb_queue_tail(&st->msgq, skb);
+ if (likely(!test_bit(mISDN_STACK_STOPPED, &st->status))) {
+ test_and_set_bit(mISDN_STACK_WORK, &st->status);
+ wake_up_interruptible(&st->workq);
+ }
+}
+
+int
+mISDN_queue_message(struct mISDNchannel *ch, struct sk_buff *skb)
+{
+ _queue_message(ch->st, skb);
+ return 0;
+}
+
+static struct mISDNchannel *
+get_channel4id(struct mISDNstack *st, u_int id)
+{
+ struct mISDNchannel *ch;
+
+ mutex_lock(&st->lmutex);
+ list_for_each_entry(ch, &st->layer2, list) {
+ if (id == ch->nr)
+ goto unlock;
+ }
+ ch = NULL;
+unlock:
+ mutex_unlock(&st->lmutex);
+ return ch;
+}
+
+static void
+send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
+{
+ struct hlist_node *node;
+ struct sock *sk;
+ struct sk_buff *cskb = NULL;
+
+ read_lock(&sl->lock);
+ sk_for_each(sk, node, &sl->head) {
+ if (sk->sk_state != MISDN_BOUND)
+ continue;
+ if (!cskb)
+ cskb = skb_copy(skb, GFP_KERNEL);
+ if (!cskb) {
+ printk(KERN_WARNING "%s no skb\n", __func__);
+ break;
+ }
+ if (!sock_queue_rcv_skb(sk, cskb))
+ cskb = NULL;
+ }
+ read_unlock(&sl->lock);
+ if (cskb)
+ dev_kfree_skb(cskb);
+}
+
+static void
+send_layer2(struct mISDNstack *st, struct sk_buff *skb)
+{
+ struct sk_buff *cskb;
+ struct mISDNhead *hh = mISDN_HEAD_P(skb);
+ struct mISDNchannel *ch;
+ int ret;
+
+ if (!st)
+ return;
+ mutex_lock(&st->lmutex);
+ if ((hh->id & MISDN_ID_ADDR_MASK) == MISDN_ID_ANY) { /* L2 for all */
+ list_for_each_entry(ch, &st->layer2, list) {
+ if (list_is_last(&ch->list, &st->layer2)) {
+ cskb = skb;
+ skb = NULL;
+ } else {
+ cskb = skb_copy(skb, GFP_KERNEL);
+ }
+ if (cskb) {
+ ret = ch->send(ch, cskb);
+ if (ret) {
+ if (*debug & DEBUG_SEND_ERR)
+ printk(KERN_DEBUG
+ "%s ch%d prim(%x) addr(%x)"
+ " err %d\n",
+ __func__, ch->nr,
+ hh->prim, ch->addr, ret);
+ dev_kfree_skb(cskb);
+ }
+ } else {
+ printk(KERN_WARNING "%s ch%d addr %x no mem\n",
+ __func__, ch->nr, ch->addr);
+ goto out;
+ }
+ }
+ } else {
+ list_for_each_entry(ch, &st->layer2, list) {
+ if ((hh->id & MISDN_ID_ADDR_MASK) == ch->addr) {
+ ret = ch->send(ch, skb);
+ if (!ret)
+ skb = NULL;
+ goto out;
+ }
+ }
+ ret = st->dev->teimgr->ctrl(st->dev->teimgr, CHECK_DATA, skb);
+ if (!ret)
+ skb = NULL;
+ else if (*debug & DEBUG_SEND_ERR)
+ printk(KERN_DEBUG
+ "%s ch%d mgr prim(%x) addr(%x) err %d\n",
+ __func__, ch->nr, hh->prim, ch->addr, ret);
+ }
+out:
+ mutex_unlock(&st->lmutex);
+ if (skb)
+ dev_kfree_skb(skb);
+}
+
+static inline int
+send_msg_to_layer(struct mISDNstack *st, struct sk_buff *skb)
+{
+ struct mISDNhead *hh = mISDN_HEAD_P(skb);
+ struct mISDNchannel *ch;
+ int lm;
+
+ lm = hh->prim & MISDN_LAYERMASK;
+ if (*debug & DEBUG_QUEUE_FUNC)
+ printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
+ __func__, hh->prim, hh->id, skb);
+ if (lm == 0x1) {
+ if (!hlist_empty(&st->l1sock.head)) {
+ __net_timestamp(skb);
+ send_socklist(&st->l1sock, skb);
+ }
+ return st->layer1->send(st->layer1, skb);
+ } else if (lm == 0x2) {
+ if (!hlist_empty(&st->l1sock.head))
+ send_socklist(&st->l1sock, skb);
+ send_layer2(st, skb);
+ return 0;
+ } else if (lm == 0x4) {
+ ch = get_channel4id(st, hh->id);
+ if (ch)
+ return ch->send(ch, skb);
+ else
+ printk(KERN_WARNING
+ "%s: dev(%s) prim(%x) id(%x) no channel\n",
+ __func__, st->dev->name, hh->prim, hh->id);
+ } else if (lm == 0x8) {
+ WARN_ON(lm == 0x8);
+ ch = get_channel4id(st, hh->id);
+ if (ch)
+ return ch->send(ch, skb);
+ else
+ printk(KERN_WARNING
+ "%s: dev(%s) prim(%x) id(%x) no channel\n",
+ __func__, st->dev->name, hh->prim, hh->id);
+ } else {
+ /* broadcast not handled yet */
+ printk(KERN_WARNING "%s: dev(%s) prim %x not delivered\n",
+ __func__, st->dev->name, hh->prim);
+ }
+ return -ESRCH;
+}
+
+static void
+do_clear_stack(struct mISDNstack *st)
+{
+}
+
+static int
+mISDNStackd(void *data)
+{
+ struct mISDNstack *st = data;
+ int err = 0;
+
+#ifdef CONFIG_SMP
+ lock_kernel();
+#endif
+ sigfillset(&current->blocked);
+#ifdef CONFIG_SMP
+ unlock_kernel();
+#endif
+ if (*debug & DEBUG_MSG_THREAD)
+ printk(KERN_DEBUG "mISDNStackd %s started\n", st->dev->name);
+
+ if (st->notify != NULL) {
+ complete(st->notify);
+ st->notify = NULL;
+ }
+
+ for (;;) {
+ struct sk_buff *skb;
+
+ if (unlikely(test_bit(mISDN_STACK_STOPPED, &st->status))) {
+ test_and_clear_bit(mISDN_STACK_WORK, &st->status);
+ test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
+ } else
+ test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
+ while (test_bit(mISDN_STACK_WORK, &st->status)) {
+ skb = skb_dequeue(&st->msgq);
+ if (!skb) {
+ test_and_clear_bit(mISDN_STACK_WORK,
+ &st->status);
+ /* test if a race happens */
+ skb = skb_dequeue(&st->msgq);
+ if (!skb)
+ continue;
+ test_and_set_bit(mISDN_STACK_WORK,
+ &st->status);
+ }
+#ifdef MISDN_MSG_STATS
+ st->msg_cnt++;
+#endif
+ err = send_msg_to_layer(st, skb);
+ if (unlikely(err)) {
+ if (*debug & DEBUG_SEND_ERR)
+ printk(KERN_DEBUG
+ "%s: %s prim(%x) id(%x) "
+ "send call(%d)\n",
+ __func__, st->dev->name,
+ mISDN_HEAD_PRIM(skb),
+ mISDN_HEAD_ID(skb), err);
+ dev_kfree_skb(skb);
+ continue;
+ }
+ if (unlikely(test_bit(mISDN_STACK_STOPPED,
+ &st->status))) {
+ test_and_clear_bit(mISDN_STACK_WORK,
+ &st->status);
+ test_and_clear_bit(mISDN_STACK_RUNNING,
+ &st->status);
+ break;
+ }
+ }
+ if (test_bit(mISDN_STACK_CLEARING, &st->status)) {
+ test_and_set_bit(mISDN_STACK_STOPPED, &st->status);
+ test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
+ do_clear_stack(st);
+ test_and_clear_bit(mISDN_STACK_CLEARING, &st->status);
+ test_and_set_bit(mISDN_STACK_RESTART, &st->status);
+ }
+ if (test_and_clear_bit(mISDN_STACK_RESTART, &st->status)) {
+ test_and_clear_bit(mISDN_STACK_STOPPED, &st->status);
+ test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
+ if (!skb_queue_empty(&st->msgq))
+ test_and_set_bit(mISDN_STACK_WORK,
+ &st->status);
+ }
+ if (test_bit(mISDN_STACK_ABORT, &st->status))
+ break;
+ if (st->notify != NULL) {
+ complete(st->notify);
+ st->notify = NULL;
+ }
+#ifdef MISDN_MSG_STATS
+ st->sleep_cnt++;
+#endif
+ test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
+ wait_event_interruptible(st->workq, (st->status &
+ mISDN_STACK_ACTION_MASK));
+ if (*debug & DEBUG_MSG_THREAD)
+ printk(KERN_DEBUG "%s: %s wake status %08lx\n",
+ __func__, st->dev->name, st->status);
+ test_and_set_bit(mISDN_STACK_ACTIVE, &st->status);
+
+ test_and_clear_bit(mISDN_STACK_WAKEUP, &st->status);
+
+ if (test_bit(mISDN_STACK_STOPPED, &st->status)) {
+ test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
+#ifdef MISDN_MSG_STATS
+ st->stopped_cnt++;
+#endif
+ }
+ }
+#ifdef MISDN_MSG_STATS
+ printk(KERN_DEBUG "mISDNStackd daemon for %s proceed %d "
+ "msg %d sleep %d stopped\n",
+ st->dev->name, st->msg_cnt, st->sleep_cnt, st->stopped_cnt);
+ printk(KERN_DEBUG
+ "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n",
+ st->dev->name, st->thread->utime, st->thread->stime);
+ printk(KERN_DEBUG
+ "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
+ st->dev->name, st->thread->nvcsw, st->thread->nivcsw);
+ printk(KERN_DEBUG "mISDNStackd daemon for %s killed now\n",
+ st->dev->name);
+#endif
+ test_and_set_bit(mISDN_STACK_KILLED, &st->status);
+ test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
+ test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
+ test_and_clear_bit(mISDN_STACK_ABORT, &st->status);
+ skb_queue_purge(&st->msgq);
+ st->thread = NULL;
+ if (st->notify != NULL) {
+ complete(st->notify);
+ st->notify = NULL;
+ }
+ return 0;
+}
+
+static int
+l1_receive(struct mISDNchannel *ch, struct sk_buff *skb)
+{
+ if (!ch->st)
+ return -ENODEV;
+ __net_timestamp(skb);
+ _queue_message(ch->st, skb);
+ return 0;
+}
+
+void
+set_channel_address(struct mISDNchannel *ch, u_int sapi, u_int tei)
+{
+ ch->addr = sapi | (tei << 8);
+}
+
+void
+__add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
+{
+ list_add_tail(&ch->list, &st->layer2);
+}
+
+void
+add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
+{
+ mutex_lock(&st->lmutex);
+ __add_layer2(ch, st);
+ mutex_unlock(&st->lmutex);
+}
+
+static int
+st_own_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
+{
+ if (!ch->st || !ch->st->layer1)
+ return -EINVAL;
+ return ch->st->layer1->ctrl(ch->st->layer1, cmd, arg);
+}
+
+int
+create_stack(struct mISDNdevice *dev)
+{
+ struct mISDNstack *newst;
+ int err;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ newst = kzalloc(sizeof(struct mISDNstack), GFP_KERNEL);
+ if (!newst) {
+ printk(KERN_ERR "kmalloc mISDN_stack failed\n");
+ return -ENOMEM;
+ }
+ newst->dev = dev;
+ INIT_LIST_HEAD(&newst->layer2);
+ INIT_HLIST_HEAD(&newst->l1sock.head);
+ rwlock_init(&newst->l1sock.lock);
+ init_waitqueue_head(&newst->workq);
+ skb_queue_head_init(&newst->msgq);
+ mutex_init(&newst->lmutex);
+ dev->D.st = newst;
+ err = create_teimanager(dev);
+ if (err) {
+ printk(KERN_ERR "kmalloc teimanager failed\n");
+ kfree(newst);
+ return err;
+ }
+ dev->teimgr->peer = &newst->own;
+ dev->teimgr->recv = mISDN_queue_message;
+ dev->teimgr->st = newst;
+ newst->layer1 = &dev->D;
+ dev->D.recv = l1_receive;
+ dev->D.peer = &newst->own;
+ newst->own.st = newst;
+ newst->own.ctrl = st_own_ctrl;
+ newst->own.send = mISDN_queue_message;
+ newst->own.recv = mISDN_queue_message;
+ if (*debug & DEBUG_CORE_FUNC)
+ printk(KERN_DEBUG "%s: st(%s)\n", __func__, newst->dev->name);
+ newst->notify = &done;
+ newst->thread = kthread_run(mISDNStackd, (void *)newst, "mISDN_%s",
+ newst->dev->name);
+ if (IS_ERR(newst->thread)) {
+ err = PTR_ERR(newst->thread);
+ printk(KERN_ERR
+ "mISDN:cannot create kernel thread for %s (%d)\n",
+ newst->dev->name, err);
+ delete_teimanager(dev->teimgr);
+ kfree(newst);
+ } else
+ wait_for_completion(&done);
+ return err;
+}
+
+int
+connect_layer1(struct mISDNdevice *dev, struct mISDNchannel *ch,
+ u_int protocol, struct sockaddr_mISDN *adr)
+{
+ struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
+ struct channel_req rq;
+ int err;
+
+ if (*debug & DEBUG_CORE_FUNC)
+ printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
+ __func__, dev->name, protocol, adr->dev, adr->channel,
+ adr->sapi, adr->tei);
+ switch (protocol) {
+ case ISDN_P_NT_S0:
+ case ISDN_P_NT_E1:
+ case ISDN_P_TE_S0:
+ case ISDN_P_TE_E1:
+#ifdef PROTOCOL_CHECK
+ /* this should be enhanced */
+ if (!list_empty(&dev->D.st->layer2)
+ && dev->D.protocol != protocol)
+ return -EBUSY;
+ if (!hlist_empty(&dev->D.st->l1sock.head)
+ && dev->D.protocol != protocol)
+ return -EBUSY;
+#endif
+ ch->recv = mISDN_queue_message;
+ ch->peer = &dev->D.st->own;
+ ch->st = dev->D.st;
+ rq.protocol = protocol;
+ rq.adr.channel = 0;
+ err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
+ printk(KERN_DEBUG "%s: ret 1 %d\n", __func__, err);
+ if (err)
+ return err;
+ write_lock_bh(&dev->D.st->l1sock.lock);
+ sk_add_node(&msk->sk, &dev->D.st->l1sock.head);
+ write_unlock_bh(&dev->D.st->l1sock.lock);
+ break;
+ default:
+ return -ENOPROTOOPT;
+ }
+ return 0;
+}
+
+int
+connect_Bstack(struct mISDNdevice *dev, struct mISDNchannel *ch,
+ u_int protocol, struct sockaddr_mISDN *adr)
+{
+ struct channel_req rq, rq2;
+ int pmask, err;
+ struct Bprotocol *bp;
+
+ if (*debug & DEBUG_CORE_FUNC)
+ printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
+ __func__, dev->name, protocol,
+ adr->dev, adr->channel, adr->sapi,
+ adr->tei);
+ ch->st = dev->D.st;
+ pmask = 1 << (protocol & ISDN_P_B_MASK);
+ if (pmask & dev->Bprotocols) {
+ rq.protocol = protocol;
+ rq.adr = *adr;
+ err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
+ if (err)
+ return err;
+ ch->recv = rq.ch->send;
+ ch->peer = rq.ch;
+ rq.ch->recv = ch->send;
+ rq.ch->peer = ch;
+ rq.ch->st = dev->D.st;
+ } else {
+ bp = get_Bprotocol4mask(pmask);
+ if (!bp)
+ return -ENOPROTOOPT;
+ rq2.protocol = protocol;
+ rq2.adr = *adr;
+ rq2.ch = ch;
+ err = bp->create(&rq2);
+ if (err)
+ return err;
+ ch->recv = rq2.ch->send;
+ ch->peer = rq2.ch;
+ rq2.ch->st = dev->D.st;
+ rq.protocol = rq2.protocol;
+ rq.adr = *adr;
+ err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
+ if (err) {
+ rq2.ch->ctrl(rq2.ch, CLOSE_CHANNEL, NULL);
+ return err;
+ }
+ rq2.ch->recv = rq.ch->send;
+ rq2.ch->peer = rq.ch;
+ rq.ch->recv = rq2.ch->send;
+ rq.ch->peer = rq2.ch;
+ rq.ch->st = dev->D.st;
+ }
+ ch->protocol = protocol;
+ ch->nr = rq.ch->nr;
+ return 0;
+}
+
+int
+create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch,
+ u_int protocol, struct sockaddr_mISDN *adr)
+{
+ struct channel_req rq;
+ int err;
+
+ if (*debug & DEBUG_CORE_FUNC)
+ printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
+ __func__, dev->name, protocol,
+ adr->dev, adr->channel, adr->sapi,
+ adr->tei);
+ rq.protocol = ISDN_P_TE_S0;
+ if (dev->Dprotocols & (1 << ISDN_P_TE_E1))
+ rq.protocol = ISDN_P_TE_E1;
+ switch (protocol) {
+ case ISDN_P_LAPD_NT:
+ rq.protocol = ISDN_P_NT_S0;
+ if (dev->Dprotocols & (1 << ISDN_P_NT_E1))
+ rq.protocol = ISDN_P_NT_E1;
+ case ISDN_P_LAPD_TE:
+#ifdef PROTOCOL_CHECK
+ /* this should be enhanced */
+ if (!list_empty(&dev->D.st->layer2)
+ && dev->D.protocol != protocol)
+ return -EBUSY;
+ if (!hlist_empty(&dev->D.st->l1sock.head)
+ && dev->D.protocol != protocol)
+ return -EBUSY;
+#endif
+ ch->recv = mISDN_queue_message;
+ ch->peer = &dev->D.st->own;
+ ch->st = dev->D.st;
+ rq.adr.channel = 0;
+ err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
+ printk(KERN_DEBUG "%s: ret 1 %d\n", __func__, err);
+ if (err)
+ break;
+ rq.protocol = protocol;
+ rq.adr = *adr;
+ rq.ch = ch;
+ err = dev->teimgr->ctrl(dev->teimgr, OPEN_CHANNEL, &rq);
+ printk(KERN_DEBUG "%s: ret 2 %d\n", __func__, err);
+ if (!err) {
+ if ((protocol == ISDN_P_LAPD_NT) && !rq.ch)
+ break;
+ add_layer2(rq.ch, dev->D.st);
+ rq.ch->recv = mISDN_queue_message;
+ rq.ch->peer = &dev->D.st->own;
+ rq.ch->ctrl(rq.ch, OPEN_CHANNEL, NULL); /* can't fail */
+ }
+ break;
+ default:
+ err = -EPROTONOSUPPORT;
+ }
+ return err;
+}
+
+void
+delete_channel(struct mISDNchannel *ch)
+{
+ struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
+ struct mISDNchannel *pch;
+
+ if (!ch->st) {
+ printk(KERN_WARNING "%s: no stack\n", __func__);
+ return;
+ }
+ if (*debug & DEBUG_CORE_FUNC)
+ printk(KERN_DEBUG "%s: st(%s) protocol(%x)\n", __func__,
+ ch->st->dev->name, ch->protocol);
+ if (ch->protocol >= ISDN_P_B_START) {
+ if (ch->peer) {
+ ch->peer->ctrl(ch->peer, CLOSE_CHANNEL, NULL);
+ ch->peer = NULL;
+ }
+ return;
+ }
+ switch (ch->protocol) {
+ case ISDN_P_NT_S0:
+ case ISDN_P_TE_S0:
+ case ISDN_P_NT_E1:
+ case ISDN_P_TE_E1:
+ write_lock_bh(&ch->st->l1sock.lock);
+ sk_del_node_init(&msk->sk);
+ write_unlock_bh(&ch->st->l1sock.lock);
+ ch->st->dev->D.ctrl(&ch->st->dev->D, CLOSE_CHANNEL, NULL);
+ break;
+ case ISDN_P_LAPD_TE:
+ pch = get_channel4id(ch->st, ch->nr);
+ if (pch) {
+ mutex_lock(&ch->st->lmutex);
+ list_del(&pch->list);
+ mutex_unlock(&ch->st->lmutex);
+ pch->ctrl(pch, CLOSE_CHANNEL, NULL);
+ pch = ch->st->dev->teimgr;
+ pch->ctrl(pch, CLOSE_CHANNEL, NULL);
+ } else
+ printk(KERN_WARNING "%s: no l2 channel\n",
+ __func__);
+ break;
+ case ISDN_P_LAPD_NT:
+ pch = ch->st->dev->teimgr;
+ if (pch) {
+ pch->ctrl(pch, CLOSE_CHANNEL, NULL);
+ } else
+ printk(KERN_WARNING "%s: no l2 channel\n",
+ __func__);
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
+void
+delete_stack(struct mISDNdevice *dev)
+{
+ struct mISDNstack *st = dev->D.st;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ if (*debug & DEBUG_CORE_FUNC)
+ printk(KERN_DEBUG "%s: st(%s)\n", __func__,
+ st->dev->name);
+ if (dev->teimgr)
+ delete_teimanager(dev->teimgr);
+ if (st->thread) {
+ if (st->notify) {
+ printk(KERN_WARNING "%s: notifier in use\n",
+ __func__);
+ complete(st->notify);
+ }
+ st->notify = &done;
+ test_and_set_bit(mISDN_STACK_ABORT, &st->status);
+ test_and_set_bit(mISDN_STACK_WAKEUP, &st->status);
+ wake_up_interruptible(&st->workq);
+ wait_for_completion(&done);
+ }
+ if (!list_empty(&st->layer2))
+ printk(KERN_WARNING "%s: layer2 list not empty\n",
+ __func__);
+ if (!hlist_empty(&st->l1sock.head))
+ printk(KERN_WARNING "%s: layer1 list not empty\n",
+ __func__);
+ kfree(st);
+}
+
+void
+mISDN_initstack(u_int *dp)
+{
+ debug = dp;
+}
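Every skb handled by mISDNStackd() carries a struct mISDNhead (prim, id) in its control block, and send_msg_to_layer() routes it by prim & MISDN_LAYERMASK: 0x1 goes to the layer-1 channel (and any bound layer-1 sockets), 0x2 is distributed over the layer-2 list, 0x4 is delivered to the channel whose nr matches the id. A minimal sketch of how a channel that is already linked to a stack queues a primitive, assuming the _alloc_mISDN_skb() helper from <linux/mISDNif.h> in this series, which fills in prim/id and copies an optional payload:

/* sketch under the assumptions stated above */
#include <linux/mISDNif.h>

static int example_queue_activate(struct mISDNchannel *ch)
{
	struct sk_buff *skb;

	skb = _alloc_mISDN_skb(PH_ACTIVATE_REQ, MISDN_ID_ANY, 0, NULL,
	    GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	/* wakes mISDNStackd(), which dispatches via send_msg_to_layer() */
	return mISDN_queue_message(ch, skb);
}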
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
new file mode 100644
index 000000000000..6fbae42127bf
--- /dev/null
+++ b/drivers/isdn/mISDN/tei.c
@@ -0,0 +1,1340 @@
+/*
+ *
+ * Author Karsten Keil <kkeil@novell.com>
+ *
+ * Copyright 2008 by Karsten Keil <kkeil@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include "layer2.h"
+#include <linux/random.h>
+#include "core.h"
+
+#define ID_REQUEST 1
+#define ID_ASSIGNED 2
+#define ID_DENIED 3
+#define ID_CHK_REQ 4
+#define ID_CHK_RES 5
+#define ID_REMOVE 6
+#define ID_VERIFY 7
+
+#define TEI_ENTITY_ID 0xf
+
+#define MGR_PH_ACTIVE 16
+#define MGR_PH_NOTREADY 17
+
+#define DATIMER_VAL 10000
+
+static u_int *debug;
+
+static struct Fsm deactfsm = {NULL, 0, 0, NULL, NULL};
+static struct Fsm teifsmu = {NULL, 0, 0, NULL, NULL};
+static struct Fsm teifsmn = {NULL, 0, 0, NULL, NULL};
+
+enum {
+ ST_L1_DEACT,
+ ST_L1_DEACT_PENDING,
+ ST_L1_ACTIV,
+};
+#define DEACT_STATE_COUNT (ST_L1_ACTIV+1)
+
+static char *strDeactState[] =
+{
+ "ST_L1_DEACT",
+ "ST_L1_DEACT_PENDING",
+ "ST_L1_ACTIV",
+};
+
+enum {
+ EV_ACTIVATE,
+ EV_ACTIVATE_IND,
+ EV_DEACTIVATE,
+ EV_DEACTIVATE_IND,
+ EV_UI,
+ EV_DATIMER,
+};
+
+#define DEACT_EVENT_COUNT (EV_DATIMER+1)
+
+static char *strDeactEvent[] =
+{
+ "EV_ACTIVATE",
+ "EV_ACTIVATE_IND",
+ "EV_DEACTIVATE",
+ "EV_DEACTIVATE_IND",
+ "EV_UI",
+ "EV_DATIMER",
+};
+
+static void
+da_debug(struct FsmInst *fi, char *fmt, ...)
+{
+ struct manager *mgr = fi->userdata;
+ va_list va;
+
+ if (!(*debug & DEBUG_L2_TEIFSM))
+ return;
+ va_start(va, fmt);
+ printk(KERN_DEBUG "mgr(%d): ", mgr->ch.st->dev->id);
+ vprintk(fmt, va);
+ printk("\n");
+ va_end(va);
+}
+
+static void
+da_activate(struct FsmInst *fi, int event, void *arg)
+{
+ struct manager *mgr = fi->userdata;
+
+ if (fi->state == ST_L1_DEACT_PENDING)
+ mISDN_FsmDelTimer(&mgr->datimer, 1);
+ mISDN_FsmChangeState(fi, ST_L1_ACTIV);
+}
+
+static void
+da_deactivate_ind(struct FsmInst *fi, int event, void *arg)
+{
+ mISDN_FsmChangeState(fi, ST_L1_DEACT);
+}
+
+static void
+da_deactivate(struct FsmInst *fi, int event, void *arg)
+{
+ struct manager *mgr = fi->userdata;
+ struct layer2 *l2;
+ u_long flags;
+
+ read_lock_irqsave(&mgr->lock, flags);
+ list_for_each_entry(l2, &mgr->layer2, list) {
+ if (l2->l2m.state > ST_L2_4) {
+ /* still have an active TEI */
+ read_unlock_irqrestore(&mgr->lock, flags);
+ return;
+ }
+ }
+ read_unlock_irqrestore(&mgr->lock, flags);
+ /* All TEIs are inactive */
+ mISDN_FsmAddTimer(&mgr->datimer, DATIMER_VAL, EV_DATIMER, NULL, 1);
+ mISDN_FsmChangeState(fi, ST_L1_DEACT_PENDING);
+}
+
+static void
+da_ui(struct FsmInst *fi, int event, void *arg)
+{
+ struct manager *mgr = fi->userdata;
+
+ /* restart da timer */
+ mISDN_FsmDelTimer(&mgr->datimer, 2);
+ mISDN_FsmAddTimer(&mgr->datimer, DATIMER_VAL, EV_DATIMER, NULL, 2);
+
+}
+
+static void
+da_timer(struct FsmInst *fi, int event, void *arg)
+{
+ struct manager *mgr = fi->userdata;
+ struct layer2 *l2;
+ u_long flags;
+
+ /* check again */
+ read_lock_irqsave(&mgr->lock, flags);
+ list_for_each_entry(l2, &mgr->layer2, list) {
+ if (l2->l2m.state > ST_L2_4) {
+ /* still have an active TEI */
+ read_unlock_irqrestore(&mgr->lock, flags);
+ mISDN_FsmChangeState(fi, ST_L1_ACTIV);
+ return;
+ }
+ }
+ read_unlock_irqrestore(&mgr->lock, flags);
+ /* All TEIs are inactive */
+ mISDN_FsmChangeState(fi, ST_L1_DEACT);
+ _queue_data(&mgr->ch, PH_DEACTIVATE_REQ, MISDN_ID_ANY, 0, NULL,
+ GFP_ATOMIC);
+}
+
+static struct FsmNode DeactFnList[] =
+{
+ {ST_L1_DEACT, EV_ACTIVATE_IND, da_activate},
+ {ST_L1_ACTIV, EV_DEACTIVATE_IND, da_deactivate_ind},
+ {ST_L1_ACTIV, EV_DEACTIVATE, da_deactivate},
+ {ST_L1_DEACT_PENDING, EV_ACTIVATE, da_activate},
+ {ST_L1_DEACT_PENDING, EV_UI, da_ui},
+ {ST_L1_DEACT_PENDING, EV_DATIMER, da_timer},
+};
+
+enum {
+ ST_TEI_NOP,
+ ST_TEI_IDREQ,
+ ST_TEI_IDVERIFY,
+};
+
+#define TEI_STATE_COUNT (ST_TEI_IDVERIFY+1)
+
+static char *strTeiState[] =
+{
+ "ST_TEI_NOP",
+ "ST_TEI_IDREQ",
+ "ST_TEI_IDVERIFY",
+};
+
+enum {
+ EV_IDREQ,
+ EV_ASSIGN,
+ EV_ASSIGN_REQ,
+ EV_DENIED,
+ EV_CHKREQ,
+ EV_CHKRESP,
+ EV_REMOVE,
+ EV_VERIFY,
+ EV_TIMER,
+};
+
+#define TEI_EVENT_COUNT (EV_TIMER+1)
+
+static char *strTeiEvent[] =
+{
+ "EV_IDREQ",
+ "EV_ASSIGN",
+ "EV_ASSIGN_REQ",
+ "EV_DENIED",
+ "EV_CHKREQ",
+ "EV_CHKRESP",
+ "EV_REMOVE",
+ "EV_VERIFY",
+ "EV_TIMER",
+};
+
+static void
+tei_debug(struct FsmInst *fi, char *fmt, ...)
+{
+ struct teimgr *tm = fi->userdata;
+ va_list va;
+
+ if (!(*debug & DEBUG_L2_TEIFSM))
+ return;
+ va_start(va, fmt);
+ printk(KERN_DEBUG "tei(%d): ", tm->l2->tei);
+ vprintk(fmt, va);
+ printk("\n");
+ va_end(va);
+}
+
+
+static int
+get_free_id(struct manager *mgr)
+{
+ u64 ids = 0;
+ int i;
+ struct layer2 *l2;
+
+ list_for_each_entry(l2, &mgr->layer2, list) {
+ if (l2->ch.nr > 63) {
+ printk(KERN_WARNING
+ "%s: more as 63 layer2 for one device\n",
+ __func__);
+ return -EBUSY;
+ }
+ test_and_set_bit(l2->ch.nr, (u_long *)&ids);
+ }
+ for (i = 1; i < 64; i++)
+ if (!test_bit(i, (u_long *)&ids))
+ return i;
+ printk(KERN_WARNING "%s: more as 63 layer2 for one device\n",
+ __func__);
+ return -EBUSY;
+}
+
+static int
+get_free_tei(struct manager *mgr)
+{
+ u64 ids = 0;
+ int i;
+ struct layer2 *l2;
+
+ list_for_each_entry(l2, &mgr->layer2, list) {
+ if (l2->ch.nr == 0)
+ continue;
+ if ((l2->ch.addr & 0xff) != 0)
+ continue;
+ i = l2->ch.addr >> 8;
+ if (i < 64)
+ continue;
+ i -= 64;
+
+ test_and_set_bit(i, (u_long *)&ids);
+ }
+ for (i = 0; i < 64; i++)
+ if (!test_bit(i, (u_long *)&ids))
+ return i + 64;
+ printk(KERN_WARNING "%s: more as 63 dynamic tei for one device\n",
+ __func__);
+ return -1;
+}
+
+static void
+teiup_create(struct manager *mgr, u_int prim, int len, void *arg)
+{
+ struct sk_buff *skb;
+ struct mISDNhead *hh;
+ int err;
+
+ skb = mI_alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ return;
+ hh = mISDN_HEAD_P(skb);
+ hh->prim = prim;
+ hh->id = (mgr->ch.nr << 16) | mgr->ch.addr;
+ if (len)
+ memcpy(skb_put(skb, len), arg, len);
+ err = mgr->up->send(mgr->up, skb);
+ if (err) {
+ printk(KERN_WARNING "%s: err=%d\n", __func__, err);
+ dev_kfree_skb(skb);
+ }
+}
+
+static u_int
+new_id(struct manager *mgr)
+{
+ u_int id;
+
+ id = mgr->nextid++;
+ if (id == 0x7fff)
+ mgr->nextid = 1;
+ id <<= 16;
+ id |= GROUP_TEI << 8;
+ id |= TEI_SAPI;
+ return id;
+}
+
+static void
+do_send(struct manager *mgr)
+{
+ if (!test_bit(MGR_PH_ACTIVE, &mgr->options))
+ return;
+
+ if (!test_and_set_bit(MGR_PH_NOTREADY, &mgr->options)) {
+ struct sk_buff *skb = skb_dequeue(&mgr->sendq);
+
+ if (!skb) {
+ test_and_clear_bit(MGR_PH_NOTREADY, &mgr->options);
+ return;
+ }
+ mgr->lastid = mISDN_HEAD_ID(skb);
+ mISDN_FsmEvent(&mgr->deact, EV_UI, NULL);
+ if (mgr->ch.recv(mgr->ch.peer, skb)) {
+ dev_kfree_skb(skb);
+ test_and_clear_bit(MGR_PH_NOTREADY, &mgr->options);
+ mgr->lastid = MISDN_ID_NONE;
+ }
+ }
+}
+
+static void
+do_ack(struct manager *mgr, u_int id)
+{
+ if (test_bit(MGR_PH_NOTREADY, &mgr->options)) {
+ if (id == mgr->lastid) {
+ if (test_bit(MGR_PH_ACTIVE, &mgr->options)) {
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&mgr->sendq);
+ if (skb) {
+ mgr->lastid = mISDN_HEAD_ID(skb);
+ if (!mgr->ch.recv(mgr->ch.peer, skb))
+ return;
+ dev_kfree_skb(skb);
+ }
+ }
+ mgr->lastid = MISDN_ID_NONE;
+ test_and_clear_bit(MGR_PH_NOTREADY, &mgr->options);
+ }
+ }
+}
+
+static void
+mgr_send_down(struct manager *mgr, struct sk_buff *skb)
+{
+ skb_queue_tail(&mgr->sendq, skb);
+ if (!test_bit(MGR_PH_ACTIVE, &mgr->options)) {
+ _queue_data(&mgr->ch, PH_ACTIVATE_REQ, MISDN_ID_ANY, 0,
+ NULL, GFP_KERNEL);
+ } else {
+ do_send(mgr);
+ }
+}
+
+static int
+dl_unit_data(struct manager *mgr, struct sk_buff *skb)
+{
+ if (!test_bit(MGR_OPT_NETWORK, &mgr->options)) /* only the NT side sends UI */
+ return -EINVAL;
+ if (!test_bit(MGR_PH_ACTIVE, &mgr->options))
+ _queue_data(&mgr->ch, PH_ACTIVATE_REQ, MISDN_ID_ANY, 0,
+ NULL, GFP_KERNEL);
+ skb_push(skb, 3);
+ skb->data[0] = 0x02; /* SAPI 0 C/R = 1 */
+ skb->data[1] = 0xff; /* TEI 127 */
+ skb->data[2] = UI; /* UI frame */
+ mISDN_HEAD_PRIM(skb) = PH_DATA_REQ;
+ mISDN_HEAD_ID(skb) = new_id(mgr);
+ skb_queue_tail(&mgr->sendq, skb);
+ do_send(mgr);
+ return 0;
+}
+
+unsigned int
+random_ri(void)
+{
+ u16 x;
+
+ get_random_bytes(&x, sizeof(x));
+ return x;
+}
+
+static struct layer2 *
+findtei(struct manager *mgr, int tei)
+{
+ struct layer2 *l2;
+ u_long flags;
+
+ read_lock_irqsave(&mgr->lock, flags);
+ list_for_each_entry(l2, &mgr->layer2, list) {
+ if ((l2->sapi == 0) && (l2->tei > 0) &&
+ (l2->tei != GROUP_TEI) && (l2->tei == tei))
+ goto done;
+ }
+ l2 = NULL;
+done:
+ read_unlock_irqrestore(&mgr->lock, flags);
+ return l2;
+}
+
+static void
+put_tei_msg(struct manager *mgr, u_char m_id, unsigned int ri, u_char tei)
+{
+ struct sk_buff *skb;
+ u_char bp[8];
+
+ bp[0] = (TEI_SAPI << 2);
+ if (test_bit(MGR_OPT_NETWORK, &mgr->options))
+ bp[0] |= 2; /* CR:=1 for net command */
+ bp[1] = (GROUP_TEI << 1) | 0x1;
+ bp[2] = UI;
+ bp[3] = TEI_ENTITY_ID;
+ bp[4] = ri >> 8;
+ bp[5] = ri & 0xff;
+ bp[6] = m_id;
+ bp[7] = (tei << 1) | 1;
+ skb = _alloc_mISDN_skb(PH_DATA_REQ, new_id(mgr),
+ 8, bp, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_WARNING "%s: no skb for tei msg\n", __func__);
+ return;
+ }
+ mgr_send_down(mgr, skb);
+}
+
+static void
+tei_id_request(struct FsmInst *fi, int event, void *arg)
+{
+ struct teimgr *tm = fi->userdata;
+
+ if (tm->l2->tei != GROUP_TEI) {
+ tm->tei_m.printdebug(&tm->tei_m,
+ "assign request for allready assigned tei %d",
+ tm->l2->tei);
+ return;
+ }
+ tm->ri = random_ri();
+ if (*debug & DEBUG_L2_TEI)
+ tm->tei_m.printdebug(&tm->tei_m,
+ "assign request ri %d", tm->ri);
+ put_tei_msg(tm->mgr, ID_REQUEST, tm->ri, GROUP_TEI);
+ mISDN_FsmChangeState(fi, ST_TEI_IDREQ);
+ mISDN_FsmAddTimer(&tm->timer, tm->tval, EV_TIMER, NULL, 1);
+ tm->nval = 3;
+}
+
+static void
+tei_id_assign(struct FsmInst *fi, int event, void *arg)
+{
+ struct teimgr *tm = fi->userdata;
+ struct layer2 *l2;
+ u_char *dp = arg;
+ int ri, tei;
+
+ ri = ((unsigned int) *dp++ << 8);
+ ri += *dp++;
+ dp++;
+ tei = *dp >> 1;
+ if (*debug & DEBUG_L2_TEI)
+ tm->tei_m.printdebug(fi, "identity assign ri %d tei %d",
+ ri, tei);
+ l2 = findtei(tm->mgr, tei);
+ if (l2) { /* same tei is in use */
+ if (ri != l2->tm->ri) {
+ tm->tei_m.printdebug(fi,
+ "possible duplicate assignment tei %d", tei);
+ tei_l2(l2, MDL_ERROR_RSP, 0);
+ }
+ } else if (ri == tm->ri) {
+ mISDN_FsmDelTimer(&tm->timer, 1);
+ mISDN_FsmChangeState(fi, ST_TEI_NOP);
+ tei_l2(tm->l2, MDL_ASSIGN_REQ, tei);
+ }
+}
+
+static void
+tei_id_test_dup(struct FsmInst *fi, int event, void *arg)
+{
+ struct teimgr *tm = fi->userdata;
+ struct layer2 *l2;
+ u_char *dp = arg;
+ int tei, ri;
+
+ ri = ((unsigned int) *dp++ << 8);
+ ri += *dp++;
+ dp++;
+ tei = *dp >> 1;
+ if (*debug & DEBUG_L2_TEI)
+ tm->tei_m.printdebug(fi, "foreign identity assign ri %d tei %d",
+ ri, tei);
+ l2 = findtei(tm->mgr, tei);
+ if (l2) { /* same tei is in use */
+ if (ri != l2->tm->ri) { /* and it wasn't our request */
+ tm->tei_m.printdebug(fi,
+ "possible duplicate assignment tei %d", tei);
+ mISDN_FsmEvent(&l2->tm->tei_m, EV_VERIFY, NULL);
+ }
+ }
+}
+
+static void
+tei_id_denied(struct FsmInst *fi, int event, void *arg)
+{
+ struct teimgr *tm = fi->userdata;
+ u_char *dp = arg;
+ int ri, tei;
+
+ ri = ((unsigned int) *dp++ << 8);
+ ri += *dp++;
+ dp++;
+ tei = *dp >> 1;
+ if (*debug & DEBUG_L2_TEI)
+ tm->tei_m.printdebug(fi, "identity denied ri %d tei %d",
+ ri, tei);
+}
+
+static void
+tei_id_chk_req(struct FsmInst *fi, int event, void *arg)
+{
+ struct teimgr *tm = fi->userdata;
+ u_char *dp = arg;
+ int tei;
+
+ tei = *(dp+3) >> 1;
+ if (*debug & DEBUG_L2_TEI)
+ tm->tei_m.printdebug(fi, "identity check req tei %d", tei);
+ if ((tm->l2->tei != GROUP_TEI) && ((tei == GROUP_TEI) ||
+ (tei == tm->l2->tei))) {
+ mISDN_FsmDelTimer(&tm->timer, 4);
+ mISDN_FsmChangeState(&tm->tei_m, ST_TEI_NOP);
+ put_tei_msg(tm->mgr, ID_CHK_RES, random_ri(), tm->l2->tei);
+ }
+}
+
+static void
+tei_id_remove(struct FsmInst *fi, int event, void *arg)
+{
+ struct teimgr *tm = fi->userdata;
+ u_char *dp = arg;
+ int tei;
+
+ tei = *(dp+3) >> 1;
+ if (*debug & DEBUG_L2_TEI)
+ tm->tei_m.printdebug(fi, "identity remove tei %d", tei);
+ if ((tm->l2->tei != GROUP_TEI) &&
+ ((tei == GROUP_TEI) || (tei == tm->l2->tei))) {
+ mISDN_FsmDelTimer(&tm->timer, 5);
+ mISDN_FsmChangeState(&tm->tei_m, ST_TEI_NOP);
+ tei_l2(tm->l2, MDL_REMOVE_REQ, 0);
+ }
+}
+
+static void
+tei_id_verify(struct FsmInst *fi, int event, void *arg)
+{
+ struct teimgr *tm = fi->userdata;
+
+ if (*debug & DEBUG_L2_TEI)
+ tm->tei_m.printdebug(fi, "id verify request for tei %d",
+ tm->l2->tei);
+ put_tei_msg(tm->mgr, ID_VERIFY, 0, tm->l2->tei);
+ mISDN_FsmChangeState(&tm->tei_m, ST_TEI_IDVERIFY);
+ mISDN_FsmAddTimer(&tm->timer, tm->tval, EV_TIMER, NULL, 2);
+ tm->nval = 2;
+}
+
+static void
+tei_id_req_tout(struct FsmInst *fi, int event, void *arg)
+{
+ struct teimgr *tm = fi->userdata;
+
+ if (--tm->nval) {
+ tm->ri = random_ri();
+ if (*debug & DEBUG_L2_TEI)
+ tm->tei_m.printdebug(fi, "assign req(%d) ri %d",
+ 4 - tm->nval, tm->ri);
+ put_tei_msg(tm->mgr, ID_REQUEST, tm->ri, GROUP_TEI);
+ mISDN_FsmAddTimer(&tm->timer, tm->tval, EV_TIMER, NULL, 3);
+ } else {
+ tm->tei_m.printdebug(fi, "assign req failed");
+ tei_l2(tm->l2, MDL_ERROR_RSP, 0);
+ mISDN_FsmChangeState(fi, ST_TEI_NOP);
+ }
+}
+
+static void
+tei_id_ver_tout(struct FsmInst *fi, int event, void *arg)
+{
+ struct teimgr *tm = fi->userdata;
+
+ if (--tm->nval) {
+ if (*debug & DEBUG_L2_TEI)
+ tm->tei_m.printdebug(fi,
+ "id verify req(%d) for tei %d",
+ 3 - tm->nval, tm->l2->tei);
+ put_tei_msg(tm->mgr, ID_VERIFY, 0, tm->l2->tei);
+ mISDN_FsmAddTimer(&tm->timer, tm->tval, EV_TIMER, NULL, 4);
+ } else {
+ tm->tei_m.printdebug(fi, "verify req for tei %d failed",
+ tm->l2->tei);
+ tei_l2(tm->l2, MDL_REMOVE_REQ, 0);
+ mISDN_FsmChangeState(fi, ST_TEI_NOP);
+ }
+}
+
+static struct FsmNode TeiFnListUser[] =
+{
+ {ST_TEI_NOP, EV_IDREQ, tei_id_request},
+ {ST_TEI_NOP, EV_ASSIGN, tei_id_test_dup},
+ {ST_TEI_NOP, EV_VERIFY, tei_id_verify},
+ {ST_TEI_NOP, EV_REMOVE, tei_id_remove},
+ {ST_TEI_NOP, EV_CHKREQ, tei_id_chk_req},
+ {ST_TEI_IDREQ, EV_TIMER, tei_id_req_tout},
+ {ST_TEI_IDREQ, EV_ASSIGN, tei_id_assign},
+ {ST_TEI_IDREQ, EV_DENIED, tei_id_denied},
+ {ST_TEI_IDVERIFY, EV_TIMER, tei_id_ver_tout},
+ {ST_TEI_IDVERIFY, EV_REMOVE, tei_id_remove},
+ {ST_TEI_IDVERIFY, EV_CHKREQ, tei_id_chk_req},
+};
+
+static void
+tei_l2remove(struct layer2 *l2)
+{
+ put_tei_msg(l2->tm->mgr, ID_REMOVE, 0, l2->tei);
+ tei_l2(l2, MDL_REMOVE_REQ, 0);
+ list_del(&l2->ch.list);
+ l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
+}
+
+static void
+tei_assign_req(struct FsmInst *fi, int event, void *arg)
+{
+ struct teimgr *tm = fi->userdata;
+ u_char *dp = arg;
+
+ if (tm->l2->tei == GROUP_TEI) {
+ tm->tei_m.printdebug(&tm->tei_m,
+ "net tei assign request without tei");
+ return;
+ }
+ tm->ri = ((unsigned int) *dp++ << 8);
+ tm->ri += *dp++;
+ if (*debug & DEBUG_L2_TEI)
+ tm->tei_m.printdebug(&tm->tei_m,
+ "net assign request ri %d teim %d", tm->ri, *dp);
+ put_tei_msg(tm->mgr, ID_ASSIGNED, tm->ri, tm->l2->tei);
+ mISDN_FsmChangeState(fi, ST_TEI_NOP);
+}
+
+static void
+tei_id_chk_req_net(struct FsmInst *fi, int event, void *arg)
+{
+ struct teimgr *tm = fi->userdata;
+
+ if (*debug & DEBUG_L2_TEI)
+ tm->tei_m.printdebug(fi, "id check request for tei %d",
+ tm->l2->tei);
+ tm->rcnt = 0;
+ put_tei_msg(tm->mgr, ID_CHK_REQ, 0, tm->l2->tei);
+ mISDN_FsmChangeState(&tm->tei_m, ST_TEI_IDVERIFY);
+ mISDN_FsmAddTimer(&tm->timer, tm->tval, EV_TIMER, NULL, 2);
+ tm->nval = 2;
+}
+
+static void
+tei_id_chk_resp(struct FsmInst *fi, int event, void *arg)
+{
+ struct teimgr *tm = fi->userdata;
+ u_char *dp = arg;
+ int tei;
+
+ tei = dp[3] >> 1;
+ if (*debug & DEBUG_L2_TEI)
+ tm->tei_m.printdebug(fi, "identity check resp tei %d", tei);
+ if (tei == tm->l2->tei)
+ tm->rcnt++;
+}
+
+static void
+tei_id_verify_net(struct FsmInst *fi, int event, void *arg)
+{
+ struct teimgr *tm = fi->userdata;
+ u_char *dp = arg;
+ int tei;
+
+ tei = dp[3] >> 1;
+ if (*debug & DEBUG_L2_TEI)
+ tm->tei_m.printdebug(fi, "identity verify req tei %d/%d",
+ tei, tm->l2->tei);
+ if (tei == tm->l2->tei)
+ tei_id_chk_req_net(fi, event, arg);
+}
+
+static void
+tei_id_ver_tout_net(struct FsmInst *fi, int event, void *arg)
+{
+ struct teimgr *tm = fi->userdata;
+
+ if (tm->rcnt == 1) {
+ if (*debug & DEBUG_L2_TEI)
+ tm->tei_m.printdebug(fi,
+ "check req for tei %d sucessful\n", tm->l2->tei);
+ mISDN_FsmChangeState(fi, ST_TEI_NOP);
+ } else if (tm->rcnt > 1) {
+ /* duplicate assignment; remove */
+ tei_l2remove(tm->l2);
+ } else if (--tm->nval) {
+ if (*debug & DEBUG_L2_TEI)
+ tm->tei_m.printdebug(fi,
+ "id check req(%d) for tei %d",
+ 3 - tm->nval, tm->l2->tei);
+ put_tei_msg(tm->mgr, ID_CHK_REQ, 0, tm->l2->tei);
+ mISDN_FsmAddTimer(&tm->timer, tm->tval, EV_TIMER, NULL, 4);
+ } else {
+ tm->tei_m.printdebug(fi, "check req for tei %d failed",
+ tm->l2->tei);
+ mISDN_FsmChangeState(fi, ST_TEI_NOP);
+ tei_l2remove(tm->l2);
+ }
+}
+
+static struct FsmNode TeiFnListNet[] =
+{
+ {ST_TEI_NOP, EV_ASSIGN_REQ, tei_assign_req},
+ {ST_TEI_NOP, EV_VERIFY, tei_id_verify_net},
+ {ST_TEI_NOP, EV_CHKREQ, tei_id_chk_req_net},
+ {ST_TEI_IDVERIFY, EV_TIMER, tei_id_ver_tout_net},
+ {ST_TEI_IDVERIFY, EV_CHKRESP, tei_id_chk_resp},
+};
+
+static void
+tei_ph_data_ind(struct teimgr *tm, u_int mt, u_char *dp, int len)
+{
+ if (test_bit(FLG_FIXED_TEI, &tm->l2->flag))
+ return;
+ if (*debug & DEBUG_L2_TEI)
+ tm->tei_m.printdebug(&tm->tei_m, "tei handler mt %x", mt);
+ if (mt == ID_ASSIGNED)
+ mISDN_FsmEvent(&tm->tei_m, EV_ASSIGN, dp);
+ else if (mt == ID_DENIED)
+ mISDN_FsmEvent(&tm->tei_m, EV_DENIED, dp);
+ else if (mt == ID_CHK_REQ)
+ mISDN_FsmEvent(&tm->tei_m, EV_CHKREQ, dp);
+ else if (mt == ID_REMOVE)
+ mISDN_FsmEvent(&tm->tei_m, EV_REMOVE, dp);
+ else if (mt == ID_VERIFY)
+ mISDN_FsmEvent(&tm->tei_m, EV_VERIFY, dp);
+ else if (mt == ID_CHK_RES)
+ mISDN_FsmEvent(&tm->tei_m, EV_CHKRESP, dp);
+}
+
+static struct layer2 *
+create_new_tei(struct manager *mgr, int tei)
+{
+ u_long opt = 0;
+ u_long flags;
+ int id;
+ struct layer2 *l2;
+
+ if (!mgr->up)
+ return NULL;
+ if (tei < 64)
+ test_and_set_bit(OPTION_L2_FIXEDTEI, &opt);
+ if (mgr->ch.st->dev->Dprotocols
+ & ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1)))
+ test_and_set_bit(OPTION_L2_PMX, &opt);
+ l2 = create_l2(mgr->up, ISDN_P_LAPD_NT, (u_int)opt, (u_long)tei);
+ if (!l2) {
+ printk(KERN_WARNING "%s:no memory for layer2\n", __func__);
+ return NULL;
+ }
+ l2->tm = kzalloc(sizeof(struct teimgr), GFP_KERNEL);
+ if (!l2->tm) {
+ kfree(l2);
+ printk(KERN_WARNING "%s:no memory for teimgr\n", __func__);
+ return NULL;
+ }
+ l2->tm->mgr = mgr;
+ l2->tm->l2 = l2;
+ l2->tm->tei_m.debug = *debug & DEBUG_L2_TEIFSM;
+ l2->tm->tei_m.userdata = l2->tm;
+ l2->tm->tei_m.printdebug = tei_debug;
+ l2->tm->tei_m.fsm = &teifsmn;
+ l2->tm->tei_m.state = ST_TEI_NOP;
+ l2->tm->tval = 2000; /* T202 2 sec */
+ mISDN_FsmInitTimer(&l2->tm->tei_m, &l2->tm->timer);
+ write_lock_irqsave(&mgr->lock, flags);
+ id = get_free_id(mgr);
+ list_add_tail(&l2->list, &mgr->layer2);
+ write_unlock_irqrestore(&mgr->lock, flags);
+ if (id < 0) {
+ l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
+ printk(KERN_WARNING "%s:no free id\n", __func__);
+ return NULL;
+ } else {
+ l2->ch.nr = id;
+ __add_layer2(&l2->ch, mgr->ch.st);
+ l2->ch.recv = mgr->ch.recv;
+ l2->ch.peer = mgr->ch.peer;
+ l2->ch.ctrl(&l2->ch, OPEN_CHANNEL, NULL);
+ }
+ return l2;
+}
+
+static void
+new_tei_req(struct manager *mgr, u_char *dp)
+{
+ int tei, ri;
+ struct layer2 *l2;
+
+ ri = dp[0] << 8;
+ ri += dp[1];
+ if (!mgr->up)
+ goto denied;
+ tei = get_free_tei(mgr);
+ if (tei < 0) {
+ printk(KERN_WARNING "%s:No free tei\n", __func__);
+ goto denied;
+ }
+ l2 = create_new_tei(mgr, tei);
+ if (!l2)
+ goto denied;
+ else
+ mISDN_FsmEvent(&l2->tm->tei_m, EV_ASSIGN_REQ, dp);
+ return;
+denied:
+ put_tei_msg(mgr, ID_DENIED, ri, GROUP_TEI);
+}
+
+static int
+ph_data_ind(struct manager *mgr, struct sk_buff *skb)
+{
+ int ret = -EINVAL;
+ struct layer2 *l2;
+ u_long flags;
+ u_char mt;
+
+ if (skb->len < 8) {
+ if (*debug & DEBUG_L2_TEI)
+ printk(KERN_DEBUG "%s: short mgr frame %d/8\n",
+ __func__, skb->len);
+ goto done;
+ }
+
+ if ((skb->data[0] >> 2) != TEI_SAPI) /* not for us */
+ goto done;
+ if (skb->data[0] & 1) /* EA0 formal error */
+ goto done;
+ if (!(skb->data[1] & 1)) /* EA1 formal error */
+ goto done;
+ if ((skb->data[1] >> 1) != GROUP_TEI) /* not for us */
+ goto done;
+ if ((skb->data[2] & 0xef) != UI) /* not UI */
+ goto done;
+ if (skb->data[3] != TEI_ENTITY_ID) /* not tei entity */
+ goto done;
+ mt = skb->data[6];
+ switch (mt) {
+ case ID_REQUEST:
+ case ID_CHK_RES:
+ case ID_VERIFY:
+ if (!test_bit(MGR_OPT_NETWORK, &mgr->options))
+ goto done;
+ break;
+ case ID_ASSIGNED:
+ case ID_DENIED:
+ case ID_CHK_REQ:
+ case ID_REMOVE:
+ if (test_bit(MGR_OPT_NETWORK, &mgr->options))
+ goto done;
+ break;
+ default:
+ goto done;
+ }
+ ret = 0;
+ if (mt == ID_REQUEST) {
+ new_tei_req(mgr, &skb->data[4]);
+ goto done;
+ }
+ read_lock_irqsave(&mgr->lock, flags);
+ list_for_each_entry(l2, &mgr->layer2, list) {
+ tei_ph_data_ind(l2->tm, mt, &skb->data[4], skb->len - 4);
+ }
+ read_unlock_irqrestore(&mgr->lock, flags);
+done:
+ return ret;
+}
+
+int
+l2_tei(struct layer2 *l2, u_int cmd, u_long arg)
+{
+ struct teimgr *tm = l2->tm;
+
+ if (test_bit(FLG_FIXED_TEI, &l2->flag))
+ return 0;
+ if (*debug & DEBUG_L2_TEI)
+ printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd);
+ switch (cmd) {
+ case MDL_ASSIGN_IND:
+ mISDN_FsmEvent(&tm->tei_m, EV_IDREQ, NULL);
+ break;
+ case MDL_ERROR_IND:
+ if (test_bit(MGR_OPT_NETWORK, &tm->mgr->options))
+ mISDN_FsmEvent(&tm->tei_m, EV_CHKREQ, &l2->tei);
+ if (test_bit(MGR_OPT_USER, &tm->mgr->options))
+ mISDN_FsmEvent(&tm->tei_m, EV_VERIFY, NULL);
+ break;
+ case MDL_STATUS_UP_IND:
+ if (test_bit(MGR_OPT_NETWORK, &tm->mgr->options))
+ mISDN_FsmEvent(&tm->mgr->deact, EV_ACTIVATE, NULL);
+ break;
+ case MDL_STATUS_DOWN_IND:
+ if (test_bit(MGR_OPT_NETWORK, &tm->mgr->options))
+ mISDN_FsmEvent(&tm->mgr->deact, EV_DEACTIVATE, NULL);
+ break;
+ case MDL_STATUS_UI_IND:
+ if (test_bit(MGR_OPT_NETWORK, &tm->mgr->options))
+ mISDN_FsmEvent(&tm->mgr->deact, EV_UI, NULL);
+ break;
+ }
+ return 0;
+}
+
+void
+TEIrelease(struct layer2 *l2)
+{
+ struct teimgr *tm = l2->tm;
+ u_long flags;
+
+ mISDN_FsmDelTimer(&tm->timer, 1);
+ write_lock_irqsave(&tm->mgr->lock, flags);
+ list_del(&l2->list);
+ write_unlock_irqrestore(&tm->mgr->lock, flags);
+ l2->tm = NULL;
+ kfree(tm);
+}
+
+static int
+create_teimgr(struct manager *mgr, struct channel_req *crq)
+{
+ struct layer2 *l2;
+ u_long opt = 0;
+ u_long flags;
+ int id;
+
+ if (*debug & DEBUG_L2_TEI)
+ printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
+ __func__, mgr->ch.st->dev->name, crq->protocol,
+ crq->adr.dev, crq->adr.channel, crq->adr.sapi,
+ crq->adr.tei);
+ if (crq->adr.sapi != 0) /* not supported yet */
+ return -EINVAL;
+ if (crq->adr.tei > GROUP_TEI)
+ return -EINVAL;
+ if (crq->adr.tei < 64)
+ test_and_set_bit(OPTION_L2_FIXEDTEI, &opt);
+ if (crq->adr.tei == 0)
+ test_and_set_bit(OPTION_L2_PTP, &opt);
+ if (test_bit(MGR_OPT_NETWORK, &mgr->options)) {
+ if (crq->protocol == ISDN_P_LAPD_TE)
+ return -EPROTONOSUPPORT;
+ if ((crq->adr.tei != 0) && (crq->adr.tei != 127))
+ return -EINVAL;
+ if (mgr->up) {
+ printk(KERN_WARNING
+ "%s: only one network manager is allowed\n",
+ __func__);
+ return -EBUSY;
+ }
+ } else if (test_bit(MGR_OPT_USER, &mgr->options)) {
+ if (crq->protocol == ISDN_P_LAPD_NT)
+ return -EPROTONOSUPPORT;
+ if ((crq->adr.tei >= 64) && (crq->adr.tei < GROUP_TEI))
+ return -EINVAL; /* dyn tei */
+ } else {
+ if (crq->protocol == ISDN_P_LAPD_NT)
+ test_and_set_bit(MGR_OPT_NETWORK, &mgr->options);
+ if (crq->protocol == ISDN_P_LAPD_TE)
+ test_and_set_bit(MGR_OPT_USER, &mgr->options);
+ }
+ if (mgr->ch.st->dev->Dprotocols
+ & ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1)))
+ test_and_set_bit(OPTION_L2_PMX, &opt);
+ if ((crq->protocol == ISDN_P_LAPD_NT) && (crq->adr.tei == 127)) {
+ mgr->up = crq->ch;
+ id = DL_INFO_L2_CONNECT;
+ teiup_create(mgr, DL_INFORMATION_IND, sizeof(id), &id);
+ crq->ch = NULL;
+ if (!list_empty(&mgr->layer2)) {
+ read_lock_irqsave(&mgr->lock, flags);
+ list_for_each_entry(l2, &mgr->layer2, list) {
+ l2->up = mgr->up;
+ l2->ch.ctrl(&l2->ch, OPEN_CHANNEL, NULL);
+ }
+ read_unlock_irqrestore(&mgr->lock, flags);
+ }
+ return 0;
+ }
+ l2 = create_l2(crq->ch, crq->protocol, (u_int)opt,
+ (u_long)crq->adr.tei);
+ if (!l2)
+ return -ENOMEM;
+ l2->tm = kzalloc(sizeof(struct teimgr), GFP_KERNEL);
+ if (!l2->tm) {
+ kfree(l2);
+ printk(KERN_ERR "kmalloc teimgr failed\n");
+ return -ENOMEM;
+ }
+ l2->tm->mgr = mgr;
+ l2->tm->l2 = l2;
+ l2->tm->tei_m.debug = *debug & DEBUG_L2_TEIFSM;
+ l2->tm->tei_m.userdata = l2->tm;
+ l2->tm->tei_m.printdebug = tei_debug;
+ if (crq->protocol == ISDN_P_LAPD_TE) {
+ l2->tm->tei_m.fsm = &teifsmu;
+ l2->tm->tei_m.state = ST_TEI_NOP;
+ l2->tm->tval = 1000; /* T201 1 sec */
+ } else {
+ l2->tm->tei_m.fsm = &teifsmn;
+ l2->tm->tei_m.state = ST_TEI_NOP;
+ l2->tm->tval = 2000; /* T202 2 sec */
+ }
+ mISDN_FsmInitTimer(&l2->tm->tei_m, &l2->tm->timer);
+ write_lock_irqsave(&mgr->lock, flags);
+ id = get_free_id(mgr);
+ list_add_tail(&l2->list, &mgr->layer2);
+ write_unlock_irqrestore(&mgr->lock, flags);
+ if (id < 0) {
+ l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
+ } else {
+ l2->ch.nr = id;
+ l2->up->nr = id;
+ crq->ch = &l2->ch;
+ id = 0;
+ }
+ return id;
+}
+
+static int
+mgr_send(struct mISDNchannel *ch, struct sk_buff *skb)
+{
+ struct manager *mgr;
+ struct mISDNhead *hh = mISDN_HEAD_P(skb);
+ int ret = -EINVAL;
+
+ mgr = container_of(ch, struct manager, ch);
+ if (*debug & DEBUG_L2_RECV)
+ printk(KERN_DEBUG "%s: prim(%x) id(%x)\n",
+ __func__, hh->prim, hh->id);
+ switch (hh->prim) {
+ case PH_DATA_IND:
+ mISDN_FsmEvent(&mgr->deact, EV_UI, NULL);
+ ret = ph_data_ind(mgr, skb);
+ break;
+ case PH_DATA_CNF:
+ do_ack(mgr, hh->id);
+ ret = 0;
+ break;
+ case PH_ACTIVATE_IND:
+ test_and_set_bit(MGR_PH_ACTIVE, &mgr->options);
+ mISDN_FsmEvent(&mgr->deact, EV_ACTIVATE_IND, NULL);
+ do_send(mgr);
+ ret = 0;
+ break;
+ case PH_DEACTIVATE_IND:
+ test_and_clear_bit(MGR_PH_ACTIVE, &mgr->options);
+ mISDN_FsmEvent(&mgr->deact, EV_DEACTIVATE_IND, NULL);
+ ret = 0;
+ break;
+ case DL_UNITDATA_REQ:
+ return dl_unit_data(mgr, skb);
+ }
+ if (!ret)
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int
+free_teimanager(struct manager *mgr)
+{
+ struct layer2 *l2, *nl2;
+
+ if (test_bit(MGR_OPT_NETWORK, &mgr->options)) {
+ /* not locked; the lock is taken in TEIrelease() */
+ mgr->up = NULL;
+ if (test_bit(OPTION_L2_CLEANUP, &mgr->options)) {
+ list_for_each_entry_safe(l2, nl2, &mgr->layer2, list) {
+ put_tei_msg(mgr, ID_REMOVE, 0, l2->tei);
+ mutex_lock(&mgr->ch.st->lmutex);
+ list_del(&l2->ch.list);
+ mutex_unlock(&mgr->ch.st->lmutex);
+ l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
+ }
+ test_and_clear_bit(MGR_OPT_NETWORK, &mgr->options);
+ } else {
+ list_for_each_entry_safe(l2, nl2, &mgr->layer2, list) {
+ l2->up = NULL;
+ }
+ }
+ }
+ if (test_bit(MGR_OPT_USER, &mgr->options)) {
+ if (list_empty(&mgr->layer2))
+ test_and_clear_bit(MGR_OPT_USER, &mgr->options);
+ }
+ mgr->ch.st->dev->D.ctrl(&mgr->ch.st->dev->D, CLOSE_CHANNEL, NULL);
+ return 0;
+}
+
+static int
+ctrl_teimanager(struct manager *mgr, void *arg)
+{
+ /* currently we only have one option */
+ int clean = *((int *)arg);
+
+ if (clean)
+ test_and_set_bit(OPTION_L2_CLEANUP, &mgr->options);
+ else
+ test_and_clear_bit(OPTION_L2_CLEANUP, &mgr->options);
+ return 0;
+}
+
+/* This function creates an L2 for a fixed TEI in NT mode */
+static int
+check_data(struct manager *mgr, struct sk_buff *skb)
+{
+ struct mISDNhead *hh = mISDN_HEAD_P(skb);
+ int ret, tei;
+ struct layer2 *l2;
+
+ if (*debug & DEBUG_L2_CTRL)
+ printk(KERN_DEBUG "%s: prim(%x) id(%x)\n",
+ __func__, hh->prim, hh->id);
+ if (test_bit(MGR_OPT_USER, &mgr->options))
+ return -ENOTCONN;
+ if (hh->prim != PH_DATA_IND)
+ return -ENOTCONN;
+ if (skb->len != 3)
+ return -ENOTCONN;
+ if (skb->data[0] != 0)
+ /* only SAPI 0 command */
+ return -ENOTCONN;
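+ /*
+ * Q.921 address field layout, which the checks below rely on:
+ * octet 0 carries SAPI (bits 7..2), C/R (bit 1) and EA0 (bit 0);
+ * octet 1 carries the TEI in bits 7..1 with EA1 (bit 0) set to 1.
+ */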
+ if (!(skb->data[1] & 1)) /* invalid EA1 */
+ return -EINVAL;
+ tei = skb->data[1] >> 1;
+ if (tei > 63) /* not a fixed tei */
+ return -ENOTCONN;
+ if ((skb->data[2] & ~0x10) != SABME)
+ return -ENOTCONN;
+ /* We got a SABME for a fixed TEI */
+ l2 = create_new_tei(mgr, tei);
+ if (!l2)
+ return -ENOMEM;
+ ret = l2->ch.send(&l2->ch, skb);
+ return ret;
+}
+
+void
+delete_teimanager(struct mISDNchannel *ch)
+{
+ struct manager *mgr;
+ struct layer2 *l2, *nl2;
+
+ mgr = container_of(ch, struct manager, ch);
+ /* not locked; the lock is taken in TEIrelease() */
+ list_for_each_entry_safe(l2, nl2, &mgr->layer2, list) {
+ mutex_lock(&mgr->ch.st->lmutex);
+ list_del(&l2->ch.list);
+ mutex_unlock(&mgr->ch.st->lmutex);
+ l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
+ }
+ list_del(&mgr->ch.list);
+ list_del(&mgr->bcast.list);
+ skb_queue_purge(&mgr->sendq);
+ kfree(mgr);
+}
+
+static int
+mgr_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
+{
+ struct manager *mgr;
+ int ret = -EINVAL;
+
+ mgr = container_of(ch, struct manager, ch);
+ if (*debug & DEBUG_L2_CTRL)
+ printk(KERN_DEBUG "%s(%x, %p)\n", __func__, cmd, arg);
+ switch (cmd) {
+ case OPEN_CHANNEL:
+ ret = create_teimgr(mgr, arg);
+ break;
+ case CLOSE_CHANNEL:
+ ret = free_teimanager(mgr);
+ break;
+ case CONTROL_CHANNEL:
+ ret = ctrl_teimanager(mgr, arg);
+ break;
+ case CHECK_DATA:
+ ret = check_data(mgr, arg);
+ break;
+ }
+ return ret;
+}
+
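+/*
+ * Broadcast a frame to every layer2 entity whose SAPI matches the one
+ * in the mISDN header id; the last matching entry consumes the original
+ * skb, earlier ones are sent a copy.
+ */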
+static int
+mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb)
+{
+ struct manager *mgr = container_of(ch, struct manager, bcast);
+ struct mISDNhead *hh = mISDN_HEAD_P(skb);
+ struct sk_buff *cskb = NULL;
+ struct layer2 *l2;
+ u_long flags;
+ int ret;
+
+ read_lock_irqsave(&mgr->lock, flags);
+ list_for_each_entry(l2, &mgr->layer2, list) {
+ if ((hh->id & MISDN_ID_SAPI_MASK) ==
+ (l2->ch.addr & MISDN_ID_SAPI_MASK)) {
+ if (list_is_last(&l2->list, &mgr->layer2)) {
+ cskb = skb;
+ skb = NULL;
+ } else {
+ if (!cskb)
+ cskb = skb_copy(skb, GFP_ATOMIC); /* under read_lock_irqsave */
+ }
+ if (cskb) {
+ ret = l2->ch.send(&l2->ch, cskb);
+ if (ret) {
+ if (*debug & DEBUG_SEND_ERR)
+ printk(KERN_DEBUG
+ "%s ch%d prim(%x) addr(%x)"
+ " err %d\n",
+ __func__, l2->ch.nr,
+ hh->prim, l2->ch.addr, ret);
+ } else
+ cskb = NULL;
+ } else {
+ printk(KERN_WARNING "%s ch%d addr %x no mem\n",
+ __func__, ch->nr, ch->addr);
+ goto out;
+ }
+ }
+ }
+out:
+ read_unlock_irqrestore(&mgr->lock, flags);
+ if (cskb)
+ dev_kfree_skb(cskb);
+ if (skb)
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static int
+mgr_bcast_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
+{
+ return -EINVAL;
+}
+
+int
+create_teimanager(struct mISDNdevice *dev)
+{
+ struct manager *mgr;
+
+ mgr = kzalloc(sizeof(struct manager), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&mgr->layer2);
+ mgr->lock = __RW_LOCK_UNLOCKED(mgr->lock);
+ skb_queue_head_init(&mgr->sendq);
+ mgr->nextid = 1;
+ mgr->lastid = MISDN_ID_NONE;
+ mgr->ch.send = mgr_send;
+ mgr->ch.ctrl = mgr_ctrl;
+ mgr->ch.st = dev->D.st;
+ set_channel_address(&mgr->ch, TEI_SAPI, GROUP_TEI);
+ add_layer2(&mgr->ch, dev->D.st);
+ mgr->bcast.send = mgr_bcast;
+ mgr->bcast.ctrl = mgr_bcast_ctrl;
+ mgr->bcast.st = dev->D.st;
+ set_channel_address(&mgr->bcast, 0, GROUP_TEI);
+ add_layer2(&mgr->bcast, dev->D.st);
+ mgr->deact.debug = *debug & DEBUG_MANAGER;
+ mgr->deact.userdata = mgr;
+ mgr->deact.printdebug = da_debug;
+ mgr->deact.fsm = &deactfsm;
+ mgr->deact.state = ST_L1_DEACT;
+ mISDN_FsmInitTimer(&mgr->deact, &mgr->datimer);
+ dev->teimgr = &mgr->ch;
+ return 0;
+}
+
+int TEIInit(u_int *deb)
+{
+ debug = deb;
+ teifsmu.state_count = TEI_STATE_COUNT;
+ teifsmu.event_count = TEI_EVENT_COUNT;
+ teifsmu.strEvent = strTeiEvent;
+ teifsmu.strState = strTeiState;
+ mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser));
+ teifsmn.state_count = TEI_STATE_COUNT;
+ teifsmn.event_count = TEI_EVENT_COUNT;
+ teifsmn.strEvent = strTeiEvent;
+ teifsmn.strState = strTeiState;
+ mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet));
+ deactfsm.state_count = DEACT_STATE_COUNT;
+ deactfsm.event_count = DEACT_EVENT_COUNT;
+ deactfsm.strEvent = strDeactEvent;
+ deactfsm.strState = strDeactState;
+ mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList));
+ return 0;
+}
+
+void TEIFree(void)
+{
+ mISDN_FsmFree(&teifsmu);
+ mISDN_FsmFree(&teifsmn);
+ mISDN_FsmFree(&deactfsm);
+}
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
new file mode 100644
index 000000000000..b5fabc7019d8
--- /dev/null
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -0,0 +1,301 @@
+/*
+ *
+ * general timer device for using in ISDN stacks
+ *
+ * Author Karsten Keil <kkeil@novell.com>
+ *
+ * Copyright 2008 by Karsten Keil <kkeil@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/poll.h>
+#include <linux/vmalloc.h>
+#include <linux/timer.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mISDNif.h>
+
+static int *debug;
+
+struct mISDNtimerdev {
+ int next_id;
+ struct list_head pending;
+ struct list_head expired;
+ wait_queue_head_t wait;
+ u_int work;
+ spinlock_t lock; /* protect lists */
+};
+
+struct mISDNtimer {
+ struct list_head list;
+ struct mISDNtimerdev *dev;
+ struct timer_list tl;
+ int id;
+};
+
+static int
+mISDN_open(struct inode *ino, struct file *filep)
+{
+ struct mISDNtimerdev *dev;
+
+ if (*debug & DEBUG_TIMER)
+ printk(KERN_DEBUG "%s(%p,%p)\n", __func__, ino, filep);
+ dev = kmalloc(sizeof(struct mISDNtimerdev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ dev->next_id = 1;
+ INIT_LIST_HEAD(&dev->pending);
+ INIT_LIST_HEAD(&dev->expired);
+ spin_lock_init(&dev->lock);
+ dev->work = 0;
+ init_waitqueue_head(&dev->wait);
+ filep->private_data = dev;
+ __module_get(THIS_MODULE);
+ return 0;
+}
+
+static int
+mISDN_close(struct inode *ino, struct file *filep)
+{
+ struct mISDNtimerdev *dev = filep->private_data;
+ struct mISDNtimer *timer, *next;
+
+ if (*debug & DEBUG_TIMER)
+ printk(KERN_DEBUG "%s(%p,%p)\n", __func__, ino, filep);
+ list_for_each_entry_safe(timer, next, &dev->pending, list) {
+ del_timer(&timer->tl);
+ kfree(timer);
+ }
+ list_for_each_entry_safe(timer, next, &dev->expired, list) {
+ kfree(timer);
+ }
+ kfree(dev);
+ module_put(THIS_MODULE);
+ return 0;
+}
+
+static ssize_t
+mISDN_read(struct file *filep, char *buf, size_t count, loff_t *off)
+{
+ struct mISDNtimerdev *dev = filep->private_data;
+ struct mISDNtimer *timer;
+ u_long flags;
+ int ret = 0;
+
+ if (*debug & DEBUG_TIMER)
+ printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__,
+ filep, buf, (int)count, off);
+ if (*off != filep->f_pos)
+ return -ESPIPE;
+
+ if (list_empty(&dev->expired) && (dev->work == 0)) {
+ if (filep->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ wait_event_interruptible(dev->wait, (dev->work ||
+ !list_empty(&dev->expired)));
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ }
+ if (count < sizeof(int))
+ return -ENOSPC;
+ if (dev->work)
+ dev->work = 0;
+ if (!list_empty(&dev->expired)) {
+ spin_lock_irqsave(&dev->lock, flags);
+ timer = list_first_entry(&dev->expired, struct mISDNtimer, list);
+ list_del(&timer->list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (put_user(timer->id, (int *)buf))
+ ret = -EFAULT;
+ else
+ ret = sizeof(int);
+ kfree(timer);
+ }
+ return ret;
+}
+
+static loff_t
+mISDN_llseek(struct file *filep, loff_t offset, int orig)
+{
+ return -ESPIPE;
+}
+
+static ssize_t
+mISDN_write(struct file *filep, const char *buf, size_t count, loff_t *off)
+{
+ return -EOPNOTSUPP;
+}
+
+static unsigned int
+mISDN_poll(struct file *filep, poll_table *wait)
+{
+ struct mISDNtimerdev *dev = filep->private_data;
+ unsigned int mask = POLLERR;
+
+ if (*debug & DEBUG_TIMER)
+ printk(KERN_DEBUG "%s(%p, %p)\n", __func__, filep, wait);
+ if (dev) {
+ poll_wait(filep, &dev->wait, wait);
+ mask = 0;
+ if (dev->work || !list_empty(&dev->expired))
+ mask |= (POLLIN | POLLRDNORM);
+ if (*debug & DEBUG_TIMER)
+ printk(KERN_DEBUG "%s work(%d) empty(%d)\n", __func__,
+ dev->work, list_empty(&dev->expired));
+ }
+ return mask;
+}
+
+static void
+dev_expire_timer(struct mISDNtimer *timer)
+{
+ u_long flags;
+
+ spin_lock_irqsave(&timer->dev->lock, flags);
+ list_del(&timer->list);
+ list_add_tail(&timer->list, &timer->dev->expired);
+ spin_unlock_irqrestore(&timer->dev->lock, flags);
+ wake_up_interruptible(&timer->dev->wait);
+}
+
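+/*
+ * Arm a timer for 'timeout' milliseconds and return its id; a timeout
+ * of 0 allocates nothing and only sets the work flag and wakes up any
+ * sleeping reader.
+ */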
+static int
+misdn_add_timer(struct mISDNtimerdev *dev, int timeout)
+{
+ int id;
+ u_long flags;
+ struct mISDNtimer *timer;
+
+ if (!timeout) {
+ dev->work = 1;
+ wake_up_interruptible(&dev->wait);
+ id = 0;
+ } else {
+ timer = kzalloc(sizeof(struct mISDNtimer), GFP_KERNEL);
+ if (!timer)
+ return -ENOMEM;
+ spin_lock_irqsave(&dev->lock, flags);
+ timer->id = dev->next_id++;
+ if (dev->next_id < 0)
+ dev->next_id = 1;
+ list_add_tail(&timer->list, &dev->pending);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ timer->dev = dev;
+ timer->tl.data = (long)timer;
+ timer->tl.function = (void *) dev_expire_timer;
+ init_timer(&timer->tl);
+ timer->tl.expires = jiffies + ((HZ * (u_long)timeout) / 1000);
+ add_timer(&timer->tl);
+ id = timer->id;
+ }
+ return id;
+}
+
+static int
+misdn_del_timer(struct mISDNtimerdev *dev, int id)
+{
+ u_long flags;
+ struct mISDNtimer *timer;
+ int ret = 0;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_for_each_entry(timer, &dev->pending, list) {
+ if (timer->id == id) {
+ list_del_init(&timer->list);
+ del_timer(&timer->tl);
+ ret = timer->id;
+ kfree(timer);
+ goto unlock;
+ }
+ }
+unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+}
+
+static int
+mISDN_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mISDNtimerdev *dev = filep->private_data;
+ int id, tout, ret = 0;
+
+ if (*debug & DEBUG_TIMER)
+ printk(KERN_DEBUG "%s(%p, %x, %lx)\n", __func__,
+ filep, cmd, arg);
+ switch (cmd) {
+ case IMADDTIMER:
+ if (get_user(tout, (int __user *)arg)) {
+ ret = -EFAULT;
+ break;
+ }
+ id = misdn_add_timer(dev, tout);
+ if (*debug & DEBUG_TIMER)
+ printk(KERN_DEBUG "%s add %d id %d\n", __func__,
+ tout, id);
+ if (id < 0) {
+ ret = id;
+ break;
+ }
+ if (put_user(id, (int __user *)arg))
+ ret = -EFAULT;
+ break;
+ case IMDELTIMER:
+ if (get_user(id, (int __user *)arg)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (*debug & DEBUG_TIMER)
+ printk(KERN_DEBUG "%s del id %d\n", __func__, id);
+ id = misdn_del_timer(dev, id);
+ if (put_user(id, (int __user *)arg))
+ ret = -EFAULT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static const struct file_operations mISDN_fops = {
+ .llseek = mISDN_llseek,
+ .read = mISDN_read,
+ .write = mISDN_write,
+ .poll = mISDN_poll,
+ .ioctl = mISDN_ioctl,
+ .open = mISDN_open,
+ .release = mISDN_close,
+};
+
+static struct miscdevice mISDNtimer = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "mISDNtimer",
+ .fops = &mISDN_fops,
+};
+
+int
+mISDN_inittimer(int *deb)
+{
+ int err;
+
+ debug = deb;
+ err = misc_register(&mISDNtimer);
+ if (err)
+ printk(KERN_WARNING "mISDN: Could not register timer device\n");
+ return err;
+}
+
+void mISDN_timer_cleanup(void)
+{
+ misc_deregister(&mISDNtimer);
+}
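
The timer device added above exposes a small open/ioctl/poll/read interface to userspace rather than a kernel API, so a minimal usage sketch may help illustrate it. The sketch below is not part of the patch; it assumes the misc device shows up as /dev/mISDNtimer (derived from the .name field above) and that IMADDTIMER/IMDELTIMER are available to userspace from <linux/mISDNif.h>, the header the driver itself includes.

/* Minimal userspace sketch (not part of the patch above). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mISDNif.h>

int main(void)
{
	int fd, id, expired;

	fd = open("/dev/mISDNtimer", O_RDWR);	/* assumed device node */
	if (fd < 0) {
		perror("open /dev/mISDNtimer");
		return 1;
	}
	id = 500;				/* timeout in milliseconds */
	if (ioctl(fd, IMADDTIMER, &id) < 0) {	/* on return, id holds the timer id */
		perror("IMADDTIMER");
		return 1;
	}
	printf("armed timer %d\n", id);
	/* read() blocks until some timer expires and returns its id */
	if (read(fd, &expired, sizeof(expired)) == (ssize_t)sizeof(expired))
		printf("timer %d expired\n", expired);
	/* a still-pending timer could instead be cancelled with IMDELTIMER */
	close(fd);
	return 0;
}
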
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index fea966d66f98..71dd65aa31b6 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -147,9 +147,12 @@ static struct priority_group *alloc_priority_group(void)
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
struct pgpath *pgpath, *tmp;
+ struct multipath *m = ti->private;
list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
list_del(&pgpath->list);
+ if (m->hw_handler_name)
+ scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
dm_put_device(ti, pgpath->path.dev);
free_pgpath(pgpath);
}
@@ -548,6 +551,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
{
int r;
struct pgpath *p;
+ struct multipath *m = ti->private;
/* we need at least a path arg */
if (as->argc < 1) {
@@ -566,6 +570,15 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
goto bad;
}
+ if (m->hw_handler_name) {
+ r = scsi_dh_attach(bdev_get_queue(p->path.dev->bdev),
+ m->hw_handler_name);
+ if (r < 0) {
+ dm_put_device(ti, p->path.dev);
+ goto bad;
+ }
+ }
+
r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
if (r) {
dm_put_device(ti, p->path.dev);
diff --git a/drivers/media/dvb/pluto2/pluto2.c b/drivers/media/dvb/pluto2/pluto2.c
index 1360403b88b6..a9653c63f4db 100644
--- a/drivers/media/dvb/pluto2/pluto2.c
+++ b/drivers/media/dvb/pluto2/pluto2.c
@@ -242,7 +242,7 @@ static int __devinit pluto_dma_map(struct pluto *pluto)
pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf,
TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
- return pci_dma_mapping_error(pluto->dma_addr);
+ return pci_dma_mapping_error(pluto->pdev, pluto->dma_addr);
}
static void pluto_dma_unmap(struct pluto *pluto)
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 61b98c333cb0..a38005008a20 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -249,8 +249,11 @@ EXPORT_SYMBOL(memstick_next_req);
*/
void memstick_new_req(struct memstick_host *host)
{
- host->retries = cmd_retries;
- host->request(host);
+ if (host->card) {
+ host->retries = cmd_retries;
+ INIT_COMPLETION(host->card->mrq_complete);
+ host->request(host);
+ }
}
EXPORT_SYMBOL(memstick_new_req);
@@ -415,10 +418,14 @@ err_out:
return NULL;
}
-static void memstick_power_on(struct memstick_host *host)
+static int memstick_power_on(struct memstick_host *host)
{
- host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON);
- host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
+ int rc = host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON);
+
+ if (!rc)
+ rc = host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
+
+ return rc;
}
static void memstick_check(struct work_struct *work)
@@ -429,8 +436,11 @@ static void memstick_check(struct work_struct *work)
dev_dbg(&host->dev, "memstick_check started\n");
mutex_lock(&host->lock);
- if (!host->card)
- memstick_power_on(host);
+ if (!host->card) {
+ if (memstick_power_on(host))
+ goto out_power_off;
+ } else
+ host->card->stop(host->card);
card = memstick_alloc_card(host);
@@ -448,7 +458,8 @@ static void memstick_check(struct work_struct *work)
|| !(host->card->check(host->card))) {
device_unregister(&host->card->dev);
host->card = NULL;
- }
+ } else
+ host->card->start(host->card);
}
if (!host->card) {
@@ -461,6 +472,7 @@ static void memstick_check(struct work_struct *work)
kfree(card);
}
+out_power_off:
if (!host->card)
host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF);
@@ -573,11 +585,15 @@ EXPORT_SYMBOL(memstick_suspend_host);
*/
void memstick_resume_host(struct memstick_host *host)
{
+ int rc = 0;
+
mutex_lock(&host->lock);
if (host->card)
- memstick_power_on(host);
+ rc = memstick_power_on(host);
mutex_unlock(&host->lock);
- memstick_detect_change(host);
+
+ if (!rc)
+ memstick_detect_change(host);
}
EXPORT_SYMBOL(memstick_resume_host);
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 477d0fb6e588..44b1817f2f2f 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -136,9 +136,8 @@ struct mspro_block_data {
unsigned int caps;
struct gendisk *disk;
struct request_queue *queue;
+ struct request *block_req;
spinlock_t q_lock;
- wait_queue_head_t q_wait;
- struct task_struct *q_thread;
unsigned short page_size;
unsigned short cylinders;
@@ -147,9 +146,10 @@ struct mspro_block_data {
unsigned char system;
unsigned char read_only:1,
- active:1,
+ eject:1,
has_request:1,
- data_dir:1;
+ data_dir:1,
+ active:1;
unsigned char transfer_cmd;
int (*mrq_handler)(struct memstick_dev *card,
@@ -160,12 +160,14 @@ struct mspro_block_data {
struct scatterlist req_sg[MSPRO_BLOCK_MAX_SEGS];
unsigned int seg_count;
unsigned int current_seg;
- unsigned short current_page;
+ unsigned int current_page;
};
static DEFINE_IDR(mspro_block_disk_idr);
static DEFINE_MUTEX(mspro_block_disk_lock);
+static int mspro_block_complete_req(struct memstick_dev *card, int error);
+
/*** Block device ***/
static int mspro_block_bd_open(struct inode *inode, struct file *filp)
@@ -197,8 +199,10 @@ static int mspro_block_disk_release(struct gendisk *disk)
mutex_lock(&mspro_block_disk_lock);
- if (msb->usage_count) {
- msb->usage_count--;
+ if (msb) {
+ if (msb->usage_count)
+ msb->usage_count--;
+
if (!msb->usage_count) {
kfree(msb);
disk->private_data = NULL;
@@ -523,11 +527,13 @@ static int h_mspro_block_req_init(struct memstick_dev *card,
static int h_mspro_block_default(struct memstick_dev *card,
struct memstick_request **mrq)
{
- complete(&card->mrq_complete);
- if (!(*mrq)->error)
- return -EAGAIN;
- else
- return (*mrq)->error;
+ return mspro_block_complete_req(card, (*mrq)->error);
+}
+
+static int h_mspro_block_default_bad(struct memstick_dev *card,
+ struct memstick_request **mrq)
+{
+ return -ENXIO;
}
static int h_mspro_block_get_ro(struct memstick_dev *card,
@@ -535,44 +541,30 @@ static int h_mspro_block_get_ro(struct memstick_dev *card,
{
struct mspro_block_data *msb = memstick_get_drvdata(card);
- if ((*mrq)->error) {
- complete(&card->mrq_complete);
- return (*mrq)->error;
+ if (!(*mrq)->error) {
+ if ((*mrq)->data[offsetof(struct ms_status_register, status0)]
+ & MEMSTICK_STATUS0_WP)
+ msb->read_only = 1;
+ else
+ msb->read_only = 0;
}
- if ((*mrq)->data[offsetof(struct ms_status_register, status0)]
- & MEMSTICK_STATUS0_WP)
- msb->read_only = 1;
- else
- msb->read_only = 0;
-
- complete(&card->mrq_complete);
- return -EAGAIN;
+ return mspro_block_complete_req(card, (*mrq)->error);
}
static int h_mspro_block_wait_for_ced(struct memstick_dev *card,
struct memstick_request **mrq)
{
- if ((*mrq)->error) {
- complete(&card->mrq_complete);
- return (*mrq)->error;
- }
-
dev_dbg(&card->dev, "wait for ced: value %x\n", (*mrq)->data[0]);
- if ((*mrq)->data[0] & (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR)) {
- card->current_mrq.error = -EFAULT;
- complete(&card->mrq_complete);
- return card->current_mrq.error;
+ if (!(*mrq)->error) {
+ if ((*mrq)->data[0] & (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR))
+ (*mrq)->error = -EFAULT;
+ else if (!((*mrq)->data[0] & MEMSTICK_INT_CED))
+ return 0;
}
- if (!((*mrq)->data[0] & MEMSTICK_INT_CED))
- return 0;
- else {
- card->current_mrq.error = 0;
- complete(&card->mrq_complete);
- return -EAGAIN;
- }
+ return mspro_block_complete_req(card, (*mrq)->error);
}
static int h_mspro_block_transfer_data(struct memstick_dev *card,
@@ -583,10 +575,8 @@ static int h_mspro_block_transfer_data(struct memstick_dev *card,
struct scatterlist t_sg = { 0 };
size_t t_offset;
- if ((*mrq)->error) {
- complete(&card->mrq_complete);
- return (*mrq)->error;
- }
+ if ((*mrq)->error)
+ return mspro_block_complete_req(card, (*mrq)->error);
switch ((*mrq)->tpc) {
case MS_TPC_WRITE_REG:
@@ -617,8 +607,8 @@ has_int_reg:
if (msb->current_seg == msb->seg_count) {
if (t_val & MEMSTICK_INT_CED) {
- complete(&card->mrq_complete);
- return -EAGAIN;
+ return mspro_block_complete_req(card,
+ 0);
} else {
card->next_request
= h_mspro_block_wait_for_ced;
@@ -666,140 +656,184 @@ has_int_reg:
/*** Data transfer ***/
-static void mspro_block_process_request(struct memstick_dev *card,
- struct request *req)
+static int mspro_block_issue_req(struct memstick_dev *card, int chunk)
{
struct mspro_block_data *msb = memstick_get_drvdata(card);
- struct mspro_param_register param;
- int rc, chunk, cnt;
- unsigned short page_count;
sector_t t_sec;
- unsigned long flags;
+ unsigned int count;
+ struct mspro_param_register param;
- do {
- page_count = 0;
+try_again:
+ while (chunk) {
+ msb->current_page = 0;
msb->current_seg = 0;
- msb->seg_count = blk_rq_map_sg(req->q, req, msb->req_sg);
+ msb->seg_count = blk_rq_map_sg(msb->block_req->q,
+ msb->block_req,
+ msb->req_sg);
- if (msb->seg_count) {
- msb->current_page = 0;
- for (rc = 0; rc < msb->seg_count; rc++)
- page_count += msb->req_sg[rc].length
- / msb->page_size;
-
- t_sec = req->sector;
- sector_div(t_sec, msb->page_size >> 9);
- param.system = msb->system;
- param.data_count = cpu_to_be16(page_count);
- param.data_address = cpu_to_be32((uint32_t)t_sec);
- param.tpc_param = 0;
-
- msb->data_dir = rq_data_dir(req);
- msb->transfer_cmd = msb->data_dir == READ
- ? MSPRO_CMD_READ_DATA
- : MSPRO_CMD_WRITE_DATA;
-
- dev_dbg(&card->dev, "data transfer: cmd %x, "
- "lba %x, count %x\n", msb->transfer_cmd,
- be32_to_cpu(param.data_address),
- page_count);
-
- card->next_request = h_mspro_block_req_init;
- msb->mrq_handler = h_mspro_block_transfer_data;
- memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG,
- &param, sizeof(param));
- memstick_new_req(card->host);
- wait_for_completion(&card->mrq_complete);
- rc = card->current_mrq.error;
+ if (!msb->seg_count) {
+ chunk = __blk_end_request(msb->block_req, -ENOMEM,
+ blk_rq_cur_bytes(msb->block_req));
+ continue;
+ }
- if (rc || (card->current_mrq.tpc == MSPRO_CMD_STOP)) {
- for (cnt = 0; cnt < msb->current_seg; cnt++)
- page_count += msb->req_sg[cnt].length
- / msb->page_size;
-
- if (msb->current_page)
- page_count += msb->current_page - 1;
-
- if (page_count && (msb->data_dir == READ))
- rc = msb->page_size * page_count;
- else
- rc = -EIO;
- } else
- rc = msb->page_size * page_count;
- } else
- rc = -EFAULT;
+ t_sec = msb->block_req->sector << 9;
+ sector_div(t_sec, msb->page_size);
- spin_lock_irqsave(&msb->q_lock, flags);
- if (rc >= 0)
- chunk = __blk_end_request(req, 0, rc);
- else
- chunk = __blk_end_request(req, rc, 0);
+ count = msb->block_req->nr_sectors << 9;
+ count /= msb->page_size;
- dev_dbg(&card->dev, "end chunk %d, %d\n", rc, chunk);
- spin_unlock_irqrestore(&msb->q_lock, flags);
- } while (chunk);
+ param.system = msb->system;
+ param.data_count = cpu_to_be16(count);
+ param.data_address = cpu_to_be32((uint32_t)t_sec);
+ param.tpc_param = 0;
+
+ msb->data_dir = rq_data_dir(msb->block_req);
+ msb->transfer_cmd = msb->data_dir == READ
+ ? MSPRO_CMD_READ_DATA
+ : MSPRO_CMD_WRITE_DATA;
+
+ dev_dbg(&card->dev, "data transfer: cmd %x, "
+ "lba %x, count %x\n", msb->transfer_cmd,
+ be32_to_cpu(param.data_address), count);
+
+ card->next_request = h_mspro_block_req_init;
+ msb->mrq_handler = h_mspro_block_transfer_data;
+ memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG,
+ &param, sizeof(param));
+ memstick_new_req(card->host);
+ return 0;
+ }
+
+ dev_dbg(&card->dev, "elv_next\n");
+ msb->block_req = elv_next_request(msb->queue);
+ if (!msb->block_req) {
+ dev_dbg(&card->dev, "issue end\n");
+ return -EAGAIN;
+ }
+
+ dev_dbg(&card->dev, "trying again\n");
+ chunk = 1;
+ goto try_again;
}
-static int mspro_block_has_request(struct mspro_block_data *msb)
+static int mspro_block_complete_req(struct memstick_dev *card, int error)
{
- int rc = 0;
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ int chunk, cnt;
+ unsigned int t_len = 0;
unsigned long flags;
spin_lock_irqsave(&msb->q_lock, flags);
- if (kthread_should_stop() || msb->has_request)
- rc = 1;
+ dev_dbg(&card->dev, "complete %d, %d\n", msb->has_request ? 1 : 0,
+ error);
+
+ if (msb->has_request) {
+ /* Nothing to do - not really an error */
+ if (error == -EAGAIN)
+ error = 0;
+
+ if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) {
+ if (msb->data_dir == READ) {
+ for (cnt = 0; cnt < msb->current_seg; cnt++)
+ t_len += msb->req_sg[cnt].length
+ / msb->page_size;
+
+ if (msb->current_page)
+ t_len += msb->current_page - 1;
+
+ t_len *= msb->page_size;
+ }
+ } else
+ t_len = msb->block_req->nr_sectors << 9;
+
+ dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error);
+
+ if (error && !t_len)
+ t_len = blk_rq_cur_bytes(msb->block_req);
+
+ chunk = __blk_end_request(msb->block_req, error, t_len);
+
+ error = mspro_block_issue_req(card, chunk);
+
+ if (!error)
+ goto out;
+ else
+ msb->has_request = 0;
+ } else {
+ if (!error)
+ error = -EAGAIN;
+ }
+
+ card->next_request = h_mspro_block_default_bad;
+ complete_all(&card->mrq_complete);
+out:
spin_unlock_irqrestore(&msb->q_lock, flags);
- return rc;
+ return error;
}
-static int mspro_block_queue_thread(void *data)
+static void mspro_block_stop(struct memstick_dev *card)
{
- struct memstick_dev *card = data;
- struct memstick_host *host = card->host;
struct mspro_block_data *msb = memstick_get_drvdata(card);
- struct request *req;
+ int rc = 0;
unsigned long flags;
while (1) {
- wait_event(msb->q_wait, mspro_block_has_request(msb));
- dev_dbg(&card->dev, "thread iter\n");
-
spin_lock_irqsave(&msb->q_lock, flags);
- req = elv_next_request(msb->queue);
- dev_dbg(&card->dev, "next req %p\n", req);
- if (!req) {
- msb->has_request = 0;
- if (kthread_should_stop()) {
- spin_unlock_irqrestore(&msb->q_lock, flags);
- break;
- }
- } else
- msb->has_request = 1;
+ if (!msb->has_request) {
+ blk_stop_queue(msb->queue);
+ rc = 1;
+ }
spin_unlock_irqrestore(&msb->q_lock, flags);
- if (req) {
- mutex_lock(&host->lock);
- mspro_block_process_request(card, req);
- mutex_unlock(&host->lock);
- }
+ if (rc)
+ break;
+
+ wait_for_completion(&card->mrq_complete);
}
- dev_dbg(&card->dev, "thread finished\n");
- return 0;
}
-static void mspro_block_request(struct request_queue *q)
+static void mspro_block_start(struct memstick_dev *card)
+{
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ unsigned long flags;
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+ blk_start_queue(msb->queue);
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+}
+
+static int mspro_block_prepare_req(struct request_queue *q, struct request *req)
+{
+ if (!blk_fs_request(req) && !blk_pc_request(req)) {
+ blk_dump_rq_flags(req, "MSPro unsupported request");
+ return BLKPREP_KILL;
+ }
+
+ req->cmd_flags |= REQ_DONTPREP;
+
+ return BLKPREP_OK;
+}
+
+static void mspro_block_submit_req(struct request_queue *q)
{
struct memstick_dev *card = q->queuedata;
struct mspro_block_data *msb = memstick_get_drvdata(card);
struct request *req = NULL;
- if (msb->q_thread) {
- msb->has_request = 1;
- wake_up_all(&msb->q_wait);
- } else {
+ if (msb->has_request)
+ return;
+
+ if (msb->eject) {
while ((req = elv_next_request(q)) != NULL)
end_queued_request(req, -ENODEV);
+
+ return;
}
+
+ msb->has_request = 1;
+ if (mspro_block_issue_req(card, 0))
+ msb->has_request = 0;
}
/*** Initialization ***/
@@ -1169,16 +1203,14 @@ static int mspro_block_init_disk(struct memstick_dev *card)
goto out_release_id;
}
- spin_lock_init(&msb->q_lock);
- init_waitqueue_head(&msb->q_wait);
-
- msb->queue = blk_init_queue(mspro_block_request, &msb->q_lock);
+ msb->queue = blk_init_queue(mspro_block_submit_req, &msb->q_lock);
if (!msb->queue) {
rc = -ENOMEM;
goto out_put_disk;
}
msb->queue->queuedata = card;
+ blk_queue_prep_rq(msb->queue, mspro_block_prepare_req);
blk_queue_bounce_limit(msb->queue, limit);
blk_queue_max_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
@@ -1204,14 +1236,8 @@ static int mspro_block_init_disk(struct memstick_dev *card)
capacity *= msb->page_size >> 9;
set_capacity(msb->disk, capacity);
dev_dbg(&card->dev, "capacity set %ld\n", capacity);
- msb->q_thread = kthread_run(mspro_block_queue_thread, card,
- DRIVER_NAME"d");
- if (IS_ERR(msb->q_thread))
- goto out_put_disk;
- mutex_unlock(&host->lock);
add_disk(msb->disk);
- mutex_lock(&host->lock);
msb->active = 1;
return 0;
@@ -1259,6 +1285,7 @@ static int mspro_block_probe(struct memstick_dev *card)
return -ENOMEM;
memstick_set_drvdata(card, msb);
msb->card = card;
+ spin_lock_init(&msb->q_lock);
rc = mspro_block_init_card(card);
@@ -1272,6 +1299,8 @@ static int mspro_block_probe(struct memstick_dev *card)
rc = mspro_block_init_disk(card);
if (!rc) {
card->check = mspro_block_check_card;
+ card->stop = mspro_block_stop;
+ card->start = mspro_block_start;
return 0;
}
@@ -1286,26 +1315,17 @@ out_free:
static void mspro_block_remove(struct memstick_dev *card)
{
struct mspro_block_data *msb = memstick_get_drvdata(card);
- struct task_struct *q_thread = NULL;
unsigned long flags;
del_gendisk(msb->disk);
dev_dbg(&card->dev, "mspro block remove\n");
spin_lock_irqsave(&msb->q_lock, flags);
- q_thread = msb->q_thread;
- msb->q_thread = NULL;
- msb->active = 0;
+ msb->eject = 1;
+ blk_start_queue(msb->queue);
spin_unlock_irqrestore(&msb->q_lock, flags);
- if (q_thread) {
- mutex_unlock(&card->host->lock);
- kthread_stop(q_thread);
- mutex_lock(&card->host->lock);
- }
-
- dev_dbg(&card->dev, "queue thread stopped\n");
-
blk_cleanup_queue(msb->queue);
+ msb->queue = NULL;
sysfs_remove_group(&card->dev.kobj, &msb->attr_group);
@@ -1322,19 +1342,13 @@ static void mspro_block_remove(struct memstick_dev *card)
static int mspro_block_suspend(struct memstick_dev *card, pm_message_t state)
{
struct mspro_block_data *msb = memstick_get_drvdata(card);
- struct task_struct *q_thread = NULL;
unsigned long flags;
spin_lock_irqsave(&msb->q_lock, flags);
- q_thread = msb->q_thread;
- msb->q_thread = NULL;
- msb->active = 0;
blk_stop_queue(msb->queue);
+ msb->active = 0;
spin_unlock_irqrestore(&msb->q_lock, flags);
- if (q_thread)
- kthread_stop(q_thread);
-
return 0;
}
@@ -1373,14 +1387,7 @@ static int mspro_block_resume(struct memstick_dev *card)
if (memcmp(s_attr->data, r_attr->data, s_attr->size))
break;
- memstick_set_drvdata(card, msb);
- msb->q_thread = kthread_run(mspro_block_queue_thread,
- card, DRIVER_NAME"d");
- if (IS_ERR(msb->q_thread))
- msb->q_thread = NULL;
- else
- msb->active = 1;
-
+ msb->active = 1;
break;
}
}
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index 4e3bfbcdf155..3485c63d20b0 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -50,6 +50,7 @@ struct jmb38x_ms_host {
struct jmb38x_ms *chip;
void __iomem *addr;
spinlock_t lock;
+ struct tasklet_struct notify;
int id;
char host_id[32];
int irq;
@@ -590,55 +591,97 @@ static void jmb38x_ms_abort(unsigned long data)
spin_unlock_irqrestore(&host->lock, flags);
}
-static void jmb38x_ms_request(struct memstick_host *msh)
+static void jmb38x_ms_req_tasklet(unsigned long data)
{
+ struct memstick_host *msh = (struct memstick_host *)data;
struct jmb38x_ms_host *host = memstick_priv(msh);
unsigned long flags;
int rc;
spin_lock_irqsave(&host->lock, flags);
- if (host->req) {
- spin_unlock_irqrestore(&host->lock, flags);
- BUG();
- return;
+ if (!host->req) {
+ do {
+ rc = memstick_next_req(msh, &host->req);
+ dev_dbg(&host->chip->pdev->dev, "tasklet req %d\n", rc);
+ } while (!rc && jmb38x_ms_issue_cmd(msh));
}
-
- do {
- rc = memstick_next_req(msh, &host->req);
- } while (!rc && jmb38x_ms_issue_cmd(msh));
spin_unlock_irqrestore(&host->lock, flags);
}
-static void jmb38x_ms_reset(struct jmb38x_ms_host *host)
+static void jmb38x_ms_dummy_submit(struct memstick_host *msh)
{
- unsigned int host_ctl = readl(host->addr + HOST_CONTROL);
+ return;
+}
+
+static void jmb38x_ms_submit_req(struct memstick_host *msh)
+{
+ struct jmb38x_ms_host *host = memstick_priv(msh);
+
+ tasklet_schedule(&host->notify);
+}
+
+static int jmb38x_ms_reset(struct jmb38x_ms_host *host)
+{
+ int cnt;
+
+ writel(HOST_CONTROL_RESET_REQ | HOST_CONTROL_CLOCK_EN
+ | readl(host->addr + HOST_CONTROL),
+ host->addr + HOST_CONTROL);
+ mmiowb();
+
+ for (cnt = 0; cnt < 20; ++cnt) {
+ if (!(HOST_CONTROL_RESET_REQ
+ & readl(host->addr + HOST_CONTROL)))
+ goto reset_next;
- writel(HOST_CONTROL_RESET_REQ, host->addr + HOST_CONTROL);
+ ndelay(20);
+ }
+ dev_dbg(&host->chip->pdev->dev, "reset_req timeout\n");
+ return -EIO;
+
+reset_next:
+ writel(HOST_CONTROL_RESET | HOST_CONTROL_CLOCK_EN
+ | readl(host->addr + HOST_CONTROL),
+ host->addr + HOST_CONTROL);
+ mmiowb();
+
+ for (cnt = 0; cnt < 20; ++cnt) {
+ if (!(HOST_CONTROL_RESET
+ & readl(host->addr + HOST_CONTROL)))
+ goto reset_ok;
- while (HOST_CONTROL_RESET_REQ
- & (host_ctl = readl(host->addr + HOST_CONTROL))) {
ndelay(20);
- dev_dbg(&host->chip->pdev->dev, "reset %08x\n", host_ctl);
}
+ dev_dbg(&host->chip->pdev->dev, "reset timeout\n");
+ return -EIO;
- writel(HOST_CONTROL_RESET, host->addr + HOST_CONTROL);
+reset_ok:
mmiowb();
writel(INT_STATUS_ALL, host->addr + INT_SIGNAL_ENABLE);
writel(INT_STATUS_ALL, host->addr + INT_STATUS_ENABLE);
+ return 0;
}
-static void jmb38x_ms_set_param(struct memstick_host *msh,
- enum memstick_param param,
- int value)
+static int jmb38x_ms_set_param(struct memstick_host *msh,
+ enum memstick_param param,
+ int value)
{
struct jmb38x_ms_host *host = memstick_priv(msh);
unsigned int host_ctl = readl(host->addr + HOST_CONTROL);
unsigned int clock_ctl = CLOCK_CONTROL_40MHZ, clock_delay = 0;
+ int rc = 0;
switch (param) {
case MEMSTICK_POWER:
if (value == MEMSTICK_POWER_ON) {
- jmb38x_ms_reset(host);
+ rc = jmb38x_ms_reset(host);
+ if (rc)
+ return rc;
+
+ host_ctl = 7;
+ host_ctl |= HOST_CONTROL_POWER_EN
+ | HOST_CONTROL_CLOCK_EN;
+ writel(host_ctl, host->addr + HOST_CONTROL);
writel(host->id ? PAD_PU_PD_ON_MS_SOCK1
: PAD_PU_PD_ON_MS_SOCK0,
@@ -647,11 +690,7 @@ static void jmb38x_ms_set_param(struct memstick_host *msh,
writel(PAD_OUTPUT_ENABLE_MS,
host->addr + PAD_OUTPUT_ENABLE);
- host_ctl = 7;
- host_ctl |= HOST_CONTROL_POWER_EN
- | HOST_CONTROL_CLOCK_EN;
- writel(host_ctl, host->addr + HOST_CONTROL);
-
+ msleep(10);
dev_dbg(&host->chip->pdev->dev, "power on\n");
} else if (value == MEMSTICK_POWER_OFF) {
host_ctl &= ~(HOST_CONTROL_POWER_EN
@@ -660,7 +699,8 @@ static void jmb38x_ms_set_param(struct memstick_host *msh,
writel(0, host->addr + PAD_OUTPUT_ENABLE);
writel(PAD_PU_PD_OFF, host->addr + PAD_PU_PD);
dev_dbg(&host->chip->pdev->dev, "power off\n");
- }
+ } else
+ return -EINVAL;
break;
case MEMSTICK_INTERFACE:
host_ctl &= ~(3 << HOST_CONTROL_IF_SHIFT);
@@ -686,12 +726,14 @@ static void jmb38x_ms_set_param(struct memstick_host *msh,
host_ctl &= ~HOST_CONTROL_REI;
clock_ctl = CLOCK_CONTROL_60MHZ;
clock_delay = 0;
- }
+ } else
+ return -EINVAL;
writel(host_ctl, host->addr + HOST_CONTROL);
writel(clock_ctl, host->addr + CLOCK_CONTROL);
writel(clock_delay, host->addr + CLOCK_DELAY);
break;
};
+ return 0;
}
#ifdef CONFIG_PM
@@ -785,7 +827,9 @@ static struct memstick_host *jmb38x_ms_alloc_host(struct jmb38x_ms *jm, int cnt)
host->id);
host->irq = jm->pdev->irq;
host->timeout_jiffies = msecs_to_jiffies(1000);
- msh->request = jmb38x_ms_request;
+
+ tasklet_init(&host->notify, jmb38x_ms_req_tasklet, (unsigned long)msh);
+ msh->request = jmb38x_ms_submit_req;
msh->set_param = jmb38x_ms_set_param;
msh->caps = MEMSTICK_CAP_PAR4 | MEMSTICK_CAP_PAR8;
@@ -897,6 +941,8 @@ static void jmb38x_ms_remove(struct pci_dev *dev)
host = memstick_priv(jm->hosts[cnt]);
+ jm->hosts[cnt]->request = jmb38x_ms_dummy_submit;
+ tasklet_kill(&host->notify);
writel(0, host->addr + INT_SIGNAL_ENABLE);
writel(0, host->addr + INT_STATUS_ENABLE);
mmiowb();
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index 8577de4ebb0e..d32d6ad8f3fc 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -71,6 +71,7 @@ struct tifm_ms {
struct tifm_dev *dev;
struct timer_list timer;
struct memstick_request *req;
+ struct tasklet_struct notify;
unsigned int mode_mask;
unsigned int block_pos;
unsigned long timeout_jiffies;
@@ -455,49 +456,51 @@ static void tifm_ms_card_event(struct tifm_dev *sock)
return;
}
-static void tifm_ms_request(struct memstick_host *msh)
+static void tifm_ms_req_tasklet(unsigned long data)
{
+ struct memstick_host *msh = (struct memstick_host *)data;
struct tifm_ms *host = memstick_priv(msh);
struct tifm_dev *sock = host->dev;
unsigned long flags;
int rc;
spin_lock_irqsave(&sock->lock, flags);
- if (host->req) {
- printk(KERN_ERR "%s : unfinished request detected\n",
- sock->dev.bus_id);
- spin_unlock_irqrestore(&sock->lock, flags);
- tifm_eject(host->dev);
- return;
- }
+ if (!host->req) {
+ if (host->eject) {
+ do {
+ rc = memstick_next_req(msh, &host->req);
+ if (!rc)
+ host->req->error = -ETIME;
+ } while (!rc);
+ spin_unlock_irqrestore(&sock->lock, flags);
+ return;
+ }
- if (host->eject) {
do {
rc = memstick_next_req(msh, &host->req);
- if (!rc)
- host->req->error = -ETIME;
- } while (!rc);
- spin_unlock_irqrestore(&sock->lock, flags);
- return;
+ } while (!rc && tifm_ms_issue_cmd(host));
}
-
- do {
- rc = memstick_next_req(msh, &host->req);
- } while (!rc && tifm_ms_issue_cmd(host));
-
spin_unlock_irqrestore(&sock->lock, flags);
+}
+
+static void tifm_ms_dummy_submit(struct memstick_host *msh)
+{
return;
}
-static void tifm_ms_set_param(struct memstick_host *msh,
- enum memstick_param param,
- int value)
+static void tifm_ms_submit_req(struct memstick_host *msh)
{
struct tifm_ms *host = memstick_priv(msh);
- struct tifm_dev *sock = host->dev;
- unsigned long flags;
- spin_lock_irqsave(&sock->lock, flags);
+ tasklet_schedule(&host->notify);
+}
+
+static int tifm_ms_set_param(struct memstick_host *msh,
+ enum memstick_param param,
+ int value)
+{
+ struct tifm_ms *host = memstick_priv(msh);
+ struct tifm_dev *sock = host->dev;
switch (param) {
case MEMSTICK_POWER:
@@ -512,7 +515,8 @@ static void tifm_ms_set_param(struct memstick_host *msh,
writel(TIFM_MS_SYS_FCLR | TIFM_MS_SYS_INTCLR,
sock->addr + SOCK_MS_SYSTEM);
writel(0xffffffff, sock->addr + SOCK_MS_STATUS);
- }
+ } else
+ return -EINVAL;
break;
case MEMSTICK_INTERFACE:
if (value == MEMSTICK_SERIAL) {
@@ -525,11 +529,12 @@ static void tifm_ms_set_param(struct memstick_host *msh,
writel(TIFM_CTRL_FAST_CLK
| readl(sock->addr + SOCK_CONTROL),
sock->addr + SOCK_CONTROL);
- }
+ } else
+ return -EINVAL;
break;
};
- spin_unlock_irqrestore(&sock->lock, flags);
+ return 0;
}
static void tifm_ms_abort(unsigned long data)
@@ -570,8 +575,9 @@ static int tifm_ms_probe(struct tifm_dev *sock)
host->timeout_jiffies = msecs_to_jiffies(1000);
setup_timer(&host->timer, tifm_ms_abort, (unsigned long)host);
+ tasklet_init(&host->notify, tifm_ms_req_tasklet, (unsigned long)msh);
- msh->request = tifm_ms_request;
+ msh->request = tifm_ms_submit_req;
msh->set_param = tifm_ms_set_param;
sock->card_event = tifm_ms_card_event;
sock->data_event = tifm_ms_data_event;
@@ -593,6 +599,8 @@ static void tifm_ms_remove(struct tifm_dev *sock)
int rc = 0;
unsigned long flags;
+ msh->request = tifm_ms_dummy_submit;
+ tasklet_kill(&host->notify);
spin_lock_irqsave(&sock->lock, flags);
host->eject = 1;
if (host->req) {
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
index 241592ab13ad..3f15fcfe4a2e 100644
--- a/drivers/message/fusion/lsi/mpi_history.txt
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -127,7 +127,7 @@ mpi_ioc.h
* 08-08-01 01.02.01 Original release for v1.2 work.
* New format for FWVersion and ProductId in
* MSG_IOC_FACTS_REPLY and MPI_FW_HEADER.
- * 08-31-01 01.02.02 Addded event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and
+ * 08-31-01 01.02.02 Added event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and
* related structure and defines.
* Added event MPI_EVENT_ON_BUS_TIMER_EXPIRED.
* Added MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE.
@@ -187,7 +187,7 @@ mpi_ioc.h
* 10-11-06 01.05.12 Added MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED.
* Added MaxInitiators field to PortFacts reply.
* Added SAS Device Status Change ReasonCode for
- * asynchronous notificaiton.
+ * asynchronous notification.
* Added MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE and event
* data structure.
* Added new ImageType values for FWDownload and FWUpload
@@ -213,7 +213,7 @@ mpi_cnfg.h
* Added _RESPONSE_ID_MASK definition to SCSI_PORT_1
* page and updated the page version.
* Added Information field and _INFO_PARAMS_NEGOTIATED
- * definitionto SCSI_DEVICE_0 page.
+ * definition to SCSI_DEVICE_0 page.
* 06-22-00 01.00.03 Removed batch controls from LAN_0 page and updated the
* page version.
* Added BucketsRemaining to LAN_1 page, redefined the
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 34402c47027e..d6a0074b9dc3 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -273,12 +273,12 @@ mpt_fault_reset_work(struct work_struct *work)
ioc_raw_state = mpt_GetIocState(ioc, 0);
if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
- ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
+ ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
- ioc->name, __FUNCTION__);
+ ioc->name, __func__);
rc = mpt_HardResetHandler(ioc, CAN_SLEEP);
printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name,
- __FUNCTION__, (rc == 0) ? "success" : "failed");
+ __func__, (rc == 0) ? "success" : "failed");
ioc_raw_state = mpt_GetIocState(ioc, 0);
if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
@@ -356,7 +356,7 @@ mpt_turbo_reply(MPT_ADAPTER *ioc, u32 pa)
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
MptCallbacks[cb_idx] == NULL) {
printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
- __FUNCTION__, ioc->name, cb_idx);
+ __func__, ioc->name, cb_idx);
goto out;
}
@@ -420,7 +420,7 @@ mpt_reply(MPT_ADAPTER *ioc, u32 pa)
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
MptCallbacks[cb_idx] == NULL) {
printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
- __FUNCTION__, ioc->name, cb_idx);
+ __func__, ioc->name, cb_idx);
freeme = 0;
goto out;
}
@@ -2434,7 +2434,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
if (ioc->cached_fw != NULL) {
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: Pushing FW onto "
- "adapter\n", __FUNCTION__, ioc->name));
+ "adapter\n", __func__, ioc->name));
if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
ioc->cached_fw, CAN_SLEEP)) < 0) {
printk(MYIOC_s_WARN_FMT
@@ -3693,7 +3693,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset "
- "address=%p\n", ioc->name, __FUNCTION__,
+ "address=%p\n", ioc->name, __func__,
&ioc->chip->Doorbell, &ioc->chip->Reset_1078));
CHIPREG_WRITE32(&ioc->chip->Reset_1078, 0x07);
if (sleepFlag == CAN_SLEEP)
@@ -4742,12 +4742,12 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
break;
}
- printk("%s: persist_opcode=%x\n",__FUNCTION__, persist_opcode);
+ printk("%s: persist_opcode=%x\n",__func__, persist_opcode);
/* Get a MF for this command.
*/
if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
- printk("%s: no msg frames!\n",__FUNCTION__);
+ printk("%s: no msg frames!\n",__func__);
return -1;
}
@@ -4771,13 +4771,13 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
(SasIoUnitControlReply_t *)ioc->persist_reply_frame;
if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
- __FUNCTION__,
+ __func__,
sasIoUnitCntrReply->IOCStatus,
sasIoUnitCntrReply->IOCLogInfo);
return -1;
}
- printk("%s: success\n",__FUNCTION__);
+ printk("%s: success\n",__func__);
return 0;
}
@@ -5784,7 +5784,7 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
- ioc->name,__FUNCTION__));
+ ioc->name,__func__));
return -1;
}
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index a5920423e2b2..f5233f3d9eff 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -505,7 +505,7 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
event = le32_to_cpu(pEvReply->Event) & 0xFF;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s() called\n",
- ioc->name, __FUNCTION__));
+ ioc->name, __func__));
if(async_queue == NULL)
return 1;
@@ -2482,7 +2482,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
*/
if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
- ioc->name,__FUNCTION__));
+ ioc->name,__func__));
goto out;
}
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index b36cae9ec6db..c3c24fdf9fb6 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -231,28 +231,28 @@ static int
mptfc_abort(struct scsi_cmnd *SCpnt)
{
return
- mptfc_block_error_handler(SCpnt, mptscsih_abort, __FUNCTION__);
+ mptfc_block_error_handler(SCpnt, mptscsih_abort, __func__);
}
static int
mptfc_dev_reset(struct scsi_cmnd *SCpnt)
{
return
- mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __FUNCTION__);
+ mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __func__);
}
static int
mptfc_bus_reset(struct scsi_cmnd *SCpnt)
{
return
- mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __FUNCTION__);
+ mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __func__);
}
static int
mptfc_host_reset(struct scsi_cmnd *SCpnt)
{
return
- mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __FUNCTION__);
+ mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __func__);
}
static void
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index d709d92b7b30..a1abf95cf751 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -610,7 +610,7 @@ mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
- __FUNCTION__, sent));
+ __func__, sent));
priv->SendCtl[ctx].skb = NULL;
pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
@@ -676,7 +676,7 @@ mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
- __FUNCTION__, sent));
+ __func__, sent));
priv->SendCtl[ctx].skb = NULL;
pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
@@ -715,7 +715,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
u16 cur_naa = 0x1000;
dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
- __FUNCTION__, skb));
+ __func__, skb));
spin_lock_irqsave(&priv->txfidx_lock, flags);
if (priv->mpt_txfidx_tail < 0) {
@@ -723,7 +723,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&priv->txfidx_lock, flags);
printk (KERN_ERR "%s: no tx context available: %u\n",
- __FUNCTION__, priv->mpt_txfidx_tail);
+ __func__, priv->mpt_txfidx_tail);
return 1;
}
@@ -733,7 +733,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&priv->txfidx_lock, flags);
printk (KERN_ERR "%s: Unable to alloc request frame\n",
- __FUNCTION__);
+ __func__);
return 1;
}
@@ -1208,7 +1208,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
- __FUNCTION__, buckets, curr));
+ __func__, buckets, curr));
max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
@@ -1217,9 +1217,9 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
mf = mpt_get_msg_frame(LanCtx, mpt_dev);
if (mf == NULL) {
printk (KERN_ERR "%s: Unable to alloc request frame\n",
- __FUNCTION__);
+ __func__);
dioprintk((KERN_ERR "%s: %u buckets remaining\n",
- __FUNCTION__, buckets));
+ __func__, buckets));
goto out;
}
pRecvReq = (LANReceivePostRequest_t *) mf;
@@ -1244,7 +1244,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
spin_lock_irqsave(&priv->rxfidx_lock, flags);
if (priv->mpt_rxfidx_tail < 0) {
printk (KERN_ERR "%s: Can't alloc context\n",
- __FUNCTION__);
+ __func__);
spin_unlock_irqrestore(&priv->rxfidx_lock,
flags);
break;
@@ -1267,7 +1267,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
if (skb == NULL) {
printk (KERN_WARNING
MYNAM "/%s: Can't alloc skb\n",
- __FUNCTION__);
+ __func__);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
break;
@@ -1305,7 +1305,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
if (pSimple == NULL) {
/**/ printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
-/**/ __FUNCTION__);
+/**/ __func__);
mpt_free_msg_frame(mpt_dev, mf);
goto out;
}
@@ -1329,9 +1329,9 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
out:
dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
- __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
+ __func__, buckets, atomic_read(&priv->buckets_out)));
dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
- __FUNCTION__, priv->total_posted, priv->total_received));
+ __func__, priv->total_posted, priv->total_received));
clear_bit(0, &priv->post_buckets_active);
}
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index b1147aa7afde..12b732512e57 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -300,7 +300,7 @@ mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_detai
phy_info = port_info->phy_info;
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: [%p]: num_phys=%02d "
- "bitmask=0x%016llX\n", ioc->name, __FUNCTION__, port_details,
+ "bitmask=0x%016llX\n", ioc->name, __func__, port_details,
port_details->num_phys, (unsigned long long)
port_details->phy_bitmask));
@@ -411,7 +411,7 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
*/
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: [%p]: deleting phy = %d\n",
- ioc->name, __FUNCTION__, port_details, i));
+ ioc->name, __func__, port_details, i));
port_details->num_phys--;
port_details->phy_bitmask &= ~ (1 << phy_info->phy_id);
memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
@@ -497,7 +497,7 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
continue;
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: [%p]: phy_id=%02d num_phys=%02d "
- "bitmask=0x%016llX\n", ioc->name, __FUNCTION__,
+ "bitmask=0x%016llX\n", ioc->name, __func__,
port_details, i, port_details->num_phys,
(unsigned long long)port_details->phy_bitmask));
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\t\tport = %p rphy=%p\n",
@@ -553,7 +553,7 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames @%d!!\n",
- ioc->name,__FUNCTION__, __LINE__));
+ ioc->name,__func__, __LINE__));
return 0;
}
@@ -606,7 +606,7 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
GFP_ATOMIC);
if (!target_reset_list) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
- ioc->name,__FUNCTION__, __LINE__));
+ ioc->name,__func__, __LINE__));
return;
}
@@ -673,7 +673,7 @@ mptsas_dev_reset_complete(MPT_ADAPTER *ioc)
ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
if (!ev) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
- ioc->name,__FUNCTION__, __LINE__));
+ ioc->name,__func__, __LINE__));
return;
}
@@ -1183,7 +1183,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
reply = (SasIoUnitControlReply_t *)ioc->sas_mgmt.reply;
if (reply->IOCStatus != MPI_IOCSTATUS_SUCCESS) {
printk(MYIOC_s_INFO_FMT "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
- ioc->name, __FUNCTION__, reply->IOCStatus, reply->IOCLogInfo);
+ ioc->name, __func__, reply->IOCStatus, reply->IOCLogInfo);
error = -ENXIO;
goto out_unlock;
}
@@ -1270,14 +1270,14 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
if (!rsp) {
printk(MYIOC_s_ERR_FMT "%s: the smp response space is missing\n",
- ioc->name, __FUNCTION__);
+ ioc->name, __func__);
return -EINVAL;
}
/* do we need to support multiple segments? */
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
- ioc->name, __FUNCTION__, req->bio->bi_vcnt, req->data_len,
+ ioc->name, __func__, req->bio->bi_vcnt, req->data_len,
rsp->bio->bi_vcnt, rsp->data_len);
return -EINVAL;
}
@@ -1343,7 +1343,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
if (!timeleft) {
- printk(MYIOC_s_ERR_FMT "%s: smp timeout!\n", ioc->name, __FUNCTION__);
+ printk(MYIOC_s_ERR_FMT "%s: smp timeout!\n", ioc->name, __func__);
/* On timeout reset the board */
mpt_HardResetHandler(ioc, CAN_SLEEP);
ret = -ETIMEDOUT;
@@ -1361,7 +1361,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
rsp->data_len -= smprep->ResponseDataLength;
} else {
printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
- ioc->name, __FUNCTION__);
+ ioc->name, __func__);
ret = -ENXIO;
}
unmap:
@@ -2006,7 +2006,7 @@ static int mptsas_probe_one_phy(struct device *dev,
if (error) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
- __FUNCTION__, __LINE__));
+ __func__, __LINE__));
goto out;
}
mptsas_set_port(ioc, phy_info, port);
@@ -2076,7 +2076,7 @@ static int mptsas_probe_one_phy(struct device *dev,
if (!rphy) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
- __FUNCTION__, __LINE__));
+ __func__, __LINE__));
goto out;
}
@@ -2085,7 +2085,7 @@ static int mptsas_probe_one_phy(struct device *dev,
if (error) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
- __FUNCTION__, __LINE__));
+ __func__, __LINE__));
sas_rphy_free(rphy);
goto out;
}
@@ -2613,7 +2613,7 @@ mptsas_hotplug_work(struct work_struct *work)
(ev->channel << 8) + ev->id)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
- __FUNCTION__, __LINE__));
+ __func__, __LINE__));
break;
}
phy_info = mptsas_find_phyinfo_by_sas_address(
@@ -2633,20 +2633,20 @@ mptsas_hotplug_work(struct work_struct *work)
if (!phy_info){
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
- __FUNCTION__, __LINE__));
+ __func__, __LINE__));
break;
}
if (!phy_info->port_details) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
- __FUNCTION__, __LINE__));
+ __func__, __LINE__));
break;
}
rphy = mptsas_get_rphy(phy_info);
if (!rphy) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
- __FUNCTION__, __LINE__));
+ __func__, __LINE__));
break;
}
@@ -2654,7 +2654,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (!port) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
- __FUNCTION__, __LINE__));
+ __func__, __LINE__));
break;
}
@@ -2665,7 +2665,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (!vtarget) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
- __FUNCTION__, __LINE__));
+ __func__, __LINE__));
break;
}
@@ -2720,7 +2720,7 @@ mptsas_hotplug_work(struct work_struct *work)
(ev->channel << 8) + ev->id)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
- __FUNCTION__, __LINE__));
+ __func__, __LINE__));
break;
}
@@ -2732,7 +2732,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (!phy_info || !phy_info->port_details) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
- __FUNCTION__, __LINE__));
+ __func__, __LINE__));
break;
}
@@ -2744,7 +2744,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (!vtarget) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
- __FUNCTION__, __LINE__));
+ __func__, __LINE__));
break;
}
/*
@@ -2767,7 +2767,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (mptsas_get_rphy(phy_info)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
- __FUNCTION__, __LINE__));
+ __func__, __LINE__));
if (ev->channel) printk("%d\n", __LINE__);
break;
}
@@ -2776,7 +2776,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (!port) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
- __FUNCTION__, __LINE__));
+ __func__, __LINE__));
break;
}
memcpy(&phy_info->attached, &sas_device,
@@ -2801,7 +2801,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (!rphy) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
- __FUNCTION__, __LINE__));
+ __func__, __LINE__));
break; /* non-fatal: an rphy can be added later */
}
@@ -2809,7 +2809,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (sas_rphy_add(rphy)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
- __FUNCTION__, __LINE__));
+ __func__, __LINE__));
sas_rphy_free(rphy);
break;
}
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index d142b6b4b976..9f9354fd3516 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -461,7 +461,7 @@ mptscsih_issue_sep_command(MPT_ADAPTER *ioc, VirtTarget *vtarget,
if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: no msg frames!!\n",
- ioc->name,__FUNCTION__));
+ ioc->name,__func__));
return;
}
@@ -2187,7 +2187,7 @@ mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *m
(ioc->debug_level & MPT_DEBUG_TM ))
printk("%s: ha=%d [%d:%d:0] task_type=0x%02X "
"iocstatus=0x%04X\n\tloginfo=0x%08X response_code=0x%02X "
- "term_cmnds=%d\n", __FUNCTION__, ioc->id, pScsiTmReply->Bus,
+ "term_cmnds=%d\n", __func__, ioc->id, pScsiTmReply->Bus,
pScsiTmReply->TargetID, pScsiTmReq->TaskType,
le16_to_cpu(pScsiTmReply->IOCStatus),
le32_to_cpu(pScsiTmReply->IOCLogInfo),pScsiTmReply->ResponseCode,
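For reference, the fusion hunks above are a mechanical rename of the GNU-specific __FUNCTION__ to the C99 identifier __func__. A minimal sketch of the idiom, using a hypothetical debug macro rather than the driver's own wrappers:

#include <linux/kernel.h>

/* Hypothetical debug macro, not from the driver: prefix every message
 * with the name of the calling function. */
#define demo_dbg(fmt, ...) \
	printk(KERN_DEBUG "%s: " fmt, __func__, ##__VA_ARGS__)

static void demo(void)
{
	/* __func__ is the C99 predefined identifier; unlike the GNU-only
	 * __FUNCTION__ it is portable across conforming compilers. */
	demo_dbg("called at line %d\n", __LINE__);
}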
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 3b870e7fb3e1..c6408a62d95e 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -256,28 +256,28 @@ static int asic3_gpio_irq_type(unsigned int irq, unsigned int type)
bank + ASIC3_GPIO_TRIGGER_TYPE);
asic->irq_bothedge[(irq - asic->irq_base) >> 4] &= ~bit;
- if (type == IRQT_RISING) {
+ if (type == IRQ_TYPE_EDGE_RISING) {
trigger |= bit;
edge |= bit;
- } else if (type == IRQT_FALLING) {
+ } else if (type == IRQ_TYPE_EDGE_FALLING) {
trigger |= bit;
edge &= ~bit;
- } else if (type == IRQT_BOTHEDGE) {
+ } else if (type == IRQ_TYPE_EDGE_BOTH) {
trigger |= bit;
if (asic3_gpio_get(&asic->gpio, irq - asic->irq_base))
edge &= ~bit;
else
edge |= bit;
asic->irq_bothedge[(irq - asic->irq_base) >> 4] |= bit;
- } else if (type == IRQT_LOW) {
+ } else if (type == IRQ_TYPE_LEVEL_LOW) {
trigger &= ~bit;
level &= ~bit;
- } else if (type == IRQT_HIGH) {
+ } else if (type == IRQ_TYPE_LEVEL_HIGH) {
trigger &= ~bit;
level |= bit;
} else {
/*
- * if type == IRQT_NOEDGE, we should mask interrupts, but
+ * if type == IRQ_TYPE_NONE, we should mask interrupts, but
* be careful to not unmask them if mask was also called.
* Probably need internal state for mask.
*/
@@ -314,10 +314,12 @@ static int __init asic3_irq_probe(struct platform_device *pdev)
unsigned long clksel = 0;
unsigned int irq, irq_base;
int map_size;
+ int ret;
- asic->irq_nr = platform_get_irq(pdev, 0);
- if (asic->irq_nr < 0)
- return asic->irq_nr;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ asic->irq_nr = ret;
/* turn on clock to IRQ controller */
clksel |= CLOCK_SEL_CX;
@@ -341,7 +343,7 @@ static int __init asic3_irq_probe(struct platform_device *pdev)
ASIC3_INTMASK_GINTMASK);
set_irq_chained_handler(asic->irq_nr, asic3_irq_demux);
- set_irq_type(asic->irq_nr, IRQT_RISING);
+ set_irq_type(asic->irq_nr, IRQ_TYPE_EDGE_RISING);
set_irq_data(asic->irq_nr, asic);
return 0;
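The asic3 hunks above map the old ARM-specific IRQT_* trigger names onto the generic IRQ_TYPE_* constants. A minimal sketch of requesting an interrupt with the generic trigger names; the IRQ number and handler are hypothetical, not taken from the patch:

#include <linux/interrupt.h>
#include <linux/irq.h>

#define DEMO_IRQ 42	/* hypothetical IRQ number */

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	/* acknowledge the hardware here */
	return IRQ_HANDLED;
}

static int demo_attach_irq(void)
{
	/* Generic constants replace IRQT_RISING, IRQT_FALLING, ... */
	set_irq_type(DEMO_IRQ, IRQ_TYPE_EDGE_RISING);
	return request_irq(DEMO_IRQ, demo_isr, 0, "demo", NULL);
}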
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 2d87501b6fd4..94e55e8e7ce6 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -324,7 +324,7 @@ static void tc6393xb_attach_irq(struct platform_device *dev)
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
- set_irq_type(tc6393xb->irq, IRQT_FALLING);
+ set_irq_type(tc6393xb->irq, IRQ_TYPE_EDGE_FALLING);
set_irq_data(tc6393xb->irq, tc6393xb);
set_irq_chained_handler(tc6393xb->irq, tc6393xb_irq);
}
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 321eb9134635..f5ade1904aad 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -360,7 +360,7 @@ config THINKPAD_ACPI_VIDEO
If you are not sure, say Y here.
config THINKPAD_ACPI_HOTKEY_POLL
- bool "Suport NVRAM polling for hot keys"
+ bool "Support NVRAM polling for hot keys"
depends on THINKPAD_ACPI
default y
---help---
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index e171650766ce..bf5e4d065436 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -13,7 +13,6 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atmel-ssc.h>
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 19a1a254a0c5..889e5f898f6f 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -12,3 +12,4 @@ mmc_core-y := core.o bus.o host.o \
sdio.o sdio_ops.o sdio_bus.o \
sdio_cis.o sdio_io.o sdio_irq.o
+mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index fd95b18e988b..0d9b2d6f9ebf 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -252,6 +252,10 @@ int mmc_add_card(struct mmc_card *card)
if (ret)
return ret;
+#ifdef CONFIG_DEBUG_FS
+ mmc_add_card_debugfs(card);
+#endif
+
mmc_card_set_present(card);
return 0;
@@ -263,6 +267,10 @@ int mmc_add_card(struct mmc_card *card)
*/
void mmc_remove_card(struct mmc_card *card)
{
+#ifdef CONFIG_DEBUG_FS
+ mmc_remove_card_debugfs(card);
+#endif
+
if (mmc_card_present(card)) {
if (mmc_host_is_spi(card->host)) {
printk(KERN_INFO "%s: SPI card removed\n",
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index cdb332b7dedc..c819effa1032 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -52,5 +52,12 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr);
extern int use_spi_crc;
+/* Debugfs information for hosts and cards */
+void mmc_add_host_debugfs(struct mmc_host *host);
+void mmc_remove_host_debugfs(struct mmc_host *host);
+
+void mmc_add_card_debugfs(struct mmc_card *card);
+void mmc_remove_card_debugfs(struct mmc_card *card);
+
#endif
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
new file mode 100644
index 000000000000..1237bb4c722b
--- /dev/null
+++ b/drivers/mmc/core/debugfs.c
@@ -0,0 +1,225 @@
+/*
+ * Debugfs support for hosts and cards
+ *
+ * Copyright (C) 2008 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/stat.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+
+#include "core.h"
+#include "mmc_ops.h"
+
+/* This file is only built when CONFIG_DEBUG_FS is set; see the Makefile. */
+static int mmc_ios_show(struct seq_file *s, void *data)
+{
+ static const char *vdd_str[] = {
+ [8] = "2.0",
+ [9] = "2.1",
+ [10] = "2.2",
+ [11] = "2.3",
+ [12] = "2.4",
+ [13] = "2.5",
+ [14] = "2.6",
+ [15] = "2.7",
+ [16] = "2.8",
+ [17] = "2.9",
+ [18] = "3.0",
+ [19] = "3.1",
+ [20] = "3.2",
+ [21] = "3.3",
+ [22] = "3.4",
+ [23] = "3.5",
+ [24] = "3.6",
+ };
+ struct mmc_host *host = s->private;
+ struct mmc_ios *ios = &host->ios;
+ const char *str;
+
+ seq_printf(s, "clock:\t\t%u Hz\n", ios->clock);
+ seq_printf(s, "vdd:\t\t%u ", ios->vdd);
+ if ((1 << ios->vdd) & MMC_VDD_165_195)
+ seq_printf(s, "(1.65 - 1.95 V)\n");
+ else if (ios->vdd < (ARRAY_SIZE(vdd_str) - 1)
+ && vdd_str[ios->vdd] && vdd_str[ios->vdd + 1])
+ seq_printf(s, "(%s ~ %s V)\n", vdd_str[ios->vdd],
+ vdd_str[ios->vdd + 1]);
+ else
+ seq_printf(s, "(invalid)\n");
+
+ switch (ios->bus_mode) {
+ case MMC_BUSMODE_OPENDRAIN:
+ str = "open drain";
+ break;
+ case MMC_BUSMODE_PUSHPULL:
+ str = "push-pull";
+ break;
+ default:
+ str = "invalid";
+ break;
+ }
+ seq_printf(s, "bus mode:\t%u (%s)\n", ios->bus_mode, str);
+
+ switch (ios->chip_select) {
+ case MMC_CS_DONTCARE:
+ str = "don't care";
+ break;
+ case MMC_CS_HIGH:
+ str = "active high";
+ break;
+ case MMC_CS_LOW:
+ str = "active low";
+ break;
+ default:
+ str = "invalid";
+ break;
+ }
+ seq_printf(s, "chip select:\t%u (%s)\n", ios->chip_select, str);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ str = "off";
+ break;
+ case MMC_POWER_UP:
+ str = "up";
+ break;
+ case MMC_POWER_ON:
+ str = "on";
+ break;
+ default:
+ str = "invalid";
+ break;
+ }
+ seq_printf(s, "power mode:\t%u (%s)\n", ios->power_mode, str);
+ seq_printf(s, "bus width:\t%u (%u bits)\n",
+ ios->bus_width, 1 << ios->bus_width);
+
+ switch (ios->timing) {
+ case MMC_TIMING_LEGACY:
+ str = "legacy";
+ break;
+ case MMC_TIMING_MMC_HS:
+ str = "mmc high-speed";
+ break;
+ case MMC_TIMING_SD_HS:
+ str = "sd high-speed";
+ break;
+ default:
+ str = "invalid";
+ break;
+ }
+ seq_printf(s, "timing spec:\t%u (%s)\n", ios->timing, str);
+
+ return 0;
+}
+
+static int mmc_ios_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mmc_ios_show, inode->i_private);
+}
+
+static const struct file_operations mmc_ios_fops = {
+ .open = mmc_ios_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void mmc_add_host_debugfs(struct mmc_host *host)
+{
+ struct dentry *root;
+
+ root = debugfs_create_dir(mmc_hostname(host), NULL);
+ if (IS_ERR(root))
+ /* Don't complain -- debugfs just isn't enabled */
+ return;
+ if (!root)
+ /* Complain -- debugfs is enabled, but it failed to
+ * create the directory. */
+ goto err_root;
+
+ host->debugfs_root = root;
+
+ if (!debugfs_create_file("ios", S_IRUSR, root, host, &mmc_ios_fops))
+ goto err_ios;
+
+ return;
+
+err_ios:
+ debugfs_remove_recursive(root);
+ host->debugfs_root = NULL;
+err_root:
+ dev_err(&host->class_dev, "failed to initialize debugfs\n");
+}
+
+void mmc_remove_host_debugfs(struct mmc_host *host)
+{
+ debugfs_remove_recursive(host->debugfs_root);
+}
+
+static int mmc_dbg_card_status_get(void *data, u64 *val)
+{
+ struct mmc_card *card = data;
+ u32 status;
+ int ret;
+
+ mmc_claim_host(card->host);
+
+ ret = mmc_send_status(card, &status);
+ if (!ret)
+ *val = status;
+
+ mmc_release_host(card->host);
+
+ return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
+ NULL, "%08llx\n");
+
+void mmc_add_card_debugfs(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ struct dentry *root;
+
+ if (!host->debugfs_root)
+ return;
+
+ root = debugfs_create_dir(mmc_card_id(card), host->debugfs_root);
+ if (IS_ERR(root))
+ /* Don't complain -- debugfs just isn't enabled */
+ return;
+ if (!root)
+ /* Complain -- debugfs is enabled, but it failed to
+ * create the directory. */
+ goto err;
+
+ card->debugfs_root = root;
+
+ if (!debugfs_create_x32("state", S_IRUSR, root, &card->state))
+ goto err;
+
+ if (mmc_card_mmc(card) || mmc_card_sd(card))
+ if (!debugfs_create_file("status", S_IRUSR, root, card,
+ &mmc_dbg_card_status_fops))
+ goto err;
+
+ return;
+
+err:
+ debugfs_remove_recursive(root);
+ card->debugfs_root = NULL;
+ dev_err(&card->dev, "failed to initialize debugfs\n");
+}
+
+void mmc_remove_card_debugfs(struct mmc_card *card)
+{
+ debugfs_remove_recursive(card->debugfs_root);
+}
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 1d795c5379b5..6da80fd4d974 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -127,6 +127,10 @@ int mmc_add_host(struct mmc_host *host)
if (err)
return err;
+#ifdef CONFIG_DEBUG_FS
+ mmc_add_host_debugfs(host);
+#endif
+
mmc_start_host(host);
return 0;
@@ -146,6 +150,10 @@ void mmc_remove_host(struct mmc_host *host)
{
mmc_stop_host(host);
+#ifdef CONFIG_DEBUG_FS
+ mmc_remove_host_debugfs(host);
+#endif
+
device_del(&host->class_dev);
led_trigger_unregister_simple(host->led);
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
index a9a5657706c6..26bd80e65031 100644
--- a/drivers/mmc/host/atmel-mci-regs.h
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -82,6 +82,8 @@
# define MCI_OVRE ( 1 << 30) /* RX Overrun Error */
# define MCI_UNRE ( 1 << 31) /* TX Underrun Error */
+#define MCI_REGS_SIZE 0x100
+
/* Register access macros */
#define mci_readl(port,reg) \
__raw_readl((port)->regs + MCI_##reg)
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index cce873c5a149..992b4beb757c 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -9,13 +9,18 @@
*/
#include <linux/blkdev.h>
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/stat.h>
#include <linux/mmc/host.h>
@@ -24,7 +29,6 @@
#include <asm/unaligned.h>
#include <asm/arch/board.h>
-#include <asm/arch/gpio.h>
#include "atmel-mci-regs.h"
@@ -88,6 +92,188 @@ struct atmel_mci {
#define atmci_clear_pending(host, event) \
clear_bit(event, &host->pending_events)
+/*
+ * The debugfs stuff below is mostly optimized away when
+ * CONFIG_DEBUG_FS is not set.
+ */
+static int atmci_req_show(struct seq_file *s, void *v)
+{
+ struct atmel_mci *host = s->private;
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_command *cmd;
+ struct mmc_command *stop;
+ struct mmc_data *data;
+
+ /* Make sure we get a consistent snapshot */
+ spin_lock_irq(&host->mmc->lock);
+
+ if (mrq) {
+ cmd = mrq->cmd;
+ data = mrq->data;
+ stop = mrq->stop;
+
+ if (cmd)
+ seq_printf(s,
+ "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
+ cmd->opcode, cmd->arg, cmd->flags,
+ cmd->resp[0], cmd->resp[1], cmd->resp[2],
+ cmd->resp[3], cmd->error);
+ if (data)
+ seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
+ data->bytes_xfered, data->blocks,
+ data->blksz, data->flags, data->error);
+ if (stop)
+ seq_printf(s,
+ "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
+ stop->opcode, stop->arg, stop->flags,
+ stop->resp[0], stop->resp[1], stop->resp[2],
+ stop->resp[3], stop->error);
+ }
+
+ spin_unlock_irq(&host->mmc->lock);
+
+ return 0;
+}
+
+static int atmci_req_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, atmci_req_show, inode->i_private);
+}
+
+static const struct file_operations atmci_req_fops = {
+ .owner = THIS_MODULE,
+ .open = atmci_req_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void atmci_show_status_reg(struct seq_file *s,
+ const char *regname, u32 value)
+{
+ static const char *sr_bit[] = {
+ [0] = "CMDRDY",
+ [1] = "RXRDY",
+ [2] = "TXRDY",
+ [3] = "BLKE",
+ [4] = "DTIP",
+ [5] = "NOTBUSY",
+ [8] = "SDIOIRQA",
+ [9] = "SDIOIRQB",
+ [16] = "RINDE",
+ [17] = "RDIRE",
+ [18] = "RCRCE",
+ [19] = "RENDE",
+ [20] = "RTOE",
+ [21] = "DCRCE",
+ [22] = "DTOE",
+ [30] = "OVRE",
+ [31] = "UNRE",
+ };
+ unsigned int i;
+
+ seq_printf(s, "%s:\t0x%08x", regname, value);
+ for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
+ if (value & (1 << i)) {
+ if (sr_bit[i])
+ seq_printf(s, " %s", sr_bit[i]);
+ else
+ seq_puts(s, " UNKNOWN");
+ }
+ }
+ seq_putc(s, '\n');
+}
+
+static int atmci_regs_show(struct seq_file *s, void *v)
+{
+ struct atmel_mci *host = s->private;
+ u32 *buf;
+
+ buf = kmalloc(MCI_REGS_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Grab a more or less consistent snapshot */
+ spin_lock_irq(&host->mmc->lock);
+ memcpy_fromio(buf, host->regs, MCI_REGS_SIZE);
+ spin_unlock_irq(&host->mmc->lock);
+
+ seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n",
+ buf[MCI_MR / 4],
+ buf[MCI_MR / 4] & MCI_MR_RDPROOF ? " RDPROOF" : "",
+ buf[MCI_MR / 4] & MCI_MR_WRPROOF ? " WRPROOF" : "",
+ buf[MCI_MR / 4] & 0xff);
+ seq_printf(s, "DTOR:\t0x%08x\n", buf[MCI_DTOR / 4]);
+ seq_printf(s, "SDCR:\t0x%08x\n", buf[MCI_SDCR / 4]);
+ seq_printf(s, "ARGR:\t0x%08x\n", buf[MCI_ARGR / 4]);
+ seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
+ buf[MCI_BLKR / 4],
+ buf[MCI_BLKR / 4] & 0xffff,
+ (buf[MCI_BLKR / 4] >> 16) & 0xffff);
+
+ /* Don't read RSPR and RDR; it will consume the data there */
+
+ atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]);
+ atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]);
+
+ return 0;
+}
+
+static int atmci_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, atmci_regs_show, inode->i_private);
+}
+
+static const struct file_operations atmci_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = atmci_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void atmci_init_debugfs(struct atmel_mci *host)
+{
+ struct mmc_host *mmc;
+ struct dentry *root;
+ struct dentry *node;
+ struct resource *res;
+
+ mmc = host->mmc;
+ root = mmc->debugfs_root;
+ if (!root)
+ return;
+
+ node = debugfs_create_file("regs", S_IRUSR, root, host,
+ &atmci_regs_fops);
+ if (IS_ERR(node))
+ return;
+ if (!node)
+ goto err;
+
+ res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);
+ node->d_inode->i_size = res->end - res->start + 1;
+
+ node = debugfs_create_file("req", S_IRUSR, root, host, &atmci_req_fops);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_x32("pending_events", S_IRUSR, root,
+ (u32 *)&host->pending_events);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_x32("completed_events", S_IRUSR, root,
+ (u32 *)&host->completed_events);
+ if (!node)
+ goto err;
+
+ return;
+
+err:
+ dev_err(&host->pdev->dev,
+ "failed to initialize debugfs for controller\n");
+}
static void atmci_enable(struct atmel_mci *host)
{
@@ -388,7 +574,7 @@ static int atmci_get_ro(struct mmc_host *mmc)
int read_only = 0;
struct atmel_mci *host = mmc_priv(mmc);
- if (host->wp_pin >= 0) {
+ if (gpio_is_valid(host->wp_pin)) {
read_only = gpio_get_value(host->wp_pin);
dev_dbg(&mmc->class_dev, "card is %s\n",
read_only ? "read-only" : "read-write");
@@ -450,7 +636,7 @@ static void atmci_detect_change(unsigned long data)
* been freed.
*/
smp_rmb();
- if (host->detect_pin < 0)
+ if (!gpio_is_valid(host->detect_pin))
return;
enable_irq(gpio_to_irq(host->detect_pin));
@@ -865,7 +1051,7 @@ static int __init atmci_probe(struct platform_device *pdev)
/* Assume card is present if we don't have a detect pin */
host->present = 1;
- if (host->detect_pin >= 0) {
+ if (gpio_is_valid(host->detect_pin)) {
if (gpio_request(host->detect_pin, "mmc_detect")) {
dev_dbg(&mmc->class_dev, "no detect pin available\n");
host->detect_pin = -1;
@@ -873,7 +1059,7 @@ static int __init atmci_probe(struct platform_device *pdev)
host->present = !gpio_get_value(host->detect_pin);
}
}
- if (host->wp_pin >= 0) {
+ if (gpio_is_valid(host->wp_pin)) {
if (gpio_request(host->wp_pin, "mmc_wp")) {
dev_dbg(&mmc->class_dev, "no WP pin available\n");
host->wp_pin = -1;
@@ -884,7 +1070,7 @@ static int __init atmci_probe(struct platform_device *pdev)
mmc_add_host(mmc);
- if (host->detect_pin >= 0) {
+ if (gpio_is_valid(host->detect_pin)) {
setup_timer(&host->detect_timer, atmci_detect_change,
(unsigned long)host);
@@ -905,6 +1091,8 @@ static int __init atmci_probe(struct platform_device *pdev)
"Atmel MCI controller at 0x%08lx irq %d\n",
host->mapbase, irq);
+ atmci_init_debugfs(host);
+
return 0;
err_request_irq:
@@ -923,7 +1111,9 @@ static int __exit atmci_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
if (host) {
- if (host->detect_pin >= 0) {
+ /* Debugfs stuff is cleaned up by mmc core */
+
+ if (gpio_is_valid(host->detect_pin)) {
int pin = host->detect_pin;
/* Make sure the timer doesn't enable the interrupt */
@@ -943,7 +1133,7 @@ static int __exit atmci_remove(struct platform_device *pdev)
mci_readl(host, SR);
clk_disable(host->mck);
- if (host->wp_pin >= 0)
+ if (gpio_is_valid(host->wp_pin))
gpio_free(host->wp_pin);
free_irq(platform_get_irq(pdev, 0), host->mmc);
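The atmel-mci hunks above replace `pin >= 0` tests with gpio_is_valid(), which also rejects out-of-range numbers. A minimal sketch of handling an optional write-protect GPIO this way; the helper and its caller are hypothetical:

#include <linux/gpio.h>
#include <linux/device.h>

/* Hypothetical convention: an invalid wp_gpio means "no WP pin wired up". */
static int demo_setup_wp(struct device *dev, int wp_gpio)
{
	if (!gpio_is_valid(wp_gpio))
		return 0;	/* the pin is optional; just skip it */

	if (gpio_request(wp_gpio, "demo_wp")) {
		dev_dbg(dev, "no WP pin available\n");
		return 0;
	}

	/* gpio_get_value() reports the current write-protect state. */
	dev_dbg(dev, "card is %s\n",
		gpio_get_value(wp_gpio) ? "read-only" : "read-write");
	return 0;
}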
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index 5e880c0f1349..f61406da65d2 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -26,12 +26,6 @@
*
*/
-#ifdef CONFIG_MMC_DEBUG
-#define DEBUG
-#else
-#undef DEBUG
-#endif
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
@@ -907,31 +901,12 @@ static const struct mmc_host_ops imxmci_ops = {
.get_ro = imxmci_get_ro,
};
-static struct resource *platform_device_resource(struct platform_device *dev, unsigned int mask, int nr)
-{
- int i;
-
- for (i = 0; i < dev->num_resources; i++)
- if (dev->resource[i].flags == mask && nr-- == 0)
- return &dev->resource[i];
- return NULL;
-}
-
-static int platform_device_irq(struct platform_device *dev, int nr)
-{
- int i;
-
- for (i = 0; i < dev->num_resources; i++)
- if (dev->resource[i].flags == IORESOURCE_IRQ && nr-- == 0)
- return dev->resource[i].start;
- return NO_IRQ;
-}
-
static void imxmci_check_status(unsigned long data)
{
struct imxmci_host *host = (struct imxmci_host *)data;
- if( host->pdata->card_present(mmc_dev(host->mmc)) != host->present ) {
+ if (host->pdata && host->pdata->card_present &&
+ host->pdata->card_present(mmc_dev(host->mmc)) != host->present) {
host->present ^= 1;
dev_info(mmc_dev(host->mmc), "card %s\n",
host->present ? "inserted" : "removed");
@@ -962,13 +937,12 @@ static int imxmci_probe(struct platform_device *pdev)
printk(KERN_INFO "i.MX mmc driver\n");
- r = platform_device_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_device_irq(pdev, 0);
- if (!r || irq == NO_IRQ)
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!r || irq < 0)
return -ENXIO;
- r = request_mem_region(r->start, 0x100, "IMXMCI");
- if (!r)
+ if (!request_mem_region(r->start, 0x100, pdev->name))
return -EBUSY;
mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev);
@@ -995,6 +969,8 @@ static int imxmci_probe(struct platform_device *pdev)
host->mmc = mmc;
host->dma_allocated = 0;
host->pdata = pdev->dev.platform_data;
+ if (!host->pdata)
+ dev_warn(&pdev->dev, "No platform data provided!\n");
spin_lock_init(&host->lock);
host->res = r;
@@ -1047,7 +1023,11 @@ static int imxmci_probe(struct platform_device *pdev)
if (ret)
goto out;
- host->present = host->pdata->card_present(mmc_dev(mmc));
+ if (host->pdata && host->pdata->card_present)
+ host->present = host->pdata->card_present(mmc_dev(mmc));
+ else /* if there is no way to detect, assume the card is present */
+ host->present = 1;
+
init_timer(&host->timer);
host->timer.data = (unsigned long)host;
host->timer.function = imxmci_check_status;
@@ -1073,7 +1053,7 @@ out:
}
if (mmc)
mmc_free_host(mmc);
- release_resource(r);
+ release_mem_region(r->start, 0x100);
return ret;
}
@@ -1102,7 +1082,7 @@ static int imxmci_remove(struct platform_device *pdev)
clk_disable(host->clk);
clk_put(host->clk);
- release_resource(host->res);
+ release_mem_region(host->res->start, 0x100);
mmc_free_host(mmc);
}
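imxmmc drops its private resource-walking helpers in favour of the standard platform_get_resource()/platform_get_irq() calls. A minimal probe() sketch of that pattern, assuming a hypothetical device with a 0x100-byte register window:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/errno.h>

static int demo_probe(struct platform_device *pdev)
{
	struct resource *r;
	void __iomem *regs;
	int irq;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!r || irq < 0)
		return -ENXIO;

	/* Reserve the window before touching it; pdev->name doubles as owner. */
	if (!request_mem_region(r->start, 0x100, pdev->name))
		return -EBUSY;

	regs = ioremap(r->start, 0x100);
	if (!regs) {
		release_mem_region(r->start, 0x100);
		return -ENOMEM;
	}

	/* ... hardware setup would go here ... */

	iounmap(regs);
	release_mem_region(r->start, 0x100);
	return 0;
}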
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 41cc63360e43..7503b81374e0 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1076,6 +1076,7 @@ static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
*/
if (canpower && ios->power_mode == MMC_POWER_OFF) {
int mres;
+ u8 nullbyte = 0;
host->spi->mode &= ~(SPI_CPOL|SPI_CPHA);
mres = spi_setup(host->spi);
@@ -1083,7 +1084,7 @@ static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
dev_dbg(&host->spi->dev,
"switch to SPI mode 0 failed\n");
- if (spi_w8r8(host->spi, 0x00) < 0)
+ if (spi_write(host->spi, &nullbyte, 1) < 0)
dev_dbg(&host->spi->dev,
"put spi signals to low failed\n");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index c3a5db72ddd7..5f95e10229b5 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -337,7 +337,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
host->align_addr = dma_map_single(mmc_dev(host->mmc),
host->align_buffer, 128 * 4, direction);
- if (dma_mapping_error(host->align_addr))
+ if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
goto fail;
BUG_ON(host->align_addr & 0x3);
@@ -439,7 +439,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
host->adma_addr = dma_map_single(mmc_dev(host->mmc),
host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
- if (dma_mapping_error(host->align_addr))
+ if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
goto unmap_entries;
BUG_ON(host->adma_addr & 0x3);
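The sdhci hunks follow the dma_mapping_error() API change that added the struct device argument. A minimal sketch of mapping a streaming DMA buffer and checking the result; names are hypothetical and error signalling is deliberately simplified:

#include <linux/dma-mapping.h>
#include <linux/device.h>

/* Map 'buf' for DMA towards the device and verify the mapping took.
 * Returning 0 on failure is a simplification for this sketch. */
static dma_addr_t demo_map(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return 0;

	return addr;
}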
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index a06bf8b89343..e354faee5df0 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -9,6 +9,8 @@
* your option) any later version.
*/
+#include <linux/scatterlist.h>
+
/*
* Controller registers
*/
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index eed06d068fd1..14f11f8b9e5f 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -1,5 +1,3 @@
-# $Id: Kconfig,v 1.11 2005/11/07 11:14:19 gleixner Exp $
-
menuconfig MTD
tristate "Memory Technology Device (MTD) support"
depends on HAS_IOMEM
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c
index 52d51eb91c16..d072ca5be689 100644
--- a/drivers/mtd/afs.c
+++ b/drivers/mtd/afs.c
@@ -21,8 +21,6 @@
This is access code for flashes using ARM's flash partitioning
standards.
- $Id: afs.c,v 1.15 2005/11/07 11:14:19 gleixner Exp $
-
======================================================================*/
#include <linux/module.h>
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index fcd1aeccdf93..5f1b472137a0 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -4,8 +4,6 @@
*
* (C) 2000 Red Hat. GPL'd
*
- * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
- *
*
* 10/10/2000 Nicolas Pitre <nico@cam.org>
* - completely revamped method functions so they are aware and
@@ -50,6 +48,8 @@
#define I82802AC 0x00ac
#define MANUFACTURER_ST 0x0020
#define M50LPW080 0x002F
+#define M50FLW080A 0x0080
+#define M50FLW080B 0x0081
#define AT49BV640D 0x02de
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
@@ -204,7 +204,7 @@ static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
- struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
"erase on write disabled.\n");
@@ -301,6 +301,8 @@ static struct cfi_fixup jedec_fixup_table[] = {
{ MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
{ MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
{ MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
+ { MANUFACTURER_ST, M50FLW080A, fixup_use_fwh_lock, NULL, },
+ { MANUFACTURER_ST, M50FLW080B, fixup_use_fwh_lock, NULL, },
{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
@@ -1147,7 +1149,7 @@ static int inval_cache_and_wait_for_operation(
struct cfi_private *cfi = map->fldrv_priv;
map_word status, status_OK = CMD(0x80);
int chip_state = chip->state;
- unsigned int timeo, sleep_time;
+ unsigned int timeo, sleep_time, reset_timeo;
spin_unlock(chip->mutex);
if (inval_len)
@@ -1158,6 +1160,7 @@ static int inval_cache_and_wait_for_operation(
timeo = chip_op_time * 8;
if (!timeo)
timeo = 500000;
+ reset_timeo = timeo;
sleep_time = chip_op_time / 2;
for (;;) {
@@ -1199,6 +1202,12 @@ static int inval_cache_and_wait_for_operation(
remove_wait_queue(&chip->wq, &wait);
spin_lock(chip->mutex);
}
+ if (chip->erase_suspended || chip->write_suspended) {
+ /* Suspend has occurred while we slept: reset timeout */
+ timeo = reset_timeo;
+ chip->erase_suspended = 0;
+ chip->write_suspended = 0;
+ }
}
/* Done and happy. */
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index f7fcc6389533..a972cc6be436 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -16,9 +16,6 @@
* Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
*
* This code is GPL
- *
- * $Id: cfi_cmdset_0002.c,v 1.122 2005/11/07 11:14:22 gleixner Exp $
- *
*/
#include <linux/module.h>
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 1b720cc571f3..d4714dd9f7ab 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -4,8 +4,6 @@
*
* (C) 2000 Red Hat. GPL'd
*
- * $Id: cfi_cmdset_0020.c,v 1.22 2005/11/07 11:14:22 gleixner Exp $
- *
* 10/10/2000 Nicolas Pitre <nico@cam.org>
* - completely revamped method functions so they are aware and
* independent of the flash geometry (buswidth, interleave, etc.)
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index a4463a91ce31..c418e92e1d92 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -1,7 +1,6 @@
/*
Common Flash Interface probe code.
(C) 2000 Red Hat. GPL'd.
- $Id: cfi_probe.c,v 1.86 2005/11/29 14:48:31 gleixner Exp $
*/
#include <linux/module.h>
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 72e0022a47bf..0ee457018016 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -6,9 +6,6 @@
* Copyright (C) 2003 STMicroelectronics Limited
*
* This code is covered by the GPL.
- *
- * $Id: cfi_util.c,v 1.10 2005/11/07 11:14:23 gleixner Exp $
- *
*/
#include <linux/module.h>
diff --git a/drivers/mtd/chips/chipreg.c b/drivers/mtd/chips/chipreg.c
index 2174c97549f0..c85760968227 100644
--- a/drivers/mtd/chips/chipreg.c
+++ b/drivers/mtd/chips/chipreg.c
@@ -1,6 +1,4 @@
/*
- * $Id: chipreg.c,v 1.17 2004/11/16 18:29:00 dwmw2 Exp $
- *
* Registration for chip drivers
*
*/
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index d338b8c92780..f061885b2812 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -2,7 +2,6 @@
* Routines common to all CFI-type probes.
* (C) 2001-2003 Red Hat, Inc.
* GPL'd
- * $Id: gen_probe.c,v 1.24 2005/11/07 11:14:23 gleixner Exp $
*/
#include <linux/kernel.h>
@@ -71,8 +70,8 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
interleave and device type, etc. */
if (!genprobe_new_chip(map, cp, &cfi)) {
/* The probe didn't like it */
- printk(KERN_DEBUG "%s: Found no %s device at location zero\n",
- cp->name, map->name);
+ pr_debug("%s: Found no %s device at location zero\n",
+ cp->name, map->name);
return NULL;
}
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index aa07575eb288..dbba5abf0db8 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1,7 +1,6 @@
/*
Common Flash Interface probe code.
(C) 2000 Red Hat. GPL'd.
- $Id: jedec_probe.c,v 1.66 2005/11/07 11:14:23 gleixner Exp $
See JEDEC (http://www.jedec.org/) standard JESD21C (section 3.5)
for the standard this probe goes back to.
@@ -26,6 +25,7 @@
/* Manufacturers */
#define MANUFACTURER_AMD 0x0001
#define MANUFACTURER_ATMEL 0x001f
+#define MANUFACTURER_EON 0x001c
#define MANUFACTURER_FUJITSU 0x0004
#define MANUFACTURER_HYUNDAI 0x00AD
#define MANUFACTURER_INTEL 0x0089
@@ -37,6 +37,7 @@
#define MANUFACTURER_ST 0x0020
#define MANUFACTURER_TOSHIBA 0x0098
#define MANUFACTURER_WINBOND 0x00da
+#define CONTINUATION_CODE 0x007f
/* AMD */
@@ -58,6 +59,8 @@
#define AM29LV040B 0x004F
#define AM29F032B 0x0041
#define AM29F002T 0x00B0
+#define AM29SL800DB 0x226B
+#define AM29SL800DT 0x22EA
/* Atmel */
#define AT49BV512 0x0003
@@ -67,6 +70,10 @@
#define AT49BV32X 0x00C8
#define AT49BV32XT 0x00C9
+/* Eon */
+#define EN29SL800BB 0x226B
+#define EN29SL800BT 0x22EA
+
/* Fujitsu */
#define MBM29F040C 0x00A4
#define MBM29F800BA 0x2258
@@ -141,6 +148,8 @@
#define M50FW080 0x002D
#define M50FW016 0x002E
#define M50LPW080 0x002F
+#define M50FLW080A 0x0080
+#define M50FLW080B 0x0081
/* SST */
#define SST29EE020 0x0010
@@ -191,6 +200,7 @@ enum uaddr {
MTD_UADDR_0x0555_0x0AAA,
MTD_UADDR_0x5555_0x2AAA,
MTD_UADDR_0x0AAA_0x0555,
+ MTD_UADDR_0xAAAA_0x5555,
MTD_UADDR_DONT_CARE, /* Requires an arbitrary address */
MTD_UADDR_UNNECESSARY, /* Does not require any address */
};
@@ -238,6 +248,11 @@ static const struct unlock_addr unlock_addrs[] = {
.addr2 = 0x0555
},
+ [MTD_UADDR_0xAAAA_0x5555] = {
+ .addr1 = 0xaaaa,
+ .addr2 = 0x5555
+ },
+
[MTD_UADDR_DONT_CARE] = {
.addr1 = 0x0000, /* Doesn't matter which address */
.addr2 = 0x0000 /* is used - must be last entry */
@@ -522,6 +537,36 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29SL800DT,
+ .name = "AMD AM29SL800DT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29SL800DB,
+ .name = "AMD AM29SL800DB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15),
+ }
+ }, {
.mfr_id = MANUFACTURER_ATMEL,
.dev_id = AT49BV512,
.name = "Atmel AT49BV512",
@@ -599,6 +644,36 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000,8)
}
}, {
+ .mfr_id = MANUFACTURER_EON,
+ .dev_id = EN29SL800BT,
+ .name = "Eon EN29SL800BT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_EON,
+ .dev_id = EN29SL800BB,
+ .name = "Eon EN29SL800BB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15),
+ }
+ }, {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29F040C,
.name = "Fujitsu MBM29F040C",
@@ -1392,8 +1467,8 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST, /* should be CFI */
.dev_id = SST39LF160,
.name = "SST 39LF160",
- .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
- .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
@@ -1405,8 +1480,8 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST, /* should be CFI */
.dev_id = SST39VF1601,
.name = "SST 39VF1601",
- .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
- .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
@@ -1590,6 +1665,36 @@ static const struct amd_flash_info jedec_table[] = {
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
+ },
+ }, {
+ .mfr_id = MANUFACTURER_ST,
+ .dev_id = M50FLW080A,
+ .name = "ST M50FLW080A",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x1000,16),
+ ERASEINFO(0x10000,13),
+ ERASEINFO(0x1000,16),
+ ERASEINFO(0x1000,16),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_ST,
+ .dev_id = M50FLW080B,
+ .name = "ST M50FLW080B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x1000,16),
+ ERASEINFO(0x1000,16),
+ ERASEINFO(0x10000,13),
+ ERASEINFO(0x1000,16),
}
}, {
.mfr_id = MANUFACTURER_TOSHIBA,
@@ -1696,9 +1801,21 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
{
map_word result;
unsigned long mask;
- u32 ofs = cfi_build_cmd_addr(0, cfi_interleave(cfi), cfi->device_type);
- mask = (1 << (cfi->device_type * 8)) -1;
- result = map_read(map, base + ofs);
+ int bank = 0;
+
+ /* According to JEDEC "Standard Manufacturer's Identification Code"
+ * (http://www.jedec.org/download/search/jep106W.pdf)
+ * the first several banks can contain 0x7f instead of the actual ID
+ */
+ do {
+ uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8),
+ cfi_interleave(cfi),
+ cfi->device_type);
+ mask = (1 << (cfi->device_type * 8)) - 1;
+ result = map_read(map, base + ofs);
+ bank++;
+ } while ((result.x[0] & mask) == CONTINUATION_CODE);
+
return result.x[0] & mask;
}
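jedec_read_mfr() now steps over JEDEC continuation codes: any bank that reads back 0x7f is skipped until a real manufacturer ID turns up (0x1c for the newly added Eon parts, for example). A self-contained sketch of the same loop over a plain byte array, with hypothetical ID data:

#include <stdint.h>
#include <stdio.h>

#define CONTINUATION_CODE 0x7f

/* Hypothetical ID bytes as they would be read bank by bank from a chip:
 * two continuation codes, then the real manufacturer ID (0x1c = Eon). */
static const uint8_t id_banks[] = { 0x7f, 0x7f, 0x1c };

int main(void)
{
	unsigned bank = 0;
	uint8_t id;

	do {
		id = id_banks[bank];
		bank++;
	} while (id == CONTINUATION_CODE);

	printf("manufacturer 0x%02x found in bank %u\n", id, bank - 1);
	return 0;
}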
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c
index fc478c0f93f5..494d30d0631a 100644
--- a/drivers/mtd/chips/map_absent.c
+++ b/drivers/mtd/chips/map_absent.c
@@ -1,7 +1,6 @@
/*
* Common code to handle absent "placeholder" devices
* Copyright 2001 Resilience Corporation <ebrower@resilience.com>
- * $Id: map_absent.c,v 1.6 2005/11/07 11:14:23 gleixner Exp $
*
* This map driver is used to allocate "placeholder" MTD
* devices on systems that have socketed/removable media.
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index 5cb6d5263661..072dd8abf33a 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -1,7 +1,6 @@
/*
* Common code to handle map devices which are simple RAM
* (C) 2000 Red Hat. GPL'd.
- * $Id: map_ram.c,v 1.22 2005/01/05 18:05:12 dwmw2 Exp $
*/
#include <linux/module.h>
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index cb27f855074c..821d0ed6bae3 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -1,7 +1,6 @@
/*
* Common code to handle map devices which are simple ROM
* (C) 2000 Red Hat. GPL'd.
- * $Id: map_rom.c,v 1.23 2005/01/05 18:05:12 dwmw2 Exp $
*/
#include <linux/module.h>
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index e472a0e9de9d..71bc07f149b7 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -1,6 +1,4 @@
/*
- * $Id: cmdlinepart.c,v 1.19 2005/11/07 11:14:19 gleixner Exp $
- *
* Read flash partition table from command line
*
* Copyright 2002 SYSGO Real-Time Solutions GmbH
@@ -308,7 +306,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
unsigned long offset;
int i;
struct cmdline_mtd_partition *part;
- char *mtd_id = master->name;
+ const char *mtd_id = master->name;
/* parse command line */
if (!cmdline_parsed)
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 35ed1103dbb2..9c613f06623c 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -1,5 +1,4 @@
# drivers/mtd/maps/Kconfig
-# $Id: Kconfig,v 1.18 2005/11/07 11:14:24 gleixner Exp $
menu "Self-contained MTD device drivers"
depends on MTD!=n
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 0f788d5c4bf8..0993d5cf3923 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -1,7 +1,6 @@
#
# linux/drivers/devices/Makefile
#
-# $Id: Makefile.common,v 1.7 2004/12/22 17:51:15 joern Exp $
obj-$(CONFIG_MTD_DOC2000) += doc2000.o
obj-$(CONFIG_MTD_DOC2001) += doc2001.o
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 7b72a1b36115..91fbba767635 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -1,6 +1,4 @@
/*
- * $Id: block2mtd.c,v 1.30 2005/11/29 14:48:32 gleixner Exp $
- *
* block2mtd.c - create an mtd from a block device
*
* Copyright (C) 2001,2002 Simon Evans <spse@secret.org.uk>
@@ -20,9 +18,6 @@
#include <linux/mutex.h>
#include <linux/mount.h>
-#define VERSION "$Revision: 1.30 $"
-
-
#define ERROR(fmt, args...) printk(KERN_ERR "block2mtd: " fmt "\n" , ## args)
#define INFO(fmt, args...) printk(KERN_INFO "block2mtd: " fmt "\n" , ## args)
@@ -453,7 +448,6 @@ MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
static int __init block2mtd_init(void)
{
int ret = 0;
- INFO("version " VERSION);
#ifndef MODULE
if (strlen(block2mtd_paramline))
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index 846989f292e3..50de839c77a9 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -3,8 +3,6 @@
* Linux driver for Disk-On-Chip 2000 and Millennium
* (c) 1999 Machine Vision Holdings, Inc.
* (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
- *
- * $Id: doc2000.c,v 1.67 2005/11/07 11:14:24 gleixner Exp $
*/
#include <linux/kernel.h>
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index 6413efc045e0..e32c568c1145 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -3,8 +3,6 @@
* Linux driver for Disk-On-Chip Millennium
* (c) 1999 Machine Vision Holdings, Inc.
* (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
- *
- * $Id: doc2001.c,v 1.49 2005/11/07 11:14:24 gleixner Exp $
*/
#include <linux/kernel.h>
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 83be3461658f..d853f891b586 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -6,8 +6,6 @@
* (c) 1999 Machine Vision Holdings, Inc.
* (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
*
- * $Id: doc2001plus.c,v 1.14 2005/11/07 11:14:24 gleixner Exp $
- *
* Released under GPL
*/
diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c
index fd8a8daba3a8..874e51b110a2 100644
--- a/drivers/mtd/devices/docecc.c
+++ b/drivers/mtd/devices/docecc.c
@@ -7,8 +7,6 @@
* Author: Fabrice Bellard (fabrice.bellard@netgem.com)
* Copyright (C) 2000 Netgem S.A.
*
- * $Id: docecc.c,v 1.7 2005/11/07 11:14:25 gleixner Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
index d8cc94ec4e50..6e62922942b1 100644
--- a/drivers/mtd/devices/docprobe.c
+++ b/drivers/mtd/devices/docprobe.c
@@ -4,9 +4,6 @@
/* (C) 1999 Machine Vision Holdings, Inc. */
/* (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> */
-/* $Id: docprobe.c,v 1.46 2005/11/07 11:14:25 gleixner Exp $ */
-
-
/* DOC_PASSIVE_PROBE:
In order to ensure that the BIOS checksum is correct at boot time, and
@@ -79,8 +76,6 @@ static unsigned long __initdata doc_locations[] = {
0xe0000, 0xe2000, 0xe4000, 0xe6000,
0xe8000, 0xea000, 0xec000, 0xee000,
#endif /* CONFIG_MTD_DOCPROBE_HIGH */
-#elif defined(__PPC__)
- 0xe4000000,
#else
#warning Unknown architecture for DiskOnChip. No default probe locations defined
#endif
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 1d324e5c412d..f4bda4cee495 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -2,8 +2,6 @@
/*
* MTD driver for the 28F160F3 Flash Memory (non-CFI) on LART.
*
- * $Id: lart.c,v 1.9 2005/11/07 11:14:25 gleixner Exp $
- *
* Author: Abraham vd Merwe <abraham@2d3d.co.za>
*
* Copyright (c) 2001, 2d3D, Inc.
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index b402269301f6..b35c3333e210 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -33,6 +33,7 @@
/* Flash opcodes. */
#define OPCODE_WREN 0x06 /* Write enable */
#define OPCODE_RDSR 0x05 /* Read status register */
+#define OPCODE_WRSR 0x01 /* Write status register 1 byte */
#define OPCODE_NORM_READ 0x03 /* Read data bytes (low frequency) */
#define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */
#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */
@@ -112,6 +113,17 @@ static int read_sr(struct m25p *flash)
return val;
}
+/*
+ * Write status register 1 byte
+ * Returns negative if error occurred.
+ */
+static int write_sr(struct m25p *flash, u8 val)
+{
+ flash->command[0] = OPCODE_WRSR;
+ flash->command[1] = val;
+
+ return spi_write(flash->spi, flash->command, 2);
+}
/*
* Set write enable latch with Write Enable command.
@@ -589,6 +601,16 @@ static int __devinit m25p_probe(struct spi_device *spi)
mutex_init(&flash->lock);
dev_set_drvdata(&spi->dev, flash);
+ /*
+ * Atmel serial flashes tend to power up
+ * with the software protection bits set
+ */
+
+ if (info->jedec_id >> 16 == 0x1f) {
+ write_enable(flash);
+ write_sr(flash, 0);
+ }
+
if (data && data->name)
flash->mtd.name = data->name;
else
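The m25p80 change issues Write Enable followed by a one-byte Write Status Register of zero so Atmel parts come out of their power-on software protection. A minimal sketch of those two SPI writes; the function and its caller are hypothetical:

#include <linux/spi/spi.h>

#define OPCODE_WREN	0x06	/* Write enable */
#define OPCODE_WRSR	0x01	/* Write status register, 1 byte */

/* Clear all status-register protection bits on a hypothetical SPI NOR part. */
static int demo_unprotect(struct spi_device *spi)
{
	u8 cmd[2];
	int ret;

	cmd[0] = OPCODE_WREN;
	ret = spi_write(spi, cmd, 1);
	if (ret < 0)
		return ret;

	cmd[0] = OPCODE_WRSR;
	cmd[1] = 0;		/* status register value: no blocks protected */
	return spi_write(spi, cmd, 2);
}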
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 9cff119a2024..6a9a24a80a6d 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -5,8 +5,6 @@
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
- *
- * $Id: ms02-nv.c,v 1.11 2005/11/14 13:41:47 macro Exp $
*/
#include <linux/init.h>
diff --git a/drivers/mtd/devices/ms02-nv.h b/drivers/mtd/devices/ms02-nv.h
index 8a6eef7cfee3..04deafd3a771 100644
--- a/drivers/mtd/devices/ms02-nv.h
+++ b/drivers/mtd/devices/ms02-nv.h
@@ -9,8 +9,6 @@
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
- *
- * $Id: ms02-nv.h,v 1.3 2003/08/19 09:25:36 dwmw2 Exp $
*/
#include <linux/ioport.h>
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index b35e4813a3a5..54e36bfc2c3b 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -82,7 +82,7 @@
struct dataflash {
- u8 command[4];
+ uint8_t command[4];
char name[24];
unsigned partitioned:1;
@@ -150,7 +150,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
struct spi_transfer x = { .tx_dma = 0, };
struct spi_message msg;
unsigned blocksize = priv->page_size << 3;
- u8 *command;
+ uint8_t *command;
DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%x len 0x%x\n",
spi->dev.bus_id,
@@ -182,8 +182,8 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
pageaddr = pageaddr << priv->page_offset;
command[0] = do_block ? OP_ERASE_BLOCK : OP_ERASE_PAGE;
- command[1] = (u8)(pageaddr >> 16);
- command[2] = (u8)(pageaddr >> 8);
+ command[1] = (uint8_t)(pageaddr >> 16);
+ command[2] = (uint8_t)(pageaddr >> 8);
command[3] = 0;
DEBUG(MTD_DEBUG_LEVEL3, "ERASE %s: (%x) %x %x %x [%i]\n",
@@ -234,7 +234,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
struct spi_transfer x[2] = { { .tx_dma = 0, }, };
struct spi_message msg;
unsigned int addr;
- u8 *command;
+ uint8_t *command;
int status;
DEBUG(MTD_DEBUG_LEVEL2, "%s: read 0x%x..0x%x\n",
@@ -274,9 +274,9 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
* fewer "don't care" bytes. Both buffers stay unchanged.
*/
command[0] = OP_READ_CONTINUOUS;
- command[1] = (u8)(addr >> 16);
- command[2] = (u8)(addr >> 8);
- command[3] = (u8)(addr >> 0);
+ command[1] = (uint8_t)(addr >> 16);
+ command[2] = (uint8_t)(addr >> 8);
+ command[3] = (uint8_t)(addr >> 0);
/* plus 4 "don't care" bytes */
status = spi_sync(priv->spi, &msg);
@@ -311,7 +311,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t remaining = len;
u_char *writebuf = (u_char *) buf;
int status = -EINVAL;
- u8 *command;
+ uint8_t *command;
DEBUG(MTD_DEBUG_LEVEL2, "%s: write 0x%x..0x%x\n",
spi->dev.bus_id, (unsigned)to, (unsigned)(to + len));
@@ -487,7 +487,9 @@ add_dataflash(struct spi_device *spi, char *name,
device->write = dataflash_write;
device->priv = priv;
- dev_info(&spi->dev, "%s (%d KBytes)\n", name, device->size/1024);
+ dev_info(&spi->dev, "%s (%d KBytes) pagesize %d bytes, "
+ "erasesize %d bytes\n", name, device->size/1024,
+ pagesize, pagesize * 8); /* 8 pages = 1 block */
dev_set_drvdata(&spi->dev, priv);
if (mtd_has_partitions()) {
@@ -521,7 +523,7 @@ add_dataflash(struct spi_device *spi, char *name,
*
* Device Density ID code #Pages PageSize Offset
* AT45DB011B 1Mbit (128K) xx0011xx (0x0c) 512 264 9
- * AT45DB021B 2Mbit (256K) xx0101xx (0x14) 1025 264 9
+ * AT45DB021B 2Mbit (256K) xx0101xx (0x14) 1024 264 9
* AT45DB041B 4Mbit (512K) xx0111xx (0x1c) 2048 264 9
* AT45DB081B 8Mbit (1M) xx1001xx (0x24) 4096 264 9
* AT45DB0161B 16Mbit (2M) xx1011xx (0x2c) 4096 528 10
@@ -529,9 +531,114 @@ add_dataflash(struct spi_device *spi, char *name,
* AT45DB0642 64Mbit (8M) xx111xxx (0x3c) 8192 1056 11
* AT45DB1282 128Mbit (16M) xx0100xx (0x10) 16384 1056 11
*/
+
+struct flash_info {
+ char *name;
+
+ /* JEDEC id zero means "no ID" (most older chips); otherwise it has
+ * a high byte of zero plus three data bytes: the manufacturer id,
+ * then a two byte device id.
+ */
+ uint32_t jedec_id;
+
+ /* The size listed here is what works with OP_ERASE_PAGE. */
+ unsigned nr_pages;
+ uint16_t pagesize;
+ uint16_t pageoffset;
+
+ uint16_t flags;
+#define SUP_POW2PS 0x02
+#define IS_POW2PS 0x01
+};
+
+static struct flash_info __devinitdata dataflash_data [] = {
+
+ { "at45db011d", 0x1f2200, 512, 264, 9, SUP_POW2PS},
+ { "at45db011d", 0x1f2200, 512, 256, 8, SUP_POW2PS | IS_POW2PS},
+
+ { "at45db021d", 0x1f2300, 1024, 264, 9, SUP_POW2PS},
+ { "at45db021d", 0x1f2300, 1024, 256, 8, SUP_POW2PS | IS_POW2PS},
+
+ { "at45db041d", 0x1f2400, 2048, 264, 9, SUP_POW2PS},
+ { "at45db041d", 0x1f2400, 2048, 256, 8, SUP_POW2PS | IS_POW2PS},
+
+ { "at45db081d", 0x1f2500, 4096, 264, 9, SUP_POW2PS},
+ { "at45db081d", 0x1f2500, 4096, 256, 8, SUP_POW2PS | IS_POW2PS},
+
+ { "at45db161d", 0x1f2600, 4096, 528, 10, SUP_POW2PS},
+ { "at45db161d", 0x1f2600, 4096, 512, 9, SUP_POW2PS | IS_POW2PS},
+
+ { "at45db321c", 0x1f2700, 8192, 528, 10, },
+
+ { "at45db321d", 0x1f2701, 8192, 528, 10, SUP_POW2PS},
+ { "at45db321d", 0x1f2701, 8192, 512, 9, SUP_POW2PS | IS_POW2PS},
+
+ { "at45db641d", 0x1f2800, 8192, 1056, 11, SUP_POW2PS},
+ { "at45db641d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
+};
+
+static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
+{
+ int tmp;
+ uint8_t code = OP_READ_ID;
+ uint8_t id[3];
+ uint32_t jedec;
+ struct flash_info *info;
+ int status;
+
+
+ /* JEDEC also defines an optional "extended device information"
+ * string for after vendor-specific data, after the three bytes
+ * we use here. Supporting some chips might require using it.
+ */
+ tmp = spi_write_then_read(spi, &code, 1, id, 3);
+ if (tmp < 0) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
+ spi->dev.bus_id, tmp);
+ return NULL;
+ }
+ jedec = id[0];
+ jedec = jedec << 8;
+ jedec |= id[1];
+ jedec = jedec << 8;
+ jedec |= id[2];
+
+ for (tmp = 0, info = dataflash_data;
+ tmp < ARRAY_SIZE(dataflash_data);
+ tmp++, info++) {
+ if (info->jedec_id == jedec) {
+ if (info->flags & SUP_POW2PS) {
+ status = dataflash_status(spi);
+ if (status & 0x1)
+ /* return power of 2 pagesize */
+ return ++info;
+ else
+ return info;
+ }
+ }
+ }
+ return NULL;
+}
+
static int __devinit dataflash_probe(struct spi_device *spi)
{
int status;
+ struct flash_info *info;
+
+ /*
+ * Try to detect dataflash by JEDEC ID.
+ * If it succeeds we know we have either a C or D part.
+ * D will support power of 2 pagesize option.
+ */
+
+ info = jedec_probe(spi);
+
+ if (info != NULL)
+ return add_dataflash(spi, info->name, info->nr_pages,
+ info->pagesize, info->pageoffset);
+
status = dataflash_status(spi);
if (status <= 0 || status == 0xff) {
@@ -551,16 +658,16 @@ static int __devinit dataflash_probe(struct spi_device *spi)
status = add_dataflash(spi, "AT45DB011B", 512, 264, 9);
break;
case 0x14: /* 0 1 0 1 x x */
- status = add_dataflash(spi, "AT45DB021B", 1025, 264, 9);
+ status = add_dataflash(spi, "AT45DB021B", 1024, 264, 9);
break;
case 0x1c: /* 0 1 1 1 x x */
- status = add_dataflash(spi, "AT45DB041x", 2048, 264, 9);
+ status = add_dataflash(spi, "AT45DB041B", 2048, 264, 9);
break;
case 0x24: /* 1 0 0 1 x x */
status = add_dataflash(spi, "AT45DB081B", 4096, 264, 9);
break;
case 0x2c: /* 1 0 1 1 x x */
- status = add_dataflash(spi, "AT45DB161x", 4096, 528, 10);
+ status = add_dataflash(spi, "AT45DB161B", 4096, 528, 10);
break;
case 0x34: /* 1 1 0 1 x x */
status = add_dataflash(spi, "AT45DB321x", 8192, 528, 10);
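mtd_dataflash now identifies C/D-generation chips by JEDEC ID before falling back to the status-register heuristic. A minimal sketch of fetching the three ID bytes with spi_write_then_read() and packing them the same way; the helper is hypothetical:

#include <linux/spi/spi.h>
#include <linux/types.h>

#define OP_READ_ID	0x9f	/* JEDEC "read identification" opcode */

/* Return the 24-bit JEDEC ID (manufacturer + device), or 0 on error. */
static u32 demo_read_jedec_id(struct spi_device *spi)
{
	u8 code = OP_READ_ID;
	u8 id[3];

	if (spi_write_then_read(spi, &code, 1, id, 3) < 0)
		return 0;

	return (id[0] << 16) | (id[1] << 8) | id[2];
}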
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 0399be178620..3aaca88847d3 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -1,6 +1,5 @@
/*
* mtdram - a test mtd device
- * $Id: mtdram.c,v 1.37 2005/04/21 03:42:11 joern Exp $
* Author: Alexander Larsson <alex@cendio.se>
*
* Copyright (c) 1999 Alexander Larsson <alex@cendio.se>
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index c7987b1c5e01..088fbb7595b5 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -1,6 +1,4 @@
/**
- * $Id: phram.c,v 1.16 2005/11/07 11:14:25 gleixner Exp $
- *
* Copyright (c) ???? Jochen Schäuble <psionic@psionic.de>
* Copyright (c) 2003-2004 Joern Engel <joern@wh.fh-wedel.de>
*
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index bc9981749064..d38bca64bb15 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -1,6 +1,4 @@
/*
- * $Id: pmc551.c,v 1.32 2005/11/07 11:14:25 gleixner Exp $
- *
* PMC551 PCI Mezzanine Ram Device
*
* Author:
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index cb86db746f28..a425d09f35a0 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -1,7 +1,5 @@
/*======================================================================
- $Id: slram.c,v 1.36 2005/11/07 11:14:25 gleixner Exp $
-
This driver provides a method to access memory not used by the kernel
itself (i.e. if the kernel commandline mem=xxx is used). To actually
use slram at least mtdblock or mtdchar is required (for block or
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 5c29872184e6..f34f20c78911 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -1,5 +1,4 @@
/* This version ported to the Linux-MTD system by dwmw2@infradead.org
- * $Id: ftl.c,v 1.59 2005/11/29 14:48:31 gleixner Exp $
*
* Fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
* - fixes some leaks on failure in build_maps and ftl_notify_add, cleanups
@@ -1078,8 +1077,6 @@ static struct mtd_blktrans_ops ftl_tr = {
static int init_ftl(void)
{
- DEBUG(0, "$Id: ftl.c,v 1.59 2005/11/29 14:48:31 gleixner Exp $\n");
-
return register_mtd_blktrans(&ftl_tr);
}
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index b0e396504e67..c4f9d3378b24 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -7,8 +7,6 @@
* (c) 1999 Machine Vision Holdings, Inc.
* Author: David Woodhouse <dwmw2@infradead.org>
*
- * $Id: inftlcore.c,v 1.19 2005/11/07 11:14:20 gleixner Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -953,9 +951,6 @@ static struct mtd_blktrans_ops inftl_tr = {
static int __init init_inftl(void)
{
- printk(KERN_INFO "INFTL: inftlcore.c $Revision: 1.19 $, "
- "inftlmount.c %s\n", inftlmountrev);
-
return register_mtd_blktrans(&inftl_tr);
}
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index c551d2f0779c..9113628ed1ef 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -8,8 +8,6 @@
* Author: Fabrice Bellard (fabrice.bellard@netgem.com)
* Copyright (C) 2000 Netgem S.A.
*
- * $Id: inftlmount.c,v 1.18 2005/11/07 11:14:20 gleixner Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -39,8 +37,6 @@
#include <linux/mtd/inftl.h>
#include <linux/mtd/compatmac.h>
-char inftlmountrev[]="$Revision: 1.18 $";
-
/*
* find_boot_record: Find the INFTL Media Header and its Spare copy which
* contains the various device information of the INFTL partition and
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index d2fbc2964523..df8e00bba07b 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -1,5 +1,4 @@
# drivers/mtd/maps/Kconfig
-# $Id: Kconfig,v 1.61 2005/11/07 11:14:26 gleixner Exp $
menu "Mapping drivers for chip access"
depends on MTD!=n
@@ -510,6 +509,17 @@ config MTD_PCMCIA_ANONYMOUS
If unsure, say N.
+config MTD_BFIN_ASYNC
+ tristate "Blackfin BF533-STAMP Flash Chip Support"
+ depends on BFIN533_STAMP && MTD_CFI
+ select MTD_PARTITIONS
+ default y
+ help
+ Map driver that allows simultaneous use of ethernet and
+ CFI parallel flash mapped onto the same async bank.
+
+ If compiled as a module, it will be called bfin-async-flash.
+
config MTD_UCLINUX
tristate "Generic uClinux RAM/ROM filesystem support"
depends on MTD_PARTITIONS && !MMU
@@ -539,24 +549,6 @@ config MTD_DMV182
help
Map driver for Dy-4 SVME/DMV-182 board.
-config MTD_BAST
- tristate "Map driver for Simtec BAST (EB2410ITX) or Thorcom VR1000"
- depends on ARCH_BAST || MACH_VR1000
- select MTD_PARTITIONS
- select MTD_MAP_BANK_WIDTH_16
- select MTD_JEDECPROBE
- help
- Map driver for NOR flash on the Simtec BAST (EB2410ITX), or the
- Thorcom VR1000
-
- Note, this driver *cannot* over-ride the WP link on the
- board, or currently detect the state of the link.
-
-config MTD_BAST_MAXSIZE
- int "Maximum size for BAST flash area (MiB)"
- depends on MTD_BAST
- default "4"
-
config MTD_SHARP_SL
tristate "ROM mapped on Sharp SL Series"
depends on ARCH_PXA
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index c6ce8673dab2..6cda6df973e5 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -1,7 +1,6 @@
#
# linux/drivers/maps/Makefile
#
-# $Id: Makefile.common,v 1.34 2005/11/07 11:14:26 gleixner Exp $
ifeq ($(CONFIG_MTD_COMPLEX_MAPPINGS),y)
obj-$(CONFIG_MTD) += map_funcs.o
@@ -10,7 +9,6 @@ endif
# Chip mappings
obj-$(CONFIG_MTD_CDB89712) += cdb89712.o
obj-$(CONFIG_MTD_ARM_INTEGRATOR)+= integrator-flash.o
-obj-$(CONFIG_MTD_BAST) += bast-flash.o
obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
obj-$(CONFIG_MTD_DC21285) += dc21285.o
obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o
@@ -66,3 +64,4 @@ obj-$(CONFIG_MTD_SHARP_SL) += sharpsl-flash.o
obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o
obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
+obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 728aed6ad722..948b86f35ef4 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -2,7 +2,6 @@
* amd76xrom.c
*
* Normal mappings of chips in physical memory
- * $Id: amd76xrom.c,v 1.21 2005/11/07 11:14:26 gleixner Exp $
*/
#include <linux/module.h>
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
index 7ed3424dd959..cf32267263df 100644
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -2,8 +2,6 @@
* NV-RAM memory access on autcpu12
* (C) 2002 Thomas Gleixner (gleixner@autronix.de)
*
- * $Id: autcpu12-nvram.c,v 1.9 2005/11/07 11:14:26 gleixner Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
diff --git a/drivers/mtd/maps/bast-flash.c b/drivers/mtd/maps/bast-flash.c
deleted file mode 100644
index 1f492062f8ca..000000000000
--- a/drivers/mtd/maps/bast-flash.c
+++ /dev/null
@@ -1,226 +0,0 @@
-/* linux/drivers/mtd/maps/bast-flash.c
- *
- * Copyright (c) 2004-2005 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * Simtec Bast (EB2410ITX) NOR MTD Mapping driver
- *
- * Changelog:
- * 20-Sep-2004 BJD Initial version
- * 17-Jan-2005 BJD Add whole device if no partitions found
- *
- * $Id: bast-flash.c,v 1.5 2005/11/07 11:14:26 gleixner Exp $
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-#include <asm/mach/flash.h>
-
-#include <asm/arch/map.h>
-#include <asm/arch/bast-map.h>
-#include <asm/arch/bast-cpld.h>
-
-#ifdef CONFIG_MTD_BAST_MAXSIZE
-#define AREA_MAXSIZE (CONFIG_MTD_BAST_MAXSIZE * SZ_1M)
-#else
-#define AREA_MAXSIZE (32 * SZ_1M)
-#endif
-
-#define PFX "bast-flash: "
-
-struct bast_flash_info {
- struct mtd_info *mtd;
- struct map_info map;
- struct mtd_partition *partitions;
- struct resource *area;
-};
-
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-
-static void bast_flash_setrw(int to)
-{
- unsigned int val;
- unsigned long flags;
-
- local_irq_save(flags);
- val = __raw_readb(BAST_VA_CTRL3);
-
- if (to)
- val |= BAST_CPLD_CTRL3_ROMWEN;
- else
- val &= ~BAST_CPLD_CTRL3_ROMWEN;
-
- pr_debug("new cpld ctrl3=%02x\n", val);
-
- __raw_writeb(val, BAST_VA_CTRL3);
- local_irq_restore(flags);
-}
-
-static int bast_flash_remove(struct platform_device *pdev)
-{
- struct bast_flash_info *info = platform_get_drvdata(pdev);
-
- platform_set_drvdata(pdev, NULL);
-
- if (info == NULL)
- return 0;
-
- if (info->map.virt != NULL)
- iounmap(info->map.virt);
-
- if (info->mtd) {
- del_mtd_partitions(info->mtd);
- map_destroy(info->mtd);
- }
-
- kfree(info->partitions);
-
- if (info->area) {
- release_resource(info->area);
- kfree(info->area);
- }
-
- kfree(info);
-
- return 0;
-}
-
-static int bast_flash_probe(struct platform_device *pdev)
-{
- struct bast_flash_info *info;
- struct resource *res;
- int err = 0;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (info == NULL) {
- printk(KERN_ERR PFX "no memory for flash info\n");
- err = -ENOMEM;
- goto exit_error;
- }
-
- memzero(info, sizeof(*info));
- platform_set_drvdata(pdev, info);
-
- res = pdev->resource; /* assume that the flash has one resource */
-
- info->map.phys = res->start;
- info->map.size = res->end - res->start + 1;
- info->map.name = pdev->dev.bus_id;
- info->map.bankwidth = 2;
-
- if (info->map.size > AREA_MAXSIZE)
- info->map.size = AREA_MAXSIZE;
-
- pr_debug("%s: area %08lx, size %ld\n", __func__,
- info->map.phys, info->map.size);
-
- info->area = request_mem_region(res->start, info->map.size,
- pdev->name);
- if (info->area == NULL) {
- printk(KERN_ERR PFX "cannot reserve flash memory region\n");
- err = -ENOENT;
- goto exit_error;
- }
-
- info->map.virt = ioremap(res->start, info->map.size);
- pr_debug("%s: virt at %08x\n", __func__, (int)info->map.virt);
-
- if (info->map.virt == 0) {
- printk(KERN_ERR PFX "failed to ioremap() region\n");
- err = -EIO;
- goto exit_error;
- }
-
- simple_map_init(&info->map);
-
- /* enable the write to the flash area */
-
- bast_flash_setrw(1);
-
- /* probe for the device(s) */
-
- info->mtd = do_map_probe("jedec_probe", &info->map);
- if (info->mtd == NULL)
- info->mtd = do_map_probe("cfi_probe", &info->map);
-
- if (info->mtd == NULL) {
- printk(KERN_ERR PFX "map_probe() failed\n");
- err = -ENXIO;
- goto exit_error;
- }
-
- /* mark ourselves as the owner */
- info->mtd->owner = THIS_MODULE;
-
- err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
- if (err > 0) {
- err = add_mtd_partitions(info->mtd, info->partitions, err);
- if (err)
- printk(KERN_ERR PFX "cannot add/parse partitions\n");
- } else {
- err = add_mtd_device(info->mtd);
- }
-
- if (err == 0)
- return 0;
-
- /* fall through to exit error */
-
- exit_error:
- bast_flash_remove(pdev);
- return err;
-}
-
-static struct platform_driver bast_flash_driver = {
- .probe = bast_flash_probe,
- .remove = bast_flash_remove,
- .driver = {
- .name = "bast-nor",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init bast_flash_init(void)
-{
- printk("BAST NOR-Flash Driver, (c) 2004 Simtec Electronics\n");
- return platform_driver_register(&bast_flash_driver);
-}
-
-static void __exit bast_flash_exit(void)
-{
- platform_driver_unregister(&bast_flash_driver);
-}
-
-module_init(bast_flash_init);
-module_exit(bast_flash_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("BAST MTD Map driver");
-MODULE_ALIAS("platform:bast-nor");
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
new file mode 100644
index 000000000000..6fec86aaed7e
--- /dev/null
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -0,0 +1,219 @@
+/*
+ * drivers/mtd/maps/bfin-async-flash.c
+ *
+ * Handle the case where flash memory and ethernet mac/phy are
+ * mapped onto the same async bank. The BF533-STAMP does this
+ * for example. All board-specific configuration goes in your
+ * board resources file.
+ *
+ * Copyright 2000 Nicolas Pitre <nico@cam.org>
+ * Copyright 2005-2008 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <asm/blackfin.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <asm/unaligned.h>
+
+#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); })
+
+#define DRIVER_NAME "bfin-async-flash"
+
+struct async_state {
+ struct mtd_info *mtd;
+ struct map_info map;
+ int enet_flash_pin;
+ uint32_t flash_ambctl0, flash_ambctl1;
+ uint32_t save_ambctl0, save_ambctl1;
+ unsigned long irq_flags;
+};
+
+static void switch_to_flash(struct async_state *state)
+{
+ local_irq_save(state->irq_flags);
+
+ gpio_set_value(state->enet_flash_pin, 0);
+
+ state->save_ambctl0 = bfin_read_EBIU_AMBCTL0();
+ state->save_ambctl1 = bfin_read_EBIU_AMBCTL1();
+ bfin_write_EBIU_AMBCTL0(state->flash_ambctl0);
+ bfin_write_EBIU_AMBCTL1(state->flash_ambctl1);
+ SSYNC();
+}
+
+static void switch_back(struct async_state *state)
+{
+ bfin_write_EBIU_AMBCTL0(state->save_ambctl0);
+ bfin_write_EBIU_AMBCTL1(state->save_ambctl1);
+ SSYNC();
+
+ gpio_set_value(state->enet_flash_pin, 1);
+
+ local_irq_restore(state->irq_flags);
+}
+
+static map_word bfin_read(struct map_info *map, unsigned long ofs)
+{
+ struct async_state *state = (struct async_state *)map->map_priv_1;
+ uint16_t word;
+ map_word test;
+
+ switch_to_flash(state);
+
+ word = readw(map->virt + ofs);
+
+ switch_back(state);
+
+ test.x[0] = word;
+ return test;
+}
+
+static void bfin_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ struct async_state *state = (struct async_state *)map->map_priv_1;
+
+ switch_to_flash(state);
+
+ memcpy(to, map->virt + from, len);
+
+ switch_back(state);
+}
+
+static void bfin_write(struct map_info *map, map_word d1, unsigned long ofs)
+{
+ struct async_state *state = (struct async_state *)map->map_priv_1;
+ uint16_t d;
+
+ d = d1.x[0];
+
+ switch_to_flash(state);
+
+ writew(d, map->virt + ofs);
+ SSYNC();
+
+ switch_back(state);
+}
+
+static void bfin_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ struct async_state *state = (struct async_state *)map->map_priv_1;
+
+ switch_to_flash(state);
+
+ memcpy(map->virt + to, from, len);
+ SSYNC();
+
+ switch_back(state);
+}
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
+#endif
+
+static int __devinit bfin_flash_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct physmap_flash_data *pdata = pdev->dev.platform_data;
+ struct resource *memory = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct resource *flash_ambctl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ struct async_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->map.name = DRIVER_NAME;
+ state->map.read = bfin_read;
+ state->map.copy_from = bfin_copy_from;
+ state->map.write = bfin_write;
+ state->map.copy_to = bfin_copy_to;
+ state->map.bankwidth = pdata->width;
+ state->map.size = memory->end - memory->start + 1;
+ state->map.virt = (void __iomem *)memory->start;
+ state->map.phys = memory->start;
+ state->map.map_priv_1 = (unsigned long)state;
+ state->enet_flash_pin = platform_get_irq(pdev, 0);
+ state->flash_ambctl0 = flash_ambctl->start;
+ state->flash_ambctl1 = flash_ambctl->end;
+
+ if (gpio_request(state->enet_flash_pin, DRIVER_NAME)) {
+ pr_devinit(KERN_ERR DRIVER_NAME ": Failed to request gpio %d\n", state->enet_flash_pin);
+ return -EBUSY;
+ }
+ gpio_direction_output(state->enet_flash_pin, 1);
+
+ pr_devinit(KERN_NOTICE DRIVER_NAME ": probing %d-bit flash bus\n", state->map.bankwidth * 8);
+ state->mtd = do_map_probe(memory->name, &state->map);
+ if (!state->mtd)
+ return -ENXIO;
+
+#ifdef CONFIG_MTD_PARTITIONS
+ ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0);
+ if (ret > 0) {
+ pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n");
+ add_mtd_partitions(state->mtd, pdata->parts, ret);
+
+ } else if (pdata->nr_parts) {
+ pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n");
+ add_mtd_partitions(state->mtd, pdata->parts, pdata->nr_parts);
+
+ } else
+#endif
+ {
+ pr_devinit(KERN_NOTICE DRIVER_NAME ": no partition info available, registering whole flash at once\n");
+ add_mtd_device(state->mtd);
+ }
+
+ platform_set_drvdata(pdev, state);
+
+ return 0;
+}
+
+static int __devexit bfin_flash_remove(struct platform_device *pdev)
+{
+ struct async_state *state = platform_get_drvdata(pdev);
+ gpio_free(state->enet_flash_pin);
+#ifdef CONFIG_MTD_PARTITIONS
+ del_mtd_partitions(state->mtd);
+#endif
+ map_destroy(state->mtd);
+ kfree(state);
+ return 0;
+}
+
+static struct platform_driver bfin_flash_driver = {
+ .probe = bfin_flash_probe,
+ .remove = __devexit_p(bfin_flash_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init bfin_flash_init(void)
+{
+ return platform_driver_register(&bfin_flash_driver);
+}
+module_init(bfin_flash_init);
+
+static void __exit bfin_flash_exit(void)
+{
+ platform_driver_unregister(&bfin_flash_driver);
+}
+module_exit(bfin_flash_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MTD map driver for Blackfins with flash/ethernet on same async bank");
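
Since all board-specific configuration for this driver lives in the board resources file (as the header comment above notes), here is a hedged sketch of what such a platform device could look like, inferred from bfin_flash_probe(): memory resource 0 is the flash window and its name selects the chip probe passed to do_map_probe(), memory resource 1 carries the AMBCTL0/AMBCTL1 values in its start/end fields, and IRQ resource 0 is really the GPIO that steers the bank between flash and the ethernet MAC/PHY. All addresses, timing values and the GPIO number are illustrative, not taken from any real board file.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>

static struct physmap_flash_data bfin_flash_data = {
	.width		= 2,			/* becomes map.bankwidth */
};

static struct resource bfin_flash_resources[] = {
	{
		/* resource 0: the flash window; the name picks the probe */
		.name	= "cfi_probe",
		.start	= 0x20000000,		/* illustrative bank base */
		.end	= 0x203fffff,
		.flags	= IORESOURCE_MEM,
	}, {
		/* resource 1: AMBCTL0 (start) / AMBCTL1 (end) values used
		 * while the flash owns the bank -- illustrative timings */
		.start	= 0x7bb07bb0,
		.end	= 0xffc27bb0,
		.flags	= IORESOURCE_MEM,
	}, {
		/* IRQ resource 0: the GPIO gating flash vs. ethernet */
		.start	= 0,			/* illustrative GPIO number */
		.end	= 0,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device bfin_flash_device = {
	.name		= "bfin-async-flash",	/* must match DRIVER_NAME */
	.id		= 0,
	.dev		= { .platform_data = &bfin_flash_data },
	.num_resources	= ARRAY_SIZE(bfin_flash_resources),
	.resource	= bfin_flash_resources,
};

static int __init my_board_add_devices(void)
{
	return platform_device_register(&bfin_flash_device);
}
arch_initcall(my_board_add_devices);

Once the device is registered by the board init code, bfin_flash_probe() above takes over and maps the bank through the switch_to_flash()/switch_back() helpers.
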
diff --git a/drivers/mtd/maps/cdb89712.c b/drivers/mtd/maps/cdb89712.c
index 9f17bb6c5a9d..cb507da0a87d 100644
--- a/drivers/mtd/maps/cdb89712.c
+++ b/drivers/mtd/maps/cdb89712.c
@@ -1,7 +1,6 @@
/*
* Flash on Cirrus CDB89712
*
- * $Id: cdb89712.c,v 1.11 2005/11/07 11:14:26 gleixner Exp $
*/
#include <linux/module.h>
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
index 629e6e2641a8..6464d487eb1a 100644
--- a/drivers/mtd/maps/ceiva.c
+++ b/drivers/mtd/maps/ceiva.c
@@ -11,7 +11,6 @@
*
* (C) 2000 Nicolas Pitre <nico@cam.org>
*
- * $Id: ceiva.c,v 1.11 2004/09/16 23:27:12 gleixner Exp $
*/
#include <linux/module.h>
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index 65e5ee552010..0ecc3f6d735b 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -1,8 +1,6 @@
/*
* Copyright © 2001 Flaga hf. Medical Devices, Kári Davíðsson <kd@flaga.is>
*
- * $Id: cfi_flagadm.c,v 1.15 2005/11/07 11:14:26 gleixner Exp $
- *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c
index 92a9c7fac993..e115667bf1d0 100644
--- a/drivers/mtd/maps/dbox2-flash.c
+++ b/drivers/mtd/maps/dbox2-flash.c
@@ -1,6 +1,4 @@
/*
- * $Id: dbox2-flash.c,v 1.14 2005/11/07 11:14:26 gleixner Exp $
- *
* D-Box 2 flash driver
*/
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index b32bb9347d71..3aa018c092f8 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -4,8 +4,6 @@
* (C) 2000 Nicolas Pitre <nico@cam.org>
*
* This code is GPL
- *
- * $Id: dc21285.c,v 1.24 2005/11/07 11:14:26 gleixner Exp $
*/
#include <linux/module.h>
#include <linux/types.h>
diff --git a/drivers/mtd/maps/dilnetpc.c b/drivers/mtd/maps/dilnetpc.c
index 1c3b34ad7325..0713e3a5a22c 100644
--- a/drivers/mtd/maps/dilnetpc.c
+++ b/drivers/mtd/maps/dilnetpc.c
@@ -14,8 +14,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
- * $Id: dilnetpc.c,v 1.20 2005/11/07 11:14:26 gleixner Exp $
- *
* The DIL/Net PC is a tiny embedded PC board made by SSV Embedded Systems
* featuring the AMD Elan SC410 processor. There are two variants of this
* board: DNP/1486 and ADNP/1486. The DNP version has 2 megs of flash
diff --git a/drivers/mtd/maps/dmv182.c b/drivers/mtd/maps/dmv182.c
index e0558b0b2fe6..d171674eb2ed 100644
--- a/drivers/mtd/maps/dmv182.c
+++ b/drivers/mtd/maps/dmv182.c
@@ -4,8 +4,6 @@
*
* Flash map driver for the Dy4 SVME182 board
*
- * $Id: dmv182.c,v 1.6 2005/11/07 11:14:26 gleixner Exp $
- *
* Copyright 2003-2004, TimeSys Corporation
*
* Based on the SVME181 flash map, by Tom Nelson, Dot4, Inc. for TimeSys Corp.
diff --git a/drivers/mtd/maps/ebony.c b/drivers/mtd/maps/ebony.c
index 1488bb92f26f..d92b7c70d3ed 100644
--- a/drivers/mtd/maps/ebony.c
+++ b/drivers/mtd/maps/ebony.c
@@ -1,6 +1,4 @@
/*
- * $Id: ebony.c,v 1.16 2005/11/07 11:14:26 gleixner Exp $
- *
* Mapping for Ebony user flash
*
* Matt Porter <mporter@kernel.crashing.org>
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c
index 1c5b97c89685..9433738c1664 100644
--- a/drivers/mtd/maps/edb7312.c
+++ b/drivers/mtd/maps/edb7312.c
@@ -1,6 +1,4 @@
/*
- * $Id: edb7312.c,v 1.14 2005/11/07 11:14:27 gleixner Exp $
- *
* Handle mapping of the NOR flash on Cogent EDB7312 boards
*
* Copyright 2002 SYSGO Real-Time Solutions GmbH
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
index 7c50c271651c..a8e3fde4cbd5 100644
--- a/drivers/mtd/maps/fortunet.c
+++ b/drivers/mtd/maps/fortunet.c
@@ -1,6 +1,5 @@
/* fortunet.c memory map
*
- * $Id: fortunet.c,v 1.11 2005/11/07 11:14:27 gleixner Exp $
*/
#include <linux/module.h>
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 6dde3182d64a..ef8915474462 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -2,8 +2,6 @@
* Flash memory access on Hynix GMS30C7201/HMS30C7202 based
* evaluation boards
*
- * $Id: h720x-flash.c,v 1.12 2005/11/07 11:14:27 gleixner Exp $
- *
* (C) 2002 Jungjun Kim <jungjun.kim@hynix.com>
* 2003 Thomas Gleixner <tglx@linutronix.de>
*/
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 2c884c49e84a..aeb6c916e23f 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -2,7 +2,6 @@
* ichxrom.c
*
* Normal mappings of chips in physical memory
- * $Id: ichxrom.c,v 1.19 2005/11/07 11:14:27 gleixner Exp $
*/
#include <linux/module.h>
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index a0b4dc7155dc..2682ab51a367 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -1,6 +1,4 @@
/*
- * $Id: impa7.c,v 1.14 2005/11/07 11:14:27 gleixner Exp $
- *
* Handle mapping of the NOR flash on implementa A7 boards
*
* Copyright 2002 SYSGO Real-Time Solutions GmbH
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index 325c8880c437..ee361aaadb1e 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -22,8 +22,6 @@
This is access code for flashes using ARM's flash partitioning
standards.
- $Id: integrator-flash.c,v 1.20 2005/11/07 11:14:27 gleixner Exp $
-
======================================================================*/
#include <linux/module.h>
diff --git a/drivers/mtd/maps/ipaq-flash.c b/drivers/mtd/maps/ipaq-flash.c
index f27c132794c3..a806119797e0 100644
--- a/drivers/mtd/maps/ipaq-flash.c
+++ b/drivers/mtd/maps/ipaq-flash.c
@@ -4,8 +4,6 @@
* (C) 2000 Nicolas Pitre <nico@cam.org>
* (C) 2002 Hewlett-Packard Company <jamey.hicks@hp.com>
* (C) 2003 Christian Pellegrin <chri@ascensit.com>, <chri@infis.univ.ts.it>: concatenation of multiple flashes
- *
- * $Id: ipaq-flash.c,v 1.5 2005/11/07 11:14:27 gleixner Exp $
*/
#include <linux/module.h>
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index c8396b8574c4..c2264792a20b 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -1,6 +1,4 @@
/*
- * $Id: ixp2000.c,v 1.9 2005/11/07 11:14:27 gleixner Exp $
- *
* drivers/mtd/maps/ixp2000.c
*
* Mapping for the Intel XScale IXP2000 based systems
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 01f19a4714b5..9c7a5fbd4e51 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -1,6 +1,4 @@
/*
- * $Id: ixp4xx.c,v 1.13 2005/11/16 16:23:21 dvrabel Exp $
- *
* drivers/mtd/maps/ixp4xx.c
*
* MTD Map file for IXP4XX based systems. Please do not make per-board
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index 67620adf4811..9e054503c4cf 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -1,6 +1,4 @@
/*
- * $Id: l440gx.c,v 1.18 2005/11/07 11:14:27 gleixner Exp $
- *
* BIOS Flash chip on Intel 440GX board.
*
* Bugs this currently does not work under linuxBIOS.
diff --git a/drivers/mtd/maps/map_funcs.c b/drivers/mtd/maps/map_funcs.c
index 9105e6ca0aa6..3f268370eeca 100644
--- a/drivers/mtd/maps/map_funcs.c
+++ b/drivers/mtd/maps/map_funcs.c
@@ -1,6 +1,4 @@
/*
- * $Id: map_funcs.c,v 1.10 2005/06/06 23:04:36 tpoynor Exp $
- *
* Out-of-line map I/O functions for simple maps when CONFIG_COMPLEX_MAPPINGS
* is enabled.
*/
diff --git a/drivers/mtd/maps/mbx860.c b/drivers/mtd/maps/mbx860.c
index 06b118727846..706f67394b07 100644
--- a/drivers/mtd/maps/mbx860.c
+++ b/drivers/mtd/maps/mbx860.c
@@ -1,6 +1,4 @@
/*
- * $Id: mbx860.c,v 1.9 2005/11/07 11:14:27 gleixner Exp $
- *
* Handle mapping of the flash on MBX860 boards
*
* Author: Anton Todorov
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
index 95dcab2146ad..c0cb319b2b70 100644
--- a/drivers/mtd/maps/netsc520.c
+++ b/drivers/mtd/maps/netsc520.c
@@ -3,8 +3,6 @@
* Copyright (C) 2001 Mark Langsdorf (mark.langsdorf@amd.com)
* based on sc520cdp.c by Sysgo Real-Time Solutions GmbH
*
- * $Id: netsc520.c,v 1.14 2005/11/07 11:14:27 gleixner Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 0c9b305a72e0..965e6c6d6ab0 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -5,8 +5,6 @@
*
* (C) Copyright 2000-2001, Greg Ungerer (gerg@snapgear.com)
* (C) Copyright 2001-2002, SnapGear (www.snapgear.com)
- *
- * $Id: nettel.c,v 1.12 2005/11/29 14:30:00 gleixner Exp $
*/
/****************************************************************************/
diff --git a/drivers/mtd/maps/octagon-5066.c b/drivers/mtd/maps/octagon-5066.c
index a6642db3d325..43e04c1d22a9 100644
--- a/drivers/mtd/maps/octagon-5066.c
+++ b/drivers/mtd/maps/octagon-5066.c
@@ -1,4 +1,3 @@
-// $Id: octagon-5066.c,v 1.28 2005/11/07 11:14:27 gleixner Exp $
/* ######################################################################
Octagon 5066 MTD Driver.
diff --git a/drivers/mtd/maps/omap-toto-flash.c b/drivers/mtd/maps/omap-toto-flash.c
index e6e391efbeb6..0a60ebbc2175 100644
--- a/drivers/mtd/maps/omap-toto-flash.c
+++ b/drivers/mtd/maps/omap-toto-flash.c
@@ -4,8 +4,6 @@
* jzhang@ti.com (C) 2003 Texas Instruments.
*
* (C) 2002 MontVista Software, Inc.
- *
- * $Id: omap-toto-flash.c,v 1.5 2005/11/07 11:14:27 gleixner Exp $
*/
#include <linux/module.h>
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index d2ab1bae9c34..5c6a25c90380 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -7,8 +7,6 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * $Id: pci.c,v 1.14 2005/11/17 08:20:27 dwmw2 Exp $
- *
* Generic PCI memory map driver. We support the following boards:
* - Intel IQ80310 ATU.
* - Intel EBSA285 (blank rom programming mode). Tested working 27/09/2001
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 0cc31675aeb9..90924fb00481 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -1,6 +1,4 @@
/*
- * $Id: pcmciamtd.c,v 1.55 2005/11/07 11:14:28 gleixner Exp $
- *
* pcmciamtd.c - MTD driver for PCMCIA flash memory cards
*
* Author: Simon Evans <spse@secret.org.uk>
@@ -48,7 +46,6 @@ static const int debug = 0;
#define DRIVER_DESC "PCMCIA Flash memory card driver"
-#define DRIVER_VERSION "$Revision: 1.55 $"
/* Size of the PCMCIA address space: 26 bits = 64 MB */
#define MAX_PCMCIA_ADDR 0x4000000
@@ -785,7 +782,7 @@ static struct pcmcia_driver pcmciamtd_driver = {
static int __init init_pcmciamtd(void)
{
- info(DRIVER_DESC " " DRIVER_VERSION);
+ info(DRIVER_DESC);
if(bankwidth && bankwidth != 1 && bankwidth != 2) {
info("bad bankwidth (%d), using default", bankwidth);
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 183255fcfdcb..42d844f8f6bf 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -1,6 +1,4 @@
/*
- * $Id: physmap.c,v 1.39 2005/11/29 14:49:36 gleixner Exp $
- *
* Normal mappings of chips in physical memory
*
* Copyright (C) 2003 MontaVista Software Inc.
@@ -203,7 +201,19 @@ static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state
int i;
for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- ret |= info->mtd[i]->suspend(info->mtd[i]);
+ if (info->mtd[i]->suspend) {
+ ret = info->mtd[i]->suspend(info->mtd[i]);
+ if (ret)
+ goto fail;
+ }
+
+ return 0;
+fail:
+ for (--i; i >= 0; --i)
+ if (info->mtd[i]->suspend) {
+ BUG_ON(!info->mtd[i]->resume);
+ info->mtd[i]->resume(info->mtd[i]);
+ }
return ret;
}
@@ -214,7 +224,8 @@ static int physmap_flash_resume(struct platform_device *dev)
int i;
for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- info->mtd[i]->resume(info->mtd[i]);
+ if (info->mtd[i]->resume)
+ info->mtd[i]->resume(info->mtd[i]);
return 0;
}
@@ -225,8 +236,9 @@ static void physmap_flash_shutdown(struct platform_device *dev)
int i;
for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- if (info->mtd[i]->suspend(info->mtd[i]) == 0)
- info->mtd[i]->resume(info->mtd[i]);
+ if (info->mtd[i]->suspend && info->mtd[i]->resume)
+ if (info->mtd[i]->suspend(info->mtd[i]) == 0)
+ info->mtd[i]->resume(info->mtd[i]);
}
#else
#define physmap_flash_suspend NULL
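
The physmap_flash_suspend() rework above replaces the old "ret |=" accumulation with a suspend-or-roll-back sequence: each chip that implements suspend is suspended in turn, and on the first failure everything already suspended is resumed so no chip is left half-suspended. A generic standalone sketch of that pattern, using hypothetical callbacks rather than the MTD API:

/*
 * Suspend devs[0..n-1]; on failure, resume whatever was already
 * suspended and return the error (the kernel code additionally
 * BUG()s if a suspended chip has no resume handler).
 */
struct dev_ops {
	int  (*suspend)(void *priv);	/* may be NULL */
	void (*resume)(void *priv);	/* may be NULL */
	void *priv;
};

int suspend_all(struct dev_ops *devs, int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		if (!devs[i].suspend)
			continue;
		ret = devs[i].suspend(devs[i].priv);
		if (ret)
			goto rollback;
	}
	return 0;

rollback:
	for (--i; i >= 0; --i)
		if (devs[i].suspend && devs[i].resume)
			devs[i].resume(devs[i].priv);
	return ret;
}
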
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 3eb2643b2328..e7dd9c8a965e 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -6,8 +6,6 @@
*
* Generic platfrom device based RAM map
*
- * $Id: plat-ram.c,v 1.7 2005/11/07 11:14:28 gleixner Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
diff --git a/drivers/mtd/maps/redwood.c b/drivers/mtd/maps/redwood.c
index 4d858b3d5f82..de002eb1a7fe 100644
--- a/drivers/mtd/maps/redwood.c
+++ b/drivers/mtd/maps/redwood.c
@@ -1,6 +1,4 @@
/*
- * $Id: redwood.c,v 1.11 2005/11/07 11:14:28 gleixner Exp $
- *
* drivers/mtd/maps/redwood.c
*
* FLASH map for the IBM Redwood 4/5/6 boards.
diff --git a/drivers/mtd/maps/rpxlite.c b/drivers/mtd/maps/rpxlite.c
index 809a0c8e7aaf..14d90edb4430 100644
--- a/drivers/mtd/maps/rpxlite.c
+++ b/drivers/mtd/maps/rpxlite.c
@@ -1,6 +1,4 @@
/*
- * $Id: rpxlite.c,v 1.22 2004/11/04 13:24:15 gleixner Exp $
- *
* Handle mapping of the flash on the RPX Lite and CLLF boards
*/
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index c7d5a52a2d55..e177a43dfff0 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -2,8 +2,6 @@
* Flash memory access on SA11x0 based devices
*
* (C) 2000 Nicolas Pitre <nico@cam.org>
- *
- * $Id: sa1100-flash.c,v 1.51 2005/11/07 11:14:28 gleixner Exp $
*/
#include <linux/module.h>
#include <linux/types.h>
diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c
index b8c1331b7a04..6e1e99cd2b59 100644
--- a/drivers/mtd/maps/sbc8240.c
+++ b/drivers/mtd/maps/sbc8240.c
@@ -4,9 +4,6 @@
* Carolyn Smith, Tektronix, Inc.
*
* This code is GPLed
- *
- * $Id: sbc8240.c,v 1.5 2005/11/07 11:14:28 gleixner Exp $
- *
*/
/*
diff --git a/drivers/mtd/maps/sbc_gxx.c b/drivers/mtd/maps/sbc_gxx.c
index 7cc4041d096d..1b1c0b7e11ef 100644
--- a/drivers/mtd/maps/sbc_gxx.c
+++ b/drivers/mtd/maps/sbc_gxx.c
@@ -17,8 +17,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- $Id: sbc_gxx.c,v 1.35 2005/11/07 11:14:28 gleixner Exp $
-
The SBC-MediaGX / SBC-GXx has up to 16 MiB of
Intel StrataFlash (28F320/28F640) in x8 mode.
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index 4045e372b90d..85c1e56309ec 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -16,8 +16,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
- * $Id: sc520cdp.c,v 1.23 2005/11/17 08:20:27 dwmw2 Exp $
- *
*
* The SC520CDP is an evaluation board for the Elan SC520 processor available
* from AMD. It has two banks of 32-bit Flash ROM, each 8 Megabytes in size,
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 0fc5584324e3..21169e6d646c 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -1,6 +1,5 @@
/*
* MTD map driver for BIOS Flash on Intel SCB2 boards
- * $Id: scb2_flash.c,v 1.12 2005/03/18 14:04:35 gleixner Exp $
* Copyright (C) 2002 Sun Microsystems, Inc.
* Tim Hockin <thockin@sun.com>
*
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
index 5e2bce22f37c..b5391ebb736e 100644
--- a/drivers/mtd/maps/scx200_docflash.c
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -2,8 +2,6 @@
Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
- $Id: scx200_docflash.c,v 1.12 2005/11/07 11:14:28 gleixner Exp $
-
National Semiconductor SCx200 flash mapped with DOCCS
*/
diff --git a/drivers/mtd/maps/sharpsl-flash.c b/drivers/mtd/maps/sharpsl-flash.c
index 917dc778f24e..026eab028189 100644
--- a/drivers/mtd/maps/sharpsl-flash.c
+++ b/drivers/mtd/maps/sharpsl-flash.c
@@ -4,8 +4,6 @@
* Copyright (C) 2001 Lineo Japan, Inc.
* Copyright (C) 2002 SHARP
*
- * $Id: sharpsl-flash.c,v 1.7 2005/11/07 11:14:28 gleixner Exp $
- *
* based on rpxlite.c,v 1.15 2001/10/02 15:05:14 dwmw2 Exp
* Handle mapping of the flash on the RPX Lite and CLLF boards
*
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index d76ceef453ce..0eb41d9c6786 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -1,6 +1,4 @@
/*
- * $Id: solutionengine.c,v 1.15 2005/11/07 11:14:28 gleixner Exp $
- *
* Flash and EPROM on Hitachi Solution Engine and similar boards.
*
* (C) 2001 Red Hat, Inc.
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 001af7f7ddda..0d7c88396c88 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -1,4 +1,4 @@
-/* $Id: sun_uflash.c,v 1.13 2005/11/07 11:14:28 gleixner Exp $
+/*
*
* sun_uflash - Driver implementation for user-programmable flash
* present on many Sun Microsystems SME boardsets.
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index 521734057314..a5d3d8531faa 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -2,8 +2,6 @@
* Handle mapping of the flash memory access routines
* on TQM8xxL based devices.
*
- * $Id: tqm8xxl.c,v 1.15 2005/11/07 11:14:28 gleixner Exp $
- *
* based on rpxlite.c
*
* Copyright(C) 2001 Kirk Lee <kirk@hpc.ee.ntu.edu.tw>
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index b47270e850bc..e2147bf11c88 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -22,8 +22,6 @@
* - Drive A and B use the resident flash disk (RFD) flash translation layer.
* - If you have created your own jffs file system and the bios overwrites
* it during boot, try disabling Drive A: and B: in the boot order.
- *
- * $Id: ts5500_flash.c,v 1.5 2005/11/07 11:14:28 gleixner Exp $
*/
#include <linux/init.h>
diff --git a/drivers/mtd/maps/tsunami_flash.c b/drivers/mtd/maps/tsunami_flash.c
index 0f915ac3102e..77a8bfc02577 100644
--- a/drivers/mtd/maps/tsunami_flash.c
+++ b/drivers/mtd/maps/tsunami_flash.c
@@ -2,7 +2,6 @@
* tsunami_flash.c
*
* flash chip on alpha ds10...
- * $Id: tsunami_flash.c,v 1.10 2005/11/07 11:14:29 gleixner Exp $
*/
#include <asm/io.h>
#include <asm/core_tsunami.h>
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 3fcf92130aa4..0dc645f8152f 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -4,8 +4,6 @@
* uclinux.c -- generic memory mapped MTD driver for uclinux
*
* (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
- *
- * $Id: uclinux.c,v 1.12 2005/11/07 11:14:29 gleixner Exp $
*/
/****************************************************************************/
diff --git a/drivers/mtd/maps/vmax301.c b/drivers/mtd/maps/vmax301.c
index b3e487395435..5a0c9a353b0f 100644
--- a/drivers/mtd/maps/vmax301.c
+++ b/drivers/mtd/maps/vmax301.c
@@ -1,4 +1,3 @@
-// $Id: vmax301.c,v 1.32 2005/11/07 11:14:29 gleixner Exp $
/* ######################################################################
Tempustech VMAX SBC301 MTD Driver.
diff --git a/drivers/mtd/maps/walnut.c b/drivers/mtd/maps/walnut.c
index ca932122fb64..e243476c8171 100644
--- a/drivers/mtd/maps/walnut.c
+++ b/drivers/mtd/maps/walnut.c
@@ -1,6 +1,4 @@
/*
- * $Id: walnut.c,v 1.3 2005/11/07 11:14:29 gleixner Exp $
- *
* Mapping for Walnut flash
* (used ebony.c as a "framework")
*
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index ac5b8105b6ef..413b0cf9bbd2 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -1,6 +1,4 @@
/*
- * $Id: wr_sbc82xx_flash.c,v 1.8 2005/11/07 11:14:29 gleixner Exp $
- *
* Map for flash chips on Wind River PowerQUICC II SBC82xx board.
*
* Copyright (C) 2004 Red Hat, Inc.
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 839eed8430a2..9ff007c4962c 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -1,6 +1,4 @@
/*
- * $Id: mtd_blkdevs.c,v 1.27 2005/11/07 11:14:20 gleixner Exp $
- *
* (C) 2003 David Woodhouse <dwmw2@infradead.org>
*
* Interface to Linux 2.5 block layer for MTD 'translation layers'.
@@ -212,7 +210,7 @@ static struct block_device_operations mtd_blktrans_ops = {
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
struct mtd_blktrans_ops *tr = new->tr;
- struct list_head *this;
+ struct mtd_blktrans_dev *d;
int last_devnum = -1;
struct gendisk *gd;
@@ -221,8 +219,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
BUG();
}
- list_for_each(this, &tr->devs) {
- struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);
+ list_for_each_entry(d, &tr->devs, list) {
if (new->devnum == -1) {
/* Use first free number */
if (d->devnum != last_devnum+1) {
@@ -309,33 +306,24 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
static void blktrans_notify_remove(struct mtd_info *mtd)
{
- struct list_head *this, *this2, *next;
-
- list_for_each(this, &blktrans_majors) {
- struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);
-
- list_for_each_safe(this2, next, &tr->devs) {
- struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);
+ struct mtd_blktrans_ops *tr;
+ struct mtd_blktrans_dev *dev, *next;
+ list_for_each_entry(tr, &blktrans_majors, list)
+ list_for_each_entry_safe(dev, next, &tr->devs, list)
if (dev->mtd == mtd)
tr->remove_dev(dev);
- }
- }
}
static void blktrans_notify_add(struct mtd_info *mtd)
{
- struct list_head *this;
+ struct mtd_blktrans_ops *tr;
if (mtd->type == MTD_ABSENT)
return;
- list_for_each(this, &blktrans_majors) {
- struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);
-
+ list_for_each_entry(tr, &blktrans_majors, list)
tr->add_mtd(tr, mtd);
- }
-
}
static struct mtd_notifier blktrans_notifier = {
@@ -406,7 +394,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
- struct list_head *this, *next;
+ struct mtd_blktrans_dev *dev, *next;
mutex_lock(&mtd_table_mutex);
@@ -416,10 +404,8 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
/* Remove it from the list of active majors */
list_del(&tr->list);
- list_for_each_safe(this, next, &tr->devs) {
- struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);
+ list_for_each_entry_safe(dev, next, &tr->devs, list)
tr->remove_dev(dev);
- }
blk_cleanup_queue(tr->blkcore_priv->rq);
unregister_blkdev(tr->major, tr->name);
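
The mtd_blkdevs hunks above, and the mtdcore and mtdpart conversions that follow, all apply the same mechanical change: an open-coded list_for_each() plus list_entry() becomes list_for_each_entry(), and the deleting loops become list_for_each_entry_safe(), which caches the next node so the current one can be unlinked and freed mid-walk. A small kernel-style illustration of the idiom with a toy element type (the struct and function names here are made up for the example):

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>

struct item {
	int id;
	struct list_head list;
};

/* Old style: iterate over list_head nodes and convert each by hand. */
static void walk_old(struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		struct item *it = list_entry(this, struct item, list);

		pr_debug("id=%d\n", it->id);
	}
}

/* New style: the macro hides the container_of(); the _safe variant
 * keeps a lookahead pointer so entries may be deleted while walking. */
static void drop_negative(struct list_head *head)
{
	struct item *it, *next;

	list_for_each_entry_safe(it, next, head, list) {
		if (it->id < 0) {
			list_del(&it->list);
			kfree(it);
		}
	}
}
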
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 952da30b1745..208c6faa0358 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -1,8 +1,6 @@
/*
* Direct MTD block device access
*
- * $Id: mtdblock.c,v 1.68 2005/11/07 11:14:20 gleixner Exp $
- *
* (C) 2000-2003 Nicolas Pitre <nico@cam.org>
* (C) 1999-2003 David Woodhouse <dwmw2@infradead.org>
*/
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index f79dbb49b1a2..852165f8b1c3 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -1,6 +1,4 @@
/*
- * $Id: mtdblock_ro.c,v 1.19 2004/11/16 18:28:59 dwmw2 Exp $
- *
* (C) 2003 David Woodhouse <dwmw2@infradead.org>
*
* Simple read-only (writable only for RAM) mtdblock driver
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index aef9f4b687c9..d2f331876e4c 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1,6 +1,4 @@
/*
- * $Id: mtdchar.c,v 1.76 2005/11/07 11:14:20 gleixner Exp $
- *
* Character-device access to raw MTD devices.
*
*/
@@ -494,6 +492,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
{
struct mtd_oob_buf buf;
struct mtd_oob_ops ops;
+ struct mtd_oob_buf __user *user_buf = argp;
uint32_t retlen;
if(!(file->f_mode & 2))
@@ -537,8 +536,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
if (ops.oobretlen > 0xFFFFFFFFU)
ret = -EOVERFLOW;
retlen = ops.oobretlen;
- if (copy_to_user(&((struct mtd_oob_buf *)argp)->length,
- &retlen, sizeof(buf.length)))
+ if (copy_to_user(&user_buf->length, &retlen, sizeof(buf.length)))
ret = -EFAULT;
kfree(ops.oobbuf);
@@ -592,29 +590,29 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
case MEMLOCK:
{
- struct erase_info_user info;
+ struct erase_info_user einfo;
- if (copy_from_user(&info, argp, sizeof(info)))
+ if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
if (!mtd->lock)
ret = -EOPNOTSUPP;
else
- ret = mtd->lock(mtd, info.start, info.length);
+ ret = mtd->lock(mtd, einfo.start, einfo.length);
break;
}
case MEMUNLOCK:
{
- struct erase_info_user info;
+ struct erase_info_user einfo;
- if (copy_from_user(&info, argp, sizeof(info)))
+ if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
if (!mtd->unlock)
ret = -EOPNOTSUPP;
else
- ret = mtd->unlock(mtd, info.start, info.length);
+ ret = mtd->unlock(mtd, einfo.start, einfo.length);
break;
}
@@ -714,15 +712,15 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
case OTPLOCK:
{
- struct otp_info info;
+ struct otp_info oinfo;
if (mfi->mode != MTD_MODE_OTP_USER)
return -EINVAL;
- if (copy_from_user(&info, argp, sizeof(info)))
+ if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
return -EFAULT;
if (!mtd->lock_user_prot_reg)
return -EOPNOTSUPP;
- ret = mtd->lock_user_prot_reg(mtd, info.start, info.length);
+ ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
break;
}
#endif
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index d563dcd4b264..2972a5edb73d 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -6,8 +6,6 @@
* NAND support by Christian Gan <cgan@iders.ca>
*
* This code is GPL
- *
- * $Id: mtdconcat.c,v 1.11 2005/11/07 11:14:20 gleixner Exp $
*/
#include <linux/kernel.h>
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index f7e7890e5bc6..a9d246949820 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1,6 +1,4 @@
/*
- * $Id: mtdcore.c,v 1.47 2005/11/07 11:14:20 gleixner Exp $
- *
* Core registration and callback routines for MTD
* drivers and users.
*
@@ -53,7 +51,7 @@ int add_mtd_device(struct mtd_info *mtd)
for (i=0; i < MAX_MTD_DEVICES; i++)
if (!mtd_table[i]) {
- struct list_head *this;
+ struct mtd_notifier *not;
mtd_table[i] = mtd;
mtd->index = i;
@@ -72,10 +70,8 @@ int add_mtd_device(struct mtd_info *mtd)
DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name);
/* No need to get a refcount on the module containing
the notifier, since we hold the mtd_table_mutex */
- list_for_each(this, &mtd_notifiers) {
- struct mtd_notifier *not = list_entry(this, struct mtd_notifier, list);
+ list_for_each_entry(not, &mtd_notifiers, list)
not->add(mtd);
- }
mutex_unlock(&mtd_table_mutex);
/* We _know_ we aren't being removed, because
@@ -113,14 +109,12 @@ int del_mtd_device (struct mtd_info *mtd)
mtd->index, mtd->name, mtd->usecount);
ret = -EBUSY;
} else {
- struct list_head *this;
+ struct mtd_notifier *not;
/* No need to get a refcount on the module containing
the notifier, since we hold the mtd_table_mutex */
- list_for_each(this, &mtd_notifiers) {
- struct mtd_notifier *not = list_entry(this, struct mtd_notifier, list);
+ list_for_each_entry(not, &mtd_notifiers, list)
not->remove(mtd);
- }
mtd_table[mtd->index] = NULL;
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 07c701169344..edb90b58a9b1 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -5,8 +5,6 @@
*
* This code is GPL
*
- * $Id: mtdpart.c,v 1.55 2005/11/07 11:14:20 gleixner Exp $
- *
* 02-21-2002 Thomas Gleixner <gleixner@autronix.de>
* added support for read_oob, write_oob
*/
@@ -46,8 +44,8 @@ struct mtd_part {
* to the _real_ device.
*/
-static int part_read (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
int res;
@@ -56,7 +54,7 @@ static int part_read (struct mtd_info *mtd, loff_t from, size_t len,
len = 0;
else if (from + len > mtd->size)
len = mtd->size - from;
- res = part->master->read (part->master, from + part->offset,
+ res = part->master->read(part->master, from + part->offset,
len, retlen, buf);
if (unlikely(res)) {
if (res == -EUCLEAN)
@@ -67,8 +65,8 @@ static int part_read (struct mtd_info *mtd, loff_t from, size_t len,
return res;
}
-static int part_point (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, void **virt, resource_size_t *phys)
+static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
{
struct mtd_part *part = PART(mtd);
if (from >= mtd->size)
@@ -87,7 +85,7 @@ static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
}
static int part_read_oob(struct mtd_info *mtd, loff_t from,
- struct mtd_oob_ops *ops)
+ struct mtd_oob_ops *ops)
{
struct mtd_part *part = PART(mtd);
int res;
@@ -107,38 +105,38 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
return res;
}
-static int part_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->read_user_prot_reg (part->master, from,
+ return part->master->read_user_prot_reg(part->master, from,
len, retlen, buf);
}
-static int part_get_user_prot_info (struct mtd_info *mtd,
- struct otp_info *buf, size_t len)
+static int part_get_user_prot_info(struct mtd_info *mtd,
+ struct otp_info *buf, size_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->get_user_prot_info (part->master, buf, len);
+ return part->master->get_user_prot_info(part->master, buf, len);
}
-static int part_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->read_fact_prot_reg (part->master, from,
+ return part->master->read_fact_prot_reg(part->master, from,
len, retlen, buf);
}
-static int part_get_fact_prot_info (struct mtd_info *mtd,
- struct otp_info *buf, size_t len)
+static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
+ size_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->get_fact_prot_info (part->master, buf, len);
+ return part->master->get_fact_prot_info(part->master, buf, len);
}
-static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
{
struct mtd_part *part = PART(mtd);
if (!(mtd->flags & MTD_WRITEABLE))
@@ -147,12 +145,12 @@ static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
len = 0;
else if (to + len > mtd->size)
len = mtd->size - to;
- return part->master->write (part->master, to + part->offset,
+ return part->master->write(part->master, to + part->offset,
len, retlen, buf);
}
-static int part_panic_write (struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
{
struct mtd_part *part = PART(mtd);
if (!(mtd->flags & MTD_WRITEABLE))
@@ -161,12 +159,12 @@ static int part_panic_write (struct mtd_info *mtd, loff_t to, size_t len,
len = 0;
else if (to + len > mtd->size)
len = mtd->size - to;
- return part->master->panic_write (part->master, to + part->offset,
+ return part->master->panic_write(part->master, to + part->offset,
len, retlen, buf);
}
static int part_write_oob(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops)
+ struct mtd_oob_ops *ops)
{
struct mtd_part *part = PART(mtd);
@@ -180,31 +178,32 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
return part->master->write_oob(part->master, to + part->offset, ops);
}
-static int part_write_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->write_user_prot_reg (part->master, from,
+ return part->master->write_user_prot_reg(part->master, from,
len, retlen, buf);
}
-static int part_lock_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len)
+static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->lock_user_prot_reg (part->master, from, len);
+ return part->master->lock_user_prot_reg(part->master, from, len);
}
-static int part_writev (struct mtd_info *mtd, const struct kvec *vecs,
- unsigned long count, loff_t to, size_t *retlen)
+static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
{
struct mtd_part *part = PART(mtd);
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- return part->master->writev (part->master, vecs, count,
+ return part->master->writev(part->master, vecs, count,
to + part->offset, retlen);
}
-static int part_erase (struct mtd_info *mtd, struct erase_info *instr)
+static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct mtd_part *part = PART(mtd);
int ret;
@@ -236,7 +235,7 @@ void mtd_erase_callback(struct erase_info *instr)
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);
-static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
+static int part_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
@@ -244,7 +243,7 @@ static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
return part->master->lock(part->master, ofs + part->offset, len);
}
-static int part_unlock (struct mtd_info *mtd, loff_t ofs, size_t len)
+static int part_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
@@ -270,7 +269,7 @@ static void part_resume(struct mtd_info *mtd)
part->master->resume(part->master);
}
-static int part_block_isbad (struct mtd_info *mtd, loff_t ofs)
+static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_part *part = PART(mtd);
if (ofs >= mtd->size)
@@ -279,7 +278,7 @@ static int part_block_isbad (struct mtd_info *mtd, loff_t ofs)
return part->master->block_isbad(part->master, ofs);
}
-static int part_block_markbad (struct mtd_info *mtd, loff_t ofs)
+static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_part *part = PART(mtd);
int res;
@@ -302,229 +301,237 @@ static int part_block_markbad (struct mtd_info *mtd, loff_t ofs)
int del_mtd_partitions(struct mtd_info *master)
{
- struct list_head *node;
- struct mtd_part *slave;
+ struct mtd_part *slave, *next;
- for (node = mtd_partitions.next;
- node != &mtd_partitions;
- node = node->next) {
- slave = list_entry(node, struct mtd_part, list);
+ list_for_each_entry_safe(slave, next, &mtd_partitions, list)
if (slave->master == master) {
- struct list_head *prev = node->prev;
- __list_del(prev, node->next);
- if(slave->registered)
+ list_del(&slave->list);
+ if (slave->registered)
del_mtd_device(&slave->mtd);
kfree(slave);
- node = prev;
}
- }
return 0;
}
+EXPORT_SYMBOL(del_mtd_partitions);
-/*
- * This function, given a master MTD object and a partition table, creates
- * and registers slave MTD objects which are bound to the master according to
- * the partition definitions.
- * (Q: should we register the master MTD object as well?)
- */
-
-int add_mtd_partitions(struct mtd_info *master,
- const struct mtd_partition *parts,
- int nbparts)
+static struct mtd_part *add_one_partition(struct mtd_info *master,
+ const struct mtd_partition *part, int partno,
+ u_int32_t cur_offset)
{
struct mtd_part *slave;
- u_int32_t cur_offset = 0;
- int i;
-
- printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
-
- for (i = 0; i < nbparts; i++) {
- /* allocate the partition structure */
- slave = kzalloc (sizeof(*slave), GFP_KERNEL);
- if (!slave) {
- printk ("memory allocation error while creating partitions for \"%s\"\n",
- master->name);
- del_mtd_partitions(master);
- return -ENOMEM;
- }
- list_add(&slave->list, &mtd_partitions);
+ /* allocate the partition structure */
+ slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+ if (!slave) {
+ printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
+ master->name);
+ del_mtd_partitions(master);
+ return NULL;
+ }
+ list_add(&slave->list, &mtd_partitions);
- /* set up the MTD object for this partition */
- slave->mtd.type = master->type;
- slave->mtd.flags = master->flags & ~parts[i].mask_flags;
- slave->mtd.size = parts[i].size;
- slave->mtd.writesize = master->writesize;
- slave->mtd.oobsize = master->oobsize;
- slave->mtd.oobavail = master->oobavail;
- slave->mtd.subpage_sft = master->subpage_sft;
+ /* set up the MTD object for this partition */
+ slave->mtd.type = master->type;
+ slave->mtd.flags = master->flags & ~part->mask_flags;
+ slave->mtd.size = part->size;
+ slave->mtd.writesize = master->writesize;
+ slave->mtd.oobsize = master->oobsize;
+ slave->mtd.oobavail = master->oobavail;
+ slave->mtd.subpage_sft = master->subpage_sft;
- slave->mtd.name = parts[i].name;
- slave->mtd.owner = master->owner;
+ slave->mtd.name = part->name;
+ slave->mtd.owner = master->owner;
- slave->mtd.read = part_read;
- slave->mtd.write = part_write;
+ slave->mtd.read = part_read;
+ slave->mtd.write = part_write;
- if (master->panic_write)
- slave->mtd.panic_write = part_panic_write;
+ if (master->panic_write)
+ slave->mtd.panic_write = part_panic_write;
- if(master->point && master->unpoint){
- slave->mtd.point = part_point;
- slave->mtd.unpoint = part_unpoint;
- }
+ if (master->point && master->unpoint) {
+ slave->mtd.point = part_point;
+ slave->mtd.unpoint = part_unpoint;
+ }
- if (master->read_oob)
- slave->mtd.read_oob = part_read_oob;
- if (master->write_oob)
- slave->mtd.write_oob = part_write_oob;
- if(master->read_user_prot_reg)
- slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
- if(master->read_fact_prot_reg)
- slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
- if(master->write_user_prot_reg)
- slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
- if(master->lock_user_prot_reg)
- slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
- if(master->get_user_prot_info)
- slave->mtd.get_user_prot_info = part_get_user_prot_info;
- if(master->get_fact_prot_info)
- slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
- if (master->sync)
- slave->mtd.sync = part_sync;
- if (!i && master->suspend && master->resume) {
- slave->mtd.suspend = part_suspend;
- slave->mtd.resume = part_resume;
+ if (master->read_oob)
+ slave->mtd.read_oob = part_read_oob;
+ if (master->write_oob)
+ slave->mtd.write_oob = part_write_oob;
+ if (master->read_user_prot_reg)
+ slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
+ if (master->read_fact_prot_reg)
+ slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
+ if (master->write_user_prot_reg)
+ slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
+ if (master->lock_user_prot_reg)
+ slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
+ if (master->get_user_prot_info)
+ slave->mtd.get_user_prot_info = part_get_user_prot_info;
+ if (master->get_fact_prot_info)
+ slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
+ if (master->sync)
+ slave->mtd.sync = part_sync;
+ if (!partno && master->suspend && master->resume) {
+ slave->mtd.suspend = part_suspend;
+ slave->mtd.resume = part_resume;
+ }
+ if (master->writev)
+ slave->mtd.writev = part_writev;
+ if (master->lock)
+ slave->mtd.lock = part_lock;
+ if (master->unlock)
+ slave->mtd.unlock = part_unlock;
+ if (master->block_isbad)
+ slave->mtd.block_isbad = part_block_isbad;
+ if (master->block_markbad)
+ slave->mtd.block_markbad = part_block_markbad;
+ slave->mtd.erase = part_erase;
+ slave->master = master;
+ slave->offset = part->offset;
+ slave->index = partno;
+
+ if (slave->offset == MTDPART_OFS_APPEND)
+ slave->offset = cur_offset;
+ if (slave->offset == MTDPART_OFS_NXTBLK) {
+ slave->offset = cur_offset;
+ if ((cur_offset % master->erasesize) != 0) {
+ /* Round up to next erasesize */
+ slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
+ printk(KERN_NOTICE "Moving partition %d: "
+ "0x%08x -> 0x%08x\n", partno,
+ cur_offset, slave->offset);
}
- if (master->writev)
- slave->mtd.writev = part_writev;
- if (master->lock)
- slave->mtd.lock = part_lock;
- if (master->unlock)
- slave->mtd.unlock = part_unlock;
- if (master->block_isbad)
- slave->mtd.block_isbad = part_block_isbad;
- if (master->block_markbad)
- slave->mtd.block_markbad = part_block_markbad;
- slave->mtd.erase = part_erase;
- slave->master = master;
- slave->offset = parts[i].offset;
- slave->index = i;
-
- if (slave->offset == MTDPART_OFS_APPEND)
- slave->offset = cur_offset;
- if (slave->offset == MTDPART_OFS_NXTBLK) {
- slave->offset = cur_offset;
- if ((cur_offset % master->erasesize) != 0) {
- /* Round up to next erasesize */
- slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
- printk(KERN_NOTICE "Moving partition %d: "
- "0x%08x -> 0x%08x\n", i,
- cur_offset, slave->offset);
+ }
+ if (slave->mtd.size == MTDPART_SIZ_FULL)
+ slave->mtd.size = master->size - slave->offset;
+
+ printk(KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
+ slave->offset + slave->mtd.size, slave->mtd.name);
+
+ /* let's do some sanity checks */
+ if (slave->offset >= master->size) {
+ /* let's register it anyway to preserve ordering */
+ slave->offset = 0;
+ slave->mtd.size = 0;
+ printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
+ part->name);
+ goto out_register;
+ }
+ if (slave->offset + slave->mtd.size > master->size) {
+ slave->mtd.size = master->size - slave->offset;
+ printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
+ part->name, master->name, slave->mtd.size);
+ }
+ if (master->numeraseregions > 1) {
+ /* Deal with variable erase size stuff */
+ int i, max = master->numeraseregions;
+ u32 end = slave->offset + slave->mtd.size;
+ struct mtd_erase_region_info *regions = master->eraseregions;
+
+ /* Find the first erase region which is part of this
+ * partition. */
+ for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
+ ;
+ /* The loop stopped at the region after the one we want, so step back */
+ i--;
+
+ /* Pick biggest erasesize */
+ for (; i < max && regions[i].offset < end; i++) {
+ if (slave->mtd.erasesize < regions[i].erasesize) {
+ slave->mtd.erasesize = regions[i].erasesize;
}
}
- if (slave->mtd.size == MTDPART_SIZ_FULL)
- slave->mtd.size = master->size - slave->offset;
- cur_offset = slave->offset + slave->mtd.size;
+ BUG_ON(slave->mtd.erasesize == 0);
+ } else {
+ /* Single erase size */
+ slave->mtd.erasesize = master->erasesize;
+ }
- printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
- slave->offset + slave->mtd.size, slave->mtd.name);
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
+ (slave->offset % slave->mtd.erasesize)) {
+ /* Doesn't start on a boundary of major erase size */
+ /* FIXME: Let it be writable if it is on a boundary of
+ * _minor_ erase size though */
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
+ part->name);
+ }
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
+ (slave->mtd.size % slave->mtd.erasesize)) {
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
+ part->name);
+ }
- /* let's do some sanity checks */
- if (slave->offset >= master->size) {
- /* let's register it anyway to preserve ordering */
- slave->offset = 0;
- slave->mtd.size = 0;
- printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
- parts[i].name);
- }
- if (slave->offset + slave->mtd.size > master->size) {
- slave->mtd.size = master->size - slave->offset;
- printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
- parts[i].name, master->name, slave->mtd.size);
- }
- if (master->numeraseregions>1) {
- /* Deal with variable erase size stuff */
- int i;
- struct mtd_erase_region_info *regions = master->eraseregions;
-
- /* Find the first erase regions which is part of this partition. */
- for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
- ;
-
- for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
- if (slave->mtd.erasesize < regions[i].erasesize) {
- slave->mtd.erasesize = regions[i].erasesize;
- }
- }
- } else {
- /* Single erase size */
- slave->mtd.erasesize = master->erasesize;
- }
+ slave->mtd.ecclayout = master->ecclayout;
+ if (master->block_isbad) {
+ uint32_t offs = 0;
- if ((slave->mtd.flags & MTD_WRITEABLE) &&
- (slave->offset % slave->mtd.erasesize)) {
- /* Doesn't start on a boundary of major erase size */
- /* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
- slave->mtd.flags &= ~MTD_WRITEABLE;
- printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
- parts[i].name);
- }
- if ((slave->mtd.flags & MTD_WRITEABLE) &&
- (slave->mtd.size % slave->mtd.erasesize)) {
- slave->mtd.flags &= ~MTD_WRITEABLE;
- printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
- parts[i].name);
+ while (offs < slave->mtd.size) {
+ if (master->block_isbad(master,
+ offs + slave->offset))
+ slave->mtd.ecc_stats.badblocks++;
+ offs += slave->mtd.erasesize;
}
+ }
- slave->mtd.ecclayout = master->ecclayout;
- if (master->block_isbad) {
- uint32_t offs = 0;
+out_register:
+ if (part->mtdp) {
+ /* store the object pointer (caller may or may not register it) */
+ *part->mtdp = &slave->mtd;
+ slave->registered = 0;
+ } else {
+ /* register our partition */
+ add_mtd_device(&slave->mtd);
+ slave->registered = 1;
+ }
+ return slave;
+}
- while(offs < slave->mtd.size) {
- if (master->block_isbad(master,
- offs + slave->offset))
- slave->mtd.ecc_stats.badblocks++;
- offs += slave->mtd.erasesize;
- }
- }
+/*
+ * This function, given a master MTD object and a partition table, creates
+ * and registers slave MTD objects which are bound to the master according to
+ * the partition definitions.
+ * (Q: should we register the master MTD object as well?)
+ */
- if(parts[i].mtdp)
- { /* store the object pointer (caller may or may not register it */
- *parts[i].mtdp = &slave->mtd;
- slave->registered = 0;
- }
- else
- {
- /* register our partition */
- add_mtd_device(&slave->mtd);
- slave->registered = 1;
- }
+int add_mtd_partitions(struct mtd_info *master,
+ const struct mtd_partition *parts,
+ int nbparts)
+{
+ struct mtd_part *slave;
+ u_int32_t cur_offset = 0;
+ int i;
+
+ printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
+
+ for (i = 0; i < nbparts; i++) {
+ slave = add_one_partition(master, parts + i, i, cur_offset);
+ if (!slave)
+ return -ENOMEM;
+ cur_offset = slave->offset + slave->mtd.size;
}
return 0;
}
-
EXPORT_SYMBOL(add_mtd_partitions);
-EXPORT_SYMBOL(del_mtd_partitions);
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);
static struct mtd_part_parser *get_partition_parser(const char *name)
{
- struct list_head *this;
- void *ret = NULL;
- spin_lock(&part_parser_lock);
+ struct mtd_part_parser *p, *ret = NULL;
- list_for_each(this, &part_parsers) {
- struct mtd_part_parser *p = list_entry(this, struct mtd_part_parser, list);
+ spin_lock(&part_parser_lock);
+ list_for_each_entry(p, &part_parsers, list)
if (!strcmp(p->name, name) && try_module_get(p->owner)) {
ret = p;
break;
}
- }
+
spin_unlock(&part_parser_lock);
return ret;
@@ -538,6 +545,7 @@ int register_mtd_parser(struct mtd_part_parser *p)
return 0;
}
+EXPORT_SYMBOL_GPL(register_mtd_parser);
int deregister_mtd_parser(struct mtd_part_parser *p)
{
@@ -546,6 +554,7 @@ int deregister_mtd_parser(struct mtd_part_parser *p)
spin_unlock(&part_parser_lock);
return 0;
}
+EXPORT_SYMBOL_GPL(deregister_mtd_parser);
int parse_mtd_partitions(struct mtd_info *master, const char **types,
struct mtd_partition **pparts, unsigned long origin)
@@ -573,7 +582,4 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
}
return ret;
}
-
EXPORT_SYMBOL_GPL(parse_mtd_partitions);
-EXPORT_SYMBOL_GPL(register_mtd_parser);
-EXPORT_SYMBOL_GPL(deregister_mtd_parser);
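
/*
 * Illustrative sketch (not part of the patch): how a board driver would
 * typically feed a partition table to add_mtd_partitions(). The names
 * "example_parts" / "example_register" and the layout are invented; the
 * MTDPART_OFS_APPEND / MTDPART_SIZ_FULL placeholders are resolved by
 * add_one_partition() above.
 */
#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Hypothetical layout: a boot block followed by a data area filling
 * the rest of the chip. */
static struct mtd_partition example_parts[] = {
	{
		.name		= "bootloader",
		.offset		= 0,
		.size		= 0x40000,
		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
	}, {
		.name		= "filesystem",
		.offset		= MTDPART_OFS_APPEND,	/* right after the previous one */
		.size		= MTDPART_SIZ_FULL,	/* up to the end of the master */
	},
};

static int example_register(struct mtd_info *master)
{
	/* Each entry is handed to add_one_partition() in turn;
	 * cur_offset advances past every registered slave. */
	return add_mtd_partitions(master, example_parts,
				  ARRAY_SIZE(example_parts));
}
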
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 5076faf9ca66..71406e517857 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -1,5 +1,4 @@
# drivers/mtd/nand/Kconfig
-# $Id: Kconfig,v 1.35 2005/11/07 11:14:30 gleixner Exp $
menuconfig MTD_NAND
tristate "NAND Device Support"
@@ -272,22 +271,23 @@ config MTD_NAND_CS553X
If you say "m", the module will be called "cs553x_nand.ko".
-config MTD_NAND_AT91
- bool "Support for NAND Flash / SmartMedia on AT91"
- depends on ARCH_AT91
+config MTD_NAND_ATMEL
+ tristate "Support for NAND Flash / SmartMedia on AT91 and AVR32"
+ depends on ARCH_AT91 || AVR32
help
Enables support for NAND Flash / Smart Media Card interface
- on Atmel AT91 processors.
+ on Atmel AT91 and AVR32 processors.
choice
- prompt "ECC management for NAND Flash / SmartMedia on AT91"
- depends on MTD_NAND_AT91
+ prompt "ECC management for NAND Flash / SmartMedia on AT91 / AVR32"
+ depends on MTD_NAND_ATMEL
-config MTD_NAND_AT91_ECC_HW
+config MTD_NAND_ATMEL_ECC_HW
bool "Hardware ECC"
- depends on ARCH_AT91SAM9263 || ARCH_AT91SAM9260
+ depends on ARCH_AT91SAM9263 || ARCH_AT91SAM9260 || AVR32
help
- Uses hardware ECC provided by the at91sam9260/at91sam9263 chip
- instead of software ECC.
+ Use hardware ECC instead of software ECC when the chip
+ supports it.
+
The hardware ECC controller is capable of single bit error
correction and 2-bit random detection per page.
@@ -297,16 +297,16 @@ config MTD_NAND_AT91_ECC_HW
If unsure, say Y
-config MTD_NAND_AT91_ECC_SOFT
+config MTD_NAND_ATMEL_ECC_SOFT
bool "Software ECC"
help
- Uses software ECC.
+ Use software ECC.
NB : hardware and software ECC schemes are incompatible.
If you switch from one to another, you'll have to erase your
mtd partition.
-config MTD_NAND_AT91_ECC_NONE
+config MTD_NAND_ATMEL_ECC_NONE
bool "No ECC (testing only, DANGEROUS)"
depends on DEBUG_KERNEL
help
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index a6e74a46992a..d772581de573 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -1,7 +1,6 @@
#
# linux/drivers/nand/Makefile
#
-# $Id: Makefile.common,v 1.15 2004/11/26 12:28:22 dedekind Exp $
obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o
obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
@@ -24,7 +23,7 @@ obj-$(CONFIG_MTD_NAND_TS7250) += ts7250.o
obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
-obj-$(CONFIG_MTD_NAND_AT91) += at91_nand.o
+obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
diff --git a/drivers/mtd/nand/at91_nand.c b/drivers/mtd/nand/atmel_nand.c
index 0adb287027a2..99aec46e2145 100644
--- a/drivers/mtd/nand/at91_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -1,6 +1,4 @@
/*
- * drivers/mtd/nand/at91_nand.c
- *
* Copyright (C) 2003 Rick Bronson
*
* Derived from drivers/mtd/nand/autcpu12.c
@@ -31,20 +29,19 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <asm/sizes.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
-#include <asm/hardware.h>
#include <asm/arch/board.h>
-#include <asm/arch/gpio.h>
+#include <asm/arch/cpu.h>
-#ifdef CONFIG_MTD_NAND_AT91_ECC_HW
+#ifdef CONFIG_MTD_NAND_ATMEL_ECC_HW
#define hard_ecc 1
#else
#define hard_ecc 0
#endif
-#ifdef CONFIG_MTD_NAND_AT91_ECC_NONE
+#ifdef CONFIG_MTD_NAND_ATMEL_ECC_NONE
#define no_ecc 1
#else
#define no_ecc 0
@@ -52,18 +49,18 @@
/* Register access macros */
#define ecc_readl(add, reg) \
- __raw_readl(add + AT91_ECC_##reg)
+ __raw_readl(add + ATMEL_ECC_##reg)
#define ecc_writel(add, reg, value) \
- __raw_writel((value), add + AT91_ECC_##reg)
+ __raw_writel((value), add + ATMEL_ECC_##reg)
-#include <asm/arch/at91_ecc.h> /* AT91SAM9260/3 ECC registers */
+#include "atmel_nand_ecc.h" /* Hardware ECC registers */
/* oob layout for large page size
* bad block info is on bytes 0 and 1
* the bytes have to be consecutives to avoid
* several NAND_CMD_RNDOUT during read
*/
-static struct nand_ecclayout at91_oobinfo_large = {
+static struct nand_ecclayout atmel_oobinfo_large = {
.eccbytes = 4,
.eccpos = {60, 61, 62, 63},
.oobfree = {
@@ -76,7 +73,7 @@ static struct nand_ecclayout at91_oobinfo_large = {
* the bytes have to be consecutives to avoid
* several NAND_CMD_RNDOUT during read
*/
-static struct nand_ecclayout at91_oobinfo_small = {
+static struct nand_ecclayout atmel_oobinfo_small = {
.eccbytes = 4,
.eccpos = {0, 1, 2, 3},
.oobfree = {
@@ -84,11 +81,11 @@ static struct nand_ecclayout at91_oobinfo_small = {
},
};
-struct at91_nand_host {
+struct atmel_nand_host {
struct nand_chip nand_chip;
struct mtd_info mtd;
void __iomem *io_base;
- struct at91_nand_data *board;
+ struct atmel_nand_data *board;
struct device *dev;
void __iomem *ecc;
};
@@ -96,34 +93,34 @@ struct at91_nand_host {
/*
* Enable NAND.
*/
-static void at91_nand_enable(struct at91_nand_host *host)
+static void atmel_nand_enable(struct atmel_nand_host *host)
{
if (host->board->enable_pin)
- at91_set_gpio_value(host->board->enable_pin, 0);
+ gpio_set_value(host->board->enable_pin, 0);
}
/*
* Disable NAND.
*/
-static void at91_nand_disable(struct at91_nand_host *host)
+static void atmel_nand_disable(struct atmel_nand_host *host)
{
if (host->board->enable_pin)
- at91_set_gpio_value(host->board->enable_pin, 1);
+ gpio_set_value(host->board->enable_pin, 1);
}
/*
* Hardware specific access to control-lines
*/
-static void at91_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+static void atmel_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
struct nand_chip *nand_chip = mtd->priv;
- struct at91_nand_host *host = nand_chip->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
if (ctrl & NAND_CTRL_CHANGE) {
if (ctrl & NAND_NCE)
- at91_nand_enable(host);
+ atmel_nand_enable(host);
else
- at91_nand_disable(host);
+ atmel_nand_disable(host);
}
if (cmd == NAND_CMD_NONE)
return;
@@ -137,18 +134,49 @@ static void at91_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
/*
* Read the Device Ready pin.
*/
-static int at91_nand_device_ready(struct mtd_info *mtd)
+static int atmel_nand_device_ready(struct mtd_info *mtd)
{
struct nand_chip *nand_chip = mtd->priv;
- struct at91_nand_host *host = nand_chip->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
- return at91_get_gpio_value(host->board->rdy_pin);
+ return gpio_get_value(host->board->rdy_pin);
+}
+
+/*
+ * Minimal-overhead PIO for data access.
+ */
+static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+
+ __raw_readsb(nand_chip->IO_ADDR_R, buf, len);
+}
+
+static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+
+ __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
+}
+
+static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+
+ __raw_writesb(nand_chip->IO_ADDR_W, buf, len);
+}
+
+static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+
+ __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2);
}
/*
* write oob for small pages
*/
-static int at91_nand_write_oob_512(struct mtd_info *mtd,
+static int atmel_nand_write_oob_512(struct mtd_info *mtd,
struct nand_chip *chip, int page)
{
int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
@@ -176,7 +204,7 @@ static int at91_nand_write_oob_512(struct mtd_info *mtd,
/*
* read oob for small pages
*/
-static int at91_nand_read_oob_512(struct mtd_info *mtd,
+static int atmel_nand_read_oob_512(struct mtd_info *mtd,
struct nand_chip *chip, int page, int sndcmd)
{
if (sndcmd) {
@@ -196,11 +224,11 @@ static int at91_nand_read_oob_512(struct mtd_info *mtd,
* dat: raw data (unused)
* ecc_code: buffer for ECC
*/
-static int at91_nand_calculate(struct mtd_info *mtd,
+static int atmel_nand_calculate(struct mtd_info *mtd,
const u_char *dat, unsigned char *ecc_code)
{
struct nand_chip *nand_chip = mtd->priv;
- struct at91_nand_host *host = nand_chip->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
uint32_t *eccpos = nand_chip->ecc.layout->eccpos;
unsigned int ecc_value;
@@ -211,7 +239,7 @@ static int at91_nand_calculate(struct mtd_info *mtd,
ecc_code[eccpos[1]] = (ecc_value >> 8) & 0xFF;
/* get the last 2 ECC bytes */
- ecc_value = ecc_readl(host->ecc, NPR) & AT91_ECC_NPARITY;
+ ecc_value = ecc_readl(host->ecc, NPR) & ATMEL_ECC_NPARITY;
ecc_code[eccpos[2]] = ecc_value & 0xFF;
ecc_code[eccpos[3]] = (ecc_value >> 8) & 0xFF;
@@ -226,7 +254,7 @@ static int at91_nand_calculate(struct mtd_info *mtd,
* chip: nand chip info structure
* buf: buffer to store read data
*/
-static int at91_nand_read_page(struct mtd_info *mtd,
+static int atmel_nand_read_page(struct mtd_info *mtd,
struct nand_chip *chip, uint8_t *buf)
{
int eccsize = chip->ecc.size;
@@ -237,6 +265,19 @@ static int at91_nand_read_page(struct mtd_info *mtd,
uint8_t *ecc_pos;
int stat;
+ /*
+ * Errata: ALE is incorrectly wired up to the ECC controller
+ * on the AP7000, so it will include the address cycles in the
+ * ECC calculation.
+ *
+ * Workaround: Reset the parity registers before reading the
+ * actual data.
+ */
+ if (cpu_is_at32ap7000()) {
+ struct atmel_nand_host *host = chip->priv;
+ ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
+ }
+
/* read the page */
chip->read_buf(mtd, p, eccsize);
@@ -285,11 +326,11 @@ static int at91_nand_read_page(struct mtd_info *mtd,
*
* Detect and correct a 1 bit error for a page
*/
-static int at91_nand_correct(struct mtd_info *mtd, u_char *dat,
+static int atmel_nand_correct(struct mtd_info *mtd, u_char *dat,
u_char *read_ecc, u_char *isnull)
{
struct nand_chip *nand_chip = mtd->priv;
- struct at91_nand_host *host = nand_chip->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
unsigned int ecc_status;
unsigned int ecc_word, ecc_bit;
@@ -297,43 +338,43 @@ static int at91_nand_correct(struct mtd_info *mtd, u_char *dat,
ecc_status = ecc_readl(host->ecc, SR);
/* if there's no error */
- if (likely(!(ecc_status & AT91_ECC_RECERR)))
+ if (likely(!(ecc_status & ATMEL_ECC_RECERR)))
return 0;
/* get error bit offset (4 bits) */
- ecc_bit = ecc_readl(host->ecc, PR) & AT91_ECC_BITADDR;
+ ecc_bit = ecc_readl(host->ecc, PR) & ATMEL_ECC_BITADDR;
/* get word address (12 bits) */
- ecc_word = ecc_readl(host->ecc, PR) & AT91_ECC_WORDADDR;
+ ecc_word = ecc_readl(host->ecc, PR) & ATMEL_ECC_WORDADDR;
ecc_word >>= 4;
/* if there are multiple errors */
- if (ecc_status & AT91_ECC_MULERR) {
+ if (ecc_status & ATMEL_ECC_MULERR) {
/* check if it is a freshly erased block
* (filled with 0xff) */
- if ((ecc_bit == AT91_ECC_BITADDR)
- && (ecc_word == (AT91_ECC_WORDADDR >> 4))) {
+ if ((ecc_bit == ATMEL_ECC_BITADDR)
+ && (ecc_word == (ATMEL_ECC_WORDADDR >> 4))) {
/* the block has just been erased, return OK */
return 0;
}
/* it doesn't seems to be a freshly
* erased block.
* We can't correct so many errors */
- dev_dbg(host->dev, "at91_nand : multiple errors detected."
+ dev_dbg(host->dev, "atmel_nand : multiple errors detected."
" Unable to correct.\n");
return -EIO;
}
/* if there's a single bit error : we can correct it */
- if (ecc_status & AT91_ECC_ECCERR) {
+ if (ecc_status & ATMEL_ECC_ECCERR) {
/* there's nothing much to do here.
* the bit error is on the ECC itself.
*/
- dev_dbg(host->dev, "at91_nand : one bit error on ECC code."
+ dev_dbg(host->dev, "atmel_nand : one bit error on ECC code."
" Nothing to correct\n");
return 0;
}
- dev_dbg(host->dev, "at91_nand : one bit error on data."
+ dev_dbg(host->dev, "atmel_nand : one bit error on data."
" (word offset in the page :"
" 0x%x bit offset : 0x%x)\n",
ecc_word, ecc_bit);
@@ -345,14 +386,21 @@ static int at91_nand_correct(struct mtd_info *mtd, u_char *dat,
/* 8 bits words */
dat[ecc_word] ^= (1 << ecc_bit);
}
- dev_dbg(host->dev, "at91_nand : error corrected\n");
+ dev_dbg(host->dev, "atmel_nand : error corrected\n");
return 1;
}
/*
- * Enable HW ECC : unsused
+ * Enable HW ECC : unused on most chips
*/
-static void at91_nand_hwctl(struct mtd_info *mtd, int mode) { ; }
+static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
+{
+ if (cpu_is_at32ap7000()) {
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
+ }
+}
#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
@@ -361,9 +409,9 @@ static const char *part_probes[] = { "cmdlinepart", NULL };
/*
* Probe for the NAND device.
*/
-static int __init at91_nand_probe(struct platform_device *pdev)
+static int __init atmel_nand_probe(struct platform_device *pdev)
{
- struct at91_nand_host *host;
+ struct atmel_nand_host *host;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
struct resource *regs;
@@ -375,24 +423,24 @@ static int __init at91_nand_probe(struct platform_device *pdev)
int num_partitions = 0;
#endif
- /* Allocate memory for the device structure (and zero it) */
- host = kzalloc(sizeof(struct at91_nand_host), GFP_KERNEL);
- if (!host) {
- printk(KERN_ERR "at91_nand: failed to allocate device structure.\n");
- return -ENOMEM;
- }
-
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
- printk(KERN_ERR "at91_nand: can't get I/O resource mem\n");
+ printk(KERN_ERR "atmel_nand: can't get I/O resource mem\n");
return -ENXIO;
}
+ /* Allocate memory for the device structure (and zero it) */
+ host = kzalloc(sizeof(struct atmel_nand_host), GFP_KERNEL);
+ if (!host) {
+ printk(KERN_ERR "atmel_nand: failed to allocate device structure.\n");
+ return -ENOMEM;
+ }
+
host->io_base = ioremap(mem->start, mem->end - mem->start + 1);
if (host->io_base == NULL) {
- printk(KERN_ERR "at91_nand: ioremap failed\n");
- kfree(host);
- return -EIO;
+ printk(KERN_ERR "atmel_nand: ioremap failed\n");
+ res = -EIO;
+ goto err_nand_ioremap;
}
mtd = &host->mtd;
@@ -407,14 +455,14 @@ static int __init at91_nand_probe(struct platform_device *pdev)
/* Set address of NAND IO lines */
nand_chip->IO_ADDR_R = host->io_base;
nand_chip->IO_ADDR_W = host->io_base;
- nand_chip->cmd_ctrl = at91_nand_cmd_ctrl;
+ nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
if (host->board->rdy_pin)
- nand_chip->dev_ready = at91_nand_device_ready;
+ nand_chip->dev_ready = atmel_nand_device_ready;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!regs && hard_ecc) {
- printk(KERN_ERR "at91_nand: can't get I/O resource "
+ printk(KERN_ERR "atmel_nand: can't get I/O resource "
"regs\nFalling back on software ECC\n");
}
@@ -424,15 +472,15 @@ static int __init at91_nand_probe(struct platform_device *pdev)
if (hard_ecc && regs) {
host->ecc = ioremap(regs->start, regs->end - regs->start + 1);
if (host->ecc == NULL) {
- printk(KERN_ERR "at91_nand: ioremap failed\n");
+ printk(KERN_ERR "atmel_nand: ioremap failed\n");
res = -EIO;
goto err_ecc_ioremap;
}
nand_chip->ecc.mode = NAND_ECC_HW_SYNDROME;
- nand_chip->ecc.calculate = at91_nand_calculate;
- nand_chip->ecc.correct = at91_nand_correct;
- nand_chip->ecc.hwctl = at91_nand_hwctl;
- nand_chip->ecc.read_page = at91_nand_read_page;
+ nand_chip->ecc.calculate = atmel_nand_calculate;
+ nand_chip->ecc.correct = atmel_nand_correct;
+ nand_chip->ecc.hwctl = atmel_nand_hwctl;
+ nand_chip->ecc.read_page = atmel_nand_read_page;
nand_chip->ecc.bytes = 4;
nand_chip->ecc.prepad = 0;
nand_chip->ecc.postpad = 0;
@@ -440,24 +488,30 @@ static int __init at91_nand_probe(struct platform_device *pdev)
nand_chip->chip_delay = 20; /* 20us command delay time */
- if (host->board->bus_width_16) /* 16-bit bus width */
+ if (host->board->bus_width_16) { /* 16-bit bus width */
nand_chip->options |= NAND_BUSWIDTH_16;
+ nand_chip->read_buf = atmel_read_buf16;
+ nand_chip->write_buf = atmel_write_buf16;
+ } else {
+ nand_chip->read_buf = atmel_read_buf;
+ nand_chip->write_buf = atmel_write_buf;
+ }
platform_set_drvdata(pdev, host);
- at91_nand_enable(host);
+ atmel_nand_enable(host);
if (host->board->det_pin) {
- if (at91_get_gpio_value(host->board->det_pin)) {
- printk ("No SmartMedia card inserted.\n");
+ if (gpio_get_value(host->board->det_pin)) {
+ printk("No SmartMedia card inserted.\n");
res = ENXIO;
- goto out;
+ goto err_no_card;
}
}
/* first scan to find the device and get the page size */
if (nand_scan_ident(mtd, 1)) {
res = -ENXIO;
- goto out;
+ goto err_scan_ident;
}
if (nand_chip->ecc.mode == NAND_ECC_HW_SYNDROME) {
@@ -467,22 +521,22 @@ static int __init at91_nand_probe(struct platform_device *pdev)
/* set ECC page size and oob layout */
switch (mtd->writesize) {
case 512:
- nand_chip->ecc.layout = &at91_oobinfo_small;
- nand_chip->ecc.read_oob = at91_nand_read_oob_512;
- nand_chip->ecc.write_oob = at91_nand_write_oob_512;
- ecc_writel(host->ecc, MR, AT91_ECC_PAGESIZE_528);
+ nand_chip->ecc.layout = &atmel_oobinfo_small;
+ nand_chip->ecc.read_oob = atmel_nand_read_oob_512;
+ nand_chip->ecc.write_oob = atmel_nand_write_oob_512;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
break;
case 1024:
- nand_chip->ecc.layout = &at91_oobinfo_large;
- ecc_writel(host->ecc, MR, AT91_ECC_PAGESIZE_1056);
+ nand_chip->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
break;
case 2048:
- nand_chip->ecc.layout = &at91_oobinfo_large;
- ecc_writel(host->ecc, MR, AT91_ECC_PAGESIZE_2112);
+ nand_chip->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
break;
case 4096:
- nand_chip->ecc.layout = &at91_oobinfo_large;
- ecc_writel(host->ecc, MR, AT91_ECC_PAGESIZE_4224);
+ nand_chip->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
break;
default:
/* page size not handled by HW ECC */
@@ -502,12 +556,12 @@ static int __init at91_nand_probe(struct platform_device *pdev)
/* second phase scan */
if (nand_scan_tail(mtd)) {
res = -ENXIO;
- goto out;
+ goto err_scan_tail;
}
#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
- mtd->name = "at91_nand";
+ mtd->name = "atmel_nand";
num_partitions = parse_mtd_partitions(mtd, part_probes,
&partitions, 0);
#endif
@@ -516,9 +570,9 @@ static int __init at91_nand_probe(struct platform_device *pdev)
&num_partitions);
if ((!partitions) || (num_partitions == 0)) {
- printk(KERN_ERR "at91_nand: No parititions defined, or unsupported device.\n");
+ printk(KERN_ERR "atmel_nand: No parititions defined, or unsupported device.\n");
res = ENXIO;
- goto release;
+ goto err_no_partitions;
}
res = add_mtd_partitions(mtd, partitions, num_partitions);
@@ -530,17 +584,19 @@ static int __init at91_nand_probe(struct platform_device *pdev)
return res;
#ifdef CONFIG_MTD_PARTITIONS
-release:
+err_no_partitions:
#endif
nand_release(mtd);
-
-out:
- iounmap(host->ecc);
-
-err_ecc_ioremap:
- at91_nand_disable(host);
+err_scan_tail:
+err_scan_ident:
+err_no_card:
+ atmel_nand_disable(host);
platform_set_drvdata(pdev, NULL);
+ if (host->ecc)
+ iounmap(host->ecc);
+err_ecc_ioremap:
iounmap(host->io_base);
+err_nand_ioremap:
kfree(host);
return res;
}
@@ -548,47 +604,47 @@ err_ecc_ioremap:
/*
* Remove a NAND device.
*/
-static int __devexit at91_nand_remove(struct platform_device *pdev)
+static int __exit atmel_nand_remove(struct platform_device *pdev)
{
- struct at91_nand_host *host = platform_get_drvdata(pdev);
+ struct atmel_nand_host *host = platform_get_drvdata(pdev);
struct mtd_info *mtd = &host->mtd;
nand_release(mtd);
- at91_nand_disable(host);
+ atmel_nand_disable(host);
+ if (host->ecc)
+ iounmap(host->ecc);
iounmap(host->io_base);
- iounmap(host->ecc);
kfree(host);
return 0;
}
-static struct platform_driver at91_nand_driver = {
- .probe = at91_nand_probe,
- .remove = at91_nand_remove,
+static struct platform_driver atmel_nand_driver = {
+ .remove = __exit_p(atmel_nand_remove),
.driver = {
- .name = "at91_nand",
+ .name = "atmel_nand",
.owner = THIS_MODULE,
},
};
-static int __init at91_nand_init(void)
+static int __init atmel_nand_init(void)
{
- return platform_driver_register(&at91_nand_driver);
+ return platform_driver_probe(&atmel_nand_driver, atmel_nand_probe);
}
-static void __exit at91_nand_exit(void)
+static void __exit atmel_nand_exit(void)
{
- platform_driver_unregister(&at91_nand_driver);
+ platform_driver_unregister(&atmel_nand_driver);
}
-module_init(at91_nand_init);
-module_exit(at91_nand_exit);
+module_init(atmel_nand_init);
+module_exit(atmel_nand_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rick Bronson");
-MODULE_DESCRIPTION("NAND/SmartMedia driver for AT91RM9200 / AT91SAM9");
-MODULE_ALIAS("platform:at91_nand");
+MODULE_DESCRIPTION("NAND/SmartMedia driver for AT91 / AVR32");
+MODULE_ALIAS("platform:atmel_nand");
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h
new file mode 100644
index 000000000000..1ee7f993db1c
--- /dev/null
+++ b/drivers/mtd/nand/atmel_nand_ecc.h
@@ -0,0 +1,36 @@
+/*
+ * Error Corrected Code Controller (ECC) - System peripheral registers.
+ * Based on AT91SAM9260 datasheet revision B.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef ATMEL_NAND_ECC_H
+#define ATMEL_NAND_ECC_H
+
+#define ATMEL_ECC_CR 0x00 /* Control register */
+#define ATMEL_ECC_RST (1 << 0) /* Reset parity */
+
+#define ATMEL_ECC_MR 0x04 /* Mode register */
+#define ATMEL_ECC_PAGESIZE (3 << 0) /* Page Size */
+#define ATMEL_ECC_PAGESIZE_528 (0)
+#define ATMEL_ECC_PAGESIZE_1056 (1)
+#define ATMEL_ECC_PAGESIZE_2112 (2)
+#define ATMEL_ECC_PAGESIZE_4224 (3)
+
+#define ATMEL_ECC_SR 0x08 /* Status register */
+#define ATMEL_ECC_RECERR (1 << 0) /* Recoverable Error */
+#define ATMEL_ECC_ECCERR (1 << 1) /* ECC Single Bit Error */
+#define ATMEL_ECC_MULERR (1 << 2) /* Multiple Errors */
+
+#define ATMEL_ECC_PR 0x0c /* Parity register */
+#define ATMEL_ECC_BITADDR (0xf << 0) /* Bit Error Address */
+#define ATMEL_ECC_WORDADDR (0xfff << 4) /* Word Error Address */
+
+#define ATMEL_ECC_NPR 0x10 /* NParity register */
+#define ATMEL_ECC_NPARITY (0xffff << 0) /* NParity */
+
+#endif
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 09e421a96893..761946ea45b1 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -3,8 +3,6 @@
*
* Copyright (C) 2004 Embedded Edge, LLC
*
- * $Id: au1550nd.c,v 1.13 2005/11/07 11:14:30 gleixner Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -604,8 +602,6 @@ module_init(au1xxx_nand_init);
*/
static void __exit au1550_cleanup(void)
{
- struct nand_chip *this = (struct nand_chip *)&au1550_mtd[1];
-
/* Release resources, unregister device */
nand_release(au1550_mtd);
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
index dd38011ee0b7..553dd7e9b41c 100644
--- a/drivers/mtd/nand/autcpu12.c
+++ b/drivers/mtd/nand/autcpu12.c
@@ -6,8 +6,6 @@
* Derived from drivers/mtd/spia.c
* Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
*
- * $Id: autcpu12.c,v 1.23 2005/11/07 11:14:30 gleixner Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index da6ceaa80ba1..95345d051579 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -626,10 +626,12 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
{
struct mtd_info *mtd;
struct cafe_priv *cafe;
- struct mtd_partition *parts;
uint32_t ctrl;
- int nr_parts;
int err = 0;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition *parts;
+ int nr_parts;
+#endif
/* Very old versions shared the same PCI ident for all three
functions on the chip. Verify the class too... */
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 0e72153b3297..765d4f0f7c86 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -15,8 +15,6 @@
* converted to the generic Reed-Solomon library by Thomas Gleixner <tglx@linutronix.de>
*
* Interface to generic NAND code for M-Systems DiskOnChip devices
- *
- * $Id: diskonchip.c,v 1.55 2005/11/07 11:14:30 gleixner Exp $
*/
#include <linux/kernel.h>
@@ -54,8 +52,6 @@ static unsigned long __initdata doc_locations[] = {
0xe0000, 0xe2000, 0xe4000, 0xe6000,
0xe8000, 0xea000, 0xec000, 0xee000,
#endif /* CONFIG_MTD_DOCPROBE_HIGH */
-#elif defined(__PPC__)
- 0xe4000000,
#else
#warning Unknown architecture for DiskOnChip. No default probe locations defined
#endif
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
index ba67bbec20d3..387e4352903e 100644
--- a/drivers/mtd/nand/edb7312.c
+++ b/drivers/mtd/nand/edb7312.c
@@ -6,8 +6,6 @@
* Derived from drivers/mtd/nand/autcpu12.c
* Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
*
- * $Id: edb7312.c,v 1.12 2005/11/07 11:14:30 gleixner Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
diff --git a/drivers/mtd/nand/excite_nandflash.c b/drivers/mtd/nand/excite_nandflash.c
index bed87290decc..ced14b5294d5 100644
--- a/drivers/mtd/nand/excite_nandflash.c
+++ b/drivers/mtd/nand/excite_nandflash.c
@@ -209,7 +209,7 @@ static int __init excite_nand_probe(struct device *dev)
if (likely(!scan_res)) {
DEBUG(MTD_DEBUG_LEVEL2, "%s: register partitions\n", module_id);
add_mtd_partitions(&drvdata->board_mtd, partition_info,
- sizeof partition_info / sizeof partition_info[0]);
+ ARRAY_SIZE(partition_info));
} else {
iounmap(drvdata->regs);
kfree(drvdata);
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 4b69aacdf5ca..9dff51351f4f 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -89,7 +89,6 @@ static struct nand_ecclayout fsl_elbc_oob_sp_eccm0 = {
.eccbytes = 3,
.eccpos = {6, 7, 8},
.oobfree = { {0, 5}, {9, 7} },
- .oobavail = 12,
};
/* Small Page FLASH with FMR[ECCM] = 1 */
@@ -97,7 +96,6 @@ static struct nand_ecclayout fsl_elbc_oob_sp_eccm1 = {
.eccbytes = 3,
.eccpos = {8, 9, 10},
.oobfree = { {0, 5}, {6, 2}, {11, 5} },
- .oobavail = 12,
};
/* Large Page FLASH with FMR[ECCM] = 0 */
@@ -105,7 +103,6 @@ static struct nand_ecclayout fsl_elbc_oob_lp_eccm0 = {
.eccbytes = 12,
.eccpos = {6, 7, 8, 22, 23, 24, 38, 39, 40, 54, 55, 56},
.oobfree = { {1, 5}, {9, 13}, {25, 13}, {41, 13}, {57, 7} },
- .oobavail = 48,
};
/* Large Page FLASH with FMR[ECCM] = 1 */
@@ -113,7 +110,48 @@ static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = {
.eccbytes = 12,
.eccpos = {8, 9, 10, 24, 25, 26, 40, 41, 42, 56, 57, 58},
.oobfree = { {1, 7}, {11, 13}, {27, 13}, {43, 13}, {59, 5} },
- .oobavail = 48,
+};
+
+/*
+ * fsl_elbc_oob_lp_eccm* specify that LP NAND's OOB free area starts at offset
+ * 1, so we have to adjust bad block pattern. This pattern should be used for
+ * x8 chips only. So far hardware does not support x16 chips anyway.
+ */
+static u8 scan_ff_pattern[] = { 0xff, };
+
+static struct nand_bbt_descr largepage_memorybased = {
+ .options = 0,
+ .offs = 0,
+ .len = 1,
+ .pattern = scan_ff_pattern,
+};
+
+/*
+ * ELBC may use HW ECC, so the OOB offsets that the NAND core uses for the
+ * BBT by default would interfere with the ECC positions; that's why we
+ * implement our own descriptors. OOB region {11, 5} works for both SP and
+ * LP chips, with ECCM = 1 and ECCM = 0.
+ */
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 11,
+ .len = 4,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 11,
+ .len = 4,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
};
/*=================================*/
@@ -687,8 +725,7 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
&fsl_elbc_oob_lp_eccm1 :
&fsl_elbc_oob_lp_eccm0;
- mtd->ecclayout = chip->ecc.layout;
- mtd->oobavail = chip->ecc.layout->oobavail;
+ chip->badblock_pattern = &largepage_memorybased;
}
} else {
dev_err(ctrl->dev,
@@ -752,8 +789,12 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
chip->cmdfunc = fsl_elbc_cmdfunc;
chip->waitfunc = fsl_elbc_wait;
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+
/* set up nand options */
- chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
+ chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR |
+ NAND_USE_FLASH_BBT;
chip->controller = &ctrl->controller;
chip->priv = priv;
@@ -795,8 +836,8 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
return 0;
}
-static int fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
- struct device_node *node)
+static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
+ struct device_node *node)
{
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
struct fsl_elbc_mtd *priv;
@@ -917,7 +958,7 @@ static int __devinit fsl_elbc_ctrl_init(struct fsl_elbc_ctrl *ctrl)
return 0;
}
-static int __devexit fsl_elbc_ctrl_remove(struct of_device *ofdev)
+static int fsl_elbc_ctrl_remove(struct of_device *ofdev)
{
struct fsl_elbc_ctrl *ctrl = dev_get_drvdata(&ofdev->dev);
int i;
@@ -1041,7 +1082,7 @@ static struct of_platform_driver fsl_elbc_ctrl_driver = {
},
.match_table = fsl_elbc_match,
.probe = fsl_elbc_ctrl_probe,
- .remove = __devexit_p(fsl_elbc_ctrl_remove),
+ .remove = fsl_elbc_ctrl_remove,
};
static int __init fsl_elbc_init(void)
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index 2d585d2d090c..9e59de501c2e 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -7,8 +7,6 @@
* Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
* Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
*
- * $Id: h1910.c,v 1.6 2005/11/07 11:14:30 gleixner Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index ba1bdf787323..d1129bae6c27 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -798,6 +798,87 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
+ * nand_read_subpage - [REPLACABLE] software ecc based sub-page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @data_offs: offset of requested data within the page
+ * @readlen: data length
+ * @bufpoi: buffer to store read data
+ */
+static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi)
+{
+ int start_step, end_step, num_steps;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *p;
+ int data_col_addr, i, gaps = 0;
+ int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
+ int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
+
+ /* Column address within the page, aligned to the ECC size (256 bytes). */
+ start_step = data_offs / chip->ecc.size;
+ end_step = (data_offs + readlen - 1) / chip->ecc.size;
+ num_steps = end_step - start_step + 1;
+
+ /* Data size aligned to the ECC step size (ecc.size) */
+ datafrag_len = num_steps * chip->ecc.size;
+ eccfrag_len = num_steps * chip->ecc.bytes;
+
+ data_col_addr = start_step * chip->ecc.size;
+ /* If the read does not start at column 0 of the page */
+ if (data_col_addr != 0)
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
+
+ p = bufpoi + data_col_addr;
+ chip->read_buf(mtd, p, datafrag_len);
+
+ /* Calculate ECC */
+ for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
+ chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
+
+ /* Fetching the ECC bytes is faster when they sit at contiguous
+ offsets (as given by ecc.pos) and can be read in one go. Make
+ sure here that there are no gaps in the ECC positions */
+ for (i = 0; i < eccfrag_len - 1; i++) {
+ if (eccpos[i + start_step * chip->ecc.bytes] + 1 !=
+ eccpos[i + start_step * chip->ecc.bytes + 1]) {
+ gaps = 1;
+ break;
+ }
+ }
+ if (gaps) {
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ } else {
+ /* send the command to read the particular ecc bytes */
+ /* take care about buswidth alignment in read_buf */
+ aligned_pos = eccpos[start_step * chip->ecc.bytes] & ~(busw - 1);
+ aligned_len = eccfrag_len;
+ if (eccpos[start_step * chip->ecc.bytes] & (busw - 1))
+ aligned_len++;
+ if (eccpos[(start_step + num_steps) * chip->ecc.bytes] & (busw - 1))
+ aligned_len++;
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize + aligned_pos, -1);
+ chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
+ }
+
+ for (i = 0; i < eccfrag_len; i++)
+ chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + start_step * chip->ecc.bytes]];
+
+ p = bufpoi + data_col_addr;
+ for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
+ int stat;
+
+ stat = chip->ecc.correct(mtd, p, &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
+ if (stat == -1)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ }
+ return 0;
+}
+
+/**
* nand_read_page_hwecc - [REPLACABLE] hardware ecc based page read function
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -994,6 +1075,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
/* Now read the page into the buffer */
if (unlikely(ops->mode == MTD_OOB_RAW))
ret = chip->ecc.read_page_raw(mtd, chip, bufpoi);
+ else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
+ ret = chip->ecc.read_subpage(mtd, chip, col, bytes, bufpoi);
else
ret = chip->ecc.read_page(mtd, chip, bufpoi);
if (ret < 0)
@@ -1001,7 +1084,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
/* Transfer not aligned data */
if (!aligned) {
- chip->pagebuf = realpage;
+ if (!NAND_SUBPAGE_READ(chip) && !oob)
+ chip->pagebuf = realpage;
memcpy(buf, chip->buffers->databuf + col, bytes);
}
@@ -2521,6 +2605,7 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->ecc.calculate = nand_calculate_ecc;
chip->ecc.correct = nand_correct_data;
chip->ecc.read_page = nand_read_page_swecc;
+ chip->ecc.read_subpage = nand_read_subpage;
chip->ecc.write_page = nand_write_page_swecc;
chip->ecc.read_oob = nand_read_oob_std;
chip->ecc.write_oob = nand_write_oob_std;
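
/*
 * Illustrative sketch (not from the patch): the fragment arithmetic used
 * by nand_read_subpage() above, as a standalone program with one worked
 * case (a 600-byte read at offset 300, 256-byte ECC steps, 3 ECC bytes
 * per step). The numbers are only an example.
 */
#include <stdio.h>

int main(void)
{
	const int ecc_size = 256, ecc_bytes = 3;	/* soft-ECC defaults */
	int data_offs = 300, readlen = 600;		/* requested sub-page read */

	int start_step = data_offs / ecc_size;			/* 1 */
	int end_step   = (data_offs + readlen - 1) / ecc_size;	/* 3 */
	int num_steps  = end_step - start_step + 1;		/* 3 */

	int datafrag_len  = num_steps * ecc_size;	/* 768 data bytes      */
	int eccfrag_len   = num_steps * ecc_bytes;	/* 9 bytes of ECC code */
	int data_col_addr = start_step * ecc_size;	/* read from column 256 */

	printf("steps %d..%d: read %d data + %d ecc bytes from column %d\n",
	       start_step, end_step, datafrag_len, eccfrag_len, data_col_addr);
	return 0;
}
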
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 5e121ceaa598..0b1c48595f12 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -6,8 +6,6 @@
*
* Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
*
- * $Id: nand_bbt.c,v 1.36 2005/11/07 11:14:30 gleixner Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 9003a135e050..918a806a8471 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -9,8 +9,6 @@
*
* Copyright (C) 2006 Thomas Gleixner <tglx@linutronix.de>
*
- * $Id: nand_ecc.c,v 1.15 2005/11/07 11:14:30 gleixner Exp $
- *
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 or (at your option) any
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index a3e3ab0185d5..69ee2c90eb0b 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -3,8 +3,6 @@
*
* Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de)
*
- * $Id: nand_ids.c,v 1.16 2005/11/07 11:14:31 gleixner Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index bb885d1fcab5..ecd70e2504f6 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -21,8 +21,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
- *
- * $Id: nandsim.c,v 1.8 2005/03/19 15:33:56 dedekind Exp $
*/
#include <linux/init.h>
@@ -39,6 +37,7 @@
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/random.h>
+#include <asm/div64.h>
/* Default simulator parameters values */
#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
@@ -298,11 +297,11 @@ struct nandsim {
/* NAND flash "geometry" */
struct nandsin_geometry {
- uint32_t totsz; /* total flash size, bytes */
+ uint64_t totsz; /* total flash size, bytes */
uint32_t secsz; /* flash sector (erase block) size, bytes */
uint pgsz; /* NAND flash page size, bytes */
uint oobsz; /* page OOB area size, bytes */
- uint32_t totszoob; /* total flash size including OOB, bytes */
+ uint64_t totszoob; /* total flash size including OOB, bytes */
uint pgszoob; /* page size including OOB , bytes*/
uint secszoob; /* sector size including OOB, bytes */
uint pgnum; /* total number of pages */
@@ -459,6 +458,12 @@ static char *get_partition_name(int i)
return kstrdup(buf, GFP_KERNEL);
}
+static u_int64_t divide(u_int64_t n, u_int32_t d)
+{
+ do_div(n, d);
+ return n;
+}
+
/*
* Initialize the nandsim structure.
*
@@ -469,8 +474,8 @@ static int init_nandsim(struct mtd_info *mtd)
struct nand_chip *chip = (struct nand_chip *)mtd->priv;
struct nandsim *ns = (struct nandsim *)(chip->priv);
int i, ret = 0;
- u_int32_t remains;
- u_int32_t next_offset;
+ u_int64_t remains;
+ u_int64_t next_offset;
if (NS_IS_INITIALIZED(ns)) {
NS_ERR("init_nandsim: nandsim is already initialized\n");
@@ -487,8 +492,8 @@ static int init_nandsim(struct mtd_info *mtd)
ns->geom.oobsz = mtd->oobsize;
ns->geom.secsz = mtd->erasesize;
ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
- ns->geom.pgnum = ns->geom.totsz / ns->geom.pgsz;
- ns->geom.totszoob = ns->geom.totsz + ns->geom.pgnum * ns->geom.oobsz;
+ ns->geom.pgnum = divide(ns->geom.totsz, ns->geom.pgsz);
+ ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
ns->geom.secshift = ffs(ns->geom.secsz) - 1;
ns->geom.pgshift = chip->page_shift;
ns->geom.oobshift = ffs(ns->geom.oobsz) - 1;
@@ -511,7 +516,7 @@ static int init_nandsim(struct mtd_info *mtd)
}
if (ns->options & OPT_SMALLPAGE) {
- if (ns->geom.totsz < (32 << 20)) {
+ if (ns->geom.totsz <= (32 << 20)) {
ns->geom.pgaddrbytes = 3;
ns->geom.secaddrbytes = 2;
} else {
@@ -537,15 +542,16 @@ static int init_nandsim(struct mtd_info *mtd)
remains = ns->geom.totsz;
next_offset = 0;
for (i = 0; i < parts_num; ++i) {
- unsigned long part = parts[i];
- if (!part || part > remains / ns->geom.secsz) {
+ u_int64_t part_sz = (u_int64_t)parts[i] * ns->geom.secsz;
+
+ if (!part_sz || part_sz > remains) {
NS_ERR("bad partition size.\n");
ret = -EINVAL;
goto error;
}
ns->partitions[i].name = get_partition_name(i);
ns->partitions[i].offset = next_offset;
- ns->partitions[i].size = part * ns->geom.secsz;
+ ns->partitions[i].size = part_sz;
next_offset += ns->partitions[i].size;
remains -= ns->partitions[i].size;
}
@@ -573,7 +579,7 @@ static int init_nandsim(struct mtd_info *mtd)
if (ns->busw == 16)
NS_WARN("16-bit flashes support wasn't tested\n");
- printk("flash size: %u MiB\n", ns->geom.totsz >> 20);
+ printk("flash size: %llu MiB\n", ns->geom.totsz >> 20);
printk("page size: %u bytes\n", ns->geom.pgsz);
printk("OOB area size: %u bytes\n", ns->geom.oobsz);
printk("sector size: %u KiB\n", ns->geom.secsz >> 10);
@@ -583,7 +589,7 @@ static int init_nandsim(struct mtd_info *mtd)
printk("bits in sector size: %u\n", ns->geom.secshift);
printk("bits in page size: %u\n", ns->geom.pgshift);
printk("bits in OOB size: %u\n", ns->geom.oobshift);
- printk("flash size with OOB: %u KiB\n", ns->geom.totszoob >> 10);
+ printk("flash size with OOB: %llu KiB\n", ns->geom.totszoob >> 10);
printk("page address bytes: %u\n", ns->geom.pgaddrbytes);
printk("sector address bytes: %u\n", ns->geom.secaddrbytes);
printk("options: %#x\n", ns->options);
@@ -825,7 +831,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)
if (!rptwear)
return 0;
- wear_eb_count = mtd->size / mtd->erasesize;
+ wear_eb_count = divide(mtd->size, mtd->erasesize);
mem = wear_eb_count * sizeof(unsigned long);
if (mem / sizeof(unsigned long) != wear_eb_count) {
NS_ERR("Too many erase blocks for wear reporting\n");
@@ -2013,7 +2019,7 @@ static int __init ns_init_module(void)
}
if (overridesize) {
- u_int32_t new_size = nsmtd->erasesize << overridesize;
+ u_int64_t new_size = (u_int64_t)nsmtd->erasesize << overridesize;
if (new_size >> overridesize != nsmtd->erasesize) {
NS_ERR("overridesize is too big\n");
goto err_exit;
@@ -2021,7 +2027,8 @@ static int __init ns_init_module(void)
/* N.B. This relies on nand_scan not doing anything with the size before we change it */
nsmtd->size = new_size;
chip->chipsize = new_size;
- chip->chip_shift = ffs(new_size) - 1;
+ chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1;
+ chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
}
if ((retval = setup_wear_reporting(nsmtd)) != 0)
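
/*
 * Illustrative sketch (not from the patch): why nandsim wraps do_div() in
 * a divide() helper above. The kernel does not link the compiler's 64-bit
 * division helpers, so u64 divisions go through do_div(), which turns its
 * first argument into the quotient and returns the remainder. The name
 * "example_divide" is invented.
 */
#include <linux/types.h>
#include <asm/div64.h>

static u64 example_divide(u64 n, u32 d)
{
	u32 rem;

	rem = do_div(n, d);	/* n now holds the quotient */
	(void)rem;		/* remainder unused here */
	return n;
}
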
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index 082073acf20f..cc8658431851 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -6,8 +6,6 @@
* Derived from drivers/mtd/nand/edb7312.c
*
*
- * $Id: ppchameleonevb.c,v 1.7 2005/11/07 11:14:31 gleixner Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index 26f88215bc47..a033c4cd8e16 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -6,8 +6,6 @@
* Derived from drivers/mtd/nand/spia.c
* Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
*
- * $Id: rtc_from4.c,v 1.10 2005/11/07 11:14:31 gleixner Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index b34a460ab679..556139ed1fdf 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -1,26 +1,10 @@
/* linux/drivers/mtd/nand/s3c2410.c
*
- * Copyright (c) 2004,2005 Simtec Electronics
- * http://www.simtec.co.uk/products/SWLINUX/
+ * Copyright © 2004-2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
* Ben Dooks <ben@simtec.co.uk>
*
- * Samsung S3C2410/S3C240 NAND driver
- *
- * Changelog:
- * 21-Sep-2004 BJD Initial version
- * 23-Sep-2004 BJD Multiple device support
- * 28-Sep-2004 BJD Fixed ECC placement for Hardware mode
- * 12-Oct-2004 BJD Fixed errors in use of platform data
- * 18-Feb-2005 BJD Fix sparse errors
- * 14-Mar-2005 BJD Applied tglx's code reduction patch
- * 02-May-2005 BJD Fixed s3c2440 support
- * 02-May-2005 BJD Reduced hwcontrol decode
- * 20-Jun-2005 BJD Updated s3c2440 support, fixed timing bug
- * 08-Jul-2005 BJD Fix OOPS when no platform data supplied
- * 20-Oct-2005 BJD Fix timing calculation bug
- * 14-Jan-2006 BJD Allow clock to be stopped when idle
- *
- * $Id: s3c2410.c,v 1.23 2006/04/01 18:06:29 bjd Exp $
+ * Samsung S3C2410/S3C2440/S3C2412 NAND driver
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -52,6 +36,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/clk.h>
+#include <linux/cpufreq.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -120,8 +105,13 @@ struct s3c2410_nand_info {
int sel_bit;
int mtd_count;
unsigned long save_sel;
+ unsigned long clk_rate;
enum s3c_cpu_type cpu_type;
+
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block freq_transition;
+#endif
};
/* conversion functions */
@@ -179,17 +169,18 @@ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
/* controller setup */
-static int s3c2410_nand_inithw(struct s3c2410_nand_info *info,
- struct platform_device *pdev)
+static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
{
- struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
- unsigned long clkrate = clk_get_rate(info->clk);
+ struct s3c2410_platform_nand *plat = info->platform;
int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
int tacls, twrph0, twrph1;
- unsigned long cfg = 0;
+ unsigned long clkrate = clk_get_rate(info->clk);
+ unsigned long set, cfg, mask;
+ unsigned long flags;
/* calculate the timing information for the controller */
+ info->clk_rate = clkrate;
clkrate /= 1000; /* turn clock into kHz for ease of use */
if (plat != NULL) {
@@ -211,28 +202,69 @@ static int s3c2410_nand_inithw(struct s3c2410_nand_info *info,
dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate));
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ mask = (S3C2410_NFCONF_TACLS(3) |
+ S3C2410_NFCONF_TWRPH0(7) |
+ S3C2410_NFCONF_TWRPH1(7));
+ set = S3C2410_NFCONF_EN;
+ set |= S3C2410_NFCONF_TACLS(tacls - 1);
+ set |= S3C2410_NFCONF_TWRPH0(twrph0 - 1);
+ set |= S3C2410_NFCONF_TWRPH1(twrph1 - 1);
+ break;
+
+ case TYPE_S3C2440:
+ case TYPE_S3C2412:
+ mask = (S3C2410_NFCONF_TACLS(tacls_max - 1) |
+ S3C2410_NFCONF_TWRPH0(7) |
+ S3C2410_NFCONF_TWRPH1(7));
+
+ set = S3C2440_NFCONF_TACLS(tacls - 1);
+ set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
+ set |= S3C2440_NFCONF_TWRPH1(twrph1 - 1);
+ break;
+
+ default:
+ /* keep compiler happy */
+ mask = 0;
+ set = 0;
+ BUG();
+ }
+
+ dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
+
+ local_irq_save(flags);
+
+ cfg = readl(info->regs + S3C2410_NFCONF);
+ cfg &= ~mask;
+ cfg |= set;
+ writel(cfg, info->regs + S3C2410_NFCONF);
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
+{
+ int ret;
+
+ ret = s3c2410_nand_setrate(info);
+ if (ret < 0)
+ return ret;
+
switch (info->cpu_type) {
case TYPE_S3C2410:
- cfg = S3C2410_NFCONF_EN;
- cfg |= S3C2410_NFCONF_TACLS(tacls - 1);
- cfg |= S3C2410_NFCONF_TWRPH0(twrph0 - 1);
- cfg |= S3C2410_NFCONF_TWRPH1(twrph1 - 1);
+ default:
break;
case TYPE_S3C2440:
case TYPE_S3C2412:
- cfg = S3C2440_NFCONF_TACLS(tacls - 1);
- cfg |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
- cfg |= S3C2440_NFCONF_TWRPH1(twrph1 - 1);
-
/* enable the controller and de-assert nFCE */
writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
}
- dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
-
- writel(cfg, info->regs + S3C2410_NFCONF);
return 0;
}
@@ -513,6 +545,52 @@ static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int
writesl(info->regs + S3C2440_NFDATA, buf, len / 4);
}
+/* cpufreq driver support */
+
+#ifdef CONFIG_CPU_FREQ
+
+static int s3c2410_nand_cpufreq_transition(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct s3c2410_nand_info *info;
+ unsigned long newclk;
+
+ info = container_of(nb, struct s3c2410_nand_info, freq_transition);
+ newclk = clk_get_rate(info->clk);
+
+ if ((val == CPUFREQ_POSTCHANGE && newclk < info->clk_rate) ||
+ (val == CPUFREQ_PRECHANGE && newclk > info->clk_rate)) {
+ s3c2410_nand_setrate(info);
+ }
+
+ return 0;
+}
+
+static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
+{
+ info->freq_transition.notifier_call = s3c2410_nand_cpufreq_transition;
+
+ return cpufreq_register_notifier(&info->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
+{
+ cpufreq_unregister_notifier(&info->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+#else
+static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
+{
+ return 0;
+}
+
+static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
+{
+}
+#endif
+
/* device management functions */
static int s3c2410_nand_remove(struct platform_device *pdev)
@@ -524,9 +602,10 @@ static int s3c2410_nand_remove(struct platform_device *pdev)
if (info == NULL)
return 0;
- /* first thing we need to do is release all our mtds
- * and their partitions, then go through freeing the
- * resources used
+ s3c2410_nand_cpufreq_deregister(info);
+
+ /* Release all our mtds and their partitions, then go through
+ * freeing the resources used
*/
if (info->mtds != NULL) {
@@ -691,7 +770,8 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
{
struct nand_chip *chip = &nmtd->chip;
- printk("%s: chip %p: %d\n", __func__, chip, chip->page_shift);
+ dev_dbg(info->device, "chip %p => page shift %d\n",
+ chip, chip->page_shift);
if (hardware_ecc) {
/* change the behaviour depending on whether we are using
@@ -784,7 +864,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
/* initialise the hardware */
- err = s3c2410_nand_inithw(info, pdev);
+ err = s3c2410_nand_inithw(info);
if (err != 0)
goto exit_error;
@@ -827,6 +907,12 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
sets++;
}
+ err = s3c2410_nand_cpufreq_register(info);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to init cpufreq support\n");
+ goto exit_error;
+ }
+
if (allow_clk_stop(info)) {
dev_info(&pdev->dev, "clock idle support enabled\n");
clk_disable(info->clk);
@@ -874,7 +960,7 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
if (info) {
clk_enable(info->clk);
- s3c2410_nand_inithw(info, dev);
+ s3c2410_nand_inithw(info);
/* Restore the state of the nFCE line. */
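
For reference only (not part of the patch): the s3c2410 hunks above follow the usual cpufreq transition-notifier pattern for a platform driver whose timings are derived from a bus clock. A minimal, hypothetical sketch of that pattern is below; every identifier prefixed foo_ is illustrative.

	#include <linux/clk.h>
	#include <linux/cpufreq.h>
	#include <linux/kernel.h>
	#include <linux/notifier.h>

	struct foo_info {
		struct clk *clk;
		unsigned long clk_rate;	/* rate the current timings were computed for */
		struct notifier_block freq_transition;
	};

	static int foo_cpufreq_transition(struct notifier_block *nb,
					  unsigned long val, void *data)
	{
		struct foo_info *info =
			container_of(nb, struct foo_info, freq_transition);
		unsigned long newclk = clk_get_rate(info->clk);

		/* Reprogram before the clock speeds up and after it slows down,
		 * so the hardware never runs with timings that are too tight. */
		if ((val == CPUFREQ_PRECHANGE && newclk > info->clk_rate) ||
		    (val == CPUFREQ_POSTCHANGE && newclk < info->clk_rate)) {
			/* recompute and write the timing registers here, e.g. via
			 * a foo_setrate() helper, then record the new rate */
			info->clk_rate = newclk;
		}

		return 0;
	}

	static int foo_cpufreq_register(struct foo_info *info)
	{
		/* called from probe(); the matching cpufreq_unregister_notifier()
		 * belongs in remove() */
		info->freq_transition.notifier_call = foo_cpufreq_transition;
		return cpufreq_register_notifier(&info->freq_transition,
						 CPUFREQ_TRANSITION_NOTIFIER);
	}
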
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 033f8800b1e6..6dba2fb66ae5 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -3,8 +3,6 @@
*
* Copyright (C) 2004 Richard Purdie
*
- * $Id: sharpsl.c,v 1.7 2005/11/07 11:14:31 gleixner Exp $
- *
* Based on Sharp's NAND driver sharp_sl.c
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/mtd/nand/spia.c b/drivers/mtd/nand/spia.c
index 1f6d429b1583..0cc6d0acb8fe 100644
--- a/drivers/mtd/nand/spia.c
+++ b/drivers/mtd/nand/spia.c
@@ -8,8 +8,6 @@
* to controllines (due to change in nand.c)
* page_cache added
*
- * $Id: spia.c,v 1.25 2005/11/07 11:14:31 gleixner Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
diff --git a/drivers/mtd/nand/toto.c b/drivers/mtd/nand/toto.c
index f9e2d4a0ab8c..bbf492e6830d 100644
--- a/drivers/mtd/nand/toto.c
+++ b/drivers/mtd/nand/toto.c
@@ -14,8 +14,6 @@
* Overview:
* This is a device driver for the NAND flash device found on the
* TI fido board. It supports 32MiB and 64MiB cards
- *
- * $Id: toto.c,v 1.5 2005/11/07 11:14:31 gleixner Exp $
*/
#include <linux/slab.h>
diff --git a/drivers/mtd/nand/ts7250.c b/drivers/mtd/nand/ts7250.c
index f40081069ab2..807a72752eeb 100644
--- a/drivers/mtd/nand/ts7250.c
+++ b/drivers/mtd/nand/ts7250.c
@@ -9,8 +9,6 @@
* Derived from drivers/mtd/nand/autcpu12.c
* Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
*
- * $Id: ts7250.c,v 1.4 2004/12/30 22:02:07 joff Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index 0c9ce19ea27a..320b929abe79 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -1,7 +1,6 @@
/* Linux driver for NAND Flash Translation Layer */
/* (c) 1999 Machine Vision Holdings, Inc. */
/* Author: David Woodhouse <dwmw2@infradead.org> */
-/* $Id: nftlcore.c,v 1.98 2005/11/07 11:14:21 gleixner Exp $ */
/*
The contents of this file are distributed under the GNU General
@@ -803,12 +802,8 @@ static struct mtd_blktrans_ops nftl_tr = {
.owner = THIS_MODULE,
};
-extern char nftlmountrev[];
-
static int __init init_nftl(void)
{
- printk(KERN_INFO "NFTL driver: nftlcore.c $Revision: 1.98 $, nftlmount.c %s\n", nftlmountrev);
-
return register_mtd_blktrans(&nftl_tr);
}
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index 345e6eff89ce..ccc4f209fbb5 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -4,8 +4,6 @@
* Author: Fabrice Bellard (fabrice.bellard@netgem.com)
* Copyright (C) 2000 Netgem S.A.
*
- * $Id: nftlmount.c,v 1.41 2005/11/07 11:14:21 gleixner Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -31,8 +29,6 @@
#define SECTORSIZE 512
-char nftlmountrev[]="$Revision: 1.41 $";
-
/* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the
* various device information of the NFTL partition and Bad Unit Table. Update
* the ReplUnitTable[] table according to the Bad Unit Table. ReplUnitTable[]
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 5d7965f7e9ce..926cf3a4135d 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -325,28 +325,11 @@ static int onenand_wait(struct mtd_info *mtd, int state)
ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
- if (ctrl & ONENAND_CTRL_ERROR) {
- printk(KERN_ERR "onenand_wait: controller error = 0x%04x\n", ctrl);
- if (ctrl & ONENAND_CTRL_LOCK)
- printk(KERN_ERR "onenand_wait: it's locked error.\n");
- if (state == FL_READING) {
- /*
- * A power loss while writing can result in a page
- * becoming unreadable. When the device is mounted
- * again, reading that page gives controller errors.
- * Upper level software like JFFS2 treat -EIO as fatal,
- * refusing to mount at all. That means it is necessary
- * to treat the error as an ECC error to allow recovery.
- * Note that typically in this case, the eraseblock can
- * still be erased and rewritten i.e. it has not become
- * a bad block.
- */
- mtd->ecc_stats.failed++;
- return -EBADMSG;
- }
- return -EIO;
- }
-
+ /*
+ * The spec says to check the controller status first, but to get
+ * correct information in the power-off recovery (POR) test the
+ * ECC status has to be read first.
+ */
if (interrupt & ONENAND_INT_READ) {
int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS);
if (ecc) {
@@ -364,6 +347,15 @@ static int onenand_wait(struct mtd_info *mtd, int state)
return -EIO;
}
+ /* If there's controller error, it's a real error */
+ if (ctrl & ONENAND_CTRL_ERROR) {
+ printk(KERN_ERR "onenand_wait: controller error = 0x%04x\n",
+ ctrl);
+ if (ctrl & ONENAND_CTRL_LOCK)
+ printk(KERN_ERR "onenand_wait: it's locked error.\n");
+ return -EIO;
+ }
+
return 0;
}
@@ -1135,22 +1127,26 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
- /* Initial bad block case: 0x2400 or 0x0400 */
- if (ctrl & ONENAND_CTRL_ERROR) {
- printk(KERN_DEBUG "onenand_bbt_wait: controller error = 0x%04x\n", ctrl);
- return ONENAND_BBT_READ_ERROR;
- }
-
if (interrupt & ONENAND_INT_READ) {
int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS);
- if (ecc & ONENAND_ECC_2BIT_ALL)
+ if (ecc & ONENAND_ECC_2BIT_ALL) {
+ printk(KERN_INFO "onenand_bbt_wait: ecc error = 0x%04x"
+ ", controller error 0x%04x\n", ecc, ctrl);
return ONENAND_BBT_READ_ERROR;
+ }
} else {
printk(KERN_ERR "onenand_bbt_wait: read timeout!"
"ctrl=0x%04x intr=0x%04x\n", ctrl, interrupt);
return ONENAND_BBT_READ_FATAL_ERROR;
}
+ /* Initial bad block case: 0x2400 or 0x0400 */
+ if (ctrl & ONENAND_CTRL_ERROR) {
+ printk(KERN_DEBUG "onenand_bbt_wait: "
+ "controller error = 0x%04x\n", ctrl);
+ return ONENAND_BBT_READ_ERROR;
+ }
+
return 0;
}
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index c5030f94f04e..2d600a1bf2aa 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -1,6 +1,4 @@
/*
- * $Id: redboot.c,v 1.21 2006/03/30 18:34:37 bjd Exp $
- *
* Parse RedBoot-style Flash Image System (FIS) tables and
* produce a Linux partition array to match.
*/
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index c84e45465499..e538c0a72abb 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -3,8 +3,6 @@
*
* Copyright (C) 2005 Sean Young <sean@mess.org>
*
- * $Id: rfd_ftl.c,v 1.8 2006/01/15 12:51:44 sean Exp $
- *
* This type of flash translation layer (FTL) is used by the Embedded BIOS
* by General Software. It is known as the Resident Flash Disk (RFD), see:
*
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 7a14980f3472..18d3eeb7eab2 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -482,7 +482,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
goto err;
d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(d)) {
+ if (dma_mapping_error(NULL, d)) {
free_page((unsigned long)page);
goto err;
}
@@ -505,7 +505,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
goto err;
d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(d)) {
+ if (dma_mapping_error(NULL, d)) {
free_page((unsigned long)page);
goto err;
}
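
Most of the network-driver hunks that follow are mechanical conversions for the DMA API change in this merge: dma_mapping_error() and pci_dma_mapping_error() now take the device that performed the mapping as their first argument, so the DMA core can honour per-device (for example IOMMU-specific) error encodings. A minimal sketch of the new calling convention, with an illustrative helper and device pointer:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	static int foo_map_buffer(struct device *dev, void *buf, size_t len,
				  dma_addr_t *handle)
	{
		dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		/* pass the same device used for the mapping to the error check */
		if (dma_mapping_error(dev, addr))
			return -ENOMEM;

		*handle = addr;
		return 0;
	}

The PCI wrapper changes in the same way, so pci_dma_mapping_error(pdev, addr) replaces pci_dma_mapping_error(addr) throughout the hunks below.
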
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 0263bef9cc6d..af251a5df844 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -814,7 +814,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
/* release skb */
- BUG_TRAP(skb);
+ WARN_ON(!skb);
dev_kfree_skb(skb);
tx_buf->first_bd = 0;
tx_buf->skb = NULL;
@@ -837,9 +837,9 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
#ifdef BNX2X_STOP_ON_ERROR
- BUG_TRAP(used >= 0);
- BUG_TRAP(used <= fp->bp->tx_ring_size);
- BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);
+ WARN_ON(used < 0);
+ WARN_ON(used > fp->bp->tx_ring_size);
+ WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif
return (s16)(fp->bp->tx_ring_size) - used;
@@ -1020,7 +1020,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(mapping))) {
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
__free_pages(page, PAGES_PER_SGE_SHIFT);
return -ENOMEM;
}
@@ -1048,7 +1048,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(mapping))) {
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
dev_kfree_skb(skb);
return -ENOMEM;
}
@@ -4374,7 +4374,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
}
ring_prod = NEXT_RX_IDX(ring_prod);
cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
- BUG_TRAP(ring_prod > i);
+ WARN_ON(ring_prod <= i);
}
fp->rx_bd_prod = ring_prod;
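
A note on the BUG_TRAP conversion above: the two macros have opposite senses, so every condition is negated. BUG_TRAP(cond) complained when cond was false, while WARN_ON(x) warns when x is true. A small sketch of the equivalence, assuming the historical BUG_TRAP semantics (the helper name is illustrative):

	#include <linux/bug.h>

	static inline void foo_check_ring(int used, int ring_size)
	{
		WARN_ON(used < 0);		/* was BUG_TRAP(used >= 0) */
		WARN_ON(used > ring_size);	/* was BUG_TRAP(used <= ring_size) */
	}
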
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 83768df27806..f1936d51b458 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -576,6 +576,18 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags)
list_for_each_safe(elem, tmp, &list) {
cas_page_t *page = list_entry(elem, cas_page_t, list);
+ /*
+ * With the lockless pagecache, cassini buffering scheme gets
+ * slightly less accurate: we might find that a page has an
+ * elevated reference count here, due to a speculative ref,
+ * and skip it as in-use. Ideally we would be able to reclaim
+ * it. However this would be such a rare case, it doesn't
+ * matter too much as we should pick it up the next time round.
+ *
+ * Importantly, if we find that the page has a refcount of 1
+ * here (our refcount), then we know it is definitely not inuse
+ * so we can reuse it.
+ */
if (page_count(page->buffer) > 1)
continue;
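
The comment above reduces to a simple ownership test on the buffer page; a hedged sketch of the idea (the helper name is illustrative): a refcount of exactly one means only the driver's own reference remains, so the page can be recycled, while any higher count, possibly a transient speculative reference from the lockless page cache, is treated as in-use and skipped until the next pass.

	#include <linux/mm.h>

	static inline bool foo_page_reusable(struct page *page)
	{
		/* count == 1: only our reference is left, safe to reuse;
		 * count > 1: may be a speculative reference, skip for now */
		return page_count(page) == 1;
	}
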
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index a96331c875e6..1b0861d73ab7 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -386,7 +386,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
dma_addr_t mapping;
mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(mapping)))
+ if (unlikely(pci_dma_mapping_error(pdev, mapping)))
return -ENOMEM;
pci_unmap_addr_set(sd, dma_addr, mapping);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 1037b1332312..19d32a227be1 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1790,7 +1790,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(rx->dma_addr)) {
+ if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
dev_kfree_skb_any(rx->skb);
rx->skb = NULL;
rx->dma_addr = 0;
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index a14561f40db0..9350564065e7 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1090,7 +1090,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->buffer_info[i].dma =
pci_map_single(pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(tx_ring->buffer_info[i].dma)) {
+ if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) {
ret_val = 4;
goto err_nomem;
}
@@ -1153,7 +1153,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rx_ring->buffer_info[i].dma =
pci_map_single(pdev, skb->data, 2048,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rx_ring->buffer_info[i].dma)) {
+ if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) {
ret_val = 8;
goto err_nomem;
}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 9c0f56b3c518..d13677899767 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -195,7 +195,7 @@ map_skb:
buffer_info->dma = pci_map_single(pdev, skb->data,
adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(buffer_info->dma)) {
+ if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
dev_err(&pdev->dev, "RX DMA map failed\n");
adapter->rx_dma_failed++;
break;
@@ -265,7 +265,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
ps_page->page,
0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(ps_page->dma)) {
+ if (pci_dma_mapping_error(pdev, ps_page->dma)) {
dev_err(&adapter->pdev->dev,
"RX DMA page map failed\n");
adapter->rx_dma_failed++;
@@ -300,7 +300,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
buffer_info->dma = pci_map_single(pdev, skb->data,
adapter->rx_ps_bsize0,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(buffer_info->dma)) {
+ if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
dev_err(&pdev->dev, "RX DMA map failed\n");
adapter->rx_dma_failed++;
/* cleanup skb */
@@ -3344,7 +3344,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
skb->data + offset,
size,
PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(buffer_info->dma)) {
+ if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) {
dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
adapter->tx_dma_failed++;
return -1;
@@ -3382,7 +3382,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
offset,
size,
PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(buffer_info->dma)) {
+ if (pci_dma_mapping_error(adapter->pdev,
+ buffer_info->dma)) {
dev_err(&adapter->pdev->dev,
"TX DMA page map failed\n");
adapter->tx_dma_failed++;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index e5a6e2e84540..91ec9fdc7184 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -260,7 +260,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
pool->buff_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(dma_addr))
+ if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
goto failure;
pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
@@ -294,7 +294,7 @@ failure:
pool->consumer_index = pool->size - 1;
else
pool->consumer_index--;
- if (!dma_mapping_error(dma_addr))
+ if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
dma_unmap_single(&adapter->vdev->dev,
pool->dma_addr[index], pool->buff_size,
DMA_FROM_DEVICE);
@@ -448,11 +448,11 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
int i;
+ struct device *dev = &adapter->vdev->dev;
if(adapter->buffer_list_addr != NULL) {
- if(!dma_mapping_error(adapter->buffer_list_dma)) {
- dma_unmap_single(&adapter->vdev->dev,
- adapter->buffer_list_dma, 4096,
+ if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
+ dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
DMA_BIDIRECTIONAL);
adapter->buffer_list_dma = DMA_ERROR_CODE;
}
@@ -461,9 +461,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
}
if(adapter->filter_list_addr != NULL) {
- if(!dma_mapping_error(adapter->filter_list_dma)) {
- dma_unmap_single(&adapter->vdev->dev,
- adapter->filter_list_dma, 4096,
+ if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
+ dma_unmap_single(dev, adapter->filter_list_dma, 4096,
DMA_BIDIRECTIONAL);
adapter->filter_list_dma = DMA_ERROR_CODE;
}
@@ -472,8 +471,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
}
if(adapter->rx_queue.queue_addr != NULL) {
- if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
- dma_unmap_single(&adapter->vdev->dev,
+ if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
+ dma_unmap_single(dev,
adapter->rx_queue.queue_dma,
adapter->rx_queue.queue_len,
DMA_BIDIRECTIONAL);
@@ -535,6 +534,7 @@ static int ibmveth_open(struct net_device *netdev)
int rc;
union ibmveth_buf_desc rxq_desc;
int i;
+ struct device *dev;
ibmveth_debug_printk("open starting\n");
@@ -563,17 +563,19 @@ static int ibmveth_open(struct net_device *netdev)
return -ENOMEM;
}
- adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
+ dev = &adapter->vdev->dev;
+
+ adapter->buffer_list_dma = dma_map_single(dev,
adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
- adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
+ adapter->filter_list_dma = dma_map_single(dev,
adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
- adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
+ adapter->rx_queue.queue_dma = dma_map_single(dev,
adapter->rx_queue.queue_addr,
adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
- if((dma_mapping_error(adapter->buffer_list_dma) ) ||
- (dma_mapping_error(adapter->filter_list_dma)) ||
- (dma_mapping_error(adapter->rx_queue.queue_dma))) {
+ if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
+ (dma_mapping_error(dev, adapter->filter_list_dma)) ||
+ (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
ibmveth_error_printk("unable to map filter or buffer list pages\n");
ibmveth_cleanup(adapter);
napi_disable(&adapter->napi);
@@ -645,7 +647,7 @@ static int ibmveth_open(struct net_device *netdev)
adapter->bounce_buffer_dma =
dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(adapter->bounce_buffer_dma)) {
+ if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
ibmveth_error_printk("unable to map bounce buffer\n");
ibmveth_cleanup(adapter);
napi_disable(&adapter->napi);
@@ -922,7 +924,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
buf[1] = 0;
}
- if (dma_mapping_error(data_dma_addr)) {
+ if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
if (!firmware_has_feature(FW_FEATURE_CMO))
ibmveth_error_printk("tx: unable to map xmit buffer\n");
skb_copy_from_linear_data(skb, adapter->bounce_buffer,
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index b8d0639c1cdf..c46864d626b2 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1128,7 +1128,7 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
msg->data.addr[0] = dma_map_single(port->dev, skb->data,
skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(msg->data.addr[0]))
+ if (dma_mapping_error(port->dev, msg->data.addr[0]))
goto recycle_and_drop;
msg->dev = port->dev;
@@ -1226,7 +1226,7 @@ static void veth_recycle_msg(struct veth_lpar_connection *cnx,
dma_address = msg->data.addr[0];
dma_length = msg->data.len[0];
- if (!dma_mapping_error(dma_address))
+ if (!dma_mapping_error(msg->dev, dma_address))
dma_unmap_single(msg->dev, dma_address, dma_length,
DMA_TO_DEVICE);
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index f9d6b4dca180..096bca54bcf7 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index aa9528779044..f094ee00c416 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
index 04d5bc69a6f8..2845a0560b84 100644
--- a/drivers/net/mlx4/cmd.c
+++ b/drivers/net/mlx4/cmd.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 95e87a2f8896..9bb50e3f8974 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -2,7 +2,7 @@
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index ea3a09aaa844..8a8b56135a58 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -526,7 +526,7 @@ int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
return -ENOMEM;
priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(priv->eq_table.icm_dma)) {
+ if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
__free_page(priv->eq_table.icm_page);
return -ENOMEM;
}
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 57278224ba1e..7e32955da982 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index fbf0e22be122..decbb5c2ad41 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index 2a5bef6388fe..baf4bf66062c 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h
index 6c44edf35847..ab56a2f89b65 100644
--- a/drivers/net/mlx4/icm.h
+++ b/drivers/net/mlx4/icm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index 4a6c4d526f1b..0e7eb1038f9f 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 8e1d24cda1b0..1252a919de2e 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1,7 +1,7 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index b4b57870ddfd..c83f88ce0736 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 78038499cff5..5337e3ac3e78 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -2,7 +2,7 @@
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index a3c04c5f12c2..62071d9c4a55 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index ee5484c44a18..c49a86044bf7 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -1,7 +1,7 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c
index e199715fabd0..3951b884c0fb 100644
--- a/drivers/net/mlx4/reset.c
+++ b/drivers/net/mlx4/reset.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index d23f46d692ef..533eb6db24b3 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 993d87c9296f..edc0fd588985 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -650,7 +650,7 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
mac->bufsz - LOCAL_SKB_ALIGN,
PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(dma))) {
+ if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) {
dev_kfree_skb_irq(info->skb);
break;
}
@@ -1519,7 +1519,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
PCI_DMA_TODEVICE);
map_size[0] = skb_headlen(skb);
- if (dma_mapping_error(map[0]))
+ if (pci_dma_mapping_error(mac->dma_pdev, map[0]))
goto out_err_nolock;
for (i = 0; i < nfrags; i++) {
@@ -1529,7 +1529,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
frag->page_offset, frag->size,
PCI_DMA_TODEVICE);
map_size[i+1] = frag->size;
- if (dma_mapping_error(map[i+1])) {
+ if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) {
nfrags = i;
goto out_err_nolock;
}
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 739b3ab7bccc..ddccc074a76a 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -581,12 +581,12 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (file == ppp->owner)
ppp_shutdown_interface(ppp);
}
- if (atomic_read(&file->f_count) <= 2) {
+ if (atomic_long_read(&file->f_count) <= 2) {
ppp_release(NULL, file);
err = 0;
} else
- printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%d\n",
- atomic_read(&file->f_count));
+ printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n",
+ atomic_long_read(&file->f_count));
unlock_kernel();
return err;
}
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index e7d48a352beb..e82b37bbd6c3 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -328,7 +328,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
qdev->lrg_buffer_len -
QL_HEADER_SPACE,
PCI_DMA_FROMDEVICE);
- err = pci_dma_mapping_error(map);
+ err = pci_dma_mapping_error(qdev->pdev, map);
if(err) {
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
qdev->ndev->name, err);
@@ -1919,7 +1919,7 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
QL_HEADER_SPACE,
PCI_DMA_FROMDEVICE);
- err = pci_dma_mapping_error(map);
+ err = pci_dma_mapping_error(qdev->pdev, map);
if(err) {
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
qdev->ndev->name, err);
@@ -2454,7 +2454,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
*/
map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
- err = pci_dma_mapping_error(map);
+ err = pci_dma_mapping_error(qdev->pdev, map);
if(err) {
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
qdev->ndev->name, err);
@@ -2487,7 +2487,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
sizeof(struct oal),
PCI_DMA_TODEVICE);
- err = pci_dma_mapping_error(map);
+ err = pci_dma_mapping_error(qdev->pdev, map);
if(err) {
printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
@@ -2514,7 +2514,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
frag->page_offset, frag->size,
PCI_DMA_TODEVICE);
- err = pci_dma_mapping_error(map);
+ err = pci_dma_mapping_error(qdev->pdev, map);
if(err) {
printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
qdev->ndev->name, err);
@@ -2916,7 +2916,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
QL_HEADER_SPACE,
PCI_DMA_FROMDEVICE);
- err = pci_dma_mapping_error(map);
+ err = pci_dma_mapping_error(qdev->pdev, map);
if(err) {
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
qdev->ndev->name, err);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 9dae40ccf048..86d77d05190a 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2512,8 +2512,8 @@ static void stop_nic(struct s2io_nic *nic)
* Return Value:
* SUCCESS on success or an appropriate -ve value on failure.
*/
-
-static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
+static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
+ int from_card_up)
{
struct sk_buff *skb;
struct RxD_t *rxdp;
@@ -2602,7 +2602,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
rxdp1->Buffer0_ptr = pci_map_single
(ring->pdev, skb->data, size - NET_IP_ALIGN,
PCI_DMA_FROMDEVICE);
- if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp1->Buffer0_ptr))
goto pci_map_failed;
rxdp->Control_2 =
@@ -2636,7 +2637,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
rxdp3->Buffer0_ptr =
pci_map_single(ring->pdev, ba->ba_0,
BUF0_LEN, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp3->Buffer0_ptr))
goto pci_map_failed;
} else
pci_dma_sync_single_for_device(ring->pdev,
@@ -2655,7 +2657,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
(ring->pdev, skb->data, ring->mtu + 4,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp3->Buffer2_ptr))
goto pci_map_failed;
if (from_card_up) {
@@ -2664,8 +2667,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
ba->ba_1, BUF1_LEN,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error
- (rxdp3->Buffer1_ptr)) {
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp3->Buffer1_ptr)) {
pci_unmap_single
(ring->pdev,
(dma_addr_t)(unsigned long)
@@ -2806,9 +2809,9 @@ static void free_rx_buffers(struct s2io_nic *sp)
}
}
-static int s2io_chk_rx_buffers(struct ring_info *ring)
+static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
{
- if (fill_rx_buffers(ring, 0) == -ENOMEM) {
+ if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
}
@@ -2848,7 +2851,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget)
return 0;
pkts_processed = rx_intr_handler(ring, budget);
- s2io_chk_rx_buffers(ring);
+ s2io_chk_rx_buffers(nic, ring);
if (pkts_processed < budget_org) {
netif_rx_complete(dev, napi);
@@ -2882,7 +2885,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
for (i = 0; i < config->rx_ring_num; i++) {
ring = &mac_control->rings[i];
ring_pkts_processed = rx_intr_handler(ring, budget);
- s2io_chk_rx_buffers(ring);
+ s2io_chk_rx_buffers(nic, ring);
pkts_processed += ring_pkts_processed;
budget -= ring_pkts_processed;
if (budget <= 0)
@@ -2939,7 +2942,8 @@ static void s2io_netpoll(struct net_device *dev)
rx_intr_handler(&mac_control->rings[i], 0);
for (i = 0; i < config->rx_ring_num; i++) {
- if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) {
+ if (fill_rx_buffers(nic, &mac_control->rings[i], 0) ==
+ -ENOMEM) {
DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
break;
@@ -4235,14 +4239,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
txdp->Buffer_Pointer = pci_map_single(sp->pdev,
fifo->ufo_in_band_v,
sizeof(u64), PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(txdp->Buffer_Pointer))
+ if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
goto pci_map_failed;
txdp++;
}
txdp->Buffer_Pointer = pci_map_single
(sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(txdp->Buffer_Pointer))
+ if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
goto pci_map_failed;
txdp->Host_Control = (unsigned long) skb;
@@ -4345,7 +4349,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
netif_rx_schedule(dev, &ring->napi);
} else {
rx_intr_handler(ring, 0);
- s2io_chk_rx_buffers(ring);
+ s2io_chk_rx_buffers(sp, ring);
}
return IRQ_HANDLED;
@@ -4826,7 +4830,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
*/
if (!config->napi) {
for (i = 0; i < config->rx_ring_num; i++)
- s2io_chk_rx_buffers(&mac_control->rings[i]);
+ s2io_chk_rx_buffers(sp, &mac_control->rings[i]);
}
writeq(sp->general_int_mask, &bar0->general_int_mask);
readl(&bar0->general_int_status);
@@ -6859,7 +6863,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
pci_map_single( sp->pdev, (*skb)->data,
size - NET_IP_ALIGN,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rxdp1->Buffer0_ptr))
+ if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
goto memalloc_failed;
rxdp->Host_Control = (unsigned long) (*skb);
}
@@ -6886,12 +6890,13 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
pci_map_single(sp->pdev, (*skb)->data,
dev->mtu + 4,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
+ if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
goto memalloc_failed;
rxdp3->Buffer0_ptr = *temp0 =
pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) {
+ if (pci_dma_mapping_error(sp->pdev,
+ rxdp3->Buffer0_ptr)) {
pci_unmap_single (sp->pdev,
(dma_addr_t)rxdp3->Buffer2_ptr,
dev->mtu + 4, PCI_DMA_FROMDEVICE);
@@ -6903,7 +6908,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
rxdp3->Buffer1_ptr = *temp1 =
pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
+ if (pci_dma_mapping_error(sp->pdev,
+ rxdp3->Buffer1_ptr)) {
pci_unmap_single (sp->pdev,
(dma_addr_t)rxdp3->Buffer0_ptr,
BUF0_LEN, PCI_DMA_FROMDEVICE);
@@ -7187,7 +7193,7 @@ static int s2io_card_up(struct s2io_nic * sp)
for (i = 0; i < config->rx_ring_num; i++) {
mac_control->rings[i].mtu = dev->mtu;
- ret = fill_rx_buffers(&mac_control->rings[i], 1);
+ ret = fill_rx_buffers(sp, &mac_control->rings[i], 1);
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
dev->name);
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 601b001437c0..0d27dd39bc09 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -233,7 +233,7 @@ static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
rx_buf->data, rx_buf->len,
PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) {
+ if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
dev_kfree_skb_any(rx_buf->skb);
rx_buf->skb = NULL;
return -EIO;
@@ -275,7 +275,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
0, efx_rx_buf_size(efx),
PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(dma_addr))) {
+ if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
__free_pages(rx_buf->page, efx->rx_buffer_order);
rx_buf->page = NULL;
return -EIO;
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 5cdd082ab8f6..5e8374ab28ee 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -172,7 +172,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
/* Process all fragments */
while (1) {
- if (unlikely(pci_dma_mapping_error(dma_addr)))
+ if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
goto pci_err;
/* Store fields for marking in the per-fragment final
@@ -661,7 +661,8 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
TSOH_BUFFER(tsoh), header_len,
PCI_DMA_TODEVICE);
- if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) {
+ if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
+ tsoh->dma_addr))) {
kfree(tsoh);
return NULL;
}
@@ -863,7 +864,7 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
len, PCI_DMA_TODEVICE);
- if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) {
+ if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
st->ifc.unmap_len = len;
st->ifc.len = len;
st->ifc.dma_addr = st->ifc.unmap_addr;
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 00aa0b108cb9..b6435d0d71f9 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -452,7 +452,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
/* iommu-map the skb */
buf = pci_map_single(card->pdev, descr->skb->data,
SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(buf)) {
+ if (pci_dma_mapping_error(card->pdev, buf)) {
dev_kfree_skb_any(descr->skb);
descr->skb = NULL;
if (netif_msg_rx_err(card) && net_ratelimit())
@@ -691,7 +691,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
unsigned long flags;
buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(buf)) {
+ if (pci_dma_mapping_error(card->pdev, buf)) {
if (netif_msg_tx_err(card) && net_ratelimit())
dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
"Dropping packet\n", skb->data, skb->len);
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index a645e5028c14..8487ace9d2e3 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -506,7 +506,7 @@ static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
return NULL;
*dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(*dma_handle)) {
+ if (pci_dma_mapping_error(hwdev, *dma_handle)) {
free_page((unsigned long)buf);
return NULL;
}
@@ -536,7 +536,7 @@ static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
return NULL;
*dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(*dma_handle)) {
+ if (pci_dma_mapping_error(hwdev, *dma_handle)) {
dev_kfree_skb_any(skb);
return NULL;
}
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 217d506527a9..d9769c527346 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -1166,7 +1166,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
bf->skb = skb;
bf->skbaddr = pci_map_single(sc->pdev,
skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(bf->skbaddr))) {
+ if (unlikely(pci_dma_mapping_error(sc->pdev, bf->skbaddr))) {
ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
dev_kfree_skb(skb);
bf->skb = NULL;
@@ -1918,7 +1918,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
"skbaddr %llx\n", skb, skb->data, skb->len,
(unsigned long long)bf->skbaddr);
- if (pci_dma_mapping_error(bf->skbaddr)) {
+ if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) {
ATH5K_ERR(sc, "beacon DMA mapping failed\n");
return -EIO;
}
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
index 0338b0912674..e97059415ab4 100644
--- a/drivers/parport/ieee1284.c
+++ b/drivers/parport/ieee1284.c
@@ -199,8 +199,6 @@ int parport_wait_peripheral(struct parport *port,
/* 40ms of slow polling. */
deadline = jiffies + msecs_to_jiffies(40);
while (time_before (jiffies, deadline)) {
- int ret;
-
if (signal_pending (current))
return -EINTR;
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index 802a81d47367..00e1d9620f7c 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -235,7 +235,7 @@ failed:
======================================================================*/
-void parport_cs_release(struct pcmcia_device *link)
+static void parport_cs_release(struct pcmcia_device *link)
{
parport_info_t *info = link->priv;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index e0c2a4584ec6..8a846adf1dcf 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2867,7 +2867,7 @@ static struct parport_pc_pci {
* and 840 locks up if you write 1 to bit 2! */
/* oxsemi_952 */ { 1, { { 0, 1 }, } },
/* oxsemi_954 */ { 1, { { 0, -1 }, } },
- /* oxsemi_840 */ { 1, { { 0, -1 }, } },
+ /* oxsemi_840 */ { 1, { { 0, 1 }, } },
/* aks_0100 */ { 1, { { 0, -1 }, } },
/* mobility_pp */ { 1, { { 0, 1 }, } },
/* netmos_9705 */ { 1, { { 0, -1 }, } }, /* untested */
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
index d950fc34320a..554e11f9e1ce 100644
--- a/drivers/parport/procfs.c
+++ b/drivers/parport/procfs.c
@@ -429,9 +429,6 @@ struct parport_default_sysctl_table
ctl_table dev_dir[2];
};
-extern unsigned long parport_default_timeslice;
-extern int parport_default_spintime;
-
static struct parport_default_sysctl_table
parport_default_sysctl_table = {
.sysctl_header = NULL,
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index 420a77540f41..8c21446996f2 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -149,10 +149,10 @@ soc_common_pcmcia_config_skt(struct soc_pcmcia_socket *skt, socket_state_t *stat
*/
if (skt->irq_state != 1 && state->io_irq) {
skt->irq_state = 1;
- set_irq_type(skt->irq, IRQT_FALLING);
+ set_irq_type(skt->irq, IRQ_TYPE_EDGE_FALLING);
} else if (skt->irq_state == 1 && state->io_irq == 0) {
skt->irq_state = 0;
- set_irq_type(skt->irq, IRQT_NOEDGE);
+ set_irq_type(skt->irq, IRQ_TYPE_NONE);
}
skt->cs_state = *state;
@@ -527,7 +527,7 @@ int soc_pcmcia_request_irqs(struct soc_pcmcia_socket *skt,
IRQF_DISABLED, irqs[i].str, skt);
if (res)
break;
- set_irq_type(irqs[i].irq, IRQT_NOEDGE);
+ set_irq_type(irqs[i].irq, IRQ_TYPE_NONE);
}
if (res) {
@@ -560,7 +560,7 @@ void soc_pcmcia_disable_irqs(struct soc_pcmcia_socket *skt,
for (i = 0; i < nr; i++)
if (irqs[i].sock == skt->nr)
- set_irq_type(irqs[i].irq, IRQT_NOEDGE);
+ set_irq_type(irqs[i].irq, IRQ_TYPE_NONE);
}
EXPORT_SYMBOL(soc_pcmcia_disable_irqs);
@@ -571,8 +571,8 @@ void soc_pcmcia_enable_irqs(struct soc_pcmcia_socket *skt,
for (i = 0; i < nr; i++)
if (irqs[i].sock == skt->nr) {
- set_irq_type(irqs[i].irq, IRQT_RISING);
- set_irq_type(irqs[i].irq, IRQT_BOTHEDGE);
+ set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_RISING);
+ set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_BOTH);
}
}
EXPORT_SYMBOL(soc_pcmcia_enable_irqs);
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index e3fa9a2d9a3d..9fd7bb9b7dce 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -19,7 +19,6 @@ struct pnp_id *pnp_add_id(struct pnp_dev *dev, char *id);
int pnp_interface_attach_device(struct pnp_dev *dev);
int pnp_add_card(struct pnp_card *card);
-struct pnp_id *pnp_add_card_id(struct pnp_card *card, char *id);
void pnp_remove_card(struct pnp_card *card);
int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev);
void pnp_remove_card_device(struct pnp_dev *dev);
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c
index a762a4176736..e75b060daa95 100644
--- a/drivers/pnp/card.c
+++ b/drivers/pnp/card.c
@@ -8,6 +8,7 @@
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/pnp.h>
+#include <linux/dma-mapping.h>
#include "base.h"
LIST_HEAD(pnp_cards);
@@ -101,7 +102,7 @@ static int card_probe(struct pnp_card *card, struct pnp_card_driver *drv)
* @id: pointer to a pnp_id structure
* @card: pointer to the desired card
*/
-struct pnp_id *pnp_add_card_id(struct pnp_card *card, char *id)
+static struct pnp_id *pnp_add_card_id(struct pnp_card *card, char *id)
{
struct pnp_id *dev_id, *ptr;
@@ -167,6 +168,9 @@ struct pnp_card *pnp_alloc_card(struct pnp_protocol *protocol, int id, char *pnp
sprintf(card->dev.bus_id, "%02x:%02x", card->protocol->number,
card->number);
+ card->dev.coherent_dma_mask = DMA_24BIT_MASK;
+ card->dev.dma_mask = &card->dev.coherent_dma_mask;
+
dev_id = pnp_add_card_id(card, pnpid);
if (!dev_id) {
kfree(card);
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 55f55ed72dc7..0bdf9b8a5e58 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -245,15 +245,17 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
*/
for_each_pci_dev(pdev) {
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM) ||
- pci_resource_len(pdev, i) == 0)
+ unsigned int type;
+
+ type = pci_resource_flags(pdev, i) &
+ (IORESOURCE_IO | IORESOURCE_MEM);
+ if (!type || pci_resource_len(pdev, i) == 0)
continue;
pci_start = pci_resource_start(pdev, i);
pci_end = pci_resource_end(pdev, i);
for (j = 0;
- (res = pnp_get_resource(dev, IORESOURCE_MEM, j));
- j++) {
+ (res = pnp_get_resource(dev, type, j)); j++) {
if (res->start == 0 && res->end == 0)
continue;
@@ -283,9 +285,10 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
* the PCI region, and that might prevent a PCI
* driver from requesting its resources.
*/
- dev_warn(&dev->dev, "mem resource "
+ dev_warn(&dev->dev, "%s resource "
"(0x%llx-0x%llx) overlaps %s BAR %d "
"(0x%llx-0x%llx), disabling\n",
+ pnp_resource_type_name(res),
(unsigned long long) pnp_start,
(unsigned long long) pnp_end,
pci_name(pdev), i,
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/kvm/Makefile
index 4a5ec39f9ca6..0815690ac1e0 100644
--- a/drivers/s390/kvm/Makefile
+++ b/drivers/s390/kvm/Makefile
@@ -6,4 +6,4 @@
# it under the terms of the GNU General Public License (version 2 only)
# as published by the Free Software Foundation.
-obj-$(CONFIG_VIRTIO) += kvm_virtio.o
+obj-$(CONFIG_S390_GUEST) += kvm_virtio.o
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c3ad89e302bd..cebb25e36e82 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3321,7 +3321,7 @@ int qeth_change_mtu(struct net_device *dev, int new_mtu)
struct qeth_card *card;
char dbf_text[15];
- card = netdev_priv(dev);
+ card = dev->ml_priv;
QETH_DBF_TEXT(TRACE, 4, "chgmtu");
sprintf(dbf_text, "%8x", new_mtu);
@@ -3343,7 +3343,7 @@ struct net_device_stats *qeth_get_stats(struct net_device *dev)
{
struct qeth_card *card;
- card = netdev_priv(dev);
+ card = dev->ml_priv;
QETH_DBF_TEXT(TRACE, 5, "getstat");
@@ -3395,7 +3395,7 @@ void qeth_tx_timeout(struct net_device *dev)
{
struct qeth_card *card;
- card = netdev_priv(dev);
+ card = dev->ml_priv;
card->stats.tx_errors++;
qeth_schedule_recovery(card);
}
@@ -3403,7 +3403,7 @@ EXPORT_SYMBOL_GPL(qeth_tx_timeout);
int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
{
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
int rc = 0;
switch (regnum) {
@@ -4253,7 +4253,7 @@ EXPORT_SYMBOL_GPL(qeth_core_get_stats_count);
void qeth_core_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
data[0] = card->stats.rx_packets -
card->perf_stats.initial_rx_packets;
data[1] = card->perf_stats.bufs_rec;
@@ -4313,7 +4313,7 @@ EXPORT_SYMBOL_GPL(qeth_core_get_strings);
void qeth_core_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
if (card->options.layer2)
strcpy(info->driver, "qeth_l2");
else
@@ -4331,7 +4331,7 @@ EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
int qeth_core_ethtool_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
- struct qeth_card *card = netdev_priv(netdev);
+ struct qeth_card *card = netdev->ml_priv;
enum qeth_link_types link_type;
if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 3fbc3bdec0c5..a8b069cd9a4c 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -35,7 +35,7 @@ static int qeth_l2_recover(void *);
static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
struct mii_ioctl_data *mii_data;
int rc = 0;
@@ -317,7 +317,7 @@ static void qeth_l2_process_vlans(struct qeth_card *card, int clear)
static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
struct qeth_vlan_vid *id;
QETH_DBF_TEXT_(TRACE, 4, "aid:%d", vid);
@@ -334,7 +334,7 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct qeth_vlan_vid *id, *tmpid = NULL;
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
spin_lock_bh(&card->vlanlock);
@@ -566,7 +566,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
int rc = 0;
QETH_DBF_TEXT(TRACE, 3, "setmac");
@@ -590,7 +590,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
static void qeth_l2_set_multicast_list(struct net_device *dev)
{
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
struct dev_mc_list *dm;
if (card->info.type == QETH_CARD_TYPE_OSN)
@@ -612,7 +612,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
int rc;
struct qeth_hdr *hdr = NULL;
int elements = 0;
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
struct sk_buff *new_skb = skb;
int ipv = qeth_get_ip_version(skb);
int cast_type = qeth_get_cast_type(card, skb);
@@ -767,7 +767,7 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
static int qeth_l2_open(struct net_device *dev)
{
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
QETH_DBF_TEXT(TRACE, 4, "qethopen");
if (card->state != CARD_STATE_SOFTSETUP)
@@ -791,7 +791,7 @@ static int qeth_l2_open(struct net_device *dev)
static int qeth_l2_stop(struct net_device *dev)
{
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
QETH_DBF_TEXT(TRACE, 4, "qethstop");
netif_tx_disable(dev);
@@ -838,7 +838,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
static int qeth_l2_ethtool_set_tso(struct net_device *dev, u32 data)
{
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
if (data) {
if (card->options.large_send == QETH_LARGE_SEND_NO) {
@@ -894,7 +894,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
if (!card->dev)
return -ENODEV;
- card->dev->priv = card;
+ card->dev->ml_priv = card;
card->dev->tx_timeout = &qeth_tx_timeout;
card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
card->dev->open = qeth_l2_open;
@@ -1178,7 +1178,7 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
QETH_DBF_TEXT(TRACE, 2, "osnsdmc");
if (!dev)
return -ENODEV;
- card = netdev_priv(dev);
+ card = dev->ml_priv;
if (!card)
return -ENODEV;
if ((card->state != CARD_STATE_UP) &&
@@ -1201,7 +1201,7 @@ int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
*dev = qeth_l2_netdev_by_devno(read_dev_no);
if (*dev == NULL)
return -ENODEV;
- card = netdev_priv(*dev);
+ card = (*dev)->ml_priv;
if (!card)
return -ENODEV;
if ((assist_cb == NULL) || (data_cb == NULL))
@@ -1219,7 +1219,7 @@ void qeth_osn_deregister(struct net_device *dev)
QETH_DBF_TEXT(TRACE, 2, "osndereg");
if (!dev)
return;
- card = netdev_priv(dev);
+ card = dev->ml_priv;
if (!card)
return;
card->osn_info.assist_cb = NULL;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 38de31b55708..3e1d13857350 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1813,7 +1813,7 @@ static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
static void qeth_l3_vlan_rx_register(struct net_device *dev,
struct vlan_group *grp)
{
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
unsigned long flags;
QETH_DBF_TEXT(TRACE, 4, "vlanreg");
@@ -1825,7 +1825,7 @@ static void qeth_l3_vlan_rx_register(struct net_device *dev,
static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct net_device *vlandev;
- struct qeth_card *card = (struct qeth_card *) dev->priv;
+ struct qeth_card *card = dev->ml_priv;
struct in_device *in_dev;
if (card->info.type == QETH_CARD_TYPE_IQD)
@@ -1851,7 +1851,7 @@ static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
unsigned long flags;
QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
@@ -2013,7 +2013,7 @@ static int qeth_l3_verify_vlan_dev(struct net_device *dev,
}
}
- if (rc && !(netdev_priv(vlan_dev_real_dev(dev)) == (void *)card))
+ if (rc && !(vlan_dev_real_dev(dev)->ml_priv == (void *)card))
return 0;
return rc;
@@ -2047,9 +2047,9 @@ static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
rc = qeth_l3_verify_dev(dev);
if (rc == QETH_REAL_CARD)
- card = netdev_priv(dev);
+ card = dev->ml_priv;
else if (rc == QETH_VLAN_CARD)
- card = netdev_priv(vlan_dev_real_dev(dev));
+ card = vlan_dev_real_dev(dev)->ml_priv;
if (card && card->options.layer2)
card = NULL;
QETH_DBF_TEXT_(TRACE, 4, "%d", rc);
@@ -2110,7 +2110,7 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
static void qeth_l3_set_multicast_list(struct net_device *dev)
{
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
QETH_DBF_TEXT(TRACE, 3, "setmulti");
qeth_l3_delete_mc_addresses(card);
@@ -2438,7 +2438,7 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
struct qeth_arp_cache_entry arp_entry;
struct mii_ioctl_data *mii_data;
int rc = 0;
@@ -2595,7 +2595,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
u16 *tag;
struct qeth_hdr *hdr = NULL;
int elements_needed = 0;
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
struct sk_buff *new_skb = NULL;
int ipv = qeth_get_ip_version(skb);
int cast_type = qeth_get_cast_type(card, skb);
@@ -2763,7 +2763,7 @@ tx_drop:
static int qeth_l3_open(struct net_device *dev)
{
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
QETH_DBF_TEXT(TRACE, 4, "qethopen");
if (card->state != CARD_STATE_SOFTSETUP)
@@ -2780,7 +2780,7 @@ static int qeth_l3_open(struct net_device *dev)
static int qeth_l3_stop(struct net_device *dev)
{
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
QETH_DBF_TEXT(TRACE, 4, "qethstop");
netif_tx_disable(dev);
@@ -2792,14 +2792,14 @@ static int qeth_l3_stop(struct net_device *dev)
static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev)
{
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
return (card->options.checksum_type == HW_CHECKSUMMING);
}
static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data)
{
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
enum qeth_card_states old_state;
enum qeth_checksum_types csum_type;
@@ -2825,7 +2825,7 @@ static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data)
static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
{
- struct qeth_card *card = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
if (data) {
if (card->options.large_send == QETH_LARGE_SEND_NO) {
@@ -2915,7 +2915,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
return -ENODEV;
card->dev->hard_start_xmit = qeth_l3_hard_start_xmit;
- card->dev->priv = card;
+ card->dev->ml_priv = card;
card->dev->tx_timeout = &qeth_tx_timeout;
card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
card->dev->open = qeth_l3_open;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 7045511f9ad2..b92c19bb6876 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -4,7 +4,7 @@
Written By: Adam Radford <linuxraid@amcc.com>
Modifications By: Tom Couch <linuxraid@amcc.com>
- Copyright (C) 2004-2007 Applied Micro Circuits Corporation.
+ Copyright (C) 2004-2008 Applied Micro Circuits Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -71,6 +71,10 @@
Add support for 9650SE controllers.
2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
2.26.02.010 - Add support for 9690SA controllers.
+ 2.26.02.011 - Increase max AENs drained to 256.
+ Add MSI support and "use_msi" module parameter.
+ Fix bug in twa_get_param() on 4GB+.
+ Use pci_resource_len() for ioremap().
*/
#include <linux/module.h>
@@ -95,7 +99,7 @@
#include "3w-9xxx.h"
/* Globals */
-#define TW_DRIVER_VERSION "2.26.02.010"
+#define TW_DRIVER_VERSION "2.26.02.011"
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
@@ -107,6 +111,10 @@ MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);
+static int use_msi = 0;
+module_param(use_msi, int, S_IRUGO);
+MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
+
/* Function prototypes */
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
@@ -1038,7 +1046,6 @@ static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int tabl
TW_Command_Full *full_command_packet;
TW_Command *command_packet;
TW_Param_Apache *param;
- unsigned long param_value;
void *retval = NULL;
/* Setup the command packet */
@@ -1057,9 +1064,8 @@ static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int tabl
param->table_id = cpu_to_le16(table_id | 0x8000);
param->parameter_id = cpu_to_le16(parameter_id);
param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
- param_value = tw_dev->generic_buffer_phys[request_id];
- command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(param_value);
+ command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
/* Post the command packet to the board */
@@ -2000,7 +2006,7 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
{
struct Scsi_Host *host = NULL;
TW_Device_Extension *tw_dev;
- u32 mem_addr;
+ unsigned long mem_addr, mem_len;
int retval = -ENODEV;
retval = pci_enable_device(pdev);
@@ -2045,13 +2051,16 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
goto out_free_device_extension;
}
- if (pdev->device == PCI_DEVICE_ID_3WARE_9000)
+ if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
mem_addr = pci_resource_start(pdev, 1);
- else
+ mem_len = pci_resource_len(pdev, 1);
+ } else {
mem_addr = pci_resource_start(pdev, 2);
+ mem_len = pci_resource_len(pdev, 2);
+ }
/* Save base address */
- tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE);
+ tw_dev->base_addr = ioremap(mem_addr, mem_len);
if (!tw_dev->base_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
goto out_release_mem_region;
@@ -2086,7 +2095,7 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
pci_set_drvdata(pdev, host);
- printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%x, IRQ: %d.\n",
+ printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
host->host_no, mem_addr, pdev->irq);
printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
host->host_no,
@@ -2097,6 +2106,11 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
+ /* Try to enable MSI */
+ if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
+ !pci_enable_msi(pdev))
+ set_bit(TW_USING_MSI, &tw_dev->flags);
+
/* Now setup the interrupt handler */
retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
if (retval) {
@@ -2120,6 +2134,8 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
return 0;
out_remove_host:
+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
+ pci_disable_msi(pdev);
scsi_remove_host(host);
out_iounmap:
iounmap(tw_dev->base_addr);
@@ -2151,6 +2167,10 @@ static void twa_remove(struct pci_dev *pdev)
/* Shutdown the card */
__twa_shutdown(tw_dev);
+ /* Disable MSI if enabled */
+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
+ pci_disable_msi(pdev);
+
/* Free IO remapping */
iounmap(tw_dev->base_addr);
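The 3w-9xxx hunks above add an opt-in MSI path (the "use_msi" module parameter and the TW_USING_MSI flag) and map the full BAR via pci_resource_len() instead of a fixed PAGE_SIZE. Below is a minimal sketch of that probe/remove pattern; my_probe(), my_remove() and the bit-0 flag are hypothetical, while pci_enable_msi(), pci_disable_msi(), pci_resource_start(), pci_resource_len() and ioremap() are the standard kernel calls used in the patch.

/* Sketch only: hypothetical driver skeleton for the MSI + full-BAR mapping
 * pattern added above. */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io.h>

static int use_msi;
module_param(use_msi, int, S_IRUGO);

static int my_probe(struct pci_dev *pdev, void __iomem **base,
		    unsigned long *flags)
{
	unsigned long mem_addr = pci_resource_start(pdev, 1);
	unsigned long mem_len  = pci_resource_len(pdev, 1);

	/* Map the whole BAR rather than assuming PAGE_SIZE is enough. */
	*base = ioremap(mem_addr, mem_len);
	if (!*base)
		return -ENOMEM;

	/* MSI is optional: fall back silently to legacy INTx on failure. */
	if (use_msi && !pci_enable_msi(pdev))
		set_bit(0, flags);		/* e.g. a MY_USING_MSI bit */
	return 0;
}

static void my_remove(struct pci_dev *pdev, void __iomem *base,
		      unsigned long *flags)
{
	if (test_and_clear_bit(0, flags))
		pci_disable_msi(pdev);
	iounmap(base);
}
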
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index d14a9479e389..1729a8785fea 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -4,7 +4,7 @@
Written By: Adam Radford <linuxraid@amcc.com>
Modifications By: Tom Couch <linuxraid@amcc.com>
- Copyright (C) 2004-2007 Applied Micro Circuits Corporation.
+ Copyright (C) 2004-2008 Applied Micro Circuits Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -319,8 +319,8 @@ static twa_message_type twa_error_table[] = {
/* Compatibility defines */
#define TW_9000_ARCH_ID 0x5
-#define TW_CURRENT_DRIVER_SRL 30
-#define TW_CURRENT_DRIVER_BUILD 80
+#define TW_CURRENT_DRIVER_SRL 35
+#define TW_CURRENT_DRIVER_BUILD 0
#define TW_CURRENT_DRIVER_BRANCH 0
/* Phase defines */
@@ -352,8 +352,9 @@ static twa_message_type twa_error_table[] = {
#define TW_MAX_RESET_TRIES 2
#define TW_MAX_CMDS_PER_LUN 254
#define TW_MAX_RESPONSE_DRAIN 256
-#define TW_MAX_AEN_DRAIN 40
+#define TW_MAX_AEN_DRAIN 255
#define TW_IN_RESET 2
+#define TW_USING_MSI 3
#define TW_IN_ATTENTION_LOOP 4
#define TW_MAX_SECTORS 256
#define TW_AEN_WAIT_TIME 1000
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 26be540d1dd3..c7f06298bd3c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -63,6 +63,7 @@ comment "SCSI support type (disk, tape, CD-ROM)"
config BLK_DEV_SD
tristate "SCSI disk support"
depends on SCSI
+ select CRC_T10DIF
---help---
If you want to use SCSI hard disks, Fibre Channel disks,
Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index a8149677de23..72fd5043cfa1 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -151,6 +151,8 @@ scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
sd_mod-objs := sd.o
+sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o
+
sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o
ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
:= -DCONFIG_NCR53C8XX_PREFETCH -DSCSI_NCR_BIG_ENDIAN \
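The Kconfig/Makefile changes above make BLK_DEV_SD select CRC_T10DIF and build sd_dif.o when block-layer integrity support is enabled; the CRC library exports crc_t10dif() for the 16-bit guard tag of a T10 DIF protection tuple. A minimal sketch, assuming a 512-byte sector buffer and a hypothetical helper name:

/* Sketch only: my_guard_tag() is hypothetical; crc_t10dif() is the library
 * routine that CONFIG_CRC_T10DIF provides for sd_dif.o. */
#include <linux/types.h>
#include <linux/crc-t10dif.h>

static u16 my_guard_tag(const void *sector)
{
	return crc_t10dif(sector, 512);	/* guard field of the DIF tuple */
}
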
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 8591585e5cc5..218777bfc143 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2278,7 +2278,7 @@ do { \
#define ASC_DBG(lvl, format, arg...) { \
if (asc_dbglvl >= (lvl)) \
printk(KERN_DEBUG "%s: %s: " format, DRV_NAME, \
- __FUNCTION__ , ## arg); \
+ __func__ , ## arg); \
}
#define ASC_DBG_PRT_SCSI_HOST(lvl, s) \
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 0899cb61e3dd..b5a868d85eb4 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -288,20 +288,20 @@ static LIST_HEAD(aha152x_host_list);
#define DO_LOCK(flags) \
do { \
if(spin_is_locked(&QLOCK)) { \
- DPRINTK(debug_intr, DEBUG_LEAD "(%s:%d) already locked at %s:%d\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__, QLOCKER, QLOCKERL); \
+ DPRINTK(debug_intr, DEBUG_LEAD "(%s:%d) already locked at %s:%d\n", CMDINFO(CURRENT_SC), __func__, __LINE__, QLOCKER, QLOCKERL); \
} \
- DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locking\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
+ DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locking\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
spin_lock_irqsave(&QLOCK,flags); \
- DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locked\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
- QLOCKER=__FUNCTION__; \
+ DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locked\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
+ QLOCKER=__func__; \
QLOCKERL=__LINE__; \
} while(0)
#define DO_UNLOCK(flags) \
do { \
- DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocking (locked at %s:%d)\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__, QLOCKER, QLOCKERL); \
+ DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocking (locked at %s:%d)\n", CMDINFO(CURRENT_SC), __func__, __LINE__, QLOCKER, QLOCKERL); \
spin_unlock_irqrestore(&QLOCK,flags); \
- DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocked\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
+ DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocked\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
QLOCKER="(not locked)"; \
QLOCKERL=0; \
} while(0)
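Most of the SCSI hunks before and after this point simply replace the GCC-specific __FUNCTION__ with __func__, the C99 predefined identifier; both expand to the name of the enclosing function. A one-function sketch:

/* Sketch only: __func__ is standard C99, __FUNCTION__ was the older GCC
 * spelling of the same identifier. */
#include <linux/kernel.h>

static void my_debug_example(void)
{
	printk(KERN_DEBUG "%s: entered\n", __func__);
}
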
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index 2ef459e9cda1..2863a9d22851 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -39,9 +39,9 @@
#ifdef ASD_ENTER_EXIT
#define ENTER printk(KERN_NOTICE "%s: ENTER %s\n", ASD_DRIVER_NAME, \
- __FUNCTION__)
+ __func__)
#define EXIT printk(KERN_NOTICE "%s: --EXIT %s\n", ASD_DRIVER_NAME, \
- __FUNCTION__)
+ __func__)
#else
#define ENTER
#define EXIT
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 83a78222896d..eb9dc3195fdf 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -1359,7 +1359,7 @@ int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask)
struct asd_ascb *ascb_list;
if (!phy_mask) {
- asd_printk("%s called with phy_mask of 0!?\n", __FUNCTION__);
+ asd_printk("%s called with phy_mask of 0!?\n", __func__);
return 0;
}
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 46643319c520..ca55013b6ae5 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -211,7 +211,7 @@ static void asd_form_port(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
phy->asd_port = port;
}
ASD_DPRINTK("%s: updating phy_mask 0x%x for phy%d\n",
- __FUNCTION__, phy->asd_port->phy_mask, sas_phy->id);
+ __func__, phy->asd_port->phy_mask, sas_phy->id);
asd_update_port_links(asd_ha, phy);
spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags);
}
@@ -294,7 +294,7 @@ static void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
struct asd_ascb *cp = asd_ascb_alloc_list(ascb->ha, &num,
GFP_ATOMIC);
if (!cp) {
- asd_printk("%s: out of memory\n", __FUNCTION__);
+ asd_printk("%s: out of memory\n", __func__);
goto out;
}
ASD_DPRINTK("phy%d: retries:0 performing link reset seq\n",
@@ -446,7 +446,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
struct domain_device *failed_dev = NULL;
ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n",
- __FUNCTION__, dl->status_block[3]);
+ __func__, dl->status_block[3]);
/*
* Find the task that caused the abort and abort it first.
@@ -474,7 +474,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
if (!failed_dev) {
ASD_DPRINTK("%s: Can't find task (tc=%d) to abort!\n",
- __FUNCTION__, tc_abort);
+ __func__, tc_abort);
goto out;
}
@@ -502,7 +502,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
conn_handle = *((u16*)(&dl->status_block[1]));
conn_handle = le16_to_cpu(conn_handle);
- ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __FUNCTION__,
+ ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __func__,
dl->status_block[3]);
/* Find the last pending task for the device... */
@@ -522,7 +522,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
if (!last_dev_task) {
ASD_DPRINTK("%s: Device reset for idle device %d?\n",
- __FUNCTION__, conn_handle);
+ __func__, conn_handle);
goto out;
}
@@ -549,10 +549,10 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
goto out;
}
case SIGNAL_NCQ_ERROR:
- ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __FUNCTION__);
+ ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __func__);
goto out;
case CLEAR_NCQ_ERROR:
- ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __FUNCTION__);
+ ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __func__);
goto out;
}
@@ -560,26 +560,26 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
switch (sb_opcode) {
case BYTES_DMAED:
- ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __FUNCTION__, phy_id);
+ ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __func__, phy_id);
asd_bytes_dmaed_tasklet(ascb, dl, edb, phy_id);
break;
case PRIMITIVE_RECVD:
- ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __FUNCTION__,
+ ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __func__,
phy_id);
asd_primitive_rcvd_tasklet(ascb, dl, phy_id);
break;
case PHY_EVENT:
- ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __FUNCTION__, phy_id);
+ ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __func__, phy_id);
asd_phy_event_tasklet(ascb, dl);
break;
case LINK_RESET_ERROR:
- ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __FUNCTION__,
+ ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __func__,
phy_id);
asd_link_reset_err_tasklet(ascb, dl, phy_id);
break;
case TIMER_EVENT:
ASD_DPRINTK("%s: phy%d: TIMER_EVENT, lost dw sync\n",
- __FUNCTION__, phy_id);
+ __func__, phy_id);
asd_turn_led(asd_ha, phy_id, 0);
/* the device is gone */
sas_phy_disconnected(sas_phy);
@@ -587,7 +587,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
break;
default:
- ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__,
+ ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __func__,
phy_id, sb_opcode);
ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n",
edb, dl->opcode);
@@ -654,7 +654,7 @@ static void control_phy_tasklet_complete(struct asd_ascb *ascb,
if (status != 0) {
ASD_DPRINTK("%s: phy%d status block opcode:0x%x\n",
- __FUNCTION__, phy_id, status);
+ __func__, phy_id, status);
goto out;
}
@@ -663,7 +663,7 @@ static void control_phy_tasklet_complete(struct asd_ascb *ascb,
asd_ha->hw_prof.enabled_phys &= ~(1 << phy_id);
asd_turn_led(asd_ha, phy_id, 0);
asd_control_led(asd_ha, phy_id, 0);
- ASD_DPRINTK("%s: disable phy%d\n", __FUNCTION__, phy_id);
+ ASD_DPRINTK("%s: disable phy%d\n", __func__, phy_id);
break;
case ENABLE_PHY:
@@ -673,40 +673,40 @@ static void control_phy_tasklet_complete(struct asd_ascb *ascb,
get_lrate_mode(phy, oob_mode);
asd_turn_led(asd_ha, phy_id, 1);
ASD_DPRINTK("%s: phy%d, lrate:0x%x, proto:0x%x\n",
- __FUNCTION__, phy_id,phy->sas_phy.linkrate,
+ __func__, phy_id,phy->sas_phy.linkrate,
phy->sas_phy.iproto);
} else if (oob_status & CURRENT_SPINUP_HOLD) {
asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
asd_turn_led(asd_ha, phy_id, 1);
- ASD_DPRINTK("%s: phy%d, spinup hold\n", __FUNCTION__,
+ ASD_DPRINTK("%s: phy%d, spinup hold\n", __func__,
phy_id);
} else if (oob_status & CURRENT_ERR_MASK) {
asd_turn_led(asd_ha, phy_id, 0);
ASD_DPRINTK("%s: phy%d: error: oob status:0x%02x\n",
- __FUNCTION__, phy_id, oob_status);
+ __func__, phy_id, oob_status);
} else if (oob_status & (CURRENT_HOT_PLUG_CNCT
| CURRENT_DEVICE_PRESENT)) {
asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
asd_turn_led(asd_ha, phy_id, 1);
ASD_DPRINTK("%s: phy%d: hot plug or device present\n",
- __FUNCTION__, phy_id);
+ __func__, phy_id);
} else {
asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
asd_turn_led(asd_ha, phy_id, 0);
ASD_DPRINTK("%s: phy%d: no device present: "
"oob_status:0x%x\n",
- __FUNCTION__, phy_id, oob_status);
+ __func__, phy_id, oob_status);
}
break;
case RELEASE_SPINUP_HOLD:
case PHY_NO_OP:
case EXECUTE_HARD_RESET:
- ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __FUNCTION__,
+ ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __func__,
phy_id, control_phy->sub_func);
/* XXX finish */
break;
default:
- ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __FUNCTION__,
+ ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __func__,
phy_id, control_phy->sub_func);
break;
}
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 326765c9caf8..75d20f72501f 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -320,7 +320,7 @@ Again:
case TC_RESUME:
case TC_PARTIAL_SG_LIST:
default:
- ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __FUNCTION__, opcode);
+ ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __func__, opcode);
break;
}
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 633ff40c736a..d4640ef6d44f 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -75,12 +75,12 @@ static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
struct done_list_struct *dl)
{
struct tasklet_completion_status *tcs = ascb->uldd_task;
- ASD_DPRINTK("%s: here\n", __FUNCTION__);
+ ASD_DPRINTK("%s: here\n", __func__);
if (!del_timer(&ascb->timer)) {
- ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__);
+ ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
return;
}
- ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode);
+ ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
tcs->dl_opcode = dl->opcode;
complete(ascb->completion);
asd_ascb_free(ascb);
@@ -91,7 +91,7 @@ static void asd_clear_nexus_timedout(unsigned long data)
struct asd_ascb *ascb = (void *)data;
struct tasklet_completion_status *tcs = ascb->uldd_task;
- ASD_DPRINTK("%s: here\n", __FUNCTION__);
+ ASD_DPRINTK("%s: here\n", __func__);
tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
complete(ascb->completion);
}
@@ -103,7 +103,7 @@ static void asd_clear_nexus_timedout(unsigned long data)
DECLARE_COMPLETION_ONSTACK(completion); \
DECLARE_TCS(tcs); \
\
- ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \
+ ASD_DPRINTK("%s: PRE\n", __func__); \
res = 1; \
ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
if (!ascb) \
@@ -115,12 +115,12 @@ static void asd_clear_nexus_timedout(unsigned long data)
scb->header.opcode = CLEAR_NEXUS
#define CLEAR_NEXUS_POST \
- ASD_DPRINTK("%s: POST\n", __FUNCTION__); \
+ ASD_DPRINTK("%s: POST\n", __func__); \
res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
asd_clear_nexus_timedout); \
if (res) \
goto out_err; \
- ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \
+ ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
wait_for_completion(&completion); \
res = tcs.dl_opcode; \
if (res == TC_NO_ERROR) \
@@ -417,7 +417,7 @@ int asd_abort_task(struct sas_task *task)
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
res = TMF_RESP_FUNC_COMPLETE;
- ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task);
+ ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
goto out_done;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -481,7 +481,7 @@ int asd_abort_task(struct sas_task *task)
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
res = TMF_RESP_FUNC_COMPLETE;
- ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task);
+ ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
goto out_done;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index a715632e19d4..477542602284 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -240,7 +240,7 @@ static void __fas216_checkmagic(FAS216_Info *info, const char *func)
panic("scsi memory space corrupted in %s", func);
}
}
-#define fas216_checkmagic(info) __fas216_checkmagic((info), __FUNCTION__)
+#define fas216_checkmagic(info) __fas216_checkmagic((info), __func__)
#else
#define fas216_checkmagic(info)
#endif
@@ -2658,7 +2658,7 @@ int fas216_eh_host_reset(struct scsi_cmnd *SCpnt)
fas216_checkmagic(info);
printk("scsi%d.%c: %s: resetting host\n",
- info->host->host_no, '0' + SCpnt->device->id, __FUNCTION__);
+ info->host->host_no, '0' + SCpnt->device->id, __func__);
/*
* Reset the SCSI chip.
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index aa2011b64683..3c257fe0893e 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -930,6 +930,7 @@ static int ch_probe(struct device *dev)
if (init)
ch_init_elem(ch);
+ dev_set_drvdata(dev, ch);
sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
return 0;
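The one-line ch.c change above stores the changer object with dev_set_drvdata() in probe so the matching dev_get_drvdata() in the remove path finds it again. A minimal sketch of that pairing, with hypothetical probe/remove functions and structure:

/* Sketch only: my_probe()/my_remove()/struct my_changer are made up; the
 * point is the dev_set_drvdata()/dev_get_drvdata() pairing restored above. */
#include <linux/device.h>
#include <linux/slab.h>

struct my_changer {
	int minor;
};

static int my_probe(struct device *dev)
{
	struct my_changer *ch = kzalloc(sizeof(*ch), GFP_KERNEL);

	if (!ch)
		return -ENOMEM;
	dev_set_drvdata(dev, ch);	/* without this, remove would see NULL */
	return 0;
}

static int my_remove(struct device *dev)
{
	struct my_changer *ch = dev_get_drvdata(dev);

	dev_set_drvdata(dev, NULL);
	kfree(ch);
	return 0;
}
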
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
index 2adc0f666b68..67070257919f 100644
--- a/drivers/scsi/device_handler/Kconfig
+++ b/drivers/scsi/device_handler/Kconfig
@@ -30,3 +30,11 @@ config SCSI_DH_EMC
depends on SCSI_DH
help
If you have a EMC CLARiiON select y. Otherwise, say N.
+
+config SCSI_DH_ALUA
+ tristate "SPC-3 ALUA Device Handler (EXPERIMENTAL)"
+ depends on SCSI_DH && EXPERIMENTAL
+ help
+ SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
+ Access (ALUA).
+
diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile
index 35272e93b1c8..e1d2ea083e15 100644
--- a/drivers/scsi/device_handler/Makefile
+++ b/drivers/scsi/device_handler/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_SCSI_DH) += scsi_dh.o
obj-$(CONFIG_SCSI_DH_RDAC) += scsi_dh_rdac.o
obj-$(CONFIG_SCSI_DH_HP_SW) += scsi_dh_hp_sw.o
obj-$(CONFIG_SCSI_DH_EMC) += scsi_dh_emc.o
+obj-$(CONFIG_SCSI_DH_ALUA) += scsi_dh_alua.o
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index ab6c21cd9689..a518f2eff19a 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -24,8 +24,16 @@
#include <scsi/scsi_dh.h>
#include "../scsi_priv.h"
+struct scsi_dh_devinfo_list {
+ struct list_head node;
+ char vendor[9];
+ char model[17];
+ struct scsi_device_handler *handler;
+};
+
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(scsi_dh_list);
+static LIST_HEAD(scsi_dh_dev_list);
static struct scsi_device_handler *get_device_handler(const char *name)
{
@@ -33,7 +41,7 @@ static struct scsi_device_handler *get_device_handler(const char *name)
spin_lock(&list_lock);
list_for_each_entry(tmp, &scsi_dh_list, list) {
- if (!strcmp(tmp->name, name)) {
+ if (!strncmp(tmp->name, name, strlen(tmp->name))) {
found = tmp;
break;
}
@@ -42,11 +50,307 @@ static struct scsi_device_handler *get_device_handler(const char *name)
return found;
}
+
+static struct scsi_device_handler *
+scsi_dh_cache_lookup(struct scsi_device *sdev)
+{
+ struct scsi_dh_devinfo_list *tmp;
+ struct scsi_device_handler *found_dh = NULL;
+
+ spin_lock(&list_lock);
+ list_for_each_entry(tmp, &scsi_dh_dev_list, node) {
+ if (!strncmp(sdev->vendor, tmp->vendor, strlen(tmp->vendor)) &&
+ !strncmp(sdev->model, tmp->model, strlen(tmp->model))) {
+ found_dh = tmp->handler;
+ break;
+ }
+ }
+ spin_unlock(&list_lock);
+
+ return found_dh;
+}
+
+static int scsi_dh_handler_lookup(struct scsi_device_handler *scsi_dh,
+ struct scsi_device *sdev)
+{
+ int i, found = 0;
+
+ for(i = 0; scsi_dh->devlist[i].vendor; i++) {
+ if (!strncmp(sdev->vendor, scsi_dh->devlist[i].vendor,
+ strlen(scsi_dh->devlist[i].vendor)) &&
+ !strncmp(sdev->model, scsi_dh->devlist[i].model,
+ strlen(scsi_dh->devlist[i].model))) {
+ found = 1;
+ break;
+ }
+ }
+ return found;
+}
+
+/*
+ * device_handler_match - Attach a device handler to a device
+ * @scsi_dh - The device handler to match against or NULL
+ * @sdev - SCSI device to be tested against @scsi_dh
+ *
+ * Tests @sdev against the device handler @scsi_dh or against
+ * all registered device_handler if @scsi_dh == NULL.
+ * Returns the found device handler or NULL if not found.
+ */
+static struct scsi_device_handler *
+device_handler_match(struct scsi_device_handler *scsi_dh,
+ struct scsi_device *sdev)
+{
+ struct scsi_device_handler *found_dh = NULL;
+ struct scsi_dh_devinfo_list *tmp;
+
+ found_dh = scsi_dh_cache_lookup(sdev);
+ if (found_dh)
+ return found_dh;
+
+ if (scsi_dh) {
+ if (scsi_dh_handler_lookup(scsi_dh, sdev))
+ found_dh = scsi_dh;
+ } else {
+ struct scsi_device_handler *tmp_dh;
+
+ spin_lock(&list_lock);
+ list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
+ if (scsi_dh_handler_lookup(tmp_dh, sdev))
+ found_dh = tmp_dh;
+ }
+ spin_unlock(&list_lock);
+ }
+
+ if (found_dh) { /* If device is found, add it to the cache */
+ tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+ if (tmp) {
+ strncpy(tmp->vendor, sdev->vendor, 8);
+ strncpy(tmp->model, sdev->model, 16);
+ tmp->vendor[8] = '\0';
+ tmp->model[16] = '\0';
+ tmp->handler = found_dh;
+ spin_lock(&list_lock);
+ list_add(&tmp->node, &scsi_dh_dev_list);
+ spin_unlock(&list_lock);
+ } else {
+ found_dh = NULL;
+ }
+ }
+
+ return found_dh;
+}
+
+/*
+ * scsi_dh_handler_attach - Attach a device handler to a device
+ * @sdev - SCSI device the device handler should attach to
+ * @scsi_dh - The device handler to attach
+ */
+static int scsi_dh_handler_attach(struct scsi_device *sdev,
+ struct scsi_device_handler *scsi_dh)
+{
+ int err = 0;
+
+ if (sdev->scsi_dh_data) {
+ if (sdev->scsi_dh_data->scsi_dh != scsi_dh)
+ err = -EBUSY;
+ } else if (scsi_dh->attach)
+ err = scsi_dh->attach(sdev);
+
+ return err;
+}
+
+/*
+ * scsi_dh_handler_detach - Detach a device handler from a device
+ * @sdev - SCSI device the device handler should be detached from
+ * @scsi_dh - Device handler to be detached
+ *
+ * Detach from a device handler. If a device handler is specified,
+ * only detach if the currently attached handler matches @scsi_dh.
+ */
+static void scsi_dh_handler_detach(struct scsi_device *sdev,
+ struct scsi_device_handler *scsi_dh)
+{
+ if (!sdev->scsi_dh_data)
+ return;
+
+ if (scsi_dh && scsi_dh != sdev->scsi_dh_data->scsi_dh)
+ return;
+
+ if (!scsi_dh)
+ scsi_dh = sdev->scsi_dh_data->scsi_dh;
+
+ if (scsi_dh && scsi_dh->detach)
+ scsi_dh->detach(sdev);
+}
+
+/*
+ * Functions for sysfs attribute 'dh_state'
+ */
+static ssize_t
+store_dh_state(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct scsi_device_handler *scsi_dh;
+ int err = -EINVAL;
+
+ if (!sdev->scsi_dh_data) {
+ /*
+ * Attach to a device handler
+ */
+ if (!(scsi_dh = get_device_handler(buf)))
+ return err;
+ err = scsi_dh_handler_attach(sdev, scsi_dh);
+ } else {
+ scsi_dh = sdev->scsi_dh_data->scsi_dh;
+ if (!strncmp(buf, "detach", 6)) {
+ /*
+ * Detach from a device handler
+ */
+ scsi_dh_handler_detach(sdev, scsi_dh);
+ err = 0;
+ } else if (!strncmp(buf, "activate", 8)) {
+ /*
+ * Activate a device handler
+ */
+ if (scsi_dh->activate)
+ err = scsi_dh->activate(sdev);
+ else
+ err = 0;
+ }
+ }
+
+ return err<0?err:count;
+}
+
+static ssize_t
+show_dh_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (!sdev->scsi_dh_data)
+ return snprintf(buf, 20, "detached\n");
+
+ return snprintf(buf, 20, "%s\n", sdev->scsi_dh_data->scsi_dh->name);
+}
+
+static struct device_attribute scsi_dh_state_attr =
+ __ATTR(dh_state, S_IRUGO | S_IWUSR, show_dh_state,
+ store_dh_state);
+
+/*
+ * scsi_dh_sysfs_attr_add - Callback for scsi_init_dh
+ */
+static int scsi_dh_sysfs_attr_add(struct device *dev, void *data)
+{
+ struct scsi_device *sdev;
+ int err;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ err = device_create_file(&sdev->sdev_gendev,
+ &scsi_dh_state_attr);
+
+ return 0;
+}
+
+/*
+ * scsi_dh_sysfs_attr_remove - Callback for scsi_exit_dh
+ */
+static int scsi_dh_sysfs_attr_remove(struct device *dev, void *data)
+{
+ struct scsi_device *sdev;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ device_remove_file(&sdev->sdev_gendev,
+ &scsi_dh_state_attr);
+
+ return 0;
+}
+
+/*
+ * scsi_dh_notifier - notifier chain callback
+ */
+static int scsi_dh_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct scsi_device *sdev;
+ int err = 0;
+ struct scsi_device_handler *devinfo = NULL;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ if (action == BUS_NOTIFY_ADD_DEVICE) {
+ devinfo = device_handler_match(NULL, sdev);
+ if (!devinfo)
+ goto out;
+
+ err = scsi_dh_handler_attach(sdev, devinfo);
+ if (!err)
+ err = device_create_file(dev, &scsi_dh_state_attr);
+ } else if (action == BUS_NOTIFY_DEL_DEVICE) {
+ device_remove_file(dev, &scsi_dh_state_attr);
+ scsi_dh_handler_detach(sdev, NULL);
+ }
+out:
+ return err;
+}
+
+/*
+ * scsi_dh_notifier_add - Callback for scsi_register_device_handler
+ */
static int scsi_dh_notifier_add(struct device *dev, void *data)
{
struct scsi_device_handler *scsi_dh = data;
+ struct scsi_device *sdev;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ if (!get_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ if (device_handler_match(scsi_dh, sdev))
+ scsi_dh_handler_attach(sdev, scsi_dh);
+
+ put_device(dev);
+
+ return 0;
+}
+
+/*
+ * scsi_dh_notifier_remove - Callback for scsi_unregister_device_handler
+ */
+static int scsi_dh_notifier_remove(struct device *dev, void *data)
+{
+ struct scsi_device_handler *scsi_dh = data;
+ struct scsi_device *sdev;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ if (!get_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ scsi_dh_handler_detach(sdev, scsi_dh);
+
+ put_device(dev);
- scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_ADD_DEVICE, dev);
return 0;
}
@@ -59,33 +363,19 @@ static int scsi_dh_notifier_add(struct device *dev, void *data)
*/
int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
{
- int ret = -EBUSY;
- struct scsi_device_handler *tmp;
+ if (get_device_handler(scsi_dh->name))
+ return -EBUSY;
- tmp = get_device_handler(scsi_dh->name);
- if (tmp)
- goto done;
-
- ret = bus_register_notifier(&scsi_bus_type, &scsi_dh->nb);
-
- bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
spin_lock(&list_lock);
list_add(&scsi_dh->list, &scsi_dh_list);
spin_unlock(&list_lock);
+ bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
+ printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
-done:
- return ret;
+ return SCSI_DH_OK;
}
EXPORT_SYMBOL_GPL(scsi_register_device_handler);
-static int scsi_dh_notifier_remove(struct device *dev, void *data)
-{
- struct scsi_device_handler *scsi_dh = data;
-
- scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_DEL_DEVICE, dev);
- return 0;
-}
-
/*
* scsi_unregister_device_handler - register a device handler personality
* module.
@@ -95,23 +385,26 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data)
*/
int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
{
- int ret = -ENODEV;
- struct scsi_device_handler *tmp;
-
- tmp = get_device_handler(scsi_dh->name);
- if (!tmp)
- goto done;
+ struct scsi_dh_devinfo_list *tmp, *pos;
- ret = bus_unregister_notifier(&scsi_bus_type, &scsi_dh->nb);
+ if (!get_device_handler(scsi_dh->name))
+ return -ENODEV;
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
- scsi_dh_notifier_remove);
+ scsi_dh_notifier_remove);
+
spin_lock(&list_lock);
list_del(&scsi_dh->list);
+ list_for_each_entry_safe(pos, tmp, &scsi_dh_dev_list, node) {
+ if (pos->handler == scsi_dh) {
+ list_del(&pos->node);
+ kfree(pos);
+ }
+ }
spin_unlock(&list_lock);
+ printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
-done:
- return ret;
+ return SCSI_DH_OK;
}
EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
@@ -157,6 +450,97 @@ int scsi_dh_handler_exist(const char *name)
}
EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
+/*
+ * scsi_dh_handler_attach - Attach device handler
+ * @sdev - sdev the handler should be attached to
+ * @name - name of the handler to attach
+ */
+int scsi_dh_attach(struct request_queue *q, const char *name)
+{
+ unsigned long flags;
+ struct scsi_device *sdev;
+ struct scsi_device_handler *scsi_dh;
+ int err = 0;
+
+ scsi_dh = get_device_handler(name);
+ if (!scsi_dh)
+ return -EINVAL;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ sdev = q->queuedata;
+ if (!sdev || !get_device(&sdev->sdev_gendev))
+ err = -ENODEV;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (!err) {
+ err = scsi_dh_handler_attach(sdev, scsi_dh);
+
+ put_device(&sdev->sdev_gendev);
+ }
+ return err;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_attach);
+
+/*
+ * scsi_dh_handler_detach - Detach device handler
+ * @sdev - sdev the handler should be detached from
+ *
+ * This function will detach the device handler only
+ * if the sdev is not part of the internal list, ie
+ * if it has been attached manually.
+ */
+void scsi_dh_detach(struct request_queue *q)
+{
+ unsigned long flags;
+ struct scsi_device *sdev;
+ struct scsi_device_handler *scsi_dh = NULL;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ sdev = q->queuedata;
+ if (!sdev || !get_device(&sdev->sdev_gendev))
+ sdev = NULL;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (!sdev)
+ return;
+
+ if (sdev->scsi_dh_data) {
+ /* if sdev is not on internal list, detach */
+ scsi_dh = sdev->scsi_dh_data->scsi_dh;
+ if (!device_handler_match(scsi_dh, sdev))
+ scsi_dh_handler_detach(sdev, scsi_dh);
+ }
+ put_device(&sdev->sdev_gendev);
+}
+EXPORT_SYMBOL_GPL(scsi_dh_detach);
+
+static struct notifier_block scsi_dh_nb = {
+ .notifier_call = scsi_dh_notifier
+};
+
+static int __init scsi_dh_init(void)
+{
+ int r;
+
+ r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
+
+ if (!r)
+ bus_for_each_dev(&scsi_bus_type, NULL, NULL,
+ scsi_dh_sysfs_attr_add);
+
+ return r;
+}
+
+static void __exit scsi_dh_exit(void)
+{
+ bus_for_each_dev(&scsi_bus_type, NULL, NULL,
+ scsi_dh_sysfs_attr_remove);
+ bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
+}
+
+module_init(scsi_dh_init);
+module_exit(scsi_dh_exit);
+
MODULE_DESCRIPTION("SCSI device handler");
MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>");
MODULE_LICENSE("GPL");
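With the scsi_dh rework above, handlers attach through a single core notifier and a consumer can bind a handler by name to a device's request queue through the exported scsi_dh_attach()/scsi_dh_detach() pair instead of per-handler bus notifiers. A minimal sketch of a hypothetical caller (my_bind_alua() is made up; the two scsi_dh_* calls and the "alua" name come from the code above and below):

/* Sketch only: illustrates the exported attach/detach API. */
#include <linux/blkdev.h>
#include <scsi/scsi_dh.h>

static int my_bind_alua(struct request_queue *q)
{
	int err = scsi_dh_attach(q, "alua");	/* -EINVAL if no such handler */

	if (err)
		return err;
	/* ... use the handler, e.g. trigger path activation ... */
	scsi_dh_detach(q);	/* only detaches handlers attached manually */
	return 0;
}
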
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
new file mode 100644
index 000000000000..fcdd73f25625
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -0,0 +1,802 @@
+/*
+ * Generic SCSI-3 ALUA SCSI Device Handler
+ *
+ * Copyright (C) 2007, 2008 Hannes Reinecke, SUSE Linux Products GmbH.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+
+#define ALUA_DH_NAME "alua"
+#define ALUA_DH_VER "1.2"
+
+#define TPGS_STATE_OPTIMIZED 0x0
+#define TPGS_STATE_NONOPTIMIZED 0x1
+#define TPGS_STATE_STANDBY 0x2
+#define TPGS_STATE_UNAVAILABLE 0x3
+#define TPGS_STATE_OFFLINE 0xe
+#define TPGS_STATE_TRANSITIONING 0xf
+
+#define TPGS_SUPPORT_NONE 0x00
+#define TPGS_SUPPORT_OPTIMIZED 0x01
+#define TPGS_SUPPORT_NONOPTIMIZED 0x02
+#define TPGS_SUPPORT_STANDBY 0x04
+#define TPGS_SUPPORT_UNAVAILABLE 0x08
+#define TPGS_SUPPORT_OFFLINE 0x40
+#define TPGS_SUPPORT_TRANSITION 0x80
+
+#define TPGS_MODE_UNINITIALIZED -1
+#define TPGS_MODE_NONE 0x0
+#define TPGS_MODE_IMPLICIT 0x1
+#define TPGS_MODE_EXPLICIT 0x2
+
+#define ALUA_INQUIRY_SIZE 36
+#define ALUA_FAILOVER_TIMEOUT (60 * HZ)
+#define ALUA_FAILOVER_RETRIES 5
+
+struct alua_dh_data {
+ int group_id;
+ int rel_port;
+ int tpgs;
+ int state;
+ unsigned char inq[ALUA_INQUIRY_SIZE];
+ unsigned char *buff;
+ int bufflen;
+ unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+ int senselen;
+};
+
+#define ALUA_POLICY_SWITCH_CURRENT 0
+#define ALUA_POLICY_SWITCH_ALL 1
+
+static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev)
+{
+ struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
+ BUG_ON(scsi_dh_data == NULL);
+ return ((struct alua_dh_data *) scsi_dh_data->buf);
+}
+
+static int realloc_buffer(struct alua_dh_data *h, unsigned len)
+{
+ if (h->buff && h->buff != h->inq)
+ kfree(h->buff);
+
+ h->buff = kmalloc(len, GFP_NOIO);
+ if (!h->buff) {
+ h->buff = h->inq;
+ h->bufflen = ALUA_INQUIRY_SIZE;
+ return 1;
+ }
+ h->bufflen = len;
+ return 0;
+}
+
+static struct request *get_alua_req(struct scsi_device *sdev,
+ void *buffer, unsigned buflen, int rw)
+{
+ struct request *rq;
+ struct request_queue *q = sdev->request_queue;
+
+ rq = blk_get_request(q, rw, GFP_NOIO);
+
+ if (!rq) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: blk_get_request failed\n", __func__);
+ return NULL;
+ }
+
+ if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
+ blk_put_request(rq);
+ sdev_printk(KERN_INFO, sdev,
+ "%s: blk_rq_map_kern failed\n", __func__);
+ return NULL;
+ }
+
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+ rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+ rq->retries = ALUA_FAILOVER_RETRIES;
+ rq->timeout = ALUA_FAILOVER_TIMEOUT;
+
+ return rq;
+}
+
+/*
+ * submit_std_inquiry - Issue a standard INQUIRY command
+ * @sdev: sdev the command should be send to
+ */
+static int submit_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ struct request *rq;
+ int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+ rq = get_alua_req(sdev, h->inq, ALUA_INQUIRY_SIZE, READ);
+ if (!rq)
+ goto done;
+
+ /* Prepare the command. */
+ rq->cmd[0] = INQUIRY;
+ rq->cmd[1] = 0;
+ rq->cmd[2] = 0;
+ rq->cmd[4] = ALUA_INQUIRY_SIZE;
+ rq->cmd_len = COMMAND_SIZE(INQUIRY);
+
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = h->senselen = 0;
+
+ err = blk_execute_rq(rq->q, NULL, rq, 1);
+ if (err == -EIO) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: std inquiry failed with %x\n",
+ ALUA_DH_NAME, rq->errors);
+ h->senselen = rq->sense_len;
+ err = SCSI_DH_IO;
+ }
+ blk_put_request(rq);
+done:
+ return err;
+}
+
+/*
+ * submit_vpd_inquiry - Issue an INQUIRY VPD page 0x83 command
+ * @sdev: sdev the command should be sent to
+ */
+static int submit_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ struct request *rq;
+ int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+ rq = get_alua_req(sdev, h->buff, h->bufflen, READ);
+ if (!rq)
+ goto done;
+
+ /* Prepare the command. */
+ rq->cmd[0] = INQUIRY;
+ rq->cmd[1] = 1;
+ rq->cmd[2] = 0x83;
+ rq->cmd[4] = h->bufflen;
+ rq->cmd_len = COMMAND_SIZE(INQUIRY);
+
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = h->senselen = 0;
+
+ err = blk_execute_rq(rq->q, NULL, rq, 1);
+ if (err == -EIO) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: evpd inquiry failed with %x\n",
+ ALUA_DH_NAME, rq->errors);
+ h->senselen = rq->sense_len;
+ err = SCSI_DH_IO;
+ }
+ blk_put_request(rq);
+done:
+ return err;
+}
+
+/*
+ * submit_rtpg - Issue a REPORT TARGET GROUP STATES command
+ * @sdev: sdev the command should be sent to
+ */
+static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ struct request *rq;
+ int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+ rq = get_alua_req(sdev, h->buff, h->bufflen, READ);
+ if (!rq)
+ goto done;
+
+ /* Prepare the command. */
+ rq->cmd[0] = MAINTENANCE_IN;
+ rq->cmd[1] = MI_REPORT_TARGET_PGS;
+ rq->cmd[6] = (h->bufflen >> 24) & 0xff;
+ rq->cmd[7] = (h->bufflen >> 16) & 0xff;
+ rq->cmd[8] = (h->bufflen >> 8) & 0xff;
+ rq->cmd[9] = h->bufflen & 0xff;
+ rq->cmd_len = COMMAND_SIZE(MAINTENANCE_IN);
+
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = h->senselen = 0;
+
+ err = blk_execute_rq(rq->q, NULL, rq, 1);
+ if (err == -EIO) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: rtpg failed with %x\n",
+ ALUA_DH_NAME, rq->errors);
+ h->senselen = rq->sense_len;
+ err = SCSI_DH_IO;
+ }
+ blk_put_request(rq);
+done:
+ return err;
+}
+
+/*
+ * submit_stpg - Issue a SET TARGET GROUP STATES command
+ * @sdev: sdev the command should be sent to
+ *
+ * Currently we're only setting the current target port group state
+ * to 'active/optimized' and let the array firmware figure out
+ * the states of the remaining groups.
+ */
+static unsigned submit_stpg(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ struct request *rq;
+ int err = SCSI_DH_RES_TEMP_UNAVAIL;
+ int stpg_len = 8;
+
+ /* Prepare the data buffer */
+ memset(h->buff, 0, stpg_len);
+ h->buff[4] = TPGS_STATE_OPTIMIZED & 0x0f;
+ h->buff[6] = (h->group_id >> 8) & 0x0f;
+ h->buff[7] = h->group_id & 0x0f;
+
+ rq = get_alua_req(sdev, h->buff, stpg_len, WRITE);
+ if (!rq)
+ goto done;
+
+ /* Prepare the command. */
+ rq->cmd[0] = MAINTENANCE_OUT;
+ rq->cmd[1] = MO_SET_TARGET_PGS;
+ rq->cmd[6] = (stpg_len >> 24) & 0xff;
+ rq->cmd[7] = (stpg_len >> 16) & 0xff;
+ rq->cmd[8] = (stpg_len >> 8) & 0xff;
+ rq->cmd[9] = stpg_len & 0xff;
+ rq->cmd_len = COMMAND_SIZE(MAINTENANCE_OUT);
+
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = h->senselen = 0;
+
+ err = blk_execute_rq(rq->q, NULL, rq, 1);
+ if (err == -EIO) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: stpg failed with %x\n",
+ ALUA_DH_NAME, rq->errors);
+ h->senselen = rq->sense_len;
+ err = SCSI_DH_IO;
+ }
+ blk_put_request(rq);
+done:
+ return err;
+}
+
+/*
+ * alua_std_inquiry - Evaluate standard INQUIRY command
+ * @sdev: device to be checked
+ *
+ * Just extract the TPGS setting to find out if ALUA
+ * is supported.
+ */
+static int alua_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ int err;
+
+ err = submit_std_inquiry(sdev, h);
+
+ if (err != SCSI_DH_OK)
+ return err;
+
+ /* Check TPGS setting */
+ h->tpgs = (h->inq[5] >> 4) & 0x3;
+ switch (h->tpgs) {
+ case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT:
+ sdev_printk(KERN_INFO, sdev,
+ "%s: supports implicit and explicit TPGS\n",
+ ALUA_DH_NAME);
+ break;
+ case TPGS_MODE_EXPLICIT:
+ sdev_printk(KERN_INFO, sdev, "%s: supports explicit TPGS\n",
+ ALUA_DH_NAME);
+ break;
+ case TPGS_MODE_IMPLICIT:
+ sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n",
+ ALUA_DH_NAME);
+ break;
+ default:
+ h->tpgs = TPGS_MODE_NONE;
+ sdev_printk(KERN_INFO, sdev, "%s: not supported\n",
+ ALUA_DH_NAME);
+ err = SCSI_DH_DEV_UNSUPP;
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * alua_vpd_inquiry - Evaluate INQUIRY vpd page 0x83
+ * @sdev: device to be checked
+ *
+ * Extract the relative target port and the target port group
+ * descriptor from the list of identificators.
+ */
+static int alua_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ int len;
+ unsigned err;
+ unsigned char *d;
+
+ retry:
+ err = submit_vpd_inquiry(sdev, h);
+
+ if (err != SCSI_DH_OK)
+ return err;
+
+ /* Check if vpd page exceeds initial buffer */
+ len = (h->buff[2] << 8) + h->buff[3] + 4;
+ if (len > h->bufflen) {
+ /* Resubmit with the correct length */
+ if (realloc_buffer(h, len)) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: kmalloc buffer failed\n",
+ ALUA_DH_NAME);
+ /* Temporary failure, bypass */
+ return SCSI_DH_DEV_TEMP_BUSY;
+ }
+ goto retry;
+ }
+
+ /*
+ * Now look for the correct descriptor.
+ */
+ d = h->buff + 4;
+ while (d < h->buff + len) {
+ switch (d[1] & 0xf) {
+ case 0x4:
+ /* Relative target port */
+ h->rel_port = (d[6] << 8) + d[7];
+ break;
+ case 0x5:
+ /* Target port group */
+ h->group_id = (d[6] << 8) + d[7];
+ break;
+ default:
+ break;
+ }
+ d += d[3] + 4;
+ }
+
+ if (h->group_id == -1) {
+ /*
+ * Internal error; TPGS supported but required
+ * VPD identification descriptors not present.
+ * Disable ALUA support
+ */
+ sdev_printk(KERN_INFO, sdev,
+ "%s: No target port descriptors found\n",
+ ALUA_DH_NAME);
+ h->state = TPGS_STATE_OPTIMIZED;
+ h->tpgs = TPGS_MODE_NONE;
+ err = SCSI_DH_DEV_UNSUPP;
+ } else {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: port group %02x rel port %02x\n",
+ ALUA_DH_NAME, h->group_id, h->rel_port);
+ }
+
+ return err;
+}
+
+static char print_alua_state(int state)
+{
+ switch (state) {
+ case TPGS_STATE_OPTIMIZED:
+ return 'A';
+ case TPGS_STATE_NONOPTIMIZED:
+ return 'N';
+ case TPGS_STATE_STANDBY:
+ return 'S';
+ case TPGS_STATE_UNAVAILABLE:
+ return 'U';
+ case TPGS_STATE_OFFLINE:
+ return 'O';
+ case TPGS_STATE_TRANSITIONING:
+ return 'T';
+ default:
+ return 'X';
+ }
+}
+
+static int alua_check_sense(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sense_hdr)
+{
+ switch (sense_hdr->sense_key) {
+ case NOT_READY:
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a)
+ /*
+ * LUN Not Accessible - ALUA state transition
+ */
+ return NEEDS_RETRY;
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0b)
+ /*
+ * LUN Not Accessible -- Target port in standby state
+ */
+ return SUCCESS;
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0c)
+ /*
+ * LUN Not Accessible -- Target port in unavailable state
+ */
+ return SUCCESS;
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x12)
+ /*
+ * LUN Not Ready -- Offline
+ */
+ return SUCCESS;
+ break;
+ case UNIT_ATTENTION:
+ if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
+ /*
+ * Power On, Reset, or Bus Device Reset, just retry.
+ */
+ return NEEDS_RETRY;
+ if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) {
+ /*
+ * ALUA state changed
+ */
+ return NEEDS_RETRY;
+ }
+ if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) {
+ /*
+ * Implicit ALUA state transition failed
+ */
+ return NEEDS_RETRY;
+ }
+ break;
+ }
+
+ return SCSI_RETURN_NOT_HANDLED;
+}
+
+/*
+ * alua_stpg - Evaluate SET TARGET GROUP STATES
+ * @sdev: the device to be evaluated
+ * @state: the new target group state
+ *
+ * Send a SET TARGET GROUP STATES command to the device.
+ * We only have to test here if we should resubmit the command;
+ * any other error is assumed as a failure.
+ */
+static int alua_stpg(struct scsi_device *sdev, int state,
+ struct alua_dh_data *h)
+{
+ struct scsi_sense_hdr sense_hdr;
+ unsigned err;
+ int retry = ALUA_FAILOVER_RETRIES;
+
+ retry:
+ err = submit_stpg(sdev, h);
+ if (err == SCSI_DH_IO && h->senselen > 0) {
+ err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
+ &sense_hdr);
+ if (!err)
+ return SCSI_DH_IO;
+ err = alua_check_sense(sdev, &sense_hdr);
+ if (retry > 0 && err == NEEDS_RETRY) {
+ retry--;
+ goto retry;
+ }
+ sdev_printk(KERN_INFO, sdev,
+ "%s: stpg sense code: %02x/%02x/%02x\n",
+ ALUA_DH_NAME, sense_hdr.sense_key,
+ sense_hdr.asc, sense_hdr.ascq);
+ err = SCSI_DH_IO;
+ }
+ if (err == SCSI_DH_OK) {
+ h->state = state;
+ sdev_printk(KERN_INFO, sdev,
+ "%s: port group %02x switched to state %c\n",
+ ALUA_DH_NAME, h->group_id,
+ print_alua_state(h->state) );
+ }
+ return err;
+}
+
+/*
+ * alua_rtpg - Evaluate REPORT TARGET GROUP STATES
+ * @sdev: the device to be evaluated.
+ *
+ * Evaluate the Target Port Group State.
+ * Returns SCSI_DH_DEV_OFFLINED if the path is
+ * found to be unuseable.
+ */
+static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ struct scsi_sense_hdr sense_hdr;
+ int len, k, off, valid_states = 0;
+ char *ucp;
+ unsigned err;
+
+ retry:
+ err = submit_rtpg(sdev, h);
+
+ if (err == SCSI_DH_IO && h->senselen > 0) {
+ err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
+ &sense_hdr);
+ if (!err)
+ return SCSI_DH_IO;
+
+ err = alua_check_sense(sdev, &sense_hdr);
+ if (err == NEEDS_RETRY)
+ goto retry;
+ sdev_printk(KERN_INFO, sdev,
+ "%s: rtpg sense code %02x/%02x/%02x\n",
+ ALUA_DH_NAME, sense_hdr.sense_key,
+ sense_hdr.asc, sense_hdr.ascq);
+ err = SCSI_DH_IO;
+ }
+ if (err != SCSI_DH_OK)
+ return err;
+
+ len = (h->buff[0] << 24) + (h->buff[1] << 16) +
+ (h->buff[2] << 8) + h->buff[3] + 4;
+
+ if (len > h->bufflen) {
+ /* Resubmit with the correct length */
+ if (realloc_buffer(h, len)) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: kmalloc buffer failed\n",__func__);
+ /* Temporary failure, bypass */
+ return SCSI_DH_DEV_TEMP_BUSY;
+ }
+ goto retry;
+ }
+
+ for (k = 4, ucp = h->buff + 4; k < len; k += off, ucp += off) {
+ if (h->group_id == (ucp[2] << 8) + ucp[3]) {
+ h->state = ucp[0] & 0x0f;
+ valid_states = ucp[1];
+ }
+ off = 8 + (ucp[7] * 4);
+ }
+
+ sdev_printk(KERN_INFO, sdev,
+ "%s: port group %02x state %c supports %c%c%c%c%c%c\n",
+ ALUA_DH_NAME, h->group_id, print_alua_state(h->state),
+ valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
+ valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
+ valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
+ valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
+ valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
+ valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
+
+ if (h->tpgs & TPGS_MODE_EXPLICIT) {
+ switch (h->state) {
+ case TPGS_STATE_TRANSITIONING:
+ /* State transition, retry */
+ goto retry;
+ break;
+ case TPGS_STATE_OFFLINE:
+ /* Path is offline, fail */
+ err = SCSI_DH_DEV_OFFLINED;
+ break;
+ default:
+ break;
+ }
+ } else {
+ /* Only Implicit ALUA support */
+ if (h->state == TPGS_STATE_OPTIMIZED ||
+ h->state == TPGS_STATE_NONOPTIMIZED ||
+ h->state == TPGS_STATE_STANDBY)
+ /* Useable path if active */
+ err = SCSI_DH_OK;
+ else
+ /* Path unuseable for unavailable/offline */
+ err = SCSI_DH_DEV_OFFLINED;
+ }
+ return err;
+}
+
+/*
+ * alua_initialize - Initialize ALUA state
+ * @sdev: the device to be initialized
+ *
+ * For the prep_fn to work correctly we have
+ * to initialize the ALUA state for the device.
+ */
+static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ int err;
+
+ err = alua_std_inquiry(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto out;
+
+ err = alua_vpd_inquiry(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto out;
+
+ err = alua_rtpg(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto out;
+
+out:
+ return err;
+}
+
+/*
+ * alua_activate - activate a path
+ * @sdev: device on the path to be activated
+ *
+ * We're currently switching the port group to be activated only and
+ * let the array figure out the rest.
+ * There may be other arrays which require us to switch all port groups
+ * based on a certain policy. But until we actually encounter them it
+ * should be okay.
+ */
+static int alua_activate(struct scsi_device *sdev)
+{
+ struct alua_dh_data *h = get_alua_data(sdev);
+ int err = SCSI_DH_OK;
+
+ if (h->group_id != -1) {
+ err = alua_rtpg(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto out;
+ }
+
+ if (h->tpgs == TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED)
+ err = alua_stpg(sdev, TPGS_STATE_OPTIMIZED, h);
+
+out:
+ return err;
+}
+
+/*
+ * alua_prep_fn - request callback
+ *
+ * Fail I/O to all paths not in state
+ * active/optimized or active/non-optimized.
+ */
+static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
+{
+ struct alua_dh_data *h = get_alua_data(sdev);
+ int ret = BLKPREP_OK;
+
+ if (h->state != TPGS_STATE_OPTIMIZED &&
+ h->state != TPGS_STATE_NONOPTIMIZED) {
+ ret = BLKPREP_KILL;
+ req->cmd_flags |= REQ_QUIET;
+ }
+ return ret;
+
+}
+
+const struct scsi_dh_devlist alua_dev_list[] = {
+ {"HP", "MSA VOLUME" },
+ {"HP", "HSV101" },
+ {"HP", "HSV111" },
+ {"HP", "HSV200" },
+ {"HP", "HSV210" },
+ {"HP", "HSV300" },
+ {"IBM", "2107900" },
+ {"IBM", "2145" },
+ {"Pillar", "Axiom" },
+ {NULL, NULL}
+};
+
+static int alua_bus_attach(struct scsi_device *sdev);
+static void alua_bus_detach(struct scsi_device *sdev);
+
+static struct scsi_device_handler alua_dh = {
+ .name = ALUA_DH_NAME,
+ .module = THIS_MODULE,
+ .devlist = alua_dev_list,
+ .attach = alua_bus_attach,
+ .detach = alua_bus_detach,
+ .prep_fn = alua_prep_fn,
+ .check_sense = alua_check_sense,
+ .activate = alua_activate,
+};
+
+/*
+ * alua_bus_attach - Attach device handler
+ * @sdev: device to be attached to
+ */
+static int alua_bus_attach(struct scsi_device *sdev)
+{
+ struct scsi_dh_data *scsi_dh_data;
+ struct alua_dh_data *h;
+ unsigned long flags;
+ int err = SCSI_DH_OK;
+
+ scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ + sizeof(*h) , GFP_KERNEL);
+ if (!scsi_dh_data) {
+ sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
+ ALUA_DH_NAME);
+ return -ENOMEM;
+ }
+
+ scsi_dh_data->scsi_dh = &alua_dh;
+ h = (struct alua_dh_data *) scsi_dh_data->buf;
+ h->tpgs = TPGS_MODE_UNINITIALIZED;
+ h->state = TPGS_STATE_OPTIMIZED;
+ h->group_id = -1;
+ h->rel_port = -1;
+ h->buff = h->inq;
+ h->bufflen = ALUA_INQUIRY_SIZE;
+
+ err = alua_initialize(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto failed;
+
+ if (!try_module_get(THIS_MODULE))
+ goto failed;
+
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ sdev->scsi_dh_data = scsi_dh_data;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+ return 0;
+
+failed:
+ kfree(scsi_dh_data);
+ sdev_printk(KERN_ERR, sdev, "%s: not attached\n", ALUA_DH_NAME);
+ return -EINVAL;
+}
+
+/*
+ * alua_bus_detach - Detach device handler
+ * @sdev: device to be detached from
+ */
+static void alua_bus_detach(struct scsi_device *sdev)
+{
+ struct scsi_dh_data *scsi_dh_data;
+ struct alua_dh_data *h;
+ unsigned long flags;
+
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ scsi_dh_data = sdev->scsi_dh_data;
+ sdev->scsi_dh_data = NULL;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+ h = (struct alua_dh_data *) scsi_dh_data->buf;
+ if (h->buff && h->inq != h->buff)
+ kfree(h->buff);
+ kfree(scsi_dh_data);
+ module_put(THIS_MODULE);
+ sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", ALUA_DH_NAME);
+}
+
+static int __init alua_init(void)
+{
+ int r;
+
+ r = scsi_register_device_handler(&alua_dh);
+ if (r != 0)
+ printk(KERN_ERR "%s: Failed to register scsi device handler",
+ ALUA_DH_NAME);
+ return r;
+}
+
+static void __exit alua_exit(void)
+{
+ scsi_unregister_device_handler(&alua_dh);
+}
+
+module_init(alua_init);
+module_exit(alua_exit);
+
+MODULE_DESCRIPTION("DM Multipath ALUA support");
+MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ALUA_DH_VER);
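
For readers following alua_rtpg() above: the REPORT TARGET PORT GROUPS payload begins with a 4-byte return data length (which excludes the header itself, hence the trailing "+ 4"), followed by target port group descriptors of 8 bytes plus 4 bytes per target port. The standalone sketch below mirrors that walk; the helper name walk_rtpg and the sample buffer are invented for illustration and are not part of the patch.

#include <stdio.h>

/*
 * Walk a REPORT TARGET PORT GROUPS response the way alua_rtpg() does:
 * bytes 0-3 hold the returned data length (excluding those 4 bytes),
 * followed by descriptors of 8 + 4 * <port count> bytes each.
 */
static void walk_rtpg(const unsigned char *buff, unsigned int group_id)
{
	unsigned int len, k, off;
	const unsigned char *ucp;

	len = (buff[0] << 24) + (buff[1] << 16) + (buff[2] << 8) + buff[3] + 4;

	for (k = 4, ucp = buff + 4; k < len; k += off, ucp += off) {
		unsigned int group = (ucp[2] << 8) + ucp[3];
		unsigned int state = ucp[0] & 0x0f;
		unsigned int valid = ucp[1];

		if (group == group_id)
			printf("group %#x: state %#x, supported states %#x\n",
			       group, state, valid);
		off = 8 + (ucp[7] * 4);	/* descriptor header + port entries */
	}
}

int main(void)
{
	/* Invented sample: one descriptor, active/optimized, group 1, one port. */
	static const unsigned char sample[16] = {
		0x00, 0x00, 0x00, 0x0c,	/* return data length = 12 */
		0x00, 0x0f, 0x00, 0x01,	/* state, valid states, group id */
		0x00, 0x00, 0x00, 0x01,	/* status, vendor, rsvd, port count */
		0x00, 0x00, 0x00, 0x01,	/* relative target port id */
	};

	walk_rtpg(sample, 0x01);
	return 0;
}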
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index f2467e936e55..aa46b131b20e 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -25,28 +25,31 @@
#include <scsi/scsi_dh.h>
#include <scsi/scsi_device.h>
-#define CLARIION_NAME "emc_clariion"
+#define CLARIION_NAME "emc"
#define CLARIION_TRESPASS_PAGE 0x22
-#define CLARIION_BUFFER_SIZE 0x80
+#define CLARIION_BUFFER_SIZE 0xFC
#define CLARIION_TIMEOUT (60 * HZ)
#define CLARIION_RETRIES 3
#define CLARIION_UNBOUND_LU -1
+#define CLARIION_SP_A 0
+#define CLARIION_SP_B 1
-static unsigned char long_trespass[] = {
- 0, 0, 0, 0,
- CLARIION_TRESPASS_PAGE, /* Page code */
- 0x09, /* Page length - 2 */
- 0x81, /* Trespass code + Honor reservation bit */
- 0xff, 0xff, /* Trespass target */
- 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
-};
+/* Flags */
+#define CLARIION_SHORT_TRESPASS 1
+#define CLARIION_HONOR_RESERVATIONS 2
-static unsigned char long_trespass_hr[] = {
- 0, 0, 0, 0,
+/* LUN states */
+#define CLARIION_LUN_UNINITIALIZED -1
+#define CLARIION_LUN_UNBOUND 0
+#define CLARIION_LUN_BOUND 1
+#define CLARIION_LUN_OWNED 2
+
+static unsigned char long_trespass[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
CLARIION_TRESPASS_PAGE, /* Page code */
0x09, /* Page length - 2 */
- 0x01, /* Trespass code + Honor reservation bit */
+ 0x01, /* Trespass code */
0xff, 0xff, /* Trespass target */
0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
};
@@ -55,39 +58,56 @@ static unsigned char short_trespass[] = {
0, 0, 0, 0,
CLARIION_TRESPASS_PAGE, /* Page code */
0x02, /* Page length - 2 */
- 0x81, /* Trespass code + Honor reservation bit */
+ 0x01, /* Trespass code */
0xff, /* Trespass target */
};
-static unsigned char short_trespass_hr[] = {
- 0, 0, 0, 0,
- CLARIION_TRESPASS_PAGE, /* Page code */
- 0x02, /* Page length - 2 */
- 0x01, /* Trespass code + Honor reservation bit */
- 0xff, /* Trespass target */
+static const char * lun_state[] =
+{
+ "not bound",
+ "bound",
+ "owned",
};
struct clariion_dh_data {
/*
+ * Flags:
+ * CLARIION_SHORT_TRESPASS
* Use short trespass command (FC-series) or the long version
* (default for AX/CX CLARiiON arrays).
- */
- unsigned short_trespass;
- /*
+ *
+ * CLARIION_HONOR_RESERVATIONS
* Whether or not (default) to honor SCSI reservations when
* initiating a switch-over.
*/
- unsigned hr;
- /* I/O buffer for both MODE_SELECT and INQUIRY commands. */
+ unsigned flags;
+ /*
+ * I/O buffer for both MODE_SELECT and INQUIRY commands.
+ */
char buffer[CLARIION_BUFFER_SIZE];
/*
* SCSI sense buffer for commands -- assumes serial issuance
* and completion sequence of all commands for same multipath.
*/
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
- /* which SP (A=0,B=1,UNBOUND=-1) is dflt SP for path's mapped dev */
+ unsigned int senselen;
+ /*
+ * LUN state
+ */
+ int lun_state;
+ /*
+ * SP Port number
+ */
+ int port;
+ /*
+ * which SP (A=0,B=1,UNBOUND=-1) is the default SP for this
+ * path's mapped LUN
+ */
int default_sp;
- /* which SP (A=0,B=1,UNBOUND=-1) is active for path's mapped dev */
+ /*
+ * which SP (A=0,B=1,UNBOUND=-1) is the active SP for this
+ * path's mapped LUN
+ */
int current_sp;
};
@@ -102,19 +122,16 @@ static inline struct clariion_dh_data
/*
* Parse MODE_SELECT cmd reply.
*/
-static int trespass_endio(struct scsi_device *sdev, int result)
+static int trespass_endio(struct scsi_device *sdev, char *sense)
{
- int err = SCSI_DH_OK;
+ int err = SCSI_DH_IO;
struct scsi_sense_hdr sshdr;
- struct clariion_dh_data *csdev = get_clariion_data(sdev);
- char *sense = csdev->sense;
- if (status_byte(result) == CHECK_CONDITION &&
- scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
- sdev_printk(KERN_ERR, sdev, "Found valid sense data 0x%2x, "
+ if (!scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
+ sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, "
"0x%2x, 0x%2x while sending CLARiiON trespass "
- "command.\n", sshdr.sense_key, sshdr.asc,
- sshdr.ascq);
+ "command.\n", CLARIION_NAME, sshdr.sense_key,
+ sshdr.asc, sshdr.ascq);
if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) &&
(sshdr.ascq == 0x00)) {
@@ -122,9 +139,9 @@ static int trespass_endio(struct scsi_device *sdev, int result)
* Array based copy in progress -- do not send
* mode_select or copy will be aborted mid-stream.
*/
- sdev_printk(KERN_INFO, sdev, "Array Based Copy in "
+ sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in "
"progress while sending CLARiiON trespass "
- "command.\n");
+ "command.\n", CLARIION_NAME);
err = SCSI_DH_DEV_TEMP_BUSY;
} else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) &&
(sshdr.ascq == 0x03)) {
@@ -132,160 +149,153 @@ static int trespass_endio(struct scsi_device *sdev, int result)
* LUN Not Ready - Manual Intervention Required
* indicates in-progress ucode upgrade (NDU).
*/
- sdev_printk(KERN_INFO, sdev, "Detected in-progress "
+ sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress "
"ucode upgrade NDU operation while sending "
- "CLARiiON trespass command.\n");
+ "CLARiiON trespass command.\n", CLARIION_NAME);
err = SCSI_DH_DEV_TEMP_BUSY;
} else
err = SCSI_DH_DEV_FAILED;
- } else if (result) {
- sdev_printk(KERN_ERR, sdev, "Error 0x%x while sending "
- "CLARiiON trespass command.\n", result);
- err = SCSI_DH_IO;
+ } else {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: failed to send MODE SELECT, no sense available\n",
+ CLARIION_NAME);
}
-
return err;
}
-static int parse_sp_info_reply(struct scsi_device *sdev, int result,
- int *default_sp, int *current_sp, int *new_current_sp)
+static int parse_sp_info_reply(struct scsi_device *sdev,
+ struct clariion_dh_data *csdev)
{
int err = SCSI_DH_OK;
- struct clariion_dh_data *csdev = get_clariion_data(sdev);
- if (result == 0) {
- /* check for in-progress ucode upgrade (NDU) */
- if (csdev->buffer[48] != 0) {
- sdev_printk(KERN_NOTICE, sdev, "Detected in-progress "
- "ucode upgrade NDU operation while finding "
- "current active SP.");
- err = SCSI_DH_DEV_TEMP_BUSY;
- } else {
- *default_sp = csdev->buffer[5];
-
- if (csdev->buffer[4] == 2)
- /* SP for path is current */
- *current_sp = csdev->buffer[8];
- else {
- if (csdev->buffer[4] == 1)
- /* SP for this path is NOT current */
- if (csdev->buffer[8] == 0)
- *current_sp = 1;
- else
- *current_sp = 0;
- else
- /* unbound LU or LUNZ */
- *current_sp = CLARIION_UNBOUND_LU;
- }
- *new_current_sp = csdev->buffer[8];
- }
- } else {
- struct scsi_sense_hdr sshdr;
-
- err = SCSI_DH_IO;
-
- if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
- &sshdr))
- sdev_printk(KERN_ERR, sdev, "Found valid sense data "
- "0x%2x, 0x%2x, 0x%2x while finding current "
- "active SP.", sshdr.sense_key, sshdr.asc,
- sshdr.ascq);
- else
- sdev_printk(KERN_ERR, sdev, "Error 0x%x finding "
- "current active SP.", result);
+ /* check for in-progress ucode upgrade (NDU) */
+ if (csdev->buffer[48] != 0) {
+ sdev_printk(KERN_NOTICE, sdev, "%s: Detected in-progress "
+ "ucode upgrade NDU operation while finding "
+ "current active SP.", CLARIION_NAME);
+ err = SCSI_DH_DEV_TEMP_BUSY;
+ goto out;
+ }
+ if (csdev->buffer[4] < 0 || csdev->buffer[4] > 2) {
+ /* Invalid buffer format */
+ sdev_printk(KERN_NOTICE, sdev,
+ "%s: invalid VPD page 0xC0 format\n",
+ CLARIION_NAME);
+ err = SCSI_DH_NOSYS;
+ goto out;
+ }
+ switch (csdev->buffer[28] & 0x0f) {
+ case 6:
+ sdev_printk(KERN_NOTICE, sdev,
+ "%s: ALUA failover mode detected\n",
+ CLARIION_NAME);
+ break;
+ case 4:
+ /* Linux failover */
+ break;
+ default:
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: Invalid failover mode %d\n",
+ CLARIION_NAME, csdev->buffer[28] & 0x0f);
+ err = SCSI_DH_NOSYS;
+ goto out;
}
+ csdev->default_sp = csdev->buffer[5];
+ csdev->lun_state = csdev->buffer[4];
+ csdev->current_sp = csdev->buffer[8];
+ csdev->port = csdev->buffer[7];
+
+out:
return err;
}
-static int sp_info_endio(struct scsi_device *sdev, int result,
- int mode_select_sent, int *done)
+#define emc_default_str "FC (Legacy)"
+
+static char * parse_sp_model(struct scsi_device *sdev, unsigned char *buffer)
{
- struct clariion_dh_data *csdev = get_clariion_data(sdev);
- int err_flags, default_sp, current_sp, new_current_sp;
+ unsigned char len = buffer[4] + 5;
+ char *sp_model = NULL;
+ unsigned char sp_len, serial_len;
+
+ if (len < 160) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: Invalid information section length %d\n",
+ CLARIION_NAME, len);
+ /* Check for old FC arrays */
+ if (!strncmp(buffer + 8, "DGC", 3)) {
+ /* Old FC array, not supporting extended information */
+ sp_model = emc_default_str;
+ }
+ goto out;
+ }
- err_flags = parse_sp_info_reply(sdev, result, &default_sp,
- &current_sp, &new_current_sp);
+ /*
+ * Parse extended information for SP model number
+ */
+ serial_len = buffer[160];
+ if (serial_len == 0 || serial_len + 161 > len) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: Invalid array serial number length %d\n",
+ CLARIION_NAME, serial_len);
+ goto out;
+ }
+ sp_len = buffer[99];
+ if (sp_len == 0 || serial_len + sp_len + 161 > len) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: Invalid model number length %d\n",
+ CLARIION_NAME, sp_len);
+ goto out;
+ }
+ sp_model = &buffer[serial_len + 161];
+ /* Strip whitespace at the end */
+ while (sp_len > 1 && sp_model[sp_len - 1] == ' ')
+ sp_len--;
- if (err_flags != SCSI_DH_OK)
- goto done;
+ sp_model[sp_len] = '\0';
- if (mode_select_sent) {
- csdev->default_sp = default_sp;
- csdev->current_sp = current_sp;
- } else {
- /*
- * Issue the actual module_selec request IFF either
- * (1) we do not know the identity of the current SP OR
- * (2) what we think we know is actually correct.
- */
- if ((current_sp != CLARIION_UNBOUND_LU) &&
- (new_current_sp != current_sp)) {
-
- csdev->default_sp = default_sp;
- csdev->current_sp = current_sp;
-
- sdev_printk(KERN_INFO, sdev, "Ignoring path group "
- "switch-over command for CLARiiON SP%s since "
- " mapped device is already initialized.",
- current_sp ? "B" : "A");
- if (done)
- *done = 1; /* as good as doing it */
- }
- }
-done:
- return err_flags;
+out:
+ return sp_model;
}
/*
-* Get block request for REQ_BLOCK_PC command issued to path. Currently
-* limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
-*
-* Uses data and sense buffers in hardware handler context structure and
-* assumes serial servicing of commands, both issuance and completion.
-*/
-static struct request *get_req(struct scsi_device *sdev, int cmd)
+ * Get block request for REQ_BLOCK_PC command issued to path. Currently
+ * limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
+ *
+ * Uses data and sense buffers in hardware handler context structure and
+ * assumes serial servicing of commands, both issuance and completion.
+ */
+static struct request *get_req(struct scsi_device *sdev, int cmd,
+ unsigned char *buffer)
{
- struct clariion_dh_data *csdev = get_clariion_data(sdev);
struct request *rq;
- unsigned char *page22;
int len = 0;
rq = blk_get_request(sdev->request_queue,
- (cmd == MODE_SELECT) ? WRITE : READ, GFP_ATOMIC);
+ (cmd == MODE_SELECT) ? WRITE : READ, GFP_NOIO);
if (!rq) {
sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
return NULL;
}
- memset(&rq->cmd, 0, BLK_MAX_CDB);
+ memset(rq->cmd, 0, BLK_MAX_CDB);
+ rq->cmd_len = COMMAND_SIZE(cmd);
rq->cmd[0] = cmd;
- rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
switch (cmd) {
case MODE_SELECT:
- if (csdev->short_trespass) {
- page22 = csdev->hr ? short_trespass_hr : short_trespass;
- len = sizeof(short_trespass);
- } else {
- page22 = csdev->hr ? long_trespass_hr : long_trespass;
- len = sizeof(long_trespass);
- }
- /*
- * Can't DMA from kernel BSS -- must copy selected trespass
- * command mode page contents to context buffer which is
- * allocated by kmalloc.
- */
- BUG_ON((len > CLARIION_BUFFER_SIZE));
- memcpy(csdev->buffer, page22, len);
+ len = sizeof(short_trespass);
+ rq->cmd_flags |= REQ_RW;
+ rq->cmd[1] = 0x10;
+ break;
+ case MODE_SELECT_10:
+ len = sizeof(long_trespass);
rq->cmd_flags |= REQ_RW;
rq->cmd[1] = 0x10;
break;
case INQUIRY:
- rq->cmd[1] = 0x1;
- rq->cmd[2] = 0xC0;
len = CLARIION_BUFFER_SIZE;
- memset(csdev->buffer, 0, CLARIION_BUFFER_SIZE);
+ memset(buffer, 0, len);
break;
default:
BUG_ON(1);
@@ -298,47 +308,94 @@ static struct request *get_req(struct scsi_device *sdev, int cmd)
rq->timeout = CLARIION_TIMEOUT;
rq->retries = CLARIION_RETRIES;
- rq->sense = csdev->sense;
- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
- rq->sense_len = 0;
-
- if (blk_rq_map_kern(sdev->request_queue, rq, csdev->buffer,
- len, GFP_ATOMIC)) {
- __blk_put_request(rq->q, rq);
+ if (blk_rq_map_kern(rq->q, rq, buffer, len, GFP_NOIO)) {
+ blk_put_request(rq);
return NULL;
}
return rq;
}
-static int send_cmd(struct scsi_device *sdev, int cmd)
+static int send_inquiry_cmd(struct scsi_device *sdev, int page,
+ struct clariion_dh_data *csdev)
{
- struct request *rq = get_req(sdev, cmd);
+ struct request *rq = get_req(sdev, INQUIRY, csdev->buffer);
+ int err;
if (!rq)
return SCSI_DH_RES_TEMP_UNAVAIL;
- return blk_execute_rq(sdev->request_queue, NULL, rq, 1);
+ rq->sense = csdev->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = csdev->senselen = 0;
+
+ rq->cmd[0] = INQUIRY;
+ if (page != 0) {
+ rq->cmd[1] = 1;
+ rq->cmd[2] = page;
+ }
+ err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
+ if (err == -EIO) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: failed to send %s INQUIRY: %x\n",
+ CLARIION_NAME, page?"EVPD":"standard",
+ rq->errors);
+ csdev->senselen = rq->sense_len;
+ err = SCSI_DH_IO;
+ }
+
+ blk_put_request(rq);
+
+ return err;
}
-static int clariion_activate(struct scsi_device *sdev)
+static int send_trespass_cmd(struct scsi_device *sdev,
+ struct clariion_dh_data *csdev)
{
- int result, done = 0;
+ struct request *rq;
+ unsigned char *page22;
+ int err, len, cmd;
+
+ if (csdev->flags & CLARIION_SHORT_TRESPASS) {
+ page22 = short_trespass;
+ if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS))
+ /* Set Honor Reservations bit */
+ page22[6] |= 0x80;
+ len = sizeof(short_trespass);
+ cmd = MODE_SELECT;
+ } else {
+ page22 = long_trespass;
+ if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS))
+ /* Set Honor Reservations bit */
+ page22[10] |= 0x80;
+ len = sizeof(long_trespass);
+ cmd = MODE_SELECT_10;
+ }
+ BUG_ON((len > CLARIION_BUFFER_SIZE));
+ memcpy(csdev->buffer, page22, len);
- result = send_cmd(sdev, INQUIRY);
- result = sp_info_endio(sdev, result, 0, &done);
- if (result || done)
- goto done;
+ rq = get_req(sdev, cmd, csdev->buffer);
+ if (!rq)
+ return SCSI_DH_RES_TEMP_UNAVAIL;
- result = send_cmd(sdev, MODE_SELECT);
- result = trespass_endio(sdev, result);
- if (result)
- goto done;
+ rq->sense = csdev->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = csdev->senselen = 0;
- result = send_cmd(sdev, INQUIRY);
- result = sp_info_endio(sdev, result, 1, NULL);
-done:
- return result;
+ err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
+ if (err == -EIO) {
+ if (rq->sense_len) {
+ err = trespass_endio(sdev, csdev->sense);
+ } else {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: failed to send MODE SELECT: %x\n",
+ CLARIION_NAME, rq->errors);
+ }
+ }
+
+ blk_put_request(rq);
+
+ return err;
}
static int clariion_check_sense(struct scsi_device *sdev,
@@ -386,99 +443,215 @@ static int clariion_check_sense(struct scsi_device *sdev,
break;
}
- /* success just means we do not care what scsi-ml does */
- return SUCCESS;
+ return SCSI_RETURN_NOT_HANDLED;
+}
+
+static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
+{
+ struct clariion_dh_data *h = get_clariion_data(sdev);
+ int ret = BLKPREP_OK;
+
+ if (h->lun_state != CLARIION_LUN_OWNED) {
+ ret = BLKPREP_KILL;
+ req->cmd_flags |= REQ_QUIET;
+ }
+ return ret;
+
+}
+
+static int clariion_std_inquiry(struct scsi_device *sdev,
+ struct clariion_dh_data *csdev)
+{
+ int err;
+ char *sp_model;
+
+ err = send_inquiry_cmd(sdev, 0, csdev);
+ if (err != SCSI_DH_OK && csdev->senselen) {
+ struct scsi_sense_hdr sshdr;
+
+ if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
+ &sshdr)) {
+ sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
+ "%02x/%02x/%02x\n", CLARIION_NAME,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq);
+ }
+ err = SCSI_DH_IO;
+ goto out;
+ }
+
+ sp_model = parse_sp_model(sdev, csdev->buffer);
+ if (!sp_model) {
+ err = SCSI_DH_DEV_UNSUPP;
+ goto out;
+ }
+
+ /*
+ * FC Series arrays do not support long trespass
+ */
+ if (!strlen(sp_model) || !strncmp(sp_model, "FC",2))
+ csdev->flags |= CLARIION_SHORT_TRESPASS;
+
+ sdev_printk(KERN_INFO, sdev,
+ "%s: detected Clariion %s, flags %x\n",
+ CLARIION_NAME, sp_model, csdev->flags);
+out:
+ return err;
}
-static const struct {
- char *vendor;
- char *model;
-} clariion_dev_list[] = {
+static int clariion_send_inquiry(struct scsi_device *sdev,
+ struct clariion_dh_data *csdev)
+{
+ int err, retry = CLARIION_RETRIES;
+
+retry:
+ err = send_inquiry_cmd(sdev, 0xC0, csdev);
+ if (err != SCSI_DH_OK && csdev->senselen) {
+ struct scsi_sense_hdr sshdr;
+
+ err = scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
+ &sshdr);
+ if (!err)
+ return SCSI_DH_IO;
+
+ err = clariion_check_sense(sdev, &sshdr);
+ if (retry > 0 && err == NEEDS_RETRY) {
+ retry--;
+ goto retry;
+ }
+ sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
+ "%02x/%02x/%02x\n", CLARIION_NAME,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq);
+ err = SCSI_DH_IO;
+ } else {
+ err = parse_sp_info_reply(sdev, csdev);
+ }
+ return err;
+}
+
+static int clariion_activate(struct scsi_device *sdev)
+{
+ struct clariion_dh_data *csdev = get_clariion_data(sdev);
+ int result;
+
+ result = clariion_send_inquiry(sdev, csdev);
+ if (result != SCSI_DH_OK)
+ goto done;
+
+ if (csdev->lun_state == CLARIION_LUN_OWNED)
+ goto done;
+
+ result = send_trespass_cmd(sdev, csdev);
+ if (result != SCSI_DH_OK)
+ goto done;
+ sdev_printk(KERN_INFO, sdev,"%s: %s trespass command sent\n",
+ CLARIION_NAME,
+ csdev->flags&CLARIION_SHORT_TRESPASS?"short":"long" );
+
+ /* Update status */
+ result = clariion_send_inquiry(sdev, csdev);
+ if (result != SCSI_DH_OK)
+ goto done;
+
+done:
+ sdev_printk(KERN_INFO, sdev,
+ "%s: at SP %c Port %d (%s, default SP %c)\n",
+ CLARIION_NAME, csdev->current_sp + 'A',
+ csdev->port, lun_state[csdev->lun_state],
+ csdev->default_sp + 'A');
+
+ return result;
+}
+
+const struct scsi_dh_devlist clariion_dev_list[] = {
{"DGC", "RAID"},
{"DGC", "DISK"},
+ {"DGC", "VRAID"},
{NULL, NULL},
};
-static int clariion_bus_notify(struct notifier_block *, unsigned long, void *);
+static int clariion_bus_attach(struct scsi_device *sdev);
+static void clariion_bus_detach(struct scsi_device *sdev);
static struct scsi_device_handler clariion_dh = {
.name = CLARIION_NAME,
.module = THIS_MODULE,
- .nb.notifier_call = clariion_bus_notify,
+ .devlist = clariion_dev_list,
+ .attach = clariion_bus_attach,
+ .detach = clariion_bus_detach,
.check_sense = clariion_check_sense,
.activate = clariion_activate,
+ .prep_fn = clariion_prep_fn,
};
/*
* TODO: need some interface so we can set trespass values
*/
-static int clariion_bus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
+static int clariion_bus_attach(struct scsi_device *sdev)
{
- struct device *dev = data;
- struct scsi_device *sdev;
struct scsi_dh_data *scsi_dh_data;
struct clariion_dh_data *h;
- int i, found = 0;
unsigned long flags;
+ int err;
- if (!scsi_is_sdev_device(dev))
- return 0;
+ scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ + sizeof(*h) , GFP_KERNEL);
+ if (!scsi_dh_data) {
+ sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
+ CLARIION_NAME);
+ return -ENOMEM;
+ }
- sdev = to_scsi_device(dev);
+ scsi_dh_data->scsi_dh = &clariion_dh;
+ h = (struct clariion_dh_data *) scsi_dh_data->buf;
+ h->lun_state = CLARIION_LUN_UNINITIALIZED;
+ h->default_sp = CLARIION_UNBOUND_LU;
+ h->current_sp = CLARIION_UNBOUND_LU;
- if (action == BUS_NOTIFY_ADD_DEVICE) {
- for (i = 0; clariion_dev_list[i].vendor; i++) {
- if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
- strlen(clariion_dev_list[i].vendor)) &&
- !strncmp(sdev->model, clariion_dev_list[i].model,
- strlen(clariion_dev_list[i].model))) {
- found = 1;
- break;
- }
- }
- if (!found)
- goto out;
-
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
- + sizeof(*h) , GFP_KERNEL);
- if (!scsi_dh_data) {
- sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
- CLARIION_NAME);
- goto out;
- }
+ err = clariion_std_inquiry(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto failed;
- scsi_dh_data->scsi_dh = &clariion_dh;
- h = (struct clariion_dh_data *) scsi_dh_data->buf;
- h->default_sp = CLARIION_UNBOUND_LU;
- h->current_sp = CLARIION_UNBOUND_LU;
+ err = clariion_send_inquiry(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto failed;
- spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
- sdev->scsi_dh_data = scsi_dh_data;
- spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+ if (!try_module_get(THIS_MODULE))
+ goto failed;
- sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", CLARIION_NAME);
- try_module_get(THIS_MODULE);
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ sdev->scsi_dh_data = scsi_dh_data;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
- } else if (action == BUS_NOTIFY_DEL_DEVICE) {
- if (sdev->scsi_dh_data == NULL ||
- sdev->scsi_dh_data->scsi_dh != &clariion_dh)
- goto out;
+ sdev_printk(KERN_INFO, sdev,
+ "%s: connected to SP %c Port %d (%s, default SP %c)\n",
+ CLARIION_NAME, h->current_sp + 'A',
+ h->port, lun_state[h->lun_state],
+ h->default_sp + 'A');
- spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
- scsi_dh_data = sdev->scsi_dh_data;
- sdev->scsi_dh_data = NULL;
- spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+ return 0;
- sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n",
- CLARIION_NAME);
+failed:
+ kfree(scsi_dh_data);
+ sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
+ CLARIION_NAME);
+ return -EINVAL;
+}
- kfree(scsi_dh_data);
- module_put(THIS_MODULE);
- }
+static void clariion_bus_detach(struct scsi_device *sdev)
+{
+ struct scsi_dh_data *scsi_dh_data;
+ unsigned long flags;
-out:
- return 0;
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ scsi_dh_data = sdev->scsi_dh_data;
+ sdev->scsi_dh_data = NULL;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+ sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n",
+ CLARIION_NAME);
+
+ kfree(scsi_dh_data);
+ module_put(THIS_MODULE);
}
static int __init clariion_init(void)
@@ -487,7 +660,8 @@ static int __init clariion_init(void)
r = scsi_register_device_handler(&clariion_dh);
if (r != 0)
- printk(KERN_ERR "Failed to register scsi device handler.");
+ printk(KERN_ERR "%s: Failed to register scsi device handler.",
+ CLARIION_NAME);
return r;
}
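
The reworked parse_sp_info_reply() above decodes EMC's vendor-specific VPD page 0xC0 straight into the handler context: byte 4 is the LUN state, byte 5 the default SP, byte 7 the SP port, byte 8 the current SP, and the low nibble of byte 28 the failover mode (4 = Linux failover, 6 = ALUA). A minimal standalone decoder along the same lines is sketched below; the byte offsets are copied from the hunk above, while the function name and sample data are invented for illustration.

#include <stdio.h>

struct sp_info {
	int lun_state;	/* 0 = not bound, 1 = bound, 2 = owned */
	int default_sp;	/* 0 = SP A, 1 = SP B */
	int current_sp;
	int port;
};

/*
 * Decode the interesting bytes of EMC VPD page 0xC0, mirroring
 * parse_sp_info_reply() above.  Returns 0 on success, -1 if the
 * failover mode is not one the handler understands.
 */
static int decode_vpd_c0(const unsigned char *buf, struct sp_info *info)
{
	int mode = buf[28] & 0x0f;

	if (mode != 4 && mode != 6)	/* 4 = Linux failover, 6 = ALUA */
		return -1;

	info->lun_state  = buf[4];
	info->default_sp = buf[5];
	info->port       = buf[7];
	info->current_sp = buf[8];
	return 0;
}

int main(void)
{
	unsigned char page[252] = { 0 };
	struct sp_info info;

	/* Invented sample: LUN owned, default SP A, current SP B, port 2,
	 * Linux failover mode. */
	page[4] = 2; page[5] = 0; page[7] = 2; page[8] = 1; page[28] = 4;

	if (!decode_vpd_c0(page, &info))
		printf("LUN state %d at SP %c port %d (default SP %c)\n",
		       info.lun_state, 'A' + info.current_sp, info.port,
		       'A' + info.default_sp);
	return 0;
}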
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index ae6be87d6a83..9c7a1f8ebb72 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2006 Red Hat, Inc. All rights reserved.
* Copyright (C) 2006 Mike Christie
+ * Copyright (C) 2008 Hannes Reinecke <hare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,13 +26,18 @@
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
-#define HP_SW_NAME "hp_sw"
+#define HP_SW_NAME "hp_sw"
-#define HP_SW_TIMEOUT (60 * HZ)
-#define HP_SW_RETRIES 3
+#define HP_SW_TIMEOUT (60 * HZ)
+#define HP_SW_RETRIES 3
+
+#define HP_SW_PATH_UNINITIALIZED -1
+#define HP_SW_PATH_ACTIVE 0
+#define HP_SW_PATH_PASSIVE 1
struct hp_sw_dh_data {
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+ int path_state;
int retries;
};
@@ -42,51 +48,161 @@ static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev)
return ((struct hp_sw_dh_data *) scsi_dh_data->buf);
}
-static int hp_sw_done(struct scsi_device *sdev)
+/*
+ * tur_done - Handle TEST UNIT READY return status
+ * @sdev: sdev the command has been sent to
+ * @errors: blk error code
+ *
+ * Returns SCSI_DH_DEV_OFFLINED if the sdev is on the passive path
+ */
+static int tur_done(struct scsi_device *sdev, unsigned char *sense)
{
- struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
struct scsi_sense_hdr sshdr;
- int rc;
-
- sdev_printk(KERN_INFO, sdev, "hp_sw_done\n");
+ int ret;
- rc = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
- if (!rc)
+ ret = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
+ if (!ret) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending tur failed, no sense available\n",
+ HP_SW_NAME);
+ ret = SCSI_DH_IO;
goto done;
+ }
switch (sshdr.sense_key) {
+ case UNIT_ATTENTION:
+ ret = SCSI_DH_IMM_RETRY;
+ break;
case NOT_READY:
- if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
- rc = SCSI_DH_RETRY;
- h->retries++;
+ if ((sshdr.asc == 0x04) && (sshdr.ascq == 2)) {
+ /*
+ * LUN not ready - Initialization command required
+ *
+ * This is the passive path
+ */
+ ret = SCSI_DH_DEV_OFFLINED;
break;
}
- /* fall through */
+ /* Fallthrough */
default:
- h->retries++;
- rc = SCSI_DH_IMM_RETRY;
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending tur failed, sense %x/%x/%x\n",
+ HP_SW_NAME, sshdr.sense_key, sshdr.asc,
+ sshdr.ascq);
+ break;
}
done:
- if (rc == SCSI_DH_OK || rc == SCSI_DH_IO)
- h->retries = 0;
- else if (h->retries > HP_SW_RETRIES) {
- h->retries = 0;
+ return ret;
+}
+
+/*
+ * hp_sw_tur - Send TEST UNIT READY
+ * @sdev: sdev command should be sent to
+ *
+ * Use the TEST UNIT READY command to determine
+ * the path state.
+ */
+static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
+{
+ struct request *req;
+ int ret;
+
+ req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
+ if (!req)
+ return SCSI_DH_RES_TEMP_UNAVAIL;
+
+ req->cmd_type = REQ_TYPE_BLOCK_PC;
+ req->cmd_flags |= REQ_FAILFAST;
+ req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
+ memset(req->cmd, 0, MAX_COMMAND_SIZE);
+ req->cmd[0] = TEST_UNIT_READY;
+ req->timeout = HP_SW_TIMEOUT;
+ req->sense = h->sense;
+ memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ req->sense_len = 0;
+
+retry:
+ ret = blk_execute_rq(req->q, NULL, req, 1);
+ if (ret == -EIO) {
+ if (req->sense_len > 0) {
+ ret = tur_done(sdev, h->sense);
+ } else {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending tur failed with %x\n",
+ HP_SW_NAME, req->errors);
+ ret = SCSI_DH_IO;
+ }
+ } else {
+ h->path_state = HP_SW_PATH_ACTIVE;
+ ret = SCSI_DH_OK;
+ }
+ if (ret == SCSI_DH_IMM_RETRY)
+ goto retry;
+ if (ret == SCSI_DH_DEV_OFFLINED) {
+ h->path_state = HP_SW_PATH_PASSIVE;
+ ret = SCSI_DH_OK;
+ }
+
+ blk_put_request(req);
+
+ return ret;
+}
+
+/*
+ * start_done - Handle START STOP UNIT return status
+ * @sdev: sdev the command has been sent to
+ * @sense: sense data returned by the command
+ */
+static int start_done(struct scsi_device *sdev, unsigned char *sense)
+{
+ struct scsi_sense_hdr sshdr;
+ int rc;
+
+ rc = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
+ if (!rc) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending start_stop_unit failed, "
+ "no sense available\n",
+ HP_SW_NAME);
+ return SCSI_DH_IO;
+ }
+ switch (sshdr.sense_key) {
+ case NOT_READY:
+ if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
+ /*
+ * LUN not ready - manual intervention required
+ *
+ * Switch-over in progress, retry.
+ */
+ rc = SCSI_DH_RETRY;
+ break;
+ }
+ /* fall through */
+ default:
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending start_stop_unit failed, sense %x/%x/%x\n",
+ HP_SW_NAME, sshdr.sense_key, sshdr.asc,
+ sshdr.ascq);
rc = SCSI_DH_IO;
}
+
return rc;
}
-static int hp_sw_activate(struct scsi_device *sdev)
+/*
+ * hp_sw_start_stop - Send START STOP UNIT command
+ * @sdev: sdev command should be sent to
+ *
+ * Sending START STOP UNIT activates the SP.
+ */
+static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
{
- struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
struct request *req;
- int ret = SCSI_DH_RES_TEMP_UNAVAIL;
+ int ret, retry;
- req = blk_get_request(sdev->request_queue, WRITE, GFP_ATOMIC);
+ req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
if (!req)
- goto done;
-
- sdev_printk(KERN_INFO, sdev, "sending START_STOP.");
+ return SCSI_DH_RES_TEMP_UNAVAIL;
req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= REQ_FAILFAST;
@@ -98,95 +214,153 @@ static int hp_sw_activate(struct scsi_device *sdev)
req->sense = h->sense;
memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
req->sense_len = 0;
+ retry = h->retries;
+retry:
ret = blk_execute_rq(req->q, NULL, req, 1);
- if (!ret) /* SUCCESS */
- ret = hp_sw_done(sdev);
- else
+ if (ret == -EIO) {
+ if (req->sense_len > 0) {
+ ret = start_done(sdev, h->sense);
+ } else {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending start_stop_unit failed with %x\n",
+ HP_SW_NAME, req->errors);
+ ret = SCSI_DH_IO;
+ }
+ } else
+ ret = SCSI_DH_OK;
+
+ if (ret == SCSI_DH_RETRY) {
+ if (--retry)
+ goto retry;
ret = SCSI_DH_IO;
-done:
+ }
+
+ blk_put_request(req);
+
+ return ret;
+}
+
+static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
+{
+ struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+ int ret = BLKPREP_OK;
+
+ if (h->path_state != HP_SW_PATH_ACTIVE) {
+ ret = BLKPREP_KILL;
+ req->cmd_flags |= REQ_QUIET;
+ }
+ return ret;
+
+}
+
+/*
+ * hp_sw_activate - Activate a path
+ * @sdev: sdev on the path to be activated
+ *
+ * The HP Active/Passive firmware is pretty simple;
+ * the passive path reports NOT READY with sense codes
+ * 0x04/0x02; a START STOP UNIT command will then
+ * activate the passive path (and deactivate the
+ * previously active one).
+ */
+static int hp_sw_activate(struct scsi_device *sdev)
+{
+ int ret = SCSI_DH_OK;
+ struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+
+ ret = hp_sw_tur(sdev, h);
+
+ if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) {
+ ret = hp_sw_start_stop(sdev, h);
+ if (ret == SCSI_DH_OK)
+ sdev_printk(KERN_INFO, sdev,
+ "%s: activated path\n",
+ HP_SW_NAME);
+ }
+
return ret;
}
-static const struct {
- char *vendor;
- char *model;
-} hp_sw_dh_data_list[] = {
- {"COMPAQ", "MSA"},
- {"HP", "HSV"},
+const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
+ {"COMPAQ", "MSA1000 VOLUME"},
+ {"COMPAQ", "HSV110"},
+ {"HP", "HSV100"},
{"DEC", "HSG80"},
{NULL, NULL},
};
-static int hp_sw_bus_notify(struct notifier_block *, unsigned long, void *);
+static int hp_sw_bus_attach(struct scsi_device *sdev);
+static void hp_sw_bus_detach(struct scsi_device *sdev);
static struct scsi_device_handler hp_sw_dh = {
.name = HP_SW_NAME,
.module = THIS_MODULE,
- .nb.notifier_call = hp_sw_bus_notify,
+ .devlist = hp_sw_dh_data_list,
+ .attach = hp_sw_bus_attach,
+ .detach = hp_sw_bus_detach,
.activate = hp_sw_activate,
+ .prep_fn = hp_sw_prep_fn,
};
-static int hp_sw_bus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
+static int hp_sw_bus_attach(struct scsi_device *sdev)
{
- struct device *dev = data;
- struct scsi_device *sdev;
struct scsi_dh_data *scsi_dh_data;
- int i, found = 0;
+ struct hp_sw_dh_data *h;
unsigned long flags;
+ int ret;
- if (!scsi_is_sdev_device(dev))
+ scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ + sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
+ if (!scsi_dh_data) {
+ sdev_printk(KERN_ERR, sdev, "%s: Attach Failed\n",
+ HP_SW_NAME);
return 0;
+ }
- sdev = to_scsi_device(dev);
-
- if (action == BUS_NOTIFY_ADD_DEVICE) {
- for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
- if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
- strlen(hp_sw_dh_data_list[i].vendor)) &&
- !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
- strlen(hp_sw_dh_data_list[i].model))) {
- found = 1;
- break;
- }
- }
- if (!found)
- goto out;
+ scsi_dh_data->scsi_dh = &hp_sw_dh;
+ h = (struct hp_sw_dh_data *) scsi_dh_data->buf;
+ h->path_state = HP_SW_PATH_UNINITIALIZED;
+ h->retries = HP_SW_RETRIES;
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
- + sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
- if (!scsi_dh_data) {
- sdev_printk(KERN_ERR, sdev, "Attach Failed %s.\n",
- HP_SW_NAME);
- goto out;
- }
+ ret = hp_sw_tur(sdev, h);
+ if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED)
+ goto failed;
- scsi_dh_data->scsi_dh = &hp_sw_dh;
- spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
- sdev->scsi_dh_data = scsi_dh_data;
- spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
- try_module_get(THIS_MODULE);
+ if (!try_module_get(THIS_MODULE))
+ goto failed;
- sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", HP_SW_NAME);
- } else if (action == BUS_NOTIFY_DEL_DEVICE) {
- if (sdev->scsi_dh_data == NULL ||
- sdev->scsi_dh_data->scsi_dh != &hp_sw_dh)
- goto out;
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ sdev->scsi_dh_data = scsi_dh_data;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
- spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
- scsi_dh_data = sdev->scsi_dh_data;
- sdev->scsi_dh_data = NULL;
- spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
- module_put(THIS_MODULE);
+ sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n",
+ HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE?
+ "active":"passive");
- sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", HP_SW_NAME);
+ return 0;
- kfree(scsi_dh_data);
- }
+failed:
+ kfree(scsi_dh_data);
+ sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
+ HP_SW_NAME);
+ return -EINVAL;
+}
-out:
- return 0;
+static void hp_sw_bus_detach( struct scsi_device *sdev )
+{
+ struct scsi_dh_data *scsi_dh_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ scsi_dh_data = sdev->scsi_dh_data;
+ sdev->scsi_dh_data = NULL;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+ module_put(THIS_MODULE);
+
+ sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", HP_SW_NAME);
+
+ kfree(scsi_dh_data);
}
static int __init hp_sw_init(void)
@@ -202,6 +376,6 @@ static void __exit hp_sw_exit(void)
module_init(hp_sw_init);
module_exit(hp_sw_exit);
-MODULE_DESCRIPTION("HP MSA 1000");
+MODULE_DESCRIPTION("HP Active/Passive driver");
MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu");
MODULE_LICENSE("GPL");
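
The hp_sw rework above infers path state from TEST UNIT READY: an active path completes the command, while the passive path fails it with NOT READY, ASC/ASCQ 0x04/0x02 ("initialization command required"), which a subsequent START STOP UNIT clears. The sketch below reproduces only that classification step, using the sense values from tur_done() above; the enum, function name and sample calls are invented for illustration.

#include <stdio.h>

enum hp_sw_path { PATH_ACTIVE, PATH_PASSIVE, PATH_RETRY, PATH_ERROR };

/*
 * Classify a path from TEST UNIT READY results, following the
 * decisions made by tur_done() above.
 */
static enum hp_sw_path classify_tur(int tur_failed, unsigned char key,
				    unsigned char asc, unsigned char ascq)
{
	if (!tur_failed)
		return PATH_ACTIVE;		/* TUR succeeded: active path */
	if (key == 0x06)			/* UNIT ATTENTION: retry */
		return PATH_RETRY;
	if (key == 0x02 && asc == 0x04 && ascq == 0x02)
		return PATH_PASSIVE;		/* needs START STOP UNIT */
	return PATH_ERROR;
}

int main(void)
{
	static const char *name[] = { "active", "passive", "retry", "error" };

	printf("%s\n", name[classify_tur(1, 0x02, 0x04, 0x02)]);
	printf("%s\n", name[classify_tur(0, 0, 0, 0)]);
	return 0;
}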
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index fdf34b0ec6e1..b093a501f8ae 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -173,6 +173,11 @@ struct rdac_dh_data {
#define RDAC_STATE_ACTIVE 0
#define RDAC_STATE_PASSIVE 1
unsigned char state;
+
+#define RDAC_LUN_UNOWNED 0
+#define RDAC_LUN_OWNED 1
+#define RDAC_LUN_AVT 2
+ char lun_state;
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
union {
struct c2_inquiry c2;
@@ -182,6 +187,13 @@ struct rdac_dh_data {
} inq;
};
+static const char *lun_state[] =
+{
+ "unowned",
+ "owned",
+ "owned (AVT mode)",
+};
+
static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
@@ -197,9 +209,8 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
{
struct request *rq;
struct request_queue *q = sdev->request_queue;
- struct rdac_dh_data *h = get_rdac_data(sdev);
- rq = blk_get_request(q, rw, GFP_KERNEL);
+ rq = blk_get_request(q, rw, GFP_NOIO);
if (!rq) {
sdev_printk(KERN_INFO, sdev,
@@ -207,17 +218,14 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
return NULL;
}
- if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
+ if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
blk_put_request(rq);
sdev_printk(KERN_INFO, sdev,
"get_rdac_req: blk_rq_map_kern failed.\n");
return NULL;
}
- memset(&rq->cmd, 0, BLK_MAX_CDB);
- rq->sense = h->sense;
- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
- rq->sense_len = 0;
+ memset(rq->cmd, 0, BLK_MAX_CDB);
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
@@ -227,12 +235,12 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
return rq;
}
-static struct request *rdac_failover_get(struct scsi_device *sdev)
+static struct request *rdac_failover_get(struct scsi_device *sdev,
+ struct rdac_dh_data *h)
{
struct request *rq;
struct rdac_mode_common *common;
unsigned data_size;
- struct rdac_dh_data *h = get_rdac_data(sdev);
if (h->ctlr->use_ms10) {
struct rdac_pg_expanded *rdac_pg;
@@ -277,6 +285,10 @@ static struct request *rdac_failover_get(struct scsi_device *sdev)
}
rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = 0;
+
return rq;
}
@@ -321,11 +333,10 @@ done:
}
static int submit_inquiry(struct scsi_device *sdev, int page_code,
- unsigned int len)
+ unsigned int len, struct rdac_dh_data *h)
{
struct request *rq;
struct request_queue *q = sdev->request_queue;
- struct rdac_dh_data *h = get_rdac_data(sdev);
int err = SCSI_DH_RES_TEMP_UNAVAIL;
rq = get_rdac_req(sdev, &h->inq, len, READ);
@@ -338,59 +349,68 @@ static int submit_inquiry(struct scsi_device *sdev, int page_code,
rq->cmd[2] = page_code;
rq->cmd[4] = len;
rq->cmd_len = COMMAND_SIZE(INQUIRY);
+
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = 0;
+
err = blk_execute_rq(q, NULL, rq, 1);
if (err == -EIO)
err = SCSI_DH_IO;
+
+ blk_put_request(rq);
done:
return err;
}
-static int get_lun(struct scsi_device *sdev)
+static int get_lun(struct scsi_device *sdev, struct rdac_dh_data *h)
{
int err;
struct c8_inquiry *inqp;
- struct rdac_dh_data *h = get_rdac_data(sdev);
- err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry));
+ err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
if (err == SCSI_DH_OK) {
inqp = &h->inq.c8;
- h->lun = inqp->lun[7]; /* currently it uses only one byte */
+ if (inqp->page_code != 0xc8)
+ return SCSI_DH_NOSYS;
+ if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
+ inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
+ return SCSI_DH_NOSYS;
+ h->lun = scsilun_to_int((struct scsi_lun *)inqp->lun);
}
return err;
}
-#define RDAC_OWNED 0
-#define RDAC_UNOWNED 1
-#define RDAC_FAILED 2
-static int check_ownership(struct scsi_device *sdev)
+static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
{
int err;
struct c9_inquiry *inqp;
- struct rdac_dh_data *h = get_rdac_data(sdev);
- err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry));
+ err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
if (err == SCSI_DH_OK) {
- err = RDAC_UNOWNED;
inqp = &h->inq.c9;
- /*
- * If in AVT mode or if the path already owns the LUN,
- * return RDAC_OWNED;
- */
- if (((inqp->avte_cvp >> 7) == 0x1) ||
- ((inqp->avte_cvp & 0x1) != 0))
- err = RDAC_OWNED;
- } else
- err = RDAC_FAILED;
+ if ((inqp->avte_cvp >> 7) == 0x1) {
+ /* LUN in AVT mode */
+ sdev_printk(KERN_NOTICE, sdev,
+ "%s: AVT mode detected\n",
+ RDAC_NAME);
+ h->lun_state = RDAC_LUN_AVT;
+ } else if ((inqp->avte_cvp & 0x1) != 0) {
+ /* LUN was owned by the controller */
+ h->lun_state = RDAC_LUN_OWNED;
+ }
+ }
+
return err;
}
-static int initialize_controller(struct scsi_device *sdev)
+static int initialize_controller(struct scsi_device *sdev,
+ struct rdac_dh_data *h)
{
int err;
struct c4_inquiry *inqp;
- struct rdac_dh_data *h = get_rdac_data(sdev);
- err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry));
+ err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
if (err == SCSI_DH_OK) {
inqp = &h->inq.c4;
h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id);
@@ -400,13 +420,12 @@ static int initialize_controller(struct scsi_device *sdev)
return err;
}
-static int set_mode_select(struct scsi_device *sdev)
+static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
int err;
struct c2_inquiry *inqp;
- struct rdac_dh_data *h = get_rdac_data(sdev);
- err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry));
+ err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
if (err == SCSI_DH_OK) {
inqp = &h->inq.c2;
/*
@@ -421,13 +440,13 @@ static int set_mode_select(struct scsi_device *sdev)
return err;
}
-static int mode_select_handle_sense(struct scsi_device *sdev)
+static int mode_select_handle_sense(struct scsi_device *sdev,
+ unsigned char *sensebuf)
{
struct scsi_sense_hdr sense_hdr;
- struct rdac_dh_data *h = get_rdac_data(sdev);
int sense, err = SCSI_DH_IO, ret;
- ret = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
+ ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
if (!ret)
goto done;
@@ -451,14 +470,13 @@ done:
return err;
}
-static int send_mode_select(struct scsi_device *sdev)
+static int send_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
struct request *rq;
struct request_queue *q = sdev->request_queue;
- struct rdac_dh_data *h = get_rdac_data(sdev);
int err = SCSI_DH_RES_TEMP_UNAVAIL;
- rq = rdac_failover_get(sdev);
+ rq = rdac_failover_get(sdev, h);
if (!rq)
goto done;
@@ -466,9 +484,11 @@ static int send_mode_select(struct scsi_device *sdev)
err = blk_execute_rq(q, NULL, rq, 1);
if (err != SCSI_DH_OK)
- err = mode_select_handle_sense(sdev);
+ err = mode_select_handle_sense(sdev, h->sense);
if (err == SCSI_DH_OK)
h->state = RDAC_STATE_ACTIVE;
+
+ blk_put_request(rq);
done:
return err;
}
@@ -478,38 +498,23 @@ static int rdac_activate(struct scsi_device *sdev)
struct rdac_dh_data *h = get_rdac_data(sdev);
int err = SCSI_DH_OK;
- if (h->lun == UNINITIALIZED_LUN) {
- err = get_lun(sdev);
- if (err != SCSI_DH_OK)
- goto done;
- }
-
- err = check_ownership(sdev);
- switch (err) {
- case RDAC_UNOWNED:
- break;
- case RDAC_OWNED:
- err = SCSI_DH_OK;
- goto done;
- case RDAC_FAILED:
- default:
- err = SCSI_DH_IO;
+ err = check_ownership(sdev, h);
+ if (err != SCSI_DH_OK)
goto done;
- }
if (!h->ctlr) {
- err = initialize_controller(sdev);
+ err = initialize_controller(sdev, h);
if (err != SCSI_DH_OK)
goto done;
}
if (h->ctlr->use_ms10 == -1) {
- err = set_mode_select(sdev);
+ err = set_mode_select(sdev, h);
if (err != SCSI_DH_OK)
goto done;
}
-
- err = send_mode_select(sdev);
+ if (h->lun_state == RDAC_LUN_UNOWNED)
+ err = send_mode_select(sdev, h);
done:
return err;
}
@@ -569,10 +574,7 @@ static int rdac_check_sense(struct scsi_device *sdev,
return SCSI_RETURN_NOT_HANDLED;
}
-static const struct {
- char *vendor;
- char *model;
-} rdac_dev_list[] = {
+const struct scsi_dh_devlist rdac_dev_list[] = {
{"IBM", "1722"},
{"IBM", "1724"},
{"IBM", "1726"},
@@ -590,89 +592,89 @@ static const struct {
{NULL, NULL},
};
-static int rdac_bus_notify(struct notifier_block *, unsigned long, void *);
+static int rdac_bus_attach(struct scsi_device *sdev);
+static void rdac_bus_detach(struct scsi_device *sdev);
static struct scsi_device_handler rdac_dh = {
.name = RDAC_NAME,
.module = THIS_MODULE,
- .nb.notifier_call = rdac_bus_notify,
+ .devlist = rdac_dev_list,
.prep_fn = rdac_prep_fn,
.check_sense = rdac_check_sense,
+ .attach = rdac_bus_attach,
+ .detach = rdac_bus_detach,
.activate = rdac_activate,
};
-/*
- * TODO: need some interface so we can set trespass values
- */
-static int rdac_bus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
+static int rdac_bus_attach(struct scsi_device *sdev)
{
- struct device *dev = data;
- struct scsi_device *sdev;
struct scsi_dh_data *scsi_dh_data;
struct rdac_dh_data *h;
- int i, found = 0;
unsigned long flags;
+ int err;
- if (!scsi_is_sdev_device(dev))
+ scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ + sizeof(*h) , GFP_KERNEL);
+ if (!scsi_dh_data) {
+ sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
+ RDAC_NAME);
return 0;
+ }
- sdev = to_scsi_device(dev);
-
- if (action == BUS_NOTIFY_ADD_DEVICE) {
- for (i = 0; rdac_dev_list[i].vendor; i++) {
- if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
- strlen(rdac_dev_list[i].vendor)) &&
- !strncmp(sdev->model, rdac_dev_list[i].model,
- strlen(rdac_dev_list[i].model))) {
- found = 1;
- break;
- }
- }
- if (!found)
- goto out;
+ scsi_dh_data->scsi_dh = &rdac_dh;
+ h = (struct rdac_dh_data *) scsi_dh_data->buf;
+ h->lun = UNINITIALIZED_LUN;
+ h->state = RDAC_STATE_ACTIVE;
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
- + sizeof(*h) , GFP_KERNEL);
- if (!scsi_dh_data) {
- sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
- RDAC_NAME);
- goto out;
- }
+ err = get_lun(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto failed;
- scsi_dh_data->scsi_dh = &rdac_dh;
- h = (struct rdac_dh_data *) scsi_dh_data->buf;
- h->lun = UNINITIALIZED_LUN;
- h->state = RDAC_STATE_ACTIVE;
- spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
- sdev->scsi_dh_data = scsi_dh_data;
- spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
- try_module_get(THIS_MODULE);
-
- sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", RDAC_NAME);
-
- } else if (action == BUS_NOTIFY_DEL_DEVICE) {
- if (sdev->scsi_dh_data == NULL ||
- sdev->scsi_dh_data->scsi_dh != &rdac_dh)
- goto out;
-
- spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
- scsi_dh_data = sdev->scsi_dh_data;
- sdev->scsi_dh_data = NULL;
- spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
-
- h = (struct rdac_dh_data *) scsi_dh_data->buf;
- if (h->ctlr)
- kref_put(&h->ctlr->kref, release_controller);
- kfree(scsi_dh_data);
- module_put(THIS_MODULE);
- sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", RDAC_NAME);
- }
+ err = check_ownership(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto failed;
+
+ if (!try_module_get(THIS_MODULE))
+ goto failed;
+
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ sdev->scsi_dh_data = scsi_dh_data;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+ sdev_printk(KERN_NOTICE, sdev,
+ "%s: LUN %d (%s)\n",
+ RDAC_NAME, h->lun, lun_state[(int)h->lun_state]);
-out:
return 0;
+
+failed:
+ kfree(scsi_dh_data);
+ sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
+ RDAC_NAME);
+ return -EINVAL;
+}
+
+static void rdac_bus_detach( struct scsi_device *sdev )
+{
+ struct scsi_dh_data *scsi_dh_data;
+ struct rdac_dh_data *h;
+ unsigned long flags;
+
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ scsi_dh_data = sdev->scsi_dh_data;
+ sdev->scsi_dh_data = NULL;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+ h = (struct rdac_dh_data *) scsi_dh_data->buf;
+ if (h->ctlr)
+ kref_put(&h->ctlr->kref, release_controller);
+ kfree(scsi_dh_data);
+ module_put(THIS_MODULE);
+ sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", RDAC_NAME);
}
+
+
static int __init rdac_init(void)
{
int r;
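
In the rdac changes above, check_ownership() now records a LUN state rather than returning RDAC_OWNED/RDAC_UNOWNED: bit 7 of the avte_cvp byte from inquiry page 0xC9 indicates AVT mode and bit 0 indicates controller ownership, and rdac_activate() only issues the failover MODE SELECT while the LUN is still unowned. A standalone sketch of just those bit tests follows; the function name and sample values are invented for illustration.

#include <stdio.h>

enum rdac_lun { LUN_UNOWNED, LUN_OWNED, LUN_AVT };

/*
 * Derive the RDAC LUN state from the avte_cvp byte of the C9 inquiry,
 * using the same bit tests as check_ownership() above.
 */
static enum rdac_lun rdac_lun_state(unsigned char avte_cvp)
{
	if (avte_cvp & 0x80)	/* AVT mode enabled */
		return LUN_AVT;
	if (avte_cvp & 0x01)	/* LUN owned by this controller */
		return LUN_OWNED;
	return LUN_UNOWNED;
}

int main(void)
{
	static const char *name[] = { "unowned", "owned", "owned (AVT mode)" };
	unsigned char samples[] = { 0x00, 0x01, 0x80 };

	for (unsigned int i = 0; i < sizeof(samples); i++)
		printf("avte_cvp %#04x -> %s\n", samples[i],
		       name[rdac_lun_state(samples[i])]);
	return 0;
}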
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index c4a7c06793c5..ae560bc04f9d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -521,9 +521,10 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
{
if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
- scsi_block_requests(vhost->host);
- ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING);
- ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+ if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
+ scsi_block_requests(vhost->host);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+ }
} else
vhost->reinit = 1;
@@ -854,39 +855,41 @@ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
}
/**
- * __ibmvfc_find_target - Find the specified scsi_target (no locking)
+ * __ibmvfc_get_target - Find the specified scsi_target (no locking)
* @starget: scsi target struct
*
* Return value:
* ibmvfc_target struct / NULL if not found
**/
-static struct ibmvfc_target *__ibmvfc_find_target(struct scsi_target *starget)
+static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ibmvfc_host *vhost = shost_priv(shost);
struct ibmvfc_target *tgt;
list_for_each_entry(tgt, &vhost->targets, queue)
- if (tgt->target_id == starget->id)
+ if (tgt->target_id == starget->id) {
+ kref_get(&tgt->kref);
return tgt;
+ }
return NULL;
}
/**
- * ibmvfc_find_target - Find the specified scsi_target
+ * ibmvfc_get_target - Find the specified scsi_target
* @starget: scsi target struct
*
* Return value:
* ibmvfc_target struct / NULL if not found
**/
-static struct ibmvfc_target *ibmvfc_find_target(struct scsi_target *starget)
+static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ibmvfc_target *tgt;
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
- tgt = __ibmvfc_find_target(starget);
+ tgt = __ibmvfc_get_target(starget);
spin_unlock_irqrestore(shost->host_lock, flags);
return tgt;
}
@@ -963,6 +966,9 @@ static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
case IBMVFC_HALTED:
fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
break;
+ case IBMVFC_NO_CRQ:
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+ break;
default:
ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
@@ -988,6 +994,17 @@ static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
}
/**
+ * ibmvfc_release_tgt - Free memory allocated for a target
+ * @kref: kref struct
+ *
+ **/
+static void ibmvfc_release_tgt(struct kref *kref)
+{
+ struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
+ kfree(tgt);
+}
+
+/**
* ibmvfc_get_starget_node_name - Get SCSI target's node name
* @starget: scsi target struct
*
@@ -996,8 +1013,10 @@ static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
**/
static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
{
- struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
+ struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0;
+ if (tgt)
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
}
/**
@@ -1009,8 +1028,10 @@ static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
**/
static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
{
- struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
+ struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
+ if (tgt)
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
}
/**
@@ -1022,8 +1043,10 @@ static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
**/
static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
{
- struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
+ struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
+ if (tgt)
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
}
/**
@@ -1113,7 +1136,7 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
login_info->capabilities = IBMVFC_CAN_MIGRATE;
login_info->async.va = vhost->async_crq.msg_token;
- login_info->async.len = vhost->async_crq.size;
+ login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs);
strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
strncpy(login_info->device_name,
vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME);
@@ -1404,7 +1427,7 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
err = cmd_status[index].name;
}
- if (!logerr && (vhost->log_level <= IBMVFC_DEFAULT_LOG_LEVEL))
+ if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
return;
if (rsp->flags & FCP_RSP_LEN_VALID)
@@ -2054,7 +2077,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
{
const char *desc = ibmvfc_get_ae_desc(crq->event);
- ibmvfc_log(vhost, 2, "%s event received\n", desc);
+ ibmvfc_log(vhost, 3, "%s event received\n", desc);
switch (crq->event) {
case IBMVFC_AE_LINK_UP:
@@ -2648,17 +2671,6 @@ static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
}
/**
- * ibmvfc_release_tgt - Free memory allocated for a target
- * @kref: kref struct
- *
- **/
-static void ibmvfc_release_tgt(struct kref *kref)
-{
- struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
- kfree(tgt);
-}
-
-/**
* ibmvfc_tgt_prli_done - Completion handler for Process Login
* @evt: ibmvfc event struct
*
@@ -2902,6 +2914,139 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
}
/**
+ * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
+ * @mad: ibmvfc passthru mad struct
+ * @tgt: ibmvfc target struct
+ *
+ * Returns:
+ * 1 if PLOGI needed / 0 if PLOGI not needed
+ **/
+static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
+ struct ibmvfc_target *tgt)
+{
+ if (memcmp(&mad->fc_iu.response[2], &tgt->ids.port_name,
+ sizeof(tgt->ids.port_name)))
+ return 1;
+ if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name,
+ sizeof(tgt->ids.node_name)))
+ return 1;
+ if (mad->fc_iu.response[6] != tgt->scsi_id)
+ return 1;
+ return 0;
+}
+
+/**
+ * ibmvfc_tgt_adisc_done - Completion handler for ADISC
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
+ u32 status = mad->common.status;
+ u8 fc_reason, fc_explain;
+
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+
+ switch (status) {
+ case IBMVFC_MAD_SUCCESS:
+ tgt_dbg(tgt, "ADISC succeeded\n");
+ if (ibmvfc_adisc_needs_plogi(mad, tgt))
+ tgt->need_login = 1;
+ break;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ break;
+ case IBMVFC_MAD_FAILED:
+ default:
+ tgt->need_login = 1;
+ fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
+ fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
+ tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
+ ibmvfc_get_cmd_error(mad->iu.status, mad->iu.error),
+ mad->iu.status, mad->iu.error,
+ ibmvfc_get_fc_type(fc_reason), fc_reason,
+ ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
+ break;
+ }
+
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ ibmvfc_free_event(evt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_init_passthru - Initialize an event struct for FC passthru
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
+
+ memset(mad, 0, sizeof(*mad));
+ mad->common.version = 1;
+ mad->common.opcode = IBMVFC_PASSTHRU;
+ mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);
+ mad->cmd_ioba.va = (u64)evt->crq.ioba +
+ offsetof(struct ibmvfc_passthru_mad, iu);
+ mad->cmd_ioba.len = sizeof(mad->iu);
+ mad->iu.cmd_len = sizeof(mad->fc_iu.payload);
+ mad->iu.rsp_len = sizeof(mad->fc_iu.response);
+ mad->iu.cmd.va = (u64)evt->crq.ioba +
+ offsetof(struct ibmvfc_passthru_mad, fc_iu) +
+ offsetof(struct ibmvfc_passthru_fc_iu, payload);
+ mad->iu.cmd.len = sizeof(mad->fc_iu.payload);
+ mad->iu.rsp.va = (u64)evt->crq.ioba +
+ offsetof(struct ibmvfc_passthru_mad, fc_iu) +
+ offsetof(struct ibmvfc_passthru_fc_iu, response);
+ mad->iu.rsp.len = sizeof(mad->fc_iu.response);
+}
+
+/**
+ * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_passthru_mad *mad;
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(vhost);
+ vhost->discovery_threads++;
+ ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+
+ ibmvfc_init_passthru(evt);
+ mad = &evt->iu.passthru;
+ mad->iu.flags = IBMVFC_FC_ELS;
+ mad->iu.scsi_id = tgt->scsi_id;
+
+ mad->fc_iu.payload[0] = IBMVFC_ADISC;
+ memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
+ sizeof(vhost->login_buf->resp.port_name));
+ memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
+ sizeof(vhost->login_buf->resp.node_name));
+ mad->fc_iu.payload[6] = vhost->login_buf->resp.scsi_id & 0x00ffffff;
+
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent ADISC\n");
+}
+
+/**
* ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
* @evt: ibmvfc event struct
*
@@ -2921,6 +3066,8 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
tgt->new_scsi_id = rsp->scsi_id;
if (rsp->scsi_id != tgt->scsi_id)
ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ else
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
@@ -3336,6 +3483,7 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
tgt_dbg(tgt, "rport add succeeded\n");
rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
rport->supported_classes = 0;
+ tgt->target_id = rport->scsi_target_id;
if (tgt->service_parms.class1_parms[0] & 0x80000000)
rport->supported_classes |= FC_COS_CLASS1;
if (tgt->service_parms.class2_parms[0] & 0x80000000)
@@ -3525,7 +3673,7 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
crq->msg_token = dma_map_single(dev, crq->msgs,
PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(crq->msg_token))
+ if (dma_mapping_error(dev, crq->msg_token))
goto map_failed;
retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
@@ -3618,7 +3766,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
async_q->size * sizeof(*async_q->msgs),
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(async_q->msg_token)) {
+ if (dma_mapping_error(dev, async_q->msg_token)) {
dev_err(dev, "Failed to map async queue\n");
goto free_async_crq;
}
@@ -3800,10 +3948,12 @@ static int ibmvfc_remove(struct vio_dev *vdev)
ENTER;
ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
+ ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
+ ibmvfc_wait_while_resetting(vhost);
+ ibmvfc_release_crq_queue(vhost);
kthread_stop(vhost->work_thread);
fc_remove_host(vhost->host);
scsi_remove_host(vhost->host);
- ibmvfc_release_crq_queue(vhost);
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_purge_requests(vhost, DID_ERROR);
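The ibmvfc.c hunks above replace the bare ibmvfc_find_target() lookup with a reference-counted ibmvfc_get_target(), and each caller now drops its reference via kref_put(&tgt->kref, ibmvfc_release_tgt) once it has read the fields it needs (the deletion of ibmvfc_release_tgt() here pairs with those new users, so it is presumably re-declared earlier in the file). A minimal sketch of the underlying kref get/put idiom, using hypothetical names rather than the driver's own:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_target {
        struct kref kref;
        /* ... per-target state ... */
};

/* Runs only when the last reference is dropped. */
static void my_target_release(struct kref *kref)
{
        struct my_target *tgt = container_of(kref, struct my_target, kref);

        kfree(tgt);
}

static void my_target_use(struct my_target *tgt)
{
        kref_get(&tgt->kref);                     /* pin while in use */
        /* ... safely read or update tgt ... */
        kref_put(&tgt->kref, my_target_release);  /* may free tgt */
}

This is why the sysfs attribute readers (port name, node name, port id) now end with a kref_put(): the target object can no longer be freed while they are dereferencing it.
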
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 057f3c01ed61..4bf6e374f076 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
#include "viosrp.h"
#define IBMVFC_NAME "ibmvfc"
-#define IBMVFC_DRIVER_VERSION "1.0.0"
-#define IBMVFC_DRIVER_DATE "(July 1, 2008)"
+#define IBMVFC_DRIVER_VERSION "1.0.1"
+#define IBMVFC_DRIVER_DATE "(July 11, 2008)"
#define IBMVFC_DEFAULT_TIMEOUT 15
#define IBMVFC_INIT_TIMEOUT 30
@@ -119,6 +119,7 @@ enum ibmvfc_mad_types {
IBMVFC_PROCESS_LOGIN = 0x0008,
IBMVFC_QUERY_TARGET = 0x0010,
IBMVFC_IMPLICIT_LOGOUT = 0x0040,
+ IBMVFC_PASSTHRU = 0x0200,
IBMVFC_TMF_MAD = 0x0100,
};
@@ -439,6 +440,37 @@ struct ibmvfc_cmd {
struct ibmvfc_fcp_rsp rsp;
}__attribute__((packed, aligned (8)));
+struct ibmvfc_passthru_fc_iu {
+ u32 payload[7];
+#define IBMVFC_ADISC 0x52000000
+ u32 response[7];
+};
+
+struct ibmvfc_passthru_iu {
+ u64 task_tag;
+ u32 cmd_len;
+ u32 rsp_len;
+ u16 status;
+ u16 error;
+ u32 flags;
+#define IBMVFC_FC_ELS 0x01
+ u32 cancel_key;
+ u32 reserved;
+ struct srp_direct_buf cmd;
+ struct srp_direct_buf rsp;
+ u64 correlation;
+ u64 scsi_id;
+ u64 tag;
+ u64 reserved2[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_passthru_mad {
+ struct ibmvfc_mad_common common;
+ struct srp_direct_buf cmd_ioba;
+ struct ibmvfc_passthru_iu iu;
+ struct ibmvfc_passthru_fc_iu fc_iu;
+}__attribute__((packed, aligned (8)));
+
struct ibmvfc_trace_start_entry {
u32 xfer_len;
}__attribute__((packed));
@@ -531,6 +563,7 @@ union ibmvfc_iu {
struct ibmvfc_implicit_logout implicit_logout;
struct ibmvfc_tmf tmf;
struct ibmvfc_cmd cmd;
+ struct ibmvfc_passthru_mad passthru;
}__attribute__((packed, aligned (8)));
enum ibmvfc_target_action {
@@ -656,6 +689,9 @@ struct ibmvfc_host {
#define tgt_dbg(t, fmt, ...) \
DBG_CMD(dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__))
+#define tgt_info(t, fmt, ...) \
+ dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
+
#define tgt_err(t, fmt, ...) \
dev_err((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
@@ -668,8 +704,8 @@ struct ibmvfc_host {
dev_err((vhost)->dev, ##__VA_ARGS__); \
} while (0)
-#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __FUNCTION__))
-#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __FUNCTION__))
+#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __func__))
+#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __func__))
#ifdef CONFIG_SCSI_IBMVFC_TRACE
#define ibmvfc_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr)
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 20000ec79b04..6b24b9cdb04c 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -859,7 +859,7 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
sizeof(hostdata->madapter_info),
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(req->buffer)) {
+ if (dma_mapping_error(hostdata->dev, req->buffer)) {
if (!firmware_has_feature(FW_FEATURE_CMO))
dev_err(hostdata->dev,
"Unable to map request_buffer for "
@@ -1407,7 +1407,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
length,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(host_config->buffer)) {
+ if (dma_mapping_error(hostdata->dev, host_config->buffer)) {
if (!firmware_has_feature(FW_FEATURE_CMO))
dev_err(hostdata->dev,
"dma_mapping error getting host config\n");
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index 3b9514c8f1f1..2a5b29d12172 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -55,7 +55,7 @@
/* tmp - will replace with SCSI logging stuff */
#define eprintk(fmt, args...) \
do { \
- printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
+ printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
} while (0)
/* #define dprintk eprintk */
#define dprintk(fmt, args...)
@@ -564,7 +564,7 @@ static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
queue->size * sizeof(*queue->msgs),
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(queue->msg_token))
+ if (dma_mapping_error(target->dev, queue->msg_token))
goto map_failed;
err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 182146100dc1..462a8574dad9 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -253,7 +253,7 @@ static int rpavscsi_init_crq_queue(struct crq_queue *queue,
queue->size * sizeof(*queue->msgs),
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(queue->msg_token))
+ if (dma_mapping_error(hostdata->dev, queue->msg_token))
goto map_failed;
gather_partition_info();
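Several hunks in ibmvfc.c, ibmvscsi.c, ibmvstgt.c and rpa_vscsi.c above track the dma_mapping_error() interface change: the check now takes the struct device that performed the mapping as its first argument. A short sketch of the updated call pattern, with generic names not taken from any of these drivers:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map a buffer and verify the mapping with the device-aware check. */
static int map_one_buffer(struct device *dev, void *buf, size_t len,
                          dma_addr_t *handle)
{
        *handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *handle))    /* device is now passed in */
                return -ENOMEM;
        return 0;
}
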
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index f97d172844be..c2a9a13d788f 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -163,7 +163,7 @@ static int imm_proc_info(struct Scsi_Host *host, char *buffer, char **start,
#if IMM_DEBUG > 0
#define imm_fail(x,y) printk("imm: imm_fail(%i) from %s at line %d\n",\
- y, __FUNCTION__, __LINE__); imm_fail_func(x,y);
+ y, __func__, __LINE__); imm_fail_func(x,y);
static inline void
imm_fail_func(imm_struct *dev, int error_code)
#else
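The bulk of the remaining SCSI hunks are a mechanical substitution of __FUNCTION__ with __func__; the latter is the C99-standard predefined identifier, while __FUNCTION__ is a GCC-specific spelling. The usage is otherwise unchanged, for example:

#include <linux/kernel.h>

static void trace_entry(void)
{
        /* __func__ expands to the enclosing function's name, here "trace_entry". */
        printk(KERN_DEBUG "%s: entered\n", __func__);
}
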
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index d93156671e93..4871dd1f2582 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1403,10 +1403,10 @@ struct ipr_ucode_image_header {
}
#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\
- __FILE__, __FUNCTION__, __LINE__)
+ __FILE__, __func__, __LINE__)
-#define ENTER IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Entering %s\n", __FUNCTION__))
-#define LEAVE IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Leaving %s\n", __FUNCTION__))
+#define ENTER IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Entering %s\n", __func__))
+#define LEAVE IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Leaving %s\n", __func__))
#define ipr_err_separator \
ipr_err("----------------------------------------------------------\n")
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 744f06d04a36..48ee8c7f5bdd 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -74,7 +74,7 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
case SAS_OPEN_TO:
case SAS_OPEN_REJECT:
SAS_DPRINTK("%s: Saw error %d. What to do?\n",
- __FUNCTION__, ts->stat);
+ __func__, ts->stat);
return AC_ERR_OTHER;
case SAS_ABORTED_TASK:
@@ -115,7 +115,7 @@ static void sas_ata_task_done(struct sas_task *task)
} else if (stat->stat != SAM_STAT_GOOD) {
ac = sas_to_ata_err(stat);
if (ac) {
- SAS_DPRINTK("%s: SAS error %x\n", __FUNCTION__,
+ SAS_DPRINTK("%s: SAS error %x\n", __func__,
stat->stat);
/* We saw a SAS error. Send a vague error. */
qc->err_mask = ac;
@@ -244,20 +244,20 @@ static void sas_ata_phy_reset(struct ata_port *ap)
res = i->dft->lldd_I_T_nexus_reset(dev);
if (res != TMF_RESP_FUNC_COMPLETE)
- SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__);
+ SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __func__);
switch (dev->sata_dev.command_set) {
case ATA_COMMAND_SET:
- SAS_DPRINTK("%s: Found ATA device.\n", __FUNCTION__);
+ SAS_DPRINTK("%s: Found ATA device.\n", __func__);
ap->link.device[0].class = ATA_DEV_ATA;
break;
case ATAPI_COMMAND_SET:
- SAS_DPRINTK("%s: Found ATAPI device.\n", __FUNCTION__);
+ SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
ap->link.device[0].class = ATA_DEV_ATAPI;
break;
default:
SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
- __FUNCTION__,
+ __func__,
dev->sata_dev.command_set);
ap->link.device[0].class = ATA_DEV_UNKNOWN;
break;
@@ -299,7 +299,7 @@ static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
{
struct domain_device *dev = ap->private_data;
- SAS_DPRINTK("STUB %s\n", __FUNCTION__);
+ SAS_DPRINTK("STUB %s\n", __func__);
switch (sc_reg_in) {
case SCR_STATUS:
dev->sata_dev.sstatus = val;
@@ -324,7 +324,7 @@ static int sas_ata_scr_read(struct ata_port *ap, unsigned int sc_reg_in,
{
struct domain_device *dev = ap->private_data;
- SAS_DPRINTK("STUB %s\n", __FUNCTION__);
+ SAS_DPRINTK("STUB %s\n", __func__);
switch (sc_reg_in) {
case SCR_STATUS:
*val = dev->sata_dev.sstatus;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index aefd865a5788..3da02e436788 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -121,7 +121,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
break;
} else {
SAS_DPRINTK("%s: task to dev %016llx response: 0x%x "
- "status 0x%x\n", __FUNCTION__,
+ "status 0x%x\n", __func__,
SAS_ADDR(dev->sas_addr),
task->task_status.resp,
task->task_status.stat);
@@ -1279,7 +1279,7 @@ static int sas_configure_present(struct domain_device *dev, int phy_id,
goto out;
} else if (res != SMP_RESP_FUNC_ACC) {
SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x "
- "result 0x%x\n", __FUNCTION__,
+ "result 0x%x\n", __func__,
SAS_ADDR(dev->sas_addr), phy_id, i, res);
goto out;
}
@@ -1901,7 +1901,7 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
if (!rsp) {
printk("%s: space for a smp response is missing\n",
- __FUNCTION__);
+ __func__);
return -EINVAL;
}
@@ -1914,20 +1914,20 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
if (type != SAS_EDGE_EXPANDER_DEVICE &&
type != SAS_FANOUT_EXPANDER_DEVICE) {
printk("%s: can we send a smp request to a device?\n",
- __FUNCTION__);
+ __func__);
return -EINVAL;
}
dev = sas_find_dev_by_rphy(rphy);
if (!dev) {
- printk("%s: fail to find a domain_device?\n", __FUNCTION__);
+ printk("%s: fail to find a domain_device?\n", __func__);
return -EINVAL;
}
/* do we need to support multiple segments? */
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk("%s: multiple segments req %u %u, rsp %u %u\n",
- __FUNCTION__, req->bio->bi_vcnt, req->data_len,
+ __func__, req->bio->bi_vcnt, req->data_len,
rsp->bio->bi_vcnt, rsp->data_len);
return -EINVAL;
}
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 39ae68a3b0ef..139935a121b4 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -50,7 +50,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
sas_deform_port(phy);
else {
SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
- __FUNCTION__, phy->id, phy->port->id,
+ __func__, phy->id, phy->port->id,
phy->port->num_phys);
return;
}
@@ -78,7 +78,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
if (i >= sas_ha->num_phys) {
printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
- __FUNCTION__);
+ __func__);
spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
return;
}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 601ec5b6a7f6..a8e3ef309070 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -343,7 +343,7 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
flags);
SAS_DPRINTK("%s: task 0x%p aborted from "
"task_queue\n",
- __FUNCTION__, task);
+ __func__, task);
return TASK_IS_ABORTED;
}
}
@@ -351,13 +351,13 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
}
for (i = 0; i < 5; i++) {
- SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task);
+ SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task);
res = si->dft->lldd_abort_task(task);
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
- SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
+ SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
task);
return TASK_IS_DONE;
}
@@ -365,24 +365,24 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
if (res == TMF_RESP_FUNC_COMPLETE) {
SAS_DPRINTK("%s: task 0x%p is aborted\n",
- __FUNCTION__, task);
+ __func__, task);
return TASK_IS_ABORTED;
} else if (si->dft->lldd_query_task) {
SAS_DPRINTK("%s: querying task 0x%p\n",
- __FUNCTION__, task);
+ __func__, task);
res = si->dft->lldd_query_task(task);
switch (res) {
case TMF_RESP_FUNC_SUCC:
SAS_DPRINTK("%s: task 0x%p at LU\n",
- __FUNCTION__, task);
+ __func__, task);
return TASK_IS_AT_LU;
case TMF_RESP_FUNC_COMPLETE:
SAS_DPRINTK("%s: task 0x%p not at LU\n",
- __FUNCTION__, task);
+ __func__, task);
return TASK_IS_NOT_AT_LU;
case TMF_RESP_FUNC_FAILED:
SAS_DPRINTK("%s: task 0x%p failed to abort\n",
- __FUNCTION__, task);
+ __func__, task);
return TASK_ABORT_FAILED;
}
@@ -545,7 +545,7 @@ Again:
if (need_reset) {
SAS_DPRINTK("%s: task 0x%p requests reset\n",
- __FUNCTION__, task);
+ __func__, task);
goto reset;
}
@@ -556,13 +556,13 @@ Again:
switch (res) {
case TASK_IS_DONE:
- SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
+ SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
task);
sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_ABORTED:
SAS_DPRINTK("%s: task 0x%p is aborted\n",
- __FUNCTION__, task);
+ __func__, task);
sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_AT_LU:
@@ -633,7 +633,7 @@ Again:
}
return list_empty(work_q);
clear_q:
- SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__);
+ SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__);
list_for_each_entry_safe(cmd, n, work_q, eh_entry)
sas_eh_finish_cmd(cmd);
@@ -650,7 +650,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
list_splice_init(&shost->eh_cmd_q, &eh_work_q);
spin_unlock_irqrestore(shost->host_lock, flags);
- SAS_DPRINTK("Enter %s\n", __FUNCTION__);
+ SAS_DPRINTK("Enter %s\n", __func__);
/*
* Deal with commands that still have SAS tasks (i.e. they didn't
* complete via the normal sas_task completion mechanism)
@@ -669,7 +669,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
out:
scsi_eh_flush_done_q(&ha->eh_done_q);
- SAS_DPRINTK("--- Exit %s\n", __FUNCTION__);
+ SAS_DPRINTK("--- Exit %s\n", __func__);
return;
}
@@ -990,7 +990,7 @@ int __sas_task_abort(struct sas_task *task)
if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
- SAS_DPRINTK("%s: Task %p already finished.\n", __FUNCTION__,
+ SAS_DPRINTK("%s: Task %p already finished.\n", __func__,
task);
return 0;
}
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 6d6a76e65a6c..15e2d132e8b9 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -39,7 +39,7 @@ enum srp_task_attributes {
/* tmp - will replace with SCSI logging stuff */
#define eprintk(fmt, args...) \
do { \
- printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
+ printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
} while (0)
/* #define dprintk eprintk */
#define dprintk(fmt, args...)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 5b6e5395c8eb..d51a2a4b43eb 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2083,7 +2083,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
if (iocbq_entry == NULL) {
printk(KERN_ERR "%s: only allocated %d iocbs of "
"expected %d count. Unloading driver.\n",
- __FUNCTION__, i, LPFC_IOCB_LIST_CNT);
+ __func__, i, LPFC_IOCB_LIST_CNT);
error = -ENOMEM;
goto out_free_iocbq;
}
@@ -2093,7 +2093,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
kfree (iocbq_entry);
printk(KERN_ERR "%s: failed to allocate IOTAG. "
"Unloading driver.\n",
- __FUNCTION__);
+ __func__);
error = -ENOMEM;
goto out_free_iocbq;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c94da4f2b8a6..1bcebbd3dfac 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -341,7 +341,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
printk(KERN_ERR "%s: Too many sg segments from "
"dma_map_sg. Config %d, seg_cnt %d",
- __FUNCTION__, phba->cfg_sg_seg_cnt,
+ __func__, phba->cfg_sg_seg_cnt,
lpfc_cmd->seg_cnt);
scsi_dma_unmap(scsi_cmnd);
return 1;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f40aa7b905f7..50fe07646738 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -219,7 +219,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
case CMD_IOCB_LOGENTRY_CN:
case CMD_IOCB_LOGENTRY_ASYNC_CN:
printk("%s - Unhandled SLI-3 Command x%x\n",
- __FUNCTION__, iocb_cmnd);
+ __func__, iocb_cmnd);
type = LPFC_UNKNOWN_IOCB;
break;
default:
@@ -1715,7 +1715,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
rspiocbp = __lpfc_sli_get_iocbq(phba);
if (rspiocbp == NULL) {
printk(KERN_ERR "%s: out of buffers! Failing "
- "completion.\n", __FUNCTION__);
+ "completion.\n", __func__);
break;
}
@@ -3793,7 +3793,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
break;
default:
printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
- __FUNCTION__, ctx_cmd);
+ __func__, ctx_cmd);
break;
}
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index f62ed468ada0..5ead1283a844 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -265,7 +265,7 @@ typedef struct {
#define ASSERT(expression) \
if (!(expression)) { \
ASSERT_ACTION("assertion failed:(%s), file: %s, line: %d:%s\n", \
- #expression, __FILE__, __LINE__, __FUNCTION__); \
+ #expression, __FILE__, __LINE__, __func__); \
}
#else
#define ASSERT(expression)
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 70a0f11f48b2..805bb61dde18 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -458,7 +458,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (adapter == NULL) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: out of memory, %s %d.\n", __FUNCTION__, __LINE__));
+ "megaraid: out of memory, %s %d.\n", __func__, __LINE__));
goto out_probe_one;
}
@@ -1002,7 +1002,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
if (!raid_dev->una_mbox64) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: out of memory, %s %d\n", __FUNCTION__,
+ "megaraid: out of memory, %s %d\n", __func__,
__LINE__));
return -1;
}
@@ -1030,7 +1030,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
if (!adapter->ibuf) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: out of memory, %s %d\n", __FUNCTION__,
+ "megaraid: out of memory, %s %d\n", __func__,
__LINE__));
goto out_free_common_mbox;
@@ -1052,7 +1052,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
if (adapter->kscb_list == NULL) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: out of memory, %s %d\n", __FUNCTION__,
+ "megaraid: out of memory, %s %d\n", __func__,
__LINE__));
goto out_free_ibuf;
}
@@ -1060,7 +1060,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
// memory allocation for our command packets
if (megaraid_mbox_setup_dma_pools(adapter) != 0) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: out of memory, %s %d\n", __FUNCTION__,
+ "megaraid: out of memory, %s %d\n", __func__,
__LINE__));
goto out_free_scb_list;
}
@@ -2981,7 +2981,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
if (pinfo == NULL) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: out of memory, %s %d\n", __FUNCTION__,
+ "megaraid: out of memory, %s %d\n", __func__,
__LINE__));
return -1;
@@ -3508,7 +3508,7 @@ megaraid_cmm_register(adapter_t *adapter)
if (adapter->uscb_list == NULL) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: out of memory, %s %d\n", __FUNCTION__,
+ "megaraid: out of memory, %s %d\n", __func__,
__LINE__));
return -1;
}
@@ -3879,7 +3879,7 @@ megaraid_sysfs_alloc_resources(adapter_t *adapter)
!raid_dev->sysfs_buffer) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: out of memory, %s %d\n", __FUNCTION__,
+ "megaraid: out of memory, %s %d\n", __func__,
__LINE__));
rval = -ENOMEM;
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index ac3b280c2a72..f680561d2c6f 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -929,7 +929,7 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
!adapter->pthru_dma_pool) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid cmm: out of memory, %s %d\n", __FUNCTION__,
+ "megaraid cmm: out of memory, %s %d\n", __func__,
__LINE__));
rval = (-ENOMEM);
@@ -957,7 +957,7 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
con_log(CL_ANN, (KERN_WARNING
"megaraid cmm: out of memory, %s %d\n",
- __FUNCTION__, __LINE__));
+ __func__, __LINE__));
rval = (-ENOMEM);
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 7fed35372150..edf9fdb3cb3c 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -299,9 +299,9 @@ static struct scsi_host_template nsp32_template = {
#else
# define NSP32_DEBUG_MASK 0xffffff
# define nsp32_msg(type, args...) \
- nsp32_message (__FUNCTION__, __LINE__, (type), args)
+ nsp32_message (__func__, __LINE__, (type), args)
# define nsp32_dbg(mask, args...) \
- nsp32_dmessage(__FUNCTION__, __LINE__, (mask), args)
+ nsp32_dmessage(__func__, __LINE__, (mask), args)
#endif
#define NSP32_DEBUG_QUEUECOMMAND BIT(0)
diff --git a/drivers/scsi/nsp32_debug.c b/drivers/scsi/nsp32_debug.c
index ef3c59cbcff6..2fb3fb58858d 100644
--- a/drivers/scsi/nsp32_debug.c
+++ b/drivers/scsi/nsp32_debug.c
@@ -88,7 +88,7 @@ static void print_commandk (unsigned char *command)
int i,s;
// printk(KERN_DEBUG);
print_opcodek(command[0]);
- /*printk(KERN_DEBUG "%s ", __FUNCTION__);*/
+ /*printk(KERN_DEBUG "%s ", __func__);*/
if ((command[0] >> 5) == 6 ||
(command[0] >> 5) == 7 ) {
s = 12; /* vender specific */
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 5082ca3c6876..a221b6ef9fa9 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -107,9 +107,9 @@ static nsp_hw_data nsp_data_base; /* attach <-> detect glue */
#else
# define NSP_DEBUG_MASK 0xffffff
# define nsp_msg(type, args...) \
- nsp_cs_message (__FUNCTION__, __LINE__, (type), args)
+ nsp_cs_message (__func__, __LINE__, (type), args)
# define nsp_dbg(mask, args...) \
- nsp_cs_dmessage(__FUNCTION__, __LINE__, (mask), args)
+ nsp_cs_dmessage(__func__, __LINE__, (mask), args)
#endif
#define NSP_DEBUG_QUEUECOMMAND BIT(0)
diff --git a/drivers/scsi/pcmcia/nsp_debug.c b/drivers/scsi/pcmcia/nsp_debug.c
index 2f75fe6e35a7..3c6ef64fcbff 100644
--- a/drivers/scsi/pcmcia/nsp_debug.c
+++ b/drivers/scsi/pcmcia/nsp_debug.c
@@ -90,7 +90,7 @@ static void print_commandk (unsigned char *command)
int i, s;
printk(KERN_DEBUG);
print_opcodek(command[0]);
- /*printk(KERN_DEBUG "%s ", __FUNCTION__);*/
+ /*printk(KERN_DEBUG "%s ", __func__);*/
if ((command[0] >> 5) == 6 ||
(command[0] >> 5) == 7 ) {
s = 12; /* vender specific */
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index f655ae320b48..8aa0bd987e29 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -171,7 +171,7 @@ static int device_check(ppa_struct *dev);
#if PPA_DEBUG > 0
#define ppa_fail(x,y) printk("ppa: ppa_fail(%i) from %s at line %d\n",\
- y, __FUNCTION__, __LINE__); ppa_fail_func(x,y);
+ y, __func__, __LINE__); ppa_fail_func(x,y);
static inline void ppa_fail_func(ppa_struct *dev, int error_code)
#else
static inline void ppa_fail(ppa_struct *dev, int error_code)
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 3754ab87f89a..37f9ba0cd798 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1695,7 +1695,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
risc_code_size = *ql1280_board_tbl[ha->devnum].fwlen;
dprintk(1, "%s: DMA RISC code (%i) words\n",
- __FUNCTION__, risc_code_size);
+ __func__, risc_code_size);
num = 0;
while (risc_code_size > 0) {
@@ -1721,7 +1721,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
- __FUNCTION__, mb[0],
+ __func__, mb[0],
(void *)(long)ha->request_dma,
mb[6], mb[7], mb[2], mb[3]);
err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
@@ -1753,10 +1753,10 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
if (tbuf[i] != sp[i] && warn++ < 10) {
printk(KERN_ERR "%s: FW compare error @ "
"byte(0x%x) loop#=%x\n",
- __FUNCTION__, i, num);
+ __func__, i, num);
printk(KERN_ERR "%s: FWbyte=%x "
"FWfromChip=%x\n",
- __FUNCTION__, sp[i], tbuf[i]);
+ __func__, sp[i], tbuf[i]);
/*break; */
}
}
@@ -1781,7 +1781,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
int err;
dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
- __FUNCTION__);
+ __func__);
/* Verify checksum of loaded RISC code. */
mb[0] = MBC_VERIFY_CHECKSUM;
@@ -1794,7 +1794,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
}
/* Start firmware execution. */
- dprintk(1, "%s: start firmware running.\n", __FUNCTION__);
+ dprintk(1, "%s: start firmware running.\n", __func__);
mb[0] = MBC_EXECUTE_FIRMWARE;
mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 8dd88fc1244a..7a4409ab30ea 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -20,18 +20,12 @@ qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
{
struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
- char *rbuf = (char *)ha->fw_dump;
if (ha->fw_dump_reading == 0)
return 0;
- if (off > ha->fw_dump_len)
- return 0;
- if (off + count > ha->fw_dump_len)
- count = ha->fw_dump_len - off;
- memcpy(buf, &rbuf[off], count);
-
- return (count);
+ return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
+ ha->fw_dump_len);
}
static ssize_t
@@ -94,20 +88,13 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
{
struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
- int size = ha->nvram_size;
- char *nvram_cache = ha->nvram;
- if (!capable(CAP_SYS_ADMIN) || off > size || count == 0)
+ if (!capable(CAP_SYS_ADMIN))
return 0;
- if (off + count > size) {
- size -= off;
- count = size;
- }
/* Read NVRAM data from cache. */
- memcpy(buf, &nvram_cache[off], count);
-
- return count;
+ return memory_read_from_buffer(buf, count, &off, ha->nvram,
+ ha->nvram_size);
}
static ssize_t
@@ -175,14 +162,9 @@ qla2x00_sysfs_read_optrom(struct kobject *kobj,
if (ha->optrom_state != QLA_SREADING)
return 0;
- if (off > ha->optrom_region_size)
- return 0;
- if (off + count > ha->optrom_region_size)
- count = ha->optrom_region_size - off;
-
- memcpy(buf, &ha->optrom_buffer[off], count);
- return count;
+ return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
+ ha->optrom_region_size);
}
static ssize_t
@@ -374,20 +356,12 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
{
struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
- int size = ha->vpd_size;
- char *vpd_cache = ha->vpd;
- if (!capable(CAP_SYS_ADMIN) || off > size || count == 0)
+ if (!capable(CAP_SYS_ADMIN))
return 0;
- if (off + count > size) {
- size -= off;
- count = size;
- }
/* Read NVRAM data from cache. */
- memcpy(buf, &vpd_cache[off], count);
-
- return count;
+ return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
}
static ssize_t
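The qla2x00_sysfs_read_* handlers above (fw_dump, nvram, optrom, vpd) drop their hand-rolled offset/length clamping in favour of the memory_read_from_buffer() helper. As an illustration of the semantics only (not the kernel's exact implementation), the helper behaves roughly like this:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

/* Copy up to count bytes from an in-memory cache, honouring *ppos and the
 * cache size, then advance *ppos. */
static ssize_t read_from_cache(void *to, size_t count, loff_t *ppos,
                               const void *from, size_t available)
{
        loff_t pos = *ppos;

        if (pos < 0)
                return -EINVAL;
        if ((size_t)pos >= available || !count)
                return 0;
        if (count > available - (size_t)pos)
                count = available - (size_t)pos;
        memcpy(to, (const char *)from + pos, count);
        *ppos = pos + count;

        return count;
}
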
@@ -557,8 +531,10 @@ qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
uint32_t sn;
- if (IS_FWI2_CAPABLE(ha))
- return snprintf(buf, PAGE_SIZE, "\n");
+ if (IS_FWI2_CAPABLE(ha)) {
+ qla2xxx_get_vpd_field(ha, "SN", buf, PAGE_SIZE);
+ return snprintf(buf, PAGE_SIZE, "%s\n", buf);
+ }
sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
@@ -809,6 +785,16 @@ qla2x00_optrom_fw_version_show(struct device *dev,
ha->fw_revision[3]);
}
+static ssize_t
+qla2x00_total_isp_aborts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ ha->qla_stats.total_isp_aborts);
+}
+
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -831,6 +817,8 @@ static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
qla2x00_optrom_fcode_version_show, NULL);
static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
NULL);
+static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
+ NULL);
struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_driver_version,
@@ -849,6 +837,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_optrom_efi_version,
&dev_attr_optrom_fcode_version,
&dev_attr_optrom_fw_version,
+ &dev_attr_total_isp_aborts,
NULL,
};
@@ -972,26 +961,39 @@ qla2x00_get_starget_port_id(struct scsi_target *starget)
}
static void
-qla2x00_get_rport_loss_tmo(struct fc_rport *rport)
+qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
- struct Scsi_Host *host = rport_to_shost(rport);
- scsi_qla_host_t *ha = shost_priv(host);
-
- rport->dev_loss_tmo = ha->port_down_retry_count + 5;
+ if (timeout)
+ rport->dev_loss_tmo = timeout;
+ else
+ rport->dev_loss_tmo = 1;
}
static void
-qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
+qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
{
struct Scsi_Host *host = rport_to_shost(rport);
- scsi_qla_host_t *ha = shost_priv(host);
+ fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+
+ qla2x00_abort_fcport_cmds(fcport);
+
+ /*
+ * Transport has effectively 'deleted' the rport, clear
+ * all local references.
+ */
+ spin_lock_irq(host->host_lock);
+ fcport->rport = NULL;
+ *((fc_port_t **)rport->dd_data) = NULL;
+ spin_unlock_irq(host->host_lock);
+}
- if (timeout)
- ha->port_down_retry_count = timeout;
- else
- ha->port_down_retry_count = 1;
+static void
+qla2x00_terminate_rport_io(struct fc_rport *rport)
+{
+ fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
- rport->dev_loss_tmo = ha->port_down_retry_count + 5;
+ qla2x00_abort_fcport_cmds(fcport);
+ scsi_target_unblock(&rport->dev);
}
static int
@@ -1045,6 +1047,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
if (IS_FWI2_CAPABLE(ha)) {
+ pfc_host_stat->lip_count = stats->lip_cnt;
pfc_host_stat->tx_frames = stats->tx_frames;
pfc_host_stat->rx_frames = stats->rx_frames;
pfc_host_stat->dumped_frames = stats->dumped_frames;
@@ -1173,17 +1176,16 @@ vport_create_failed_2:
static int
qla24xx_vport_delete(struct fc_vport *fc_vport)
{
- scsi_qla_host_t *ha = shost_priv(fc_vport->shost);
scsi_qla_host_t *vha = fc_vport->dd_data;
+ scsi_qla_host_t *pha = to_qla_parent(vha);
+
+ while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
+ test_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags))
+ msleep(1000);
qla24xx_disable_vp(vha);
qla24xx_deallocate_vp_id(vha);
- mutex_lock(&ha->vport_lock);
- ha->cur_vport_count--;
- clear_bit(vha->vp_idx, ha->vp_idx_map);
- mutex_unlock(&ha->vport_lock);
-
kfree(vha->node_name);
kfree(vha->port_name);
@@ -1248,11 +1250,12 @@ struct fc_function_template qla2xxx_transport_functions = {
.get_starget_port_id = qla2x00_get_starget_port_id,
.show_starget_port_id = 1,
- .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
.show_rport_dev_loss_tmo = 1,
.issue_fc_host_lip = qla2x00_issue_lip,
+ .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
+ .terminate_rport_io = qla2x00_terminate_rport_io,
.get_fc_host_stats = qla2x00_get_fc_host_stats,
.vport_create = qla24xx_vport_create,
@@ -1291,11 +1294,12 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
.get_starget_port_id = qla2x00_get_starget_port_id,
.show_starget_port_id = 1,
- .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
.show_rport_dev_loss_tmo = 1,
.issue_fc_host_lip = qla2x00_issue_lip,
+ .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
+ .terminate_rport_io = qla2x00_terminate_rport_io,
.get_fc_host_stats = qla2x00_get_fc_host_stats,
};
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index cbef785765cf..510ba64bc286 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -216,7 +216,7 @@ qla24xx_soft_reset(scsi_qla_host_t *ha)
static int
qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram,
- uint16_t ram_words, void **nxt)
+ uint32_t ram_words, void **nxt)
{
int rval;
uint32_t cnt, stat, timer, words, idx;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 8dd600013bd1..6da31ba94404 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -864,7 +864,8 @@ struct link_statistics {
uint32_t prim_seq_err_cnt;
uint32_t inval_xmit_word_cnt;
uint32_t inval_crc_cnt;
- uint32_t unused1[0x1b];
+ uint32_t lip_cnt;
+ uint32_t unused1[0x1a];
uint32_t tx_frames;
uint32_t rx_frames;
uint32_t dumped_frames;
@@ -1544,7 +1545,6 @@ typedef struct fc_port {
int login_retry;
atomic_t port_down_timer;
- spinlock_t rport_lock;
struct fc_rport *rport, *drport;
u32 supported_classes;
@@ -2155,6 +2155,10 @@ struct qla_chip_state_84xx {
uint32_t gold_fw_version;
};
+struct qla_statistics {
+ uint32_t total_isp_aborts;
+};
+
/*
* Linux Host Adapter structure
*/
@@ -2166,7 +2170,6 @@ typedef struct scsi_qla_host {
struct pci_dev *pdev;
unsigned long host_no;
- unsigned long instance;
volatile struct {
uint32_t init_done :1;
@@ -2515,7 +2518,7 @@ typedef struct scsi_qla_host {
uint8_t model_number[16+1];
#define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
- char *model_desc;
+ char model_desc[80];
uint8_t adapter_id[16+1];
uint8_t *node_name;
@@ -2596,6 +2599,7 @@ typedef struct scsi_qla_host {
int cur_vport_count;
struct qla_chip_state_84xx *cs84xx;
+ struct qla_statistics qla_stats;
} scsi_qla_host_t;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9b4bebee6879..0b156735e9a6 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -62,7 +62,7 @@ extern int ql2xfdmienable;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xqfullrampup;
-extern int num_hosts;
+extern int ql2xiidmaenable;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -71,6 +71,8 @@ extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t,
uint16_t, uint16_t);
+extern void qla2x00_abort_fcport_cmds(fc_port_t *);
+
/*
* Global Functions in qla_mid.c source file.
*/
@@ -312,6 +314,7 @@ extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t,
uint16_t, uint16_t);
extern void qla2xxx_get_flash_info(scsi_qla_host_t *);
+extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
/*
* Global Function Prototypes in qla_dbg.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4cb80b476c85..c2a4bfbcb05b 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1661,6 +1661,12 @@ qla2x00_fdmi_register(scsi_qla_host_t *ha)
{
int rval;
+ if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+ DEBUG2(printk("scsi(%ld): FDMI unsupported on "
+ "ISP2100/ISP2200.\n", ha->host_no));
+ return QLA_SUCCESS;
+ }
+
rval = qla2x00_mgmt_svr_login(ha);
if (rval)
return rval;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index bbbc5a632a1d..601a6b29750c 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -334,6 +334,8 @@ static int
qla2x00_isp_firmware(scsi_qla_host_t *ha)
{
int rval;
+ uint16_t loop_id, topo, sw_cap;
+ uint8_t domain, area, al_pa;
/* Assume loading risc code */
rval = QLA_FUNCTION_FAILED;
@@ -345,6 +347,11 @@ qla2x00_isp_firmware(scsi_qla_host_t *ha)
/* Verify checksum of loaded RISC code. */
rval = qla2x00_verify_checksum(ha, ha->fw_srisc_address);
+ if (rval == QLA_SUCCESS) {
+ /* And, verify we are not in ROM code. */
+ rval = qla2x00_get_adapter_id(ha, &loop_id, &al_pa,
+ &area, &domain, &topo, &sw_cap);
+ }
}
if (rval) {
@@ -722,7 +729,7 @@ qla24xx_chip_diag(scsi_qla_host_t *ha)
/* Perform RISC reset. */
qla24xx_reset_risc(ha);
- ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
+ ha->fw_transfer_size = REQUEST_ENTRY_SIZE * ha->request_q_length;
rval = qla2x00_mbx_reg_test(ha);
if (rval) {
@@ -768,42 +775,16 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
mem_size = (ha->fw_memory_size - 0x100000 + 1) *
sizeof(uint32_t);
- /* Allocate memory for Extended Trace Buffer. */
- tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
- GFP_KERNEL);
- if (!tc) {
- qla_printk(KERN_WARNING, ha, "Unable to allocate "
- "(%d KB) for EFT.\n", EFT_SIZE / 1024);
- goto cont_alloc;
- }
-
- memset(tc, 0, EFT_SIZE);
- rval = qla2x00_enable_eft_trace(ha, tc_dma, EFT_NUM_BUFFERS);
- if (rval) {
- qla_printk(KERN_WARNING, ha, "Unable to initialize "
- "EFT (%d).\n", rval);
- dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
- tc_dma);
- goto cont_alloc;
- }
-
- qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
- EFT_SIZE / 1024);
-
- eft_size = EFT_SIZE;
- ha->eft_dma = tc_dma;
- ha->eft = tc;
-
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha))
- goto cont_alloc;
+ goto try_eft;
tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
GFP_KERNEL);
if (!tc) {
qla_printk(KERN_WARNING, ha, "Unable to allocate "
"(%d KB) for FCE.\n", FCE_SIZE / 1024);
- goto cont_alloc;
+ goto try_eft;
}
memset(tc, 0, FCE_SIZE);
@@ -815,7 +796,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
tc_dma);
ha->flags.fce_enabled = 0;
- goto cont_alloc;
+ goto try_eft;
}
qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n",
@@ -825,6 +806,32 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
ha->flags.fce_enabled = 1;
ha->fce_dma = tc_dma;
ha->fce = tc;
+try_eft:
+ /* Allocate memory for Extended Trace Buffer. */
+ tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
+ GFP_KERNEL);
+ if (!tc) {
+ qla_printk(KERN_WARNING, ha, "Unable to allocate "
+ "(%d KB) for EFT.\n", EFT_SIZE / 1024);
+ goto cont_alloc;
+ }
+
+ memset(tc, 0, EFT_SIZE);
+ rval = qla2x00_enable_eft_trace(ha, tc_dma, EFT_NUM_BUFFERS);
+ if (rval) {
+ qla_printk(KERN_WARNING, ha, "Unable to initialize "
+ "EFT (%d).\n", rval);
+ dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
+ tc_dma);
+ goto cont_alloc;
+ }
+
+ qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
+ EFT_SIZE / 1024);
+
+ eft_size = EFT_SIZE;
+ ha->eft_dma = tc_dma;
+ ha->eft = tc;
}
cont_alloc:
req_q_size = ha->request_q_length * sizeof(request_t);
@@ -1501,18 +1508,25 @@ qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *de
index = (ha->pdev->subsystem_device & 0xff);
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
index < QLA_MODEL_NAMES)
- ha->model_desc = qla2x00_model_name[index * 2 + 1];
+ strncpy(ha->model_desc,
+ qla2x00_model_name[index * 2 + 1],
+ sizeof(ha->model_desc) - 1);
} else {
index = (ha->pdev->subsystem_device & 0xff);
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
index < QLA_MODEL_NAMES) {
strcpy(ha->model_number,
qla2x00_model_name[index * 2]);
- ha->model_desc = qla2x00_model_name[index * 2 + 1];
+ strncpy(ha->model_desc,
+ qla2x00_model_name[index * 2 + 1],
+ sizeof(ha->model_desc) - 1);
} else {
strcpy(ha->model_number, def);
}
}
+ if (IS_FWI2_CAPABLE(ha))
+ qla2xxx_get_vpd_field(ha, "\x82", ha->model_desc,
+ sizeof(ha->model_desc));
}
/* On sparc systems, obtain port and node WWN from firmware
@@ -1864,12 +1878,11 @@ qla2x00_rport_del(void *data)
{
fc_port_t *fcport = data;
struct fc_rport *rport;
- unsigned long flags;
- spin_lock_irqsave(&fcport->rport_lock, flags);
+ spin_lock_irq(fcport->ha->host->host_lock);
rport = fcport->drport;
fcport->drport = NULL;
- spin_unlock_irqrestore(&fcport->rport_lock, flags);
+ spin_unlock_irq(fcport->ha->host->host_lock);
if (rport)
fc_remote_port_delete(rport);
}
@@ -1898,7 +1911,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
atomic_set(&fcport->state, FCS_UNCONFIGURED);
fcport->flags = FCF_RLC_SUPPORT;
fcport->supported_classes = FC_COS_UNSPECIFIED;
- spin_lock_init(&fcport->rport_lock);
return fcport;
}
@@ -2007,8 +2019,10 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
- if (test_bit(RSCN_UPDATE, &save_flags))
+ if (test_bit(RSCN_UPDATE, &save_flags)) {
+ ha->flags.rscn_queue_overflow = 1;
set_bit(RSCN_UPDATE, &ha->dpc_flags);
+ }
}
return (rval);
@@ -2243,28 +2257,24 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
{
struct fc_rport_identifiers rport_ids;
struct fc_rport *rport;
- unsigned long flags;
if (fcport->drport)
qla2x00_rport_del(fcport);
- if (fcport->rport)
- return;
rport_ids.node_name = wwn_to_u64(fcport->node_name);
rport_ids.port_name = wwn_to_u64(fcport->port_name);
rport_ids.port_id = fcport->d_id.b.domain << 16 |
fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
- rport = fc_remote_port_add(ha->host, 0, &rport_ids);
+ fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
if (!rport) {
qla_printk(KERN_WARNING, ha,
"Unable to allocate fc remote port!\n");
return;
}
- spin_lock_irqsave(&fcport->rport_lock, flags);
- fcport->rport = rport;
+ spin_lock_irq(fcport->ha->host->host_lock);
*((fc_port_t **)rport->dd_data) = fcport;
- spin_unlock_irqrestore(&fcport->rport_lock, flags);
+ spin_unlock_irq(fcport->ha->host->host_lock);
rport->supported_classes = fcport->supported_classes;
@@ -2565,7 +2575,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
} else if (qla2x00_gnn_id(ha, swl) != QLA_SUCCESS) {
kfree(swl);
swl = NULL;
- } else if (qla2x00_gfpn_id(ha, swl) == QLA_SUCCESS) {
+ } else if (ql2xiidmaenable &&
+ qla2x00_gfpn_id(ha, swl) == QLA_SUCCESS) {
qla2x00_gpsc(ha, swl);
}
}
@@ -3220,7 +3231,8 @@ qla2x00_update_fcports(scsi_qla_host_t *ha)
/* Go with deferred removal of rport references. */
list_for_each_entry(fcport, &ha->fcports, list)
- if (fcport->drport)
+ if (fcport->drport &&
+ atomic_read(&fcport->state) != FCS_UNCONFIGURED)
qla2x00_rport_del(fcport);
}
@@ -3243,6 +3255,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
if (ha->flags.online) {
ha->flags.online = 0;
clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ ha->qla_stats.total_isp_aborts++;
qla_printk(KERN_INFO, ha,
"Performing ISP error recovery - ha= %p.\n", ha);
@@ -3283,17 +3296,6 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
ha->isp_abort_cnt = 0;
clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
- if (ha->eft) {
- memset(ha->eft, 0, EFT_SIZE);
- rval = qla2x00_enable_eft_trace(ha,
- ha->eft_dma, EFT_NUM_BUFFERS);
- if (rval) {
- qla_printk(KERN_WARNING, ha,
- "Unable to reinitialize EFT "
- "(%d).\n", rval);
- }
- }
-
if (ha->fce) {
ha->flags.fce_enabled = 1;
memset(ha->fce, 0,
@@ -3308,6 +3310,17 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
ha->flags.fce_enabled = 0;
}
}
+
+ if (ha->eft) {
+ memset(ha->eft, 0, EFT_SIZE);
+ rval = qla2x00_enable_eft_trace(ha,
+ ha->eft_dma, EFT_NUM_BUFFERS);
+ if (rval) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to reinitialize EFT "
+ "(%d).\n", rval);
+ }
+ }
} else { /* failed the ISP abort */
ha->flags.online = 1;
if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
@@ -4026,8 +4039,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
ret = qla2x00_stop_firmware(ha);
for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
retries ; retries--) {
- qla2x00_reset_chip(ha);
- if (qla2x00_chip_diag(ha) != QLA_SUCCESS)
+ ha->isp_ops->reset_chip(ha);
+ if (ha->isp_ops->chip_diag(ha) != QLA_SUCCESS)
continue;
if (qla2x00_setup_chip(ha) != QLA_SUCCESS)
continue;
@@ -4049,7 +4062,7 @@ qla24xx_configure_vhba(scsi_qla_host_t *ha)
rval = qla2x00_fw_ready(ha->parent);
if (rval == QLA_SUCCESS) {
clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
- qla2x00_marker(ha->parent, 0, 0, MK_SYNC_ALL);
+ qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
}
ha->flags.management_server_logged_in = 0;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 5489d5024673..d57669aa4615 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -454,10 +454,11 @@ qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
{
int ret;
unsigned long flags = 0;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&pha->hardware_lock, flags);
ret = __qla2x00_marker(ha, loop_id, lun, type);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
return (ret);
}
@@ -672,7 +673,7 @@ qla24xx_start_scsi(srb_t *sp)
{
int ret, nseg;
unsigned long flags;
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *ha, *pha;
struct scsi_cmnd *cmd;
uint32_t *clr_ptr;
uint32_t index;
@@ -686,6 +687,7 @@ qla24xx_start_scsi(srb_t *sp)
/* Setup device pointers. */
ret = 0;
ha = sp->ha;
+ pha = to_qla_parent(ha);
reg = &ha->iobase->isp24;
cmd = sp->cmd;
/* So we know we haven't pci_map'ed anything yet */
@@ -700,7 +702,7 @@ qla24xx_start_scsi(srb_t *sp)
}
/* Acquire ring specific lock */
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&pha->hardware_lock, flags);
/* Check for room in outstanding command list. */
handle = ha->current_outstanding_cmd;
@@ -795,14 +797,14 @@ qla24xx_start_scsi(srb_t *sp)
ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
qla24xx_process_response_queue(ha);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
return QLA_SUCCESS;
queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
return QLA_FUNCTION_FAILED;
}
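The qla_iocb.c changes above make NPIV virtual ports serialize on the physical adapter's lock: to_qla_parent() resolves the parent scsi_qla_host_t, and it is that host's hardware_lock which is taken around ring accesses. A sketch of the pattern, assuming the driver-internal types and helpers from qla_def.h:

/* Sketch only: vports share the physical HBA's request ring, so they must
 * serialize on the parent's hardware_lock rather than their own. */
static void touch_request_ring(scsi_qla_host_t *ha)
{
        scsi_qla_host_t *pha = to_qla_parent(ha);       /* physical HBA */
        unsigned long flags;

        spin_lock_irqsave(&pha->hardware_lock, flags);
        /* ... build IOCBs and ring the request-queue doorbell ... */
        spin_unlock_irqrestore(&pha->hardware_lock, flags);
}
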
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ec63b79f900a..874d802edb7d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -542,10 +542,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
break;
case MBA_PORT_UPDATE: /* Port database update */
- /* Only handle SCNs for our Vport index. */
- if (ha->parent && ha->vp_idx != (mb[3] & 0xff))
- break;
-
/*
* If PORT UPDATE is global (recieved LIP_OCCURED/LIP_RESET
* event etc. earlier indicating loop is down) then process
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 250d2f604397..bc90d6b8d0a0 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -918,6 +918,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
rval = qla2x00_mailbox_command(ha, mcp);
if (mcp->mb[0] == MBS_COMMAND_ERROR)
rval = QLA_COMMAND_ERROR;
+ else if (mcp->mb[0] == MBS_INVALID_COMMAND)
+ rval = QLA_INVALID_COMMAND;
/* Return data. */
*id = mcp->mb[1];
@@ -2161,17 +2163,18 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
struct abort_entry_24xx *abt;
dma_addr_t abt_dma;
uint32_t handle;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
fcport = sp->fcport;
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&pha->hardware_lock, flags);
for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
- if (ha->outstanding_cmds[handle] == sp)
+ if (pha->outstanding_cmds[handle] == sp)
break;
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
if (handle == MAX_OUTSTANDING_COMMANDS) {
/* Command not found. */
return QLA_FUNCTION_FAILED;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 62a3ad6e8ecb..50baf6a1d67c 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -43,6 +43,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
set_bit(vp_id, ha->vp_idx_map);
ha->num_vhosts++;
+ ha->cur_vport_count++;
vha->vp_idx = vp_id;
list_add_tail(&vha->vp_list, &ha->vp_list);
mutex_unlock(&ha->vport_lock);
@@ -58,6 +59,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
mutex_lock(&ha->vport_lock);
vp_id = vha->vp_idx;
ha->num_vhosts--;
+ ha->cur_vport_count--;
clear_bit(vp_id, ha->vp_idx_map);
list_del(&vha->vp_list);
mutex_unlock(&ha->vport_lock);
@@ -103,8 +105,8 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
"loop_id=0x%04x :%x\n",
vha->host_no, fcport->loop_id, fcport->vp_idx));
- atomic_set(&fcport->state, FCS_DEVICE_DEAD);
qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ atomic_set(&fcport->state, FCS_UNCONFIGURED);
}
}
@@ -276,7 +278,8 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
clear_bit(RESET_ACTIVE, &vha->dpc_flags);
}
- if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+ if (atomic_read(&vha->vp_state) == VP_ACTIVE &&
+ test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
qla2x00_loop_resync(vha);
clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
@@ -390,7 +393,6 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
vha->parent = ha;
vha->fc_vport = fc_vport;
vha->device_flags = 0;
- vha->instance = num_hosts;
vha->vp_idx = qla24xx_allocate_vp_id(vha);
if (vha->vp_idx > ha->max_npiv_vports) {
DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
@@ -428,7 +430,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
host->max_cmd_len = MAX_CMDSZ;
host->max_channel = MAX_BUSES - 1;
host->max_lun = MAX_LUNS;
- host->unique_id = vha->instance;
+ host->unique_id = host->host_no;
host->max_id = MAX_TARGETS_2200;
host->transportt = qla2xxx_transport_vport_template;
@@ -436,12 +438,6 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
vha->host_no, vha));
vha->flags.init_done = 1;
- num_hosts++;
-
- mutex_lock(&ha->vport_lock);
- set_bit(vha->vp_idx, ha->vp_idx_map);
- ha->cur_vport_count++;
- mutex_unlock(&ha->vport_lock);
return vha;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 48eaa3bb5433..7c8af7ed2a5d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -27,7 +27,6 @@ char qla2x00_version_str[40];
*/
static struct kmem_cache *srb_cachep;
-int num_hosts;
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xlogintimeout,
@@ -87,6 +86,13 @@ MODULE_PARM_DESC(ql2xqfullrampup,
"depth for a device after a queue-full condition has been "
"detected. Default is 120 seconds.");
+int ql2xiidmaenable=1;
+module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xiidmaenable,
+ "Enables iIDMA settings "
+ "Default is 1 - perform iIDMA. 0 - no iIDMA.");
+
+
/*
* SCSI host template entry points
*/
@@ -388,7 +394,7 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
}
/* Close window on fcport/rport state-transitioning. */
- if (!*(fc_port_t **)rport->dd_data) {
+ if (fcport->drport) {
cmd->result = DID_IMM_RETRY << 16;
goto qc_fail_command;
}
@@ -443,7 +449,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
int rval;
scsi_qla_host_t *pha = to_qla_parent(ha);
- if (unlikely(pci_channel_offline(ha->pdev))) {
+ if (unlikely(pci_channel_offline(pha->pdev))) {
cmd->result = DID_REQUEUE << 16;
goto qc24_fail_command;
}
@@ -455,7 +461,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
}
/* Close window on fcport/rport state-transitioning. */
- if (!*(fc_port_t **)rport->dd_data) {
+ if (fcport->drport) {
cmd->result = DID_IMM_RETRY << 16;
goto qc24_fail_command;
}
@@ -617,6 +623,40 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
return (return_status);
}
+void
+qla2x00_abort_fcport_cmds(fc_port_t *fcport)
+{
+ int cnt;
+ unsigned long flags;
+ srb_t *sp;
+ scsi_qla_host_t *ha = fcport->ha;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
+
+ spin_lock_irqsave(&pha->hardware_lock, flags);
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = pha->outstanding_cmds[cnt];
+ if (!sp)
+ continue;
+ if (sp->fcport != fcport)
+ continue;
+
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+ if (ha->isp_ops->abort_command(ha, sp)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "Abort failed -- %lx\n", sp->cmd->serial_number));
+ } else {
+ if (qla2x00_eh_wait_on_command(ha, sp->cmd) !=
+ QLA_SUCCESS)
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "Abort failed while waiting -- %lx\n",
+ sp->cmd->serial_number));
+
+ }
+ spin_lock_irqsave(&pha->hardware_lock, flags);
+ }
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+}
+
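
qla2x00_abort_fcport_cmds() above scans the outstanding-command array under the hardware lock, but drops that lock around the abort and the wait because both may sleep, then retakes it before continuing. A simplified userspace model of that lock-juggling loop; the command array, owner field, and abort_cmd() are stand-ins, not driver API:

#include <pthread.h>
#include <stddef.h>

#define MAX_CMDS 128

struct cmd { int owner; };

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cmd *outstanding[MAX_CMDS];

static void abort_cmd(struct cmd *c) { (void)c; /* may sleep */ }

/* Abort every command owned by 'owner'. The lock protects the array, but
 * abort_cmd() may sleep, so it has to be called with the lock dropped. */
static void abort_owner_cmds(int owner)
{
    pthread_mutex_lock(&hw_lock);
    for (size_t i = 0; i < MAX_CMDS; i++) {
        struct cmd *c = outstanding[i];

        if (!c || c->owner != owner)
            continue;

        pthread_mutex_unlock(&hw_lock);
        abort_cmd(c);               /* sleeping call, lock not held */
        pthread_mutex_lock(&hw_lock);
        /* the array may have changed while the lock was dropped */
    }
    pthread_mutex_unlock(&hw_lock);
}

int main(void)
{
    abort_owner_cmds(1);            /* nothing outstanding; loop just exits */
    return 0;
}
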
static void
qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
{
@@ -1073,7 +1113,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
else
scsi_deactivate_tcq(sdev, ha->max_q_depth);
- rport->dev_loss_tmo = ha->port_down_retry_count + 5;
+ rport->dev_loss_tmo = ha->port_down_retry_count;
return 0;
}
@@ -1629,9 +1669,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
host->can_queue = ha->request_q_length + 128;
- /* load the F/W, read paramaters, and init the H/W */
- ha->instance = num_hosts;
-
mutex_init(&ha->vport_lock);
init_completion(&ha->mbx_cmd_comp);
complete(&ha->mbx_cmd_comp);
@@ -1679,7 +1716,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->this_id = 255;
host->cmd_per_lun = 3;
- host->unique_id = ha->instance;
+ host->unique_id = host->host_no;
host->max_cmd_len = MAX_CMDSZ;
host->max_channel = MAX_BUSES - 1;
host->max_lun = MAX_LUNS;
@@ -1700,8 +1737,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->flags.init_done = 1;
ha->flags.online = 1;
- num_hosts++;
-
ret = scsi_add_host(host, &pdev->dev);
if (ret)
goto probe_failed;
@@ -1813,27 +1848,21 @@ static inline void
qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
int defer)
{
- unsigned long flags;
struct fc_rport *rport;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
if (!fcport->rport)
return;
rport = fcport->rport;
if (defer) {
- spin_lock_irqsave(&fcport->rport_lock, flags);
+ spin_lock_irq(ha->host->host_lock);
fcport->drport = rport;
- fcport->rport = NULL;
- *(fc_port_t **)rport->dd_data = NULL;
- spin_unlock_irqrestore(&fcport->rport_lock, flags);
- set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
- } else {
- spin_lock_irqsave(&fcport->rport_lock, flags);
- fcport->rport = NULL;
- *(fc_port_t **)rport->dd_data = NULL;
- spin_unlock_irqrestore(&fcport->rport_lock, flags);
+ spin_unlock_irq(ha->host->host_lock);
+ set_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags);
+ qla2xxx_wake_dpc(pha);
+ } else
fc_remote_port_delete(rport);
- }
}
/*
@@ -1903,7 +1932,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
scsi_qla_host_t *pha = to_qla_parent(ha);
list_for_each_entry(fcport, &pha->fcports, list) {
- if (ha->vp_idx != 0 && ha->vp_idx != fcport->vp_idx)
+ if (ha->vp_idx != fcport->vp_idx)
continue;
/*
* No point in marking the device as lost, if the device is
@@ -1911,17 +1940,10 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
*/
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
continue;
- if (atomic_read(&fcport->state) == FCS_ONLINE) {
- if (defer)
- qla2x00_schedule_rport_del(ha, fcport, defer);
- else if (ha->vp_idx == fcport->vp_idx)
- qla2x00_schedule_rport_del(ha, fcport, defer);
- }
+ if (atomic_read(&fcport->state) == FCS_ONLINE)
+ qla2x00_schedule_rport_del(ha, fcport, defer);
atomic_set(&fcport->state, FCS_DEVICE_LOST);
}
-
- if (defer)
- qla2xxx_wake_dpc(ha);
}
/*
@@ -2156,7 +2178,7 @@ qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
static int
qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked)
{
- unsigned long flags;
+ unsigned long uninitialized_var(flags);
scsi_qla_host_t *pha = to_qla_parent(ha);
if (!locked)
@@ -2313,8 +2335,10 @@ qla2x00_do_dpc(void *data)
ha->host_no));
}
- if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags))
+ if (test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) {
qla2x00_update_fcports(ha);
+ clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
+ }
if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) &&
(!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) {
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 1728ab3ccb20..1bca74474935 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -869,11 +869,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
uint32_t i;
uint32_t *dwptr;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- unsigned long flags;
ret = QLA_SUCCESS;
- spin_lock_irqsave(&ha->hardware_lock, flags);
/* Enable flash write. */
WRT_REG_DWORD(&reg->ctrl_status,
RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
@@ -907,7 +905,6 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
WRT_REG_DWORD(&reg->ctrl_status,
RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
return ret;
}
@@ -2306,6 +2303,51 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
}
static int
+qla2xxx_is_vpd_valid(uint8_t *pos, uint8_t *end)
+{
+ if (pos >= end || *pos != 0x82)
+ return 0;
+
+ pos += 3 + pos[1];
+ if (pos >= end || *pos != 0x90)
+ return 0;
+
+ pos += 3 + pos[1];
+ if (pos >= end || *pos != 0x78)
+ return 0;
+
+ return 1;
+}
+
+int
+qla2xxx_get_vpd_field(scsi_qla_host_t *ha, char *key, char *str, size_t size)
+{
+ uint8_t *pos = ha->vpd;
+ uint8_t *end = pos + ha->vpd_size;
+ int len = 0;
+
+ if (!IS_FWI2_CAPABLE(ha) || !qla2xxx_is_vpd_valid(pos, end))
+ return 0;
+
+ while (pos < end && *pos != 0x78) {
+ len = (*pos == 0x82) ? pos[1] : pos[2];
+
+ if (!strncmp(pos, key, strlen(key)))
+ break;
+
+ if (*pos != 0x90 && *pos != 0x91)
+ pos += len;
+
+ pos += 3;
+ }
+
+ if (pos < end - len && *pos != 0x78)
+ return snprintf(str, size, "%.*s", len, pos + 3);
+
+ return 0;
+}
+
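
qla2xxx_get_vpd_field() walks a PCI-style VPD image: an identifier-string resource tagged 0x82, read-only/read-write keyword areas tagged 0x90/0x91, and a 0x78 end tag, with keyword entries carrying their own length byte. A hedged standalone sketch of the same walk over a raw buffer (the keyword, bounds handling, and test buffer layout are illustrative, not lifted from the driver):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int vpd_find_field(const uint8_t *vpd, size_t size,
                          const char *key, char *out, size_t out_size)
{
    const uint8_t *pos = vpd;
    const uint8_t *end = vpd + size;

    while (pos + 3 <= end && *pos != 0x78) {    /* 0x78 = end tag */
        /* the identifier string (0x82) keeps its length at pos[1];
         * keyword entries inside 0x90/0x91 areas keep it at pos[2] */
        size_t len = (*pos == 0x82) ? pos[1] : pos[2];

        if (pos + 3 + len > end)
            break;

        if (!strncmp((const char *)pos, key, strlen(key))) {
            size_t n = len < out_size - 1 ? len : out_size - 1;

            memcpy(out, pos + 3, n);
            out[n] = '\0';
            return (int)n;
        }

        /* keyword areas are walked entry by entry; everything else is
         * skipped as a whole resource, as in the driver code */
        if (*pos != 0x90 && *pos != 0x91)
            pos += len;
        pos += 3;
    }

    return 0;
}

int main(void)
{
    /* contrived minimal buffer: one "SN" keyword entry, then the end tag */
    const uint8_t vpd[] = { 'S', 'N', 4, '1', '2', '3', '4', 0x78 };
    char out[16];

    if (vpd_find_field(vpd, sizeof(vpd), "SN", out, sizeof(out)))
        printf("SN=%s\n", out);
    return 0;
}
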
+static int
qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
{
uint32_t d[2], faddr;
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index d058c8862b35..676c390db354 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.02.01-k4"
+#define QLA2XXX_VERSION "8.02.01-k6"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 5822dd595826..88bebb13bc52 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -46,6 +46,8 @@ MODULE_PARM_DESC(ql4xextended_error_logging,
int ql4_mod_unload = 0;
+#define QL4_DEF_QDEPTH 32
+
/*
* SCSI host template entry points
*/
@@ -1387,7 +1389,7 @@ static int qla4xxx_slave_alloc(struct scsi_device *sdev)
sdev->hostdata = ddb;
sdev->tagged_supported = 1;
- scsi_activate_tcq(sdev, sdev->host->can_queue);
+ scsi_activate_tcq(sdev, QL4_DEF_QDEPTH);
return 0;
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 36c92f961e15..ee6be596503d 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -197,11 +197,43 @@ static void
scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
struct scsi_cmnd *cmd)
{
+ if (cmd->prot_sdb)
+ kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
+
kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
kmem_cache_free(pool->cmd_slab, cmd);
}
/**
+ * scsi_host_alloc_command - internal function to allocate command
+ * @shost: SCSI host whose pool to allocate from
+ * @gfp_mask: mask for the allocation
+ *
+ * Returns a fully allocated command with sense buffer and protection
+ * data buffer (where applicable) or NULL on failure
+ */
+static struct scsi_cmnd *
+scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
+{
+ struct scsi_cmnd *cmd;
+
+ cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
+ if (!cmd)
+ return NULL;
+
+ if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
+ cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
+
+ if (!cmd->prot_sdb) {
+ scsi_pool_free_command(shost->cmd_pool, cmd);
+ return NULL;
+ }
+ }
+
+ return cmd;
+}
+
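
scsi_host_alloc_command() layers an optional protection-buffer allocation on top of the basic command allocation and unwinds the partial allocation if the second step fails. The same error-unwind shape in plain C; the structures and the 96-byte sense size are placeholders, not the midlayer's definitions:

#include <stdlib.h>

struct data_buf { int nents; };
struct command {
    char *sense;
    struct data_buf *prot;      /* only when integrity is enabled */
};

static struct command *alloc_command(int want_protection)
{
    struct command *cmd = calloc(1, sizeof(*cmd));

    if (!cmd)
        return NULL;

    cmd->sense = calloc(1, 96);         /* illustrative sense-buffer size */
    if (!cmd->sense)
        goto fail_cmd;

    if (want_protection) {
        cmd->prot = calloc(1, sizeof(*cmd->prot));
        if (!cmd->prot)
            goto fail_sense;            /* unwind the partial allocation */
    }
    return cmd;

fail_sense:
    free(cmd->sense);
fail_cmd:
    free(cmd);
    return NULL;
}

static void free_command(struct command *cmd)
{
    if (!cmd)
        return;
    free(cmd->prot);                    /* free(NULL) is a no-op */
    free(cmd->sense);
    free(cmd);
}

int main(void)
{
    struct command *cmd = alloc_command(1);

    free_command(cmd);
    return 0;
}
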
+/**
* __scsi_get_command - Allocate a struct scsi_cmnd
* @shost: host to transmit command
* @gfp_mask: allocation mask
@@ -214,7 +246,7 @@ struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
struct scsi_cmnd *cmd;
unsigned char *buf;
- cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
+ cmd = scsi_host_alloc_command(shost, gfp_mask);
if (unlikely(!cmd)) {
unsigned long flags;
@@ -457,7 +489,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
/*
* Get one backup command for this host.
*/
- cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
+ cmd = scsi_host_alloc_command(shost, gfp_mask);
if (!cmd) {
scsi_put_host_cmd_pool(gfp_mask);
shost->cmd_pool = NULL;
@@ -902,11 +934,20 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
- /* Check to see if the queue is managed by the block layer.
- * If it is, and we fail to adjust the depth, exit. */
- if (blk_queue_tagged(sdev->request_queue) &&
- blk_queue_resize_tags(sdev->request_queue, tags) != 0)
- goto out;
+ /*
+ * Check to see if the queue is managed by the block layer.
+ * If it is, and we fail to adjust the depth, exit.
+ *
+ * Do not resize the tag map if it is a host-wide shared bqt,
+ * because the size should be the host's can_queue. If there
+ * is more IO than the LLD's can_queue (so there are not enough
+ * tags) request_fn's host queue ready check will handle it.
+ */
+ if (!sdev->host->bqt) {
+ if (blk_queue_tagged(sdev->request_queue) &&
+ blk_queue_resize_tags(sdev->request_queue, tags) != 0)
+ goto out;
+ }
sdev->queue_depth = tags;
switch (tagged) {
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 01d11a01ffbf..27c633f55794 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1753,7 +1753,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
if (!open_devip) {
printk(KERN_ERR "%s: out of memory at line %d\n",
- __FUNCTION__, __LINE__);
+ __func__, __LINE__);
return NULL;
}
}
@@ -2656,7 +2656,7 @@ static int sdebug_add_adapter(void)
sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
if (NULL == sdbg_host) {
printk(KERN_ERR "%s: out of memory at line %d\n",
- __FUNCTION__, __LINE__);
+ __func__, __LINE__);
return -ENOMEM;
}
@@ -2667,7 +2667,7 @@ static int sdebug_add_adapter(void)
sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
if (!sdbg_devinfo) {
printk(KERN_ERR "%s: out of memory at line %d\n",
- __FUNCTION__, __LINE__);
+ __func__, __LINE__);
error = -ENOMEM;
goto clean;
}
@@ -2987,7 +2987,7 @@ static int sdebug_driver_probe(struct device * dev)
hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
if (NULL == hpnt) {
- printk(KERN_ERR "%s: scsi_register failed\n", __FUNCTION__);
+ printk(KERN_ERR "%s: scsi_register failed\n", __func__);
error = -ENODEV;
return error;
}
@@ -3002,7 +3002,7 @@ static int sdebug_driver_probe(struct device * dev)
error = scsi_add_host(hpnt, &sdbg_host->dev);
if (error) {
- printk(KERN_ERR "%s: scsi_add_host failed\n", __FUNCTION__);
+ printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
error = -ENODEV;
scsi_host_put(hpnt);
} else
@@ -3021,7 +3021,7 @@ static int sdebug_driver_remove(struct device * dev)
if (!sdbg_host) {
printk(KERN_ERR "%s: Unable to locate host info\n",
- __FUNCTION__);
+ __func__);
return -ENODEV;
}
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index a235802f2981..4969e4ec75ea 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -272,7 +272,7 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
}
if (from_length > to_length)
printk(KERN_WARNING "%s: %s string '%s' is too long\n",
- __FUNCTION__, name, from);
+ __func__, name, from);
}
/**
@@ -298,7 +298,7 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL);
if (!devinfo) {
- printk(KERN_ERR "%s: no memory\n", __FUNCTION__);
+ printk(KERN_ERR "%s: no memory\n", __func__);
return -ENOMEM;
}
@@ -363,7 +363,7 @@ static int scsi_dev_info_list_add_str(char *dev_list)
strflags = strsep(&next, next_check);
if (!model || !strflags) {
printk(KERN_ERR "%s: bad dev info string '%s' '%s'"
- " '%s'\n", __FUNCTION__, vendor, model,
+ " '%s'\n", __func__, vendor, model,
strflags);
res = -EINVAL;
} else
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 006a95916f72..880051c89bde 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -139,7 +139,7 @@ void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
scmd->eh_timeout.function = (void (*)(unsigned long)) complete;
SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
- " %d, (%p)\n", __FUNCTION__,
+ " %d, (%p)\n", __func__,
scmd, timeout, complete));
add_timer(&scmd->eh_timeout);
@@ -163,7 +163,7 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
rtn = del_timer(&scmd->eh_timeout);
SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
- " rtn: %d\n", __FUNCTION__,
+ " rtn: %d\n", __func__,
scmd, rtn));
scmd->eh_timeout.data = (unsigned long)NULL;
@@ -233,7 +233,7 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev)
online = scsi_device_online(sdev);
- SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __FUNCTION__,
+ SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __func__,
online));
return online;
@@ -271,7 +271,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
SCSI_LOG_ERROR_RECOVERY(3,
sdev_printk(KERN_INFO, sdev,
"%s: cmds failed: %d, cancel: %d\n",
- __FUNCTION__, cmd_failed,
+ __func__, cmd_failed,
cmd_cancel));
cmd_cancel = 0;
cmd_failed = 0;
@@ -344,6 +344,9 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
return /* soft_error */ SUCCESS;
case ABORTED_COMMAND:
+ if (sshdr.asc == 0x10) /* DIF */
+ return SUCCESS;
+
return NEEDS_RETRY;
case NOT_READY:
case UNIT_ATTENTION:
@@ -470,7 +473,7 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
SCSI_LOG_ERROR_RECOVERY(3,
printk("%s scmd: %p result: %x\n",
- __FUNCTION__, scmd, scmd->result));
+ __func__, scmd, scmd->result));
eh_action = scmd->device->host->eh_action;
if (eh_action)
@@ -487,7 +490,7 @@ static int scsi_try_host_reset(struct scsi_cmnd *scmd)
int rtn;
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n",
- __FUNCTION__));
+ __func__));
if (!scmd->device->host->hostt->eh_host_reset_handler)
return FAILED;
@@ -516,7 +519,7 @@ static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
int rtn;
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n",
- __FUNCTION__));
+ __func__));
if (!scmd->device->host->hostt->eh_bus_reset_handler)
return FAILED;
@@ -664,7 +667,10 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
ses->sdb = scmd->sdb;
ses->next_rq = scmd->request->next_rq;
ses->result = scmd->result;
+ ses->underflow = scmd->underflow;
+ ses->prot_op = scmd->prot_op;
+ scmd->prot_op = SCSI_PROT_NORMAL;
scmd->cmnd = ses->eh_cmnd;
memset(scmd->cmnd, 0, BLK_MAX_CDB);
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
@@ -722,6 +728,8 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
scmd->sdb = ses->sdb;
scmd->request->next_rq = ses->next_rq;
scmd->result = ses->result;
+ scmd->underflow = ses->underflow;
+ scmd->prot_op = ses->prot_op;
}
EXPORT_SYMBOL(scsi_eh_restore_cmnd);
@@ -766,7 +774,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
SCSI_LOG_ERROR_RECOVERY(3,
printk("%s: scmd: %p, timeleft: %ld\n",
- __FUNCTION__, scmd, timeleft));
+ __func__, scmd, timeleft));
/*
* If there is time left scsi_eh_done got called, and we will
@@ -778,7 +786,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
rtn = scsi_eh_completed_normally(scmd);
SCSI_LOG_ERROR_RECOVERY(3,
printk("%s: scsi_eh_completed_normally %x\n",
- __FUNCTION__, rtn));
+ __func__, rtn));
switch (rtn) {
case SUCCESS:
@@ -913,7 +921,7 @@ retry_tur:
rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
- __FUNCTION__, scmd, rtn));
+ __func__, scmd, rtn));
switch (rtn) {
case NEEDS_RETRY:
@@ -1296,7 +1304,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
if (!scsi_device_online(scmd->device)) {
SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report"
" as SUCCESS\n",
- __FUNCTION__));
+ __func__));
return SUCCESS;
}
@@ -1511,7 +1519,7 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
* ioctls to queued block devices.
*/
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
- __FUNCTION__));
+ __func__));
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_RUNNING))
@@ -1835,7 +1843,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
*/
SCSI_LOG_ERROR_RECOVERY(3,
printk("%s: waking up host to restart after TMF\n",
- __FUNCTION__));
+ __func__));
wake_up(&shost->host_wait);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 88d1b5f44e59..ff5d56b3ee4d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -65,7 +65,7 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
};
#undef SP
-static struct kmem_cache *scsi_sdb_cache;
+struct kmem_cache *scsi_sdb_cache;
static void scsi_run_queue(struct request_queue *q);
@@ -787,6 +787,9 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
kmem_cache_free(scsi_sdb_cache, bidi_sdb);
cmd->request->next_rq->special = NULL;
}
+
+ if (scsi_prot_sg_count(cmd))
+ scsi_free_sgtable(cmd->prot_sdb);
}
EXPORT_SYMBOL(scsi_release_buffers);
@@ -947,9 +950,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* 6-byte command.
*/
scsi_requeue_command(q, cmd);
- return;
- } else {
+ } else if (sshdr.asc == 0x10) /* DIX */
+ scsi_end_request(cmd, -EIO, this_count, 0);
+ else
scsi_end_request(cmd, -EIO, this_count, 1);
+ return;
+ case ABORTED_COMMAND:
+ if (sshdr.asc == 0x10) { /* DIF */
+ scsi_end_request(cmd, -EIO, this_count, 0);
return;
}
break;
@@ -1072,6 +1080,26 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
goto err_exit;
}
+ if (blk_integrity_rq(cmd->request)) {
+ struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
+ int ivecs, count;
+
+ BUG_ON(prot_sdb == NULL);
+ ivecs = blk_rq_count_integrity_sg(cmd->request);
+
+ if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
+ error = BLKPREP_DEFER;
+ goto err_exit;
+ }
+
+ count = blk_rq_map_integrity_sg(cmd->request,
+ prot_sdb->table.sgl);
+ BUG_ON(unlikely(count > ivecs));
+
+ cmd->prot_sdb = prot_sdb;
+ cmd->prot_sdb->table.nents = count;
+ }
+
return BLKPREP_OK ;
err_exit:
@@ -1367,7 +1395,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
if (unlikely(cmd == NULL)) {
printk(KERN_CRIT "impossible request in %s.\n",
- __FUNCTION__);
+ __func__);
BUG();
}
@@ -1491,12 +1519,27 @@ static void scsi_request_fn(struct request_queue *q)
printk(KERN_CRIT "impossible request in %s.\n"
"please mail a stack trace to "
"linux-scsi@vger.kernel.org\n",
- __FUNCTION__);
+ __func__);
blk_dump_rq_flags(req, "foo");
BUG();
}
spin_lock(shost->host_lock);
+ /*
+ * We hit this when the driver is using a host wide
+ * tag map. For device level tag maps the queue_depth check
+ * in the device ready fn would prevent us from trying
+ * to allocate a tag. Since the map is a shared host resource
+ * we add the dev to the starved list so it eventually gets
+ * a run when a tag is freed.
+ */
+ if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
+ if (list_empty(&sdev->starved_entry))
+ list_add_tail(&sdev->starved_entry,
+ &shost->starved_list);
+ goto not_ready;
+ }
+
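
The request_fn hunk above handles a host-wide tag map that has run dry: the request came through without a tag, so the device is parked on the host's starved list and re-run when a tag is released, rather than being failed or busy-waited. A toy model of that park-and-retry idea; it uses a tiny fixed pool and a LIFO list, which is a simplification of the real starved_list handling:

#include <stdbool.h>
#include <stdio.h>

#define HOST_TAGS 4
#define MAX_STARVED 8

struct host {
    bool tag_in_use[HOST_TAGS];
    int starved[MAX_STARVED];   /* device ids waiting for a tag */
    int nstarved;
};

static int get_tag(struct host *h)
{
    for (int i = 0; i < HOST_TAGS; i++) {
        if (!h->tag_in_use[i]) {
            h->tag_in_use[i] = true;
            return i;
        }
    }
    return -1;                  /* shared pool exhausted */
}

static bool dispatch(struct host *h, int dev)
{
    int tag = get_tag(h);

    if (tag < 0) {
        /* No tag: remember the device so it gets re-run when a tag
         * frees up, instead of failing or spinning on the request. */
        if (h->nstarved < MAX_STARVED)
            h->starved[h->nstarved++] = dev;
        return false;
    }
    printf("dev %d dispatched with tag %d\n", dev, tag);
    return true;
}

static void put_tag(struct host *h, int tag)
{
    h->tag_in_use[tag] = false;
    if (h->nstarved)            /* kick a starved device on release */
        dispatch(h, h->starved[--h->nstarved]);
}

int main(void)
{
    struct host h = { { false } };

    for (int dev = 0; dev < 6; dev++)
        dispatch(&h, dev);      /* devices 4 and 5 starve */
    put_tag(&h, 0);             /* frees a tag; a starved device runs */
    return 0;
}
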
if (!scsi_host_queue_ready(q, shost, sdev))
goto not_ready;
if (scsi_target(sdev)->single_lun) {
@@ -2486,7 +2529,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
if (unlikely(i == sg_count)) {
printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
"elements %d\n",
- __FUNCTION__, sg_len, *offset, sg_count);
+ __func__, sg_len, *offset, sg_count);
WARN_ON(1);
return NULL;
}
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index 370c78cc1cb5..ae7ed9a22662 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -55,7 +55,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) ||
(skb->len < nlh->nlmsg_len)) {
printk(KERN_WARNING "%s: discarding partial skb\n",
- __FUNCTION__);
+ __func__);
return;
}
@@ -82,7 +82,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) {
printk(KERN_WARNING "%s: discarding partial message\n",
- __FUNCTION__);
+ __func__);
return;
}
@@ -139,7 +139,7 @@ scsi_netlink_init(void)
error = netlink_register_notifier(&scsi_netlink_notifier);
if (error) {
printk(KERN_ERR "%s: register of event handler failed - %d\n",
- __FUNCTION__, error);
+ __func__, error);
return;
}
@@ -148,7 +148,7 @@ scsi_netlink_init(void)
THIS_MODULE);
if (!scsi_nl_sock) {
printk(KERN_ERR "%s: register of recieve handler failed\n",
- __FUNCTION__);
+ __func__);
netlink_unregister_notifier(&scsi_netlink_notifier);
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index b33e72516ef8..79f0f7511204 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -77,6 +77,7 @@ extern void scsi_exit_queue(void);
struct request_queue;
struct request;
extern int scsi_prep_fn(struct request_queue *, struct request *);
+extern struct kmem_cache *scsi_sdb_cache;
/* scsi_proc.c */
#ifdef CONFIG_SCSI_PROC_FS
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index e4a0d2f9b357..c6a904a45bf9 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -114,7 +114,7 @@ void scsi_proc_hostdir_add(struct scsi_host_template *sht)
sht->proc_dir = proc_mkdir(sht->proc_name, proc_scsi);
if (!sht->proc_dir)
printk(KERN_ERR "%s: proc_mkdir failed for %s\n",
- __FUNCTION__, sht->proc_name);
+ __func__, sht->proc_name);
else
sht->proc_dir->owner = sht->module;
}
@@ -157,7 +157,7 @@ void scsi_proc_host_add(struct Scsi_Host *shost)
sht->proc_dir, proc_scsi_read, shost);
if (!p) {
printk(KERN_ERR "%s: Failed to register host %d in"
- "%s\n", __FUNCTION__, shost->host_no,
+ "%s\n", __func__, shost->host_no,
sht->proc_name);
return;
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 196fe3af0d5e..84b4879cff11 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -318,7 +318,7 @@ out_device_destroy:
put_device(&sdev->sdev_gendev);
out:
if (display_failure_msg)
- printk(ALLOC_FAILURE_MSG, __FUNCTION__);
+ printk(ALLOC_FAILURE_MSG, __func__);
return NULL;
}
@@ -404,7 +404,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
starget = kzalloc(size, GFP_KERNEL);
if (!starget) {
- printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__);
+ printk(KERN_ERR "%s: allocation failure\n", __func__);
return NULL;
}
dev = &starget->dev;
@@ -1337,7 +1337,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
lun_data = kmalloc(length, GFP_ATOMIC |
(sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
if (!lun_data) {
- printk(ALLOC_FAILURE_MSG, __FUNCTION__);
+ printk(ALLOC_FAILURE_MSG, __func__);
goto out;
}
@@ -1649,7 +1649,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
{
SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
"%s: <%u:%u:%u>\n",
- __FUNCTION__, channel, id, lun));
+ __func__, channel, id, lun));
if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
@@ -1703,7 +1703,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
return NULL;
if (shost->async_scan) {
- printk("%s called twice for host %d", __FUNCTION__,
+ printk("%s called twice for host %d", __func__,
shost->host_no);
dump_stack();
return NULL;
@@ -1757,9 +1757,10 @@ static void scsi_finish_async_scan(struct async_scan_data *data)
mutex_lock(&shost->scan_mutex);
if (!shost->async_scan) {
- printk("%s called twice for host %d", __FUNCTION__,
+ printk("%s called twice for host %d", __func__,
shost->host_no);
dump_stack();
+ mutex_unlock(&shost->scan_mutex);
return;
}
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index b6e561059779..ab3c71869be5 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -249,6 +249,8 @@ shost_rd_attr(cmd_per_lun, "%hd\n");
shost_rd_attr(can_queue, "%hd\n");
shost_rd_attr(sg_tablesize, "%hu\n");
shost_rd_attr(unchecked_isa_dma, "%d\n");
+shost_rd_attr(prot_capabilities, "%u\n");
+shost_rd_attr(prot_guard_type, "%hd\n");
shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
static struct attribute *scsi_sysfs_shost_attrs[] = {
@@ -263,6 +265,8 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
&dev_attr_hstate.attr,
&dev_attr_supported_mode.attr,
&dev_attr_active_mode.attr,
+ &dev_attr_prot_capabilities.attr,
+ &dev_attr_prot_guard_type.attr,
NULL
};
diff --git a/drivers/scsi/scsi_tgt_priv.h b/drivers/scsi/scsi_tgt_priv.h
index cb92888948f9..fe4c62177f78 100644
--- a/drivers/scsi/scsi_tgt_priv.h
+++ b/drivers/scsi/scsi_tgt_priv.h
@@ -6,7 +6,7 @@ struct task_struct;
/* tmp - will replace with SCSI logging stuff */
#define eprintk(fmt, args...) \
do { \
- printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
+ printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
} while (0)
#define dprintk(fmt, args...)
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index a272b9a2c869..56823fd1fb84 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -571,7 +571,7 @@ send_fail:
name = get_fc_host_event_code_name(event_code);
printk(KERN_WARNING
"%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
- __FUNCTION__, shost->host_no,
+ __func__, shost->host_no,
(name) ? name : "<unknown>", event_data, err);
return;
}
@@ -644,7 +644,7 @@ send_vendor_fail_skb:
send_vendor_fail:
printk(KERN_WARNING
"%s: Dropped Event : host %d vendor_unique - err %d\n",
- __FUNCTION__, shost->host_no, err);
+ __func__, shost->host_no, err);
return;
}
EXPORT_SYMBOL(fc_host_post_vendor_event);
@@ -2464,7 +2464,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size);
rport = kzalloc(size, GFP_KERNEL);
if (unlikely(!rport)) {
- printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__);
+ printk(KERN_ERR "%s: allocation failure\n", __func__);
return NULL;
}
@@ -3137,7 +3137,7 @@ fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size);
vport = kzalloc(size, GFP_KERNEL);
if (unlikely(!vport)) {
- printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__);
+ printk(KERN_ERR "%s: allocation failure\n", __func__);
return -ENOMEM;
}
@@ -3201,7 +3201,7 @@ fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
printk(KERN_ERR
"%s: Cannot create vport symlinks for "
"%s, err=%d\n",
- __FUNCTION__, dev->bus_id, error);
+ __func__, dev->bus_id, error);
}
spin_lock_irqsave(shost->host_lock, flags);
vport->flags &= ~FC_VPORT_CREATING;
@@ -3314,7 +3314,7 @@ fc_vport_sched_delete(struct work_struct *work)
if (stat)
dev_printk(KERN_ERR, vport->dev.parent,
"%s: %s could not be deleted created via "
- "shost%d channel %d - error %d\n", __FUNCTION__,
+ "shost%d channel %d - error %d\n", __func__,
vport->dev.bus_id, vport->shost->host_no,
vport->channel, stat);
}
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index f4461d35ffb9..366609386be1 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -779,7 +779,7 @@ static void sas_port_create_link(struct sas_port *port,
return;
err:
printk(KERN_ERR "%s: Cannot create port links, err=%d\n",
- __FUNCTION__, res);
+ __func__, res);
}
static void sas_port_delete_link(struct sas_port *port,
@@ -1029,7 +1029,7 @@ void sas_port_mark_backlink(struct sas_port *port)
return;
err:
printk(KERN_ERR "%s: Cannot create port backlink, err=%d\n",
- __FUNCTION__, res);
+ __func__, res);
}
EXPORT_SYMBOL(sas_port_mark_backlink);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0c63947d8a9d..e5e7d7856454 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -99,8 +99,7 @@ static void scsi_disk_release(struct device *cdev);
static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
static void sd_print_result(struct scsi_disk *, int);
-static DEFINE_IDR(sd_index_idr);
-static DEFINE_SPINLOCK(sd_index_lock);
+static DEFINE_IDA(sd_index_ida);
/* This semaphore is used to mediate the 0->1 reference get in the
* face of object destruction (i.e. we can't allow a get on an
@@ -234,6 +233,24 @@ sd_show_allow_restart(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);
}
+static ssize_t
+sd_show_protection_type(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%u\n", sdkp->protection_type);
+}
+
+static ssize_t
+sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%u\n", sdkp->ATO);
+}
+
static struct device_attribute sd_disk_attrs[] = {
__ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
sd_store_cache_type),
@@ -242,6 +259,8 @@ static struct device_attribute sd_disk_attrs[] = {
sd_store_allow_restart),
__ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
sd_store_manage_start_stop),
+ __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
+ __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
__ATTR_NULL,
};
@@ -354,7 +373,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
struct scsi_cmnd *SCpnt;
struct scsi_device *sdp = q->queuedata;
struct gendisk *disk = rq->rq_disk;
+ struct scsi_disk *sdkp;
sector_t block = rq->sector;
+ sector_t threshold;
unsigned int this_count = rq->nr_sectors;
unsigned int timeout = sdp->timeout;
int ret;
@@ -370,6 +391,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
if (ret != BLKPREP_OK)
goto out;
SCpnt = rq->special;
+ sdkp = scsi_disk(disk);
/* from here on until we're complete, any goto out
* is used for a killable error condition */
@@ -401,13 +423,21 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
}
/*
- * Some devices (some sdcards for one) don't like it if the
- * last sector gets read in a larger then 1 sector read.
+ * Some SD card readers can't handle multi-sector accesses which touch
+ * the last one or two hardware sectors. Split accesses as needed.
*/
- if (unlikely(sdp->last_sector_bug &&
- rq->nr_sectors > sdp->sector_size / 512 &&
- block + this_count == get_capacity(disk)))
- this_count -= sdp->sector_size / 512;
+ threshold = get_capacity(disk) - SD_LAST_BUGGY_SECTORS *
+ (sdp->sector_size / 512);
+
+ if (unlikely(sdp->last_sector_bug && block + this_count > threshold)) {
+ if (block < threshold) {
+ /* Access up to the threshold but not beyond */
+ this_count = threshold - block;
+ } else {
+ /* Access only a single hardware sector */
+ this_count = sdp->sector_size / 512;
+ }
+ }
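
The reworked last_sector_bug handling clamps any access that would cross into the final SD_LAST_BUGGY_SECTORS hardware sectors: requests starting below the threshold are trimmed to it, requests already past it are cut down to one hardware sector. A standalone restatement of that arithmetic, with capacities and block numbers invented only to exercise both branches:

#include <stdio.h>
#include <stdint.h>

#define LAST_BUGGY_SECTORS 8    /* mirrors SD_LAST_BUGGY_SECTORS */

/* block/count are in 512-byte units; hw_sector_size is in bytes. */
static uint64_t clamp_count(uint64_t capacity, uint64_t block,
                            uint64_t count, unsigned int hw_sector_size)
{
    uint64_t per_hw = hw_sector_size / 512;
    uint64_t threshold = capacity - LAST_BUGGY_SECTORS * per_hw;

    if (block + count <= threshold)
        return count;               /* untouched */
    if (block < threshold)
        return threshold - block;   /* access up to the threshold */
    return per_hw;                  /* single hardware sector */
}

int main(void)
{
    uint64_t cap = 1000;            /* capacity in 512-byte sectors */

    /* crosses the threshold at 992: trimmed from 32 to 12 sectors */
    printf("%llu\n", (unsigned long long)clamp_count(cap, 980, 32, 512));
    /* starts past the threshold: reduced to 1 hardware sector */
    printf("%llu\n", (unsigned long long)clamp_count(cap, 996, 8, 512));
    return 0;
}
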
SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n",
(unsigned long long)block));
@@ -459,6 +489,11 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
}
SCpnt->cmnd[0] = WRITE_6;
SCpnt->sc_data_direction = DMA_TO_DEVICE;
+
+ if (blk_integrity_rq(rq) &&
+ sd_dif_prepare(rq, block, sdp->sector_size) == -EIO)
+ goto out;
+
} else if (rq_data_dir(rq) == READ) {
SCpnt->cmnd[0] = READ_6;
SCpnt->sc_data_direction = DMA_FROM_DEVICE;
@@ -473,8 +508,12 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
"writing" : "reading", this_count,
rq->nr_sectors));
- SCpnt->cmnd[1] = 0;
-
+ /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
+ if (scsi_host_dif_capable(sdp->host, sdkp->protection_type))
+ SCpnt->cmnd[1] = 1 << 5;
+ else
+ SCpnt->cmnd[1] = 0;
+
if (block > 0xffffffff) {
SCpnt->cmnd[0] += READ_16 - READ_6;
SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0;
@@ -492,6 +531,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->cmnd[13] = (unsigned char) this_count & 0xff;
SCpnt->cmnd[14] = SCpnt->cmnd[15] = 0;
} else if ((this_count > 0xff) || (block > 0x1fffff) ||
+ scsi_device_protection(SCpnt->device) ||
SCpnt->device->use_10_for_rw) {
if (this_count > 0xffff)
this_count = 0xffff;
@@ -526,6 +566,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
}
SCpnt->sdb.length = this_count * sdp->sector_size;
+ /* If DIF or DIX is enabled, tell HBA how to handle request */
+ if (sdkp->protection_type || scsi_prot_sg_count(SCpnt))
+ sd_dif_op(SCpnt, sdkp->protection_type, scsi_prot_sg_count(SCpnt));
+
/*
* We shouldn't disconnect in the middle of a sector, so with a dumb
* host adapter, it's safe to assume that we can at least transfer
@@ -920,6 +964,48 @@ static struct block_device_operations sd_fops = {
.revalidate_disk = sd_revalidate_disk,
};
+static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
+{
+ u64 start_lba = scmd->request->sector;
+ u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512);
+ u64 bad_lba;
+ int info_valid;
+
+ if (!blk_fs_request(scmd->request))
+ return 0;
+
+ info_valid = scsi_get_sense_info_fld(scmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
+ &bad_lba);
+ if (!info_valid)
+ return 0;
+
+ if (scsi_bufflen(scmd) <= scmd->device->sector_size)
+ return 0;
+
+ if (scmd->device->sector_size < 512) {
+ /* only legitimate sector_size here is 256 */
+ start_lba <<= 1;
+ end_lba <<= 1;
+ } else {
+ /* be careful ... don't want any overflows */
+ u64 factor = scmd->device->sector_size / 512;
+ do_div(start_lba, factor);
+ do_div(end_lba, factor);
+ }
+
+ /* The bad lba was reported incorrectly, we have no idea where
+ * the error is.
+ */
+ if (bad_lba < start_lba || bad_lba >= end_lba)
+ return 0;
+
+ /* This computation should always be done in terms of
+ * the resolution of the device's medium.
+ */
+ return (bad_lba - start_lba) * scmd->device->sector_size;
+}
+
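
sd_completed_bytes() turns the sense-reported bad LBA back into a byte count relative to the start of the request, scaling between the request's 512-byte units and the device's logical sector size. A minimal userspace version of the calculation, assuming the bad LBA has already been pulled out of the sense data and ignoring the 256-byte-sector special case:

#include <stdint.h>
#include <stdio.h>

/* start/len are in 512-byte units; sector_size is the device's size in bytes.
 * Returns the number of bytes known good before the failing sector, or 0 if
 * the reported LBA does not fall inside the request. */
static uint64_t completed_bytes(uint64_t start, uint64_t len,
                                unsigned int sector_size, uint64_t bad_lba)
{
    uint64_t factor = sector_size / 512;
    uint64_t start_lba = start / factor;
    uint64_t end_lba = (start + len) / factor;

    if (bad_lba < start_lba || bad_lba >= end_lba)
        return 0;       /* bad LBA reported outside the transfer */

    return (bad_lba - start_lba) * sector_size;
}

int main(void)
{
    /* 4 KiB-sector disk, request covers LBAs 100..103, LBA 102 is bad:
     * two good sectors => 8192 bytes completed */
    printf("%llu\n",
           (unsigned long long)completed_bytes(800, 32, 4096, 102));
    return 0;
}
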
/**
* sd_done - bottom half handler: called when the lower level
* driver has completed (successfully or otherwise) a scsi command.
@@ -930,15 +1016,10 @@ static struct block_device_operations sd_fops = {
static int sd_done(struct scsi_cmnd *SCpnt)
{
int result = SCpnt->result;
- unsigned int xfer_size = scsi_bufflen(SCpnt);
- unsigned int good_bytes = result ? 0 : xfer_size;
- u64 start_lba = SCpnt->request->sector;
- u64 end_lba = SCpnt->request->sector + (xfer_size / 512);
- u64 bad_lba;
+ unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
struct scsi_sense_hdr sshdr;
int sense_valid = 0;
int sense_deferred = 0;
- int info_valid;
if (result) {
sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
@@ -963,36 +1044,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
switch (sshdr.sense_key) {
case HARDWARE_ERROR:
case MEDIUM_ERROR:
- if (!blk_fs_request(SCpnt->request))
- goto out;
- info_valid = scsi_get_sense_info_fld(SCpnt->sense_buffer,
- SCSI_SENSE_BUFFERSIZE,
- &bad_lba);
- if (!info_valid)
- goto out;
- if (xfer_size <= SCpnt->device->sector_size)
- goto out;
- if (SCpnt->device->sector_size < 512) {
- /* only legitimate sector_size here is 256 */
- start_lba <<= 1;
- end_lba <<= 1;
- } else {
- /* be careful ... don't want any overflows */
- u64 factor = SCpnt->device->sector_size / 512;
- do_div(start_lba, factor);
- do_div(end_lba, factor);
- }
-
- if (bad_lba < start_lba || bad_lba >= end_lba)
- /* the bad lba was reported incorrectly, we have
- * no idea where the error is
- */
- goto out;
-
- /* This computation should always be done in terms of
- * the resolution of the device's medium.
- */
- good_bytes = (bad_lba - start_lba)*SCpnt->device->sector_size;
+ good_bytes = sd_completed_bytes(SCpnt);
break;
case RECOVERED_ERROR:
case NO_SENSE:
@@ -1002,10 +1054,23 @@ static int sd_done(struct scsi_cmnd *SCpnt)
scsi_print_sense("sd", SCpnt);
SCpnt->result = 0;
memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
- good_bytes = xfer_size;
+ good_bytes = scsi_bufflen(SCpnt);
+ break;
+ case ABORTED_COMMAND:
+ if (sshdr.asc == 0x10) { /* DIF: Disk detected corruption */
+ scsi_print_result(SCpnt);
+ scsi_print_sense("sd", SCpnt);
+ good_bytes = sd_completed_bytes(SCpnt);
+ }
break;
case ILLEGAL_REQUEST:
- if (SCpnt->device->use_10_for_rw &&
+ if (sshdr.asc == 0x10) { /* DIX: HBA detected corruption */
+ scsi_print_result(SCpnt);
+ scsi_print_sense("sd", SCpnt);
+ good_bytes = sd_completed_bytes(SCpnt);
+ }
+ if (!scsi_device_protection(SCpnt->device) &&
+ SCpnt->device->use_10_for_rw &&
(SCpnt->cmnd[0] == READ_10 ||
SCpnt->cmnd[0] == WRITE_10))
SCpnt->device->use_10_for_rw = 0;
@@ -1018,6 +1083,9 @@ static int sd_done(struct scsi_cmnd *SCpnt)
break;
}
out:
+ if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
+ sd_dif_complete(SCpnt, good_bytes);
+
return good_bytes;
}
@@ -1165,6 +1233,49 @@ sd_spinup_disk(struct scsi_disk *sdkp)
}
}
+
+/*
+ * Determine whether disk supports Data Integrity Field.
+ */
+void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+ struct scsi_device *sdp = sdkp->device;
+ u8 type;
+
+ if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
+ type = 0;
+ else
+ type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
+
+ switch (type) {
+ case SD_DIF_TYPE0_PROTECTION:
+ sdkp->protection_type = 0;
+ break;
+
+ case SD_DIF_TYPE1_PROTECTION:
+ case SD_DIF_TYPE3_PROTECTION:
+ sdkp->protection_type = type;
+ break;
+
+ case SD_DIF_TYPE2_PROTECTION:
+ sd_printk(KERN_ERR, sdkp, "formatted with DIF Type 2 " \
+ "protection which is currently unsupported. " \
+ "Disabling disk!\n");
+ goto disable;
+
+ default:
+ sd_printk(KERN_ERR, sdkp, "formatted with unknown " \
+ "protection type %d. Disabling disk!\n", type);
+ goto disable;
+ }
+
+ return;
+
+disable:
+ sdkp->protection_type = 0;
+ sdkp->capacity = 0;
+}
+
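
sd_read_protection_type() decodes byte 12 of the READ CAPACITY(16) response: bit 0 is the protection-enable flag and bits 1-3 encode P_TYPE, where P_TYPE 0 with protection enabled means DIF Type 1. A short standalone decoder using the same convention and, like the driver, treating Type 2 as unsupported:

#include <stdio.h>
#include <stdint.h>

/* Decode the protection type from READ CAPACITY(16) byte 12.
 * Returns 1 or 3 for supported DIF types, 0 otherwise. */
static unsigned int decode_prot_type(uint8_t byte12, int device_protects)
{
    unsigned int type;

    if (!device_protects || !(byte12 & 1))
        return 0;                       /* protection bit clear: no DIF */

    type = ((byte12 >> 1) & 7) + 1;     /* P_TYPE 0 => DIF Type 1 */

    switch (type) {
    case 1:
    case 3:
        return type;
    case 2:                             /* Type 2 treated as unsupported */
    default:
        return 0;
    }
}

int main(void)
{
    printf("%u\n", decode_prot_type(0x01, 1));  /* Type 1 */
    printf("%u\n", decode_prot_type(0x00, 1));  /* unprotected */
    return 0;
}
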
/*
* read disk capacity
*/
@@ -1174,7 +1285,8 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
unsigned char cmd[16];
int the_result, retries;
int sector_size = 0;
- int longrc = 0;
+ /* Force READ CAPACITY(16) when PROTECT=1 */
+ int longrc = scsi_device_protection(sdkp->device) ? 1 : 0;
struct scsi_sense_hdr sshdr;
int sense_valid = 0;
struct scsi_device *sdp = sdkp->device;
@@ -1186,8 +1298,8 @@ repeat:
memset((void *) cmd, 0, 16);
cmd[0] = SERVICE_ACTION_IN;
cmd[1] = SAI_READ_CAPACITY_16;
- cmd[13] = 12;
- memset((void *) buffer, 0, 12);
+ cmd[13] = 13;
+ memset((void *) buffer, 0, 13);
} else {
cmd[0] = READ_CAPACITY;
memset((void *) &cmd[1], 0, 9);
@@ -1195,7 +1307,7 @@ repeat:
}
the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
- buffer, longrc ? 12 : 8, &sshdr,
+ buffer, longrc ? 13 : 8, &sshdr,
SD_TIMEOUT, SD_MAX_RETRIES);
if (media_not_present(sdkp, &sshdr))
@@ -1270,6 +1382,8 @@ repeat:
sector_size = (buffer[8] << 24) |
(buffer[9] << 16) | (buffer[10] << 8) | buffer[11];
+
+ sd_read_protection_type(sdkp, buffer);
}
/* Some devices return the total number of sectors, not the
@@ -1531,6 +1645,52 @@ defaults:
sdkp->DPOFUA = 0;
}
+/*
+ * The ATO bit indicates whether the DIF application tag is available
+ * for use by the operating system.
+ */
+void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+ int res, offset;
+ struct scsi_device *sdp = sdkp->device;
+ struct scsi_mode_data data;
+ struct scsi_sense_hdr sshdr;
+
+ if (sdp->type != TYPE_DISK)
+ return;
+
+ if (sdkp->protection_type == 0)
+ return;
+
+ res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
+ SD_MAX_RETRIES, &data, &sshdr);
+
+ if (!scsi_status_is_good(res) || !data.header_length ||
+ data.length < 6) {
+ sd_printk(KERN_WARNING, sdkp,
+ "getting Control mode page failed, assume no ATO\n");
+
+ if (scsi_sense_valid(&sshdr))
+ sd_print_sense_hdr(sdkp, &sshdr);
+
+ return;
+ }
+
+ offset = data.header_length + data.block_descriptor_length;
+
+ if ((buffer[offset] & 0x3f) != 0x0a) {
+ sd_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
+ return;
+ }
+
+ if ((buffer[offset + 5] & 0x80) == 0)
+ return;
+
+ sdkp->ATO = 1;
+
+ return;
+}
+
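
sd_read_app_tag_own() finds the Control mode page (page code 0x0a) after the mode parameter header and block descriptors, then tests the ATO bit in byte 5 of the page. A small parser with the same offsets; the buffer in main() is hand-built for illustration, not real MODE SENSE output:

#include <stdio.h>
#include <stdint.h>

/* Return 1 if the ATO bit is set in a MODE SENSE buffer, 0 otherwise.
 * header_len/desc_len come from the parsed mode parameter header. */
static int control_page_ato(const uint8_t *buf, size_t buf_len,
                            size_t header_len, size_t desc_len)
{
    size_t off = header_len + desc_len;

    if (off + 6 > buf_len)
        return 0;
    if ((buf[off] & 0x3f) != 0x0a)      /* not the Control mode page */
        return 0;

    return (buf[off + 5] & 0x80) ? 1 : 0;
}

int main(void)
{
    /* 4-byte header, no block descriptors, then a Control page with ATO set */
    uint8_t buf[16] = { 0 };

    buf[4] = 0x0a;                      /* page code */
    buf[9] = 0x80;                      /* byte 5 of the page: ATO bit */
    printf("%d\n", control_page_ato(buf, sizeof(buf), 4, 0));
    return 0;
}
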
/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
@@ -1567,6 +1727,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sdkp->write_prot = 0;
sdkp->WCE = 0;
sdkp->RCD = 0;
+ sdkp->ATO = 0;
sd_spinup_disk(sdkp);
@@ -1578,6 +1739,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_capacity(sdkp, buffer);
sd_read_write_protect_flag(sdkp, buffer);
sd_read_cache_type(sdkp, buffer);
+ sd_read_app_tag_own(sdkp, buffer);
}
/*
@@ -1643,18 +1805,20 @@ static int sd_probe(struct device *dev)
if (!gd)
goto out_free;
- if (!idr_pre_get(&sd_index_idr, GFP_KERNEL))
- goto out_put;
+ do {
+ if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
+ goto out_put;
- spin_lock(&sd_index_lock);
- error = idr_get_new(&sd_index_idr, NULL, &index);
- spin_unlock(&sd_index_lock);
+ error = ida_get_new(&sd_index_ida, &index);
+ } while (error == -EAGAIN);
- if (index >= SD_MAX_DISKS)
- error = -EBUSY;
if (error)
goto out_put;
+ error = -EBUSY;
+ if (index >= SD_MAX_DISKS)
+ goto out_free_index;
+
sdkp->device = sdp;
sdkp->driver = &sd_template;
sdkp->disk = gd;
@@ -1675,7 +1839,7 @@ static int sd_probe(struct device *dev)
strncpy(sdkp->dev.bus_id, sdp->sdev_gendev.bus_id, BUS_ID_SIZE);
if (device_add(&sdkp->dev))
- goto out_put;
+ goto out_free_index;
get_device(&sdp->sdev_gendev);
@@ -1711,12 +1875,15 @@ static int sd_probe(struct device *dev)
dev_set_drvdata(dev, sdkp);
add_disk(gd);
+ sd_dif_config_host(sdkp);
sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
sdp->removable ? "removable " : "");
return 0;
+ out_free_index:
+ ida_remove(&sd_index_ida, index);
out_put:
put_disk(gd);
out_free:
@@ -1766,9 +1933,7 @@ static void scsi_disk_release(struct device *dev)
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct gendisk *disk = sdkp->disk;
- spin_lock(&sd_index_lock);
- idr_remove(&sd_index_idr, sdkp->index);
- spin_unlock(&sd_index_lock);
+ ida_remove(&sd_index_ida, sdkp->index);
disk->private_data = NULL;
put_disk(disk);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 03a3d45cfa42..95b9f06534d5 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -31,6 +31,12 @@
*/
#define SD_BUF_SIZE 512
+/*
+ * Number of sectors at the end of the device that we avoid issuing
+ * multi-sector accesses to when last_sector_bug is set
+ */
+#define SD_LAST_BUGGY_SECTORS 8
+
struct scsi_disk {
struct scsi_driver *driver; /* always &sd_template */
struct scsi_device *device;
@@ -41,7 +47,9 @@ struct scsi_disk {
u32 index;
u8 media_present;
u8 write_prot;
+ u8 protection_type;/* Data Integrity Field */
unsigned previous_state : 1;
+ unsigned ATO : 1; /* state of disk ATO bit */
unsigned WCE : 1; /* state of disk WCE bit */
unsigned RCD : 1; /* state of disk RCD bit, unused */
unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
@@ -59,4 +67,50 @@ static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
(sdsk)->disk->disk_name, ##a) : \
sdev_printk(prefix, (sdsk)->device, fmt, ##a)
+/*
+ * A DIF-capable target device can be formatted with different
+ * protection schemes. Currently 0 through 3 are defined:
+ *
+ * Type 0 is regular (unprotected) I/O
+ *
+ * Type 1 defines the contents of the guard and reference tags
+ *
+ * Type 2 defines the contents of the guard and reference tags and
+ * uses 32-byte commands to seed the latter
+ *
+ * Type 3 defines the contents of the guard tag only
+ */
+
+enum sd_dif_target_protection_types {
+ SD_DIF_TYPE0_PROTECTION = 0x0,
+ SD_DIF_TYPE1_PROTECTION = 0x1,
+ SD_DIF_TYPE2_PROTECTION = 0x2,
+ SD_DIF_TYPE3_PROTECTION = 0x3,
+};
+
+/*
+ * Data Integrity Field tuple.
+ */
+struct sd_dif_tuple {
+ __be16 guard_tag; /* Checksum */
+ __be16 app_tag; /* Opaque storage */
+ __be32 ref_tag; /* Target LBA or indirect LBA */
+};
+
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+extern void sd_dif_op(struct scsi_cmnd *, unsigned int, unsigned int);
+extern void sd_dif_config_host(struct scsi_disk *);
+extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int);
+extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+
+#define sd_dif_op(a, b, c) do { } while (0)
+#define sd_dif_config_host(a) do { } while (0)
+#define sd_dif_prepare(a, b, c) (0)
+#define sd_dif_complete(a, b) (0)
+
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
#endif /* _SCSI_DISK_H */
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
new file mode 100644
index 000000000000..4d17f3d35aac
--- /dev/null
+++ b/drivers/scsi/sd_dif.c
@@ -0,0 +1,538 @@
+/*
+ * sd_dif.c - SCSI Data Integrity Field
+ *
+ * Copyright (C) 2007, 2008 Oracle Corporation
+ * Written by: Martin K. Petersen <martin.petersen@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ */
+
+#include <linux/blkdev.h>
+#include <linux/crc-t10dif.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsicam.h>
+
+#include <net/checksum.h>
+
+#include "sd.h"
+
+typedef __u16 (csum_fn) (void *, unsigned int);
+
+static __u16 sd_dif_crc_fn(void *data, unsigned int len)
+{
+ return cpu_to_be16(crc_t10dif(data, len));
+}
+
+static __u16 sd_dif_ip_fn(void *data, unsigned int len)
+{
+ return ip_compute_csum(data, len);
+}
+
+/*
+ * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
+ * 16 bit app tag, 32 bit reference tag.
+ */
+static void sd_dif_type1_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
+{
+ void *buf = bix->data_buf;
+ struct sd_dif_tuple *sdt = bix->prot_buf;
+ sector_t sector = bix->sector;
+ unsigned int i;
+
+ for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
+ sdt->guard_tag = fn(buf, bix->sector_size);
+ sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
+ sdt->app_tag = 0;
+
+ buf += bix->sector_size;
+ sector++;
+ }
+}
+
+static void sd_dif_type1_generate_crc(struct blk_integrity_exchg *bix)
+{
+ sd_dif_type1_generate(bix, sd_dif_crc_fn);
+}
+
+static void sd_dif_type1_generate_ip(struct blk_integrity_exchg *bix)
+{
+ sd_dif_type1_generate(bix, sd_dif_ip_fn);
+}
+
+static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
+{
+ void *buf = bix->data_buf;
+ struct sd_dif_tuple *sdt = bix->prot_buf;
+ sector_t sector = bix->sector;
+ unsigned int i;
+ __u16 csum;
+
+ for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
+ /* Unwritten sectors */
+ if (sdt->app_tag == 0xffff)
+ return 0;
+
+ /* Bad ref tag received from disk */
+ if (sdt->ref_tag == 0xffffffff) {
+ printk(KERN_ERR
+ "%s: bad phys ref tag on sector %lu\n",
+ bix->disk_name, (unsigned long)sector);
+ return -EIO;
+ }
+
+ if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
+ printk(KERN_ERR
+ "%s: ref tag error on sector %lu (rcvd %u)\n",
+ bix->disk_name, (unsigned long)sector,
+ be32_to_cpu(sdt->ref_tag));
+ return -EIO;
+ }
+
+ csum = fn(buf, bix->sector_size);
+
+ if (sdt->guard_tag != csum) {
+ printk(KERN_ERR "%s: guard tag error on sector %lu " \
+ "(rcvd %04x, data %04x)\n", bix->disk_name,
+ (unsigned long)sector,
+ be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
+ return -EIO;
+ }
+
+ buf += bix->sector_size;
+ sector++;
+ }
+
+ return 0;
+}
+
+static int sd_dif_type1_verify_crc(struct blk_integrity_exchg *bix)
+{
+ return sd_dif_type1_verify(bix, sd_dif_crc_fn);
+}
+
+static int sd_dif_type1_verify_ip(struct blk_integrity_exchg *bix)
+{
+ return sd_dif_type1_verify(bix, sd_dif_ip_fn);
+}
+
+/*
+ * Functions for interleaving and deinterleaving application tags
+ */
+static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors)
+{
+ struct sd_dif_tuple *sdt = prot;
+ char *tag = tag_buf;
+ unsigned int i, j;
+
+ for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
+ sdt->app_tag = tag[j] << 8 | tag[j+1];
+ BUG_ON(sdt->app_tag == 0xffff);
+ }
+}
+
+static void sd_dif_type1_get_tag(void *prot, void *tag_buf, unsigned int sectors)
+{
+ struct sd_dif_tuple *sdt = prot;
+ char *tag = tag_buf;
+ unsigned int i, j;
+
+ for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
+ tag[j] = (sdt->app_tag & 0xff00) >> 8;
+ tag[j+1] = sdt->app_tag & 0xff;
+ }
+}
+
+static struct blk_integrity dif_type1_integrity_crc = {
+ .name = "T10-DIF-TYPE1-CRC",
+ .generate_fn = sd_dif_type1_generate_crc,
+ .verify_fn = sd_dif_type1_verify_crc,
+ .get_tag_fn = sd_dif_type1_get_tag,
+ .set_tag_fn = sd_dif_type1_set_tag,
+ .tuple_size = sizeof(struct sd_dif_tuple),
+ .tag_size = 0,
+};
+
+static struct blk_integrity dif_type1_integrity_ip = {
+ .name = "T10-DIF-TYPE1-IP",
+ .generate_fn = sd_dif_type1_generate_ip,
+ .verify_fn = sd_dif_type1_verify_ip,
+ .get_tag_fn = sd_dif_type1_get_tag,
+ .set_tag_fn = sd_dif_type1_set_tag,
+ .tuple_size = sizeof(struct sd_dif_tuple),
+ .tag_size = 0,
+};
+
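
The blk_integrity profiles above are essentially tables of function pointers, selected at registration time by protection type and guard format. The same ops-table idiom as standalone C; the guard functions are crude placeholders, not crc_t10dif() or ip_compute_csum():

#include <stdint.h>
#include <stdio.h>

struct integrity_profile {
    const char *name;
    uint16_t (*guard)(const uint8_t *data, unsigned int len);
};

static uint16_t crc_guard(const uint8_t *d, unsigned int len)
{
    uint16_t c = 0;
    while (len--)
        c = (uint16_t)(c * 31 + *d++);  /* stand-in, not the T10 CRC */
    return c;
}

static uint16_t ip_guard(const uint8_t *d, unsigned int len)
{
    uint32_t s = 0;
    while (len--)
        s += *d++;                      /* stand-in, not the IP checksum */
    return (uint16_t)~s;
}

static const struct integrity_profile profiles[] = {
    { "T10-DIF-TYPE1-CRC", crc_guard },
    { "T10-DIF-TYPE1-IP",  ip_guard },
};

int main(void)
{
    uint8_t sector[512] = { 0 };
    int want_ip = 1;                    /* e.g. the HBA prefers IP guards */
    const struct integrity_profile *p = &profiles[want_ip];

    printf("%s guard: %04x\n", p->name, p->guard(sector, sizeof(sector)));
    return 0;
}
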
+
+/*
+ * Type 3 protection has a 16-bit guard tag and 16 + 32 bits of opaque
+ * tag space.
+ */
+static void sd_dif_type3_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
+{
+ void *buf = bix->data_buf;
+ struct sd_dif_tuple *sdt = bix->prot_buf;
+ unsigned int i;
+
+ for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
+ sdt->guard_tag = fn(buf, bix->sector_size);
+ sdt->ref_tag = 0;
+ sdt->app_tag = 0;
+
+ buf += bix->sector_size;
+ }
+}
+
+static void sd_dif_type3_generate_crc(struct blk_integrity_exchg *bix)
+{
+ sd_dif_type3_generate(bix, sd_dif_crc_fn);
+}
+
+static void sd_dif_type3_generate_ip(struct blk_integrity_exchg *bix)
+{
+ sd_dif_type3_generate(bix, sd_dif_ip_fn);
+}
+
+static int sd_dif_type3_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
+{
+ void *buf = bix->data_buf;
+ struct sd_dif_tuple *sdt = bix->prot_buf;
+ sector_t sector = bix->sector;
+ unsigned int i;
+ __u16 csum;
+
+ for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
+ /* Unwritten sectors */
+ if (sdt->app_tag == 0xffff && sdt->ref_tag == 0xffffffff)
+ return 0;
+
+ csum = fn(buf, bix->sector_size);
+
+ if (sdt->guard_tag != csum) {
+ printk(KERN_ERR "%s: guard tag error on sector %lu " \
+ "(rcvd %04x, data %04x)\n", bix->disk_name,
+ (unsigned long)sector,
+ be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
+ return -EIO;
+ }
+
+ buf += bix->sector_size;
+ sector++;
+ }
+
+ return 0;
+}
+
+static int sd_dif_type3_verify_crc(struct blk_integrity_exchg *bix)
+{
+ return sd_dif_type3_verify(bix, sd_dif_crc_fn);
+}
+
+static int sd_dif_type3_verify_ip(struct blk_integrity_exchg *bix)
+{
+ return sd_dif_type3_verify(bix, sd_dif_ip_fn);
+}
+
+static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors)
+{
+ struct sd_dif_tuple *sdt = prot;
+ char *tag = tag_buf;
+ unsigned int i, j;
+
+ for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
+ sdt->app_tag = tag[j] << 8 | tag[j+1];
+ sdt->ref_tag = tag[j+2] << 24 | tag[j+3] << 16 |
+ tag[j+4] << 8 | tag[j+5];
+ }
+}
+
+static void sd_dif_type3_get_tag(void *prot, void *tag_buf, unsigned int sectors)
+{
+ struct sd_dif_tuple *sdt = prot;
+ char *tag = tag_buf;
+ unsigned int i, j;
+
+ for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
+ tag[j] = (sdt->app_tag & 0xff00) >> 8;
+ tag[j+1] = sdt->app_tag & 0xff;
+ tag[j+2] = (sdt->ref_tag & 0xff000000) >> 24;
+ tag[j+3] = (sdt->ref_tag & 0xff0000) >> 16;
+ tag[j+4] = (sdt->ref_tag & 0xff00) >> 8;
+ tag[j+5] = sdt->ref_tag & 0xff;
+ BUG_ON(sdt->app_tag == 0xffff || sdt->ref_tag == 0xffffffff);
+ }
+}
+
+static struct blk_integrity dif_type3_integrity_crc = {
+ .name = "T10-DIF-TYPE3-CRC",
+ .generate_fn = sd_dif_type3_generate_crc,
+ .verify_fn = sd_dif_type3_verify_crc,
+ .get_tag_fn = sd_dif_type3_get_tag,
+ .set_tag_fn = sd_dif_type3_set_tag,
+ .tuple_size = sizeof(struct sd_dif_tuple),
+ .tag_size = 0,
+};
+
+static struct blk_integrity dif_type3_integrity_ip = {
+ .name = "T10-DIF-TYPE3-IP",
+ .generate_fn = sd_dif_type3_generate_ip,
+ .verify_fn = sd_dif_type3_verify_ip,
+ .get_tag_fn = sd_dif_type3_get_tag,
+ .set_tag_fn = sd_dif_type3_set_tag,
+ .tuple_size = sizeof(struct sd_dif_tuple),
+ .tag_size = 0,
+};
+
+/*
+ * Configure exchange of protection information between OS and HBA.
+ */
+void sd_dif_config_host(struct scsi_disk *sdkp)
+{
+ struct scsi_device *sdp = sdkp->device;
+ struct gendisk *disk = sdkp->disk;
+ u8 type = sdkp->protection_type;
+
+ /* If this HBA doesn't support DIX, resort to normal I/O or DIF */
+ if (scsi_host_dix_capable(sdp->host, type) == 0) {
+
+ if (type == SD_DIF_TYPE0_PROTECTION)
+ return;
+
+ if (scsi_host_dif_capable(sdp->host, type) == 0) {
+ sd_printk(KERN_INFO, sdkp, "Type %d protection " \
+ "unsupported by HBA. Disabling DIF.\n", type);
+ sdkp->protection_type = 0;
+ return;
+ }
+
+ sd_printk(KERN_INFO, sdkp, "Enabling DIF Type %d protection\n",
+ type);
+
+ return;
+ }
+
+ /* Enable DMA of protection information */
+ if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP)
+ if (type == SD_DIF_TYPE3_PROTECTION)
+ blk_integrity_register(disk, &dif_type3_integrity_ip);
+ else
+ blk_integrity_register(disk, &dif_type1_integrity_ip);
+ else
+ if (type == SD_DIF_TYPE3_PROTECTION)
+ blk_integrity_register(disk, &dif_type3_integrity_crc);
+ else
+ blk_integrity_register(disk, &dif_type1_integrity_crc);
+
+ sd_printk(KERN_INFO, sdkp,
+ "Enabling %s integrity protection\n", disk->integrity->name);
+
+ /* Signal to block layer that we support sector tagging */
+ if (type && sdkp->ATO) {
+ if (type == SD_DIF_TYPE3_PROTECTION)
+ disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
+ else
+ disk->integrity->tag_size = sizeof(u16);
+
+ sd_printk(KERN_INFO, sdkp, "DIF application tag size %u\n",
+ disk->integrity->tag_size);
+ }
+}
+
+/*
+ * DIF DMA operation magic decoder ring.
+ */
+void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix)
+{
+ int csum_convert, prot_op;
+
+ prot_op = 0;
+
+ /* Convert checksum? */
+ if (scsi_host_get_guard(scmd->device->host) != SHOST_DIX_GUARD_CRC)
+ csum_convert = 1;
+ else
+ csum_convert = 0;
+
+ switch (scmd->cmnd[0]) {
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ if (dif && dix)
+ if (csum_convert)
+ prot_op = SCSI_PROT_READ_CONVERT;
+ else
+ prot_op = SCSI_PROT_READ_PASS;
+ else if (dif && !dix)
+ prot_op = SCSI_PROT_READ_STRIP;
+ else if (!dif && dix)
+ prot_op = SCSI_PROT_READ_INSERT;
+
+ break;
+
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ if (dif && dix)
+ if (csum_convert)
+ prot_op = SCSI_PROT_WRITE_CONVERT;
+ else
+ prot_op = SCSI_PROT_WRITE_PASS;
+ else if (dif && !dix)
+ prot_op = SCSI_PROT_WRITE_INSERT;
+ else if (!dif && dix)
+ prot_op = SCSI_PROT_WRITE_STRIP;
+
+ break;
+ }
+
+ scsi_set_prot_op(scmd, prot_op);
+ scsi_set_prot_type(scmd, dif);
+}
+
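
sd_dif_op() reduces to a small decision table over three inputs: command direction, whether the target is DIF-formatted, and whether the HBA exchanges protection data with the OS (DIX), with a 'convert' variant when the in-memory guard format is not the T10 CRC. A compact restatement of that table; the enum names are illustrative, not the SCSI layer's constants:

#include <stdio.h>

enum prot_op {
    PROT_NORMAL,            /* no protection handling */
    PROT_READ_STRIP,        /* DIF only: HBA strips PI on read */
    PROT_READ_INSERT,       /* DIX only: HBA generates PI towards the OS */
    PROT_READ_PASS,         /* DIF+DIX: pass PI through unchanged */
    PROT_READ_CONVERT,      /* DIF+DIX with checksum conversion */
    PROT_WRITE_INSERT,
    PROT_WRITE_STRIP,
    PROT_WRITE_PASS,
    PROT_WRITE_CONVERT,
};

static enum prot_op pick_prot_op(int is_write, int dif, int dix, int convert)
{
    if (dif && dix)
        return convert ? (is_write ? PROT_WRITE_CONVERT : PROT_READ_CONVERT)
                       : (is_write ? PROT_WRITE_PASS : PROT_READ_PASS);
    if (dif)                /* target formatted, HBA adds/strips PI itself */
        return is_write ? PROT_WRITE_INSERT : PROT_READ_STRIP;
    if (dix)                /* OS-side PI only */
        return is_write ? PROT_WRITE_STRIP : PROT_READ_INSERT;
    return PROT_NORMAL;
}

int main(void)
{
    printf("%d\n", pick_prot_op(1, 1, 0, 0));   /* WRITE to DIF-only setup */
    return 0;
}
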
+/*
+ * The virtual start sector is the one that was originally submitted
+ * by the block layer. Due to partitioning, MD/DM cloning, etc. the
+ * actual physical start sector is likely to be different. Remap
+ * protection information to match the physical LBA.
+ *
+ * From a protocol perspective there's a slight difference between
+ * Type 1 and 2. The latter uses 32-byte CDBs exclusively, and the
+ * reference tag is seeded in the CDB. This gives us the potential to
+ * avoid virt->phys remapping during write. However, at read time we
+ * don't know whether the virt sector is the same as when we wrote it
+ * (we could be reading from real disk as opposed to MD/DM device). So
+ * we always remap Type 2 making it identical to Type 1.
+ *
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_sz)
+{
+ const int tuple_sz = sizeof(struct sd_dif_tuple);
+ struct bio *bio;
+ struct scsi_disk *sdkp;
+ struct sd_dif_tuple *sdt;
+ unsigned int i, j;
+ u32 phys, virt;
+
+ /* Already remapped? */
+ if (rq->cmd_flags & REQ_INTEGRITY)
+ return 0;
+
+ sdkp = rq->bio->bi_bdev->bd_disk->private_data;
+
+ if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
+ return 0;
+
+ rq->cmd_flags |= REQ_INTEGRITY;
+ phys = hw_sector & 0xffffffff;
+
+ __rq_for_each_bio(bio, rq) {
+ struct bio_vec *iv;
+
+ virt = bio->bi_integrity->bip_sector & 0xffffffff;
+
+ bip_for_each_vec(iv, bio->bi_integrity, i) {
+ sdt = kmap_atomic(iv->bv_page, KM_USER0)
+ + iv->bv_offset;
+
+ for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+
+ if (be32_to_cpu(sdt->ref_tag) != virt)
+ goto error;
+
+ sdt->ref_tag = cpu_to_be32(phys);
+ virt++;
+ phys++;
+ }
+
+ kunmap_atomic(sdt, KM_USER0);
+ }
+ }
+
+ return 0;
+
+error:
+ kunmap_atomic(sdt, KM_USER0);
+ sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u\n",
+ __func__, virt, phys, be32_to_cpu(sdt->ref_tag));
+
+ return -EIO;
+}
+
+/*
+ * Remap physical sector values in the reference tag to the virtual
+ * values expected by the block layer.
+ */
+void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
+{
+ const int tuple_sz = sizeof(struct sd_dif_tuple);
+ struct scsi_disk *sdkp;
+ struct bio *bio;
+ struct sd_dif_tuple *sdt;
+ unsigned int i, j, sectors, sector_sz;
+ u32 phys, virt;
+
+ sdkp = scsi_disk(scmd->request->rq_disk);
+
+ if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION || good_bytes == 0)
+ return;
+
+ sector_sz = scmd->device->sector_size;
+ sectors = good_bytes / sector_sz;
+
+ phys = scmd->request->sector & 0xffffffff;
+ if (sector_sz == 4096)
+ phys >>= 3;
+
+ __rq_for_each_bio(bio, scmd->request) {
+ struct bio_vec *iv;
+
+ virt = bio->bi_integrity->bip_sector & 0xffffffff;
+
+ bip_for_each_vec(iv, bio->bi_integrity, i) {
+ sdt = kmap_atomic(iv->bv_page, KM_USER0)
+ + iv->bv_offset;
+
+ for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+
+ if (sectors == 0) {
+ kunmap_atomic(sdt, KM_USER0);
+ return;
+ }
+
+ if (be32_to_cpu(sdt->ref_tag) != phys &&
+ sdt->app_tag != 0xffff)
+ sdt->ref_tag = 0xffffffff; /* Bad ref */
+ else
+ sdt->ref_tag = cpu_to_be32(virt);
+
+ virt++;
+ phys++;
+ sectors--;
+ }
+
+ kunmap_atomic(sdt, KM_USER0);
+ }
+ }
+}
+
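The round trip performed by sd_dif_prepare() (virtual to physical reference tags before submission) and sd_dif_complete() (back to virtual on completion) can be modelled in user space. The sketch below is a simplified illustration, not part of the patch: the tuple layout mirrors struct sd_dif_tuple, htonl/ntohl stand in for cpu_to_be32/be32_to_cpu, and the completion-side tag-mismatch handling is omitted.

/*
 * Illustrative user-space model of the DIF reference-tag remapping.
 * All names here are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl/ntohl stand in for cpu_to_be32/be32_to_cpu */

struct tuple {
	uint16_t guard_tag;
	uint16_t app_tag;
	uint32_t ref_tag;	/* big-endian target LBA */
};

/* Remap ref tags from the virtual start sector to the physical one. */
static int prepare(struct tuple *t, int n, uint32_t virt, uint32_t phys)
{
	int i;

	for (i = 0; i < n; i++, virt++, phys++) {
		if (ntohl(t[i].ref_tag) != virt)
			return -1;	/* stale tag: fail like -EIO */
		t[i].ref_tag = htonl(phys);
	}
	return 0;
}

/* Undo the remap so the block layer sees the sectors it submitted. */
static void complete(struct tuple *t, int n, uint32_t virt)
{
	int i;

	for (i = 0; i < n; i++, virt++)
		t[i].ref_tag = htonl(virt);
}

int main(void)
{
	struct tuple t[4];
	uint32_t virt = 100, phys = 2148;	/* e.g. partition offset 2048 */
	int i;

	for (i = 0; i < 4; i++)
		t[i] = (struct tuple){ 0, 0, htonl(virt + i) };

	if (prepare(t, 4, virt, phys))
		return 1;
	printf("on the wire:   first ref tag %u\n", ntohl(t[0].ref_tag));

	complete(t, 4, virt);
	printf("back to the OS: first ref tag %u\n", ntohl(t[0].ref_tag));
	return 0;
}
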
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 4684cc716aa4..c2bb53e3d941 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -17,7 +17,7 @@
Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
*/
-static const char *verstr = "20080224";
+static const char *verstr = "20080504";
#include <linux/module.h>
@@ -631,7 +631,7 @@ static int cross_eof(struct scsi_tape * STp, int forward)
/* Flush the write buffer (never need to write if variable blocksize). */
static int st_flush_write_buffer(struct scsi_tape * STp)
{
- int offset, transfer, blks;
+ int transfer, blks;
int result;
unsigned char cmd[MAX_COMMAND_SIZE];
struct st_request *SRpnt;
@@ -644,14 +644,10 @@ static int st_flush_write_buffer(struct scsi_tape * STp)
result = 0;
if (STp->dirty == 1) {
- offset = (STp->buffer)->buffer_bytes;
- transfer = ((offset + STp->block_size - 1) /
- STp->block_size) * STp->block_size;
+ transfer = STp->buffer->buffer_bytes;
DEBC(printk(ST_DEB_MSG "%s: Flushing %d bytes.\n",
tape_name(STp), transfer));
- memset((STp->buffer)->b_data + offset, 0, transfer - offset);
-
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = WRITE_6;
cmd[1] = 1;
@@ -1670,6 +1666,7 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if (undone <= do_count) {
/* Only data from this write is not written */
count += undone;
+ b_point -= undone;
do_count -= undone;
if (STp->block_size)
blks = (transfer - undone) / STp->block_size;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index f308a0308829..3790906a77d1 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -467,7 +467,7 @@ stex_slave_alloc(struct scsi_device *sdev)
/* Cheat: usually extracted from Inquiry data */
sdev->tagged_supported = 1;
- scsi_activate_tcq(sdev, sdev->host->can_queue);
+ scsi_activate_tcq(sdev, ST_CMD_PER_LUN);
return 0;
}
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 22a6aae78699..98df1651404f 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -5741,6 +5741,8 @@ void sym_hcb_free(struct sym_hcb *np)
for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) {
tp = &np->target[target];
+ if (tp->luntbl)
+ sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
#if SYM_CONF_MAX_LUN > 1
kfree(tp->lunmp);
#endif
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 5b04ddfed26c..1723d71cbf3f 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -452,7 +452,7 @@ static int dc390_pci_map (struct dc390_srb* pSRB)
/* TODO: error handling */
if (pSRB->SGcount != 1)
error = 1;
- DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __FUNCTION__, pcmd->sense_buffer, cmdp->saved_dma_handle));
+ DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __func__, pcmd->sense_buffer, cmdp->saved_dma_handle));
/* Map SG list */
} else if (scsi_sg_count(pcmd)) {
int nseg;
@@ -466,7 +466,7 @@ static int dc390_pci_map (struct dc390_srb* pSRB)
if (nseg < 0)
error = 1;
DEBUG1(printk("%s(): Mapped SG %p with %d (%d) elements\n",\
- __FUNCTION__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd)));
+ __func__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd)));
/* Map single segment */
} else
pSRB->SGcount = 0;
@@ -483,11 +483,11 @@ static void dc390_pci_unmap (struct dc390_srb* pSRB)
if (pSRB->SRBFlag) {
pci_unmap_sg(pdev, &pSRB->Segmentx, 1, DMA_FROM_DEVICE);
- DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __FUNCTION__, cmdp->saved_dma_handle));
+ DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __func__, cmdp->saved_dma_handle));
} else {
scsi_dma_unmap(pcmd);
DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n",
- __FUNCTION__, scsi_sglist(pcmd), scsi_sg_count(pcmd)));
+ __func__, scsi_sglist(pcmd), scsi_sg_count(pcmd)));
}
}
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index c975c01b3a02..d4c13561f4a6 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -148,7 +148,7 @@
*
* 2002/10/04 - Alan Cox <alan@redhat.com>
*
- * Use dev_id for interrupts, kill __FUNCTION__ pasting
+ * Use dev_id for interrupts, kill __func__ pasting
* Add a lock for the scb pool, clean up all other cli/sti usage stuff
* Use the adapter lock for the other places we had the cli's
*
@@ -640,12 +640,12 @@ static int __init wd7000_setup(char *str)
(void) get_options(str, ARRAY_SIZE(ints), ints);
if (wd7000_card_num >= NUM_CONFIGS) {
- printk(KERN_ERR "%s: Too many \"wd7000=\" configurations in " "command line!\n", __FUNCTION__);
+ printk(KERN_ERR "%s: Too many \"wd7000=\" configurations in " "command line!\n", __func__);
return 0;
}
if ((ints[0] < 3) || (ints[0] > 5)) {
- printk(KERN_ERR "%s: Error in command line! " "Usage: wd7000=<IRQ>,<DMA>,IO>[,<BUS_ON>" "[,<BUS_OFF>]]\n", __FUNCTION__);
+ printk(KERN_ERR "%s: Error in command line! " "Usage: wd7000=<IRQ>,<DMA>,IO>[,<BUS_ON>" "[,<BUS_OFF>]]\n", __func__);
} else {
for (i = 0; i < NUM_IRQS; i++)
if (ints[1] == wd7000_irq[i])
@@ -1642,7 +1642,7 @@ static int wd7000_biosparam(struct scsi_device *sdev,
ip[2] = info[2];
if (info[0] == 255)
- printk(KERN_INFO "%s: current partition table is " "using extended translation.\n", __FUNCTION__);
+ printk(KERN_INFO "%s: current partition table is " "using extended translation.\n", __func__);
}
}
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 4b5f908d35c3..3c4a300494a4 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -68,11 +68,11 @@ lasi_scsi_clock(void * hpa, int defaultclock)
if (status == PDC_RET_OK) {
clock = (int) pdc_result[16];
} else {
- printk(KERN_WARNING "%s: pdc_iodc_read returned %d\n", __FUNCTION__, status);
+ printk(KERN_WARNING "%s: pdc_iodc_read returned %d\n", __func__, status);
clock = defaultclock;
}
- printk(KERN_DEBUG "%s: SCSI clock %d\n", __FUNCTION__, clock);
+ printk(KERN_DEBUG "%s: SCSI clock %d\n", __func__, clock);
return clock;
}
#endif
@@ -108,13 +108,13 @@ zalon_probe(struct parisc_device *dev)
*/
dev->irq = gsc_alloc_irq(&gsc_irq);
- printk(KERN_INFO "%s: Zalon version %d, IRQ %d\n", __FUNCTION__,
+ printk(KERN_INFO "%s: Zalon version %d, IRQ %d\n", __func__,
zalon_vers, dev->irq);
__raw_writel(gsc_irq.txn_addr | gsc_irq.txn_data, zalon + IO_MODULE_EIM);
if (zalon_vers == 0)
- printk(KERN_WARNING "%s: Zalon 1.1 or earlier\n", __FUNCTION__);
+ printk(KERN_WARNING "%s: Zalon 1.1 or earlier\n", __func__);
memset(&device, 0, sizeof(struct ncr_device));
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index e81d59d78910..0c7165660853 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -313,14 +313,14 @@ atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
xfer->tx_dma = dma_map_single(dev,
(void *) xfer->tx_buf, xfer->len,
DMA_TO_DEVICE);
- if (dma_mapping_error(xfer->tx_dma))
+ if (dma_mapping_error(dev, xfer->tx_dma))
return -ENOMEM;
}
if (xfer->rx_buf) {
xfer->rx_dma = dma_map_single(dev,
xfer->rx_buf, xfer->len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(xfer->rx_dma)) {
+ if (dma_mapping_error(dev, xfer->rx_dma)) {
if (xfer->tx_buf)
dma_unmap_single(dev,
xfer->tx_dma, xfer->len,
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index 9149689c79d9..87b73e0169c5 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -334,7 +334,7 @@ static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned size)
hw->dma_rx_tmpbuf_size = size;
hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf,
size, DMA_FROM_DEVICE);
- if (dma_mapping_error(hw->dma_rx_tmpbuf_addr)) {
+ if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) {
kfree(hw->dma_rx_tmpbuf);
hw->dma_rx_tmpbuf = 0;
hw->dma_rx_tmpbuf_size = 0;
@@ -378,7 +378,7 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
dma_rx_addr = dma_map_single(hw->dev,
(void *)t->rx_buf,
t->len, DMA_FROM_DEVICE);
- if (dma_mapping_error(dma_rx_addr))
+ if (dma_mapping_error(hw->dev, dma_rx_addr))
dev_err(hw->dev, "rx dma map error\n");
}
} else {
@@ -401,7 +401,7 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
dma_tx_addr = dma_map_single(hw->dev,
(void *)t->tx_buf,
t->len, DMA_TO_DEVICE);
- if (dma_mapping_error(dma_tx_addr))
+ if (dma_mapping_error(hw->dev, dma_tx_addr))
dev_err(hw->dev, "tx dma map error\n");
}
} else {
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index b1cc148036c1..f6f987bb71ca 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -836,7 +836,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
if (tx_buf != NULL) {
t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
len, DMA_TO_DEVICE);
- if (dma_mapping_error(t->tx_dma)) {
+ if (dma_mapping_error(&spi->dev, t->tx_dma)) {
dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
'T', len);
return -EINVAL;
@@ -845,7 +845,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
if (rx_buf != NULL) {
t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(t->rx_dma)) {
+ if (dma_mapping_error(&spi->dev, t->rx_dma)) {
dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
'R', len);
if (tx_buf != NULL)
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 0c452c46ab07..067299d6d192 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -353,7 +353,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
drv_data->rx_map_len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(drv_data->rx_dma))
+ if (dma_mapping_error(dev, drv_data->rx_dma))
return 0;
/* Stream map the tx buffer */
@@ -361,7 +361,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
drv_data->tx_map_len,
DMA_TO_DEVICE);
- if (dma_mapping_error(drv_data->tx_dma)) {
+ if (dma_mapping_error(dev, drv_data->tx_dma)) {
dma_unmap_single(dev, drv_data->rx_dma,
drv_data->rx_map_len, DMA_FROM_DEVICE);
return 0;
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 54ac7bea5f8c..6fb77fcc4971 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -491,7 +491,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
buf,
drv_data->tx_map_len,
DMA_TO_DEVICE);
- if (dma_mapping_error(drv_data->tx_dma))
+ if (dma_mapping_error(dev, drv_data->tx_dma))
return -1;
drv_data->tx_dma_needs_unmap = 1;
@@ -516,7 +516,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
buf,
drv_data->len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(drv_data->rx_dma))
+ if (dma_mapping_error(dev, drv_data->rx_dma))
return -1;
drv_data->rx_dma_needs_unmap = 1;
}
@@ -534,7 +534,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
buf,
drv_data->tx_map_len,
DMA_TO_DEVICE);
- if (dma_mapping_error(drv_data->tx_dma)) {
+ if (dma_mapping_error(dev, drv_data->tx_dma)) {
if (drv_data->rx_dma) {
dma_unmap_single(dev,
drv_data->rx_dma,
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 5e3e4e9b6c77..1f715436d6d3 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -87,7 +87,7 @@ struct mon_reader_text {
static struct dentry *mon_dir; /* Usually /sys/kernel/debug/usbmon */
-static void mon_text_ctor(struct kmem_cache *, void *);
+static void mon_text_ctor(void *);
struct mon_text_ptr {
int cnt, limit;
@@ -720,7 +720,7 @@ void mon_text_del(struct mon_bus *mbus)
/*
* Slab interface: constructor.
*/
-static void mon_text_ctor(struct kmem_cache *slab, void *mem)
+static void mon_text_ctor(void *mem)
{
/*
* Nothing to initialize. No, really!
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index 832a5a4f3cb3..cd9a2e138c8b 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -651,15 +651,17 @@ static int ipaq_open(struct tty_struct *tty,
*/
kfree(port->bulk_in_buffer);
+ kfree(port->bulk_out_buffer);
+ /* make sure the generic serial code knows */
+ port->bulk_out_buffer = NULL;
+
port->bulk_in_buffer = kmalloc(URBDATA_SIZE, GFP_KERNEL);
- if (port->bulk_in_buffer == NULL) {
- port->bulk_out_buffer = NULL; /* prevent double free */
+ if (port->bulk_in_buffer == NULL)
goto enomem;
- }
- kfree(port->bulk_out_buffer);
port->bulk_out_buffer = kmalloc(URBDATA_SIZE, GFP_KERNEL);
if (port->bulk_out_buffer == NULL) {
+ /* the buffer is useless, free it */
kfree(port->bulk_in_buffer);
port->bulk_in_buffer = NULL;
goto enomem;
diff --git a/drivers/video/am200epd.c b/drivers/video/am200epd.c
index 51e26c1f5e8b..32dd85126931 100644
--- a/drivers/video/am200epd.c
+++ b/drivers/video/am200epd.c
@@ -221,7 +221,7 @@ static int am200_setup_irq(struct fb_info *info)
return retval;
}
- return set_irq_type(IRQ_GPIO(RDY_GPIO_PIN), IRQT_FALLING);
+ return set_irq_type(IRQ_GPIO(RDY_GPIO_PIN), IRQ_TYPE_EDGE_FALLING);
}
static void am200_set_rst(struct metronomefb_par *par, int state)
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index a11cc2fdd4cd..4055dbdd1b42 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -370,7 +370,7 @@ static const struct consw sti_con = {
-int __init sticonsole_init(void)
+static int __init sticonsole_init(void)
{
/* already initialized ? */
if (sticon_sti)
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index e9ab657f0bb7..d7822af0e00a 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -29,7 +29,7 @@
#define STI_DRIVERVERSION "Version 0.9a"
-struct sti_struct *default_sti __read_mostly;
+static struct sti_struct *default_sti __read_mostly;
/* number of STI ROMS found and their ptrs to each struct */
static int num_sti_roms __read_mostly;
@@ -68,8 +68,7 @@ static const struct sti_init_flags default_init_flags = {
.init_cmap_tx = 1,
};
-int
-sti_init_graph(struct sti_struct *sti)
+static int sti_init_graph(struct sti_struct *sti)
{
struct sti_init_inptr_ext inptr_ext = { 0, };
struct sti_init_inptr inptr = {
@@ -100,8 +99,7 @@ static const struct sti_conf_flags default_conf_flags = {
.wait = STI_WAIT,
};
-void
-sti_inq_conf(struct sti_struct *sti)
+static void sti_inq_conf(struct sti_struct *sti)
{
struct sti_conf_inptr inptr = { 0, };
unsigned long flags;
@@ -237,8 +235,8 @@ static void sti_flush(unsigned long start, unsigned long end)
flush_icache_range(start, end);
}
-void __devinit
-sti_rom_copy(unsigned long base, unsigned long count, void *dest)
+static void __devinit sti_rom_copy(unsigned long base, unsigned long count,
+ void *dest)
{
unsigned long dest_start = (unsigned long) dest;
@@ -478,8 +476,8 @@ sti_init_glob_cfg(struct sti_struct *sti,
}
#ifdef CONFIG_FB
-struct sti_cooked_font * __devinit
-sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
+static struct sti_cooked_font __devinit
+*sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
{
const struct font_desc *fbfont;
unsigned int size, bpc;
@@ -534,16 +532,16 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
return cooked_font;
}
#else
-struct sti_cooked_font * __devinit
-sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
+static struct sti_cooked_font __devinit
+*sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
{
return NULL;
}
#endif
-struct sti_cooked_font * __devinit
-sti_select_font(struct sti_cooked_rom *rom,
- int (*search_font_fnc) (struct sti_cooked_rom *,int,int) )
+static struct sti_cooked_font __devinit
+*sti_select_font(struct sti_cooked_rom *rom,
+ int (*search_font_fnc)(struct sti_cooked_rom *, int, int))
{
struct sti_cooked_font *font;
int i;
@@ -707,8 +705,7 @@ sti_get_bmode_rom (unsigned long address)
return raw;
}
-struct sti_rom * __devinit
-sti_get_wmode_rom (unsigned long address)
+static struct sti_rom __devinit *sti_get_wmode_rom(unsigned long address)
{
struct sti_rom *raw;
unsigned long size;
@@ -723,8 +720,8 @@ sti_get_wmode_rom (unsigned long address)
return raw;
}
-int __devinit
-sti_read_rom(int wordmode, struct sti_struct *sti, unsigned long address)
+static int __devinit sti_read_rom(int wordmode, struct sti_struct *sti,
+ unsigned long address)
{
struct sti_cooked_rom *cooked;
struct sti_rom *raw = NULL;
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 5d84b3431098..6b487801eeae 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -35,7 +35,6 @@
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/fb.h>
-#include <linux/major.h>
#include <asm/fb.h>
diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c
index aa8c714d6245..b790ddff76f9 100644
--- a/drivers/video/macfb.c
+++ b/drivers/video/macfb.c
@@ -596,7 +596,7 @@ static struct fb_ops macfb_ops = {
.fb_imageblit = cfb_imageblit,
};
-void __init macfb_setup(char *options)
+static void __init macfb_setup(char *options)
{
char *this_opt;
diff --git a/drivers/video/omap/sossi.c b/drivers/video/omap/sossi.c
index 81dbcf53cf0e..fafd0f26b90f 100644
--- a/drivers/video/omap/sossi.c
+++ b/drivers/video/omap/sossi.c
@@ -646,7 +646,7 @@ static int sossi_init(struct omapfb_device *fbdev)
sossi_write_reg(SOSSI_INIT1_REG, l);
if ((r = request_irq(INT_1610_SoSSI_MATCH, sossi_match_irq,
- IRQT_FALLING,
+ IRQ_TYPE_EDGE_FALLING,
"sossi_match", sossi.fbdev->dev)) < 0) {
dev_err(sossi.fbdev->dev, "can't get SoSSI match IRQ\n");
goto err;
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 2b707a8ce5de..69de2fed6c58 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1336,7 +1336,7 @@ static int __devinit pxafb_map_video_memory(struct pxafb_info *fbi)
fbi->dma_buff_phys = fbi->map_dma;
fbi->palette_cpu = (u16 *) fbi->dma_buff->palette;
- pr_debug("pxafb: palette_mem_size = 0x%08lx\n", fbi->palette_size*sizeof(u16));
+ pr_debug("pxafb: palette_mem_size = 0x%08x\n", fbi->palette_size*sizeof(u16));
#ifdef CONFIG_FB_PXA_SMARTPANEL
fbi->smart_cmds = (uint16_t *) fbi->dma_buff->cmd_buff;
diff --git a/drivers/video/sticore.h b/drivers/video/sticore.h
index 1a9a60c74be3..7fe5be4bc70e 100644
--- a/drivers/video/sticore.h
+++ b/drivers/video/sticore.h
@@ -352,8 +352,6 @@ struct sti_struct *sti_get_rom(unsigned int index); /* 0: default sti */
/* functions to call the STI ROM directly */
-int sti_init_graph(struct sti_struct *sti);
-void sti_inq_conf(struct sti_struct *sti);
void sti_putc(struct sti_struct *sti, int c, int y, int x);
void sti_set(struct sti_struct *sti, int src_y, int src_x,
int height, int width, u8 color);
diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c
index 598d35eff935..166481402412 100644
--- a/drivers/video/stifb.c
+++ b/drivers/video/stifb.c
@@ -1078,8 +1078,7 @@ static struct fb_ops stifb_ops = {
* Initialization
*/
-int __init
-stifb_init_fb(struct sti_struct *sti, int bpp_pref)
+static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
{
struct fb_fix_screeninfo *fix;
struct fb_var_screeninfo *var;
@@ -1315,8 +1314,7 @@ static int stifb_disabled __initdata;
int __init
stifb_setup(char *options);
-int __init
-stifb_init(void)
+static int __init stifb_init(void)
{
struct sti_struct *sti;
struct sti_struct *def_sti;