author     Mauro Carvalho Chehab <mchehab@redhat.com>  2012-03-27 11:32:29 -0300
committer  Mauro Carvalho Chehab <mchehab@redhat.com>  2012-03-27 11:32:29 -0300
commit     50953e0640b3473dcb409d5d0d938c2742c93b0d (patch)
tree       3b0dc374e61564fbbd8adff92c8fae16fdeb423a /drivers/staging
parent     f92c97c8bd77992ff8bd6ef29a23dc82dca799cb (diff)
parent     626cf236608505d376e4799adb4f7eb00a8594af (diff)
Merge branch 'poll' into staging/for_v3.4
* poll: (5970 commits)
  poll: add poll_requested_events() and poll_does_not_wait() functions
  crc32: select an algorithm via Kconfig
  crc32: add self-test code for crc32c
  crypto: crc32c should use library implementation
  crc32: bolt on crc32c
  crc32: add note about this patchset to crc32.c
  crc32: optimize loop counter for x86
  crc32: add slice-by-8 algorithm to existing code
  crc32: make CRC_*_BITS definition correspond to actual bit counts
  crc32: fix mixing of endian-specific types
  crc32: miscellaneous cleanups
  crc32: simplify unit test code
  crc32: move long comment about crc32 fundamentals to Documentation/
  crc32: remove two instances of trailing whitespaces
  checkpatch: check for quoted strings broken across lines
  checkpatch: whitespace - add/remove blank lines
  checkpatch: warn on use of yield()
  checkpatch: add --strict tests for braces, comments and casts
  checkpatch: add [] to type extensions
  checkpatch: high precedence operators do not require additional parentheses in #defines
  ...
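The first entry in that list adds poll_requested_events() and poll_does_not_wait(), which let a driver's poll() method see which events the caller actually asked for and whether the call will sleep. A minimal sketch of how a driver might use them; the mydrv structure and the space_available()/data_ready() helpers are hypothetical:

#include <linux/poll.h>

static unsigned int mydrv_poll(struct file *file, poll_table *wait)
{
	struct mydrv *drv = file->private_data;	/* hypothetical device state */
	unsigned int mask = 0;

	/* Skip wait-queue registration when poll() will not sleep. */
	if (!poll_does_not_wait(wait))
		poll_wait(file, &drv->waitq, wait);

	/* Only run the (possibly expensive) writability check when the
	 * caller actually asked for POLLOUT events. */
	if ((poll_requested_events(wait) & POLLOUT) && space_available(drv))
		mask |= POLLOUT | POLLWRNORM;

	if (data_ready(drv))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}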
Diffstat (limited to 'drivers/staging')
-rw-r--r-- drivers/staging/Kconfig | 12
-rw-r--r-- drivers/staging/Makefile | 6
-rw-r--r-- drivers/staging/android/Kconfig | 86
-rw-r--r-- drivers/staging/android/Makefile | 3
-rw-r--r-- drivers/staging/android/TODO | 2
-rw-r--r-- drivers/staging/android/alarm-dev.c | 297
-rw-r--r-- drivers/staging/android/alarm.c | 601
-rw-r--r-- drivers/staging/android/android_alarm.h | 121
-rw-r--r-- drivers/staging/android/ashmem.c | 4
-rw-r--r-- drivers/staging/android/binder.c | 9
-rw-r--r-- drivers/staging/android/logger.c | 78
-rw-r--r-- drivers/staging/android/lowmemorykiller.c | 91
-rw-r--r-- drivers/staging/android/persistent_ram.c | 470
-rw-r--r-- drivers/staging/android/persistent_ram.h | 78
-rw-r--r-- drivers/staging/android/ram_console.c | 420
-rw-r--r-- drivers/staging/android/timed_gpio.c | 6
-rw-r--r-- drivers/staging/android/timed_gpio.h | 6
-rw-r--r-- drivers/staging/asus_oled/asus_oled.c | 19
-rw-r--r-- drivers/staging/bcm/Bcmchar.c | 41
-rw-r--r-- drivers/staging/bcm/CmHost.c | 3113
-rw-r--r-- drivers/staging/bcm/led_control.h | 80
-rw-r--r-- drivers/staging/comedi/Kconfig | 5
-rw-r--r-- drivers/staging/comedi/drivers/adv_pci_dio.c | 29
-rw-r--r-- drivers/staging/comedi/drivers/dt2801.c | 12
-rw-r--r-- drivers/staging/comedi/drivers/dt9812.c | 4
-rw-r--r-- drivers/staging/comedi/drivers/me4000.c | 12
-rw-r--r-- drivers/staging/comedi/drivers/ni_pcidio.c | 61
-rw-r--r-- drivers/staging/comedi/drivers/ni_pcimio.c | 27
-rw-r--r-- drivers/staging/comedi/drivers/unioxx5.c | 2
-rw-r--r-- drivers/staging/comedi/drivers/usbduxsigma.c | 42
-rw-r--r-- drivers/staging/crystalhd/bc_dts_glob_lnx.h | 3
-rw-r--r-- drivers/staging/crystalhd/bc_dts_types.h | 40
-rw-r--r-- drivers/staging/crystalhd/crystalhd.h | 14
-rw-r--r-- drivers/staging/crystalhd/crystalhd_cmds.c | 3
-rw-r--r-- drivers/staging/crystalhd/crystalhd_cmds.h | 4
-rw-r--r-- drivers/staging/crystalhd/crystalhd_hw.c | 11
-rw-r--r-- drivers/staging/crystalhd/crystalhd_hw.h | 3
-rw-r--r-- drivers/staging/crystalhd/crystalhd_lnx.c | 7
-rw-r--r-- drivers/staging/crystalhd/crystalhd_lnx.h | 5
-rw-r--r-- drivers/staging/crystalhd/crystalhd_misc.c | 5
-rw-r--r-- drivers/staging/crystalhd/crystalhd_misc.h | 34
-rw-r--r-- drivers/staging/et131x/README | 2
-rw-r--r-- drivers/staging/et131x/et131x.c | 10
-rw-r--r-- drivers/staging/et131x/et131x.h | 4
-rw-r--r-- drivers/staging/frontier/alphatrack.c | 2
-rw-r--r-- drivers/staging/frontier/tranzport.c | 2
-rw-r--r-- drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c | 7
-rw-r--r-- drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c | 6
-rw-r--r-- drivers/staging/hv/Kconfig | 5
-rw-r--r-- drivers/staging/hv/Makefile | 3
-rw-r--r-- drivers/staging/hv/TODO | 5
-rw-r--r-- drivers/staging/hv/storvsc_drv.c | 1586
-rw-r--r-- drivers/staging/iio/Documentation/device.txt | 2
-rw-r--r-- drivers/staging/iio/Documentation/iio_event_monitor.c | 241
-rw-r--r-- drivers/staging/iio/Documentation/inkernel.txt | 58
-rw-r--r-- drivers/staging/iio/Kconfig | 9
-rw-r--r-- drivers/staging/iio/Makefile | 4
-rw-r--r-- drivers/staging/iio/accel/adis16201_ring.c | 2
-rw-r--r-- drivers/staging/iio/accel/adis16203_ring.c | 2
-rw-r--r-- drivers/staging/iio/accel/adis16204_ring.c | 2
-rw-r--r-- drivers/staging/iio/accel/adis16209_ring.c | 2
-rw-r--r-- drivers/staging/iio/accel/adis16240_ring.c | 2
-rw-r--r-- drivers/staging/iio/accel/lis3l02dq.h | 2
-rw-r--r-- drivers/staging/iio/accel/lis3l02dq_ring.c | 4
-rw-r--r-- drivers/staging/iio/accel/sca3000.h | 2
-rw-r--r-- drivers/staging/iio/adc/Kconfig | 9
-rw-r--r-- drivers/staging/iio/adc/Makefile | 1
-rw-r--r-- drivers/staging/iio/adc/ad7192.c | 45
-rw-r--r-- drivers/staging/iio/adc/ad7291.c | 14
-rw-r--r-- drivers/staging/iio/adc/ad7298_ring.c | 3
-rw-r--r-- drivers/staging/iio/adc/ad7476_ring.c | 4
-rw-r--r-- drivers/staging/iio/adc/ad7606_core.c | 83
-rw-r--r-- drivers/staging/iio/adc/ad7606_par.c | 13
-rw-r--r-- drivers/staging/iio/adc/ad7606_ring.c | 2
-rw-r--r-- drivers/staging/iio/adc/ad7793.c | 2
-rw-r--r-- drivers/staging/iio/adc/ad7887_ring.c | 2
-rw-r--r-- drivers/staging/iio/adc/ad799x_core.c | 4
-rw-r--r-- drivers/staging/iio/adc/ad799x_ring.c | 4
-rw-r--r-- drivers/staging/iio/adc/adt7310.c | 21
-rw-r--r-- drivers/staging/iio/adc/adt7410.c | 21
-rw-r--r-- drivers/staging/iio/adc/lpc32xx_adc.c | 237
-rw-r--r-- drivers/staging/iio/adc/max1363_core.c | 50
-rw-r--r-- drivers/staging/iio/adc/max1363_ring.c | 2
-rw-r--r-- drivers/staging/iio/addac/adt7316-i2c.c | 18
-rw-r--r-- drivers/staging/iio/addac/adt7316-spi.c | 18
-rw-r--r-- drivers/staging/iio/addac/adt7316.c | 11
-rw-r--r-- drivers/staging/iio/addac/adt7316.h | 9
-rw-r--r-- drivers/staging/iio/buffer.h | 2
-rw-r--r-- drivers/staging/iio/cdc/ad7150.c | 10
-rw-r--r-- drivers/staging/iio/consumer.h | 96
-rw-r--r-- drivers/staging/iio/dac/Kconfig | 7
-rw-r--r-- drivers/staging/iio/dac/ad5064.c | 369
-rw-r--r-- drivers/staging/iio/dac/ad5360.c | 4
-rw-r--r-- drivers/staging/iio/dac/ad5380.c | 4
-rw-r--r-- drivers/staging/iio/dac/ad5421.c | 13
-rw-r--r-- drivers/staging/iio/dac/ad5446.c | 35
-rw-r--r-- drivers/staging/iio/dac/ad5686.c | 1
-rw-r--r-- drivers/staging/iio/dac/ad5764.c | 13
-rw-r--r-- drivers/staging/iio/dac/max517.c | 18
-rw-r--r-- drivers/staging/iio/dds/ad9834.c | 53
-rw-r--r-- drivers/staging/iio/driver.h | 34
-rw-r--r-- drivers/staging/iio/events.h | 4
-rw-r--r-- drivers/staging/iio/gyro/adis16260_ring.c | 2
-rw-r--r-- drivers/staging/iio/iio.h | 70
-rw-r--r-- drivers/staging/iio/iio_core.h | 4
-rw-r--r-- drivers/staging/iio/iio_dummy_evgen.c | 2
-rw-r--r-- drivers/staging/iio/iio_hwmon.c | 232
-rw-r--r-- drivers/staging/iio/iio_simple_dummy_buffer.c | 2
-rw-r--r-- drivers/staging/iio/impedance-analyzer/ad5933.c | 3
-rw-r--r-- drivers/staging/iio/imu/adis16400_ring.c | 2
-rw-r--r-- drivers/staging/iio/industrialio-buffer.c | 6
-rw-r--r-- drivers/staging/iio/industrialio-core.c | 658
-rw-r--r-- drivers/staging/iio/industrialio-event.c | 453
-rw-r--r-- drivers/staging/iio/inkern.c | 292
-rw-r--r-- drivers/staging/iio/kfifo_buf.c | 46
-rw-r--r-- drivers/staging/iio/kfifo_buf.h | 2
-rw-r--r-- drivers/staging/iio/light/isl29018.c | 7
-rw-r--r-- drivers/staging/iio/light/tsl2563.c | 65
-rw-r--r-- drivers/staging/iio/light/tsl2583.c | 19
-rw-r--r-- drivers/staging/iio/machine.h | 24
-rw-r--r-- drivers/staging/iio/magnetometer/ak8975.c | 8
-rw-r--r-- drivers/staging/iio/magnetometer/hmc5843.c | 26
-rw-r--r-- drivers/staging/iio/meter/ade7758_ring.c | 4
-rw-r--r-- drivers/staging/iio/meter/meter.h | 2
-rw-r--r-- drivers/staging/iio/ring_sw.c | 26
-rw-r--r-- drivers/staging/iio/ring_sw.h | 5
-rw-r--r-- drivers/staging/iio/trigger/iio-trig-bfin-timer.c | 12
-rw-r--r-- drivers/staging/iio/trigger/iio-trig-gpio.c | 12
-rw-r--r-- drivers/staging/iio/trigger/iio-trig-periodic-rtc.c | 12
-rw-r--r-- drivers/staging/iio/types.h | 4
-rw-r--r-- drivers/staging/keucr/TODO | 2
-rw-r--r-- drivers/staging/keucr/transport.h | 37
-rw-r--r-- drivers/staging/line6/capture.c | 54
-rw-r--r-- drivers/staging/line6/capture.h | 2
-rw-r--r-- drivers/staging/line6/driver.c | 2
-rw-r--r-- drivers/staging/line6/pcm.c | 109
-rw-r--r-- drivers/staging/line6/pcm.h | 167
-rw-r--r-- drivers/staging/line6/playback.c | 68
-rw-r--r-- drivers/staging/line6/playback.h | 2
-rw-r--r-- drivers/staging/line6/toneport.c | 12
-rw-r--r-- drivers/staging/line6/usbdefs.h | 44
-rw-r--r-- drivers/staging/media/lirc/lirc_sasem.c | 17
-rw-r--r-- drivers/staging/mei/TODO | 3
-rw-r--r-- drivers/staging/mei/hw.h | 47
-rw-r--r-- drivers/staging/mei/init.c | 24
-rw-r--r-- drivers/staging/mei/interface.c | 72
-rw-r--r-- drivers/staging/mei/interface.h | 7
-rw-r--r-- drivers/staging/mei/interrupt.c | 106
-rw-r--r-- drivers/staging/mei/iorw.c | 17
-rw-r--r-- drivers/staging/mei/main.c | 14
-rw-r--r-- drivers/staging/mei/mei-amt-version.c | 481
-rw-r--r-- drivers/staging/mei/mei.h | 127
-rw-r--r-- drivers/staging/mei/mei.txt | 6
-rw-r--r-- drivers/staging/mei/mei_dev.h | 10
-rw-r--r-- drivers/staging/mei/mei_version.h | 31
-rw-r--r-- drivers/staging/mei/wd.c | 8
-rw-r--r-- drivers/staging/nvec/Kconfig | 6
-rw-r--r-- drivers/staging/nvec/nvec.c | 19
-rw-r--r-- drivers/staging/nvec/nvec_ps2.c | 53
-rw-r--r-- drivers/staging/octeon/ethernet-mdio.c | 4
-rw-r--r-- drivers/staging/omapdrm/omap_crtc.c | 37
-rw-r--r-- drivers/staging/omapdrm/omap_debugfs.c | 97
-rw-r--r-- drivers/staging/omapdrm/omap_dmm_tiler.c | 91
-rw-r--r-- drivers/staging/omapdrm/omap_dmm_tiler.h | 15
-rw-r--r-- drivers/staging/omapdrm/omap_drv.c | 16
-rw-r--r-- drivers/staging/omapdrm/omap_drv.h | 19
-rw-r--r-- drivers/staging/omapdrm/omap_fb.c | 124
-rw-r--r-- drivers/staging/omapdrm/omap_fbdev.c | 26
-rw-r--r-- drivers/staging/omapdrm/omap_gem.c | 172
-rw-r--r-- drivers/staging/omapdrm/omap_gem_helpers.c | 2
-rw-r--r-- drivers/staging/omapdrm/omap_plane.c | 197
-rw-r--r-- drivers/staging/ozwpan/Kbuild | 19
-rw-r--r-- drivers/staging/ozwpan/Kconfig | 9
-rw-r--r-- drivers/staging/ozwpan/README | 25
-rw-r--r-- drivers/staging/ozwpan/TODO | 12
-rw-r--r-- drivers/staging/ozwpan/ozappif.h | 46
-rw-r--r-- drivers/staging/ozwpan/ozcdev.c | 521
-rw-r--r-- drivers/staging/ozwpan/ozcdev.h | 18
-rw-r--r-- drivers/staging/ozwpan/ozconfig.h | 27
-rw-r--r-- drivers/staging/ozwpan/ozeltbuf.c | 339
-rw-r--r-- drivers/staging/ozwpan/ozeltbuf.h | 70
-rw-r--r-- drivers/staging/ozwpan/ozevent.c | 116
-rw-r--r-- drivers/staging/ozwpan/ozevent.h | 31
-rw-r--r-- drivers/staging/ozwpan/ozeventdef.h | 47
-rw-r--r-- drivers/staging/ozwpan/ozhcd.c | 2256
-rw-r--r-- drivers/staging/ozwpan/ozhcd.h | 15
-rw-r--r-- drivers/staging/ozwpan/ozmain.c | 58
-rw-r--r-- drivers/staging/ozwpan/ozpd.c | 832
-rw-r--r-- drivers/staging/ozwpan/ozpd.h | 121
-rw-r--r-- drivers/staging/ozwpan/ozproto.c | 957
-rw-r--r-- drivers/staging/ozwpan/ozproto.h | 69
-rw-r--r-- drivers/staging/ozwpan/ozprotocol.h | 372
-rw-r--r-- drivers/staging/ozwpan/oztrace.c | 36
-rw-r--r-- drivers/staging/ozwpan/oztrace.h | 35
-rw-r--r-- drivers/staging/ozwpan/ozurbparanoia.c | 53
-rw-r--r-- drivers/staging/ozwpan/ozurbparanoia.h | 19
-rw-r--r-- drivers/staging/ozwpan/ozusbif.h | 43
-rw-r--r-- drivers/staging/ozwpan/ozusbsvc.c | 245
-rw-r--r-- drivers/staging/ozwpan/ozusbsvc.h | 32
-rw-r--r-- drivers/staging/ozwpan/ozusbsvc1.c | 437
-rw-r--r-- drivers/staging/quatech_usb2/quatech_usb2.c | 40
-rw-r--r-- drivers/staging/quickstart/quickstart.c | 370
-rw-r--r-- drivers/staging/ramster/Kconfig | 17
-rw-r--r-- drivers/staging/ramster/Makefile | 1
-rw-r--r-- drivers/staging/ramster/TODO | 13
-rw-r--r-- drivers/staging/ramster/cluster/Makefile | 3
-rw-r--r-- drivers/staging/ramster/cluster/heartbeat.c | 464
-rw-r--r-- drivers/staging/ramster/cluster/heartbeat.h | 87
-rw-r--r-- drivers/staging/ramster/cluster/masklog.c | 155
-rw-r--r-- drivers/staging/ramster/cluster/masklog.h | 220
-rw-r--r-- drivers/staging/ramster/cluster/nodemanager.c | 992
-rw-r--r-- drivers/staging/ramster/cluster/nodemanager.h | 88
-rw-r--r-- drivers/staging/ramster/cluster/ramster_nodemanager.h | 39
-rw-r--r-- drivers/staging/ramster/cluster/tcp.c | 2256
-rw-r--r-- drivers/staging/ramster/cluster/tcp.h | 159
-rw-r--r-- drivers/staging/ramster/cluster/tcp_internal.h | 248
-rw-r--r-- drivers/staging/ramster/r2net.c | 401
-rw-r--r-- drivers/staging/ramster/ramster.h | 118
-rw-r--r-- drivers/staging/ramster/tmem.c | 851
-rw-r--r-- drivers/staging/ramster/tmem.h | 244
-rw-r--r-- drivers/staging/ramster/xvmalloc.c (renamed from drivers/staging/zram/xvmalloc.c) | 39
-rw-r--r-- drivers/staging/ramster/xvmalloc.h (renamed from drivers/staging/zram/xvmalloc.h) | 0
-rw-r--r-- drivers/staging/ramster/xvmalloc_int.h (renamed from drivers/staging/zram/xvmalloc_int.h) | 0
-rw-r--r-- drivers/staging/ramster/zcache-main.c | 3320
-rw-r--r-- drivers/staging/ramster/zcache.h | 22
-rw-r--r-- drivers/staging/rtl8187se/r8180_core.c | 111
-rw-r--r-- drivers/staging/rtl8187se/r8180_dm.c | 1792
-rw-r--r-- drivers/staging/rtl8187se/r8180_wx.c | 286
-rw-r--r-- drivers/staging/rtl8192e/rtllib_rx.c | 2
-rw-r--r-- drivers/staging/rtl8192e/rtllib_softmac.c | 3
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/cipher.c | 8
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/digest.c | 8
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c | 2
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/internal.h | 17
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/kmap_types.h | 20
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/scatterwalk.c | 19
-rw-r--r-- drivers/staging/rtl8192u/r8192U_core.c | 1
-rw-r--r-- drivers/staging/rtl8712/Kconfig | 7
-rw-r--r-- drivers/staging/rtl8712/drv_types.h | 1
-rw-r--r-- drivers/staging/rtl8712/os_intfs.c | 6
-rw-r--r-- drivers/staging/rtl8712/osdep_service.h | 17
-rw-r--r-- drivers/staging/rtl8712/rtl8712_recv.c | 2
-rw-r--r-- drivers/staging/rtl8712/rtl871x_io.c | 1
-rw-r--r-- drivers/staging/rtl8712/rtl871x_io.h | 1
-rw-r--r-- drivers/staging/rtl8712/rtl871x_ioctl_linux.c | 8
-rw-r--r-- drivers/staging/rtl8712/rtl871x_pwrctrl.c | 11
-rw-r--r-- drivers/staging/rtl8712/rtl871x_pwrctrl.h | 1
-rw-r--r-- drivers/staging/rtl8712/rtl871x_recv.c | 1
-rw-r--r-- drivers/staging/rtl8712/rtl871x_recv.h | 3
-rw-r--r-- drivers/staging/rtl8712/rtl871x_sta_mgt.c | 4
-rw-r--r-- drivers/staging/rtl8712/rtl871x_xmit.c | 3
-rw-r--r-- drivers/staging/rtl8712/rtl871x_xmit.h | 3
-rw-r--r-- drivers/staging/rtl8712/sta_info.h | 4
-rw-r--r-- drivers/staging/rtl8712/usb_intf.c | 9
-rw-r--r-- drivers/staging/rts5139/TODO | 6
-rw-r--r-- drivers/staging/rts5139/ms.h | 4
-rw-r--r-- drivers/staging/rts5139/rts51x_chip.c | 14
-rw-r--r-- drivers/staging/rts5139/rts51x_chip.h | 6
-rw-r--r-- drivers/staging/rts5139/rts51x_fop.h | 2
-rw-r--r-- drivers/staging/rts5139/rts51x_transport.c | 2
-rw-r--r-- drivers/staging/rts5139/rts51x_transport.h | 2
-rw-r--r-- drivers/staging/rts5139/sd_cprm.c | 2
-rw-r--r-- drivers/staging/rts_pstor/TODO | 6
-rw-r--r-- drivers/staging/sbe-2t3e3/intr.c | 2
-rw-r--r-- drivers/staging/sep/Kconfig | 3
-rw-r--r-- drivers/staging/sep/Makefile | 5
-rw-r--r-- drivers/staging/sep/TODO | 5
-rw-r--r-- drivers/staging/sep/sep_crypto.c | 4058
-rw-r--r-- drivers/staging/sep/sep_crypto.h | 359
-rw-r--r-- drivers/staging/sep/sep_dev.h | 98
-rw-r--r-- drivers/staging/sep/sep_driver.c | 2932
-rw-r--r-- drivers/staging/sep/sep_driver_api.h | 293
-rw-r--r-- drivers/staging/sep/sep_driver_config.h | 79
-rw-r--r-- drivers/staging/sep/sep_driver_hw_defs.h | 185
-rw-r--r-- drivers/staging/sep/sep_main.c | 4518
-rw-r--r-- drivers/staging/sep/sep_trace_events.h | 188
-rw-r--r-- drivers/staging/serqt_usb2/serqt_usb2.c | 40
-rw-r--r-- drivers/staging/slicoss/README | 2
-rw-r--r-- drivers/staging/sm7xx/smtcfb.c | 3
-rw-r--r-- drivers/staging/sm7xx/smtcfb.h | 2
-rw-r--r-- drivers/staging/speakup/main.c | 8
-rw-r--r-- drivers/staging/speakup/serialio.c | 11
-rw-r--r-- drivers/staging/speakup/serialio.h | 13
-rw-r--r-- drivers/staging/speakup/spk_priv.h | 2
-rw-r--r-- drivers/staging/speakup/synth.c | 2
-rw-r--r-- drivers/staging/telephony/Kconfig | 47
-rw-r--r-- drivers/staging/telephony/Makefile | 7
-rw-r--r-- drivers/staging/telephony/TODO | 10
-rw-r--r-- drivers/staging/telephony/ixj-ver.h | 4
-rw-r--r-- drivers/staging/telephony/ixj.c | 10571
-rw-r--r-- drivers/staging/telephony/ixj.h | 1322
-rw-r--r-- drivers/staging/telephony/ixj_pcmcia.c | 187
-rw-r--r-- drivers/staging/telephony/phonedev.c | 167
-rw-r--r-- drivers/staging/tidspbridge/Kconfig | 22
-rw-r--r-- drivers/staging/tidspbridge/Makefile | 4
-rw-r--r-- drivers/staging/tidspbridge/core/chnl_sm.c | 34
-rw-r--r-- drivers/staging/tidspbridge/core/dsp-clock.c | 3
-rw-r--r-- drivers/staging/tidspbridge/core/io_sm.c | 29
-rw-r--r-- drivers/staging/tidspbridge/core/msg_sm.c | 3
-rw-r--r-- drivers/staging/tidspbridge/core/tiomap3430.c | 19
-rw-r--r-- drivers/staging/tidspbridge/core/tiomap3430_pwr.c | 1
-rw-r--r-- drivers/staging/tidspbridge/core/tiomap_io.c | 18
-rw-r--r-- drivers/staging/tidspbridge/core/wdt.c | 24
-rw-r--r-- drivers/staging/tidspbridge/gen/gh.c | 18
-rw-r--r-- drivers/staging/tidspbridge/gen/uuidutil.c | 7
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h | 4
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/chnl.h | 29
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/cmm.h | 30
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/cod.h | 29
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/dbc.h | 46
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/dev.h | 27
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/disp.h | 31
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/dmm.h | 4
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/drv.h | 23
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/gh.h | 2
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/io.h | 29
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/io_sm.h | 2
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/msg.h | 27
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/nldr.h | 2
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h | 34
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/node.h | 41
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/nodepriv.h | 1
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/proc.h | 28
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/rmm.h | 25
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/strm.h | 38
-rw-r--r-- drivers/staging/tidspbridge/pmgr/chnl.c | 47
-rw-r--r-- drivers/staging/tidspbridge/pmgr/cmm.c | 97
-rw-r--r-- drivers/staging/tidspbridge/pmgr/cod.c | 103
-rw-r--r-- drivers/staging/tidspbridge/pmgr/dbll.c | 125
-rw-r--r-- drivers/staging/tidspbridge/pmgr/dev.c | 182
-rw-r--r-- drivers/staging/tidspbridge/pmgr/dmm.c | 46
-rw-r--r-- drivers/staging/tidspbridge/pmgr/dspapi.c | 82
-rw-r--r-- drivers/staging/tidspbridge/pmgr/io.c | 45
-rw-r--r-- drivers/staging/tidspbridge/pmgr/msg.c | 38
-rw-r--r-- drivers/staging/tidspbridge/rmgr/dbdcd.c | 103
-rw-r--r-- drivers/staging/tidspbridge/rmgr/disp.c | 69
-rw-r--r-- drivers/staging/tidspbridge/rmgr/drv.c | 74
-rw-r--r-- drivers/staging/tidspbridge/rmgr/drv_interface.c | 366
-rw-r--r-- drivers/staging/tidspbridge/rmgr/drv_interface.h | 28
-rw-r--r-- drivers/staging/tidspbridge/rmgr/dspdrv.c | 5
-rw-r--r-- drivers/staging/tidspbridge/rmgr/mgr.c | 45
-rw-r--r-- drivers/staging/tidspbridge/rmgr/nldr.c | 99
-rw-r--r-- drivers/staging/tidspbridge/rmgr/node.c | 129
-rw-r--r-- drivers/staging/tidspbridge/rmgr/proc.c | 119
-rw-r--r-- drivers/staging/tidspbridge/rmgr/rmm.c | 56
-rw-r--r-- drivers/staging/tidspbridge/rmgr/strm.c | 114
-rw-r--r-- drivers/staging/usbip/stub.h | 1
-rw-r--r-- drivers/staging/usbip/stub_dev.c | 2
-rw-r--r-- drivers/staging/usbip/stub_rx.c | 9
-rw-r--r-- drivers/staging/usbip/usbip_common.c | 11
-rw-r--r-- drivers/staging/usbip/usbip_common.h | 2
-rw-r--r-- drivers/staging/usbip/vhci_hcd.c | 41
-rw-r--r-- drivers/staging/usbip/vhci_rx.c | 3
-rw-r--r-- drivers/staging/vme/devices/vme_pio2.h | 4
-rw-r--r-- drivers/staging/vme/devices/vme_pio2_gpio.c | 4
-rw-r--r-- drivers/staging/vme/vme.h | 2
-rw-r--r-- drivers/staging/vt6655/bssdb.c | 4
-rw-r--r-- drivers/staging/vt6655/ioctl.c | 23
-rw-r--r-- drivers/staging/vt6656/bssdb.c | 4
-rw-r--r-- drivers/staging/vt6656/iwctl.c | 230
-rw-r--r-- drivers/staging/vt6656/iwctl.h | 13
-rw-r--r-- drivers/staging/vt6656/main_usb.c | 13
-rw-r--r-- drivers/staging/vt6656/wpactl.c | 937
-rw-r--r-- drivers/staging/wlags49_h2/wl_cs.c | 7
-rw-r--r-- drivers/staging/wlan-ng/cfg80211.c | 2
-rw-r--r-- drivers/staging/wlan-ng/p80211netdev.c | 2
-rw-r--r-- drivers/staging/wlan-ng/prism2mgmt.c | 1
-rw-r--r-- drivers/staging/xgifb/XGI_main.h | 78
-rw-r--r-- drivers/staging/xgifb/XGI_main_26.c | 208
-rw-r--r-- drivers/staging/xgifb/XGIfb.h | 2
-rw-r--r-- drivers/staging/xgifb/vb_def.h | 178
-rw-r--r-- drivers/staging/xgifb/vb_init.c | 20
-rw-r--r-- drivers/staging/xgifb/vb_setmode.c | 836
-rw-r--r-- drivers/staging/xgifb/vb_struct.h | 79
-rw-r--r-- drivers/staging/xgifb/vb_table.h | 346
-rw-r--r-- drivers/staging/xgifb/vgatypes.h | 9
-rw-r--r-- drivers/staging/zcache/Kconfig | 13
-rw-r--r-- drivers/staging/zcache/tmem.h | 2
-rw-r--r-- drivers/staging/zcache/zcache-main.c | 259
-rw-r--r-- drivers/staging/zram/Kconfig | 10
-rw-r--r-- drivers/staging/zram/Makefile | 1
-rw-r--r-- drivers/staging/zram/zram_drv.c | 142
-rw-r--r-- drivers/staging/zram/zram_drv.h | 12
-rw-r--r-- drivers/staging/zram/zram_sysfs.c | 4
-rw-r--r-- drivers/staging/zsmalloc/Kconfig | 14
-rw-r--r-- drivers/staging/zsmalloc/Makefile | 3
-rw-r--r-- drivers/staging/zsmalloc/zsmalloc-main.c | 745
-rw-r--r-- drivers/staging/zsmalloc/zsmalloc.h | 31
-rw-r--r-- drivers/staging/zsmalloc/zsmalloc_int.h | 155
389 files changed, 49701 insertions, 15305 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 9e6347249783..97d412d91458 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -64,8 +64,6 @@ source "drivers/staging/phison/Kconfig"
source "drivers/staging/line6/Kconfig"
-source "drivers/gpu/drm/nouveau/Kconfig"
-
source "drivers/staging/octeon/Kconfig"
source "drivers/staging/serqt_usb2/Kconfig"
@@ -76,8 +74,6 @@ source "drivers/staging/vt6655/Kconfig"
source "drivers/staging/vt6656/Kconfig"
-source "drivers/staging/hv/Kconfig"
-
source "drivers/staging/vme/Kconfig"
source "drivers/staging/sep/Kconfig"
@@ -88,6 +84,8 @@ source "drivers/staging/zram/Kconfig"
source "drivers/staging/zcache/Kconfig"
+source "drivers/staging/zsmalloc/Kconfig"
+
source "drivers/staging/wlags49_h2/Kconfig"
source "drivers/staging/wlags49_h25/Kconfig"
@@ -128,4 +126,10 @@ source "drivers/staging/omapdrm/Kconfig"
source "drivers/staging/android/Kconfig"
+source "drivers/staging/telephony/Kconfig"
+
+source "drivers/staging/ramster/Kconfig"
+
+source "drivers/staging/ozwpan/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 943e14830753..ffe7d44374e6 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -29,13 +29,12 @@ obj-$(CONFIG_USB_SERIAL_QUATECH_USB2) += quatech_usb2/
obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
obj-$(CONFIG_VT6655) += vt6655/
obj-$(CONFIG_VT6656) += vt6656/
-obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_ZRAM) += zram/
-obj-$(CONFIG_XVMALLOC) += zram/
obj-$(CONFIG_ZCACHE) += zcache/
+obj-$(CONFIG_ZSMALLOC) += zsmalloc/
obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/
obj-$(CONFIG_FB_SM7XX) += sm7xx/
@@ -55,3 +54,6 @@ obj-$(CONFIG_INTEL_MEI) += mei/
obj-$(CONFIG_MFD_NVEC) += nvec/
obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_ANDROID) += android/
+obj-$(CONFIG_PHONE) += telephony/
+obj-$(CONFIG_RAMSTER) += ramster/
+obj-$(CONFIG_USB_WPAN_HCD) += ozwpan/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index fef3580ce8de..08a3b1133d29 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -25,65 +25,17 @@ config ANDROID_LOGGER
tristate "Android log driver"
default n
-config ANDROID_RAM_CONSOLE
- bool "Android RAM buffer console"
- depends on !S390 && !UML
- default n
-
-config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
- bool "Enable verbose console messages on Android RAM console"
- default y
- depends on ANDROID_RAM_CONSOLE
-
-menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION
- bool "Android RAM Console Enable error correction"
- default n
- depends on ANDROID_RAM_CONSOLE
- depends on !ANDROID_RAM_CONSOLE_EARLY_INIT
+config ANDROID_PERSISTENT_RAM
+ bool
select REED_SOLOMON
select REED_SOLOMON_ENC8
select REED_SOLOMON_DEC8
-if ANDROID_RAM_CONSOLE_ERROR_CORRECTION
-
-config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
- int "Android RAM Console Data data size"
- default 128
- help
- Must be a power of 2.
-
-config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
- int "Android RAM Console ECC size"
- default 16
-
-config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
- int "Android RAM Console Symbol size"
- default 8
-
-config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
- hex "Android RAM Console Polynomial"
- default 0x19 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 4)
- default 0x29 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 5)
- default 0x61 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 6)
- default 0x89 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 7)
- default 0x11d if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 8)
-
-endif # ANDROID_RAM_CONSOLE_ERROR_CORRECTION
-
-config ANDROID_RAM_CONSOLE_EARLY_INIT
- bool "Start Android RAM console early"
+config ANDROID_RAM_CONSOLE
+ bool "Android RAM buffer console"
+ depends on !S390 && !UML
+ select ANDROID_PERSISTENT_RAM
default n
- depends on ANDROID_RAM_CONSOLE
-
-config ANDROID_RAM_CONSOLE_EARLY_ADDR
- hex "Android RAM console virtual address"
- default 0
- depends on ANDROID_RAM_CONSOLE_EARLY_INIT
-
-config ANDROID_RAM_CONSOLE_EARLY_SIZE
- hex "Android RAM console buffer size"
- default 0
- depends on ANDROID_RAM_CONSOLE_EARLY_INIT
config ANDROID_TIMED_OUTPUT
bool "Timed output class driver"
@@ -102,6 +54,32 @@ config ANDROID_LOW_MEMORY_KILLER
source "drivers/staging/android/switch/Kconfig"
+config ANDROID_INTF_ALARM
+ bool "Android alarm driver"
+ depends on RTC_CLASS
+ default n
+ help
+ Provides non-wakeup and rtc backed wakeup alarms based on rtc or
+ elapsed realtime, and a non-wakeup alarm on the monotonic clock.
+ Also provides an interface to set the wall time which must be used
+ for elapsed realtime to work.
+
+config ANDROID_INTF_ALARM_DEV
+ bool "Android alarm device"
+ depends on ANDROID_INTF_ALARM
+ default y
+ help
+ Exports the alarm interface to user-space.
+
+config ANDROID_ALARM_OLDDRV_COMPAT
+ bool "Android Alarm compatability with old drivers"
+ depends on ANDROID_INTF_ALARM
+ default n
+ help
+ Provides preprocessor alias to aid compatability with
+ older out-of-tree drivers that use the Android Alarm
+ in-kernel API. This will be removed eventually.
+
endif # if ANDROID
endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 5fcc24ffdd58..9b6c9ed91f69 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -1,8 +1,11 @@
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
obj-$(CONFIG_ASHMEM) += ashmem.o
obj-$(CONFIG_ANDROID_LOGGER) += logger.o
+obj-$(CONFIG_ANDROID_PERSISTENT_RAM) += persistent_ram.o
obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o
obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
obj-$(CONFIG_ANDROID_SWITCH) += switch/
+obj-$(CONFIG_ANDROID_INTF_ALARM) += alarm.o
+obj-$(CONFIG_ANDROID_INTF_ALARM_DEV) += alarm-dev.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index e59c5be4be2b..b15fb0d6b152 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -3,7 +3,7 @@ TODO:
- sparse fixes
- rename files to be not so "generic"
- make sure things build as modules properly
- - add proper arch dependancies as needed
+ - add proper arch dependencies as needed
- audit userspace interfaces to make sure they are sane
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c
new file mode 100644
index 000000000000..03efb34cbe2e
--- /dev/null
+++ b/drivers/staging/android/alarm-dev.c
@@ -0,0 +1,297 @@
+/* drivers/rtc/alarm-dev.c
+ *
+ * Copyright (C) 2007-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/time.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include "android_alarm.h"
+
+/* XXX - Hack out wakelocks, while they are out of tree */
+struct wake_lock {
+ int i;
+};
+#define wake_lock(x)
+#define wake_lock_timeout(x, y)
+#define wake_unlock(x)
+#define WAKE_LOCK_SUSPEND 0
+#define wake_lock_init(x, y, z) ((x)->i = 1)
+#define wake_lock_destroy(x)
+
+#define ANDROID_ALARM_PRINT_INFO (1U << 0)
+#define ANDROID_ALARM_PRINT_IO (1U << 1)
+#define ANDROID_ALARM_PRINT_INT (1U << 2)
+
+
+static int debug_mask = ANDROID_ALARM_PRINT_INFO;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define pr_alarm(debug_level_mask, args...) \
+ do { \
+ if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \
+ pr_info(args); \
+ } \
+ } while (0)
+
+#define ANDROID_ALARM_WAKEUP_MASK ( \
+ ANDROID_ALARM_RTC_WAKEUP_MASK | \
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
+
+/* support old usespace code */
+#define ANDROID_ALARM_SET_OLD _IOW('a', 2, time_t) /* set alarm */
+#define ANDROID_ALARM_SET_AND_WAIT_OLD _IOW('a', 3, time_t)
+
+static int alarm_opened;
+static DEFINE_SPINLOCK(alarm_slock);
+static struct wake_lock alarm_wake_lock;
+static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue);
+static uint32_t alarm_pending;
+static uint32_t alarm_enabled;
+static uint32_t wait_pending;
+
+static struct android_alarm alarms[ANDROID_ALARM_TYPE_COUNT];
+
+static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int rv = 0;
+ unsigned long flags;
+ struct timespec new_alarm_time;
+ struct timespec new_rtc_time;
+ struct timespec tmp_time;
+ enum android_alarm_type alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd);
+ uint32_t alarm_type_mask = 1U << alarm_type;
+
+ if (alarm_type >= ANDROID_ALARM_TYPE_COUNT)
+ return -EINVAL;
+
+ if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_GET_TIME(0)) {
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+ return -EPERM;
+ if (file->private_data == NULL &&
+ cmd != ANDROID_ALARM_SET_RTC) {
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (alarm_opened) {
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return -EBUSY;
+ }
+ alarm_opened = 1;
+ file->private_data = (void *)1;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ }
+ }
+
+ switch (ANDROID_ALARM_BASE_CMD(cmd)) {
+ case ANDROID_ALARM_CLEAR(0):
+ spin_lock_irqsave(&alarm_slock, flags);
+ pr_alarm(IO, "alarm %d clear\n", alarm_type);
+ android_alarm_try_to_cancel(&alarms[alarm_type]);
+ if (alarm_pending) {
+ alarm_pending &= ~alarm_type_mask;
+ if (!alarm_pending && !wait_pending)
+ wake_unlock(&alarm_wake_lock);
+ }
+ alarm_enabled &= ~alarm_type_mask;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ break;
+
+ case ANDROID_ALARM_SET_OLD:
+ case ANDROID_ALARM_SET_AND_WAIT_OLD:
+ if (get_user(new_alarm_time.tv_sec, (int __user *)arg)) {
+ rv = -EFAULT;
+ goto err1;
+ }
+ new_alarm_time.tv_nsec = 0;
+ goto from_old_alarm_set;
+
+ case ANDROID_ALARM_SET_AND_WAIT(0):
+ case ANDROID_ALARM_SET(0):
+ if (copy_from_user(&new_alarm_time, (void __user *)arg,
+ sizeof(new_alarm_time))) {
+ rv = -EFAULT;
+ goto err1;
+ }
+from_old_alarm_set:
+ spin_lock_irqsave(&alarm_slock, flags);
+ pr_alarm(IO, "alarm %d set %ld.%09ld\n", alarm_type,
+ new_alarm_time.tv_sec, new_alarm_time.tv_nsec);
+ alarm_enabled |= alarm_type_mask;
+ android_alarm_start_range(&alarms[alarm_type],
+ timespec_to_ktime(new_alarm_time),
+ timespec_to_ktime(new_alarm_time));
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_SET_AND_WAIT(0)
+ && cmd != ANDROID_ALARM_SET_AND_WAIT_OLD)
+ break;
+ /* fall though */
+ case ANDROID_ALARM_WAIT:
+ spin_lock_irqsave(&alarm_slock, flags);
+ pr_alarm(IO, "alarm wait\n");
+ if (!alarm_pending && wait_pending) {
+ wake_unlock(&alarm_wake_lock);
+ wait_pending = 0;
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ rv = wait_event_interruptible(alarm_wait_queue, alarm_pending);
+ if (rv)
+ goto err1;
+ spin_lock_irqsave(&alarm_slock, flags);
+ rv = alarm_pending;
+ wait_pending = 1;
+ alarm_pending = 0;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ break;
+ case ANDROID_ALARM_SET_RTC:
+ if (copy_from_user(&new_rtc_time, (void __user *)arg,
+ sizeof(new_rtc_time))) {
+ rv = -EFAULT;
+ goto err1;
+ }
+ rv = android_alarm_set_rtc(new_rtc_time);
+ spin_lock_irqsave(&alarm_slock, flags);
+ alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK;
+ wake_up(&alarm_wait_queue);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (rv < 0)
+ goto err1;
+ break;
+ case ANDROID_ALARM_GET_TIME(0):
+ switch (alarm_type) {
+ case ANDROID_ALARM_RTC_WAKEUP:
+ case ANDROID_ALARM_RTC:
+ getnstimeofday(&tmp_time);
+ break;
+ case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP:
+ case ANDROID_ALARM_ELAPSED_REALTIME:
+ tmp_time =
+ ktime_to_timespec(alarm_get_elapsed_realtime());
+ break;
+ case ANDROID_ALARM_TYPE_COUNT:
+ case ANDROID_ALARM_SYSTEMTIME:
+ ktime_get_ts(&tmp_time);
+ break;
+ }
+ if (copy_to_user((void __user *)arg, &tmp_time,
+ sizeof(tmp_time))) {
+ rv = -EFAULT;
+ goto err1;
+ }
+ break;
+
+ default:
+ rv = -EINVAL;
+ goto err1;
+ }
+err1:
+ return rv;
+}
+
+static int alarm_open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+ return 0;
+}
+
+static int alarm_release(struct inode *inode, struct file *file)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (file->private_data != 0) {
+ for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
+ uint32_t alarm_type_mask = 1U << i;
+ if (alarm_enabled & alarm_type_mask) {
+ pr_alarm(INFO, "alarm_release: clear alarm, "
+ "pending %d\n",
+ !!(alarm_pending & alarm_type_mask));
+ alarm_enabled &= ~alarm_type_mask;
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ android_alarm_cancel(&alarms[i]);
+ spin_lock_irqsave(&alarm_slock, flags);
+ }
+ if (alarm_pending | wait_pending) {
+ if (alarm_pending)
+ pr_alarm(INFO, "alarm_release: clear "
+ "pending alarms %x\n", alarm_pending);
+ wake_unlock(&alarm_wake_lock);
+ wait_pending = 0;
+ alarm_pending = 0;
+ }
+ alarm_opened = 0;
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return 0;
+}
+
+static void alarm_triggered(struct android_alarm *alarm)
+{
+ unsigned long flags;
+ uint32_t alarm_type_mask = 1U << alarm->type;
+
+ pr_alarm(INT, "alarm_triggered type %d\n", alarm->type);
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (alarm_enabled & alarm_type_mask) {
+ wake_lock_timeout(&alarm_wake_lock, 5 * HZ);
+ alarm_enabled &= ~alarm_type_mask;
+ alarm_pending |= alarm_type_mask;
+ wake_up(&alarm_wait_queue);
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+}
+
+static const struct file_operations alarm_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = alarm_ioctl,
+ .open = alarm_open,
+ .release = alarm_release,
+};
+
+static struct miscdevice alarm_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "alarm",
+ .fops = &alarm_fops,
+};
+
+static int __init alarm_dev_init(void)
+{
+ int err;
+ int i;
+
+ err = misc_register(&alarm_device);
+ if (err)
+ return err;
+
+ for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++)
+ android_alarm_init(&alarms[i], i, alarm_triggered);
+ wake_lock_init(&alarm_wake_lock, WAKE_LOCK_SUSPEND, "alarm");
+
+ return 0;
+}
+
+static void __exit alarm_dev_exit(void)
+{
+ misc_deregister(&alarm_device);
+ wake_lock_destroy(&alarm_wake_lock);
+}
+
+module_init(alarm_dev_init);
+module_exit(alarm_dev_exit);
+
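The device node registered above is driven entirely through ioctl(), using the command macros defined in android_alarm.h further down. A hedged userspace sketch of talking to /dev/alarm, with error handling trimmed; it assumes the android_alarm.h from this patch is on the include path:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <time.h>
#include <unistd.h>
#include "android_alarm.h"

int main(void)
{
	struct timespec ts;
	int fired;
	int fd = open("/dev/alarm", O_RDWR);	/* read-only opens may only GET_TIME */

	if (fd < 0)
		return 1;

	/* Read the elapsed-realtime clock the driver maintains. */
	ioctl(fd, ANDROID_ALARM_GET_TIME(ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP),
	      &ts);

	/* Arm an absolute wakeup alarm 10s out, then block until it fires;
	 * alarm_ioctl() returns the mask of pending alarm types. */
	ts.tv_sec += 10;
	fired = ioctl(fd,
		      ANDROID_ALARM_SET_AND_WAIT(ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP),
		      &ts);
	printf("alarms fired: %#x\n", fired);

	close(fd);
	return 0;
}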
diff --git a/drivers/staging/android/alarm.c b/drivers/staging/android/alarm.c
new file mode 100644
index 000000000000..c68950b9e08f
--- /dev/null
+++ b/drivers/staging/android/alarm.c
@@ -0,0 +1,601 @@
+/* drivers/rtc/alarm.c
+ *
+ * Copyright (C) 2007-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/time.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include "android_alarm.h"
+
+/* XXX - Hack out wakelocks, while they are out of tree */
+struct wake_lock {
+ int i;
+};
+#define wake_lock(x)
+#define wake_lock_timeout(x, y)
+#define wake_unlock(x)
+#define WAKE_LOCK_SUSPEND 0
+#define wake_lock_init(x, y, z) ((x)->i = 1)
+#define wake_lock_destroy(x)
+
+#define ANDROID_ALARM_PRINT_ERROR (1U << 0)
+#define ANDROID_ALARM_PRINT_INIT_STATUS (1U << 1)
+#define ANDROID_ALARM_PRINT_TSET (1U << 2)
+#define ANDROID_ALARM_PRINT_CALL (1U << 3)
+#define ANDROID_ALARM_PRINT_SUSPEND (1U << 4)
+#define ANDROID_ALARM_PRINT_INT (1U << 5)
+#define ANDROID_ALARM_PRINT_FLOW (1U << 6)
+
+static int debug_mask = ANDROID_ALARM_PRINT_ERROR | \
+ ANDROID_ALARM_PRINT_INIT_STATUS;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define pr_alarm(debug_level_mask, args...) \
+ do { \
+ if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \
+ pr_info(args); \
+ } \
+ } while (0)
+
+#define ANDROID_ALARM_WAKEUP_MASK ( \
+ ANDROID_ALARM_RTC_WAKEUP_MASK | \
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
+
+/* support old usespace code */
+#define ANDROID_ALARM_SET_OLD _IOW('a', 2, time_t) /* set alarm */
+#define ANDROID_ALARM_SET_AND_WAIT_OLD _IOW('a', 3, time_t)
+
+struct alarm_queue {
+ struct rb_root alarms;
+ struct rb_node *first;
+ struct hrtimer timer;
+ ktime_t delta;
+ bool stopped;
+ ktime_t stopped_time;
+};
+
+static struct rtc_device *alarm_rtc_dev;
+static DEFINE_SPINLOCK(alarm_slock);
+static DEFINE_MUTEX(alarm_setrtc_mutex);
+static struct wake_lock alarm_rtc_wake_lock;
+static struct platform_device *alarm_platform_dev;
+struct alarm_queue alarms[ANDROID_ALARM_TYPE_COUNT];
+static bool suspended;
+
+static void update_timer_locked(struct alarm_queue *base, bool head_removed)
+{
+ struct android_alarm *alarm;
+ bool is_wakeup = base == &alarms[ANDROID_ALARM_RTC_WAKEUP] ||
+ base == &alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP];
+
+ if (base->stopped) {
+ pr_alarm(FLOW, "changed alarm while setting the wall time\n");
+ return;
+ }
+
+ if (is_wakeup && !suspended && head_removed)
+ wake_unlock(&alarm_rtc_wake_lock);
+
+ if (!base->first)
+ return;
+
+ alarm = container_of(base->first, struct android_alarm, node);
+
+ pr_alarm(FLOW, "selected alarm, type %d, func %pF at %lld\n",
+ alarm->type, alarm->function, ktime_to_ns(alarm->expires));
+
+ if (is_wakeup && suspended) {
+ pr_alarm(FLOW, "changed alarm while suspened\n");
+ wake_lock_timeout(&alarm_rtc_wake_lock, 1 * HZ);
+ return;
+ }
+
+ hrtimer_try_to_cancel(&base->timer);
+ base->timer.node.expires = ktime_add(base->delta, alarm->expires);
+ base->timer._softexpires = ktime_add(base->delta, alarm->softexpires);
+ hrtimer_start_expires(&base->timer, HRTIMER_MODE_ABS);
+}
+
+static void alarm_enqueue_locked(struct android_alarm *alarm)
+{
+ struct alarm_queue *base = &alarms[alarm->type];
+ struct rb_node **link = &base->alarms.rb_node;
+ struct rb_node *parent = NULL;
+ struct android_alarm *entry;
+ int leftmost = 1;
+ bool was_first = false;
+
+ pr_alarm(FLOW, "added alarm, type %d, func %pF at %lld\n",
+ alarm->type, alarm->function, ktime_to_ns(alarm->expires));
+
+ if (base->first == &alarm->node) {
+ base->first = rb_next(&alarm->node);
+ was_first = true;
+ }
+ if (!RB_EMPTY_NODE(&alarm->node)) {
+ rb_erase(&alarm->node, &base->alarms);
+ RB_CLEAR_NODE(&alarm->node);
+ }
+
+ while (*link) {
+ parent = *link;
+ entry = rb_entry(parent, struct android_alarm, node);
+ /*
+ * We dont care about collisions. Nodes with
+ * the same expiry time stay together.
+ */
+ if (alarm->expires.tv64 < entry->expires.tv64) {
+ link = &(*link)->rb_left;
+ } else {
+ link = &(*link)->rb_right;
+ leftmost = 0;
+ }
+ }
+ if (leftmost)
+ base->first = &alarm->node;
+ if (leftmost || was_first)
+ update_timer_locked(base, was_first);
+
+ rb_link_node(&alarm->node, parent, link);
+ rb_insert_color(&alarm->node, &base->alarms);
+}
+
+/**
+ * android_alarm_init - initialize an alarm
+ * @alarm: the alarm to be initialized
+ * @type: the alarm type to be used
+ * @function: alarm callback function
+ */
+void android_alarm_init(struct android_alarm *alarm,
+ enum android_alarm_type type, void (*function)(struct android_alarm *))
+{
+ RB_CLEAR_NODE(&alarm->node);
+ alarm->type = type;
+ alarm->function = function;
+
+ pr_alarm(FLOW, "created alarm, type %d, func %pF\n", type, function);
+}
+
+
+/**
+ * android_alarm_start_range - (re)start an alarm
+ * @alarm: the alarm to be added
+ * @start: earliest expiry time
+ * @end: expiry time
+ */
+void android_alarm_start_range(struct android_alarm *alarm, ktime_t start,
+ ktime_t end)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ alarm->softexpires = start;
+ alarm->expires = end;
+ alarm_enqueue_locked(alarm);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+}
+
+/**
+ * android_alarm_try_to_cancel - try to deactivate an alarm
+ * @alarm: alarm to stop
+ *
+ * Returns:
+ * 0 when the alarm was not active
+ * 1 when the alarm was active
+ * -1 when the alarm may currently be excuting the callback function and
+ * cannot be stopped (it may also be inactive)
+ */
+int android_alarm_try_to_cancel(struct android_alarm *alarm)
+{
+ struct alarm_queue *base = &alarms[alarm->type];
+ unsigned long flags;
+ bool first = false;
+ int ret = 0;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (!RB_EMPTY_NODE(&alarm->node)) {
+ pr_alarm(FLOW, "canceled alarm, type %d, func %pF at %lld\n",
+ alarm->type, alarm->function,
+ ktime_to_ns(alarm->expires));
+ ret = 1;
+ if (base->first == &alarm->node) {
+ base->first = rb_next(&alarm->node);
+ first = true;
+ }
+ rb_erase(&alarm->node, &base->alarms);
+ RB_CLEAR_NODE(&alarm->node);
+ if (first)
+ update_timer_locked(base, true);
+ } else
+ pr_alarm(FLOW, "tried to cancel alarm, type %d, func %pF\n",
+ alarm->type, alarm->function);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (!ret && hrtimer_callback_running(&base->timer))
+ ret = -1;
+ return ret;
+}
+
+/**
+ * android_alarm_cancel - cancel an alarm and wait for the handler to finish.
+ * @alarm: the alarm to be cancelled
+ *
+ * Returns:
+ * 0 when the alarm was not active
+ * 1 when the alarm was active
+ */
+int android_alarm_cancel(struct android_alarm *alarm)
+{
+ for (;;) {
+ int ret = android_alarm_try_to_cancel(alarm);
+ if (ret >= 0)
+ return ret;
+ cpu_relax();
+ }
+}
+
+/**
+ * alarm_set_rtc - set the kernel and rtc walltime
+ * @new_time: timespec value containing the new time
+ */
+int android_alarm_set_rtc(struct timespec new_time)
+{
+ int i;
+ int ret;
+ unsigned long flags;
+ struct rtc_time rtc_new_rtc_time;
+ struct timespec tmp_time;
+
+ rtc_time_to_tm(new_time.tv_sec, &rtc_new_rtc_time);
+
+ pr_alarm(TSET, "set rtc %ld %ld - rtc %02d:%02d:%02d %02d/%02d/%04d\n",
+ new_time.tv_sec, new_time.tv_nsec,
+ rtc_new_rtc_time.tm_hour, rtc_new_rtc_time.tm_min,
+ rtc_new_rtc_time.tm_sec, rtc_new_rtc_time.tm_mon + 1,
+ rtc_new_rtc_time.tm_mday,
+ rtc_new_rtc_time.tm_year + 1900);
+
+ mutex_lock(&alarm_setrtc_mutex);
+ spin_lock_irqsave(&alarm_slock, flags);
+ wake_lock(&alarm_rtc_wake_lock);
+ getnstimeofday(&tmp_time);
+ for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+ hrtimer_try_to_cancel(&alarms[i].timer);
+ alarms[i].stopped = true;
+ alarms[i].stopped_time = timespec_to_ktime(tmp_time);
+ }
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta =
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta =
+ ktime_sub(alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta,
+ timespec_to_ktime(timespec_sub(tmp_time, new_time)));
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ ret = do_settimeofday(&new_time);
+ spin_lock_irqsave(&alarm_slock, flags);
+ for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+ alarms[i].stopped = false;
+ update_timer_locked(&alarms[i], false);
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (ret < 0) {
+ pr_alarm(ERROR, "alarm_set_rtc: Failed to set time\n");
+ goto err;
+ }
+ if (!alarm_rtc_dev) {
+ pr_alarm(ERROR,
+ "alarm_set_rtc: no RTC, time will be lost on reboot\n");
+ goto err;
+ }
+ ret = rtc_set_time(alarm_rtc_dev, &rtc_new_rtc_time);
+ if (ret < 0)
+ pr_alarm(ERROR, "alarm_set_rtc: "
+ "Failed to set RTC, time will be lost on reboot\n");
+err:
+ wake_unlock(&alarm_rtc_wake_lock);
+ mutex_unlock(&alarm_setrtc_mutex);
+ return ret;
+}
+
+/**
+ * alarm_get_elapsed_realtime - get the elapsed real time in ktime_t format
+ *
+ * returns the time in ktime_t format
+ */
+ktime_t alarm_get_elapsed_realtime(void)
+{
+ ktime_t now;
+ unsigned long flags;
+ struct alarm_queue *base = &alarms[ANDROID_ALARM_ELAPSED_REALTIME];
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ now = base->stopped ? base->stopped_time : ktime_get_real();
+ now = ktime_sub(now, base->delta);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return now;
+}
+
+static enum hrtimer_restart alarm_timer_triggered(struct hrtimer *timer)
+{
+ struct alarm_queue *base;
+ struct android_alarm *alarm;
+ unsigned long flags;
+ ktime_t now;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+
+ base = container_of(timer, struct alarm_queue, timer);
+ now = base->stopped ? base->stopped_time : hrtimer_cb_get_time(timer);
+ now = ktime_sub(now, base->delta);
+
+ pr_alarm(INT, "alarm_timer_triggered type %ld at %lld\n",
+ base - alarms, ktime_to_ns(now));
+
+ while (base->first) {
+ alarm = container_of(base->first, struct android_alarm, node);
+ if (alarm->softexpires.tv64 > now.tv64) {
+ pr_alarm(FLOW, "don't call alarm, %pF, %lld (s %lld)\n",
+ alarm->function, ktime_to_ns(alarm->expires),
+ ktime_to_ns(alarm->softexpires));
+ break;
+ }
+ base->first = rb_next(&alarm->node);
+ rb_erase(&alarm->node, &base->alarms);
+ RB_CLEAR_NODE(&alarm->node);
+ pr_alarm(CALL, "call alarm, type %d, func %pF, %lld (s %lld)\n",
+ alarm->type, alarm->function,
+ ktime_to_ns(alarm->expires),
+ ktime_to_ns(alarm->softexpires));
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ alarm->function(alarm);
+ spin_lock_irqsave(&alarm_slock, flags);
+ }
+ if (!base->first)
+ pr_alarm(FLOW, "no more alarms of type %ld\n", base - alarms);
+ update_timer_locked(base, true);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return HRTIMER_NORESTART;
+}
+
+static void alarm_triggered_func(void *p)
+{
+ struct rtc_device *rtc = alarm_rtc_dev;
+ if (!(rtc->irq_data & RTC_AF))
+ return;
+ pr_alarm(INT, "rtc alarm triggered\n");
+ wake_lock_timeout(&alarm_rtc_wake_lock, 1 * HZ);
+}
+
+static int alarm_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int err = 0;
+ unsigned long flags;
+ struct rtc_wkalrm rtc_alarm;
+ struct rtc_time rtc_current_rtc_time;
+ unsigned long rtc_current_time;
+ unsigned long rtc_alarm_time;
+ struct timespec rtc_delta;
+ struct timespec wall_time;
+ struct alarm_queue *wakeup_queue = NULL;
+ struct alarm_queue *tmp_queue = NULL;
+
+ pr_alarm(SUSPEND, "alarm_suspend(%p, %d)\n", pdev, state.event);
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ suspended = true;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+
+ hrtimer_cancel(&alarms[ANDROID_ALARM_RTC_WAKEUP].timer);
+ hrtimer_cancel(&alarms[
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].timer);
+
+ tmp_queue = &alarms[ANDROID_ALARM_RTC_WAKEUP];
+ if (tmp_queue->first)
+ wakeup_queue = tmp_queue;
+ tmp_queue = &alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP];
+ if (tmp_queue->first && (!wakeup_queue ||
+ hrtimer_get_expires(&tmp_queue->timer).tv64 <
+ hrtimer_get_expires(&wakeup_queue->timer).tv64))
+ wakeup_queue = tmp_queue;
+ if (wakeup_queue) {
+ rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time);
+ getnstimeofday(&wall_time);
+ rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time);
+ set_normalized_timespec(&rtc_delta,
+ wall_time.tv_sec - rtc_current_time,
+ wall_time.tv_nsec);
+
+ rtc_alarm_time = timespec_sub(ktime_to_timespec(
+ hrtimer_get_expires(&wakeup_queue->timer)),
+ rtc_delta).tv_sec;
+
+ rtc_time_to_tm(rtc_alarm_time, &rtc_alarm.time);
+ rtc_alarm.enabled = 1;
+ rtc_set_alarm(alarm_rtc_dev, &rtc_alarm);
+ rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time);
+ rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time);
+ pr_alarm(SUSPEND,
+ "rtc alarm set at %ld, now %ld, rtc delta %ld.%09ld\n",
+ rtc_alarm_time, rtc_current_time,
+ rtc_delta.tv_sec, rtc_delta.tv_nsec);
+ if (rtc_current_time + 1 >= rtc_alarm_time) {
+ pr_alarm(SUSPEND, "alarm about to go off\n");
+ memset(&rtc_alarm, 0, sizeof(rtc_alarm));
+ rtc_alarm.enabled = 0;
+ rtc_set_alarm(alarm_rtc_dev, &rtc_alarm);
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ suspended = false;
+ wake_lock_timeout(&alarm_rtc_wake_lock, 2 * HZ);
+ update_timer_locked(&alarms[ANDROID_ALARM_RTC_WAKEUP],
+ false);
+ update_timer_locked(&alarms[
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP], false);
+ err = -EBUSY;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ }
+ }
+ return err;
+}
+
+static int alarm_resume(struct platform_device *pdev)
+{
+ struct rtc_wkalrm alarm;
+ unsigned long flags;
+
+ pr_alarm(SUSPEND, "alarm_resume(%p)\n", pdev);
+
+ memset(&alarm, 0, sizeof(alarm));
+ alarm.enabled = 0;
+ rtc_set_alarm(alarm_rtc_dev, &alarm);
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ suspended = false;
+ update_timer_locked(&alarms[ANDROID_ALARM_RTC_WAKEUP], false);
+ update_timer_locked(&alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP],
+ false);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+
+ return 0;
+}
+
+static struct rtc_task alarm_rtc_task = {
+ .func = alarm_triggered_func
+};
+
+static int rtc_alarm_add_device(struct device *dev,
+ struct class_interface *class_intf)
+{
+ int err;
+ struct rtc_device *rtc = to_rtc_device(dev);
+
+ mutex_lock(&alarm_setrtc_mutex);
+
+ if (alarm_rtc_dev) {
+ err = -EBUSY;
+ goto err1;
+ }
+
+ alarm_platform_dev =
+ platform_device_register_simple("alarm", -1, NULL, 0);
+ if (IS_ERR(alarm_platform_dev)) {
+ err = PTR_ERR(alarm_platform_dev);
+ goto err2;
+ }
+ err = rtc_irq_register(rtc, &alarm_rtc_task);
+ if (err)
+ goto err3;
+ alarm_rtc_dev = rtc;
+ pr_alarm(INIT_STATUS, "using rtc device, %s, for alarms", rtc->name);
+ mutex_unlock(&alarm_setrtc_mutex);
+
+ return 0;
+
+err3:
+ platform_device_unregister(alarm_platform_dev);
+err2:
+err1:
+ mutex_unlock(&alarm_setrtc_mutex);
+ return err;
+}
+
+static void rtc_alarm_remove_device(struct device *dev,
+ struct class_interface *class_intf)
+{
+ if (dev == &alarm_rtc_dev->dev) {
+ pr_alarm(INIT_STATUS, "lost rtc device for alarms");
+ rtc_irq_unregister(alarm_rtc_dev, &alarm_rtc_task);
+ platform_device_unregister(alarm_platform_dev);
+ alarm_rtc_dev = NULL;
+ }
+}
+
+static struct class_interface rtc_alarm_interface = {
+ .add_dev = &rtc_alarm_add_device,
+ .remove_dev = &rtc_alarm_remove_device,
+};
+
+static struct platform_driver alarm_driver = {
+ .suspend = alarm_suspend,
+ .resume = alarm_resume,
+ .driver = {
+ .name = "alarm"
+ }
+};
+
+static int __init alarm_late_init(void)
+{
+ unsigned long flags;
+ struct timespec tmp_time, system_time;
+
+ /* this needs to run after the rtc is read at boot */
+ spin_lock_irqsave(&alarm_slock, flags);
+ /* We read the current rtc and system time so we can later calulate
+ * elasped realtime to be (boot_systemtime + rtc - boot_rtc) ==
+ * (rtc - (boot_rtc - boot_systemtime))
+ */
+ getnstimeofday(&tmp_time);
+ ktime_get_ts(&system_time);
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta =
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta =
+ timespec_to_ktime(timespec_sub(tmp_time, system_time));
+
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return 0;
+}
+
+static int __init alarm_driver_init(void)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+ hrtimer_init(&alarms[i].timer,
+ CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ alarms[i].timer.function = alarm_timer_triggered;
+ }
+ hrtimer_init(&alarms[ANDROID_ALARM_SYSTEMTIME].timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ alarms[ANDROID_ALARM_SYSTEMTIME].timer.function = alarm_timer_triggered;
+ err = platform_driver_register(&alarm_driver);
+ if (err < 0)
+ goto err1;
+ wake_lock_init(&alarm_rtc_wake_lock, WAKE_LOCK_SUSPEND, "alarm_rtc");
+ rtc_alarm_interface.class = rtc_class;
+ err = class_interface_register(&rtc_alarm_interface);
+ if (err < 0)
+ goto err2;
+
+ return 0;
+
+err2:
+ wake_lock_destroy(&alarm_rtc_wake_lock);
+ platform_driver_unregister(&alarm_driver);
+err1:
+ return err;
+}
+
+static void __exit alarm_exit(void)
+{
+ class_interface_unregister(&rtc_alarm_interface);
+ wake_lock_destroy(&alarm_rtc_wake_lock);
+ platform_driver_unregister(&alarm_driver);
+}
+
+late_initcall(alarm_late_init);
+module_init(alarm_driver_init);
+module_exit(alarm_exit);
+
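For in-kernel users, the kernel-doc comments above cover the whole API surface: initialize once, then start_range/try_to_cancel/cancel as needed. A minimal sketch under those assumptions (my_alarm and my_timeout_fn are hypothetical):

#include <linux/kernel.h>
#include <linux/ktime.h>
#include "android_alarm.h"

static struct android_alarm my_alarm;

static void my_timeout_fn(struct android_alarm *alarm)
{
	pr_info("alarm fired, type %d\n", alarm->type);
}

static void my_arm_alarm(void)
{
	/* Fire 30-31 seconds of elapsed realtime from now, waking the
	 * device from suspend if necessary; times are absolute on the
	 * chosen clock, so start from the current reading. */
	ktime_t now = alarm_get_elapsed_realtime();

	android_alarm_init(&my_alarm, ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
			   my_timeout_fn);
	android_alarm_start_range(&my_alarm,
				  ktime_add_ns(now, 30ULL * NSEC_PER_SEC),
				  ktime_add_ns(now, 31ULL * NSEC_PER_SEC));
}

static void my_disarm_alarm(void)
{
	/* android_alarm_cancel() spins until any in-flight callback has
	 * finished, so it must not be called from the callback itself. */
	android_alarm_cancel(&my_alarm);
}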
diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h
new file mode 100644
index 000000000000..6eecbde2ef6f
--- /dev/null
+++ b/drivers/staging/android/android_alarm.h
@@ -0,0 +1,121 @@
+/* include/linux/android_alarm.h
+ *
+ * Copyright (C) 2006-2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ANDROID_ALARM_H
+#define _LINUX_ANDROID_ALARM_H
+
+#include <linux/ioctl.h>
+#include <linux/time.h>
+
+enum android_alarm_type {
+ /* return code bit numbers or set alarm arg */
+ ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME,
+
+ ANDROID_ALARM_TYPE_COUNT,
+
+ /* return code bit numbers */
+ /* ANDROID_ALARM_TIME_CHANGE = 16 */
+};
+
+#ifdef __KERNEL__
+
+#include <linux/ktime.h>
+#include <linux/rbtree.h>
+
+/*
+ * The alarm interface is similar to the hrtimer interface but adds support
+ * for wakeup from suspend. It also adds an elapsed realtime clock that can
+ * be used for periodic timers that need to keep runing while the system is
+ * suspended and not be disrupted when the wall time is set.
+ */
+
+/**
+ * struct alarm - the basic alarm structure
+ * @node: red black tree node for time ordered insertion
+ * @type: alarm type. rtc/elapsed-realtime/systemtime, wakeup/non-wakeup.
+ * @softexpires: the absolute earliest expiry time of the alarm.
+ * @expires: the absolute expiry time.
+ * @function: alarm expiry callback function
+ *
+ * The alarm structure must be initialized by alarm_init()
+ *
+ */
+
+struct android_alarm {
+ struct rb_node node;
+ enum android_alarm_type type;
+ ktime_t softexpires;
+ ktime_t expires;
+ void (*function)(struct android_alarm *);
+};
+
+void android_alarm_init(struct android_alarm *alarm,
+ enum android_alarm_type type, void (*function)(struct android_alarm *));
+void android_alarm_start_range(struct android_alarm *alarm, ktime_t start,
+ ktime_t end);
+int android_alarm_try_to_cancel(struct android_alarm *alarm);
+int android_alarm_cancel(struct android_alarm *alarm);
+ktime_t alarm_get_elapsed_realtime(void);
+
+/* set rtc while preserving elapsed realtime */
+int android_alarm_set_rtc(const struct timespec ts);
+
+#ifdef CONFIG_ANDROID_ALARM_OLDDRV_COMPAT
+/*
+ * Some older drivers depend on the old API,
+ * so provide compatability macros for now.
+ */
+#define alarm android_alarm
+#define alarm_init(x, y, z) android_alarm_init(x, y, z)
+#define alarm_start_range(x, y, z) android_alarm_start_range(x, y, z)
+#define alarm_try_to_cancel(x) android_alarm_try_to_cancel(x)
+#define alarm_cancel(x) android_alarm_cancel(x)
+#define alarm_set_rtc(x) android_alarm_set_rtc(x)
+#endif
+
+
+#endif
+
+enum android_alarm_return_flags {
+ ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
+ ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
+};
+
+/* Disable alarm */
+#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
+
+/* Ack last alarm and wait for next */
+#define ANDROID_ALARM_WAIT _IO('a', 1)
+
+#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
+/* Set alarm */
+#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
+#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
+#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
+#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
+#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
+
+#endif
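
The ioctl numbering above packs the alarm type into bits 4-7 of the command number; ANDROID_ALARM_BASE_CMD() masks those bits off and ANDROID_ALARM_IOCTL_TO_TYPE() recovers them. A minimal userspace sketch of driving this interface follows; the /dev/alarm node is registered by the companion alarm-dev.c driver, the 5-second offset is illustrative, and the assumption that ANDROID_ALARM_WAIT returns a mask of android_alarm_return_flags bits follows that driver, not this header.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <time.h>
#include <unistd.h>
#include "android_alarm.h"	/* the header added above */

int main(void)
{
	struct timespec ts;
	int fired;
	int fd = open("/dev/alarm", O_RDWR);

	if (fd < 0)
		return 1;

	/* expiry times are absolute on the chosen clock, so read it first */
	if (ioctl(fd, ANDROID_ALARM_GET_TIME(ANDROID_ALARM_ELAPSED_REALTIME), &ts) < 0)
		return 1;
	ts.tv_sec += 5;

	if (ioctl(fd, ANDROID_ALARM_SET(ANDROID_ALARM_ELAPSED_REALTIME), &ts) < 0)
		return 1;

	/* blocks until any alarm fires */
	fired = ioctl(fd, ANDROID_ALARM_WAIT);
	printf("alarm mask 0x%x\n", fired);

	close(fd);
	return 0;
}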
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 99052bfd3a2d..9f1f27e7c86e 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -315,7 +315,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
get_file(asma->file);
/*
- * XXX - Reworked to use shmem_zero_setup() instead of
+ * XXX - Reworked to use shmem_zero_setup() instead of
* shmem_set_file while we're in staging. -jstultz
*/
if (vma->vm_flags & VM_SHARED) {
@@ -680,7 +680,7 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
}
-static struct file_operations ashmem_fops = {
+static const struct file_operations ashmem_fops = {
.owner = THIS_MODULE,
.open = ashmem_open,
.release = ashmem_release,
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index f0b7e6605ab5..59e095362c81 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -103,7 +103,7 @@ static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
-static int binder_debug_no_lock;
+static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
@@ -258,7 +258,7 @@ struct binder_ref {
};
struct binder_buffer {
- struct list_head entry; /* free and allocated entries by addesss */
+ struct list_head entry; /* free and allocated entries by address */
struct rb_node rb_node; /* free entry by size or allocated entry */
/* by address */
unsigned free:1;
@@ -288,6 +288,7 @@ struct binder_proc {
struct rb_root refs_by_node;
int pid;
struct vm_area_struct *vma;
+ struct mm_struct *vma_vm_mm;
struct task_struct *tsk;
struct files_struct *files;
struct hlist_node deferred_work_node;
@@ -633,7 +634,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
if (mm) {
down_write(&mm->mmap_sem);
vma = proc->vma;
- if (vma && mm != vma->vm_mm) {
+ if (vma && mm != proc->vma_vm_mm) {
pr_err("binder: %d: vma mm and task mm mismatch\n",
proc->pid);
vma = NULL;
@@ -2776,6 +2777,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
proc->vma = NULL;
+ proc->vma_vm_mm = NULL;
binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
@@ -2858,6 +2860,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
barrier();
proc->files = get_files_struct(proc->tsk);
proc->vma = vma;
+ proc->vma_vm_mm = vma->vm_mm;
/*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
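
The new vma_vm_mm field caches vma->vm_mm at the one point where the mapping is known to be live, so the mismatch check in binder_update_page_range() no longer dereferences proc->vma, which a racing unmap may be tearing down. A reduced sketch of the pattern, with illustrative names rather than binder's actual types:

#include <linux/mm_types.h>
#include <linux/types.h>

struct mapping_ctx {
	struct vm_area_struct *vma;	/* set in ->mmap, cleared in ->close */
	struct mm_struct *vma_vm_mm;	/* snapshot of vma->vm_mm */
};

static void ctx_mmap(struct mapping_ctx *ctx, struct vm_area_struct *vma)
{
	ctx->vma = vma;
	ctx->vma_vm_mm = vma->vm_mm;	/* safe: vma is valid right here */
}

static bool ctx_mm_matches(struct mapping_ctx *ctx, struct mm_struct *mm)
{
	/* ctx->vma may already be stale; the cached mm is only compared,
	 * never dereferenced */
	return ctx->vma && mm == ctx->vma_vm_mm;
}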
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index ffc2d043dd8e..ea69b6a77dac 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -60,7 +60,11 @@ struct logger_reader {
};
/* logger_offset - returns index 'n' into the log via (optimized) modulus */
-#define logger_offset(n) ((n) & (log->size - 1))
+size_t logger_offset(struct logger_log *log, size_t n)
+{
+ return n & (log->size-1);
+}
+
/*
* file_get_log - Given a file structure, return the associated log
@@ -89,20 +93,24 @@ static inline struct logger_log *file_get_log(struct file *file)
* get_entry_len - Grabs the length of the payload of the next entry starting
* from 'off'.
*
+ * An entry length is 2 bytes (16 bits) in host endian order.
+ * In the log, the length does not include the size of the log entry structure.
+ * This function returns the size including the log entry structure.
+ *
* Caller needs to hold log->mutex.
*/
static __u32 get_entry_len(struct logger_log *log, size_t off)
{
__u16 val;
- switch (log->size - off) {
- case 1:
- memcpy(&val, log->buffer + off, 1);
- memcpy(((char *) &val) + 1, log->buffer, 1);
- break;
- default:
- memcpy(&val, log->buffer + off, 2);
- }
+ /* copy 2 bytes from buffer, in memcpy order, */
+ /* handling possible wrap at end of buffer */
+
+ ((__u8 *)&val)[0] = log->buffer[off];
+ if (likely(off+1 < log->size))
+ ((__u8 *)&val)[1] = log->buffer[off+1];
+ else
+ ((__u8 *)&val)[1] = log->buffer[0];
return sizeof(struct logger_entry) + val;
}
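
The rewritten get_entry_len() assembles the 16-bit length one byte at a time so the second byte can come from offset 0 when the field straddles the end of the ring, replacing the memcpy-based switch. The same assembly as a self-contained host program; the buffer contents are invented and, like the driver, the value is interpreted in host byte order:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t buf[8] = { 0 };
	size_t size = sizeof(buf), off = 7;
	uint16_t val;

	buf[7] = 0x34;	/* first length byte sits in the last slot... */
	buf[0] = 0x12;	/* ...second byte wraps around to the front */

	((uint8_t *)&val)[0] = buf[off];
	((uint8_t *)&val)[1] = off + 1 < size ? buf[off + 1] : buf[0];

	printf("0x%04x\n", val);	/* 0x1234 on a little-endian host */
	return 0;
}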
@@ -137,7 +145,7 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
if (copy_to_user(buf + len, log->buffer, count - len))
return -EFAULT;
- reader->r_off = logger_offset(reader->r_off + count);
+ reader->r_off = logger_offset(log, reader->r_off + count);
return count;
}
@@ -164,9 +172,10 @@ static ssize_t logger_read(struct file *file, char __user *buf,
start:
while (1) {
+ mutex_lock(&log->mutex);
+
prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
- mutex_lock(&log->mutex);
ret = (log->w_off == reader->r_off);
mutex_unlock(&log->mutex);
if (!ret)
@@ -225,7 +234,7 @@ static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
do {
size_t nr = get_entry_len(log, off);
- off = logger_offset(off + nr);
+ off = logger_offset(log, off + nr);
count += nr;
} while (count < len);
@@ -233,16 +242,28 @@ static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
}
/*
- * clock_interval - is a < c < b in mod-space? Put another way, does the line
- * from a to b cross c?
+ * is_between - is a < c < b, accounting for wrapping of a, b, and c
+ * positions in the buffer
+ *
+ * That is, if a<b, check for c between a and b
+ * and if a>b, check for c outside (not between) a and b
+ *
+ * |------- a xxxxxxxx b --------|
+ * c^
+ *
+ * |xxxxx b --------- a xxxxxxxxx|
+ * c^
+ * or c^
*/
-static inline int clock_interval(size_t a, size_t b, size_t c)
+static inline int is_between(size_t a, size_t b, size_t c)
{
- if (b < a) {
- if (a < c || b >= c)
+ if (a < b) {
+ /* is c between a and b? */
+ if (a < c && c <= b)
return 1;
} else {
- if (a < c && b >= c)
+ /* is c outside of b through a? */
+ if (c <= b || a < c)
return 1;
}
@@ -260,14 +281,14 @@ static inline int clock_interval(size_t a, size_t b, size_t c)
static void fix_up_readers(struct logger_log *log, size_t len)
{
size_t old = log->w_off;
- size_t new = logger_offset(old + len);
+ size_t new = logger_offset(log, old + len);
struct logger_reader *reader;
- if (clock_interval(old, new, log->head))
+ if (is_between(old, new, log->head))
log->head = get_next_entry(log, log->head, len);
list_for_each_entry(reader, &log->readers, list)
- if (clock_interval(old, new, reader->r_off))
+ if (is_between(old, new, reader->r_off))
reader->r_off = get_next_entry(log, reader->r_off, len);
}
@@ -286,7 +307,7 @@ static void do_write_log(struct logger_log *log, const void *buf, size_t count)
if (count != len)
memcpy(log->buffer, buf + len, count - len);
- log->w_off = logger_offset(log->w_off + count);
+ log->w_off = logger_offset(log, log->w_off + count);
}
@@ -309,9 +330,15 @@ static ssize_t do_write_log_from_user(struct logger_log *log,
if (count != len)
if (copy_from_user(log->buffer, buf + len, count - len))
+ /*
+ * Note that by not updating w_off, this abandons the
+ * portion of the new entry that *was* successfully
+ * copied, just above. This is intentional to avoid
+ * message corruption from missing fragments.
+ */
return -EFAULT;
- log->w_off = logger_offset(log->w_off + count);
+ log->w_off = logger_offset(log, log->w_off + count);
return count;
}
@@ -432,7 +459,12 @@ static int logger_release(struct inode *ignored, struct file *file)
{
if (file->f_mode & FMODE_READ) {
struct logger_reader *reader = file->private_data;
+ struct logger_log *log = reader->log;
+
+ mutex_lock(&log->mutex);
list_del(&reader->list);
+ mutex_unlock(&log->mutex);
+
kfree(reader);
}
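
Two invariants drive the helpers above: logger_offset() only works because log->size is a power of two, so the mask (size - 1) is a cheap modulus, and is_between() decides whether a write running from a to b overran offset c even when the write wraps. A self-contained host sketch of both, using an invented 16-byte ring:

#include <stddef.h>
#include <stdio.h>

static size_t logger_offset(size_t size, size_t n)
{
	return n & (size - 1);		/* size must be a power of two */
}

static int is_between(size_t a, size_t b, size_t c)
{
	if (a < b)
		return a < c && c <= b;	/* write did not wrap */
	return c <= b || a < c;		/* write wrapped past the end */
}

int main(void)
{
	size_t size = 16;
	size_t old = 13;			   /* w_off before the write */
	size_t new = logger_offset(size, old + 6); /* 6-byte write wraps to 3 */

	printf("new w_off = %zu\n", new);
	printf("reader at 14 overrun? %d\n", is_between(old, new, 14)); /* 1 */
	printf("reader at 8 overrun?  %d\n", is_between(old, new, 8));  /* 0 */
	return 0;
}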
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index efc7dc1f4831..052b43e4e505 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -1,16 +1,17 @@
/* drivers/misc/lowmemorykiller.c
*
* The lowmemorykiller driver lets user-space specify a set of memory thresholds
- * where processes with a range of oom_adj values will get killed. Specify the
- * minimum oom_adj values in /sys/module/lowmemorykiller/parameters/adj and the
- * number of free pages in /sys/module/lowmemorykiller/parameters/minfree. Both
- * files take a comma separated list of numbers in ascending order.
+ * where processes with a range of oom_score_adj values will get killed. Specify
+ * the minimum oom_score_adj values in
+ * /sys/module/lowmemorykiller/parameters/adj and the number of free pages in
+ * /sys/module/lowmemorykiller/parameters/minfree. Both files take a comma
+ * separated list of numbers in ascending order.
*
* For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
* "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill
- * processes with a oom_adj value of 8 or higher when the free memory drops
- * below 4096 pages and kill processes with a oom_adj value of 0 or higher
- * when the free memory drops below 1024 pages.
+ * processes with an oom_score_adj value of 8 or higher when the free memory
+ * drops below 4096 pages and kill processes with an oom_score_adj value of 0 or
+ * higher when the free memory drops below 1024 pages.
*
* The driver considers memory used for caches to be free, but if a large
* percentage of the cached memory is locked this can be very inaccurate
@@ -34,6 +35,7 @@
#include <linux/mm.h>
#include <linux/oom.h>
#include <linux/sched.h>
+#include <linux/rcupdate.h>
#include <linux/profile.h>
#include <linux/notifier.h>
@@ -45,7 +47,7 @@ static int lowmem_adj[6] = {
12,
};
static int lowmem_adj_size = 4;
-static size_t lowmem_minfree[6] = {
+static int lowmem_minfree[6] = {
3 * 512, /* 6MB */
2 * 1024, /* 8MB */
4 * 1024, /* 16MB */
@@ -73,23 +75,23 @@ static int
task_notify_func(struct notifier_block *self, unsigned long val, void *data)
{
struct task_struct *task = data;
- if (task == lowmem_deathpending) {
+
+ if (task == lowmem_deathpending)
lowmem_deathpending = NULL;
- task_handoff_unregister(&task_nb);
- }
+
return NOTIFY_OK;
}
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
- struct task_struct *p;
+ struct task_struct *tsk;
struct task_struct *selected = NULL;
int rem = 0;
int tasksize;
int i;
- int min_adj = OOM_ADJUST_MAX + 1;
+ int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
int selected_tasksize = 0;
- int selected_oom_adj;
+ int selected_oom_score_adj;
int array_size = ARRAY_SIZE(lowmem_adj);
int other_free = global_page_state(NR_FREE_PAGES);
int other_file = global_page_state(NR_FILE_PAGES) -
@@ -115,80 +117,77 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
for (i = 0; i < array_size; i++) {
if (other_free < lowmem_minfree[i] &&
other_file < lowmem_minfree[i]) {
- min_adj = lowmem_adj[i];
+ min_score_adj = lowmem_adj[i];
break;
}
}
if (sc->nr_to_scan > 0)
lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
sc->nr_to_scan, sc->gfp_mask, other_free,
- other_file, min_adj);
+ other_file, min_score_adj);
rem = global_page_state(NR_ACTIVE_ANON) +
global_page_state(NR_ACTIVE_FILE) +
global_page_state(NR_INACTIVE_ANON) +
global_page_state(NR_INACTIVE_FILE);
- if (sc->nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) {
+ if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
sc->nr_to_scan, sc->gfp_mask, rem);
return rem;
}
- selected_oom_adj = min_adj;
-
- read_lock(&tasklist_lock);
- for_each_process(p) {
- struct mm_struct *mm;
- struct signal_struct *sig;
- int oom_adj;
-
- task_lock(p);
- mm = p->mm;
- sig = p->signal;
- if (!mm || !sig) {
- task_unlock(p);
+ selected_oom_score_adj = min_score_adj;
+
+ rcu_read_lock();
+ for_each_process(tsk) {
+ struct task_struct *p;
+ int oom_score_adj;
+
+ if (tsk->flags & PF_KTHREAD)
continue;
- }
- oom_adj = sig->oom_adj;
- if (oom_adj < min_adj) {
+
+ p = find_lock_task_mm(tsk);
+ if (!p)
+ continue;
+
+ oom_score_adj = p->signal->oom_score_adj;
+ if (oom_score_adj < min_score_adj) {
task_unlock(p);
continue;
}
- tasksize = get_mm_rss(mm);
+ tasksize = get_mm_rss(p->mm);
task_unlock(p);
if (tasksize <= 0)
continue;
if (selected) {
- if (oom_adj < selected_oom_adj)
+ if (oom_score_adj < selected_oom_score_adj)
continue;
- if (oom_adj == selected_oom_adj &&
+ if (oom_score_adj == selected_oom_score_adj &&
tasksize <= selected_tasksize)
continue;
}
selected = p;
selected_tasksize = tasksize;
- selected_oom_adj = oom_adj;
+ selected_oom_score_adj = oom_score_adj;
lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
- p->pid, p->comm, oom_adj, tasksize);
+ p->pid, p->comm, oom_score_adj, tasksize);
}
if (selected) {
lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
selected->pid, selected->comm,
- selected_oom_adj, selected_tasksize);
+ selected_oom_score_adj, selected_tasksize);
/*
- * If CONFIG_PROFILING is off, then task_handoff_register()
- * is a nop. In that case we don't want to stall the killer
- * by setting lowmem_deathpending.
+ * If CONFIG_PROFILING is off, then we don't want to stall
+ * the killer by setting lowmem_deathpending.
*/
#ifdef CONFIG_PROFILING
lowmem_deathpending = selected;
lowmem_deathpending_timeout = jiffies + HZ;
- task_handoff_register(&task_nb);
#endif
- force_sig(SIGKILL, selected);
+ send_sig(SIGKILL, selected, 0);
rem -= selected_tasksize;
}
lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
sc->nr_to_scan, sc->gfp_mask, rem);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return rem;
}
@@ -199,6 +198,7 @@ static struct shrinker lowmem_shrinker = {
static int __init lowmem_init(void)
{
+ task_handoff_register(&task_nb);
register_shrinker(&lowmem_shrinker);
return 0;
}
@@ -206,6 +206,7 @@ static int __init lowmem_init(void)
static void __exit lowmem_exit(void)
{
unregister_shrinker(&lowmem_shrinker);
+ task_handoff_unregister(&task_nb);
}
module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
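
The shrinker picks its cutoff by scanning minfree[] for the first threshold that both the free and the file-cache page counts fall below, then kills the process with the highest oom_score_adj at or above the paired adj[] entry, breaking ties by RSS. A self-contained sketch of the threshold scan; the page counts are invented, and the fourth minfree default (16 * 1024) is assumed since it is not visible in this hunk:

#include <stdio.h>

int main(void)
{
	int adj[4]     = { 0, 1, 6, 12 };
	int minfree[4] = { 3 * 512, 2 * 1024, 4 * 1024, 16 * 1024 };
	int other_free = 3000, other_file = 1200;	/* sample page counts */
	int min_score_adj = 1000 + 1;			/* OOM_SCORE_ADJ_MAX + 1 */
	int i;

	for (i = 0; i < 4; i++) {
		if (other_free < minfree[i] && other_file < minfree[i]) {
			min_score_adj = adj[i];
			break;
		}
	}
	printf("kill tasks with oom_score_adj >= %d\n", min_score_adj); /* 6 */
	return 0;
}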
diff --git a/drivers/staging/android/persistent_ram.c b/drivers/staging/android/persistent_ram.c
new file mode 100644
index 000000000000..e08f2574e30a
--- /dev/null
+++ b/drivers/staging/android/persistent_ram.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/rslib.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "persistent_ram.h"
+
+struct persistent_ram_buffer {
+ uint32_t sig;
+ atomic_t start;
+ atomic_t size;
+ uint8_t data[0];
+};
+
+#define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */
+
+static __initdata LIST_HEAD(persistent_ram_list);
+
+static inline size_t buffer_size(struct persistent_ram_zone *prz)
+{
+ return atomic_read(&prz->buffer->size);
+}
+
+static inline size_t buffer_start(struct persistent_ram_zone *prz)
+{
+ return atomic_read(&prz->buffer->start);
+}
+
+/* increase and wrap the start pointer, returning the old value */
+static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
+{
+ int old;
+ int new;
+
+ do {
+ old = atomic_read(&prz->buffer->start);
+ new = old + a;
+ while (unlikely(new > prz->buffer_size))
+ new -= prz->buffer_size;
+ } while (atomic_cmpxchg(&prz->buffer->start, old, new) != old);
+
+ return old;
+}
+
+/* increase the size counter until it hits the max size */
+static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
+{
+ size_t old;
+ size_t new;
+
+ if (atomic_read(&prz->buffer->size) == prz->buffer_size)
+ return;
+
+ do {
+ old = atomic_read(&prz->buffer->size);
+ new = old + a;
+ if (new > prz->buffer_size)
+ new = prz->buffer_size;
+ } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
+}
+
+/* increase the size counter, returning an error if it hits the max size */
+static inline ssize_t buffer_size_add_clamp(struct persistent_ram_zone *prz,
+ size_t a)
+{
+ size_t old;
+ size_t new;
+
+ do {
+ old = atomic_read(&prz->buffer->size);
+ new = old + a;
+ if (new > prz->buffer_size)
+ return -ENOMEM;
+ } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
+
+ return 0;
+}
+
+static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
+ uint8_t *data, size_t len, uint8_t *ecc)
+{
+ int i;
+ uint16_t par[prz->ecc_size];
+
+ /* Initialize the parity buffer */
+ memset(par, 0, sizeof(par));
+ encode_rs8(prz->rs_decoder, data, len, par, 0);
+ for (i = 0; i < prz->ecc_size; i++)
+ ecc[i] = par[i];
+}
+
+static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz,
+ void *data, size_t len, uint8_t *ecc)
+{
+ int i;
+ uint16_t par[prz->ecc_size];
+
+ for (i = 0; i < prz->ecc_size; i++)
+ par[i] = ecc[i];
+ return decode_rs8(prz->rs_decoder, data, par, len,
+ NULL, 0, NULL, 0, NULL);
+}
+
+static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz,
+ unsigned int start, unsigned int count)
+{
+ struct persistent_ram_buffer *buffer = prz->buffer;
+ uint8_t *buffer_end = buffer->data + prz->buffer_size;
+ uint8_t *block;
+ uint8_t *par;
+ int ecc_block_size = prz->ecc_block_size;
+ int ecc_size = prz->ecc_size;
+ int size = prz->ecc_block_size;
+
+ if (!prz->ecc)
+ return;
+
+ block = buffer->data + (start & ~(ecc_block_size - 1));
+ par = prz->par_buffer + (start / ecc_block_size) * prz->ecc_size;
+
+ do {
+ if (block + ecc_block_size > buffer_end)
+ size = buffer_end - block;
+ persistent_ram_encode_rs8(prz, block, size, par);
+ block += ecc_block_size;
+ par += ecc_size;
+ } while (block < buffer->data + start + count);
+}
+
+static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz)
+{
+ struct persistent_ram_buffer *buffer = prz->buffer;
+
+ if (!prz->ecc)
+ return;
+
+ persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer),
+ prz->par_header);
+}
+
+static void persistent_ram_ecc_old(struct persistent_ram_zone *prz)
+{
+ struct persistent_ram_buffer *buffer = prz->buffer;
+ uint8_t *block;
+ uint8_t *par;
+
+ if (!prz->ecc)
+ return;
+
+ block = buffer->data;
+ par = prz->par_buffer;
+ while (block < buffer->data + buffer_size(prz)) {
+ int numerr;
+ int size = prz->ecc_block_size;
+ if (block + size > buffer->data + prz->buffer_size)
+ size = buffer->data + prz->buffer_size - block;
+ numerr = persistent_ram_decode_rs8(prz, block, size, par);
+ if (numerr > 0) {
+ pr_devel("persistent_ram: error in block %p, %d\n",
+ block, numerr);
+ prz->corrected_bytes += numerr;
+ } else if (numerr < 0) {
+ pr_devel("persistent_ram: uncorrectable error in block %p\n",
+ block);
+ prz->bad_blocks++;
+ }
+ block += prz->ecc_block_size;
+ par += prz->ecc_size;
+ }
+}
+
+static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
+ size_t buffer_size)
+{
+ int numerr;
+ struct persistent_ram_buffer *buffer = prz->buffer;
+ int ecc_blocks;
+
+ if (!prz->ecc)
+ return 0;
+
+ prz->ecc_block_size = 128;
+ prz->ecc_size = 16;
+ prz->ecc_symsize = 8;
+ prz->ecc_poly = 0x11d;
+
+ ecc_blocks = DIV_ROUND_UP(prz->buffer_size, prz->ecc_block_size);
+ prz->buffer_size -= (ecc_blocks + 1) * prz->ecc_size;
+
+ if (prz->buffer_size > buffer_size) {
+ pr_err("persistent_ram: invalid size %zu, non-ecc datasize %zu\n",
+ buffer_size, prz->buffer_size);
+ return -EINVAL;
+ }
+
+ prz->par_buffer = buffer->data + prz->buffer_size;
+ prz->par_header = prz->par_buffer + ecc_blocks * prz->ecc_size;
+
+ /*
+ * first consecutive root is 0
+ * primitive element to generate roots = 1
+ */
+ prz->rs_decoder = init_rs(prz->ecc_symsize, prz->ecc_poly, 0, 1,
+ prz->ecc_size);
+ if (prz->rs_decoder == NULL) {
+ pr_info("persistent_ram: init_rs failed\n");
+ return -EINVAL;
+ }
+
+ prz->corrected_bytes = 0;
+ prz->bad_blocks = 0;
+
+ numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer),
+ prz->par_header);
+ if (numerr > 0) {
+ pr_info("persistent_ram: error in header, %d\n", numerr);
+ prz->corrected_bytes += numerr;
+ } else if (numerr < 0) {
+ pr_info("persistent_ram: uncorrectable error in header\n");
+ prz->bad_blocks++;
+ }
+
+ return 0;
+}
+
+ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
+ char *str, size_t len)
+{
+ ssize_t ret;
+
+ if (prz->corrected_bytes || prz->bad_blocks)
+ ret = snprintf(str, len, ""
+ "\n%d Corrected bytes, %d unrecoverable blocks\n",
+ prz->corrected_bytes, prz->bad_blocks);
+ else
+ ret = snprintf(str, len, "\nNo errors detected\n");
+
+ return ret;
+}
+
+static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
+ const void *s, unsigned int start, unsigned int count)
+{
+ struct persistent_ram_buffer *buffer = prz->buffer;
+ memcpy(buffer->data + start, s, count);
+ persistent_ram_update_ecc(prz, start, count);
+}
+
+static void __init
+persistent_ram_save_old(struct persistent_ram_zone *prz)
+{
+ struct persistent_ram_buffer *buffer = prz->buffer;
+ size_t size = buffer_size(prz);
+ size_t start = buffer_start(prz);
+ char *dest;
+
+ persistent_ram_ecc_old(prz);
+
+ dest = kmalloc(size, GFP_KERNEL);
+ if (dest == NULL) {
+ pr_err("persistent_ram: failed to allocate buffer\n");
+ return;
+ }
+
+ prz->old_log = dest;
+ prz->old_log_size = size;
+ memcpy(prz->old_log, &buffer->data[start], size - start);
+ memcpy(prz->old_log + size - start, &buffer->data[0], start);
+}
+
+int notrace persistent_ram_write(struct persistent_ram_zone *prz,
+ const void *s, unsigned int count)
+{
+ int rem;
+ int c = count;
+ size_t start;
+
+ if (unlikely(c > prz->buffer_size)) {
+ s += c - prz->buffer_size;
+ c = prz->buffer_size;
+ }
+
+ buffer_size_add_clamp(prz, c);
+
+ start = buffer_start_add(prz, c);
+
+ rem = prz->buffer_size - start;
+ if (unlikely(rem < c)) {
+ persistent_ram_update(prz, s, start, rem);
+ s += rem;
+ c -= rem;
+ start = 0;
+ }
+ persistent_ram_update(prz, s, start, c);
+
+ persistent_ram_update_header_ecc(prz);
+
+ return count;
+}
+
+size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
+{
+ return prz->old_log_size;
+}
+
+void *persistent_ram_old(struct persistent_ram_zone *prz)
+{
+ return prz->old_log;
+}
+
+void persistent_ram_free_old(struct persistent_ram_zone *prz)
+{
+ kfree(prz->old_log);
+ prz->old_log = NULL;
+ prz->old_log_size = 0;
+}
+
+static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
+ struct persistent_ram_zone *prz)
+{
+ struct page **pages;
+ phys_addr_t page_start;
+ unsigned int page_count;
+ pgprot_t prot;
+ unsigned int i;
+
+ page_start = start - offset_in_page(start);
+ page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
+
+ prot = pgprot_noncached(PAGE_KERNEL);
+
+ pages = kmalloc(sizeof(struct page *) * page_count, GFP_KERNEL);
+ if (!pages) {
+ pr_err("%s: Failed to allocate array for %u pages\n", __func__,
+ page_count);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < page_count; i++) {
+ phys_addr_t addr = page_start + i * PAGE_SIZE;
+ pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
+ }
+ prz->vaddr = vmap(pages, page_count, VM_MAP, prot);
+ kfree(pages);
+ if (!prz->vaddr) {
+ pr_err("%s: Failed to map %u pages\n", __func__, page_count);
+ return -ENOMEM;
+ }
+
+ prz->buffer = prz->vaddr + offset_in_page(start);
+ prz->buffer_size = size - sizeof(struct persistent_ram_buffer);
+
+ return 0;
+}
+
+static int __init persistent_ram_buffer_init(const char *name,
+ struct persistent_ram_zone *prz)
+{
+ int i;
+ struct persistent_ram *ram;
+ struct persistent_ram_descriptor *desc;
+ phys_addr_t start;
+
+ list_for_each_entry(ram, &persistent_ram_list, node) {
+ start = ram->start;
+ for (i = 0; i < ram->num_descs; i++) {
+ desc = &ram->descs[i];
+ if (!strcmp(desc->name, name))
+ return persistent_ram_buffer_map(start,
+ desc->size, prz);
+ start += desc->size;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static __init
+struct persistent_ram_zone *__persistent_ram_init(struct device *dev, bool ecc)
+{
+ struct persistent_ram_zone *prz;
+ int ret;
+
+ prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
+ if (!prz) {
+ pr_err("persistent_ram: failed to allocate persistent ram zone\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_LIST_HEAD(&prz->node);
+
+ ret = persistent_ram_buffer_init(dev_name(dev), prz);
+ if (ret) {
+ pr_err("persistent_ram: failed to initialize buffer\n");
+ return ERR_PTR(ret);
+ }
+
+ prz->ecc = ecc;
+ ret = persistent_ram_init_ecc(prz, prz->buffer_size);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (prz->buffer->sig == PERSISTENT_RAM_SIG) {
+ if (buffer_size(prz) > prz->buffer_size ||
+ buffer_start(prz) > buffer_size(prz))
+ pr_info("persistent_ram: found existing invalid buffer,"
+ " size %ld, start %ld\n",
+ buffer_size(prz), buffer_start(prz));
+ else {
+ pr_info("persistent_ram: found existing buffer,"
+ " size %ld, start %ld\n",
+ buffer_size(prz), buffer_start(prz));
+ persistent_ram_save_old(prz);
+ }
+ } else {
+ pr_info("persistent_ram: no valid data in buffer"
+ " (sig = 0x%08x)\n", prz->buffer->sig);
+ }
+
+ prz->buffer->sig = PERSISTENT_RAM_SIG;
+ atomic_set(&prz->buffer->start, 0);
+ atomic_set(&prz->buffer->size, 0);
+
+ return prz;
+}
+
+struct persistent_ram_zone * __init
+persistent_ram_init_ringbuffer(struct device *dev, bool ecc)
+{
+ return __persistent_ram_init(dev, ecc);
+}
+
+int __init persistent_ram_early_init(struct persistent_ram *ram)
+{
+ int ret;
+
+ ret = memblock_reserve(ram->start, ram->size);
+ if (ret) {
+ pr_err("Failed to reserve persistent memory from %08lx-%08lx\n",
+ (long)ram->start, (long)(ram->start + ram->size - 1));
+ return ret;
+ }
+
+ list_add_tail(&ram->node, &persistent_ram_list);
+
+ pr_info("Initialized persistent memory from %08lx-%08lx\n",
+ (long)ram->start, (long)(ram->start + ram->size - 1));
+
+ return 0;
+}
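
buffer_start_add() advances the shared start offset without a lock: compute the wrapped value, publish it with a compare-and-swap, and retry if another writer got there first. The same loop as a host program, substituting the GCC/Clang __atomic builtins for the kernel's atomic_cmpxchg():

#include <stdio.h>

/* advance *start by a, wrapping at size; returns the previous value */
static int start_add(int *start, int a, int size)
{
	int old, new;

	do {
		old = __atomic_load_n(start, __ATOMIC_RELAXED);
		new = old + a;
		while (new > size)	/* wrap without a division */
			new -= size;
	} while (!__atomic_compare_exchange_n(start, &old, new, 0,
					      __ATOMIC_RELAXED,
					      __ATOMIC_RELAXED));
	return old;
}

int main(void)
{
	int start = 1000;
	int old = start_add(&start, 60, 1024);

	printf("old=%d new=%d\n", old, start);	/* old=1000 new=36 */
	return 0;
}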
diff --git a/drivers/staging/android/persistent_ram.h b/drivers/staging/android/persistent_ram.h
new file mode 100644
index 000000000000..f41e2086c645
--- /dev/null
+++ b/drivers/staging/android/persistent_ram.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_PERSISTENT_RAM_H__
+#define __LINUX_PERSISTENT_RAM_H__
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+struct persistent_ram_buffer;
+
+struct persistent_ram_descriptor {
+ const char *name;
+ phys_addr_t size;
+};
+
+struct persistent_ram {
+ phys_addr_t start;
+ phys_addr_t size;
+
+ int num_descs;
+ struct persistent_ram_descriptor *descs;
+
+ struct list_head node;
+};
+
+struct persistent_ram_zone {
+ struct list_head node;
+ void *vaddr;
+ struct persistent_ram_buffer *buffer;
+ size_t buffer_size;
+
+ /* ECC correction */
+ bool ecc;
+ char *par_buffer;
+ char *par_header;
+ struct rs_control *rs_decoder;
+ int corrected_bytes;
+ int bad_blocks;
+ int ecc_block_size;
+ int ecc_size;
+ int ecc_symsize;
+ int ecc_poly;
+
+ char *old_log;
+ size_t old_log_size;
+ size_t old_log_footer_size;
+ bool early;
+};
+
+int persistent_ram_early_init(struct persistent_ram *ram);
+
+struct persistent_ram_zone *persistent_ram_init_ringbuffer(struct device *dev,
+ bool ecc);
+
+int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
+ unsigned int count);
+
+size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
+void *persistent_ram_old(struct persistent_ram_zone *prz);
+void persistent_ram_free_old(struct persistent_ram_zone *prz);
+ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
+ char *str, size_t len);
+
+#endif
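
A minimal sketch of a client of this API, shaped like the ram_console conversion below: create a zone at probe time, append with persistent_ram_write(), and recover the previous boot's log through persistent_ram_old(). The driver name and message are illustrative and error handling is trimmed:

#include <linux/err.h>
#include <linux/platform_device.h>
#include "persistent_ram.h"

static struct persistent_ram_zone *zone;

static int __init my_probe(struct platform_device *pdev)
{
	zone = persistent_ram_init_ringbuffer(&pdev->dev, true /* ecc */);
	if (IS_ERR(zone))
		return PTR_ERR(zone);

	if (persistent_ram_old_size(zone))
		pr_info("previous boot left %zu bytes of log\n",
			persistent_ram_old_size(zone));

	persistent_ram_write(zone, "hello from this boot\n", 21);
	return 0;
}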
diff --git a/drivers/staging/android/ram_console.c b/drivers/staging/android/ram_console.c
index 6d4d67924f22..ce140ffc54ea 100644
--- a/drivers/staging/android/ram_console.c
+++ b/drivers/staging/android/ram_console.c
@@ -21,129 +21,24 @@
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include "persistent_ram.h"
#include "ram_console.h"
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
-#include <linux/rslib.h>
-#endif
-
-struct ram_console_buffer {
- uint32_t sig;
- uint32_t start;
- uint32_t size;
- uint8_t data[0];
-};
-
-#define RAM_CONSOLE_SIG (0x43474244) /* DBGC */
-
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
-static char __initdata
- ram_console_old_log_init_buffer[CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE];
-#endif
-static char *ram_console_old_log;
-static size_t ram_console_old_log_size;
-
-static struct ram_console_buffer *ram_console_buffer;
-static size_t ram_console_buffer_size;
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
-static char *ram_console_par_buffer;
-static struct rs_control *ram_console_rs_decoder;
-static int ram_console_corrected_bytes;
-static int ram_console_bad_blocks;
-#define ECC_BLOCK_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
-#define ECC_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
-#define ECC_SYMSIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
-#define ECC_POLY CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
-#endif
-
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
-static void ram_console_encode_rs8(uint8_t *data, size_t len, uint8_t *ecc)
-{
- int i;
- uint16_t par[ECC_SIZE];
- /* Initialize the parity buffer */
- memset(par, 0, sizeof(par));
- encode_rs8(ram_console_rs_decoder, data, len, par, 0);
- for (i = 0; i < ECC_SIZE; i++)
- ecc[i] = par[i];
-}
-
-static int ram_console_decode_rs8(void *data, size_t len, uint8_t *ecc)
-{
- int i;
- uint16_t par[ECC_SIZE];
- for (i = 0; i < ECC_SIZE; i++)
- par[i] = ecc[i];
- return decode_rs8(ram_console_rs_decoder, data, par, len,
- NULL, 0, NULL, 0, NULL);
-}
-#endif
-
-static void ram_console_update(const char *s, unsigned int count)
-{
- struct ram_console_buffer *buffer = ram_console_buffer;
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
- uint8_t *buffer_end = buffer->data + ram_console_buffer_size;
- uint8_t *block;
- uint8_t *par;
- int size = ECC_BLOCK_SIZE;
-#endif
- memcpy(buffer->data + buffer->start, s, count);
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
- block = buffer->data + (buffer->start & ~(ECC_BLOCK_SIZE - 1));
- par = ram_console_par_buffer +
- (buffer->start / ECC_BLOCK_SIZE) * ECC_SIZE;
- do {
- if (block + ECC_BLOCK_SIZE > buffer_end)
- size = buffer_end - block;
- ram_console_encode_rs8(block, size, par);
- block += ECC_BLOCK_SIZE;
- par += ECC_SIZE;
- } while (block < buffer->data + buffer->start + count);
-#endif
-}
-
-static void ram_console_update_header(void)
-{
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
- struct ram_console_buffer *buffer = ram_console_buffer;
- uint8_t *par;
- par = ram_console_par_buffer +
- DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
- ram_console_encode_rs8((uint8_t *)buffer, sizeof(*buffer), par);
-#endif
-}
+static struct persistent_ram_zone *ram_console_zone;
+static const char *bootinfo;
+static size_t bootinfo_size;
static void
ram_console_write(struct console *console, const char *s, unsigned int count)
{
- int rem;
- struct ram_console_buffer *buffer = ram_console_buffer;
-
- if (count > ram_console_buffer_size) {
- s += count - ram_console_buffer_size;
- count = ram_console_buffer_size;
- }
- rem = ram_console_buffer_size - buffer->start;
- if (rem < count) {
- ram_console_update(s, rem);
- s += rem;
- count -= rem;
- buffer->start = 0;
- buffer->size = ram_console_buffer_size;
- }
- ram_console_update(s, count);
-
- buffer->start += count;
- if (buffer->size < ram_console_buffer_size)
- buffer->size += count;
- ram_console_update_header();
+ struct persistent_ram_zone *prz = console->data;
+ persistent_ram_write(prz, s, count);
}
static struct console ram_console = {
.name = "ram",
.write = ram_console_write,
- .flags = CON_PRINTBUFFER | CON_ENABLED,
+ .flags = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME,
.index = -1,
};
@@ -155,220 +50,31 @@ void ram_console_enable_console(int enabled)
ram_console.flags &= ~CON_ENABLED;
}
-static void __init
-ram_console_save_old(struct ram_console_buffer *buffer, const char *bootinfo,
- char *dest)
-{
- size_t old_log_size = buffer->size;
- size_t bootinfo_size = 0;
- size_t total_size = old_log_size;
- char *ptr;
- const char *bootinfo_label = "Boot info:\n";
-
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
- uint8_t *block;
- uint8_t *par;
- char strbuf[80];
- int strbuf_len = 0;
-
- block = buffer->data;
- par = ram_console_par_buffer;
- while (block < buffer->data + buffer->size) {
- int numerr;
- int size = ECC_BLOCK_SIZE;
- if (block + size > buffer->data + ram_console_buffer_size)
- size = buffer->data + ram_console_buffer_size - block;
- numerr = ram_console_decode_rs8(block, size, par);
- if (numerr > 0) {
-#if 0
- printk(KERN_INFO "ram_console: error in block %p, %d\n",
- block, numerr);
-#endif
- ram_console_corrected_bytes += numerr;
- } else if (numerr < 0) {
-#if 0
- printk(KERN_INFO "ram_console: uncorrectable error in "
- "block %p\n", block);
-#endif
- ram_console_bad_blocks++;
- }
- block += ECC_BLOCK_SIZE;
- par += ECC_SIZE;
- }
- if (ram_console_corrected_bytes || ram_console_bad_blocks)
- strbuf_len = snprintf(strbuf, sizeof(strbuf),
- "\n%d Corrected bytes, %d unrecoverable blocks\n",
- ram_console_corrected_bytes, ram_console_bad_blocks);
- else
- strbuf_len = snprintf(strbuf, sizeof(strbuf),
- "\nNo errors detected\n");
- if (strbuf_len >= sizeof(strbuf))
- strbuf_len = sizeof(strbuf) - 1;
- total_size += strbuf_len;
-#endif
-
- if (bootinfo)
- bootinfo_size = strlen(bootinfo) + strlen(bootinfo_label);
- total_size += bootinfo_size;
-
- if (dest == NULL) {
- dest = kmalloc(total_size, GFP_KERNEL);
- if (dest == NULL) {
- printk(KERN_ERR
- "ram_console: failed to allocate buffer\n");
- return;
- }
- }
-
- ram_console_old_log = dest;
- ram_console_old_log_size = total_size;
- memcpy(ram_console_old_log,
- &buffer->data[buffer->start], buffer->size - buffer->start);
- memcpy(ram_console_old_log + buffer->size - buffer->start,
- &buffer->data[0], buffer->start);
- ptr = ram_console_old_log + old_log_size;
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
- memcpy(ptr, strbuf, strbuf_len);
- ptr += strbuf_len;
-#endif
- if (bootinfo) {
- memcpy(ptr, bootinfo_label, strlen(bootinfo_label));
- ptr += strlen(bootinfo_label);
- memcpy(ptr, bootinfo, bootinfo_size);
- ptr += bootinfo_size;
- }
-}
-
-static int __init ram_console_init(struct ram_console_buffer *buffer,
- size_t buffer_size, const char *bootinfo,
- char *old_buf)
+static int __init ram_console_probe(struct platform_device *pdev)
{
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
- int numerr;
- uint8_t *par;
-#endif
- ram_console_buffer = buffer;
- ram_console_buffer_size =
- buffer_size - sizeof(struct ram_console_buffer);
-
- if (ram_console_buffer_size > buffer_size) {
- pr_err("ram_console: buffer %p, invalid size %zu, "
- "datasize %zu\n", buffer, buffer_size,
- ram_console_buffer_size);
- return 0;
- }
-
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
- ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size,
- ECC_BLOCK_SIZE) + 1) * ECC_SIZE;
-
- if (ram_console_buffer_size > buffer_size) {
- pr_err("ram_console: buffer %p, invalid size %zu, "
- "non-ecc datasize %zu\n",
- buffer, buffer_size, ram_console_buffer_size);
- return 0;
- }
-
- ram_console_par_buffer = buffer->data + ram_console_buffer_size;
-
-
- /* first consecutive root is 0
- * primitive element to generate roots = 1
- */
- ram_console_rs_decoder = init_rs(ECC_SYMSIZE, ECC_POLY, 0, 1, ECC_SIZE);
- if (ram_console_rs_decoder == NULL) {
- printk(KERN_INFO "ram_console: init_rs failed\n");
- return 0;
- }
-
- ram_console_corrected_bytes = 0;
- ram_console_bad_blocks = 0;
+ struct ram_console_platform_data *pdata = pdev->dev.platform_data;
+ struct persistent_ram_zone *prz;
- par = ram_console_par_buffer +
- DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
+ prz = persistent_ram_init_ringbuffer(&pdev->dev, true);
+ if (IS_ERR(prz))
+ return PTR_ERR(prz);
- numerr = ram_console_decode_rs8(buffer, sizeof(*buffer), par);
- if (numerr > 0) {
- printk(KERN_INFO "ram_console: error in header, %d\n", numerr);
- ram_console_corrected_bytes += numerr;
- } else if (numerr < 0) {
- printk(KERN_INFO
- "ram_console: uncorrectable error in header\n");
- ram_console_bad_blocks++;
- }
-#endif
- if (buffer->sig == RAM_CONSOLE_SIG) {
- if (buffer->size > ram_console_buffer_size
- || buffer->start > buffer->size)
- printk(KERN_INFO "ram_console: found existing invalid "
- "buffer, size %d, start %d\n",
- buffer->size, buffer->start);
- else {
- printk(KERN_INFO "ram_console: found existing buffer, "
- "size %d, start %d\n",
- buffer->size, buffer->start);
- ram_console_save_old(buffer, bootinfo, old_buf);
- }
- } else {
- printk(KERN_INFO "ram_console: no valid data in buffer "
- "(sig = 0x%08x)\n", buffer->sig);
+ if (pdata) {
+ bootinfo = kstrdup(pdata->bootinfo, GFP_KERNEL);
+ if (bootinfo)
+ bootinfo_size = strlen(bootinfo);
}
- buffer->sig = RAM_CONSOLE_SIG;
- buffer->start = 0;
- buffer->size = 0;
+ ram_console_zone = prz;
+ ram_console.data = prz;
register_console(&ram_console);
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
- console_verbose();
-#endif
- return 0;
-}
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
-static int __init ram_console_early_init(void)
-{
- return ram_console_init((struct ram_console_buffer *)
- CONFIG_ANDROID_RAM_CONSOLE_EARLY_ADDR,
- CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE,
- NULL,
- ram_console_old_log_init_buffer);
-}
-#else
-static int ram_console_driver_probe(struct platform_device *pdev)
-{
- struct resource *res = pdev->resource;
- size_t start;
- size_t buffer_size;
- void *buffer;
- const char *bootinfo = NULL;
- struct ram_console_platform_data *pdata = pdev->dev.platform_data;
-
- if (res == NULL || pdev->num_resources != 1 ||
- !(res->flags & IORESOURCE_MEM)) {
- printk(KERN_ERR "ram_console: invalid resource, %p %d flags "
- "%lx\n", res, pdev->num_resources, res ? res->flags : 0);
- return -ENXIO;
- }
- buffer_size = res->end - res->start + 1;
- start = res->start;
- printk(KERN_INFO "ram_console: got buffer at %zx, size %zx\n",
- start, buffer_size);
- buffer = ioremap(res->start, buffer_size);
- if (buffer == NULL) {
- printk(KERN_ERR "ram_console: failed to map memory\n");
- return -ENOMEM;
- }
-
- if (pdata)
- bootinfo = pdata->bootinfo;
-
- return ram_console_init(buffer, buffer_size, bootinfo, NULL/* allocate */);
+ return 0;
}
static struct platform_driver ram_console_driver = {
- .probe = ram_console_driver_probe,
.driver = {
.name = "ram_console",
},
@@ -376,10 +82,11 @@ static struct platform_driver ram_console_driver = {
static int __init ram_console_module_init(void)
{
- int err;
- err = platform_driver_register(&ram_console_driver);
- return err;
+ return platform_driver_probe(&ram_console_driver, ram_console_probe);
}
+
+#ifndef CONFIG_PRINTK
+#define dmesg_restrict 0
#endif
static ssize_t ram_console_read_old(struct file *file, char __user *buf,
@@ -387,14 +94,52 @@ static ssize_t ram_console_read_old(struct file *file, char __user *buf,
{
loff_t pos = *offset;
ssize_t count;
+ struct persistent_ram_zone *prz = ram_console_zone;
+ size_t old_log_size = persistent_ram_old_size(prz);
+ const char *old_log = persistent_ram_old(prz);
+ char *str;
+ int ret;
+
+ if (dmesg_restrict && !capable(CAP_SYSLOG))
+ return -EPERM;
+
+ /* Main last_kmsg log */
+ if (pos < old_log_size) {
+ count = min(len, (size_t)(old_log_size - pos));
+ if (copy_to_user(buf, old_log + pos, count))
+ return -EFAULT;
+ goto out;
+ }
- if (pos >= ram_console_old_log_size)
- return 0;
+ /* ECC correction notice */
+ pos -= old_log_size;
+ count = persistent_ram_ecc_string(prz, NULL, 0);
+ if (pos < count) {
+ str = kmalloc(count, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+ persistent_ram_ecc_string(prz, str, count + 1);
+ count = min(len, (size_t)(count - pos));
+ ret = copy_to_user(buf, str + pos, count);
+ kfree(str);
+ if (ret)
+ return -EFAULT;
+ goto out;
+ }
+
+ /* Boot info passed through pdata */
+ pos -= count;
+ if (pos < bootinfo_size) {
+ count = min(len, (size_t)(bootinfo_size - pos));
+ if (copy_to_user(buf, bootinfo + pos, count))
+ return -EFAULT;
+ goto out;
+ }
- count = min(len, (size_t)(ram_console_old_log_size - pos));
- if (copy_to_user(buf, ram_console_old_log + pos, count))
- return -EFAULT;
+ /* EOF */
+ return 0;
+out:
*offset += count;
return count;
}
@@ -407,37 +152,28 @@ static const struct file_operations ram_console_file_ops = {
static int __init ram_console_late_init(void)
{
struct proc_dir_entry *entry;
+ struct persistent_ram_zone *prz = ram_console_zone;
- if (ram_console_old_log == NULL)
+ if (!prz)
return 0;
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
- ram_console_old_log = kmalloc(ram_console_old_log_size, GFP_KERNEL);
- if (ram_console_old_log == NULL) {
- printk(KERN_ERR
- "ram_console: failed to allocate buffer for old log\n");
- ram_console_old_log_size = 0;
+
+ if (persistent_ram_old_size(prz) == 0)
return 0;
- }
- memcpy(ram_console_old_log,
- ram_console_old_log_init_buffer, ram_console_old_log_size);
-#endif
+
entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL);
if (!entry) {
printk(KERN_ERR "ram_console: failed to create proc entry\n");
- kfree(ram_console_old_log);
- ram_console_old_log = NULL;
+ persistent_ram_free_old(prz);
return 0;
}
entry->proc_fops = &ram_console_file_ops;
- entry->size = ram_console_old_log_size;
+ entry->size = persistent_ram_old_size(prz) +
+ persistent_ram_ecc_string(prz, NULL, 0) +
+ bootinfo_size;
+
return 0;
}
-#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
-console_initcall(ram_console_early_init);
-#else
-postcore_initcall(ram_console_module_init);
-#endif
late_initcall(ram_console_late_init);
-
+postcore_initcall(ram_console_module_init);
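
ram_console_read_old() now serves three back-to-back regions (the saved log, the ECC summary string, the boot info) as one /proc/last_kmsg file by shifting pos into whichever region it lands in. The same cursor arithmetic in a self-contained form, with invented region contents:

#include <stdio.h>
#include <string.h>

/* read up to len bytes at pos from the virtual concatenation a|b|c */
static size_t read_concat(char *dst, size_t len, size_t pos,
			  const char *a, const char *b, const char *c)
{
	const char *region[3] = { a, b, c };
	size_t i;

	for (i = 0; i < 3; i++) {
		size_t rlen = strlen(region[i]);

		if (pos < rlen) {
			size_t n = len < rlen - pos ? len : rlen - pos;

			memcpy(dst, region[i] + pos, n);
			return n;
		}
		pos -= rlen;	/* fall through to the next region */
	}
	return 0;		/* EOF */
}

int main(void)
{
	char buf[8] = { 0 };
	size_t n = read_concat(buf, 4, 9, "old log\n", "no ecc\n", "boot\n");

	printf("%zu \"%.4s\"\n", n, buf);	/* 4 "o ec": second region */
	return 0;
}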
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
index a64481c3e86d..bc723eff11af 100644
--- a/drivers/staging/android/timed_gpio.c
+++ b/drivers/staging/android/timed_gpio.c
@@ -29,9 +29,9 @@ struct timed_gpio_data {
struct timed_output_dev dev;
struct hrtimer timer;
spinlock_t lock;
- unsigned gpio;
- int max_timeout;
- u8 active_low;
+ unsigned gpio;
+ int max_timeout;
+ u8 active_low;
};
static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
diff --git a/drivers/staging/android/timed_gpio.h b/drivers/staging/android/timed_gpio.h
index a0e15f8be3f7..d29e169d7ebe 100644
--- a/drivers/staging/android/timed_gpio.h
+++ b/drivers/staging/android/timed_gpio.h
@@ -20,13 +20,13 @@
struct timed_gpio {
const char *name;
- unsigned gpio;
+ unsigned gpio;
int max_timeout;
- u8 active_low;
+ u8 active_low;
};
struct timed_gpio_platform_data {
- int num_gpios;
+ int num_gpios;
struct timed_gpio *gpios;
};
diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
index 1df9586f2730..83549d9cfefc 100644
--- a/drivers/staging/asus_oled/asus_oled.c
+++ b/drivers/staging/asus_oled/asus_oled.c
@@ -159,7 +159,6 @@ static void setup_packet_header(struct asus_oled_packet *packet, char flags,
static void enable_oled(struct asus_oled_dev *odev, uint8_t enabl)
{
- int a;
int retval;
int act_len;
struct asus_oled_packet *packet;
@@ -178,17 +177,15 @@ static void enable_oled(struct asus_oled_dev *odev, uint8_t enabl)
else
packet->bitmap[0] = 0xae;
- for (a = 0; a < 1; a++) {
- retval = usb_bulk_msg(odev->udev,
- usb_sndbulkpipe(odev->udev, 2),
- packet,
- sizeof(struct asus_oled_header) + 1,
- &act_len,
- -1);
+ retval = usb_bulk_msg(odev->udev,
+ usb_sndbulkpipe(odev->udev, 2),
+ packet,
+ sizeof(struct asus_oled_header) + 1,
+ &act_len,
+ -1);
- if (retval)
- dev_dbg(&odev->udev->dev, "retval = %d\n", retval);
- }
+ if (retval)
+ dev_dbg(&odev->udev->dev, "retval = %d\n", retval);
odev->enabled = enabl;
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 179707b5e7c7..cf3059216958 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -728,14 +728,10 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (IoBuffer.InputLength > MAX_CNTL_PKT_SIZE)
return -EINVAL;
- pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL);
- if (!pvBuffer)
- return -ENOMEM;
-
- if (copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
- kfree(pvBuffer);
- return -EFAULT;
- }
+ pvBuffer = memdup_user(IoBuffer.InputBuffer,
+ IoBuffer.InputLength);
+ if (IS_ERR(pvBuffer))
+ return PTR_ERR(pvBuffer);
down(&Adapter->LowPowerModeSync);
Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue,
@@ -1140,15 +1136,10 @@ cntrlEnd:
if (IoBuffer.InputLength < sizeof(ULONG) * 2)
return -EINVAL;
- pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL);
- if (!pvBuffer)
- return -ENOMEM;
-
- /* Get WrmBuffer structure */
- if (copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
- kfree(pvBuffer);
- return -EFAULT;
- }
+ pvBuffer = memdup_user(IoBuffer.InputBuffer,
+ IoBuffer.InputLength);
+ if (IS_ERR(pvBuffer))
+ return PTR_ERR(pvBuffer);
pBulkBuffer = (PBULKWRM_BUFFER)pvBuffer;
@@ -1302,20 +1293,18 @@ cntrlEnd:
/*
* Deny the access if the offset crosses the cal area limit.
*/
+ if (stNVMReadWrite.uiNumBytes > Adapter->uiNVMDSDSize)
+ return STATUS_FAILURE;
- if ((stNVMReadWrite.uiOffset + stNVMReadWrite.uiNumBytes) > Adapter->uiNVMDSDSize) {
+ if (stNVMReadWrite.uiOffset > Adapter->uiNVMDSDSize - stNVMReadWrite.uiNumBytes) {
/* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Can't allow access beyond NVM Size: 0x%x 0x%x\n", stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes); */
return STATUS_FAILURE;
}
- pReadData = kzalloc(stNVMReadWrite.uiNumBytes, GFP_KERNEL);
- if (!pReadData)
- return -ENOMEM;
-
- if (copy_from_user(pReadData, stNVMReadWrite.pBuffer, stNVMReadWrite.uiNumBytes)) {
- kfree(pReadData);
- return -EFAULT;
- }
+ pReadData = memdup_user(stNVMReadWrite.pBuffer,
+ stNVMReadWrite.uiNumBytes);
+ if (IS_ERR(pReadData))
+ return PTR_ERR(pReadData);
do_gettimeofday(&tv0);
if (IOCTL_BCM_NVM_READ == cmd) {
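
Besides folding kmalloc()+copy_from_user() into memdup_user(), the hunk above reworks the NVM bounds check so it cannot be defeated by integer overflow: uiOffset + uiNumBytes can wrap past the limit, so the size is rejected first and the offset is then compared against size - uiNumBytes, which cannot wrap. A host-side sketch of the safe form, with an invented 4 KiB limit:

#include <limits.h>
#include <stdio.h>

static int in_bounds(unsigned int off, unsigned int nbytes, unsigned int size)
{
	/* overflow-safe: never forms off + nbytes */
	if (nbytes > size)
		return 0;
	return off <= size - nbytes;
}

int main(void)
{
	unsigned int size = 4096;

	/* off + nbytes wraps to 8, so a naive (off + nbytes) > size
	 * check would let this request through */
	printf("%d\n", in_bounds(UINT_MAX - 7, 16, size));	/* 0 */
	printf("%d\n", in_bounds(4080, 16, size));		/* 1 */
	return 0;
}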
diff --git a/drivers/staging/bcm/CmHost.c b/drivers/staging/bcm/CmHost.c
index c0ee95a71343..7e38af5e1765 100644
--- a/drivers/staging/bcm/CmHost.c
+++ b/drivers/staging/bcm/CmHost.c
@@ -1,431 +1,359 @@
/************************************************************
-* CMHOST.C
-* This file contains the routines for handling Connection
-* Management.
-************************************************************/
+ * CMHOST.C
+ * This file contains the routines for handling Connection
+ * Management.
+ ************************************************************/
-//#define CONN_MSG
+/* #define CONN_MSG */
#include "headers.h"
-typedef enum _E_CLASSIFIER_ACTION
-{
+enum E_CLASSIFIER_ACTION {
eInvalidClassifierAction,
eAddClassifier,
eReplaceClassifier,
eDeleteClassifier
-}E_CLASSIFIER_ACTION;
+};
-static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter,B_UINT16 tid);
+static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter, B_UINT16 tid);
/************************************************************
-* Function - SearchSfid
-*
-* Description - This routinue would search QOS queues having
-* specified SFID as input parameter.
-*
-* Parameters - Adapter: Pointer to the Adapter structure
-* uiSfid : Given SFID for matching
-*
-* Returns - Queue index for this SFID(If matched)
- Else Invalid Queue Index(If Not matched)
-************************************************************/
-INT SearchSfid(PMINI_ADAPTER Adapter,UINT uiSfid)
+ * Function - SearchSfid
+ *
+ * Description - This routine searches the QOS queues for the
+ * SFID given as the input parameter.
+ *
+ * Parameters - Adapter: Pointer to the Adapter structure
+ * uiSfid : Given SFID for matching
+ *
+ * Returns - Queue index for this SFID(If matched)
+ * Else Invalid Queue Index(If Not matched)
+ ************************************************************/
+int SearchSfid(PMINI_ADAPTER Adapter, UINT uiSfid)
{
- INT iIndex=0;
- for(iIndex=(NO_OF_QUEUES-1); iIndex>=0; iIndex--)
- if(Adapter->PackInfo[iIndex].ulSFID==uiSfid)
- return iIndex;
+ int i;
+
+ for (i = (NO_OF_QUEUES-1); i >= 0; i--)
+ if (Adapter->PackInfo[i].ulSFID == uiSfid)
+ return i;
+
return NO_OF_QUEUES+1;
}
/***************************************************************
-* Function - SearchFreeSfid
-*
-* Description - This routinue would search Free available SFID.
-*
-* Parameter - Adapter: Pointer to the Adapter structure
-*
-* Returns - Queue index for the free SFID
-* Else returns Invalid Index.
-****************************************************************/
-static INT SearchFreeSfid(PMINI_ADAPTER Adapter)
+ * Function - SearchFreeSfid
+ *
+ * Description - This routine searches for a free SFID.
+ *
+ * Parameter - Adapter: Pointer to the Adapter structure
+ *
+ * Returns - Queue index for the free SFID
+ * Else returns Invalid Index.
+ ****************************************************************/
+static int SearchFreeSfid(PMINI_ADAPTER Adapter)
{
- UINT uiIndex=0;
+ int i;
+
+ for (i = 0; i < (NO_OF_QUEUES-1); i++)
+ if (Adapter->PackInfo[i].ulSFID == 0)
+ return i;
- for(uiIndex=0; uiIndex < (NO_OF_QUEUES-1); uiIndex++)
- if(Adapter->PackInfo[uiIndex].ulSFID==0)
- return uiIndex;
return NO_OF_QUEUES+1;
}
/*
-Function: SearchClsid
-Description: This routinue would search Classifier having specified ClassifierID as input parameter
-Input parameters: PMINI_ADAPTER Adapter - Adapter Context
- unsigned int uiSfid - The SF in which the classifier is to searched
- B_UINT16 uiClassifierID - The classifier ID to be searched
-Return: int :Classifier table index of matching entry
-*/
-
-static int SearchClsid(PMINI_ADAPTER Adapter,ULONG ulSFID,B_UINT16 uiClassifierID)
+ * Function: SearchClsid
+ * Description: This routine searches for the Classifier matching the ClassifierID given as input parameter
+ * Input parameters: PMINI_ADAPTER Adapter - Adapter Context
+ * unsigned int uiSfid - The SF in which the classifier is to be searched
+ * B_UINT16 uiClassifierID - The classifier ID to be searched
+ * Return: int :Classifier table index of matching entry
+ */
+static int SearchClsid(PMINI_ADAPTER Adapter, ULONG ulSFID, B_UINT16 uiClassifierID)
{
- unsigned int uiClassifierIndex = 0;
- for(uiClassifierIndex=0;uiClassifierIndex<MAX_CLASSIFIERS;uiClassifierIndex++)
- {
- if((Adapter->astClassifierTable[uiClassifierIndex].bUsed) &&
- (Adapter->astClassifierTable[uiClassifierIndex].uiClassifierRuleIndex == uiClassifierID)&&
- (Adapter->astClassifierTable[uiClassifierIndex].ulSFID == ulSFID))
- return uiClassifierIndex;
+ int i;
+
+ for (i = 0; i < MAX_CLASSIFIERS; i++) {
+ if ((Adapter->astClassifierTable[i].bUsed) &&
+ (Adapter->astClassifierTable[i].uiClassifierRuleIndex == uiClassifierID) &&
+ (Adapter->astClassifierTable[i].ulSFID == ulSFID))
+ return i;
}
+
return MAX_CLASSIFIERS+1;
}
-/**
-@ingroup ctrl_pkt_functions
-This routinue would search Free available Classifier entry in classifier table.
-@return free Classifier Entry index in classifier table for specified SF
-*/
-static int SearchFreeClsid(PMINI_ADAPTER Adapter /**Adapter Context*/
- )
+/*
+ * @ingroup ctrl_pkt_functions
+ * This routine searches for a free Classifier entry in the classifier table.
+ * @return free Classifier Entry index in classifier table for specified SF
+ */
+static int SearchFreeClsid(PMINI_ADAPTER Adapter /**Adapter Context*/)
{
- unsigned int uiClassifierIndex = 0;
- for(uiClassifierIndex=0;uiClassifierIndex<MAX_CLASSIFIERS;uiClassifierIndex++)
- {
- if(!Adapter->astClassifierTable[uiClassifierIndex].bUsed)
- return uiClassifierIndex;
+ int i;
+
+ for (i = 0; i < MAX_CLASSIFIERS; i++) {
+ if (!Adapter->astClassifierTable[i].bUsed)
+ return i;
}
+
return MAX_CLASSIFIERS+1;
}
static VOID deleteSFBySfid(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex)
{
- //deleting all the packet held in the SF
- flush_queue(Adapter,uiSearchRuleIndex);
+ /* deleting all the packets held in the SF */
+ flush_queue(Adapter, uiSearchRuleIndex);
- //Deleting the all classifiers for this SF
- DeleteAllClassifiersForSF(Adapter,uiSearchRuleIndex);
+ /* Deleting the all classifiers for this SF */
+ DeleteAllClassifiersForSF(Adapter, uiSearchRuleIndex);
- //Resetting only MIBS related entries in the SF
+ /* Resetting only MIBS related entries in the SF */
memset((PVOID)&Adapter->PackInfo[uiSearchRuleIndex], 0, sizeof(S_MIBS_SERVICEFLOW_TABLE));
}
static inline VOID
-CopyIpAddrToClassifier(S_CLASSIFIER_RULE *pstClassifierEntry ,
- B_UINT8 u8IpAddressLen , B_UINT8 *pu8IpAddressMaskSrc ,
- BOOLEAN bIpVersion6 , E_IPADDR_CONTEXT eIpAddrContext)
+CopyIpAddrToClassifier(S_CLASSIFIER_RULE *pstClassifierEntry,
+ B_UINT8 u8IpAddressLen, B_UINT8 *pu8IpAddressMaskSrc,
+ BOOLEAN bIpVersion6, E_IPADDR_CONTEXT eIpAddrContext)
{
- UINT ucLoopIndex=0;
- UINT nSizeOfIPAddressInBytes = IP_LENGTH_OF_ADDRESS;
- UCHAR *ptrClassifierIpAddress = NULL;
- UCHAR *ptrClassifierIpMask = NULL;
- PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
+ int i = 0;
+ UINT nSizeOfIPAddressInBytes = IP_LENGTH_OF_ADDRESS;
+ UCHAR *ptrClassifierIpAddress = NULL;
+ UCHAR *ptrClassifierIpMask = NULL;
+ PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if(bIpVersion6)
- {
+ if (bIpVersion6)
nSizeOfIPAddressInBytes = IPV6_ADDRESS_SIZEINBYTES;
- }
- //Destination Ip Address
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Ip Address Range Length:0x%X ",
- u8IpAddressLen);
- if((bIpVersion6?(IPV6_ADDRESS_SIZEINBYTES * MAX_IP_RANGE_LENGTH * 2):
- (TOTAL_MASKED_ADDRESS_IN_BYTES)) >= u8IpAddressLen)
- {
+
+ /* Destination Ip Address */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Ip Address Range Length:0x%X ", u8IpAddressLen);
+ if ((bIpVersion6 ? (IPV6_ADDRESS_SIZEINBYTES * MAX_IP_RANGE_LENGTH * 2) :
+ (TOTAL_MASKED_ADDRESS_IN_BYTES)) >= u8IpAddressLen) {
/*
- //checking both the mask and address togethor in Classification.
- //So length will be : TotalLengthInBytes/nSizeOfIPAddressInBytes * 2
- //(nSizeOfIPAddressInBytes for address and nSizeOfIPAddressInBytes for mask)
- */
- if(eIpAddrContext == eDestIpAddress)
- {
- pstClassifierEntry->ucIPDestinationAddressLength =
- u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
- if(bIpVersion6)
- {
- ptrClassifierIpAddress =
- pstClassifierEntry->stDestIpAddress.ucIpv6Address;
- ptrClassifierIpMask =
- pstClassifierEntry->stDestIpAddress.ucIpv6Mask;
- }
- else
- {
- ptrClassifierIpAddress =
- pstClassifierEntry->stDestIpAddress.ucIpv4Address;
- ptrClassifierIpMask =
- pstClassifierEntry->stDestIpAddress.ucIpv4Mask;
- }
- }
- else if(eIpAddrContext == eSrcIpAddress)
- {
- pstClassifierEntry->ucIPSourceAddressLength =
- u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
- if(bIpVersion6)
- {
- ptrClassifierIpAddress =
- pstClassifierEntry->stSrcIpAddress.ucIpv6Address;
- ptrClassifierIpMask =
- pstClassifierEntry->stSrcIpAddress.ucIpv6Mask;
+ * checking both the mask and address together in Classification.
+ * So length will be : TotalLengthInBytes/nSizeOfIPAddressInBytes * 2
+ * (nSizeOfIPAddressInBytes for address and nSizeOfIPAddressInBytes for mask)
+ */
+ if (eIpAddrContext == eDestIpAddress) {
+ pstClassifierEntry->ucIPDestinationAddressLength = u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
+ if (bIpVersion6) {
+ ptrClassifierIpAddress = pstClassifierEntry->stDestIpAddress.ucIpv6Address;
+ ptrClassifierIpMask = pstClassifierEntry->stDestIpAddress.ucIpv6Mask;
+ } else {
+ ptrClassifierIpAddress = pstClassifierEntry->stDestIpAddress.ucIpv4Address;
+ ptrClassifierIpMask = pstClassifierEntry->stDestIpAddress.ucIpv4Mask;
}
- else
- {
- ptrClassifierIpAddress =
- pstClassifierEntry->stSrcIpAddress.ucIpv4Address;
- ptrClassifierIpMask =
- pstClassifierEntry->stSrcIpAddress.ucIpv4Mask;
+ } else if (eIpAddrContext == eSrcIpAddress) {
+ pstClassifierEntry->ucIPSourceAddressLength = u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
+ if (bIpVersion6) {
+ ptrClassifierIpAddress = pstClassifierEntry->stSrcIpAddress.ucIpv6Address;
+ ptrClassifierIpMask = pstClassifierEntry->stSrcIpAddress.ucIpv6Mask;
+ } else {
+ ptrClassifierIpAddress = pstClassifierEntry->stSrcIpAddress.ucIpv4Address;
+ ptrClassifierIpMask = pstClassifierEntry->stSrcIpAddress.ucIpv4Mask;
}
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Address Length:0x%X \n",
- pstClassifierEntry->ucIPDestinationAddressLength);
- while((u8IpAddressLen>= nSizeOfIPAddressInBytes) &&
- (ucLoopIndex < MAX_IP_RANGE_LENGTH))
- {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Address Length:0x%X\n", pstClassifierEntry->ucIPDestinationAddressLength);
+ while ((u8IpAddressLen >= nSizeOfIPAddressInBytes) && (i < MAX_IP_RANGE_LENGTH)) {
memcpy(ptrClassifierIpAddress +
- (ucLoopIndex * nSizeOfIPAddressInBytes),
- (pu8IpAddressMaskSrc+(ucLoopIndex*nSizeOfIPAddressInBytes*2)),
+ (i * nSizeOfIPAddressInBytes),
+ (pu8IpAddressMaskSrc+(i*nSizeOfIPAddressInBytes*2)),
nSizeOfIPAddressInBytes);
- if(!bIpVersion6)
- {
- if(eIpAddrContext == eSrcIpAddress)
- {
- pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[ucLoopIndex]=
- ntohl(pstClassifierEntry->stSrcIpAddress.
- ulIpv4Addr[ucLoopIndex]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Src Ip Address:0x%luX ",pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[ucLoopIndex]);
- }
- else if(eIpAddrContext == eDestIpAddress)
- {
- pstClassifierEntry->stDestIpAddress.ulIpv4Addr[ucLoopIndex]= ntohl(pstClassifierEntry->stDestIpAddress.
- ulIpv4Addr[ucLoopIndex]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Dest Ip Address:0x%luX ",pstClassifierEntry->stDestIpAddress.ulIpv4Addr[ucLoopIndex]);
+
+ if (!bIpVersion6) {
+ if (eIpAddrContext == eSrcIpAddress) {
+ pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[i] = ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Src Ip Address:0x%luX ",
+ pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[i]);
+ } else if (eIpAddrContext == eDestIpAddress) {
+ pstClassifierEntry->stDestIpAddress.ulIpv4Addr[i] = ntohl(pstClassifierEntry->stDestIpAddress.ulIpv4Addr[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dest Ip Address:0x%luX ",
+ pstClassifierEntry->stDestIpAddress.ulIpv4Addr[i]);
}
}
- u8IpAddressLen-=nSizeOfIPAddressInBytes;
- if(u8IpAddressLen >= nSizeOfIPAddressInBytes)
- {
+ u8IpAddressLen -= nSizeOfIPAddressInBytes;
+ if (u8IpAddressLen >= nSizeOfIPAddressInBytes) {
memcpy(ptrClassifierIpMask +
- (ucLoopIndex * nSizeOfIPAddressInBytes),
+ (i * nSizeOfIPAddressInBytes),
(pu8IpAddressMaskSrc+nSizeOfIPAddressInBytes +
- (ucLoopIndex*nSizeOfIPAddressInBytes*2)),
+ (i*nSizeOfIPAddressInBytes*2)),
nSizeOfIPAddressInBytes);
- if(!bIpVersion6)
- {
- if(eIpAddrContext == eSrcIpAddress)
- {
- pstClassifierEntry->stSrcIpAddress.
- ulIpv4Mask[ucLoopIndex]=
- ntohl(pstClassifierEntry->stSrcIpAddress.
- ulIpv4Mask[ucLoopIndex]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Src Ip Mask Address:0x%luX ",pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[ucLoopIndex]);
- }
- else if(eIpAddrContext == eDestIpAddress)
- {
- pstClassifierEntry->stDestIpAddress.
- ulIpv4Mask[ucLoopIndex] =
- ntohl(pstClassifierEntry->stDestIpAddress.
- ulIpv4Mask[ucLoopIndex]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Dest Ip Mask Address:0x%luX ",pstClassifierEntry->stDestIpAddress.ulIpv4Mask[ucLoopIndex]);
+
+ if (!bIpVersion6) {
+ if (eIpAddrContext == eSrcIpAddress) {
+ pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[i] =
+ ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Src Ip Mask Address:0x%luX ",
+ pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[i]);
+ } else if (eIpAddrContext == eDestIpAddress) {
+ pstClassifierEntry->stDestIpAddress.ulIpv4Mask[i] =
+ ntohl(pstClassifierEntry->stDestIpAddress.ulIpv4Mask[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dest Ip Mask Address:0x%luX ",
+ pstClassifierEntry->stDestIpAddress.ulIpv4Mask[i]);
}
}
- u8IpAddressLen-=nSizeOfIPAddressInBytes;
- }
- if(0==u8IpAddressLen)
- {
- pstClassifierEntry->bDestIpValid=TRUE;
+ u8IpAddressLen -= nSizeOfIPAddressInBytes;
}
- ucLoopIndex++;
+ if (u8IpAddressLen == 0)
+ pstClassifierEntry->bDestIpValid = TRUE;
+
+ i++;
}
- if(bIpVersion6)
- {
- //Restore EndianNess of Struct
- for(ucLoopIndex =0 ; ucLoopIndex < MAX_IP_RANGE_LENGTH * 4 ;
- ucLoopIndex++)
- {
- if(eIpAddrContext == eSrcIpAddress)
- {
- pstClassifierEntry->stSrcIpAddress.ulIpv6Addr[ucLoopIndex]=
- ntohl(pstClassifierEntry->stSrcIpAddress.
- ulIpv6Addr[ucLoopIndex]);
- pstClassifierEntry->stSrcIpAddress.ulIpv6Mask[ucLoopIndex]= ntohl(pstClassifierEntry->stSrcIpAddress.
- ulIpv6Mask[ucLoopIndex]);
- }
- else if(eIpAddrContext == eDestIpAddress)
- {
- pstClassifierEntry->stDestIpAddress.ulIpv6Addr[ucLoopIndex]= ntohl(pstClassifierEntry->stDestIpAddress.
- ulIpv6Addr[ucLoopIndex]);
- pstClassifierEntry->stDestIpAddress.ulIpv6Mask[ucLoopIndex]= ntohl(pstClassifierEntry->stDestIpAddress.
- ulIpv6Mask[ucLoopIndex]);
+ if (bIpVersion6) {
+ /* Restore endianness of the struct */
+ for (i = 0; i < MAX_IP_RANGE_LENGTH * 4; i++) {
+ if (eIpAddrContext == eSrcIpAddress) {
+ pstClassifierEntry->stSrcIpAddress.ulIpv6Addr[i] = ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv6Addr[i]);
+ pstClassifierEntry->stSrcIpAddress.ulIpv6Mask[i] = ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv6Mask[i]);
+ } else if (eIpAddrContext == eDestIpAddress) {
+ pstClassifierEntry->stDestIpAddress.ulIpv6Addr[i] = ntohl(pstClassifierEntry->stDestIpAddress.ulIpv6Addr[i]);
+ pstClassifierEntry->stDestIpAddress.ulIpv6Mask[i] = ntohl(pstClassifierEntry->stDestIpAddress.ulIpv6Mask[i]);
}
}
}
}
}
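
The loop arithmetic above implies an interleaved TLV layout: each range entry carries one address of nSizeOfIPAddressInBytes bytes immediately followed by a mask of the same width, so the entry count is u8IpAddressLen / (2 * size). A standalone sketch of that parse, with hypothetical names (the layout is inferred from the code, not from a spec):

#include <string.h>

static int copy_addr_mask_pairs(unsigned char *addr_out, unsigned char *mask_out,
				const unsigned char *src, unsigned int total_len,
				unsigned int addr_size, unsigned int max_ranges)
{
	unsigned int i = 0;

	while (total_len >= 2 * addr_size && i < max_ranges) {
		/* address bytes come first in each entry... */
		memcpy(addr_out + i * addr_size, src + i * 2 * addr_size, addr_size);
		/* ...immediately followed by the mask bytes */
		memcpy(mask_out + i * addr_size,
		       src + i * 2 * addr_size + addr_size, addr_size);
		total_len -= 2 * addr_size;
		i++;
	}
	return i;	/* number of address/mask pairs copied */
}

The driver additionally byte-swaps each copied word with ntohl() afterwards, since the TLV arrives in network order.
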
-
-void ClearTargetDSXBuffer(PMINI_ADAPTER Adapter,B_UINT16 TID,BOOLEAN bFreeAll)
+void ClearTargetDSXBuffer(PMINI_ADAPTER Adapter, B_UINT16 TID, BOOLEAN bFreeAll)
{
- ULONG ulIndex;
- for(ulIndex=0; ulIndex < Adapter->ulTotalTargetBuffersAvailable; ulIndex++)
- {
- if(Adapter->astTargetDsxBuffer[ulIndex].valid)
+ int i;
+
+ for (i = 0; i < Adapter->ulTotalTargetBuffersAvailable; i++) {
+ if (Adapter->astTargetDsxBuffer[i].valid)
continue;
- if ((bFreeAll) || (Adapter->astTargetDsxBuffer[ulIndex].tid == TID)){
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "ClearTargetDSXBuffer: found tid %d buffer cleared %lx\n",
- TID, Adapter->astTargetDsxBuffer[ulIndex].ulTargetDsxBuffer);
- Adapter->astTargetDsxBuffer[ulIndex].valid=1;
- Adapter->astTargetDsxBuffer[ulIndex].tid=0;
+
+ if ((bFreeAll) || (Adapter->astTargetDsxBuffer[i].tid == TID)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "ClearTargetDSXBuffer: found tid %d buffer cleared %lx\n",
+ TID, Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer);
+ Adapter->astTargetDsxBuffer[i].valid = 1;
+ Adapter->astTargetDsxBuffer[i].tid = 0;
Adapter->ulFreeTargetBufferCnt++;
- }
+ }
}
}
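
As a side note on the semantics here: .valid == 1 appears to mean the slot is free (clearing sets valid to 1 and bumps ulFreeTargetBufferCnt), so the loop skips already-free slots and releases the in-use ones matching the TID, or all of them when bFreeAll is set. A minimal sketch of that release-by-tag pattern with hypothetical types:

struct dsx_buffer {
	int valid;		/* nonzero: slot is free */
	unsigned short tid;	/* transaction id owning the slot */
};

static unsigned int release_buffers(struct dsx_buffer *buf, unsigned int count,
				    unsigned short tid, int free_all)
{
	unsigned int i, freed = 0;

	for (i = 0; i < count; i++) {
		if (buf[i].valid)
			continue;	/* already free */
		if (free_all || buf[i].tid == tid) {
			buf[i].valid = 1;	/* return slot to the pool */
			buf[i].tid = 0;
			freed++;
		}
	}
	return freed;
}
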
-/**
-@ingroup ctrl_pkt_functions
-copy classifier rule into the specified SF index
-*/
-static inline VOID CopyClassifierRuleToSF(PMINI_ADAPTER Adapter,stConvergenceSLTypes *psfCSType,UINT uiSearchRuleIndex,UINT nClassifierIndex)
+/*
+ * @ingroup ctrl_pkt_functions
+ * copy classifier rule into the specified SF index
+ */
+static inline VOID CopyClassifierRuleToSF(PMINI_ADAPTER Adapter, stConvergenceSLTypes *psfCSType, UINT uiSearchRuleIndex, UINT nClassifierIndex)
{
S_CLASSIFIER_RULE *pstClassifierEntry = NULL;
- //VOID *pvPhsContext = NULL;
- UINT ucLoopIndex=0;
- //UCHAR ucProtocolLength=0;
- //ULONG ulPhsStatus;
-
+ /* VOID *pvPhsContext = NULL; */
+ int i;
+ /* UCHAR ucProtocolLength=0; */
+ /* ULONG ulPhsStatus; */
- if(Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value == 0 ||
+ if (Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value == 0 ||
nClassifierIndex > (MAX_CLASSIFIERS-1))
return;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Storing Classifier Rule Index : %X",
+ ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Storing Classifier Rule Index : %X",ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex));
-
- if(nClassifierIndex > MAX_CLASSIFIERS-1)
+ if (nClassifierIndex > MAX_CLASSIFIERS-1)
return;
pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
- if(pstClassifierEntry)
- {
- //Store if Ipv6
- pstClassifierEntry->bIpv6Protocol =
- (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6)?TRUE:FALSE;
-
- //Destinaiton Port
- pstClassifierEntry->ucDestPortRangeLength=psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength/4;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Destination Port Range Length:0x%X ",pstClassifierEntry->ucDestPortRangeLength);
- if( MAX_PORT_RANGE >= psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength)
- {
- for(ucLoopIndex=0;ucLoopIndex<(pstClassifierEntry->ucDestPortRangeLength);ucLoopIndex++)
- {
- pstClassifierEntry->usDestPortRangeLo[ucLoopIndex] =
- *((PUSHORT)(psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange+ucLoopIndex));
- pstClassifierEntry->usDestPortRangeHi[ucLoopIndex] =
- *((PUSHORT)(psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange+2+ucLoopIndex));
- pstClassifierEntry->usDestPortRangeLo[ucLoopIndex]=ntohs(pstClassifierEntry->usDestPortRangeLo[ucLoopIndex]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Destination Port Range Lo:0x%X ",pstClassifierEntry->usDestPortRangeLo[ucLoopIndex]);
- pstClassifierEntry->usDestPortRangeHi[ucLoopIndex]=ntohs(pstClassifierEntry->usDestPortRangeHi[ucLoopIndex]);
+ if (pstClassifierEntry) {
+ /* Store if Ipv6 */
+ pstClassifierEntry->bIpv6Protocol = (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? TRUE : FALSE;
+
+ /* Destination Port */
+ pstClassifierEntry->ucDestPortRangeLength = psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength / 4;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Destination Port Range Length:0x%X ", pstClassifierEntry->ucDestPortRangeLength);
+
+ if (psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength <= MAX_PORT_RANGE) {
+ for (i = 0; i < (pstClassifierEntry->ucDestPortRangeLength); i++) {
+ pstClassifierEntry->usDestPortRangeLo[i] = *((PUSHORT)(psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange+i));
+ pstClassifierEntry->usDestPortRangeHi[i] =
+ *((PUSHORT)(psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange+2+i));
+ pstClassifierEntry->usDestPortRangeLo[i] = ntohs(pstClassifierEntry->usDestPortRangeLo[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Destination Port Range Lo:0x%X ",
+ pstClassifierEntry->usDestPortRangeLo[i]);
+ pstClassifierEntry->usDestPortRangeHi[i] = ntohs(pstClassifierEntry->usDestPortRangeHi[i]);
}
+ } else {
+ pstClassifierEntry->ucDestPortRangeLength = 0;
}
- else
- {
- pstClassifierEntry->ucDestPortRangeLength=0;
- }
- //Source Port
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Source Port Range Length:0x%X ",psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
- if(MAX_PORT_RANGE >=
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength)
- {
- pstClassifierEntry->ucSrcPortRangeLength =
- psfCSType->cCPacketClassificationRule.
- u8ProtocolSourcePortRangeLength/4;
- for(ucLoopIndex = 0; ucLoopIndex <
- (pstClassifierEntry->ucSrcPortRangeLength); ucLoopIndex++)
- {
- pstClassifierEntry->usSrcPortRangeLo[ucLoopIndex] =
- *((PUSHORT)(psfCSType->cCPacketClassificationRule.
- u8ProtocolSourcePortRange+ucLoopIndex));
- pstClassifierEntry->usSrcPortRangeHi[ucLoopIndex] =
- *((PUSHORT)(psfCSType->cCPacketClassificationRule.
- u8ProtocolSourcePortRange+2+ucLoopIndex));
- pstClassifierEntry->usSrcPortRangeLo[ucLoopIndex] =
- ntohs(pstClassifierEntry->usSrcPortRangeLo[ucLoopIndex]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Source Port Range Lo:0x%X ",pstClassifierEntry->usSrcPortRangeLo[ucLoopIndex]);
- pstClassifierEntry->usSrcPortRangeHi[ucLoopIndex]=ntohs(pstClassifierEntry->usSrcPortRangeHi[ucLoopIndex]);
+
+ /* Source Port */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Source Port Range Length:0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
+ if (psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength <= MAX_PORT_RANGE) {
+ pstClassifierEntry->ucSrcPortRangeLength = psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength/4;
+ for (i = 0; i < (pstClassifierEntry->ucSrcPortRangeLength); i++) {
+ pstClassifierEntry->usSrcPortRangeLo[i] =
+ *((PUSHORT)(psfCSType->cCPacketClassificationRule.
+ u8ProtocolSourcePortRange+i));
+ pstClassifierEntry->usSrcPortRangeHi[i] =
+ *((PUSHORT)(psfCSType->cCPacketClassificationRule.
+ u8ProtocolSourcePortRange+2+i));
+ pstClassifierEntry->usSrcPortRangeLo[i] =
+ ntohs(pstClassifierEntry->usSrcPortRangeLo[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Source Port Range Lo:0x%X ",
+ pstClassifierEntry->usSrcPortRangeLo[i]);
+ pstClassifierEntry->usSrcPortRangeHi[i] = ntohs(pstClassifierEntry->usSrcPortRangeHi[i]);
}
}
- //Destination Ip Address and Mask
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Ip Destination Parameters : ");
-
+ /* Destination Ip Address and Mask */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Ip Destination Parameters : ");
CopyIpAddrToClassifier(pstClassifierEntry,
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength,
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddress,
- (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6)?
- TRUE:FALSE, eDestIpAddress);
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength,
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddress,
+ (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ?
+ TRUE : FALSE, eDestIpAddress);
- //Source Ip Address and Mask
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Ip Source Parameters : ");
+ /* Source Ip Address and Mask */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Ip Source Parameters : ");
CopyIpAddrToClassifier(pstClassifierEntry,
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength,
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress,
- (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6)?TRUE:FALSE,
- eSrcIpAddress);
-
- //TOS
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"TOS Length:0x%X ",psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
- if(3 == psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength)
- {
- pstClassifierEntry->ucIPTypeOfServiceLength =
- psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength;
- pstClassifierEntry->ucTosLow =
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0];
- pstClassifierEntry->ucTosHigh =
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1];
- pstClassifierEntry->ucTosMask =
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2];
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength,
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress,
+ (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? TRUE : FALSE,
+ eSrcIpAddress);
+
+ /* TOS */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "TOS Length:0x%X ", psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
+ if (psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength == 3) {
+ pstClassifierEntry->ucIPTypeOfServiceLength = psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength;
+ pstClassifierEntry->ucTosLow = psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0];
+ pstClassifierEntry->ucTosHigh = psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1];
+ pstClassifierEntry->ucTosMask = psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2];
pstClassifierEntry->bTOSValid = TRUE;
}
- if(psfCSType->cCPacketClassificationRule.u8Protocol == 0)
- {
- //we didn't get protocol field filled in by the BS
- pstClassifierEntry->ucProtocolLength=0;
- }
- else
- {
- pstClassifierEntry->ucProtocolLength=1;// 1 valid protocol
+ if (psfCSType->cCPacketClassificationRule.u8Protocol == 0) {
+ /* we didn't get the protocol field filled in by the BS */
+ pstClassifierEntry->ucProtocolLength = 0;
+ } else {
+ pstClassifierEntry->ucProtocolLength = 1; /* 1 valid protocol */
}
- pstClassifierEntry->ucProtocol[0] =
- psfCSType->cCPacketClassificationRule.u8Protocol;
-
- pstClassifierEntry->u8ClassifierRulePriority =
- psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority;
-
- //store the classifier rule ID and set this classifier entry as valid
- pstClassifierEntry->ucDirection =
- Adapter->PackInfo[uiSearchRuleIndex].ucDirection;
- pstClassifierEntry->uiClassifierRuleIndex = ntohs(psfCSType->
- cCPacketClassificationRule.u16PacketClassificationRuleIndex);
- pstClassifierEntry->usVCID_Value =
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
- pstClassifierEntry->ulSFID =
- Adapter->PackInfo[uiSearchRuleIndex].ulSFID;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Search Index %d Dir: %d, Index: %d, Vcid: %d\n",
- uiSearchRuleIndex, pstClassifierEntry->ucDirection,
- pstClassifierEntry->uiClassifierRuleIndex,
- pstClassifierEntry->usVCID_Value);
-
- if(psfCSType->cCPacketClassificationRule.u8AssociatedPHSI)
- {
+ pstClassifierEntry->ucProtocol[0] = psfCSType->cCPacketClassificationRule.u8Protocol;
+ pstClassifierEntry->u8ClassifierRulePriority = psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority;
+
+ /* store the classifier rule ID and set this classifier entry as valid */
+ pstClassifierEntry->ucDirection = Adapter->PackInfo[uiSearchRuleIndex].ucDirection;
+ pstClassifierEntry->uiClassifierRuleIndex = ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
+ pstClassifierEntry->usVCID_Value = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
+ pstClassifierEntry->ulSFID = Adapter->PackInfo[uiSearchRuleIndex].ulSFID;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Search Index %d Dir: %d, Index: %d, Vcid: %d\n",
+ uiSearchRuleIndex, pstClassifierEntry->ucDirection,
+ pstClassifierEntry->uiClassifierRuleIndex,
+ pstClassifierEntry->usVCID_Value);
+
+ if (psfCSType->cCPacketClassificationRule.u8AssociatedPHSI)
pstClassifierEntry->u8AssociatedPHSI = psfCSType->cCPacketClassificationRule.u8AssociatedPHSI;
- }
- //Copy ETH CS Parameters
+ /* Copy ETH CS Parameters */
pstClassifierEntry->ucEthCSSrcMACLen = (psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddressLength);
- memcpy(pstClassifierEntry->au8EThCSSrcMAC,psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress,MAC_ADDRESS_SIZE);
- memcpy(pstClassifierEntry->au8EThCSSrcMACMask,psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress+MAC_ADDRESS_SIZE,MAC_ADDRESS_SIZE);
+ memcpy(pstClassifierEntry->au8EThCSSrcMAC, psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress, MAC_ADDRESS_SIZE);
+ memcpy(pstClassifierEntry->au8EThCSSrcMACMask, psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress + MAC_ADDRESS_SIZE, MAC_ADDRESS_SIZE);
pstClassifierEntry->ucEthCSDestMACLen = (psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
- memcpy(pstClassifierEntry->au8EThCSDestMAC,psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress,MAC_ADDRESS_SIZE);
- memcpy(pstClassifierEntry->au8EThCSDestMACMask,psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress+MAC_ADDRESS_SIZE,MAC_ADDRESS_SIZE);
+ memcpy(pstClassifierEntry->au8EThCSDestMAC, psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress, MAC_ADDRESS_SIZE);
+ memcpy(pstClassifierEntry->au8EThCSDestMACMask, psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress + MAC_ADDRESS_SIZE, MAC_ADDRESS_SIZE);
pstClassifierEntry->ucEtherTypeLen = (psfCSType->cCPacketClassificationRule.u8EthertypeLength);
- memcpy(pstClassifierEntry->au8EthCSEtherType,psfCSType->cCPacketClassificationRule.u8Ethertype,NUM_ETHERTYPE_BYTES);
+ memcpy(pstClassifierEntry->au8EthCSEtherType, psfCSType->cCPacketClassificationRule.u8Ethertype, NUM_ETHERTYPE_BYTES);
memcpy(pstClassifierEntry->usUserPriority, &psfCSType->cCPacketClassificationRule.u16UserPriority, 2);
pstClassifierEntry->usVLANID = ntohs(psfCSType->cCPacketClassificationRule.u16VLANID);
pstClassifierEntry->usValidityBitMap = ntohs(psfCSType->cCPacketClassificationRule.u16ValidityBitMap);
@@ -434,244 +362,199 @@ static inline VOID CopyClassifierRuleToSF(PMINI_ADAPTER Adapter,stConvergenceSLT
}
}
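
The port-range decode above divides the TLV length by 4, which implies each entry is a 4-byte record: a 2-byte low port followed by a 2-byte high port, both big-endian and converted with ntohs(). A standalone sketch of that layout (a userspace approximation; the helper name is hypothetical, and memcpy is used instead of the driver's PUSHORT casts):

#include <string.h>
#include <arpa/inet.h>	/* ntohs(); kernel code would use its own byteorder helpers */

static unsigned int decode_port_ranges(const unsigned char *tlv, unsigned char tlv_len,
				       unsigned short *lo, unsigned short *hi)
{
	unsigned int i, n = tlv_len / 4;	/* each range entry is 4 bytes */

	for (i = 0; i < n; i++) {
		unsigned short v;

		memcpy(&v, tlv + i * 4, 2);	/* big-endian low port */
		lo[i] = ntohs(v);
		memcpy(&v, tlv + i * 4 + 2, 2);	/* big-endian high port */
		hi[i] = ntohs(v);
	}
	return n;
}

Copying through memcpy rather than dereferencing a cast pointer also sidesteps unaligned loads on strict architectures.
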
-
-/**
-@ingroup ctrl_pkt_functions
-*/
-static inline VOID DeleteClassifierRuleFromSF(PMINI_ADAPTER Adapter,UINT uiSearchRuleIndex,UINT nClassifierIndex)
+/*
+ * @ingroup ctrl_pkt_functions
+ */
+static inline VOID DeleteClassifierRuleFromSF(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex, UINT nClassifierIndex)
{
S_CLASSIFIER_RULE *pstClassifierEntry = NULL;
- B_UINT16 u16PacketClassificationRuleIndex;
- USHORT usVCID;
- //VOID *pvPhsContext = NULL;
- //ULONG ulPhsStatus;
+ B_UINT16 u16PacketClassificationRuleIndex;
+ USHORT usVCID;
+ /* VOID *pvPhsContext = NULL; */
+ /* ULONG ulPhsStatus; */
usVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
- if(nClassifierIndex > MAX_CLASSIFIERS-1)
+ if (nClassifierIndex > MAX_CLASSIFIERS-1)
return;
- if(usVCID == 0)
+ if (usVCID == 0)
return;
u16PacketClassificationRuleIndex = Adapter->astClassifierTable[nClassifierIndex].uiClassifierRuleIndex;
-
-
pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
- if(pstClassifierEntry)
- {
+ if (pstClassifierEntry) {
pstClassifierEntry->bUsed = FALSE;
pstClassifierEntry->uiClassifierRuleIndex = 0;
- memset(pstClassifierEntry,0,sizeof(S_CLASSIFIER_RULE));
+ memset(pstClassifierEntry, 0, sizeof(S_CLASSIFIER_RULE));
- //Delete the PHS Rule for this classifier
- PhsDeleteClassifierRule(
- &Adapter->stBCMPhsContext,
- usVCID,
- u16PacketClassificationRuleIndex);
+ /* Delete the PHS Rule for this classifier */
+ PhsDeleteClassifierRule(&Adapter->stBCMPhsContext, usVCID, u16PacketClassificationRuleIndex);
}
}
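
One detail worth seeing in isolation: the PHS key (the VCID plus the packet-classification rule index) is read out of the table entry before the entry is zeroed, because the wipe would destroy it. A minimal sketch of that ordering with hypothetical types:

#include <string.h>

struct rule {
	unsigned short vcid;	/* connection the rule belongs to */
	unsigned short phs_key;	/* key into the PHS rule store */
};

static void delete_rule(struct rule *r,
			void (*phs_delete)(unsigned short vcid, unsigned short key))
{
	unsigned short vcid = r->vcid;		/* capture before the wipe */
	unsigned short key = r->phs_key;

	memset(r, 0, sizeof(*r));		/* clear the table slot */
	phs_delete(vcid, key);			/* then drop the PHS rule */
}
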
-/**
-@ingroup ctrl_pkt_functions
-*/
-VOID DeleteAllClassifiersForSF(PMINI_ADAPTER Adapter,UINT uiSearchRuleIndex)
+/*
+ * @ingroup ctrl_pkt_functions
+ */
+VOID DeleteAllClassifiersForSF(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex)
{
S_CLASSIFIER_RULE *pstClassifierEntry = NULL;
- UINT nClassifierIndex;
- //B_UINT16 u16PacketClassificationRuleIndex;
- USHORT ulVCID;
- //VOID *pvPhsContext = NULL;
- //ULONG ulPhsStatus;
+ int i;
+ /* B_UINT16 u16PacketClassificationRuleIndex; */
+ USHORT ulVCID;
+ /* VOID *pvPhsContext = NULL; */
+ /* ULONG ulPhsStatus; */
ulVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
- if(ulVCID == 0)
+ if (ulVCID == 0)
return;
+ for (i = 0; i < MAX_CLASSIFIERS; i++) {
+ if (Adapter->astClassifierTable[i].usVCID_Value == ulVCID) {
+ pstClassifierEntry = &Adapter->astClassifierTable[i];
- for(nClassifierIndex =0 ; nClassifierIndex < MAX_CLASSIFIERS ; nClassifierIndex++)
- {
- if(Adapter->astClassifierTable[nClassifierIndex].usVCID_Value == ulVCID)
- {
- pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
- if(pstClassifierEntry->bUsed)
- {
- DeleteClassifierRuleFromSF(Adapter,uiSearchRuleIndex,nClassifierIndex);
- }
+ if (pstClassifierEntry->bUsed)
+ DeleteClassifierRuleFromSF(Adapter, uiSearchRuleIndex, i);
}
}
- //Delete All Phs Rules Associated with this SF
- PhsDeleteSFRules(
- &Adapter->stBCMPhsContext,
- ulVCID);
-
+ /* Delete All Phs Rules Associated with this SF */
+ PhsDeleteSFRules(&Adapter->stBCMPhsContext, ulVCID);
}
-
-/**
-This routinue copies the Connection Management
-related data into the Adapter structure.
-@ingroup ctrl_pkt_functions
-*/
-
-static VOID CopyToAdapter( register PMINI_ADAPTER Adapter, /**<Pointer to the Adapter structure*/
- register pstServiceFlowParamSI psfLocalSet, /**<Pointer to the ServiceFlowParamSI structure*/
- register UINT uiSearchRuleIndex, /**<Index of Queue, to which this data belongs*/
- register UCHAR ucDsxType,
- stLocalSFAddIndicationAlt *pstAddIndication)
-{
- //UCHAR ucProtocolLength=0;
- ULONG ulSFID;
- UINT nClassifierIndex = 0;
- E_CLASSIFIER_ACTION eClassifierAction = eInvalidClassifierAction;
- B_UINT16 u16PacketClassificationRuleIndex=0;
- UINT nIndex=0;
+/*
+ * This routine copies the Connection Management
+ * related data into the Adapter structure.
+ * @ingroup ctrl_pkt_functions
+ */
+static VOID CopyToAdapter(register PMINI_ADAPTER Adapter, /* Pointer to the Adapter structure */
+ register pstServiceFlowParamSI psfLocalSet, /* Pointer to the ServiceFlowParamSI structure */
+ register UINT uiSearchRuleIndex, /* Index of the Queue to which this data belongs */
+ register UCHAR ucDsxType,
+ stLocalSFAddIndicationAlt *pstAddIndication)
+{
+ /* UCHAR ucProtocolLength = 0; */
+ ULONG ulSFID;
+ UINT nClassifierIndex = 0;
+ enum E_CLASSIFIER_ACTION eClassifierAction = eInvalidClassifierAction;
+ B_UINT16 u16PacketClassificationRuleIndex = 0;
+ int i;
stConvergenceSLTypes *psfCSType = NULL;
S_PHS_RULE sPhsRule;
USHORT uVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
UINT UGIValue = 0;
-
- Adapter->PackInfo[uiSearchRuleIndex].bValid=TRUE;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Search Rule Index = %d\n", uiSearchRuleIndex);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"%s: SFID= %x ",__FUNCTION__, ntohl(psfLocalSet->u32SFID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Updating Queue %d",uiSearchRuleIndex);
+ Adapter->PackInfo[uiSearchRuleIndex].bValid = TRUE;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Search Rule Index = %d\n", uiSearchRuleIndex);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s: SFID= %x ", __func__, ntohl(psfLocalSet->u32SFID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Updating Queue %d", uiSearchRuleIndex);
ulSFID = ntohl(psfLocalSet->u32SFID);
- //Store IP Version used
- //Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF
+ /* Store IP Version used */
+ /* Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF */
Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = 0;
Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = 0;
- /*Enable IP/ETh CS Support As Required*/
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"CopyToAdapter : u8CSSpecification : %X\n",psfLocalSet->u8CSSpecification);
- switch(psfLocalSet->u8CSSpecification)
+ /* Enable IP/ETh CS Support As Required */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "CopyToAdapter : u8CSSpecification : %X\n", psfLocalSet->u8CSSpecification);
+ switch (psfLocalSet->u8CSSpecification) {
+ case eCSPacketIPV4:
{
- case eCSPacketIPV4:
- {
- Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
- break;
- }
- case eCSPacketIPV6:
- {
- Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV6_CS;
- break;
- }
-
- case eCS802_3PacketEthernet:
- case eCS802_1QPacketVLAN:
- {
- Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
- break;
- }
-
- case eCSPacketIPV4Over802_1QVLAN:
- case eCSPacketIPV4Over802_3Ethernet:
- {
- Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
- Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
- break;
- }
-
- case eCSPacketIPV6Over802_1QVLAN:
- case eCSPacketIPV6Over802_3Ethernet:
- {
- Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV6_CS;
- Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
- break;
- }
-
- default:
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Error in value of CS Classification.. setting default to IP CS\n");
- Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
- break;
- }
+ Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
+ break;
+ }
+ case eCSPacketIPV6:
+ {
+ Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV6_CS;
+ break;
+ }
+ case eCS802_3PacketEthernet:
+ case eCS802_1QPacketVLAN:
+ {
+ Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
+ break;
+ }
+ case eCSPacketIPV4Over802_1QVLAN:
+ case eCSPacketIPV4Over802_3Ethernet:
+ {
+ Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
+ Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
+ break;
+ }
+ case eCSPacketIPV6Over802_1QVLAN:
+ case eCSPacketIPV6Over802_3Ethernet:
+ {
+ Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV6_CS;
+ Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
+ break;
+ }
+ default:
+ {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error in value of CS Classification.. setting default to IP CS\n");
+ Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
+ break;
+ }
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"CopyToAdapter : Queue No : %X ETH CS Support : %X , IP CS Support : %X \n",
- uiSearchRuleIndex,
- Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport,
- Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "CopyToAdapter : Queue No : %X ETH CS Support : %X , IP CS Support : %X\n",
+ uiSearchRuleIndex,
+ Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport,
+ Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport);
- //Store IP Version used
- //Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF
- if(Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport == IPV6_CS)
- {
+ /* Store IP Version used */
+ /* Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF */
+ if (Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport == IPV6_CS)
Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion = IPV6;
- }
else
- {
Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion = IPV4;
- }
/* To ensure that the ETH CS code doesn't get executed if the BS doesn't support ETH CS */
- if(!Adapter->bETHCSEnabled)
+ if (!Adapter->bETHCSEnabled)
Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = 0;
- if(psfLocalSet->u8ServiceClassNameLength > 0 &&
- psfLocalSet->u8ServiceClassNameLength < 32)
- {
- memcpy(Adapter->PackInfo[uiSearchRuleIndex].ucServiceClassName,
- psfLocalSet->u8ServiceClassName,
- psfLocalSet->u8ServiceClassNameLength);
- }
- Adapter->PackInfo[uiSearchRuleIndex].u8QueueType =
- psfLocalSet->u8ServiceFlowSchedulingType;
+ if (psfLocalSet->u8ServiceClassNameLength > 0 && psfLocalSet->u8ServiceClassNameLength < 32)
+ memcpy(Adapter->PackInfo[uiSearchRuleIndex].ucServiceClassName, psfLocalSet->u8ServiceClassName, psfLocalSet->u8ServiceClassNameLength);
- if(Adapter->PackInfo[uiSearchRuleIndex].u8QueueType==BE &&
- Adapter->PackInfo[uiSearchRuleIndex].ucDirection)
- {
- Adapter->usBestEffortQueueIndex=uiSearchRuleIndex;
- }
+ Adapter->PackInfo[uiSearchRuleIndex].u8QueueType = psfLocalSet->u8ServiceFlowSchedulingType;
+
+ if (Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == BE && Adapter->PackInfo[uiSearchRuleIndex].ucDirection)
+ Adapter->usBestEffortQueueIndex = uiSearchRuleIndex;
Adapter->PackInfo[uiSearchRuleIndex].ulSFID = ntohl(psfLocalSet->u32SFID);
Adapter->PackInfo[uiSearchRuleIndex].u8TrafficPriority = psfLocalSet->u8TrafficPriority;
- //copy all the classifier in the Service Flow param structure
- for(nIndex=0; nIndex<psfLocalSet->u8TotalClassifiers; nIndex++)
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Classifier index =%d",nIndex);
- psfCSType = &psfLocalSet->cConvergenceSLTypes[nIndex];
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Classifier index =%d",nIndex);
+ /* copy all the classifier in the Service Flow param structure */
+ for (i = 0; i < psfLocalSet->u8TotalClassifiers; i++) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Classifier index =%d", i);
+ psfCSType = &psfLocalSet->cConvergenceSLTypes[i];
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Classifier index =%d", i);
- if(psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bClassifierPriority=TRUE;
- }
-
- if(psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bClassifierPriority=TRUE;
- }
+ if (psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
+ Adapter->PackInfo[uiSearchRuleIndex].bClassifierPriority = TRUE;
+ if (psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
+ Adapter->PackInfo[uiSearchRuleIndex].bClassifierPriority = TRUE;
- if(ucDsxType== DSA_ACK)
- {
+ if (ucDsxType == DSA_ACK) {
eClassifierAction = eAddClassifier;
- }
- else if(ucDsxType == DSC_ACK)
- {
- switch(psfCSType->u8ClassfierDSCAction)
- {
- case 0://DSC Add Classifier
+ } else if (ucDsxType == DSC_ACK) {
+ switch (psfCSType->u8ClassfierDSCAction) {
+ case 0: /* DSC Add Classifier */
{
eClassifierAction = eAddClassifier;
}
break;
- case 1://DSC Replace Classifier
+ case 1: /* DSC Replace Classifier */
{
eClassifierAction = eReplaceClassifier;
}
break;
- case 2://DSC Delete Classifier
+ case 2: /* DSC Delete Classifier */
{
eClassifierAction = eDeleteClassifier;
-
}
break;
default:
@@ -683,163 +566,133 @@ static VOID CopyToAdapter( register PMINI_ADAPTER Adapter, /**<Pointer to the A
u16PacketClassificationRuleIndex = ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
- switch(eClassifierAction)
- {
+ switch (eClassifierAction) {
case eAddClassifier:
{
- //Get a Free Classifier Index From Classifier table for this SF to add the Classifier
- //Contained in this message
- nClassifierIndex = SearchClsid(Adapter,ulSFID,u16PacketClassificationRuleIndex);
+ /*
+ * Get a Free Classifier Index From the Classifier table for this SF
+ * to add the Classifier contained in this message
+ */
+ nClassifierIndex = SearchClsid(Adapter, ulSFID, u16PacketClassificationRuleIndex);
- if(nClassifierIndex > MAX_CLASSIFIERS)
- {
+ if (nClassifierIndex > MAX_CLASSIFIERS) {
nClassifierIndex = SearchFreeClsid(Adapter);
- if(nClassifierIndex > MAX_CLASSIFIERS)
- {
- //Failed To get a free Entry
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Error Failed To get a free Classifier Entry");
+ if (nClassifierIndex > MAX_CLASSIFIERS) {
+ /* Failed To get a free Entry */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error Failed To get a free Classifier Entry");
break;
}
- //Copy the Classifier Rule for this service flow into our Classifier table maintained per SF.
- CopyClassifierRuleToSF(Adapter,psfCSType,uiSearchRuleIndex,nClassifierIndex);
- }
-
- else
- {
- //This Classifier Already Exists and it is invalid to Add Classifier with existing PCRI
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"CopyToAdapter : Error The Specified Classifier Already Exists \
- and attempted To Add Classifier with Same PCRI : 0x%x\n", u16PacketClassificationRuleIndex);
+ /* Copy the Classifier Rule for this service flow into our Classifier table maintained per SF. */
+ CopyClassifierRuleToSF(Adapter, psfCSType, uiSearchRuleIndex, nClassifierIndex);
+ } else {
+ /* This Classifier Already Exists and it is invalid to Add Classifier with existing PCRI */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
+ "CopyToAdapter: Error The Specified Classifier Already Exists and attempted To Add Classifier with Same PCRI : 0x%x\n",
+ u16PacketClassificationRuleIndex);
}
}
break;
-
case eReplaceClassifier:
{
- //Get the Classifier Index From Classifier table for this SF and replace existing Classifier
- //with the new classifier Contained in this message
- nClassifierIndex = SearchClsid(Adapter,ulSFID,u16PacketClassificationRuleIndex);
- if(nClassifierIndex > MAX_CLASSIFIERS)
- {
- //Failed To search the classifier
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Error Search for Classifier To be replaced failed");
+ /*
+ * Get the Classifier Index From the Classifier table for this SF and
+ * replace the existing Classifier with the new classifier contained
+ * in this message
+ */
+ nClassifierIndex = SearchClsid(Adapter, ulSFID, u16PacketClassificationRuleIndex);
+ if (nClassifierIndex > MAX_CLASSIFIERS) {
+ /* Failed To search the classifier */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error Search for Classifier To be replaced failed");
break;
}
- //Copy the Classifier Rule for this service flow into our Classifier table maintained per SF.
- CopyClassifierRuleToSF(Adapter,psfCSType,uiSearchRuleIndex,nClassifierIndex);
+ /* Copy the Classifier Rule for this service flow into our Classifier table maintained per SF. */
+ CopyClassifierRuleToSF(Adapter, psfCSType, uiSearchRuleIndex, nClassifierIndex);
}
break;
-
case eDeleteClassifier:
{
- //Get the Classifier Index From Classifier table for this SF and replace existing Classifier
- //with the new classifier Contained in this message
- nClassifierIndex = SearchClsid(Adapter,ulSFID,u16PacketClassificationRuleIndex);
- if(nClassifierIndex > MAX_CLASSIFIERS)
- {
- //Failed To search the classifier
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Error Search for Classifier To be deleted failed");
+ /*
+ * Get the Classifier Index From the Classifier table for this SF and
+ * delete the Classifier identified in this message
+ */
+ nClassifierIndex = SearchClsid(Adapter, ulSFID, u16PacketClassificationRuleIndex);
+ if (nClassifierIndex > MAX_CLASSIFIERS) {
+ /* Failed To search the classifier */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error Search for Classifier To be deleted failed");
break;
}
- //Delete This classifier
- DeleteClassifierRuleFromSF(Adapter,uiSearchRuleIndex,nClassifierIndex);
+ /* Delete This classifier */
+ DeleteClassifierRuleFromSF(Adapter, uiSearchRuleIndex, nClassifierIndex);
}
break;
-
default:
{
- //Invalid Action for classifier
+ /* Invalid Action for classifier */
break;
}
}
}
- //Repeat parsing Classification Entries to process PHS Rules
- for(nIndex=0; nIndex < psfLocalSet->u8TotalClassifiers; nIndex++)
- {
- psfCSType = &psfLocalSet->cConvergenceSLTypes[nIndex];
-
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "psfCSType->u8PhsDSCAction : 0x%x\n",
- psfCSType->u8PhsDSCAction );
+ /* Repeat parsing Classification Entries to process PHS Rules */
+ for (i = 0; i < psfLocalSet->u8TotalClassifiers; i++) {
+ psfCSType = &psfLocalSet->cConvergenceSLTypes[i];
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "psfCSType->u8PhsDSCAction : 0x%x\n", psfCSType->u8PhsDSCAction);
- switch (psfCSType->u8PhsDSCAction)
- {
+ switch (psfCSType->u8PhsDSCAction) {
case eDeleteAllPHSRules:
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Deleting All PHS Rules For VCID: 0x%X\n",uVCID);
-
- //Delete All the PHS rules for this Service flow
-
- PhsDeleteSFRules(
- &Adapter->stBCMPhsContext,
- uVCID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Deleting All PHS Rules For VCID: 0x%X\n", uVCID);
+ /* Delete All the PHS rules for this Service flow */
+ PhsDeleteSFRules(&Adapter->stBCMPhsContext, uVCID);
break;
}
case eDeletePHSRule:
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"PHS DSC Action = Delete PHS Rule \n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "PHS DSC Action = Delete PHS Rule\n");
+
+ if (psfCSType->cPhsRule.u8PHSI)
+ PhsDeletePHSRule(&Adapter->stBCMPhsContext, uVCID, psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
- if(psfCSType->cPhsRule.u8PHSI)
- {
- PhsDeletePHSRule(
- &Adapter->stBCMPhsContext,
- uVCID,
- psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
- }
- else
- {
- //BCM_DEBUG_PRINT(CONN_MSG,("Error CPHSRule.PHSI is ZERO \n"));
- }
break;
}
- default :
+ default:
{
- if(ucDsxType == DSC_ACK)
- {
- //BCM_DEBUG_PRINT(CONN_MSG,("Invalid PHS DSC Action For DSC \n",psfCSType->cPhsRule.u8PHSI));
- break; //FOr DSC ACK Case PHS DSC Action must be in valid set
+ if (ucDsxType == DSC_ACK) {
+ /* BCM_DEBUG_PRINT(CONN_MSG,("Invalid PHS DSC Action For DSC\n",psfCSType->cPhsRule.u8PHSI)); */
+ break; /* For DSC ACK case, PHS DSC Action must be in the valid set */
}
}
- //Proceed To Add PHS rule for DSA_ACK case even if PHS DSC action is unspecified
- //No Break Here . Intentionally!
+ /* Proceed To Add PHS rule for DSA_ACK case even if PHS DSC action is unspecified */
+ /* No break here. Intentional fall-through! */
case eAddPHSRule:
case eSetPHSRule:
{
- if(psfCSType->cPhsRule.u8PHSI)
- {
- //Apply This PHS Rule to all classifiers whose Associated PHSI Match
+ if (psfCSType->cPhsRule.u8PHSI) {
+ /* Apply This PHS Rule to all classifiers whose Associated PHSI matches */
unsigned int uiClassifierIndex = 0;
- if(pstAddIndication->u8Direction == UPLINK_DIR )
- {
- for(uiClassifierIndex=0;uiClassifierIndex<MAX_CLASSIFIERS;uiClassifierIndex++)
- {
- if((Adapter->astClassifierTable[uiClassifierIndex].bUsed) &&
+ if (pstAddIndication->u8Direction == UPLINK_DIR) {
+ for (uiClassifierIndex = 0; uiClassifierIndex < MAX_CLASSIFIERS; uiClassifierIndex++) {
+ if ((Adapter->astClassifierTable[uiClassifierIndex].bUsed) &&
(Adapter->astClassifierTable[uiClassifierIndex].ulSFID == Adapter->PackInfo[uiSearchRuleIndex].ulSFID) &&
- (Adapter->astClassifierTable[uiClassifierIndex].u8AssociatedPHSI == psfCSType->cPhsRule.u8PHSI))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Adding PHS Rule For Classifier : 0x%x cPhsRule.u8PHSI : 0x%x\n",
- Adapter->astClassifierTable[uiClassifierIndex].uiClassifierRuleIndex,
- psfCSType->cPhsRule.u8PHSI);
- //Update The PHS Rule for this classifier as Associated PHSI id defined
-
- //Copy the PHS Rule
- sPhsRule.u8PHSI = psfCSType->cPhsRule.u8PHSI;
- sPhsRule.u8PHSFLength = psfCSType->cPhsRule.u8PHSFLength;
+ (Adapter->astClassifierTable[uiClassifierIndex].u8AssociatedPHSI == psfCSType->cPhsRule.u8PHSI)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
+ "Adding PHS Rule For Classifier: 0x%x cPhsRule.u8PHSI: 0x%x\n",
+ Adapter->astClassifierTable[uiClassifierIndex].uiClassifierRuleIndex,
+ psfCSType->cPhsRule.u8PHSI);
+ /* Update the PHS Rule for this classifier as its Associated PHSI is defined */
+
+ /* Copy the PHS Rule */
+ sPhsRule.u8PHSI = psfCSType->cPhsRule.u8PHSI;
+ sPhsRule.u8PHSFLength = psfCSType->cPhsRule.u8PHSFLength;
sPhsRule.u8PHSMLength = psfCSType->cPhsRule.u8PHSMLength;
sPhsRule.u8PHSS = psfCSType->cPhsRule.u8PHSS;
sPhsRule.u8PHSV = psfCSType->cPhsRule.u8PHSV;
- memcpy(sPhsRule.u8PHSF,psfCSType->cPhsRule.u8PHSF,MAX_PHS_LENGTHS);
- memcpy(sPhsRule.u8PHSM,psfCSType->cPhsRule.u8PHSM,MAX_PHS_LENGTHS);
+ memcpy(sPhsRule.u8PHSF, psfCSType->cPhsRule.u8PHSF, MAX_PHS_LENGTHS);
+ memcpy(sPhsRule.u8PHSM, psfCSType->cPhsRule.u8PHSM, MAX_PHS_LENGTHS);
sPhsRule.u8RefCnt = 0;
sPhsRule.bUnclassifiedPHSRule = FALSE;
sPhsRule.PHSModifiedBytes = 0;
sPhsRule.PHSModifiedNumPackets = 0;
sPhsRule.PHSErrorNumPackets = 0;
- //bPHSRuleAssociated = TRUE;
- //Store The PHS Rule for this classifier
+ /* bPHSRuleAssociated = TRUE; */
+ /* Store The PHS Rule for this classifier */
PhsUpdateClassifierRule(
&Adapter->stBCMPhsContext,
@@ -848,184 +701,157 @@ static VOID CopyToAdapter( register PMINI_ADAPTER Adapter, /**<Pointer to the A
&sPhsRule,
Adapter->astClassifierTable[uiClassifierIndex].u8AssociatedPHSI);
- //Update PHS Rule For the Classifier
- if(sPhsRule.u8PHSI)
- {
+ /* Update PHS Rule For the Classifier */
+ if (sPhsRule.u8PHSI) {
Adapter->astClassifierTable[uiClassifierIndex].u32PHSRuleID = sPhsRule.u8PHSI;
- memcpy(&Adapter->astClassifierTable[uiClassifierIndex].sPhsRule,&sPhsRule,sizeof(S_PHS_RULE));
+ memcpy(&Adapter->astClassifierTable[uiClassifierIndex].sPhsRule, &sPhsRule, sizeof(S_PHS_RULE));
}
-
}
}
+ } else {
+ /* Error: the PHS Rule specified in signaling could not be applied to any classifier */
+
+ /* Copy the PHS Rule */
+ sPhsRule.u8PHSI = psfCSType->cPhsRule.u8PHSI;
+ sPhsRule.u8PHSFLength = psfCSType->cPhsRule.u8PHSFLength;
+ sPhsRule.u8PHSMLength = psfCSType->cPhsRule.u8PHSMLength;
+ sPhsRule.u8PHSS = psfCSType->cPhsRule.u8PHSS;
+ sPhsRule.u8PHSV = psfCSType->cPhsRule.u8PHSV;
+ memcpy(sPhsRule.u8PHSF, psfCSType->cPhsRule.u8PHSF, MAX_PHS_LENGTHS);
+ memcpy(sPhsRule.u8PHSM, psfCSType->cPhsRule.u8PHSM, MAX_PHS_LENGTHS);
+ sPhsRule.u8RefCnt = 0;
+ sPhsRule.bUnclassifiedPHSRule = TRUE;
+ sPhsRule.PHSModifiedBytes = 0;
+ sPhsRule.PHSModifiedNumPackets = 0;
+ sPhsRule.PHSErrorNumPackets = 0;
+ /* Store The PHS Rule for this classifier */
+
+ /*
+ * Passing the argument u8PHSI instead of clsid. Because for DL with no classifier rule,
+ * clsid will be zero hence we can't have multiple PHS rules for the same SF.
+ * To support multiple PHS rule, passing u8PHSI.
+ */
+ PhsUpdateClassifierRule(
+ &Adapter->stBCMPhsContext,
+ uVCID,
+ sPhsRule.u8PHSI,
+ &sPhsRule,
+ sPhsRule.u8PHSI);
}
- else
- {
- //Error PHS Rule specified in signaling could not be applied to any classifier
-
- //Copy the PHS Rule
- sPhsRule.u8PHSI = psfCSType->cPhsRule.u8PHSI;
- sPhsRule.u8PHSFLength = psfCSType->cPhsRule.u8PHSFLength;
- sPhsRule.u8PHSMLength = psfCSType->cPhsRule.u8PHSMLength;
- sPhsRule.u8PHSS = psfCSType->cPhsRule.u8PHSS;
- sPhsRule.u8PHSV = psfCSType->cPhsRule.u8PHSV;
- memcpy(sPhsRule.u8PHSF,psfCSType->cPhsRule.u8PHSF,MAX_PHS_LENGTHS);
- memcpy(sPhsRule.u8PHSM,psfCSType->cPhsRule.u8PHSM,MAX_PHS_LENGTHS);
- sPhsRule.u8RefCnt = 0;
- sPhsRule.bUnclassifiedPHSRule = TRUE;
- sPhsRule.PHSModifiedBytes = 0;
- sPhsRule.PHSModifiedNumPackets = 0;
- sPhsRule.PHSErrorNumPackets = 0;
- //Store The PHS Rule for this classifier
-
- /*
- Passing the argument u8PHSI instead of clsid. Because for DL with no classifier rule,
- clsid will be zero hence we can't have multiple PHS rules for the same SF.
- To support multiple PHS rule, passing u8PHSI.
- */
-
- PhsUpdateClassifierRule(
- &Adapter->stBCMPhsContext,
- uVCID,
- sPhsRule.u8PHSI,
- &sPhsRule,
- sPhsRule.u8PHSI);
-
- }
-
}
}
break;
}
}
- if(psfLocalSet->u32MaxSustainedTrafficRate == 0 )
- {
- //No Rate Limit . Set Max Sustained Traffic Rate to Maximum
- Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate =
- WIMAX_MAX_ALLOWED_RATE;
-
- }
- else if (ntohl(psfLocalSet->u32MaxSustainedTrafficRate) >
- WIMAX_MAX_ALLOWED_RATE)
- {
- //Too large Allowed Rate specified. Limiting to Wi Max Allowed rate
- Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate =
- WIMAX_MAX_ALLOWED_RATE;
- }
- else
- {
- Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate =
- ntohl(psfLocalSet->u32MaxSustainedTrafficRate);
+ if (psfLocalSet->u32MaxSustainedTrafficRate == 0) {
+ /* No Rate Limit. Set Max Sustained Traffic Rate to Maximum */
+ Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate = WIMAX_MAX_ALLOWED_RATE;
+ } else if (ntohl(psfLocalSet->u32MaxSustainedTrafficRate) > WIMAX_MAX_ALLOWED_RATE) {
+ /* Too large an Allowed Rate specified. Limiting to the WiMax allowed rate */
+ Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate = WIMAX_MAX_ALLOWED_RATE;
+ } else {
+ Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate = ntohl(psfLocalSet->u32MaxSustainedTrafficRate);
}
Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency = ntohl(psfLocalSet->u32MaximumLatency);
-
- if(Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency == 0) /* 0 should be treated as infinite */
+ if (Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency == 0) /* 0 should be treated as infinite */
Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency = MAX_LATENCY_ALLOWED;
+ if ((Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == ERTPS ||
+ Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == UGS))
+ UGIValue = ntohs(psfLocalSet->u16UnsolicitedGrantInterval);
- if(( Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == ERTPS ||
- Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == UGS ) )
- UGIValue = ntohs(psfLocalSet->u16UnsolicitedGrantInterval);
-
- if(UGIValue == 0)
+ if (UGIValue == 0)
UGIValue = DEFAULT_UG_INTERVAL;
/*
- For UGI based connections...
- DEFAULT_UGI_FACTOR*UGIInterval worth of data is the max token count at host...
- The extra amount of token is to ensure that a large amount of jitter won't have loss in throughput...
- In case of non-UGI based connection, 200 frames worth of data is the max token count at host...
- */
-
+ * For UGI based connections...
+ * DEFAULT_UGI_FACTOR*UGIInterval worth of data is the max token count at host...
+ * The extra tokens ensure that a large amount of jitter won't cause a loss in throughput...
+ * In case of non-UGI based connection, 200 frames worth of data is the max token count at host...
+ */
Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize =
- (DEFAULT_UGI_FACTOR*Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate*UGIValue)/1000;
+ (DEFAULT_UGI_FACTOR*Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate*UGIValue)/1000;
- if(Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize < WIMAX_MAX_MTU*8)
- {
+ if (Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize < WIMAX_MAX_MTU*8) {
UINT UGIFactor = 0;
/* Special handling to ensure the biggest packet size can go out from host to FW, as follows:
- 1. Any packet from Host to FW can go out in different packet size.
- 2. So in case the Bucket count is smaller than MTU, the packets of size (Size > TokenCount), will get dropped.
- 3. We can allow packets of MaxSize from Host->FW that can go out from FW in multiple SDUs by fragmentation at Wimax Layer
- */
+ * 1. Any packet from Host to FW can go out in a different packet size.
+ * 2. So in case the bucket count is smaller than the MTU, packets of size (Size > TokenCount) will get dropped.
+ * 3. We can allow packets of MaxSize from Host->FW that can go out from FW in multiple SDUs by fragmentation at the Wimax layer
+ */
UGIFactor = (Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency/UGIValue + 1);
- if(UGIFactor > DEFAULT_UGI_FACTOR)
- Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize =
- (UGIFactor*Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate*UGIValue)/1000;
+ if (UGIFactor > DEFAULT_UGI_FACTOR)
+ Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize =
+ (UGIFactor*Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate*UGIValue)/1000;
- if(Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize > WIMAX_MAX_MTU*8)
+ if (Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize > WIMAX_MAX_MTU*8)
Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize = WIMAX_MAX_MTU*8;
}
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "LAT: %d, UGI: %d\n", Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency, UGIValue);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "uiMaxAllowedRate: 0x%x, u32MaxSustainedTrafficRate: 0x%x ,uiMaxBucketSize: 0x%x",
+ Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate,
+ ntohl(psfLocalSet->u32MaxSustainedTrafficRate),
+ Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"LAT: %d, UGI: %d \n", Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency, UGIValue);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"uiMaxAllowedRate: 0x%x, u32MaxSustainedTrafficRate: 0x%x ,uiMaxBucketSize: 0x%x",
- Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate,
- ntohl(psfLocalSet->u32MaxSustainedTrafficRate),
- Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize);
-
- //copy the extended SF Parameters to Support MIBS
- CopyMIBSExtendedSFParameters(Adapter,psfLocalSet,uiSearchRuleIndex);
+ /* copy the extended SF Parameters to Support MIBS */
+ CopyMIBSExtendedSFParameters(Adapter, psfLocalSet, uiSearchRuleIndex);
- //store header suppression enabled flag per SF
+ /* store header suppression enabled flag per SF */
Adapter->PackInfo[uiSearchRuleIndex].bHeaderSuppressionEnabled =
- !(psfLocalSet->u8RequesttransmissionPolicy &
- MASK_DISABLE_HEADER_SUPPRESSION);
+ !(psfLocalSet->u8RequesttransmissionPolicy &
+ MASK_DISABLE_HEADER_SUPPRESSION);
kfree(Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication);
Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication = pstAddIndication;
- //Re Sort the SF list in PackInfo according to Traffic Priority
+ /* Re-sort the SF list in PackInfo according to Traffic Priority */
SortPackInfo(Adapter);
/* Re-sort the Classifier Rules table and re-arrange
- according to Classifier Rule Priority */
+ * according to Classifier Rule Priority
+ */
SortClassifiers(Adapter);
-
DumpPhsRules(&Adapter->stBCMPhsContext);
-
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"%s <=====", __FUNCTION__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s <=====", __func__);
}
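
The token-bucket sizing near the end of CopyToAdapter reduces to: size = factor * rate * UGI / 1000, floored so that at least one full-MTU packet still fits. A compilable sketch of the arithmetic (units and constants are assumptions for illustration; DEFAULT_UGI_FACTOR and WIMAX_MAX_MTU here are placeholders, not the driver's real values):

#define DEFAULT_UGI_FACTOR	4	/* placeholder, not the driver's value */
#define WIMAX_MAX_MTU		1500	/* placeholder, not the driver's value */

static unsigned int bucket_size(unsigned int max_rate, unsigned int ugi_ms,
				unsigned int max_latency_ms)
{
	unsigned int size = (DEFAULT_UGI_FACTOR * max_rate * ugi_ms) / 1000;

	if (size < WIMAX_MAX_MTU * 8) {
		/* widen the bucket so a full-MTU packet still fits */
		unsigned int factor = max_latency_ms / ugi_ms + 1;

		if (factor > DEFAULT_UGI_FACTOR)
			size = (factor * max_rate * ugi_ms) / 1000;
		if (size > WIMAX_MAX_MTU * 8)
			size = WIMAX_MAX_MTU * 8;
	}
	return size;
}

For example, with max_rate = 1,000,000, ugi_ms = 20 and factor 4, the bucket comes out at 80,000; with max_rate = 10,000 the first product is only 800, so a 1,000 ms latency yields factor 51 and widens the bucket to 10,200, clamped at WIMAX_MAX_MTU * 8 if it ever exceeds that.
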
-
/***********************************************************************
-* Function - DumpCmControlPacket
-*
-* Description - This routinue Dumps the Contents of the AddIndication
-* Structure in the Connection Management Control Packet
-*
-* Parameter - pvBuffer: Pointer to the buffer containing the
-* AddIndication data.
-*
-* Returns - None
-*************************************************************************/
+ * Function - DumpCmControlPacket
+ *
+ * Description - This routine dumps the contents of the AddIndication
+ * Structure in the Connection Management Control Packet
+ *
+ * Parameter - pvBuffer: Pointer to the buffer containing the
+ * AddIndication data.
+ *
+ * Returns - None
+ *************************************************************************/
static VOID DumpCmControlPacket(PVOID pvBuffer)
{
- UINT uiLoopIndex;
- UINT nIndex;
- stLocalSFAddIndicationAlt *pstAddIndication;
- UINT nCurClassifierCnt;
- PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
+ int uiLoopIndex;
+ int nIndex;
+ stLocalSFAddIndicationAlt *pstAddIndication;
+ UINT nCurClassifierCnt;
+ PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
pstAddIndication = (stLocalSFAddIndicationAlt *)pvBuffer;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "======>");
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Type : 0x%X",pstAddIndication->u8Type);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Direction : 0x%X",pstAddIndication->u8Direction);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TID: 0x%X", ntohs(pstAddIndication->u16TID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID : 0x%X",ntohs(pstAddIndication->u16CID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VCID : 0x%X",ntohs(pstAddIndication->u16VCID));
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " AuthorizedSet--->");
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID : 0x%X",htonl(pstAddIndication->sfAuthorizedSet.u32SFID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID : 0x%X",htons(pstAddIndication->sfAuthorizedSet.u16CID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8ServiceClassNameLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName : 0x%X ,0x%X , 0x%X, 0x%X, 0x%X, 0x%X",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "======>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Type: 0x%X", pstAddIndication->u8Type);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Direction: 0x%X", pstAddIndication->u8Direction);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TID: 0x%X", ntohs(pstAddIndication->u16TID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", ntohs(pstAddIndication->u16CID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VCID: 0x%X", ntohs(pstAddIndication->u16VCID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " AuthorizedSet--->");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", htonl(pstAddIndication->sfAuthorizedSet.u32SFID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", htons(pstAddIndication->sfAuthorizedSet.u16CID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8ServiceClassNameLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName: 0x%X ,0x%X , 0x%X, 0x%X, 0x%X, 0x%X",
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[0],
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[1],
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[2],
@@ -1033,207 +859,170 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[4],
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[5]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8MBSService);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8QosParamSet);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority : 0x%X, %p",
- pstAddIndication->sfAuthorizedSet.u8TrafficPriority, &pstAddIndication->sfAuthorizedSet.u8TrafficPriority);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxSustainedTrafficRate : 0x%X 0x%p",
- pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate,
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%X", pstAddIndication->sfAuthorizedSet.u8MBSService);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%X", pstAddIndication->sfAuthorizedSet.u8QosParamSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%X, %p",
+ pstAddIndication->sfAuthorizedSet.u8TrafficPriority, &pstAddIndication->sfAuthorizedSet.u8TrafficPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxSustainedTrafficRate: 0x%X 0x%p",
+ pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate,
&pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst : 0x%X",
- pstAddIndication->sfAuthorizedSet.u32MaxTrafficBurst);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X",
- pstAddIndication->sfAuthorizedSet.u32MinReservedTrafficRate);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParamLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParam[0]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8ServiceFlowSchedulingType);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter : 0x%X",
- pstAddIndication->sfAuthorizedSet.u32ToleratedJitter);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency : 0x%X",
- pstAddIndication->sfAuthorizedSet.u32MaximumLatency);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%X",
- pstAddIndication->sfAuthorizedSet.u8FixedLengthVSVariableLengthSDUIndicator);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8SDUSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16TargetSAID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8ARQEnable);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQWindowSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQRetryTxTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQRetryRxTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQBlockLifeTime);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQSyncLossTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8ARQDeliverInOrder);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQRxPurgeTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQBlockSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8CSSpecification);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8TypeOfDataDeliveryService);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16SDUInterArrivalTime);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16TimeBase);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8PagingPreference);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UnsolicitedPollingInterval : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16UnsolicitedPollingInterval);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "sfAuthorizedSet.u8HARQChannelMapping %x %x %x ",
- *(unsigned int*)pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping,
- *(unsigned int*)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[4],
- *(USHORT*) &pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[8]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8TrafficIndicationPreference);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received : 0x%X",pstAddIndication->sfAuthorizedSet.u8TotalClassifiers);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfAuthorizedSet.u32MaxTrafficBurst);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X",
+ pstAddIndication->sfAuthorizedSet.u32MinReservedTrafficRate);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParamLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParam[0]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8ServiceFlowSchedulingType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfAuthorizedSet.u32ToleratedJitter);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfAuthorizedSet.u32MaximumLatency);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8FixedLengthVSVariableLengthSDUIndicator);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%X", pstAddIndication->sfAuthorizedSet.u8SDUSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID: 0x%X", pstAddIndication->sfAuthorizedSet.u16TargetSAID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable: 0x%X", pstAddIndication->sfAuthorizedSet.u8ARQEnable);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQWindowSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRetryTxTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRetryRxTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQBlockLifeTime);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQSyncLossTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder: 0x%X", pstAddIndication->sfAuthorizedSet.u8ARQDeliverInOrder);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRxPurgeTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQBlockSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification: 0x%X", pstAddIndication->sfAuthorizedSet.u8CSSpecification);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8TypeOfDataDeliveryService);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfAuthorizedSet.u16SDUInterArrivalTime);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase: 0x%X", pstAddIndication->sfAuthorizedSet.u16TimeBase);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference: 0x%X", pstAddIndication->sfAuthorizedSet.u8PagingPreference);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UnsolicitedPollingInterval: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u16UnsolicitedPollingInterval);
+
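+ /* The ten bytes of u8HARQChannelMapping are logged as two 32-bit words plus one 16-bit word. */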
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "sfAuthorizedSet.u8HARQChannelMapping %x %x %x ",
+ *(unsigned int *)pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping,
+ *(unsigned int *)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[4],
+ *(USHORT *)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[8]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8TrafficIndicationPreference);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfAuthorizedSet.u8TotalClassifiers);
nCurClassifierCnt = pstAddIndication->sfAuthorizedSet.u8TotalClassifiers;
-
- if(nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
- {
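+ /* Clamp to the fixed-size classifier array (MAX_CLASSIFIERS_IN_SF entries) so the loop below cannot index past its end. */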
+ if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
- }
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.bValid %d", pstAddIndication->sfAuthorizedSet.bValid);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.u16MacOverhead %x", pstAddIndication->sfAuthorizedSet.u16MacOverhead);
- if(!pstAddIndication->sfAuthorizedSet.bValid)
- pstAddIndication->sfAuthorizedSet.bValid=1;
- for(nIndex = 0 ; nIndex < nCurClassifierCnt ; nIndex++)
- {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.bValid %d", pstAddIndication->sfAuthorizedSet.bValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.u16MacOverhead %x", pstAddIndication->sfAuthorizedSet.u16MacOverhead);
+ if (!pstAddIndication->sfAuthorizedSet.bValid)
+ pstAddIndication->sfAuthorizedSet.bValid = 1;
+ for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
stConvergenceSLTypes *psfCSType = NULL;
psfCSType = &pstAddIndication->sfAuthorizedSet.cConvergenceSLTypes[nIndex];
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "psfCSType = %p", psfCSType);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "CCPacketClassificationRuleSI====>");
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority :0x%X ",
- psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3] :0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
-
- for(uiLoopIndex=0; uiLoopIndex < 1; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8Protocol);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
-
- for(uiLoopIndex=0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32] : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength : 0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
-
- for(uiLoopIndex=0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32] : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddress[6] : 0x %02X %02X %02X %02X %02X %02X",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6] : 0x %02X %02X %02X %02X %02X %02X",
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthertypeLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3] : 0x%02X ,0x%02X ,0x%02X ",
- psfCSType->cCPacketClassificationRule.u8Ethertype[0],
- psfCSType->cCPacketClassificationRule.u8Ethertype[1],
- psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority : 0x%X ",
- psfCSType->cCPacketClassificationRule.u16UserPriority);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID : 0x%X ",
- psfCSType->cCPacketClassificationRule.u16VLANID);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex : 0x%X ",
- psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength : 0x%X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1] : 0x%X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "psfCSType = %p", psfCSType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "CCPacketClassificationRuleSI====>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3]: 0x%X ,0x%X ,0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
+
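+ /* u8Protocol is a single scalar field, so this loop runs exactly once. */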
+ for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8Protocol);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32]: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength:0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8EthertypeLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3]: 0x%02X ,0x%02X ,0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8Ethertype[0],
+ psfCSType->cCPacketClassificationRule.u8Ethertype[1],
+ psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority: 0x%X ", psfCSType->cCPacketClassificationRule.u16UserPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI: 0x%02X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1]: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
#ifdef VERSION_D5
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLable[6] : 0x %02X %02X %02X %02X %02X %02X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLable[6]: 0x %02X %02X %02X %02X %02X %02X ",
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
#endif
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid : 0x%02X",pstAddIndication->sfAuthorizedSet.bValid);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "AdmittedSet--->");
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID : 0x%X",pstAddIndication->sfAdmittedSet.u32SFID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID : 0x%X",pstAddIndication->sfAdmittedSet.u16CID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength : 0x%X",
- pstAddIndication->sfAdmittedSet.u8ServiceClassNameLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName : 0x %02X %02X %02X %02X %02X %02X",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid: 0x%02X", pstAddIndication->sfAuthorizedSet.bValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "AdmittedSet--->");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", pstAddIndication->sfAdmittedSet.u32SFID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", pstAddIndication->sfAdmittedSet.u16CID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X",
+ pstAddIndication->sfAdmittedSet.u8ServiceClassNameLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName: 0x %02X %02X %02X %02X %02X %02X",
pstAddIndication->sfAdmittedSet.u8ServiceClassName[0],
pstAddIndication->sfAdmittedSet.u8ServiceClassName[1],
pstAddIndication->sfAdmittedSet.u8ServiceClassName[2],
@@ -1241,429 +1030,338 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
pstAddIndication->sfAdmittedSet.u8ServiceClassName[4],
pstAddIndication->sfAdmittedSet.u8ServiceClassName[5]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8MBSService);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8QosParamSet);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8TrafficPriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst : 0x%X",
- pstAddIndication->sfAdmittedSet.u32MaxTrafficBurst);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X",
- pstAddIndication->sfAdmittedSet.u32MinReservedTrafficRate);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParamLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParam[0]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8ServiceFlowSchedulingType);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter : 0x%X",
- pstAddIndication->sfAdmittedSet.u32ToleratedJitter);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency : 0x%X",
- pstAddIndication->sfAdmittedSet.u32MaximumLatency);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
- pstAddIndication->sfAdmittedSet.u8FixedLengthVSVariableLengthSDUIndicator);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8SDUSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID : 0x%02X",
- pstAddIndication->sfAdmittedSet.u16TargetSAID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8ARQEnable);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQWindowSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQRetryTxTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQRetryRxTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQBlockLifeTime);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQSyncLossTimeOut);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8ARQDeliverInOrder);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQRxPurgeTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQBlockSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8CSSpecification);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8TypeOfDataDeliveryService);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime : 0x%X",
- pstAddIndication->sfAdmittedSet.u16SDUInterArrivalTime);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase : 0x%X",
- pstAddIndication->sfAdmittedSet.u16TimeBase);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference : 0x%X",
- pstAddIndication->sfAdmittedSet.u8PagingPreference);
-
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8TrafficIndicationPreference);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received : 0x%X",pstAddIndication->sfAdmittedSet.u8TotalClassifiers);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%02X", pstAddIndication->sfAdmittedSet.u8MBSService);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%02X", pstAddIndication->sfAdmittedSet.u8QosParamSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%02X", pstAddIndication->sfAdmittedSet.u8TrafficPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfAdmittedSet.u32MaxTrafficBurst);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate: 0x%X",
+ pstAddIndication->sfAdmittedSet.u32MinReservedTrafficRate);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%02X",
+ pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParamLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%02X",
+ pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParam[0]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%02X",
+ pstAddIndication->sfAdmittedSet.u8ServiceFlowSchedulingType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfAdmittedSet.u32ToleratedJitter);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfAdmittedSet.u32MaximumLatency);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
+ pstAddIndication->sfAdmittedSet.u8FixedLengthVSVariableLengthSDUIndicator);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%02X", pstAddIndication->sfAdmittedSet.u8SDUSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID: 0x%02X", pstAddIndication->sfAdmittedSet.u16TargetSAID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable: 0x%02X", pstAddIndication->sfAdmittedSet.u8ARQEnable);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQWindowSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRetryTxTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRetryRxTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQBlockLifeTime);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQSyncLossTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder: 0x%02X", pstAddIndication->sfAdmittedSet.u8ARQDeliverInOrder);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRxPurgeTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQBlockSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification: 0x%02X", pstAddIndication->sfAdmittedSet.u8CSSpecification);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService: 0x%02X",
+ pstAddIndication->sfAdmittedSet.u8TypeOfDataDeliveryService);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfAdmittedSet.u16SDUInterArrivalTime);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase: 0x%X", pstAddIndication->sfAdmittedSet.u16TimeBase);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference: 0x%X", pstAddIndication->sfAdmittedSet.u8PagingPreference);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference: 0x%02X",
+ pstAddIndication->sfAdmittedSet.u8TrafficIndicationPreference);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfAdmittedSet.u8TotalClassifiers);
nCurClassifierCnt = pstAddIndication->sfAdmittedSet.u8TotalClassifiers;
-
- if(nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
- {
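+ /* Clamp to the fixed-size classifier array, as for the authorized set above. */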
+ if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
- }
-
-
- for(nIndex = 0 ; nIndex < nCurClassifierCnt ; nIndex++)
- {
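+ /* Walk every classifier rule carried in the admitted set. */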
+ for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
stConvergenceSLTypes *psfCSType = NULL;
- psfCSType = &pstAddIndication->sfAdmittedSet.cConvergenceSLTypes[nIndex];
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority :0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength :0x%02X",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3] :0x%02X %02X %02X",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
- for(uiLoopIndex=0; uiLoopIndex < 1; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8Protocol);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength :0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
-
- for(uiLoopIndex=0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32] : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
-
- for(uiLoopIndex=0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32] : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4] : 0x %02X %02X %02X %02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4] : 0x %02X %02X %02X %02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddress[6] : 0x %02X %02X %02X %02X %02X %02X",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6] : 0x %02X %02X %02X %02X %02X %02X",
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthertypeLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3] : 0x%02X %02X %02X",
- psfCSType->cCPacketClassificationRule.u8Ethertype[0],
- psfCSType->cCPacketClassificationRule.u8Ethertype[1],
- psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority : 0x%X ",
- psfCSType->cCPacketClassificationRule.u16UserPriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID : 0x%X ",
- psfCSType->cCPacketClassificationRule.u16VLANID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex : 0x%X ",
- psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength : 0x%02X",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1] : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
+ psfCSType = &pstAddIndication->sfAdmittedSet.cConvergenceSLTypes[nIndex];
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength: 0x%02X",
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3]: 0x%02X %02X %02X",
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
+ for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ", psfCSType->cCPacketClassificationRule.u8Protocol);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32]: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x %02X %02X %02X %02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x %02X %02X %02X %02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ", psfCSType->cCPacketClassificationRule.u8EthertypeLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3]: 0x%02X %02X %02X",
+ psfCSType->cCPacketClassificationRule.u8Ethertype[0],
+ psfCSType->cCPacketClassificationRule.u8Ethertype[1],
+ psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority: 0x%X ", psfCSType->cCPacketClassificationRule.u16UserPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI: 0x%02X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength: 0x%02X",
+ psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1]: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
#ifdef VERSION_D5
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength : 0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLable[6] : 0x %02X %02X %02X %02X %02X %02X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLable[6]: 0x %02X %02X %02X %02X %02X %02X ",
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
#endif
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid : 0x%X",pstAddIndication->sfAdmittedSet.bValid);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " ActiveSet--->");
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID : 0x%X",pstAddIndication->sfActiveSet.u32SFID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID : 0x%X",pstAddIndication->sfActiveSet.u16CID);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength : 0x%X",
- pstAddIndication->sfActiveSet.u8ServiceClassNameLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName : 0x %02X %02X %02X %02X %02X %02X",
- pstAddIndication->sfActiveSet.u8ServiceClassName[0],
- pstAddIndication->sfActiveSet.u8ServiceClassName[1],
- pstAddIndication->sfActiveSet.u8ServiceClassName[2],
- pstAddIndication->sfActiveSet.u8ServiceClassName[3],
- pstAddIndication->sfActiveSet.u8ServiceClassName[4],
- pstAddIndication->sfActiveSet.u8ServiceClassName[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService : 0x%02X",
- pstAddIndication->sfActiveSet.u8MBSService);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet : 0x%02X",
- pstAddIndication->sfActiveSet.u8QosParamSet);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority : 0x%02X",
- pstAddIndication->sfActiveSet.u8TrafficPriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst : 0x%X",
- pstAddIndication->sfActiveSet.u32MaxTrafficBurst);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X",
- pstAddIndication->sfActiveSet.u32MinReservedTrafficRate);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength : 0x%02X",
- pstAddIndication->sfActiveSet.u8VendorSpecificQoSParamLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam : 0x%02X",
- pstAddIndication->sfActiveSet.u8VendorSpecificQoSParam[0]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType : 0x%02X",
- pstAddIndication->sfActiveSet.u8ServiceFlowSchedulingType);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter : 0x%X",
- pstAddIndication->sfActiveSet.u32ToleratedJitter);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency : 0x%X",
- pstAddIndication->sfActiveSet.u32MaximumLatency);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
- pstAddIndication->sfActiveSet.u8FixedLengthVSVariableLengthSDUIndicator);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize : 0x%X",
- pstAddIndication->sfActiveSet.u8SDUSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TargetSAID : 0x%X",
- pstAddIndication->sfActiveSet.u16TargetSAID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQEnable : 0x%X",
- pstAddIndication->sfActiveSet.u8ARQEnable);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQWindowSize : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQWindowSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryTxTimeOut : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQRetryTxTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryRxTimeOut : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQRetryRxTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockLifeTime : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQBlockLifeTime);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQSyncLossTimeOut : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQSyncLossTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQDeliverInOrder : 0x%X",
- pstAddIndication->sfActiveSet.u8ARQDeliverInOrder);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRxPurgeTimeOut : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQRxPurgeTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockSize : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQBlockSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8CSSpecification : 0x%X",
- pstAddIndication->sfActiveSet.u8CSSpecification);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TypeOfDataDeliveryService : 0x%X",
- pstAddIndication->sfActiveSet.u8TypeOfDataDeliveryService);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16SDUInterArrivalTime : 0x%X",
- pstAddIndication->sfActiveSet.u16SDUInterArrivalTime);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TimeBase : 0x%X",
- pstAddIndication->sfActiveSet.u16TimeBase);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8PagingPreference : 0x%X",
- pstAddIndication->sfActiveSet.u8PagingPreference);
-
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TrafficIndicationPreference : 0x%X",
- pstAddIndication->sfActiveSet.u8TrafficIndicationPreference);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received : 0x%X",pstAddIndication->sfActiveSet.u8TotalClassifiers);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid: 0x%X", pstAddIndication->sfAdmittedSet.bValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " ActiveSet--->");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", pstAddIndication->sfActiveSet.u32SFID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", pstAddIndication->sfActiveSet.u16CID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X", pstAddIndication->sfActiveSet.u8ServiceClassNameLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName: 0x %02X %02X %02X %02X %02X %02X",
+ pstAddIndication->sfActiveSet.u8ServiceClassName[0],
+ pstAddIndication->sfActiveSet.u8ServiceClassName[1],
+ pstAddIndication->sfActiveSet.u8ServiceClassName[2],
+ pstAddIndication->sfActiveSet.u8ServiceClassName[3],
+ pstAddIndication->sfActiveSet.u8ServiceClassName[4],
+ pstAddIndication->sfActiveSet.u8ServiceClassName[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%02X", pstAddIndication->sfActiveSet.u8MBSService);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%02X", pstAddIndication->sfActiveSet.u8QosParamSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%02X", pstAddIndication->sfActiveSet.u8TrafficPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfActiveSet.u32MaxTrafficBurst);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate: 0x%X",
+ pstAddIndication->sfActiveSet.u32MinReservedTrafficRate);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%02X",
+ pstAddIndication->sfActiveSet.u8VendorSpecificQoSParamLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%02X",
+ pstAddIndication->sfActiveSet.u8VendorSpecificQoSParam[0]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%02X",
+ pstAddIndication->sfActiveSet.u8ServiceFlowSchedulingType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfActiveSet.u32ToleratedJitter);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfActiveSet.u32MaximumLatency);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
+ pstAddIndication->sfActiveSet.u8FixedLengthVSVariableLengthSDUIndicator);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%X", pstAddIndication->sfActiveSet.u8SDUSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TargetSAID: 0x%X", pstAddIndication->sfActiveSet.u16TargetSAID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQEnable: 0x%X", pstAddIndication->sfActiveSet.u8ARQEnable);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQWindowSize: 0x%X", pstAddIndication->sfActiveSet.u16ARQWindowSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRetryTxTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRetryRxTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfActiveSet.u16ARQBlockLifeTime);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQSyncLossTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQDeliverInOrder: 0x%X", pstAddIndication->sfActiveSet.u8ARQDeliverInOrder);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRxPurgeTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockSize: 0x%X", pstAddIndication->sfActiveSet.u16ARQBlockSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8CSSpecification: 0x%X", pstAddIndication->sfActiveSet.u8CSSpecification);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TypeOfDataDeliveryService: 0x%X",
+ pstAddIndication->sfActiveSet.u8TypeOfDataDeliveryService);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfActiveSet.u16SDUInterArrivalTime);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TimeBase: 0x%X", pstAddIndication->sfActiveSet.u16TimeBase);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8PagingPreference: 0x%X", pstAddIndication->sfActiveSet.u8PagingPreference);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TrafficIndicationPreference: 0x%X",
+ pstAddIndication->sfActiveSet.u8TrafficIndicationPreference);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfActiveSet.u8TotalClassifiers);
nCurClassifierCnt = pstAddIndication->sfActiveSet.u8TotalClassifiers;
-
- if(nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
- {
+ if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
- }
-
- for(nIndex = 0 ; nIndex < nCurClassifierCnt ; nIndex++)
- {
+ for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
stConvergenceSLTypes *psfCSType = NULL;
- psfCSType = &pstAddIndication->sfActiveSet.cConvergenceSLTypes[nIndex];
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ClassifierRulePriority :0x%X ",
- psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPTypeOfServiceLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPTypeOfService[3] :0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
- for(uiLoopIndex=0; uiLoopIndex < 1; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Protocol : 0x%X ",
- psfCSType->cCPacketClassificationRule.u8Protocol);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
-
- for(uiLoopIndex=0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]:0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
-
- for(uiLoopIndex=0;uiLoopIndex<32;uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPDestinationAddress[32]:0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolSourcePortRangeLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolSourcePortRange[4]:0x%X ,0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolDestPortRangeLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolDestPortRange[4]:0x%X ,0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetDestMacAddressLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetDestMacAddress[6]:0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetSourceMACAddressLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]:0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthertypeLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8EthertypeLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Ethertype[3] :0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8Ethertype[0],
- psfCSType->cCPacketClassificationRule.u8Ethertype[1],
- psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16UserPriority :0x%X ",
- psfCSType->cCPacketClassificationRule.u16UserPriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16VLANID :0x%X ",
- psfCSType->cCPacketClassificationRule.u16VLANID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8AssociatedPHSI :0x%X ",
- psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16PacketClassificationRuleIndex:0x%X ",
- psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8VendorSpecificClassifierParamLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8VendorSpecificClassifierParam[1]:0x%X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
+ psfCSType = &pstAddIndication->sfActiveSet.cConvergenceSLTypes[nIndex];
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ClassifierRulePriority: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPTypeOfServiceLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPTypeOfService[3]: 0x%X ,0x%X ,0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Protocol: 0x%X ", psfCSType->cCPacketClassificationRule.u8Protocol);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPDestinationAddress[32]:0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolSourcePortRangeLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolSourcePortRange[4]: 0x%X ,0x%X ,0x%X ,0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolDestPortRangeLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolDestPortRange[4]: 0x%X ,0x%X ,0x%X ,0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetDestMacAddressLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetDestMacAddress[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
+
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetSourceMACAddressLength: 0x%X ",
+				psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddressLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthertypeLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8EthertypeLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Ethertype[3]: 0x%X ,0x%X ,0x%X ",
+ psfCSType->cCPacketClassificationRule.u8Ethertype[0],
+ psfCSType->cCPacketClassificationRule.u8Ethertype[1],
+ psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16UserPriority: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u16UserPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8AssociatedPHSI: 0x%X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16PacketClassificationRuleIndex:0x%X ",
+ psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8VendorSpecificClassifierParamLength:0x%X ",
+ psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8VendorSpecificClassifierParam[1]:0x%X ",
+ psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
#ifdef VERSION_D5
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPv6FlowLableLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPv6FlowLable[6] :0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPv6FlowLableLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPv6FlowLable[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
#endif
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " bValid : 0x%X",pstAddIndication->sfActiveSet.bValid);
-
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " bValid: 0x%X", pstAddIndication->sfActiveSet.bValid);
}
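The dump above still prints MAC addresses one byte at a time. Kernel printk formatting provides the %pM extension for exactly this case; a minimal sketch, assuming BCM_DEBUG_PRINT forwards its format string to the printk family:

```c
/* Sketch only: %pM prints a 6-byte MAC address, collapsing the six
 * explicit %02X arguments used above into one pointer argument.
 */
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,
		" u8EthernetDestMacAddress[6]: %pM",
		psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress);
```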
-static inline ULONG RestoreSFParam(PMINI_ADAPTER Adapter, ULONG ulAddrSFParamSet,PUCHAR pucDestBuffer)
+static inline ULONG RestoreSFParam(PMINI_ADAPTER Adapter, ULONG ulAddrSFParamSet, PUCHAR pucDestBuffer)
{
UINT nBytesToRead = sizeof(stServiceFlowParamSI);
- if(ulAddrSFParamSet == 0 || NULL == pucDestBuffer)
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Got Param address as 0!!");
+ if (ulAddrSFParamSet == 0 || NULL == pucDestBuffer) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Got Param address as 0!!");
return 0;
}
ulAddrSFParamSet = ntohl(ulAddrSFParamSet);
- //Read out the SF Param Set At the indicated Location
- if(rdm(Adapter, ulAddrSFParamSet, (PUCHAR)pucDestBuffer, nBytesToRead) < 0)
+ /* Read out the SF Param Set At the indicated Location */
+ if (rdm(Adapter, ulAddrSFParamSet, (PUCHAR)pucDestBuffer, nBytesToRead) < 0)
return STATUS_FAILURE;
return 1;
}
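RestoreSFParam() is a small validate/convert/read helper: it rejects a zero address or a NULL destination, converts the firmware-supplied address from network to host byte order, then reads one stServiceFlowParamSI from the device. A standalone sketch of the same flow, with read_device_memory() as a hypothetical stand-in for rdm():

```c
#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for rdm(); returns < 0 on failure. */
int read_device_memory(uint32_t addr, void *dst, size_t len);

int restore_param(uint32_t net_addr, void *dst, size_t len)
{
	if (net_addr == 0 || dst == NULL)
		return 0;			/* nothing to restore */
	/* the target hands the address back in network byte order */
	if (read_device_memory(ntohl(net_addr), dst, len) < 0)
		return -1;
	return 1;
}
```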
-
-static ULONG StoreSFParam(PMINI_ADAPTER Adapter,PUCHAR pucSrcBuffer,ULONG ulAddrSFParamSet)
+static ULONG StoreSFParam(PMINI_ADAPTER Adapter, PUCHAR pucSrcBuffer, ULONG ulAddrSFParamSet)
{
- UINT nBytesToWrite = sizeof(stServiceFlowParamSI);
+ UINT nBytesToWrite = sizeof(stServiceFlowParamSI);
int ret = 0;
- if(ulAddrSFParamSet == 0 || NULL == pucSrcBuffer)
- {
+ if (ulAddrSFParamSet == 0 || NULL == pucSrcBuffer)
return 0;
- }
ret = wrm(Adapter, ulAddrSFParamSet, (u8 *)pucSrcBuffer, nBytesToWrite);
if (ret < 0) {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s:%d WRM failed",__FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s:%d WRM failed", __func__, __LINE__);
return ret;
}
return 1;
}
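Note that StoreSFParam() now mixes three return conventions: 0 for a bad address or buffer, the negative wrm() status on a write failure, and 1 on success, which is why callers must test `!= 1`. A sketch of the more conventional 0/-errno contract, with write_device_memory() as a hypothetical stand-in for wrm():

```c
#include <errno.h>
#include <stddef.h>

/* Hypothetical stand-in for wrm(); returns 0 or a negative errno. */
int write_device_memory(unsigned long addr, const void *src, size_t len);

int store_param(unsigned long addr, const void *src, size_t len)
{
	if (addr == 0 || src == NULL)
		return -EINVAL;
	return write_device_memory(addr, src, len);	/* 0 or -errno */
}
```

Callers would then check `ret < 0` rather than comparing against 1.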
-ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *puBufferLength)
+ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter, PVOID pvBuffer, UINT *puBufferLength)
{
stLocalSFAddIndicationAlt *pstAddIndicationAlt = NULL;
- stLocalSFAddIndication * pstAddIndication = NULL;
+ stLocalSFAddIndication *pstAddIndication = NULL;
stLocalSFDeleteRequest *pstDeletionRequest;
UINT uiSearchRuleIndex;
ULONG ulSFID;
@@ -1671,52 +1369,51 @@ ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *p
pstAddIndicationAlt = (stLocalSFAddIndicationAlt *)(pvBuffer);
/*
- * In case of DSD Req By MS, we should immediately delete this SF so that
- * we can stop the further classifying the pkt for this SF.
- */
- if(pstAddIndicationAlt->u8Type == DSD_REQ)
- {
+ * In case of DSD Req By MS, we should immediately delete this SF so that
+ * we can stop the further classifying the pkt for this SF.
+ */
+ if (pstAddIndicationAlt->u8Type == DSD_REQ) {
pstDeletionRequest = (stLocalSFDeleteRequest *)pvBuffer;
ulSFID = ntohl(pstDeletionRequest->u32SFID);
- uiSearchRuleIndex=SearchSfid(Adapter,ulSFID);
+ uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
- if(uiSearchRuleIndex < NO_OF_QUEUES)
- {
- deleteSFBySfid(Adapter,uiSearchRuleIndex);
+ if (uiSearchRuleIndex < NO_OF_QUEUES) {
+ deleteSFBySfid(Adapter, uiSearchRuleIndex);
Adapter->u32TotalDSD++;
}
return 1;
}
-
- if( (pstAddIndicationAlt->u8Type == DSD_RSP) ||
- (pstAddIndicationAlt->u8Type == DSD_ACK))
- {
- //No Special handling send the message as it is
+ if ((pstAddIndicationAlt->u8Type == DSD_RSP) ||
+ (pstAddIndicationAlt->u8Type == DSD_ACK)) {
+		/* No special handling; send the message as it is */
return 1;
}
- // For DSA_REQ, only up to "psfAuthorizedSet" parameter should be accessed by driver!
+	/* For DSA_REQ, only fields up to "psfAuthorizedSet" should be accessed by the driver! */
- pstAddIndication=kmalloc(sizeof(*pstAddIndication), GFP_KERNEL);
- if(NULL==pstAddIndication)
+ pstAddIndication = kmalloc(sizeof(*pstAddIndication), GFP_KERNEL);
+ if (pstAddIndication == NULL)
return 0;
/* AUTHORIZED SET */
pstAddIndication->psfAuthorizedSet = (stServiceFlowParamSI *)
GetNextTargetBufferLocation(Adapter, pstAddIndicationAlt->u16TID);
- if(!pstAddIndication->psfAuthorizedSet)
+ if (!pstAddIndication->psfAuthorizedSet) {
+ kfree(pstAddIndication);
return 0;
+ }
- if(StoreSFParam(Adapter,(PUCHAR)&pstAddIndicationAlt->sfAuthorizedSet,
- (ULONG)pstAddIndication->psfAuthorizedSet)!= 1)
+ if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfAuthorizedSet,
+ (ULONG)pstAddIndication->psfAuthorizedSet) != 1) {
+ kfree(pstAddIndication);
return 0;
+ }
/* this can't possibly be right */
pstAddIndication->psfAuthorizedSet = (stServiceFlowParamSI *)ntohl((ULONG)pstAddIndication->psfAuthorizedSet);
- if(pstAddIndicationAlt->u8Type == DSA_REQ)
- {
+ if (pstAddIndicationAlt->u8Type == DSA_REQ) {
stLocalSFAddRequest AddRequest;
AddRequest.u8Type = pstAddIndicationAlt->u8Type;
@@ -1724,18 +1421,18 @@ ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *p
AddRequest.u16TID = pstAddIndicationAlt->u16TID;
AddRequest.u16CID = pstAddIndicationAlt->u16CID;
AddRequest.u16VCID = pstAddIndicationAlt->u16VCID;
- AddRequest.psfParameterSet =pstAddIndication->psfAuthorizedSet ;
+ AddRequest.psfParameterSet = pstAddIndication->psfAuthorizedSet;
(*puBufferLength) = sizeof(stLocalSFAddRequest);
- memcpy(pvBuffer,&AddRequest,sizeof(stLocalSFAddRequest));
+ memcpy(pvBuffer, &AddRequest, sizeof(stLocalSFAddRequest));
+ kfree(pstAddIndication);
return 1;
}
- // Since it's not DSA_REQ, we can access all field in pstAddIndicationAlt
-
- //We need to extract the structure from the buffer and pack it differently
+	/* Since it's not DSA_REQ, we can access all fields in pstAddIndicationAlt */
+ /* We need to extract the structure from the buffer and pack it differently */
pstAddIndication->u8Type = pstAddIndicationAlt->u8Type;
- pstAddIndication->eConnectionDir= pstAddIndicationAlt->u8Direction ;
+ pstAddIndication->eConnectionDir = pstAddIndicationAlt->u8Direction;
pstAddIndication->u16TID = pstAddIndicationAlt->u16TID;
pstAddIndication->u16CID = pstAddIndicationAlt->u16CID;
pstAddIndication->u16VCID = pstAddIndicationAlt->u16VCID;
@@ -1744,21 +1441,28 @@ ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *p
/* ADMITTED SET */
pstAddIndication->psfAdmittedSet = (stServiceFlowParamSI *)
GetNextTargetBufferLocation(Adapter, pstAddIndicationAlt->u16TID);
- if(!pstAddIndication->psfAdmittedSet)
+ if (!pstAddIndication->psfAdmittedSet) {
+ kfree(pstAddIndication);
return 0;
- if(StoreSFParam(Adapter,(PUCHAR)&pstAddIndicationAlt->sfAdmittedSet,(ULONG)pstAddIndication->psfAdmittedSet) != 1)
+ }
+ if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfAdmittedSet, (ULONG)pstAddIndication->psfAdmittedSet) != 1) {
+ kfree(pstAddIndication);
return 0;
+ }
pstAddIndication->psfAdmittedSet = (stServiceFlowParamSI *)ntohl((ULONG)pstAddIndication->psfAdmittedSet);
-
/* ACTIVE SET */
pstAddIndication->psfActiveSet = (stServiceFlowParamSI *)
GetNextTargetBufferLocation(Adapter, pstAddIndicationAlt->u16TID);
- if(!pstAddIndication->psfActiveSet)
+ if (!pstAddIndication->psfActiveSet) {
+ kfree(pstAddIndication);
return 0;
- if(StoreSFParam(Adapter,(PUCHAR)&pstAddIndicationAlt->sfActiveSet,(ULONG)pstAddIndication->psfActiveSet) != 1)
+ }
+ if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfActiveSet, (ULONG)pstAddIndication->psfActiveSet) != 1) {
+ kfree(pstAddIndication);
return 0;
+ }
pstAddIndication->psfActiveSet = (stServiceFlowParamSI *)ntohl((ULONG)pstAddIndication->psfActiveSet);
@@ -1768,47 +1472,41 @@ ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *p
return 1;
}
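The hunks above plug the original memory leaks by repeating kfree(pstAddIndication) before every early return. The usual kernel idiom is a single goto-based error exit, since the cleanup is identical on each failure path. A standalone sketch of that shape with hypothetical helpers (err_free is illustrative, not a label in the driver):

```c
#include <stdlib.h>

/* Hypothetical stand-ins for the driver's buffer helpers. */
void *next_target_buffer(void);
int store_param_at(void *dst, const void *src);

int store_response(const void *msg, void **out)
{
	void *ind = malloc(256);	/* stands in for kmalloc(..., GFP_KERNEL) */
	void *buf;

	if (ind == NULL)
		return 0;

	buf = next_target_buffer();
	if (buf == NULL)
		goto err_free;		/* one exit path owns the free() */
	if (store_param_at(buf, msg) != 1)
		goto err_free;

	*out = ind;			/* caller now owns the allocation */
	return 1;

err_free:
	free(ind);
	return 0;
}
```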
-
static inline stLocalSFAddIndicationAlt
-*RestoreCmControlResponseMessage(register PMINI_ADAPTER Adapter,register PVOID pvBuffer)
+*RestoreCmControlResponseMessage(register PMINI_ADAPTER Adapter, register PVOID pvBuffer)
{
- ULONG ulStatus=0;
+ ULONG ulStatus = 0;
stLocalSFAddIndication *pstAddIndication = NULL;
stLocalSFAddIndicationAlt *pstAddIndicationDest = NULL;
- pstAddIndication = (stLocalSFAddIndication *)(pvBuffer);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "=====>" );
+ pstAddIndication = (stLocalSFAddIndication *)(pvBuffer);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "=====>");
if ((pstAddIndication->u8Type == DSD_REQ) ||
(pstAddIndication->u8Type == DSD_RSP) ||
(pstAddIndication->u8Type == DSD_ACK))
- {
return (stLocalSFAddIndicationAlt *)pvBuffer;
- }
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Inside RestoreCmControlResponseMessage ");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Inside RestoreCmControlResponseMessage ");
/*
- //Need to Allocate memory to contain the SUPER Large structures
- //Our driver can't create these structures on Stack :(
- */
- pstAddIndicationDest=kmalloc(sizeof(stLocalSFAddIndicationAlt), GFP_KERNEL);
-
- if(pstAddIndicationDest)
- {
- memset(pstAddIndicationDest,0,sizeof(stLocalSFAddIndicationAlt));
- }
- else
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Failed to allocate memory for SF Add Indication Structure ");
+ * Need to Allocate memory to contain the SUPER Large structures
+ * Our driver can't create these structures on Stack :(
+ */
+ pstAddIndicationDest = kmalloc(sizeof(stLocalSFAddIndicationAlt), GFP_KERNEL);
+
+ if (pstAddIndicationDest) {
+ memset(pstAddIndicationDest, 0, sizeof(stLocalSFAddIndicationAlt));
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Failed to allocate memory for SF Add Indication Structure ");
return NULL;
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8Type : 0x%X",pstAddIndication->u8Type);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8Direction : 0x%X",pstAddIndication->eConnectionDir);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8TID : 0x%X",ntohs(pstAddIndication->u16TID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8CID : 0x%X",ntohs(pstAddIndication->u16CID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u16VCID : 0x%X",ntohs(pstAddIndication->u16VCID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-autorized set loc : %p",pstAddIndication->psfAuthorizedSet);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-admitted set loc : %p",pstAddIndication->psfAdmittedSet);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-Active set loc : %p",pstAddIndication->psfActiveSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8Type : 0x%X", pstAddIndication->u8Type);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8Direction : 0x%X", pstAddIndication->eConnectionDir);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8TID : 0x%X", ntohs(pstAddIndication->u16TID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8CID : 0x%X", ntohs(pstAddIndication->u16CID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u16VCID : 0x%X", ntohs(pstAddIndication->u16VCID));
+	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-authorized set loc : %p", pstAddIndication->psfAuthorizedSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-admitted set loc : %p", pstAddIndication->psfAdmittedSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-Active set loc : %p", pstAddIndication->psfActiveSet);
pstAddIndicationDest->u8Type = pstAddIndication->u8Type;
pstAddIndicationDest->u8Direction = pstAddIndication->eConnectionDir;
@@ -1817,42 +1515,39 @@ static inline stLocalSFAddIndicationAlt
pstAddIndicationDest->u16VCID = pstAddIndication->u16VCID;
pstAddIndicationDest->u8CC = pstAddIndication->u8CC;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Active Set ");
- ulStatus=RestoreSFParam(Adapter,(ULONG)pstAddIndication->psfActiveSet, (PUCHAR)&pstAddIndicationDest->sfActiveSet);
- if(ulStatus != 1)
- {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Active Set ");
+ ulStatus = RestoreSFParam(Adapter, (ULONG)pstAddIndication->psfActiveSet, (PUCHAR)&pstAddIndicationDest->sfActiveSet);
+ if (ulStatus != 1)
goto failed_restore_sf_param;
- }
- if(pstAddIndicationDest->sfActiveSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
+
+ if (pstAddIndicationDest->sfActiveSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
pstAddIndicationDest->sfActiveSet.u8TotalClassifiers = MAX_CLASSIFIERS_IN_SF;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Admitted Set ");
- ulStatus=RestoreSFParam(Adapter,(ULONG)pstAddIndication->psfAdmittedSet,(PUCHAR)&pstAddIndicationDest->sfAdmittedSet);
- if(ulStatus != 1)
- {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Admitted Set ");
+ ulStatus = RestoreSFParam(Adapter, (ULONG)pstAddIndication->psfAdmittedSet, (PUCHAR)&pstAddIndicationDest->sfAdmittedSet);
+ if (ulStatus != 1)
goto failed_restore_sf_param;
- }
- if(pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
+
+ if (pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers = MAX_CLASSIFIERS_IN_SF;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Authorized Set ");
- ulStatus=RestoreSFParam(Adapter,(ULONG)pstAddIndication->psfAuthorizedSet,(PUCHAR)&pstAddIndicationDest->sfAuthorizedSet);
- if(ulStatus != 1)
- {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Authorized Set ");
+ ulStatus = RestoreSFParam(Adapter, (ULONG)pstAddIndication->psfAuthorizedSet, (PUCHAR)&pstAddIndicationDest->sfAuthorizedSet);
+ if (ulStatus != 1)
goto failed_restore_sf_param;
- }
- if(pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
+
+ if (pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers = MAX_CLASSIFIERS_IN_SF;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dumping the whole raw packet");
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================");
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " pstAddIndicationDest->sfActiveSet size %zx %p", sizeof(*pstAddIndicationDest), pstAddIndicationDest);
- //BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, (unsigned char *)pstAddIndicationDest, sizeof(*pstAddIndicationDest));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dumping the whole raw packet");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " pstAddIndicationDest->sfActiveSet size %zx %p", sizeof(*pstAddIndicationDest), pstAddIndicationDest);
+ /* BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, (unsigned char *)pstAddIndicationDest, sizeof(*pstAddIndicationDest)); */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================");
return pstAddIndicationDest;
failed_restore_sf_param:
kfree(pstAddIndicationDest);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "<=====" );
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "<=====");
return NULL;
}
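One simplification this hunk stops short of: the kmalloc()/memset() pair can collapse into kzalloc(), which returns already-zeroed memory and turns the test into the usual early-return form. A sketch of the equivalent allocation (kernel context assumed):

```c
	pstAddIndicationDest = kzalloc(sizeof(stLocalSFAddIndicationAlt), GFP_KERNEL);
	if (!pstAddIndicationDest) {
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
				"Failed to allocate memory for SF Add Indication Structure ");
		return NULL;
	}
```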
@@ -1860,7 +1555,7 @@ ULONG SetUpTargetDsxBuffers(PMINI_ADAPTER Adapter)
{
ULONG ulTargetDsxBuffersBase = 0;
ULONG ulCntTargetBuffers;
- ULONG ulIndex=0;
+ ULONG i;
int Status;
if (!Adapter) {
@@ -1868,411 +1563,354 @@ ULONG SetUpTargetDsxBuffers(PMINI_ADAPTER Adapter)
return 0;
}
- if(Adapter->astTargetDsxBuffer[0].ulTargetDsxBuffer)
+ if (Adapter->astTargetDsxBuffer[0].ulTargetDsxBuffer)
return 1;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Size of Each DSX Buffer(Also size of ServiceFlowParamSI): %zx ",sizeof(stServiceFlowParamSI));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Reading DSX buffer From Target location %x ",DSX_MESSAGE_EXCHANGE_BUFFER);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Size of Each DSX Buffer(Also size of ServiceFlowParamSI): %zx ", sizeof(stServiceFlowParamSI));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Reading DSX buffer From Target location %x ", DSX_MESSAGE_EXCHANGE_BUFFER);
- Status = rdmalt(Adapter, DSX_MESSAGE_EXCHANGE_BUFFER,
- (PUINT)&ulTargetDsxBuffersBase, sizeof(UINT));
- if(Status < 0)
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "RDM failed!!");
+ Status = rdmalt(Adapter, DSX_MESSAGE_EXCHANGE_BUFFER, (PUINT)&ulTargetDsxBuffersBase, sizeof(UINT));
+ if (Status < 0) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "RDM failed!!");
return 0;
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Base Address Of DSX Target Buffer : 0x%lx",ulTargetDsxBuffersBase);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Tgt Buffer is Now %lx :",ulTargetDsxBuffersBase);
-
- ulCntTargetBuffers = DSX_MESSAGE_EXCHANGE_BUFFER_SIZE/sizeof(stServiceFlowParamSI);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Base Address Of DSX Target Buffer : 0x%lx", ulTargetDsxBuffersBase);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Tgt Buffer is Now %lx :", ulTargetDsxBuffersBase);
+ ulCntTargetBuffers = DSX_MESSAGE_EXCHANGE_BUFFER_SIZE / sizeof(stServiceFlowParamSI);
Adapter->ulTotalTargetBuffersAvailable =
ulCntTargetBuffers > MAX_TARGET_DSX_BUFFERS ?
MAX_TARGET_DSX_BUFFERS : ulCntTargetBuffers;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Total Target DSX Buffer setup %lx ",Adapter->ulTotalTargetBuffersAvailable);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Total Target DSX Buffer setup %lx ", Adapter->ulTotalTargetBuffersAvailable);
- for(ulIndex=0; ulIndex < Adapter->ulTotalTargetBuffersAvailable ; ulIndex++)
- {
- Adapter->astTargetDsxBuffer[ulIndex].ulTargetDsxBuffer = ulTargetDsxBuffersBase;
- Adapter->astTargetDsxBuffer[ulIndex].valid=1;
- Adapter->astTargetDsxBuffer[ulIndex].tid=0;
- ulTargetDsxBuffersBase+=sizeof(stServiceFlowParamSI);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Target DSX Buffer %lx setup at 0x%lx",
- ulIndex, Adapter->astTargetDsxBuffer[ulIndex].ulTargetDsxBuffer);
+ for (i = 0; i < Adapter->ulTotalTargetBuffersAvailable; i++) {
+ Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer = ulTargetDsxBuffersBase;
+ Adapter->astTargetDsxBuffer[i].valid = 1;
+ Adapter->astTargetDsxBuffer[i].tid = 0;
+ ulTargetDsxBuffersBase += sizeof(stServiceFlowParamSI);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Target DSX Buffer %lx setup at 0x%lx",
+ i, Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer);
}
Adapter->ulCurrentTargetBuffer = 0;
Adapter->ulFreeTargetBufferCnt = Adapter->ulTotalTargetBuffersAvailable;
return 1;
}
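The ternary that clamps ulCntTargetBuffers to MAX_TARGET_DSX_BUFFERS is what the min_t() macro from <linux/kernel.h> expresses directly; a one-line sketch of the same clamp (kernel context assumed):

```c
	Adapter->ulTotalTargetBuffersAvailable =
		min_t(ULONG, ulCntTargetBuffers, MAX_TARGET_DSX_BUFFERS);
```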
-static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter,B_UINT16 tid)
+static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter, B_UINT16 tid)
{
- ULONG ulTargetDSXBufferAddress;
- ULONG ulTargetDsxBufferIndexToUse,ulMaxTry;
+ ULONG ulTargetDSXBufferAddress;
+ ULONG ulTargetDsxBufferIndexToUse, ulMaxTry;
- if((Adapter->ulTotalTargetBuffersAvailable == 0)||
- (Adapter->ulFreeTargetBufferCnt == 0))
- {
- ClearTargetDSXBuffer(Adapter,tid,FALSE);
+ if ((Adapter->ulTotalTargetBuffersAvailable == 0) || (Adapter->ulFreeTargetBufferCnt == 0)) {
+ ClearTargetDSXBuffer(Adapter, tid, FALSE);
return 0;
}
- ulTargetDsxBufferIndexToUse = Adapter->ulCurrentTargetBuffer;
- ulMaxTry = Adapter->ulTotalTargetBuffersAvailable;
- while((ulMaxTry)&&(Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].valid != 1))
- {
- ulTargetDsxBufferIndexToUse = (ulTargetDsxBufferIndexToUse+1)%
- Adapter->ulTotalTargetBuffersAvailable;
- ulMaxTry--;
+ ulTargetDsxBufferIndexToUse = Adapter->ulCurrentTargetBuffer;
+ ulMaxTry = Adapter->ulTotalTargetBuffersAvailable;
+ while ((ulMaxTry) && (Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].valid != 1)) {
+		ulTargetDsxBufferIndexToUse = (ulTargetDsxBufferIndexToUse + 1) % Adapter->ulTotalTargetBuffersAvailable;
+ ulMaxTry--;
}
- if(ulMaxTry==0)
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "\n GetNextTargetBufferLocation : Error No Free Target DSX Buffers FreeCnt : %lx ",Adapter->ulFreeTargetBufferCnt);
- ClearTargetDSXBuffer(Adapter,tid,FALSE);
+ if (ulMaxTry == 0) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "\n GetNextTargetBufferLocation : Error No Free Target DSX Buffers FreeCnt : %lx ", Adapter->ulFreeTargetBufferCnt);
+ ClearTargetDSXBuffer(Adapter, tid, FALSE);
return 0;
}
-
- ulTargetDSXBufferAddress =
- Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].ulTargetDsxBuffer;
- Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].valid=0;
- Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].tid=tid;
+ ulTargetDSXBufferAddress = Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].ulTargetDsxBuffer;
+ Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].valid = 0;
+ Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].tid = tid;
Adapter->ulFreeTargetBufferCnt--;
-
-
- ulTargetDsxBufferIndexToUse =
- (ulTargetDsxBufferIndexToUse+1)%Adapter->ulTotalTargetBuffersAvailable;
+	ulTargetDsxBufferIndexToUse = (ulTargetDsxBufferIndexToUse + 1) % Adapter->ulTotalTargetBuffersAvailable;
Adapter->ulCurrentTargetBuffer = ulTargetDsxBufferIndexToUse;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "GetNextTargetBufferLocation :Returning address %lx tid %d\n",
- ulTargetDSXBufferAddress,tid);
+	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "GetNextTargetBufferLocation: Returning address %lx tid %d\n", ulTargetDSXBufferAddress, tid);
+
return ulTargetDSXBufferAddress;
}
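GetNextTargetBufferLocation() is a round-robin allocator over a fixed slot table: start at the saved cursor, scan at most N entries for a valid slot, claim it, and advance the cursor past it. A standalone sketch of the scan with illustrative names (the caller guarantees count > 0, as the driver does):

```c
#include <stddef.h>

struct dsx_slot {
	unsigned long addr;
	int valid;
};

/* Return the index of the first free slot at or after 'start' (with
 * wraparound), or 'count' when every slot is claimed; this mirrors
 * the ulMaxTry loop above.
 */
static size_t find_free_slot(const struct dsx_slot *slots, size_t count,
			     size_t start)
{
	size_t idx = start;
	size_t tries = count;

	while (tries && !slots[idx].valid) {
		idx = (idx + 1) % count;	/* wrap like a ring */
		tries--;
	}
	return tries ? idx : count;
}
```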
-
-INT AllocAdapterDsxBuffer(PMINI_ADAPTER Adapter)
+int AllocAdapterDsxBuffer(PMINI_ADAPTER Adapter)
{
/*
- //Need to Allocate memory to contain the SUPER Large structures
- //Our driver can't create these structures on Stack
- */
- Adapter->caDsxReqResp=kmalloc(sizeof(stLocalSFAddIndicationAlt)+LEADER_SIZE, GFP_KERNEL);
- if(!Adapter->caDsxReqResp)
+ * Need to Allocate memory to contain the SUPER Large structures
+ * Our driver can't create these structures on Stack
+ */
+ Adapter->caDsxReqResp = kmalloc(sizeof(stLocalSFAddIndicationAlt)+LEADER_SIZE, GFP_KERNEL);
+ if (!Adapter->caDsxReqResp)
return -ENOMEM;
+
return 0;
}
-INT FreeAdapterDsxBuffer(PMINI_ADAPTER Adapter)
+int FreeAdapterDsxBuffer(PMINI_ADAPTER Adapter)
{
kfree(Adapter->caDsxReqResp);
return 0;
-
}
-/**
-@ingroup ctrl_pkt_functions
-This routinue would process the Control responses
-for the Connection Management.
-@return - Queue index for the free SFID else returns Invalid Index.
-*/
-BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /**<Pointer to the Adapter structure*/
- PVOID pvBuffer /**Starting Address of the Buffer, that contains the AddIndication Data*/
- )
+
+/*
+ * @ingroup ctrl_pkt_functions
+ * This routine processes the Control responses
+ * for the Connection Management.
+ * @return - Queue index for the free SFID else returns Invalid Index.
+ */
+BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /* Pointer to the Adapter structure */
+ PVOID pvBuffer /* Starting Address of the Buffer, that contains the AddIndication Data */)
{
- stServiceFlowParamSI *psfLocalSet=NULL;
- stLocalSFAddIndicationAlt *pstAddIndication = NULL;
- stLocalSFChangeIndicationAlt *pstChangeIndication = NULL;
- PLEADER pLeader=NULL;
+ stServiceFlowParamSI *psfLocalSet = NULL;
+ stLocalSFAddIndicationAlt *pstAddIndication = NULL;
+ stLocalSFChangeIndicationAlt *pstChangeIndication = NULL;
+ PLEADER pLeader = NULL;
+
/*
- //Otherwise the message contains a target address from where we need to
- //read out the rest of the service flow param structure
- */
- if((pstAddIndication = RestoreCmControlResponseMessage(Adapter,pvBuffer))
- == NULL)
- {
- ClearTargetDSXBuffer(Adapter,((stLocalSFAddIndication *)pvBuffer)->u16TID, FALSE);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "Error in restoring Service Flow param structure from DSx message");
+ * Otherwise the message contains a target address from where we need to
+ * read out the rest of the service flow param structure
+ */
+ pstAddIndication = RestoreCmControlResponseMessage(Adapter, pvBuffer);
+ if (pstAddIndication == NULL) {
+ ClearTargetDSXBuffer(Adapter, ((stLocalSFAddIndication *)pvBuffer)->u16TID, FALSE);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Error in restoring Service Flow param structure from DSx message");
return FALSE;
}
DumpCmControlPacket(pstAddIndication);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "====>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "====>");
pLeader = (PLEADER)Adapter->caDsxReqResp;
- pLeader->Status =CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ;
+ pLeader->Status = CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ;
pLeader->Vcid = 0;
- ClearTargetDSXBuffer(Adapter,pstAddIndication->u16TID,FALSE);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "### TID RECEIVED %d\n",pstAddIndication->u16TID);
- switch(pstAddIndication->u8Type)
+ ClearTargetDSXBuffer(Adapter, pstAddIndication->u16TID, FALSE);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "### TID RECEIVED %d\n", pstAddIndication->u16TID);
+ switch (pstAddIndication->u8Type) {
+ case DSA_REQ:
{
- case DSA_REQ:
- {
- pLeader->PLength = sizeof(stLocalSFAddIndicationAlt);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Sending DSA Response....\n");
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA RESPONSE TO MAC %d", pLeader->PLength );
- *((stLocalSFAddIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))
- = *pstAddIndication;
- ((stLocalSFAddIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_RSP;
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " VCID = %x", ntohs(pstAddIndication->u16VCID));
- CopyBufferToControlPacket(Adapter,(PVOID)Adapter->caDsxReqResp);
- kfree(pstAddIndication);
- }
- break;
- case DSA_RSP:
- {
- pLeader->PLength = sizeof(stLocalSFAddIndicationAlt);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA ACK TO MAC %d",
+ pLeader->PLength = sizeof(stLocalSFAddIndicationAlt);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Sending DSA Response....\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA RESPONSE TO MAC %d", pLeader->PLength);
+ *((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))
+ = *pstAddIndication;
+ ((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_RSP;
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " VCID = %x", ntohs(pstAddIndication->u16VCID));
+ CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
+ kfree(pstAddIndication);
+ }
+ break;
+ case DSA_RSP:
+ {
+ pLeader->PLength = sizeof(stLocalSFAddIndicationAlt);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA ACK TO MAC %d",
pLeader->PLength);
- *((stLocalSFAddIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))
- = *pstAddIndication;
- ((stLocalSFAddIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_ACK;
+ *((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))
+ = *pstAddIndication;
+ ((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_ACK;
- }//no break here..we should go down.
- case DSA_ACK:
- {
- UINT uiSearchRuleIndex=0;
+	} /* no break here; fall through to DSA_ACK */
+ case DSA_ACK:
+ {
+ UINT uiSearchRuleIndex = 0;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "VCID:0x%X",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "VCID:0x%X",
ntohs(pstAddIndication->u16VCID));
- uiSearchRuleIndex=SearchFreeSfid(Adapter);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"uiSearchRuleIndex:0x%X ",
+ uiSearchRuleIndex = SearchFreeSfid(Adapter);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "uiSearchRuleIndex:0x%X ",
uiSearchRuleIndex);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Direction:0x%X ",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Direction:0x%X ",
pstAddIndication->u8Direction);
- if((uiSearchRuleIndex< NO_OF_QUEUES) )
- {
- Adapter->PackInfo[uiSearchRuleIndex].ucDirection =
- pstAddIndication->u8Direction;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "bValid:0x%X ",
+			if (uiSearchRuleIndex < NO_OF_QUEUES) {
+ Adapter->PackInfo[uiSearchRuleIndex].ucDirection =
+ pstAddIndication->u8Direction;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "bValid:0x%X ",
pstAddIndication->sfActiveSet.bValid);
- if(pstAddIndication->sfActiveSet.bValid==TRUE)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bActiveSet=TRUE;
- }
- if(pstAddIndication->sfAuthorizedSet.bValid==TRUE)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet=TRUE;
- }
- if(pstAddIndication->sfAdmittedSet.bValid==TRUE)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet=TRUE;
- }
- if(FALSE == pstAddIndication->sfActiveSet.bValid)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE;
- if(pstAddIndication->sfAdmittedSet.bValid)
- {
- psfLocalSet = &pstAddIndication->sfAdmittedSet;
- }
- else if(pstAddIndication->sfAuthorizedSet.bValid)
- {
- psfLocalSet = &pstAddIndication->sfAuthorizedSet;
- }
- }
- else
- {
- psfLocalSet = &pstAddIndication->sfActiveSet;
- Adapter->PackInfo[uiSearchRuleIndex].bActive=TRUE;
- }
-
- if(!psfLocalSet)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
- Adapter->PackInfo[uiSearchRuleIndex].bActive=FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].bValid=FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value=0;
- kfree(pstAddIndication);
- }
+ if (pstAddIndication->sfActiveSet.bValid == TRUE)
+ Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
+
+ if (pstAddIndication->sfAuthorizedSet.bValid == TRUE)
+ Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
+
+ if (pstAddIndication->sfAdmittedSet.bValid == TRUE)
+ Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
+
+ if (pstAddIndication->sfActiveSet.bValid == FALSE) {
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE;
+ if (pstAddIndication->sfAdmittedSet.bValid)
+ psfLocalSet = &pstAddIndication->sfAdmittedSet;
+ else if (pstAddIndication->sfAuthorizedSet.bValid)
+ psfLocalSet = &pstAddIndication->sfAuthorizedSet;
+ } else {
+ psfLocalSet = &pstAddIndication->sfActiveSet;
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
+ }
- else if(psfLocalSet->bValid && (pstAddIndication->u8CC == 0))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSA ACK");
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value =
- ntohs(pstAddIndication->u16VCID);
- Adapter->PackInfo[uiSearchRuleIndex].usCID =
- ntohs(pstAddIndication->u16CID);
-
- if(UPLINK_DIR == pstAddIndication->u8Direction)
- atomic_set(&Adapter->PackInfo[uiSearchRuleIndex].uiPerSFTxResourceCount, DEFAULT_PERSFCOUNT);
- CopyToAdapter(Adapter,psfLocalSet,uiSearchRuleIndex,
- DSA_ACK, pstAddIndication);
- // don't free pstAddIndication
-
- /* Inside CopyToAdapter, Sorting of all the SFs take place.
- Hence any access to the newly added SF through uiSearchRuleIndex is invalid.
- SHOULD BE STRICTLY AVOIDED.
- */
-// *(PULONG)(((PUCHAR)pvBuffer)+1)=psfLocalSet->u32SFID;
- memcpy((((PUCHAR)pvBuffer)+1), &psfLocalSet->u32SFID, 4);
-
- if(pstAddIndication->sfActiveSet.bValid == TRUE)
- {
- if(UPLINK_DIR == pstAddIndication->u8Direction)
- {
- if(!Adapter->LinkUpStatus)
- {
- netif_carrier_on(Adapter->dev);
- netif_start_queue(Adapter->dev);
- Adapter->LinkUpStatus = 1;
- if (netif_msg_link(Adapter))
- pr_info(PFX "%s: link up\n", Adapter->dev->name);
- atomic_set(&Adapter->TxPktAvail, 1);
- wake_up(&Adapter->tx_packet_wait_queue);
- Adapter->liTimeSinceLastNetEntry = get_seconds();
- }
+ if (!psfLocalSet) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bValid = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
+ kfree(pstAddIndication);
+ } else if (psfLocalSet->bValid && (pstAddIndication->u8CC == 0)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSA ACK");
+ Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pstAddIndication->u16VCID);
+ Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pstAddIndication->u16CID);
+
+ if (UPLINK_DIR == pstAddIndication->u8Direction)
+ atomic_set(&Adapter->PackInfo[uiSearchRuleIndex].uiPerSFTxResourceCount, DEFAULT_PERSFCOUNT);
+
+ CopyToAdapter(Adapter, psfLocalSet, uiSearchRuleIndex, DSA_ACK, pstAddIndication);
+ /* don't free pstAddIndication */
+
+				/* Inside CopyToAdapter, sorting of all the SFs takes place.
+ * Hence any access to the newly added SF through uiSearchRuleIndex is invalid.
+ * SHOULD BE STRICTLY AVOIDED.
+ */
+ /* *(PULONG)(((PUCHAR)pvBuffer)+1)=psfLocalSet->u32SFID; */
+ memcpy((((PUCHAR)pvBuffer)+1), &psfLocalSet->u32SFID, 4);
+
+ if (pstAddIndication->sfActiveSet.bValid == TRUE) {
+ if (UPLINK_DIR == pstAddIndication->u8Direction) {
+ if (!Adapter->LinkUpStatus) {
+ netif_carrier_on(Adapter->dev);
+ netif_start_queue(Adapter->dev);
+ Adapter->LinkUpStatus = 1;
+ if (netif_msg_link(Adapter))
+ pr_info(PFX "%s: link up\n", Adapter->dev->name);
+ atomic_set(&Adapter->TxPktAvail, 1);
+ wake_up(&Adapter->tx_packet_wait_queue);
+ Adapter->liTimeSinceLastNetEntry = get_seconds();
}
}
}
-
- else
- {
- Adapter->PackInfo[uiSearchRuleIndex].bActive=FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].bValid=FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value=0;
- kfree(pstAddIndication);
- }
- }
- else
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "DSA ACK did not get valid SFID");
+ } else {
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bValid = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
kfree(pstAddIndication);
- return FALSE;
}
- }
- break;
- case DSC_REQ:
- {
- pLeader->PLength = sizeof(stLocalSFChangeIndicationAlt);
- pstChangeIndication = (stLocalSFChangeIndicationAlt*)pstAddIndication;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC RESPONSE TO MAC %d", pLeader->PLength);
-
- *((stLocalSFChangeIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
- ((stLocalSFChangeIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_RSP;
-
- CopyBufferToControlPacket(Adapter,(PVOID)Adapter->caDsxReqResp);
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSA ACK did not get valid SFID");
kfree(pstAddIndication);
+ return FALSE;
}
- break;
- case DSC_RSP:
- {
- pLeader->PLength = sizeof(stLocalSFChangeIndicationAlt);
- pstChangeIndication = (stLocalSFChangeIndicationAlt*)pstAddIndication;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC ACK TO MAC %d", pLeader->PLength);
- *((stLocalSFChangeIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
- ((stLocalSFChangeIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_ACK;
- }
- case DSC_ACK:
- {
- UINT uiSearchRuleIndex=0;
+ }
+ break;
+ case DSC_REQ:
+ {
+ pLeader->PLength = sizeof(stLocalSFChangeIndicationAlt);
+ pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC RESPONSE TO MAC %d", pLeader->PLength);
- pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication;
- uiSearchRuleIndex=SearchSfid(Adapter,ntohl(pstChangeIndication->sfActiveSet.u32SFID));
- if(uiSearchRuleIndex > NO_OF_QUEUES-1)
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "SF doesn't exist for which DSC_ACK is received");
- }
- if((uiSearchRuleIndex < NO_OF_QUEUES))
- {
- Adapter->PackInfo[uiSearchRuleIndex].ucDirection = pstChangeIndication->u8Direction;
- if(pstChangeIndication->sfActiveSet.bValid==TRUE)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bActiveSet=TRUE;
- }
- if(pstChangeIndication->sfAuthorizedSet.bValid==TRUE)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet=TRUE;
- }
- if(pstChangeIndication->sfAdmittedSet.bValid==TRUE)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet=TRUE;
- }
+ *((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
+ ((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_RSP;
- if(FALSE==pstChangeIndication->sfActiveSet.bValid)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE;
- if(pstChangeIndication->sfAdmittedSet.bValid)
- {
- psfLocalSet = &pstChangeIndication->sfAdmittedSet;
- }
- else if(pstChangeIndication->sfAuthorizedSet.bValid)
- {
- psfLocalSet = &pstChangeIndication->sfAuthorizedSet;
- }
- }
-
- else
- {
- psfLocalSet = &pstChangeIndication->sfActiveSet;
- Adapter->PackInfo[uiSearchRuleIndex].bActive=TRUE;
- }
- if(psfLocalSet->bValid && (pstChangeIndication->u8CC == 0))
- {
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value =
- ntohs(pstChangeIndication->u16VCID);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "CC field is %d bvalid = %d\n",
- pstChangeIndication->u8CC, psfLocalSet->bValid);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "VCID= %d\n", ntohs(pstChangeIndication->u16VCID));
- Adapter->PackInfo[uiSearchRuleIndex].usCID =
- ntohs(pstChangeIndication->u16CID);
- CopyToAdapter(Adapter,psfLocalSet,uiSearchRuleIndex,
- DSC_ACK, pstAddIndication);
-
- *(PULONG)(((PUCHAR)pvBuffer)+1)=psfLocalSet->u32SFID;
- }
- else if(pstChangeIndication->u8CC == 6)
- {
- deleteSFBySfid(Adapter,uiSearchRuleIndex);
- kfree(pstAddIndication);
- }
+ CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
+ kfree(pstAddIndication);
+ }
+ break;
+ case DSC_RSP:
+ {
+ pLeader->PLength = sizeof(stLocalSFChangeIndicationAlt);
+ pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC ACK TO MAC %d", pLeader->PLength);
+ *((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
+ ((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_ACK;
+ }
+ case DSC_ACK:
+ {
+ UINT uiSearchRuleIndex = 0;
+
+ pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication;
+ uiSearchRuleIndex = SearchSfid(Adapter, ntohl(pstChangeIndication->sfActiveSet.u32SFID));
+ if (uiSearchRuleIndex > NO_OF_QUEUES-1)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "SF doesn't exist for which DSC_ACK is received");
+
+ if ((uiSearchRuleIndex < NO_OF_QUEUES)) {
+ Adapter->PackInfo[uiSearchRuleIndex].ucDirection = pstChangeIndication->u8Direction;
+ if (pstChangeIndication->sfActiveSet.bValid == TRUE)
+ Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
+
+ if (pstChangeIndication->sfAuthorizedSet.bValid == TRUE)
+ Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
+
+ if (pstChangeIndication->sfAdmittedSet.bValid == TRUE)
+ Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
+
+ if (pstChangeIndication->sfActiveSet.bValid == FALSE) {
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE;
+
+ if (pstChangeIndication->sfAdmittedSet.bValid)
+ psfLocalSet = &pstChangeIndication->sfAdmittedSet;
+ else if (pstChangeIndication->sfAuthorizedSet.bValid)
+ psfLocalSet = &pstChangeIndication->sfAuthorizedSet;
+ } else {
+ psfLocalSet = &pstChangeIndication->sfActiveSet;
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
}
- else
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "DSC ACK did not get valid SFID");
+
+ if (!psfLocalSet) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bValid = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
+ kfree(pstAddIndication);
+ } else if (psfLocalSet->bValid && (pstChangeIndication->u8CC == 0)) {
+ Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pstChangeIndication->u16VCID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "CC field is %d bvalid = %d\n",
+ pstChangeIndication->u8CC, psfLocalSet->bValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "VCID= %d\n", ntohs(pstChangeIndication->u16VCID));
+ Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pstChangeIndication->u16CID);
+ CopyToAdapter(Adapter, psfLocalSet, uiSearchRuleIndex, DSC_ACK, pstAddIndication);
+
+ *(PULONG)(((PUCHAR)pvBuffer)+1) = psfLocalSet->u32SFID;
+ } else if (pstChangeIndication->u8CC == 6) {
+ deleteSFBySfid(Adapter, uiSearchRuleIndex);
kfree(pstAddIndication);
- return FALSE;
}
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSC ACK did not get valid SFID");
+ kfree(pstAddIndication);
+ return FALSE;
}
- break;
- case DSD_REQ:
- {
- UINT uiSearchRuleIndex;
- ULONG ulSFID;
-
- pLeader->PLength = sizeof(stLocalSFDeleteIndication);
- *((stLocalSFDeleteIndication*)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *((stLocalSFDeleteIndication*)pstAddIndication);
+ }
+ break;
+ case DSD_REQ:
+ {
+ UINT uiSearchRuleIndex;
+ ULONG ulSFID;
- ulSFID = ntohl(((stLocalSFDeleteIndication*)pstAddIndication)->u32SFID);
- uiSearchRuleIndex=SearchSfid(Adapter,ulSFID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD - Removing connection %x",uiSearchRuleIndex);
+ pLeader->PLength = sizeof(stLocalSFDeleteIndication);
+ *((stLocalSFDeleteIndication *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *((stLocalSFDeleteIndication *)pstAddIndication);
- if(uiSearchRuleIndex < NO_OF_QUEUES)
- {
- //Delete All Classifiers Associated with this SFID
- deleteSFBySfid(Adapter,uiSearchRuleIndex);
- Adapter->u32TotalDSD++;
- }
+ ulSFID = ntohl(((stLocalSFDeleteIndication *)pstAddIndication)->u32SFID);
+ uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD - Removing connection %x", uiSearchRuleIndex);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSD RESPONSE TO MAC");
- ((stLocalSFDeleteIndication*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSD_RSP;
- CopyBufferToControlPacket(Adapter,(PVOID)Adapter->caDsxReqResp);
- }
- case DSD_RSP:
- {
- //Do nothing as SF has already got Deleted
+ if (uiSearchRuleIndex < NO_OF_QUEUES) {
+ /* Delete All Classifiers Associated with this SFID */
+ deleteSFBySfid(Adapter, uiSearchRuleIndex);
+ Adapter->u32TotalDSD++;
}
- break;
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSD RESPONSE TO MAC");
+ ((stLocalSFDeleteIndication *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSD_RSP;
+ CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
+ }
+ case DSD_RSP:
+ {
+ /* Do nothing as the SF has already been deleted */
+ }
+ break;
case DSD_ACK:
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD ACK Rcd, let App handle it\n");
- break;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD ACK Rcd, let App handle it\n");
+ break;
default:
kfree(pstAddIndication);
- return FALSE ;
+ return FALSE;
}
return TRUE;
}
@@ -2280,78 +1918,67 @@ BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /**<Pointer to the Adap
int get_dsx_sf_data_to_application(PMINI_ADAPTER Adapter, UINT uiSFId, void __user *user_buffer)
{
int status = 0;
- struct _packet_info *psSfInfo=NULL;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "status =%d",status);
+ struct _packet_info *psSfInfo = NULL;
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "status =%d", status);
status = SearchSfid(Adapter, uiSFId);
if (status >= NO_OF_QUEUES) {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SFID %d not present in queue !!!", uiSFId );
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SFID %d not present in queue !!!", uiSFId);
return -EINVAL;
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "status =%d",status);
- psSfInfo=&Adapter->PackInfo[status];
- if(psSfInfo->pstSFIndication && copy_to_user(user_buffer,
- psSfInfo->pstSFIndication, sizeof(stLocalSFAddIndicationAlt)))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "copy to user failed SFID %d, present in queue !!!", uiSFId );
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "status =%d", status);
+ psSfInfo = &Adapter->PackInfo[status];
+ if (psSfInfo->pstSFIndication && copy_to_user(user_buffer,
+ psSfInfo->pstSFIndication, sizeof(stLocalSFAddIndicationAlt))) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy to user failed SFID %d, present in queue !!!", uiSFId);
status = -EFAULT;
return status;
}
return STATUS_SUCCESS;
}
-VOID OverrideServiceFlowParams(PMINI_ADAPTER Adapter,PUINT puiBuffer)
+VOID OverrideServiceFlowParams(PMINI_ADAPTER Adapter, PUINT puiBuffer)
{
- B_UINT32 u32NumofSFsinMsg = ntohl(*(puiBuffer + 1));
+ B_UINT32 u32NumofSFsinMsg = ntohl(*(puiBuffer + 1));
stIM_SFHostNotify *pHostInfo = NULL;
- UINT uiSearchRuleIndex = 0;
- ULONG ulSFID = 0;
+ UINT uiSearchRuleIndex = 0;
+ ULONG ulSFID = 0;
- puiBuffer+=2;
+ puiBuffer += 2;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "u32NumofSFsinMsg: 0x%x\n", u32NumofSFsinMsg);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "u32NumofSFsinMsg: 0x%x\n",u32NumofSFsinMsg);
-
- while(u32NumofSFsinMsg != 0 && u32NumofSFsinMsg < NO_OF_QUEUES)
- {
+ while (u32NumofSFsinMsg != 0 && u32NumofSFsinMsg < NO_OF_QUEUES) {
u32NumofSFsinMsg--;
pHostInfo = (stIM_SFHostNotify *)puiBuffer;
puiBuffer = (PUINT)(pHostInfo + 1);
ulSFID = ntohl(pHostInfo->SFID);
- uiSearchRuleIndex=SearchSfid(Adapter,ulSFID);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"SFID: 0x%lx\n",ulSFID);
+ uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SFID: 0x%lx\n", ulSFID);
- if(uiSearchRuleIndex >= NO_OF_QUEUES || uiSearchRuleIndex == HiPriority)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"The SFID <%lx> doesn't exist in host entry or is Invalid\n", ulSFID);
+ if (uiSearchRuleIndex >= NO_OF_QUEUES || uiSearchRuleIndex == HiPriority) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "The SFID <%lx> doesn't exist in host entry or is Invalid\n", ulSFID);
continue;
}
- if(pHostInfo->RetainSF == FALSE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Going to Delete SF");
- deleteSFBySfid(Adapter,uiSearchRuleIndex);
- }
- else
- {
-
+ if (pHostInfo->RetainSF == FALSE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Going to Delete SF");
+ deleteSFBySfid(Adapter, uiSearchRuleIndex);
+ } else {
Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pHostInfo->VCID);
Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pHostInfo->newCID);
- Adapter->PackInfo[uiSearchRuleIndex].bActive=FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"pHostInfo->QoSParamSet: 0x%x\n",pHostInfo->QoSParamSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "pHostInfo->QoSParamSet: 0x%x\n", pHostInfo->QoSParamSet);
- if(pHostInfo->QoSParamSet & 0x1)
- Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet =TRUE;
- if(pHostInfo->QoSParamSet & 0x2)
- Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet =TRUE;
- if(pHostInfo->QoSParamSet & 0x4)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bActiveSet =TRUE;
- Adapter->PackInfo[uiSearchRuleIndex].bActive=TRUE;
+ if (pHostInfo->QoSParamSet & 0x1)
+ Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
+ if (pHostInfo->QoSParamSet & 0x2)
+ Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
+ if (pHostInfo->QoSParamSet & 0x4) {
+ Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
}
}
}
}
-
-
-
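
The DSC_ACK path above settles on a working parameter set by preferring the active set and falling back to the admitted and then the authorized set, with the new !psfLocalSet check catching the case where none of the three is valid. A minimal sketch of that selection order, assuming only the bValid flags visible in the diff (the helper itself is hypothetical, not part of the driver):

    /* Hypothetical helper mirroring the DSC_ACK fallback above:
     * active set first, then admitted, then authorized, else NULL. */
    static void *pick_param_set(stLocalSFChangeIndicationAlt *ind)
    {
            if (ind->sfActiveSet.bValid)
                    return &ind->sfActiveSet;
            if (ind->sfAdmittedSet.bValid)
                    return &ind->sfAdmittedSet;
            if (ind->sfAuthorizedSet.bValid)
                    return &ind->sfAuthorizedSet;
            return NULL;    /* no valid set: caller invalidates the SF */
    }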
diff --git a/drivers/staging/bcm/led_control.h b/drivers/staging/bcm/led_control.h
index 0711ac20f6fc..ed8fbc091115 100644
--- a/drivers/staging/bcm/led_control.h
+++ b/drivers/staging/bcm/led_control.h
@@ -4,11 +4,11 @@
/*************************TYPE DEF**********************/
#define NUM_OF_LEDS 4
-#define DSD_START_OFFSET 0x0200
-#define EEPROM_VERSION_OFFSET 0x020E
-#define EEPROM_HW_PARAM_POINTER_ADDRESS 0x0218
-#define EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5 0x0220
-#define GPIO_SECTION_START_OFFSET 0x03
+#define DSD_START_OFFSET 0x0200
+#define EEPROM_VERSION_OFFSET 0x020E
+#define EEPROM_HW_PARAM_POINTER_ADDRESS 0x0218
+#define EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5 0x0220
+#define GPIO_SECTION_START_OFFSET 0x03
#define COMPATIBILITY_SECTION_LENGTH 42
#define COMPATIBILITY_SECTION_LENGTH_MAP5 84
@@ -18,27 +18,27 @@
#define EEPROM_MAP5_MINORVERSION 0
-#define MAX_NUM_OF_BLINKS 10
-#define NUM_OF_GPIO_PINS 16
+#define MAX_NUM_OF_BLINKS 10
+#define NUM_OF_GPIO_PINS 16
-#define DISABLE_GPIO_NUM 0xFF
-#define EVENT_SIGNALED 1
+#define DISABLE_GPIO_NUM 0xFF
+#define EVENT_SIGNALED 1
-#define MAX_FILE_NAME_BUFFER_SIZE 100
+#define MAX_FILE_NAME_BUFFER_SIZE 100
-#define TURN_ON_LED(GPIO, index) do{ \
+#define TURN_ON_LED(GPIO, index) do { \
UINT gpio_val = GPIO; \
(Adapter->LEDInfo.LEDState[index].BitPolarity == 1) ? \
- wrmaltWithLock(Adapter,BCM_GPIO_OUTPUT_SET_REG, &gpio_val ,sizeof(gpio_val)) : \
- wrmaltWithLock(Adapter,BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)); \
- }while(0);
+ wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, &gpio_val, sizeof(gpio_val)) : \
+ wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)); \
+ } while (0);
#define TURN_OFF_LED(GPIO, index) do { \
UINT gpio_val = GPIO; \
(Adapter->LEDInfo.LEDState[index].BitPolarity == 1) ? \
- wrmaltWithLock(Adapter,BCM_GPIO_OUTPUT_CLR_REG,&gpio_val ,sizeof(gpio_val)) : \
- wrmaltWithLock(Adapter,BCM_GPIO_OUTPUT_SET_REG,&gpio_val ,sizeof(gpio_val)); \
- }while(0);
+ wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)) : \
+ wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, &gpio_val, sizeof(gpio_val)); \
+ } while (0);
#define B_ULONG32 unsigned long
@@ -50,7 +50,7 @@ typedef enum _LEDColors{
BLUE_LED = 2,
YELLOW_LED = 3,
GREEN_LED = 4
-} LEDColors; /*Enumerated values of different LED types*/
+} LEDColors; /*Enumerated values of different LED types*/
typedef enum LedEvents {
SHUTDOWN_EXIT = 0x00,
@@ -62,43 +62,39 @@ typedef enum LedEvents {
LOWPOWER_MODE_ENTER = 0x20,
IDLEMODE_CONTINUE = 0x40,
IDLEMODE_EXIT = 0x80,
- LED_THREAD_INACTIVE = 0x100, //Makes the LED thread Inactivce. It wil be equivallent to putting the thread on hold.
- LED_THREAD_ACTIVE = 0x200 //Makes the LED Thread Active back.
-} LedEventInfo_t; /*Enumerated values of different driver states*/
-
-#define DRIVER_HALT 0xff
-
-
-/*Structure which stores the information of different LED types
- * and corresponding LED state information of driver states*/
-typedef struct LedStateInfo_t
-{
+ LED_THREAD_INACTIVE = 0x100, /* Makes the LED thread inactive. It will be equivalent to putting the thread on hold. */
+ LED_THREAD_ACTIVE = 0x200, /* Makes the LED thread active again. */
+ DRIVER_HALT = 0xff
+} LedEventInfo_t; /* Enumerated values of different driver states */
+
+/*
+ * Structure which stores the information of different LED types
+ * and corresponding LED state information of driver states
+ */
+typedef struct LedStateInfo_t {
UCHAR LED_Type; /* specify GPIO number - use 0xFF if not used */
UCHAR LED_On_State; /* Bits set or reset for different states */
UCHAR LED_Blink_State; /* Bits set or reset for blinking LEDs for different states */
UCHAR GPIO_Num;
- UCHAR BitPolarity; /*To represent whether H/W is normal polarity or reverse
- polarity*/
-}LEDStateInfo, *pLEDStateInfo;
+ UCHAR BitPolarity; /* To represent whether H/W is normal polarity or reverse polarity */
+} LEDStateInfo, *pLEDStateInfo;
-typedef struct _LED_INFO_STRUCT
-{
+typedef struct _LED_INFO_STRUCT {
LEDStateInfo LEDState[NUM_OF_LEDS];
- BOOLEAN bIdleMode_tx_from_host; /*Variable to notify whether driver came out
- from idlemode due to Host or target*/
+ BOOLEAN bIdleMode_tx_from_host; /* Variable to notify whether driver came out from idlemode due to Host or target */
BOOLEAN bIdle_led_off;
wait_queue_head_t notify_led_event;
wait_queue_head_t idleModeSyncEvent;
- struct task_struct *led_cntrl_threadid;
- int led_thread_running;
+ struct task_struct *led_cntrl_threadid;
+ int led_thread_running;
BOOLEAN bLedInitDone;
} LED_INFO_STRUCT, *PLED_INFO_STRUCT;
-//LED Thread state.
-#define BCM_LED_THREAD_DISABLED 0 //LED Thread is not running.
-#define BCM_LED_THREAD_RUNNING_ACTIVELY 1 //LED thread is running.
-#define BCM_LED_THREAD_RUNNING_INACTIVELY 2 //LED thread has been put on hold
+/* LED Thread state. */
+#define BCM_LED_THREAD_DISABLED 0 /* LED Thread is not running. */
+#define BCM_LED_THREAD_RUNNING_ACTIVELY 1 /* LED thread is running. */
+#define BCM_LED_THREAD_RUNNING_INACTIVELY 2 /* LED thread has been put on hold */
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 4c77e508066b..12c691d90900 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -765,8 +765,9 @@ config COMEDI_ADV_PCI_DIO
default N
---help---
Enable support for Advantech PCI DIO cards
- PCI-1730, PCI-1733, PCI-1734, PCI-1736UP, PCI-1750, PCI-1751,
- PCI-1752, PCI-1753/E, PCI-1754, PCI-1756 and PCI-1762
+ PCI-1730, PCI-1733, PCI-1734, PCI-1735U, PCI-1736UP, PCI-1739U,
+ PCI-1750, PCI-1751, PCI-1752, PCI-1753/E, PCI-1754, PCI-1756,
+ PCI-1760 and PCI-1762
To compile this driver as a module, choose M here: the module will be
called adv_pci_dio.
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index 537e58534275..7af068f4a749 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -8,16 +8,16 @@
/*
Driver: adv_pci_dio
Description: Advantech PCI-1730, PCI-1733, PCI-1734, PCI-1735U,
- PCI-1736UP, PCI-1750, PCI-1751, PCI-1752, PCI-1753/E,
- PCI-1754, PCI-1756, PCI-1762
+ PCI-1736UP, PCI-1739U, PCI-1750, PCI-1751, PCI-1752,
+ PCI-1753/E, PCI-1754, PCI-1756, PCI-1760, PCI-1762
Author: Michal Dobes <dobes@tesnet.cz>
Devices: [Advantech] PCI-1730 (adv_pci_dio), PCI-1733,
- PCI-1734, PCI-1735U, PCI-1736UP, PCI-1750,
+ PCI-1734, PCI-1735U, PCI-1736UP, PCI-1739U, PCI-1750,
PCI-1751, PCI-1752, PCI-1753,
PCI-1753+PCI-1753E, PCI-1754, PCI-1756,
PCI-1760, PCI-1762
Status: untested
-Updated: Tue, 04 May 2010 13:00:00 +0000
+Updated: Mon, 09 Jan 2012 12:40:46 +0000
This driver supports now only insn interface for DI/DO/DIO.
@@ -51,6 +51,7 @@ Configuration options:
/* hardware types of the cards */
enum hw_cards_id {
TYPE_PCI1730, TYPE_PCI1733, TYPE_PCI1734, TYPE_PCI1735, TYPE_PCI1736,
+ TYPE_PCI1739,
TYPE_PCI1750,
TYPE_PCI1751,
TYPE_PCI1752,
@@ -109,6 +110,12 @@ enum hw_io_access {
#define PCI1736_BOARDID 4 /* R: Board I/D switch for 1736UP */
#define PCI1736_MAINREG 0 /* Normal register (2) doesn't work */
+/* Advantech PCI-1739U */
+#define PCI1739_DIO 0 /* R/W: begin of 8255 registers block */
+#define PCI1739_ICR 32 /* W: Interrupt control register */
+#define PCI1739_ISR 32 /* R: Interrupt status register */
+#define PCI1739_BOARDID 8 /* R: Board I/D switch for 1739U */
+
/* Advantech PCI-1750 */
#define PCI1750_IDI 0 /* R: Isolated digital input 0-15 */
#define PCI1750_IDO 0 /* W: Isolated digital output 0-15 */
@@ -262,6 +269,7 @@ static DEFINE_PCI_DEVICE_TABLE(pci_dio_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1734) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1735) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1736) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1739) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1750) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1751) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1752) },
@@ -316,6 +324,14 @@ static const struct dio_boardtype boardtypes[] = {
{4, PCI1736_BOARDID, 1, SDF_INTERNAL},
{ {0, 0, 0, 0} },
IO_8b},
+ {"pci1739", PCI_VENDOR_ID_ADVANTECH, 0x1739, PCIDIO_MAINREG,
+ TYPE_PCI1739,
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
+ { {48, PCI1739_DIO, 2, 0}, {0, 0, 0, 0} },
+ {0, 0, 0, 0},
+ { {0, 0, 0, 0} },
+ IO_8b},
{"pci1750", PCI_VENDOR_ID_ADVANTECH, 0x1750, PCIDIO_MAINREG,
TYPE_PCI1750,
{ {0, 0, 0, 0}, {16, PCI1750_IDI, 2, 0} },
@@ -883,6 +899,11 @@ static int pci_dio_reset(struct comedi_device *dev)
outb(0, dev->iobase + PCI1736_3_INT_RF);
break;
+ case TYPE_PCI1739:
+ /* disable & clear interrupts */
+ outb(0x88, dev->iobase + PCI1739_ICR);
+ break;
+
case TYPE_PCI1750:
case TYPE_PCI1751:
/* disable & clear interrupts */
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
index 5cce1b5f4484..b85c8366a396 100644
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ b/drivers/staging/comedi/drivers/dt2801.c
@@ -720,12 +720,20 @@ static int dt2801_dio_insn_config(struct comedi_device *dev,
which = 1;
/* configure */
- if (data[0]) {
+ switch (data[0]) {
+ case INSN_CONFIG_DIO_OUTPUT:
s->io_bits = 0xff;
dt2801_writecmd(dev, DT_C_SET_DIGOUT);
- } else {
+ break;
+ case INSN_CONFIG_DIO_INPUT:
s->io_bits = 0;
dt2801_writecmd(dev, DT_C_SET_DIGIN);
+ break;
+ case INSN_CONFIG_DIO_QUERY:
+ data[1] = s->io_bits ? COMEDI_OUTPUT : COMEDI_INPUT;
+ return insn->n;
+ default:
+ return -EINVAL;
}
dt2801_writedata(dev, which);
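
With the switch above, dt2801 honours the standard INSN_CONFIG_DIO_* instructions (output, input, query) instead of treating data[0] as a plain on/off flag. From user space these instructions are normally reached through comedilib; a hedged sketch, with the device path and subdevice/channel numbers invented for illustration:

    #include <comedilib.h>  /* user-space comedilib, not kernel code */

    int main(void)
    {
            comedi_t *dev = comedi_open("/dev/comedi0");    /* hypothetical node */
            unsigned int dir;

            if (!dev)
                    return 1;
            /* issues INSN_CONFIG_DIO_OUTPUT for subdevice 2, channel 0 */
            comedi_dio_config(dev, 2, 0, COMEDI_OUTPUT);
            /* issues INSN_CONFIG_DIO_QUERY; dir comes back as COMEDI_INPUT or COMEDI_OUTPUT */
            comedi_dio_get_config(dev, 2, 0, &dir);
            comedi_close(dev);
            return 0;
    }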
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
index 32d9c42e9659..e86ab5862895 100644
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ b/drivers/staging/comedi/drivers/dt9812.c
@@ -527,7 +527,7 @@ static void dt9812_configure_gain(struct usb_dt9812 *dev,
* 11x -> Gain = 0.5
*/
case DT9812_GAIN_0PT5:
- rmw->or_value = F020_MASK_ADC0CF_AMP0GN2 ||
+ rmw->or_value = F020_MASK_ADC0CF_AMP0GN2 |
F020_MASK_ADC0CF_AMP0GN1;
break;
case DT9812_GAIN_1:
@@ -540,7 +540,7 @@ static void dt9812_configure_gain(struct usb_dt9812 *dev,
rmw->or_value = F020_MASK_ADC0CF_AMP0GN1;
break;
case DT9812_GAIN_8:
- rmw->or_value = F020_MASK_ADC0CF_AMP0GN1 ||
+ rmw->or_value = F020_MASK_ADC0CF_AMP0GN1 |
F020_MASK_ADC0CF_AMP0GN0;
break;
case DT9812_GAIN_16:
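
The dt9812 change above fixes the classic logical-vs-bitwise OR slip: with ||, two nonzero bit masks collapse to the integer 1 rather than the union of their bits, so the gain register was loaded with the wrong value. A standalone illustration (mask values invented, not the real F020 constants):

    #include <stdio.h>

    #define GN1 0x04        /* example mask bits */
    #define GN2 0x08

    int main(void)
    {
            printf("GN2 || GN1 = 0x%x\n", GN2 || GN1);      /* 0x1: logical OR */
            printf("GN2 |  GN1 = 0x%x\n", GN2 | GN1);       /* 0xc: bitwise OR */
            return 0;
    }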
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index b692fea0d2b0..b0bc6bb877ab 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -2098,23 +2098,29 @@ static int me4000_dio_insn_config(struct comedi_device *dev,
CALL_PDEBUG("In me4000_dio_insn_config()\n");
- if (data[0] == INSN_CONFIG_DIO_QUERY) {
+ switch (data[0]) {
+ default:
+ return -EINVAL;
+ case INSN_CONFIG_DIO_QUERY:
data[1] =
(s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
return insn->n;
+ case INSN_CONFIG_DIO_INPUT:
+ case INSN_CONFIG_DIO_OUTPUT:
+ break;
}
/*
* The input or output configuration of each digital line is
* configured by a special insn_config instruction. chanspec
* contains the channel to be changed, and data[0] contains the
- * value COMEDI_INPUT or COMEDI_OUTPUT.
+ * value INSN_CONFIG_DIO_INPUT or INSN_CONFIG_DIO_OUTPUT.
* On the ME-4000 it is only possible to switch port wise (8 bit)
*/
tmp = me4000_inl(dev, info->dio_context.ctrl_reg);
- if (data[0] == COMEDI_OUTPUT) {
+ if (data[0] == INSN_CONFIG_DIO_OUTPUT) {
if (chan < 8) {
s->io_bits |= 0xFF;
tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_0 |
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 045a4c00f346..1df8fcbcd108 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -30,7 +30,7 @@ Status: works
Devices: [National Instruments] PCI-DIO-32HS (ni_pcidio), PXI-6533,
PCI-DIO-96, PCI-DIO-96B, PXI-6508, PCI-6503, PCI-6503B, PCI-6503X,
PXI-6503, PCI-6533, PCI-6534
-Updated: Sun, 21 Apr 2002 21:03:38 -0700
+Updated: Mon, 09 Jan 2012 14:27:23 +0000
The DIO-96 appears as four 8255 subdevices. See the 8255
driver notes for details.
@@ -42,6 +42,11 @@ supports simple digital I/O; no handshaking is supported.
DMA mostly works for the PCI-DIO32HS, but only in timed input mode.
+The PCI-DIO-32HS/PCI-6533 has a configurable external trigger. Setting
+scan_begin_arg to 0 or CR_EDGE triggers on the leading edge. Setting
+scan_begin_arg to CR_INVERT or (CR_EDGE | CR_INVERT) triggers on the
+trailing edge.
+
This driver could be easily modified to support AT-MIO32HS and
AT-MIO96.
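
As the new paragraph above documents, trigger polarity now rides in scan_begin_arg. A hedged fragment of a comedi_cmd set up for a trailing-edge external trigger (all unrelated fields elided):

    struct comedi_cmd cmd;

    memset(&cmd, 0, sizeof(cmd));
    cmd.scan_begin_src = TRIG_EXT;
    /* 0 or CR_EDGE selects the leading edge;
     * CR_INVERT (or CR_EDGE | CR_INVERT) selects the trailing edge */
    cmd.scan_begin_arg = CR_EDGE | CR_INVERT;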
@@ -436,6 +441,7 @@ static int ni_pcidio_request_di_mite_channel(struct comedi_device *dev)
comedi_error(dev, "failed to reserve mite dma channel.");
return -EBUSY;
}
+ devpriv->di_mite_chan->dir = COMEDI_INPUT;
writeb(primary_DMAChannel_bits(devpriv->di_mite_chan->channel) |
secondary_DMAChannel_bits(devpriv->di_mite_chan->channel),
devpriv->mite->daq_io_addr + DMA_Line_Control_Group1);
@@ -482,6 +488,21 @@ void ni_pcidio_event(struct comedi_device *dev, struct comedi_subdevice *s)
comedi_event(dev, s);
}
+static int ni_pcidio_poll(struct comedi_device *dev, struct comedi_subdevice *s)
+{
+ unsigned long irq_flags;
+ int count;
+
+ spin_lock_irqsave(&dev->spinlock, irq_flags);
+ spin_lock(&devpriv->mite_channel_lock);
+ if (devpriv->di_mite_chan)
+ mite_sync_input_dma(devpriv->di_mite_chan, s->async);
+ spin_unlock(&devpriv->mite_channel_lock);
+ count = s->async->buf_write_count - s->async->buf_read_count;
+ spin_unlock_irqrestore(&dev->spinlock, irq_flags);
+ return count;
+}
+
static irqreturn_t nidio_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
@@ -497,7 +518,6 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
int status;
int work = 0;
unsigned int m_status = 0;
- unsigned long irq_flags;
/* interrupcions parasites */
if (dev->attached == 0) {
@@ -505,6 +525,9 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
return IRQ_NONE;
}
+ /* Lock to avoid race with comedi_poll */
+ spin_lock(&dev->spinlock);
+
status = readb(devpriv->mite->daq_io_addr +
Interrupt_And_Window_Status);
flags = readb(devpriv->mite->daq_io_addr + Group_1_Flags);
@@ -518,7 +541,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
/* printk("buf[4096]=%08x\n",
*(unsigned int *)(async->prealloc_buf+4096)); */
- spin_lock_irqsave(&devpriv->mite_channel_lock, irq_flags);
+ spin_lock(&devpriv->mite_channel_lock);
if (devpriv->di_mite_chan)
m_status = mite_get_status(devpriv->di_mite_chan);
#ifdef MITE_DEBUG
@@ -543,7 +566,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
disable_irq(dev->irq);
}
}
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, irq_flags);
+ spin_unlock(&devpriv->mite_channel_lock);
while (status & DataLeft) {
work++;
@@ -645,6 +668,8 @@ out:
Master_DMA_And_Interrupt_Control);
}
#endif
+
+ spin_unlock(&dev->spinlock);
return IRQ_HANDLED;
}
@@ -825,8 +850,8 @@ static int ni_pcidio_cmdtest(struct comedi_device *dev,
} else {
/* TRIG_EXT */
/* should be level/edge, hi/lo specification here */
- if (cmd->scan_begin_arg != 0) {
- cmd->scan_begin_arg = 0;
+ if ((cmd->scan_begin_arg & ~(CR_EDGE | CR_INVERT)) != 0) {
+ cmd->scan_begin_arg &= (CR_EDGE | CR_INVERT);
err++;
}
}
@@ -941,7 +966,13 @@ static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
writeb(0, devpriv->mite->daq_io_addr + Sequence);
writeb(0x00, devpriv->mite->daq_io_addr + ReqReg);
writeb(4, devpriv->mite->daq_io_addr + BlockMode);
- writeb(0, devpriv->mite->daq_io_addr + LinePolarities);
+ if (!(cmd->scan_begin_arg & CR_INVERT)) {
+ /* Leading Edge pulse mode */
+ writeb(0, devpriv->mite->daq_io_addr + LinePolarities);
+ } else {
+ /* Trailing Edge pulse mode */
+ writeb(2, devpriv->mite->daq_io_addr + LinePolarities);
+ }
writeb(0x00, devpriv->mite->daq_io_addr + AckSer);
writel(1, devpriv->mite->daq_io_addr + StartDelay);
writeb(1, devpriv->mite->daq_io_addr + ReqDelay);
@@ -1005,17 +1036,24 @@ static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
static int setup_mite_dma(struct comedi_device *dev, struct comedi_subdevice *s)
{
int retval;
+ unsigned long flags;
retval = ni_pcidio_request_di_mite_channel(dev);
if (retval)
return retval;
- devpriv->di_mite_chan->dir = COMEDI_INPUT;
+ /* write alloc the entire buffer */
+ comedi_buf_write_alloc(s->async, s->async->prealloc_bufsz);
- mite_prep_dma(devpriv->di_mite_chan, 32, 32);
+ spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
+ if (devpriv->di_mite_chan) {
+ mite_prep_dma(devpriv->di_mite_chan, 32, 32);
+ mite_dma_arm(devpriv->di_mite_chan);
+ } else
+ retval = -EIO;
+ spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
- mite_dma_arm(devpriv->di_mite_chan);
- return 0;
+ return retval;
}
static int ni_pcidio_inttrig(struct comedi_device *dev,
@@ -1244,6 +1282,7 @@ static int nidio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->len_chanlist = 32; /* XXX */
s->buf_change = &ni_pcidio_change;
s->async_dma_dir = DMA_BIDIRECTIONAL;
+ s->poll = &ni_pcidio_poll;
writel(0, devpriv->mite->daq_io_addr + Port_IO(0));
writel(0, devpriv->mite->daq_io_addr + Port_Pin_Directions(0));
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 0f0d995f137c..27baefa32b17 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -29,14 +29,15 @@ Devices: [National Instruments] PCI-MIO-16XE-50 (ni_pcimio),
PCI-MIO-16XE-10, PXI-6030E, PCI-MIO-16E-1, PCI-MIO-16E-4, PCI-6014, PCI-6040E,
PXI-6040E, PCI-6030E, PCI-6031E, PCI-6032E, PCI-6033E, PCI-6071E, PCI-6023E,
PCI-6024E, PCI-6025E, PXI-6025E, PCI-6034E, PCI-6035E, PCI-6052E,
- PCI-6110, PCI-6111, PCI-6220, PCI-6221, PCI-6224, PXI-6224, PCI-6225, PXI-6225,
- PCI-6229, PCI-6250, PCI-6251, PCIe-6251, PCI-6254, PCI-6259, PCIe-6259,
+ PCI-6110, PCI-6111, PCI-6220, PCI-6221, PCI-6224, PXI-6224,
+ PCI-6225, PXI-6225, PCI-6229, PCI-6250, PCI-6251, PCIe-6251, PXIe-6251,
+ PCI-6254, PCI-6259, PCIe-6259,
PCI-6280, PCI-6281, PXI-6281, PCI-6284, PCI-6289,
PCI-6711, PXI-6711, PCI-6713, PXI-6713,
PXI-6071E, PCI-6070E, PXI-6070E,
PXI-6052E, PCI-6036E, PCI-6731, PCI-6733, PXI-6733,
PCI-6143, PXI-6143
-Updated: Wed, 03 Dec 2008 10:51:47 +0000
+Updated: Mon, 09 Jan 2012 14:52:48 +0000
These boards are almost identical to the AT-MIO E series, except that
they use the PCI bus instead of ISA (i.e., AT). See the notes for
@@ -182,6 +183,7 @@ static DEFINE_PCI_DEVICE_TABLE(ni_pci_table) = {
{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x717f)},
{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x71bc)},
{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x717d)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x72e8)},
{0}
};
@@ -1046,6 +1048,25 @@ static const struct ni_board_struct ni_boards[] = {
.has_8255 = 0,
},
{
+ .device_id = 0x72e8,
+ .name = "pxie-6251",
+ .n_adchan = 16,
+ .adbits = 16,
+ .ai_fifo_depth = 4095,
+ .gainlkup = ai_gain_628x,
+ .ai_speed = 800,
+ .n_aochan = 2,
+ .aobits = 16,
+ .ao_fifo_depth = 8191,
+ .ao_range_table = &range_ni_M_625x_ao,
+ .reg_type = ni_reg_625x,
+ .ao_unipolar = 0,
+ .ao_speed = 357,
+ .num_p0_dio_channels = 8,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+ },
+ {
.device_id = 0x70b7,
.name = "pci-6254",
.n_adchan = 32,
diff --git a/drivers/staging/comedi/drivers/unioxx5.c b/drivers/staging/comedi/drivers/unioxx5.c
index 89e62aa134b0..f45824f0d86b 100644
--- a/drivers/staging/comedi/drivers/unioxx5.c
+++ b/drivers/staging/comedi/drivers/unioxx5.c
@@ -306,7 +306,7 @@ static int __unioxx5_subdev_init(struct comedi_subdevice *subdev,
usp = kzalloc(sizeof(*usp), GFP_KERNEL);
if (usp == NULL) {
- printk(KERN_ERR "comedi%d: erorr! --> out of memory!\n", minor);
+ printk(KERN_ERR "comedi%d: error! --> out of memory!\n", minor);
return -1;
}
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index ca6bcf8b0231..63c9b6dbc317 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -39,7 +39,7 @@ Status: testing
*
*
* Revision history:
- * 0.1: inital version
+ * 0.1: initial version
* 0.2: all basic functions implemented, digital I/O only for one port
* 0.3: proper vendor ID and driver name
* 0.4: fixed D/A voltage range
@@ -235,16 +235,16 @@ struct usbduxsub {
short int ao_cmd_running;
/* pwm is running */
short int pwm_cmd_running;
- /* continous aquisition */
- short int ai_continous;
- short int ao_continous;
+ /* continuous acquisition */
+ short int ai_continuous;
+ short int ao_continuous;
/* number of samples to acquire */
int ai_sample_count;
int ao_sample_count;
/* time between samples in units of the timer */
unsigned int ai_timer;
unsigned int ao_timer;
- /* counter between aquisitions */
+ /* counter between acquisitions */
unsigned int ai_counter;
unsigned int ao_counter;
/* interval in frames/uframes */
@@ -455,8 +455,8 @@ static void usbduxsub_ai_IsocIrq(struct urb *urb)
this_usbduxsub->ai_counter = this_usbduxsub->ai_timer;
/* test, if we transmit only a fixed number of samples */
- if (!(this_usbduxsub->ai_continous)) {
- /* not continous, fixed number of samples */
+ if (!(this_usbduxsub->ai_continuous)) {
+ /* not continuous, fixed number of samples */
this_usbduxsub->ai_sample_count--;
/* all samples received? */
if (this_usbduxsub->ai_sample_count < 0) {
@@ -607,8 +607,8 @@ static void usbduxsub_ao_IsocIrq(struct urb *urb)
/* timer zero */
this_usbduxsub->ao_counter = this_usbduxsub->ao_timer;
- /* handle non continous aquisition */
- if (!(this_usbduxsub->ao_continous)) {
+ /* handle non continuous acquisition */
+ if (!(this_usbduxsub->ao_continuous)) {
/* fixed number of samples */
this_usbduxsub->ao_sample_count--;
if (this_usbduxsub->ao_sample_count < 0) {
@@ -925,7 +925,7 @@ static int usbdux_ai_cmdtest(struct comedi_device *dev,
if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
err++;
- /* scanning is continous */
+ /* scanning is continuous */
tmp = cmd->convert_src;
cmd->convert_src &= TRIG_NOW;
if (!cmd->convert_src || tmp != cmd->convert_src)
@@ -1193,7 +1193,7 @@ static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
up(&this_usbduxsub->sem);
return -EBUSY;
}
- /* set current channel of the running aquisition to zero */
+ /* set current channel of the running acquisition to zero */
s->async->cur_chan = 0;
/* first the number of channels per time step */
@@ -1261,10 +1261,10 @@ static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (cmd->stop_src == TRIG_COUNT) {
/* data arrives as one packet */
this_usbduxsub->ai_sample_count = cmd->stop_arg;
- this_usbduxsub->ai_continous = 0;
+ this_usbduxsub->ai_continuous = 0;
} else {
- /* continous aquisition */
- this_usbduxsub->ai_continous = 1;
+ /* continuous acquisition */
+ this_usbduxsub->ai_continuous = 1;
this_usbduxsub->ai_sample_count = 0;
}
@@ -1586,7 +1586,7 @@ static int usbdux_ao_cmdtest(struct comedi_device *dev,
/* just now we scan also in the high speed mode every frame */
/* this is due to ehci driver limitations */
if (0) { /* (this_usbduxsub->high_speed) */
- /* start immidiately a new scan */
+ /* start immediately a new scan */
/* the sampling rate is set by the coversion rate */
cmd->scan_begin_src &= TRIG_FOLLOW;
} else {
@@ -1596,7 +1596,7 @@ static int usbdux_ao_cmdtest(struct comedi_device *dev,
if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
err++;
- /* scanning is continous */
+ /* scanning is continuous */
tmp = cmd->convert_src;
/* all conversion events happen simultaneously */
@@ -1710,7 +1710,7 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
dev_dbg(&this_usbduxsub->interface->dev,
"comedi%d: %s\n", dev->minor, __func__);
- /* set current channel of the running aquisition to zero */
+ /* set current channel of the running acquisition to zero */
s->async->cur_chan = 0;
for (i = 0; i < cmd->chanlist_len; ++i) {
chan = CR_CHAN(cmd->chanlist[i]);
@@ -1759,7 +1759,7 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
this_usbduxsub->ao_counter = this_usbduxsub->ao_timer;
if (cmd->stop_src == TRIG_COUNT) {
- /* not continous */
+ /* not continuous */
/* counter */
/* high speed also scans everything at once */
if (0) { /* (this_usbduxsub->high_speed) */
@@ -1771,10 +1771,10 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* data arrives as one packet */
this_usbduxsub->ao_sample_count = cmd->stop_arg;
}
- this_usbduxsub->ao_continous = 0;
+ this_usbduxsub->ao_continuous = 0;
} else {
- /* continous aquisition */
- this_usbduxsub->ao_continous = 1;
+ /* continuous acquisition */
+ this_usbduxsub->ao_continuous = 1;
this_usbduxsub->ao_sample_count = 0;
}
diff --git a/drivers/staging/crystalhd/bc_dts_glob_lnx.h b/drivers/staging/crystalhd/bc_dts_glob_lnx.h
index bbe5119761fa..fd1a6e680c8a 100644
--- a/drivers/staging/crystalhd/bc_dts_glob_lnx.h
+++ b/drivers/staging/crystalhd/bc_dts_glob_lnx.h
@@ -48,8 +48,7 @@
#endif
-#include "bc_dts_defs.h"
-#include "bcm_70012_regs.h" /* Link Register defs */
+#include "crystalhd.h"
#define CRYSTALHD_API_NAME "crystalhd"
#define CRYSTALHD_API_DEV_NAME "/dev/crystalhd"
diff --git a/drivers/staging/crystalhd/bc_dts_types.h b/drivers/staging/crystalhd/bc_dts_types.h
deleted file mode 100644
index 1085a91221b8..000000000000
--- a/drivers/staging/crystalhd/bc_dts_types.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/********************************************************************
- * Copyright(c) 2006-2009 Broadcom Corporation.
- *
- * Name: bc_dts_types.h
- *
- * Description: Data types
- *
- * AU
- *
- * HISTORY:
- *
- ********************************************************************
- * This header is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation, either version 2.1 of the License.
- *
- * This header is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- * You should have received a copy of the GNU Lesser General Public License
- * along with this header. If not, see <http://www.gnu.org/licenses/>.
- *******************************************************************/
-
-#ifndef _BC_DTS_TYPES_H_
-#define _BC_DTS_TYPES_H_
-
-#include <stdint.h>
-
-#ifndef TRUE
- #define TRUE 1
-#endif
-
-#ifndef FALSE
- #define FALSE 0
-#endif
-
-#define TEXT
-
-#endif
diff --git a/drivers/staging/crystalhd/crystalhd.h b/drivers/staging/crystalhd/crystalhd.h
new file mode 100644
index 000000000000..3f4d79515026
--- /dev/null
+++ b/drivers/staging/crystalhd/crystalhd.h
@@ -0,0 +1,14 @@
+#ifndef _CRYSTALHD_H_
+#define _CRYSTALHD_H_
+
+#include <asm/system.h>
+#include "bc_dts_defs.h"
+#include "crystalhd_misc.h"
+#include "bc_dts_glob_lnx.h"
+#include "crystalhd_hw.h"
+#include "crystalhd_cmds.h"
+#include "crystalhd_lnx.h"
+#include "bcm_70012_regs.h"
+#include "crystalhd_fw_if.h"
+
+#endif
diff --git a/drivers/staging/crystalhd/crystalhd_cmds.c b/drivers/staging/crystalhd/crystalhd_cmds.c
index 3735ed3da4c6..05fe78748dfc 100644
--- a/drivers/staging/crystalhd/crystalhd_cmds.c
+++ b/drivers/staging/crystalhd/crystalhd_cmds.c
@@ -24,8 +24,7 @@
* along with this driver. If not, see <http://www.gnu.org/licenses/>.
**********************************************************************/
-#include "crystalhd_cmds.h"
-#include "crystalhd_hw.h"
+#include "crystalhd.h"
static struct crystalhd_user *bc_cproc_get_uid(struct crystalhd_cmd *ctx)
{
diff --git a/drivers/staging/crystalhd/crystalhd_cmds.h b/drivers/staging/crystalhd/crystalhd_cmds.h
index f0a2796045c2..4066ba393a17 100644
--- a/drivers/staging/crystalhd/crystalhd_cmds.h
+++ b/drivers/staging/crystalhd/crystalhd_cmds.h
@@ -33,8 +33,8 @@
* from _dts_glob and dts_defs etc.. which are defined for
* windows.
*/
-#include "crystalhd_misc.h"
-#include "crystalhd_hw.h"
+
+#include "crystalhd.h"
enum crystalhd_state {
BC_LINK_INVALID = 0x00,
diff --git a/drivers/staging/crystalhd/crystalhd_hw.c b/drivers/staging/crystalhd/crystalhd_hw.c
index 5acf39e7cdef..e617d2fcbb1f 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.c
+++ b/drivers/staging/crystalhd/crystalhd_hw.c
@@ -22,10 +22,11 @@
* along with this driver. If not, see <http://www.gnu.org/licenses/>.
**********************************************************************/
+#include "crystalhd.h"
+
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
-#include "crystalhd_hw.h"
/* Functions internal to this file */
@@ -766,7 +767,7 @@ static enum BC_STATUS crystalhd_hw_fill_desc(struct crystalhd_dio_req *ioreq,
crystalhd_hw_dump_desc(desc, last_desc_ix, 1);
if (count != xfr_sz) {
- BCMLOG_ERR("interal error sz curr:%x exp:%x\n", count, xfr_sz);
+ BCMLOG_ERR("internal error sz curr:%x exp:%x\n", count, xfr_sz);
return BC_STS_ERROR;
}
@@ -868,8 +869,7 @@ static enum BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n");
- /* FIXME: jarod: invert dma_ctrl and check bit? or are there missing parens? */
- if (!dma_cntrl & DMA_START_BIT) {
+ if (!(dma_cntrl & DMA_START_BIT)) {
BCMLOG(BCMLOG_DBG, "Already Stopped\n");
return BC_STS_SUCCESS;
}
@@ -1628,7 +1628,6 @@ enum BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer, ui
uint32_t fw_sig_len = 36;
uint32_t dram_offset = BC_FWIMG_ST_ADDR, sig_reg;
- BCMLOG_ENTER;
if (!adp || !buffer || !sz) {
BCMLOG_ERR("Invalid Params.\n");
@@ -1725,8 +1724,6 @@ enum BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw,
crystalhd_create_event(&fw_cmd_event);
- BCMLOG_ENTER;
-
if (!hw || !fw_cmd) {
BCMLOG_ERR("Invalid Arguments\n");
return BC_STS_INV_ARG;
diff --git a/drivers/staging/crystalhd/crystalhd_hw.h b/drivers/staging/crystalhd/crystalhd_hw.h
index 3efbf9d4ff5d..2d0e6c6005e5 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.h
+++ b/drivers/staging/crystalhd/crystalhd_hw.h
@@ -27,8 +27,7 @@
#ifndef _CRYSTALHD_HW_H_
#define _CRYSTALHD_HW_H_
-#include "crystalhd_misc.h"
-#include "crystalhd_fw_if.h"
+#include "crystalhd.h"
/* HW constants..*/
#define DMA_ENGINE_CNT 2
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c
index 7e0c199f6893..d9e3d618f7f4 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.c
+++ b/drivers/staging/crystalhd/crystalhd_lnx.c
@@ -15,10 +15,11 @@
along with this driver. If not, see <http://www.gnu.org/licenses/>.
***************************************************************************/
+#include "crystalhd.h"
+
#include <linux/mutex.h>
#include <linux/slab.h>
-#include "crystalhd_lnx.h"
static DEFINE_MUTEX(chd_dec_mutex);
static struct class *crystalhd_class;
@@ -298,7 +299,6 @@ static int chd_dec_open(struct inode *in, struct file *fd)
enum BC_STATUS sts = BC_STS_SUCCESS;
struct crystalhd_user *uc = NULL;
- BCMLOG_ENTER;
if (!adp) {
BCMLOG_ERR("Invalid adp\n");
return -EINVAL;
@@ -327,7 +327,6 @@ static int chd_dec_close(struct inode *in, struct file *fd)
struct crystalhd_adp *adp = chd_get_adp();
struct crystalhd_user *uc;
- BCMLOG_ENTER;
if (!adp) {
BCMLOG_ERR("Invalid adp\n");
return -EINVAL;
@@ -513,8 +512,6 @@ static void __devexit chd_dec_pci_remove(struct pci_dev *pdev)
struct crystalhd_adp *pinfo;
enum BC_STATUS sts = BC_STS_SUCCESS;
- BCMLOG_ENTER;
-
pinfo = pci_get_drvdata(pdev);
if (!pinfo) {
BCMLOG_ERR("could not get adp\n");
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.h b/drivers/staging/crystalhd/crystalhd_lnx.h
index a2b5a56be6dd..a81f9298b0a1 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.h
+++ b/drivers/staging/crystalhd/crystalhd_lnx.h
@@ -1,7 +1,7 @@
/***************************************************************************
* Copyright (c) 2005-2009, Broadcom Corporation.
*
- * Name: crystalhd_lnx . c
+ * Name: crystalhd_lnx . h
*
* Description:
* BCM70012 Linux driver
@@ -48,11 +48,10 @@
#include <asm/system.h>
#include <linux/uaccess.h>
-#include "crystalhd_cmds.h"
+#include "crystalhd.h"
#define CRYSTAL_HD_NAME "Broadcom Crystal HD Decoder (BCM70012) Driver"
-
/* OS specific PCI information structure and adapter information. */
struct crystalhd_adp {
/* Hardware borad/PCI specifics */
diff --git a/drivers/staging/crystalhd/crystalhd_misc.c b/drivers/staging/crystalhd/crystalhd_misc.c
index 5fa0c6e10ce2..b3a637814a16 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.c
+++ b/drivers/staging/crystalhd/crystalhd_misc.c
@@ -24,10 +24,9 @@
* along with this driver. If not, see <http://www.gnu.org/licenses/>.
**********************************************************************/
-#include <linux/slab.h>
+#include "crystalhd.h"
-#include "crystalhd_misc.h"
-#include "crystalhd_lnx.h"
+#include <linux/slab.h>
uint32_t g_linklog_level;
diff --git a/drivers/staging/crystalhd/crystalhd_misc.h b/drivers/staging/crystalhd/crystalhd_misc.h
index 4d6172357428..84c87938a831 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.h
+++ b/drivers/staging/crystalhd/crystalhd_misc.h
@@ -28,6 +28,8 @@
#ifndef _CRYSTALHD_MISC_H_
#define _CRYSTALHD_MISC_H_
+#include "crystalhd.h"
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -35,8 +37,6 @@
#include <linux/ioctl.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
-#include <asm/system.h>
-#include "bc_dts_glob_lnx.h"
/* Global log level variable defined in crystal_misc.c file */
extern uint32_t g_linklog_level;
@@ -200,29 +200,21 @@ enum _chd_log_levels {
BCMLOG_INFO = 0x00000001, /* Generic informational */
BCMLOG_DBG = 0x00000002, /* First level Debug info */
BCMLOG_SSTEP = 0x00000004, /* Stepping information */
- BCMLOG_ENTER_LEAVE = 0x00000008, /* stack tracking */
};
-#define BCMLOG_ENTER \
-if (g_linklog_level & BCMLOG_ENTER_LEAVE) { \
- printk(KERN_DEBUG "Entered %s\n", __func__); \
-}
-#define BCMLOG_LEAVE \
-if (g_linklog_level & BCMLOG_ENTER_LEAVE) { \
- printk(KERN_DEBUG "Leaving %s\n", __func__); \
-}
+#define BCMLOG(trace, fmt, args...) \
+do { \
+ if (g_linklog_level & trace) \
+ printk(fmt, ##args); \
+} while (0)
-#define BCMLOG(trace, fmt, args...) \
-if (g_linklog_level & trace) { \
- printk(fmt, ##args); \
-}
-#define BCMLOG_ERR(fmt, args...) \
-do { \
- if (g_linklog_level & BCMLOG_ERROR) { \
- printk(KERN_ERR "*ERR*:%s:%d: "fmt, __FILE__, __LINE__, ##args); \
- } \
-} while (0);
+#define BCMLOG_ERR(fmt, args...) \
+do { \
+ if (g_linklog_level & BCMLOG_ERROR) \
+ printk(KERN_ERR "*ERR*:%s:%d: "fmt, \
+ __FILE__, __LINE__, ##args); \
+} while (0)
#endif
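
The rewrite just above swaps bare if-blocks for the usual do { ... } while (0) wrapper. The difference shows up under an unbraced if/else: with the old form, the caller's trailing ';' orphans the else (a compile error), and without that semicolon the else silently pairs with the macro's hidden if. A small sketch against the new BCMLOG:

    /* With do { ... } while (0), BCMLOG(...) expands to a single
     * statement, so the else below unambiguously pairs with if (err). */
    if (err)
            BCMLOG(BCMLOG_ERROR, "request failed\n");
    else
            BCMLOG(BCMLOG_DBG, "request ok\n");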
diff --git a/drivers/staging/et131x/README b/drivers/staging/et131x/README
index 3458aa713a33..82657233c8b6 100644
--- a/drivers/staging/et131x/README
+++ b/drivers/staging/et131x/README
@@ -11,6 +11,6 @@ TODO:
- Use of kmem_cache seems a bit unusual
Please send patches to:
- Greg Kroah-Hartman <gregkh@suse.de>
+ Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mark Einon <mark.einon@gmail.com>
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index 2c4069fcd981..3f919babe79b 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -802,7 +802,7 @@ static int et131x_init_eeprom(struct et131x_adapter *adapter)
/* THIS IS A WORKAROUND:
* I need to call this function twice to get my card in a
* LG M1 Express Dual running. I tried also a msleep before this
- * function, because I thougth there could be some time condidions
+ * function, because I thought there could be some time conditions
* but it didn't work. Call the whole function twice also work.
*/
if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
@@ -987,7 +987,7 @@ static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
writel(station1, &macregs->station_addr_1);
writel(station2, &macregs->station_addr_2);
- /* Max ethernet packet in bytes that will passed by the mac without
+ /* Max ethernet packet in bytes that will be passed by the mac without
* being truncated. Allow the MAC to pass 4 more than our max packet
* size. This is 4 for the Ethernet CRC.
*
@@ -3109,7 +3109,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
skb->protocol = eth_type_trans(skb, adapter->netdev);
skb->ip_summed = CHECKSUM_NONE;
- netif_rx(skb);
+ netif_rx_ni(skb);
} else {
rfd->len = 0;
}
@@ -4413,7 +4413,7 @@ static void et131x_up(struct net_device *netdev)
/**
* et131x_down - Bring down the device
- * @netdev: device to be broght down
+ * @netdev: device to be brought down
*/
static void et131x_down(struct net_device *netdev)
{
@@ -5177,7 +5177,7 @@ static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
/* Make sure the requested MAC is valid */
if (!is_valid_ether_addr(address->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
et131x_disable_txrx(netdev);
et131x_handle_send_interrupt(adapter);
diff --git a/drivers/staging/et131x/et131x.h b/drivers/staging/et131x/et131x.h
index 7eed3c8986f1..864379b4e8df 100644
--- a/drivers/staging/et131x/et131x.h
+++ b/drivers/staging/et131x/et131x.h
@@ -596,7 +596,7 @@ struct rxdma_regs { /* Location: */
* structure for tx test reg in txmac address map
* located at address 0x3014
* 31-17: unused
- * 16: reserved1
+ * 16: reserved
* 15: txtest_en
* 14-11: unused
* 10-0: txq test pointer
@@ -1485,7 +1485,7 @@ struct address_map {
* 3: reserved
* 2: ignore_10g_fr
* 1: reserved
- * 0: preamble_supress_en
+ * 0: preamble_suppress_en
*/
/* MI Register 22: PHY Configuration Reg(0x16)
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index d8efed657440..3bf0f40e97fd 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -450,7 +450,7 @@ exit:
/**
* usb_alphatrack_poll
*/
-static unsigned int usb_alphatrack_poll(struct file *file, poll_table * wait)
+static unsigned int usb_alphatrack_poll(struct file *file, poll_table *wait)
{
struct usb_alphatrack *dev;
unsigned int mask = 0;
diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
index cf47a5d191fc..29e99bbcae48 100644
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -471,7 +471,7 @@ exit:
/**
* usb_tranzport_poll
*/
-static unsigned int usb_tranzport_poll(struct file *file, poll_table * wait)
+static unsigned int usb_tranzport_poll(struct file *file, poll_table *wait)
{
struct usb_tranzport *dev;
unsigned int mask = 0;
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
index 917bbb082a6e..7569aa0f24d1 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
@@ -2211,11 +2211,8 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
ft1000InitProc(dev);
ft1000_card_present = 1;
SET_ETHTOOL_OPS(dev, &ops);
- printk(KERN_INFO
- "ft1000: %s: addr 0x%04lx irq %d, MAC addr %02x:%02x:%02x:%02x:%02x:%02x\n",
- dev->name, dev->base_addr, dev->irq, dev->dev_addr[0],
- dev->dev_addr[1], dev->dev_addr[2], dev->dev_addr[3],
- dev->dev_addr[4], dev->dev_addr[5]);
+ printk(KERN_INFO "ft1000: %s: addr 0x%04lx irq %d, MAC addr %pM\n",
+ dev->name, dev->base_addr, dev->irq, dev->dev_addr);
return dev;
err_unreg:
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
index 7faeadad1fff..71aaad31270b 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
@@ -29,10 +29,10 @@
#define FT1000_PROC "ft1000"
#define MAX_FILE_LEN 255
-#define PUTM_TO_PAGE(len,page,args...) \
+#define PUTM_TO_PAGE(len, page, args...) \
len += snprintf(page+len, PAGE_SIZE - len, args)
-#define PUTX_TO_PAGE(len,page,message,size,var) \
+#define PUTX_TO_PAGE(len, page, message, size, var) \
len += snprintf(page+len, PAGE_SIZE - len, message); \
for(i = 0; i < (size - 1); i++) \
{ \
@@ -40,7 +40,7 @@
} \
len += snprintf(page+len, PAGE_SIZE - len, "%02x\n", var[i])
-#define PUTD_TO_PAGE(len,page,message,size,var) \
+#define PUTD_TO_PAGE(len, page, message, size, var) \
len += snprintf(page+len, PAGE_SIZE - len, message); \
for(i = 0; i < (size - 1); i++) \
{ \
diff --git a/drivers/staging/hv/Kconfig b/drivers/staging/hv/Kconfig
deleted file mode 100644
index 60ac479a2909..000000000000
--- a/drivers/staging/hv/Kconfig
+++ /dev/null
@@ -1,5 +0,0 @@
-config HYPERV_STORAGE
- tristate "Microsoft Hyper-V virtual storage driver"
- depends on HYPERV && SCSI
- help
- Select this option to enable the Hyper-V virtual storage driver.
diff --git a/drivers/staging/hv/Makefile b/drivers/staging/hv/Makefile
deleted file mode 100644
index af95a6b7e436..000000000000
--- a/drivers/staging/hv/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o
-
-hv_storvsc-y := storvsc_drv.o
diff --git a/drivers/staging/hv/TODO b/drivers/staging/hv/TODO
deleted file mode 100644
index dea7d92dfdc1..000000000000
--- a/drivers/staging/hv/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-TODO:
- - audit the scsi driver
-
-Please send patches for this code to Greg Kroah-Hartman <gregkh@suse.de>,
-Haiyang Zhang <haiyangz@microsoft.com>, and K. Y. Srinivasan <kys@microsoft.com>
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
deleted file mode 100644
index eb853f71089a..000000000000
--- a/drivers/staging/hv/storvsc_drv.c
+++ /dev/null
@@ -1,1586 +0,0 @@
-/*
- * Copyright (c) 2009, Microsoft Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Authors:
- * Haiyang Zhang <haiyangz@microsoft.com>
- * Hank Janssen <hjanssen@microsoft.com>
- * K. Y. Srinivasan <kys@microsoft.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/completion.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/hyperv.h>
-#include <linux/mempool.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi_eh.h>
-#include <scsi/scsi_devinfo.h>
-#include <scsi/scsi_dbg.h>
-
-
-#define STORVSC_MIN_BUF_NR 64
-#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
-static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;
-
-module_param(storvsc_ringbuffer_size, int, S_IRUGO);
-MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
-
-/* to alert the user that structure sizes may be mismatched even though the */
-/* protocol versions match. */
-
-
-#define REVISION_STRING(REVISION_) #REVISION_
-#define FILL_VMSTOR_REVISION(RESULT_LVALUE_) \
- do { \
- char *revision_string \
- = REVISION_STRING($Rev : 6 $) + 6; \
- RESULT_LVALUE_ = 0; \
- while (*revision_string >= '0' \
- && *revision_string <= '9') { \
- RESULT_LVALUE_ *= 10; \
- RESULT_LVALUE_ += *revision_string - '0'; \
- revision_string++; \
- } \
- } while (0)
-
-/* Major/minor macros. Minor version is in LSB, meaning that earlier flat */
-/* version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1). */
-#define VMSTOR_PROTOCOL_MAJOR(VERSION_) (((VERSION_) >> 8) & 0xff)
-#define VMSTOR_PROTOCOL_MINOR(VERSION_) (((VERSION_)) & 0xff)
-#define VMSTOR_PROTOCOL_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \
- (((MINOR_) & 0xff)))
-#define VMSTOR_INVALID_PROTOCOL_VERSION (-1)
-
-/* Version history: */
-/* V1 Beta 0.1 */
-/* V1 RC < 2008/1/31 1.0 */
-/* V1 RC > 2008/1/31 2.0 */
-#define VMSTOR_PROTOCOL_VERSION_CURRENT VMSTOR_PROTOCOL_VERSION(4, 2)
-
-
-
-
-/* This will get replaced with the max transfer length that is possible on */
-/* the host adapter. */
-/* The max transfer length will be published when we offer a vmbus channel. */
-#define MAX_TRANSFER_LENGTH 0x40000
-#define DEFAULT_PACKET_SIZE (sizeof(struct vmdata_gpa_direct) + \
- sizeof(struct vstor_packet) + \
- sizesizeof(u64) * (MAX_TRANSFER_LENGTH / PAGE_SIZE)))
-
-
-/* Packet structure describing virtual storage requests. */
-enum vstor_packet_operation {
- VSTOR_OPERATION_COMPLETE_IO = 1,
- VSTOR_OPERATION_REMOVE_DEVICE = 2,
- VSTOR_OPERATION_EXECUTE_SRB = 3,
- VSTOR_OPERATION_RESET_LUN = 4,
- VSTOR_OPERATION_RESET_ADAPTER = 5,
- VSTOR_OPERATION_RESET_BUS = 6,
- VSTOR_OPERATION_BEGIN_INITIALIZATION = 7,
- VSTOR_OPERATION_END_INITIALIZATION = 8,
- VSTOR_OPERATION_QUERY_PROTOCOL_VERSION = 9,
- VSTOR_OPERATION_QUERY_PROPERTIES = 10,
- VSTOR_OPERATION_ENUMERATE_BUS = 11,
- VSTOR_OPERATION_MAXIMUM = 11
-};
-
-/*
- * Platform neutral description of a scsi request -
- * this remains the same across the write regardless of 32/64 bit
- * note: it's patterned off the SCSI_PASS_THROUGH structure
- */
-#define CDB16GENERIC_LENGTH 0x10
-
-#ifndef SENSE_BUFFER_SIZE
-#define SENSE_BUFFER_SIZE 0x12
-#endif
-
-#define MAX_DATA_BUF_LEN_WITH_PADDING 0x14
-
-struct vmscsi_request {
- unsigned short length;
- unsigned char srb_status;
- unsigned char scsi_status;
-
- unsigned char port_number;
- unsigned char path_id;
- unsigned char target_id;
- unsigned char lun;
-
- unsigned char cdb_length;
- unsigned char sense_info_length;
- unsigned char data_in;
- unsigned char reserved;
-
- unsigned int data_transfer_length;
-
- union {
- unsigned char cdb[CDB16GENERIC_LENGTH];
- unsigned char sense_data[SENSE_BUFFER_SIZE];
- unsigned char reserved_array[MAX_DATA_BUF_LEN_WITH_PADDING];
- };
-} __packed;
-
-
-/*
- * This structure is sent during the initialization phase to get the different
- * properties of the channel.
- */
-struct vmstorage_channel_properties {
- unsigned short protocol_version;
- unsigned char path_id;
- unsigned char target_id;
-
- /* Note: port number is only really known on the client side */
- unsigned int port_number;
- unsigned int flags;
- unsigned int max_transfer_bytes;
-
-	/* This id is unique for each channel and will correspond with */
-	/* vendor specific data in the inquiry data */
- unsigned long long unique_id;
-} __packed;
-
-/* This structure is sent during the storage protocol negotiations. */
-struct vmstorage_protocol_version {
- /* Major (MSW) and minor (LSW) version numbers. */
- unsigned short major_minor;
-
- /*
- * Revision number is auto-incremented whenever this file is changed
- * (See FILL_VMSTOR_REVISION macro above). Mismatch does not
- * definitely indicate incompatibility--but it does indicate mismatched
- * builds.
- */
- unsigned short revision;
-} __packed;
-
-/* Channel Property Flags */
-#define STORAGE_CHANNEL_REMOVABLE_FLAG 0x1
-#define STORAGE_CHANNEL_EMULATED_IDE_FLAG 0x2
-
-struct vstor_packet {
- /* Requested operation type */
- enum vstor_packet_operation operation;
-
- /* Flags - see below for values */
- unsigned int flags;
-
- /* Status of the request returned from the server side. */
- unsigned int status;
-
- /* Data payload area */
- union {
- /*
- * Structure used to forward SCSI commands from the
- * client to the server.
- */
- struct vmscsi_request vm_srb;
-
- /* Structure used to query channel properties. */
- struct vmstorage_channel_properties storage_channel_properties;
-
- /* Used during version negotiations. */
- struct vmstorage_protocol_version version;
- };
-} __packed;
-
-/* Packet flags */
-/*
- * This flag indicates that the server should send back a completion for this
- * packet.
- */
-#define REQUEST_COMPLETION_FLAG 0x1
-
-/* This is the set of flags that the vsc can set in any packets it sends */
-#define VSC_LEGAL_FLAGS (REQUEST_COMPLETION_FLAG)
-
-
-/* Defines */
-
-#define STORVSC_MAX_IO_REQUESTS 128
-
-/*
- * In Hyper-V, each port/path/target maps to 1 scsi host adapter.  In
- * reality, the path/target is not used (i.e., always set to 0) so our
- * scsi host adapter essentially has 1 bus with 1 target that contains
- * up to 256 luns.
- */
-#define STORVSC_MAX_LUNS_PER_TARGET 64
-#define STORVSC_MAX_TARGETS 1
-#define STORVSC_MAX_CHANNELS 1
-#define STORVSC_MAX_CMD_LEN 16
-
-/* Matches Windows-end */
-enum storvsc_request_type {
- WRITE_TYPE,
- READ_TYPE,
- UNKNOWN_TYPE,
-};
-
-
-struct hv_storvsc_request {
- struct hv_device *device;
-
- /* Synchronize the request/response if needed */
- struct completion wait_event;
-
- unsigned char *sense_buffer;
- void *context;
- void (*on_io_completion)(struct hv_storvsc_request *request);
- struct hv_multipage_buffer data_buffer;
-
- struct vstor_packet vstor_packet;
-};
-
-
-/* A storvsc device is a device object that contains a vmbus channel */
-struct storvsc_device {
- struct hv_device *device;
-
- bool destroy;
- bool drain_notify;
- atomic_t num_outstanding_req;
- struct Scsi_Host *host;
-
- wait_queue_head_t waiting_to_drain;
-
- /*
-	 * Each unique Port/Path/Target represents 1 channel, i.e., one scsi
-	 * controller.  In reality, the pathid and targetid are always 0
-	 * and the port is set by us.
- */
- unsigned int port_number;
- unsigned char path_id;
- unsigned char target_id;
-
- /* Used for vsc/vsp channel reset process */
- struct hv_storvsc_request init_request;
- struct hv_storvsc_request reset_request;
-};
-
-struct stor_mem_pools {
- struct kmem_cache *request_pool;
- mempool_t *request_mempool;
-};
-
-struct hv_host_device {
- struct hv_device *dev;
- unsigned int port;
- unsigned char path;
- unsigned char target;
-};
-
-struct storvsc_cmd_request {
- struct list_head entry;
- struct scsi_cmnd *cmd;
-
- unsigned int bounce_sgl_count;
- struct scatterlist *bounce_sgl;
-
- struct hv_storvsc_request request;
-};
-
-struct storvsc_scan_work {
- struct work_struct work;
- struct Scsi_Host *host;
- uint lun;
-};
-
-static void storvsc_bus_scan(struct work_struct *work)
-{
- struct storvsc_scan_work *wrk;
- int id, order_id;
-
- wrk = container_of(work, struct storvsc_scan_work, work);
- for (id = 0; id < wrk->host->max_id; ++id) {
- if (wrk->host->reverse_ordering)
- order_id = wrk->host->max_id - id - 1;
- else
- order_id = id;
-
- scsi_scan_target(&wrk->host->shost_gendev, 0,
- order_id, SCAN_WILD_CARD, 1);
- }
- kfree(wrk);
-}
-
-static void storvsc_remove_lun(struct work_struct *work)
-{
- struct storvsc_scan_work *wrk;
- struct scsi_device *sdev;
-
- wrk = container_of(work, struct storvsc_scan_work, work);
- if (!scsi_host_get(wrk->host))
- goto done;
-
- sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun);
-
- if (sdev) {
- scsi_remove_device(sdev);
- scsi_device_put(sdev);
- }
- scsi_host_put(wrk->host);
-
-done:
- kfree(wrk);
-}
-
-static inline struct storvsc_device *get_out_stor_device(
- struct hv_device *device)
-{
- struct storvsc_device *stor_device;
-
- stor_device = hv_get_drvdata(device);
-
- if (stor_device && stor_device->destroy)
- stor_device = NULL;
-
- return stor_device;
-}
-
-
-static inline void storvsc_wait_to_drain(struct storvsc_device *dev)
-{
- dev->drain_notify = true;
- wait_event(dev->waiting_to_drain,
- atomic_read(&dev->num_outstanding_req) == 0);
- dev->drain_notify = false;
-}
-
-static inline struct storvsc_device *get_in_stor_device(
- struct hv_device *device)
-{
- struct storvsc_device *stor_device;
-
- stor_device = hv_get_drvdata(device);
-
- if (!stor_device)
- goto get_in_err;
-
- /*
-	 * If the device is being destroyed, allow incoming
-	 * traffic only to clean up outstanding requests.
- */
-
- if (stor_device->destroy &&
- (atomic_read(&stor_device->num_outstanding_req) == 0))
- stor_device = NULL;
-
-get_in_err:
- return stor_device;
-
-}
-
-static int storvsc_channel_init(struct hv_device *device)
-{
- struct storvsc_device *stor_device;
- struct hv_storvsc_request *request;
- struct vstor_packet *vstor_packet;
- int ret, t;
-
- stor_device = get_out_stor_device(device);
- if (!stor_device)
- return -ENODEV;
-
- request = &stor_device->init_request;
- vstor_packet = &request->vstor_packet;
-
- /*
- * Now, initiate the vsc/vsp initialization protocol on the open
- * channel
- */
- memset(request, 0, sizeof(struct hv_storvsc_request));
- init_completion(&request->wait_event);
- vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
- vstor_packet->flags = REQUEST_COMPLETION_FLAG;
-
- ret = vmbus_sendpacket(device->channel, vstor_packet,
- sizeof(struct vstor_packet),
- (unsigned long)request,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0)
- goto cleanup;
-
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
-
- if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
- vstor_packet->status != 0)
- goto cleanup;
-
-
- /* reuse the packet for version range supported */
- memset(vstor_packet, 0, sizeof(struct vstor_packet));
- vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
- vstor_packet->flags = REQUEST_COMPLETION_FLAG;
-
- vstor_packet->version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
- FILL_VMSTOR_REVISION(vstor_packet->version.revision);
-
- ret = vmbus_sendpacket(device->channel, vstor_packet,
- sizeof(struct vstor_packet),
- (unsigned long)request,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0)
- goto cleanup;
-
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
-
- if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
- vstor_packet->status != 0)
- goto cleanup;
-
-
- memset(vstor_packet, 0, sizeof(struct vstor_packet));
- vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
- vstor_packet->flags = REQUEST_COMPLETION_FLAG;
- vstor_packet->storage_channel_properties.port_number =
- stor_device->port_number;
-
- ret = vmbus_sendpacket(device->channel, vstor_packet,
- sizeof(struct vstor_packet),
- (unsigned long)request,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
-
- if (ret != 0)
- goto cleanup;
-
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
-
- if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
- vstor_packet->status != 0)
- goto cleanup;
-
- stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
- stor_device->target_id
- = vstor_packet->storage_channel_properties.target_id;
-
- memset(vstor_packet, 0, sizeof(struct vstor_packet));
- vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
- vstor_packet->flags = REQUEST_COMPLETION_FLAG;
-
- ret = vmbus_sendpacket(device->channel, vstor_packet,
- sizeof(struct vstor_packet),
- (unsigned long)request,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
-
- if (ret != 0)
- goto cleanup;
-
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
-
- if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
- vstor_packet->status != 0)
- goto cleanup;
-
-
-cleanup:
- return ret;
-}
-
-static void storvsc_on_io_completion(struct hv_device *device,
- struct vstor_packet *vstor_packet,
- struct hv_storvsc_request *request)
-{
- struct storvsc_device *stor_device;
- struct vstor_packet *stor_pkt;
-
- stor_device = hv_get_drvdata(device);
- stor_pkt = &request->vstor_packet;
-
- /*
- * The current SCSI handling on the host side does
- * not correctly handle:
- * INQUIRY command with page code parameter set to 0x80
- * MODE_SENSE command with cmd[2] == 0x1c
- *
-	 * Set up the srb and scsi status so this won't be fatal.
-	 * We do this so we can distinguish truly fatal failures
-	 * (srb status == 0x4) and off-line the device in that case.
- */
-
- if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
- (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
- vstor_packet->vm_srb.scsi_status = 0;
- vstor_packet->vm_srb.srb_status = 0x1;
- }
-
-
- /* Copy over the status...etc */
- stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
- stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
- stor_pkt->vm_srb.sense_info_length =
- vstor_packet->vm_srb.sense_info_length;
-
- if (vstor_packet->vm_srb.scsi_status != 0 ||
-	    vstor_packet->vm_srb.srb_status != 1) {
- dev_warn(&device->device,
- "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
- stor_pkt->vm_srb.cdb[0],
- vstor_packet->vm_srb.scsi_status,
- vstor_packet->vm_srb.srb_status);
- }
-
- if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
- /* CHECK_CONDITION */
- if (vstor_packet->vm_srb.srb_status & 0x80) {
- /* autosense data available */
- dev_warn(&device->device,
- "stor pkt %p autosense data valid - len %d\n",
- request,
- vstor_packet->vm_srb.sense_info_length);
-
- memcpy(request->sense_buffer,
- vstor_packet->vm_srb.sense_data,
- vstor_packet->vm_srb.sense_info_length);
-
- }
- }
-
- stor_pkt->vm_srb.data_transfer_length =
- vstor_packet->vm_srb.data_transfer_length;
-
- request->on_io_completion(request);
-
- if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
- stor_device->drain_notify)
- wake_up(&stor_device->waiting_to_drain);
-
-
-}
-
-static void storvsc_on_receive(struct hv_device *device,
- struct vstor_packet *vstor_packet,
- struct hv_storvsc_request *request)
-{
- struct storvsc_scan_work *work;
- struct storvsc_device *stor_device;
-
- switch (vstor_packet->operation) {
- case VSTOR_OPERATION_COMPLETE_IO:
- storvsc_on_io_completion(device, vstor_packet, request);
- break;
-
- case VSTOR_OPERATION_REMOVE_DEVICE:
- case VSTOR_OPERATION_ENUMERATE_BUS:
- stor_device = get_in_stor_device(device);
- work = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
- if (!work)
- return;
-
- INIT_WORK(&work->work, storvsc_bus_scan);
- work->host = stor_device->host;
- schedule_work(&work->work);
- break;
-
- default:
- break;
- }
-}
-
-static void storvsc_on_channel_callback(void *context)
-{
- struct hv_device *device = (struct hv_device *)context;
- struct storvsc_device *stor_device;
- u32 bytes_recvd;
- u64 request_id;
- unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
- struct hv_storvsc_request *request;
- int ret;
-
-
- stor_device = get_in_stor_device(device);
- if (!stor_device)
- return;
-
- do {
- ret = vmbus_recvpacket(device->channel, packet,
- ALIGN(sizeof(struct vstor_packet), 8),
- &bytes_recvd, &request_id);
- if (ret == 0 && bytes_recvd > 0) {
-
- request = (struct hv_storvsc_request *)
- (unsigned long)request_id;
-
- if ((request == &stor_device->init_request) ||
- (request == &stor_device->reset_request)) {
-
- memcpy(&request->vstor_packet, packet,
- sizeof(struct vstor_packet));
- complete(&request->wait_event);
- } else {
- storvsc_on_receive(device,
- (struct vstor_packet *)packet,
- request);
- }
- } else {
- break;
- }
- } while (1);
-
- return;
-}
-
-static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
-{
- struct vmstorage_channel_properties props;
- int ret;
-
- memset(&props, 0, sizeof(struct vmstorage_channel_properties));
-
- /* Open the channel */
- ret = vmbus_open(device->channel,
- ring_size,
- ring_size,
- (void *)&props,
- sizeof(struct vmstorage_channel_properties),
- storvsc_on_channel_callback, device);
-
- if (ret != 0)
- return ret;
-
- ret = storvsc_channel_init(device);
-
- return ret;
-}
-
-static int storvsc_dev_remove(struct hv_device *device)
-{
- struct storvsc_device *stor_device;
- unsigned long flags;
-
- stor_device = hv_get_drvdata(device);
-
- spin_lock_irqsave(&device->channel->inbound_lock, flags);
- stor_device->destroy = true;
- spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
-
- /*
-	 * At this point, all outbound traffic should be disabled.  We
- * only allow inbound traffic (responses) to proceed so that
- * outstanding requests can be completed.
- */
-
- storvsc_wait_to_drain(stor_device);
-
- /*
- * Since we have already drained, we don't need to busy wait
- * as was done in final_release_stor_device()
- * Note that we cannot set the ext pointer to NULL until
- * we have drained - to drain the outgoing packets, we need to
- * allow incoming packets.
- */
- spin_lock_irqsave(&device->channel->inbound_lock, flags);
- hv_set_drvdata(device, NULL);
- spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
-
- /* Close the channel */
- vmbus_close(device->channel);
-
- kfree(stor_device);
- return 0;
-}
-
-static int storvsc_do_io(struct hv_device *device,
- struct hv_storvsc_request *request)
-{
- struct storvsc_device *stor_device;
- struct vstor_packet *vstor_packet;
- int ret = 0;
-
- vstor_packet = &request->vstor_packet;
- stor_device = get_out_stor_device(device);
-
- if (!stor_device)
- return -ENODEV;
-
-
- request->device = device;
-
-
- vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
-
- vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);
-
-
- vstor_packet->vm_srb.sense_info_length = SENSE_BUFFER_SIZE;
-
-
- vstor_packet->vm_srb.data_transfer_length =
- request->data_buffer.len;
-
- vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
-
- if (request->data_buffer.len) {
- ret = vmbus_sendpacket_multipagebuffer(device->channel,
- &request->data_buffer,
- vstor_packet,
- sizeof(struct vstor_packet),
- (unsigned long)request);
- } else {
- ret = vmbus_sendpacket(device->channel, vstor_packet,
- sizeof(struct vstor_packet),
- (unsigned long)request,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- }
-
- if (ret != 0)
- return ret;
-
- atomic_inc(&stor_device->num_outstanding_req);
-
- return ret;
-}
-
-static void storvsc_get_ide_info(struct hv_device *dev, int *target, int *path)
-{
- *target =
- dev->dev_instance.b[5] << 8 | dev->dev_instance.b[4];
-
- *path =
- dev->dev_instance.b[3] << 24 |
- dev->dev_instance.b[2] << 16 |
- dev->dev_instance.b[1] << 8 | dev->dev_instance.b[0];
-}
-
-
-static int storvsc_device_alloc(struct scsi_device *sdevice)
-{
- struct stor_mem_pools *memp;
- int number = STORVSC_MIN_BUF_NR;
-
- memp = kzalloc(sizeof(struct stor_mem_pools), GFP_KERNEL);
- if (!memp)
- return -ENOMEM;
-
- memp->request_pool =
- kmem_cache_create(dev_name(&sdevice->sdev_dev),
- sizeof(struct storvsc_cmd_request), 0,
- SLAB_HWCACHE_ALIGN, NULL);
-
- if (!memp->request_pool)
- goto err0;
-
- memp->request_mempool = mempool_create(number, mempool_alloc_slab,
- mempool_free_slab,
- memp->request_pool);
-
- if (!memp->request_mempool)
- goto err1;
-
- sdevice->hostdata = memp;
-
- return 0;
-
-err1:
- kmem_cache_destroy(memp->request_pool);
-
-err0:
- kfree(memp);
- return -ENOMEM;
-}
-
-static void storvsc_device_destroy(struct scsi_device *sdevice)
-{
- struct stor_mem_pools *memp = sdevice->hostdata;
-
- mempool_destroy(memp->request_mempool);
- kmem_cache_destroy(memp->request_pool);
- kfree(memp);
- sdevice->hostdata = NULL;
-}
-
-static int storvsc_device_configure(struct scsi_device *sdevice)
-{
- scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
- STORVSC_MAX_IO_REQUESTS);
-
- blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
-
- blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
-
- return 0;
-}
-
-static void destroy_bounce_buffer(struct scatterlist *sgl,
- unsigned int sg_count)
-{
- int i;
- struct page *page_buf;
-
- for (i = 0; i < sg_count; i++) {
- page_buf = sg_page((&sgl[i]));
- if (page_buf != NULL)
- __free_page(page_buf);
- }
-
- kfree(sgl);
-}
-
-static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
-{
- int i;
-
- /* No need to check */
- if (sg_count < 2)
- return -1;
-
- /* We have at least 2 sg entries */
- for (i = 0; i < sg_count; i++) {
- if (i == 0) {
- /* make sure 1st one does not have hole */
- if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
- return i;
- } else if (i == sg_count - 1) {
- /* make sure last one does not have hole */
- if (sgl[i].offset != 0)
- return i;
- } else {
- /* make sure no hole in the middle */
- if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
- return i;
- }
- }
- return -1;
-}
-
-static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
- unsigned int sg_count,
- unsigned int len,
- int write)
-{
- int i;
- int num_pages;
- struct scatterlist *bounce_sgl;
- struct page *page_buf;
- unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
-
- num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
-
- bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
- if (!bounce_sgl)
- return NULL;
-
- for (i = 0; i < num_pages; i++) {
- page_buf = alloc_page(GFP_ATOMIC);
- if (!page_buf)
- goto cleanup;
- sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
- }
-
- return bounce_sgl;
-
-cleanup:
- destroy_bounce_buffer(bounce_sgl, num_pages);
- return NULL;
-}
-
-
-/* Assume the original sgl has enough room */
-static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
- struct scatterlist *bounce_sgl,
- unsigned int orig_sgl_count,
- unsigned int bounce_sgl_count)
-{
- int i;
- int j = 0;
- unsigned long src, dest;
- unsigned int srclen, destlen, copylen;
- unsigned int total_copied = 0;
- unsigned long bounce_addr = 0;
- unsigned long dest_addr = 0;
- unsigned long flags;
-
- local_irq_save(flags);
-
- for (i = 0; i < orig_sgl_count; i++) {
- dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
- KM_IRQ0) + orig_sgl[i].offset;
- dest = dest_addr;
- destlen = orig_sgl[i].length;
-
- if (bounce_addr == 0)
- bounce_addr =
- (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
- KM_IRQ0);
-
- while (destlen) {
- src = bounce_addr + bounce_sgl[j].offset;
- srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
-
- copylen = min(srclen, destlen);
- memcpy((void *)dest, (void *)src, copylen);
-
- total_copied += copylen;
- bounce_sgl[j].offset += copylen;
- destlen -= copylen;
- dest += copylen;
-
- if (bounce_sgl[j].offset == bounce_sgl[j].length) {
- /* full */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
- j++;
-
- /*
- * It is possible that the number of elements
- * in the bounce buffer may not be equal to
- * the number of elements in the original
- * scatter list. Handle this correctly.
- */
-
- if (j == bounce_sgl_count) {
- /*
- * We are done; cleanup and return.
- */
- kunmap_atomic((void *)(dest_addr -
- orig_sgl[i].offset),
- KM_IRQ0);
- local_irq_restore(flags);
- return total_copied;
- }
-
- /* if we need to use another bounce buffer */
- if (destlen || i != orig_sgl_count - 1)
- bounce_addr =
- (unsigned long)kmap_atomic(
- sg_page((&bounce_sgl[j])), KM_IRQ0);
- } else if (destlen == 0 && i == orig_sgl_count - 1) {
- /* unmap the last bounce that is < PAGE_SIZE */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
- }
- }
-
- kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
- KM_IRQ0);
- }
-
- local_irq_restore(flags);
-
- return total_copied;
-}
-
-
-/* Assume the bounce_sgl has enough room, i.e., it came from create_bounce_buffer() */
-static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
- struct scatterlist *bounce_sgl,
- unsigned int orig_sgl_count)
-{
- int i;
- int j = 0;
- unsigned long src, dest;
- unsigned int srclen, destlen, copylen;
- unsigned int total_copied = 0;
- unsigned long bounce_addr = 0;
- unsigned long src_addr = 0;
- unsigned long flags;
-
- local_irq_save(flags);
-
- for (i = 0; i < orig_sgl_count; i++) {
- src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
- KM_IRQ0) + orig_sgl[i].offset;
- src = src_addr;
- srclen = orig_sgl[i].length;
-
- if (bounce_addr == 0)
- bounce_addr =
- (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
- KM_IRQ0);
-
- while (srclen) {
- /* assume bounce offset always == 0 */
- dest = bounce_addr + bounce_sgl[j].length;
- destlen = PAGE_SIZE - bounce_sgl[j].length;
-
- copylen = min(srclen, destlen);
- memcpy((void *)dest, (void *)src, copylen);
-
- total_copied += copylen;
- bounce_sgl[j].length += copylen;
- srclen -= copylen;
- src += copylen;
-
- if (bounce_sgl[j].length == PAGE_SIZE) {
- /* full..move to next entry */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
- j++;
-
- /* if we need to use another bounce buffer */
- if (srclen || i != orig_sgl_count - 1)
- bounce_addr =
- (unsigned long)kmap_atomic(
- sg_page((&bounce_sgl[j])), KM_IRQ0);
-
- } else if (srclen == 0 && i == orig_sgl_count - 1) {
- /* unmap the last bounce that is < PAGE_SIZE */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
- }
- }
-
- kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
- }
-
- local_irq_restore(flags);
-
- return total_copied;
-}
-
-
-static int storvsc_remove(struct hv_device *dev)
-{
- struct storvsc_device *stor_device = hv_get_drvdata(dev);
- struct Scsi_Host *host = stor_device->host;
-
- scsi_remove_host(host);
-
- scsi_host_put(host);
-
- storvsc_dev_remove(dev);
-
- return 0;
-}
-
-
-static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
- sector_t capacity, int *info)
-{
- sector_t nsect = capacity;
- sector_t cylinders = nsect;
- int heads, sectors_pt;
-
- /*
- * We are making up these values; let us keep it simple.
- */
- heads = 0xff;
- sectors_pt = 0x3f; /* Sectors per track */
- sector_div(cylinders, heads * sectors_pt);
- if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
- cylinders = 0xffff;
-
- info[0] = heads;
- info[1] = sectors_pt;
- info[2] = (int)cylinders;
-
- return 0;
-}
-
-static int storvsc_host_reset(struct hv_device *device)
-{
- struct storvsc_device *stor_device;
- struct hv_storvsc_request *request;
- struct vstor_packet *vstor_packet;
- int ret, t;
-
-
- stor_device = get_out_stor_device(device);
- if (!stor_device)
- return FAILED;
-
- request = &stor_device->reset_request;
- vstor_packet = &request->vstor_packet;
-
- init_completion(&request->wait_event);
-
- vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
- vstor_packet->flags = REQUEST_COMPLETION_FLAG;
- vstor_packet->vm_srb.path_id = stor_device->path_id;
-
- ret = vmbus_sendpacket(device->channel, vstor_packet,
- sizeof(struct vstor_packet),
- (unsigned long)&stor_device->reset_request,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0)
- return FAILED;
-
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0)
- return TIMEOUT_ERROR;
-
-
- /*
- * At this point, all outstanding requests in the adapter
-	 * should have been flushed out and returned to us
- */
-
- return SUCCESS;
-}
-
-
-/*
- * storvsc_host_reset_handler - Reset the scsi HBA
- */
-static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
-{
- struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
- struct hv_device *dev = host_dev->dev;
-
- return storvsc_host_reset(dev);
-}
-
-
-/*
- * storvsc_command_completion - Command completion processing
- */
-static void storvsc_command_completion(struct hv_storvsc_request *request)
-{
- struct storvsc_cmd_request *cmd_request =
- (struct storvsc_cmd_request *)request->context;
- struct scsi_cmnd *scmnd = cmd_request->cmd;
- struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
- void (*scsi_done_fn)(struct scsi_cmnd *);
- struct scsi_sense_hdr sense_hdr;
- struct vmscsi_request *vm_srb;
- struct storvsc_scan_work *wrk;
- struct stor_mem_pools *memp = scmnd->device->hostdata;
-
- vm_srb = &request->vstor_packet.vm_srb;
- if (cmd_request->bounce_sgl_count) {
- if (vm_srb->data_in == READ_TYPE)
- copy_from_bounce_buffer(scsi_sglist(scmnd),
- cmd_request->bounce_sgl,
- scsi_sg_count(scmnd),
- cmd_request->bounce_sgl_count);
- destroy_bounce_buffer(cmd_request->bounce_sgl,
- cmd_request->bounce_sgl_count);
- }
-
- /*
-	 * If there is an error, offline the device since all
- * error recovery strategies would have already been
- * deployed on the host side.
- */
- if (vm_srb->srb_status == 0x4)
- scmnd->result = DID_TARGET_FAILURE << 16;
- else
- scmnd->result = vm_srb->scsi_status;
-
- /*
-	 * If the LUN is invalid, remove the device.
- */
- if (vm_srb->srb_status == 0x20) {
- struct storvsc_device *stor_dev;
- struct hv_device *dev = host_dev->dev;
- struct Scsi_Host *host;
-
- stor_dev = get_in_stor_device(dev);
- host = stor_dev->host;
-
- wrk = kmalloc(sizeof(struct storvsc_scan_work),
- GFP_ATOMIC);
- if (!wrk) {
- scmnd->result = DID_TARGET_FAILURE << 16;
- } else {
- wrk->host = host;
- wrk->lun = vm_srb->lun;
- INIT_WORK(&wrk->work, storvsc_remove_lun);
- schedule_work(&wrk->work);
- }
- }
-
- if (scmnd->result) {
- if (scsi_normalize_sense(scmnd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE, &sense_hdr))
- scsi_print_sense_hdr("storvsc", &sense_hdr);
- }
-
- scsi_set_resid(scmnd,
- request->data_buffer.len -
- vm_srb->data_transfer_length);
-
- scsi_done_fn = scmnd->scsi_done;
-
- scmnd->host_scribble = NULL;
- scmnd->scsi_done = NULL;
-
- scsi_done_fn(scmnd);
-
- mempool_free(cmd_request, memp->request_mempool);
-}
-
-static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
-{
- bool allowed = true;
- u8 scsi_op = scmnd->cmnd[0];
-
- switch (scsi_op) {
- /* smartd sends this command, which will offline the device */
- case SET_WINDOW:
- scmnd->result = ILLEGAL_REQUEST << 16;
- allowed = false;
- break;
- default:
- break;
- }
- return allowed;
-}
-
-/*
- * storvsc_queuecommand - Initiate command processing
- */
-static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
-{
- int ret;
- struct hv_host_device *host_dev = shost_priv(host);
- struct hv_device *dev = host_dev->dev;
- struct hv_storvsc_request *request;
- struct storvsc_cmd_request *cmd_request;
- unsigned int request_size = 0;
- int i;
- struct scatterlist *sgl;
- unsigned int sg_count = 0;
- struct vmscsi_request *vm_srb;
- struct stor_mem_pools *memp = scmnd->device->hostdata;
-
- if (storvsc_check_scsi_cmd(scmnd) == false) {
- scmnd->scsi_done(scmnd);
- return 0;
- }
-
- /* If retrying, no need to prep the cmd */
- if (scmnd->host_scribble) {
-
- cmd_request =
- (struct storvsc_cmd_request *)scmnd->host_scribble;
-
- goto retry_request;
- }
-
- request_size = sizeof(struct storvsc_cmd_request);
-
- cmd_request = mempool_alloc(memp->request_mempool,
- GFP_ATOMIC);
- if (!cmd_request)
- return SCSI_MLQUEUE_DEVICE_BUSY;
-
- memset(cmd_request, 0, sizeof(struct storvsc_cmd_request));
-
- /* Setup the cmd request */
- cmd_request->bounce_sgl_count = 0;
- cmd_request->bounce_sgl = NULL;
- cmd_request->cmd = scmnd;
-
- scmnd->host_scribble = (unsigned char *)cmd_request;
-
- request = &cmd_request->request;
- vm_srb = &request->vstor_packet.vm_srb;
-
-
- /* Build the SRB */
- switch (scmnd->sc_data_direction) {
- case DMA_TO_DEVICE:
- vm_srb->data_in = WRITE_TYPE;
- break;
- case DMA_FROM_DEVICE:
- vm_srb->data_in = READ_TYPE;
- break;
- default:
- vm_srb->data_in = UNKNOWN_TYPE;
- break;
- }
-
- request->on_io_completion = storvsc_command_completion;
- request->context = cmd_request;/* scmnd; */
-
- vm_srb->port_number = host_dev->port;
- vm_srb->path_id = scmnd->device->channel;
- vm_srb->target_id = scmnd->device->id;
- vm_srb->lun = scmnd->device->lun;
-
- vm_srb->cdb_length = scmnd->cmd_len;
-
- memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
-
- request->sense_buffer = scmnd->sense_buffer;
-
-
- request->data_buffer.len = scsi_bufflen(scmnd);
- if (scsi_sg_count(scmnd)) {
- sgl = (struct scatterlist *)scsi_sglist(scmnd);
- sg_count = scsi_sg_count(scmnd);
-
- /* check if we need to bounce the sgl */
- if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
- cmd_request->bounce_sgl =
- create_bounce_buffer(sgl, scsi_sg_count(scmnd),
- scsi_bufflen(scmnd),
- vm_srb->data_in);
- if (!cmd_request->bounce_sgl) {
- scmnd->host_scribble = NULL;
- mempool_free(cmd_request,
- memp->request_mempool);
-
- return SCSI_MLQUEUE_HOST_BUSY;
- }
-
- cmd_request->bounce_sgl_count =
- ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
- PAGE_SHIFT;
-
- if (vm_srb->data_in == WRITE_TYPE)
- copy_to_bounce_buffer(sgl,
- cmd_request->bounce_sgl,
- scsi_sg_count(scmnd));
-
- sgl = cmd_request->bounce_sgl;
- sg_count = cmd_request->bounce_sgl_count;
- }
-
- request->data_buffer.offset = sgl[0].offset;
-
- for (i = 0; i < sg_count; i++)
- request->data_buffer.pfn_array[i] =
- page_to_pfn(sg_page((&sgl[i])));
-
- } else if (scsi_sglist(scmnd)) {
- request->data_buffer.offset =
- virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
- request->data_buffer.pfn_array[0] =
- virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
- }
-
-retry_request:
- /* Invokes the vsc to start an IO */
- ret = storvsc_do_io(dev, &cmd_request->request);
-
- if (ret == -EAGAIN) {
- /* no more space */
-
- if (cmd_request->bounce_sgl_count)
- destroy_bounce_buffer(cmd_request->bounce_sgl,
- cmd_request->bounce_sgl_count);
-
- mempool_free(cmd_request, memp->request_mempool);
-
- scmnd->host_scribble = NULL;
-
- ret = SCSI_MLQUEUE_DEVICE_BUSY;
- }
-
- return ret;
-}
-
-/* Scsi driver */
-static struct scsi_host_template scsi_driver = {
- .module = THIS_MODULE,
- .name = "storvsc_host_t",
- .bios_param = storvsc_get_chs,
- .queuecommand = storvsc_queuecommand,
- .eh_host_reset_handler = storvsc_host_reset_handler,
- .slave_alloc = storvsc_device_alloc,
- .slave_destroy = storvsc_device_destroy,
- .slave_configure = storvsc_device_configure,
- .cmd_per_lun = 1,
- /* 64 max_queue * 1 target */
- .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
- .this_id = -1,
-	/* no use setting to 0 since ll_blk_rw resets it to 1 */
- /* currently 32 */
- .sg_tablesize = MAX_MULTIPAGE_BUFFER_COUNT,
- .use_clustering = DISABLE_CLUSTERING,
-	/* Make sure we don't get a sg segment that crosses a page boundary */
- .dma_boundary = PAGE_SIZE-1,
-};
-
-enum {
- SCSI_GUID,
- IDE_GUID,
-};
-
-static const struct hv_vmbus_device_id id_table[] = {
- /* SCSI guid */
- { VMBUS_DEVICE(0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
- 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
- .driver_data = SCSI_GUID },
- /* IDE guid */
- { VMBUS_DEVICE(0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
- 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
- .driver_data = IDE_GUID },
- { },
-};
-
-MODULE_DEVICE_TABLE(vmbus, id_table);
-
-
-/*
- * storvsc_probe - Add a new device for this driver
- */
-
-static int storvsc_probe(struct hv_device *device,
- const struct hv_vmbus_device_id *dev_id)
-{
- int ret;
- struct Scsi_Host *host;
- struct hv_host_device *host_dev;
- bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
- int path = 0;
- int target = 0;
- struct storvsc_device *stor_device;
-
- host = scsi_host_alloc(&scsi_driver,
- sizeof(struct hv_host_device));
- if (!host)
- return -ENOMEM;
-
- host_dev = shost_priv(host);
- memset(host_dev, 0, sizeof(struct hv_host_device));
-
- host_dev->port = host->host_no;
- host_dev->dev = device;
-
-
- stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
- if (!stor_device) {
- ret = -ENOMEM;
- goto err_out0;
- }
-
- stor_device->destroy = false;
- init_waitqueue_head(&stor_device->waiting_to_drain);
- stor_device->device = device;
- stor_device->host = host;
- hv_set_drvdata(device, stor_device);
-
- stor_device->port_number = host->host_no;
- ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size);
- if (ret)
- goto err_out1;
-
- if (dev_is_ide)
- storvsc_get_ide_info(device, &target, &path);
-
- host_dev->path = stor_device->path_id;
- host_dev->target = stor_device->target_id;
-
- /* max # of devices per target */
- host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
- /* max # of targets per channel */
- host->max_id = STORVSC_MAX_TARGETS;
- /* max # of channels */
- host->max_channel = STORVSC_MAX_CHANNELS - 1;
- /* max cmd length */
- host->max_cmd_len = STORVSC_MAX_CMD_LEN;
-
- /* Register the HBA and start the scsi bus scan */
- ret = scsi_add_host(host, &device->device);
- if (ret != 0)
- goto err_out2;
-
- if (!dev_is_ide) {
- scsi_scan_host(host);
- return 0;
- }
- ret = scsi_add_device(host, 0, target, 0);
- if (ret) {
- scsi_remove_host(host);
- goto err_out2;
- }
- return 0;
-
-err_out2:
- /*
- * Once we have connected with the host, we would need to
-	 * invoke storvsc_dev_remove() to rollback this state and
- * this call also frees up the stor_device; hence the jump around
- * err_out1 label.
- */
- storvsc_dev_remove(device);
- goto err_out0;
-
-err_out1:
- kfree(stor_device);
-
-err_out0:
- scsi_host_put(host);
- return ret;
-}
-
-/* The one and only one */
-
-static struct hv_driver storvsc_drv = {
- .name = KBUILD_MODNAME,
- .id_table = id_table,
- .probe = storvsc_probe,
- .remove = storvsc_remove,
-};
-
-static int __init storvsc_drv_init(void)
-{
- u32 max_outstanding_req_per_channel;
-
- /*
- * Divide the ring buffer data size (which is 1 page less
- * than the ring buffer size since that page is reserved for
- * the ring buffer indices) by the max request size (which is
- * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
- */
- max_outstanding_req_per_channel =
- ((storvsc_ringbuffer_size - PAGE_SIZE) /
- ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
- sizeof(struct vstor_packet) + sizeof(u64),
- sizeof(u64)));
-
- if (max_outstanding_req_per_channel <
- STORVSC_MAX_IO_REQUESTS)
- return -EINVAL;
-
- return vmbus_driver_register(&storvsc_drv);
-}
-
-static void __exit storvsc_drv_exit(void)
-{
- vmbus_driver_unregister(&storvsc_drv);
-}
-
-MODULE_LICENSE("GPL");
-MODULE_VERSION(HV_DRV_VERSION);
-MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
-module_init(storvsc_drv_init);
-module_exit(storvsc_drv_exit);
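
Editorial aside on the deleted file's VMSTOR version packing: VMSTOR_PROTOCOL_VERSION(4, 2)
puts the major in the high byte and the minor in the low byte, giving 0x0402, and the
MAJOR/MINOR macros invert this. A minimal userspace sketch (the macros are restated from
the file above; the assert harness itself is ours, not part of the driver):

	#include <assert.h>

	#define VMSTOR_PROTOCOL_MAJOR(v)	(((v) >> 8) & 0xff)
	#define VMSTOR_PROTOCOL_MINOR(v)	((v) & 0xff)
	#define VMSTOR_PROTOCOL_VERSION(M, m)	((((M) & 0xff) << 8) | ((m) & 0xff))

	int main(void)
	{
		unsigned short v = VMSTOR_PROTOCOL_VERSION(4, 2);

		assert(v == 0x0402);
		assert(VMSTOR_PROTOCOL_MAJOR(v) == 4);	/* high byte */
		assert(VMSTOR_PROTOCOL_MINOR(v) == 2);	/* low byte */
		return 0;
	}
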
diff --git a/drivers/staging/iio/Documentation/device.txt b/drivers/staging/iio/Documentation/device.txt
index 1abb80cb884e..8926f2448cc9 100644
--- a/drivers/staging/iio/Documentation/device.txt
+++ b/drivers/staging/iio/Documentation/device.txt
@@ -62,7 +62,7 @@ Then fill in the following:
An optional associated buffer.
- indio_dev->pollfunc:
Poll function related elements. This controls what occurs when a trigger
- to which this device is attached sends and event.
+ to which this device is attached sends an event.
- indio_dev->channels:
 	Specification of device channels.  Most attributes etc. are built
 	from this spec.
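
The pollfunc mentioned in this document is normally set up with iio_alloc_pollfunc(), as
the ring-buffer diffs further down do. A rough sketch of the 3.4-era call, where
my_trigger_handler and the name format string are placeholders rather than anything from
this patch:

	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &my_trigger_handler,
						 IRQF_ONESHOT,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL)
		return -ENOMEM;
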
diff --git a/drivers/staging/iio/Documentation/iio_event_monitor.c b/drivers/staging/iio/Documentation/iio_event_monitor.c
new file mode 100644
index 000000000000..0d21a277305f
--- /dev/null
+++ b/drivers/staging/iio/Documentation/iio_event_monitor.c
@@ -0,0 +1,241 @@
+/* Industrialio event test code.
+ *
+ * Copyright (c) 2011-2012 Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is primarily intended as an example application.
+ * It reads events from the specified device and pretty prints them
+ * after decoding the event code.
+ *
+ * Usage:
+ * iio_event_monitor <device_name>
+ *
+ */
+
+#define _GNU_SOURCE
+
+#include <unistd.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <poll.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include "iio_utils.h"
+#include "../events.h"
+
+static const char * const iio_chan_type_name_spec[] = {
+ [IIO_VOLTAGE] = "voltage",
+ [IIO_CURRENT] = "current",
+ [IIO_POWER] = "power",
+ [IIO_ACCEL] = "accel",
+ [IIO_ANGL_VEL] = "anglvel",
+ [IIO_MAGN] = "magn",
+ [IIO_LIGHT] = "illuminance",
+ [IIO_INTENSITY] = "intensity",
+ [IIO_PROXIMITY] = "proximity",
+ [IIO_TEMP] = "temp",
+ [IIO_INCLI] = "incli",
+ [IIO_ROT] = "rot",
+ [IIO_ANGL] = "angl",
+ [IIO_TIMESTAMP] = "timestamp",
+ [IIO_CAPACITANCE] = "capacitance",
+};
+
+static const char * const iio_ev_type_text[] = {
+ [IIO_EV_TYPE_THRESH] = "thresh",
+ [IIO_EV_TYPE_MAG] = "mag",
+ [IIO_EV_TYPE_ROC] = "roc",
+ [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
+ [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
+};
+
+static const char * const iio_ev_dir_text[] = {
+ [IIO_EV_DIR_EITHER] = "either",
+ [IIO_EV_DIR_RISING] = "rising",
+ [IIO_EV_DIR_FALLING] = "falling"
+};
+
+static const char * const iio_modifier_names[] = {
+ [IIO_MOD_X] = "x",
+ [IIO_MOD_Y] = "y",
+ [IIO_MOD_Z] = "z",
+ [IIO_MOD_LIGHT_BOTH] = "both",
+ [IIO_MOD_LIGHT_IR] = "ir",
+};
+
+static bool event_is_known(struct iio_event_data *event)
+{
+ enum iio_chan_type type = IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event->id);
+ enum iio_modifier mod = IIO_EVENT_CODE_EXTRACT_MODIFIER(event->id);
+ enum iio_event_type ev_type = IIO_EVENT_CODE_EXTRACT_TYPE(event->id);
+ enum iio_event_direction dir = IIO_EVENT_CODE_EXTRACT_DIR(event->id);
+
+ switch (type) {
+ case IIO_VOLTAGE:
+ case IIO_CURRENT:
+ case IIO_POWER:
+ case IIO_ACCEL:
+ case IIO_ANGL_VEL:
+ case IIO_MAGN:
+ case IIO_LIGHT:
+ case IIO_INTENSITY:
+ case IIO_PROXIMITY:
+ case IIO_TEMP:
+ case IIO_INCLI:
+ case IIO_ROT:
+ case IIO_ANGL:
+ case IIO_TIMESTAMP:
+ case IIO_CAPACITANCE:
+ break;
+ default:
+ return false;
+ }
+
+ switch (mod) {
+ case IIO_NO_MOD:
+ case IIO_MOD_X:
+ case IIO_MOD_Y:
+ case IIO_MOD_Z:
+ case IIO_MOD_LIGHT_BOTH:
+ case IIO_MOD_LIGHT_IR:
+ break;
+ default:
+ return false;
+ }
+
+ switch (ev_type) {
+ case IIO_EV_TYPE_THRESH:
+ case IIO_EV_TYPE_MAG:
+ case IIO_EV_TYPE_ROC:
+ case IIO_EV_TYPE_THRESH_ADAPTIVE:
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ break;
+ default:
+ return false;
+ }
+
+ switch (dir) {
+ case IIO_EV_DIR_EITHER:
+ case IIO_EV_DIR_RISING:
+ case IIO_EV_DIR_FALLING:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static void print_event(struct iio_event_data *event)
+{
+ enum iio_chan_type type = IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event->id);
+ enum iio_modifier mod = IIO_EVENT_CODE_EXTRACT_MODIFIER(event->id);
+ enum iio_event_type ev_type = IIO_EVENT_CODE_EXTRACT_TYPE(event->id);
+ enum iio_event_direction dir = IIO_EVENT_CODE_EXTRACT_DIR(event->id);
+ int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event->id);
+ int chan2 = IIO_EVENT_CODE_EXTRACT_CHAN2(event->id);
+ bool diff = IIO_EVENT_CODE_EXTRACT_DIFF(event->id);
+
+ if (!event_is_known(event)) {
+ printf("Unknown event: time: %lld, id: %llx\n",
+ event->timestamp, event->id);
+ return;
+ }
+
+ printf("Event: time: %lld, ", event->timestamp);
+
+ if (mod != IIO_NO_MOD) {
+ printf("type: %s(%s), ",
+ iio_chan_type_name_spec[type],
+ iio_modifier_names[mod]);
+ } else {
+ printf("type: %s, ",
+ iio_chan_type_name_spec[type]);
+ }
+
+ if (diff && chan >= 0 && chan2 >= 0)
+ printf("channel: %d-%d, ", chan, chan2);
+ else if (chan >= 0)
+ printf("channel: %d, ", chan);
+
+ printf("evtype: %s, direction: %s\n",
+ iio_ev_type_text[ev_type],
+ iio_ev_dir_text[dir]);
+}
+
+int main(int argc, char **argv)
+{
+ struct iio_event_data event;
+ const char *device_name;
+ char *chrdev_name;
+ int ret;
+ int dev_num;
+ int fd, event_fd;
+
+ if (argc <= 1) {
+ printf("Usage: %s <device_name>\n", argv[0]);
+ return -1;
+ }
+
+ device_name = argv[1];
+
+ dev_num = find_type_by_name(device_name, "iio:device");
+ if (dev_num >= 0) {
+ printf("Found IIO device with name %s with device number %d\n",
+ device_name, dev_num);
+ ret = asprintf(&chrdev_name, "/dev/iio:device%d", dev_num);
+ if (ret < 0) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ } else {
+		/* If we can't find an IIO device by name, assume
+		   device_name is an IIO chrdev */
+ chrdev_name = strdup(device_name);
+ }
+
+ fd = open(chrdev_name, 0);
+ if (fd == -1) {
+ fprintf(stdout, "Failed to open %s\n", chrdev_name);
+ ret = -errno;
+ goto error_free_chrdev_name;
+ }
+
+ ret = ioctl(fd, IIO_GET_EVENT_FD_IOCTL, &event_fd);
+
+ close(fd);
+
+ if (ret == -1 || event_fd == -1) {
+ fprintf(stdout, "Failed to retrieve event fd\n");
+ ret = -errno;
+ goto error_free_chrdev_name;
+ }
+
+ while (true) {
+ ret = read(event_fd, &event, sizeof(event));
+ if (ret == -1) {
+ if (errno == EAGAIN) {
+ printf("nothing available\n");
+ continue;
+ } else {
+ perror("Failed to read event from device");
+ ret = -errno;
+ break;
+ }
+ }
+
+ print_event(&event);
+ }
+
+ close(event_fd);
+error_free_chrdev_name:
+ free(chrdev_name);
+error_ret:
+ return ret;
+}
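
An illustrative session with the new monitor; the device name, timestamp and event fields
below are invented, but the output format follows the printf() calls in the file above:

	$ ./iio_event_monitor ad7291
	Found IIO device with name ad7291 with device number 0
	Event: time: 1332443662212345678, type: voltage, channel: 0, evtype: thresh, direction: rising
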
diff --git a/drivers/staging/iio/Documentation/inkernel.txt b/drivers/staging/iio/Documentation/inkernel.txt
new file mode 100644
index 000000000000..a05823e955d2
--- /dev/null
+++ b/drivers/staging/iio/Documentation/inkernel.txt
@@ -0,0 +1,58 @@
+Industrial I/O Subsystem in-kernel consumers.
+
+The IIO subsystem can act as a layer under other elements of the kernel
+providing a means of obtaining ADC type readings or of driving DAC type
+signals. The functionality supported will grow as use cases arise.
+
+Describing the channel mapping (iio/machine.h)
+
+Channel associations are described using:
+
+struct iio_map {
+ const char *adc_channel_label;
+ const char *consumer_dev_name;
+ const char *consumer_channel;
+};
+
+adc_channel_label identifies the channel on the IIO device by being
+matched against the datasheet_name field of the iio_chan_spec.
+
+consumer_dev_name allows identification of the consumer device.
+This is then used to find the channel mapping from the consumer device (see
+below).
+
+Finally, consumer_channel is a string identifying the channel to the
+consumer (perhaps 'battery_voltage' or similar).
+
+An array of these structures is then passed to the IIO driver.
+
+Supporting in-kernel interfaces in the driver (driver.h)
+
+The driver must provide datasheet_name values for its channels and
+must pass the iio_map structures and a pointer to its own iio_dev
+structure on to the core via a call to iio_map_array_register.  On
+removal, iio_map_array_unregister reverses this process.
+
+The result of this is that the IIO core now has all the information needed
+to associate a given channel with the consumer requesting it.
+
+Acting as an IIO consumer (consumer.h)
+
+The consumer first has to obtain an iio_channel structure from the core
+by calling iio_channel_get(). The correct channel is identified by:
+
+* matching dev or dev_name against consumer_dev and consumer_dev_name
+* matching consumer_channel against consumer_channel in the map
+
+There are then a number of functions that can be used to get information
+about this channel, such as its current reading.
+
+e.g.
+iio_st_read_channel_raw() - get a reading
+iio_st_read_channel_type() - get the type of channel
+
+There is also provision for retrieving all of the channels associated
+with a given consumer. This is useful for generic drivers such as
+iio_hwmon where the number and naming of channels is not known by the
+consumer driver. To do this, use iio_st_channel_get_all.
+
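
Putting the pieces of this document together, a rough sketch of both sides of the mapping.
The device and channel names are invented, and the exact function prefixes (iio_ vs
iio_st_) were still in flux at this point, so treat the calls as illustrative:

	/* Board/provider side: expose the ADC channel whose datasheet_name
	 * is "adc0" to the consumer device "iio_hwmon.0". */
	static struct iio_map board_maps[] = {
		{
			.adc_channel_label = "adc0",
			.consumer_dev_name = "iio_hwmon.0",
			.consumer_channel  = "battery_voltage",
		},
		{ },	/* sentinel */
	};

	/* In the ADC driver's probe(), after registering the device: */
	ret = iio_map_array_register(indio_dev, board_maps);

	/* Consumer side: claim the channel and take a raw reading. */
	struct iio_channel *chan;
	int val;

	chan = iio_st_channel_get("iio_hwmon.0", "battery_voltage");
	if (!IS_ERR(chan) && iio_st_read_channel_raw(chan, &val) >= 0)
		pr_info("battery_voltage: raw value %d\n", val);
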
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index 90162aa8b2df..fe1586718880 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -11,6 +11,13 @@ menuconfig IIO
number of different physical interfaces (i2c, spi, etc). See
drivers/staging/iio/Documentation for more information.
if IIO
+config IIO_ST_HWMON
+ tristate "Hwmon driver that uses channels specified via iio maps"
+ depends on HWMON
+ help
+ This is a platform driver that in combination with a suitable
+ map allows IIO devices to provide basic hwmon functionality
+ for those channels specified in the map.
config IIO_BUFFER
bool "Enable buffer support within IIO"
@@ -79,7 +86,7 @@ config IIO_SIMPLE_DUMMY
help
Driver intended mainly as documentation for how to write
a driver. May also be useful for testing userspace code
- without hardward.
+ without hardware.
if IIO_SIMPLE_DUMMY
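
For the new IIO_ST_HWMON entry above: iio_hwmon is a platform driver, so a board would
instantiate it and supply the channel map through the in-kernel consumer interface
documented in inkernel.txt earlier in this series. A minimal sketch; the device id and
the corresponding map are assumptions (see the fuller example after the inkernel.txt
diff above):

	static struct platform_device board_iio_hwmon = {
		.name = "iio_hwmon",
		.id   = 0,	/* matches consumer_dev_name "iio_hwmon.0" */
	};

	/* in board init code: */
	platform_device_register(&board_iio_hwmon);
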
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index 1340aead18b4..5075291dda7a 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -3,7 +3,7 @@
#
obj-$(CONFIG_IIO) += industrialio.o
-industrialio-y := industrialio-core.o
+industrialio-y := industrialio-core.o industrialio-event.o inkern.o
industrialio-$(CONFIG_IIO_BUFFER) += industrialio-buffer.o
industrialio-$(CONFIG_IIO_TRIGGER) += industrialio-trigger.o
@@ -17,6 +17,8 @@ iio_dummy-$(CONFIG_IIO_SIMPLE_DUMMY_BUFFER) += iio_simple_dummy_buffer.o
obj-$(CONFIG_IIO_DUMMY_EVGEN) += iio_dummy_evgen.o
+obj-$(CONFIG_IIO_ST_HWMON) += iio_hwmon.o
+
obj-y += accel/
obj-y += adc/
obj-y += addac/
diff --git a/drivers/staging/iio/accel/adis16201_ring.c b/drivers/staging/iio/accel/adis16201_ring.c
index 26c610faee3f..97f9e6b159d9 100644
--- a/drivers/staging/iio/accel/adis16201_ring.c
+++ b/drivers/staging/iio/accel/adis16201_ring.c
@@ -115,9 +115,7 @@ int adis16201_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
ring->scan_timestamp = true;
- ring->access = &ring_sw_access_funcs;
indio_dev->setup_ops = &adis16201_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
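
The assignment deleted above (ring->access = &ring_sw_access_funcs;) goes away because,
elsewhere in this series, the buffer allocation helpers took over setting their own
access functions; the same one-line removal repeats in the adis16203/16204/16209/16240,
lis3l02dq and ADC driver diffs below. After the change, buffer setup reduces to roughly
the following (helper name per the lis3l02dq.h alias below; a sketch, not a full driver):

	struct iio_buffer *ring;

	ring = iio_sw_rb_allocate(indio_dev);	/* access funcs now set here */
	if (!ring)
		return -ENOMEM;
	indio_dev->buffer = ring;
	ring->scan_timestamp = true;		/* driver-specific options remain */
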
diff --git a/drivers/staging/iio/accel/adis16203_ring.c b/drivers/staging/iio/accel/adis16203_ring.c
index 064640d15e41..6a8963db4f60 100644
--- a/drivers/staging/iio/accel/adis16203_ring.c
+++ b/drivers/staging/iio/accel/adis16203_ring.c
@@ -117,9 +117,7 @@ int adis16203_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
ring->scan_timestamp = true;
- ring->access = &ring_sw_access_funcs;
indio_dev->setup_ops = &adis16203_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
diff --git a/drivers/staging/iio/accel/adis16204_ring.c b/drivers/staging/iio/accel/adis16204_ring.c
index 4081179dfa5c..5c8ab7338864 100644
--- a/drivers/staging/iio/accel/adis16204_ring.c
+++ b/drivers/staging/iio/accel/adis16204_ring.c
@@ -112,8 +112,6 @@ int adis16204_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
- ring->access = &ring_sw_access_funcs;
ring->scan_timestamp = true;
indio_dev->setup_ops = &adis16204_ring_setup_ops;
diff --git a/drivers/staging/iio/accel/adis16209_ring.c b/drivers/staging/iio/accel/adis16209_ring.c
index 2a6fd334f5f1..57254b6b38b7 100644
--- a/drivers/staging/iio/accel/adis16209_ring.c
+++ b/drivers/staging/iio/accel/adis16209_ring.c
@@ -113,8 +113,6 @@ int adis16209_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
- ring->access = &ring_sw_access_funcs;
ring->scan_timestamp = true;
indio_dev->setup_ops = &adis16209_ring_setup_ops;
diff --git a/drivers/staging/iio/accel/adis16240_ring.c b/drivers/staging/iio/accel/adis16240_ring.c
index e23622d96f9f..43ba84e993ad 100644
--- a/drivers/staging/iio/accel/adis16240_ring.c
+++ b/drivers/staging/iio/accel/adis16240_ring.c
@@ -110,8 +110,6 @@ int adis16240_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
- ring->access = &ring_sw_access_funcs;
ring->scan_timestamp = true;
indio_dev->setup_ops = &adis16240_ring_setup_ops;
diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
index 2db383fc2743..ae5f225b4bb2 100644
--- a/drivers/staging/iio/accel/lis3l02dq.h
+++ b/drivers/staging/iio/accel/lis3l02dq.h
@@ -187,12 +187,10 @@ void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev);
#ifdef CONFIG_LIS3L02DQ_BUF_RING_SW
#define lis3l02dq_free_buf iio_sw_rb_free
#define lis3l02dq_alloc_buf iio_sw_rb_allocate
-#define lis3l02dq_access_funcs ring_sw_access_funcs
#endif
#ifdef CONFIG_LIS3L02DQ_BUF_KFIFO
#define lis3l02dq_free_buf iio_kfifo_free
#define lis3l02dq_alloc_buf iio_kfifo_allocate
-#define lis3l02dq_access_funcs kfifo_access_funcs
#endif
irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private);
#define lis3l02dq_th lis3l02dq_data_rdy_trig_poll
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index 98c5c92d3450..0fc3973f32ae 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -239,7 +239,7 @@ static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
__lis3l02dq_write_data_ready_config(&indio_dev->dev, state);
if (state == false) {
/*
- * A possible quirk with teh handler is currently worked around
+ * A possible quirk with the handler is currently worked around
* by ensuring outstanding read events are cleared.
*/
ret = lis3l02dq_read_all(indio_dev, NULL);
@@ -406,8 +406,6 @@ int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
return -ENOMEM;
indio_dev->buffer = buffer;
- /* Effectively select the buffer implementation */
- indio_dev->buffer->access = &lis3l02dq_access_funcs;
buffer->scan_timestamp = true;
indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops;
diff --git a/drivers/staging/iio/accel/sca3000.h b/drivers/staging/iio/accel/sca3000.h
index ad38dd955cd4..131daac90012 100644
--- a/drivers/staging/iio/accel/sca3000.h
+++ b/drivers/staging/iio/accel/sca3000.h
@@ -136,7 +136,7 @@
#define SCA3000_INT_MASK_ACTIVE_HIGH 0x01
#define SCA3000_INT_MASK_ACTIVE_LOW 0x00
-/* Values of mulipexed registers (write to ctrl_data after select) */
+/* Values of multiplexed registers (write to ctrl_data after select) */
#define SCA3000_REG_ADDR_CTRL_DATA 0x22
/* Measurement modes available on some sca3000 series chips. Code assumes others
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index d9decea4fa62..592eabd85f36 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -193,4 +193,13 @@ config MAX1363_RING_BUFFER
Say yes here to include ring buffer support in the MAX1363
ADC driver.
+config LPC32XX_ADC
+ tristate "NXP LPC32XX ADC"
+ depends on ARCH_LPC32XX && !TOUCHSCREEN_LPC32XX
+ help
+ Say yes here to build support for the integrated ADC inside the
+ LPC32XX SoC. Note that this feature uses the same hardware as the
+ touchscreen driver, so you can only select one of the two drivers
+ (lpc32xx_adc or lpc32xx_ts). Provides direct access via sysfs.
+
endmenu
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index ceee7f3c3061..f83ab9551d8e 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -37,3 +37,4 @@ obj-$(CONFIG_AD7192) += ad7192.o
obj-$(CONFIG_ADT7310) += adt7310.o
obj-$(CONFIG_ADT7410) += adt7410.o
obj-$(CONFIG_AD7280) += ad7280a.o
+obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 45f4504ed927..9fd6d63d2999 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -561,8 +561,6 @@ static int ad7192_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&ad7192_trigger_handler,
IRQF_ONESHOT,
@@ -824,25 +822,20 @@ static struct attribute *ad7192_attributes[] = {
NULL
};
-static umode_t ad7192_attr_is_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad7192_state *st = iio_priv(indio_dev);
-
- umode_t mode = attr->mode;
-
- if ((st->devid != ID_AD7195) &&
- (attr == &iio_dev_attr_ac_excitation_en.dev_attr.attr))
- mode = 0;
-
- return mode;
-}
-
static const struct attribute_group ad7192_attribute_group = {
.attrs = ad7192_attributes,
- .is_visible = ad7192_attr_is_visible,
+};
+
+static struct attribute *ad7195_attributes[] = {
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
+ &iio_dev_attr_bridge_switch_en.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group ad7195_attribute_group = {
+ .attrs = ad7195_attributes,
};
static int ad7192_read_raw(struct iio_dev *indio_dev,
@@ -972,6 +965,15 @@ static const struct iio_info ad7192_info = {
.driver_module = THIS_MODULE,
};
+static const struct iio_info ad7195_info = {
+ .read_raw = &ad7192_read_raw,
+ .write_raw = &ad7192_write_raw,
+ .write_raw_get_fmt = &ad7192_write_raw_get_fmt,
+ .attrs = &ad7195_attribute_group,
+ .validate_trigger = ad7192_validate_trigger,
+ .driver_module = THIS_MODULE,
+};
+
#define AD7192_CHAN_DIFF(_chan, _chan2, _name, _address, _si) \
{ .type = IIO_VOLTAGE, \
.differential = 1, \
@@ -1064,7 +1066,10 @@ static int __devinit ad7192_probe(struct spi_device *spi)
indio_dev->channels = ad7192_channels;
indio_dev->num_channels = ARRAY_SIZE(ad7192_channels);
indio_dev->available_scan_masks = st->available_scan_masks;
- indio_dev->info = &ad7192_info;
+ if (st->devid == ID_AD7195)
+ indio_dev->info = &ad7195_info;
+ else
+ indio_dev->info = &ad7192_info;
for (i = 0; i < indio_dev->num_channels; i++)
st->available_scan_masks[i] = (1 << i) | (1 <<
diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c
index 0a13616e3db9..81d6b6128cb0 100644
--- a/drivers/staging/iio/adc/ad7291.c
+++ b/drivers/staging/iio/adc/ad7291.c
@@ -321,7 +321,7 @@ static int ad7291_read_event_value(struct iio_dev *indio_dev,
switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
case IIO_VOLTAGE:
- reg = ad7291_limit_regs[IIO_EVENT_CODE_EXTRACT_NUM(event_code)]
+ reg = ad7291_limit_regs[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)]
[!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
IIO_EV_DIR_RISING)];
@@ -359,7 +359,7 @@ static int ad7291_write_event_value(struct iio_dev *indio_dev,
case IIO_VOLTAGE:
if (val > AD7291_VALUE_MASK || val < 0)
return -EINVAL;
- reg = ad7291_limit_regs[IIO_EVENT_CODE_EXTRACT_NUM(event_code)]
+ reg = ad7291_limit_regs[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)]
[!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
IIO_EV_DIR_RISING)];
return ad7291_i2c_write(chip, reg, val);
@@ -386,7 +386,7 @@ static int ad7291_read_event_config(struct iio_dev *indio_dev,
switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
case IIO_VOLTAGE:
if (chip->c_mask &
- (1 << (15 - IIO_EVENT_CODE_EXTRACT_NUM(event_code))))
+ (1 << (15 - IIO_EVENT_CODE_EXTRACT_CHAN(event_code))))
return 1;
else
return 0;
@@ -418,12 +418,12 @@ static int ad7291_write_event_config(struct iio_dev *indio_dev,
switch (IIO_EVENT_CODE_EXTRACT_TYPE(event_code)) {
case IIO_VOLTAGE:
if ((!state) && (chip->c_mask & (1 << (15 -
- IIO_EVENT_CODE_EXTRACT_NUM(event_code)))))
- chip->c_mask &= ~(1 << (15 - IIO_EVENT_CODE_EXTRACT_NUM
+ IIO_EVENT_CODE_EXTRACT_CHAN(event_code)))))
+ chip->c_mask &= ~(1 << (15 - IIO_EVENT_CODE_EXTRACT_CHAN
(event_code)));
else if (state && (!(chip->c_mask & (1 << (15 -
- IIO_EVENT_CODE_EXTRACT_NUM(event_code))))))
- chip->c_mask |= (1 << (15 - IIO_EVENT_CODE_EXTRACT_NUM
+ IIO_EVENT_CODE_EXTRACT_CHAN(event_code))))))
+ chip->c_mask |= (1 << (15 - IIO_EVENT_CODE_EXTRACT_CHAN
(event_code)));
else
break;
diff --git a/drivers/staging/iio/adc/ad7298_ring.c b/drivers/staging/iio/adc/ad7298_ring.c
index d1a12dd015e2..feeb0eeba59a 100644
--- a/drivers/staging/iio/adc/ad7298_ring.c
+++ b/drivers/staging/iio/adc/ad7298_ring.c
@@ -131,9 +131,6 @@ int ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
-
indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
&ad7298_trigger_handler,
IRQF_ONESHOT,
diff --git a/drivers/staging/iio/adc/ad7476_ring.c b/drivers/staging/iio/adc/ad7476_ring.c
index 4e298b2a05b2..d6af6c05ce1c 100644
--- a/drivers/staging/iio/adc/ad7476_ring.c
+++ b/drivers/staging/iio/adc/ad7476_ring.c
@@ -23,7 +23,7 @@
/**
* ad7476_ring_preenable() setup the parameters of the ring before enabling
*
- * The complex nature of the setting of the nuber of bytes per datum is due
+ * The complex nature of the setting of the number of bytes per datum is due
* to this driver currently ensuring that the timestamp is stored at an 8
* byte boundary.
**/
@@ -98,8 +98,6 @@ int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc
= iio_alloc_pollfunc(NULL,
&ad7476_trigger_handler,
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index ddb7ef92f5c1..97e8d3d4471e 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -197,7 +197,7 @@ static IIO_DEVICE_ATTR(oversampling_ratio, S_IRUGO | S_IWUSR,
ad7606_store_oversampling_ratio, 0);
static IIO_CONST_ATTR(oversampling_ratio_available, "0 2 4 8 16 32 64");
-static struct attribute *ad7606_attributes[] = {
+static struct attribute *ad7606_attributes_os_and_range[] = {
&iio_dev_attr_in_voltage_range.dev_attr.attr,
&iio_const_attr_in_voltage_range_available.dev_attr.attr,
&iio_dev_attr_oversampling_ratio.dev_attr.attr,
@@ -205,34 +205,28 @@ static struct attribute *ad7606_attributes[] = {
NULL,
};
-static umode_t ad7606_attr_is_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad7606_state *st = iio_priv(indio_dev);
+static const struct attribute_group ad7606_attribute_group_os_and_range = {
+ .attrs = ad7606_attributes_os_and_range,
+};
- umode_t mode = attr->mode;
-
- if (!(gpio_is_valid(st->pdata->gpio_os0) &&
- gpio_is_valid(st->pdata->gpio_os1) &&
- gpio_is_valid(st->pdata->gpio_os2)) &&
- (attr == &iio_dev_attr_oversampling_ratio.dev_attr.attr ||
- attr ==
- &iio_const_attr_oversampling_ratio_available.dev_attr.attr))
- mode = 0;
- else if (!gpio_is_valid(st->pdata->gpio_range) &&
- (attr == &iio_dev_attr_in_voltage_range.dev_attr.attr ||
- attr ==
- &iio_const_attr_in_voltage_range_available.dev_attr.attr))
- mode = 0;
-
- return mode;
-}
+static struct attribute *ad7606_attributes_os[] = {
+ &iio_dev_attr_oversampling_ratio.dev_attr.attr,
+ &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
+ NULL,
+};
-static const struct attribute_group ad7606_attribute_group = {
- .attrs = ad7606_attributes,
- .is_visible = ad7606_attr_is_visible,
+static const struct attribute_group ad7606_attribute_group_os = {
+ .attrs = ad7606_attributes_os,
+};
+
+static struct attribute *ad7606_attributes_range[] = {
+ &iio_dev_attr_in_voltage_range.dev_attr.attr,
+ &iio_const_attr_in_voltage_range_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad7606_attribute_group_range = {
+ .attrs = ad7606_attributes_range,
};
#define AD7606_CHANNEL(num) \
@@ -435,10 +429,27 @@ static irqreturn_t ad7606_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
};
-static const struct iio_info ad7606_info = {
+static const struct iio_info ad7606_info_no_os_or_range = {
.driver_module = THIS_MODULE,
.read_raw = &ad7606_read_raw,
- .attrs = &ad7606_attribute_group,
+};
+
+static const struct iio_info ad7606_info_os_and_range = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &ad7606_read_raw,
+ .attrs = &ad7606_attribute_group_os_and_range,
+};
+
+static const struct iio_info ad7606_info_os = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &ad7606_read_raw,
+ .attrs = &ad7606_attribute_group_os,
+};
+
+static const struct iio_info ad7606_info_range = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &ad7606_read_raw,
+ .attrs = &ad7606_attribute_group_range,
};
struct iio_dev *ad7606_probe(struct device *dev, int irq,
@@ -483,7 +494,19 @@ struct iio_dev *ad7606_probe(struct device *dev, int irq,
st->chip_info = &ad7606_chip_info_tbl[id];
indio_dev->dev.parent = dev;
- indio_dev->info = &ad7606_info;
+ if (gpio_is_valid(st->pdata->gpio_os0) &&
+ gpio_is_valid(st->pdata->gpio_os1) &&
+ gpio_is_valid(st->pdata->gpio_os2)) {
+ if (gpio_is_valid(st->pdata->gpio_range))
+ indio_dev->info = &ad7606_info_os_and_range;
+ else
+ indio_dev->info = &ad7606_info_os;
+ } else {
+ if (gpio_is_valid(st->pdata->gpio_range))
+ indio_dev->info = &ad7606_info_range;
+ else
+ indio_dev->info = &ad7606_info_no_os_or_range;
+ }
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->name = st->chip_info->name;
indio_dev->channels = st->chip_info->channels;
diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c
index cff97568189e..bb152a8e8c92 100644
--- a/drivers/staging/iio/adc/ad7606_par.c
+++ b/drivers/staging/iio/adc/ad7606_par.c
@@ -173,18 +173,7 @@ static struct platform_driver ad7606_driver = {
},
};
-static int __init ad7606_init(void)
-{
- return platform_driver_register(&ad7606_driver);
-}
-
-static void __exit ad7606_cleanup(void)
-{
- platform_driver_unregister(&ad7606_driver);
-}
-
-module_init(ad7606_init);
-module_exit(ad7606_cleanup);
+module_platform_driver(ad7606_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
diff --git a/drivers/staging/iio/adc/ad7606_ring.c b/drivers/staging/iio/adc/ad7606_ring.c
index e8f94a18a943..1ef9fbcaf2de 100644
--- a/drivers/staging/iio/adc/ad7606_ring.c
+++ b/drivers/staging/iio/adc/ad7606_ring.c
@@ -110,8 +110,6 @@ int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(&ad7606_trigger_handler_th_bh,
&ad7606_trigger_handler_th_bh,
0,
diff --git a/drivers/staging/iio/adc/ad7793.c b/drivers/staging/iio/adc/ad7793.c
index 6a058b19c49a..84ecde1ad042 100644
--- a/drivers/staging/iio/adc/ad7793.c
+++ b/drivers/staging/iio/adc/ad7793.c
@@ -427,8 +427,6 @@ static int ad7793_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&ad7793_trigger_handler,
IRQF_ONESHOT,
diff --git a/drivers/staging/iio/adc/ad7887_ring.c b/drivers/staging/iio/adc/ad7887_ring.c
index 85076cd962e7..d1809079b63d 100644
--- a/drivers/staging/iio/adc/ad7887_ring.c
+++ b/drivers/staging/iio/adc/ad7887_ring.c
@@ -131,8 +131,6 @@ int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&ad7887_trigger_handler,
IRQF_ONESHOT,
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index d5b581d8bc2b..a8458669350f 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -256,7 +256,7 @@ static int ad799x_write_event_value(struct iio_dev *indio_dev,
struct ad799x_state *st = iio_priv(indio_dev);
int direction = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
IIO_EV_DIR_FALLING);
- int number = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+ int number = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
mutex_lock(&indio_dev->mlock);
ret = ad799x_i2c_write16(st,
@@ -275,7 +275,7 @@ static int ad799x_read_event_value(struct iio_dev *indio_dev,
struct ad799x_state *st = iio_priv(indio_dev);
int direction = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
IIO_EV_DIR_FALLING);
- int number = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+ int number = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
u16 valin;
mutex_lock(&indio_dev->mlock);
diff --git a/drivers/staging/iio/adc/ad799x_ring.c b/drivers/staging/iio/adc/ad799x_ring.c
index 5dded9e7820a..069765cab275 100644
--- a/drivers/staging/iio/adc/ad799x_ring.c
+++ b/drivers/staging/iio/adc/ad799x_ring.c
@@ -26,7 +26,7 @@
/**
* ad799x_ring_preenable() setup the parameters of the ring before enabling
*
- * The complex nature of the setting of the nuber of bytes per datum is due
+ * The complex nature of the setting of the number of bytes per datum is due
* to this driver currently ensuring that the timestamp is stored at an 8
* byte boundary.
**/
@@ -141,8 +141,6 @@ int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
&ad799x_trigger_handler,
IRQF_ONESHOT,
diff --git a/drivers/staging/iio/adc/adt7310.c b/drivers/staging/iio/adc/adt7310.c
index eec2f325d549..caf57c1169b1 100644
--- a/drivers/staging/iio/adc/adt7310.c
+++ b/drivers/staging/iio/adc/adt7310.c
@@ -725,32 +725,19 @@ static struct attribute *adt7310_event_int_attributes[] = {
&iio_dev_attr_fault_queue.dev_attr.attr,
&iio_dev_attr_t_alarm_high.dev_attr.attr,
&iio_dev_attr_t_alarm_low.dev_attr.attr,
- &iio_dev_attr_t_hyst.dev_attr.attr,
- NULL,
-};
-
-static struct attribute *adt7310_event_ct_attributes[] = {
- &iio_dev_attr_event_mode.dev_attr.attr,
- &iio_dev_attr_available_event_modes.dev_attr.attr,
- &iio_dev_attr_fault_queue.dev_attr.attr,
&iio_dev_attr_t_crit.dev_attr.attr,
&iio_dev_attr_t_hyst.dev_attr.attr,
NULL,
};
-static struct attribute_group adt7310_event_attribute_group[ADT7310_IRQS] = {
- {
- .attrs = adt7310_event_int_attributes,
- .name = "events",
- }, {
- .attrs = adt7310_event_ct_attributes,
- .name = "events",
- }
+static struct attribute_group adt7310_event_attribute_group = {
+ .attrs = adt7310_event_int_attributes,
+ .name = "events",
};
static const struct iio_info adt7310_info = {
.attrs = &adt7310_attribute_group,
- .event_attrs = adt7310_event_attribute_group,
+ .event_attrs = &adt7310_event_attribute_group,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/staging/iio/adc/adt7410.c b/drivers/staging/iio/adc/adt7410.c
index c62248ceb37a..dff3e8ca2d78 100644
--- a/drivers/staging/iio/adc/adt7410.c
+++ b/drivers/staging/iio/adc/adt7410.c
@@ -693,32 +693,19 @@ static struct attribute *adt7410_event_int_attributes[] = {
&iio_dev_attr_fault_queue.dev_attr.attr,
&iio_dev_attr_t_alarm_high.dev_attr.attr,
&iio_dev_attr_t_alarm_low.dev_attr.attr,
- &iio_dev_attr_t_hyst.dev_attr.attr,
- NULL,
-};
-
-static struct attribute *adt7410_event_ct_attributes[] = {
- &iio_dev_attr_event_mode.dev_attr.attr,
- &iio_dev_attr_available_event_modes.dev_attr.attr,
- &iio_dev_attr_fault_queue.dev_attr.attr,
&iio_dev_attr_t_crit.dev_attr.attr,
&iio_dev_attr_t_hyst.dev_attr.attr,
NULL,
};
-static struct attribute_group adt7410_event_attribute_group[ADT7410_IRQS] = {
- {
- .attrs = adt7410_event_int_attributes,
- .name = "events",
- }, {
- .attrs = adt7410_event_ct_attributes,
- .name = "events",
- }
+static struct attribute_group adt7410_event_attribute_group = {
+ .attrs = adt7410_event_int_attributes,
+ .name = "events",
};
static const struct iio_info adt7410_info = {
.attrs = &adt7410_attribute_group,
- .event_attrs = adt7410_event_attribute_group,
+ .event_attrs = &adt7410_event_attribute_group,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c
new file mode 100644
index 000000000000..dfc9033843a3
--- /dev/null
+++ b/drivers/staging/iio/adc/lpc32xx_adc.c
@@ -0,0 +1,237 @@
+/*
+ * lpc32xx_adc.c - Support for ADC in LPC32XX
+ *
+ * 3-channel, 10-bit ADC
+ *
+ * Copyright (C) 2011, 2012 Roland Stigge <stigge@antcom.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/completion.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * LPC32XX registers definitions
+ */
+#define LPC32XX_ADC_SELECT(x) ((x) + 0x04)
+#define LPC32XX_ADC_CTRL(x) ((x) + 0x08)
+#define LPC32XX_ADC_VALUE(x) ((x) + 0x48)
+
+/* Bit definitions for LPC32XX_ADC_SELECT: */
+#define AD_REFm 0x00000200 /* constant, always write this value! */
+#define AD_REFp 0x00000080 /* constant, always write this value! */
+#define AD_IN 0x00000010 /* multiple of this is the */
+ /* channel number: 0, 1, 2 */
+#define AD_INTERNAL 0x00000004 /* constant, always write this value! */
+
+/* Bit definitions for LPC32XX_ADC_CTRL: */
+#define AD_STROBE 0x00000002
+#define AD_PDN_CTRL 0x00000004
+
+/* Bit definitions for LPC32XX_ADC_VALUE: */
+#define ADC_VALUE_MASK 0x000003FF
+
+#define MOD_NAME "lpc32xx-adc"
+
+struct lpc32xx_adc_info {
+ void __iomem *adc_base;
+ struct clk *clk;
+ struct completion completion;
+
+ u32 value;
+};
+
+static int lpc32xx_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ struct lpc32xx_adc_info *info = iio_priv(indio_dev);
+
+ if (mask == 0) {
+ mutex_lock(&indio_dev->mlock);
+ clk_enable(info->clk);
+ /* Measurement setup */
+ __raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm,
+ LPC32XX_ADC_SELECT(info->adc_base));
+ /* Trigger conversion */
+ __raw_writel(AD_PDN_CTRL | AD_STROBE,
+ LPC32XX_ADC_CTRL(info->adc_base));
+ wait_for_completion(&info->completion); /* set by ISR */
+ clk_disable(info->clk);
+ *val = info->value;
+ mutex_unlock(&indio_dev->mlock);
+
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info lpc32xx_adc_iio_info = {
+ .read_raw = &lpc32xx_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+#define LPC32XX_ADC_CHANNEL(_index) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _index, \
+ .address = AD_IN * _index, \
+ .scan_index = _index, \
+}
+
+static struct iio_chan_spec lpc32xx_adc_iio_channels[] = {
+ LPC32XX_ADC_CHANNEL(0),
+ LPC32XX_ADC_CHANNEL(1),
+ LPC32XX_ADC_CHANNEL(2),
+};
+
+static irqreturn_t lpc32xx_adc_isr(int irq, void *dev_id)
+{
+ struct lpc32xx_adc_info *info = (struct lpc32xx_adc_info *) dev_id;
+
+ /* Read value and clear irq */
+ info->value = __raw_readl(LPC32XX_ADC_VALUE(info->adc_base)) &
+ ADC_VALUE_MASK;
+ complete(&info->completion);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit lpc32xx_adc_probe(struct platform_device *pdev)
+{
+ struct lpc32xx_adc_info *info = NULL;
+ struct resource *res;
+ int retval = -ENODEV;
+ struct iio_dev *iodev = NULL;
+ int irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get platform I/O memory\n");
+ retval = -EBUSY;
+ goto errout1;
+ }
+
+ iodev = iio_allocate_device(sizeof(struct lpc32xx_adc_info));
+ if (!iodev) {
+ dev_err(&pdev->dev, "failed allocating iio device\n");
+ retval = -ENOMEM;
+ goto errout1;
+ }
+
+ info = iio_priv(iodev);
+
+ info->adc_base = ioremap(res->start, resource_size(res));
+ if (!info->adc_base) {
+ dev_err(&pdev->dev, "failed mapping memory\n");
+ retval = -EBUSY;
+ goto errout2;
+ }
+
+ info->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(info->clk)) {
+ dev_err(&pdev->dev, "failed getting clock\n");
+ retval = PTR_ERR(info->clk);
+ goto errout3;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if ((irq < 0) || (irq >= NR_IRQS)) {
+ dev_err(&pdev->dev, "failed getting interrupt resource\n");
+ retval = -EINVAL;
+ goto errout4;
+ }
+
+ retval = request_irq(irq, lpc32xx_adc_isr, 0, MOD_NAME, info);
+ if (retval < 0) {
+ dev_err(&pdev->dev, "failed requesting interrupt\n");
+ goto errout4;
+ }
+
+ platform_set_drvdata(pdev, iodev);
+
+ init_completion(&info->completion);
+
+ iodev->name = MOD_NAME;
+ iodev->dev.parent = &pdev->dev;
+ iodev->info = &lpc32xx_adc_iio_info;
+ iodev->modes = INDIO_DIRECT_MODE;
+ iodev->channels = lpc32xx_adc_iio_channels;
+ iodev->num_channels = ARRAY_SIZE(lpc32xx_adc_iio_channels);
+
+ retval = iio_device_register(iodev);
+ if (retval)
+ goto errout5;
+
+ dev_info(&pdev->dev, "LPC32XX ADC driver loaded, IRQ %d\n", irq);
+
+ return 0;
+
+errout5:
+ free_irq(irq, info);
+errout4:
+ clk_put(info->clk);
+errout3:
+ iounmap(info->adc_base);
+errout2:
+ iio_free_device(iodev);
+errout1:
+ return retval;
+}
+
+static int __devexit lpc32xx_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *iodev = platform_get_drvdata(pdev);
+ struct lpc32xx_adc_info *info = iio_priv(iodev);
+ int irq = platform_get_irq(pdev, 0);
+
+ iio_device_unregister(iodev);
+ free_irq(irq, info);
+ platform_set_drvdata(pdev, NULL);
+ clk_put(info->clk);
+ iounmap(info->adc_base);
+ iio_free_device(iodev);
+
+ return 0;
+}
+
+static struct platform_driver lpc32xx_adc_driver = {
+ .probe = lpc32xx_adc_probe,
+ .remove = __devexit_p(lpc32xx_adc_remove),
+ .driver = {
+ .name = MOD_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(lpc32xx_adc_driver);
+
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("LPC32XX ADC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/adc/max1363_core.c b/drivers/staging/iio/adc/max1363_core.c
index b92cb4af18ce..cf3e2ca7e314 100644
--- a/drivers/staging/iio/adc/max1363_core.c
+++ b/drivers/staging/iio/adc/max1363_core.c
@@ -341,7 +341,7 @@ static struct iio_chan_spec max1361_channels[] =
static struct iio_chan_spec max1363_channels[] =
MAX1363_4X_CHANS(12, MAX1363_EV_M);
-/* Appies to max1236, max1237 */
+/* Applies to max1236, max1237 */
static const enum max1363_modes max1236_mode_list[] = {
_s0, _s1, _s2, _s3,
s0to1, s0to2, s0to3,
@@ -543,9 +543,9 @@ static int max1363_read_thresh(struct iio_dev *indio_dev,
{
struct max1363_state *st = iio_priv(indio_dev);
if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_FALLING)
- *val = st->thresh_low[IIO_EVENT_CODE_EXTRACT_NUM(event_code)];
+ *val = st->thresh_low[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)];
else
- *val = st->thresh_high[IIO_EVENT_CODE_EXTRACT_NUM(event_code)];
+ *val = st->thresh_high[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)];
return 0;
}
@@ -568,10 +568,10 @@ static int max1363_write_thresh(struct iio_dev *indio_dev,
switch (IIO_EVENT_CODE_EXTRACT_DIR(event_code)) {
case IIO_EV_DIR_FALLING:
- st->thresh_low[IIO_EVENT_CODE_EXTRACT_NUM(event_code)] = val;
+ st->thresh_low[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)] = val;
break;
case IIO_EV_DIR_RISING:
- st->thresh_high[IIO_EVENT_CODE_EXTRACT_NUM(event_code)] = val;
+ st->thresh_high[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)] = val;
break;
}
@@ -622,7 +622,7 @@ static int max1363_read_event_config(struct iio_dev *indio_dev,
struct max1363_state *st = iio_priv(indio_dev);
int val;
- int number = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+ int number = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
mutex_lock(&indio_dev->mlock);
if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_FALLING)
val = (1 << number) & st->mask_low;
@@ -775,7 +775,7 @@ static int max1363_write_event_config(struct iio_dev *indio_dev,
int ret = 0;
struct max1363_state *st = iio_priv(indio_dev);
u16 unifiedmask;
- int number = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+ int number = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
mutex_lock(&indio_dev->mlock);
unifiedmask = st->mask_low | st->mask_high;
@@ -1245,10 +1245,31 @@ static int max1363_initial_setup(struct max1363_state *st)
return max1363_set_scan_mode(st);
}
+static int __devinit max1363_alloc_scan_masks(struct iio_dev *indio_dev)
+{
+ struct max1363_state *st = iio_priv(indio_dev);
+ unsigned long *masks;
+ int i;
+
+ masks = kzalloc(BITS_TO_LONGS(MAX1363_MAX_CHANNELS)*sizeof(long)*
+ (st->chip_info->num_modes + 1), GFP_KERNEL);
+ if (!masks)
+ return -ENOMEM;
+
+ for (i = 0; i < st->chip_info->num_modes; i++)
+ bitmap_copy(masks + BITS_TO_LONGS(MAX1363_MAX_CHANNELS)*i,
+ max1363_mode_table[st->chip_info->mode_list[i]]
+ .modemask, MAX1363_MAX_CHANNELS);
+
+ indio_dev->available_scan_masks = masks;
+
+ return 0;
+}
+
static int __devinit max1363_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int ret, i;
+ int ret;
struct max1363_state *st;
struct iio_dev *indio_dev;
struct regulator *reg;
@@ -1276,19 +1297,10 @@ static int __devinit max1363_probe(struct i2c_client *client,
st->chip_info = &max1363_chip_info_tbl[id->driver_data];
st->client = client;
- indio_dev->available_scan_masks
- = kzalloc(BITS_TO_LONGS(MAX1363_MAX_CHANNELS)*sizeof(long)*
- (st->chip_info->num_modes + 1), GFP_KERNEL);
- if (!indio_dev->available_scan_masks) {
- ret = -ENOMEM;
+ ret = max1363_alloc_scan_masks(indio_dev);
+ if (ret)
goto error_free_device;
- }
- for (i = 0; i < st->chip_info->num_modes; i++)
- bitmap_copy(indio_dev->available_scan_masks +
- BITS_TO_LONGS(MAX1363_MAX_CHANNELS)*i,
- max1363_mode_table[st->chip_info->mode_list[i]]
- .modemask, MAX1363_MAX_CHANNELS);
/* Estabilish that the iio_dev is a child of the i2c device */
indio_dev->dev.parent = &client->dev;
indio_dev->name = id->name;
diff --git a/drivers/staging/iio/adc/max1363_ring.c b/drivers/staging/iio/adc/max1363_ring.c
index f730b3fb971a..d0a60a382930 100644
--- a/drivers/staging/iio/adc/max1363_ring.c
+++ b/drivers/staging/iio/adc/max1363_ring.c
@@ -116,8 +116,6 @@ int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_deallocate_sw_rb;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
/* Ring buffer functions - here trigger setup related */
indio_dev->setup_ops = &max1363_ring_setup_ops;
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index 2c03a39220e8..9e128dd7d457 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -125,30 +125,14 @@ static const struct i2c_device_id adt7316_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, adt7316_i2c_id);
-#ifdef CONFIG_PM
-static int adt7316_i2c_suspend(struct i2c_client *client, pm_message_t message)
-{
- return adt7316_disable(&client->dev);
-}
-
-static int adt7316_i2c_resume(struct i2c_client *client)
-{
- return adt7316_enable(&client->dev);
-}
-#else
-# define adt7316_i2c_suspend NULL
-# define adt7316_i2c_resume NULL
-#endif
-
static struct i2c_driver adt7316_driver = {
.driver = {
.name = "adt7316",
+ .pm = ADT7316_PM_OPS,
.owner = THIS_MODULE,
},
.probe = adt7316_i2c_probe,
.remove = __devexit_p(adt7316_i2c_remove),
- .suspend = adt7316_i2c_suspend,
- .resume = adt7316_i2c_resume,
.id_table = adt7316_i2c_id,
};
module_i2c_driver(adt7316_driver);
diff --git a/drivers/staging/iio/addac/adt7316-spi.c b/drivers/staging/iio/addac/adt7316-spi.c
index 1ea3cd06299d..985f7d8a6eb2 100644
--- a/drivers/staging/iio/addac/adt7316-spi.c
+++ b/drivers/staging/iio/addac/adt7316-spi.c
@@ -133,30 +133,14 @@ static const struct spi_device_id adt7316_spi_id[] = {
MODULE_DEVICE_TABLE(spi, adt7316_spi_id);
-#ifdef CONFIG_PM
-static int adt7316_spi_suspend(struct spi_device *spi_dev, pm_message_t message)
-{
- return adt7316_disable(&spi_dev->dev);
-}
-
-static int adt7316_spi_resume(struct spi_device *spi_dev)
-{
- return adt7316_enable(&spi_dev->dev);
-}
-#else
-# define adt7316_spi_suspend NULL
-# define adt7316_spi_resume NULL
-#endif
-
static struct spi_driver adt7316_driver = {
.driver = {
.name = "adt7316",
+ .pm = ADT7316_PM_OPS,
.owner = THIS_MODULE,
},
.probe = adt7316_spi_probe,
.remove = __devexit_p(adt7316_spi_remove),
- .suspend = adt7316_spi_suspend,
- .resume = adt7316_spi_resume,
.id_table = adt7316_spi_id,
};
module_spi_driver(adt7316_driver);
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 13c39292d3f2..fd6a45444058 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -2089,24 +2089,25 @@ static struct attribute_group adt7516_event_attribute_group = {
.name = "events",
};
-#ifdef CONFIG_PM
-int adt7316_disable(struct device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int adt7316_disable(struct device *dev)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
return _adt7316_store_enabled(chip, 0);
}
-EXPORT_SYMBOL(adt7316_disable);
-int adt7316_enable(struct device *dev)
+static int adt7316_enable(struct device *dev)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
return _adt7316_store_enabled(chip, 1);
}
-EXPORT_SYMBOL(adt7316_enable);
+
+SIMPLE_DEV_PM_OPS(adt7316_pm_ops, adt7316_disable, adt7316_enable);
+EXPORT_SYMBOL_GPL(adt7316_pm_ops);
#endif
static const struct iio_info adt7316_info = {
diff --git a/drivers/staging/iio/addac/adt7316.h b/drivers/staging/iio/addac/adt7316.h
index d34bd679bb4e..4d3efff46ae7 100644
--- a/drivers/staging/iio/addac/adt7316.h
+++ b/drivers/staging/iio/addac/adt7316.h
@@ -10,6 +10,7 @@
#define _ADT7316_H_
#include <linux/types.h>
+#include <linux/pm.h>
#define ADT7316_REG_MAX_ADDR 0x3F
@@ -23,9 +24,11 @@ struct adt7316_bus {
int (*multi_write) (void *client, u8 first_reg, u8 count, u8 *data);
};
-#ifdef CONFIG_PM
-int adt7316_disable(struct device *dev);
-int adt7316_enable(struct device *dev);
+#ifdef CONFIG_PM_SLEEP
+extern const struct dev_pm_ops adt7316_pm_ops;
+#define ADT7316_PM_OPS (&adt7316_pm_ops)
+#else
+#define ADT7316_PM_OPS NULL
#endif
int adt7316_probe(struct device *dev, struct adt7316_bus *bus, const char *name);
int adt7316_remove(struct device *dev);
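The adt7316 conversion above (and the max517 one later in this series) follow the same dev_pm_ops pattern: suspend/resume take a struct device, so one pm_ops table can serve both the I2C and SPI front ends, and the NULL fallback keeps !CONFIG_PM_SLEEP builds working without #ifdefs at each registration site. A generic sketch for a hypothetical "foo" driver, assuming the same CONFIG_PM_SLEEP guards used here:

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* bring the hardware back up here */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
#define FOO_PM_OPS (&foo_pm_ops)
#else
#define FOO_PM_OPS NULL
#endif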
diff --git a/drivers/staging/iio/buffer.h b/drivers/staging/iio/buffer.h
index 6fb6e64181a5..df2046dcb623 100644
--- a/drivers/staging/iio/buffer.h
+++ b/drivers/staging/iio/buffer.h
@@ -91,8 +91,6 @@ struct iio_buffer {
**/
void iio_buffer_init(struct iio_buffer *buffer);
-void iio_buffer_deinit(struct iio_buffer *buffer);
-
/**
* __iio_update_buffer() - update common elements of buffers
* @buffer: buffer that is the event source
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index b73007dcf4b3..e4a08dc9b6f5 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -167,7 +167,7 @@ static int ad7150_write_event_params(struct iio_dev *indio_dev, u64 event_code)
u16 value;
u8 sens, timeout;
struct ad7150_chip_info *chip = iio_priv(indio_dev);
- int chan = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+ int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
IIO_EV_DIR_RISING);
@@ -279,7 +279,7 @@ static int ad7150_read_event_value(struct iio_dev *indio_dev,
u64 event_code,
int *val)
{
- int chan = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+ int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
struct ad7150_chip_info *chip = iio_priv(indio_dev);
int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
IIO_EV_DIR_RISING);
@@ -309,7 +309,7 @@ static int ad7150_write_event_value(struct iio_dev *indio_dev,
{
int ret;
struct ad7150_chip_info *chip = iio_priv(indio_dev);
- int chan = IIO_EVENT_CODE_EXTRACT_NUM(event_code);
+ int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
IIO_EV_DIR_RISING);
@@ -347,7 +347,7 @@ static ssize_t ad7150_show_timeout(struct device *dev,
u8 value;
/* use the event code for consistency reasons */
- int chan = IIO_EVENT_CODE_EXTRACT_NUM(this_attr->address);
+ int chan = IIO_EVENT_CODE_EXTRACT_CHAN(this_attr->address);
int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(this_attr->address)
== IIO_EV_DIR_RISING);
@@ -373,7 +373,7 @@ static ssize_t ad7150_store_timeout(struct device *dev,
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7150_chip_info *chip = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int chan = IIO_EVENT_CODE_EXTRACT_NUM(this_attr->address);
+ int chan = IIO_EVENT_CODE_EXTRACT_CHAN(this_attr->address);
int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(this_attr->address) ==
IIO_EV_DIR_RISING);
u8 data;
diff --git a/drivers/staging/iio/consumer.h b/drivers/staging/iio/consumer.h
new file mode 100644
index 000000000000..36a060cd3a21
--- /dev/null
+++ b/drivers/staging/iio/consumer.h
@@ -0,0 +1,96 @@
+/*
+ * Industrial I/O in kernel consumer interface
+ *
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef _IIO_INKERN_CONSUMER_H_
+#define _IIO_INKERN_CONSUMER_H_
+#include "types.h"
+
+struct iio_dev;
+struct iio_chan_spec;
+
+/**
+ * struct iio_channel - everything needed for a consumer to use a channel
+ * @indio_dev: Device on which the channel exists.
+ * @channel: Full description of the channel.
+ */
+struct iio_channel {
+ struct iio_dev *indio_dev;
+ const struct iio_chan_spec *channel;
+};
+
+/**
+ * iio_st_channel_get() - get description of all that is needed to access a channel.
+ * @name: Unique name of the device as provided in the iio_map
+ * with which the desired provider to consumer mapping
+ * was registered.
+ * @consumer_channel: Unique name to identify the channel on the consumer
+ * side. This typically describes the channel's use within
+ * the consumer, e.g. 'battery_voltage'.
+ */
+struct iio_channel *iio_st_channel_get(const char *name,
+ const char *consumer_channel);
+
+/**
+ * iio_st_channel_release() - release a channel obtained via iio_st_channel_get
+ * @chan: The channel to be released.
+ */
+void iio_st_channel_release(struct iio_channel *chan);
+
+/**
+ * iio_st_channel_get_all() - get all channels associated with a client
+ * @name: name of consumer device.
+ *
+ * Returns an array of iio_channel structures, terminated by an entry
+ * with a NULL iio_dev pointer.
+ * This function is used by fairly generic consumers to get all the
+ * channels registered as having this consumer.
+ */
+struct iio_channel *iio_st_channel_get_all(const char *name);
+
+/**
+ * iio_st_channel_release_all() - reverse of iio_st_channel_get_all
+ * @chan: Array of channels to be released.
+ */
+void iio_st_channel_release_all(struct iio_channel *chan);
+
+/**
+ * iio_st_read_channel_raw() - read from a given channel
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ *
+ * Note that raw reads from IIO channels are in ADC counts; scale
+ * must be applied if standard units are required.
+ */
+int iio_st_read_channel_raw(struct iio_channel *chan,
+ int *val);
+
+/**
+ * iio_st_get_channel_type() - get the type of a channel
+ * @channel: The channel being queried.
+ * @type: The type of the channel.
+ *
+ * returns the enum iio_chan_type of the channel
+ */
+int iio_st_get_channel_type(struct iio_channel *channel,
+ enum iio_chan_type *type);
+
+/**
+ * iio_st_read_channel_scale() - read the scale value for a channel
+ * @chan: The channel being queried.
+ * @val: First part of value read back.
+ * @val2: Second part of value read back.
+ *
+ * Note that the return value describes what is in val and val2, e.g.
+ * IIO_VAL_INT_PLUS_MICRO means the value is val + val2/1e6.
+ */
+int iio_st_read_channel_scale(struct iio_channel *chan, int *val,
+ int *val2);
+
+#endif
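A hypothetical in-kernel consumer sketch using this interface. The map name "some-consumer" and channel label "battery_voltage" are placeholders that would come from an iio_map registered by board code, and error handling is abbreviated; IS_ERR()/PTR_ERR() come from <linux/err.h> and IIO_VAL_INT_PLUS_MICRO from the IIO headers:

#include <linux/err.h>
#include "consumer.h"

static int read_battery_uv(int *uv)
{
	struct iio_channel *chan;
	int raw, scale_int, scale_micro, ret;

	chan = iio_st_channel_get("some-consumer", "battery_voltage");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = iio_st_read_channel_raw(chan, &raw);
	if (ret < 0)
		goto out;

	ret = iio_st_read_channel_scale(chan, &scale_int, &scale_micro);
	/* assuming the common IIO_VAL_INT_PLUS_MICRO case */
	if (ret == IIO_VAL_INT_PLUS_MICRO)
		*uv = raw * scale_int +
			(int)((long long)raw * scale_micro / 1000000);
	ret = 0;
out:
	iio_st_channel_release(chan);
	return ret;
}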
diff --git a/drivers/staging/iio/dac/Kconfig b/drivers/staging/iio/dac/Kconfig
index 13e27979df24..a57803a5d1a7 100644
--- a/drivers/staging/iio/dac/Kconfig
+++ b/drivers/staging/iio/dac/Kconfig
@@ -4,11 +4,12 @@
menu "Digital to analog converters"
config AD5064
- tristate "Analog Devices AD5064/64-1/44/24 DAC driver"
+ tristate "Analog Devices AD5064/64-1/65/44/45/24/25, AD5628/48/66/68 DAC driver"
depends on SPI
help
- Say yes here to build support for Analog Devices AD5064, AD5064-1,
- AD5044, AD5024 Digital to Analog Converter.
+ Say yes here to build support for Analog Devices AD5024, AD5025, AD5044,
+ AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5648, AD5666, AD5668 Digital
+ to Analog Converters.
To compile this driver as a module, choose M here: the
module will be called ad5064.
diff --git a/drivers/staging/iio/dac/ad5064.c b/drivers/staging/iio/dac/ad5064.c
index 049a855039c2..06b162745a3e 100644
--- a/drivers/staging/iio/dac/ad5064.c
+++ b/drivers/staging/iio/dac/ad5064.c
@@ -1,5 +1,6 @@
/*
- * AD5064, AD5064-1, AD5044, AD5024 Digital to analog converters driver
+ * AD5024, AD5025, AD5044, AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5648,
+ * AD5666, AD5668 Digital to analog converters driver
*
* Copyright 2011 Analog Devices Inc.
*
@@ -19,7 +20,8 @@
#include "../sysfs.h"
#include "dac.h"
-#define AD5064_DAC_CHANNELS 4
+#define AD5064_MAX_DAC_CHANNELS 8
+#define AD5064_MAX_VREFS 4
#define AD5064_ADDR(x) ((x) << 20)
#define AD5064_CMD(x) ((x) << 24)
@@ -35,7 +37,10 @@
#define AD5064_CMD_CLEAR 0x5
#define AD5064_CMD_LDAC_MASK 0x6
#define AD5064_CMD_RESET 0x7
-#define AD5064_CMD_DAISY_CHAIN_ENABLE 0x8
+#define AD5064_CMD_CONFIG 0x8
+
+#define AD5064_CONFIG_DAISY_CHAIN_ENABLE BIT(1)
+#define AD5064_CONFIG_INT_VREF_ENABLE BIT(0)
#define AD5064_LDAC_PWRDN_NONE 0x0
#define AD5064_LDAC_PWRDN_1K 0x1
@@ -45,12 +50,17 @@
/**
* struct ad5064_chip_info - chip specific information
* @shared_vref: whether the vref supply is shared between channels
+ * @internal_vref: internal reference voltage. 0 if the chip has no internal
+ * vref.
* @channel: channel specification
-*/
+ * @num_channels: number of channels
+ */
struct ad5064_chip_info {
bool shared_vref;
- struct iio_chan_spec channel[AD5064_DAC_CHANNELS];
+ unsigned long internal_vref;
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
};
/**
@@ -61,16 +71,19 @@ struct ad5064_chip_info {
* @pwr_down: whether channel is powered down
* @pwr_down_mode: channel's current power down mode
* @dac_cache: current DAC raw value (chip does not support readback)
+ * @use_internal_vref: set to true if the internal reference voltage should be
+ * used.
* @data: spi transfer buffers
*/
struct ad5064_state {
struct spi_device *spi;
const struct ad5064_chip_info *chip_info;
- struct regulator_bulk_data vref_reg[AD5064_DAC_CHANNELS];
- bool pwr_down[AD5064_DAC_CHANNELS];
- u8 pwr_down_mode[AD5064_DAC_CHANNELS];
- unsigned int dac_cache[AD5064_DAC_CHANNELS];
+ struct regulator_bulk_data vref_reg[AD5064_MAX_VREFS];
+ bool pwr_down[AD5064_MAX_DAC_CHANNELS];
+ u8 pwr_down_mode[AD5064_MAX_DAC_CHANNELS];
+ unsigned int dac_cache[AD5064_MAX_DAC_CHANNELS];
+ bool use_internal_vref;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -81,50 +94,20 @@ struct ad5064_state {
enum ad5064_type {
ID_AD5024,
+ ID_AD5025,
ID_AD5044,
+ ID_AD5045,
ID_AD5064,
ID_AD5064_1,
-};
-
-#define AD5064_CHANNEL(chan, bits) { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .output = 1, \
- .channel = (chan), \
- .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
- .address = AD5064_ADDR_DAC(chan), \
- .scan_type = IIO_ST('u', (bits), 16, 20 - (bits)) \
-}
-
-static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
- [ID_AD5024] = {
- .shared_vref = false,
- .channel[0] = AD5064_CHANNEL(0, 12),
- .channel[1] = AD5064_CHANNEL(1, 12),
- .channel[2] = AD5064_CHANNEL(2, 12),
- .channel[3] = AD5064_CHANNEL(3, 12),
- },
- [ID_AD5044] = {
- .shared_vref = false,
- .channel[0] = AD5064_CHANNEL(0, 14),
- .channel[1] = AD5064_CHANNEL(1, 14),
- .channel[2] = AD5064_CHANNEL(2, 14),
- .channel[3] = AD5064_CHANNEL(3, 14),
- },
- [ID_AD5064] = {
- .shared_vref = false,
- .channel[0] = AD5064_CHANNEL(0, 16),
- .channel[1] = AD5064_CHANNEL(1, 16),
- .channel[2] = AD5064_CHANNEL(2, 16),
- .channel[3] = AD5064_CHANNEL(3, 16),
- },
- [ID_AD5064_1] = {
- .shared_vref = true,
- .channel[0] = AD5064_CHANNEL(0, 16),
- .channel[1] = AD5064_CHANNEL(1, 16),
- .channel[2] = AD5064_CHANNEL(2, 16),
- .channel[3] = AD5064_CHANNEL(3, 16),
- },
+ ID_AD5065,
+ ID_AD5628_1,
+ ID_AD5628_2,
+ ID_AD5648_1,
+ ID_AD5648_2,
+ ID_AD5666_1,
+ ID_AD5666_2,
+ ID_AD5668_1,
+ ID_AD5668_2,
};
static int ad5064_spi_write(struct ad5064_state *st, unsigned int cmd,
@@ -160,22 +143,25 @@ static const char ad5064_powerdown_modes[][15] = {
[AD5064_LDAC_PWRDN_3STATE] = "three_state",
};
-static ssize_t ad5064_read_powerdown_mode(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t ad5064_read_powerdown_mode_available(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, char *buf)
+{
+ return sprintf(buf, "%s %s %s\n", ad5064_powerdown_modes[1],
+ ad5064_powerdown_modes[2], ad5064_powerdown_modes[3]);
+}
+
+static ssize_t ad5064_read_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, char *buf)
{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5064_state *st = iio_priv(indio_dev);
return sprintf(buf, "%s\n",
- ad5064_powerdown_modes[st->pwr_down_mode[this_attr->address]]);
+ ad5064_powerdown_modes[st->pwr_down_mode[chan->channel]]);
}
-static ssize_t ad5064_write_powerdown_mode(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+static ssize_t ad5064_write_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, const char *buf, size_t len)
{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5064_state *st = iio_priv(indio_dev);
unsigned int mode, i;
int ret;
@@ -192,31 +178,26 @@ static ssize_t ad5064_write_powerdown_mode(struct device *dev,
return -EINVAL;
mutex_lock(&indio_dev->mlock);
- st->pwr_down_mode[this_attr->address] = mode;
+ st->pwr_down_mode[chan->channel] = mode;
- ret = ad5064_sync_powerdown_mode(st, this_attr->address);
+ ret = ad5064_sync_powerdown_mode(st, chan->channel);
mutex_unlock(&indio_dev->mlock);
return ret ? ret : len;
}
-static ssize_t ad5064_read_dac_powerdown(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t ad5064_read_dac_powerdown(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, char *buf)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5064_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- return sprintf(buf, "%d\n", st->pwr_down[this_attr->address]);
+ return sprintf(buf, "%d\n", st->pwr_down[chan->channel]);
}
-static ssize_t ad5064_write_dac_powerdown(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+static ssize_t ad5064_write_dac_powerdown(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, const char *buf, size_t len)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5064_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
bool pwr_down;
int ret;
@@ -225,53 +206,24 @@ static ssize_t ad5064_write_dac_powerdown(struct device *dev,
return ret;
mutex_lock(&indio_dev->mlock);
- st->pwr_down[this_attr->address] = pwr_down;
+ st->pwr_down[chan->channel] = pwr_down;
- ret = ad5064_sync_powerdown_mode(st, this_attr->address);
+ ret = ad5064_sync_powerdown_mode(st, chan->channel);
mutex_unlock(&indio_dev->mlock);
return ret ? ret : len;
}
-static IIO_CONST_ATTR(out_voltage_powerdown_mode_available,
- "1kohm_to_gnd 100kohm_to_gnd three_state");
-
-#define IIO_DEV_ATTR_DAC_POWERDOWN_MODE(_chan) \
- IIO_DEVICE_ATTR(out_voltage##_chan##_powerdown_mode, \
- S_IRUGO | S_IWUSR, \
- ad5064_read_powerdown_mode, \
- ad5064_write_powerdown_mode, _chan);
-
-#define IIO_DEV_ATTR_DAC_POWERDOWN(_chan) \
- IIO_DEVICE_ATTR(out_voltage##_chan##_powerdown, \
- S_IRUGO | S_IWUSR, \
- ad5064_read_dac_powerdown, \
- ad5064_write_dac_powerdown, _chan)
-
-static IIO_DEV_ATTR_DAC_POWERDOWN(0);
-static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(0);
-static IIO_DEV_ATTR_DAC_POWERDOWN(1);
-static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(1);
-static IIO_DEV_ATTR_DAC_POWERDOWN(2);
-static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(2);
-static IIO_DEV_ATTR_DAC_POWERDOWN(3);
-static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(3);
-
-static struct attribute *ad5064_attributes[] = {
- &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
- &iio_dev_attr_out_voltage1_powerdown.dev_attr.attr,
- &iio_dev_attr_out_voltage2_powerdown.dev_attr.attr,
- &iio_dev_attr_out_voltage3_powerdown.dev_attr.attr,
- &iio_dev_attr_out_voltage0_powerdown_mode.dev_attr.attr,
- &iio_dev_attr_out_voltage1_powerdown_mode.dev_attr.attr,
- &iio_dev_attr_out_voltage2_powerdown_mode.dev_attr.attr,
- &iio_dev_attr_out_voltage3_powerdown_mode.dev_attr.attr,
- &iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
- NULL,
-};
+static int ad5064_get_vref(struct ad5064_state *st,
+ struct iio_chan_spec const *chan)
+{
+ unsigned int i;
-static const struct attribute_group ad5064_attribute_group = {
- .attrs = ad5064_attributes,
-};
+ if (st->use_internal_vref)
+ return st->chip_info->internal_vref;
+
+ i = st->chip_info->shared_vref ? 0 : chan->channel;
+ return regulator_get_voltage(st->vref_reg[i].consumer);
+}
static int ad5064_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
@@ -280,7 +232,6 @@ static int ad5064_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad5064_state *st = iio_priv(indio_dev);
- unsigned int vref;
int scale_uv;
switch (m) {
@@ -288,8 +239,7 @@ static int ad5064_read_raw(struct iio_dev *indio_dev,
*val = st->dac_cache[chan->channel];
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- vref = st->chip_info->shared_vref ? 0 : chan->channel;
- scale_uv = regulator_get_voltage(st->vref_reg[vref].consumer);
+ scale_uv = ad5064_get_vref(st, chan);
if (scale_uv < 0)
return scale_uv;
@@ -331,13 +281,144 @@ static int ad5064_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ad5064_info = {
.read_raw = ad5064_read_raw,
.write_raw = ad5064_write_raw,
- .attrs = &ad5064_attribute_group,
.driver_module = THIS_MODULE,
};
+static struct iio_chan_spec_ext_info ad5064_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = ad5064_read_dac_powerdown,
+ .write = ad5064_write_dac_powerdown,
+ },
+ {
+ .name = "powerdown_mode",
+ .read = ad5064_read_powerdown_mode,
+ .write = ad5064_write_powerdown_mode,
+ },
+ {
+ .name = "powerdown_mode_available",
+ .shared = true,
+ .read = ad5064_read_powerdown_mode_available,
+ },
+ { },
+};
+
+#define AD5064_CHANNEL(chan, bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (chan), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
+ .address = AD5064_ADDR_DAC(chan), \
+ .scan_type = IIO_ST('u', (bits), 16, 20 - (bits)), \
+ .ext_info = ad5064_ext_info, \
+}
+
+#define DECLARE_AD5064_CHANNELS(name, bits) \
+const struct iio_chan_spec name[] = { \
+ AD5064_CHANNEL(0, bits), \
+ AD5064_CHANNEL(1, bits), \
+ AD5064_CHANNEL(2, bits), \
+ AD5064_CHANNEL(3, bits), \
+ AD5064_CHANNEL(4, bits), \
+ AD5064_CHANNEL(5, bits), \
+ AD5064_CHANNEL(6, bits), \
+ AD5064_CHANNEL(7, bits), \
+}
+
+static DECLARE_AD5064_CHANNELS(ad5024_channels, 12);
+static DECLARE_AD5064_CHANNELS(ad5044_channels, 14);
+static DECLARE_AD5064_CHANNELS(ad5064_channels, 16);
+
+static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
+ [ID_AD5024] = {
+ .shared_vref = false,
+ .channels = ad5024_channels,
+ .num_channels = 4,
+ },
+ [ID_AD5025] = {
+ .shared_vref = false,
+ .channels = ad5024_channels,
+ .num_channels = 2,
+ },
+ [ID_AD5044] = {
+ .shared_vref = false,
+ .channels = ad5044_channels,
+ .num_channels = 4,
+ },
+ [ID_AD5045] = {
+ .shared_vref = false,
+ .channels = ad5044_channels,
+ .num_channels = 2,
+ },
+ [ID_AD5064] = {
+ .shared_vref = false,
+ .channels = ad5064_channels,
+ .num_channels = 4,
+ },
+ [ID_AD5064_1] = {
+ .shared_vref = true,
+ .channels = ad5064_channels,
+ .num_channels = 4,
+ },
+ [ID_AD5065] = {
+ .shared_vref = false,
+ .channels = ad5064_channels,
+ .num_channels = 2,
+ },
+ [ID_AD5628_1] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5024_channels,
+ .num_channels = 8,
+ },
+ [ID_AD5628_2] = {
+ .shared_vref = true,
+ .internal_vref = 5000000,
+ .channels = ad5024_channels,
+ .num_channels = 8,
+ },
+ [ID_AD5648_1] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5044_channels,
+ .num_channels = 8,
+ },
+ [ID_AD5648_2] = {
+ .shared_vref = true,
+ .internal_vref = 5000000,
+ .channels = ad5044_channels,
+ .num_channels = 8,
+ },
+ [ID_AD5666_1] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5064_channels,
+ .num_channels = 4,
+ },
+ [ID_AD5666_2] = {
+ .shared_vref = true,
+ .internal_vref = 5000000,
+ .channels = ad5064_channels,
+ .num_channels = 4,
+ },
+ [ID_AD5668_1] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5064_channels,
+ .num_channels = 8,
+ },
+ [ID_AD5668_2] = {
+ .shared_vref = true,
+ .internal_vref = 5000000,
+ .channels = ad5064_channels,
+ .num_channels = 8,
+ },
+};
+
static inline unsigned int ad5064_num_vref(struct ad5064_state *st)
{
- return st->chip_info->shared_vref ? 1 : AD5064_DAC_CHANNELS;
+ return st->chip_info->shared_vref ? 1 : st->chip_info->num_channels;
}
static const char * const ad5064_vref_names[] = {
@@ -376,14 +457,24 @@ static int __devinit ad5064_probe(struct spi_device *spi)
ret = regulator_bulk_get(&st->spi->dev, ad5064_num_vref(st),
st->vref_reg);
- if (ret)
- goto error_free;
-
- ret = regulator_bulk_enable(ad5064_num_vref(st), st->vref_reg);
- if (ret)
- goto error_free_reg;
+ if (ret) {
+ if (!st->chip_info->internal_vref)
+ goto error_free;
+ st->use_internal_vref = true;
+ ret = ad5064_spi_write(st, AD5064_CMD_CONFIG, 0,
+ AD5064_CONFIG_INT_VREF_ENABLE, 0);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable internal vref: %d\n",
+ ret);
+ goto error_free;
+ }
+ } else {
+ ret = regulator_bulk_enable(ad5064_num_vref(st), st->vref_reg);
+ if (ret)
+ goto error_free_reg;
+ }
- for (i = 0; i < AD5064_DAC_CHANNELS; ++i) {
+ for (i = 0; i < st->chip_info->num_channels; ++i) {
st->pwr_down_mode[i] = AD5064_LDAC_PWRDN_1K;
st->dac_cache[i] = 0x8000;
}
@@ -392,8 +483,8 @@ static int __devinit ad5064_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->info = &ad5064_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = st->chip_info->channel;
- indio_dev->num_channels = AD5064_DAC_CHANNELS;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = st->chip_info->num_channels;
ret = iio_device_register(indio_dev);
if (ret)
@@ -402,9 +493,11 @@ static int __devinit ad5064_probe(struct spi_device *spi)
return 0;
error_disable_reg:
- regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
+ if (!st->use_internal_vref)
+ regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
error_free_reg:
- regulator_bulk_free(ad5064_num_vref(st), st->vref_reg);
+ if (!st->use_internal_vref)
+ regulator_bulk_free(ad5064_num_vref(st), st->vref_reg);
error_free:
iio_free_device(indio_dev);
@@ -419,8 +512,10 @@ static int __devexit ad5064_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
- regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
- regulator_bulk_free(ad5064_num_vref(st), st->vref_reg);
+ if (!st->use_internal_vref) {
+ regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
+ regulator_bulk_free(ad5064_num_vref(st), st->vref_reg);
+ }
iio_free_device(indio_dev);
@@ -429,9 +524,21 @@ static int __devexit ad5064_remove(struct spi_device *spi)
static const struct spi_device_id ad5064_id[] = {
{"ad5024", ID_AD5024},
+ {"ad5025", ID_AD5025},
{"ad5044", ID_AD5044},
+ {"ad5045", ID_AD5045},
{"ad5064", ID_AD5064},
{"ad5064-1", ID_AD5064_1},
+ {"ad5065", ID_AD5065},
+ {"ad5628-1", ID_AD5628_1},
+ {"ad5628-2", ID_AD5628_2},
+ {"ad5648-1", ID_AD5648_1},
+ {"ad5648-2", ID_AD5648_2},
+ {"ad5666-1", ID_AD5666_1},
+ {"ad5666-2", ID_AD5666_2},
+ {"ad5668-1", ID_AD5668_1},
+ {"ad5668-2", ID_AD5668_2},
+ {"ad5668-3", ID_AD5668_2}, /* similar enough to ad5668-2 */
{}
};
MODULE_DEVICE_TABLE(spi, ad5064_id);
@@ -448,5 +555,5 @@ static struct spi_driver ad5064_driver = {
module_spi_driver(ad5064_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
-MODULE_DESCRIPTION("Analog Devices AD5064/64-1/44/24 DAC");
+MODULE_DESCRIPTION("Analog Devices AD5024/25/44/45/64/64-1/65, AD5628/48/66/68 DAC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dac/ad5360.c b/drivers/staging/iio/dac/ad5360.c
index 710b256affcc..cec3693b50a3 100644
--- a/drivers/staging/iio/dac/ad5360.c
+++ b/drivers/staging/iio/dac/ad5360.c
@@ -439,8 +439,8 @@ static int __devinit ad5360_alloc_channels(struct iio_dev *indio_dev)
struct iio_chan_spec *channels;
unsigned int i;
- channels = kcalloc(sizeof(struct iio_chan_spec),
- st->chip_info->num_channels, GFP_KERNEL);
+ channels = kcalloc(st->chip_info->num_channels,
+ sizeof(struct iio_chan_spec), GFP_KERNEL);
if (!channels)
return -ENOMEM;
diff --git a/drivers/staging/iio/dac/ad5380.c b/drivers/staging/iio/dac/ad5380.c
index eff97ae05c4b..4c50716fa801 100644
--- a/drivers/staging/iio/dac/ad5380.c
+++ b/drivers/staging/iio/dac/ad5380.c
@@ -363,8 +363,8 @@ static int __devinit ad5380_alloc_channels(struct iio_dev *indio_dev)
struct iio_chan_spec *channels;
unsigned int i;
- channels = kcalloc(sizeof(struct iio_chan_spec),
- st->chip_info->num_channels, GFP_KERNEL);
+ channels = kcalloc(st->chip_info->num_channels,
+ sizeof(struct iio_chan_spec), GFP_KERNEL);
if (!channels)
return -ENOMEM;
diff --git a/drivers/staging/iio/dac/ad5421.c b/drivers/staging/iio/dac/ad5421.c
index 71ee86824763..0b040b204697 100644
--- a/drivers/staging/iio/dac/ad5421.c
+++ b/drivers/staging/iio/dac/ad5421.c
@@ -536,18 +536,7 @@ static struct spi_driver ad5421_driver = {
.probe = ad5421_probe,
.remove = __devexit_p(ad5421_remove),
};
-
-static __init int ad5421_init(void)
-{
- return spi_register_driver(&ad5421_driver);
-}
-module_init(ad5421_init);
-
-static __exit void ad5421_exit(void)
-{
- spi_unregister_driver(&ad5421_driver);
-}
-module_exit(ad5421_exit);
+module_spi_driver(ad5421_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices AD5421 DAC");
diff --git a/drivers/staging/iio/dac/ad5446.c b/drivers/staging/iio/dac/ad5446.c
index 693e7482524c..633ffbb21814 100644
--- a/drivers/staging/iio/dac/ad5446.c
+++ b/drivers/staging/iio/dac/ad5446.c
@@ -149,30 +149,8 @@ static struct attribute *ad5446_attributes[] = {
NULL,
};
-static umode_t ad5446_attr_is_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5446_state *st = iio_priv(indio_dev);
-
- umode_t mode = attr->mode;
-
- if (!st->chip_info->store_pwr_down &&
- (attr == &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr ||
- attr == &iio_dev_attr_out_voltage_powerdown_mode.
- dev_attr.attr ||
- attr ==
- &iio_const_attr_out_voltage_powerdown_mode_available.
- dev_attr.attr))
- mode = 0;
-
- return mode;
-}
-
static const struct attribute_group ad5446_attribute_group = {
.attrs = ad5446_attributes,
- .is_visible = ad5446_attr_is_visible,
};
#define AD5446_CHANNEL(bits, storage, shift) { \
@@ -321,6 +299,12 @@ static const struct iio_info ad5446_info = {
.driver_module = THIS_MODULE,
};
+static const struct iio_info ad5446_info_no_pwr_down = {
+ .read_raw = ad5446_read_raw,
+ .write_raw = ad5446_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
static int __devinit ad5446_probe(struct spi_device *spi)
{
struct ad5446_state *st;
@@ -350,10 +334,13 @@ static int __devinit ad5446_probe(struct spi_device *spi)
st->reg = reg;
st->spi = spi;
- /* Estabilish that the iio_dev is a child of the spi device */
+ /* Establish that the iio_dev is a child of the spi device */
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(spi)->name;
- indio_dev->info = &ad5446_info;
+ if (st->chip_info->store_pwr_down)
+ indio_dev->info = &ad5446_info;
+ else
+ indio_dev->info = &ad5446_info_no_pwr_down;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = &st->chip_info->channel;
indio_dev->num_channels = 1;
diff --git a/drivers/staging/iio/dac/ad5686.c b/drivers/staging/iio/dac/ad5686.c
index ce2d6193dd89..2415a6e60c77 100644
--- a/drivers/staging/iio/dac/ad5686.c
+++ b/drivers/staging/iio/dac/ad5686.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/regulator/consumer.h>
-#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
diff --git a/drivers/staging/iio/dac/ad5764.c b/drivers/staging/iio/dac/ad5764.c
index ff91480ae65c..f73a73079490 100644
--- a/drivers/staging/iio/dac/ad5764.c
+++ b/drivers/staging/iio/dac/ad5764.c
@@ -375,18 +375,7 @@ static struct spi_driver ad5764_driver = {
.remove = __devexit_p(ad5764_remove),
.id_table = ad5764_ids,
};
-
-static int __init ad5764_spi_init(void)
-{
- return spi_register_driver(&ad5764_driver);
-}
-module_init(ad5764_spi_init);
-
-static void __exit ad5764_spi_exit(void)
-{
- spi_unregister_driver(&ad5764_driver);
-}
-module_exit(ad5764_spi_exit);
+module_spi_driver(ad5764_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices AD5744/AD5744R/AD5764/AD5764R DAC");
diff --git a/drivers/staging/iio/dac/max517.c b/drivers/staging/iio/dac/max517.c
index a4df6d7443c1..41483c72cec1 100644
--- a/drivers/staging/iio/dac/max517.c
+++ b/drivers/staging/iio/dac/max517.c
@@ -179,20 +179,27 @@ static struct attribute_group max518_attribute_group = {
.attrs = max518_attributes,
};
-static int max517_suspend(struct i2c_client *client, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int max517_suspend(struct device *dev)
{
u8 outbuf = COMMAND_PD;
- return i2c_master_send(client, &outbuf, 1);
+ return i2c_master_send(to_i2c_client(dev), &outbuf, 1);
}
-static int max517_resume(struct i2c_client *client)
+static int max517_resume(struct device *dev)
{
u8 outbuf = 0;
- return i2c_master_send(client, &outbuf, 1);
+ return i2c_master_send(to_i2c_client(dev), &outbuf, 1);
}
+static SIMPLE_DEV_PM_OPS(max517_pm_ops, max517_suspend, max517_resume);
+#define MAX517_PM_OPS (&max517_pm_ops)
+#else
+#define MAX517_PM_OPS NULL
+#endif
+
static const struct iio_info max517_info = {
.attrs = &max517_attribute_group,
.driver_module = THIS_MODULE,
@@ -273,11 +280,10 @@ MODULE_DEVICE_TABLE(i2c, max517_id);
static struct i2c_driver max517_driver = {
.driver = {
.name = MAX517_DRV_NAME,
+ .pm = MAX517_PM_OPS,
},
.probe = max517_probe,
.remove = max517_remove,
- .suspend = max517_suspend,
- .resume = max517_resume,
.id_table = max517_id,
};
module_i2c_driver(max517_driver);
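
The max517 conversion moves power management from the legacy i2c_driver suspend/resume hooks to dev_pm_ops, whose callbacks take a struct device. SIMPLE_DEV_PM_OPS also reuses the same pair for the hibernation transitions; it expands roughly to:

    /* SIMPLE_DEV_PM_OPS(max517_pm_ops, max517_suspend, max517_resume): */
    static const struct dev_pm_ops max517_pm_ops = {
            .suspend  = max517_suspend,  .resume  = max517_resume,
            .freeze   = max517_suspend,  .thaw    = max517_resume,
            .poweroff = max517_suspend,  .restore = max517_resume,
    };

so the part is powered down across hibernation as well, for free.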
diff --git a/drivers/staging/iio/dds/ad9834.c b/drivers/staging/iio/dds/ad9834.c
index 5e67104fea18..38a2de08626f 100644
--- a/drivers/staging/iio/dds/ad9834.c
+++ b/drivers/staging/iio/dds/ad9834.c
@@ -281,29 +281,27 @@ static struct attribute *ad9834_attributes[] = {
NULL,
};
-static umode_t ad9834_attr_is_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad9834_state *st = iio_priv(indio_dev);
-
- umode_t mode = attr->mode;
-
- if (((st->devid == ID_AD9833) || (st->devid == ID_AD9837)) &&
- ((attr == &iio_dev_attr_dds0_out1_enable.dev_attr.attr) ||
- (attr == &iio_dev_attr_dds0_out1_wavetype.dev_attr.attr) ||
- (attr ==
- &iio_dev_attr_dds0_out1_wavetype_available.dev_attr.attr) ||
- (attr == &iio_dev_attr_dds0_pincontrol_en.dev_attr.attr)))
- mode = 0;
-
- return mode;
-}
+static struct attribute *ad9833_attributes[] = {
+ &iio_dev_attr_dds0_freq0.dev_attr.attr,
+ &iio_dev_attr_dds0_freq1.dev_attr.attr,
+ &iio_const_attr_dds0_freq_scale.dev_attr.attr,
+ &iio_dev_attr_dds0_phase0.dev_attr.attr,
+ &iio_dev_attr_dds0_phase1.dev_attr.attr,
+ &iio_const_attr_dds0_phase_scale.dev_attr.attr,
+ &iio_dev_attr_dds0_freqsymbol.dev_attr.attr,
+ &iio_dev_attr_dds0_phasesymbol.dev_attr.attr,
+ &iio_dev_attr_dds0_out_enable.dev_attr.attr,
+ &iio_dev_attr_dds0_out0_wavetype.dev_attr.attr,
+ &iio_dev_attr_dds0_out0_wavetype_available.dev_attr.attr,
+ NULL,
+};
static const struct attribute_group ad9834_attribute_group = {
.attrs = ad9834_attributes,
- .is_visible = ad9834_attr_is_visible,
+};
+
+static const struct attribute_group ad9833_attribute_group = {
+ .attrs = ad9833_attributes,
};
static const struct iio_info ad9834_info = {
@@ -311,6 +309,11 @@ static const struct iio_info ad9834_info = {
.driver_module = THIS_MODULE,
};
+static const struct iio_info ad9833_info = {
+ .attrs = &ad9833_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
static int __devinit ad9834_probe(struct spi_device *spi)
{
struct ad9834_platform_data *pdata = spi->dev.platform_data;
@@ -344,7 +347,15 @@ static int __devinit ad9834_probe(struct spi_device *spi)
st->reg = reg;
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(spi)->name;
- indio_dev->info = &ad9834_info;
+ switch (st->devid) {
+ case ID_AD9833:
+ case ID_AD9837:
+ indio_dev->info = &ad9833_info;
+ break;
+ default:
+ indio_dev->info = &ad9834_info;
+ break;
+ }
indio_dev->modes = INDIO_DIRECT_MODE;
/* Setup default messages */
diff --git a/drivers/staging/iio/driver.h b/drivers/staging/iio/driver.h
new file mode 100644
index 000000000000..a4f8b2e05af5
--- /dev/null
+++ b/drivers/staging/iio/driver.h
@@ -0,0 +1,34 @@
+/*
+ * Industrial I/O in kernel access map interface.
+ *
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _IIO_INKERN_H_
+#define _IIO_INKERN_H_
+
+struct iio_map;
+
+/**
+ * iio_map_array_register() - tell the core about in-kernel consumers
+ * @indio_dev: provider device
+ * @map: array of mappings specifying association of channel with client
+ */
+int iio_map_array_register(struct iio_dev *indio_dev,
+ struct iio_map *map);
+
+/**
+ * iio_map_array_unregister() - tell the core to remove consumer mappings
+ * @indio_dev: provider device
+ * @map: array of mappings to remove. Note these must have the same
+ * memory addresses as those originally added, not just equal
+ * parameter values.
+ */
+int iio_map_array_unregister(struct iio_dev *indio_dev,
+ struct iio_map *map);
+
+#endif
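
A sketch of provider-side usage, assuming the struct iio_map layout consumed by inkern.c later in this series (consumer_dev_name, consumer_channel, adc_channel_label); all names below are invented for illustration:

    static struct iio_map foo_maps[] = {
            {
                    .adc_channel_label = "CH0",          /* matches a datasheet_name */
                    .consumer_dev_name = "iio_hwmon.0",  /* hypothetical consumer */
                    .consumer_channel  = "in1",
            },
            { },    /* registration stops at the first NULL consumer_dev_name */
    };

    /* provider probe(): */
    ret = iio_map_array_register(indio_dev, foo_maps);

    /* provider remove(): must pass the very same array, per the comment above */
    iio_map_array_unregister(indio_dev, foo_maps);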
diff --git a/drivers/staging/iio/events.h b/drivers/staging/iio/events.h
index bfb63400fa60..c25f0e3c92e9 100644
--- a/drivers/staging/iio/events.h
+++ b/drivers/staging/iio/events.h
@@ -96,8 +96,10 @@ enum iio_event_direction {
/* Event code number extraction depends on which type of event we have.
* Perhaps review this function in the future*/
-#define IIO_EVENT_CODE_EXTRACT_NUM(mask) ((__s16)(mask & 0xFFFF))
+#define IIO_EVENT_CODE_EXTRACT_CHAN(mask) ((__s16)(mask & 0xFFFF))
+#define IIO_EVENT_CODE_EXTRACT_CHAN2(mask) ((__s16)(((mask) >> 16) & 0xFFFF))
#define IIO_EVENT_CODE_EXTRACT_MODIFIER(mask) ((mask >> 40) & 0xFF)
+#define IIO_EVENT_CODE_EXTRACT_DIFF(mask) (((mask) >> 55) & 0x1)
#endif
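
With the new extractors a handler can decode differential event codes without open-coding the shifts; a minimal sketch:

    static void foo_decode_event(u64 code)
    {
            if (IIO_EVENT_CODE_EXTRACT_DIFF(code))
                    pr_info("differential event on channels %d-%d\n",
                            (int)IIO_EVENT_CODE_EXTRACT_CHAN(code),
                            (int)IIO_EVENT_CODE_EXTRACT_CHAN2(code));
            else
                    pr_info("event on channel %d\n",
                            (int)IIO_EVENT_CODE_EXTRACT_CHAN(code));
    }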
diff --git a/drivers/staging/iio/gyro/adis16260_ring.c b/drivers/staging/iio/gyro/adis16260_ring.c
index 699a6152c409..711f15122a08 100644
--- a/drivers/staging/iio/gyro/adis16260_ring.c
+++ b/drivers/staging/iio/gyro/adis16260_ring.c
@@ -115,8 +115,6 @@ int adis16260_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
- ring->access = &ring_sw_access_funcs;
ring->scan_timestamp = true;
indio_dev->setup_ops = &adis16260_ring_setup_ops;
diff --git a/drivers/staging/iio/iio.h b/drivers/staging/iio/iio.h
index be6ced31f65e..b9cd454f69e2 100644
--- a/drivers/staging/iio/iio.h
+++ b/drivers/staging/iio/iio.h
@@ -26,7 +26,7 @@ enum iio_data_type {
/* Could add the raw attributes as well - allowing buffer only devices */
enum iio_chan_info_enum {
- /* 0 is reserverd for raw attributes */
+ /* 0 is reserved for raw attributes */
IIO_CHAN_INFO_SCALE = 1,
IIO_CHAN_INFO_OFFSET,
IIO_CHAN_INFO_CALIBSCALE,
@@ -88,10 +88,29 @@ enum iio_endian {
IIO_LE,
};
+struct iio_chan_spec;
+struct iio_dev;
+
+/**
+ * struct iio_chan_spec_ext_info - Extended channel info attribute
+ * @name: Info attribute name
+ * @shared: Whether this attribute is shared between all channels.
+ * @read: Read callback for this info attribute, may be NULL.
+ * @write: Write callback for this info attribute, may be NULL.
+ */
+struct iio_chan_spec_ext_info {
+ const char *name;
+ bool shared;
+ ssize_t (*read)(struct iio_dev *, struct iio_chan_spec const *,
+ char *buf);
+ ssize_t (*write)(struct iio_dev *, struct iio_chan_spec const *,
+ const char *buf, size_t len);
+};
+
/**
* struct iio_chan_spec - specification of a single channel
* @type: What type of measurement is the channel making.
- * @channel: What number or name do we wish to asign the channel.
+ * @channel: What number or name do we wish to assign the channel.
* @channel2: If there is a second number for a differential
* channel then this is it. If modified is set then the
* value here specifies the modifier.
@@ -107,11 +126,14 @@ enum iio_endian {
* @info_mask: What information is to be exported about this channel.
* This includes calibbias, scale etc.
* @event_mask: What events can this channel produce.
+ * @ext_info: Array of extended info attributes for this channel.
+ * The array is NULL terminated, the last element should
+ * have its name field set to NULL.
* @extend_name: Allows labeling of channel attributes with an
* informative name. Note this has no effect codes etc,
* unlike modifiers.
* @datasheet_name: A name used in in kernel mapping of channels. It should
- * corrspond to the first name that the channel is referred
+ * correspond to the first name that the channel is referred
* to by in the datasheet (e.g. IND), or the nearest
* possible compound name (e.g. IND-INC).
* @processed_val: Flag to specify the data access attribute should be
@@ -141,6 +163,7 @@ struct iio_chan_spec {
} scan_type;
long info_mask;
long event_mask;
+ const struct iio_chan_spec_ext_info *ext_info;
char *extend_name;
const char *datasheet_name;
unsigned processed_val:1;
@@ -197,12 +220,6 @@ static inline s64 iio_get_time_ns(void)
#define INDIO_ALL_BUFFER_MODES \
(INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE)
-/* Vast majority of this is set by the industrialio subsystem on a
- * call to iio_device_register. */
-#define IIO_VAL_INT 1
-#define IIO_VAL_INT_PLUS_MICRO 2
-#define IIO_VAL_INT_PLUS_NANO 3
-
struct iio_trigger; /* forward declaration */
struct iio_dev;
@@ -226,7 +243,7 @@ struct iio_dev;
* @write_event_config: set if the event is enabled.
* @read_event_value: read a value associated with the event. Meaning
* is event dependant. event_code specifies which event.
- * @write_event_value: write the value associate with the event.
+ * @write_event_value: write the value associated with the event.
* Meaning is event dependent.
* @validate_trigger: function to validate the trigger when the
* current trigger gets changed.
@@ -269,6 +286,9 @@ struct iio_info {
struct iio_trigger *trig);
int (*update_scan_mode)(struct iio_dev *indio_dev,
const unsigned long *scan_mask);
+ int (*debugfs_reg_access)(struct iio_dev *indio_dev,
+ unsigned reg, unsigned writeval,
+ unsigned *readval);
};
/**
@@ -310,11 +330,14 @@ struct iio_buffer_setup_ops {
* @chan_attr_group: [INTERN] group for all attrs in base directory
* @name: [DRIVER] name of the device.
* @info: [DRIVER] callbacks and constant info from driver
+ * @info_exist_lock: [INTERN] lock to prevent use during removal
* @chrdev: [INTERN] associated character device
* @groups: [INTERN] attribute groups
* @groupcounter: [INTERN] index of next attribute group
* @flags: [INTERN] file ops related flags including busy flag.
- **/
+ * @debugfs_dentry: [INTERN] device specific debugfs dentry.
+ * @cached_reg_addr: [INTERN] cached register address for debugfs reads.
+ */
struct iio_dev {
int id;
@@ -327,9 +350,9 @@ struct iio_dev {
struct iio_buffer *buffer;
struct mutex mlock;
- unsigned long *available_scan_masks;
+ const unsigned long *available_scan_masks;
unsigned masklength;
- unsigned long *active_scan_mask;
+ const unsigned long *active_scan_mask;
struct iio_trigger *trig;
struct iio_poll_func *pollfunc;
@@ -340,6 +363,7 @@ struct iio_dev {
struct attribute_group chan_attr_group;
const char *name;
const struct iio_info *info;
+ struct mutex info_exist_lock;
const struct iio_buffer_setup_ops *setup_ops;
struct cdev chrdev;
#define IIO_MAX_GROUPS 6
@@ -347,6 +371,10 @@ struct iio_dev {
int groupcounter;
unsigned long flags;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *debugfs_dentry;
+ unsigned cached_reg_addr;
+#endif
};
/**
@@ -424,4 +452,20 @@ static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
& (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE);
};
+/**
+ * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
+ * @indio_dev: IIO device info structure for device
+ **/
+#if defined(CONFIG_DEBUG_FS)
+static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
+{
+ return indio_dev->debugfs_dentry;
+};
+#else
+static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
+{
+ return NULL;
+};
+#endif
+
#endif /* _INDUSTRIAL_IO_H_ */
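
The new ext_info hook lets a driver hang free-form per-channel sysfs files off a channel without touching the core. A hedged sketch (the foo_* names are invented):

    static ssize_t foo_read_mode(struct iio_dev *indio_dev,
                                 struct iio_chan_spec const *chan, char *buf)
    {
            return sprintf(buf, "normal\n");     /* placeholder value */
    }

    static const struct iio_chan_spec_ext_info foo_ext_info[] = {
            {
                    .name   = "powerdown_mode",
                    .shared = true,              /* one file for all channels */
                    .read   = foo_read_mode,     /* .write omitted: read-only */
            },
            { },    /* terminator: name == NULL */
    };

    /* and in the channel table: .ext_info = foo_ext_info, */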
diff --git a/drivers/staging/iio/iio_core.h b/drivers/staging/iio/iio_core.h
index 107cfb1cbb01..c9dfcba0bac8 100644
--- a/drivers/staging/iio/iio_core.h
+++ b/drivers/staging/iio/iio_core.h
@@ -49,4 +49,8 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
#endif
+int iio_device_register_eventset(struct iio_dev *indio_dev);
+void iio_device_unregister_eventset(struct iio_dev *indio_dev);
+int iio_event_getfd(struct iio_dev *indio_dev);
+
#endif
diff --git a/drivers/staging/iio/iio_dummy_evgen.c b/drivers/staging/iio/iio_dummy_evgen.c
index cdbf289bfe2d..f39f346bf04f 100644
--- a/drivers/staging/iio/iio_dummy_evgen.c
+++ b/drivers/staging/iio/iio_dummy_evgen.c
@@ -32,7 +32,7 @@
* @chip: irq chip we are faking
* @base: base of irq range
* @enabled: mask of which irqs are enabled
- * @inuse: mask of which irqs actually have anyone connected
+ * @inuse: mask of which irqs are connected
* @lock: protect the evgen state
*/
struct iio_dummy_eventgen {
diff --git a/drivers/staging/iio/iio_hwmon.c b/drivers/staging/iio/iio_hwmon.c
new file mode 100644
index 000000000000..a603a5f51f93
--- /dev/null
+++ b/drivers/staging/iio/iio_hwmon.c
@@ -0,0 +1,232 @@
+/* Hwmon client for industrial I/O devices
+ *
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include "consumer.h"
+#include "types.h"
+
+/**
+ * struct iio_hwmon_state - device instance state
+ * @channels: filled with array of channels from iio
+ * @num_channels: number of channels in channels (saves counting twice)
+ * @hwmon_dev: associated hwmon device
+ * @attr_group: the group of attributes
+ * @attrs: null terminated array of attribute pointers.
+ */
+struct iio_hwmon_state {
+ struct iio_channel *channels;
+ int num_channels;
+ struct device *hwmon_dev;
+ struct attribute_group attr_group;
+ struct attribute **attrs;
+};
+
+/*
+ * Assumes that IIO and hwmon operate in the same base units.
+ * This is supposed to be true, but needs verification for
+ * new channel types.
+ */
+static ssize_t iio_hwmon_read_val(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ long result;
+ int val, ret, scaleint, scalepart;
+ struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+ struct iio_hwmon_state *state = dev_get_drvdata(dev);
+
+ /*
+	 * No locking between this pair of reads, so it is theoretically
+	 * possible that the scale changes between them.
+ */
+ ret = iio_st_read_channel_raw(&state->channels[sattr->index],
+ &val);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_st_read_channel_scale(&state->channels[sattr->index],
+ &scaleint, &scalepart);
+ if (ret < 0)
+ return ret;
+ switch (ret) {
+ case IIO_VAL_INT:
+ result = val * scaleint;
+ break;
+ case IIO_VAL_INT_PLUS_MICRO:
+ result = (s64)val * (s64)scaleint +
+ div_s64((s64)val * (s64)scalepart, 1000000LL);
+ break;
+ case IIO_VAL_INT_PLUS_NANO:
+ result = (s64)val * (s64)scaleint +
+ div_s64((s64)val * (s64)scalepart, 1000000000LL);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return sprintf(buf, "%ld\n", result);
+}
+
+static void iio_hwmon_free_attrs(struct iio_hwmon_state *st)
+{
+ int i;
+ struct sensor_device_attribute *a;
+ for (i = 0; i < st->num_channels; i++)
+ if (st->attrs[i]) {
+ a = to_sensor_dev_attr(
+ container_of(st->attrs[i],
+ struct device_attribute,
+ attr));
+ kfree(a);
+ }
+}
+
+static int __devinit iio_hwmon_probe(struct platform_device *pdev)
+{
+ struct iio_hwmon_state *st;
+ struct sensor_device_attribute *a;
+ int ret, i;
+ int in_i = 1, temp_i = 1, curr_i = 1;
+ enum iio_chan_type type;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ st->channels = iio_st_channel_get_all(dev_name(&pdev->dev));
+ if (IS_ERR(st->channels)) {
+ ret = PTR_ERR(st->channels);
+ goto error_free_state;
+ }
+
+ /* count how many attributes we have */
+ while (st->channels[st->num_channels].indio_dev)
+ st->num_channels++;
+
+ st->attrs = kzalloc(sizeof(st->attrs) * (st->num_channels + 1),
+ GFP_KERNEL);
+ if (st->attrs == NULL) {
+ ret = -ENOMEM;
+ goto error_release_channels;
+ }
+ for (i = 0; i < st->num_channels; i++) {
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (a == NULL) {
+ ret = -ENOMEM;
+ goto error_free_attrs;
+ }
+
+ sysfs_attr_init(&a->dev_attr.attr);
+ ret = iio_st_get_channel_type(&st->channels[i], &type);
+ if (ret < 0) {
+ kfree(a);
+ goto error_free_attrs;
+ }
+ switch (type) {
+ case IIO_VOLTAGE:
+ a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
+ "in%d_input",
+ in_i++);
+ break;
+ case IIO_TEMP:
+ a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
+ "temp%d_input",
+ temp_i++);
+ break;
+ case IIO_CURRENT:
+ a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
+ "curr%d_input",
+ curr_i++);
+ break;
+ default:
+ ret = -EINVAL;
+ kfree(a);
+ goto error_free_attrs;
+ }
+ if (a->dev_attr.attr.name == NULL) {
+ kfree(a);
+ ret = -ENOMEM;
+ goto error_free_attrs;
+ }
+ a->dev_attr.show = iio_hwmon_read_val;
+ a->dev_attr.attr.mode = S_IRUGO;
+ a->index = i;
+ st->attrs[i] = &a->dev_attr.attr;
+ }
+
+ st->attr_group.attrs = st->attrs;
+ platform_set_drvdata(pdev, st);
+ ret = sysfs_create_group(&pdev->dev.kobj, &st->attr_group);
+ if (ret < 0)
+ goto error_free_attrs;
+
+ st->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(st->hwmon_dev)) {
+ ret = PTR_ERR(st->hwmon_dev);
+ goto error_remove_group;
+ }
+ return 0;
+
+error_remove_group:
+ sysfs_remove_group(&pdev->dev.kobj, &st->attr_group);
+error_free_attrs:
+ iio_hwmon_free_attrs(st);
+ kfree(st->attrs);
+error_release_channels:
+ iio_st_channel_release_all(st->channels);
+error_free_state:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int __devexit iio_hwmon_remove(struct platform_device *pdev)
+{
+ struct iio_hwmon_state *st = platform_get_drvdata(pdev);
+
+ hwmon_device_unregister(st->hwmon_dev);
+ sysfs_remove_group(&pdev->dev.kobj, &st->attr_group);
+ iio_hwmon_free_attrs(st);
+ kfree(st->attrs);
+ iio_st_channel_release_all(st->channels);
+
+ return 0;
+}
+
+static struct platform_driver __refdata iio_hwmon_driver = {
+ .driver = {
+ .name = "iio_hwmon",
+ .owner = THIS_MODULE,
+ },
+ .probe = iio_hwmon_probe,
+ .remove = __devexit_p(iio_hwmon_remove),
+};
+
+static int iio_inkern_init(void)
+{
+ return platform_driver_register(&iio_hwmon_driver);
+}
+module_init(iio_inkern_init);
+
+static void iio_inkern_exit(void)
+{
+ platform_driver_unregister(&iio_hwmon_driver);
+}
+module_exit(iio_inkern_exit);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
+MODULE_DESCRIPTION("IIO to hwmon driver");
+MODULE_LICENSE("GPL v2");
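
To make the scale handling in iio_hwmon_read_val() concrete, a worked example with invented numbers:

    /* raw val = 1000, scale returned as IIO_VAL_INT_PLUS_MICRO with
     * scaleint = 2, scalepart = 500000, i.e. a scale of 2.500000:
     *
     *   result = 1000 * 2 + (1000 * 500000) / 1000000
     *          = 2000 + 500
     *          = 2500
     *
     * which is reported to hwmon as-is, relying on IIO and hwmon
     * sharing base units (e.g. millivolts for in*_input).
     */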
diff --git a/drivers/staging/iio/iio_simple_dummy_buffer.c b/drivers/staging/iio/iio_simple_dummy_buffer.c
index d6a1c0e82a5b..bb4daf744362 100644
--- a/drivers/staging/iio/iio_simple_dummy_buffer.c
+++ b/drivers/staging/iio/iio_simple_dummy_buffer.c
@@ -142,8 +142,6 @@ int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
}
indio_dev->buffer = buffer;
- /* Tell the core how to access the buffer */
- buffer->access = &kfifo_access_funcs;
/* Enable timestamps by default */
buffer->scan_timestamp = true;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 9a2ca55625f4..cd82b56d58af 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -607,9 +607,6 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
if (!indio_dev->buffer)
return -ENOMEM;
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
-
/* Ring buffer functions - here trigger setup related */
indio_dev->setup_ops = &ad5933_ring_setup_ops;
diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c
index ac22de573f3e..8daa038b23e6 100644
--- a/drivers/staging/iio/imu/adis16400_ring.c
+++ b/drivers/staging/iio/imu/adis16400_ring.c
@@ -187,8 +187,6 @@ int adis16400_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
- ring->access = &ring_sw_access_funcs;
ring->scan_timestamp = true;
indio_dev->setup_ops = &adis16400_ring_setup_ops;
diff --git a/drivers/staging/iio/industrialio-buffer.c b/drivers/staging/iio/industrialio-buffer.c
index d7b1e9e435ae..386ba760f3f1 100644
--- a/drivers/staging/iio/industrialio-buffer.c
+++ b/drivers/staging/iio/industrialio-buffer.c
@@ -489,9 +489,9 @@ ssize_t iio_buffer_show_enable(struct device *dev,
EXPORT_SYMBOL(iio_buffer_show_enable);
/* note NULL used as error indicator as it doesn't make sense. */
-static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
+static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
unsigned int masklength,
- unsigned long *mask)
+ const unsigned long *mask)
{
if (bitmap_empty(mask, masklength))
return NULL;
@@ -554,7 +554,7 @@ EXPORT_SYMBOL(iio_sw_buffer_preenable);
int iio_scan_mask_set(struct iio_dev *indio_dev,
struct iio_buffer *buffer, int bit)
{
- unsigned long *mask;
+ const unsigned long *mask;
unsigned long *trialmask;
trialmask = kmalloc(sizeof(*trialmask)*
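
A side effect of the const-ification is that drivers can keep fixed scan-mask tables in rodata; a minimal sketch, assuming the zero-terminated convention for available_scan_masks:

    static const unsigned long foo_scan_masks[] = {
            0x3,    /* channels 0 and 1 sampled together */
            0xc,    /* channels 2 and 3 sampled together */
            0,      /* terminator */
    };

    /* probe(): */
    indio_dev->available_scan_masks = foo_scan_masks;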
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index 19f897f3c85e..d303bfbff27f 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -22,6 +22,7 @@
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
+#include <linux/debugfs.h>
#include "iio.h"
#include "iio_core.h"
#include "iio_core_trigger.h"
@@ -39,6 +40,8 @@ struct bus_type iio_bus_type = {
};
EXPORT_SYMBOL(iio_bus_type);
+static struct dentry *iio_debugfs_dentry;
+
static const char * const iio_data_type_name[] = {
[IIO_RAW] = "raw",
[IIO_PROCESSED] = "input",
@@ -100,71 +103,6 @@ const struct iio_chan_spec
return NULL;
}
-/**
- * struct iio_detected_event_list - list element for events that have occurred
- * @list: linked list header
- * @ev: the event itself
- */
-struct iio_detected_event_list {
- struct list_head list;
- struct iio_event_data ev;
-};
-
-/**
- * struct iio_event_interface - chrdev interface for an event line
- * @dev: device assocated with event interface
- * @wait: wait queue to allow blocking reads of events
- * @event_list_lock: mutex to protect the list of detected events
- * @det_events: list of detected events
- * @max_events: maximum number of events before new ones are dropped
- * @current_events: number of events in detected list
- * @flags: file operations related flags including busy flag.
- */
-struct iio_event_interface {
- wait_queue_head_t wait;
- struct mutex event_list_lock;
- struct list_head det_events;
- int max_events;
- int current_events;
- struct list_head dev_attr_list;
- unsigned long flags;
- struct attribute_group group;
-};
-
-int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
-{
- struct iio_event_interface *ev_int = indio_dev->event_interface;
- struct iio_detected_event_list *ev;
- int ret = 0;
-
- /* Does anyone care? */
- mutex_lock(&ev_int->event_list_lock);
- if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
- if (ev_int->current_events == ev_int->max_events) {
- mutex_unlock(&ev_int->event_list_lock);
- return 0;
- }
- ev = kmalloc(sizeof(*ev), GFP_KERNEL);
- if (ev == NULL) {
- ret = -ENOMEM;
- mutex_unlock(&ev_int->event_list_lock);
- goto error_ret;
- }
- ev->ev.id = ev_code;
- ev->ev.timestamp = timestamp;
-
- list_add_tail(&ev->list, &ev_int->det_events);
- ev_int->current_events++;
- mutex_unlock(&ev_int->event_list_lock);
- wake_up_interruptible(&ev_int->wait);
- } else
- mutex_unlock(&ev_int->event_list_lock);
-
-error_ret:
- return ret;
-}
-EXPORT_SYMBOL(iio_push_event);
-
/* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
struct device_attribute *attr,
@@ -174,143 +112,189 @@ ssize_t iio_read_const_attr(struct device *dev,
}
EXPORT_SYMBOL(iio_read_const_attr);
-static ssize_t iio_event_chrdev_read(struct file *filep,
- char __user *buf,
- size_t count,
- loff_t *f_ps)
+static int __init iio_init(void)
{
- struct iio_event_interface *ev_int = filep->private_data;
- struct iio_detected_event_list *el;
- size_t len = sizeof(el->ev);
int ret;
- if (count < len)
- return -EINVAL;
-
- mutex_lock(&ev_int->event_list_lock);
- if (list_empty(&ev_int->det_events)) {
- if (filep->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- goto error_mutex_unlock;
- }
- mutex_unlock(&ev_int->event_list_lock);
- /* Blocking on device; waiting for something to be there */
- ret = wait_event_interruptible(ev_int->wait,
- !list_empty(&ev_int
- ->det_events));
- if (ret)
- goto error_ret;
- /* Single access device so no one else can get the data */
- mutex_lock(&ev_int->event_list_lock);
+ /* Register sysfs bus */
+ ret = bus_register(&iio_bus_type);
+ if (ret < 0) {
+ printk(KERN_ERR
+ "%s could not register bus type\n",
+ __FILE__);
+ goto error_nothing;
}
- el = list_first_entry(&ev_int->det_events,
- struct iio_detected_event_list,
- list);
- if (copy_to_user(buf, &(el->ev), len)) {
- ret = -EFAULT;
- goto error_mutex_unlock;
+ ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
+ if (ret < 0) {
+ printk(KERN_ERR "%s: failed to allocate char dev region\n",
+ __FILE__);
+ goto error_unregister_bus_type;
}
- list_del(&el->list);
- ev_int->current_events--;
- mutex_unlock(&ev_int->event_list_lock);
- kfree(el);
- return len;
+ iio_debugfs_dentry = debugfs_create_dir("iio", NULL);
-error_mutex_unlock:
- mutex_unlock(&ev_int->event_list_lock);
-error_ret:
+ return 0;
+error_unregister_bus_type:
+ bus_unregister(&iio_bus_type);
+error_nothing:
return ret;
}
-static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
+static void __exit iio_exit(void)
{
- struct iio_event_interface *ev_int = filep->private_data;
- struct iio_detected_event_list *el, *t;
+ if (iio_devt)
+ unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
+ bus_unregister(&iio_bus_type);
+ debugfs_remove(iio_debugfs_dentry);
+}
- mutex_lock(&ev_int->event_list_lock);
- clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
- /*
- * In order to maintain a clean state for reopening,
- * clear out any awaiting events. The mask will prevent
- * any new __iio_push_event calls running.
- */
- list_for_each_entry_safe(el, t, &ev_int->det_events, list) {
- list_del(&el->list);
- kfree(el);
- }
- ev_int->current_events = 0;
- mutex_unlock(&ev_int->event_list_lock);
+#if defined(CONFIG_DEBUG_FS)
+static int iio_debugfs_open(struct inode *inode, struct file *file)
+{
+ if (inode->i_private)
+ file->private_data = inode->i_private;
return 0;
}
-static const struct file_operations iio_event_chrdev_fileops = {
- .read = iio_event_chrdev_read,
- .release = iio_event_chrdev_release,
- .owner = THIS_MODULE,
- .llseek = noop_llseek,
-};
-
-static int iio_event_getfd(struct iio_dev *indio_dev)
+static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
{
- struct iio_event_interface *ev_int = indio_dev->event_interface;
- int fd;
+ struct iio_dev *indio_dev = file->private_data;
+ char buf[20];
+ unsigned val = 0;
+ ssize_t len;
+ int ret;
- if (ev_int == NULL)
- return -ENODEV;
+ ret = indio_dev->info->debugfs_reg_access(indio_dev,
+ indio_dev->cached_reg_addr,
+ 0, &val);
+ if (ret)
+ dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
- mutex_lock(&ev_int->event_list_lock);
- if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
- mutex_unlock(&ev_int->event_list_lock);
- return -EBUSY;
- }
- mutex_unlock(&ev_int->event_list_lock);
- fd = anon_inode_getfd("iio:event",
- &iio_event_chrdev_fileops, ev_int, O_RDONLY);
- if (fd < 0) {
- mutex_lock(&ev_int->event_list_lock);
- clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
- mutex_unlock(&ev_int->event_list_lock);
- }
- return fd;
+ len = snprintf(buf, sizeof(buf), "0x%X\n", val);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
}
-static int __init iio_init(void)
+static ssize_t iio_debugfs_write_reg(struct file *file,
+ const char __user *userbuf, size_t count, loff_t *ppos)
{
+ struct iio_dev *indio_dev = file->private_data;
+ unsigned reg, val;
+ char buf[80];
int ret;
- /* Register sysfs bus */
- ret = bus_register(&iio_bus_type);
- if (ret < 0) {
- printk(KERN_ERR
- "%s could not register bus type\n",
- __FILE__);
- goto error_nothing;
+ count = min_t(size_t, count, (sizeof(buf)-1));
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ buf[count] = 0;
+
+ ret = sscanf(buf, "%i %i", &reg, &val);
+
+ switch (ret) {
+ case 1:
+ indio_dev->cached_reg_addr = reg;
+ break;
+ case 2:
+ indio_dev->cached_reg_addr = reg;
+ ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
+ val, NULL);
+ if (ret) {
+ dev_err(indio_dev->dev.parent, "%s: write failed\n",
+ __func__);
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
}
- ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
- if (ret < 0) {
- printk(KERN_ERR "%s: failed to allocate char dev region\n",
- __FILE__);
- goto error_unregister_bus_type;
+ return count;
+}
+
+static const struct file_operations iio_debugfs_reg_fops = {
+ .open = iio_debugfs_open,
+ .read = iio_debugfs_read_reg,
+ .write = iio_debugfs_write_reg,
+};
+
+static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
+{
+ debugfs_remove_recursive(indio_dev->debugfs_dentry);
+}
+
+static int iio_device_register_debugfs(struct iio_dev *indio_dev)
+{
+ struct dentry *d;
+
+ if (indio_dev->info->debugfs_reg_access == NULL)
+ return 0;
+
+ if (IS_ERR(iio_debugfs_dentry))
+ return 0;
+
+ indio_dev->debugfs_dentry =
+ debugfs_create_dir(dev_name(&indio_dev->dev),
+ iio_debugfs_dentry);
+ if (IS_ERR(indio_dev->debugfs_dentry))
+ return PTR_ERR(indio_dev->debugfs_dentry);
+
+ if (indio_dev->debugfs_dentry == NULL) {
+ dev_warn(indio_dev->dev.parent,
+ "Failed to create debugfs directory\n");
+ return -EFAULT;
+ }
+
+ d = debugfs_create_file("direct_reg_access", 0644,
+ indio_dev->debugfs_dentry,
+ indio_dev, &iio_debugfs_reg_fops);
+ if (!d) {
+ iio_device_unregister_debugfs(indio_dev);
+ return -ENOMEM;
}
return 0;
+}
+#else
+static int iio_device_register_debugfs(struct iio_dev *indio_dev)
+{
+ return 0;
+}
-error_unregister_bus_type:
- bus_unregister(&iio_bus_type);
-error_nothing:
- return ret;
+static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
+{
}
+#endif /* CONFIG_DEBUG_FS */
-static void __exit iio_exit(void)
+static ssize_t iio_read_channel_ext_info(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- if (iio_devt)
- unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
- bus_unregister(&iio_bus_type);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ const struct iio_chan_spec_ext_info *ext_info;
+
+ ext_info = &this_attr->c->ext_info[this_attr->address];
+
+ return ext_info->read(indio_dev, this_attr->c, buf);
+}
+
+static ssize_t iio_write_channel_ext_info(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ const struct iio_chan_spec_ext_info *ext_info;
+
+ ext_info = &this_attr->c->ext_info[this_attr->address];
+
+ return ext_info->write(indio_dev, this_attr->c, buf, len);
}
static ssize_t iio_read_channel_info(struct device *dev,
@@ -455,7 +439,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
goto error_ret;
}
- if (chan->differential) { /* Differential can not have modifier */
+	if (chan->differential) { /* Differential cannot have a modifier */
if (generic)
name_format
= kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
@@ -592,6 +576,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
int ret, i, attrcount = 0;
+ const struct iio_chan_spec_ext_info *ext_info;
if (chan->channel < 0)
return 0;
@@ -626,6 +611,31 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
goto error_ret;
attrcount++;
}
+
+ if (chan->ext_info) {
+ unsigned int i = 0;
+ for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
+ ret = __iio_add_chan_devattr(ext_info->name,
+ chan,
+ ext_info->read ?
+ &iio_read_channel_ext_info : NULL,
+ ext_info->write ?
+ &iio_write_channel_ext_info : NULL,
+ i,
+ ext_info->shared,
+ &indio_dev->dev,
+ &indio_dev->channel_attr_list);
+ i++;
+ if (ret == -EBUSY && ext_info->shared)
+ continue;
+
+ if (ret)
+ goto error_ret;
+
+ attrcount++;
+ }
+ }
+
ret = attrcount;
error_ret:
return ret;
@@ -663,7 +673,7 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
attrcount = attrcount_orig;
/*
* New channel registration method - relies on the fact a group does
- * not need to be initialized if it is name is NULL.
+	 * not need to be initialized if its name is NULL.
*/
INIT_LIST_HEAD(&indio_dev->channel_attr_list);
if (indio_dev->channels)
@@ -726,295 +736,6 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
kfree(indio_dev->chan_attr_group.attrs);
}
-static const char * const iio_ev_type_text[] = {
- [IIO_EV_TYPE_THRESH] = "thresh",
- [IIO_EV_TYPE_MAG] = "mag",
- [IIO_EV_TYPE_ROC] = "roc",
- [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
- [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
-};
-
-static const char * const iio_ev_dir_text[] = {
- [IIO_EV_DIR_EITHER] = "either",
- [IIO_EV_DIR_RISING] = "rising",
- [IIO_EV_DIR_FALLING] = "falling"
-};
-
-static ssize_t iio_ev_state_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- bool val;
-
- ret = strtobool(buf, &val);
- if (ret < 0)
- return ret;
-
- ret = indio_dev->info->write_event_config(indio_dev,
- this_attr->address,
- val);
- return (ret < 0) ? ret : len;
-}
-
-static ssize_t iio_ev_state_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int val = indio_dev->info->read_event_config(indio_dev,
- this_attr->address);
-
- if (val < 0)
- return val;
- else
- return sprintf(buf, "%d\n", val);
-}
-
-static ssize_t iio_ev_value_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int val, ret;
-
- ret = indio_dev->info->read_event_value(indio_dev,
- this_attr->address, &val);
- if (ret < 0)
- return ret;
-
- return sprintf(buf, "%d\n", val);
-}
-
-static ssize_t iio_ev_value_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- unsigned long val;
- int ret;
-
- if (!indio_dev->info->write_event_value)
- return -EINVAL;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret)
- return ret;
-
- ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
- val);
- if (ret < 0)
- return ret;
-
- return len;
-}
-
-static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan)
-{
- int ret = 0, i, attrcount = 0;
- u64 mask = 0;
- char *postfix;
- if (!chan->event_mask)
- return 0;
-
- for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
- postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
- iio_ev_type_text[i/IIO_EV_DIR_MAX],
- iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
- if (postfix == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- if (chan->modified)
- mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
- i/IIO_EV_DIR_MAX,
- i%IIO_EV_DIR_MAX);
- else if (chan->differential)
- mask = IIO_EVENT_CODE(chan->type,
- 0, 0,
- i%IIO_EV_DIR_MAX,
- i/IIO_EV_DIR_MAX,
- 0,
- chan->channel,
- chan->channel2);
- else
- mask = IIO_UNMOD_EVENT_CODE(chan->type,
- chan->channel,
- i/IIO_EV_DIR_MAX,
- i%IIO_EV_DIR_MAX);
-
- ret = __iio_add_chan_devattr(postfix,
- chan,
- &iio_ev_state_show,
- iio_ev_state_store,
- mask,
- 0,
- &indio_dev->dev,
- &indio_dev->event_interface->
- dev_attr_list);
- kfree(postfix);
- if (ret)
- goto error_ret;
- attrcount++;
- postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
- iio_ev_type_text[i/IIO_EV_DIR_MAX],
- iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
- if (postfix == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = __iio_add_chan_devattr(postfix, chan,
- iio_ev_value_show,
- iio_ev_value_store,
- mask,
- 0,
- &indio_dev->dev,
- &indio_dev->event_interface->
- dev_attr_list);
- kfree(postfix);
- if (ret)
- goto error_ret;
- attrcount++;
- }
- ret = attrcount;
-error_ret:
- return ret;
-}
-
-static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
-{
- struct iio_dev_attr *p, *n;
- list_for_each_entry_safe(p, n,
- &indio_dev->event_interface->
- dev_attr_list, l) {
- kfree(p->dev_attr.attr.name);
- kfree(p);
- }
-}
-
-static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
-{
- int j, ret, attrcount = 0;
-
- INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
- /* Dynically created from the channels array */
- for (j = 0; j < indio_dev->num_channels; j++) {
- ret = iio_device_add_event_sysfs(indio_dev,
- &indio_dev->channels[j]);
- if (ret < 0)
- goto error_clear_attrs;
- attrcount += ret;
- }
- return attrcount;
-
-error_clear_attrs:
- __iio_remove_event_config_attrs(indio_dev);
-
- return ret;
-}
-
-static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
-{
- int j;
-
- for (j = 0; j < indio_dev->num_channels; j++)
- if (indio_dev->channels[j].event_mask != 0)
- return true;
- return false;
-}
-
-static void iio_setup_ev_int(struct iio_event_interface *ev_int)
-{
- mutex_init(&ev_int->event_list_lock);
- /* discussion point - make this variable? */
- ev_int->max_events = 10;
- ev_int->current_events = 0;
- INIT_LIST_HEAD(&ev_int->det_events);
- init_waitqueue_head(&ev_int->wait);
-}
-
-static const char *iio_event_group_name = "events";
-static int iio_device_register_eventset(struct iio_dev *indio_dev)
-{
- struct iio_dev_attr *p;
- int ret = 0, attrcount_orig = 0, attrcount, attrn;
- struct attribute **attr;
-
- if (!(indio_dev->info->event_attrs ||
- iio_check_for_dynamic_events(indio_dev)))
- return 0;
-
- indio_dev->event_interface =
- kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
- if (indio_dev->event_interface == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
-
- iio_setup_ev_int(indio_dev->event_interface);
- if (indio_dev->info->event_attrs != NULL) {
- attr = indio_dev->info->event_attrs->attrs;
- while (*attr++ != NULL)
- attrcount_orig++;
- }
- attrcount = attrcount_orig;
- if (indio_dev->channels) {
- ret = __iio_add_event_config_attrs(indio_dev);
- if (ret < 0)
- goto error_free_setup_event_lines;
- attrcount += ret;
- }
-
- indio_dev->event_interface->group.name = iio_event_group_name;
- indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
- sizeof(indio_dev->event_interface->group.attrs[0]),
- GFP_KERNEL);
- if (indio_dev->event_interface->group.attrs == NULL) {
- ret = -ENOMEM;
- goto error_free_setup_event_lines;
- }
- if (indio_dev->info->event_attrs)
- memcpy(indio_dev->event_interface->group.attrs,
- indio_dev->info->event_attrs->attrs,
- sizeof(indio_dev->event_interface->group.attrs[0])
- *attrcount_orig);
- attrn = attrcount_orig;
- /* Add all elements from the list. */
- list_for_each_entry(p,
- &indio_dev->event_interface->dev_attr_list,
- l)
- indio_dev->event_interface->group.attrs[attrn++] =
- &p->dev_attr.attr;
- indio_dev->groups[indio_dev->groupcounter++] =
- &indio_dev->event_interface->group;
-
- return 0;
-
-error_free_setup_event_lines:
- __iio_remove_event_config_attrs(indio_dev);
- kfree(indio_dev->event_interface);
-error_ret:
-
- return ret;
-}
-
-static void iio_device_unregister_eventset(struct iio_dev *indio_dev)
-{
- if (indio_dev->event_interface == NULL)
- return;
- __iio_remove_event_config_attrs(indio_dev);
- kfree(indio_dev->event_interface->group.attrs);
- kfree(indio_dev->event_interface);
-}
-
static void iio_dev_release(struct device *device)
{
struct iio_dev *indio_dev = container_of(device, struct iio_dev, dev);
@@ -1023,6 +744,7 @@ static void iio_dev_release(struct device *device)
iio_device_unregister_trigger_consumer(indio_dev);
iio_device_unregister_eventset(indio_dev);
iio_device_unregister_sysfs(indio_dev);
+ iio_device_unregister_debugfs(indio_dev);
}
static struct device_type iio_dev_type = {
@@ -1052,6 +774,7 @@ struct iio_dev *iio_allocate_device(int sizeof_priv)
device_initialize(&dev->dev);
dev_set_drvdata(&dev->dev, (void *)dev);
mutex_init(&dev->mlock);
+ mutex_init(&dev->info_exist_lock);
dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
if (dev->id < 0) {
@@ -1131,6 +854,8 @@ static const struct file_operations iio_buffer_fileops = {
.compat_ioctl = iio_ioctl,
};
+static const struct iio_buffer_setup_ops noop_ring_setup_ops;
+
int iio_device_register(struct iio_dev *indio_dev)
{
int ret;
@@ -1138,11 +863,17 @@ int iio_device_register(struct iio_dev *indio_dev)
/* configure elements for the chrdev */
indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id);
+ ret = iio_device_register_debugfs(indio_dev);
+ if (ret) {
+ dev_err(indio_dev->dev.parent,
+ "Failed to register debugfs interfaces\n");
+ goto error_ret;
+ }
ret = iio_device_register_sysfs(indio_dev);
if (ret) {
dev_err(indio_dev->dev.parent,
"Failed to register sysfs interfaces\n");
- goto error_ret;
+ goto error_unreg_debugfs;
}
ret = iio_device_register_eventset(indio_dev);
if (ret) {
@@ -1153,6 +884,10 @@ int iio_device_register(struct iio_dev *indio_dev)
if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
iio_device_register_trigger_consumer(indio_dev);
+ if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
+ indio_dev->setup_ops == NULL)
+ indio_dev->setup_ops = &noop_ring_setup_ops;
+
ret = device_add(&indio_dev->dev);
if (ret < 0)
goto error_unreg_eventset;
@@ -1169,6 +904,8 @@ error_unreg_eventset:
iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
iio_device_unregister_sysfs(indio_dev);
+error_unreg_debugfs:
+ iio_device_unregister_debugfs(indio_dev);
error_ret:
return ret;
}
@@ -1176,6 +913,9 @@ EXPORT_SYMBOL(iio_device_register);
void iio_device_unregister(struct iio_dev *indio_dev)
{
+ mutex_lock(&indio_dev->info_exist_lock);
+ indio_dev->info = NULL;
+ mutex_unlock(&indio_dev->info_exist_lock);
device_unregister(&indio_dev->dev);
}
EXPORT_SYMBOL(iio_device_unregister);
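
A driver opts in to the new direct_reg_access file simply by providing the callback; a hedged sketch in which foo_spi_read()/foo_spi_write() stand in for the driver's own register accessors:

    static int foo_reg_access(struct iio_dev *indio_dev, unsigned reg,
                              unsigned writeval, unsigned *readval)
    {
            struct foo_state *st = iio_priv(indio_dev);

            if (readval == NULL)    /* the core passes NULL readval on writes */
                    return foo_spi_write(st, reg, writeval);
            return foo_spi_read(st, reg, readval);
    }

    static const struct iio_info foo_info = {
            /* ... read_raw etc. ... */
            .debugfs_reg_access = foo_reg_access,
    };

From userspace, writing "<reg> <val>" to direct_reg_access performs a write, while writing "<reg>" alone caches the address so a subsequent read returns that register, matching iio_debugfs_write_reg()/iio_debugfs_read_reg() above.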
diff --git a/drivers/staging/iio/industrialio-event.c b/drivers/staging/iio/industrialio-event.c
new file mode 100644
index 000000000000..5fdf739e38f9
--- /dev/null
+++ b/drivers/staging/iio/industrialio-event.c
@@ -0,0 +1,453 @@
+/* Industrial I/O event handling
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Based on elements of hwmon and input subsystems.
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include "iio.h"
+#include "iio_core.h"
+#include "sysfs.h"
+#include "events.h"
+
+/**
+ * struct iio_event_interface - chrdev interface for an event line
+ * @wait: wait queue to allow blocking reads of events
+ * @det_events: list of detected events
+ * @dev_attr_list: list of event interface sysfs attributes
+ * @flags: file operations related flags including busy flag.
+ * @group: event interface sysfs attribute group
+ */
+struct iio_event_interface {
+ wait_queue_head_t wait;
+ DECLARE_KFIFO(det_events, struct iio_event_data, 16);
+
+ struct list_head dev_attr_list;
+ unsigned long flags;
+ struct attribute_group group;
+};
+
+int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
+{
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
+ struct iio_event_data ev;
+ int copied;
+
+ /* Does anyone care? */
+ spin_lock(&ev_int->wait.lock);
+ if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+
+ ev.id = ev_code;
+ ev.timestamp = timestamp;
+
+ copied = kfifo_put(&ev_int->det_events, &ev);
+ if (copied != 0)
+ wake_up_locked_poll(&ev_int->wait, POLLIN);
+ }
+ spin_unlock(&ev_int->wait.lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_push_event);
+
+/**
+ * iio_event_poll() - poll the event queue to find out if it has data
+ */
+static unsigned int iio_event_poll(struct file *filep,
+ struct poll_table_struct *wait)
+{
+ struct iio_event_interface *ev_int = filep->private_data;
+ unsigned int events = 0;
+
+ poll_wait(filep, &ev_int->wait, wait);
+
+ spin_lock(&ev_int->wait.lock);
+ if (!kfifo_is_empty(&ev_int->det_events))
+ events = POLLIN | POLLRDNORM;
+ spin_unlock(&ev_int->wait.lock);
+
+ return events;
+}
+
+static ssize_t iio_event_chrdev_read(struct file *filep,
+ char __user *buf,
+ size_t count,
+ loff_t *f_ps)
+{
+ struct iio_event_interface *ev_int = filep->private_data;
+ unsigned int copied;
+ int ret;
+
+ if (count < sizeof(struct iio_event_data))
+ return -EINVAL;
+
+ spin_lock(&ev_int->wait.lock);
+ if (kfifo_is_empty(&ev_int->det_events)) {
+ if (filep->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto error_unlock;
+ }
+ /* Blocking on device; waiting for something to be there */
+ ret = wait_event_interruptible_locked(ev_int->wait,
+ !kfifo_is_empty(&ev_int->det_events));
+ if (ret)
+ goto error_unlock;
+ /* Single access device so no one else can get the data */
+ }
+
+ ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
+
+error_unlock:
+ spin_unlock(&ev_int->wait.lock);
+
+ return ret ? ret : copied;
+}
+
+static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
+{
+ struct iio_event_interface *ev_int = filep->private_data;
+
+ spin_lock(&ev_int->wait.lock);
+ __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
+ /*
+ * In order to maintain a clean state for reopening,
+	 * clear out any pending events. The cleared busy bit will prevent
+	 * any new iio_push_event calls from queueing events.
+ */
+ kfifo_reset_out(&ev_int->det_events);
+ spin_unlock(&ev_int->wait.lock);
+
+ return 0;
+}
+
+static const struct file_operations iio_event_chrdev_fileops = {
+ .read = iio_event_chrdev_read,
+ .poll = iio_event_poll,
+ .release = iio_event_chrdev_release,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+};
+
+int iio_event_getfd(struct iio_dev *indio_dev)
+{
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
+ int fd;
+
+ if (ev_int == NULL)
+ return -ENODEV;
+
+ spin_lock(&ev_int->wait.lock);
+ if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+ spin_unlock(&ev_int->wait.lock);
+ return -EBUSY;
+ }
+ spin_unlock(&ev_int->wait.lock);
+ fd = anon_inode_getfd("iio:event",
+ &iio_event_chrdev_fileops, ev_int, O_RDONLY);
+ if (fd < 0) {
+ spin_lock(&ev_int->wait.lock);
+ __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
+ spin_unlock(&ev_int->wait.lock);
+ }
+ return fd;
+}
+
+static const char * const iio_ev_type_text[] = {
+ [IIO_EV_TYPE_THRESH] = "thresh",
+ [IIO_EV_TYPE_MAG] = "mag",
+ [IIO_EV_TYPE_ROC] = "roc",
+ [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
+ [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
+};
+
+static const char * const iio_ev_dir_text[] = {
+ [IIO_EV_DIR_EITHER] = "either",
+ [IIO_EV_DIR_RISING] = "rising",
+ [IIO_EV_DIR_FALLING] = "falling"
+};
+
+static ssize_t iio_ev_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ bool val;
+
+ ret = strtobool(buf, &val);
+ if (ret < 0)
+ return ret;
+
+ ret = indio_dev->info->write_event_config(indio_dev,
+ this_attr->address,
+ val);
+ return (ret < 0) ? ret : len;
+}
+
+static ssize_t iio_ev_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int val = indio_dev->info->read_event_config(indio_dev,
+ this_attr->address);
+
+ if (val < 0)
+ return val;
+ else
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t iio_ev_value_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int val, ret;
+
+ ret = indio_dev->info->read_event_value(indio_dev,
+ this_attr->address, &val);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t iio_ev_value_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ unsigned long val;
+ int ret;
+
+ if (!indio_dev->info->write_event_value)
+ return -EINVAL;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
+ val);
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+
+static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan)
+{
+ int ret = 0, i, attrcount = 0;
+ u64 mask = 0;
+ char *postfix;
+ if (!chan->event_mask)
+ return 0;
+
+ for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
+ postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
+ iio_ev_type_text[i/IIO_EV_DIR_MAX],
+ iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
+ if (postfix == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ if (chan->modified)
+ mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
+ i/IIO_EV_DIR_MAX,
+ i%IIO_EV_DIR_MAX);
+ else if (chan->differential)
+ mask = IIO_EVENT_CODE(chan->type,
+ 0, 0,
+ i%IIO_EV_DIR_MAX,
+ i/IIO_EV_DIR_MAX,
+ 0,
+ chan->channel,
+ chan->channel2);
+ else
+ mask = IIO_UNMOD_EVENT_CODE(chan->type,
+ chan->channel,
+ i/IIO_EV_DIR_MAX,
+ i%IIO_EV_DIR_MAX);
+
+ ret = __iio_add_chan_devattr(postfix,
+ chan,
+ &iio_ev_state_show,
+ iio_ev_state_store,
+ mask,
+ 0,
+ &indio_dev->dev,
+ &indio_dev->event_interface->
+ dev_attr_list);
+ kfree(postfix);
+ if (ret)
+ goto error_ret;
+ attrcount++;
+ postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
+ iio_ev_type_text[i/IIO_EV_DIR_MAX],
+ iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
+ if (postfix == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ ret = __iio_add_chan_devattr(postfix, chan,
+ iio_ev_value_show,
+ iio_ev_value_store,
+ mask,
+ 0,
+ &indio_dev->dev,
+ &indio_dev->event_interface->
+ dev_attr_list);
+ kfree(postfix);
+ if (ret)
+ goto error_ret;
+ attrcount++;
+ }
+ ret = attrcount;
+error_ret:
+ return ret;
+}
+
+static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
+{
+ struct iio_dev_attr *p, *n;
+ list_for_each_entry_safe(p, n,
+ &indio_dev->event_interface->
+ dev_attr_list, l) {
+ kfree(p->dev_attr.attr.name);
+ kfree(p);
+ }
+}
+
+static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
+{
+ int j, ret, attrcount = 0;
+
+ INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
+	/* Dynamically created from the channels array */
+ for (j = 0; j < indio_dev->num_channels; j++) {
+ ret = iio_device_add_event_sysfs(indio_dev,
+ &indio_dev->channels[j]);
+ if (ret < 0)
+ goto error_clear_attrs;
+ attrcount += ret;
+ }
+ return attrcount;
+
+error_clear_attrs:
+ __iio_remove_event_config_attrs(indio_dev);
+
+ return ret;
+}
+
+static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
+{
+ int j;
+
+ for (j = 0; j < indio_dev->num_channels; j++)
+ if (indio_dev->channels[j].event_mask != 0)
+ return true;
+ return false;
+}
+
+static void iio_setup_ev_int(struct iio_event_interface *ev_int)
+{
+ INIT_KFIFO(ev_int->det_events);
+ init_waitqueue_head(&ev_int->wait);
+}
+
+static const char *iio_event_group_name = "events";
+int iio_device_register_eventset(struct iio_dev *indio_dev)
+{
+ struct iio_dev_attr *p;
+ int ret = 0, attrcount_orig = 0, attrcount, attrn;
+ struct attribute **attr;
+
+ if (!(indio_dev->info->event_attrs ||
+ iio_check_for_dynamic_events(indio_dev)))
+ return 0;
+
+ indio_dev->event_interface =
+ kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
+ if (indio_dev->event_interface == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ iio_setup_ev_int(indio_dev->event_interface);
+ if (indio_dev->info->event_attrs != NULL) {
+ attr = indio_dev->info->event_attrs->attrs;
+ while (*attr++ != NULL)
+ attrcount_orig++;
+ }
+ attrcount = attrcount_orig;
+ if (indio_dev->channels) {
+ ret = __iio_add_event_config_attrs(indio_dev);
+ if (ret < 0)
+ goto error_free_setup_event_lines;
+ attrcount += ret;
+ }
+
+ indio_dev->event_interface->group.name = iio_event_group_name;
+ indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
+ sizeof(indio_dev->event_interface->group.attrs[0]),
+ GFP_KERNEL);
+ if (indio_dev->event_interface->group.attrs == NULL) {
+ ret = -ENOMEM;
+ goto error_free_setup_event_lines;
+ }
+ if (indio_dev->info->event_attrs)
+ memcpy(indio_dev->event_interface->group.attrs,
+ indio_dev->info->event_attrs->attrs,
+ sizeof(indio_dev->event_interface->group.attrs[0])
+ *attrcount_orig);
+ attrn = attrcount_orig;
+ /* Add all elements from the list. */
+ list_for_each_entry(p,
+ &indio_dev->event_interface->dev_attr_list,
+ l)
+ indio_dev->event_interface->group.attrs[attrn++] =
+ &p->dev_attr.attr;
+ indio_dev->groups[indio_dev->groupcounter++] =
+ &indio_dev->event_interface->group;
+
+ return 0;
+
+error_free_setup_event_lines:
+ __iio_remove_event_config_attrs(indio_dev);
+ kfree(indio_dev->event_interface);
+error_ret:
+
+ return ret;
+}
+
+void iio_device_unregister_eventset(struct iio_dev *indio_dev)
+{
+ if (indio_dev->event_interface == NULL)
+ return;
+ __iio_remove_event_config_attrs(indio_dev);
+ kfree(indio_dev->event_interface->group.attrs);
+ kfree(indio_dev->event_interface);
+}
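
On the consumer side, the chrdev pairs with the anonymous fd handed out by iio_event_getfd(). A userspace sketch, assuming the IIO_GET_EVENT_FD_IOCTL ioctl from events.h and an illustrative device path:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "events.h"    /* struct iio_event_data, IIO_GET_EVENT_FD_IOCTL */

    int main(void)
    {
            struct iio_event_data ev;
            int fd, event_fd;

            fd = open("/dev/iio:device0", O_RDONLY);    /* illustrative path */
            if (fd < 0 || ioctl(fd, IIO_GET_EVENT_FD_IOCTL, &event_fd) < 0)
                    return 1;

            /* reads shorter than sizeof(ev) return -EINVAL, per the code above */
            while (read(event_fd, &ev, sizeof(ev)) == sizeof(ev))
                    printf("event 0x%llx at %lld\n",
                           (unsigned long long)ev.id, (long long)ev.timestamp);
            return 0;
    }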
diff --git a/drivers/staging/iio/inkern.c b/drivers/staging/iio/inkern.c
new file mode 100644
index 000000000000..de2c8ea64965
--- /dev/null
+++ b/drivers/staging/iio/inkern.c
@@ -0,0 +1,292 @@
+/* The industrial I/O core in kernel channel mapping
+ *
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+
+#include "iio.h"
+#include "iio_core.h"
+#include "machine.h"
+#include "driver.h"
+#include "consumer.h"
+
+struct iio_map_internal {
+ struct iio_dev *indio_dev;
+ struct iio_map *map;
+ struct list_head l;
+};
+
+static LIST_HEAD(iio_map_list);
+static DEFINE_MUTEX(iio_map_list_lock);
+
+int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
+{
+ int i = 0, ret = 0;
+ struct iio_map_internal *mapi;
+
+ if (maps == NULL)
+ return 0;
+
+ mutex_lock(&iio_map_list_lock);
+ while (maps[i].consumer_dev_name != NULL) {
+ mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
+ if (mapi == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ mapi->map = &maps[i];
+ mapi->indio_dev = indio_dev;
+ list_add(&mapi->l, &iio_map_list);
+ i++;
+ }
+error_ret:
+ mutex_unlock(&iio_map_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_map_array_register);
+
+
+/* Assumes the exact same array (i.e. the same memory locations) is
+ * used at unregistration as at registration, rather than doing any
+ * more complex checking of contents.
+ */
+int iio_map_array_unregister(struct iio_dev *indio_dev,
+ struct iio_map *maps)
+{
+ int i = 0, ret = 0;
+ bool found_it;
+ struct iio_map_internal *mapi;
+
+ if (maps == NULL)
+ return 0;
+
+ mutex_lock(&iio_map_list_lock);
+ while (maps[i].consumer_dev_name != NULL) {
+ found_it = false;
+ list_for_each_entry(mapi, &iio_map_list, l)
+ if (&maps[i] == mapi->map) {
+ list_del(&mapi->l);
+ kfree(mapi);
+ found_it = true;
+ break;
+ }
+ if (found_it == false) {
+ ret = -ENODEV;
+ goto error_ret;
+ }
+ }
+error_ret:
+ mutex_unlock(&iio_map_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_map_array_unregister);
+
+static const struct iio_chan_spec
+*iio_chan_spec_from_name(const struct iio_dev *indio_dev,
+ const char *name)
+{
+ int i;
+ const struct iio_chan_spec *chan = NULL;
+
+ for (i = 0; i < indio_dev->num_channels; i++)
+ if (indio_dev->channels[i].datasheet_name &&
+ strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
+ chan = &indio_dev->channels[i];
+ break;
+ }
+ return chan;
+}
+
+
+struct iio_channel *iio_st_channel_get(const char *name,
+ const char *channel_name)
+{
+ struct iio_map_internal *c_i = NULL, *c = NULL;
+ struct iio_channel *channel;
+
+ if (name == NULL && channel_name == NULL)
+ return ERR_PTR(-ENODEV);
+
+	/* first find the matching entry in the channel map */
+ mutex_lock(&iio_map_list_lock);
+ list_for_each_entry(c_i, &iio_map_list, l) {
+ if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
+ (channel_name &&
+ strcmp(channel_name, c_i->map->consumer_channel) != 0))
+ continue;
+ c = c_i;
+ get_device(&c->indio_dev->dev);
+ break;
+ }
+ mutex_unlock(&iio_map_list_lock);
+ if (c == NULL)
+ return ERR_PTR(-ENODEV);
+
+ channel = kmalloc(sizeof(*channel), GFP_KERNEL);
+ if (channel == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ channel->indio_dev = c->indio_dev;
+
+ if (c->map->adc_channel_label)
+ channel->channel =
+ iio_chan_spec_from_name(channel->indio_dev,
+ c->map->adc_channel_label);
+
+ return channel;
+}
+EXPORT_SYMBOL_GPL(iio_st_channel_get);
+
+void iio_st_channel_release(struct iio_channel *channel)
+{
+ put_device(&channel->indio_dev->dev);
+ kfree(channel);
+}
+EXPORT_SYMBOL_GPL(iio_st_channel_release);
+
+struct iio_channel *iio_st_channel_get_all(const char *name)
+{
+ struct iio_channel *chans;
+ struct iio_map_internal *c = NULL;
+ int nummaps = 0;
+ int mapind = 0;
+ int i, ret;
+
+ if (name == NULL)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&iio_map_list_lock);
+ /* first count the matching maps */
+ list_for_each_entry(c, &iio_map_list, l)
+ if (name && strcmp(name, c->map->consumer_dev_name) != 0)
+ continue;
+ else
+ nummaps++;
+
+ if (nummaps == 0) {
+ ret = -ENODEV;
+ goto error_ret;
+ }
+
+ /* NULL terminated array to save passing size */
+ chans = kzalloc(sizeof(*chans)*(nummaps + 1), GFP_KERNEL);
+ if (chans == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ /* for each map fill in the chans element */
+ list_for_each_entry(c, &iio_map_list, l) {
+ if (name && strcmp(name, c->map->consumer_dev_name) != 0)
+ continue;
+ chans[mapind].indio_dev = c->indio_dev;
+ chans[mapind].channel =
+ iio_chan_spec_from_name(chans[mapind].indio_dev,
+ c->map->adc_channel_label);
+ if (chans[mapind].channel == NULL) {
+ ret = -EINVAL;
+ put_device(&chans[mapind].indio_dev->dev);
+ goto error_free_chans;
+ }
+ get_device(&chans[mapind].indio_dev->dev);
+ mapind++;
+ }
+ mutex_unlock(&iio_map_list_lock);
+ if (mapind == 0) {
+ ret = -ENODEV;
+ goto error_free_chans;
+ }
+ return chans;
+
+error_free_chans:
+ for (i = 0; i < nummaps; i++)
+ if (chans[i].indio_dev)
+ put_device(&chans[i].indio_dev->dev);
+ kfree(chans);
+error_ret:
+ mutex_unlock(&iio_map_list_lock);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(iio_st_channel_get_all);
+
+void iio_st_channel_release_all(struct iio_channel *channels)
+{
+ struct iio_channel *chan = &channels[0];
+
+ while (chan->indio_dev) {
+ put_device(&chan->indio_dev->dev);
+ chan++;
+ }
+ kfree(channels);
+}
+EXPORT_SYMBOL_GPL(iio_st_channel_release_all);
+
+int iio_st_read_channel_raw(struct iio_channel *chan, int *val)
+{
+ int val2, ret;
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (chan->indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
+ val, &val2, 0);
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_st_read_channel_raw);
+
+int iio_st_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
+{
+ int ret;
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (chan->indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = chan->indio_dev->info->read_raw(chan->indio_dev,
+ chan->channel,
+ val, val2,
+ IIO_CHAN_INFO_SCALE);
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_st_read_channel_scale);
+
+int iio_st_get_channel_type(struct iio_channel *chan,
+ enum iio_chan_type *type)
+{
+ int ret = 0;
+ /* Need to verify underlying driver has not gone away */
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (chan->indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ *type = chan->channel->type;
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_st_get_channel_type);
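
For orientation, the consumer side of this new interface is used roughly as
follows. A minimal sketch, assuming the providing ADC driver has registered a
matching iio_map entry; the device name "example-battery" and channel name
"voltage" are invented for illustration:

    #include <linux/err.h>
    #include "consumer.h"

    static int example_read_voltage(void)
    {
            struct iio_channel *chan;
            int val, ret;

            /* look up the channel mapped to this consumer device */
            chan = iio_st_channel_get("example-battery", "voltage");
            if (IS_ERR(chan))
                    return PTR_ERR(chan);

            /* returns -ENODEV if the providing driver has gone away */
            ret = iio_st_read_channel_raw(chan, &val);

            iio_st_channel_release(chan);
            return ret < 0 ? ret : val;
    }
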
diff --git a/drivers/staging/iio/kfifo_buf.c b/drivers/staging/iio/kfifo_buf.c
index e1e9c06cde4a..9f3bd59c0e72 100644
--- a/drivers/staging/iio/kfifo_buf.c
+++ b/drivers/staging/iio/kfifo_buf.c
@@ -59,21 +59,6 @@ static struct attribute_group iio_kfifo_attribute_group = {
.name = "buffer",
};
-struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
-{
- struct iio_kfifo *kf;
-
- kf = kzalloc(sizeof *kf, GFP_KERNEL);
- if (!kf)
- return NULL;
- kf->update_needed = true;
- iio_buffer_init(&kf->buffer);
- kf->buffer.attrs = &iio_kfifo_attribute_group;
-
- return &kf->buffer;
-}
-EXPORT_SYMBOL(iio_kfifo_allocate);
-
static int iio_get_bytes_per_datum_kfifo(struct iio_buffer *r)
{
return r->bytes_per_datum;
@@ -104,12 +89,6 @@ static int iio_set_length_kfifo(struct iio_buffer *r, int length)
return 0;
}
-void iio_kfifo_free(struct iio_buffer *r)
-{
- kfree(iio_to_kfifo(r));
-}
-EXPORT_SYMBOL(iio_kfifo_free);
-
static int iio_store_to_kfifo(struct iio_buffer *r,
u8 *data,
s64 timestamp)
@@ -137,7 +116,7 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
return copied;
}
-const struct iio_buffer_access_funcs kfifo_access_funcs = {
+static const struct iio_buffer_access_funcs kfifo_access_funcs = {
.store_to = &iio_store_to_kfifo,
.read_first_n = &iio_read_first_n_kfifo,
.request_update = &iio_request_update_kfifo,
@@ -146,6 +125,27 @@ const struct iio_buffer_access_funcs kfifo_access_funcs = {
.get_length = &iio_get_length_kfifo,
.set_length = &iio_set_length_kfifo,
};
-EXPORT_SYMBOL(kfifo_access_funcs);
+
+struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
+{
+ struct iio_kfifo *kf;
+
+ kf = kzalloc(sizeof *kf, GFP_KERNEL);
+ if (!kf)
+ return NULL;
+ kf->update_needed = true;
+ iio_buffer_init(&kf->buffer);
+ kf->buffer.attrs = &iio_kfifo_attribute_group;
+ kf->buffer.access = &kfifo_access_funcs;
+
+ return &kf->buffer;
+}
+EXPORT_SYMBOL(iio_kfifo_allocate);
+
+void iio_kfifo_free(struct iio_buffer *r)
+{
+ kfree(iio_to_kfifo(r));
+}
+EXPORT_SYMBOL(iio_kfifo_free);
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/kfifo_buf.h b/drivers/staging/iio/kfifo_buf.h
index cc2bd9a1ccfe..9f7da016af04 100644
--- a/drivers/staging/iio/kfifo_buf.h
+++ b/drivers/staging/iio/kfifo_buf.h
@@ -3,8 +3,6 @@
#include "iio.h"
#include "buffer.h"
-extern const struct iio_buffer_access_funcs kfifo_access_funcs;
-
struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev);
void iio_kfifo_free(struct iio_buffer *r);
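
The net effect of folding kfifo_access_funcs into the allocator (the same is
done for ring_sw_access_funcs further below) is that buffer users no longer
touch the access table at all. A hypothetical driver probe fragment now
reduces to:

    /* sketch: the access functions are wired up inside
     * iio_kfifo_allocate() itself after this change */
    indio_dev->buffer = iio_kfifo_allocate(indio_dev);
    if (!indio_dev->buffer)
            return -ENOMEM;
    /* previously also required here:
     * indio_dev->buffer->access = &kfifo_access_funcs; */
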
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 849d6a564afa..38ec52b65dfa 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -592,11 +592,18 @@ static const struct i2c_device_id isl29018_id[] = {
MODULE_DEVICE_TABLE(i2c, isl29018_id);
+static const struct of_device_id isl29018_of_match[] = {
+ { .compatible = "invn,isl29018", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, isl29018_of_match);
+
static struct i2c_driver isl29018_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "isl29018",
.owner = THIS_MODULE,
+ .of_match_table = isl29018_of_match,
},
.probe = isl29018_probe,
.remove = __devexit_p(isl29018_remove),
diff --git a/drivers/staging/iio/light/tsl2563.c b/drivers/staging/iio/light/tsl2563.c
index ffca85e81ef5..546c95a4ea9e 100644
--- a/drivers/staging/iio/light/tsl2563.c
+++ b/drivers/staging/iio/light/tsl2563.c
@@ -118,7 +118,7 @@ struct tsl2563_chip {
struct delayed_work poweroff_work;
/* Remember state for suspend and resume functions */
- pm_message_t state;
+ bool suspended;
struct tsl2563_gainlevel_coeff const *gainlevel;
@@ -315,7 +315,7 @@ static int tsl2563_get_adc(struct tsl2563_chip *chip)
int retry = 1;
int ret = 0;
- if (chip->state.event != PM_EVENT_ON)
+ if (chip->suspended)
goto out;
if (!chip->int_enabled) {
@@ -708,7 +708,6 @@ static int __devinit tsl2563_probe(struct i2c_client *client,
struct tsl2563_chip *chip;
struct tsl2563_platform_data *pdata = client->dev.platform_data;
int err = 0;
- int ret;
u8 id = 0;
indio_dev = iio_allocate_device(sizeof(*chip));
@@ -722,13 +721,15 @@ static int __devinit tsl2563_probe(struct i2c_client *client,
err = tsl2563_detect(chip);
if (err) {
- dev_err(&client->dev, "device not found, error %d\n", -err);
+ dev_err(&client->dev, "detect error %d\n", -err);
goto fail1;
}
err = tsl2563_read_id(chip, &id);
- if (err)
+ if (err) {
+ dev_err(&client->dev, "read id error %d\n", -err);
goto fail1;
+ }
mutex_init(&chip->lock);
@@ -751,40 +752,52 @@ static int __devinit tsl2563_probe(struct i2c_client *client,
indio_dev->num_channels = ARRAY_SIZE(tsl2563_channels);
indio_dev->dev.parent = &client->dev;
indio_dev->modes = INDIO_DIRECT_MODE;
+
if (client->irq)
indio_dev->info = &tsl2563_info;
else
indio_dev->info = &tsl2563_info_no_irq;
+
if (client->irq) {
- ret = request_threaded_irq(client->irq,
+ err = request_threaded_irq(client->irq,
NULL,
&tsl2563_event_handler,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"tsl2563_event",
indio_dev);
- if (ret)
- goto fail2;
+ if (err) {
+ dev_err(&client->dev, "irq request error %d\n", -err);
+ goto fail1;
+ }
}
+
err = tsl2563_configure(chip);
- if (err)
- goto fail3;
+ if (err) {
+ dev_err(&client->dev, "configure error %d\n", -err);
+ goto fail2;
+ }
INIT_DELAYED_WORK(&chip->poweroff_work, tsl2563_poweroff_work);
+
/* The interrupt cannot yet be enabled so this is fine without lock */
schedule_delayed_work(&chip->poweroff_work, 5 * HZ);
- ret = iio_device_register(indio_dev);
- if (ret)
+ err = iio_device_register(indio_dev);
+ if (err) {
+ dev_err(&client->dev, "iio registration error %d\n", -err);
goto fail3;
+ }
return 0;
+
fail3:
+ cancel_delayed_work(&chip->poweroff_work);
+ flush_scheduled_work();
+fail2:
if (client->irq)
free_irq(client->irq, indio_dev);
-fail2:
- iio_free_device(indio_dev);
fail1:
- kfree(chip);
+ iio_free_device(indio_dev);
return err;
}
@@ -810,9 +823,10 @@ static int tsl2563_remove(struct i2c_client *client)
return 0;
}
-static int tsl2563_suspend(struct i2c_client *client, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int tsl2563_suspend(struct device *dev)
{
- struct tsl2563_chip *chip = i2c_get_clientdata(client);
+ struct tsl2563_chip *chip = i2c_get_clientdata(to_i2c_client(dev));
int ret;
mutex_lock(&chip->lock);
@@ -821,16 +835,16 @@ static int tsl2563_suspend(struct i2c_client *client, pm_message_t state)
if (ret)
goto out;
- chip->state = state;
+ chip->suspended = true;
out:
mutex_unlock(&chip->lock);
return ret;
}
-static int tsl2563_resume(struct i2c_client *client)
+static int tsl2563_resume(struct device *dev)
{
- struct tsl2563_chip *chip = i2c_get_clientdata(client);
+ struct tsl2563_chip *chip = i2c_get_clientdata(to_i2c_client(dev));
int ret;
mutex_lock(&chip->lock);
@@ -843,13 +857,19 @@ static int tsl2563_resume(struct i2c_client *client)
if (ret)
goto out;
- chip->state.event = PM_EVENT_ON;
+ chip->suspended = false;
out:
mutex_unlock(&chip->lock);
return ret;
}
+static SIMPLE_DEV_PM_OPS(tsl2563_pm_ops, tsl2563_suspend, tsl2563_resume);
+#define TSL2563_PM_OPS (&tsl2563_pm_ops)
+#else
+#define TSL2563_PM_OPS NULL
+#endif
+
static const struct i2c_device_id tsl2563_id[] = {
{ "tsl2560", 0 },
{ "tsl2561", 1 },
@@ -862,9 +882,8 @@ MODULE_DEVICE_TABLE(i2c, tsl2563_id);
static struct i2c_driver tsl2563_i2c_driver = {
.driver = {
.name = "tsl2563",
+ .pm = TSL2563_PM_OPS,
},
- .suspend = tsl2563_suspend,
- .resume = tsl2563_resume,
.probe = tsl2563_probe,
.remove = __devexit_p(tsl2563_remove),
.id_table = tsl2563_id,
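
The same legacy-to-dev_pm_ops conversion recurs for tsl2583 and hmc5843
below. The general shape, sketched with hypothetical names:

    #ifdef CONFIG_PM_SLEEP
    static int example_suspend(struct device *dev)
    {
            /* to_i2c_client(dev) recovers the i2c_client that the old
             * i2c-level ->suspend(client, state) callback received */
            return 0;
    }

    static int example_resume(struct device *dev)
    {
            return 0;
    }

    static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
    #define EXAMPLE_PM_OPS (&example_pm_ops)
    #else
    #define EXAMPLE_PM_OPS NULL
    #endif

The driver then sets .driver.pm = EXAMPLE_PM_OPS and drops the legacy
.suspend/.resume members, exactly as the hunks above and below do.
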
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
index 5b6455a238d8..8671d98e0448 100644
--- a/drivers/staging/iio/light/tsl2583.c
+++ b/drivers/staging/iio/light/tsl2583.c
@@ -113,7 +113,7 @@ struct taos_lux {
/* This structure is intentionally large to accommodate updates via sysfs. */
/* Sized to 11 = max 10 segments + 1 termination segment */
-/* Assumption is is one and only one type of glass used */
+/* Assumption is one and only one type of glass used */
static struct taos_lux taos_device_lux[11] = {
{ 9830, 8520, 15729 },
{ 12452, 10807, 23344 },
@@ -884,9 +884,10 @@ fail2:
return ret;
}
-static int taos_suspend(struct i2c_client *client, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int taos_suspend(struct device *dev)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct tsl2583_chip *chip = iio_priv(indio_dev);
int ret = 0;
@@ -901,9 +902,9 @@ static int taos_suspend(struct i2c_client *client, pm_message_t state)
return ret;
}
-static int taos_resume(struct i2c_client *client)
+static int taos_resume(struct device *dev)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct tsl2583_chip *chip = iio_priv(indio_dev);
int ret = 0;
@@ -916,6 +917,11 @@ static int taos_resume(struct i2c_client *client)
return ret;
}
+static SIMPLE_DEV_PM_OPS(taos_pm_ops, taos_suspend, taos_resume);
+#define TAOS_PM_OPS (&taos_pm_ops)
+#else
+#define TAOS_PM_OPS NULL
+#endif
static int __devexit taos_remove(struct i2c_client *client)
{
@@ -937,10 +943,9 @@ MODULE_DEVICE_TABLE(i2c, taos_idtable);
static struct i2c_driver taos_driver = {
.driver = {
.name = "tsl2583",
+ .pm = TAOS_PM_OPS,
},
.id_table = taos_idtable,
- .suspend = taos_suspend,
- .resume = taos_resume,
.probe = taos_probe,
.remove = __devexit_p(taos_remove),
};
diff --git a/drivers/staging/iio/machine.h b/drivers/staging/iio/machine.h
new file mode 100644
index 000000000000..0b1f19bfdc44
--- /dev/null
+++ b/drivers/staging/iio/machine.h
@@ -0,0 +1,24 @@
+/*
+ * Industrial I/O in-kernel access map definitions for board files.
+ *
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+/**
+ * struct iio_map - description of link between consumer and device channels
+ * @adc_channel_label: Label used to identify the channel on the provider.
+ * This is matched against the datasheet_name element
+ * of struct iio_chan_spec.
+ * @consumer_dev_name: Name to uniquely identify the consumer device.
+ * @consumer_channel:	Unique name used to identify the channel on the
+ * consumer side.
+ */
+struct iio_map {
+ const char *adc_channel_label;
+ const char *consumer_dev_name;
+ const char *consumer_channel;
+};
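
A board file then describes the wiring with an array of these structures. A
hypothetical fragment (all names invented): channel "AIN0" is the provider's
datasheet_name, consumed by device "example-battery" under the consumer-side
name "voltage":

    #include "machine.h"

    static struct iio_map example_adc_maps[] = {
            {
                    .adc_channel_label = "AIN0",
                    .consumer_dev_name = "example-battery",
                    .consumer_channel  = "voltage",
            },
            { },    /* array is NULL-terminated */
    };

The provider driver hands this array to iio_map_array_register() at probe
time and to iio_map_array_unregister() on removal (see inkern.c above).
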
diff --git a/drivers/staging/iio/magnetometer/ak8975.c b/drivers/staging/iio/magnetometer/ak8975.c
index 3158f12cb051..d5ddac3d8831 100644
--- a/drivers/staging/iio/magnetometer/ak8975.c
+++ b/drivers/staging/iio/magnetometer/ak8975.c
@@ -564,9 +564,17 @@ static const struct i2c_device_id ak8975_id[] = {
MODULE_DEVICE_TABLE(i2c, ak8975_id);
+static const struct of_device_id ak8975_of_match[] = {
+ { .compatible = "asahi-kasei,ak8975", },
+ { .compatible = "ak8975", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ak8975_of_match);
+
static struct i2c_driver ak8975_driver = {
.driver = {
.name = "ak8975",
+ .of_match_table = ak8975_of_match,
},
.probe = ak8975_probe,
.remove = __devexit_p(ak8975_remove),
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index f2e85a9cf196..91dd3da70cb4 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -86,7 +86,7 @@
#define RATE_NOT_USED 0x07
/*
- * Device Configutration
+ * Device Configuration
*/
#define CONF_NORMAL 0x00
#define CONF_POSITIVE_BIAS 0x01
@@ -142,7 +142,7 @@ static s32 hmc5843_configure(struct i2c_client *client,
(operating_mode & 0x03));
}
-/* Return the measurement value from the specified channel */
+/* Return the measurement value from the specified channel */
static int hmc5843_read_measurement(struct iio_dev *indio_dev,
int address,
int *val)
@@ -169,7 +169,7 @@ static int hmc5843_read_measurement(struct iio_dev *indio_dev,
/*
* From the datasheet
* 0 - Continuous-Conversion Mode: In continuous-conversion mode, the
- * device continuously performs conversions an places the result in the
+ * device continuously performs conversions and places the result in the
* data register.
*
* 1 - Single-Conversion Mode : device performs a single measurement,
@@ -588,19 +588,26 @@ static int hmc5843_remove(struct i2c_client *client)
return 0;
}
-static int hmc5843_suspend(struct i2c_client *client, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int hmc5843_suspend(struct device *dev)
{
- hmc5843_configure(client, MODE_SLEEP);
+ hmc5843_configure(to_i2c_client(dev), MODE_SLEEP);
return 0;
}
-static int hmc5843_resume(struct i2c_client *client)
+static int hmc5843_resume(struct device *dev)
{
- struct hmc5843_data *data = i2c_get_clientdata(client);
- hmc5843_configure(client, data->operating_mode);
+ struct hmc5843_data *data = i2c_get_clientdata(to_i2c_client(dev));
+ hmc5843_configure(to_i2c_client(dev), data->operating_mode);
return 0;
}
+static SIMPLE_DEV_PM_OPS(hmc5843_pm_ops, hmc5843_suspend, hmc5843_resume);
+#define HMC5843_PM_OPS (&hmc5843_pm_ops)
+#else
+#define HMC5843_PM_OPS NULL
+#endif
+
static const struct i2c_device_id hmc5843_id[] = {
{ "hmc5843", 0 },
{ }
@@ -610,14 +617,13 @@ MODULE_DEVICE_TABLE(i2c, hmc5843_id);
static struct i2c_driver hmc5843_driver = {
.driver = {
.name = "hmc5843",
+ .pm = HMC5843_PM_OPS,
},
.id_table = hmc5843_id,
.probe = hmc5843_probe,
.remove = hmc5843_remove,
.detect = hmc5843_detect,
.address_list = normal_i2c,
- .suspend = hmc5843_suspend,
- .resume = hmc5843_resume,
};
module_i2c_driver(hmc5843_driver);
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index f29f2b278fe4..c45b23bb1229 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -85,7 +85,7 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
/**
* ade7758_ring_preenable() setup the parameters of the ring before enabling
*
- * The complex nature of the setting of the nuber of bytes per datum is due
+ * The complex nature of the setting of the number of bytes per datum is due
* to this driver currently ensuring that the timestamp is stored at an 8
* byte boundary.
**/
@@ -144,8 +144,6 @@ int ade7758_configure_ring(struct iio_dev *indio_dev)
return ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->setup_ops = &ade7758_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
diff --git a/drivers/staging/iio/meter/meter.h b/drivers/staging/iio/meter/meter.h
index 142c50d71fda..6a3db1423631 100644
--- a/drivers/staging/iio/meter/meter.h
+++ b/drivers/staging/iio/meter/meter.h
@@ -362,7 +362,7 @@
#define IIO_EVENT_ATTR_CYCEND(_evlist, _show, _store, _mask) \
IIO_EVENT_ATTR_SH(cycend, _evlist, _show, _store, _mask)
-/* on the rising and falling edge of the the voltage waveform */
+/* on the rising and falling edge of the voltage waveform */
#define IIO_EVENT_ATTR_ZERO_CROSS(_evlist, _show, _store, _mask) \
IIO_EVENT_ATTR_SH(zero_cross, _evlist, _show, _store, _mask)
diff --git a/drivers/staging/iio/ring_sw.c b/drivers/staging/iio/ring_sw.c
index 3e24ec455854..b9945ec44faa 100644
--- a/drivers/staging/iio/ring_sw.c
+++ b/drivers/staging/iio/ring_sw.c
@@ -147,7 +147,7 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
size_t data_available, buffer_size;
/* A userspace program has probably made an error if it tries to
- * read something that is not a whole number of bpds.
+ * read something that is not a whole number of bpds.
* Return an error.
*/
if (n % ring->buf.bytes_per_datum) {
@@ -229,7 +229,7 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
/* setup the next read position */
/* Beware, this may fail due to concurrency fun and games.
- * Possible that sufficient fill commands have run to push the read
+ * Possible that sufficient fill commands have run to push the read
* pointer past where we would be after the rip. If this occurs, leave
* it be.
*/
@@ -329,6 +329,16 @@ static struct attribute_group iio_ring_attribute_group = {
.name = "buffer",
};
+static const struct iio_buffer_access_funcs ring_sw_access_funcs = {
+ .store_to = &iio_store_to_sw_rb,
+ .read_first_n = &iio_read_first_n_sw_rb,
+ .request_update = &iio_request_update_sw_rb,
+ .get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
+ .set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
+ .get_length = &iio_get_length_sw_rb,
+ .set_length = &iio_set_length_sw_rb,
+};
+
struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
struct iio_buffer *buf;
@@ -341,6 +351,7 @@ struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
buf = &ring->buf;
iio_buffer_init(buf);
buf->attrs = &iio_ring_attribute_group;
+ buf->access = &ring_sw_access_funcs;
return buf;
}
@@ -352,16 +363,5 @@ void iio_sw_rb_free(struct iio_buffer *r)
}
EXPORT_SYMBOL(iio_sw_rb_free);
-const struct iio_buffer_access_funcs ring_sw_access_funcs = {
- .store_to = &iio_store_to_sw_rb,
- .read_first_n = &iio_read_first_n_sw_rb,
- .request_update = &iio_request_update_sw_rb,
- .get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
- .set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
- .get_length = &iio_get_length_sw_rb,
- .set_length = &iio_set_length_sw_rb,
-};
-EXPORT_SYMBOL(ring_sw_access_funcs);
-
MODULE_DESCRIPTION("Industrialio I/O software ring buffer");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/ring_sw.h b/drivers/staging/iio/ring_sw.h
index e6a6e2c40960..7556e2122367 100644
--- a/drivers/staging/iio/ring_sw.h
+++ b/drivers/staging/iio/ring_sw.h
@@ -25,11 +25,6 @@
#define _IIO_RING_SW_H_
#include "buffer.h"
-/**
- * ring_sw_access_funcs - access functions for a software ring buffer
- **/
-extern const struct iio_buffer_access_funcs ring_sw_access_funcs;
-
struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev);
void iio_sw_rb_free(struct iio_buffer *ring);
#endif /* _IIO_RING_SW_H_ */
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
index 1cbb25dff8b5..665653d79f02 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -232,17 +232,7 @@ static struct platform_driver iio_bfin_tmr_trigger_driver = {
.remove = __devexit_p(iio_bfin_tmr_trigger_remove),
};
-static int __init iio_bfin_tmr_trig_init(void)
-{
- return platform_driver_register(&iio_bfin_tmr_trigger_driver);
-}
-module_init(iio_bfin_tmr_trig_init);
-
-static void __exit iio_bfin_tmr_trig_exit(void)
-{
- platform_driver_unregister(&iio_bfin_tmr_trigger_driver);
-}
-module_exit(iio_bfin_tmr_trig_exit);
+module_platform_driver(iio_bfin_tmr_trigger_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Blackfin system timer based trigger for the iio subsystem");
diff --git a/drivers/staging/iio/trigger/iio-trig-gpio.c b/drivers/staging/iio/trigger/iio-trig-gpio.c
index f2a655981622..a3465947235e 100644
--- a/drivers/staging/iio/trigger/iio-trig-gpio.c
+++ b/drivers/staging/iio/trigger/iio-trig-gpio.c
@@ -160,17 +160,7 @@ static struct platform_driver iio_gpio_trigger_driver = {
},
};
-static int __init iio_gpio_trig_init(void)
-{
- return platform_driver_register(&iio_gpio_trigger_driver);
-}
-module_init(iio_gpio_trig_init);
-
-static void __exit iio_gpio_trig_exit(void)
-{
- platform_driver_unregister(&iio_gpio_trigger_driver);
-}
-module_exit(iio_gpio_trig_exit);
+module_platform_driver(iio_gpio_trigger_driver);
MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("Example gpio trigger for the iio subsystem");
diff --git a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
index bd7416b2c561..a80cf67bf84d 100644
--- a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
+++ b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
@@ -195,18 +195,8 @@ static struct platform_driver iio_trig_periodic_rtc_driver = {
},
};
-static int __init iio_trig_periodic_rtc_init(void)
-{
- return platform_driver_register(&iio_trig_periodic_rtc_driver);
-}
-
-static void __exit iio_trig_periodic_rtc_exit(void)
-{
- return platform_driver_unregister(&iio_trig_periodic_rtc_driver);
-}
+module_platform_driver(iio_trig_periodic_rtc_driver);
-module_init(iio_trig_periodic_rtc_init);
-module_exit(iio_trig_periodic_rtc_exit);
MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("Periodic realtime clock trigger for the iio subsystem");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/types.h b/drivers/staging/iio/types.h
index b7d26474ad06..0c3213666901 100644
--- a/drivers/staging/iio/types.h
+++ b/drivers/staging/iio/types.h
@@ -46,4 +46,8 @@ enum iio_modifier {
IIO_MOD_LIGHT_IR,
};
+#define IIO_VAL_INT 1
+#define IIO_VAL_INT_PLUS_MICRO 2
+#define IIO_VAL_INT_PLUS_NANO 3
+
#endif /* _IIO_TYPES_H_ */
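
These constants are the possible return values of a driver's read_raw()
callback and tell the core how to interpret the (val, val2) pair. A sketch,
with a hypothetical callback reporting a scale of 0.0625:

    static int example_read_raw(struct iio_dev *indio_dev,
                                struct iio_chan_spec const *chan,
                                int *val, int *val2, long mask)
    {
            switch (mask) {
            case IIO_CHAN_INFO_SCALE:
                    *val = 0;       /* integer part */
                    *val2 = 62500;  /* micro part: scale = 0.062500 */
                    return IIO_VAL_INT_PLUS_MICRO;
            default:
                    return -EINVAL;
            }
    }
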
diff --git a/drivers/staging/keucr/TODO b/drivers/staging/keucr/TODO
index 1c48e40e2b2c..d6da656eee1d 100644
--- a/drivers/staging/keucr/TODO
+++ b/drivers/staging/keucr/TODO
@@ -9,4 +9,4 @@ TODO:
- smcommon.h & smilsub.c: use kernel hweight8(), hweight16()
Please send any patches for this driver to Al Cho <acho@novell.com> and
-Greg Kroah-Hartman <gregkh@suse.de>.
+Greg Kroah-Hartman <gregkh@linuxfoundation.org>.
diff --git a/drivers/staging/keucr/transport.h b/drivers/staging/keucr/transport.h
index 4ae57d0145b2..2a11a98375d7 100644
--- a/drivers/staging/keucr/transport.h
+++ b/drivers/staging/keucr/transport.h
@@ -3,43 +3,6 @@
#include <linux/blkdev.h>
-/* Bulk only data structures */
-
-/* command block wrapper */
-struct bulk_cb_wrap {
- __le32 Signature; /* contains 'USBC' */
- __u32 Tag; /* unique per command id */
- __le32 DataTransferLength; /* size of data */
- __u8 Flags; /* direction in bit 0 */
- __u8 Lun; /* LUN normally 0 */
- __u8 Length; /* of of the CDB */
- __u8 CDB[16]; /* max command */
-};
-
-#define US_BULK_CB_WRAP_LEN 31
-#define US_BULK_CB_SIGN 0x43425355 /*spells out USBC */
-#define US_BULK_FLAG_IN 1
-#define US_BULK_FLAG_OUT 0
-
-/* command status wrapper */
-struct bulk_cs_wrap {
- __le32 Signature; /* should = 'USBS' */
- __u32 Tag; /* same as original command */
- __le32 Residue; /* amount not transferred */
- __u8 Status; /* see below */
- __u8 Filler[18];
-};
-
-#define US_BULK_CS_WRAP_LEN 13
-#define US_BULK_CS_SIGN 0x53425355 /* spells out 'USBS' */
-#define US_BULK_STAT_OK 0
-#define US_BULK_STAT_FAIL 1
-#define US_BULK_STAT_PHASE 2
-
-/* bulk-only class specific requests */
-#define US_BULK_RESET_REQUEST 0xff
-#define US_BULK_GET_MAX_LUN 0xfe
-
/* usb_stor_bulk_transfer_xxx() return codes, in order of severity */
#define USB_STOR_XFER_GOOD 0 /* good transfer */
#define USB_STOR_XFER_SHORT 1 /* transferred less than expected */
diff --git a/drivers/staging/line6/capture.c b/drivers/staging/line6/capture.c
index 127f95247749..c85c5b6bffb7 100644
--- a/drivers/staging/line6/capture.c
+++ b/drivers/staging/line6/capture.c
@@ -107,7 +107,7 @@ void line6_unlink_audio_in_urbs(struct snd_line6_pcm *line6pcm)
Wait until unlinking of all currently active capture URBs has been
finished.
*/
-static void wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
+void line6_wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
{
int timeout = HZ;
unsigned int i;
@@ -134,7 +134,7 @@ static void wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
void line6_unlink_wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
{
line6_unlink_audio_in_urbs(line6pcm);
- wait_clear_audio_in_urbs(line6pcm);
+ line6_wait_clear_audio_in_urbs(line6pcm);
}
/*
@@ -193,25 +193,6 @@ void line6_capture_check_period(struct snd_line6_pcm *line6pcm, int length)
}
}
-int line6_alloc_capture_buffer(struct snd_line6_pcm *line6pcm)
-{
- /* We may be invoked multiple times in a row so allocate once only */
- if (line6pcm->buffer_in)
- return 0;
-
- line6pcm->buffer_in =
- kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
- line6pcm->max_packet_size, GFP_KERNEL);
-
- if (!line6pcm->buffer_in) {
- dev_err(line6pcm->line6->ifcdev,
- "cannot malloc capture buffer\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
void line6_free_capture_buffer(struct snd_line6_pcm *line6pcm)
{
kfree(line6pcm->buffer_in);
@@ -273,9 +254,9 @@ static void audio_in_callback(struct urb *urb)
line6pcm->prev_fsize = fsize;
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- if (!(line6pcm->flags & MASK_PCM_IMPULSE))
+ if (!(line6pcm->flags & LINE6_BITS_PCM_IMPULSE))
#endif
- if (test_bit(BIT_PCM_ALSA_CAPTURE, &line6pcm->flags)
+ if (test_bit(LINE6_INDEX_PCM_ALSA_CAPTURE_STREAM, &line6pcm->flags)
&& (fsize > 0))
line6_capture_copy(line6pcm, fbuf, fsize);
}
@@ -291,9 +272,9 @@ static void audio_in_callback(struct urb *urb)
submit_audio_in_urb(line6pcm);
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- if (!(line6pcm->flags & MASK_PCM_IMPULSE))
+ if (!(line6pcm->flags & LINE6_BITS_PCM_IMPULSE))
#endif
- if (test_bit(BIT_PCM_ALSA_CAPTURE, &line6pcm->flags))
+ if (test_bit(LINE6_INDEX_PCM_ALSA_CAPTURE_STREAM, &line6pcm->flags))
line6_capture_check_period(line6pcm, length);
}
}
@@ -341,17 +322,17 @@ static int snd_line6_capture_hw_params(struct snd_pcm_substream *substream,
}
/* -- [FD] end */
- if ((line6pcm->flags & MASK_CAPTURE) == 0) {
- ret = line6_alloc_capture_buffer(line6pcm);
+ ret = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
- if (ret < 0)
- return ret;
- }
+ if (ret < 0)
+ return ret;
ret = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
- if (ret < 0)
+ if (ret < 0) {
+ line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
return ret;
+ }
line6pcm->period_in = params_period_bytes(hw_params);
return 0;
@@ -361,12 +342,7 @@ static int snd_line6_capture_hw_params(struct snd_pcm_substream *substream,
static int snd_line6_capture_hw_free(struct snd_pcm_substream *substream)
{
struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
-
- if ((line6pcm->flags & MASK_CAPTURE) == 0) {
- line6_unlink_wait_clear_audio_in_urbs(line6pcm);
- line6_free_capture_buffer(line6pcm);
- }
-
+ line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
return snd_pcm_lib_free_pages(substream);
}
@@ -380,7 +356,7 @@ int snd_line6_capture_trigger(struct snd_line6_pcm *line6pcm, int cmd)
#ifdef CONFIG_PM
case SNDRV_PCM_TRIGGER_RESUME:
#endif
- err = line6_pcm_start(line6pcm, MASK_PCM_ALSA_CAPTURE);
+ err = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_STREAM);
if (err < 0)
return err;
@@ -391,7 +367,7 @@ int snd_line6_capture_trigger(struct snd_line6_pcm *line6pcm, int cmd)
#ifdef CONFIG_PM
case SNDRV_PCM_TRIGGER_SUSPEND:
#endif
- err = line6_pcm_stop(line6pcm, MASK_PCM_ALSA_CAPTURE);
+ err = line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_STREAM);
if (err < 0)
return err;
diff --git a/drivers/staging/line6/capture.h b/drivers/staging/line6/capture.h
index 366cbaa7c88d..4157bcb598a9 100644
--- a/drivers/staging/line6/capture.h
+++ b/drivers/staging/line6/capture.h
@@ -19,7 +19,6 @@
extern struct snd_pcm_ops snd_line6_capture_ops;
-extern int line6_alloc_capture_buffer(struct snd_line6_pcm *line6pcm);
extern void line6_capture_copy(struct snd_line6_pcm *line6pcm, char *fbuf,
int fsize);
extern void line6_capture_check_period(struct snd_line6_pcm *line6pcm,
@@ -30,6 +29,7 @@ extern int line6_submit_audio_in_all_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_audio_in_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_wait_clear_audio_in_urbs(struct snd_line6_pcm
*line6pcm);
+extern void line6_wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm);
extern int snd_line6_capture_trigger(struct snd_line6_pcm *line6pcm, int cmd);
#endif
diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
index 6a1959e16e00..e8023afd3656 100644
--- a/drivers/staging/line6/driver.c
+++ b/drivers/staging/line6/driver.c
@@ -1346,7 +1346,7 @@ static void __exit line6_exit(void)
if (line6pcm == NULL)
continue;
- line6_pcm_stop(line6pcm, ~0);
+ line6_pcm_release(line6pcm, ~0);
}
usb_deregister(&line6_driver);
diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
index 37675e66da81..90d2d4475cb4 100644
--- a/drivers/staging/line6/pcm.c
+++ b/drivers/staging/line6/pcm.c
@@ -52,9 +52,9 @@ static ssize_t pcm_set_impulse_volume(struct device *dev,
line6pcm->impulse_volume = value;
if (value > 0)
- line6_pcm_start(line6pcm, MASK_PCM_IMPULSE);
+ line6_pcm_acquire(line6pcm, LINE6_BITS_PCM_IMPULSE);
else
- line6_pcm_stop(line6pcm, MASK_PCM_IMPULSE);
+ line6_pcm_release(line6pcm, LINE6_BITS_PCM_IMPULSE);
return count;
}
@@ -92,29 +92,43 @@ static bool test_flags(unsigned long flags0, unsigned long flags1,
return ((flags0 & mask) == 0) && ((flags1 & mask) != 0);
}
-int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels)
+int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int channels)
{
unsigned long flags_old =
__sync_fetch_and_or(&line6pcm->flags, channels);
unsigned long flags_new = flags_old | channels;
+ unsigned long flags_final = flags_old;
int err = 0;
line6pcm->prev_fbuf = NULL;
- if (test_flags(flags_old, flags_new, MASK_CAPTURE)) {
+ if (test_flags(flags_old, flags_new, LINE6_BITS_CAPTURE_BUFFER)) {
+ /* We may be invoked multiple times in a row so allocate once only */
+ if (!line6pcm->buffer_in) {
+ line6pcm->buffer_in =
+ kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
+ line6pcm->max_packet_size, GFP_KERNEL);
+
+ if (!line6pcm->buffer_in) {
+ dev_err(line6pcm->line6->ifcdev,
+ "cannot malloc capture buffer\n");
+ err = -ENOMEM;
+ goto pcm_acquire_error;
+ }
+
+ flags_final |= channels & LINE6_BITS_CAPTURE_BUFFER;
+ }
+ }
+
+ if (test_flags(flags_old, flags_new, LINE6_BITS_CAPTURE_STREAM)) {
/*
Waiting for completion of active URBs in the stop handler is
a bug, we therefore report an error if capturing is restarted
too soon.
*/
- if (line6pcm->active_urb_in | line6pcm->unlink_urb_in)
+ if (line6pcm->active_urb_in | line6pcm->unlink_urb_in) {
+ dev_err(line6pcm->line6->ifcdev, "Device not yet ready\n");
return -EBUSY;
-
- if (!(flags_new & MASK_PCM_ALSA_CAPTURE)) {
- err = line6_alloc_capture_buffer(line6pcm);
-
- if (err < 0)
- goto pcm_start_error;
}
line6pcm->count_in = 0;
@@ -122,55 +136,78 @@ int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels)
err = line6_submit_audio_in_all_urbs(line6pcm);
if (err < 0)
- goto pcm_start_error;
+ goto pcm_acquire_error;
+
+ flags_final |= channels & LINE6_BITS_CAPTURE_STREAM;
}
- if (test_flags(flags_old, flags_new, MASK_PLAYBACK)) {
- /*
- See comment above regarding PCM restart.
- */
- if (line6pcm->active_urb_out | line6pcm->unlink_urb_out)
- return -EBUSY;
+ if (test_flags(flags_old, flags_new, LINE6_BITS_PLAYBACK_BUFFER)) {
+ /* We may be invoked multiple times in a row so allocate once only */
+ if (!line6pcm->buffer_out) {
+ line6pcm->buffer_out =
+ kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
+ line6pcm->max_packet_size, GFP_KERNEL);
+
+ if (!line6pcm->buffer_out) {
+ dev_err(line6pcm->line6->ifcdev,
+ "cannot malloc playback buffer\n");
+ err = -ENOMEM;
+ goto pcm_acquire_error;
+ }
- if (!(flags_new & MASK_PCM_ALSA_PLAYBACK)) {
- err = line6_alloc_playback_buffer(line6pcm);
+ flags_final |= channels & LINE6_BITS_PLAYBACK_BUFFER;
+ }
+ }
- if (err < 0)
- goto pcm_start_error;
+ if (test_flags(flags_old, flags_new, LINE6_BITS_PLAYBACK_STREAM)) {
+ /*
+ See comment above regarding PCM restart.
+ */
+ if (line6pcm->active_urb_out | line6pcm->unlink_urb_out) {
+ dev_err(line6pcm->line6->ifcdev, "Device not yet ready\n");
+ return -EBUSY;
}
line6pcm->count_out = 0;
err = line6_submit_audio_out_all_urbs(line6pcm);
if (err < 0)
- goto pcm_start_error;
+ goto pcm_acquire_error;
+
+ flags_final |= channels & LINE6_BITS_PLAYBACK_STREAM;
}
return 0;
-pcm_start_error:
- __sync_fetch_and_and(&line6pcm->flags, ~channels);
+pcm_acquire_error:
+ /*
+ If not all requested resources/streams could be obtained, release
+ those which were successfully obtained (if any).
+ */
+ line6_pcm_release(line6pcm, flags_final & channels);
return err;
}
-int line6_pcm_stop(struct snd_line6_pcm *line6pcm, int channels)
+int line6_pcm_release(struct snd_line6_pcm *line6pcm, int channels)
{
unsigned long flags_old =
__sync_fetch_and_and(&line6pcm->flags, ~channels);
unsigned long flags_new = flags_old & ~channels;
- if (test_flags(flags_new, flags_old, MASK_CAPTURE)) {
+ if (test_flags(flags_new, flags_old, LINE6_BITS_CAPTURE_STREAM))
line6_unlink_audio_in_urbs(line6pcm);
- if (!(flags_old & MASK_PCM_ALSA_CAPTURE))
- line6_free_capture_buffer(line6pcm);
+ if (test_flags(flags_new, flags_old, LINE6_BITS_CAPTURE_BUFFER)) {
+ line6_wait_clear_audio_in_urbs(line6pcm);
+ line6_free_capture_buffer(line6pcm);
}
- if (test_flags(flags_new, flags_old, MASK_PLAYBACK)) {
+ if (test_flags(flags_new, flags_old, LINE6_BITS_PLAYBACK_STREAM))
line6_unlink_audio_out_urbs(line6pcm);
- if (!(flags_old & MASK_PCM_ALSA_PLAYBACK))
- line6_free_playback_buffer(line6pcm);
+ if (test_flags(flags_new, flags_old, LINE6_BITS_PLAYBACK_BUFFER)) {
+ line6_wait_clear_audio_out_urbs(line6pcm);
+ line6_free_playback_buffer(line6pcm);
}
return 0;
@@ -185,7 +222,7 @@ int snd_line6_trigger(struct snd_pcm_substream *substream, int cmd)
unsigned long flags;
spin_lock_irqsave(&line6pcm->lock_trigger, flags);
- clear_bit(BIT_PREPARED, &line6pcm->flags);
+ clear_bit(LINE6_INDEX_PREPARED, &line6pcm->flags);
snd_pcm_group_for_each_entry(s, substream) {
switch (s->stream) {
@@ -498,13 +535,13 @@ int snd_line6_prepare(struct snd_pcm_substream *substream)
switch (substream->stream) {
case SNDRV_PCM_STREAM_PLAYBACK:
- if ((line6pcm->flags & MASK_PLAYBACK) == 0)
+ if ((line6pcm->flags & LINE6_BITS_PLAYBACK_STREAM) == 0)
line6_unlink_wait_clear_audio_out_urbs(line6pcm);
break;
case SNDRV_PCM_STREAM_CAPTURE:
- if ((line6pcm->flags & MASK_CAPTURE) == 0)
+ if ((line6pcm->flags & LINE6_BITS_CAPTURE_STREAM) == 0)
line6_unlink_wait_clear_audio_in_urbs(line6pcm);
break;
@@ -513,7 +550,7 @@ int snd_line6_prepare(struct snd_pcm_substream *substream)
MISSING_CASE;
}
- if (!test_and_set_bit(BIT_PREPARED, &line6pcm->flags)) {
+ if (!test_and_set_bit(LINE6_INDEX_PREPARED, &line6pcm->flags)) {
line6pcm->count_out = 0;
line6pcm->pos_out = 0;
line6pcm->pos_out_done = 0;
diff --git a/drivers/staging/line6/pcm.h b/drivers/staging/line6/pcm.h
index 55d8297dd3d9..5210ec8dbe16 100644
--- a/drivers/staging/line6/pcm.h
+++ b/drivers/staging/line6/pcm.h
@@ -46,57 +46,131 @@
(line6pcm->pcm->streams[stream].substream)
/*
- PCM mode bits and masks.
- "ALSA": operations triggered by applications via ALSA
- "MONITOR": software monitoring
- "IMPULSE": optional impulse response operation
+ PCM mode bits.
+
+ There are several features of the Line6 USB driver which require PCM
+ data to be exchanged with the device:
+ *) PCM playback and capture via ALSA
+ *) software monitoring (for devices without hardware monitoring)
+ *) optional impulse response measurement
+ However, from the device's point of view, there is just a single
+ capture and playback stream, which must be shared between these
+ subsystems. It is therefore necessary to maintain the state of the
+ subsystems with respect to PCM usage. We define several constants of
+ the form LINE6_BIT_PCM_<subsystem>_<direction>_<resource> with the
+ following meanings:
+ *) <subsystem> is one of
+ -) ALSA: PCM playback and capture via ALSA
+ -) MONITOR: software monitoring
+ -) IMPULSE: optional impulse response measurement
+ *) <direction> is one of
+ -) PLAYBACK: audio output (from host to device)
+ -) CAPTURE: audio input (from device to host)
+ *) <resource> is one of
+ -) BUFFER: buffer required by PCM data stream
+ -) STREAM: actual PCM data stream
+
+ The subsystems call line6_pcm_acquire() to acquire the (shared)
+ resources needed for a particular operation (e.g., allocate the buffer
+ for ALSA playback or start the capture stream for software monitoring).
+ When a resource is no longer needed, it is released by calling
+ line6_pcm_release(). Buffer allocation and stream startup are handled
+ separately to allow the ALSA kernel driver to perform them at
+ appropriate places (since the callback which starts a PCM stream is not
+ allowed to sleep).
*/
enum {
- /* individual bits: */
- BIT_PCM_ALSA_PLAYBACK,
- BIT_PCM_ALSA_CAPTURE,
- BIT_PCM_MONITOR_PLAYBACK,
- BIT_PCM_MONITOR_CAPTURE,
+ /* individual bit indices: */
+ LINE6_INDEX_PCM_ALSA_PLAYBACK_BUFFER,
+ LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM,
+ LINE6_INDEX_PCM_ALSA_CAPTURE_BUFFER,
+ LINE6_INDEX_PCM_ALSA_CAPTURE_STREAM,
+ LINE6_INDEX_PCM_MONITOR_PLAYBACK_BUFFER,
+ LINE6_INDEX_PCM_MONITOR_PLAYBACK_STREAM,
+ LINE6_INDEX_PCM_MONITOR_CAPTURE_BUFFER,
+ LINE6_INDEX_PCM_MONITOR_CAPTURE_STREAM,
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- BIT_PCM_IMPULSE_PLAYBACK,
- BIT_PCM_IMPULSE_CAPTURE,
+ LINE6_INDEX_PCM_IMPULSE_PLAYBACK_BUFFER,
+ LINE6_INDEX_PCM_IMPULSE_PLAYBACK_STREAM,
+ LINE6_INDEX_PCM_IMPULSE_CAPTURE_BUFFER,
+ LINE6_INDEX_PCM_IMPULSE_CAPTURE_STREAM,
#endif
- BIT_PAUSE_PLAYBACK,
- BIT_PREPARED,
-
- /* individual masks: */
-/* *INDENT-OFF* */
- MASK_PCM_ALSA_PLAYBACK = 1 << BIT_PCM_ALSA_PLAYBACK,
- MASK_PCM_ALSA_CAPTURE = 1 << BIT_PCM_ALSA_CAPTURE,
- MASK_PCM_MONITOR_PLAYBACK = 1 << BIT_PCM_MONITOR_PLAYBACK,
- MASK_PCM_MONITOR_CAPTURE = 1 << BIT_PCM_MONITOR_CAPTURE,
+ LINE6_INDEX_PAUSE_PLAYBACK,
+ LINE6_INDEX_PREPARED,
+
+ /* individual bit masks: */
+ LINE6_BIT(PCM_ALSA_PLAYBACK_BUFFER),
+ LINE6_BIT(PCM_ALSA_PLAYBACK_STREAM),
+ LINE6_BIT(PCM_ALSA_CAPTURE_BUFFER),
+ LINE6_BIT(PCM_ALSA_CAPTURE_STREAM),
+ LINE6_BIT(PCM_MONITOR_PLAYBACK_BUFFER),
+ LINE6_BIT(PCM_MONITOR_PLAYBACK_STREAM),
+ LINE6_BIT(PCM_MONITOR_CAPTURE_BUFFER),
+ LINE6_BIT(PCM_MONITOR_CAPTURE_STREAM),
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- MASK_PCM_IMPULSE_PLAYBACK = 1 << BIT_PCM_IMPULSE_PLAYBACK,
- MASK_PCM_IMPULSE_CAPTURE = 1 << BIT_PCM_IMPULSE_CAPTURE,
+ LINE6_BIT(PCM_IMPULSE_PLAYBACK_BUFFER),
+ LINE6_BIT(PCM_IMPULSE_PLAYBACK_STREAM),
+ LINE6_BIT(PCM_IMPULSE_CAPTURE_BUFFER),
+ LINE6_BIT(PCM_IMPULSE_CAPTURE_STREAM),
#endif
- MASK_PAUSE_PLAYBACK = 1 << BIT_PAUSE_PLAYBACK,
- MASK_PREPARED = 1 << BIT_PREPARED,
-/* *INDENT-ON* */
+ LINE6_BIT(PAUSE_PLAYBACK),
+ LINE6_BIT(PREPARED),
- /* combined masks (by operation): */
- MASK_PCM_ALSA = MASK_PCM_ALSA_PLAYBACK | MASK_PCM_ALSA_CAPTURE,
- MASK_PCM_MONITOR = MASK_PCM_MONITOR_PLAYBACK | MASK_PCM_MONITOR_CAPTURE,
+ /* combined bit masks (by operation): */
+ LINE6_BITS_PCM_ALSA_BUFFER =
+ LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER |
+ LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER,
+
+ LINE6_BITS_PCM_ALSA_STREAM =
+ LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM |
+ LINE6_BIT_PCM_ALSA_CAPTURE_STREAM,
+
+ LINE6_BITS_PCM_MONITOR =
+ LINE6_BIT_PCM_MONITOR_PLAYBACK_BUFFER |
+ LINE6_BIT_PCM_MONITOR_PLAYBACK_STREAM |
+ LINE6_BIT_PCM_MONITOR_CAPTURE_BUFFER |
+ LINE6_BIT_PCM_MONITOR_CAPTURE_STREAM,
+
+#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
+ LINE6_BITS_PCM_IMPULSE =
+ LINE6_BIT_PCM_IMPULSE_PLAYBACK_BUFFER |
+ LINE6_BIT_PCM_IMPULSE_PLAYBACK_STREAM |
+ LINE6_BIT_PCM_IMPULSE_CAPTURE_BUFFER |
+ LINE6_BIT_PCM_IMPULSE_CAPTURE_STREAM,
+#endif
+
+ /* combined bit masks (by direction): */
+ LINE6_BITS_PLAYBACK_BUFFER =
+#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
+ LINE6_BIT_PCM_IMPULSE_PLAYBACK_BUFFER |
+#endif
+ LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER |
+ LINE6_BIT_PCM_MONITOR_PLAYBACK_BUFFER ,
+
+ LINE6_BITS_PLAYBACK_STREAM =
+#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
+ LINE6_BIT_PCM_IMPULSE_PLAYBACK_STREAM |
+#endif
+ LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM |
+ LINE6_BIT_PCM_MONITOR_PLAYBACK_STREAM ,
+
+ LINE6_BITS_CAPTURE_BUFFER =
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- MASK_PCM_IMPULSE = MASK_PCM_IMPULSE_PLAYBACK | MASK_PCM_IMPULSE_CAPTURE,
+ LINE6_BIT_PCM_IMPULSE_CAPTURE_BUFFER |
#endif
+ LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER |
+ LINE6_BIT_PCM_MONITOR_CAPTURE_BUFFER ,
- /* combined masks (by direction): */
+ LINE6_BITS_CAPTURE_STREAM =
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- MASK_PLAYBACK =
- MASK_PCM_ALSA_PLAYBACK | MASK_PCM_MONITOR_PLAYBACK |
- MASK_PCM_IMPULSE_PLAYBACK,
- MASK_CAPTURE =
- MASK_PCM_ALSA_CAPTURE | MASK_PCM_MONITOR_CAPTURE |
- MASK_PCM_IMPULSE_CAPTURE
-#else
- MASK_PLAYBACK = MASK_PCM_ALSA_PLAYBACK | MASK_PCM_MONITOR_PLAYBACK,
- MASK_CAPTURE = MASK_PCM_ALSA_CAPTURE | MASK_PCM_MONITOR_CAPTURE
+ LINE6_BIT_PCM_IMPULSE_CAPTURE_STREAM |
#endif
+ LINE6_BIT_PCM_ALSA_CAPTURE_STREAM |
+ LINE6_BIT_PCM_MONITOR_CAPTURE_STREAM,
+
+ LINE6_BITS_STREAM =
+ LINE6_BITS_PLAYBACK_STREAM |
+ LINE6_BITS_CAPTURE_STREAM
};
struct line6_pcm_properties {
@@ -290,7 +364,7 @@ struct snd_line6_pcm {
#endif
/**
- Several status bits (see BIT_*).
+ Several status bits (see LINE6_BIT_*).
*/
unsigned long flags;
@@ -302,16 +376,7 @@ extern int line6_init_pcm(struct usb_line6 *line6,
extern int snd_line6_trigger(struct snd_pcm_substream *substream, int cmd);
extern int snd_line6_prepare(struct snd_pcm_substream *substream);
extern void line6_pcm_disconnect(struct snd_line6_pcm *line6pcm);
-extern int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels);
-extern int line6_pcm_stop(struct snd_line6_pcm *line6pcm, int channels);
-
-#define PRINT_FRAME_DIFF(op) { \
- static int diff_prev = 1000; \
- int diff = line6pcm->last_frame_out - line6pcm->last_frame_in; \
- if ((diff != diff_prev) && (abs(diff) < 100)) { \
- printk(KERN_INFO "%s frame diff = %d\n", op, diff); \
- diff_prev = diff; \
- } \
-}
+extern int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int channels);
+extern int line6_pcm_release(struct snd_line6_pcm *line6pcm, int channels);
#endif
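
Taken together, the comment above and the call sites in capture.c and
playback.c give the intended usage pattern. In sketch form, for the ALSA
capture path:

    /* hw_params() may sleep, so it acquires the buffer resource: */
    err = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);

    /* trigger() must not sleep, so it only starts/stops the stream: */
    err = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_STREAM);
    err = line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_STREAM);

    /* hw_free() finally drops the buffer: */
    err = line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
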
diff --git a/drivers/staging/line6/playback.c b/drivers/staging/line6/playback.c
index 4152db2328b7..a0ab9d0493fa 100644
--- a/drivers/staging/line6/playback.c
+++ b/drivers/staging/line6/playback.c
@@ -166,7 +166,7 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
struct usb_iso_packet_descriptor *fout =
&urb_out->iso_frame_desc[i];
- if (line6pcm->flags & MASK_CAPTURE)
+ if (line6pcm->flags & LINE6_BITS_CAPTURE_STREAM)
fsize = line6pcm->prev_fsize;
if (fsize == 0) {
@@ -196,8 +196,8 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
urb_out->transfer_buffer_length = urb_size;
urb_out->context = line6pcm;
- if (test_bit(BIT_PCM_ALSA_PLAYBACK, &line6pcm->flags) &&
- !test_bit(BIT_PAUSE_PLAYBACK, &line6pcm->flags)) {
+ if (test_bit(LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM, &line6pcm->flags) &&
+ !test_bit(LINE6_INDEX_PAUSE_PLAYBACK, &line6pcm->flags)) {
struct snd_pcm_runtime *runtime =
get_substream(line6pcm, SNDRV_PCM_STREAM_PLAYBACK)->runtime;
@@ -238,10 +238,10 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
if (line6pcm->prev_fbuf != NULL) {
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- if (line6pcm->flags & MASK_PCM_IMPULSE) {
+ if (line6pcm->flags & LINE6_BITS_PCM_IMPULSE) {
create_impulse_test_signal(line6pcm, urb_out,
bytes_per_frame);
- if (line6pcm->flags & MASK_PCM_ALSA_CAPTURE) {
+ if (line6pcm->flags & LINE6_BIT_PCM_ALSA_CAPTURE_STREAM) {
line6_capture_copy(line6pcm,
urb_out->transfer_buffer,
urb_out->
@@ -254,8 +254,8 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
if (!
(line6pcm->line6->
properties->capabilities & LINE6_BIT_HWMON)
-&& (line6pcm->flags & MASK_PLAYBACK)
-&& (line6pcm->flags & MASK_CAPTURE))
+ && (line6pcm->flags & LINE6_BITS_PLAYBACK_STREAM)
+ && (line6pcm->flags & LINE6_BITS_CAPTURE_STREAM))
add_monitor_signal(urb_out, line6pcm->prev_fbuf,
line6pcm->volume_monitor,
bytes_per_frame);
@@ -321,7 +321,7 @@ void line6_unlink_audio_out_urbs(struct snd_line6_pcm *line6pcm)
/*
Wait until unlinking of all currently active playback URBs has been finished.
*/
-static void wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
+void line6_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
{
int timeout = HZ;
unsigned int i;
@@ -348,26 +348,7 @@ static void wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
void line6_unlink_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
{
line6_unlink_audio_out_urbs(line6pcm);
- wait_clear_audio_out_urbs(line6pcm);
-}
-
-int line6_alloc_playback_buffer(struct snd_line6_pcm *line6pcm)
-{
- /* We may be invoked multiple times in a row so allocate once only */
- if (line6pcm->buffer_out)
- return 0;
-
- line6pcm->buffer_out =
- kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
- line6pcm->max_packet_size, GFP_KERNEL);
-
- if (!line6pcm->buffer_out) {
- dev_err(line6pcm->line6->ifcdev,
- "cannot malloc playback buffer\n");
- return -ENOMEM;
- }
-
- return 0;
+ line6_wait_clear_audio_out_urbs(line6pcm);
}
void line6_free_playback_buffer(struct snd_line6_pcm *line6pcm)
@@ -407,7 +388,7 @@ static void audio_out_callback(struct urb *urb)
spin_lock_irqsave(&line6pcm->lock_audio_out, flags);
- if (test_bit(BIT_PCM_ALSA_PLAYBACK, &line6pcm->flags)) {
+ if (test_bit(LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM, &line6pcm->flags)) {
struct snd_pcm_runtime *runtime = substream->runtime;
line6pcm->pos_out_done +=
length / line6pcm->properties->bytes_per_frame;
@@ -432,7 +413,7 @@ static void audio_out_callback(struct urb *urb)
if (!shutdown) {
submit_audio_out_urb(line6pcm);
- if (test_bit(BIT_PCM_ALSA_PLAYBACK, &line6pcm->flags)) {
+ if (test_bit(LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM, &line6pcm->flags)) {
line6pcm->bytes_out += length;
if (line6pcm->bytes_out >= line6pcm->period_out) {
line6pcm->bytes_out %= line6pcm->period_out;
@@ -484,17 +465,17 @@ static int snd_line6_playback_hw_params(struct snd_pcm_substream *substream,
}
/* -- [FD] end */
- if ((line6pcm->flags & MASK_PLAYBACK) == 0) {
- ret = line6_alloc_playback_buffer(line6pcm);
+ ret = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER);
- if (ret < 0)
- return ret;
- }
+ if (ret < 0)
+ return ret;
ret = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
- if (ret < 0)
+ if (ret < 0) {
+ line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER);
return ret;
+ }
line6pcm->period_out = params_period_bytes(hw_params);
return 0;
@@ -504,12 +485,7 @@ static int snd_line6_playback_hw_params(struct snd_pcm_substream *substream,
static int snd_line6_playback_hw_free(struct snd_pcm_substream *substream)
{
struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
-
- if ((line6pcm->flags & MASK_PLAYBACK) == 0) {
- line6_unlink_wait_clear_audio_out_urbs(line6pcm);
- line6_free_playback_buffer(line6pcm);
- }
-
+ line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER);
return snd_pcm_lib_free_pages(substream);
}
@@ -523,7 +499,7 @@ int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd)
#ifdef CONFIG_PM
case SNDRV_PCM_TRIGGER_RESUME:
#endif
- err = line6_pcm_start(line6pcm, MASK_PCM_ALSA_PLAYBACK);
+ err = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM);
if (err < 0)
return err;
@@ -534,7 +510,7 @@ int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd)
#ifdef CONFIG_PM
case SNDRV_PCM_TRIGGER_SUSPEND:
#endif
- err = line6_pcm_stop(line6pcm, MASK_PCM_ALSA_PLAYBACK);
+ err = line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM);
if (err < 0)
return err;
@@ -542,11 +518,11 @@ int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd)
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- set_bit(BIT_PAUSE_PLAYBACK, &line6pcm->flags);
+ set_bit(LINE6_INDEX_PAUSE_PLAYBACK, &line6pcm->flags);
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- clear_bit(BIT_PAUSE_PLAYBACK, &line6pcm->flags);
+ clear_bit(LINE6_INDEX_PAUSE_PLAYBACK, &line6pcm->flags);
break;
default:
diff --git a/drivers/staging/line6/playback.h b/drivers/staging/line6/playback.h
index 02487ff24538..743bd6f74c57 100644
--- a/drivers/staging/line6/playback.h
+++ b/drivers/staging/line6/playback.h
@@ -29,13 +29,13 @@
extern struct snd_pcm_ops snd_line6_playback_ops;
-extern int line6_alloc_playback_buffer(struct snd_line6_pcm *line6pcm);
extern int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_free_playback_buffer(struct snd_line6_pcm *line6pcm);
extern int line6_submit_audio_out_all_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_audio_out_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_wait_clear_audio_out_urbs(struct snd_line6_pcm
*line6pcm);
+extern void line6_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm);
extern int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd);
#endif
diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
index f31057830dbc..b754f69a29c4 100644
--- a/drivers/staging/line6/toneport.c
+++ b/drivers/staging/line6/toneport.c
@@ -207,9 +207,9 @@ static int snd_toneport_monitor_put(struct snd_kcontrol *kcontrol,
line6pcm->volume_monitor = ucontrol->value.integer.value[0];
if (line6pcm->volume_monitor > 0)
- line6_pcm_start(line6pcm, MASK_PCM_MONITOR);
+ line6_pcm_acquire(line6pcm, LINE6_BITS_PCM_MONITOR);
else
- line6_pcm_stop(line6pcm, MASK_PCM_MONITOR);
+ line6_pcm_release(line6pcm, LINE6_BITS_PCM_MONITOR);
return 1;
}
@@ -264,7 +264,7 @@ static void toneport_start_pcm(unsigned long arg)
{
struct usb_line6_toneport *toneport = (struct usb_line6_toneport *)arg;
struct usb_line6 *line6 = &toneport->line6;
- line6_pcm_start(line6->line6pcm, MASK_PCM_MONITOR);
+ line6_pcm_acquire(line6->line6pcm, LINE6_BITS_PCM_MONITOR);
}
/* control definition */
@@ -320,7 +320,9 @@ static void toneport_setup(struct usb_line6_toneport *toneport)
/* initialize source select: */
switch (usbdev->descriptor.idProduct) {
case LINE6_DEVID_TONEPORT_UX1:
+ case LINE6_DEVID_TONEPORT_UX2:
case LINE6_DEVID_PODSTUDIO_UX1:
+ case LINE6_DEVID_PODSTUDIO_UX2:
toneport_send_cmd(usbdev,
toneport_source_info[toneport->source].code,
0x0000);
@@ -363,7 +365,9 @@ static int toneport_try_init(struct usb_interface *interface,
/* register source select control: */
switch (usbdev->descriptor.idProduct) {
case LINE6_DEVID_TONEPORT_UX1:
+ case LINE6_DEVID_TONEPORT_UX2:
case LINE6_DEVID_PODSTUDIO_UX1:
+ case LINE6_DEVID_PODSTUDIO_UX2:
err =
snd_ctl_add(line6->card,
snd_ctl_new1(&toneport_control_source,
@@ -442,7 +446,7 @@ void line6_toneport_disconnect(struct usb_interface *interface)
struct snd_line6_pcm *line6pcm = toneport->line6.line6pcm;
if (line6pcm != NULL) {
- line6_pcm_stop(line6pcm, MASK_PCM_MONITOR);
+ line6_pcm_release(line6pcm, LINE6_BITS_PCM_MONITOR);
line6_pcm_disconnect(line6pcm);
}
}
diff --git a/drivers/staging/line6/usbdefs.h b/drivers/staging/line6/usbdefs.h
index aff9e5caea46..353d59d77b04 100644
--- a/drivers/staging/line6/usbdefs.h
+++ b/drivers/staging/line6/usbdefs.h
@@ -39,31 +39,29 @@
#define LINE6_DEVID_TONEPORT_UX2 0x4142
#define LINE6_DEVID_VARIAX 0x534d
-enum {
- LINE6_ID_BASSPODXT,
- LINE6_ID_BASSPODXTLIVE,
- LINE6_ID_BASSPODXTPRO,
- LINE6_ID_GUITARPORT,
- LINE6_ID_POCKETPOD,
- LINE6_ID_PODHD300,
- LINE6_ID_PODHD500,
- LINE6_ID_PODSTUDIO_GX,
- LINE6_ID_PODSTUDIO_UX1,
- LINE6_ID_PODSTUDIO_UX2,
- LINE6_ID_PODX3,
- LINE6_ID_PODX3LIVE,
- LINE6_ID_PODXT,
- LINE6_ID_PODXTLIVE,
- LINE6_ID_PODXTPRO,
- LINE6_ID_TONEPORT_GX,
- LINE6_ID_TONEPORT_UX1,
- LINE6_ID_TONEPORT_UX2,
- LINE6_ID_VARIAX
-};
-
-#define LINE6_BIT(x) LINE6_BIT_ ## x = 1 << LINE6_ID_ ## x
+#define LINE6_BIT(x) LINE6_BIT_ ## x = 1 << LINE6_INDEX_ ## x
enum {
+ LINE6_INDEX_BASSPODXT,
+ LINE6_INDEX_BASSPODXTLIVE,
+ LINE6_INDEX_BASSPODXTPRO,
+ LINE6_INDEX_GUITARPORT,
+ LINE6_INDEX_POCKETPOD,
+ LINE6_INDEX_PODHD300,
+ LINE6_INDEX_PODHD500,
+ LINE6_INDEX_PODSTUDIO_GX,
+ LINE6_INDEX_PODSTUDIO_UX1,
+ LINE6_INDEX_PODSTUDIO_UX2,
+ LINE6_INDEX_PODX3,
+ LINE6_INDEX_PODX3LIVE,
+ LINE6_INDEX_PODXT,
+ LINE6_INDEX_PODXTLIVE,
+ LINE6_INDEX_PODXTPRO,
+ LINE6_INDEX_TONEPORT_GX,
+ LINE6_INDEX_TONEPORT_UX1,
+ LINE6_INDEX_TONEPORT_UX2,
+ LINE6_INDEX_VARIAX,
+
LINE6_BIT(BASSPODXT),
LINE6_BIT(BASSPODXTLIVE),
LINE6_BIT(BASSPODXTPRO),
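
The reworked LINE6_BIT() macro relies on token pasting to derive each
single-bit mask enumerator from its index enumerator. In isolation, with
invented FOO/BAR names:

    #define LINE6_BIT(x) LINE6_BIT_ ## x = 1 << LINE6_INDEX_ ## x

    enum {
            LINE6_INDEX_FOO,        /* = 0 */
            LINE6_INDEX_BAR,        /* = 1 */

            LINE6_BIT(FOO),         /* LINE6_BIT_FOO = 1 << 0 */
            LINE6_BIT(BAR),         /* LINE6_BIT_BAR = 1 << 1 */
    };
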
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index 7855baa18e75..74421043b954 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -90,11 +90,11 @@ static void __exit sasem_exit(void);
struct sasem_context {
struct usb_device *dev;
- int vfd_isopen; /* VFD port has been opened */
- unsigned int vfd_contrast; /* VFD contrast */
- int ir_isopen; /* IR port has been opened */
- int dev_present; /* USB device presence */
- struct mutex ctx_lock; /* to lock this object */
+ int vfd_isopen; /* VFD port has been opened */
+ unsigned int vfd_contrast; /* VFD contrast */
+ int ir_isopen; /* IR port has been opened */
+ int dev_present; /* USB device presence */
+ struct mutex ctx_lock; /* to lock this object */
wait_queue_head_t remove_ok; /* For unexpected USB disconnects */
struct lirc_driver *driver;
@@ -106,10 +106,11 @@ struct sasem_context {
unsigned char usb_tx_buf[8];
struct tx_t {
- unsigned char data_buf[SASEM_DATA_BUF_SZ]; /* user data buffer */
+ unsigned char data_buf[SASEM_DATA_BUF_SZ]; /* user data
+ * buffer */
struct completion finished; /* wait for write to finish */
- atomic_t busy; /* write in progress */
- int status; /* status of tx completion */
+ atomic_t busy; /* write in progress */
+ int status; /* status of tx completion */
} tx;
/* for dealing with repeat codes (wish there was a toggle bit!) */
diff --git a/drivers/staging/mei/TODO b/drivers/staging/mei/TODO
index 7d9a13b0f2dd..fc266018355e 100644
--- a/drivers/staging/mei/TODO
+++ b/drivers/staging/mei/TODO
@@ -3,5 +3,8 @@ TODO:
Upon Unstaging:
- move mei.h to include/linux/mei.h
- Documentation/ioctl/ioctl-number.txt
+ - move mei.txt under Documentation/mei/
+ - move mei-amt-version.c under Documentation/mei
+ - add hostprogs-y for mei-amt-version.c
- drop mei_version.h
- Updated MAINTAINERS
diff --git a/drivers/staging/mei/hw.h b/drivers/staging/mei/hw.h
index 9b9008cb6938..24c4c962819e 100644
--- a/drivers/staging/mei/hw.h
+++ b/drivers/staging/mei/hw.h
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -141,6 +141,11 @@ access to ME_CBD */
#define HBM_MAJOR_VERSION 1
#define HBM_TIMEOUT 1 /* 1 second */
+/* Host bus message command opcode */
+#define MEI_HBM_CMD_OP_MSK 0x7f
+/* Host bus message command RESPONSE */
+#define MEI_HBM_CMD_RES_MSK 0x80
+
/*
* MEI Bus Message Command IDs
*/
@@ -164,7 +169,7 @@ access to ME_CBD */
#define CLIENT_DISCONNECT_REQ_CMD 0x07
#define CLIENT_DISCONNECT_RES_CMD 0x87
-#define MEI_FLOW_CONTROL_CMD 0x08
+#define MEI_FLOW_CONTROL_CMD 0x08
/*
* MEI Stop Reason
@@ -213,15 +218,9 @@ struct mei_msg_hdr {
} __packed;
-struct hbm_cmd {
- u8 cmd:7;
- u8 is_response:1;
-} __packed;
-
-
struct mei_bus_message {
- struct hbm_cmd cmd;
- u8 command_specific_data[];
+ u8 hbm_cmd;
+ u8 data[0];
} __packed;
struct hbm_version {
@@ -230,41 +229,41 @@ struct hbm_version {
} __packed;
struct hbm_host_version_request {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 reserved;
struct hbm_version host_version;
} __packed;
struct hbm_host_version_response {
- struct hbm_cmd cmd;
- int host_version_supported;
+ u8 hbm_cmd;
+ u8 host_version_supported;
struct hbm_version me_max_version;
} __packed;
struct hbm_host_stop_request {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 reason;
u8 reserved[2];
} __packed;
struct hbm_host_stop_response {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 reserved[3];
} __packed;
struct hbm_me_stop_request {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 reason;
u8 reserved[2];
} __packed;
struct hbm_host_enum_request {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 reserved[3];
} __packed;
struct hbm_host_enum_response {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 reserved[3];
u8 valid_addresses[32];
} __packed;
@@ -279,14 +278,14 @@ struct mei_client_properties {
} __packed;
struct hbm_props_request {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 address;
u8 reserved[2];
} __packed;
struct hbm_props_response {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 address;
u8 status;
u8 reserved[1];
@@ -294,21 +293,21 @@ struct hbm_props_response {
} __packed;
struct hbm_client_connect_request {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 me_addr;
u8 host_addr;
u8 reserved;
} __packed;
struct hbm_client_connect_response {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 me_addr;
u8 host_addr;
u8 status;
} __packed;
struct hbm_client_disconnect_request {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 me_addr;
u8 host_addr;
u8 reserved[1];
@@ -317,7 +316,7 @@ struct hbm_client_disconnect_request {
#define MEI_FC_MESSAGE_RESERVED_LENGTH 5
struct hbm_flow_control {
- struct hbm_cmd cmd;
+ u8 hbm_cmd;
u8 me_addr;
u8 host_addr;
u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH];
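The hw.h hunks above replace the two-member bitfield struct with a plain byte plus two masks. A runnable sketch of how a handler can split that byte (the helper names are illustrative, not functions from the driver):

#include <stdint.h>
#include <stdio.h>

#define MEI_HBM_CMD_OP_MSK	0x7f	/* low 7 bits: opcode */
#define MEI_HBM_CMD_RES_MSK	0x80	/* high bit: response flag */

static uint8_t hbm_opcode(uint8_t hbm_cmd)
{
	return hbm_cmd & MEI_HBM_CMD_OP_MSK;
}

static int hbm_is_response(uint8_t hbm_cmd)
{
	return (hbm_cmd & MEI_HBM_CMD_RES_MSK) != 0;
}

int main(void)
{
	uint8_t cmd = 0x87;	/* CLIENT_DISCONNECT_RES_CMD */

	printf("opcode=0x%02x response=%d\n",
	       hbm_opcode(cmd), hbm_is_response(cmd));
	return 0;
}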
diff --git a/drivers/staging/mei/init.c b/drivers/staging/mei/init.c
index 4ac3696883cb..eab711fb5fc4 100644
--- a/drivers/staging/mei/init.c
+++ b/drivers/staging/mei/init.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -362,11 +362,11 @@ void mei_host_start_message(struct mei_device *dev)
host_start_req =
(struct hbm_host_version_request *) &dev->wr_msg_buf[1];
memset(host_start_req, 0, sizeof(struct hbm_host_version_request));
- host_start_req->cmd.cmd = HOST_START_REQ_CMD;
+ host_start_req->hbm_cmd = HOST_START_REQ_CMD;
host_start_req->host_version.major_version = HBM_MAJOR_VERSION;
host_start_req->host_version.minor_version = HBM_MINOR_VERSION;
dev->recvd_msg = false;
- if (!mei_write_message(dev, mei_hdr, (unsigned char *)host_start_req,
+ if (mei_write_message(dev, mei_hdr, (unsigned char *)host_start_req,
mei_hdr->length)) {
dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
dev->mei_state = MEI_RESETING;
@@ -398,8 +398,8 @@ void mei_host_enum_clients_message(struct mei_device *dev)
host_enum_req = (struct hbm_host_enum_request *) &dev->wr_msg_buf[1];
memset(host_enum_req, 0, sizeof(struct hbm_host_enum_request));
- host_enum_req->cmd.cmd = HOST_ENUM_REQ_CMD;
- if (!mei_write_message(dev, mei_hdr, (unsigned char *)host_enum_req,
+ host_enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
+ if (mei_write_message(dev, mei_hdr, (unsigned char *)host_enum_req,
mei_hdr->length)) {
dev->mei_state = MEI_RESETING;
dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
@@ -407,7 +407,7 @@ void mei_host_enum_clients_message(struct mei_device *dev)
}
dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE;
dev->init_clients_timer = INIT_CLIENTS_TIMEOUT;
- return ;
+ return;
}
@@ -482,10 +482,10 @@ int mei_host_client_properties(struct mei_device *dev)
memset(host_cli_req, 0, sizeof(struct hbm_props_request));
- host_cli_req->cmd.cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+ host_cli_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
host_cli_req->address = b;
- if (!mei_write_message(dev, mei_header,
+ if (mei_write_message(dev, mei_header,
(unsigned char *)host_cli_req,
mei_header->length)) {
dev->mei_state = MEI_RESETING;
@@ -608,7 +608,7 @@ void mei_host_init_iamthif(struct mei_device *dev)
dev->iamthif_msg_buf = msg_buf;
- if (!mei_connect(dev, &dev->iamthif_cl)) {
+ if (mei_connect(dev, &dev->iamthif_cl)) {
dev_dbg(&dev->pdev->dev, "Failed to connect to AMTHI client\n");
dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
dev->iamthif_cl.host_client_id = 0;
@@ -670,14 +670,12 @@ int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl)
if (dev->mei_host_buffer_is_empty) {
dev->mei_host_buffer_is_empty = false;
if (mei_disconnect(dev, cl)) {
- mdelay(10); /* Wait for hardware disconnection ready */
- list_add_tail(&cb->cb_list,
- &dev->ctrl_rd_list.mei_cb.cb_list);
- } else {
rets = -ENODEV;
dev_dbg(&dev->pdev->dev, "failed to call mei_disconnect.\n");
goto free;
}
+ mdelay(10); /* Wait for hardware disconnection ready */
+ list_add_tail(&cb->cb_list, &dev->ctrl_rd_list.mei_cb.cb_list);
} else {
dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
list_add_tail(&cb->cb_list,
diff --git a/drivers/staging/mei/interface.c b/drivers/staging/mei/interface.c
index eb5df7fc2269..9a2cfafc52a6 100644
--- a/drivers/staging/mei/interface.c
+++ b/drivers/staging/mei/interface.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -125,7 +125,7 @@ int mei_count_empty_write_slots(struct mei_device *dev)
* @write_buffer: message buffer will be written
* @write_length: message size will be written
*
- * returns 1 if success, 0 - otherwise.
+ * This function returns -EIO if write has failed
*/
int mei_write_message(struct mei_device *dev,
struct mei_msg_hdr *header,
@@ -157,7 +157,7 @@ int mei_write_message(struct mei_device *dev,
dw_to_write = ((write_length + 3) / 4);
if (dw_to_write > empty_slots)
- return 0;
+ return -EIO;
mei_reg_write(dev, H_CB_WW, *((u32 *) header));
@@ -177,9 +177,9 @@ int mei_write_message(struct mei_device *dev,
mei_hcsr_set(dev);
dev->me_hw_state = mei_mecsr_read(dev);
if ((dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA)
- return 0;
+ return -EIO;
- return 1;
+ return 0;
}
/**
@@ -215,26 +215,17 @@ int mei_count_full_read_slots(struct mei_device *dev)
* @buffer: message buffer will be written
* @buffer_length: message size will be read
*/
-void mei_read_slots(struct mei_device *dev,
- unsigned char *buffer, unsigned long buffer_length)
+void mei_read_slots(struct mei_device *dev, unsigned char *buffer,
+ unsigned long buffer_length)
{
- u32 i = 0;
- unsigned char temp_buf[sizeof(u32)];
-
- while (buffer_length >= sizeof(u32)) {
- ((u32 *) buffer)[i] = mei_mecbrw_read(dev);
+ u32 *reg_buf = (u32 *)buffer;
- dev_dbg(&dev->pdev->dev,
- "buffer[%d]= %d\n",
- i, ((u32 *) buffer)[i]);
-
- i++;
- buffer_length -= sizeof(u32);
- }
+ for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
+ *reg_buf++ = mei_mecbrw_read(dev);
if (buffer_length > 0) {
- *((u32 *) &temp_buf) = mei_mecbrw_read(dev);
- memcpy(&buffer[i * 4], temp_buf, buffer_length);
+ u32 reg = mei_mecbrw_read(dev);
+ memcpy(reg_buf, &reg, buffer_length);
}
dev->host_hw_state |= H_IG;
@@ -284,7 +275,7 @@ int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl)
* @returns
* 0 on success
* -ENOENT when me client is not found
- * -EINVAL wehn ctrl credits are <= 0
+ * -EINVAL when ctrl credits are <= 0
*/
int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl)
{
@@ -317,7 +308,7 @@ int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl)
* @dev: the device structure
* @cl: private data of the file object
*
- * returns 1 if success, 0 - otherwise.
+ * This function returns -EIO on write failure
*/
int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
{
@@ -335,18 +326,15 @@ int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
memset(mei_flow_control, 0, sizeof(*mei_flow_control));
mei_flow_control->host_addr = cl->host_client_id;
mei_flow_control->me_addr = cl->me_client_id;
- mei_flow_control->cmd.cmd = MEI_FLOW_CONTROL_CMD;
+ mei_flow_control->hbm_cmd = MEI_FLOW_CONTROL_CMD;
memset(mei_flow_control->reserved, 0,
sizeof(mei_flow_control->reserved));
dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
- cl->host_client_id, cl->me_client_id);
- if (!mei_write_message(dev, mei_hdr,
- (unsigned char *) mei_flow_control,
- sizeof(struct hbm_flow_control)))
- return 0;
-
- return 1;
+ cl->host_client_id, cl->me_client_id);
+ return mei_write_message(dev, mei_hdr,
+ (unsigned char *) mei_flow_control,
+ sizeof(struct hbm_flow_control));
}
/**
@@ -380,7 +368,7 @@ int mei_other_client_is_connecting(struct mei_device *dev,
* @dev: the device structure
* @cl: private data of the file object
*
- * returns 1 if success, 0 - otherwise.
+ * This function returns -EIO on write failure
*/
int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
{
@@ -399,15 +387,12 @@ int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
memset(mei_cli_disconnect, 0, sizeof(*mei_cli_disconnect));
mei_cli_disconnect->host_addr = cl->host_client_id;
mei_cli_disconnect->me_addr = cl->me_client_id;
- mei_cli_disconnect->cmd.cmd = CLIENT_DISCONNECT_REQ_CMD;
+ mei_cli_disconnect->hbm_cmd = CLIENT_DISCONNECT_REQ_CMD;
mei_cli_disconnect->reserved[0] = 0;
- if (!mei_write_message(dev, mei_hdr,
+ return mei_write_message(dev, mei_hdr,
(unsigned char *) mei_cli_disconnect,
- sizeof(struct hbm_client_disconnect_request)))
- return 0;
-
- return 1;
+ sizeof(struct hbm_client_disconnect_request));
}
/**
@@ -416,7 +401,7 @@ int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
* @dev: the device structure
* @cl: private data of the file object
*
- * returns 1 if success, 0 - otherwise.
+ * This function returns -EIO on write failure
*/
int mei_connect(struct mei_device *dev, struct mei_cl *cl)
{
@@ -434,13 +419,10 @@ int mei_connect(struct mei_device *dev, struct mei_cl *cl)
(struct hbm_client_connect_request *) &dev->wr_msg_buf[1];
mei_cli_connect->host_addr = cl->host_client_id;
mei_cli_connect->me_addr = cl->me_client_id;
- mei_cli_connect->cmd.cmd = CLIENT_CONNECT_REQ_CMD;
+ mei_cli_connect->hbm_cmd = CLIENT_CONNECT_REQ_CMD;
mei_cli_connect->reserved = 0;
- if (!mei_write_message(dev, mei_hdr,
+ return mei_write_message(dev, mei_hdr,
(unsigned char *) mei_cli_connect,
- sizeof(struct hbm_client_connect_request)))
- return 0;
-
- return 1;
+ sizeof(struct hbm_client_connect_request));
}
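Two things change in interface.c: mei_write_message() now follows the kernel's 0/-errno convention (hence the inverted checks at every call site), and mei_read_slots() becomes a dword copy loop with a memcpy for the sub-dword tail. A runnable sketch of the latter, where read_reg() merely stands in for mei_mecbrw_read():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t read_reg(void)
{
	static uint32_t v;
	return ++v;	/* test data in place of the hardware register */
}

static void read_slots(unsigned char *buffer, unsigned long len)
{
	uint32_t *reg_buf = (uint32_t *)buffer;

	for (; len >= sizeof(uint32_t); len -= sizeof(uint32_t))
		*reg_buf++ = read_reg();

	if (len > 0) {	/* partial trailing dword */
		uint32_t reg = read_reg();
		memcpy(reg_buf, &reg, len);
	}
}

int main(void)
{
	unsigned char buf[10];	/* two full dwords plus a 2-byte tail */

	read_slots(buf, sizeof(buf));
	printf("first byte: %u\n", buf[0]);
	return 0;
}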
diff --git a/drivers/staging/mei/interface.h b/drivers/staging/mei/interface.h
index aeae511419c7..fb90c6f8a759 100644
--- a/drivers/staging/mei/interface.h
+++ b/drivers/staging/mei/interface.h
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -33,7 +33,8 @@
void mei_read_slots(struct mei_device *dev,
- unsigned char *buffer, unsigned long buffer_length);
+ unsigned char *buffer,
+ unsigned long buffer_length);
int mei_write_message(struct mei_device *dev,
struct mei_msg_hdr *header,
@@ -59,7 +60,7 @@ void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout);
*/
void mei_watchdog_register(struct mei_device *dev);
/*
- * mei_watchdog_unregister - Uegistering watchdog interface
+ * mei_watchdog_unregister - Unregistering watchdog interface
* @dev - mei device
*/
void mei_watchdog_unregister(struct mei_device *dev);
diff --git a/drivers/staging/mei/interrupt.c b/drivers/staging/mei/interrupt.c
index 3544fee34e48..2007d2447b1c 100644
--- a/drivers/staging/mei/interrupt.c
+++ b/drivers/staging/mei/interrupt.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -123,8 +123,7 @@ static int mei_irq_thread_read_amthi_message(struct mei_io_list *complete_list,
BUG_ON(mei_hdr->me_addr != dev->iamthif_cl.me_client_id);
BUG_ON(dev->iamthif_state != MEI_IAMTHIF_READING);
- buffer = (unsigned char *) (dev->iamthif_msg_buf +
- dev->iamthif_msg_buf_index);
+ buffer = dev->iamthif_msg_buf + dev->iamthif_msg_buf_index;
BUG_ON(dev->iamthif_mtu < dev->iamthif_msg_buf_index + mei_hdr->length);
mei_read_slots(dev, buffer, mei_hdr->length);
@@ -206,9 +205,7 @@ static int mei_irq_thread_read_client_message(struct mei_io_list *complete_list,
cl = (struct mei_cl *)cb_pos->file_private;
if (cl && _mei_irq_thread_state_ok(cl, mei_hdr)) {
cl->reading_state = MEI_READING;
- buffer = (unsigned char *)
- (cb_pos->response_buffer.data +
- cb_pos->information);
+ buffer = cb_pos->response_buffer.data + cb_pos->information;
if (cb_pos->response_buffer.size <
mei_hdr->length + cb_pos->information) {
@@ -247,8 +244,7 @@ static int mei_irq_thread_read_client_message(struct mei_io_list *complete_list,
quit:
dev_dbg(&dev->pdev->dev, "message read\n");
if (!buffer) {
- mei_read_slots(dev, (unsigned char *) dev->rd_msg_buf,
- mei_hdr->length);
+ mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
dev_dbg(&dev->pdev->dev, "discarding message, header =%08x.\n",
*(u32 *) dev->rd_msg_buf);
}
@@ -267,26 +263,25 @@ quit:
static int _mei_irq_thread_iamthif_read(struct mei_device *dev, s32 *slots)
{
- if (((*slots) * sizeof(u32)) >= (sizeof(struct mei_msg_hdr)
+ if (((*slots) * sizeof(u32)) < (sizeof(struct mei_msg_hdr)
+ sizeof(struct hbm_flow_control))) {
- *slots -= (sizeof(struct mei_msg_hdr) +
- sizeof(struct hbm_flow_control) + 3) / 4;
- if (!mei_send_flow_control(dev, &dev->iamthif_cl)) {
- dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n");
- } else {
- dev_dbg(&dev->pdev->dev, "iamthif flow control success\n");
- dev->iamthif_state = MEI_IAMTHIF_READING;
- dev->iamthif_flow_control_pending = false;
- dev->iamthif_msg_buf_index = 0;
- dev->iamthif_msg_buf_size = 0;
- dev->iamthif_stall_timer = IAMTHIF_STALL_TIMER;
- dev->mei_host_buffer_is_empty =
- mei_host_buffer_is_empty(dev);
- }
- return 0;
- } else {
return -EMSGSIZE;
}
+ *slots -= (sizeof(struct mei_msg_hdr) +
+ sizeof(struct hbm_flow_control) + 3) / 4;
+ if (mei_send_flow_control(dev, &dev->iamthif_cl)) {
+ dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n");
+ return -EIO;
+ }
+
+ dev_dbg(&dev->pdev->dev, "iamthif flow control success\n");
+ dev->iamthif_state = MEI_IAMTHIF_READING;
+ dev->iamthif_flow_control_pending = false;
+ dev->iamthif_msg_buf_index = 0;
+ dev->iamthif_msg_buf_size = 0;
+ dev->iamthif_stall_timer = IAMTHIF_STALL_TIMER;
+ dev->mei_host_buffer_is_empty = mei_host_buffer_is_empty(dev);
+ return 0;
}
/**
@@ -310,7 +305,7 @@ static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
*slots -= (sizeof(struct mei_msg_hdr) +
sizeof(struct hbm_client_disconnect_request) + 3) / 4;
- if (!mei_disconnect(dev, cl)) {
+ if (mei_disconnect(dev, cl)) {
cl->status = 0;
cb_pos->information = 0;
list_move_tail(&cb_pos->cb_list,
@@ -601,8 +596,7 @@ static void mei_client_disconnect_request(struct mei_device *dev,
&dev->ext_msg_buf[1];
disconnect_res->host_addr = cl_pos->host_client_id;
disconnect_res->me_addr = cl_pos->me_client_id;
- *(u8 *) (&disconnect_res->cmd) =
- CLIENT_DISCONNECT_RES_CMD;
+ disconnect_res->hbm_cmd = CLIENT_DISCONNECT_RES_CMD;
disconnect_res->status = 0;
dev->extra_write_index = 2;
break;
@@ -632,15 +626,13 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
struct hbm_host_stop_request *host_stop_req;
int res;
- unsigned char *buffer;
/* read the message to our buffer */
- buffer = (unsigned char *) dev->rd_msg_buf;
BUG_ON(mei_hdr->length >= sizeof(dev->rd_msg_buf));
- mei_read_slots(dev, buffer, mei_hdr->length);
- mei_msg = (struct mei_bus_message *) buffer;
+ mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
+ mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
- switch (*(u8 *) mei_msg) {
+ switch (mei_msg->hbm_cmd) {
case HOST_START_RES_CMD:
version_res = (struct hbm_host_version_response *) mei_msg;
if (version_res->host_version_supported) {
@@ -659,6 +651,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
} else {
dev->version = version_res->me_max_version;
/* send stop message */
+ mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
mei_hdr->host_addr = 0;
mei_hdr->me_addr = 0;
mei_hdr->length = sizeof(struct hbm_host_stop_request);
@@ -671,7 +664,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
memset(host_stop_req,
0,
sizeof(struct hbm_host_stop_request));
- host_stop_req->cmd.cmd = HOST_STOP_REQ_CMD;
+ host_stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
host_stop_req->reason = DRIVER_STOP_REQUEST;
mei_write_message(dev, mei_hdr,
(unsigned char *) (host_stop_req),
@@ -725,7 +718,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
dev->me_client_index++;
dev->me_client_presentation_num++;
- /** Send Client Propeties request **/
+ /** Send Client Properties request **/
res = mei_host_client_properties(dev);
if (res < 0) {
dev_dbg(&dev->pdev->dev, "mei_host_client_properties() failed");
@@ -811,7 +804,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
host_stop_req =
(struct hbm_host_stop_request *) &dev->ext_msg_buf[1];
memset(host_stop_req, 0, sizeof(struct hbm_host_stop_request));
- host_stop_req->cmd.cmd = HOST_STOP_REQ_CMD;
+ host_stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
host_stop_req->reason = DRIVER_STOP_REQUEST;
host_stop_req->reserved[0] = 0;
host_stop_req->reserved[1] = 0;
@@ -844,24 +837,21 @@ static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
{
if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) +
sizeof(struct hbm_flow_control))) {
- *slots -= (sizeof(struct mei_msg_hdr) +
- sizeof(struct hbm_flow_control) + 3) / 4;
- if (!mei_send_flow_control(dev, cl)) {
- cl->status = -ENODEV;
- cb_pos->information = 0;
- list_move_tail(&cb_pos->cb_list,
- &cmpl_list->mei_cb.cb_list);
- return -ENODEV;
- } else {
- list_move_tail(&cb_pos->cb_list,
- &dev->read_list.mei_cb.cb_list);
- }
- } else {
/* return the cancel routine */
list_del(&cb_pos->cb_list);
return -EBADMSG;
}
+ *slots -= (sizeof(struct mei_msg_hdr) +
+ sizeof(struct hbm_flow_control) + 3) / 4;
+ if (mei_send_flow_control(dev, cl)) {
+ cl->status = -ENODEV;
+ cb_pos->information = 0;
+ list_move_tail(&cb_pos->cb_list, &cmpl_list->mei_cb.cb_list);
+ return -ENODEV;
+ }
+ list_move_tail(&cb_pos->cb_list, &dev->read_list.mei_cb.cb_list);
+
return 0;
}
@@ -887,7 +877,7 @@ static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
cl->state = MEI_FILE_CONNECTING;
*slots -= (sizeof(struct mei_msg_hdr) +
sizeof(struct hbm_client_connect_request) + 3) / 4;
- if (!mei_connect(dev, cl)) {
+ if (mei_connect(dev, cl)) {
cl->status = -ENODEV;
cb_pos->information = 0;
list_del(&cb_pos->cb_list);
@@ -944,7 +934,7 @@ static int _mei_irq_thread_cmpl(struct mei_device *dev, s32 *slots,
mei_hdr->length);
*slots -= (sizeof(struct mei_msg_hdr) +
mei_hdr->length + 3) / 4;
- if (!mei_write_message(dev, mei_hdr,
+ if (mei_write_message(dev, mei_hdr,
(unsigned char *)
(cb_pos->request_buffer.data +
cb_pos->information),
@@ -973,7 +963,7 @@ static int _mei_irq_thread_cmpl(struct mei_device *dev, s32 *slots,
(*slots) -= (sizeof(struct mei_msg_hdr) +
mei_hdr->length + 3) / 4;
- if (!mei_write_message(dev, mei_hdr,
+ if (mei_write_message(dev, mei_hdr,
(unsigned char *)
(cb_pos->request_buffer.data +
cb_pos->information),
@@ -1034,7 +1024,7 @@ static int _mei_irq_thread_cmpl_iamthif(struct mei_device *dev, s32 *slots,
*slots -= (sizeof(struct mei_msg_hdr) +
mei_hdr->length + 3) / 4;
- if (!mei_write_message(dev, mei_hdr,
+ if (mei_write_message(dev, mei_hdr,
(dev->iamthif_msg_buf +
dev->iamthif_msg_buf_index),
mei_hdr->length)) {
@@ -1069,7 +1059,7 @@ static int _mei_irq_thread_cmpl_iamthif(struct mei_device *dev, s32 *slots,
*slots -= (sizeof(struct mei_msg_hdr) +
mei_hdr->length + 3) / 4;
- if (!mei_write_message(dev, mei_hdr,
+ if (mei_write_message(dev, mei_hdr,
(dev->iamthif_msg_buf +
dev->iamthif_msg_buf_index),
mei_hdr->length)) {
@@ -1286,7 +1276,7 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
}
}
if (dev->stop)
- return ~ENODEV;
+ return -ENODEV;
/* complete control write list CB */
dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
@@ -1423,7 +1413,7 @@ void mei_timer(struct work_struct *work)
if (dev->iamthif_stall_timer) {
if (--dev->iamthif_stall_timer == 0) {
- dev_dbg(&dev->pdev->dev, "reseting because of hang to amthi.\n");
+ dev_dbg(&dev->pdev->dev, "resetting because of hang to amthi.\n");
mei_reset(dev, 1);
dev->iamthif_msg_buf_size = 0;
dev->iamthif_msg_buf_index = 0;
@@ -1513,7 +1503,7 @@ irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
dev->host_hw_state = mei_hcsr_read(dev);
/* Ack the interrupt here
- * In case of MSI we don't go throuhg the quick handler */
+ * In case of MSI we don't go through the quick handler */
if (pci_dev_msi_enabled(dev->pdev))
mei_reg_write(dev, H_CSR, dev->host_hw_state);
@@ -1549,7 +1539,7 @@ irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
}
- /* check slots avalable for reading */
+ /* check slots available for reading */
slots = mei_count_full_read_slots(dev);
dev_dbg(&dev->pdev->dev, "slots =%08x extra_write_index =%08x.\n",
slots, dev->extra_write_index);
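The slot arithmetic repeated throughout these hunks converts a byte count into hardware slots, which are 32-bit words, by rounding up. A minimal sketch:

#include <stdio.h>

/* equivalent to the kernel's DIV_ROUND_UP(bytes, 4) */
static int slots_needed(unsigned int hdr_len, unsigned int body_len)
{
	return (hdr_len + body_len + 3) / 4;
}

int main(void)
{
	/* e.g. a 4-byte header plus an 8-byte flow-control body: 3 slots */
	printf("%d\n", slots_needed(4, 8));
	return 0;
}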
diff --git a/drivers/staging/mei/iorw.c b/drivers/staging/mei/iorw.c
index 0752ead4269a..0a80dc4e62f3 100644
--- a/drivers/staging/mei/iorw.c
+++ b/drivers/staging/mei/iorw.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -37,7 +37,6 @@
#include "hw.h"
#include "mei.h"
#include "interface.h"
-#include "mei_version.h"
@@ -109,8 +108,8 @@ int mei_ioctl_connect_client(struct file *file,
dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
dev->me_clients[i].props.max_msg_length);
- /* if we're connecting to amthi client so we will use the exist
- * connection
+ /* if we're connecting to amthi client then we will use the
+ * existing connection
*/
if (uuid_le_cmp(data->in_client_uuid, mei_amthi_guid) == 0) {
dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
@@ -162,7 +161,7 @@ int mei_ioctl_connect_client(struct file *file,
&& !mei_other_client_is_connecting(dev, cl)) {
dev_dbg(&dev->pdev->dev, "Sending Connect Message\n");
dev->mei_host_buffer_is_empty = false;
- if (!mei_connect(dev, cl)) {
+ if (mei_connect(dev, cl)) {
dev_dbg(&dev->pdev->dev, "Sending connect message - failed\n");
rets = -ENODEV;
goto end;
@@ -434,13 +433,11 @@ int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
cl->read_cb = cb;
if (dev->mei_host_buffer_is_empty) {
dev->mei_host_buffer_is_empty = false;
- if (!mei_send_flow_control(dev, cl)) {
+ if (mei_send_flow_control(dev, cl)) {
rets = -ENODEV;
goto unlock;
- } else {
- list_add_tail(&cb->cb_list,
- &dev->read_list.mei_cb.cb_list);
}
+ list_add_tail(&cb->cb_list, &dev->read_list.mei_cb.cb_list);
} else {
list_add_tail(&cb->cb_list, &dev->ctrl_wr_list.mei_cb.cb_list);
}
@@ -500,7 +497,7 @@ int amthi_write(struct mei_device *dev, struct mei_cl_cb *cb)
mei_hdr.me_addr = dev->iamthif_cl.me_client_id;
mei_hdr.reserved = 0;
dev->iamthif_msg_buf_index += mei_hdr.length;
- if (!mei_write_message(dev, &mei_hdr,
+ if (mei_write_message(dev, &mei_hdr,
(unsigned char *)(dev->iamthif_msg_buf),
mei_hdr.length))
return -ENODEV;
diff --git a/drivers/staging/mei/main.c b/drivers/staging/mei/main.c
index 1e1a9f996e7c..7c9321fa7bb1 100644
--- a/drivers/staging/mei/main.c
+++ b/drivers/staging/mei/main.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -38,7 +38,6 @@
#include "mei_dev.h"
#include "mei.h"
#include "interface.h"
-#include "mei_version.h"
#define MEI_READ_TIMEOUT 45
@@ -50,7 +49,6 @@
*/
static char mei_driver_name[] = MEI_DRIVER_NAME;
static const char mei_driver_string[] = "Intel(R) Management Engine Interface";
-static const char mei_driver_version[] = MEI_DRIVER_VERSION;
/* The device pointer */
/* Currently this driver works as long as there is only a single AMT device. */
@@ -430,7 +428,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
goto free;
} else if ((!cl->read_cb || !cl->read_cb->information) &&
*offset > 0) {
- /*Offset needs to be cleaned for contingous reads*/
+ /*Offset needs to be cleaned for contiguous reads*/
*offset = 0;
rets = 0;
goto out;
@@ -493,7 +491,7 @@ copy_buffer:
goto free;
}
- /* length is being turncated to PAGE_SIZE, however, */
+ /* length is being truncated to PAGE_SIZE, however, */
/* information size may be longer */
length = min_t(size_t, length, (cb->information - *offset));
@@ -740,7 +738,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
mei_hdr.reserved = 0;
dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
*((u32 *) &mei_hdr));
- if (!mei_write_message(dev, &mei_hdr,
+ if (mei_write_message(dev, &mei_hdr,
(unsigned char *) (write_cb->request_buffer.data),
mei_hdr.length)) {
rets = -ENODEV;
@@ -1206,8 +1204,7 @@ static int __init mei_init_module(void)
{
int ret;
- pr_debug("mei: %s - version %s\n",
- mei_driver_string, mei_driver_version);
+ pr_debug("mei: %s\n", mei_driver_string);
/* init pci module */
ret = pci_register_driver(&mei_driver);
if (ret < 0)
@@ -1238,4 +1235,3 @@ module_exit(mei_exit_module);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(MEI_DRIVER_VERSION);
diff --git a/drivers/staging/mei/mei-amt-version.c b/drivers/staging/mei/mei-amt-version.c
new file mode 100644
index 000000000000..ac2a507be253
--- /dev/null
+++ b/drivers/staging/mei/mei-amt-version.c
@@ -0,0 +1,481 @@
+/******************************************************************************
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Intel MEI Interface Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation.
+ * linux-mei@linux.intel.com
+ * http://www.intel.com
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <bits/wordsize.h>
+#include "mei.h"
+
+/*****************************************************************************
+ * Intel Management Engine Interface
+ *****************************************************************************/
+
+#define mei_msg(_me, fmt, ARGS...) do { \
+ if (_me->verbose) \
+ fprintf(stderr, fmt, ##ARGS); \
+} while (0)
+
+#define mei_err(_me, fmt, ARGS...) do { \
+ fprintf(stderr, "Error: " fmt, ##ARGS); \
+} while (0)
+
+struct mei {
+ uuid_le guid;
+ bool initialized;
+ bool verbose;
+ unsigned int buf_size;
+ unsigned char prot_ver;
+ int fd;
+};
+
+static void mei_deinit(struct mei *cl)
+{
+ if (cl->fd != -1)
+ close(cl->fd);
+ cl->fd = -1;
+ cl->buf_size = 0;
+ cl->prot_ver = 0;
+ cl->initialized = false;
+}
+
+static bool mei_init(struct mei *me, const uuid_le *guid,
+ unsigned char req_protocol_version, bool verbose)
+{
+ int result;
+ struct mei_client *cl;
+ struct mei_connect_client_data data;
+
+ mei_deinit(me);
+
+ me->verbose = verbose;
+
+ me->fd = open("/dev/mei", O_RDWR);
+ if (me->fd == -1) {
+ mei_err(me, "Cannot establish a handle to the Intel MEI driver\n");
+ goto err;
+ }
+ memcpy(&me->guid, guid, sizeof(*guid));
+ memset(&data, 0, sizeof(data));
+ me->initialized = true;
+
+ memcpy(&data.in_client_uuid, &me->guid, sizeof(me->guid));
+ result = ioctl(me->fd, IOCTL_MEI_CONNECT_CLIENT, &data);
+ if (result) {
+ mei_err(me, "IOCTL_MEI_CONNECT_CLIENT receive message. err=%d\n", result);
+ goto err;
+ }
+ cl = &data.out_client_properties;
+ mei_msg(me, "max_message_length %d\n", cl->max_msg_length);
+ mei_msg(me, "protocol_version %d\n", cl->protocol_version);
+
+ if ((req_protocol_version > 0) &&
+ (cl->protocol_version != req_protocol_version)) {
+ mei_err(me, "Intel MEI protocol version not supported\n");
+ goto err;
+ }
+
+ me->buf_size = cl->max_msg_length;
+ me->prot_ver = cl->protocol_version;
+
+ return true;
+err:
+ mei_deinit(me);
+ return false;
+}
+
+static ssize_t mei_recv_msg(struct mei *me, unsigned char *buffer,
+ ssize_t len, unsigned long timeout)
+{
+ ssize_t rc;
+
+ mei_msg(me, "call read length = %zd\n", len);
+
+ rc = read(me->fd, buffer, len);
+ if (rc < 0) {
+ mei_err(me, "read failed with status %zd %s\n",
+ rc, strerror(errno));
+ mei_deinit(me);
+ } else {
+ mei_msg(me, "read succeeded with result %zd\n", rc);
+ }
+ return rc;
+}
+
+static ssize_t mei_send_msg(struct mei *me, const unsigned char *buffer,
+ ssize_t len, unsigned long timeout)
+{
+ struct timeval tv;
+ ssize_t written;
+ ssize_t rc;
+ fd_set set;
+
+ tv.tv_sec = timeout / 1000;
+ tv.tv_usec = (timeout % 1000) * 1000; /* remaining ms in microseconds */
+
+ mei_msg(me, "call write length = %zd\n", len);
+
+ written = write(me->fd, buffer, len);
+ if (written < 0) {
+ rc = -errno;
+ mei_err(me, "write failed with status %zd %s\n",
+ written, strerror(errno));
+ goto out;
+ }
+
+ FD_ZERO(&set);
+ FD_SET(me->fd, &set);
+ rc = select(me->fd + 1, &set, NULL, NULL, &tv);
+ if (rc > 0 && FD_ISSET(me->fd, &set)) {
+ mei_msg(me, "write success\n");
+ } else if (rc == 0) {
+ mei_err(me, "write failed on timeout with status\n");
+ goto out;
+ } else { /* rc < 0 */
+ mei_err(me, "write failed on select with status %zd\n", rc);
+ goto out;
+ }
+
+ rc = written;
+out:
+ if (rc < 0)
+ mei_deinit(me);
+
+ return rc;
+}
+
+/***************************************************************************
+ * Intel Advanced Management Technology ME Client
+ ***************************************************************************/
+
+#define AMT_MAJOR_VERSION 1
+#define AMT_MINOR_VERSION 1
+
+#define AMT_STATUS_SUCCESS 0x0
+#define AMT_STATUS_INTERNAL_ERROR 0x1
+#define AMT_STATUS_NOT_READY 0x2
+#define AMT_STATUS_INVALID_AMT_MODE 0x3
+#define AMT_STATUS_INVALID_MESSAGE_LENGTH 0x4
+
+#define AMT_STATUS_HOST_IF_EMPTY_RESPONSE 0x4000
+#define AMT_STATUS_SDK_RESOURCES 0x1004
+
+
+#define AMT_BIOS_VERSION_LEN 65
+#define AMT_VERSIONS_NUMBER 50
+#define AMT_UNICODE_STRING_LEN 20
+
+struct amt_unicode_string {
+ uint16_t length;
+ char string[AMT_UNICODE_STRING_LEN];
+} __attribute__((packed));
+
+struct amt_version_type {
+ struct amt_unicode_string description;
+ struct amt_unicode_string version;
+} __attribute__((packed));
+
+struct amt_version {
+ uint8_t major;
+ uint8_t minor;
+} __attribute__((packed));
+
+struct amt_code_versions {
+ uint8_t bios[AMT_BIOS_VERSION_LEN];
+ uint32_t count;
+ struct amt_version_type versions[AMT_VERSIONS_NUMBER];
+} __attribute__((packed));
+
+/***************************************************************************
+ * Intel Advanced Management Technology Host Interface
+ ***************************************************************************/
+
+struct amt_host_if_msg_header {
+ struct amt_version version;
+ uint16_t _reserved;
+ uint32_t command;
+ uint32_t length;
+} __attribute__((packed));
+
+struct amt_host_if_resp_header {
+ struct amt_host_if_msg_header header;
+ uint32_t status;
+ unsigned char data[0];
+} __attribute__((packed));
+
+const uuid_le MEI_IAMTHIF = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, \
+ 0xac, 0xa8, 0x46, 0xe0, 0xff, 0x65, 0x81, 0x4c);
+
+#define AMT_HOST_IF_CODE_VERSIONS_REQUEST 0x0400001A
+#define AMT_HOST_IF_CODE_VERSIONS_RESPONSE 0x0480001A
+
+const struct amt_host_if_msg_header CODE_VERSION_REQ = {
+ .version = {AMT_MAJOR_VERSION, AMT_MINOR_VERSION},
+ ._reserved = 0,
+ .command = AMT_HOST_IF_CODE_VERSIONS_REQUEST,
+ .length = 0
+};
+
+
+struct amt_host_if {
+ struct mei mei_cl;
+ unsigned long send_timeout;
+ bool initialized;
+};
+
+
+static bool amt_host_if_init(struct amt_host_if *acmd,
+ unsigned long send_timeout, bool verbose)
+{
+ acmd->send_timeout = (send_timeout) ? send_timeout : 20000;
+ acmd->initialized = mei_init(&acmd->mei_cl, &MEI_IAMTHIF, 0, verbose);
+ return acmd->initialized;
+}
+
+static void amt_host_if_deinit(struct amt_host_if *acmd)
+{
+ mei_deinit(&acmd->mei_cl);
+ acmd->initialized = false;
+}
+
+static uint32_t amt_verify_code_versions(const struct amt_host_if_resp_header *resp)
+{
+ uint32_t status = AMT_STATUS_SUCCESS;
+ struct amt_code_versions *code_ver;
+ size_t code_ver_len;
+ uint32_t ver_type_cnt;
+ uint32_t len;
+ uint32_t i;
+
+ code_ver = (struct amt_code_versions *)resp->data;
+ /* length - sizeof(status) */
+ code_ver_len = resp->header.length - sizeof(uint32_t);
+ ver_type_cnt = code_ver_len -
+ sizeof(code_ver->bios) -
+ sizeof(code_ver->count);
+ if (code_ver->count != ver_type_cnt / sizeof(struct amt_version_type)) {
+ status = AMT_STATUS_INTERNAL_ERROR;
+ goto out;
+ }
+
+ for (i = 0; i < code_ver->count; i++) {
+ len = code_ver->versions[i].description.length;
+
+ if (len > AMT_UNICODE_STRING_LEN) {
+ status = AMT_STATUS_INTERNAL_ERROR;
+ goto out;
+ }
+
+ len = code_ver->versions[i].version.length;
+ if (len >= AMT_UNICODE_STRING_LEN ||
+     code_ver->versions[i].version.string[len] != '\0' ||
+ len != strlen(code_ver->versions[i].version.string)) {
+ status = AMT_STATUS_INTERNAL_ERROR;
+ goto out;
+ }
+ }
+out:
+ return status;
+}
+
+static uint32_t amt_verify_response_header(uint32_t command,
+ const struct amt_host_if_msg_header *resp_hdr,
+ uint32_t response_size)
+{
+ if (response_size < sizeof(struct amt_host_if_resp_header)) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ } else if (response_size != (resp_hdr->length +
+ sizeof(struct amt_host_if_msg_header))) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ } else if (resp_hdr->command != command) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ } else if (resp_hdr->_reserved != 0) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ } else if (resp_hdr->version.major != AMT_MAJOR_VERSION ||
+ resp_hdr->version.minor < AMT_MINOR_VERSION) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ }
+ return AMT_STATUS_SUCCESS;
+}
+
+static uint32_t amt_host_if_call(struct amt_host_if *acmd,
+ const unsigned char *command, ssize_t command_sz,
+ uint8_t **read_buf, uint32_t rcmd,
+ unsigned int expected_sz)
+{
+ uint32_t in_buf_sz;
+ ssize_t out_buf_sz; /* signed: mei_recv_msg() can return -1 */
+ ssize_t written;
+ uint32_t status;
+ struct amt_host_if_resp_header *msg_hdr;
+
+ in_buf_sz = acmd->mei_cl.buf_size;
+ *read_buf = (uint8_t *)malloc(sizeof(uint8_t) * in_buf_sz);
+ if (*read_buf == NULL)
+ return AMT_STATUS_SDK_RESOURCES;
+ memset(*read_buf, 0, in_buf_sz);
+ msg_hdr = (struct amt_host_if_resp_header *)*read_buf;
+
+ written = mei_send_msg(&acmd->mei_cl,
+ command, command_sz, acmd->send_timeout);
+ if (written != command_sz)
+ return AMT_STATUS_INTERNAL_ERROR;
+
+ out_buf_sz = mei_recv_msg(&acmd->mei_cl, *read_buf, in_buf_sz, 2000);
+ if (out_buf_sz <= 0)
+ return AMT_STATUS_HOST_IF_EMPTY_RESPONSE;
+
+ status = msg_hdr->status;
+ if (status != AMT_STATUS_SUCCESS)
+ return status;
+
+ status = amt_verify_response_header(rcmd,
+ &msg_hdr->header, out_buf_sz);
+ if (status != AMT_STATUS_SUCCESS)
+ return status;
+
+ if (expected_sz && expected_sz != out_buf_sz)
+ return AMT_STATUS_INTERNAL_ERROR;
+
+ return AMT_STATUS_SUCCESS;
+}
+
+
+static uint32_t amt_get_code_versions(struct amt_host_if *cmd,
+ struct amt_code_versions *versions)
+{
+ struct amt_host_if_resp_header *response = NULL;
+ uint32_t status;
+
+ status = amt_host_if_call(cmd,
+ (const unsigned char *)&CODE_VERSION_REQ,
+ sizeof(CODE_VERSION_REQ),
+ (uint8_t **)&response,
+ AMT_HOST_IF_CODE_VERSIONS_RESPONSE, 0);
+
+ if (status != AMT_STATUS_SUCCESS)
+ goto out;
+
+ status = amt_verify_code_versions(response);
+ if (status != AMT_STATUS_SUCCESS)
+ goto out;
+
+ memcpy(versions, response->data, sizeof(struct amt_code_versions));
+out:
+ if (response != NULL)
+ free(response);
+
+ return status;
+}
+
+/************************** end of amt_host_if_command ***********************/
+int main(int argc, char **argv)
+{
+ struct amt_code_versions ver;
+ struct amt_host_if acmd;
+ unsigned int i;
+ uint32_t status;
+ int ret;
+ bool verbose;
+
+ verbose = (argc > 1 && strcmp(argv[1], "-v") == 0);
+
+ if (!amt_host_if_init(&acmd, 5000, verbose)) {
+ ret = 1;
+ goto out;
+ }
+
+ status = amt_get_code_versions(&acmd, &ver);
+
+ amt_host_if_deinit(&acmd);
+
+ switch (status) {
+ case AMT_STATUS_HOST_IF_EMPTY_RESPONSE:
+ printf("Intel AMT: DISABLED\n");
+ ret = 0;
+ break;
+ case AMT_STATUS_SUCCESS:
+ printf("Intel AMT: ENABLED\n");
+ for (i = 0; i < ver.count; i++) {
+ printf("%s:\t%s\n", ver.versions[i].description.string,
+ ver.versions[i].version.string);
+ }
+ ret = 0;
+ break;
+ default:
+ printf("An error has occurred\n");
+ ret = 1;
+ break;
+ }
+
+out:
+ return ret;
+}
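As a usage sketch for the sample added above (assuming a kernel that exposes /dev/mei and the staging mei.h on the include path): build it with "cc -I drivers/staging/mei -o mei-amt-version mei-amt-version.c", then run "./mei-amt-version -v" as root. It prints whether Intel AMT is enabled and, when it is, one description/version line per firmware component; -v additionally enables the mei_msg() tracing.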
diff --git a/drivers/staging/mei/mei.h b/drivers/staging/mei/mei.h
index 6da7c4f33f91..bc0d8b69c49e 100644
--- a/drivers/staging/mei/mei.h
+++ b/drivers/staging/mei/mei.h
@@ -1,63 +1,68 @@
-/*
-
- Intel Management Engine Interface (Intel MEI) Linux driver
- Intel MEI Interface Header
-
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
-
- Copyright(c) 2003-2011 Intel Corporation. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- Intel Corporation.
- linux-mei@linux.intel.com
- http://www.intel.com
-
-
- BSD LICENSE
-
- Copyright(c) 2003-2011 Intel Corporation. All rights reserved.
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
+/******************************************************************************
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Intel MEI Interface Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation.
+ * linux-mei@linux.intel.com
+ * http://www.intel.com
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
#ifndef _LINUX_MEI_H
#define _LINUX_MEI_H
@@ -72,7 +77,7 @@
* Only in close() (file_operation release()) the communication between
* the clients is disconnected
*
- * The IOCTL argument is a struct with a union the contains
+ * The IOCTL argument is a struct with a union that contains
* the input parameter and the output parameter for this IOCTL.
*
* The input parameter is the UUID of the FW Client.
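A sketch of the connect-IOCTL argument shape the comment above describes: one struct whose union carries the UUID in and the client properties out. The layout and field sizes here are illustrative; only the outer field names match the driver:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint8_t b[16]; } uuid_le_t;	/* stand-in for uuid_le */

struct mei_client_props {	/* illustrative subset */
	uint32_t max_msg_length;
	uint8_t  protocol_version;
	uint8_t  reserved[3];
};

struct mei_connect_client_data_sketch {
	union {
		uuid_le_t in_client_uuid;	/* input: FW client UUID */
		struct mei_client_props out_client_properties;	/* output */
	};
};

int main(void)
{
	printf("arg size: %zu bytes\n",
	       sizeof(struct mei_connect_client_data_sketch));
	return 0;
}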
diff --git a/drivers/staging/mei/mei.txt b/drivers/staging/mei/mei.txt
index 516bfe7319a6..2785697da59d 100644
--- a/drivers/staging/mei/mei.txt
+++ b/drivers/staging/mei/mei.txt
@@ -4,7 +4,7 @@ Intel(R) Management Engine Interface (Intel(R) MEI)
Introduction
=======================
-The Intel Management Engine (Intel ME) is an isolated andprotected computing
+The Intel Management Engine (Intel ME) is an isolated and protected computing
resource (Co-processor) residing inside certain Intel chipsets. The Intel ME
provides support for computer/IT management features. The feature set
depends on the Intel chipset SKU.
@@ -176,8 +176,8 @@ Intel AMT OS Health Watchdog:
=============================
The Intel AMT Watchdog is an OS Health (Hang/Crash) watchdog.
Whenever the OS hangs or crashes, Intel AMT will send an event
-to any subsciber to this event. This mechanism means that
-IT knows when a platform crashes even when there is a hard failureon the host.
+to any subscriber to this event. This mechanism means that
+IT knows when a platform crashes even when there is a hard failure on the host.
The Intel AMT Watchdog is composed of two parts:
1) Firmware feature - receives the heartbeats
diff --git a/drivers/staging/mei/mei_dev.h b/drivers/staging/mei/mei_dev.h
index 82bacfc624c5..10b1b4e2f8ac 100644
--- a/drivers/staging/mei/mei_dev.h
+++ b/drivers/staging/mei/mei_dev.h
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -30,6 +30,8 @@
#define MEI_WD_PARAMS_SIZE 4
#define MEI_WD_STATE_INDEPENDENCE_MSG_SENT (1 << 0)
+#define MEI_RD_MSG_BUF_SIZE (128 * sizeof(u32))
+
/*
* MEI PCI Device object
*/
@@ -87,7 +89,7 @@ enum mei_states {
MEI_POWER_UP
};
-/* init clients states*/
+/* init clients states*/
enum mei_init_clients_states {
MEI_START_MESSAGE = 0,
MEI_ENUM_CLIENTS_MESSAGE,
@@ -125,7 +127,7 @@ enum mei_cb_major_types {
*/
struct mei_message_data {
u32 size;
- char *data;
+ unsigned char *data;
} __packed;
@@ -219,7 +221,7 @@ struct mei_device {
bool need_reset;
u32 extra_write_index;
- u32 rd_msg_buf[128]; /* used for control messages */
+ unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */
u32 wr_msg_buf[128]; /* used for control messages */
u32 ext_msg_buf[8]; /* for control responses */
u32 rd_msg_hdr;
diff --git a/drivers/staging/mei/mei_version.h b/drivers/staging/mei/mei_version.h
deleted file mode 100644
index 075bad8f0bf5..000000000000
--- a/drivers/staging/mei/mei_version.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-
-#ifndef MEI_VERSION_H
-#define MEI_VERSION_H
-
-#define MAJOR_VERSION 7
-#define MINOR_VERSION 1
-#define QUICK_FIX_NUMBER 20
-#define VER_BUILD 1
-
-#define MEI_DRV_VER1 __stringify(MAJOR_VERSION) "." __stringify(MINOR_VERSION)
-#define MEI_DRV_VER2 __stringify(QUICK_FIX_NUMBER) "." __stringify(VER_BUILD)
-
-#define MEI_DRIVER_VERSION MEI_DRV_VER1 "." MEI_DRV_VER2
-
-#endif
diff --git a/drivers/staging/mei/wd.c b/drivers/staging/mei/wd.c
index 8094941a98f1..a6910da78a64 100644
--- a/drivers/staging/mei/wd.c
+++ b/drivers/staging/mei/wd.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -74,7 +74,7 @@ bool mei_wd_host_init(struct mei_device *dev)
dev_dbg(&dev->pdev->dev, "check wd_cl\n");
if (MEI_FILE_CONNECTING == dev->wd_cl.state) {
- if (!mei_connect(dev, &dev->wd_cl)) {
+ if (mei_connect(dev, &dev->wd_cl)) {
dev_dbg(&dev->pdev->dev, "Failed to connect to WD client\n");
dev->wd_cl.state = MEI_FILE_DISCONNECTED;
dev->wd_cl.host_client_id = 0;
@@ -119,9 +119,7 @@ int mei_wd_send(struct mei_device *dev)
else
return -EINVAL;
- if (mei_write_message(dev, mei_hdr, dev->wd_data, mei_hdr->length))
- return 0;
- return -EIO;
+ return mei_write_message(dev, mei_hdr, dev->wd_data, mei_hdr->length);
}
/**
diff --git a/drivers/staging/nvec/Kconfig b/drivers/staging/nvec/Kconfig
index 86a8b8c418c0..731301f524a6 100644
--- a/drivers/staging/nvec/Kconfig
+++ b/drivers/staging/nvec/Kconfig
@@ -7,21 +7,21 @@ config MFD_NVEC
config KEYBOARD_NVEC
bool "Keyboard on nVidia compliant EC"
- depends on MFD_NVEC && INPUT=y
+ depends on MFD_NVEC && INPUT
help
Say Y here to enable support for a keyboard connected to
an nVidia compliant embedded controller.
config SERIO_NVEC_PS2
bool "PS2 on nVidia EC"
- depends on MFD_NVEC && MOUSE_PS2
+ depends on MFD_NVEC && SERIO
help
Say Y here to enable support for a Touchpad / Mouse connected
to an nVidia compliant embedded controller.
config NVEC_POWER
bool "NVEC charger and battery"
- depends on MFD_NVEC && POWER_SUPPLY=y
+ depends on MFD_NVEC && POWER_SUPPLY
help
Say Y to enable support for battery and charger interface for
nVidia compliant embedded controllers.
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index fafdfa25e139..3c60088871e0 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -49,7 +49,7 @@
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12
#define I2C_SL_CNFG 0x20
-#define I2C_SL_NEWL (1<<2)
+#define I2C_SL_NEWSL (1<<2)
#define I2C_SL_NACK (1<<1)
#define I2C_SL_RESP (1<<0)
#define I2C_SL_IRQ (1<<3)
@@ -687,7 +687,7 @@ static void tegra_init_i2c_slave(struct nvec_chip *nvec)
clk_set_rate(nvec->i2c_clk, 8 * 80000);
- writel(I2C_SL_NEWL, nvec->base + I2C_SL_CNFG);
+ writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);
writel(nvec->i2c_addr>>1, nvec->base + I2C_SL_ADDR1);
@@ -701,7 +701,7 @@ static void tegra_init_i2c_slave(struct nvec_chip *nvec)
static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
disable_irq(nvec->irq);
- writel(I2C_SL_NEWL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
+ writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
clk_disable(nvec->i2c_clk);
}
@@ -784,11 +784,6 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
nvec->i2c_clk = i2c_clk;
nvec->rx = &nvec->msg_pool[0];
- /* Set the gpio to low when we've got something to say */
- err = gpio_request(nvec->gpio, "nvec gpio");
- if (err < 0)
- dev_err(nvec->dev, "couldn't request gpio\n");
-
ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);
init_completion(&nvec->sync_write);
@@ -802,6 +797,12 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
INIT_WORK(&nvec->tx_work, nvec_request_master);
nvec->wq = alloc_workqueue("nvec", WQ_NON_REENTRANT, 2);
+ err = gpio_request_one(nvec->gpio, GPIOF_OUT_INIT_HIGH, "nvec gpio");
+ if (err < 0) {
+ dev_err(nvec->dev, "couldn't request gpio\n");
+ goto failed;
+ }
+
err = request_irq(nvec->irq, nvec_interrupt, 0, "nvec", nvec);
if (err) {
dev_err(nvec->dev, "couldn't request irq\n");
@@ -813,8 +814,6 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
clk_enable(i2c_clk);
- gpio_direction_output(nvec->gpio, 1);
- gpio_set_value(nvec->gpio, 1);
/* enable event reporting */
nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING,
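The probe-path change above folds gpio_request() plus gpio_direction_output() into a single gpio_request_one() call, so one error check covers both steps. A schematic, self-contained comparison with stub implementations (the real kernel API behaves similarly but is not reproduced here):

#include <stdio.h>

#define GPIOF_OUT_INIT_HIGH	0x2

static int gpio_request(unsigned gpio, const char *label) { return 0; }
static int gpio_direction_output(unsigned gpio, int value) { return 0; }

/* roughly what gpio_request_one() does: claim, then configure */
static int gpio_request_one(unsigned gpio, unsigned flags, const char *label)
{
	int err = gpio_request(gpio, label);

	if (err)
		return err;
	return gpio_direction_output(gpio, !!(flags & GPIOF_OUT_INIT_HIGH));
}

int main(void)
{
	if (gpio_request_one(170, GPIOF_OUT_INIT_HIGH, "nvec gpio") < 0)
		fprintf(stderr, "couldn't request gpio\n");
	return 0;
}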
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
index 742f5ccfe763..14a6f687cf75 100644
--- a/drivers/staging/nvec/nvec_ps2.c
+++ b/drivers/staging/nvec/nvec_ps2.c
@@ -21,10 +21,18 @@
#include "nvec.h"
-#define START_STREAMING {'\x06', '\x03', '\x04'}
+#define START_STREAMING {'\x06', '\x03', '\x06'}
#define STOP_STREAMING {'\x06', '\x04'}
#define SEND_COMMAND {'\x06', '\x01', '\xf4', '\x01'}
+#ifdef NVEC_PS2_DEBUG
+#define NVEC_PHD(str, buf, len) \
+ print_hex_dump(KERN_DEBUG, str, DUMP_PREFIX_NONE, \
+ 16, 1, buf, len, false)
+#else
+#define NVEC_PHD(str, buf, len)
+#endif
+
static const unsigned char MOUSE_RESET[] = {'\x06', '\x01', '\xff', '\x03'};
struct nvec_ps2 {
@@ -67,18 +75,18 @@ static int nvec_ps2_notifier(struct notifier_block *nb,
case NVEC_PS2_EVT:
for (i = 0; i < msg[1]; i++)
serio_interrupt(ps2_dev.ser_dev, msg[2 + i], 0);
+ NVEC_PHD("ps/2 mouse event: ", &msg[2], msg[1]);
return NOTIFY_STOP;
case NVEC_PS2:
- if (msg[2] == 1)
+ if (msg[2] == 1) {
for (i = 0; i < (msg[1] - 2); i++)
serio_interrupt(ps2_dev.ser_dev, msg[i + 4], 0);
- else if (msg[1] != 2) { /* !ack */
- print_hex_dump(KERN_WARNING, "unhandled mouse event: ",
- DUMP_PREFIX_NONE, 16, 1,
- msg, msg[1] + 2, true);
+ NVEC_PHD("ps/2 mouse reply: ", &msg[4], msg[1] - 2);
}
+ else if (msg[1] != 2) /* !ack */
+ NVEC_PHD("unhandled mouse event: ", msg, msg[1] + 2);
return NOTIFY_STOP;
}
@@ -90,10 +98,10 @@ static int __devinit nvec_mouse_probe(struct platform_device *pdev)
struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
struct serio *ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
- ser_dev->id.type = SERIO_8042;
+ ser_dev->id.type = SERIO_PS_PSTHRU;
ser_dev->write = ps2_sendcommand;
- ser_dev->open = ps2_startstreaming;
- ser_dev->close = ps2_stopstreaming;
+ ser_dev->start = ps2_startstreaming;
+ ser_dev->stop = ps2_stopstreaming;
strlcpy(ser_dev->name, "nvec mouse", sizeof(ser_dev->name));
strlcpy(ser_dev->phys, "nvec", sizeof(ser_dev->phys));
@@ -111,8 +119,35 @@ static int __devinit nvec_mouse_probe(struct platform_device *pdev)
return 0;
}
+static int nvec_mouse_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
+
+ /* disable mouse */
+ nvec_write_async(nvec, "\x06\xf4", 2);
+
+ /* send cancel autoreceive */
+ nvec_write_async(nvec, "\x06\x04", 2);
+
+ return 0;
+}
+
+static int nvec_mouse_resume(struct platform_device *pdev)
+{
+ struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
+
+ ps2_startstreaming(ps2_dev.ser_dev);
+
+ /* enable mouse */
+ nvec_write_async(nvec, "\x06\xf5", 2);
+
+ return 0;
+}
+
static struct platform_driver nvec_mouse_driver = {
.probe = nvec_mouse_probe,
+ .suspend = nvec_mouse_suspend,
+ .resume = nvec_mouse_resume,
.driver = {
.name = "nvec-mouse",
.owner = THIS_MODULE,
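
The NVEC_PHD() macro above is the usual compile-time debug-dump idiom: all hex dumps go through one macro that expands to nothing unless a debug define is set. A standalone sketch of the same pattern, assuming a hypothetical MYDRV_DEBUG switch; the do-while form in the empty branch avoids the dangling-statement pitfalls a bare empty expansion can cause inside an unbraced if/else:

#include <linux/printk.h>

#ifdef MYDRV_DEBUG
#define MYDRV_PHD(str, buf, len) \
	print_hex_dump(KERN_DEBUG, str, DUMP_PREFIX_NONE, \
			16, 1, buf, len, false)
#else
#define MYDRV_PHD(str, buf, len) do { } while (0)
#endif
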
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 63800ba71d06..e31949c9c87e 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -164,9 +164,9 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
int phy_addr = cvmx_helper_board_get_mii_address(priv->port);
if (phy_addr != -1) {
- char phy_id[20];
+ char phy_id[MII_BUS_ID_SIZE + 3];
- snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", phy_addr);
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "mdio-octeon-0", phy_addr);
priv->phydev = phy_connect(dev, phy_id, cvm_oct_adjust_link, 0,
PHY_INTERFACE_MODE_GMII);
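
The buffer here is now sized from the constants that actually bound PHY_ID_FMT output rather than a magic 20: a bus id of at most MII_BUS_ID_SIZE bytes (including its NUL), a ':', and a two-digit hex address. A sketch of the same sizing, with the bus name taken from the patch:

#include <linux/phy.h>

static struct phy_device *connect_octeon_phy(struct net_device *dev,
		int phy_addr, void (*adjust_link)(struct net_device *))
{
	/* PHY_ID_FMT is "%s:%02x", so MII_BUS_ID_SIZE + 3 always fits */
	char phy_id[MII_BUS_ID_SIZE + 3];

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
		 "mdio-octeon-0", phy_addr);
	return phy_connect(dev, phy_id, adjust_link, 0,
			   PHY_INTERFACE_MODE_GMII);
}
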
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/staging/omapdrm/omap_crtc.c
index 17ca163e5896..490a7f15604b 100644
--- a/drivers/staging/omapdrm/omap_crtc.c
+++ b/drivers/staging/omapdrm/omap_crtc.c
@@ -118,29 +118,35 @@ static void omap_crtc_load_lut(struct drm_crtc *crtc)
{
}
-static void page_flip_cb(void *arg)
+static void vblank_cb(void *arg)
{
+ static uint32_t sequence = 0;
struct drm_crtc *crtc = arg;
struct drm_device *dev = crtc->dev;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct drm_pending_vblank_event *event = omap_crtc->event;
- struct drm_framebuffer *old_fb = omap_crtc->old_fb;
- struct timeval now;
unsigned long flags;
+ struct timeval now;
WARN_ON(!event);
omap_crtc->event = NULL;
- omap_crtc->old_fb = NULL;
-
- omap_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
/* wakeup userspace */
- /* TODO: this should happen *after* flip in vsync IRQ handler */
if (event) {
+ do_gettimeofday(&now);
+
spin_lock_irqsave(&dev->event_lock, flags);
+ /* TODO: we can't yet use the vblank time accounting,
+ * because omapdss lower layer is the one that knows
+ * the irq # and registers the handler, which more or
+ * less defeats how drm_irq works.. for now just fake
+ * the sequence number and use gettimeofday..
+ *
event->event.sequence = drm_vblank_count_and_time(
dev, omap_crtc->id, &now);
+ */
+ event->event.sequence = sequence++;
event->event.tv_sec = now.tv_sec;
event->event.tv_usec = now.tv_usec;
list_add_tail(&event->base.link,
@@ -150,6 +156,23 @@ static void page_flip_cb(void *arg)
}
}
+static void page_flip_cb(void *arg)
+{
+ struct drm_crtc *crtc = arg;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct drm_framebuffer *old_fb = omap_crtc->old_fb;
+
+ omap_crtc->old_fb = NULL;
+
+ omap_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
+
+ /* really we'd like to setup the callback atomically w/ setting the
+ * new scanout buffer to avoid getting stuck waiting an extra vblank
+ * cycle.. for now go for correctness and later figure out speed..
+ */
+ omap_plane_on_endwin(omap_crtc->plane, vblank_cb, crtc);
+}
+
static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event)
diff --git a/drivers/staging/omapdrm/omap_debugfs.c b/drivers/staging/omapdrm/omap_debugfs.c
index da920dfdc59c..2f122e00b51d 100644
--- a/drivers/staging/omapdrm/omap_debugfs.c
+++ b/drivers/staging/omapdrm/omap_debugfs.c
@@ -20,23 +20,118 @@
#include "omap_drv.h"
#include "omap_dmm_tiler.h"
+#include "drm_fb_helper.h"
+
+
#ifdef CONFIG_DEBUG_FS
+static int gem_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(m, "All Objects:\n");
+ omap_gem_describe_objects(&priv->obj_list, m);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int mm_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ return drm_mm_dump_table(m, dev->mm_private);
+}
+
+static int fb_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret) {
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+ }
+
+ seq_printf(m, "fbcon ");
+ omap_framebuffer_describe(priv->fbdev->fb, m);
+
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+ if (fb == priv->fbdev->fb)
+ continue;
+
+ seq_printf(m, "user ");
+ omap_framebuffer_describe(fb, m);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+
+/* list of debugfs files that are applicable to all devices */
static struct drm_info_list omap_debugfs_list[] = {
+ {"gem", gem_show, 0},
+ {"mm", mm_show, 0},
+ {"fb", fb_show, 0},
+};
+
+/* list of debugfs files that are specific to devices with dmm/tiler */
+static struct drm_info_list omap_dmm_debugfs_list[] = {
{"tiler_map", tiler_map_show, 0},
};
int omap_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(omap_debugfs_list,
+ struct drm_device *dev = minor->dev;
+ int ret;
+
+ ret = drm_debugfs_create_files(omap_debugfs_list,
ARRAY_SIZE(omap_debugfs_list),
minor->debugfs_root, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install omap_debugfs_list\n");
+ return ret;
+ }
+
+ if (dmm_is_available())
+ ret = drm_debugfs_create_files(omap_dmm_debugfs_list,
+ ARRAY_SIZE(omap_dmm_debugfs_list),
+ minor->debugfs_root, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install omap_dmm_debugfs_list\n");
+ return ret;
+ }
+
+ return ret;
}
void omap_debugfs_cleanup(struct drm_minor *minor)
{
drm_debugfs_remove_files(omap_debugfs_list,
ARRAY_SIZE(omap_debugfs_list), minor);
+ if (dmm_is_available())
+ drm_debugfs_remove_files(omap_dmm_debugfs_list,
+ ARRAY_SIZE(omap_dmm_debugfs_list), minor);
}
#endif
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c b/drivers/staging/omapdrm/omap_dmm_tiler.c
index 852d9440f725..1ecb6a73d790 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.c
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
@@ -34,6 +34,8 @@
#include "omap_dmm_tiler.h"
#include "omap_dmm_priv.h"
+#define DMM_DRIVER_NAME "dmm"
+
/* mappings for associating views to luts */
static struct tcm *containers[TILFMT_NFORMATS];
static struct dmm *omap_dmm;
@@ -465,7 +467,12 @@ size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}
-int omap_dmm_remove(void)
+bool dmm_is_initialized(void)
+{
+ return omap_dmm ? true : false;
+}
+
+static int omap_dmm_remove(struct platform_device *dev)
{
struct tiler_block *block, *_block;
int i;
@@ -499,40 +506,49 @@ int omap_dmm_remove(void)
if (omap_dmm->irq != -1)
free_irq(omap_dmm->irq, omap_dmm);
+ iounmap(omap_dmm->base);
kfree(omap_dmm);
+ omap_dmm = NULL;
}
return 0;
}
-int omap_dmm_init(struct drm_device *dev)
+static int omap_dmm_probe(struct platform_device *dev)
{
int ret = -EFAULT, i;
struct tcm_area area = {0};
u32 hwinfo, pat_geom, lut_table_size;
- struct omap_drm_platform_data *pdata = dev->dev->platform_data;
-
- if (!pdata || !pdata->dmm_pdata) {
- dev_err(dev->dev, "dmm platform data not present, skipping\n");
- return ret;
- }
+ struct resource *mem;
omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
if (!omap_dmm) {
- dev_err(dev->dev, "failed to allocate driver data section\n");
+ dev_err(&dev->dev, "failed to allocate driver data section\n");
goto fail;
}
/* lookup hwmod data - base address and irq */
- omap_dmm->base = pdata->dmm_pdata->base;
- omap_dmm->irq = pdata->dmm_pdata->irq;
- omap_dmm->dev = dev->dev;
+ mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&dev->dev, "failed to get base address resource\n");
+ goto fail;
+ }
+
+ omap_dmm->base = ioremap(mem->start, SZ_2K);
if (!omap_dmm->base) {
- dev_err(dev->dev, "failed to get dmm base address\n");
+ dev_err(&dev->dev, "failed to get dmm base address\n");
+ goto fail;
+ }
+
+ omap_dmm->irq = platform_get_irq(dev, 0);
+ if (omap_dmm->irq < 0) {
+ dev_err(&dev->dev, "failed to get IRQ resource\n");
goto fail;
}
+ omap_dmm->dev = &dev->dev;
+
hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
@@ -556,7 +572,7 @@ int omap_dmm_init(struct drm_device *dev)
"omap_dmm_irq_handler", omap_dmm);
if (ret) {
- dev_err(dev->dev, "couldn't register IRQ %d, error %d\n",
+ dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
omap_dmm->irq, ret);
omap_dmm->irq = -1;
goto fail;
@@ -575,25 +591,30 @@ int omap_dmm_init(struct drm_device *dev)
omap_dmm->lut = vmalloc(lut_table_size * sizeof(*omap_dmm->lut));
if (!omap_dmm->lut) {
- dev_err(dev->dev, "could not allocate lut table\n");
+ dev_err(&dev->dev, "could not allocate lut table\n");
ret = -ENOMEM;
goto fail;
}
omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
if (!omap_dmm->dummy_page) {
- dev_err(dev->dev, "could not allocate dummy page\n");
+ dev_err(&dev->dev, "could not allocate dummy page\n");
ret = -ENOMEM;
goto fail;
}
+
+ /* set dma mask for device */
+ /* NOTE: this is a workaround for the hwmod not initializing properly */
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
/* alloc refill memory */
- omap_dmm->refill_va = dma_alloc_coherent(dev->dev,
+ omap_dmm->refill_va = dma_alloc_coherent(&dev->dev,
REFILL_BUFFER_SIZE * omap_dmm->num_engines,
&omap_dmm->refill_pa, GFP_KERNEL);
if (!omap_dmm->refill_va) {
- dev_err(dev->dev, "could not allocate refill memory\n");
+ dev_err(&dev->dev, "could not allocate refill memory\n");
goto fail;
}
@@ -602,7 +623,7 @@ int omap_dmm_init(struct drm_device *dev)
omap_dmm->num_engines * sizeof(struct refill_engine),
GFP_KERNEL);
if (!omap_dmm->engines) {
- dev_err(dev->dev, "could not allocate engines\n");
+ dev_err(&dev->dev, "could not allocate engines\n");
ret = -ENOMEM;
goto fail;
}
@@ -624,7 +645,7 @@ int omap_dmm_init(struct drm_device *dev)
omap_dmm->tcm = kzalloc(omap_dmm->num_lut * sizeof(*omap_dmm->tcm),
GFP_KERNEL);
if (!omap_dmm->tcm) {
- dev_err(dev->dev, "failed to allocate lut ptrs\n");
+ dev_err(&dev->dev, "failed to allocate lut ptrs\n");
ret = -ENOMEM;
goto fail;
}
@@ -636,7 +657,7 @@ int omap_dmm_init(struct drm_device *dev)
NULL);
if (!omap_dmm->tcm[i]) {
- dev_err(dev->dev, "failed to allocate container\n");
+ dev_err(&dev->dev, "failed to allocate container\n");
ret = -ENOMEM;
goto fail;
}
@@ -676,7 +697,7 @@ int omap_dmm_init(struct drm_device *dev)
return 0;
fail:
- omap_dmm_remove();
+ omap_dmm_remove(dev);
return ret;
}
@@ -766,10 +787,18 @@ int tiler_map_show(struct seq_file *s, void *arg)
const char *a2d = special;
const char *m2dp = m2d, *a2dp = a2d;
char nice[128];
- int h_adj = omap_dmm->lut_height / ydiv;
- int w_adj = omap_dmm->lut_width / xdiv;
+ int h_adj;
+ int w_adj;
unsigned long flags;
+ if (!omap_dmm) {
+ /* early return if dmm/tiler device is not initialized */
+ return 0;
+ }
+
+ h_adj = omap_dmm->lut_height / ydiv;
+ w_adj = omap_dmm->lut_width / xdiv;
+
map = kzalloc(h_adj * sizeof(*map), GFP_KERNEL);
global_map = kzalloc((w_adj + 1) * h_adj, GFP_KERNEL);
@@ -828,3 +857,17 @@ error:
return 0;
}
#endif
+
+struct platform_driver omap_dmm_driver = {
+ .probe = omap_dmm_probe,
+ .remove = omap_dmm_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DMM_DRIVER_NAME,
+ },
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
+MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");
+MODULE_ALIAS("platform:" DMM_DRIVER_NAME);
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.h b/drivers/staging/omapdrm/omap_dmm_tiler.h
index f87cb657d683..7b1052a329e4 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.h
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.h
@@ -16,6 +16,7 @@
#ifndef OMAP_DMM_TILER_H
#define OMAP_DMM_TILER_H
+#include <plat/cpu.h>
#include "omap_drv.h"
#include "tcm.h"
@@ -72,10 +73,6 @@ struct tiler_block {
#define TIL_ADDR(x, orient, a)\
((u32) (x) | (orient) | ((a) << SHIFT_ACC_MODE))
-/* externally accessible functions */
-int omap_dmm_init(struct drm_device *dev);
-int omap_dmm_remove(void);
-
#ifdef CONFIG_DEBUG_FS
int tiler_map_show(struct seq_file *s, void *arg);
#endif
@@ -97,7 +94,9 @@ uint32_t tiler_stride(enum tiler_fmt fmt);
size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h);
size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h);
void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h);
+bool dmm_is_initialized(void);
+extern struct platform_driver omap_dmm_driver;
/* GEM bo flags -> tiler fmt */
static inline enum tiler_fmt gem2fmt(uint32_t flags)
@@ -127,9 +126,9 @@ static inline bool validfmt(enum tiler_fmt fmt)
}
}
-struct omap_dmm_platform_data {
- void __iomem *base;
- int irq;
-};
+static inline int dmm_is_available(void)
+{
+ return cpu_is_omap44xx();
+}
#endif
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
index 3bbea9aac404..3df5b4c58ecd 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/staging/omapdrm/omap_drv.c
@@ -21,6 +21,7 @@
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
+#include "omap_dmm_tiler.h"
#define DRIVER_NAME MODULE_NAME
#define DRIVER_DESC "OMAP DRM"
@@ -570,6 +571,11 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = priv;
+ priv->wq = alloc_workqueue("omapdrm",
+ WQ_UNBOUND | WQ_NON_REENTRANT, 1);
+
+ INIT_LIST_HEAD(&priv->obj_list);
+
omap_gem_init(dev);
ret = omap_modeset_init(dev);
@@ -598,6 +604,8 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
static int dev_unload(struct drm_device *dev)
{
+ struct omap_drm_private *priv = dev->dev_private;
+
DBG("unload: dev=%p", dev);
drm_vblank_cleanup(dev);
@@ -607,6 +615,9 @@ static int dev_unload(struct drm_device *dev)
omap_modeset_free(dev);
omap_gem_deinit(dev);
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+
kfree(dev->dev_private);
dev->dev_private = NULL;
@@ -792,6 +803,9 @@ static void pdev_shutdown(struct platform_device *device)
static int pdev_probe(struct platform_device *device)
{
DBG("%s", device->name);
+ if (platform_driver_register(&omap_dmm_driver))
+ dev_err(&device->dev, "DMM registration failed\n");
+
return drm_platform_init(&omap_drm_driver, device);
}
@@ -799,6 +813,8 @@ static int pdev_remove(struct platform_device *device)
{
DBG("");
drm_platform_exit(&omap_drm_driver, device);
+
+ platform_driver_unregister(&omap_dmm_driver);
return 0;
}
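
dev_load()/dev_unload() above bracket the driver's private workqueue: it is created before anything can queue work on it (deferred unpins and pans) and flushed before destruction, so no work item can touch dev_private after it is freed. The shape of it, as a sketch:

#include <linux/workqueue.h>

static struct workqueue_struct *wq;

static int load(void)
{
	/* one unbound worker; WQ_NON_REENTRANT still mattered in this
	 * era (it later became the default behaviour and was removed) */
	wq = alloc_workqueue("omapdrm", WQ_UNBOUND | WQ_NON_REENTRANT, 1);
	return wq ? 0 : -ENOMEM;
}

static void unload(void)
{
	flush_workqueue(wq);	/* let queued work finish... */
	destroy_workqueue(wq);	/* ...then tear the queue down */
}
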
diff --git a/drivers/staging/omapdrm/omap_drv.h b/drivers/staging/omapdrm/omap_drv.h
index 61fe022dda5b..b7e0f0773003 100644
--- a/drivers/staging/omapdrm/omap_drv.h
+++ b/drivers/staging/omapdrm/omap_drv.h
@@ -42,21 +42,31 @@
struct omap_drm_private {
unsigned int num_crtcs;
struct drm_crtc *crtcs[8];
+
unsigned int num_planes;
struct drm_plane *planes[8];
+
unsigned int num_encoders;
struct drm_encoder *encoders[8];
+
unsigned int num_connectors;
struct drm_connector *connectors[8];
struct drm_fb_helper *fbdev;
+ struct workqueue_struct *wq;
+
+ struct list_head obj_list;
+
bool has_dmm;
};
#ifdef CONFIG_DEBUG_FS
int omap_debugfs_init(struct drm_minor *minor);
void omap_debugfs_cleanup(struct drm_minor *minor);
+void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
+void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
+void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
#endif
struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
@@ -75,6 +85,8 @@ int omap_plane_mode_set(struct drm_plane *plane,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
+void omap_plane_on_endwin(struct drm_plane *plane,
+ void (*fxn)(void *), void *arg);
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
struct omap_overlay_manager *mgr);
@@ -92,13 +104,16 @@ void omap_connector_mode_set(struct drm_connector *connector,
void omap_connector_flush(struct drm_connector *connector,
int x, int y, int w, int h);
+uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
+ uint32_t max_formats, enum omap_color_mode supported_modes);
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
-int omap_framebuffer_pin(struct drm_framebuffer *fb);
-void omap_framebuffer_unpin(struct drm_framebuffer *fb);
+int omap_framebuffer_replace(struct drm_framebuffer *a,
+ struct drm_framebuffer *b, void *arg,
+ void (*unpin)(void *arg, struct drm_gem_object *bo));
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, int x, int y,
struct omap_overlay_info *info);
struct drm_connector *omap_framebuffer_get_next_connector(
diff --git a/drivers/staging/omapdrm/omap_fb.c b/drivers/staging/omapdrm/omap_fb.c
index d021a7ec58df..04b235b6724a 100644
--- a/drivers/staging/omapdrm/omap_fb.c
+++ b/drivers/staging/omapdrm/omap_fb.c
@@ -59,6 +59,20 @@ static const struct format formats[] = {
{ OMAP_DSS_COLOR_UYVY, DRM_FORMAT_UYVY, {{2, 1}}, true },
};
+/* convert from overlay's pixel formats bitmask to an array of fourcc's */
+uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
+ uint32_t max_formats, enum omap_color_mode supported_modes)
+{
+ uint32_t nformats = 0;
+ int i = 0;
+
+ for (i = 0; i < ARRAY_SIZE(formats) && nformats < max_formats; i++)
+ if (formats[i].dss_format & supported_modes)
+ pixel_formats[nformats++] = formats[i].pixel_format;
+
+ return nformats;
+}
+
/* per-plane info for the fb: */
struct plane {
struct drm_gem_object *bo;
@@ -87,7 +101,7 @@ static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- int i, n = drm_format_num_planes(omap_fb->format->pixel_format);
+ int i, n = drm_format_num_planes(fb->pixel_format);
DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
@@ -123,41 +137,6 @@ static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
.dirty = omap_framebuffer_dirty,
};
-/* pins buffer in preparation for scanout */
-int omap_framebuffer_pin(struct drm_framebuffer *fb)
-{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- int ret, i, n = drm_format_num_planes(omap_fb->format->pixel_format);
-
- for (i = 0; i < n; i++) {
- struct plane *plane = &omap_fb->planes[i];
- ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true);
- if (ret)
- goto fail;
- }
-
- return 0;
-
-fail:
- while (--i > 0) {
- struct plane *plane = &omap_fb->planes[i];
- omap_gem_put_paddr(plane->bo);
- }
- return ret;
-}
-
-/* releases buffer when done with scanout */
-void omap_framebuffer_unpin(struct drm_framebuffer *fb)
-{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- int i, n = drm_format_num_planes(omap_fb->format->pixel_format);
-
- for (i = 0; i < n; i++) {
- struct plane *plane = &omap_fb->planes[i];
- omap_gem_put_paddr(plane->bo);
- }
-}
-
/* update ovl info for scanout, handles cases of multi-planar fb's, etc.
*/
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, int x, int y,
@@ -187,10 +166,59 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, int x, int y,
}
}
+/* Unpin 'a' (if not NULL) and pin 'b' (if not NULL). Buffers to unpin
+ * are just pushed to the unpin fifo so that the caller can defer the
+ * actual unpin until vblank.
+ *
+ * Note if this fails (ie. something went very wrong!), all buffers are
+ * unpinned, and the caller disables the overlay. We could have tried
+ * to revert back to the previous set of pinned buffers but if things are
+ * hosed there is no guarantee that would succeed.
+ */
+int omap_framebuffer_replace(struct drm_framebuffer *a,
+ struct drm_framebuffer *b, void *arg,
+ void (*unpin)(void *arg, struct drm_gem_object *bo))
+{
+ int ret = 0, i, na, nb;
+ struct omap_framebuffer *ofba = to_omap_framebuffer(a);
+ struct omap_framebuffer *ofbb = to_omap_framebuffer(b);
+
+ na = a ? drm_format_num_planes(a->pixel_format) : 0;
+ nb = b ? drm_format_num_planes(b->pixel_format) : 0;
+
+ for (i = 0; i < max(na, nb); i++) {
+ struct plane *pa, *pb;
+
+ pa = (i < na) ? &ofba->planes[i] : NULL;
+ pb = (i < nb) ? &ofbb->planes[i] : NULL;
+
+ if (pa) {
+ unpin(arg, pa->bo);
+ pa->paddr = 0;
+ }
+
+ if (pb && !ret)
+ ret = omap_gem_get_paddr(pb->bo, &pb->paddr, true);
+ }
+
+ if (ret) {
+ /* something went wrong.. unpin what has been pinned */
+ for (i = 0; i < nb; i++) {
+ struct plane *pb = &ofba->planes[i];
+ if (pb->paddr) {
+ unpin(arg, pb->bo);
+ pb->paddr = 0;
+ }
+ }
+ }
+
+ return ret;
+}
+
struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- if (p >= drm_format_num_planes(omap_fb->format->pixel_format))
+ if (p >= drm_format_num_planes(fb->pixel_format))
return NULL;
return omap_fb->planes[p].bo;
}
@@ -249,6 +277,24 @@ void omap_framebuffer_flush(struct drm_framebuffer *fb,
}
}
+#ifdef CONFIG_DEBUG_FS
+void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ int i, n = drm_format_num_planes(fb->pixel_format);
+
+ seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
+ (char *)&fb->pixel_format);
+
+ for (i = 0; i < n; i++) {
+ struct plane *plane = &omap_fb->planes[i];
+ seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
+ i, plane->offset, plane->pitch);
+ omap_gem_describe(plane->bo, m);
+ }
+}
+#endif
+
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
{
@@ -337,8 +383,8 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
plane->bo = bos[i];
plane->offset = mode_cmd->offsets[i];
- plane->pitch = mode_cmd->pitches[i];
- plane->paddr = pitch;
+ plane->pitch = pitch;
+ plane->paddr = 0;
}
drm_helper_mode_fill_fb_struct(fb, mode_cmd);
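
With pin/unpin replaced by omap_framebuffer_replace(), the caller no longer releases buffers synchronously; it passes a callback that merely queues each outgoing buffer, deferring the real unpin until scanout has moved on. A sketch of the calling convention (the actual queueing is the unpin() helper in omap_plane.c further below):

/* invoked once per buffer that must stay pinned until the hardware
 * is done scanning it out */
static void defer_unpin(void *arg, struct drm_gem_object *bo)
{
	/* push bo on a fifo drained from the scanout-done worker */
}

static int swap_scanout(struct drm_plane *plane,
		struct drm_framebuffer *old_fb,
		struct drm_framebuffer *new_fb)
{
	int ret = omap_framebuffer_replace(old_fb, new_fb, plane,
					   defer_unpin);
	if (ret) {
		/* everything is unpinned at this point; per the comment
		 * above, disabling the overlay is safer than guessing
		 * at a revertible state */
	}
	return ret;
}
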
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/staging/omapdrm/omap_fbdev.c
index 96940bbfc6f4..11acd4c35ed2 100644
--- a/drivers/staging/omapdrm/omap_fbdev.c
+++ b/drivers/staging/omapdrm/omap_fbdev.c
@@ -37,6 +37,9 @@ struct omap_fbdev {
struct drm_framebuffer *fb;
struct drm_gem_object *bo;
bool ywrap_enabled;
+
+ /* for deferred dmm roll when getting called in atomic ctx */
+ struct work_struct work;
};
static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h);
@@ -75,12 +78,22 @@ static void omap_fbdev_imageblit(struct fb_info *fbi,
image->width, image->height);
}
+static void pan_worker(struct work_struct *work)
+{
+ struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work);
+ struct fb_info *fbi = fbdev->base.fbdev;
+ int npages;
+
+ /* DMM roll shifts in 4K pages: */
+ npages = fbi->fix.line_length >> PAGE_SHIFT;
+ omap_gem_roll(fbdev->bo, fbi->var.yoffset * npages);
+}
+
static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
struct fb_info *fbi)
{
struct drm_fb_helper *helper = get_fb(fbi);
struct omap_fbdev *fbdev = to_omap_fbdev(helper);
- int npages;
if (!helper)
goto fallback;
@@ -88,9 +101,12 @@ static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
if (!fbdev->ywrap_enabled)
goto fallback;
- /* DMM roll shifts in 4K pages: */
- npages = fbi->fix.line_length >> PAGE_SHIFT;
- omap_gem_roll(fbdev->bo, var->yoffset * npages);
+ if (drm_can_sleep()) {
+ pan_worker(&fbdev->work);
+ } else {
+ struct omap_drm_private *priv = helper->dev->dev_private;
+ queue_work(priv->wq, &fbdev->work);
+ }
return 0;
@@ -336,6 +352,8 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
goto fail;
}
+ INIT_WORK(&fbdev->work, pan_worker);
+
helper = &fbdev->base;
helper->funcs = &omap_fb_helper_funcs;
diff --git a/drivers/staging/omapdrm/omap_gem.c b/drivers/staging/omapdrm/omap_gem.c
index b7d6f886c5cf..921f058cc6a4 100644
--- a/drivers/staging/omapdrm/omap_gem.c
+++ b/drivers/staging/omapdrm/omap_gem.c
@@ -45,6 +45,8 @@ int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
struct omap_gem_object {
struct drm_gem_object base;
+ struct list_head mm_list;
+
uint32_t flags;
/** width/height for tiled formats (rounded up to slot boundaries) */
@@ -151,10 +153,23 @@ static void evict_entry(struct drm_gem_object *obj,
enum tiler_fmt fmt, struct usergart_entry *entry)
{
if (obj->dev->dev_mapping) {
- size_t size = PAGE_SIZE * usergart[fmt].height;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int n = usergart[fmt].height;
+ size_t size = PAGE_SIZE * n;
loff_t off = mmap_offset(obj) +
(entry->obj_pgoff << PAGE_SHIFT);
- unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
+ const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+ if (m > 1) {
+ int i;
+ /* if stride > than PAGE_SIZE then sparse mapping: */
+ for (i = n; i > 0; i--) {
+ unmap_mapping_range(obj->dev->dev_mapping,
+ off, PAGE_SIZE, 1);
+ off += PAGE_SIZE * m;
+ }
+ } else {
+ unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
+ }
}
entry->obj = NULL;
@@ -254,13 +269,17 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
+ struct drm_device *dev = obj->dev;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
if (!obj->map_list.map) {
/* Make it mmapable */
size_t size = omap_gem_mmap_size(obj);
int ret = _drm_gem_create_mmap_offset_size(obj, size);
if (ret) {
- dev_err(obj->dev->dev, "could not allocate mmap offset");
+ dev_err(dev->dev, "could not allocate mmap offset\n");
return 0;
}
}
@@ -336,26 +355,39 @@ static int fault_2d(struct drm_gem_object *obj,
void __user *vaddr;
int i, ret, slots;
- if (!usergart)
- return -EFAULT;
-
- /* TODO: this fxn might need a bit tweaking to deal w/ tiled buffers
- * that are wider than 4kb
+ /*
+ * Note the height of the slot is also equal to the number of pages
+ * that need to be mapped in to fill a 4kb wide CPU page. If the slot
+ * height is 64, then 64 pages fill a 4kb wide by 64 row region.
+ */
+ const int n = usergart[fmt].height;
+ const int n_shift = usergart[fmt].height_shift;
+
+ /*
+ * If buffer width in bytes > PAGE_SIZE then the virtual stride is
+ * rounded up to the next multiple of PAGE_SIZE.. this needs to be taken
+ * into account in some of the math, so figure out virtual stride
+ * in pages
*/
+ const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = ((unsigned long)vmf->virtual_address -
vma->vm_start) >> PAGE_SHIFT;
- /* actual address we start mapping at is rounded down to previous slot
+ /*
+ * Actual address we start mapping at is rounded down to previous slot
* boundary in the y direction:
*/
- base_pgoff = round_down(pgoff, usergart[fmt].height);
- vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
- entry = &usergart[fmt].entry[usergart[fmt].last];
+ base_pgoff = round_down(pgoff, m << n_shift);
+ /* figure out buffer width in slots */
slots = omap_obj->width >> usergart[fmt].slot_shift;
+ vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
+
+ entry = &usergart[fmt].entry[usergart[fmt].last];
+
/* evict previous buffer using this usergart entry, if any: */
if (entry->obj)
evict_entry(entry->obj, fmt, entry);
@@ -363,23 +395,30 @@ static int fault_2d(struct drm_gem_object *obj,
entry->obj = obj;
entry->obj_pgoff = base_pgoff;
- /* now convert base_pgoff to phys offset from virt offset:
- */
- base_pgoff = (base_pgoff >> usergart[fmt].height_shift) * slots;
-
- /* map in pages. Note the height of the slot is also equal to the
- * number of pages that need to be mapped in to fill 4kb wide CPU page.
- * If the height is 64, then 64 pages fill a 4kb wide by 64 row region.
- * Beyond the valid pixel part of the buffer, we set pages[i] to NULL to
- * get a dummy page mapped in.. if someone reads/writes it they will get
- * random/undefined content, but at least it won't be corrupting
- * whatever other random page used to be mapped in, or other undefined
- * behavior.
+ /* now convert base_pgoff to phys offset from virt offset: */
+ base_pgoff = (base_pgoff >> n_shift) * slots;
+
+ /* for wider-than 4k.. figure out which part of the slot-row we want: */
+ if (m > 1) {
+ int off = pgoff % m;
+ entry->obj_pgoff += off;
+ base_pgoff /= m;
+ slots = min(slots - (off << n_shift), n);
+ base_pgoff += off << n_shift;
+ vaddr += off << PAGE_SHIFT;
+ }
+
+ /*
+ * Map in pages. Beyond the valid pixel part of the buffer, we set
+ * pages[i] to NULL to get a dummy page mapped in.. if someone
+ * reads/writes it they will get random/undefined content, but at
+ * least it won't be corrupting whatever other random page used to
+ * be mapped in, or other undefined behavior.
*/
memcpy(pages, &omap_obj->pages[base_pgoff],
sizeof(struct page *) * slots);
memset(pages + slots, 0,
- sizeof(struct page *) * (usergart[fmt].height - slots));
+ sizeof(struct page *) * (n - slots));
ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
if (ret) {
@@ -387,16 +426,15 @@ static int fault_2d(struct drm_gem_object *obj,
return ret;
}
- i = usergart[fmt].height;
pfn = entry->paddr >> PAGE_SHIFT;
VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
pfn, pfn << PAGE_SHIFT);
- while (i--) {
+ for (i = n; i > 0; i--) {
vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
pfn += usergart[fmt].stride_pfn;
- vaddr += PAGE_SIZE;
+ vaddr += PAGE_SIZE * m;
}
/* simple round-robin: */
@@ -566,6 +604,8 @@ fail:
/* Set scrolling position. This allows us to implement fast scrolling
* for console.
+ *
+ * Call only from non-atomic contexts.
*/
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
@@ -580,18 +620,6 @@ int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
omap_obj->roll = roll;
- if (in_atomic() || mutex_is_locked(&obj->dev->struct_mutex)) {
- /* this can get called from fbcon in atomic context.. so
- * just ignore it and wait for next time called from
- * interruptible context to update the PAT.. the result
- * may be that user sees wrap-around instead of scrolling
- * momentarily on the screen. If we wanted to be fancier
- * we could perhaps schedule some workqueue work at this
- * point.
- */
- return 0;
- }
-
mutex_lock(&obj->dev->struct_mutex);
/* if we aren't mapped yet, we don't need to do anything */
@@ -774,6 +802,56 @@ void *omap_gem_vaddr(struct drm_gem_object *obj)
return omap_obj->vaddr;
}
+#ifdef CONFIG_DEBUG_FS
+void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
+{
+ struct drm_device *dev = obj->dev;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ uint64_t off = 0;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (obj->map_list.map)
+ off = (uint64_t)obj->map_list.hash.key;
+
+ seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
+ omap_obj->flags, obj->name, obj->refcount.refcount.counter,
+ off, omap_obj->paddr, omap_obj->paddr_cnt,
+ omap_obj->vaddr, omap_obj->roll);
+
+ if (omap_obj->flags & OMAP_BO_TILED) {
+ seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
+ if (omap_obj->block) {
+ struct tcm_area *area = &omap_obj->block->area;
+ seq_printf(m, " (%dx%d, %dx%d)",
+ area->p0.x, area->p0.y,
+ area->p1.x, area->p1.y);
+ }
+ } else {
+ seq_printf(m, " %d", obj->size);
+ }
+
+ seq_printf(m, "\n");
+}
+
+void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
+{
+ struct omap_gem_object *omap_obj;
+ int count = 0;
+ size_t size = 0;
+
+ list_for_each_entry(omap_obj, list, mm_list) {
+ struct drm_gem_object *obj = &omap_obj->base;
+ seq_printf(m, " ");
+ omap_gem_describe(obj, m);
+ count++;
+ size += obj->size;
+ }
+
+ seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
+}
+#endif
+
/* Buffer Synchronization:
*/
@@ -1040,6 +1118,10 @@ void omap_gem_free_object(struct drm_gem_object *obj)
evict(obj);
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ list_del(&omap_obj->mm_list);
+
if (obj->map_list.map) {
drm_gem_free_mmap_offset(obj);
}
@@ -1140,6 +1222,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
goto fail;
}
+ list_add(&omap_obj->mm_list, &priv->obj_list);
+
obj = &omap_obj->base;
if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
@@ -1186,12 +1270,11 @@ void omap_gem_init(struct drm_device *dev)
const enum tiler_fmt fmts[] = {
TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
};
- int i, j, ret;
+ int i, j;
- ret = omap_dmm_init(dev);
- if (ret) {
+ if (!dmm_is_initialized()) {
/* DMM only supported on OMAP4 and later, so this isn't fatal */
- dev_warn(dev->dev, "omap_dmm_init failed, disabling DMM\n");
+ dev_warn(dev->dev, "DMM not available, disable DMM support\n");
return;
}
@@ -1241,6 +1324,5 @@ void omap_gem_deinit(struct drm_device *dev)
/* I believe we can rely on there being no more outstanding GEM
* objects which could depend on usergart/dmm at this point.
*/
- omap_dmm_remove();
kfree(usergart);
}
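
The fault_2d() rework above revolves around two constants: n, the slot height in rows, and m, the virtual stride in pages once a row is wider than one CPU page. Worked numbers make them concrete; the figures below are illustrative only, assuming PAGE_SIZE = 4096 and a shift of 1 for TILFMT_16BIT (as the `width << fmt` expression implies):

#include <linux/kernel.h>	/* round_down() */

/*
 * width = 2048 px, 16-bit  ->  2048 << 1 = 4096 bytes per row
 * m = 1 + (4096 / PAGE_SIZE) = 2 pages of virtual stride per row
 *
 * With slot height n = 64 (n_shift = 6), one slot row covers
 * m << n_shift = 128 pages of the mmap, so a faulting page offset
 * is first aligned down to that boundary:
 */
static unsigned long slot_row_base(unsigned long pgoff, int m, int n_shift)
{
	return round_down(pgoff, m << n_shift);
}

The insert loop at the end then maps n rows, stepping the virtual address by PAGE_SIZE * m per row so each inserted page lands at the start of its (page-aligned) row.
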
diff --git a/drivers/staging/omapdrm/omap_gem_helpers.c b/drivers/staging/omapdrm/omap_gem_helpers.c
index 29275c7209e9..f895363a5e54 100644
--- a/drivers/staging/omapdrm/omap_gem_helpers.c
+++ b/drivers/staging/omapdrm/omap_gem_helpers.c
@@ -84,7 +84,7 @@ fail:
page_cache_release(pages[i]);
}
drm_free_large(pages);
- return ERR_PTR(PTR_ERR(p));
+ return ERR_CAST(p);
}
/**
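
ERR_CAST() says in one step what ERR_PTR(PTR_ERR(p)) spelled out in two: the same error pointer, re-typed, without the decode/encode round trip. A minimal sketch:

#include <linux/err.h>

struct foo;
struct bar;

static struct bar *propagate(struct foo *p)
{
	/* equivalent to ERR_PTR(PTR_ERR(p)), but states the intent */
	return ERR_CAST(p);
}
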
diff --git a/drivers/staging/omapdrm/omap_plane.c b/drivers/staging/omapdrm/omap_plane.c
index 97909124a1fe..7997be74010d 100644
--- a/drivers/staging/omapdrm/omap_plane.c
+++ b/drivers/staging/omapdrm/omap_plane.c
@@ -17,6 +17,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/kfifo.h>
+
#include "omap_drv.h"
/* some hackery because omapdss has an 'enum omap_plane' (which would be
@@ -29,6 +31,11 @@
* plane funcs
*/
+struct callback {
+ void (*fxn)(void *);
+ void *arg;
+};
+
#define to_omap_plane(x) container_of(x, struct omap_plane, base)
struct omap_plane {
@@ -43,8 +50,84 @@ struct omap_plane {
/* last fb that we pinned: */
struct drm_framebuffer *pinned_fb;
+
+ uint32_t nformats;
+ uint32_t formats[32];
+
+ /* for synchronizing access to unpins fifo */
+ struct mutex unpin_mutex;
+
+ /* set of bo's pending unpin until next END_WIN irq */
+ DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *);
+ int num_unpins, pending_num_unpins;
+
+ /* for deferred unpin when we need to wait for scanout complete irq */
+ struct work_struct work;
+
+ /* callback on next endwin irq */
+ struct callback endwin;
};
+/* map from ovl->id to the irq we are interested in for scanout-done */
+static const uint32_t id2irq[] = {
+ [OMAP_DSS_GFX] = DISPC_IRQ_GFX_END_WIN,
+ [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_END_WIN,
+ [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_END_WIN,
+ [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_END_WIN,
+};
+
+static void dispc_isr(void *arg, uint32_t mask)
+{
+ struct drm_plane *plane = arg;
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct omap_drm_private *priv = plane->dev->dev_private;
+
+ omap_dispc_unregister_isr(dispc_isr, plane,
+ id2irq[omap_plane->ovl->id]);
+
+ queue_work(priv->wq, &omap_plane->work);
+}
+
+static void unpin_worker(struct work_struct *work)
+{
+ struct omap_plane *omap_plane =
+ container_of(work, struct omap_plane, work);
+ struct callback endwin;
+
+ mutex_lock(&omap_plane->unpin_mutex);
+ DBG("unpinning %d of %d", omap_plane->num_unpins,
+ omap_plane->num_unpins + omap_plane->pending_num_unpins);
+ while (omap_plane->num_unpins > 0) {
+ struct drm_gem_object *bo = NULL;
+ int ret = kfifo_get(&omap_plane->unpin_fifo, &bo);
+ WARN_ON(!ret);
+ omap_gem_put_paddr(bo);
+ drm_gem_object_unreference_unlocked(bo);
+ omap_plane->num_unpins--;
+ }
+ endwin = omap_plane->endwin;
+ omap_plane->endwin.fxn = NULL;
+ mutex_unlock(&omap_plane->unpin_mutex);
+
+ if (endwin.fxn)
+ endwin.fxn(endwin.arg);
+}
+
+static void install_irq(struct drm_plane *plane)
+{
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct omap_overlay *ovl = omap_plane->ovl;
+ int ret;
+
+ ret = omap_dispc_register_isr(dispc_isr, plane, id2irq[ovl->id]);
+
+ /*
+ * omapdss has upper limit on # of registered irq handlers,
+ * which we shouldn't hit.. but if we do the limit should
+ * be raised or bad things happen:
+ */
+ WARN_ON(ret == -EBUSY);
+}
/* push changes down to dss2 */
static int commit(struct drm_plane *plane)
@@ -71,6 +154,11 @@ static int commit(struct drm_plane *plane)
return ret;
}
+ mutex_lock(&omap_plane->unpin_mutex);
+ omap_plane->num_unpins += omap_plane->pending_num_unpins;
+ omap_plane->pending_num_unpins = 0;
+ mutex_unlock(&omap_plane->unpin_mutex);
+
/* our encoder doesn't necessarily get a commit() after this, in
* particular in the dpms() and mode_set_base() cases, so force the
* manager to update:
@@ -83,8 +171,20 @@ static int commit(struct drm_plane *plane)
dev_err(dev->dev, "could not apply settings\n");
return ret;
}
+
+ /*
+ * NOTE: really this should be atomic w/ mgr->apply() but
+ * omapdss does not expose such an API
+ */
+ if (omap_plane->num_unpins > 0)
+ install_irq(plane);
+
+ } else {
+ struct omap_drm_private *priv = dev->dev_private;
+ queue_work(priv->wq, &omap_plane->work);
}
+
if (ovl->is_enabled(ovl)) {
omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
info->out_width, info->out_height);
@@ -137,21 +237,48 @@ static void update_manager(struct drm_plane *plane)
}
}
+static void unpin(void *arg, struct drm_gem_object *bo)
+{
+ struct drm_plane *plane = arg;
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+
+ if (kfifo_put(&omap_plane->unpin_fifo,
+ (const struct drm_gem_object **)&bo)) {
+ omap_plane->pending_num_unpins++;
+ /* also hold a ref so it isn't free'd while pinned */
+ drm_gem_object_reference(bo);
+ } else {
+ dev_err(plane->dev->dev, "unpin fifo full!\n");
+ omap_gem_put_paddr(bo);
+ }
+}
+
/* update which fb (if any) is pinned for scanout */
static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
- int ret = 0;
+ struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;
+
+ if (pinned_fb != fb) {
+ int ret;
+
+ DBG("%p -> %p", pinned_fb, fb);
+
+ mutex_lock(&omap_plane->unpin_mutex);
+ ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin);
+ mutex_unlock(&omap_plane->unpin_mutex);
+
+ if (ret) {
+ dev_err(plane->dev->dev, "could not swap %p -> %p\n",
+ omap_plane->pinned_fb, fb);
+ omap_plane->pinned_fb = NULL;
+ return ret;
+ }
- if (omap_plane->pinned_fb != fb) {
- if (omap_plane->pinned_fb)
- omap_framebuffer_unpin(omap_plane->pinned_fb);
omap_plane->pinned_fb = fb;
- if (fb)
- ret = omap_framebuffer_pin(fb);
}
- return ret;
+ return 0;
}
/* update parameters that are dependent on the framebuffer dimensions and
@@ -241,6 +368,8 @@ static void omap_plane_destroy(struct drm_plane *plane)
DBG("%s", omap_plane->ovl->name);
omap_plane_disable(plane);
drm_plane_cleanup(plane);
+ WARN_ON(omap_plane->pending_num_unpins + omap_plane->num_unpins > 0);
+ kfifo_free(&omap_plane->unpin_fifo);
kfree(omap_plane);
}
@@ -258,37 +387,34 @@ int omap_plane_dpms(struct drm_plane *plane, int mode)
if (!r)
r = ovl->enable(ovl);
} else {
+ struct omap_drm_private *priv = plane->dev->dev_private;
r = ovl->disable(ovl);
update_pin(plane, NULL);
+ queue_work(priv->wq, &omap_plane->work);
}
return r;
}
+void omap_plane_on_endwin(struct drm_plane *plane,
+ void (*fxn)(void *), void *arg)
+{
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+
+ mutex_lock(&omap_plane->unpin_mutex);
+ omap_plane->endwin.fxn = fxn;
+ omap_plane->endwin.arg = arg;
+ mutex_unlock(&omap_plane->unpin_mutex);
+
+ install_irq(plane);
+}
+
static const struct drm_plane_funcs omap_plane_funcs = {
.update_plane = omap_plane_update,
.disable_plane = omap_plane_disable,
.destroy = omap_plane_destroy,
};
-static const uint32_t formats[] = {
- DRM_FORMAT_RGB565,
- DRM_FORMAT_RGBX4444,
- DRM_FORMAT_XRGB4444,
- DRM_FORMAT_RGBA4444,
- DRM_FORMAT_ABGR4444,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGBX8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGBA8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_NV12,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_UYVY,
-};
-
/* initialize plane */
struct drm_plane *omap_plane_init(struct drm_device *dev,
struct omap_overlay *ovl, unsigned int possible_crtcs,
@@ -296,21 +422,38 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
{
struct drm_plane *plane = NULL;
struct omap_plane *omap_plane;
+ int ret;
DBG("%s: possible_crtcs=%08x, priv=%d", ovl->name,
possible_crtcs, priv);
+ /* friendly reminder to update table for future hw: */
+ WARN_ON(ovl->id >= ARRAY_SIZE(id2irq));
+
omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
if (!omap_plane) {
dev_err(dev->dev, "could not allocate plane\n");
goto fail;
}
+ mutex_init(&omap_plane->unpin_mutex);
+
+ ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL);
+ if (ret) {
+ dev_err(dev->dev, "could not allocate unpin FIFO\n");
+ goto fail;
+ }
+
+ INIT_WORK(&omap_plane->work, unpin_worker);
+
+ omap_plane->nformats = omap_framebuffer_get_formats(
+ omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
+ ovl->supported_modes);
omap_plane->ovl = ovl;
plane = &omap_plane->base;
drm_plane_init(dev, plane, possible_crtcs, &omap_plane_funcs,
- formats, ARRAY_SIZE(formats), priv);
+ omap_plane->formats, omap_plane->nformats, priv);
/* get our starting configuration, set defaults for parameters
* we don't currently use, etc:
@@ -330,7 +473,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
if (priv)
omap_plane->info.zorder = 0;
else
- omap_plane->info.zorder = 1;
+ omap_plane->info.zorder = ovl->id;
update_manager(plane);
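
The plane code above combines three pieces: a kfifo of buffers waiting to be released, a work item that drains it, and a DISPC end-of-window interrupt that schedules that work once scanout is done. The control flow, reduced to a sketch with hypothetical names (the real fifo holds struct drm_gem_object pointers and takes unpin_mutex around producers):

#include <linux/kfifo.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

struct deferred_release {
	DECLARE_KFIFO_PTR(fifo, void *);	/* kfifo_alloc()'d at init */
	struct work_struct work;		/* drains the fifo */
};

/* producer: called while swapping scanout buffers */
static void queue_release(struct deferred_release *dr, void *obj)
{
	if (!kfifo_put(&dr->fifo, &obj))
		pr_err("release fifo full!\n");
}

/* end-of-window isr: hardware is done, hand off to process context */
static void scanout_done_isr(struct deferred_release *dr)
{
	schedule_work(&dr->work);
}

static void release_worker(struct work_struct *work)
{
	struct deferred_release *dr =
		container_of(work, struct deferred_release, work);
	void *obj;

	while (kfifo_get(&dr->fifo, &obj)) {
		/* the real driver does omap_gem_put_paddr() here and
		 * drops the extra reference taken when the bo was queued */
	}
}
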
diff --git a/drivers/staging/ozwpan/Kbuild b/drivers/staging/ozwpan/Kbuild
new file mode 100644
index 000000000000..6cc84cb3f0a6
--- /dev/null
+++ b/drivers/staging/ozwpan/Kbuild
@@ -0,0 +1,19 @@
+# -----------------------------------------------------------------------------
+# Copyright (c) 2011 Ozmo Inc
+# Released under the GNU General Public License Version 2 (GPLv2).
+# -----------------------------------------------------------------------------
+obj-$(CONFIG_USB_WPAN_HCD) += ozwpan.o
+ozwpan-y := \
+ ozmain.o \
+ ozpd.o \
+ ozusbsvc.o \
+ ozusbsvc1.o \
+ ozhcd.o \
+ ozeltbuf.o \
+ ozproto.o \
+ ozcdev.o \
+ ozurbparanoia.o \
+ oztrace.o \
+ ozevent.o
+
+
diff --git a/drivers/staging/ozwpan/Kconfig b/drivers/staging/ozwpan/Kconfig
new file mode 100644
index 000000000000..7904caec546a
--- /dev/null
+++ b/drivers/staging/ozwpan/Kconfig
@@ -0,0 +1,9 @@
+config USB_WPAN_HCD
+ tristate "USB over WiFi Host Controller"
+ depends on USB && NET
+ help
+ A driver for USB Host Controllers that are compatible with
+ Ozmo Devices USB over WiFi technology.
+
+ To compile this driver as a module, choose M here: the module
+ will be called "ozwpan".
diff --git a/drivers/staging/ozwpan/README b/drivers/staging/ozwpan/README
new file mode 100644
index 000000000000..bb1a69b94541
--- /dev/null
+++ b/drivers/staging/ozwpan/README
@@ -0,0 +1,25 @@
+OZWPAN USB Host Controller Driver
+---------------------------------
+This driver is a USB HCD driver that does not have an associated physical
+device but instead uses Wi-Fi to communicate with the wireless peripheral.
+The USB requests are converted into a layer 2 network protocol and transmitted
+on the network using an ethertype (0x892e) registered to Ozmo Device Inc.
+This driver is compatible with existing wireless devices that use Ozmo Devices
+technology.
+
+To operate, the driver must be bound to a suitable network interface. This can
+be done when the module is loaded (specifying the name of the network interface
+as a parameter - e.g. 'insmod ozwpan g_net_dev=go0') or can be bound after
+loading using an ioctl call. See the ozappif.h file and the ioctls
+OZ_IOCTL_ADD_BINDING and OZ_IOCTL_REMOVE_BINDING.
+
+The devices connect to the host using Wi-Fi Direct, so a network card that
+supports Wi-Fi Direct is required. A recent version (0.8.x or later) of
+wpa_supplicant can be used to set up the network interface to create a
+persistent autonomous group (for older pre-WFD peripherals) or to put it in a
+listen state to allow group negotiation for more recent devices that support WFD.
+
+The protocol used over the network does not directly mimic the USB bus
+transactions as this would be rather busy and inefficient. Instead, the chapter 9
+requests are converted into a request/response pair of messages. (See
+ozprotocol.h for data structures used in the protocol).
diff --git a/drivers/staging/ozwpan/TODO b/drivers/staging/ozwpan/TODO
new file mode 100644
index 000000000000..f7a9c122f596
--- /dev/null
+++ b/drivers/staging/ozwpan/TODO
@@ -0,0 +1,12 @@
+TODO:
+ - review user mode interface and determine if ioctls can be replaced
+ with something better. Correctly export data structures to user mode
+ if ioctls are still required and allocate ioctl numbers from
+ ioctl-number.txt.
+ - check USB HCD implementation is complete and correct.
+ - remove any debug and trace code.
+ - code review by USB developer community.
+ - testing with as many devices as possible.
+
+Please send any patches for this driver to Chris Kelly <ckelly@ozmodevices.com>
+and Greg Kroah-Hartman <gregkh@linuxfoundation.org>.
diff --git a/drivers/staging/ozwpan/ozappif.h b/drivers/staging/ozwpan/ozappif.h
new file mode 100644
index 000000000000..af0273293872
--- /dev/null
+++ b/drivers/staging/ozwpan/ozappif.h
@@ -0,0 +1,46 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZAPPIF_H
+#define _OZAPPIF_H
+
+#include "ozeventdef.h"
+
+#define OZ_IOCTL_MAGIC 0xf4
+
+struct oz_mac_addr {
+ unsigned char a[6];
+};
+
+#define OZ_MAX_PDS 8
+
+struct oz_pd_list {
+ int count;
+ struct oz_mac_addr addr[OZ_MAX_PDS];
+};
+
+#define OZ_MAX_BINDING_LEN 32
+
+struct oz_binding_info {
+ char name[OZ_MAX_BINDING_LEN];
+};
+
+struct oz_test {
+ int action;
+};
+
+#define OZ_IOCTL_GET_PD_LIST _IOR(OZ_IOCTL_MAGIC, 0, struct oz_pd_list)
+#define OZ_IOCTL_SET_ACTIVE_PD _IOW(OZ_IOCTL_MAGIC, 1, struct oz_mac_addr)
+#define OZ_IOCTL_GET_ACTIVE_PD _IOR(OZ_IOCTL_MAGIC, 2, struct oz_mac_addr)
+#define OZ_IOCTL_CLEAR_EVENTS _IO(OZ_IOCTL_MAGIC, 3)
+#define OZ_IOCTL_GET_EVENTS _IOR(OZ_IOCTL_MAGIC, 4, struct oz_evtlist)
+#define OZ_IOCTL_ADD_BINDING _IOW(OZ_IOCTL_MAGIC, 5, struct oz_binding_info)
+#define OZ_IOCTL_TEST _IOWR(OZ_IOCTL_MAGIC, 6, struct oz_test)
+#define OZ_IOCTL_SET_EVENT_MASK _IOW(OZ_IOCTL_MAGIC, 7, unsigned long)
+#define OZ_IOCTL_REMOVE_BINDING _IOW(OZ_IOCTL_MAGIC, 8, struct oz_binding_info)
+#define OZ_IOCTL_MAX 9
+
+
+#endif /* _OZAPPIF_H */
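
ozappif.h is the user-mode contract: the binding flow the README describes ends up as plain ioctls on the driver's character device. A sketch of a user-space caller, assuming the node is /dev/ozwpan (the header does not fix the device name) and reusing the README's example interface 'go0':

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ozappif.h"

int main(void)
{
	struct oz_binding_info b;
	int fd = open("/dev/ozwpan", O_RDWR);	/* node name is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* bind the protocol to a network interface after module load,
	 * as the README describes */
	memset(&b, 0, sizeof(b));
	strncpy(b.name, "go0", OZ_MAX_BINDING_LEN - 1);
	if (ioctl(fd, OZ_IOCTL_ADD_BINDING, &b) < 0)
		perror("OZ_IOCTL_ADD_BINDING");

	close(fd);
	return 0;
}
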
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
new file mode 100644
index 000000000000..1c380d687963
--- /dev/null
+++ b/drivers/staging/ozwpan/ozcdev.c
@@ -0,0 +1,521 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/netdevice.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include "ozconfig.h"
+#include "ozprotocol.h"
+#include "oztrace.h"
+#include "ozappif.h"
+#include "ozeltbuf.h"
+#include "ozpd.h"
+#include "ozproto.h"
+#include "ozevent.h"
+/*------------------------------------------------------------------------------
+ */
+#define OZ_RD_BUF_SZ 256
+struct oz_cdev {
+ dev_t devnum;
+ struct cdev cdev;
+ wait_queue_head_t rdq;
+ spinlock_t lock;
+ u8 active_addr[ETH_ALEN];
+ struct oz_pd *active_pd;
+};
+
+/* Per PD context for the serial service stored in the PD. */
+struct oz_serial_ctx {
+ atomic_t ref_count;
+ u8 tx_seq_num;
+ u8 rx_seq_num;
+ u8 rd_buf[OZ_RD_BUF_SZ];
+ int rd_in;
+ int rd_out;
+};
+/*------------------------------------------------------------------------------
+ */
+int g_taction;
+/*------------------------------------------------------------------------------
+ */
+static struct oz_cdev g_cdev;
+/*------------------------------------------------------------------------------
+ * Context: process and softirq
+ */
+static struct oz_serial_ctx *oz_cdev_claim_ctx(struct oz_pd *pd)
+{
+ struct oz_serial_ctx *ctx;
+ spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
+ ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
+ if (ctx)
+ atomic_inc(&ctx->ref_count);
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
+ return ctx;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+static void oz_cdev_release_ctx(struct oz_serial_ctx *ctx)
+{
+ if (atomic_dec_and_test(&ctx->ref_count)) {
+ oz_trace("Dealloc serial context.\n");
+ kfree(ctx);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_cdev_open(struct inode *inode, struct file *filp)
+{
+ struct oz_cdev *dev;
+ oz_trace("oz_cdev_open()\n");
+ oz_trace("major = %d minor = %d\n", imajor(inode), iminor(inode));
+ dev = container_of(inode->i_cdev, struct oz_cdev, cdev);
+ filp->private_data = dev;
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_cdev_release(struct inode *inode, struct file *filp)
+{
+ oz_trace("oz_cdev_release()\n");
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+ssize_t oz_cdev_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *fpos)
+{
+ int n;
+ int ix;
+
+ struct oz_pd *pd;
+ struct oz_serial_ctx *ctx = 0;
+
+ spin_lock_bh(&g_cdev.lock);
+ pd = g_cdev.active_pd;
+ if (pd)
+ oz_pd_get(pd);
+ spin_unlock_bh(&g_cdev.lock);
+ if (pd == 0)
+ return -1;
+ ctx = oz_cdev_claim_ctx(pd);
+ if (ctx == 0)
+ goto out2;
+ n = ctx->rd_in - ctx->rd_out;
+ if (n < 0)
+ n += OZ_RD_BUF_SZ;
+ if (count > n)
+ count = n;
+ ix = ctx->rd_out;
+ n = OZ_RD_BUF_SZ - ix;
+ if (n > count)
+ n = count;
+ if (copy_to_user(buf, &ctx->rd_buf[ix], n)) {
+ count = 0;
+ goto out1;
+ }
+ ix += n;
+ if (ix == OZ_RD_BUF_SZ)
+ ix = 0;
+ if (n < count) {
+ if (copy_to_user(&buf[n], ctx->rd_buf, count-n)) {
+ count = 0;
+ goto out1;
+ }
+ ix = count-n;
+ }
+ ctx->rd_out = ix;
+out1:
+ oz_cdev_release_ctx(ctx);
+out2:
+ oz_pd_put(pd);
+ return count;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+ssize_t oz_cdev_write(struct file *filp, const char __user *buf, size_t count,
+ loff_t *fpos)
+{
+ struct oz_pd *pd;
+ struct oz_elt_buf *eb;
+ struct oz_elt_info *ei = 0;
+ struct oz_elt *elt;
+ struct oz_app_hdr *app_hdr;
+ struct oz_serial_ctx *ctx;
+
+ spin_lock_bh(&g_cdev.lock);
+ pd = g_cdev.active_pd;
+ if (pd)
+ oz_pd_get(pd);
+ spin_unlock_bh(&g_cdev.lock);
+ if (pd == 0)
+ return -1;
+ eb = &pd->elt_buff;
+ ei = oz_elt_info_alloc(eb);
+ if (ei == 0) {
+ count = 0;
+ goto out;
+ }
+ elt = (struct oz_elt *)ei->data;
+ app_hdr = (struct oz_app_hdr *)(elt+1);
+ elt->length = sizeof(struct oz_app_hdr) + count;
+ elt->type = OZ_ELT_APP_DATA;
+ ei->app_id = OZ_APPID_SERIAL;
+ ei->length = elt->length + sizeof(struct oz_elt);
+ app_hdr->app_id = OZ_APPID_SERIAL;
+ if (copy_from_user(app_hdr+1, buf, count))
+ goto out;
+ spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
+ if (ctx) {
+ app_hdr->elt_seq_num = ctx->tx_seq_num++;
+ if (ctx->tx_seq_num == 0)
+ ctx->tx_seq_num = 1;
+ spin_lock(&eb->lock);
+ if (oz_queue_elt_info(eb, 0, 0, ei) == 0)
+ ei = 0;
+ spin_unlock(&eb->lock);
+ }
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+out:
+ if (ei) {
+ count = 0;
+ spin_lock_bh(&eb->lock);
+ oz_elt_info_free(eb, ei);
+ spin_unlock_bh(&eb->lock);
+ }
+ oz_pd_put(pd);
+ return count;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int oz_set_active_pd(u8 *addr)
+{
+ int rc = 0;
+ struct oz_pd *pd;
+ struct oz_pd *old_pd;
+ pd = oz_pd_find(addr);
+ if (pd) {
+ spin_lock_bh(&g_cdev.lock);
+ memcpy(g_cdev.active_addr, addr, ETH_ALEN);
+ old_pd = g_cdev.active_pd;
+ g_cdev.active_pd = pd;
+ spin_unlock_bh(&g_cdev.lock);
+ if (old_pd)
+ oz_pd_put(old_pd);
+ } else {
+ if (!memcmp(addr, "\0\0\0\0\0\0", sizeof(addr))) {
+ spin_lock_bh(&g_cdev.lock);
+ pd = g_cdev.active_pd;
+ g_cdev.active_pd = 0;
+ memset(g_cdev.active_addr, 0,
+ sizeof(g_cdev.active_addr));
+ spin_unlock_bh(&g_cdev.lock);
+ if (pd)
+ oz_pd_put(pd);
+ } else {
+ rc = -1;
+ }
+ }
+ return rc;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+long oz_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int rc = 0;
+ if (_IOC_TYPE(cmd) != OZ_IOCTL_MAGIC)
+ return -ENOTTY;
+ if (_IOC_NR(cmd) > OZ_IOCTL_MAX)
+ return -ENOTTY;
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ rc = !access_ok(VERIFY_WRITE, (void __user *)arg,
+ _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ rc = !access_ok(VERIFY_READ, (void __user *)arg,
+ _IOC_SIZE(cmd));
+ if (rc)
+ return -EFAULT;
+ switch (cmd) {
+ case OZ_IOCTL_GET_PD_LIST: {
+ struct oz_pd_list list;
+ oz_trace("OZ_IOCTL_GET_PD_LIST\n");
+ list.count = oz_get_pd_list(list.addr, OZ_MAX_PDS);
+ if (copy_to_user((void __user *)arg, &list,
+ sizeof(list)))
+ return -EFAULT;
+ }
+ break;
+ case OZ_IOCTL_SET_ACTIVE_PD: {
+ u8 addr[ETH_ALEN];
+ oz_trace("OZ_IOCTL_SET_ACTIVE_PD\n");
+ if (copy_from_user(addr, (void __user *)arg, ETH_ALEN))
+ return -EFAULT;
+ rc = oz_set_active_pd(addr);
+ }
+ break;
+ case OZ_IOCTL_GET_ACTIVE_PD: {
+ u8 addr[ETH_ALEN];
+ oz_trace("OZ_IOCTL_GET_ACTIVE_PD\n");
+ spin_lock_bh(&g_cdev.lock);
+ memcpy(addr, g_cdev.active_addr, ETH_ALEN);
+ spin_unlock_bh(&g_cdev.lock);
+ if (copy_to_user((void __user *)arg, addr, ETH_ALEN))
+ return -EFAULT;
+ }
+ break;
+#ifdef WANT_EVENT_TRACE
+ case OZ_IOCTL_CLEAR_EVENTS:
+ oz_events_clear();
+ break;
+ case OZ_IOCTL_GET_EVENTS:
+ rc = oz_events_copy((void __user *)arg);
+ break;
+ case OZ_IOCTL_SET_EVENT_MASK:
+ if (copy_from_user(&g_evt_mask, (void __user *)arg,
+ sizeof(unsigned long))) {
+ return -EFAULT;
+ }
+ break;
+#endif /* WANT_EVENT_TRACE */
+ case OZ_IOCTL_ADD_BINDING:
+ case OZ_IOCTL_REMOVE_BINDING: {
+ struct oz_binding_info b;
+ if (copy_from_user(&b, (void __user *)arg,
+ sizeof(struct oz_binding_info))) {
+ return -EFAULT;
+ }
+ /* Make sure name is null terminated. */
+ b.name[OZ_MAX_BINDING_LEN-1] = 0;
+ if (cmd == OZ_IOCTL_ADD_BINDING)
+ oz_binding_add(b.name);
+ else
+ oz_binding_remove(b.name);
+ }
+ break;
+ }
+ return rc;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+unsigned int oz_cdev_poll(struct file *filp, poll_table *wait)
+{
+ unsigned int ret = 0;
+ struct oz_cdev *dev = filp->private_data;
+ oz_trace("Poll called wait = %p\n", wait);
+ spin_lock_bh(&dev->lock);
+ if (dev->active_pd) {
+ struct oz_serial_ctx *ctx = oz_cdev_claim_ctx(dev->active_pd);
+ if (ctx) {
+ if (ctx->rd_in != ctx->rd_out)
+ ret |= POLLIN | POLLRDNORM;
+ oz_cdev_release_ctx(ctx);
+ }
+ }
+ spin_unlock_bh(&dev->lock);
+ if (wait)
+ poll_wait(filp, &dev->rdq, wait);
+ return ret;
+}
+/*------------------------------------------------------------------------------
+ */
+const struct file_operations oz_fops = {
+ .owner = THIS_MODULE,
+ .open = oz_cdev_open,
+ .release = oz_cdev_release,
+ .read = oz_cdev_read,
+ .write = oz_cdev_write,
+ .unlocked_ioctl = oz_cdev_ioctl,
+ .poll = oz_cdev_poll
+};
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_cdev_register(void)
+{
+ int err;
+ memset(&g_cdev, 0, sizeof(g_cdev));
+ err = alloc_chrdev_region(&g_cdev.devnum, 0, 1, "ozwpan");
+ if (err < 0)
+ return err;
+ oz_trace("Alloc dev number %d:%d\n", MAJOR(g_cdev.devnum),
+ MINOR(g_cdev.devnum));
+ cdev_init(&g_cdev.cdev, &oz_fops);
+ g_cdev.cdev.owner = THIS_MODULE;
+ g_cdev.cdev.ops = &oz_fops;
+ spin_lock_init(&g_cdev.lock);
+ init_waitqueue_head(&g_cdev.rdq);
+	err = cdev_add(&g_cdev.cdev, g_cdev.devnum, 1);
+	if (err < 0) {
+		unregister_chrdev_region(g_cdev.devnum, 1);
+		return err;
+	}
+	return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_cdev_deregister(void)
+{
+ cdev_del(&g_cdev.cdev);
+ unregister_chrdev_region(g_cdev.devnum, 1);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_cdev_init(void)
+{
+ oz_event_log(OZ_EVT_SERVICE, 1, OZ_APPID_SERIAL, 0, 0);
+ oz_app_enable(OZ_APPID_SERIAL, 1);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_cdev_term(void)
+{
+ oz_event_log(OZ_EVT_SERVICE, 2, OZ_APPID_SERIAL, 0, 0);
+ oz_app_enable(OZ_APPID_SERIAL, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+int oz_cdev_start(struct oz_pd *pd, int resume)
+{
+ struct oz_serial_ctx *ctx;
+ struct oz_serial_ctx *old_ctx = 0;
+ oz_event_log(OZ_EVT_SERVICE, 3, OZ_APPID_SERIAL, 0, resume);
+ if (resume) {
+ oz_trace("Serial service resumed.\n");
+ return 0;
+ }
+ ctx = kzalloc(sizeof(struct oz_serial_ctx), GFP_ATOMIC);
+ if (ctx == 0)
+ return -ENOMEM;
+ atomic_set(&ctx->ref_count, 1);
+ ctx->tx_seq_num = 1;
+ spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
+ old_ctx = pd->app_ctx[OZ_APPID_SERIAL-1];
+ if (old_ctx) {
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
+ kfree(ctx);
+ } else {
+ pd->app_ctx[OZ_APPID_SERIAL-1] = ctx;
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
+ }
+ spin_lock(&g_cdev.lock);
+ if ((g_cdev.active_pd == 0) &&
+ (memcmp(pd->mac_addr, g_cdev.active_addr, ETH_ALEN) == 0)) {
+ oz_pd_get(pd);
+ g_cdev.active_pd = pd;
+ oz_trace("Active PD arrived.\n");
+ }
+ spin_unlock(&g_cdev.lock);
+ oz_trace("Serial service started.\n");
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_cdev_stop(struct oz_pd *pd, int pause)
+{
+ struct oz_serial_ctx *ctx;
+ oz_event_log(OZ_EVT_SERVICE, 4, OZ_APPID_SERIAL, 0, pause);
+ if (pause) {
+ oz_trace("Serial service paused.\n");
+ return;
+ }
+ spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
+ ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
+ pd->app_ctx[OZ_APPID_SERIAL-1] = 0;
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
+ if (ctx)
+ oz_cdev_release_ctx(ctx);
+ spin_lock(&g_cdev.lock);
+ if (pd == g_cdev.active_pd)
+ g_cdev.active_pd = 0;
+ else
+ pd = 0;
+ spin_unlock(&g_cdev.lock);
+ if (pd) {
+ oz_pd_put(pd);
+ oz_trace("Active PD departed.\n");
+ }
+ oz_trace("Serial service stopped.\n");
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
+{
+ struct oz_serial_ctx *ctx;
+ struct oz_app_hdr *app_hdr;
+ u8 *data;
+ int len;
+ int space;
+ int copy_sz;
+ int ix;
+
+ ctx = oz_cdev_claim_ctx(pd);
+ if (ctx == 0) {
+ oz_trace("Cannot claim serial context.\n");
+ return;
+ }
+
+ app_hdr = (struct oz_app_hdr *)(elt+1);
+ /* If sequence number is non-zero then check it is not a duplicate.
+ */
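+	/* The subtraction below treats the 8-bit sequence space as a signed
+	 * window: e.g. rx_seq_num = 5 and elt_seq_num = 3 give
+	 * (5 - 3) & 0x80 == 0, so 3 is at or behind the last number seen and
+	 * is dropped, while elt_seq_num = 6 gives 0xff & 0x80 != 0 and is
+	 * accepted.
+	 */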
+ if (app_hdr->elt_seq_num != 0) {
+ if (((ctx->rx_seq_num - app_hdr->elt_seq_num) & 0x80) == 0) {
+ /* Reject duplicate element. */
+ oz_trace("Duplicate element:%02x %02x\n",
+ app_hdr->elt_seq_num, ctx->rx_seq_num);
+ goto out;
+ }
+ }
+ ctx->rx_seq_num = app_hdr->elt_seq_num;
+ len = elt->length - sizeof(struct oz_app_hdr);
+ data = ((u8 *)(elt+1)) + sizeof(struct oz_app_hdr);
+ if (len <= 0)
+ goto out;
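+	/* One slot is kept empty so that rd_in == rd_out unambiguously means
+	 * the ring is empty; e.g. with OZ_RD_BUF_SZ = 8, rd_in = 6 and
+	 * rd_out = 2 give space = 2 - 6 - 1 + 8 = 3 bytes.
+	 */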
+ space = ctx->rd_out - ctx->rd_in - 1;
+ if (space < 0)
+ space += OZ_RD_BUF_SZ;
+ if (len > space) {
+ oz_trace("Not enough space:%d %d\n", len, space);
+ len = space;
+ }
+ ix = ctx->rd_in;
+ copy_sz = OZ_RD_BUF_SZ - ix;
+ if (copy_sz > len)
+ copy_sz = len;
+ memcpy(&ctx->rd_buf[ix], data, copy_sz);
+ len -= copy_sz;
+ ix += copy_sz;
+ if (ix == OZ_RD_BUF_SZ)
+ ix = 0;
+ if (len) {
+ memcpy(ctx->rd_buf, data+copy_sz, len);
+ ix = len;
+ }
+ ctx->rd_in = ix;
+ wake_up(&g_cdev.rdq);
+out:
+ oz_cdev_release_ctx(ctx);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+void oz_cdev_heartbeat(struct oz_pd *pd)
+{
+}
diff --git a/drivers/staging/ozwpan/ozcdev.h b/drivers/staging/ozwpan/ozcdev.h
new file mode 100644
index 000000000000..698014bb8d72
--- /dev/null
+++ b/drivers/staging/ozwpan/ozcdev.h
@@ -0,0 +1,18 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZCDEV_H
+#define _OZCDEV_H
+
+int oz_cdev_register(void);
+int oz_cdev_deregister(void);
+int oz_cdev_init(void);
+void oz_cdev_term(void);
+int oz_cdev_start(struct oz_pd *pd, int resume);
+void oz_cdev_stop(struct oz_pd *pd, int pause);
+void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt);
+void oz_cdev_heartbeat(struct oz_pd *pd);
+
+#endif /* _OZCDEV_H */
diff --git a/drivers/staging/ozwpan/ozconfig.h b/drivers/staging/ozwpan/ozconfig.h
new file mode 100644
index 000000000000..43e6373a009c
--- /dev/null
+++ b/drivers/staging/ozwpan/ozconfig.h
@@ -0,0 +1,27 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * ---------------------------------------------------------------------------*/
+#ifndef _OZCONFIG_H
+#define _OZCONFIG_H
+
+/* #define WANT_TRACE */
+#ifdef WANT_TRACE
+#define WANT_VERBOSE_TRACE
+#endif /* #ifdef WANT_TRACE */
+/* #define WANT_URB_PARANOIA */
+
+/* #define WANT_PRE_2_6_39 */
+#define WANT_EVENT_TRACE
+
+/* These defines determine what verbose trace is displayed. */
+#ifdef WANT_VERBOSE_TRACE
+/* #define WANT_TRACE_STREAM */
+/* #define WANT_TRACE_URB */
+/* #define WANT_TRACE_CTRL_DETAIL */
+#define WANT_TRACE_HUB
+/* #define WANT_TRACE_RX_FRAMES */
+/* #define WANT_TRACE_TX_FRAMES */
+#endif /* WANT_VERBOSE_TRACE */
+
+#endif /* _OZCONFIG_H */
diff --git a/drivers/staging/ozwpan/ozeltbuf.c b/drivers/staging/ozwpan/ozeltbuf.c
new file mode 100644
index 000000000000..988f522475d9
--- /dev/null
+++ b/drivers/staging/ozwpan/ozeltbuf.c
@@ -0,0 +1,339 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "ozconfig.h"
+#include "ozprotocol.h"
+#include "ozeltbuf.h"
+#include "ozpd.h"
+#include "oztrace.h"
+/*------------------------------------------------------------------------------
+ */
+#define OZ_ELT_INFO_MAGIC_USED 0x35791057
+#define OZ_ELT_INFO_MAGIC_FREE 0x78940102
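+/* These magic values are stamped into oz_elt_info.magic so that a freed
+ * element being reused or an element being freed twice can be detected.
+ */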
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+int oz_elt_buf_init(struct oz_elt_buf *buf)
+{
+ memset(buf, 0, sizeof(struct oz_elt_buf));
+ INIT_LIST_HEAD(&buf->stream_list);
+ INIT_LIST_HEAD(&buf->order_list);
+ INIT_LIST_HEAD(&buf->isoc_list);
+ buf->max_free_elts = 32;
+ spin_lock_init(&buf->lock);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_elt_buf_term(struct oz_elt_buf *buf)
+{
+ struct list_head *e;
+ int i;
+ /* Free any elements in the order or isoc lists. */
+ for (i = 0; i < 2; i++) {
+ struct list_head *list;
+ if (i)
+ list = &buf->order_list;
+ else
+ list = &buf->isoc_list;
+ e = list->next;
+ while (e != list) {
+ struct oz_elt_info *ei =
+ container_of(e, struct oz_elt_info, link_order);
+ e = e->next;
+ kfree(ei);
+ }
+ }
+	/* Free any elements in the pool. */
+ while (buf->elt_pool) {
+ struct oz_elt_info *ei =
+ container_of(buf->elt_pool, struct oz_elt_info, link);
+ buf->elt_pool = buf->elt_pool->next;
+ kfree(ei);
+ }
+ buf->free_elts = 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf)
+{
+ struct oz_elt_info *ei = 0;
+ spin_lock_bh(&buf->lock);
+ if (buf->free_elts && buf->elt_pool) {
+ ei = container_of(buf->elt_pool, struct oz_elt_info, link);
+ buf->elt_pool = ei->link.next;
+ buf->free_elts--;
+ spin_unlock_bh(&buf->lock);
+ if (ei->magic != OZ_ELT_INFO_MAGIC_FREE) {
+ oz_trace("oz_elt_info_alloc: ei with bad magic: 0x%x\n",
+ ei->magic);
+ }
+ } else {
+ spin_unlock_bh(&buf->lock);
+ ei = kmalloc(sizeof(struct oz_elt_info), GFP_ATOMIC);
+ }
+ if (ei) {
+ ei->flags = 0;
+ ei->app_id = 0;
+ ei->callback = 0;
+ ei->context = 0;
+ ei->stream = 0;
+ ei->magic = OZ_ELT_INFO_MAGIC_USED;
+ INIT_LIST_HEAD(&ei->link);
+ INIT_LIST_HEAD(&ei->link_order);
+ }
+ return ei;
+}
+/*------------------------------------------------------------------------------
+ * Precondition: oz_elt_buf.lock must be held.
+ * Context: softirq or process
+ */
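+/* Rather than calling kfree(), returns the element to a singly-linked
+ * free pool threaded through link.next so a later allocation can reuse it.
+ */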
+void oz_elt_info_free(struct oz_elt_buf *buf, struct oz_elt_info *ei)
+{
+ if (ei) {
+ if (ei->magic == OZ_ELT_INFO_MAGIC_USED) {
+ buf->free_elts++;
+ ei->link.next = buf->elt_pool;
+ buf->elt_pool = &ei->link;
+ ei->magic = OZ_ELT_INFO_MAGIC_FREE;
+ } else {
+ oz_trace("oz_elt_info_free: bad magic ei: %p"
+ " magic: 0x%x\n",
+ ei, ei->magic);
+ }
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+void oz_elt_info_free_chain(struct oz_elt_buf *buf, struct list_head *list)
+{
+ struct list_head *e;
+ e = list->next;
+ spin_lock_bh(&buf->lock);
+ while (e != list) {
+ struct oz_elt_info *ei;
+ ei = container_of(e, struct oz_elt_info, link);
+ e = e->next;
+ oz_elt_info_free(buf, ei);
+ }
+ spin_unlock_bh(&buf->lock);
+}
+/*------------------------------------------------------------------------------
+ */
+int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count)
+{
+ struct oz_elt_stream *st;
+
+ oz_trace("oz_elt_stream_create(0x%x)\n", id);
+
+	st = kzalloc(sizeof(struct oz_elt_stream), GFP_ATOMIC);
+ if (st == 0)
+ return -ENOMEM;
+ atomic_set(&st->ref_count, 1);
+ st->id = id;
+ st->max_buf_count = max_buf_count;
+ INIT_LIST_HEAD(&st->elt_list);
+ spin_lock_bh(&buf->lock);
+ list_add_tail(&st->link, &buf->stream_list);
+ spin_unlock_bh(&buf->lock);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ */
+int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id)
+{
+ struct list_head *e;
+ struct oz_elt_stream *st;
+ oz_trace("oz_elt_stream_delete(0x%x)\n", id);
+ spin_lock_bh(&buf->lock);
+ e = buf->stream_list.next;
+ while (e != &buf->stream_list) {
+ st = container_of(e, struct oz_elt_stream, link);
+ if (st->id == id) {
+ list_del(e);
+ break;
+ }
+ st = 0;
+ }
+ if (!st) {
+ spin_unlock_bh(&buf->lock);
+ return -1;
+ }
+ e = st->elt_list.next;
+ while (e != &st->elt_list) {
+ struct oz_elt_info *ei =
+ container_of(e, struct oz_elt_info, link);
+ e = e->next;
+ list_del_init(&ei->link);
+ list_del_init(&ei->link_order);
+ st->buf_count -= ei->length;
+ oz_trace2(OZ_TRACE_STREAM, "Stream down: %d %d %d\n",
+ st->buf_count,
+ ei->length, atomic_read(&st->ref_count));
+ oz_elt_stream_put(st);
+ oz_elt_info_free(buf, ei);
+ }
+ spin_unlock_bh(&buf->lock);
+ oz_elt_stream_put(st);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ */
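+/* Stream reference counting: the stream list holds the reference taken at
+ * create time and each queued element holds one via ei->stream.
+ */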
+void oz_elt_stream_get(struct oz_elt_stream *st)
+{
+ atomic_inc(&st->ref_count);
+}
+/*------------------------------------------------------------------------------
+ */
+void oz_elt_stream_put(struct oz_elt_stream *st)
+{
+ if (atomic_dec_and_test(&st->ref_count)) {
+ oz_trace("Stream destroyed\n");
+ kfree(st);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Precondition: Element buffer lock must be held.
+ * If this function fails the caller is responsible for deallocating the elt
+ * info structure.
+ */
+int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
+ struct oz_elt_info *ei)
+{
+ struct oz_elt_stream *st = 0;
+ struct list_head *e;
+ if (id) {
+ list_for_each(e, &buf->stream_list) {
+ st = container_of(e, struct oz_elt_stream, link);
+ if (st->id == id)
+ break;
+ }
+ if (e == &buf->stream_list) {
+ /* Stream specified but stream not known so fail.
+ * Caller deallocates element info. */
+ return -1;
+ }
+ }
+ if (st) {
+ /* If this is an ISOC fixed element that needs a frame number
+ * then insert that now. Earlier we stored the unit count in
+ * this field.
+ */
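+		/* e.g. a unit count of 4 with st->frame_number = 10 stamps
+		 * the element with frame 10 and advances the stream to
+		 * frame 14.
+		 */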
+ struct oz_isoc_fixed *body = (struct oz_isoc_fixed *)
+ &ei->data[sizeof(struct oz_elt)];
+		if ((body->app_id == OZ_APPID_USB) &&
+		    (body->type == OZ_USB_ENDPOINT_DATA) &&
+		    (body->format == OZ_DATA_F_ISOC_FIXED)) {
+ u8 unit_count = body->frame_number;
+ body->frame_number = st->frame_number;
+ st->frame_number += unit_count;
+ }
+ /* Claim stream and update accounts */
+ oz_elt_stream_get(st);
+ ei->stream = st;
+ st->buf_count += ei->length;
+ /* Add to list in stream. */
+ list_add_tail(&ei->link, &st->elt_list);
+ oz_trace2(OZ_TRACE_STREAM, "Stream up: %d %d\n",
+ st->buf_count, ei->length);
+ /* Check if we have too much buffered for this stream. If so
+ * start dropping elements until we are back in bounds.
+ */
+ while ((st->buf_count > st->max_buf_count) &&
+ !list_empty(&st->elt_list)) {
+ struct oz_elt_info *ei2 =
+ list_first_entry(&st->elt_list,
+ struct oz_elt_info, link);
+ list_del_init(&ei2->link);
+ list_del_init(&ei2->link_order);
+ st->buf_count -= ei2->length;
+ oz_elt_info_free(buf, ei2);
+ oz_elt_stream_put(st);
+ }
+ }
+ list_add_tail(&ei->link_order, isoc ?
+ &buf->isoc_list : &buf->order_list);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ */
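+/* Moves queued elements to the caller's list until adding another element
+ * would exceed max_len, stamping the per-application tx sequence number
+ * into each element as it goes. Returns the number of elements selected.
+ */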
+int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
+ unsigned max_len, struct list_head *list)
+{
+ int count = 0;
+ struct list_head *e;
+ struct list_head *el;
+ struct oz_elt_info *ei;
+ spin_lock_bh(&buf->lock);
+ if (isoc)
+ el = &buf->isoc_list;
+ else
+ el = &buf->order_list;
+ e = el->next;
+ while (e != el) {
+ struct oz_app_hdr *app_hdr;
+ ei = container_of(e, struct oz_elt_info, link_order);
+ e = e->next;
+ if ((*len + ei->length) <= max_len) {
+ app_hdr = (struct oz_app_hdr *)
+ &ei->data[sizeof(struct oz_elt)];
+ app_hdr->elt_seq_num = buf->tx_seq_num[ei->app_id]++;
+ if (buf->tx_seq_num[ei->app_id] == 0)
+ buf->tx_seq_num[ei->app_id] = 1;
+ *len += ei->length;
+ list_del(&ei->link);
+ list_del(&ei->link_order);
+ if (ei->stream) {
+ ei->stream->buf_count -= ei->length;
+ oz_trace2(OZ_TRACE_STREAM,
+ "Stream down: %d %d\n",
+ ei->stream->buf_count, ei->length);
+ oz_elt_stream_put(ei->stream);
+ ei->stream = 0;
+ }
+ INIT_LIST_HEAD(&ei->link_order);
+ list_add_tail(&ei->link, list);
+ count++;
+ } else {
+ break;
+ }
+ }
+ spin_unlock_bh(&buf->lock);
+ return count;
+}
+/*------------------------------------------------------------------------------
+ */
+int oz_are_elts_available(struct oz_elt_buf *buf)
+{
+ return buf->order_list.next != &buf->order_list;
+}
+/*------------------------------------------------------------------------------
+ */
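+/* Releases surplus pooled elements: they are unlinked onto a private list
+ * while the lock is held and then kfree()d after it is released.
+ */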
+void oz_trim_elt_pool(struct oz_elt_buf *buf)
+{
+ struct list_head *free = 0;
+ struct list_head *e;
+ spin_lock_bh(&buf->lock);
+ while (buf->free_elts > buf->max_free_elts) {
+ e = buf->elt_pool;
+ buf->elt_pool = e->next;
+ e->next = free;
+ free = e;
+ buf->free_elts--;
+ }
+ spin_unlock_bh(&buf->lock);
+ while (free) {
+ struct oz_elt_info *ei =
+ container_of(free, struct oz_elt_info, link);
+ free = free->next;
+ kfree(ei);
+ }
+}
diff --git a/drivers/staging/ozwpan/ozeltbuf.h b/drivers/staging/ozwpan/ozeltbuf.h
new file mode 100644
index 000000000000..03c12f57b9bb
--- /dev/null
+++ b/drivers/staging/ozwpan/ozeltbuf.h
@@ -0,0 +1,70 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZELTBUF_H
+#define _OZELTBUF_H
+
+#include "ozprotocol.h"
+
+/*-----------------------------------------------------------------------------
+ */
+struct oz_pd;
+typedef void (*oz_elt_callback_t)(struct oz_pd *pd, long context);
+
+struct oz_elt_stream {
+ struct list_head link;
+ struct list_head elt_list;
+ atomic_t ref_count;
+ unsigned buf_count;
+ unsigned max_buf_count;
+ u8 frame_number;
+ u8 id;
+};
+
+#define OZ_MAX_ELT_PAYLOAD 255
+struct oz_elt_info {
+ struct list_head link;
+ struct list_head link_order;
+ u8 flags;
+ u8 app_id;
+ oz_elt_callback_t callback;
+ long context;
+ struct oz_elt_stream *stream;
+ u8 data[sizeof(struct oz_elt) + OZ_MAX_ELT_PAYLOAD];
+ int length;
+ unsigned magic;
+};
+/* Flags values */
+#define OZ_EI_F_MARKED 0x1
+
+struct oz_elt_buf {
+ spinlock_t lock;
+ struct list_head stream_list;
+ struct list_head order_list;
+ struct list_head isoc_list;
+ struct list_head *elt_pool;
+ int free_elts;
+ int max_free_elts;
+ u8 tx_seq_num[OZ_NB_APPS];
+};
+
+int oz_elt_buf_init(struct oz_elt_buf *buf);
+void oz_elt_buf_term(struct oz_elt_buf *buf);
+struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf);
+void oz_elt_info_free(struct oz_elt_buf *buf, struct oz_elt_info *ei);
+void oz_elt_info_free_chain(struct oz_elt_buf *buf, struct list_head *list);
+int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count);
+int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id);
+void oz_elt_stream_get(struct oz_elt_stream *st);
+void oz_elt_stream_put(struct oz_elt_stream *st);
+int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
+ struct oz_elt_info *ei);
+int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
+ unsigned max_len, struct list_head *list);
+int oz_are_elts_available(struct oz_elt_buf *buf);
+void oz_trim_elt_pool(struct oz_elt_buf *buf);
+
+#endif /* _OZELTBUF_H */
+
diff --git a/drivers/staging/ozwpan/ozevent.c b/drivers/staging/ozwpan/ozevent.c
new file mode 100644
index 000000000000..73703d3e96bd
--- /dev/null
+++ b/drivers/staging/ozwpan/ozevent.c
@@ -0,0 +1,116 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#include "ozconfig.h"
+#ifdef WANT_EVENT_TRACE
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include "oztrace.h"
+#include "ozevent.h"
+/*------------------------------------------------------------------------------
+ */
+unsigned long g_evt_mask = 0xffffffff;
+/*------------------------------------------------------------------------------
+ */
+#define OZ_MAX_EVTS 2048 /* Must be power of 2 */
+DEFINE_SPINLOCK(g_eventlock);
+static int g_evt_in;
+static int g_evt_out;
+static int g_missed_events;
+static struct oz_event g_events[OZ_MAX_EVTS];
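+/* g_evt_in and g_evt_out index a power-of-two ring: the ring is empty when
+ * they are equal and full when advancing g_evt_in would make them equal,
+ * so at most OZ_MAX_EVTS - 1 events are held at once.
+ */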
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_event_init(void)
+{
+ oz_trace("Event tracing initialized\n");
+ g_evt_in = g_evt_out = 0;
+ g_missed_events = 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_event_term(void)
+{
+ oz_trace("Event tracing terminated\n");
+}
+/*------------------------------------------------------------------------------
+ * Context: any
+ */
+void oz_event_log2(u8 evt, u8 ctx1, u16 ctx2, void *ctx3, unsigned ctx4)
+{
+ unsigned long irqstate;
+ int ix;
+ spin_lock_irqsave(&g_eventlock, irqstate);
+ ix = (g_evt_in + 1) & (OZ_MAX_EVTS - 1);
+ if (ix != g_evt_out) {
+ struct oz_event *e = &g_events[g_evt_in];
+ e->jiffies = jiffies;
+ e->evt = evt;
+ e->ctx1 = ctx1;
+ e->ctx2 = ctx2;
+ e->ctx3 = ctx3;
+ e->ctx4 = ctx4;
+ g_evt_in = ix;
+ } else {
+ g_missed_events++;
+ }
+ spin_unlock_irqrestore(&g_eventlock, irqstate);
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_events_copy(struct oz_evtlist __user *lst)
+{
+ int first;
+ int ix;
+ struct hdr {
+ int count;
+ int missed;
+ } hdr;
+ ix = g_evt_out;
+ hdr.count = g_evt_in - ix;
+ if (hdr.count < 0)
+ hdr.count += OZ_MAX_EVTS;
+ if (hdr.count > OZ_EVT_LIST_SZ)
+ hdr.count = OZ_EVT_LIST_SZ;
+ hdr.missed = g_missed_events;
+ g_missed_events = 0;
+ if (copy_to_user((void __user *)lst, &hdr, sizeof(hdr)))
+ return -EFAULT;
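+	/* The ring may have wrapped, so copy in up to two runs: first from
+	 * index ix to the end of g_events, then the remainder from the start
+	 * of the array.
+	 */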
+ first = OZ_MAX_EVTS - ix;
+ if (first > hdr.count)
+ first = hdr.count;
+ if (first) {
+ int sz = first*sizeof(struct oz_event);
+ void __user *p = (void __user *)lst->evts;
+ if (copy_to_user(p, &g_events[ix], sz))
+ return -EFAULT;
+ if (hdr.count > first) {
+ p = (void __user *)&lst->evts[first];
+ sz = (hdr.count-first)*sizeof(struct oz_event);
+ if (copy_to_user(p, g_events, sz))
+ return -EFAULT;
+ }
+ }
+ ix += hdr.count;
+ if (ix >= OZ_MAX_EVTS)
+ ix -= OZ_MAX_EVTS;
+ g_evt_out = ix;
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_events_clear(void)
+{
+ unsigned long irqstate;
+ spin_lock_irqsave(&g_eventlock, irqstate);
+ g_evt_in = g_evt_out = 0;
+ g_missed_events = 0;
+ spin_unlock_irqrestore(&g_eventlock, irqstate);
+}
+#endif /* WANT_EVENT_TRACE */
+
diff --git a/drivers/staging/ozwpan/ozevent.h b/drivers/staging/ozwpan/ozevent.h
new file mode 100644
index 000000000000..f033d014c6f3
--- /dev/null
+++ b/drivers/staging/ozwpan/ozevent.h
@@ -0,0 +1,31 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZEVENT_H
+#define _OZEVENT_H
+#include "ozconfig.h"
+#include "ozeventdef.h"
+
+#ifdef WANT_EVENT_TRACE
+extern unsigned long g_evt_mask;
+void oz_event_init(void);
+void oz_event_term(void);
+void oz_event_log2(u8 evt, u8 ctx1, u16 ctx2, void *ctx3, unsigned ctx4);
+#define oz_event_log(__evt, __ctx1, __ctx2, __ctx3, __ctx4) \
+ do { \
+ if ((1<<(__evt)) & g_evt_mask) \
+ oz_event_log2(__evt, __ctx1, __ctx2, __ctx3, __ctx4); \
+ } while (0)
+int oz_events_copy(struct oz_evtlist __user *lst);
+void oz_events_clear(void);
+#else
+#define oz_event_init()
+#define oz_event_term()
+#define oz_event_log(__evt, __ctx1, __ctx2, __ctx3, __ctx4)
+#define oz_events_copy(__lst)
+#define oz_events_clear()
+#endif /* WANT_EVENT_TRACE */
+
+#endif /* _OZEVENT_H */
diff --git a/drivers/staging/ozwpan/ozeventdef.h b/drivers/staging/ozwpan/ozeventdef.h
new file mode 100644
index 000000000000..a880288bab11
--- /dev/null
+++ b/drivers/staging/ozwpan/ozeventdef.h
@@ -0,0 +1,47 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZEVENTDEF_H
+#define _OZEVENTDEF_H
+
+#define OZ_EVT_RX_FRAME 0
+#define OZ_EVT_RX_PROCESS 1
+#define OZ_EVT_TX_FRAME 2
+#define OZ_EVT_TX_ISOC 3
+#define OZ_EVT_URB_SUBMIT 4
+#define OZ_EVT_URB_DONE 5
+#define OZ_EVT_URB_CANCEL 6
+#define OZ_EVT_CTRL_REQ 7
+#define OZ_EVT_CTRL_CNF 8
+#define OZ_EVT_CTRL_LOCAL 9
+#define OZ_EVT_CONNECT_REQ 10
+#define OZ_EVT_CONNECT_RSP 11
+#define OZ_EVT_EP_CREDIT 12
+#define OZ_EVT_EP_BUFFERING 13
+#define OZ_EVT_TX_ISOC_DONE 14
+#define OZ_EVT_TX_ISOC_DROP 15
+#define OZ_EVT_TIMER_CTRL 16
+#define OZ_EVT_TIMER 17
+#define OZ_EVT_PD_STATE 18
+#define OZ_EVT_SERVICE 19
+#define OZ_EVT_DEBUG 20
+
+struct oz_event {
+ unsigned long jiffies;
+ unsigned char evt;
+ unsigned char ctx1;
+ unsigned short ctx2;
+ void *ctx3;
+ unsigned ctx4;
+};
+
+#define OZ_EVT_LIST_SZ 64
+struct oz_evtlist {
+ int count;
+ int missed;
+ struct oz_event evts[OZ_EVT_LIST_SZ];
+};
+
+#endif /* _OZEVENTDEF_H */
diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
new file mode 100644
index 000000000000..750b14eb505e
--- /dev/null
+++ b/drivers/staging/ozwpan/ozhcd.c
@@ -0,0 +1,2256 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ *
+ * This file provides the implementation of a USB host controller device that
+ * does not have any associated hardware. Instead the virtual device is
+ * connected to the WiFi network and emulates the operation of a USB hcd by
+ * receiving and sending network frames.
+ * Note:
+ * We take great pains to reduce the amount of code where interrupts need to be
+ * disabled and in this respect we are different from standard HCD's. In
+ * particular we don't want in_irq() code bleeding over to the protocol side of
+ * the driver.
+ * The troublesome functions are the urb enqueue and dequeue functions both of
+ * which can be called in_irq(). So for these functions we put the urbs into a
+ * queue and request a tasklet to process them. This means that a spinlock with
+ * interrupts disabled must be held for insertion and removal but most code
+ * is in tasklet or soft irq context. The lock that protects this list is called
+ * the tasklet lock and serves the purpose of the 'HCD lock' which must be held
+ * when calling the following functions.
+ * usb_hcd_link_urb_to_ep()
+ * usb_hcd_unlink_urb_from_ep()
+ * usb_hcd_flush_endpoint()
+ * usb_hcd_check_unlink_urb()
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include "linux/usb/hcd.h"
+#include <asm/unaligned.h>
+#include "ozconfig.h"
+#include "ozusbif.h"
+#include "oztrace.h"
+#include "ozurbparanoia.h"
+#include "ozevent.h"
+/*------------------------------------------------------------------------------
+ * Number of units of buffering to capture for an isochronous IN endpoint before
+ * allowing data to be indicated up.
+ */
+#define OZ_IN_BUFFERING_UNITS 50
+/* Name of our platform device.
+ */
+#define OZ_PLAT_DEV_NAME "ozwpan"
+/* Maximum number of free urb links that can be kept in the pool.
+ */
+#define OZ_MAX_LINK_POOL_SIZE 16
+/* Get endpoint object from the containing link.
+ */
+#define ep_from_link(__e) container_of((__e), struct oz_endpoint, link)
+/*------------------------------------------------------------------------------
+ * Used to link urbs together and also store some status information for each
+ * urb.
+ * A cache of these are kept in a pool to reduce number of calls to kmalloc.
+ */
+struct oz_urb_link {
+ struct list_head link;
+ struct urb *urb;
+ struct oz_port *port;
+ u8 req_id;
+ u8 ep_num;
+ unsigned long submit_jiffies;
+};
+
+/* Holds state information about a USB endpoint.
+ */
+struct oz_endpoint {
+ struct list_head urb_list; /* List of oz_urb_link items. */
+ struct list_head link; /* For isoc ep, links in to isoc
+ lists of oz_port. */
+ unsigned long last_jiffies;
+ int credit;
+ int credit_ceiling;
+ u8 ep_num;
+ u8 attrib;
+ u8 *buffer;
+ int buffer_size;
+ int in_ix;
+ int out_ix;
+ int buffered_units;
+ unsigned flags;
+ int start_frame;
+};
+/* Bits in the flags field. */
+#define OZ_F_EP_BUFFERING 0x1
+#define OZ_F_EP_HAVE_STREAM 0x2
+
+/* Holds state information about a USB interface.
+ */
+struct oz_interface {
+ unsigned ep_mask;
+ u8 alt;
+};
+
+/* Holds state information about an hcd port.
+ */
+#define OZ_NB_ENDPOINTS 16
+struct oz_port {
+ unsigned flags;
+ unsigned status;
+ void *hpd;
+ struct oz_hcd *ozhcd;
+ spinlock_t port_lock;
+ u8 bus_addr;
+ u8 next_req_id;
+ u8 config_num;
+ int num_iface;
+ struct oz_interface *iface;
+ struct oz_endpoint *out_ep[OZ_NB_ENDPOINTS];
+ struct oz_endpoint *in_ep[OZ_NB_ENDPOINTS];
+ struct list_head isoc_out_ep;
+ struct list_head isoc_in_ep;
+};
+#define OZ_PORT_F_PRESENT 0x1
+#define OZ_PORT_F_CHANGED 0x2
+#define OZ_PORT_F_DYING 0x4
+
+/* Data structure in the private context area of struct usb_hcd.
+ */
+#define OZ_NB_PORTS 8
+struct oz_hcd {
+ spinlock_t hcd_lock;
+ struct list_head urb_pending_list;
+ struct list_head urb_cancel_list;
+ struct list_head orphanage;
+ int conn_port; /* Port that is currently connecting, -1 if none.*/
+ struct oz_port ports[OZ_NB_PORTS];
+ uint flags;
+ struct usb_hcd *hcd;
+};
+/* Bits in flags field.
+ */
+#define OZ_HDC_F_SUSPENDED 0x1
+
+/*------------------------------------------------------------------------------
+ * Static function prototypes.
+ */
+static int oz_hcd_start(struct usb_hcd *hcd);
+static void oz_hcd_stop(struct usb_hcd *hcd);
+static void oz_hcd_shutdown(struct usb_hcd *hcd);
+static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags);
+static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
+static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep);
+static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep);
+static int oz_hcd_get_frame_number(struct usb_hcd *hcd);
+static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf);
+static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
+ u16 windex, char *buf, u16 wlength);
+static int oz_hcd_bus_suspend(struct usb_hcd *hcd);
+static int oz_hcd_bus_resume(struct usb_hcd *hcd);
+static int oz_plat_probe(struct platform_device *dev);
+static int oz_plat_remove(struct platform_device *dev);
+static void oz_plat_shutdown(struct platform_device *dev);
+static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg);
+static int oz_plat_resume(struct platform_device *dev);
+static void oz_urb_process_tasklet(unsigned long unused);
+static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
+ struct oz_port *port, struct usb_host_config *config,
+ gfp_t mem_flags);
+static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
+ struct oz_port *port);
+static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
+ struct oz_port *port,
+ struct usb_host_interface *intf, gfp_t mem_flags);
+static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
+ struct oz_port *port, int if_ix);
+static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
+ gfp_t mem_flags);
+static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
+ struct urb *urb);
+static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status);
+/*------------------------------------------------------------------------------
+ * Static external variables.
+ */
+static struct platform_device *g_plat_dev;
+static struct oz_hcd *g_ozhcd;
+static DEFINE_SPINLOCK(g_hcdlock); /* Guards g_ozhcd. */
+static const char g_hcd_name[] = "Ozmo WPAN";
+static struct list_head *g_link_pool;
+static int g_link_pool_size;
+static DEFINE_SPINLOCK(g_link_lock);
+static DEFINE_SPINLOCK(g_tasklet_lock);
+static struct tasklet_struct g_urb_process_tasklet;
+static struct tasklet_struct g_urb_cancel_tasklet;
+static atomic_t g_pending_urbs = ATOMIC_INIT(0);
+static const struct hc_driver g_oz_hc_drv = {
+ .description = g_hcd_name,
+ .product_desc = "Ozmo Devices WPAN",
+ .hcd_priv_size = sizeof(struct oz_hcd),
+ .flags = HCD_USB11,
+ .start = oz_hcd_start,
+ .stop = oz_hcd_stop,
+ .shutdown = oz_hcd_shutdown,
+ .urb_enqueue = oz_hcd_urb_enqueue,
+ .urb_dequeue = oz_hcd_urb_dequeue,
+ .endpoint_disable = oz_hcd_endpoint_disable,
+ .endpoint_reset = oz_hcd_endpoint_reset,
+ .get_frame_number = oz_hcd_get_frame_number,
+ .hub_status_data = oz_hcd_hub_status_data,
+ .hub_control = oz_hcd_hub_control,
+ .bus_suspend = oz_hcd_bus_suspend,
+ .bus_resume = oz_hcd_bus_resume,
+};
+
+static struct platform_driver g_oz_plat_drv = {
+ .probe = oz_plat_probe,
+ .remove = oz_plat_remove,
+ .shutdown = oz_plat_shutdown,
+ .suspend = oz_plat_suspend,
+ .resume = oz_plat_resume,
+ .driver = {
+ .name = OZ_PLAT_DEV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+/*------------------------------------------------------------------------------
+ * Gets our private context area (which is of type struct oz_hcd) from the
+ * usb_hcd structure.
+ * Context: any
+ */
+static inline struct oz_hcd *oz_hcd_private(struct usb_hcd *hcd)
+{
+ return (struct oz_hcd *)hcd->hcd_priv;
+}
+/*------------------------------------------------------------------------------
+ * Searches list of ports to find the index of the one with a specified USB
+ * bus address. If none of the ports has the bus address then the connection
+ * port is returned if there is one, or -1 otherwise.
+ * Context: any
+ */
+static int oz_get_port_from_addr(struct oz_hcd *ozhcd, u8 bus_addr)
+{
+ int i;
+ for (i = 0; i < OZ_NB_PORTS; i++) {
+ if (ozhcd->ports[i].bus_addr == bus_addr)
+ return i;
+ }
+ return ozhcd->conn_port;
+}
+/*------------------------------------------------------------------------------
+ * Allocates an urb link, first trying the pool but going to heap if empty.
+ * Context: any
+ */
+static struct oz_urb_link *oz_alloc_urb_link(void)
+{
+ struct oz_urb_link *urbl = 0;
+ unsigned long irq_state;
+ spin_lock_irqsave(&g_link_lock, irq_state);
+ if (g_link_pool) {
+ urbl = container_of(g_link_pool, struct oz_urb_link, link);
+ g_link_pool = urbl->link.next;
+ --g_link_pool_size;
+ }
+ spin_unlock_irqrestore(&g_link_lock, irq_state);
+ if (urbl == 0)
+ urbl = kmalloc(sizeof(struct oz_urb_link), GFP_ATOMIC);
+ return urbl;
+}
+/*------------------------------------------------------------------------------
+ * Frees an urb link by putting it in the pool if there is enough space or
+ * deallocating it to heap otherwise.
+ * Context: any
+ */
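+/* If the pool takes the link, urbl is cleared so the kfree() below is
+ * skipped; otherwise the pool is already full and the link goes back to
+ * the heap.
+ */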
+static void oz_free_urb_link(struct oz_urb_link *urbl)
+{
+ if (urbl) {
+ unsigned long irq_state;
+ spin_lock_irqsave(&g_link_lock, irq_state);
+ if (g_link_pool_size < OZ_MAX_LINK_POOL_SIZE) {
+ urbl->link.next = g_link_pool;
+ g_link_pool = &urbl->link;
+ urbl = 0;
+ g_link_pool_size++;
+ }
+ spin_unlock_irqrestore(&g_link_lock, irq_state);
+ if (urbl)
+ kfree(urbl);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Deallocates all the urb links in the pool.
+ * Context: unknown
+ */
+static void oz_empty_link_pool(void)
+{
+ struct list_head *e;
+ unsigned long irq_state;
+ spin_lock_irqsave(&g_link_lock, irq_state);
+ e = g_link_pool;
+ g_link_pool = 0;
+ g_link_pool_size = 0;
+ spin_unlock_irqrestore(&g_link_lock, irq_state);
+ while (e) {
+ struct oz_urb_link *urbl =
+ container_of(e, struct oz_urb_link, link);
+ e = e->next;
+ kfree(urbl);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Allocates endpoint structure and optionally a buffer. If a buffer is
+ * allocated it immediately follows the endpoint structure.
+ * Context: softirq
+ */
+static struct oz_endpoint *oz_ep_alloc(gfp_t mem_flags, int buffer_size)
+{
+ struct oz_endpoint *ep =
+ kzalloc(sizeof(struct oz_endpoint)+buffer_size, mem_flags);
+ if (ep) {
+ INIT_LIST_HEAD(&ep->urb_list);
+ INIT_LIST_HEAD(&ep->link);
+ ep->credit = -1;
+ if (buffer_size) {
+ ep->buffer_size = buffer_size;
+ ep->buffer = (u8 *)(ep+1);
+ }
+ }
+ return ep;
+}
+/*------------------------------------------------------------------------------
+ * Pre-condition: Must be called with g_tasklet_lock held and interrupts
+ * disabled.
+ * Context: softirq or process
+ */
+struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd, struct urb *urb)
+{
+ struct oz_urb_link *urbl;
+ struct list_head *e;
+ list_for_each(e, &ozhcd->urb_cancel_list) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ if (urb == urbl->urb) {
+ list_del_init(e);
+ return urbl;
+ }
+ }
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * This is called when we have finished processing an urb. It unlinks it from
+ * the ep and returns it to the core.
+ * Context: softirq or process
+ */
+static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
+ int status, unsigned long submit_jiffies)
+{
+ struct oz_hcd *ozhcd = oz_hcd_private(hcd);
+ unsigned long irq_state;
+ struct oz_urb_link *cancel_urbl = 0;
+ spin_lock_irqsave(&g_tasklet_lock, irq_state);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ /* Clear hcpriv which will prevent it being put in the cancel list
+ * in the event that an attempt is made to cancel it.
+ */
+ urb->hcpriv = 0;
+ /* Walk the cancel list in case the urb is already sitting there.
+ * Since we process the cancel list in a tasklet rather than in
+ * the dequeue function this could happen.
+ */
+ cancel_urbl = oz_uncancel_urb(ozhcd, urb);
+ /* Note: we release lock but do not enable local irqs.
+ * It appears that usb_hcd_giveback_urb() expects irqs to be disabled,
+ * or at least other host controllers disable interrupts at this point
+ * so we do the same. We must, however, release the lock otherwise a
+ * deadlock will occur if an urb is submitted to our driver in the urb
+ * completion function. Because we disable interrupts it is possible
+ * that the urb_enqueue function can be called with them disabled.
+ */
+ spin_unlock(&g_tasklet_lock);
+ if (oz_forget_urb(urb)) {
+ oz_trace("OZWPAN: ERROR Unknown URB %p\n", urb);
+ } else {
+ static unsigned long last_time;
+ atomic_dec(&g_pending_urbs);
+ oz_trace2(OZ_TRACE_URB,
+ "%lu: giveback_urb(%p,%x) %lu %lu pending:%d\n",
+ jiffies, urb, status, jiffies-submit_jiffies,
+ jiffies-last_time, atomic_read(&g_pending_urbs));
+ last_time = jiffies;
+ oz_event_log(OZ_EVT_URB_DONE, 0, 0, urb, status);
+ usb_hcd_giveback_urb(hcd, urb, status);
+ }
+ spin_lock(&g_tasklet_lock);
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ if (cancel_urbl)
+ oz_free_urb_link(cancel_urbl);
+}
+/*------------------------------------------------------------------------------
+ * Deallocates an endpoint including deallocating any associated stream and
+ * returning any queued urbs to the core.
+ * Context: softirq
+ */
+static void oz_ep_free(struct oz_port *port, struct oz_endpoint *ep)
+{
+ oz_trace("oz_ep_free()\n");
+ if (port) {
+ struct list_head list;
+ struct oz_hcd *ozhcd = port->ozhcd;
+ INIT_LIST_HEAD(&list);
+ if (ep->flags & OZ_F_EP_HAVE_STREAM)
+ oz_usb_stream_delete(port->hpd, ep->ep_num);
+ /* Transfer URBs to the orphanage while we hold the lock. */
+ spin_lock_bh(&ozhcd->hcd_lock);
+ /* Note: this works even if ep->urb_list is empty.*/
+ list_replace_init(&ep->urb_list, &list);
+ /* Put the URBs in the orphanage. */
+ list_splice_tail(&list, &ozhcd->orphanage);
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ }
+ oz_trace("Freeing endpoint memory\n");
+ kfree(ep);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
+ struct urb *urb, u8 req_id)
+{
+ struct oz_urb_link *urbl;
+ struct oz_endpoint *ep;
+ int err = 0;
+ if (ep_addr >= OZ_NB_ENDPOINTS) {
+ oz_trace("Invalid endpoint number in oz_enqueue_ep_urb().\n");
+ return -EINVAL;
+ }
+ urbl = oz_alloc_urb_link();
+ if (!urbl)
+ return -ENOMEM;
+ urbl->submit_jiffies = jiffies;
+ urbl->urb = urb;
+ urbl->req_id = req_id;
+ urbl->ep_num = ep_addr;
+ /* Hold lock while we insert the URB into the list within the
+ * endpoint structure.
+ */
+ spin_lock_bh(&port->ozhcd->hcd_lock);
+ /* If the urb has been unlinked while out of any list then
+ * complete it now.
+ */
+ if (urb->unlinked) {
+ spin_unlock_bh(&port->ozhcd->hcd_lock);
+ oz_trace("urb %p unlinked so complete immediately\n", urb);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ oz_free_urb_link(urbl);
+ return 0;
+ }
+ if (in_dir)
+ ep = port->in_ep[ep_addr];
+ else
+ ep = port->out_ep[ep_addr];
+ if (ep && port->hpd) {
+ list_add_tail(&urbl->link, &ep->urb_list);
+ if (!in_dir && ep_addr && (ep->credit < 0)) {
+ ep->last_jiffies = jiffies;
+ ep->credit = 0;
+ oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num,
+ 0, 0, ep->credit);
+ }
+ } else {
+ err = -EPIPE;
+ }
+ spin_unlock_bh(&port->ozhcd->hcd_lock);
+ if (err)
+ oz_free_urb_link(urbl);
+ return err;
+}
+/*------------------------------------------------------------------------------
+ * Removes an urb from the queue in the endpoint.
+ * Returns 0 if it is found and -EIDRM otherwise.
+ * Context: softirq
+ */
+static int oz_dequeue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
+ struct urb *urb)
+{
+ struct oz_urb_link *urbl = 0;
+ struct oz_endpoint *ep;
+ spin_lock_bh(&port->ozhcd->hcd_lock);
+ if (in_dir)
+ ep = port->in_ep[ep_addr];
+ else
+ ep = port->out_ep[ep_addr];
+ if (ep) {
+ struct list_head *e;
+ list_for_each(e, &ep->urb_list) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ if (urbl->urb == urb) {
+ list_del_init(e);
+ break;
+ }
+ urbl = 0;
+ }
+ }
+ spin_unlock_bh(&port->ozhcd->hcd_lock);
+ if (urbl)
+ oz_free_urb_link(urbl);
+ return urbl ? 0 : -EIDRM;
+}
+/*------------------------------------------------------------------------------
+ * Finds an urb given its request id.
+ * Context: softirq
+ */
+static struct urb *oz_find_urb_by_id(struct oz_port *port, int ep_ix,
+ u8 req_id)
+{
+ struct oz_hcd *ozhcd = port->ozhcd;
+ struct urb *urb = 0;
+ struct oz_urb_link *urbl = 0;
+ struct oz_endpoint *ep;
+
+ spin_lock_bh(&ozhcd->hcd_lock);
+ ep = port->out_ep[ep_ix];
+ if (ep) {
+ struct list_head *e;
+ list_for_each(e, &ep->urb_list) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ if (urbl->req_id == req_id) {
+ urb = urbl->urb;
+ list_del_init(e);
+ break;
+ }
+ }
+ }
+ spin_unlock_bh(&ozhcd->hcd_lock);
+	/* If urb is non-zero then we must have an urb link to delete.
+ */
+ if (urb)
+ oz_free_urb_link(urbl);
+ return urb;
+}
+/*------------------------------------------------------------------------------
+ * Pre-condition: Port lock must be held.
+ * Context: softirq
+ */
+static void oz_acquire_port(struct oz_port *port, void *hpd)
+{
+ INIT_LIST_HEAD(&port->isoc_out_ep);
+ INIT_LIST_HEAD(&port->isoc_in_ep);
+ port->flags |= OZ_PORT_F_PRESENT | OZ_PORT_F_CHANGED;
+ port->status |= USB_PORT_STAT_CONNECTION |
+ (USB_PORT_STAT_C_CONNECTION << 16);
+ oz_usb_get(hpd);
+ port->hpd = hpd;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static struct oz_hcd *oz_hcd_claim(void)
+{
+ struct oz_hcd *ozhcd;
+ spin_lock_bh(&g_hcdlock);
+ ozhcd = g_ozhcd;
+ if (ozhcd)
+ usb_get_hcd(ozhcd->hcd);
+ spin_unlock_bh(&g_hcdlock);
+ return ozhcd;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static inline void oz_hcd_put(struct oz_hcd *ozhcd)
+{
+ if (ozhcd)
+ usb_put_hcd(ozhcd->hcd);
+}
+/*------------------------------------------------------------------------------
+ * This is called by the protocol handler to notify that a PD has arrived.
+ * We allocate a port to associate with the PD and create a structure for
+ * endpoint 0. This port is made the connection port.
+ * In the event that one of the other ports is already the connection port
+ * then we fail.
+ * TODO We should be able to do better than fail and should be able to remember
+ * that this port needs configuring and make it the connection port once the
+ * current connection port has been assigned an address. Collisions here are
+ * probably very rare indeed.
+ * Context: softirq
+ */
+void *oz_hcd_pd_arrived(void *hpd)
+{
+ int i;
+ void *hport = 0;
+ struct oz_hcd *ozhcd = 0;
+ struct oz_endpoint *ep;
+ oz_trace("oz_hcd_pd_arrived()\n");
+ ozhcd = oz_hcd_claim();
+ if (ozhcd == 0)
+ return 0;
+ /* Allocate an endpoint object in advance (before holding hcd lock) to
+ * use for out endpoint 0.
+ */
+ ep = oz_ep_alloc(GFP_ATOMIC, 0);
+ spin_lock_bh(&ozhcd->hcd_lock);
+ if (ozhcd->conn_port >= 0) {
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ oz_trace("conn_port >= 0\n");
+ goto out;
+ }
+ for (i = 0; i < OZ_NB_PORTS; i++) {
+ struct oz_port *port = &ozhcd->ports[i];
+ spin_lock(&port->port_lock);
+ if ((port->flags & OZ_PORT_F_PRESENT) == 0) {
+ oz_acquire_port(port, hpd);
+ spin_unlock(&port->port_lock);
+ break;
+ }
+ spin_unlock(&port->port_lock);
+ }
+ if (i < OZ_NB_PORTS) {
+ oz_trace("Setting conn_port = %d\n", i);
+ ozhcd->conn_port = i;
+ /* Attach out endpoint 0.
+ */
+ ozhcd->ports[i].out_ep[0] = ep;
+ ep = 0;
+ hport = &ozhcd->ports[i];
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ if (ozhcd->flags & OZ_HDC_F_SUSPENDED) {
+ oz_trace("Resuming root hub\n");
+ usb_hcd_resume_root_hub(ozhcd->hcd);
+ }
+ usb_hcd_poll_rh_status(ozhcd->hcd);
+ } else {
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ }
+out:
+ if (ep) /* ep is non-null if not used. */
+ oz_ep_free(0, ep);
+ oz_hcd_put(ozhcd);
+ return hport;
+}
+/*------------------------------------------------------------------------------
+ * This is called by the protocol handler to notify that the PD has gone away.
+ * We need to deallocate all resources and then request that the root hub is
+ * polled. We release the reference we hold on the PD.
+ * Context: softirq
+ */
+void oz_hcd_pd_departed(void *hport)
+{
+ struct oz_port *port = (struct oz_port *)hport;
+ struct oz_hcd *ozhcd;
+ void *hpd;
+ struct oz_endpoint *ep = 0;
+
+ oz_trace("oz_hcd_pd_departed()\n");
+ if (port == 0) {
+ oz_trace("oz_hcd_pd_departed() port = 0\n");
+ return;
+ }
+ ozhcd = port->ozhcd;
+ if (ozhcd == 0)
+ return;
+ /* Check if this is the connection port - if so clear it.
+ */
+ spin_lock_bh(&ozhcd->hcd_lock);
+ if ((ozhcd->conn_port >= 0) &&
+ (port == &ozhcd->ports[ozhcd->conn_port])) {
+ oz_trace("Clearing conn_port\n");
+ ozhcd->conn_port = -1;
+ }
+ spin_lock(&port->port_lock);
+ port->flags |= OZ_PORT_F_DYING;
+ spin_unlock(&port->port_lock);
+ spin_unlock_bh(&ozhcd->hcd_lock);
+
+ oz_clean_endpoints_for_config(ozhcd->hcd, port);
+ spin_lock_bh(&port->port_lock);
+ hpd = port->hpd;
+ port->hpd = 0;
+ port->bus_addr = 0xff;
+ port->flags &= ~(OZ_PORT_F_PRESENT | OZ_PORT_F_DYING);
+ port->flags |= OZ_PORT_F_CHANGED;
+ port->status &= ~USB_PORT_STAT_CONNECTION;
+ port->status |= (USB_PORT_STAT_C_CONNECTION << 16);
+	/* If there is an endpoint 0 then clear the pointer while we hold
+	 * the spinlock but deallocate it after releasing the lock.
+ */
+ if (port->out_ep[0]) {
+ ep = port->out_ep[0];
+ port->out_ep[0] = 0;
+ }
+ spin_unlock_bh(&port->port_lock);
+ if (ep)
+ oz_ep_free(port, ep);
+ usb_hcd_poll_rh_status(ozhcd->hcd);
+ oz_usb_put(hpd);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+void oz_hcd_pd_reset(void *hpd, void *hport)
+{
+ /* Cleanup the current configuration and report reset to the core.
+ */
+ struct oz_port *port = (struct oz_port *)hport;
+ struct oz_hcd *ozhcd = port->ozhcd;
+ oz_trace("PD Reset\n");
+ spin_lock_bh(&port->port_lock);
+ port->flags |= OZ_PORT_F_CHANGED;
+ port->status |= USB_PORT_STAT_RESET;
+ port->status |= (USB_PORT_STAT_C_RESET << 16);
+ spin_unlock_bh(&port->port_lock);
+ oz_clean_endpoints_for_config(ozhcd->hcd, port);
+ usb_hcd_poll_rh_status(ozhcd->hcd);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, u8 *desc,
+ int length, int offset, int total_size)
+{
+ struct oz_port *port = (struct oz_port *)hport;
+ struct urb *urb;
+ int err = 0;
+
+ oz_event_log(OZ_EVT_CTRL_CNF, 0, req_id, 0, status);
+ oz_trace("oz_hcd_get_desc_cnf length = %d offs = %d tot_size = %d\n",
+ length, offset, total_size);
+ urb = oz_find_urb_by_id(port, 0, req_id);
+ if (!urb)
+ return;
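+	/* Descriptors can arrive in several chunks; accumulate them in the
+	 * urb's transfer buffer and request the next chunk until
+	 * required_size bytes have been gathered.
+	 */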
+ if (status == 0) {
+ int copy_len;
+ int required_size = urb->transfer_buffer_length;
+ if (required_size > total_size)
+ required_size = total_size;
+ copy_len = required_size-offset;
+ if (length <= copy_len)
+ copy_len = length;
+ memcpy(urb->transfer_buffer+offset, desc, copy_len);
+ offset += copy_len;
+ if (offset < required_size) {
+ struct usb_ctrlrequest *setup =
+ (struct usb_ctrlrequest *)urb->setup_packet;
+ unsigned wvalue = le16_to_cpu(setup->wValue);
+ if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id))
+ err = -ENOMEM;
+ else if (oz_usb_get_desc_req(port->hpd, req_id,
+ setup->bRequestType, (u8)(wvalue>>8),
+ (u8)wvalue, setup->wIndex, offset,
+ required_size-offset)) {
+ oz_dequeue_ep_urb(port, 0, 0, urb);
+ err = -ENOMEM;
+ }
+ if (err == 0)
+ return;
+ }
+ }
+ urb->actual_length = total_size;
+ oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+#ifdef WANT_TRACE
+static void oz_display_conf_type(u8 t)
+{
+ switch (t) {
+ case USB_REQ_GET_STATUS:
+ oz_trace("USB_REQ_GET_STATUS - cnf\n");
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ oz_trace("USB_REQ_CLEAR_FEATURE - cnf\n");
+ break;
+ case USB_REQ_SET_FEATURE:
+ oz_trace("USB_REQ_SET_FEATURE - cnf\n");
+ break;
+ case USB_REQ_SET_ADDRESS:
+ oz_trace("USB_REQ_SET_ADDRESS - cnf\n");
+ break;
+ case USB_REQ_GET_DESCRIPTOR:
+ oz_trace("USB_REQ_GET_DESCRIPTOR - cnf\n");
+ break;
+ case USB_REQ_SET_DESCRIPTOR:
+ oz_trace("USB_REQ_SET_DESCRIPTOR - cnf\n");
+ break;
+ case USB_REQ_GET_CONFIGURATION:
+ oz_trace("USB_REQ_GET_CONFIGURATION - cnf\n");
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ oz_trace("USB_REQ_SET_CONFIGURATION - cnf\n");
+ break;
+ case USB_REQ_GET_INTERFACE:
+ oz_trace("USB_REQ_GET_INTERFACE - cnf\n");
+ break;
+ case USB_REQ_SET_INTERFACE:
+ oz_trace("USB_REQ_SET_INTERFACE - cnf\n");
+ break;
+ case USB_REQ_SYNCH_FRAME:
+ oz_trace("USB_REQ_SYNCH_FRAME - cnf\n");
+ break;
+ }
+}
+#else
+#define oz_display_conf_type(__x)
+#endif /* WANT_TRACE */
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_hcd_complete_set_config(struct oz_port *port, struct urb *urb,
+ u8 rcode, u8 config_num)
+{
+ int rc = 0;
+ struct usb_hcd *hcd = port->ozhcd->hcd;
+ if (rcode == 0) {
+ port->config_num = config_num;
+ oz_clean_endpoints_for_config(hcd, port);
+ if (oz_build_endpoints_for_config(hcd, port,
+ &urb->dev->config[port->config_num-1], GFP_ATOMIC)) {
+ rc = -ENOMEM;
+ }
+ } else {
+ rc = -ENOMEM;
+ }
+ oz_complete_urb(hcd, urb, rc, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb,
+ u8 rcode, u8 if_num, u8 alt)
+{
+ struct usb_hcd *hcd = port->ozhcd->hcd;
+ int rc = 0;
+ if (rcode == 0) {
+ struct usb_host_config *config;
+ struct usb_host_interface *intf;
+ oz_trace("Set interface %d alt %d\n", if_num, alt);
+ oz_clean_endpoints_for_interface(hcd, port, if_num);
+ config = &urb->dev->config[port->config_num-1];
+ intf = &config->intf_cache[if_num]->altsetting[alt];
+ if (oz_build_endpoints_for_interface(hcd, port, intf,
+ GFP_ATOMIC))
+ rc = -ENOMEM;
+ else
+ port->iface[if_num].alt = alt;
+ } else {
+ rc = -ENOMEM;
+ }
+ oz_complete_urb(hcd, urb, rc, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, u8 *data,
+ int data_len)
+{
+ struct oz_port *port = (struct oz_port *)hport;
+ struct urb *urb;
+ struct usb_ctrlrequest *setup;
+ struct usb_hcd *hcd = port->ozhcd->hcd;
+ unsigned windex;
+ unsigned wvalue;
+
+ oz_event_log(OZ_EVT_CTRL_CNF, 0, req_id, 0, rcode);
+ oz_trace("oz_hcd_control_cnf rcode=%u len=%d\n", rcode, data_len);
+ urb = oz_find_urb_by_id(port, 0, req_id);
+ if (!urb) {
+ oz_trace("URB not found\n");
+ return;
+ }
+ setup = (struct usb_ctrlrequest *)urb->setup_packet;
+ windex = le16_to_cpu(setup->wIndex);
+ wvalue = le16_to_cpu(setup->wValue);
+ if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ /* Standard requests */
+ oz_display_conf_type(setup->bRequest);
+ switch (setup->bRequest) {
+ case USB_REQ_SET_CONFIGURATION:
+ oz_hcd_complete_set_config(port, urb, rcode,
+ (u8)wvalue);
+ break;
+ case USB_REQ_SET_INTERFACE:
+ oz_hcd_complete_set_interface(port, urb, rcode,
+ (u8)windex, (u8)wvalue);
+ break;
+ default:
+ oz_complete_urb(hcd, urb, 0, 0);
+ }
+
+ } else {
+ int copy_len;
+ oz_trace("VENDOR-CLASS - cnf\n");
+ if (data_len <= urb->transfer_buffer_length)
+ copy_len = data_len;
+ else
+ copy_len = urb->transfer_buffer_length;
+ if (copy_len)
+ memcpy(urb->transfer_buffer, data, copy_len);
+ urb->actual_length = copy_len;
+ oz_complete_urb(hcd, urb, 0, 0);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
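+/* Stores one unit in the endpoint's ring buffer as a single length byte
+ * followed by the data, wrapping at buffer_size. Returns -1 if the unit
+ * does not fit.
+ */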
+static int oz_hcd_buffer_data(struct oz_endpoint *ep, u8 *data, int data_len)
+{
+ int space;
+ int copy_len;
+ if (!ep->buffer)
+ return -1;
+ space = ep->out_ix-ep->in_ix-1;
+ if (space < 0)
+ space += ep->buffer_size;
+ if (space < (data_len+1)) {
+ oz_trace("Buffer full\n");
+ return -1;
+ }
+ ep->buffer[ep->in_ix] = (u8)data_len;
+ if (++ep->in_ix == ep->buffer_size)
+ ep->in_ix = 0;
+ copy_len = ep->buffer_size - ep->in_ix;
+ if (copy_len > data_len)
+ copy_len = data_len;
+ memcpy(&ep->buffer[ep->in_ix], data, copy_len);
+
+ if (copy_len < data_len) {
+ memcpy(ep->buffer, data+copy_len, data_len-copy_len);
+ ep->in_ix = data_len-copy_len;
+ } else {
+ ep->in_ix += copy_len;
+ }
+ if (ep->in_ix == ep->buffer_size)
+ ep->in_ix = 0;
+ ep->buffered_units++;
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+void oz_hcd_data_ind(void *hport, u8 endpoint, u8 *data, int data_len)
+{
+ struct oz_port *port = (struct oz_port *)hport;
+ struct oz_endpoint *ep;
+ struct oz_hcd *ozhcd = port->ozhcd;
+ spin_lock_bh(&ozhcd->hcd_lock);
+ ep = port->in_ep[endpoint & USB_ENDPOINT_NUMBER_MASK];
+ if (ep == 0)
+ goto done;
+ switch (ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_INT:
+ case USB_ENDPOINT_XFER_BULK:
+ if (!list_empty(&ep->urb_list)) {
+ struct oz_urb_link *urbl =
+ list_first_entry(&ep->urb_list,
+ struct oz_urb_link, link);
+ struct urb *urb;
+ int copy_len;
+ list_del_init(&urbl->link);
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ urb = urbl->urb;
+ oz_free_urb_link(urbl);
+ if (data_len <= urb->transfer_buffer_length)
+ copy_len = data_len;
+ else
+ copy_len = urb->transfer_buffer_length;
+ memcpy(urb->transfer_buffer, data, copy_len);
+ urb->actual_length = copy_len;
+ oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ return;
+ }
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ oz_hcd_buffer_data(ep, data, data_len);
+ break;
+ }
+done:
+ spin_unlock_bh(&ozhcd->hcd_lock);
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static inline int oz_usb_get_frame_number(void)
+{
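+	/* There is no real SOF counter to report, so a frame number is
+	 * synthesised from jiffies and advances in milliseconds.
+	 */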
+ return jiffies_to_msecs(get_jiffies_64());
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_hcd_heartbeat(void *hport)
+{
+ int rc = 0;
+ struct oz_port *port = (struct oz_port *)hport;
+ struct oz_hcd *ozhcd = port->ozhcd;
+ struct oz_urb_link *urbl;
+ struct list_head xfr_list;
+ struct list_head *e;
+ struct list_head *n;
+ struct urb *urb;
+ struct oz_endpoint *ep;
+ unsigned long now = jiffies;
+ INIT_LIST_HEAD(&xfr_list);
+ /* Check the OUT isoc endpoints to see if any URB data can be sent.
+ */
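+	/* Credit accrues at one unit per elapsed jiffy, capped at the
+	 * endpoint's ceiling; a URB is only released once the available
+	 * credit covers all of its isoc packets.
+	 */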
+ spin_lock_bh(&ozhcd->hcd_lock);
+ list_for_each(e, &port->isoc_out_ep) {
+ ep = ep_from_link(e);
+ if (ep->credit < 0)
+ continue;
+ ep->credit += (now - ep->last_jiffies);
+ if (ep->credit > ep->credit_ceiling)
+ ep->credit = ep->credit_ceiling;
+ oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num, 0, 0, ep->credit);
+ ep->last_jiffies = now;
+ while (ep->credit && !list_empty(&ep->urb_list)) {
+ urbl = list_first_entry(&ep->urb_list,
+ struct oz_urb_link, link);
+ urb = urbl->urb;
+ if (ep->credit < urb->number_of_packets)
+ break;
+ ep->credit -= urb->number_of_packets;
+ oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num, 0, 0,
+ ep->credit);
+ list_del(&urbl->link);
+ list_add_tail(&urbl->link, &xfr_list);
+ }
+ }
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ /* Send to PD and complete URBs.
+ */
+ list_for_each_safe(e, n, &xfr_list) {
+ unsigned long t;
+ urbl = container_of(e, struct oz_urb_link, link);
+ urb = urbl->urb;
+ t = urbl->submit_jiffies;
+ list_del_init(e);
+ urb->error_count = 0;
+ urb->start_frame = oz_usb_get_frame_number();
+ oz_usb_send_isoc(port->hpd, urbl->ep_num, urb);
+ oz_free_urb_link(urbl);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0, t);
+ }
+ /* Check the IN isoc endpoints to see if any URBs can be completed.
+ */
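+	/* An endpoint stays in the buffering state until enough units have
+	 * accumulated; after that, buffered units are copied out into
+	 * waiting URBs, one length-prefixed unit per isoc packet.
+	 */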
+ spin_lock_bh(&ozhcd->hcd_lock);
+ list_for_each(e, &port->isoc_in_ep) {
+ struct oz_endpoint *ep = ep_from_link(e);
+ if (ep->flags & OZ_F_EP_BUFFERING) {
+			if (ep->buffered_units >= OZ_IN_BUFFERING_UNITS) {
+ ep->flags &= ~OZ_F_EP_BUFFERING;
+ ep->credit = 0;
+ oz_event_log(OZ_EVT_EP_CREDIT,
+ ep->ep_num | USB_DIR_IN,
+ 0, 0, ep->credit);
+ ep->last_jiffies = now;
+ ep->start_frame = 0;
+ oz_event_log(OZ_EVT_EP_BUFFERING,
+ ep->ep_num | USB_DIR_IN, 0, 0, 0);
+ }
+ continue;
+ }
+ ep->credit += (now - ep->last_jiffies);
+ oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num | USB_DIR_IN,
+ 0, 0, ep->credit);
+ ep->last_jiffies = now;
+ while (!list_empty(&ep->urb_list)) {
+ struct oz_urb_link *urbl =
+ list_first_entry(&ep->urb_list,
+ struct oz_urb_link, link);
+ struct urb *urb = urbl->urb;
+ int len = 0;
+ int copy_len;
+ int i;
+ if (ep->credit < urb->number_of_packets)
+ break;
+ if (ep->buffered_units < urb->number_of_packets)
+ break;
+ urb->actual_length = 0;
+ for (i = 0; i < urb->number_of_packets; i++) {
+ len = ep->buffer[ep->out_ix];
+ if (++ep->out_ix == ep->buffer_size)
+ ep->out_ix = 0;
+ copy_len = ep->buffer_size - ep->out_ix;
+ if (copy_len > len)
+ copy_len = len;
+ memcpy(urb->transfer_buffer,
+ &ep->buffer[ep->out_ix], copy_len);
+ if (copy_len < len) {
+ memcpy(urb->transfer_buffer+copy_len,
+ ep->buffer, len-copy_len);
+ ep->out_ix = len-copy_len;
+ } else
+ ep->out_ix += copy_len;
+ if (ep->out_ix == ep->buffer_size)
+ ep->out_ix = 0;
+ urb->iso_frame_desc[i].offset =
+ urb->actual_length;
+ urb->actual_length += len;
+ urb->iso_frame_desc[i].actual_length = len;
+ urb->iso_frame_desc[i].status = 0;
+ }
+ ep->buffered_units -= urb->number_of_packets;
+ urb->error_count = 0;
+ urb->start_frame = ep->start_frame;
+ ep->start_frame += urb->number_of_packets;
+ list_del(&urbl->link);
+ list_add_tail(&urbl->link, &xfr_list);
+ ep->credit -= urb->number_of_packets;
+ oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num | USB_DIR_IN,
+ 0, 0, ep->credit);
+ }
+ }
+ if (!list_empty(&port->isoc_out_ep) || !list_empty(&port->isoc_in_ep))
+ rc = 1;
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ /* Complete the filled URBs.
+ */
+ list_for_each_safe(e, n, &xfr_list) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ urb = urbl->urb;
+ list_del_init(e);
+ oz_free_urb_link(urbl);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ }
+ /* Check if there are any ep0 requests that have timed out.
+	 * If so, resend them to the PD.
+ */
+ ep = port->out_ep[0];
+ if (ep) {
+ struct list_head *e;
+ struct list_head *n;
+ spin_lock_bh(&ozhcd->hcd_lock);
+ list_for_each_safe(e, n, &ep->urb_list) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ if (time_after(now, urbl->submit_jiffies+HZ/2)) {
+ oz_trace("%ld: Request 0x%p timeout\n",
+ now, urbl->urb);
+ urbl->submit_jiffies = now;
+ list_del(e);
+ list_add_tail(e, &xfr_list);
+ }
+ }
+ if (!list_empty(&ep->urb_list))
+ rc = 1;
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ e = xfr_list.next;
+ while (e != &xfr_list) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ e = e->next;
+ oz_trace("Resending request to PD.\n");
+ oz_process_ep0_urb(ozhcd, urbl->urb, GFP_ATOMIC);
+ oz_free_urb_link(urbl);
+ }
+ }
+ return rc;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
+ struct oz_port *port,
+ struct usb_host_interface *intf, gfp_t mem_flags)
+{
+ struct oz_hcd *ozhcd = port->ozhcd;
+ int i;
+ int if_ix = intf->desc.bInterfaceNumber;
+ int request_heartbeat = 0;
+ oz_trace("interface[%d] = %p\n", if_ix, intf);
+ for (i = 0; i < intf->desc.bNumEndpoints; i++) {
+ struct usb_host_endpoint *hep = &intf->endpoint[i];
+ u8 ep_addr = hep->desc.bEndpointAddress;
+ u8 ep_num = ep_addr & USB_ENDPOINT_NUMBER_MASK;
+ struct oz_endpoint *ep;
+ int buffer_size = 0;
+
+ oz_trace("%d bEndpointAddress = %x\n", i, ep_addr);
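+		/* Only IN isochronous endpoints get a local ring buffer so
+		 * that incoming data can accumulate while URBs catch up.
+		 */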
+ if ((ep_addr & USB_ENDPOINT_DIR_MASK) &&
+ ((hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_ISOC)) {
+ buffer_size = 24*1024;
+ }
+
+ ep = oz_ep_alloc(mem_flags, buffer_size);
+ if (!ep) {
+ oz_clean_endpoints_for_interface(hcd, port, if_ix);
+ return -ENOMEM;
+ }
+ ep->attrib = hep->desc.bmAttributes;
+ ep->ep_num = ep_num;
+ if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_ISOC) {
+ oz_trace("wMaxPacketSize = %d\n",
+ hep->desc.wMaxPacketSize);
+ ep->credit_ceiling = 200;
+ if (ep_addr & USB_ENDPOINT_DIR_MASK) {
+ ep->flags |= OZ_F_EP_BUFFERING;
+ oz_event_log(OZ_EVT_EP_BUFFERING,
+ ep->ep_num | USB_DIR_IN, 1, 0, 0);
+ } else {
+ ep->flags |= OZ_F_EP_HAVE_STREAM;
+ if (oz_usb_stream_create(port->hpd, ep_num))
+ ep->flags &= ~OZ_F_EP_HAVE_STREAM;
+ }
+ }
+ spin_lock_bh(&ozhcd->hcd_lock);
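+		/* The interface endpoint mask records OUT endpoints in the
+		 * low OZ_NB_ENDPOINTS bits and IN endpoints in the bits
+		 * above them.
+		 */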
+ if (ep_addr & USB_ENDPOINT_DIR_MASK) {
+ port->in_ep[ep_num] = ep;
+ port->iface[if_ix].ep_mask |=
+ (1<<(ep_num+OZ_NB_ENDPOINTS));
+ if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_ISOC) {
+ list_add_tail(&ep->link, &port->isoc_in_ep);
+ request_heartbeat = 1;
+ }
+ } else {
+ port->out_ep[ep_num] = ep;
+ port->iface[if_ix].ep_mask |= (1<<ep_num);
+ if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_ISOC) {
+ list_add_tail(&ep->link, &port->isoc_out_ep);
+ request_heartbeat = 1;
+ }
+ }
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ if (request_heartbeat && port->hpd)
+ oz_usb_request_heartbeat(port->hpd);
+ }
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
+ struct oz_port *port, int if_ix)
+{
+ struct oz_hcd *ozhcd = port->ozhcd;
+ unsigned mask;
+ int i;
+ struct list_head ep_list;
+
+ oz_trace("Deleting endpoints for interface %d\n", if_ix);
+ if (if_ix >= port->num_iface)
+ return;
+ INIT_LIST_HEAD(&ep_list);
+ spin_lock_bh(&ozhcd->hcd_lock);
+ mask = port->iface[if_ix].ep_mask;
+ port->iface[if_ix].ep_mask = 0;
+ for (i = 0; i < OZ_NB_ENDPOINTS; i++) {
+ struct list_head *e;
+ /* Gather OUT endpoints.
+ */
+ if ((mask & (1<<i)) && port->out_ep[i]) {
+ e = &port->out_ep[i]->link;
+ port->out_ep[i] = 0;
+ /* Remove from isoc list if present.
+ */
+ list_del(e);
+ list_add_tail(e, &ep_list);
+ }
+ /* Gather IN endpoints.
+ */
+ if ((mask & (1<<(i+OZ_NB_ENDPOINTS))) && port->in_ep[i]) {
+ e = &port->in_ep[i]->link;
+ port->in_ep[i] = 0;
+ list_del(e);
+ list_add_tail(e, &ep_list);
+ }
+ }
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ while (!list_empty(&ep_list)) {
+ struct oz_endpoint *ep =
+ list_first_entry(&ep_list, struct oz_endpoint, link);
+ list_del_init(&ep->link);
+ oz_ep_free(port, ep);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
+ struct oz_port *port, struct usb_host_config *config,
+ gfp_t mem_flags)
+{
+ struct oz_hcd *ozhcd = port->ozhcd;
+ int i;
+ int num_iface = config->desc.bNumInterfaces;
+ if (num_iface) {
+ struct oz_interface *iface;
+
+ iface = kmalloc(num_iface*sizeof(struct oz_interface),
+ mem_flags | __GFP_ZERO);
+ if (!iface)
+ return -ENOMEM;
+ spin_lock_bh(&ozhcd->hcd_lock);
+ port->iface = iface;
+ port->num_iface = num_iface;
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ }
+ for (i = 0; i < num_iface; i++) {
+ struct usb_host_interface *intf =
+ &config->intf_cache[i]->altsetting[0];
+ if (oz_build_endpoints_for_interface(hcd, port, intf,
+ mem_flags))
+ goto fail;
+ }
+ return 0;
+fail:
+ oz_clean_endpoints_for_config(hcd, port);
+ return -1;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
+ struct oz_port *port)
+{
+ struct oz_hcd *ozhcd = port->ozhcd;
+ int i;
+ oz_trace("Deleting endpoints for configuration.\n");
+ for (i = 0; i < port->num_iface; i++)
+ oz_clean_endpoints_for_interface(hcd, port, i);
+ spin_lock_bh(&ozhcd->hcd_lock);
+ if (port->iface) {
+ oz_trace("Freeing interfaces object.\n");
+ kfree(port->iface);
+ port->iface = 0;
+ }
+ port->num_iface = 0;
+ spin_unlock_bh(&ozhcd->hcd_lock);
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static void *oz_claim_hpd(struct oz_port *port)
+{
+ void *hpd = 0;
+ struct oz_hcd *ozhcd = port->ozhcd;
+ spin_lock_bh(&ozhcd->hcd_lock);
+ hpd = port->hpd;
+ if (hpd)
+ oz_usb_get(hpd);
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ return hpd;
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct usb_ctrlrequest *setup;
+ unsigned windex;
+ unsigned wvalue;
+ unsigned wlength;
+ void *hpd = 0;
+ u8 req_id;
+ int rc = 0;
+ unsigned complete = 0;
+
+ int port_ix = -1;
+ struct oz_port *port = 0;
+
+ oz_trace2(OZ_TRACE_URB, "%lu: oz_process_ep0_urb(%p)\n", jiffies, urb);
+ port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
+ if (port_ix < 0) {
+ rc = -EPIPE;
+ goto out;
+ }
+ port = &ozhcd->ports[port_ix];
+ if (((port->flags & OZ_PORT_F_PRESENT) == 0)
+ || (port->flags & OZ_PORT_F_DYING)) {
+ oz_trace("Refusing URB port_ix = %d devnum = %d\n",
+ port_ix, urb->dev->devnum);
+ rc = -EPIPE;
+ goto out;
+ }
+ /* Store port in private context data.
+ */
+ urb->hcpriv = port;
+ setup = (struct usb_ctrlrequest *)urb->setup_packet;
+ windex = le16_to_cpu(setup->wIndex);
+ wvalue = le16_to_cpu(setup->wValue);
+ wlength = le16_to_cpu(setup->wLength);
+ oz_trace2(OZ_TRACE_CTRL_DETAIL, "bRequestType = %x\n",
+ setup->bRequestType);
+ oz_trace2(OZ_TRACE_CTRL_DETAIL, "bRequest = %x\n", setup->bRequest);
+ oz_trace2(OZ_TRACE_CTRL_DETAIL, "wValue = %x\n", wvalue);
+ oz_trace2(OZ_TRACE_CTRL_DETAIL, "wIndex = %x\n", windex);
+ oz_trace2(OZ_TRACE_CTRL_DETAIL, "wLength = %x\n", wlength);
+
+ req_id = port->next_req_id++;
+ hpd = oz_claim_hpd(port);
+ if (hpd == 0) {
+ oz_trace("Cannot claim port\n");
+ rc = -EPIPE;
+ goto out;
+ }
+
+ if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ /* Standard requests
+ */
+ switch (setup->bRequest) {
+ case USB_REQ_GET_DESCRIPTOR:
+ oz_trace("USB_REQ_GET_DESCRIPTOR - req\n");
+ break;
+ case USB_REQ_SET_ADDRESS:
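+			/* The assigned address is completed locally and only
+			 * cached as the port's bus address; it appears to be
+			 * used later to match URB device numbers to ports.
+			 */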
+ oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest,
+ 0, 0, setup->bRequestType);
+ oz_trace("USB_REQ_SET_ADDRESS - req\n");
+ oz_trace("Port %d address is 0x%x\n", ozhcd->conn_port,
+ (u8)le16_to_cpu(setup->wValue));
+ spin_lock_bh(&ozhcd->hcd_lock);
+ if (ozhcd->conn_port >= 0) {
+ ozhcd->ports[ozhcd->conn_port].bus_addr =
+ (u8)le16_to_cpu(setup->wValue);
+ oz_trace("Clearing conn_port\n");
+ ozhcd->conn_port = -1;
+ }
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ complete = 1;
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ oz_trace("USB_REQ_SET_CONFIGURATION - req\n");
+ break;
+ case USB_REQ_GET_CONFIGURATION:
+			/* We short circuit this case and reply directly since
+ * we have the selected configuration number cached.
+ */
+ oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest, 0, 0,
+ setup->bRequestType);
+ oz_trace("USB_REQ_GET_CONFIGURATION - reply now\n");
+ if (urb->transfer_buffer_length >= 1) {
+ urb->actual_length = 1;
+ *((u8 *)urb->transfer_buffer) =
+ port->config_num;
+ complete = 1;
+ } else {
+ rc = -EPIPE;
+ }
+ break;
+ case USB_REQ_GET_INTERFACE:
+			/* We short circuit this case and reply directly since
+ * we have the selected interface alternative cached.
+ */
+ oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest, 0, 0,
+ setup->bRequestType);
+ oz_trace("USB_REQ_GET_INTERFACE - reply now\n");
+ if (urb->transfer_buffer_length >= 1) {
+ urb->actual_length = 1;
+ *((u8 *)urb->transfer_buffer) =
+ port->iface[(u8)windex].alt;
+ oz_trace("interface = %d alt = %d\n",
+ windex, port->iface[(u8)windex].alt);
+ complete = 1;
+ } else {
+ rc = -EPIPE;
+ }
+ break;
+ case USB_REQ_SET_INTERFACE:
+ oz_trace("USB_REQ_SET_INTERFACE - req\n");
+ break;
+ }
+ }
+ if (!rc && !complete) {
+ int data_len = 0;
+ if ((setup->bRequestType & USB_DIR_IN) == 0)
+ data_len = wlength;
+ if (oz_usb_control_req(port->hpd, req_id, setup,
+ urb->transfer_buffer, data_len)) {
+ rc = -ENOMEM;
+ } else {
+ /* Note: we are queuing the request after we have
+			 * submitted it to be transmitted. If the request were
+ * to complete before we queued it then it would not
+ * be found in the queue. It seems impossible for
+ * this to happen but if it did the request would
+ * be resubmitted so the problem would hopefully
+ * resolve itself. Putting the request into the
+ * queue before it has been sent is worse since the
+ * urb could be cancelled while we are using it
+ * to build the request.
+ */
+ if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id))
+ rc = -ENOMEM;
+ }
+ }
+ oz_usb_put(hpd);
+out:
+ if (rc || complete) {
+ oz_trace("Completing request locally\n");
+ oz_complete_urb(ozhcd->hcd, urb, rc, 0);
+ } else {
+ oz_usb_request_heartbeat(port->hpd);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static int oz_urb_process(struct oz_hcd *ozhcd, struct urb *urb)
+{
+ int rc = 0;
+ struct oz_port *port = urb->hcpriv;
+ u8 ep_addr;
+ /* When we are paranoid we keep a list of urbs which we check against
+ * before handing one back. This is just for debugging during
+ * development and should be turned off in the released driver.
+ */
+ oz_remember_urb(urb);
+ /* Check buffer is valid.
+ */
+ if (!urb->transfer_buffer && urb->transfer_buffer_length)
+ return -EINVAL;
+ /* Check if there is a device at the port - refuse if not.
+ */
+ if ((port->flags & OZ_PORT_F_PRESENT) == 0)
+ return -EPIPE;
+ ep_addr = usb_pipeendpoint(urb->pipe);
+ if (ep_addr) {
+ /* If the request is not for EP0 then queue it.
+ */
+ if (oz_enqueue_ep_urb(port, ep_addr, usb_pipein(urb->pipe),
+ urb, 0))
+ rc = -EPIPE;
+ } else {
+ oz_process_ep0_urb(ozhcd, urb, GFP_ATOMIC);
+ }
+ return rc;
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static void oz_urb_process_tasklet(unsigned long unused)
+{
+ unsigned long irq_state;
+ struct urb *urb;
+ struct oz_hcd *ozhcd = oz_hcd_claim();
+ int rc = 0;
+ if (ozhcd == 0)
+ return;
+ /* This is called from a tasklet so is in softirq context but the urb
+ * list is filled from any context so we need to lock
+ * appropriately while removing urbs.
+ */
+ spin_lock_irqsave(&g_tasklet_lock, irq_state);
+ while (!list_empty(&ozhcd->urb_pending_list)) {
+ struct oz_urb_link *urbl =
+ list_first_entry(&ozhcd->urb_pending_list,
+ struct oz_urb_link, link);
+ list_del_init(&urbl->link);
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ urb = urbl->urb;
+ oz_free_urb_link(urbl);
+ rc = oz_urb_process(ozhcd, urb);
+ if (rc)
+ oz_complete_urb(ozhcd->hcd, urb, rc, 0);
+ spin_lock_irqsave(&g_tasklet_lock, irq_state);
+ }
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ oz_hcd_put(ozhcd);
+}
+/*------------------------------------------------------------------------------
+ * This function searches for the urb in any of the lists it could be in.
+ * If it is found it is removed from the list and completed. If the urb is
+ * being processed then it won't be in a list so won't be found. However, the
+ * call to usb_hcd_check_unlink_urb() will set the value of the unlinked field
+ * to a non-zero value. When an attempt is made to put the urb back in a list
+ * the unlinked field will be checked and the urb will then be completed.
+ * Context: tasklet
+ */
+static void oz_urb_cancel(struct oz_port *port, u8 ep_num, struct urb *urb)
+{
+ struct oz_urb_link *urbl = 0;
+ struct list_head *e;
+ struct oz_hcd *ozhcd;
+ unsigned long irq_state;
+ u8 ix;
+ if (port == 0) {
+ oz_trace("ERRORERROR: oz_urb_cancel(%p) port is null\n", urb);
+ return;
+ }
+ ozhcd = port->ozhcd;
+ if (ozhcd == 0) {
+ oz_trace("ERRORERROR: oz_urb_cancel(%p) ozhcd is null\n", urb);
+ return;
+ }
+
+ /* Look in the tasklet queue.
+ */
+ spin_lock_irqsave(&g_tasklet_lock, irq_state);
+ list_for_each(e, &ozhcd->urb_cancel_list) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ if (urb == urbl->urb) {
+ list_del_init(e);
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ goto out2;
+ }
+ }
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ urbl = 0;
+
+ /* Look in the orphanage.
+ */
+ spin_lock_irqsave(&ozhcd->hcd_lock, irq_state);
+ list_for_each(e, &ozhcd->orphanage) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ if (urbl->urb == urb) {
+ list_del(e);
+ oz_trace("Found urb in orphanage\n");
+ goto out;
+ }
+ }
+ ix = (ep_num & 0xf);
+ urbl = 0;
+ if ((ep_num & USB_DIR_IN) && ix)
+ urbl = oz_remove_urb(port->in_ep[ix], urb);
+ else
+ urbl = oz_remove_urb(port->out_ep[ix], urb);
+out:
+ spin_unlock_irqrestore(&ozhcd->hcd_lock, irq_state);
+out2:
+ if (urbl) {
+ urb->actual_length = 0;
+ oz_free_urb_link(urbl);
+ oz_complete_urb(ozhcd->hcd, urb, -EPIPE, 0);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static void oz_urb_cancel_tasklet(unsigned long unused)
+{
+ unsigned long irq_state;
+ struct urb *urb;
+ struct oz_hcd *ozhcd = oz_hcd_claim();
+ if (ozhcd == 0)
+ return;
+ spin_lock_irqsave(&g_tasklet_lock, irq_state);
+ while (!list_empty(&ozhcd->urb_cancel_list)) {
+ struct oz_urb_link *urbl =
+ list_first_entry(&ozhcd->urb_cancel_list,
+ struct oz_urb_link, link);
+ list_del_init(&urbl->link);
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ urb = urbl->urb;
+ if (urb->unlinked)
+ oz_urb_cancel(urbl->port, urbl->ep_num, urb);
+ oz_free_urb_link(urbl);
+ spin_lock_irqsave(&g_tasklet_lock, irq_state);
+ }
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ oz_hcd_put(ozhcd);
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status)
+{
+ if (ozhcd) {
+ struct oz_urb_link *urbl;
+ while (!list_empty(&ozhcd->orphanage)) {
+ urbl = list_first_entry(&ozhcd->orphanage,
+ struct oz_urb_link, link);
+ list_del(&urbl->link);
+ oz_complete_urb(ozhcd->hcd, urbl->urb, status, 0);
+ oz_free_urb_link(urbl);
+ }
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static int oz_hcd_start(struct usb_hcd *hcd)
+{
+ oz_trace("oz_hcd_start()\n");
+ hcd->power_budget = 200;
+ hcd->state = HC_STATE_RUNNING;
+ hcd->uses_new_polling = 1;
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static void oz_hcd_stop(struct usb_hcd *hcd)
+{
+ oz_trace("oz_hcd_stop()\n");
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static void oz_hcd_shutdown(struct usb_hcd *hcd)
+{
+ oz_trace("oz_hcd_shutdown()\n");
+}
+/*------------------------------------------------------------------------------
+ * Context: any
+ */
+#ifdef WANT_EVENT_TRACE
+static u8 oz_get_irq_ctx(void)
+{
+ u8 irq_info = 0;
+ if (in_interrupt())
+ irq_info |= 1;
+ if (in_irq())
+ irq_info |= 2;
+ return irq_info;
+}
+#endif /* WANT_EVENT_TRACE */
+/*------------------------------------------------------------------------------
+ * Called to queue an urb for the device.
+ * This function should return a non-zero error code if it fails the urb but
+ * should not call usb_hcd_giveback_urb().
+ * Context: any
+ */
+static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct oz_hcd *ozhcd = oz_hcd_private(hcd);
+ int rc = 0;
+ int port_ix;
+ struct oz_port *port;
+ unsigned long irq_state;
+ struct oz_urb_link *urbl;
+ oz_trace2(OZ_TRACE_URB, "%lu: oz_hcd_urb_enqueue(%p)\n",
+ jiffies, urb);
+ oz_event_log(OZ_EVT_URB_SUBMIT, oz_get_irq_ctx(),
+ (u16)urb->number_of_packets, urb, urb->pipe);
+ if (unlikely(ozhcd == 0)) {
+ oz_trace2(OZ_TRACE_URB, "%lu: Refused urb(%p) not ozhcd.\n",
+ jiffies, urb);
+ return -EPIPE;
+ }
+ if (unlikely(hcd->state != HC_STATE_RUNNING)) {
+ oz_trace2(OZ_TRACE_URB, "%lu: Refused urb(%p) not running.\n",
+ jiffies, urb);
+ return -EPIPE;
+ }
+ port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
+ if (port_ix < 0)
+ return -EPIPE;
+ port = &ozhcd->ports[port_ix];
+ if (port == 0)
+ return -EPIPE;
+ if ((port->flags & OZ_PORT_F_PRESENT) == 0) {
+ oz_trace("Refusing URB port_ix = %d devnum = %d\n",
+ port_ix, urb->dev->devnum);
+ return -EPIPE;
+ }
+ urb->hcpriv = port;
+ /* Put request in queue for processing by tasklet.
+ */
+ urbl = oz_alloc_urb_link();
+ if (unlikely(urbl == 0))
+ return -ENOMEM;
+ urbl->urb = urb;
+ spin_lock_irqsave(&g_tasklet_lock, irq_state);
+ rc = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (unlikely(rc)) {
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ oz_free_urb_link(urbl);
+ return rc;
+ }
+ list_add_tail(&urbl->link, &ozhcd->urb_pending_list);
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ tasklet_schedule(&g_urb_process_tasklet);
+ atomic_inc(&g_pending_urbs);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
+ struct urb *urb)
+{
+ struct oz_urb_link *urbl = 0;
+ struct list_head *e;
+ if (unlikely(ep == 0))
+ return 0;
+ list_for_each(e, &ep->urb_list) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ if (urbl->urb == urb) {
+ list_del_init(e);
+ if (usb_pipeisoc(urb->pipe)) {
+ ep->credit -= urb->number_of_packets;
+ if (ep->credit < 0)
+ ep->credit = 0;
+ oz_event_log(OZ_EVT_EP_CREDIT,
+ usb_pipein(urb->pipe) ?
+ (ep->ep_num | USB_DIR_IN) : ep->ep_num,
+ 0, 0, ep->credit);
+ }
+ return urbl;
+ }
+ }
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Called to dequeue a previously submitted urb for the device.
+ * Context: any
+ */
+static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct oz_hcd *ozhcd = oz_hcd_private(hcd);
+ struct oz_urb_link *urbl = 0;
+ int rc;
+ unsigned long irq_state;
+ oz_trace2(OZ_TRACE_URB, "%lu: oz_hcd_urb_dequeue(%p)\n", jiffies, urb);
+ urbl = oz_alloc_urb_link();
+ if (unlikely(urbl == 0))
+ return -ENOMEM;
+ spin_lock_irqsave(&g_tasklet_lock, irq_state);
+ /* The following function checks the urb is still in the queue
+ * maintained by the core and that the unlinked field is zero.
+ * If both are true the function sets the unlinked field and returns
+ * zero. Otherwise it returns an error.
+ */
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ /* We have to check we haven't completed the urb or are about
+ * to complete it. When we do we set hcpriv to 0 so if this has
+ * already happened we don't put the urb in the cancel queue.
+ */
+ if ((rc == 0) && urb->hcpriv) {
+ urbl->urb = urb;
+ urbl->port = (struct oz_port *)urb->hcpriv;
+ urbl->ep_num = usb_pipeendpoint(urb->pipe);
+ if (usb_pipein(urb->pipe))
+ urbl->ep_num |= USB_DIR_IN;
+ list_add_tail(&urbl->link, &ozhcd->urb_cancel_list);
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ tasklet_schedule(&g_urb_cancel_tasklet);
+ } else {
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ oz_free_urb_link(urbl);
+ }
+ return rc;
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ oz_trace("oz_hcd_endpoint_disable\n");
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ oz_trace("oz_hcd_endpoint_reset\n");
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static int oz_hcd_get_frame_number(struct usb_hcd *hcd)
+{
+ oz_trace("oz_hcd_get_frame_number\n");
+ return oz_usb_get_frame_number();
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ * This is called as a consequence of us calling usb_hcd_poll_rh_status() and we
+ * always do that in softirq context.
+ */
+static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct oz_hcd *ozhcd = oz_hcd_private(hcd);
+ int i;
+
+ oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_status_data()\n");
+ buf[0] = 0;
+
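+	/* In the status-change bitmap bit 0 refers to the hub itself, so
+	 * port i is reported in bit i+1.
+	 */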
+ spin_lock_bh(&ozhcd->hcd_lock);
+ for (i = 0; i < OZ_NB_PORTS; i++) {
+ if (ozhcd->ports[i].flags & OZ_PORT_F_CHANGED) {
+ oz_trace2(OZ_TRACE_HUB, "Port %d changed\n", i);
+ ozhcd->ports[i].flags &= ~OZ_PORT_F_CHANGED;
+ buf[0] |= 1<<(i+1);
+ }
+ }
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ return buf[0] ? 1 : 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static void oz_get_hub_descriptor(struct usb_hcd *hcd,
+ struct usb_hub_descriptor *desc)
+{
+ oz_trace2(OZ_TRACE_HUB, "GetHubDescriptor\n");
+ memset(desc, 0, sizeof(*desc));
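+	/* 0x29 is the standard USB hub descriptor type; characteristics
+	 * value 0x0001 selects individual (per-port) power switching.
+	 */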
+ desc->bDescriptorType = 0x29;
+ desc->bDescLength = 9;
+ desc->wHubCharacteristics = (__force __u16)
+ __constant_cpu_to_le16(0x0001);
+ desc->bNbrPorts = OZ_NB_PORTS;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int oz_set_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
+{
+ struct oz_port *port;
+ int err = 0;
+ u8 port_id = (u8)windex;
+ struct oz_hcd *ozhcd = oz_hcd_private(hcd);
+ unsigned set_bits = 0;
+ unsigned clear_bits = 0;
+ oz_trace2(OZ_TRACE_HUB, "SetPortFeature\n");
+ if ((port_id < 1) || (port_id > OZ_NB_PORTS))
+ return -EPIPE;
+ port = &ozhcd->ports[port_id-1];
+ switch (wvalue) {
+ case USB_PORT_FEAT_CONNECTION:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_CONNECTION\n");
+ break;
+ case USB_PORT_FEAT_ENABLE:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_ENABLE\n");
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_SUSPEND\n");
+ break;
+ case USB_PORT_FEAT_OVER_CURRENT:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
+ break;
+ case USB_PORT_FEAT_RESET:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_RESET\n");
+ set_bits = USB_PORT_STAT_ENABLE | (USB_PORT_STAT_C_RESET<<16);
+ clear_bits = USB_PORT_STAT_RESET;
+ ozhcd->ports[port_id-1].bus_addr = 0;
+ break;
+ case USB_PORT_FEAT_POWER:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_POWER\n");
+ set_bits |= USB_PORT_STAT_POWER;
+ break;
+ case USB_PORT_FEAT_LOWSPEED:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_LOWSPEED\n");
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_CONNECTION\n");
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_ENABLE\n");
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_SUSPEND\n");
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_RESET\n");
+ break;
+ case USB_PORT_FEAT_TEST:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_TEST\n");
+ break;
+ case USB_PORT_FEAT_INDICATOR:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_INDICATOR\n");
+ break;
+ default:
+ oz_trace2(OZ_TRACE_HUB, "Other %d\n", wvalue);
+ break;
+ }
+ if (set_bits || clear_bits) {
+ spin_lock_bh(&port->port_lock);
+ port->status &= ~clear_bits;
+ port->status |= set_bits;
+ spin_unlock_bh(&port->port_lock);
+ }
+ oz_trace2(OZ_TRACE_HUB, "Port[%d] status = 0x%x\n", port_id,
+ port->status);
+ return err;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
+{
+ struct oz_port *port;
+ int err = 0;
+ u8 port_id = (u8)windex;
+ struct oz_hcd *ozhcd = oz_hcd_private(hcd);
+ unsigned clear_bits = 0;
+ oz_trace2(OZ_TRACE_HUB, "ClearPortFeature\n");
+ if ((port_id < 1) || (port_id > OZ_NB_PORTS))
+ return -EPIPE;
+ port = &ozhcd->ports[port_id-1];
+ switch (wvalue) {
+ case USB_PORT_FEAT_CONNECTION:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_CONNECTION\n");
+ break;
+ case USB_PORT_FEAT_ENABLE:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_ENABLE\n");
+ clear_bits = USB_PORT_STAT_ENABLE;
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_SUSPEND\n");
+ break;
+ case USB_PORT_FEAT_OVER_CURRENT:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
+ break;
+ case USB_PORT_FEAT_RESET:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_RESET\n");
+ break;
+ case USB_PORT_FEAT_POWER:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_POWER\n");
+ clear_bits |= USB_PORT_STAT_POWER;
+ break;
+ case USB_PORT_FEAT_LOWSPEED:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_LOWSPEED\n");
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_CONNECTION\n");
+ clear_bits = (USB_PORT_STAT_C_CONNECTION << 16);
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_ENABLE\n");
+ clear_bits = (USB_PORT_STAT_C_ENABLE << 16);
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_SUSPEND\n");
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_RESET\n");
+		clear_bits = (USB_PORT_STAT_C_RESET << 16);
+ break;
+ case USB_PORT_FEAT_TEST:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_TEST\n");
+ break;
+ case USB_PORT_FEAT_INDICATOR:
+ oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_INDICATOR\n");
+ break;
+ default:
+ oz_trace2(OZ_TRACE_HUB, "Other %d\n", wvalue);
+ break;
+ }
+ if (clear_bits) {
+ spin_lock_bh(&port->port_lock);
+ port->status &= ~clear_bits;
+ spin_unlock_bh(&port->port_lock);
+ }
+ oz_trace2(OZ_TRACE_HUB, "Port[%d] status = 0x%x\n", port_id,
+ ozhcd->ports[port_id-1].status);
+ return err;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int oz_get_port_status(struct usb_hcd *hcd, u16 windex, char *buf)
+{
+ struct oz_hcd *ozhcd;
+ u32 status = 0;
+ if ((windex < 1) || (windex > OZ_NB_PORTS))
+ return -EPIPE;
+ ozhcd = oz_hcd_private(hcd);
+ oz_trace2(OZ_TRACE_HUB, "GetPortStatus windex = %d\n", windex);
+ status = ozhcd->ports[windex-1].status;
+ put_unaligned(cpu_to_le32(status), (__le32 *)buf);
+ oz_trace2(OZ_TRACE_HUB, "Port[%d] status = %x\n", windex, status);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
+ u16 windex, char *buf, u16 wlength)
+{
+ int err = 0;
+ oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_control()\n");
+ switch (req_type) {
+ case ClearHubFeature:
+ oz_trace2(OZ_TRACE_HUB, "ClearHubFeature: %d\n", req_type);
+ break;
+ case ClearPortFeature:
+ err = oz_clear_port_feature(hcd, wvalue, windex);
+ break;
+ case GetHubDescriptor:
+ oz_get_hub_descriptor(hcd, (struct usb_hub_descriptor *)buf);
+ break;
+ case GetHubStatus:
+ oz_trace2(OZ_TRACE_HUB, "GetHubStatus: req_type = 0x%x\n",
+ req_type);
+ put_unaligned(__constant_cpu_to_le32(0), (__le32 *)buf);
+ break;
+ case GetPortStatus:
+ err = oz_get_port_status(hcd, windex, buf);
+ break;
+ case SetHubFeature:
+ oz_trace2(OZ_TRACE_HUB, "SetHubFeature: %d\n", req_type);
+ break;
+ case SetPortFeature:
+ err = oz_set_port_feature(hcd, wvalue, windex);
+ break;
+ default:
+ oz_trace2(OZ_TRACE_HUB, "Other: %d\n", req_type);
+ break;
+ }
+ return err;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int oz_hcd_bus_suspend(struct usb_hcd *hcd)
+{
+ struct oz_hcd *ozhcd;
+	oz_trace2(OZ_TRACE_HUB, "oz_hcd_bus_suspend()\n");
+ ozhcd = oz_hcd_private(hcd);
+ spin_lock_bh(&ozhcd->hcd_lock);
+ hcd->state = HC_STATE_SUSPENDED;
+ ozhcd->flags |= OZ_HDC_F_SUSPENDED;
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int oz_hcd_bus_resume(struct usb_hcd *hcd)
+{
+ struct oz_hcd *ozhcd;
+	oz_trace2(OZ_TRACE_HUB, "oz_hcd_bus_resume()\n");
+ ozhcd = oz_hcd_private(hcd);
+ spin_lock_bh(&ozhcd->hcd_lock);
+ ozhcd->flags &= ~OZ_HDC_F_SUSPENDED;
+ hcd->state = HC_STATE_RUNNING;
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ */
+static void oz_plat_shutdown(struct platform_device *dev)
+{
+ oz_trace("oz_plat_shutdown()\n");
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int oz_plat_probe(struct platform_device *dev)
+{
+ int i;
+ int err;
+ struct usb_hcd *hcd;
+ struct oz_hcd *ozhcd;
+ oz_trace("oz_plat_probe()\n");
+ hcd = usb_create_hcd(&g_oz_hc_drv, &dev->dev, dev_name(&dev->dev));
+ if (hcd == 0) {
+		oz_trace("Failed to create hcd object\n");
+ return -ENOMEM;
+ }
+ ozhcd = oz_hcd_private(hcd);
+ memset(ozhcd, 0, sizeof(*ozhcd));
+ INIT_LIST_HEAD(&ozhcd->urb_pending_list);
+ INIT_LIST_HEAD(&ozhcd->urb_cancel_list);
+ INIT_LIST_HEAD(&ozhcd->orphanage);
+ ozhcd->hcd = hcd;
+ ozhcd->conn_port = -1;
+ spin_lock_init(&ozhcd->hcd_lock);
+ for (i = 0; i < OZ_NB_PORTS; i++) {
+ struct oz_port *port = &ozhcd->ports[i];
+ port->ozhcd = ozhcd;
+ port->flags = 0;
+ port->status = 0;
+ port->bus_addr = 0xff;
+ spin_lock_init(&port->port_lock);
+ }
+ err = usb_add_hcd(hcd, 0, 0);
+ if (err) {
+		oz_trace("Failed to add hcd object\n");
+ usb_put_hcd(hcd);
+ return -1;
+ }
+ spin_lock_bh(&g_hcdlock);
+ g_ozhcd = ozhcd;
+ spin_unlock_bh(&g_hcdlock);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static int oz_plat_remove(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct oz_hcd *ozhcd;
+ oz_trace("oz_plat_remove()\n");
+ if (hcd == 0)
+ return -1;
+ ozhcd = oz_hcd_private(hcd);
+ spin_lock_bh(&g_hcdlock);
+ if (ozhcd == g_ozhcd)
+ g_ozhcd = 0;
+ spin_unlock_bh(&g_hcdlock);
+ oz_trace("Clearing orphanage\n");
+ oz_hcd_clear_orphanage(ozhcd, -EPIPE);
+ oz_trace("Removing hcd\n");
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+ oz_empty_link_pool();
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg)
+{
+ oz_trace("oz_plat_suspend()\n");
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: unknown
+ */
+static int oz_plat_resume(struct platform_device *dev)
+{
+ oz_trace("oz_plat_resume()\n");
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_hcd_init(void)
+{
+ int err;
+ if (usb_disabled())
+ return -ENODEV;
+ tasklet_init(&g_urb_process_tasklet, oz_urb_process_tasklet, 0);
+ tasklet_init(&g_urb_cancel_tasklet, oz_urb_cancel_tasklet, 0);
+ err = platform_driver_register(&g_oz_plat_drv);
+ oz_trace("platform_driver_register() returned %d\n", err);
+ if (err)
+ goto error;
+ g_plat_dev = platform_device_alloc(OZ_PLAT_DEV_NAME, -1);
+ if (g_plat_dev == 0) {
+ err = -ENOMEM;
+ goto error1;
+ }
+ oz_trace("platform_device_alloc() succeeded\n");
+ err = platform_device_add(g_plat_dev);
+ if (err)
+ goto error2;
+ oz_trace("platform_device_add() succeeded\n");
+ return 0;
+error2:
+ platform_device_put(g_plat_dev);
+error1:
+ platform_driver_unregister(&g_oz_plat_drv);
+error:
+ tasklet_disable(&g_urb_process_tasklet);
+ tasklet_disable(&g_urb_cancel_tasklet);
+ oz_trace("oz_hcd_init() failed %d\n", err);
+ return err;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_hcd_term(void)
+{
+ tasklet_disable(&g_urb_process_tasklet);
+ tasklet_disable(&g_urb_cancel_tasklet);
+ platform_device_unregister(g_plat_dev);
+ platform_driver_unregister(&g_oz_plat_drv);
+ oz_trace("Pending urbs:%d\n", atomic_read(&g_pending_urbs));
+}
diff --git a/drivers/staging/ozwpan/ozhcd.h b/drivers/staging/ozwpan/ozhcd.h
new file mode 100644
index 000000000000..9b30dfd09973
--- /dev/null
+++ b/drivers/staging/ozwpan/ozhcd.h
@@ -0,0 +1,15 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * ---------------------------------------------------------------------------*/
+#ifndef _OZHCD_H
+#define _OZHCD_H
+
+int oz_hcd_init(void);
+void oz_hcd_term(void);
+void *oz_hcd_pd_arrived(void *ctx);
+void oz_hcd_pd_departed(void *ctx);
+void oz_hcd_pd_reset(void *hpd, void *hport);
+
+#endif /* _OZHCD_H */
+
diff --git a/drivers/staging/ozwpan/ozmain.c b/drivers/staging/ozwpan/ozmain.c
new file mode 100644
index 000000000000..aaf2ccc0bcfb
--- /dev/null
+++ b/drivers/staging/ozwpan/ozmain.c
@@ -0,0 +1,58 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+#include <linux/ieee80211.h>
+#include "ozconfig.h"
+#include "ozpd.h"
+#include "ozproto.h"
+#include "ozcdev.h"
+#include "oztrace.h"
+#include "ozevent.h"
+/*------------------------------------------------------------------------------
+ * The name of the 802.11 MAC device. The default is the empty string, but a
+ * value can be supplied as a module parameter. An empty string means
+ * bind to nothing. '*' means bind to all netcards - this includes non-802.11
+ * netcards. Bindings can be added later using an IOCTL.
+ */
+char *g_net_dev = "";
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int __init ozwpan_init(void)
+{
+ oz_event_init();
+ oz_cdev_register();
+ oz_protocol_init(g_net_dev);
+ oz_app_enable(OZ_APPID_USB, 1);
+ oz_apps_init();
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static void __exit ozwpan_exit(void)
+{
+ oz_protocol_term();
+ oz_apps_term();
+ oz_cdev_deregister();
+ oz_event_term();
+}
+/*------------------------------------------------------------------------------
+ */
+module_param(g_net_dev, charp, S_IRUGO);
+module_init(ozwpan_init);
+module_exit(ozwpan_exit);
+
+MODULE_AUTHOR("Chris Kelly");
+MODULE_DESCRIPTION("Ozmo Devices USB over WiFi hcd driver");
+MODULE_VERSION("1.0.8");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c
new file mode 100644
index 000000000000..2b45d3d1800c
--- /dev/null
+++ b/drivers/staging/ozwpan/ozpd.c
@@ -0,0 +1,832 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+#include "ozconfig.h"
+#include "ozprotocol.h"
+#include "ozeltbuf.h"
+#include "ozpd.h"
+#include "ozproto.h"
+#include "oztrace.h"
+#include "ozevent.h"
+#include "ozcdev.h"
+#include "ozusbsvc.h"
+#include <asm/unaligned.h>
+#include <linux/uaccess.h>
+#include <net/psnap.h>
+/*------------------------------------------------------------------------------
+ */
+#define OZ_MAX_TX_POOL_SIZE 6
+/* Maximum number of uncompleted isoc frames that can be pending.
+ */
+#define OZ_MAX_SUBMITTED_ISOC 16
+/*------------------------------------------------------------------------------
+ */
+static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
+static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
+static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
+static int oz_send_isoc_frame(struct oz_pd *pd);
+static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
+static void oz_isoc_stream_free(struct oz_isoc_stream *st);
+static int oz_send_next_queued_frame(struct oz_pd *pd, int *more_data);
+static void oz_isoc_destructor(struct sk_buff *skb);
+static int oz_def_app_init(void);
+static void oz_def_app_term(void);
+static int oz_def_app_start(struct oz_pd *pd, int resume);
+static void oz_def_app_stop(struct oz_pd *pd, int pause);
+static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
+/*------------------------------------------------------------------------------
+ * Counts the uncompleted isoc frames submitted to the netcard.
+ */
+static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
+/* Application handler functions.
+ */
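+/* Entries carry their own app_id; slots with no protocol bound to them
+ * point at the no-op default handlers defined below.
+ */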
+static struct oz_app_if g_app_if[OZ_APPID_MAX] = {
+ {oz_usb_init,
+ oz_usb_term,
+ oz_usb_start,
+ oz_usb_stop,
+ oz_usb_rx,
+ oz_usb_heartbeat,
+ oz_usb_farewell,
+ OZ_APPID_USB},
+
+ {oz_def_app_init,
+ oz_def_app_term,
+ oz_def_app_start,
+ oz_def_app_stop,
+ oz_def_app_rx,
+ 0,
+ 0,
+ OZ_APPID_UNUSED1},
+
+ {oz_def_app_init,
+ oz_def_app_term,
+ oz_def_app_start,
+ oz_def_app_stop,
+ oz_def_app_rx,
+ 0,
+ 0,
+ OZ_APPID_UNUSED2},
+
+ {oz_cdev_init,
+ oz_cdev_term,
+ oz_cdev_start,
+ oz_cdev_stop,
+ oz_cdev_rx,
+ 0,
+ 0,
+ OZ_APPID_SERIAL},
+};
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int oz_def_app_init(void)
+{
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static void oz_def_app_term(void)
+{
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static int oz_def_app_start(struct oz_pd *pd, int resume)
+{
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_def_app_stop(struct oz_pd *pd, int pause)
+{
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
+{
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_pd_set_state(struct oz_pd *pd, unsigned state)
+{
+ pd->state = state;
+ oz_event_log(OZ_EVT_PD_STATE, 0, 0, 0, state);
+#ifdef WANT_TRACE
+ switch (state) {
+ case OZ_PD_S_IDLE:
+ oz_trace("PD State: OZ_PD_S_IDLE\n");
+ break;
+ case OZ_PD_S_CONNECTED:
+ oz_trace("PD State: OZ_PD_S_CONNECTED\n");
+ break;
+ case OZ_PD_S_STOPPED:
+ oz_trace("PD State: OZ_PD_S_STOPPED\n");
+ break;
+ case OZ_PD_S_SLEEP:
+ oz_trace("PD State: OZ_PD_S_SLEEP\n");
+ break;
+ }
+#endif /* WANT_TRACE */
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_pd_get(struct oz_pd *pd)
+{
+ atomic_inc(&pd->ref_count);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_pd_put(struct oz_pd *pd)
+{
+ if (atomic_dec_and_test(&pd->ref_count))
+ oz_pd_destroy(pd);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+struct oz_pd *oz_pd_alloc(u8 *mac_addr)
+{
+ struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
+ if (pd) {
+ int i;
+ atomic_set(&pd->ref_count, 2);
+ for (i = 0; i < OZ_APPID_MAX; i++)
+ spin_lock_init(&pd->app_lock[i]);
+ pd->last_rx_pkt_num = 0xffffffff;
+ oz_pd_set_state(pd, OZ_PD_S_IDLE);
+ pd->max_tx_size = OZ_MAX_TX_SIZE;
+ memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
+		if (0 != oz_elt_buf_init(&pd->elt_buff)) {
+			kfree(pd);
+			/* Return now: the code below dereferences pd. */
+			return 0;
+		}
+ spin_lock_init(&pd->tx_frame_lock);
+ INIT_LIST_HEAD(&pd->tx_queue);
+ INIT_LIST_HEAD(&pd->farewell_list);
+ pd->last_sent_frame = &pd->tx_queue;
+ spin_lock_init(&pd->stream_lock);
+ INIT_LIST_HEAD(&pd->stream_list);
+ }
+ return pd;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_pd_destroy(struct oz_pd *pd)
+{
+ struct list_head *e;
+ struct oz_tx_frame *f;
+ struct oz_isoc_stream *st;
+ struct oz_farewell *fwell;
+ oz_trace("Destroying PD\n");
+ /* Delete any streams.
+ */
+ e = pd->stream_list.next;
+ while (e != &pd->stream_list) {
+ st = container_of(e, struct oz_isoc_stream, link);
+ e = e->next;
+ oz_isoc_stream_free(st);
+ }
+ /* Free any queued tx frames.
+ */
+ e = pd->tx_queue.next;
+ while (e != &pd->tx_queue) {
+ f = container_of(e, struct oz_tx_frame, link);
+ e = e->next;
+ oz_retire_frame(pd, f);
+ }
+ oz_elt_buf_term(&pd->elt_buff);
+ /* Free any farewells.
+ */
+ e = pd->farewell_list.next;
+ while (e != &pd->farewell_list) {
+ fwell = container_of(e, struct oz_farewell, link);
+ e = e->next;
+ kfree(fwell);
+ }
+ /* Deallocate all frames in tx pool.
+ */
+ while (pd->tx_pool) {
+ e = pd->tx_pool;
+ pd->tx_pool = e->next;
+ kfree(container_of(e, struct oz_tx_frame, link));
+ }
+ if (pd->net_dev)
+ dev_put(pd->net_dev);
+ kfree(pd);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
+{
+ struct oz_app_if *ai;
+ int rc = 0;
+ oz_trace("oz_services_start(0x%x) resume(%d)\n", apps, resume);
+ for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
+ if (apps & (1<<ai->app_id)) {
+ if (ai->start(pd, resume)) {
+ rc = -1;
+				oz_trace("Unable to start service %d\n",
+ ai->app_id);
+ break;
+ }
+ oz_polling_lock_bh();
+ pd->total_apps |= (1<<ai->app_id);
+ if (resume)
+ pd->paused_apps &= ~(1<<ai->app_id);
+ oz_polling_unlock_bh();
+ }
+ }
+ return rc;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
+{
+ struct oz_app_if *ai;
+	oz_trace("oz_services_stop(0x%x) pause(%d)\n", apps, pause);
+ for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
+ if (apps & (1<<ai->app_id)) {
+ oz_polling_lock_bh();
+ if (pause) {
+ pd->paused_apps |= (1<<ai->app_id);
+ } else {
+ pd->total_apps &= ~(1<<ai->app_id);
+ pd->paused_apps &= ~(1<<ai->app_id);
+ }
+ oz_polling_unlock_bh();
+ ai->stop(pd, pause);
+ }
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
+{
+ struct oz_app_if *ai;
+ int more = 0;
+ for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
+ if (ai->heartbeat && (apps & (1<<ai->app_id))) {
+ if (ai->heartbeat(pd))
+ more = 1;
+ }
+ }
+ if (more)
+ oz_pd_request_heartbeat(pd);
+ if (pd->mode & OZ_F_ISOC_ANYTIME) {
+ int count = 8;
+ while (count-- && (oz_send_isoc_frame(pd) >= 0))
+ ;
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_pd_stop(struct oz_pd *pd)
+{
+ u16 stop_apps = 0;
+ oz_trace("oz_pd_stop() State = 0x%x\n", pd->state);
+ oz_pd_indicate_farewells(pd);
+ oz_polling_lock_bh();
+ stop_apps = pd->total_apps;
+ pd->total_apps = 0;
+ pd->paused_apps = 0;
+ oz_polling_unlock_bh();
+ oz_services_stop(pd, stop_apps, 0);
+ oz_polling_lock_bh();
+ oz_pd_set_state(pd, OZ_PD_S_STOPPED);
+ /* Remove from PD list.*/
+ list_del(&pd->link);
+ oz_polling_unlock_bh();
+ oz_trace("pd ref count = %d\n", atomic_read(&pd->ref_count));
+ oz_timer_delete(pd, 0);
+ oz_pd_put(pd);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_pd_sleep(struct oz_pd *pd)
+{
+ int do_stop = 0;
+ u16 stop_apps = 0;
+ oz_polling_lock_bh();
+ if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
+ oz_polling_unlock_bh();
+ return 0;
+ }
+ if (pd->keep_alive_j && pd->session_id) {
+ oz_pd_set_state(pd, OZ_PD_S_SLEEP);
+ pd->pulse_time_j = jiffies + pd->keep_alive_j;
+ oz_trace("Sleep Now %lu until %lu\n",
+ jiffies, pd->pulse_time_j);
+ } else {
+ do_stop = 1;
+ }
+ stop_apps = pd->total_apps;
+ oz_polling_unlock_bh();
+ if (do_stop) {
+ oz_pd_stop(pd);
+ } else {
+ oz_services_stop(pd, stop_apps, 1);
+ oz_timer_add(pd, OZ_TIMER_STOP, jiffies + pd->keep_alive_j, 1);
+ }
+ return do_stop;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
+{
+ struct oz_tx_frame *f = 0;
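+	/* Take a frame from the per-PD free pool when one is available and
+	 * fall back to kmalloc only if the pool is empty.
+	 */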
+ spin_lock_bh(&pd->tx_frame_lock);
+ if (pd->tx_pool) {
+ f = container_of(pd->tx_pool, struct oz_tx_frame, link);
+ pd->tx_pool = pd->tx_pool->next;
+ pd->tx_pool_count--;
+ }
+ spin_unlock_bh(&pd->tx_frame_lock);
+ if (f == 0)
+ f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
+ if (f) {
+ f->total_size = sizeof(struct oz_hdr);
+ INIT_LIST_HEAD(&f->link);
+ INIT_LIST_HEAD(&f->elt_list);
+ }
+ return f;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
+{
+ spin_lock_bh(&pd->tx_frame_lock);
+ if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
+ f->link.next = pd->tx_pool;
+ pd->tx_pool = &f->link;
+ pd->tx_pool_count++;
+ f = 0;
+	}
+ spin_unlock_bh(&pd->tx_frame_lock);
+ if (f)
+ kfree(f);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_prepare_frame(struct oz_pd *pd, int empty)
+{
+ struct oz_tx_frame *f;
+ if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
+ return -1;
+ if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
+ return -1;
+ if (!empty && !oz_are_elts_available(&pd->elt_buff))
+ return -1;
+ f = oz_tx_frame_alloc(pd);
+ if (f == 0)
+ return -1;
+ f->hdr.control =
+ (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
+ ++pd->last_tx_pkt_num;
+ put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
+ if (empty == 0) {
+ oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
+ pd->max_tx_size, &f->elt_list);
+ }
+ spin_lock(&pd->tx_frame_lock);
+ list_add_tail(&f->link, &pd->tx_queue);
+ pd->nb_queued_frames++;
+ spin_unlock(&pd->tx_frame_lock);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
+{
+ struct sk_buff *skb = 0;
+ struct net_device *dev = pd->net_dev;
+ struct oz_hdr *oz_hdr;
+ struct oz_elt *elt;
+ struct list_head *e;
+ /* Allocate skb with enough space for the lower layers as well
+ * as the space we need.
+ */
+ skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
+ if (skb == 0)
+ return 0;
+ /* Reserve the head room for lower layers.
+ */
+ skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reset_network_header(skb);
+ skb->dev = dev;
+ skb->protocol = htons(OZ_ETHERTYPE);
+ if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
+ dev->dev_addr, skb->len) < 0)
+ goto fail;
+ /* Push the tail to the end of the area we are going to copy to.
+ */
+ oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
+ f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
+ memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
+ /* Copy the elements into the frame body.
+ */
+ elt = (struct oz_elt *)(oz_hdr+1);
+ for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
+ struct oz_elt_info *ei;
+ ei = container_of(e, struct oz_elt_info, link);
+ memcpy(elt, ei->data, ei->length);
+ elt = oz_next_elt(elt);
+ }
+ return skb;
+fail:
+ kfree_skb(skb);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
+{
+ struct list_head *e;
+ struct oz_elt_info *ei;
+ e = f->elt_list.next;
+ while (e != &f->elt_list) {
+ ei = container_of(e, struct oz_elt_info, link);
+ e = e->next;
+ list_del_init(&ei->link);
+ if (ei->callback)
+ ei->callback(pd, ei->context);
+ spin_lock_bh(&pd->elt_buff.lock);
+ oz_elt_info_free(&pd->elt_buff, ei);
+ spin_unlock_bh(&pd->elt_buff.lock);
+ }
+ oz_tx_frame_free(pd, f);
+ if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
+ oz_trim_elt_pool(&pd->elt_buff);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static int oz_send_next_queued_frame(struct oz_pd *pd, int *more_data)
+{
+ struct sk_buff *skb;
+ struct oz_tx_frame *f;
+ struct list_head *e;
+ *more_data = 0;
+ spin_lock(&pd->tx_frame_lock);
+ e = pd->last_sent_frame->next;
+ if (e == &pd->tx_queue) {
+ spin_unlock(&pd->tx_frame_lock);
+ return -1;
+ }
+ pd->last_sent_frame = e;
+ if (e->next != &pd->tx_queue)
+ *more_data = 1;
+ f = container_of(e, struct oz_tx_frame, link);
+ skb = oz_build_frame(pd, f);
+ spin_unlock(&pd->tx_frame_lock);
+ oz_trace2(OZ_TRACE_TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
+ if (skb) {
+ oz_event_log(OZ_EVT_TX_FRAME,
+ 0,
+ (((u16)f->hdr.control)<<8)|f->hdr.last_pkt_num,
+ 0, f->hdr.pkt_num);
+ if (dev_queue_xmit(skb) < 0)
+ return -1;
+ }
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+void oz_send_queued_frames(struct oz_pd *pd, int backlog)
+{
+ int more;
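+	/* If nothing is queued, send an empty frame (unless isoc data may be
+	 * sent at any time and some is in flight), presumably so the PD
+	 * still receives up-to-date acknowledgement state.
+	 */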
+ if (backlog < OZ_MAX_QUEUED_FRAMES) {
+ if (oz_send_next_queued_frame(pd, &more) >= 0) {
+ while (more && oz_send_next_queued_frame(pd, &more))
+ ;
+ } else {
+ if (((pd->mode & OZ_F_ISOC_ANYTIME) == 0)
+ || (pd->isoc_sent == 0)) {
+ if (oz_prepare_frame(pd, 1) >= 0)
+ oz_send_next_queued_frame(pd, &more);
+ }
+ }
+ } else {
+ oz_send_next_queued_frame(pd, &more);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static int oz_send_isoc_frame(struct oz_pd *pd)
+{
+ struct sk_buff *skb = 0;
+ struct net_device *dev = pd->net_dev;
+ struct oz_hdr *oz_hdr;
+ struct oz_elt *elt;
+ struct list_head *e;
+ struct list_head list;
+ int total_size = sizeof(struct oz_hdr);
+ INIT_LIST_HEAD(&list);
+
+ oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
+ pd->max_tx_size, &list);
+ if (list.next == &list)
+ return 0;
+ skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
+ if (skb == 0) {
+ oz_trace("Cannot alloc skb\n");
+ oz_elt_info_free_chain(&pd->elt_buff, &list);
+ return -1;
+ }
+ skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reset_network_header(skb);
+ skb->dev = dev;
+ skb->protocol = htons(OZ_ETHERTYPE);
+ if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
+ dev->dev_addr, skb->len) < 0) {
+ kfree_skb(skb);
+ return -1;
+ }
+ oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
+ oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
+ oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
+ elt = (struct oz_elt *)(oz_hdr+1);
+
+ for (e = list.next; e != &list; e = e->next) {
+ struct oz_elt_info *ei;
+ ei = container_of(e, struct oz_elt_info, link);
+ memcpy(elt, ei->data, ei->length);
+ elt = oz_next_elt(elt);
+ }
+ oz_event_log(OZ_EVT_TX_ISOC, 0, 0, 0, 0);
+ dev_queue_xmit(skb);
+ oz_elt_info_free_chain(&pd->elt_buff, &list);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
+{
+ struct list_head *e;
+ struct oz_tx_frame *f;
+ struct list_head *first = 0;
+ struct list_head *last = 0;
+ u8 diff;
+ u32 pkt_num;
+
+ spin_lock(&pd->tx_frame_lock);
+ e = pd->tx_queue.next;
+ while (e != &pd->tx_queue) {
+ f = container_of(e, struct oz_tx_frame, link);
+ pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
+ diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
+ if (diff > OZ_LAST_PN_HALF_CYCLE)
+ break;
+ if (first == 0)
+ first = e;
+ last = e;
+ e = e->next;
+ pd->nb_queued_frames--;
+ }
+ if (first) {
+ last->next->prev = &pd->tx_queue;
+ pd->tx_queue.next = last->next;
+ last->next = 0;
+ }
+ pd->last_sent_frame = &pd->tx_queue;
+ spin_unlock(&pd->tx_frame_lock);
+ while (first) {
+ f = container_of(first, struct oz_tx_frame, link);
+ first = first->next;
+ oz_retire_frame(pd, f);
+ }
+}
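+/* Worked example of the retire test above: packet numbers compare modulo
+ * 256 (OZ_LAST_PN_MASK + 1). With lpn == 2 a queued frame with
+ * pkt_num == 0xfe gives diff == 4 (<= OZ_LAST_PN_HALF_CYCLE) and is
+ * retired, while pkt_num == 0x10 gives diff == 0xf2 (> 127) so the walk
+ * stops: only frames at or before the acknowledged number are freed.
+ */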
+/*------------------------------------------------------------------------------
+ * Precondition: stream_lock must be held.
+ * Context: softirq
+ */
+static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
+{
+ struct list_head *e;
+ struct oz_isoc_stream *st;
+ list_for_each(e, &pd->stream_list) {
+ st = container_of(e, struct oz_isoc_stream, link);
+ if (st->ep_num == ep_num)
+ return st;
+ }
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
+{
+ struct oz_isoc_stream *st =
+ kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
+ if (!st)
+ return -ENOMEM;
+ st->ep_num = ep_num;
+ spin_lock_bh(&pd->stream_lock);
+ if (!pd_stream_find(pd, ep_num)) {
+ list_add(&st->link, &pd->stream_list);
+ st = 0;
+ }
+ spin_unlock_bh(&pd->stream_lock);
+ if (st)
+ kfree(st);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+static void oz_isoc_stream_free(struct oz_isoc_stream *st)
+{
+ if (st->skb)
+ kfree_skb(st->skb);
+ kfree(st);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
+{
+ struct oz_isoc_stream *st;
+ spin_lock_bh(&pd->stream_lock);
+ st = pd_stream_find(pd, ep_num);
+ if (st)
+ list_del(&st->link);
+ spin_unlock_bh(&pd->stream_lock);
+ if (st)
+ oz_isoc_stream_free(st);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: any
+ */
+static void oz_isoc_destructor(struct sk_buff *skb)
+{
+ atomic_dec(&g_submitted_isoc);
+ oz_event_log(OZ_EVT_TX_ISOC_DONE, atomic_read(&g_submitted_isoc),
+ 0, skb, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, u8 *data, int len)
+{
+ struct net_device *dev = pd->net_dev;
+ struct oz_isoc_stream *st;
+ u8 nb_units = 0;
+ struct sk_buff *skb = 0;
+ struct oz_hdr *oz_hdr = 0;
+ int size = 0;
+ spin_lock_bh(&pd->stream_lock);
+ st = pd_stream_find(pd, ep_num);
+ if (st) {
+ skb = st->skb;
+ st->skb = 0;
+ nb_units = st->nb_units;
+ st->nb_units = 0;
+ oz_hdr = st->oz_hdr;
+ size = st->size;
+ }
+ spin_unlock_bh(&pd->stream_lock);
+ if (!st)
+ return 0;
+ if (!skb) {
+ /* Allocate enough space for max size frame. */
+ skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
+ GFP_ATOMIC);
+ if (skb == 0)
+ return 0;
+ /* Reserve the head room for lower layers. */
+ skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reset_network_header(skb);
+ skb->dev = dev;
+ skb->protocol = htons(OZ_ETHERTYPE);
+ size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
+ oz_hdr = (struct oz_hdr *)skb_put(skb, size);
+ }
+ memcpy(skb_put(skb, len), data, len);
+ size += len;
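+	/* Note: st was looked up under stream_lock above; reusing it below
+	 * assumes the stream is not deleted concurrently (delete also takes
+	 * stream_lock, but the pointer is held across the unlocked region).
+	 */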
+ if (++nb_units < pd->ms_per_isoc) {
+ spin_lock_bh(&pd->stream_lock);
+ st->skb = skb;
+ st->nb_units = nb_units;
+ st->oz_hdr = oz_hdr;
+ st->size = size;
+ spin_unlock_bh(&pd->stream_lock);
+ } else {
+ struct oz_hdr oz;
+ struct oz_isoc_large iso;
+ spin_lock_bh(&pd->stream_lock);
+ iso.frame_number = st->frame_num;
+ st->frame_num += nb_units;
+ spin_unlock_bh(&pd->stream_lock);
+ oz.control =
+ (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
+ oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
+ oz.pkt_num = 0;
+ iso.endpoint = ep_num;
+ iso.format = OZ_DATA_F_ISOC_LARGE;
+ iso.ms_data = nb_units;
+ memcpy(oz_hdr, &oz, sizeof(oz));
+ memcpy(oz_hdr+1, &iso, sizeof(iso));
+ if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
+ dev->dev_addr, skb->len) < 0) {
+ kfree_skb(skb);
+ return -1;
+ }
+ if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
+ skb->destructor = oz_isoc_destructor;
+ atomic_inc(&g_submitted_isoc);
+ oz_event_log(OZ_EVT_TX_ISOC, nb_units, iso.frame_number,
+ skb, atomic_read(&g_submitted_isoc));
+ if (dev_queue_xmit(skb) < 0)
+ return -1;
+ } else {
+ oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, 0, 0);
+ kfree_skb(skb);
+ }
+ }
+ return 0;
+}
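+/* Summary of the above: isoc units are appended to a per-stream skb until
+ * ms_per_isoc units have accumulated; the whole batch is then sent as one
+ * OZ_F_ISOC frame headed by an oz_isoc_large descriptor, or dropped if
+ * OZ_MAX_SUBMITTED_ISOC frames are already in flight.
+ */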
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_apps_init(void)
+{
+ int i;
+ for (i = 0; i < OZ_APPID_MAX; i++)
+ if (g_app_if[i].init)
+ g_app_if[i].init();
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_apps_term(void)
+{
+ int i;
+ /* Terminate all the apps. */
+ for (i = 0; i < OZ_APPID_MAX; i++)
+ if (g_app_if[i].term)
+ g_app_if[i].term();
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
+{
+ struct oz_app_if *ai;
+ if (app_id == 0 || app_id > OZ_APPID_MAX)
+ return;
+ ai = &g_app_if[app_id-1];
+ ai->rx(pd, elt);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_pd_indicate_farewells(struct oz_pd *pd)
+{
+ struct oz_farewell *f;
+ struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
+ while (1) {
+ oz_polling_lock_bh();
+ if (list_empty(&pd->farewell_list)) {
+ oz_polling_unlock_bh();
+ break;
+ }
+ f = list_first_entry(&pd->farewell_list,
+ struct oz_farewell, link);
+ list_del(&f->link);
+ oz_polling_unlock_bh();
+ if (ai->farewell)
+ ai->farewell(pd, f->ep_num, f->report, f->len);
+ kfree(f);
+ }
+}
diff --git a/drivers/staging/ozwpan/ozpd.h b/drivers/staging/ozwpan/ozpd.h
new file mode 100644
index 000000000000..afc77f0260f0
--- /dev/null
+++ b/drivers/staging/ozwpan/ozpd.h
@@ -0,0 +1,121 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZPD_H_
+#define _OZPD_H_
+
+#include "ozeltbuf.h"
+
+/* PD state
+ */
+#define OZ_PD_S_IDLE 0x1
+#define OZ_PD_S_CONNECTED 0x2
+#define OZ_PD_S_SLEEP 0x4
+#define OZ_PD_S_STOPPED 0x8
+
+/* Timer event types.
+ */
+#define OZ_TIMER_TOUT 1
+#define OZ_TIMER_HEARTBEAT 2
+#define OZ_TIMER_STOP 3
+
+/* Data structure that holds information on a frame for transmission. This is
+ * built when the frame is first transmitted and is used to rebuild the frame
+ * if a re-transmission is required.
+ */
+struct oz_tx_frame {
+ struct list_head link;
+ struct list_head elt_list;
+ struct oz_hdr hdr;
+ int total_size;
+};
+
+struct oz_isoc_stream {
+ struct list_head link;
+ u8 ep_num;
+ u8 frame_num;
+ u8 nb_units;
+ int size;
+ struct sk_buff *skb;
+ struct oz_hdr *oz_hdr;
+};
+
+struct oz_farewell {
+	struct list_head link;
+	u8 ep_num;
+	u8 index;
+	u8 len;
+	u8 report[1];	/* Must remain last: allocated with trailing space. */
+};
+
+/* Data structure that holds information on a specific peripheral device (PD).
+ */
+struct oz_pd {
+ struct list_head link;
+ atomic_t ref_count;
+ u8 mac_addr[ETH_ALEN];
+ unsigned state;
+ unsigned state_flags;
+ unsigned send_flags;
+ u16 total_apps;
+ u16 paused_apps;
+ u8 session_id;
+ u8 param_rsp_status;
+ u8 pd_info;
+ u8 isoc_sent;
+ u32 last_rx_pkt_num;
+ u32 last_tx_pkt_num;
+ u32 trigger_pkt_num;
+ unsigned long pulse_time_j;
+ unsigned long timeout_time_j;
+ unsigned long pulse_period_j;
+ unsigned long presleep_j;
+ unsigned long keep_alive_j;
+ unsigned long last_rx_time_j;
+ struct oz_elt_buf elt_buff;
+ void *app_ctx[OZ_APPID_MAX];
+ spinlock_t app_lock[OZ_APPID_MAX];
+ int max_tx_size;
+ u8 heartbeat_requested;
+ u8 mode;
+ u8 ms_per_isoc;
+ unsigned max_stream_buffering;
+ int nb_queued_frames;
+ struct list_head *tx_pool;
+ int tx_pool_count;
+ spinlock_t tx_frame_lock;
+ struct list_head *last_sent_frame;
+ struct list_head tx_queue;
+ struct list_head farewell_list;
+ spinlock_t stream_lock;
+ struct list_head stream_list;
+ struct net_device *net_dev;
+};
+
+#define OZ_MAX_QUEUED_FRAMES 4
+
+struct oz_pd *oz_pd_alloc(u8 *mac_addr);
+void oz_pd_destroy(struct oz_pd *pd);
+void oz_pd_get(struct oz_pd *pd);
+void oz_pd_put(struct oz_pd *pd);
+void oz_pd_set_state(struct oz_pd *pd, unsigned state);
+void oz_pd_indicate_farewells(struct oz_pd *pd);
+int oz_pd_sleep(struct oz_pd *pd);
+void oz_pd_stop(struct oz_pd *pd);
+void oz_pd_heartbeat(struct oz_pd *pd, u16 apps);
+int oz_services_start(struct oz_pd *pd, u16 apps, int resume);
+void oz_services_stop(struct oz_pd *pd, u16 apps, int pause);
+int oz_prepare_frame(struct oz_pd *pd, int empty);
+void oz_send_queued_frames(struct oz_pd *pd, int backlog);
+void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn);
+int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num);
+int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num);
+int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, u8 *data, int len);
+void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt);
+void oz_apps_init(void);
+void oz_apps_term(void);
+
+#endif /* _OZPD_H_ */
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
new file mode 100644
index 000000000000..ad857eeabbb7
--- /dev/null
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -0,0 +1,957 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+#include <linux/ieee80211.h>
+#include "ozconfig.h"
+#include "ozprotocol.h"
+#include "ozeltbuf.h"
+#include "ozpd.h"
+#include "ozproto.h"
+#include "ozusbsvc.h"
+#include "oztrace.h"
+#include "ozappif.h"
+#include "ozevent.h"
+#include <asm/unaligned.h>
+#include <linux/uaccess.h>
+#include <net/psnap.h>
+/*------------------------------------------------------------------------------
+ */
+#define OZ_CF_CONN_SUCCESS 1
+#define OZ_CF_CONN_FAILURE 2
+
+#define OZ_DO_STOP 1
+#define OZ_DO_SLEEP 2
+
+/* States of the timer.
+ */
+#define OZ_TIMER_IDLE 0
+#define OZ_TIMER_SET 1
+#define OZ_TIMER_IN_HANDLER 2
+
+#define OZ_MAX_TIMER_POOL_SIZE 16
+
+/*------------------------------------------------------------------------------
+ */
+struct oz_binding {
+ struct packet_type ptype;
+ char name[OZ_MAX_BINDING_LEN];
+ struct oz_binding *next;
+};
+
+struct oz_timer {
+ struct list_head link;
+ struct oz_pd *pd;
+ unsigned long due_time;
+ int type;
+};
+/*------------------------------------------------------------------------------
+ * Static external variables.
+ */
+static DEFINE_SPINLOCK(g_polling_lock);
+static LIST_HEAD(g_pd_list);
+static struct oz_binding *g_binding;
+static DEFINE_SPINLOCK(g_binding_lock);
+static struct sk_buff_head g_rx_queue;
+static u8 g_session_id;
+static u16 g_apps = 0x1;
+static int g_processing_rx;
+static struct timer_list g_timer;
+static struct oz_timer *g_cur_timer;
+static struct list_head *g_timer_pool;
+static int g_timer_pool_count;
+static int g_timer_state = OZ_TIMER_IDLE;
+static LIST_HEAD(g_timer_list);
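+/* All protocol timeouts share the single kernel timer g_timer. Pending
+ * oz_timer entries are kept on g_timer_list sorted by due_time;
+ * g_cur_timer is the entry the kernel timer is currently armed for, and
+ * retired entries are recycled through g_timer_pool.
+ */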
+/*------------------------------------------------------------------------------
+ */
+static void oz_protocol_timer_start(void);
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static u8 oz_get_new_session_id(u8 exclude)
+{
+ if (++g_session_id == 0)
+ g_session_id = 1;
+ if (g_session_id == exclude) {
+ if (++g_session_id == 0)
+ g_session_id = 1;
+ }
+ return g_session_id;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
+{
+ struct sk_buff *skb;
+ struct net_device *dev = pd->net_dev;
+ struct oz_hdr *oz_hdr;
+ struct oz_elt *elt;
+ struct oz_elt_connect_rsp *body;
+ int sz = sizeof(struct oz_hdr) + sizeof(struct oz_elt) +
+ sizeof(struct oz_elt_connect_rsp);
+ skb = alloc_skb(sz + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
+ if (skb == 0)
+ return;
+ skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reset_network_header(skb);
+ oz_hdr = (struct oz_hdr *)skb_put(skb, sz);
+ elt = (struct oz_elt *)(oz_hdr+1);
+ body = (struct oz_elt_connect_rsp *)(elt+1);
+ skb->dev = dev;
+ skb->protocol = htons(OZ_ETHERTYPE);
+ /* Fill in device header */
+ if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
+ dev->dev_addr, skb->len) < 0) {
+ kfree_skb(skb);
+ return;
+ }
+ oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT);
+ oz_hdr->last_pkt_num = 0;
+ put_unaligned(0, &oz_hdr->pkt_num);
+ oz_event_log(OZ_EVT_CONNECT_RSP, 0, 0, 0, 0);
+ elt->type = OZ_ELT_CONNECT_RSP;
+ elt->length = sizeof(struct oz_elt_connect_rsp);
+ memset(body, 0, sizeof(struct oz_elt_connect_rsp));
+ body->status = status;
+ if (status == 0) {
+ body->mode = pd->mode;
+ body->session_id = pd->session_id;
+ put_unaligned(cpu_to_le16(pd->total_apps), &body->apps);
+ }
+ oz_trace("TX: OZ_ELT_CONNECT_RSP %d", status);
+ dev_queue_xmit(skb);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static void pd_set_keepalive(struct oz_pd *pd, u8 kalive)
+{
+ unsigned long keep_alive = kalive & OZ_KALIVE_VALUE_MASK;
+
+ switch (kalive & OZ_KALIVE_TYPE_MASK) {
+ case OZ_KALIVE_SPECIAL:
+ pd->keep_alive_j =
+ oz_ms_to_jiffies(keep_alive * 1000*60*60*24*20);
+ break;
+ case OZ_KALIVE_SECS:
+ pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000);
+ break;
+ case OZ_KALIVE_MINS:
+ pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000*60);
+ break;
+ case OZ_KALIVE_HOURS:
+ pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000*60*60);
+ break;
+ default:
+ pd->keep_alive_j = 0;
+ }
+ oz_trace("Keepalive = %lu jiffies\n", pd->keep_alive_j);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static void pd_set_presleep(struct oz_pd *pd, u8 presleep)
+{
+ if (presleep)
+ pd->presleep_j = oz_ms_to_jiffies(presleep*100);
+ else
+ pd->presleep_j = OZ_PRESLEEP_TOUT_J;
+ oz_trace("Presleep time = %lu jiffies\n", pd->presleep_j);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
+ u8 *pd_addr, struct net_device *net_dev)
+{
+ struct oz_pd *pd;
+ struct oz_elt_connect_req *body =
+ (struct oz_elt_connect_req *)(elt+1);
+ u8 rsp_status = OZ_STATUS_SUCCESS;
+ u8 stop_needed = 0;
+ u16 new_apps = g_apps;
+ struct net_device *old_net_dev = 0;
+ struct oz_pd *free_pd = 0;
+ if (cur_pd) {
+ pd = cur_pd;
+ spin_lock_bh(&g_polling_lock);
+ } else {
+ struct oz_pd *pd2 = 0;
+ struct list_head *e;
+ pd = oz_pd_alloc(pd_addr);
+ if (pd == 0)
+ return 0;
+ pd->last_rx_time_j = jiffies;
+ spin_lock_bh(&g_polling_lock);
+ list_for_each(e, &g_pd_list) {
+ pd2 = container_of(e, struct oz_pd, link);
+ if (memcmp(pd2->mac_addr, pd_addr, ETH_ALEN) == 0) {
+ free_pd = pd;
+ pd = pd2;
+ break;
+ }
+ }
+ if (pd != pd2)
+ list_add_tail(&pd->link, &g_pd_list);
+ }
+ if (pd == 0) {
+ spin_unlock_bh(&g_polling_lock);
+ return 0;
+ }
+ if (pd->net_dev != net_dev) {
+ old_net_dev = pd->net_dev;
+ dev_hold(net_dev);
+ pd->net_dev = net_dev;
+ }
+ oz_trace("Host vendor: %d\n", body->host_vendor);
+ pd->max_tx_size = OZ_MAX_TX_SIZE;
+ pd->mode = body->mode;
+ pd->pd_info = body->pd_info;
+ if (pd->mode & OZ_F_ISOC_NO_ELTS) {
+ pd->mode |= OZ_F_ISOC_ANYTIME;
+ pd->ms_per_isoc = body->ms_per_isoc;
+ if (!pd->ms_per_isoc)
+ pd->ms_per_isoc = 4;
+ }
+ if (body->max_len_div16)
+ pd->max_tx_size = ((u16)body->max_len_div16)<<4;
+ oz_trace("Max frame:%u Ms per isoc:%u\n",
+ pd->max_tx_size, pd->ms_per_isoc);
+ pd->max_stream_buffering = 3*1024;
+ pd->timeout_time_j = jiffies + OZ_CONNECTION_TOUT_J;
+ pd->pulse_period_j = OZ_QUANTUM_J;
+ pd_set_presleep(pd, body->presleep);
+ pd_set_keepalive(pd, body->keep_alive);
+
+ new_apps &= le16_to_cpu(get_unaligned(&body->apps));
+ if ((new_apps & 0x1) && (body->session_id)) {
+ if (pd->session_id) {
+ if (pd->session_id != body->session_id) {
+ rsp_status = OZ_STATUS_SESSION_MISMATCH;
+ goto done;
+ }
+ } else {
+ new_apps &= ~0x1; /* Resume not permitted */
+ pd->session_id =
+ oz_get_new_session_id(body->session_id);
+ }
+ } else {
+ if (pd->session_id && !body->session_id) {
+ rsp_status = OZ_STATUS_SESSION_TEARDOWN;
+ stop_needed = 1;
+ } else {
+ new_apps &= ~0x1; /* Resume not permitted */
+ pd->session_id =
+ oz_get_new_session_id(body->session_id);
+ }
+ }
+done:
+ if (rsp_status == OZ_STATUS_SUCCESS) {
+ u16 start_apps = new_apps & ~pd->total_apps & ~0x1;
+ u16 stop_apps = pd->total_apps & ~new_apps & ~0x1;
+ u16 resume_apps = new_apps & pd->paused_apps & ~0x1;
+ spin_unlock_bh(&g_polling_lock);
+ oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
+ oz_timer_delete(pd, OZ_TIMER_STOP);
+ oz_trace("new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
+ new_apps, pd->total_apps, pd->paused_apps);
+ if (start_apps) {
+ if (oz_services_start(pd, start_apps, 0))
+ rsp_status = OZ_STATUS_TOO_MANY_PDS;
+ }
+ if (resume_apps)
+ if (oz_services_start(pd, resume_apps, 1))
+ rsp_status = OZ_STATUS_TOO_MANY_PDS;
+ if (stop_apps)
+ oz_services_stop(pd, stop_apps, 0);
+ oz_pd_request_heartbeat(pd);
+ } else {
+ spin_unlock_bh(&g_polling_lock);
+ }
+ oz_send_conn_rsp(pd, rsp_status);
+ if (rsp_status != OZ_STATUS_SUCCESS) {
+ if (stop_needed)
+ oz_pd_stop(pd);
+ oz_pd_put(pd);
+ pd = 0;
+ }
+ if (old_net_dev)
+ dev_put(old_net_dev);
+ if (free_pd)
+ oz_pd_destroy(free_pd);
+ return pd;
+}
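+/* Session rules implemented above: a connect request repeating the current
+ * session_id resumes the session; a different non-zero id is rejected with
+ * OZ_STATUS_SESSION_MISMATCH; a zero id while a session exists tears the
+ * old session down (OZ_STATUS_SESSION_TEARDOWN) before a new connect can
+ * succeed.
+ */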
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
+ u8 *report, u8 len)
+{
+ struct oz_farewell *f;
+ struct oz_farewell *f2;
+ int found = 0;
+ f = kmalloc(sizeof(struct oz_farewell) + len - 1, GFP_ATOMIC);
+ if (!f)
+ return;
+ f->ep_num = ep_num;
+	f->index = index;
+	f->len = len;
+	memcpy(f->report, report, len);
+ oz_trace("RX: Adding farewell report\n");
+ spin_lock(&g_polling_lock);
+ list_for_each_entry(f2, &pd->farewell_list, link) {
+ if ((f2->ep_num == ep_num) && (f2->index == index)) {
+ found = 1;
+ list_del(&f2->link);
+ break;
+ }
+ }
+ list_add_tail(&f->link, &pd->farewell_list);
+ spin_unlock(&g_polling_lock);
+ if (found)
+ kfree(f2);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+static void oz_rx_frame(struct sk_buff *skb)
+{
+ u8 *mac_hdr;
+ u8 *src_addr;
+ struct oz_elt *elt;
+ int length;
+ struct oz_pd *pd = 0;
+ struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
+ int dup = 0;
+ u32 pkt_num;
+
+ oz_event_log(OZ_EVT_RX_PROCESS, 0,
+ (((u16)oz_hdr->control)<<8)|oz_hdr->last_pkt_num,
+ 0, oz_hdr->pkt_num);
+ oz_trace2(OZ_TRACE_RX_FRAMES,
+ "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
+ oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
+ mac_hdr = skb_mac_header(skb);
+	src_addr = &mac_hdr[ETH_ALEN];
+ length = skb->len;
+
+ /* Check the version field */
+ if (oz_get_prot_ver(oz_hdr->control) != OZ_PROTOCOL_VERSION) {
+ oz_trace("Incorrect protocol version: %d\n",
+ oz_get_prot_ver(oz_hdr->control));
+ goto done;
+ }
+
+ pkt_num = le32_to_cpu(get_unaligned(&oz_hdr->pkt_num));
+
+ pd = oz_pd_find(src_addr);
+ if (pd) {
+ pd->last_rx_time_j = jiffies;
+ oz_timer_add(pd, OZ_TIMER_TOUT,
+ pd->last_rx_time_j + pd->presleep_j, 1);
+ if (pkt_num != pd->last_rx_pkt_num) {
+ pd->last_rx_pkt_num = pkt_num;
+ } else {
+ dup = 1;
+ oz_trace("Duplicate frame\n");
+ }
+ }
+
+ if (pd && !dup && ((pd->mode & OZ_MODE_MASK) == OZ_MODE_TRIGGERED)) {
+ pd->last_sent_frame = &pd->tx_queue;
+ if (oz_hdr->control & OZ_F_ACK) {
+ /* Retire completed frames */
+ oz_retire_tx_frames(pd, oz_hdr->last_pkt_num);
+ }
+ if ((oz_hdr->control & OZ_F_ACK_REQUESTED) &&
+ (pd->state == OZ_PD_S_CONNECTED)) {
+ int backlog = pd->nb_queued_frames;
+ pd->trigger_pkt_num = pkt_num;
+ /* Send queued frames */
+ while (oz_prepare_frame(pd, 0) >= 0)
+ ;
+ oz_send_queued_frames(pd, backlog);
+ }
+ }
+
+ length -= sizeof(struct oz_hdr);
+ elt = (struct oz_elt *)((u8 *)oz_hdr + sizeof(struct oz_hdr));
+
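+	/* Walk the element chain: each element is a type/length header
+	 * followed by elt->length bytes of body; give up if an element
+	 * would overrun the remaining frame length.
+	 */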
+ while (length >= sizeof(struct oz_elt)) {
+ length -= sizeof(struct oz_elt) + elt->length;
+ if (length < 0)
+ break;
+ switch (elt->type) {
+ case OZ_ELT_CONNECT_REQ:
+ oz_event_log(OZ_EVT_CONNECT_REQ, 0, 0, 0, 0);
+ oz_trace("RX: OZ_ELT_CONNECT_REQ\n");
+ pd = oz_connect_req(pd, elt, src_addr, skb->dev);
+ break;
+ case OZ_ELT_DISCONNECT:
+ oz_trace("RX: OZ_ELT_DISCONNECT\n");
+ if (pd)
+ oz_pd_sleep(pd);
+ break;
+ case OZ_ELT_UPDATE_PARAM_REQ: {
+ struct oz_elt_update_param *body =
+ (struct oz_elt_update_param *)(elt + 1);
+ oz_trace("RX: OZ_ELT_UPDATE_PARAM_REQ\n");
+ if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
+ spin_lock(&g_polling_lock);
+ pd_set_keepalive(pd, body->keepalive);
+ pd_set_presleep(pd, body->presleep);
+ spin_unlock(&g_polling_lock);
+ }
+ }
+ break;
+ case OZ_ELT_FAREWELL_REQ: {
+ struct oz_elt_farewell *body =
+ (struct oz_elt_farewell *)(elt + 1);
+ oz_trace("RX: OZ_ELT_FAREWELL_REQ\n");
+ oz_add_farewell(pd, body->ep_num,
+ body->index, body->report,
+ elt->length + 1 - sizeof(*body));
+ }
+ break;
+ case OZ_ELT_APP_DATA:
+ if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
+ struct oz_app_hdr *app_hdr =
+ (struct oz_app_hdr *)(elt+1);
+ if (dup)
+ break;
+ oz_handle_app_elt(pd, app_hdr->app_id, elt);
+ }
+ break;
+ default:
+ oz_trace("RX: Unknown elt %02x\n", elt->type);
+ }
+ elt = oz_next_elt(elt);
+ }
+done:
+ if (pd)
+ oz_pd_put(pd);
+ consume_skb(skb);
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_protocol_term(void)
+{
+ struct list_head *chain = 0;
+ del_timer_sync(&g_timer);
+ /* Walk the list of bindings and remove each one.
+ */
+ spin_lock_bh(&g_binding_lock);
+ while (g_binding) {
+ struct oz_binding *b = g_binding;
+ g_binding = b->next;
+ spin_unlock_bh(&g_binding_lock);
+ dev_remove_pack(&b->ptype);
+ if (b->ptype.dev)
+ dev_put(b->ptype.dev);
+ kfree(b);
+ spin_lock_bh(&g_binding_lock);
+ }
+ spin_unlock_bh(&g_binding_lock);
+ /* Walk the list of PDs and stop each one. This causes the PD to be
+ * removed from the list so we can just pull each one from the head
+ * of the list.
+ */
+ spin_lock_bh(&g_polling_lock);
+ while (!list_empty(&g_pd_list)) {
+ struct oz_pd *pd =
+ list_first_entry(&g_pd_list, struct oz_pd, link);
+ oz_pd_get(pd);
+ spin_unlock_bh(&g_polling_lock);
+ oz_pd_stop(pd);
+ oz_pd_put(pd);
+ spin_lock_bh(&g_polling_lock);
+ }
+ chain = g_timer_pool;
+ g_timer_pool = 0;
+ spin_unlock_bh(&g_polling_lock);
+ while (chain) {
+ struct oz_timer *t = container_of(chain, struct oz_timer, link);
+ chain = chain->next;
+ kfree(t);
+ }
+ oz_trace("Protocol stopped\n");
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_pd_handle_timer(struct oz_pd *pd, int type)
+{
+ switch (type) {
+ case OZ_TIMER_TOUT:
+ oz_pd_sleep(pd);
+ break;
+ case OZ_TIMER_STOP:
+ oz_pd_stop(pd);
+ break;
+ case OZ_TIMER_HEARTBEAT: {
+ u16 apps = 0;
+ spin_lock_bh(&g_polling_lock);
+ pd->heartbeat_requested = 0;
+ if (pd->state & OZ_PD_S_CONNECTED)
+ apps = pd->total_apps;
+ spin_unlock_bh(&g_polling_lock);
+ if (apps)
+ oz_pd_heartbeat(pd, apps);
+ }
+ break;
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_protocol_timer(unsigned long arg)
+{
+ struct oz_timer *t;
+ struct oz_timer *t2;
+ struct oz_pd *pd;
+ spin_lock_bh(&g_polling_lock);
+ if (!g_cur_timer) {
+ /* This happens if we remove the current timer but can't stop
+ * the timer from firing. In this case just get out.
+ */
+ oz_event_log(OZ_EVT_TIMER, 0, 0, 0, 0);
+ spin_unlock_bh(&g_polling_lock);
+ return;
+ }
+ g_timer_state = OZ_TIMER_IN_HANDLER;
+ t = g_cur_timer;
+ g_cur_timer = 0;
+ list_del(&t->link);
+ spin_unlock_bh(&g_polling_lock);
+ do {
+ pd = t->pd;
+ oz_event_log(OZ_EVT_TIMER, 0, t->type, 0, 0);
+ oz_pd_handle_timer(pd, t->type);
+ spin_lock_bh(&g_polling_lock);
+ if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
+ t->link.next = g_timer_pool;
+ g_timer_pool = &t->link;
+ g_timer_pool_count++;
+ t = 0;
+ }
+ if (!list_empty(&g_timer_list)) {
+ t2 = container_of(g_timer_list.next,
+ struct oz_timer, link);
+ if (time_before_eq(t2->due_time, jiffies))
+ list_del(&t2->link);
+ else
+ t2 = 0;
+ } else {
+ t2 = 0;
+ }
+ spin_unlock_bh(&g_polling_lock);
+ oz_pd_put(pd);
+ if (t)
+ kfree(t);
+ t = t2;
+ } while (t);
+ g_timer_state = OZ_TIMER_IDLE;
+ oz_protocol_timer_start();
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_protocol_timer_start(void)
+{
+ spin_lock_bh(&g_polling_lock);
+ if (!list_empty(&g_timer_list)) {
+ g_cur_timer =
+ container_of(g_timer_list.next, struct oz_timer, link);
+ if (g_timer_state == OZ_TIMER_SET) {
+ oz_event_log(OZ_EVT_TIMER_CTRL, 3,
+ (u16)g_cur_timer->type, 0,
+ (unsigned)g_cur_timer->due_time);
+ mod_timer(&g_timer, g_cur_timer->due_time);
+ } else {
+ oz_event_log(OZ_EVT_TIMER_CTRL, 4,
+ (u16)g_cur_timer->type, 0,
+ (unsigned)g_cur_timer->due_time);
+ g_timer.expires = g_cur_timer->due_time;
+ g_timer.function = oz_protocol_timer;
+ g_timer.data = 0;
+ add_timer(&g_timer);
+ }
+ g_timer_state = OZ_TIMER_SET;
+ } else {
+ oz_trace("No queued timers\n");
+ }
+ spin_unlock_bh(&g_polling_lock);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
+ int remove)
+{
+ struct list_head *e;
+ struct oz_timer *t = 0;
+ int restart_needed = 0;
+ oz_event_log(OZ_EVT_TIMER_CTRL, 1, (u16)type, 0, (unsigned)due_time);
+ spin_lock(&g_polling_lock);
+ if (remove) {
+ list_for_each(e, &g_timer_list) {
+ t = container_of(e, struct oz_timer, link);
+ if ((t->pd == pd) && (t->type == type)) {
+ if (g_cur_timer == t) {
+ restart_needed = 1;
+ g_cur_timer = 0;
+ }
+ list_del(e);
+ break;
+ }
+ t = 0;
+ }
+ }
+ if (!t) {
+ if (g_timer_pool) {
+ t = container_of(g_timer_pool, struct oz_timer, link);
+ g_timer_pool = g_timer_pool->next;
+ g_timer_pool_count--;
+ } else {
+ t = kmalloc(sizeof(struct oz_timer), GFP_ATOMIC);
+ }
+ if (t) {
+ t->pd = pd;
+ t->type = type;
+ oz_pd_get(pd);
+ }
+ }
+ if (t) {
+ struct oz_timer *t2;
+ t->due_time = due_time;
+ list_for_each(e, &g_timer_list) {
+ t2 = container_of(e, struct oz_timer, link);
+ if (time_before(due_time, t2->due_time)) {
+ if (t2 == g_cur_timer) {
+ g_cur_timer = 0;
+ restart_needed = 1;
+ }
+ break;
+ }
+ }
+ list_add_tail(&t->link, e);
+ }
+ if (g_timer_state == OZ_TIMER_IDLE)
+ restart_needed = 1;
+ else if (g_timer_state == OZ_TIMER_IN_HANDLER)
+ restart_needed = 0;
+ spin_unlock(&g_polling_lock);
+ if (restart_needed)
+ oz_protocol_timer_start();
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_timer_delete(struct oz_pd *pd, int type)
+{
+ struct list_head *chain = 0;
+ struct oz_timer *t;
+ struct oz_timer *n;
+ int restart_needed = 0;
+ int release = 0;
+ oz_event_log(OZ_EVT_TIMER_CTRL, 2, (u16)type, 0, 0);
+ spin_lock(&g_polling_lock);
+ list_for_each_entry_safe(t, n, &g_timer_list, link) {
+ if ((t->pd == pd) && ((type == 0) || (t->type == type))) {
+ if (g_cur_timer == t) {
+ restart_needed = 1;
+ g_cur_timer = 0;
+ del_timer(&g_timer);
+ }
+ list_del(&t->link);
+ release++;
+ if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
+ t->link.next = g_timer_pool;
+ g_timer_pool = &t->link;
+ g_timer_pool_count++;
+ } else {
+ t->link.next = chain;
+ chain = &t->link;
+ }
+ if (type)
+ break;
+ }
+ }
+ if (g_timer_state == OZ_TIMER_IN_HANDLER)
+ restart_needed = 0;
+ else if (restart_needed)
+ g_timer_state = OZ_TIMER_IDLE;
+ spin_unlock(&g_polling_lock);
+ if (restart_needed)
+ oz_protocol_timer_start();
+ while (release--)
+ oz_pd_put(pd);
+ while (chain) {
+ t = container_of(chain, struct oz_timer, link);
+ chain = chain->next;
+ kfree(t);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_pd_request_heartbeat(struct oz_pd *pd)
+{
+ unsigned long now = jiffies;
+ unsigned long t;
+ spin_lock(&g_polling_lock);
+ if (pd->heartbeat_requested) {
+ spin_unlock(&g_polling_lock);
+ return;
+ }
+ if (pd->pulse_period_j)
+ t = ((now / pd->pulse_period_j) + 1) * pd->pulse_period_j;
+ else
+ t = now + 1;
+ pd->heartbeat_requested = 1;
+ spin_unlock(&g_polling_lock);
+ oz_timer_add(pd, OZ_TIMER_HEARTBEAT, t, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+struct oz_pd *oz_pd_find(u8 *mac_addr)
+{
+ struct oz_pd *pd;
+ struct list_head *e;
+ spin_lock_bh(&g_polling_lock);
+ list_for_each(e, &g_pd_list) {
+ pd = container_of(e, struct oz_pd, link);
+ if (memcmp(pd->mac_addr, mac_addr, ETH_ALEN) == 0) {
+ atomic_inc(&pd->ref_count);
+ spin_unlock_bh(&g_polling_lock);
+ return pd;
+ }
+ }
+ spin_unlock_bh(&g_polling_lock);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_app_enable(int app_id, int enable)
+{
+ if (app_id <= OZ_APPID_MAX) {
+ spin_lock_bh(&g_polling_lock);
+ if (enable)
+ g_apps |= (1<<app_id);
+ else
+ g_apps &= ~(1<<app_id);
+ spin_unlock_bh(&g_polling_lock);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
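+/* Serialization scheme: the first caller to find g_processing_rx clear
+ * drains frames inline; concurrent callers just queue their skb under
+ * g_rx_queue.lock and return. This is what makes oz_rx_frame() and its
+ * callees "softirq-serialized".
+ */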
+static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ oz_event_log(OZ_EVT_RX_FRAME, 0, 0, 0, 0);
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (skb == 0)
+ return 0;
+ spin_lock_bh(&g_rx_queue.lock);
+ if (g_processing_rx) {
+ /* We already hold the lock so use __ variant.
+ */
+ __skb_queue_head(&g_rx_queue, skb);
+ spin_unlock_bh(&g_rx_queue.lock);
+ } else {
+ g_processing_rx = 1;
+ do {
+ spin_unlock_bh(&g_rx_queue.lock);
+ oz_rx_frame(skb);
+ spin_lock_bh(&g_rx_queue.lock);
+ if (skb_queue_empty(&g_rx_queue)) {
+ g_processing_rx = 0;
+ spin_unlock_bh(&g_rx_queue.lock);
+ break;
+ }
+ /* We already hold the lock so use __ variant.
+ */
+ skb = __skb_dequeue(&g_rx_queue);
+ } while (1);
+ }
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_binding_add(char *net_dev)
+{
+ struct oz_binding *binding;
+
+	binding = kmalloc(sizeof(struct oz_binding), GFP_ATOMIC);
+	if (binding) {
+		binding->ptype.type = __constant_htons(OZ_ETHERTYPE);
+		binding->ptype.func = oz_pkt_recv;
+		if (net_dev && *net_dev) {
+			/* Copy the name only on this branch: net_dev may be
+			 * null when binding to all netcards.
+			 */
+			strncpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
+			oz_trace("Adding binding: %s\n", net_dev);
+			binding->ptype.dev =
+				dev_get_by_name(&init_net, net_dev);
+			if (binding->ptype.dev == 0) {
+				oz_trace("Netdev %s not found\n", net_dev);
+				kfree(binding);
+				binding = 0;
+			}
+		} else {
+			binding->name[0] = 0;
+			oz_trace("Binding to all netcards\n");
+			binding->ptype.dev = 0;
+		}
+ if (binding) {
+ dev_add_pack(&binding->ptype);
+ spin_lock_bh(&g_binding_lock);
+ binding->next = g_binding;
+ g_binding = binding;
+ spin_unlock_bh(&g_binding_lock);
+ }
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static int compare_binding_name(char *s1, char *s2)
+{
+ int i;
+ for (i = 0; i < OZ_MAX_BINDING_LEN; i++) {
+ if (*s1 != *s2)
+ return 0;
+ if (!*s1++)
+ return 1;
+ s2++;
+ }
+ return 1;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static void pd_stop_all_for_device(struct net_device *net_dev)
+{
+ struct list_head h;
+ struct oz_pd *pd;
+ struct oz_pd *n;
+ INIT_LIST_HEAD(&h);
+ spin_lock_bh(&g_polling_lock);
+ list_for_each_entry_safe(pd, n, &g_pd_list, link) {
+ if (pd->net_dev == net_dev) {
+ list_move(&pd->link, &h);
+ oz_pd_get(pd);
+ }
+ }
+ spin_unlock_bh(&g_polling_lock);
+ while (!list_empty(&h)) {
+ pd = list_first_entry(&h, struct oz_pd, link);
+ oz_pd_stop(pd);
+ oz_pd_put(pd);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_binding_remove(char *net_dev)
+{
+ struct oz_binding *binding = 0;
+ struct oz_binding **link;
+ oz_trace("Removing binding: %s\n", net_dev);
+ spin_lock_bh(&g_binding_lock);
+ binding = g_binding;
+ link = &g_binding;
+ while (binding) {
+ if (compare_binding_name(binding->name, net_dev)) {
+ oz_trace("Binding '%s' found\n", net_dev);
+ *link = binding->next;
+ break;
+ } else {
+			link = &binding->next;
+ binding = binding->next;
+ }
+ }
+ spin_unlock_bh(&g_binding_lock);
+ if (binding) {
+ dev_remove_pack(&binding->ptype);
+ if (binding->ptype.dev) {
+ dev_put(binding->ptype.dev);
+ pd_stop_all_for_device(binding->ptype.dev);
+ }
+ kfree(binding);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+static char *oz_get_next_device_name(char *s, char *dname, int max_size)
+{
+ while (*s == ',')
+ s++;
+ while (*s && (*s != ',') && max_size > 1) {
+ *dname++ = *s++;
+ max_size--;
+ }
+ *dname = 0;
+ return s;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_protocol_init(char *devs)
+{
+ skb_queue_head_init(&g_rx_queue);
+ if (devs && (devs[0] == '*')) {
+ oz_binding_add(0);
+ } else {
+ char d[32];
+ while (*devs) {
+ devs = oz_get_next_device_name(devs, d, sizeof(d));
+ if (d[0])
+ oz_binding_add(d);
+ }
+ }
+ init_timer(&g_timer);
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
+{
+ struct oz_pd *pd;
+ struct list_head *e;
+ int count = 0;
+ spin_lock_bh(&g_polling_lock);
+ list_for_each(e, &g_pd_list) {
+ if (count >= max_count)
+ break;
+ pd = container_of(e, struct oz_pd, link);
+ memcpy(&addr[count++], pd->mac_addr, ETH_ALEN);
+ }
+ spin_unlock_bh(&g_polling_lock);
+ return count;
+}
+/*------------------------------------------------------------------------------
+*/
+void oz_polling_lock_bh(void)
+{
+ spin_lock_bh(&g_polling_lock);
+}
+/*------------------------------------------------------------------------------
+*/
+void oz_polling_unlock_bh(void)
+{
+ spin_unlock_bh(&g_polling_lock);
+}
diff --git a/drivers/staging/ozwpan/ozproto.h b/drivers/staging/ozwpan/ozproto.h
new file mode 100644
index 000000000000..89aea28bd8d5
--- /dev/null
+++ b/drivers/staging/ozwpan/ozproto.h
@@ -0,0 +1,69 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZPROTO_H
+#define _OZPROTO_H
+
+#include <asm/byteorder.h>
+#include "ozconfig.h"
+#include "ozappif.h"
+
+#define OZ_ALLOCATED_SPACE(__x) (LL_RESERVED_SPACE(__x)+(__x)->needed_tailroom)
+
+/* Converts milliseconds to jiffies: x ms = (x * HZ) / 1000 jiffies,
+ * e.g. oz_ms_to_jiffies(1000) == HZ.
+ */
+#define oz_ms_to_jiffies(__x) (((__x)*HZ)/1000)
+
+/* Quantum milliseconds.
+ */
+#define OZ_QUANTUM_MS 8
+/* Quantum jiffies
+ */
+#define OZ_QUANTUM_J (oz_ms_to_jiffies(OZ_QUANTUM_MS))
+/* Default timeouts.
+ */
+#define OZ_CONNECTION_TOUT_J (2*HZ)
+#define OZ_PRESLEEP_TOUT_J (11*HZ)
+
+/* Maximum size of tx frames. */
+#define OZ_MAX_TX_SIZE 1514
+
+/* Application handler functions.
+ */
+typedef int (*oz_app_init_fn_t)(void);
+typedef void (*oz_app_term_fn_t)(void);
+typedef int (*oz_app_start_fn_t)(struct oz_pd *pd, int resume);
+typedef void (*oz_app_stop_fn_t)(struct oz_pd *pd, int pause);
+typedef void (*oz_app_rx_fn_t)(struct oz_pd *pd, struct oz_elt *elt);
+typedef int (*oz_app_heartbeat_fn_t)(struct oz_pd *pd);
+typedef void (*oz_app_farewell_fn_t)(struct oz_pd *pd, u8 ep_num,
+ u8 *data, u8 len);
+
+struct oz_app_if {
+ oz_app_init_fn_t init;
+ oz_app_term_fn_t term;
+ oz_app_start_fn_t start;
+ oz_app_stop_fn_t stop;
+ oz_app_rx_fn_t rx;
+	oz_app_heartbeat_fn_t heartbeat;
+ oz_app_farewell_fn_t farewell;
+ int app_id;
+};
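+/* One oz_app_if entry per application, indexed by app_id - 1 in the
+ * driver's application table; the USB service, for instance, supplies
+ * oz_usb_init/oz_usb_term/oz_usb_start/oz_usb_stop and related handlers.
+ * The table itself is defined outside this header.
+ */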
+
+int oz_protocol_init(char *devs);
+void oz_protocol_term(void);
+int oz_get_pd_list(struct oz_mac_addr *addr, int max_count);
+void oz_app_enable(int app_id, int enable);
+struct oz_pd *oz_pd_find(u8 *mac_addr);
+void oz_binding_add(char *net_dev);
+void oz_binding_remove(char *net_dev);
+void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
+ int remove);
+void oz_timer_delete(struct oz_pd *pd, int type);
+void oz_pd_request_heartbeat(struct oz_pd *pd);
+void oz_polling_lock_bh(void);
+void oz_polling_unlock_bh(void);
+
+#endif /* _OZPROTO_H */
diff --git a/drivers/staging/ozwpan/ozprotocol.h b/drivers/staging/ozwpan/ozprotocol.h
new file mode 100644
index 000000000000..b3e7d77f3fff
--- /dev/null
+++ b/drivers/staging/ozwpan/ozprotocol.h
@@ -0,0 +1,372 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZPROTOCOL_H
+#define _OZPROTOCOL_H
+
+#define PACKED __packed
+
+#define OZ_ETHERTYPE 0x892e
+
+/* Status codes
+ */
+#define OZ_STATUS_SUCCESS 0
+#define OZ_STATUS_INVALID_PARAM 1
+#define OZ_STATUS_TOO_MANY_PDS 2
+#define OZ_STATUS_NOT_ALLOWED 4
+#define OZ_STATUS_SESSION_MISMATCH 5
+#define OZ_STATUS_SESSION_TEARDOWN 6
+
+/* This is the generic element header.
+ Every element starts with this.
+ */
+struct oz_elt {
+ u8 type;
+ u8 length;
+} PACKED;
+
+#define oz_next_elt(__elt) \
+ (struct oz_elt *)((u8 *)((__elt) + 1) + (__elt)->length)
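+/* Example walk over a frame body of 'len' bytes, mirroring the rx path:
+ *
+ *	struct oz_elt *e = first_elt;
+ *	while (len >= sizeof(struct oz_elt) + e->length) {
+ *		len -= sizeof(struct oz_elt) + e->length;
+ *		e = oz_next_elt(e);
+ *	}
+ *
+ * ('first_elt' and 'len' are illustrative names, not part of the protocol.)
+ */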
+
+/* Protocol element IDs.
+ */
+#define OZ_ELT_CONNECT_REQ 0x06
+#define OZ_ELT_CONNECT_RSP 0x07
+#define OZ_ELT_DISCONNECT 0x08
+#define OZ_ELT_UPDATE_PARAM_REQ 0x11
+#define OZ_ELT_FAREWELL_REQ 0x12
+#define OZ_ELT_APP_DATA 0x31
+
+/* This is the Ozmo header which is the first Ozmo specific part
+ * of a frame and comes after the MAC header.
+ */
+struct oz_hdr {
+ u8 control;
+ u8 last_pkt_num;
+ u32 pkt_num;
+} PACKED;
+
+#define OZ_PROTOCOL_VERSION 0x1
+/* Bits in the control field. */
+#define OZ_VERSION_MASK 0xc
+#define OZ_VERSION_SHIFT 2
+#define OZ_F_ACK 0x10
+#define OZ_F_ISOC 0x20
+#define OZ_F_MORE_DATA 0x40
+#define OZ_F_ACK_REQUESTED 0x80
+
+#define oz_get_prot_ver(__x) (((__x) & OZ_VERSION_MASK) >> OZ_VERSION_SHIFT)
+
+/* Used to select the bits of packet number to put in the last_pkt_num.
+ */
+#define OZ_LAST_PN_MASK 0x00ff
+
+#define OZ_LAST_PN_HALF_CYCLE 127
+
+/* Connect request data structure.
+ */
+struct oz_elt_connect_req {
+ u8 mode;
+ u8 resv1[16];
+ u8 pd_info;
+ u8 session_id;
+ u8 presleep;
+ u8 resv2;
+ u8 host_vendor;
+ u8 keep_alive;
+ u16 apps;
+ u8 max_len_div16;
+ u8 ms_per_isoc;
+ u8 resv3[2];
+} PACKED;
+
+/* mode field bits.
+ */
+#define OZ_MODE_POLLED 0x0
+#define OZ_MODE_TRIGGERED 0x1
+#define OZ_MODE_MASK 0xf
+#define OZ_F_ISOC_NO_ELTS 0x40
+#define OZ_F_ISOC_ANYTIME 0x80
+
+/* Keep alive field.
+ */
+#define OZ_KALIVE_TYPE_MASK 0xc0
+#define OZ_KALIVE_VALUE_MASK 0x3f
+#define OZ_KALIVE_SPECIAL 0x00
+#define OZ_KALIVE_SECS 0x40
+#define OZ_KALIVE_MINS 0x80
+#define OZ_KALIVE_HOURS 0xc0
+
+/* Connect response data structure.
+ */
+struct oz_elt_connect_rsp {
+ u8 mode;
+ u8 status;
+ u8 resv1[3];
+ u8 session_id;
+ u16 apps;
+ u32 resv2;
+} PACKED;
+
+struct oz_elt_farewell {
+ u8 ep_num;
+ u8 index;
+ u8 report[1];
+} PACKED;
+
+struct oz_elt_update_param {
+ u8 resv1[16];
+ u8 presleep;
+ u8 resv2;
+ u8 host_vendor;
+ u8 keepalive;
+} PACKED;
+
+/* Header common to all application elements.
+ */
+struct oz_app_hdr {
+ u8 app_id;
+ u8 elt_seq_num;
+} PACKED;
+
+/* Values for app_id.
+ */
+#define OZ_APPID_USB 0x1
+#define OZ_APPID_UNUSED1 0x2
+#define OZ_APPID_UNUSED2 0x3
+#define OZ_APPID_SERIAL 0x4
+#define OZ_APPID_MAX OZ_APPID_SERIAL
+#define OZ_NB_APPS (OZ_APPID_MAX+1)
+
+/* USB header common to all elements for the USB application.
+ * This header extends the oz_app_hdr and comes directly after
+ * the element header in a USB application.
+ */
+struct oz_usb_hdr {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+} PACKED;
+
+
+/* USB requests element subtypes (type field of hs_usb_hdr).
+ */
+#define OZ_GET_DESC_REQ 1
+#define OZ_GET_DESC_RSP 2
+#define OZ_SET_CONFIG_REQ 3
+#define OZ_SET_CONFIG_RSP 4
+#define OZ_SET_INTERFACE_REQ 5
+#define OZ_SET_INTERFACE_RSP 6
+#define OZ_VENDOR_CLASS_REQ 7
+#define OZ_VENDOR_CLASS_RSP 8
+#define OZ_GET_STATUS_REQ 9
+#define OZ_GET_STATUS_RSP 10
+#define OZ_CLEAR_FEATURE_REQ 11
+#define OZ_CLEAR_FEATURE_RSP 12
+#define OZ_SET_FEATURE_REQ 13
+#define OZ_SET_FEATURE_RSP 14
+#define OZ_GET_CONFIGURATION_REQ 15
+#define OZ_GET_CONFIGURATION_RSP 16
+#define OZ_GET_INTERFACE_REQ 17
+#define OZ_GET_INTERFACE_RSP 18
+#define OZ_SYNCH_FRAME_REQ 19
+#define OZ_SYNCH_FRAME_RSP 20
+#define OZ_USB_ENDPOINT_DATA 23
+
+#define OZ_REQD_D2H 0x80
+
+struct oz_get_desc_req {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u16 offset;
+ u16 size;
+ u8 req_type;
+ u8 desc_type;
+ u16 w_index;
+ u8 index;
+} PACKED;
+
+/* Values for desc_type field.
+*/
+#define OZ_DESC_DEVICE 0x01
+#define OZ_DESC_CONFIG 0x02
+#define OZ_DESC_STRING 0x03
+
+/* Values for req_type field.
+ */
+#define OZ_RECP_MASK 0x1F
+#define OZ_RECP_DEVICE 0x00
+#define OZ_RECP_INTERFACE 0x01
+#define OZ_RECP_ENDPOINT 0x02
+
+#define OZ_REQT_MASK 0x60
+#define OZ_REQT_STD 0x00
+#define OZ_REQT_CLASS 0x20
+#define OZ_REQT_VENDOR 0x40
+
+struct oz_get_desc_rsp {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u16 offset;
+ u16 total_size;
+ u8 rcode;
+ u8 data[1];
+} PACKED;
+
+struct oz_feature_req {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 recipient;
+ u8 index;
+ u16 feature;
+} PACKED;
+
+struct oz_feature_rsp {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 rcode;
+} PACKED;
+
+struct oz_set_config_req {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 index;
+} PACKED;
+
+struct oz_set_config_rsp {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 rcode;
+} PACKED;
+
+struct oz_set_interface_req {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 index;
+ u8 alternative;
+} PACKED;
+
+struct oz_set_interface_rsp {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 rcode;
+} PACKED;
+
+struct oz_get_interface_req {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 index;
+} PACKED;
+
+struct oz_get_interface_rsp {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 rcode;
+ u8 alternative;
+} PACKED;
+
+struct oz_vendor_class_req {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 req_type;
+ u8 request;
+ u16 value;
+ u16 index;
+ u8 data[1];
+} PACKED;
+
+struct oz_vendor_class_rsp {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 req_id;
+ u8 rcode;
+ u8 data[1];
+} PACKED;
+
+struct oz_data {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 endpoint;
+ u8 format;
+} PACKED;
+
+struct oz_isoc_fixed {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 endpoint;
+ u8 format;
+ u8 unit_size;
+ u8 frame_number;
+ u8 data[1];
+} PACKED;
+
+struct oz_multiple_fixed {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 endpoint;
+ u8 format;
+ u8 unit_size;
+ u8 data[1];
+} PACKED;
+
+struct oz_fragmented {
+ u8 app_id;
+ u8 elt_seq_num;
+ u8 type;
+ u8 endpoint;
+ u8 format;
+ u16 total_size;
+ u16 offset;
+ u8 data[1];
+} PACKED;
+
+/* Note: the following does not get packaged in an element in the same way
+ * that other data formats are packaged. Instead the data is put in a frame
+ * directly after the oz_hdr and is the only permitted data in such a
+ * frame. The length of the data is directly determined from the frame size.
+ */
+struct oz_isoc_large {
+ u8 endpoint;
+ u8 format;
+ u8 ms_data;
+ u8 frame_number;
+} PACKED;
+
+#define OZ_DATA_F_TYPE_MASK 0xF
+#define OZ_DATA_F_MULTIPLE_FIXED 0x1
+#define OZ_DATA_F_MULTIPLE_VAR 0x2
+#define OZ_DATA_F_ISOC_FIXED 0x3
+#define OZ_DATA_F_ISOC_VAR 0x4
+#define OZ_DATA_F_FRAGMENTED 0x5
+#define OZ_DATA_F_ISOC_LARGE 0x7
+
+#endif /* _OZPROTOCOL_H */
diff --git a/drivers/staging/ozwpan/oztrace.c b/drivers/staging/ozwpan/oztrace.c
new file mode 100644
index 000000000000..353ead24fd7d
--- /dev/null
+++ b/drivers/staging/ozwpan/oztrace.c
@@ -0,0 +1,36 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#include "ozconfig.h"
+#include "oztrace.h"
+
+#ifdef WANT_VERBOSE_TRACE
+unsigned long trace_flags =
+ 0
+#ifdef WANT_TRACE_STREAM
+ | OZ_TRACE_STREAM
+#endif /* WANT_TRACE_STREAM */
+#ifdef WANT_TRACE_URB
+ | OZ_TRACE_URB
+#endif /* WANT_TRACE_URB */
+
+#ifdef WANT_TRACE_CTRL_DETAIL
+ | OZ_TRACE_CTRL_DETAIL
+#endif /* WANT_TRACE_CTRL_DETAIL */
+
+#ifdef WANT_TRACE_HUB
+ | OZ_TRACE_HUB
+#endif /* WANT_TRACE_HUB */
+
+#ifdef WANT_TRACE_RX_FRAMES
+ | OZ_TRACE_RX_FRAMES
+#endif /* WANT_TRACE_RX_FRAMES */
+
+#ifdef WANT_TRACE_TX_FRAMES
+ | OZ_TRACE_TX_FRAMES
+#endif /* WANT_TRACE_TX_FRAMES */
+ ;
+#endif /* WANT_VERBOSE_TRACE */
diff --git a/drivers/staging/ozwpan/oztrace.h b/drivers/staging/ozwpan/oztrace.h
new file mode 100644
index 000000000000..8293b24c5a77
--- /dev/null
+++ b/drivers/staging/ozwpan/oztrace.h
@@ -0,0 +1,35 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZTRACE_H_
+#define _OZTRACE_H_
+#include "ozconfig.h"
+
+#define TRACE_PREFIX KERN_ALERT "OZWPAN: "
+
+#ifdef WANT_TRACE
+#define oz_trace(...) printk(TRACE_PREFIX __VA_ARGS__)
+#ifdef WANT_VERBOSE_TRACE
+extern unsigned long trace_flags;
+#define oz_trace2(_flag, ...) \
+ do { if (trace_flags & _flag) printk(TRACE_PREFIX __VA_ARGS__); \
+ } while (0)
+#else
+#define oz_trace2(...)
+#endif /* #ifdef WANT_VERBOSE_TRACE */
+#else
+#define oz_trace(...)
+#define oz_trace2(...)
+#endif /* #ifdef WANT_TRACE */
+
+#define OZ_TRACE_STREAM 0x1
+#define OZ_TRACE_URB 0x2
+#define OZ_TRACE_CTRL_DETAIL 0x4
+#define OZ_TRACE_HUB 0x8
+#define OZ_TRACE_RX_FRAMES 0x10
+#define OZ_TRACE_TX_FRAMES 0x20
+
+#endif /* _OZTRACE_H_ */
diff --git a/drivers/staging/ozwpan/ozurbparanoia.c b/drivers/staging/ozwpan/ozurbparanoia.c
new file mode 100644
index 000000000000..55b9afbbe47b
--- /dev/null
+++ b/drivers/staging/ozwpan/ozurbparanoia.c
@@ -0,0 +1,53 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/usb.h>
+#include "ozconfig.h"
+#ifdef WANT_URB_PARANOIA
+#include "ozurbparanoia.h"
+#include "oztrace.h"
+/*-----------------------------------------------------------------------------
+ */
+#define OZ_MAX_URBS 1000
+static struct urb *g_urb_memory[OZ_MAX_URBS];
+static int g_nb_urbs;
+static DEFINE_SPINLOCK(g_urb_mem_lock);
+/*-----------------------------------------------------------------------------
+ */
+void oz_remember_urb(struct urb *urb)
+{
+ unsigned long irq_state;
+ spin_lock_irqsave(&g_urb_mem_lock, irq_state);
+ if (g_nb_urbs < OZ_MAX_URBS) {
+ g_urb_memory[g_nb_urbs++] = urb;
+ oz_trace("%lu: urb up = %d %p\n", jiffies, g_nb_urbs, urb);
+ } else {
+ oz_trace("ERROR urb buffer full\n");
+ }
+ spin_unlock_irqrestore(&g_urb_mem_lock, irq_state);
+}
+/*------------------------------------------------------------------------------
+ */
+int oz_forget_urb(struct urb *urb)
+{
+ unsigned long irq_state;
+ int i;
+ int rc = -1;
+ spin_lock_irqsave(&g_urb_mem_lock, irq_state);
+ for (i = 0; i < g_nb_urbs; i++) {
+		if (g_urb_memory[i] == urb) {
+			rc = 0;
+			if (--g_nb_urbs > i)
+				/* Regions overlap, so use memmove. */
+				memmove(&g_urb_memory[i], &g_urb_memory[i+1],
+					(g_nb_urbs - i) * sizeof(struct urb *));
+			oz_trace("%lu: urb down = %d %p\n",
+				jiffies, g_nb_urbs, urb);
+			break;
+		}
+ }
+ spin_unlock_irqrestore(&g_urb_mem_lock, irq_state);
+ return rc;
+}
+#endif /* #ifdef WANT_URB_PARANOIA */
diff --git a/drivers/staging/ozwpan/ozurbparanoia.h b/drivers/staging/ozwpan/ozurbparanoia.h
new file mode 100644
index 000000000000..00f5a3a81bc8
--- /dev/null
+++ b/drivers/staging/ozwpan/ozurbparanoia.h
@@ -0,0 +1,19 @@
+#ifndef _OZURBPARANOIA_H
+#define _OZURBPARANOIA_H
+/* -----------------------------------------------------------------------------
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * Copyright (c) 2011 Ozmo Inc
+ * -----------------------------------------------------------------------------
+ */
+
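+/* Debug aid: with WANT_URB_PARANOIA defined, every URB handed to the HCD
+ * is recorded via oz_remember_urb() and checked off again with
+ * oz_forget_urb(); a non-zero return from the latter flags an URB that
+ * was never remembered (or was forgotten twice).
+ */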
+#ifdef WANT_URB_PARANOIA
+void oz_remember_urb(struct urb *urb);
+int oz_forget_urb(struct urb *urb);
+#else
+#define oz_remember_urb(__x)
+#define oz_forget_urb(__x) 0
+#endif /* WANT_URB_PARANOIA */
+
+#endif /* _OZURBPARANOIA_H */
diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h
new file mode 100644
index 000000000000..3acf5980d7cc
--- /dev/null
+++ b/drivers/staging/ozwpan/ozusbif.h
@@ -0,0 +1,43 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZUSBIF_H
+#define _OZUSBIF_H
+
+#include <linux/usb.h>
+
+/* Reference counting functions.
+ */
+void oz_usb_get(void *hpd);
+void oz_usb_put(void *hpd);
+
+/* Stream functions.
+ */
+int oz_usb_stream_create(void *hpd, u8 ep_num);
+int oz_usb_stream_delete(void *hpd, u8 ep_num);
+
+/* Request functions.
+ */
+int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
+ u8 *data, int data_len);
+int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
+ u8 index, u16 windex, int offset, int len);
+int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb);
+void oz_usb_request_heartbeat(void *hpd);
+
+/* Confirmation functions.
+ */
+void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status,
+ u8 *desc, int length, int offset, int total_size);
+void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode,
+ u8 *data, int data_len);
+
+/* Indication functions.
+ */
+void oz_hcd_data_ind(void *hport, u8 endpoint, u8 *data, int data_len);
+
+int oz_hcd_heartbeat(void *hport);
+
+#endif /* _OZUSBIF_H */
diff --git a/drivers/staging/ozwpan/ozusbsvc.c b/drivers/staging/ozwpan/ozusbsvc.c
new file mode 100644
index 000000000000..9e74f9602384
--- /dev/null
+++ b/drivers/staging/ozwpan/ozusbsvc.c
@@ -0,0 +1,245 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ *
+ * This file provides the protocol-independent part of the implementation
+ * of the USB service for a PD.
+ * The implementation of this service is split into two parts: the first is
+ * protocol independent and the second contains the protocol-specific
+ * details. This split is to allow alternative protocols to be defined.
+ * The implementation of this service uses ozhcd.c to implement a USB HCD.
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+#include <linux/input.h>
+#include <asm/unaligned.h>
+#include "ozconfig.h"
+#include "ozprotocol.h"
+#include "ozeltbuf.h"
+#include "ozpd.h"
+#include "ozproto.h"
+#include "ozusbif.h"
+#include "ozhcd.h"
+#include "oztrace.h"
+#include "ozusbsvc.h"
+#include "ozevent.h"
+/*------------------------------------------------------------------------------
+ * This is called once when the driver is loaded to initialise the USB service.
+ * Context: process
+ */
+int oz_usb_init(void)
+{
+ oz_event_log(OZ_EVT_SERVICE, 1, OZ_APPID_USB, 0, 0);
+ return oz_hcd_init();
+}
+/*------------------------------------------------------------------------------
+ * This is called once when the driver is unloaded to terminate the USB service.
+ * Context: process
+ */
+void oz_usb_term(void)
+{
+ oz_event_log(OZ_EVT_SERVICE, 2, OZ_APPID_USB, 0, 0);
+ oz_hcd_term();
+}
+/*------------------------------------------------------------------------------
+ * This is called when the USB service is started or resumed for a PD.
+ * Context: softirq
+ */
+int oz_usb_start(struct oz_pd *pd, int resume)
+{
+ int rc = 0;
+ struct oz_usb_ctx *usb_ctx;
+ struct oz_usb_ctx *old_ctx = 0;
+ oz_event_log(OZ_EVT_SERVICE, 3, OZ_APPID_USB, 0, resume);
+ if (resume) {
+ oz_trace("USB service resumed.\n");
+ return 0;
+ }
+ oz_trace("USB service started.\n");
+ /* Create a USB context in case we need one. If we find the PD already
+ * has a USB context then we will destroy it.
+ */
+ usb_ctx = kzalloc(sizeof(struct oz_usb_ctx), GFP_ATOMIC);
+ if (usb_ctx == 0)
+ return -ENOMEM;
+ atomic_set(&usb_ctx->ref_count, 1);
+ usb_ctx->pd = pd;
+ usb_ctx->stopped = 0;
+ /* Install the USB context if the PD doesn't already have one.
+ * If it does already have one then destroy the one we have just
+ * created.
+ */
+ spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ old_ctx = pd->app_ctx[OZ_APPID_USB-1];
+ if (old_ctx == 0)
+ pd->app_ctx[OZ_APPID_USB-1] = usb_ctx;
+ oz_usb_get(pd->app_ctx[OZ_APPID_USB-1]);
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ if (old_ctx) {
+ oz_trace("Already have USB context.\n");
+ kfree(usb_ctx);
+ usb_ctx = old_ctx;
+ } else if (usb_ctx) {
+ /* Take a reference to the PD. This will be released when
+ * the USB context is destroyed.
+ */
+ oz_pd_get(pd);
+ }
+ /* If we already had a USB context and had obtained a port from
+ * the USB HCD then just reset the port. If we didn't have a port
+ * then report the arrival to the USB HCD so we get one.
+ */
+ if (usb_ctx->hport) {
+ oz_hcd_pd_reset(usb_ctx, usb_ctx->hport);
+ } else {
+ usb_ctx->hport = oz_hcd_pd_arrived(usb_ctx);
+ if (usb_ctx->hport == 0) {
+ oz_trace("USB hub returned null port.\n");
+ spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ pd->app_ctx[OZ_APPID_USB-1] = 0;
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ oz_usb_put(usb_ctx);
+ rc = -1;
+ }
+ }
+ oz_usb_put(usb_ctx);
+ return rc;
+}
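
The allocate-then-install sequence above is a lock-friendly idiom: the
context is created optimistically so that kzalloc() never runs under the
spinlock, and if another path installed a context first, the fresh one is
simply freed and the winner reused. A generic sketch of the same idiom, with
all names invented:

    #include <linux/atomic.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct ctx {
            atomic_t ref_count;
            /* ... per-PD payload ... */
    };

    static struct ctx *install_or_reuse(struct ctx **slot, spinlock_t *lock)
    {
            struct ctx *fresh = kzalloc(sizeof(*fresh), GFP_ATOMIC);
            struct ctx *old;

            if (!fresh)
                    return NULL;
            atomic_set(&fresh->ref_count, 1);

            spin_lock_bh(lock);
            old = *slot;
            if (!old)
                    *slot = fresh;          /* we won the race */
            spin_unlock_bh(lock);

            if (old) {
                    kfree(fresh);           /* lost the race; reuse winner */
                    return old;
            }
            return fresh;
    }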
+/*------------------------------------------------------------------------------
+ * This is called when the USB service is stopped or paused for a PD.
+ * Context: softirq or process
+ */
+void oz_usb_stop(struct oz_pd *pd, int pause)
+{
+ struct oz_usb_ctx *usb_ctx;
+ oz_event_log(OZ_EVT_SERVICE, 4, OZ_APPID_USB, 0, pause);
+ if (pause) {
+ oz_trace("USB service paused.\n");
+ return;
+ }
+ spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
+ pd->app_ctx[OZ_APPID_USB-1] = 0;
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ if (usb_ctx) {
+ unsigned long tout = jiffies + HZ;
+ oz_trace("USB service stopping...\n");
+ usb_ctx->stopped = 1;
+ /* At this point the reference count on the USB context should
+ * be 2: one from when we created it and one from the HCD,
+ * which claims a reference. Since stopped = 1 no new callers
+ * can get in, but someone may already be inside, so wait for
+ * them to leave, timing out after one second.
+ */
+ while ((atomic_read(&usb_ctx->ref_count) > 2) &&
+ time_before(jiffies, tout))
+ ;
+ oz_trace("USB service stopped.\n");
+ oz_hcd_pd_departed(usb_ctx->hport);
+ /* Release the reference taken in oz_usb_start.
+ */
+ oz_usb_put(usb_ctx);
+ }
+}
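
The stop path above spins with an empty loop body while waiting for
transient references to drain. A sketch of the same bounded wait with a
cpu_relax() hint, which the staging loop omits; 2 is the expected residual
count (the creator's reference plus the HCD's):

    #include <linux/atomic.h>
    #include <linux/jiffies.h>
    #include <linux/sched.h>
    #include "ozusbsvc.h"

    static void drain_refs(struct oz_usb_ctx *usb_ctx)
    {
            unsigned long deadline = jiffies + HZ;

            /* Wait for in-flight users, but give up after one second. */
            while (atomic_read(&usb_ctx->ref_count) > 2 &&
                   time_before(jiffies, deadline))
                    cpu_relax();
    }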
+/*------------------------------------------------------------------------------
+ * This increments the reference count of the context area for a specific PD.
+ * This ensures this context area does not disappear while still in use.
+ * Context: softirq
+ */
+void oz_usb_get(void *hpd)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ atomic_inc(&usb_ctx->ref_count);
+}
+/*------------------------------------------------------------------------------
+ * This decrements the reference count of the context area for a specific PD
+ * and destroys the context area if the reference count becomes zero.
+ * Context: softirq or process
+ */
+void oz_usb_put(void *hpd)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ if (atomic_dec_and_test(&usb_ctx->ref_count)) {
+ oz_trace("Dealloc USB context.\n");
+ oz_pd_put(usb_ctx->pd);
+ kfree(usb_ctx);
+ }
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_usb_heartbeat(struct oz_pd *pd)
+{
+ struct oz_usb_ctx *usb_ctx;
+ int rc = 0;
+ spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
+ if (usb_ctx)
+ oz_usb_get(usb_ctx);
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ if (usb_ctx == 0)
+ return rc;
+ if (usb_ctx->stopped)
+ goto done;
+ if (usb_ctx->hport)
+ if (oz_hcd_heartbeat(usb_ctx->hport))
+ rc = 1;
+done:
+ oz_usb_put(usb_ctx);
+ return rc;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_usb_stream_create(void *hpd, u8 ep_num)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_pd *pd = usb_ctx->pd;
+ oz_trace("oz_usb_stream_create(0x%x)\n", ep_num);
+ if (pd->mode & OZ_F_ISOC_NO_ELTS) {
+ oz_isoc_stream_create(pd, ep_num);
+ } else {
+ oz_pd_get(pd);
+ if (oz_elt_stream_create(&pd->elt_buff, ep_num,
+ 4*pd->max_tx_size)) {
+ oz_pd_put(pd);
+ return -1;
+ }
+ }
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_usb_stream_delete(void *hpd, u8 ep_num)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ if (usb_ctx) {
+ struct oz_pd *pd = usb_ctx->pd;
+ if (pd) {
+ oz_trace("oz_usb_stream_delete(0x%x)\n", ep_num);
+ if (pd->mode & OZ_F_ISOC_NO_ELTS) {
+ oz_isoc_stream_delete(pd, ep_num);
+ } else {
+ if (oz_elt_stream_delete(&pd->elt_buff, ep_num))
+ return -1;
+ oz_pd_put(pd);
+ }
+ }
+ }
+ return 0;
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_usb_request_heartbeat(void *hpd)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ if (usb_ctx && usb_ctx->pd)
+ oz_pd_request_heartbeat(usb_ctx->pd);
+}
diff --git a/drivers/staging/ozwpan/ozusbsvc.h b/drivers/staging/ozwpan/ozusbsvc.h
new file mode 100644
index 000000000000..58e05a59be31
--- /dev/null
+++ b/drivers/staging/ozwpan/ozusbsvc.h
@@ -0,0 +1,32 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#ifndef _OZUSBSVC_H
+#define _OZUSBSVC_H
+
+/*------------------------------------------------------------------------------
+ * Per PD context info stored in application context area of PD.
+ * This object is reference counted to ensure it doesn't disappear while
+ * still in use.
+ */
+struct oz_usb_ctx {
+ atomic_t ref_count;
+ u8 tx_seq_num;
+ u8 rx_seq_num;
+ struct oz_pd *pd;
+ void *hport;
+ int stopped;
+};
+
+int oz_usb_init(void);
+void oz_usb_term(void);
+int oz_usb_start(struct oz_pd *pd, int resume);
+void oz_usb_stop(struct oz_pd *pd, int pause);
+void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt);
+int oz_usb_heartbeat(struct oz_pd *pd);
+void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len);
+
+#endif /* _OZUSBSVC_H */
+
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
new file mode 100644
index 000000000000..66bd576bb5e9
--- /dev/null
+++ b/drivers/staging/ozwpan/ozusbsvc1.c
@@ -0,0 +1,437 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ *
+ * This file implements the protocol specific parts of the USB service for a PD.
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+#include <linux/input.h>
+#include <asm/unaligned.h>
+#include "ozconfig.h"
+#include "ozprotocol.h"
+#include "ozeltbuf.h"
+#include "ozpd.h"
+#include "ozproto.h"
+#include "ozusbif.h"
+#include "ozhcd.h"
+#include "oztrace.h"
+#include "ozusbsvc.h"
+#include "ozevent.h"
+/*------------------------------------------------------------------------------
+ */
+#define MAX_ISOC_FIXED_DATA (253-sizeof(struct oz_isoc_fixed))
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei,
+ struct oz_usb_ctx *usb_ctx, u8 strid, u8 isoc)
+{
+ int ret;
+ struct oz_elt *elt = (struct oz_elt *)ei->data;
+ struct oz_app_hdr *app_hdr = (struct oz_app_hdr *)(elt+1);
+ elt->type = OZ_ELT_APP_DATA;
+ ei->app_id = OZ_APPID_USB;
+ ei->length = elt->length + sizeof(struct oz_elt);
+ app_hdr->app_id = OZ_APPID_USB;
+ spin_lock_bh(&eb->lock);
+ if (isoc == 0) {
+ app_hdr->elt_seq_num = usb_ctx->tx_seq_num++;
+ if (usb_ctx->tx_seq_num == 0)
+ usb_ctx->tx_seq_num = 1;
+ }
+ ret = oz_queue_elt_info(eb, isoc, strid, ei);
+ if (ret)
+ oz_elt_info_free(eb, ei);
+ spin_unlock_bh(&eb->lock);
+ return ret;
+}
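
Note how the transmit sequence number is advanced under the element-buffer
lock and never hands out zero after the counter wraps; the receive path in
oz_usb_rx() treats a zero elt_seq_num as unsequenced and always accepts it.
An equivalent helper, written here only to make the wrap rule explicit:

    #include <linux/types.h>

    /* Hand out the current value, then advance, wrapping 255 -> 1 so that
     * 0 stays reserved. The very first element after kzalloc() therefore
     * goes out with sequence number 0, which the receiver always accepts.
     */
    static inline u8 next_tx_seq(u8 *counter)
    {
            u8 v = (*counter)++;

            if (*counter == 0)      /* wrapped past 255 */
                    *counter = 1;   /* 0 means "unsequenced" on receive */
            return v;
    }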
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
+ u8 index, u16 windex, int offset, int len)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_pd *pd = usb_ctx->pd;
+ struct oz_elt *elt;
+ struct oz_get_desc_req *body;
+ struct oz_elt_buf *eb = &pd->elt_buff;
+ struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
+ oz_trace(" req_type = 0x%x\n", req_type);
+ oz_trace(" desc_type = 0x%x\n", desc_type);
+ oz_trace(" index = 0x%x\n", index);
+ oz_trace(" windex = 0x%x\n", windex);
+ oz_trace(" offset = 0x%x\n", offset);
+ oz_trace(" len = 0x%x\n", len);
+ if (len > 200)
+ len = 200;
+ if (ei == 0)
+ return -1;
+ elt = (struct oz_elt *)ei->data;
+ elt->length = sizeof(struct oz_get_desc_req);
+ body = (struct oz_get_desc_req *)(elt+1);
+ body->type = OZ_GET_DESC_REQ;
+ body->req_id = req_id;
+ put_unaligned(cpu_to_le16(offset), &body->offset);
+ put_unaligned(cpu_to_le16(len), &body->size);
+ body->req_type = req_type;
+ body->desc_type = desc_type;
+ body->w_index = windex;
+ body->index = index;
+ return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_pd *pd = usb_ctx->pd;
+ struct oz_elt *elt;
+ struct oz_elt_buf *eb = &pd->elt_buff;
+ struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
+ struct oz_set_config_req *body;
+ if (ei == 0)
+ return -1;
+ elt = (struct oz_elt *)ei->data;
+ elt->length = sizeof(struct oz_set_config_req);
+ body = (struct oz_set_config_req *)(elt+1);
+ body->type = OZ_SET_CONFIG_REQ;
+ body->req_id = req_id;
+ body->index = index;
+ return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_pd *pd = usb_ctx->pd;
+ struct oz_elt *elt;
+ struct oz_elt_buf *eb = &pd->elt_buff;
+ struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
+ struct oz_set_interface_req *body;
+ if (ei == 0)
+ return -1;
+ elt = (struct oz_elt *)ei->data;
+ elt->length = sizeof(struct oz_set_interface_req);
+ body = (struct oz_set_interface_req *)(elt+1);
+ body->type = OZ_SET_INTERFACE_REQ;
+ body->req_id = req_id;
+ body->index = index;
+ body->alternative = alt;
+ return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
+ u8 recipient, u8 index, __le16 feature)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_pd *pd = usb_ctx->pd;
+ struct oz_elt *elt;
+ struct oz_elt_buf *eb = &pd->elt_buff;
+ struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
+ struct oz_feature_req *body;
+ if (ei == 0)
+ return -1;
+ elt = (struct oz_elt *)ei->data;
+ elt->length = sizeof(struct oz_feature_req);
+ body = (struct oz_feature_req *)(elt+1);
+ body->type = type;
+ body->req_id = req_id;
+ body->recipient = recipient;
+ body->index = index;
+ put_unaligned(feature, &body->feature);
+ return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
+ u8 request, __le16 value, __le16 index, u8 *data, int data_len)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_pd *pd = usb_ctx->pd;
+ struct oz_elt *elt;
+ struct oz_elt_buf *eb = &pd->elt_buff;
+ struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
+ struct oz_vendor_class_req *body;
+ if (ei == 0)
+ return -1;
+ elt = (struct oz_elt *)ei->data;
+ elt->length = sizeof(struct oz_vendor_class_req) - 1 + data_len;
+ body = (struct oz_vendor_class_req *)(elt+1);
+ body->type = OZ_VENDOR_CLASS_REQ;
+ body->req_id = req_id;
+ body->req_type = req_type;
+ body->request = request;
+ put_unaligned(value, &body->value);
+ put_unaligned(index, &body->index);
+ if (data_len)
+ memcpy(body->data, data, data_len);
+ return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
+}
+/*------------------------------------------------------------------------------
+ * Context: tasklet
+ */
+int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
+ u8 *data, int data_len)
+{
+ unsigned wvalue = le16_to_cpu(setup->wValue);
+ unsigned windex = le16_to_cpu(setup->wIndex);
+ unsigned wlength = le16_to_cpu(setup->wLength);
+ int rc = 0;
+ oz_event_log(OZ_EVT_CTRL_REQ, setup->bRequest, req_id,
+ (void *)(((unsigned long)(setup->wValue))<<16 |
+ ((unsigned long)setup->wIndex)),
+ setup->bRequestType);
+ if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (setup->bRequest) {
+ case USB_REQ_GET_DESCRIPTOR:
+ rc = oz_usb_get_desc_req(hpd, req_id,
+ setup->bRequestType, (u8)(wvalue>>8),
+ (u8)wvalue, setup->wIndex, 0, wlength);
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ rc = oz_usb_set_config_req(hpd, req_id, (u8)wvalue);
+ break;
+ case USB_REQ_SET_INTERFACE: {
+ u8 if_num = (u8)windex;
+ u8 alt = (u8)wvalue;
+ rc = oz_usb_set_interface_req(hpd, req_id,
+ if_num, alt);
+ }
+ break;
+ case USB_REQ_SET_FEATURE:
+ rc = oz_usb_set_clear_feature_req(hpd, req_id,
+ OZ_SET_FEATURE_REQ,
+ setup->bRequestType & 0xf, (u8)windex,
+ setup->wValue);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ rc = oz_usb_set_clear_feature_req(hpd, req_id,
+ OZ_CLEAR_FEATURE_REQ,
+ setup->bRequestType & 0xf,
+ (u8)windex, setup->wValue);
+ break;
+ }
+ } else {
+ rc = oz_usb_vendor_class_req(hpd, req_id, setup->bRequestType,
+ setup->bRequest, setup->wValue, setup->wIndex,
+ data, data_len);
+ }
+ return rc;
+}
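
To make the dispatch concrete, here is a worked example with made-up
values: a standard SET_INTERFACE request selecting alternate setting 1 on
interface 2 decodes as below and is routed to
oz_usb_set_interface_req(hpd, req_id, 2, 1); any setup packet whose type
bits are not USB_TYPE_STANDARD falls through to oz_usb_vendor_class_req().

    #include <linux/usb/ch9.h>

    struct usb_ctrlrequest setup = {
            .bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD |
                            USB_RECIP_INTERFACE,
            .bRequest     = USB_REQ_SET_INTERFACE,
            .wValue       = cpu_to_le16(1), /* alternate setting */
            .wIndex       = cpu_to_le16(2), /* interface number */
            .wLength      = 0,
    };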
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_pd *pd = usb_ctx->pd;
+ struct oz_elt_buf *eb;
+ int i;
+ int hdr_size;
+ u8 *data;
+ struct usb_iso_packet_descriptor *desc;
+
+ if (pd->mode & OZ_F_ISOC_NO_ELTS) {
+ for (i = 0; i < urb->number_of_packets; i++) {
+ u8 *data;
+ desc = &urb->iso_frame_desc[i];
+ data = ((u8 *)urb->transfer_buffer)+desc->offset;
+ oz_send_isoc_unit(pd, ep_num, data, desc->length);
+ }
+ return 0;
+ }
+
+ hdr_size = sizeof(struct oz_isoc_fixed) - 1;
+ eb = &pd->elt_buff;
+ i = 0;
+ while (i < urb->number_of_packets) {
+ struct oz_elt_info *ei = oz_elt_info_alloc(eb);
+ struct oz_elt *elt;
+ struct oz_isoc_fixed *body;
+ int unit_count;
+ int unit_size;
+ int rem;
+ if (ei == 0)
+ return -1;
+ rem = MAX_ISOC_FIXED_DATA;
+ elt = (struct oz_elt *)ei->data;
+ body = (struct oz_isoc_fixed *)(elt + 1);
+ body->type = OZ_USB_ENDPOINT_DATA;
+ body->endpoint = ep_num;
+ body->format = OZ_DATA_F_ISOC_FIXED;
+ unit_size = urb->iso_frame_desc[i].length;
+ body->unit_size = (u8)unit_size;
+ data = ((u8 *)(elt+1)) + hdr_size;
+ unit_count = 0;
+ while (i < urb->number_of_packets) {
+ desc = &urb->iso_frame_desc[i];
+ if ((unit_size == desc->length) &&
+ (desc->length <= rem)) {
+ memcpy(data, ((u8 *)urb->transfer_buffer) +
+ desc->offset, unit_size);
+ data += unit_size;
+ rem -= unit_size;
+ unit_count++;
+ desc->status = 0;
+ desc->actual_length = desc->length;
+ i++;
+ } else {
+ break;
+ }
+ }
+ elt->length = hdr_size + MAX_ISOC_FIXED_DATA - rem;
+ /* Store the number of units in body->frame_number for the
+ * moment. This field will be correctly determined before
+ * the element is sent. */
+ body->frame_number = (u8)unit_count;
+ oz_usb_submit_elt(eb, ei, usb_ctx, ep_num,
+ pd->mode & OZ_F_ISOC_ANYTIME);
+ }
+ return 0;
+}
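
A worked capacity example for the packing loop above. The real
sizeof(struct oz_isoc_fixed) comes from ozprotocol.h; the value used below
is only an assumption to make the arithmetic concrete.

    /* Assume sizeof(struct oz_isoc_fixed) == 6, so hdr_size = 5 and
     * MAX_ISOC_FIXED_DATA = 253 - 6 = 247. A stream of 32-byte isoc
     * packets then coalesces 7 to an element:
     *   7 * 32 = 224 bytes copied, rem = 247 - 224 = 23,
     *   elt->length = 5 + 247 - 23 = 229.
     * A packet of any other length ends the run and starts a new element.
     */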
+/*------------------------------------------------------------------------------
+ * Context: softirq-serialized
+ */
+void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
+ struct oz_usb_hdr *usb_hdr, int len)
+{
+ struct oz_data *data_hdr = (struct oz_data *)usb_hdr;
+ switch (data_hdr->format) {
+ case OZ_DATA_F_MULTIPLE_FIXED: {
+ struct oz_multiple_fixed *body =
+ (struct oz_multiple_fixed *)data_hdr;
+ u8 *data = body->data;
+ int n;
+ if (!body->unit_size)
+ break; /* avoid dividing by zero on a malformed element */
+ n = (len - sizeof(struct oz_multiple_fixed) + 1)
+ / body->unit_size;
+ while (n--) {
+ oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
+ data, body->unit_size);
+ data += body->unit_size;
+ }
+ }
+ break;
+ case OZ_DATA_F_ISOC_FIXED: {
+ struct oz_isoc_fixed *body =
+ (struct oz_isoc_fixed *)data_hdr;
+ int data_len = len-sizeof(struct oz_isoc_fixed)+1;
+ int unit_size = body->unit_size;
+ u8 *data = body->data;
+ int count;
+ int i;
+ if (!unit_size)
+ break;
+ count = data_len/unit_size;
+ for (i = 0; i < count; i++) {
+ oz_hcd_data_ind(usb_ctx->hport,
+ body->endpoint, data, unit_size);
+ data += unit_size;
+ }
+ }
+ break;
+ }
+
+}
+/*------------------------------------------------------------------------------
+ * This is called when the PD has received a USB element. The type of element
+ * is determined and is then passed to an appropriate handler function.
+ * Context: softirq-serialized
+ */
+void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
+{
+ struct oz_usb_hdr *usb_hdr = (struct oz_usb_hdr *)(elt + 1);
+ struct oz_usb_ctx *usb_ctx;
+
+ spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
+ if (usb_ctx)
+ oz_usb_get(usb_ctx);
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ if (usb_ctx == 0)
+ return; /* Context has gone so nothing to do. */
+ if (usb_ctx->stopped)
+ goto done;
+ /* If sequence number is non-zero then check it is not a duplicate.
+ * Zero sequence numbers are always accepted.
+ */
+ if (usb_hdr->elt_seq_num != 0) {
+ if (((usb_ctx->rx_seq_num - usb_hdr->elt_seq_num) & 0x80) == 0)
+ /* Reject duplicate element. */
+ goto done;
+ }
+ usb_ctx->rx_seq_num = usb_hdr->elt_seq_num;
+ switch (usb_hdr->type) {
+ case OZ_GET_DESC_RSP: {
+ struct oz_get_desc_rsp *body =
+ (struct oz_get_desc_rsp *)usb_hdr;
+ int data_len = elt->length -
+ sizeof(struct oz_get_desc_rsp) + 1;
+ u16 offs = le16_to_cpu(get_unaligned(&body->offset));
+ u16 total_size =
+ le16_to_cpu(get_unaligned(&body->total_size));
+ oz_trace("USB_REQ_GET_DESCRIPTOR - cnf\n");
+ oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
+ body->rcode, body->data,
+ data_len, offs, total_size);
+ }
+ break;
+ case OZ_SET_CONFIG_RSP: {
+ struct oz_set_config_rsp *body =
+ (struct oz_set_config_rsp *)usb_hdr;
+ oz_hcd_control_cnf(usb_ctx->hport, body->req_id,
+ body->rcode, 0, 0);
+ }
+ break;
+ case OZ_SET_INTERFACE_RSP: {
+ struct oz_set_interface_rsp *body =
+ (struct oz_set_interface_rsp *)usb_hdr;
+ oz_hcd_control_cnf(usb_ctx->hport,
+ body->req_id, body->rcode, 0, 0);
+ }
+ break;
+ case OZ_VENDOR_CLASS_RSP: {
+ struct oz_vendor_class_rsp *body =
+ (struct oz_vendor_class_rsp *)usb_hdr;
+ oz_hcd_control_cnf(usb_ctx->hport, body->req_id,
+ body->rcode, body->data, elt->length-
+ sizeof(struct oz_vendor_class_rsp)+1);
+ }
+ break;
+ case OZ_USB_ENDPOINT_DATA:
+ oz_usb_handle_ep_data(usb_ctx, usb_hdr, elt->length);
+ break;
+ }
+done:
+ oz_usb_put(usb_ctx);
+}
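
The duplicate test above is 8-bit serial-number arithmetic: an element is
accepted only when its sequence number is ahead of the last one seen,
modulo 256. Worked examples:

    /* ((rx_seq_num - elt_seq_num) & 0x80) == 0 means "reject":
     *   rx = 10,  new = 11:  10 - 11 = 0xff, bit 7 set   -> accept
     *   rx = 10,  new = 10:  10 - 10 = 0x00, bit 7 clear -> reject
     *   rx = 250, new =  3: 250 -  3 = 0xf7, bit 7 set   -> accept
     * so numbers up to 128 ahead of the last one seen are accepted, and
     * anything equal to or up to 127 behind it is dropped as a duplicate.
     */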
+/*------------------------------------------------------------------------------
+ * Context: softirq, process
+ */
+void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len)
+{
+ struct oz_usb_ctx *usb_ctx;
+ spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
+ if (usb_ctx)
+ oz_usb_get(usb_ctx);
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ if (usb_ctx == 0)
+ return; /* Context has gone so nothing to do. */
+ if (!usb_ctx->stopped) {
+ oz_trace("Farewell indicated ep = 0x%x\n", ep_num);
+ oz_hcd_data_ind(usb_ctx->hport, ep_num, data, len);
+ }
+ oz_usb_put(usb_ctx);
+}
diff --git a/drivers/staging/quatech_usb2/quatech_usb2.c b/drivers/staging/quatech_usb2/quatech_usb2.c
index 897a3a99c794..bb977e00cc86 100644
--- a/drivers/staging/quatech_usb2/quatech_usb2.c
+++ b/drivers/staging/quatech_usb2/quatech_usb2.c
@@ -135,7 +135,6 @@ static struct usb_driver quausb2_usb_driver = {
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = quausb2_id_table,
- .no_dynamic_id = 1,
};
/**
@@ -1942,7 +1941,6 @@ static struct usb_serial_driver quatech2_device = {
.name = "quatech_usb2",
},
.description = DRIVER_DESC,
- .usb_driver = &quausb2_usb_driver,
.id_table = quausb2_id_table,
.num_ports = 8,
.open = qt2_open,
@@ -1964,41 +1962,11 @@ static struct usb_serial_driver quatech2_device = {
.write_bulk_callback = qt2_write_bulk_callback,
};
-static int __init quausb2_usb_init(void)
-{
- int retval;
-
- dbg("%s\n", __func__);
-
- /* register with usb-serial */
- retval = usb_serial_register(&quatech2_device);
-
- if (retval)
- goto failed_usb_serial_register;
-
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
-
- /* register with usb */
-
- retval = usb_register(&quausb2_usb_driver);
- if (retval == 0)
- return 0;
-
- /* if we're here, usb_register() failed */
- usb_serial_deregister(&quatech2_device);
-failed_usb_serial_register:
- return retval;
-}
-
-static void __exit quausb2_usb_exit(void)
-{
- usb_deregister(&quausb2_usb_driver);
- usb_serial_deregister(&quatech2_device);
-}
+static struct usb_serial_driver * const serial_drivers[] = {
+ &quatech2_device, NULL
+};
-module_init(quausb2_usb_init);
-module_exit(quausb2_usb_exit);
+module_usb_serial_driver(quausb2_usb_driver, serial_drivers);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/staging/quickstart/quickstart.c b/drivers/staging/quickstart/quickstart.c
index c60911c6ab3f..cac320738142 100644
--- a/drivers/staging/quickstart/quickstart.c
+++ b/drivers/staging/quickstart/quickstart.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2007-2010 Angelo Arrifano <miknix@gmail.com>
*
- * Information gathered from disassebled dsdt and from here:
+ * Information gathered from disassembled dsdt and from here:
* <http://www.microsoft.com/whdc/system/platform/firmware/DirAppLaunch.mspx>
*
* This program is free software; you can redistribute it and/or modify
@@ -23,7 +23,9 @@
*
*/
-#define QUICKSTART_VERSION "1.03"
+#define QUICKSTART_VERSION "1.04"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
@@ -37,118 +39,73 @@ MODULE_AUTHOR("Angelo Arrifano");
MODULE_DESCRIPTION("ACPI Direct App Launch driver");
MODULE_LICENSE("GPL");
-#define QUICKSTART_ACPI_DEVICE_NAME "quickstart"
-#define QUICKSTART_ACPI_CLASS "quickstart"
-#define QUICKSTART_ACPI_HID "PNP0C32"
-
-#define QUICKSTART_PF_DRIVER_NAME "quickstart"
-#define QUICKSTART_PF_DEVICE_NAME "quickstart"
-#define QUICKSTART_PF_DEVATTR_NAME "pressed_button"
+#define QUICKSTART_ACPI_DEVICE_NAME "quickstart"
+#define QUICKSTART_ACPI_CLASS "quickstart"
+#define QUICKSTART_ACPI_HID "PNP0C32"
-#define QUICKSTART_MAX_BTN_NAME_LEN 16
+#define QUICKSTART_PF_DRIVER_NAME "quickstart"
+#define QUICKSTART_PF_DEVICE_NAME "quickstart"
-/* There will be two events:
- * 0x02 - A hot button was pressed while device was off/sleeping.
- * 0x80 - A hot button was pressed while device was up. */
-#define QUICKSTART_EVENT_WAKE 0x02
-#define QUICKSTART_EVENT_RUNTIME 0x80
+/*
+ * There will be two events:
+ * 0x02 - A hot button was pressed while device was off/sleeping.
+ * 0x80 - A hot button was pressed while device was up.
+ */
+#define QUICKSTART_EVENT_WAKE 0x02
+#define QUICKSTART_EVENT_RUNTIME 0x80
-struct quickstart_btn {
+struct quickstart_button {
char *name;
unsigned int id;
- struct quickstart_btn *next;
+ struct list_head list;
};
-static struct quickstart_driver_data {
- struct quickstart_btn *btn_lst;
- struct quickstart_btn *pressed;
-} quickstart_data;
-
-/* ACPI driver Structs */
struct quickstart_acpi {
struct acpi_device *device;
- struct quickstart_btn *btn;
-};
-static int quickstart_acpi_add(struct acpi_device *device);
-static int quickstart_acpi_remove(struct acpi_device *device, int type);
-static const struct acpi_device_id quickstart_device_ids[] = {
- {QUICKSTART_ACPI_HID, 0},
- {"", 0},
+ struct quickstart_button *button;
};
-static struct acpi_driver quickstart_acpi_driver = {
- .name = "quickstart",
- .class = QUICKSTART_ACPI_CLASS,
- .ids = quickstart_device_ids,
- .ops = {
- .add = quickstart_acpi_add,
- .remove = quickstart_acpi_remove,
- },
-};
+static LIST_HEAD(buttons);
+static struct quickstart_button *pressed;
-/* Input device structs */
-struct input_dev *quickstart_input;
+static struct input_dev *quickstart_input;
-/* Platform driver structs */
-static ssize_t buttons_show(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-static ssize_t pressed_button_show(struct device *dev,
+/* Platform driver functions */
+static ssize_t quickstart_buttons_show(struct device *dev,
struct device_attribute *attr,
- char *buf);
-static ssize_t pressed_button_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count);
-static DEVICE_ATTR(pressed_button, 0666, pressed_button_show,
- pressed_button_store);
-static DEVICE_ATTR(buttons, 0444, buttons_show, NULL);
-static struct platform_device *pf_device;
-static struct platform_driver pf_driver = {
- .driver = {
- .name = QUICKSTART_PF_DRIVER_NAME,
- .owner = THIS_MODULE,
- }
-};
-
-/*
- * Platform driver functions
- */
-static ssize_t buttons_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ char *buf)
{
int count = 0;
- struct quickstart_btn *ptr = quickstart_data.btn_lst;
+ struct quickstart_button *b;
- if (!ptr)
+ if (list_empty(&buttons))
return snprintf(buf, PAGE_SIZE, "none");
- while (ptr && (count < PAGE_SIZE)) {
- if (ptr->name) {
- count += snprintf(buf + count,
- PAGE_SIZE - count,
- "%d\t%s\n", ptr->id, ptr->name);
+ list_for_each_entry(b, &buttons, list) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%u\t%s\n",
+ b->id, b->name);
+
+ if (count >= PAGE_SIZE) {
+ count = PAGE_SIZE;
+ break;
}
- ptr = ptr->next;
}
return count;
}
-static ssize_t pressed_button_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t quickstart_pressed_button_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n",
- (quickstart_data.pressed ?
- quickstart_data.pressed->name : "none"));
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ (pressed ? pressed->name : "none"));
}
-static ssize_t pressed_button_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t quickstart_pressed_button_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
if (count < 2)
return -EINVAL;
@@ -156,60 +113,40 @@ static ssize_t pressed_button_store(struct device *dev,
if (strncasecmp(buf, "none", 4) != 0)
return -EINVAL;
- quickstart_data.pressed = NULL;
+ pressed = NULL;
return count;
}
-/* Hotstart Helper functions */
-static int quickstart_btnlst_add(struct quickstart_btn **data)
+/* Helper functions */
+static struct quickstart_button *quickstart_buttons_add(void)
{
- struct quickstart_btn **ptr = &quickstart_data.btn_lst;
+ struct quickstart_button *b;
- while (*ptr)
- ptr = &((*ptr)->next);
+ b = kzalloc(sizeof(*b), GFP_KERNEL);
+ if (!b)
+ return NULL;
- *ptr = kzalloc(sizeof(struct quickstart_btn), GFP_KERNEL);
- if (!*ptr) {
- *data = NULL;
- return -ENOMEM;
- }
- *data = *ptr;
+ list_add_tail(&b->list, &buttons);
- return 0;
+ return b;
}
-static void quickstart_btnlst_del(struct quickstart_btn *data)
+static void quickstart_button_del(struct quickstart_button *data)
{
- struct quickstart_btn **ptr = &quickstart_data.btn_lst;
-
if (!data)
return;
- while (*ptr) {
- if (*ptr == data) {
- *ptr = (*ptr)->next;
- kfree(data);
- return;
- }
- ptr = &((*ptr)->next);
- }
-
- return;
+ list_del(&data->list);
+ kfree(data->name);
+ kfree(data);
}
-static void quickstart_btnlst_free(void)
+static void quickstart_buttons_free(void)
{
- struct quickstart_btn *ptr = quickstart_data.btn_lst;
- struct quickstart_btn *lptr = NULL;
-
- while (ptr) {
- lptr = ptr;
- ptr = ptr->next;
- kfree(lptr->name);
- kfree(lptr);
- }
+ struct quickstart_button *b, *n;
- return;
+ list_for_each_entry_safe(b, n, &buttons, list)
+ quickstart_button_del(b);
}
/* ACPI Driver functions */
@@ -220,107 +157,137 @@ static void quickstart_acpi_notify(acpi_handle handle, u32 event, void *data)
if (!quickstart)
return;
- if (event == QUICKSTART_EVENT_WAKE)
- quickstart_data.pressed = quickstart->btn;
- else if (event == QUICKSTART_EVENT_RUNTIME) {
- input_report_key(quickstart_input, quickstart->btn->id, 1);
+ switch (event) {
+ case QUICKSTART_EVENT_WAKE:
+ pressed = quickstart->button;
+ break;
+ case QUICKSTART_EVENT_RUNTIME:
+ input_report_key(quickstart_input, quickstart->button->id, 1);
input_sync(quickstart_input);
- input_report_key(quickstart_input, quickstart->btn->id, 0);
+ input_report_key(quickstart_input, quickstart->button->id, 0);
input_sync(quickstart_input);
+ break;
+ default:
+ pr_err("Unexpected ACPI event notify (%u)\n", event);
+ break;
}
- return;
}
-static void quickstart_acpi_ghid(struct quickstart_acpi *quickstart)
+static int quickstart_acpi_ghid(struct quickstart_acpi *quickstart)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- uint32_t usageid = 0;
-
- if (!quickstart)
- return;
+ int ret = 0;
- /* This returns a buffer telling the button usage ID,
- * and triggers pending notify events (The ones before booting). */
- status = acpi_evaluate_object(quickstart->device->handle,
- "GHID", NULL, &buffer);
- if (ACPI_FAILURE(status) || !buffer.pointer) {
- printk(KERN_ERR "quickstart: %s GHID method failed.\n",
- quickstart->btn->name);
- return;
+ /*
+ * This evaluation returns a buffer containing the button usage ID
+ * and triggers any notify events that were pending from before boot.
+ */
+ status = acpi_evaluate_object(quickstart->device->handle, "GHID", NULL,
+ &buffer);
+ if (ACPI_FAILURE(status)) {
+ pr_err("%s GHID method failed\n", quickstart->button->name);
+ return -EINVAL;
}
- if (buffer.length < 8)
- return;
-
- /* <<The GHID method can return a BYTE, WORD, or DWORD.
+ /*
+ * <<The GHID method can return a BYTE, WORD, or DWORD.
* The value must be encoded in little-endian byte
- * order (least significant byte first).>> */
- usageid = *((uint32_t *)(buffer.pointer + (buffer.length - 8)));
- quickstart->btn->id = usageid;
+ * order (least significant byte first).>>
+ */
+ switch (buffer.length) {
+ case 1:
+ quickstart->button->id = *(uint8_t *)buffer.pointer;
+ break;
+ case 2:
+ quickstart->button->id = *(uint16_t *)buffer.pointer;
+ break;
+ case 4:
+ quickstart->button->id = *(uint32_t *)buffer.pointer;
+ break;
+ case 8:
+ quickstart->button->id = *(uint64_t *)buffer.pointer;
+ break;
+ default:
+ pr_err("%s GHID method returned buffer of unexpected length %lu\n",
+ quickstart->button->name,
+ (unsigned long)buffer.length);
+ ret = -EINVAL;
+ break;
+ }
kfree(buffer.pointer);
+
+ return ret;
}
-static int quickstart_acpi_config(struct quickstart_acpi *quickstart, char *bid)
+static int quickstart_acpi_config(struct quickstart_acpi *quickstart)
{
- int len = strlen(bid);
- int ret;
+ char *bid = acpi_device_bid(quickstart->device);
+ char *name;
- /* Add button to list */
- ret = quickstart_btnlst_add(&quickstart->btn);
- if (ret)
- return ret;
+ name = kmalloc(strlen(bid) + 1, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
- quickstart->btn->name = kzalloc(len + 1, GFP_KERNEL);
- if (!quickstart->btn->name) {
- quickstart_btnlst_free();
+ /* Add new button to list */
+ quickstart->button = quickstart_buttons_add();
+ if (!quickstart->button) {
+ kfree(name);
return -ENOMEM;
}
- strcpy(quickstart->btn->name, bid);
+
+ quickstart->button->name = name;
+ strcpy(quickstart->button->name, bid);
return 0;
}
static int quickstart_acpi_add(struct acpi_device *device)
{
- int ret = 0;
- acpi_status status = AE_OK;
- struct quickstart_acpi *quickstart = NULL;
+ int ret;
+ acpi_status status;
+ struct quickstart_acpi *quickstart;
if (!device)
return -EINVAL;
- quickstart = kzalloc(sizeof(struct quickstart_acpi), GFP_KERNEL);
+ quickstart = kzalloc(sizeof(*quickstart), GFP_KERNEL);
if (!quickstart)
return -ENOMEM;
quickstart->device = device;
+
strcpy(acpi_device_name(device), QUICKSTART_ACPI_DEVICE_NAME);
strcpy(acpi_device_class(device), QUICKSTART_ACPI_CLASS);
device->driver_data = quickstart;
/* Add button to list and initialize some stuff */
- ret = quickstart_acpi_config(quickstart, acpi_device_bid(device));
- if (ret)
+ ret = quickstart_acpi_config(quickstart);
+ if (ret < 0)
goto fail_config;
- status = acpi_install_notify_handler(device->handle,
- ACPI_ALL_NOTIFY,
+ status = acpi_install_notify_handler(device->handle, ACPI_ALL_NOTIFY,
quickstart_acpi_notify,
quickstart);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR "quickstart: Notify handler install error\n");
+ pr_err("Notify handler install error\n");
ret = -ENODEV;
goto fail_installnotify;
}
- quickstart_acpi_ghid(quickstart);
+ ret = quickstart_acpi_ghid(quickstart);
+ if (ret < 0)
+ goto fail_ghid;
return 0;
+fail_ghid:
+ acpi_remove_notify_handler(device->handle, ACPI_ALL_NOTIFY,
+ quickstart_acpi_notify);
+
fail_installnotify:
- quickstart_btnlst_del(quickstart->btn);
+ quickstart_button_del(quickstart->button);
fail_config:
@@ -331,28 +298,54 @@ fail_config:
static int quickstart_acpi_remove(struct acpi_device *device, int type)
{
- acpi_status status = 0;
- struct quickstart_acpi *quickstart = NULL;
+ acpi_status status;
+ struct quickstart_acpi *quickstart;
- if (!device || !acpi_driver_data(device))
+ if (!device)
return -EINVAL;
quickstart = acpi_driver_data(device);
+ if (!quickstart)
+ return -EINVAL;
- status = acpi_remove_notify_handler(device->handle,
- ACPI_ALL_NOTIFY,
- quickstart_acpi_notify);
+ status = acpi_remove_notify_handler(device->handle, ACPI_ALL_NOTIFY,
+ quickstart_acpi_notify);
if (ACPI_FAILURE(status))
- printk(KERN_ERR "quickstart: Error removing notify handler\n");
-
+ pr_err("Error removing notify handler\n");
kfree(quickstart);
return 0;
}
-/* Module functions */
+/* Platform driver structs */
+static DEVICE_ATTR(pressed_button, 0666, quickstart_pressed_button_show,
+ quickstart_pressed_button_store);
+static DEVICE_ATTR(buttons, 0444, quickstart_buttons_show, NULL);
+static struct platform_device *pf_device;
+static struct platform_driver pf_driver = {
+ .driver = {
+ .name = QUICKSTART_PF_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ }
+};
+
+static const struct acpi_device_id quickstart_device_ids[] = {
+ {QUICKSTART_ACPI_HID, 0},
+ {"", 0},
+};
+
+static struct acpi_driver quickstart_acpi_driver = {
+ .name = "quickstart",
+ .class = QUICKSTART_ACPI_CLASS,
+ .ids = quickstart_device_ids,
+ .ops = {
+ .add = quickstart_acpi_add,
+ .remove = quickstart_acpi_remove,
+ },
+};
+/* Module functions */
static void quickstart_exit(void)
{
input_unregister_device(quickstart_input);
@@ -366,15 +359,12 @@ static void quickstart_exit(void)
acpi_bus_unregister_driver(&quickstart_acpi_driver);
- quickstart_btnlst_free();
-
- return;
+ quickstart_buttons_free();
}
static int __init quickstart_init_input(void)
{
- struct quickstart_btn **ptr = &quickstart_data.btn_lst;
- int count;
+ struct quickstart_button *b;
int ret;
quickstart_input = input_allocate_device();
@@ -385,11 +375,9 @@ static int __init quickstart_init_input(void)
quickstart_input->name = "Quickstart ACPI Buttons";
quickstart_input->id.bustype = BUS_HOST;
- while (*ptr) {
- count++;
+ list_for_each_entry(b, &buttons, list) {
set_bit(EV_KEY, quickstart_input->evbit);
- set_bit((*ptr)->id, quickstart_input->keybit);
- ptr = &((*ptr)->next);
+ set_bit(b->id, quickstart_input->keybit);
}
ret = input_register_device(quickstart_input);
@@ -415,7 +403,7 @@ static int __init quickstart_init(void)
return ret;
/* If existing bus with no devices */
- if (!quickstart_data.btn_lst) {
+ if (list_empty(&buttons)) {
ret = -ENODEV;
goto fail_pfdrv_reg;
}
@@ -444,14 +432,12 @@ static int __init quickstart_init(void)
if (ret)
goto fail_dev_file2;
-
/* Input device */
ret = quickstart_init_input();
if (ret)
goto fail_input;
- printk(KERN_INFO "quickstart: ACPI Direct App Launch ver %s\n",
- QUICKSTART_VERSION);
+ pr_info("ACPI Direct App Launch ver %s\n", QUICKSTART_VERSION);
return 0;
fail_input:
diff --git a/drivers/staging/ramster/Kconfig b/drivers/staging/ramster/Kconfig
new file mode 100644
index 000000000000..8b57b87edda4
--- /dev/null
+++ b/drivers/staging/ramster/Kconfig
@@ -0,0 +1,17 @@
+# Dependency on CONFIG_BROKEN is because there is a commit dependency
+# on a cleancache naming change to be submitted by Konrad Wilk:
+# a39c00ded70339603ffe1b0ffdf3ade85bcf009a "Merge branch 'stable/cleancache.v13'
+# into linux-next". Once this commit is present, BROKEN can be removed.
+config RAMSTER
+ bool "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
+ depends on (CLEANCACHE || FRONTSWAP) && CONFIGFS_FS=y && !ZCACHE && !XVMALLOC && !HIGHMEM && BROKEN
+ select LZO_COMPRESS
+ select LZO_DECOMPRESS
+ default n
+ help
+ RAMster allows RAM on other machines in a cluster to be utilized
+ dynamically and symmetrically instead of swapping to a local swap
+ disk, thus improving performance on memory-constrained workloads
+ while minimizing total RAM across the cluster. RAMster, like
+ zcache, compresses swap pages into local RAM, but then remotifies
+ the compressed pages to another node in the RAMster cluster.
diff --git a/drivers/staging/ramster/Makefile b/drivers/staging/ramster/Makefile
new file mode 100644
index 000000000000..bcc13c87f996
--- /dev/null
+++ b/drivers/staging/ramster/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_RAMSTER) += zcache-main.o tmem.o r2net.o xvmalloc.o cluster/
diff --git a/drivers/staging/ramster/TODO b/drivers/staging/ramster/TODO
new file mode 100644
index 000000000000..46fcf0c58acf
--- /dev/null
+++ b/drivers/staging/ramster/TODO
@@ -0,0 +1,13 @@
+For this staging driver, RAMster duplicates code from drivers/staging/zcache
+then incorporates changes to the local copy of the code. For V5, it also
+directly incorporates the soon-to-be-removed drivers/staging/zram/xvmalloc.[ch]
+as all testing has been done with xvmalloc rather than the new zsmalloc.
+Before RAMster can be promoted from staging, the zcache and RAMster drivers
+should be either merged or reorganized to separate out common code.
+
+Until V4, RAMster duplicated code from fs/ocfs2/cluster, but this made
+RAMster incompatible with ocfs2 running in the same kernel and included
+lots of code that could be removed. As of V5, the ocfs2 code has been
+mined and made RAMster-specific: it communicates with a userland
+ramster-tools package rather than ocfs2-tools, and can co-exist with
+ocfs2 both in the same kernel and in userland on the same machine.
diff --git a/drivers/staging/ramster/cluster/Makefile b/drivers/staging/ramster/cluster/Makefile
new file mode 100644
index 000000000000..9c6943652c01
--- /dev/null
+++ b/drivers/staging/ramster/cluster/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_RAMSTER) += ramster_nodemanager.o
+
+ramster_nodemanager-objs := heartbeat.o masklog.o nodemanager.o tcp.o
diff --git a/drivers/staging/ramster/cluster/heartbeat.c b/drivers/staging/ramster/cluster/heartbeat.c
new file mode 100644
index 000000000000..00209490756e
--- /dev/null
+++ b/drivers/staging/ramster/cluster/heartbeat.c
@@ -0,0 +1,464 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/configfs.h>
+
+#include "heartbeat.h"
+#include "tcp.h"
+#include "nodemanager.h"
+
+#include "masklog.h"
+
+/*
+ * The first heartbeat pass had one global thread that would serialize all hb
+ * callback calls. This global serializing sem should only be removed once
+ * we've made sure that all callees can deal with being called concurrently
+ * from multiple hb region threads.
+ */
+static DECLARE_RWSEM(r2hb_callback_sem);
+
+/*
+ * multiple hb threads are watching multiple regions. A node is live
+ * whenever any of the threads sees activity from the node in its region.
+ */
+static DEFINE_SPINLOCK(r2hb_live_lock);
+static unsigned long r2hb_live_node_bitmap[BITS_TO_LONGS(R2NM_MAX_NODES)];
+
+static struct r2hb_callback {
+ struct list_head list;
+} r2hb_callbacks[R2HB_NUM_CB];
+
+enum r2hb_heartbeat_modes {
+ R2HB_HEARTBEAT_LOCAL = 0,
+ R2HB_HEARTBEAT_GLOBAL,
+ R2HB_HEARTBEAT_NUM_MODES,
+};
+
+char *r2hb_heartbeat_mode_desc[R2HB_HEARTBEAT_NUM_MODES] = {
+ "local", /* R2HB_HEARTBEAT_LOCAL */
+ "global", /* R2HB_HEARTBEAT_GLOBAL */
+};
+
+unsigned int r2hb_dead_threshold = R2HB_DEFAULT_DEAD_THRESHOLD;
+unsigned int r2hb_heartbeat_mode = R2HB_HEARTBEAT_LOCAL;
+
+/* Only sets a new threshold if there are no active regions.
+ *
+ * No locking or otherwise interesting code is required for reading
+ * r2hb_dead_threshold as it can't change once regions are active and
+ * it's not interesting to anyone until then anyway. */
+static void r2hb_dead_threshold_set(unsigned int threshold)
+{
+ if (threshold > R2HB_MIN_DEAD_THRESHOLD) {
+ spin_lock(&r2hb_live_lock);
+ r2hb_dead_threshold = threshold;
+ spin_unlock(&r2hb_live_lock);
+ }
+}
+
+static int r2hb_global_heartbeat_mode_set(unsigned int hb_mode)
+{
+ int ret = -1;
+
+ if (hb_mode < R2HB_HEARTBEAT_NUM_MODES) {
+ spin_lock(&r2hb_live_lock);
+ r2hb_heartbeat_mode = hb_mode;
+ ret = 0;
+ spin_unlock(&r2hb_live_lock);
+ }
+
+ return ret;
+}
+
+void r2hb_exit(void)
+{
+}
+
+int r2hb_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r2hb_callbacks); i++)
+ INIT_LIST_HEAD(&r2hb_callbacks[i].list);
+
+ memset(r2hb_live_node_bitmap, 0, sizeof(r2hb_live_node_bitmap));
+
+ return 0;
+}
+
+/* if we're already in a callback then we're already serialized by the sem */
+static void r2hb_fill_node_map_from_callback(unsigned long *map,
+ unsigned bytes)
+{
+ BUG_ON(bytes < (BITS_TO_LONGS(R2NM_MAX_NODES) * sizeof(unsigned long)));
+
+ memcpy(map, &r2hb_live_node_bitmap, bytes);
+}
+
+/*
+ * get a map of all nodes that are heartbeating in any regions
+ */
+void r2hb_fill_node_map(unsigned long *map, unsigned bytes)
+{
+ /* callers want to serialize this map and callbacks so that they
+ * can trust that they don't miss nodes coming to the party */
+ down_read(&r2hb_callback_sem);
+ spin_lock(&r2hb_live_lock);
+ r2hb_fill_node_map_from_callback(map, bytes);
+ spin_unlock(&r2hb_live_lock);
+ up_read(&r2hb_callback_sem);
+}
+EXPORT_SYMBOL_GPL(r2hb_fill_node_map);
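
A hedged usage sketch of the snapshot API above; dump_live_nodes() is an
invented caller, and R2NM_MAX_NODES is assumed to come from nodemanager.h.
The copy is stable because r2hb_fill_node_map() holds the callback rwsem
and the live lock while copying.

    #include <linux/bitops.h>
    #include <linux/printk.h>
    #include "heartbeat.h"
    #include "nodemanager.h"

    static void dump_live_nodes(void)
    {
            unsigned long map[BITS_TO_LONGS(R2NM_MAX_NODES)];
            int node;

            r2hb_fill_node_map(map, sizeof(map));
            for_each_set_bit(node, map, R2NM_MAX_NODES)
                    pr_info("node %d is heartbeating\n", node);
    }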
+
+/*
+ * heartbeat configfs bits. The heartbeat set is a default set under
+ * the cluster set in nodemanager.c.
+ */
+
+/* heartbeat set */
+
+struct r2hb_hb_group {
+ struct config_group hs_group;
+ /* some stuff? */
+};
+
+static struct r2hb_hb_group *to_r2hb_hb_group(struct config_group *group)
+{
+ return group ?
+ container_of(group, struct r2hb_hb_group, hs_group)
+ : NULL;
+}
+
+static struct config_item r2hb_config_item;
+
+static struct config_item *r2hb_hb_group_make_item(struct config_group *group,
+ const char *name)
+{
+ int ret;
+
+ if (strlen(name) > R2HB_MAX_REGION_NAME_LEN) {
+ ret = -ENAMETOOLONG;
+ goto free;
+ }
+
+ config_item_put(&r2hb_config_item);
+
+ return &r2hb_config_item;
+free:
+ return ERR_PTR(ret);
+}
+
+static void r2hb_hb_group_drop_item(struct config_group *group,
+ struct config_item *item)
+{
+ if (r2hb_global_heartbeat_active()) {
+ printk(KERN_NOTICE "ramster: Heartbeat %s "
+ "on region %s (%s)\n",
+ "stopped/aborted", config_item_name(item),
+ "no region");
+ }
+
+ config_item_put(item);
+}
+
+struct r2hb_hb_group_attribute {
+ struct configfs_attribute attr;
+ ssize_t (*show)(struct r2hb_hb_group *, char *);
+ ssize_t (*store)(struct r2hb_hb_group *, const char *, size_t);
+};
+
+static ssize_t r2hb_hb_group_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
+{
+ struct r2hb_hb_group *reg = to_r2hb_hb_group(to_config_group(item));
+ struct r2hb_hb_group_attribute *r2hb_hb_group_attr =
+ container_of(attr, struct r2hb_hb_group_attribute, attr);
+ ssize_t ret = 0;
+
+ if (r2hb_hb_group_attr->show)
+ ret = r2hb_hb_group_attr->show(reg, page);
+ return ret;
+}
+
+static ssize_t r2hb_hb_group_store(struct config_item *item,
+ struct configfs_attribute *attr,
+ const char *page, size_t count)
+{
+ struct r2hb_hb_group *reg = to_r2hb_hb_group(to_config_group(item));
+ struct r2hb_hb_group_attribute *r2hb_hb_group_attr =
+ container_of(attr, struct r2hb_hb_group_attribute, attr);
+ ssize_t ret = -EINVAL;
+
+ if (r2hb_hb_group_attr->store)
+ ret = r2hb_hb_group_attr->store(reg, page, count);
+ return ret;
+}
+
+static ssize_t r2hb_hb_group_threshold_show(struct r2hb_hb_group *group,
+ char *page)
+{
+ return sprintf(page, "%u\n", r2hb_dead_threshold);
+}
+
+static ssize_t r2hb_hb_group_threshold_store(struct r2hb_hb_group *group,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ char *p = (char *)page;
+ int err;
+
+ err = kstrtoul(p, 10, &tmp);
+ if (err)
+ return err;
+
+ /* this will validate ranges for us. */
+ r2hb_dead_threshold_set((unsigned int) tmp);
+
+ return count;
+}
+
+static
+ssize_t r2hb_hb_group_mode_show(struct r2hb_hb_group *group,
+ char *page)
+{
+ return sprintf(page, "%s\n",
+ r2hb_heartbeat_mode_desc[r2hb_heartbeat_mode]);
+}
+
+static
+ssize_t r2hb_hb_group_mode_store(struct r2hb_hb_group *group,
+ const char *page, size_t count)
+{
+ unsigned int i;
+ int ret;
+ size_t len;
+
+ len = (page[count - 1] == '\n') ? count - 1 : count;
+ if (!len)
+ return -EINVAL;
+
+ for (i = 0; i < R2HB_HEARTBEAT_NUM_MODES; ++i) {
+ if (strnicmp(page, r2hb_heartbeat_mode_desc[i], len))
+ continue;
+
+ ret = r2hb_global_heartbeat_mode_set(i);
+ if (!ret)
+ printk(KERN_NOTICE "ramster: Heartbeat mode "
+ "set to %s\n",
+ r2hb_heartbeat_mode_desc[i]);
+ return count;
+ }
+
+ return -EINVAL;
+
+}
+
+static struct r2hb_hb_group_attribute r2hb_hb_group_attr_threshold = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "dead_threshold",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2hb_hb_group_threshold_show,
+ .store = r2hb_hb_group_threshold_store,
+};
+
+static struct r2hb_hb_group_attribute r2hb_hb_group_attr_mode = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "mode",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2hb_hb_group_mode_show,
+ .store = r2hb_hb_group_mode_store,
+};
+
+static struct configfs_attribute *r2hb_hb_group_attrs[] = {
+ &r2hb_hb_group_attr_threshold.attr,
+ &r2hb_hb_group_attr_mode.attr,
+ NULL,
+};
+
+static struct configfs_item_operations r2hb_heartbeat_group_item_ops = {
+ .show_attribute = r2hb_hb_group_show,
+ .store_attribute = r2hb_hb_group_store,
+};
+
+static struct configfs_group_operations r2hb_hb_group_group_ops = {
+ .make_item = r2hb_hb_group_make_item,
+ .drop_item = r2hb_hb_group_drop_item,
+};
+
+static struct config_item_type r2hb_hb_group_type = {
+ .ct_group_ops = &r2hb_hb_group_group_ops,
+ .ct_item_ops = &r2hb_heartbeat_group_item_ops,
+ .ct_attrs = r2hb_hb_group_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* this is just here to avoid touching group in heartbeat.h which the
+ * entire damn world #includes */
+struct config_group *r2hb_alloc_hb_set(void)
+{
+ struct r2hb_hb_group *hs = NULL;
+ struct config_group *ret = NULL;
+
+ hs = kzalloc(sizeof(struct r2hb_hb_group), GFP_KERNEL);
+ if (hs == NULL)
+ goto out;
+
+ config_group_init_type_name(&hs->hs_group, "heartbeat",
+ &r2hb_hb_group_type);
+
+ ret = &hs->hs_group;
+out:
+ if (ret == NULL)
+ kfree(hs);
+ return ret;
+}
+
+void r2hb_free_hb_set(struct config_group *group)
+{
+ struct r2hb_hb_group *hs = to_r2hb_hb_group(group);
+ kfree(hs);
+}
+
+/* hb callback registration and issuing */
+
+static struct r2hb_callback *hbcall_from_type(enum r2hb_callback_type type)
+{
+ if (type == R2HB_NUM_CB)
+ return ERR_PTR(-EINVAL);
+
+ return &r2hb_callbacks[type];
+}
+
+void r2hb_setup_callback(struct r2hb_callback_func *hc,
+ enum r2hb_callback_type type,
+ r2hb_cb_func *func,
+ void *data,
+ int priority)
+{
+ INIT_LIST_HEAD(&hc->hc_item);
+ hc->hc_func = func;
+ hc->hc_data = data;
+ hc->hc_priority = priority;
+ hc->hc_type = type;
+ hc->hc_magic = R2HB_CB_MAGIC;
+}
+EXPORT_SYMBOL_GPL(r2hb_setup_callback);
+
+int r2hb_register_callback(const char *region_uuid,
+ struct r2hb_callback_func *hc)
+{
+ struct r2hb_callback_func *tmp;
+ struct list_head *iter;
+ struct r2hb_callback *hbcall;
+ int ret;
+
+ BUG_ON(hc->hc_magic != R2HB_CB_MAGIC);
+ BUG_ON(!list_empty(&hc->hc_item));
+
+ hbcall = hbcall_from_type(hc->hc_type);
+ if (IS_ERR(hbcall)) {
+ ret = PTR_ERR(hbcall);
+ goto out;
+ }
+
+ down_write(&r2hb_callback_sem);
+
+ list_for_each(iter, &hbcall->list) {
+ tmp = list_entry(iter, struct r2hb_callback_func, hc_item);
+ if (hc->hc_priority < tmp->hc_priority) {
+ list_add_tail(&hc->hc_item, iter);
+ break;
+ }
+ }
+ if (list_empty(&hc->hc_item))
+ list_add_tail(&hc->hc_item, &hbcall->list);
+
+ up_write(&r2hb_callback_sem);
+ ret = 0;
+out:
+ mlog(ML_CLUSTER, "returning %d on behalf of %p for funcs %p\n",
+ ret, __builtin_return_address(0), hc);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(r2hb_register_callback);
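
Callbacks are kept sorted so that lower hc_priority values run first, with
equal priorities appended after existing entries. A hedged usage sketch of
the registration API; the my_* names are invented, and NULL is passed for
region_uuid because the implementation above ignores it.

    #include "heartbeat.h"

    static void my_node_down(struct r2nm_node *node, int node_num,
                             void *data)
    {
            /* react to the node leaving the cluster */
    }

    static struct r2hb_callback_func my_cb;

    static int my_setup(void)
    {
            r2hb_setup_callback(&my_cb, R2HB_NODE_DOWN_CB, my_node_down,
                                NULL, 0);
            return r2hb_register_callback(NULL, &my_cb);
    }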
+
+void r2hb_unregister_callback(const char *region_uuid,
+ struct r2hb_callback_func *hc)
+{
+ BUG_ON(hc->hc_magic != R2HB_CB_MAGIC);
+
+ mlog(ML_CLUSTER, "on behalf of %p for funcs %p\n",
+ __builtin_return_address(0), hc);
+
+ /* XXX Can this happen _with_ a region reference? */
+ if (list_empty(&hc->hc_item))
+ return;
+
+ down_write(&r2hb_callback_sem);
+
+ list_del_init(&hc->hc_item);
+
+ up_write(&r2hb_callback_sem);
+}
+EXPORT_SYMBOL_GPL(r2hb_unregister_callback);
+
+int r2hb_check_node_heartbeating_from_callback(u8 node_num)
+{
+ unsigned long testing_map[BITS_TO_LONGS(R2NM_MAX_NODES)];
+
+ r2hb_fill_node_map_from_callback(testing_map, sizeof(testing_map));
+ if (!test_bit(node_num, testing_map)) {
+ mlog(ML_HEARTBEAT,
+ "node (%u) does not have heartbeating enabled.\n",
+ node_num);
+ return 0;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(r2hb_check_node_heartbeating_from_callback);
+
+void r2hb_stop_all_regions(void)
+{
+}
+EXPORT_SYMBOL_GPL(r2hb_stop_all_regions);
+
+/*
+ * this is just a hack until we get the plumbing which flips file systems
+ * read only and drops the hb ref instead of killing the node dead.
+ */
+int r2hb_global_heartbeat_active(void)
+{
+ return (r2hb_heartbeat_mode == R2HB_HEARTBEAT_GLOBAL);
+}
+EXPORT_SYMBOL(r2hb_global_heartbeat_active);
+
+/* added for RAMster */
+void r2hb_manual_set_node_heartbeating(int node_num)
+{
+ if (node_num < R2NM_MAX_NODES)
+ set_bit(node_num, r2hb_live_node_bitmap);
+}
+EXPORT_SYMBOL(r2hb_manual_set_node_heartbeating);
diff --git a/drivers/staging/ramster/cluster/heartbeat.h b/drivers/staging/ramster/cluster/heartbeat.h
new file mode 100644
index 000000000000..6cbc775bd63b
--- /dev/null
+++ b/drivers/staging/ramster/cluster/heartbeat.h
@@ -0,0 +1,87 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * heartbeat.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#ifndef R2CLUSTER_HEARTBEAT_H
+#define R2CLUSTER_HEARTBEAT_H
+
+#define R2HB_REGION_TIMEOUT_MS 2000
+
+#define R2HB_MAX_REGION_NAME_LEN 32
+
+/* number of changes to be seen as live */
+#define R2HB_LIVE_THRESHOLD 2
+/* number of equal samples to be seen as dead */
+extern unsigned int r2hb_dead_threshold;
+#define R2HB_DEFAULT_DEAD_THRESHOLD 31
+/* Otherwise MAX_WRITE_TIMEOUT will be zero... */
+#define R2HB_MIN_DEAD_THRESHOLD 2
+#define R2HB_MAX_WRITE_TIMEOUT_MS \
+ (R2HB_REGION_TIMEOUT_MS * (r2hb_dead_threshold - 1))
+
+#define R2HB_CB_MAGIC 0x51d1e4ec
+
+/* callback stuff */
+enum r2hb_callback_type {
+ R2HB_NODE_DOWN_CB = 0,
+ R2HB_NODE_UP_CB,
+ R2HB_NUM_CB
+};
+
+struct r2nm_node;
+typedef void (r2hb_cb_func)(struct r2nm_node *, int, void *);
+
+struct r2hb_callback_func {
+ u32 hc_magic;
+ struct list_head hc_item;
+ r2hb_cb_func *hc_func;
+ void *hc_data;
+ int hc_priority;
+ enum r2hb_callback_type hc_type;
+};
+
+struct config_group *r2hb_alloc_hb_set(void);
+void r2hb_free_hb_set(struct config_group *group);
+
+void r2hb_setup_callback(struct r2hb_callback_func *hc,
+ enum r2hb_callback_type type,
+ r2hb_cb_func *func,
+ void *data,
+ int priority);
+int r2hb_register_callback(const char *region_uuid,
+ struct r2hb_callback_func *hc);
+void r2hb_unregister_callback(const char *region_uuid,
+ struct r2hb_callback_func *hc);
+void r2hb_fill_node_map(unsigned long *map,
+ unsigned bytes);
+void r2hb_exit(void);
+int r2hb_init(void);
+int r2hb_check_node_heartbeating_from_callback(u8 node_num);
+void r2hb_stop_all_regions(void);
+int r2hb_get_all_regions(char *region_uuids, u8 numregions);
+int r2hb_global_heartbeat_active(void);
+void r2hb_manual_set_node_heartbeating(int);
+
+#endif /* R2CLUSTER_HEARTBEAT_H */
diff --git a/drivers/staging/ramster/cluster/masklog.c b/drivers/staging/ramster/cluster/masklog.c
new file mode 100644
index 000000000000..1261d8579aae
--- /dev/null
+++ b/drivers/staging/ramster/cluster/masklog.c
@@ -0,0 +1,155 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+
+#include "masklog.h"
+
+struct mlog_bits r2_mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK);
+EXPORT_SYMBOL_GPL(r2_mlog_and_bits);
+struct mlog_bits r2_mlog_not_bits = MLOG_BITS_RHS(0);
+EXPORT_SYMBOL_GPL(r2_mlog_not_bits);
+
+static ssize_t mlog_mask_show(u64 mask, char *buf)
+{
+ char *state;
+
+ if (__mlog_test_u64(mask, r2_mlog_and_bits))
+ state = "allow";
+ else if (__mlog_test_u64(mask, r2_mlog_not_bits))
+ state = "deny";
+ else
+ state = "off";
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", state);
+}
+
+static ssize_t mlog_mask_store(u64 mask, const char *buf, size_t count)
+{
+ if (!strnicmp(buf, "allow", 5)) {
+ __mlog_set_u64(mask, r2_mlog_and_bits);
+ __mlog_clear_u64(mask, r2_mlog_not_bits);
+ } else if (!strnicmp(buf, "deny", 4)) {
+ __mlog_set_u64(mask, r2_mlog_not_bits);
+ __mlog_clear_u64(mask, r2_mlog_and_bits);
+ } else if (!strnicmp(buf, "off", 3)) {
+ __mlog_clear_u64(mask, r2_mlog_not_bits);
+ __mlog_clear_u64(mask, r2_mlog_and_bits);
+ } else
+ return -EINVAL;
+
+ return count;
+}
+
+struct mlog_attribute {
+ struct attribute attr;
+ u64 mask;
+};
+
+#define to_mlog_attr(_attr) container_of(_attr, struct mlog_attribute, attr)
+
+#define define_mask(_name) { \
+ .attr = { \
+ .name = #_name, \
+ .mode = S_IRUGO | S_IWUSR, \
+ }, \
+ .mask = ML_##_name, \
+}
+
+static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
+ define_mask(TCP),
+ define_mask(MSG),
+ define_mask(SOCKET),
+ define_mask(HEARTBEAT),
+ define_mask(HB_BIO),
+ define_mask(DLMFS),
+ define_mask(DLM),
+ define_mask(DLM_DOMAIN),
+ define_mask(DLM_THREAD),
+ define_mask(DLM_MASTER),
+ define_mask(DLM_RECOVERY),
+ define_mask(DLM_GLUE),
+ define_mask(VOTE),
+ define_mask(CONN),
+ define_mask(QUORUM),
+ define_mask(BASTS),
+ define_mask(CLUSTER),
+ define_mask(ERROR),
+ define_mask(NOTICE),
+ define_mask(KTHREAD),
+};
+
+static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, };
+
+static ssize_t mlog_show(struct kobject *obj, struct attribute *attr,
+ char *buf)
+{
+ struct mlog_attribute *mlog_attr = to_mlog_attr(attr);
+
+ return mlog_mask_show(mlog_attr->mask, buf);
+}
+
+static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mlog_attribute *mlog_attr = to_mlog_attr(attr);
+
+ return mlog_mask_store(mlog_attr->mask, buf, count);
+}
+
+static const struct sysfs_ops mlog_attr_ops = {
+ .show = mlog_show,
+ .store = mlog_store,
+};
+
+static struct kobj_type mlog_ktype = {
+ .default_attrs = mlog_attr_ptrs,
+ .sysfs_ops = &mlog_attr_ops,
+};
+
+static struct kset mlog_kset = {
+ .kobj = {.ktype = &mlog_ktype},
+};
+
+int r2_mlog_sys_init(struct kset *r2cb_kset)
+{
+ int i = 0;
+
+ while (mlog_attrs[i].attr.mode) {
+ mlog_attr_ptrs[i] = &mlog_attrs[i].attr;
+ i++;
+ }
+ mlog_attr_ptrs[i] = NULL;
+
+ kobject_set_name(&mlog_kset.kobj, "logmask");
+ mlog_kset.kobj.kset = r2cb_kset;
+ return kset_register(&mlog_kset);
+}
+
+void r2_mlog_sys_shutdown(void)
+{
+ kset_unregister(&mlog_kset);
+}
diff --git a/drivers/staging/ramster/cluster/masklog.h b/drivers/staging/ramster/cluster/masklog.h
new file mode 100644
index 000000000000..918ae110b699
--- /dev/null
+++ b/drivers/staging/ramster/cluster/masklog.h
@@ -0,0 +1,220 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2005, 2012 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef R2CLUSTER_MASKLOG_H
+#define R2CLUSTER_MASKLOG_H
+
+/*
+ * For now this is a trivial wrapper around printk() that gives the critical
+ * ability to enable sets of debugging output at run-time. In the future this
+ * will almost certainly be redirected to relayfs so that it can pay a
+ * substantially lower Heisenberg tax.
+ *
+ * Callers associate each message with a bitmask, and a global bitmask is
+ * maintained with help from sysfs. If any of the bits match, the message is
+ * output.
+ *
+ * We must have efficient bit tests on i386 and it seems gcc still emits crazy
+ * code for the 64bit compare. It emits very good code for the dual unsigned
+ * long tests, though, completely avoiding tests that can never pass if the
+ * caller gives a constant bitmask that fills one of the longs with all 0s. So
+ * the desire is to have almost all of the calls decided on by comparing just
+ * one of the longs. This leads to having infrequently given bits that are
+ * frequently matched in the high bits.
+ *
+ * _ERROR and _NOTICE are used for messages that always go to the console and
+ * have appropriate KERN_ prefixes. We wrap these in our function instead of
+ * just calling printk() so that this can eventually make its way through
+ * relayfs along with the debugging messages. Everything else gets KERN_DEBUG.
+ * The inline tests and macro dance give GCC the opportunity to quite cleverly
+ * only emit the appropriate printk() when the caller passes in a constant
+ * mask, as is almost always the case.
+ *
+ * All this bitmask nonsense is managed from the files under
+ * /sys/fs/r2cb/logmask/. Reading the files gives a straightforward
+ * indication of which bits are allowed (allow) or denied (off/deny).
+ * TCP off
+ * MSG off
+ * SOCKET off
+ * ERROR allow
+ * NOTICE allow
+ *
+ * Writing changes the state of a given bit and requires a strictly formatted
+ * single write() call:
+ *
+ * write(fd, "allow", 5);
+ *
+ * Echoing the allow/deny/off strings into the logmask files flips the bits
+ * on or off as expected; for example, this bash script:
+ *
+ * log_mask="/sys/fs/r2cb/logmask"
+ * for node in TCP MSG SOCKET ERROR NOTICE; do
+ * echo allow >"$log_mask"/"$node"
+ * done
+ *
+ * The debugfs.ramster tool can also flip the bits with the -l option:
+ *
+ * debugfs.ramster -l TCP allow
+ */
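+
+/*
+ * Illustrative sketch, not part of the original patch: a typical .c file
+ * picks its default mask before including this header and then logs
+ * against whichever bits apply (node_num and ret are placeholders):
+ *
+ *	#define MLOG_MASK_PREFIX ML_TCP
+ *	#include "masklog.h"
+ *
+ *	mlog(ML_CONN, "connecting to node %u\n", node_num);
+ *	mlog_errno(ret);
+ *
+ * tcp.c in this patch does exactly this with MLOG_MASK_PREFIX ML_TCP.
+ */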
+
+/* for task_struct */
+#include <linux/sched.h>
+
+/* bits that are frequently given and infrequently matched in the low word */
+/* NOTE: If you add a flag, you need to also update masklog.c! */
+#define ML_TCP 0x0000000000000001ULL /* net cluster/tcp.c */
+#define ML_MSG 0x0000000000000002ULL /* net network messages */
+#define ML_SOCKET 0x0000000000000004ULL /* net socket lifetime */
+#define ML_HEARTBEAT 0x0000000000000008ULL /* hb all heartbeat tracking */
+#define ML_HB_BIO 0x0000000000000010ULL /* hb io tracing */
+#define ML_DLMFS 0x0000000000000020ULL /* dlm user dlmfs */
+#define ML_DLM 0x0000000000000040ULL /* dlm general debugging */
+#define ML_DLM_DOMAIN 0x0000000000000080ULL /* dlm domain debugging */
+#define ML_DLM_THREAD 0x0000000000000100ULL /* dlm domain thread */
+#define ML_DLM_MASTER 0x0000000000000200ULL /* dlm master functions */
+#define ML_DLM_RECOVERY 0x0000000000000400ULL /* dlm recovery */
+#define ML_DLM_GLUE 0x0000000000000800ULL /* ramster dlm glue layer */
+#define ML_VOTE 0x0000000000001000ULL /* ramster node messaging */
+#define ML_CONN 0x0000000000002000ULL /* net connection management */
+#define ML_QUORUM 0x0000000000004000ULL /* net connection quorum */
+#define ML_BASTS 0x0000000000008000ULL /* dlmglue asts and basts */
+#define ML_CLUSTER 0x0000000000010000ULL /* cluster stack */
+
+/* bits that are infrequently given and frequently matched in the high word */
+#define ML_ERROR 0x1000000000000000ULL /* sent to KERN_ERR */
+#define ML_NOTICE 0x2000000000000000ULL /* sent to KERN_NOTICE */
+#define ML_KTHREAD 0x4000000000000000ULL /* kernel thread activity */
+
+#define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE)
+#ifndef MLOG_MASK_PREFIX
+#define MLOG_MASK_PREFIX 0
+#endif
+
+/*
+ * When logging is disabled, force the bit test to 0 for anything other
+ * than errors and notices, allowing gcc to remove the code completely.
+ * When enabled, allow all masks.
+ */
+#if defined(CONFIG_RAMSTER_DEBUG_MASKLOG)
+#define ML_ALLOWED_BITS (~0)
+#else
+#define ML_ALLOWED_BITS (ML_ERROR|ML_NOTICE)
+#endif
+
+#define MLOG_MAX_BITS 64
+
+struct mlog_bits {
+ unsigned long words[MLOG_MAX_BITS / BITS_PER_LONG];
+};
+
+extern struct mlog_bits r2_mlog_and_bits, r2_mlog_not_bits;
+
+#if BITS_PER_LONG == 32
+
+#define __mlog_test_u64(mask, bits) \
+ ((u32)(mask & 0xffffffff) & bits.words[0] || \
+ ((u64)(mask) >> 32) & bits.words[1])
+#define __mlog_set_u64(mask, bits) do { \
+ bits.words[0] |= (u32)(mask & 0xffffffff); \
+ bits.words[1] |= (u64)(mask) >> 32; \
+} while (0)
+#define __mlog_clear_u64(mask, bits) do { \
+ bits.words[0] &= ~((u32)(mask & 0xffffffff)); \
+ bits.words[1] &= ~((u64)(mask) >> 32); \
+} while (0)
+#define MLOG_BITS_RHS(mask) { \
+ { \
+ [0] = (u32)(mask & 0xffffffff), \
+ [1] = (u64)(mask) >> 32, \
+ } \
+}
+
+#else /* 32bit long above, 64bit long below */
+
+#define __mlog_test_u64(mask, bits) ((mask) & bits.words[0])
+#define __mlog_set_u64(mask, bits) do { \
+ bits.words[0] |= (mask); \
+} while (0)
+#define __mlog_clear_u64(mask, bits) do { \
+ bits.words[0] &= ~(mask); \
+} while (0)
+#define MLOG_BITS_RHS(mask) { { (mask) } }
+
+#endif
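+
+/*
+ * Worked example, illustrative and not part of the original patch: with
+ * 32-bit longs, a constant mask such as ML_ERROR (0x1000000000000000ULL)
+ * has an all-zero low word, so the words[0] term of __mlog_test_u64() is
+ * constant false and gcc reduces the whole test to one compare against
+ * bits.words[1]. The converse holds for low-word masks like ML_TCP.
+ */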
+
+/*
+ * smp_processor_id() "helpfully" screams when called outside preemptible
+ * regions in current kernels. sles doesn't have the variants that don't
+ * scream. just do this instead of trying to guess which we're building
+ * against.. *sigh*.
+ */
+#define __mlog_cpu_guess ({ \
+ unsigned long _cpu = get_cpu(); \
+ put_cpu(); \
+ _cpu; \
+})
+
+/* In the following two macros, the whitespace after the ',' just
+ * before ##args is intentional. Otherwise, gcc 2.95 will eat the
+ * previous token if args expands to nothing.
+ */
+#define __mlog_printk(level, fmt, args...) \
+ printk(level "(%s,%u,%lu):%s:%d " fmt, current->comm, \
+ task_pid_nr(current), __mlog_cpu_guess, \
+ __PRETTY_FUNCTION__, __LINE__ , ##args)
+
+#define mlog(mask, fmt, args...) do { \
+ u64 __m = MLOG_MASK_PREFIX | (mask); \
+ if ((__m & ML_ALLOWED_BITS) && \
+ __mlog_test_u64(__m, r2_mlog_and_bits) && \
+ !__mlog_test_u64(__m, r2_mlog_not_bits)) { \
+ if (__m & ML_ERROR) \
+ __mlog_printk(KERN_ERR, "ERROR: "fmt , ##args); \
+ else if (__m & ML_NOTICE) \
+ __mlog_printk(KERN_NOTICE, fmt , ##args); \
+ else \
+ __mlog_printk(KERN_INFO, fmt , ##args); \
+ } \
+} while (0)
+
+#define mlog_errno(st) do { \
+ int _st = (st); \
+ if (_st != -ERESTARTSYS && _st != -EINTR && \
+ _st != AOP_TRUNCATED_PAGE && _st != -ENOSPC) \
+ mlog(ML_ERROR, "status = %lld\n", (long long)_st); \
+} while (0)
+
+#define mlog_bug_on_msg(cond, fmt, args...) do { \
+ if (cond) { \
+ mlog(ML_ERROR, "bug expression: " #cond "\n"); \
+ mlog(ML_ERROR, fmt, ##args); \
+ BUG(); \
+ } \
+} while (0)
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+int r2_mlog_sys_init(struct kset *r2cb_subsys);
+void r2_mlog_sys_shutdown(void);
+
+#endif /* R2CLUSTER_MASKLOG_H */
diff --git a/drivers/staging/ramster/cluster/nodemanager.c b/drivers/staging/ramster/cluster/nodemanager.c
new file mode 100644
index 000000000000..de0e5c8da6ea
--- /dev/null
+++ b/drivers/staging/ramster/cluster/nodemanager.c
@@ -0,0 +1,992 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/configfs.h>
+
+#include "tcp.h"
+#include "nodemanager.h"
+#include "heartbeat.h"
+#include "masklog.h"
+
+/* For now we operate under the assumption that there can be only one
+ * cluster active at a time. Changing this will require trickling
+ * cluster references throughout where nodes are looked up. */
+struct r2nm_cluster *r2nm_single_cluster;
+
+char *r2nm_fence_method_desc[R2NM_FENCE_METHODS] = {
+ "reset", /* R2NM_FENCE_RESET */
+ "panic", /* R2NM_FENCE_PANIC */
+};
+
+struct r2nm_node *r2nm_get_node_by_num(u8 node_num)
+{
+ struct r2nm_node *node = NULL;
+
+ if (node_num >= R2NM_MAX_NODES || r2nm_single_cluster == NULL)
+ goto out;
+
+ read_lock(&r2nm_single_cluster->cl_nodes_lock);
+ node = r2nm_single_cluster->cl_nodes[node_num];
+ if (node)
+ config_item_get(&node->nd_item);
+ read_unlock(&r2nm_single_cluster->cl_nodes_lock);
+out:
+ return node;
+}
+EXPORT_SYMBOL_GPL(r2nm_get_node_by_num);
+
+int r2nm_configured_node_map(unsigned long *map, unsigned bytes)
+{
+ struct r2nm_cluster *cluster = r2nm_single_cluster;
+
+ BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap)));
+
+ if (cluster == NULL)
+ return -EINVAL;
+
+ read_lock(&cluster->cl_nodes_lock);
+ memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap));
+ read_unlock(&cluster->cl_nodes_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(r2nm_configured_node_map);
+
+static struct r2nm_node *r2nm_node_ip_tree_lookup(struct r2nm_cluster *cluster,
+ __be32 ip_needle,
+ struct rb_node ***ret_p,
+ struct rb_node **ret_parent)
+{
+ struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct r2nm_node *node, *ret = NULL;
+
+ while (*p) {
+ int cmp;
+
+ parent = *p;
+ node = rb_entry(parent, struct r2nm_node, nd_ip_node);
+
+ cmp = memcmp(&ip_needle, &node->nd_ipv4_address,
+ sizeof(ip_needle));
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else if (cmp > 0)
+ p = &(*p)->rb_right;
+ else {
+ ret = node;
+ break;
+ }
+ }
+
+ if (ret_p != NULL)
+ *ret_p = p;
+ if (ret_parent != NULL)
+ *ret_parent = parent;
+
+ return ret;
+}
+
+struct r2nm_node *r2nm_get_node_by_ip(__be32 addr)
+{
+ struct r2nm_node *node = NULL;
+ struct r2nm_cluster *cluster = r2nm_single_cluster;
+
+ if (cluster == NULL)
+ goto out;
+
+ read_lock(&cluster->cl_nodes_lock);
+ node = r2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
+ if (node)
+ config_item_get(&node->nd_item);
+ read_unlock(&cluster->cl_nodes_lock);
+
+out:
+ return node;
+}
+EXPORT_SYMBOL_GPL(r2nm_get_node_by_ip);
+
+void r2nm_node_put(struct r2nm_node *node)
+{
+ config_item_put(&node->nd_item);
+}
+EXPORT_SYMBOL_GPL(r2nm_node_put);
+
+void r2nm_node_get(struct r2nm_node *node)
+{
+ config_item_get(&node->nd_item);
+}
+EXPORT_SYMBOL_GPL(r2nm_node_get);
+
+u8 r2nm_this_node(void)
+{
+ u8 node_num = R2NM_MAX_NODES;
+
+ if (r2nm_single_cluster && r2nm_single_cluster->cl_has_local)
+ node_num = r2nm_single_cluster->cl_local_node;
+
+ return node_num;
+}
+EXPORT_SYMBOL_GPL(r2nm_this_node);
+
+/* node configfs bits */
+
+static struct r2nm_cluster *to_r2nm_cluster(struct config_item *item)
+{
+ return item ?
+ container_of(to_config_group(item), struct r2nm_cluster,
+ cl_group)
+ : NULL;
+}
+
+static struct r2nm_node *to_r2nm_node(struct config_item *item)
+{
+ return item ? container_of(item, struct r2nm_node, nd_item) : NULL;
+}
+
+static void r2nm_node_release(struct config_item *item)
+{
+ struct r2nm_node *node = to_r2nm_node(item);
+ kfree(node);
+}
+
+static ssize_t r2nm_node_num_read(struct r2nm_node *node, char *page)
+{
+ return sprintf(page, "%d\n", node->nd_num);
+}
+
+static struct r2nm_cluster *to_r2nm_cluster_from_node(struct r2nm_node *node)
+{
+	/* through the first node_set .parent
+	 * mycluster/node/mynode == r2nm_cluster->r2nm_node_group->r2nm_node */
+ return to_r2nm_cluster(node->nd_item.ci_parent->ci_parent);
+}
+
+enum {
+ R2NM_NODE_ATTR_NUM = 0,
+ R2NM_NODE_ATTR_PORT,
+ R2NM_NODE_ATTR_ADDRESS,
+ R2NM_NODE_ATTR_LOCAL,
+};
+
+static ssize_t r2nm_node_num_write(struct r2nm_node *node, const char *page,
+ size_t count)
+{
+ struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node);
+ unsigned long tmp;
+ char *p = (char *)page;
+ int err;
+
+ err = kstrtoul(p, 10, &tmp);
+ if (err)
+ return err;
+
+ if (tmp >= R2NM_MAX_NODES)
+ return -ERANGE;
+
+	/* once we're in the cl_nodes tree networking can look us up by
+	 * node number and try to use our address and port attributes
+	 * to connect to this node, so make sure that they've been set
+	 * before the node number is written */
+ if (!test_bit(R2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
+ !test_bit(R2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
+ return -EINVAL; /* XXX */
+
+ write_lock(&cluster->cl_nodes_lock);
+ if (cluster->cl_nodes[tmp])
+ p = NULL;
+ else {
+ cluster->cl_nodes[tmp] = node;
+ node->nd_num = tmp;
+ set_bit(tmp, cluster->cl_nodes_bitmap);
+ }
+ write_unlock(&cluster->cl_nodes_lock);
+ if (p == NULL)
+ return -EEXIST;
+
+ return count;
+}
+static ssize_t r2nm_node_ipv4_port_read(struct r2nm_node *node, char *page)
+{
+ return sprintf(page, "%u\n", ntohs(node->nd_ipv4_port));
+}
+
+static ssize_t r2nm_node_ipv4_port_write(struct r2nm_node *node,
+ const char *page, size_t count)
+{
+ unsigned long tmp;
+ char *p = (char *)page;
+ int err;
+
+ err = kstrtoul(p, 10, &tmp);
+ if (err)
+ return err;
+
+ if (tmp == 0)
+ return -EINVAL;
+ if (tmp >= (u16)-1)
+ return -ERANGE;
+
+ node->nd_ipv4_port = htons(tmp);
+
+ return count;
+}
+
+static ssize_t r2nm_node_ipv4_address_read(struct r2nm_node *node, char *page)
+{
+ return sprintf(page, "%pI4\n", &node->nd_ipv4_address);
+}
+
+static ssize_t r2nm_node_ipv4_address_write(struct r2nm_node *node,
+ const char *page,
+ size_t count)
+{
+ struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node);
+ int ret, i;
+ struct rb_node **p, *parent;
+ unsigned int octets[4];
+ __be32 ipv4_addr = 0;
+
+ ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
+ &octets[1], &octets[0]);
+ if (ret != 4)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(octets); i++) {
+ if (octets[i] > 255)
+ return -ERANGE;
+ be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
+ }
+
+ ret = 0;
+ write_lock(&cluster->cl_nodes_lock);
+ if (r2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
+ ret = -EEXIST;
+ else {
+ rb_link_node(&node->nd_ip_node, parent, p);
+ rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
+ }
+ write_unlock(&cluster->cl_nodes_lock);
+ if (ret)
+ return ret;
+
+ memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr));
+
+ return count;
+}
+
+static ssize_t r2nm_node_local_read(struct r2nm_node *node, char *page)
+{
+ return sprintf(page, "%d\n", node->nd_local);
+}
+
+static ssize_t r2nm_node_local_write(struct r2nm_node *node, const char *page,
+ size_t count)
+{
+ struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node);
+ unsigned long tmp;
+ char *p = (char *)page;
+ ssize_t ret;
+ int err;
+
+ err = kstrtoul(p, 10, &tmp);
+ if (err)
+ return err;
+
+ tmp = !!tmp; /* boolean of whether this node wants to be local */
+
+ /* setting local turns on networking rx for now so we require having
+ * set everything else first */
+ if (!test_bit(R2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
+ !test_bit(R2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
+ !test_bit(R2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
+ return -EINVAL; /* XXX */
+
+ /* the only failure case is trying to set a new local node
+ * when a different one is already set */
+ if (tmp && tmp == cluster->cl_has_local &&
+ cluster->cl_local_node != node->nd_num)
+ return -EBUSY;
+
+ /* bring up the rx thread if we're setting the new local node. */
+ if (tmp && !cluster->cl_has_local) {
+ ret = r2net_start_listening(node);
+ if (ret)
+ return ret;
+ }
+
+ if (!tmp && cluster->cl_has_local &&
+ cluster->cl_local_node == node->nd_num) {
+ r2net_stop_listening(node);
+ cluster->cl_local_node = R2NM_INVALID_NODE_NUM;
+ }
+
+ node->nd_local = tmp;
+ if (node->nd_local) {
+ cluster->cl_has_local = tmp;
+ cluster->cl_local_node = node->nd_num;
+ }
+
+ return count;
+}
+
+struct r2nm_node_attribute {
+ struct configfs_attribute attr;
+ ssize_t (*show)(struct r2nm_node *, char *);
+ ssize_t (*store)(struct r2nm_node *, const char *, size_t);
+};
+
+static struct r2nm_node_attribute r2nm_node_attr_num = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "num",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2nm_node_num_read,
+ .store = r2nm_node_num_write,
+};
+
+static struct r2nm_node_attribute r2nm_node_attr_ipv4_port = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "ipv4_port",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2nm_node_ipv4_port_read,
+ .store = r2nm_node_ipv4_port_write,
+};
+
+static struct r2nm_node_attribute r2nm_node_attr_ipv4_address = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "ipv4_address",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2nm_node_ipv4_address_read,
+ .store = r2nm_node_ipv4_address_write,
+};
+
+static struct r2nm_node_attribute r2nm_node_attr_local = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "local",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2nm_node_local_read,
+ .store = r2nm_node_local_write,
+};
+
+static struct configfs_attribute *r2nm_node_attrs[] = {
+ [R2NM_NODE_ATTR_NUM] = &r2nm_node_attr_num.attr,
+ [R2NM_NODE_ATTR_PORT] = &r2nm_node_attr_ipv4_port.attr,
+ [R2NM_NODE_ATTR_ADDRESS] = &r2nm_node_attr_ipv4_address.attr,
+ [R2NM_NODE_ATTR_LOCAL] = &r2nm_node_attr_local.attr,
+ NULL,
+};
+
+static int r2nm_attr_index(struct configfs_attribute *attr)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r2nm_node_attrs); i++) {
+ if (attr == r2nm_node_attrs[i])
+ return i;
+ }
+ BUG();
+ return 0;
+}
+
+static ssize_t r2nm_node_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
+{
+ struct r2nm_node *node = to_r2nm_node(item);
+ struct r2nm_node_attribute *r2nm_node_attr =
+ container_of(attr, struct r2nm_node_attribute, attr);
+ ssize_t ret = 0;
+
+ if (r2nm_node_attr->show)
+ ret = r2nm_node_attr->show(node, page);
+ return ret;
+}
+
+static ssize_t r2nm_node_store(struct config_item *item,
+ struct configfs_attribute *attr,
+ const char *page, size_t count)
+{
+ struct r2nm_node *node = to_r2nm_node(item);
+ struct r2nm_node_attribute *r2nm_node_attr =
+ container_of(attr, struct r2nm_node_attribute, attr);
+ ssize_t ret;
+ int attr_index = r2nm_attr_index(attr);
+
+ if (r2nm_node_attr->store == NULL) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (test_bit(attr_index, &node->nd_set_attributes))
+ return -EBUSY;
+
+ ret = r2nm_node_attr->store(node, page, count);
+ if (ret < count)
+ goto out;
+
+ set_bit(attr_index, &node->nd_set_attributes);
+out:
+ return ret;
+}
+
+static struct configfs_item_operations r2nm_node_item_ops = {
+ .release = r2nm_node_release,
+ .show_attribute = r2nm_node_show,
+ .store_attribute = r2nm_node_store,
+};
+
+static struct config_item_type r2nm_node_type = {
+ .ct_item_ops = &r2nm_node_item_ops,
+ .ct_attrs = r2nm_node_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* node set */
+
+struct r2nm_node_group {
+ struct config_group ns_group;
+ /* some stuff? */
+};
+
+#if 0
+static struct r2nm_node_group *to_r2nm_node_group(struct config_group *group)
+{
+ return group ?
+ container_of(group, struct r2nm_node_group, ns_group)
+ : NULL;
+}
+#endif
+
+struct r2nm_cluster_attribute {
+ struct configfs_attribute attr;
+ ssize_t (*show)(struct r2nm_cluster *, char *);
+ ssize_t (*store)(struct r2nm_cluster *, const char *, size_t);
+};
+
+static ssize_t r2nm_cluster_attr_write(const char *page, ssize_t count,
+ unsigned int *val)
+{
+ unsigned long tmp;
+ char *p = (char *)page;
+ int err;
+
+ err = kstrtoul(p, 10, &tmp);
+ if (err)
+ return err;
+
+ if (tmp == 0)
+ return -EINVAL;
+ if (tmp >= (u32)-1)
+ return -ERANGE;
+
+ *val = tmp;
+
+ return count;
+}
+
+static ssize_t r2nm_cluster_attr_idle_timeout_ms_read(
+ struct r2nm_cluster *cluster, char *page)
+{
+ return sprintf(page, "%u\n", cluster->cl_idle_timeout_ms);
+}
+
+static ssize_t r2nm_cluster_attr_idle_timeout_ms_write(
+ struct r2nm_cluster *cluster, const char *page, size_t count)
+{
+ ssize_t ret;
+ unsigned int val = 0;
+
+ ret = r2nm_cluster_attr_write(page, count, &val);
+
+ if (ret > 0) {
+ if (cluster->cl_idle_timeout_ms != val
+ && r2net_num_connected_peers()) {
+ mlog(ML_NOTICE,
+ "r2net: cannot change idle timeout after "
+ "the first peer has agreed to it."
+ " %d connected peers\n",
+ r2net_num_connected_peers());
+ ret = -EINVAL;
+ } else if (val <= cluster->cl_keepalive_delay_ms) {
+ mlog(ML_NOTICE, "r2net: idle timeout must be larger "
+ "than keepalive delay\n");
+ ret = -EINVAL;
+ } else {
+ cluster->cl_idle_timeout_ms = val;
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t r2nm_cluster_attr_keepalive_delay_ms_read(
+ struct r2nm_cluster *cluster, char *page)
+{
+ return sprintf(page, "%u\n", cluster->cl_keepalive_delay_ms);
+}
+
+static ssize_t r2nm_cluster_attr_keepalive_delay_ms_write(
+ struct r2nm_cluster *cluster, const char *page, size_t count)
+{
+ ssize_t ret;
+ unsigned int val = 0;
+
+ ret = r2nm_cluster_attr_write(page, count, &val);
+
+ if (ret > 0) {
+ if (cluster->cl_keepalive_delay_ms != val
+ && r2net_num_connected_peers()) {
+ mlog(ML_NOTICE,
+ "r2net: cannot change keepalive delay after"
+ " the first peer has agreed to it."
+ " %d connected peers\n",
+ r2net_num_connected_peers());
+ ret = -EINVAL;
+ } else if (val >= cluster->cl_idle_timeout_ms) {
+ mlog(ML_NOTICE, "r2net: keepalive delay must be "
+ "smaller than idle timeout\n");
+ ret = -EINVAL;
+ } else {
+ cluster->cl_keepalive_delay_ms = val;
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t r2nm_cluster_attr_reconnect_delay_ms_read(
+ struct r2nm_cluster *cluster, char *page)
+{
+ return sprintf(page, "%u\n", cluster->cl_reconnect_delay_ms);
+}
+
+static ssize_t r2nm_cluster_attr_reconnect_delay_ms_write(
+ struct r2nm_cluster *cluster, const char *page, size_t count)
+{
+ return r2nm_cluster_attr_write(page, count,
+ &cluster->cl_reconnect_delay_ms);
+}
+
+static ssize_t r2nm_cluster_attr_fence_method_read(
+ struct r2nm_cluster *cluster, char *page)
+{
+ ssize_t ret = 0;
+
+ if (cluster)
+ ret = sprintf(page, "%s\n",
+ r2nm_fence_method_desc[cluster->cl_fence_method]);
+ return ret;
+}
+
+static ssize_t r2nm_cluster_attr_fence_method_write(
+ struct r2nm_cluster *cluster, const char *page, size_t count)
+{
+ unsigned int i;
+
+ if (page[count - 1] != '\n')
+ goto bail;
+
+ for (i = 0; i < R2NM_FENCE_METHODS; ++i) {
+ if (count != strlen(r2nm_fence_method_desc[i]) + 1)
+ continue;
+ if (strncasecmp(page, r2nm_fence_method_desc[i], count - 1))
+ continue;
+ if (cluster->cl_fence_method != i) {
+ printk(KERN_INFO "ramster: Changing fence method to %s\n",
+ r2nm_fence_method_desc[i]);
+ cluster->cl_fence_method = i;
+ }
+ return count;
+ }
+
+bail:
+ return -EINVAL;
+}
+
+static struct r2nm_cluster_attribute r2nm_cluster_attr_idle_timeout_ms = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "idle_timeout_ms",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2nm_cluster_attr_idle_timeout_ms_read,
+ .store = r2nm_cluster_attr_idle_timeout_ms_write,
+};
+
+static struct r2nm_cluster_attribute r2nm_cluster_attr_keepalive_delay_ms = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "keepalive_delay_ms",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2nm_cluster_attr_keepalive_delay_ms_read,
+ .store = r2nm_cluster_attr_keepalive_delay_ms_write,
+};
+
+static struct r2nm_cluster_attribute r2nm_cluster_attr_reconnect_delay_ms = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "reconnect_delay_ms",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2nm_cluster_attr_reconnect_delay_ms_read,
+ .store = r2nm_cluster_attr_reconnect_delay_ms_write,
+};
+
+static struct r2nm_cluster_attribute r2nm_cluster_attr_fence_method = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "fence_method",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = r2nm_cluster_attr_fence_method_read,
+ .store = r2nm_cluster_attr_fence_method_write,
+};
+
+static struct configfs_attribute *r2nm_cluster_attrs[] = {
+ &r2nm_cluster_attr_idle_timeout_ms.attr,
+ &r2nm_cluster_attr_keepalive_delay_ms.attr,
+ &r2nm_cluster_attr_reconnect_delay_ms.attr,
+ &r2nm_cluster_attr_fence_method.attr,
+ NULL,
+};
+static ssize_t r2nm_cluster_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
+{
+ struct r2nm_cluster *cluster = to_r2nm_cluster(item);
+ struct r2nm_cluster_attribute *r2nm_cluster_attr =
+ container_of(attr, struct r2nm_cluster_attribute, attr);
+ ssize_t ret = 0;
+
+ if (r2nm_cluster_attr->show)
+ ret = r2nm_cluster_attr->show(cluster, page);
+ return ret;
+}
+
+static ssize_t r2nm_cluster_store(struct config_item *item,
+ struct configfs_attribute *attr,
+ const char *page, size_t count)
+{
+ struct r2nm_cluster *cluster = to_r2nm_cluster(item);
+ struct r2nm_cluster_attribute *r2nm_cluster_attr =
+ container_of(attr, struct r2nm_cluster_attribute, attr);
+ ssize_t ret;
+
+ if (r2nm_cluster_attr->store == NULL) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = r2nm_cluster_attr->store(cluster, page, count);
+out:
+ return ret;
+}
+
+static struct config_item *r2nm_node_group_make_item(struct config_group *group,
+ const char *name)
+{
+ struct r2nm_node *node = NULL;
+
+ if (strlen(name) > R2NM_MAX_NAME_LEN)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ node = kzalloc(sizeof(struct r2nm_node), GFP_KERNEL);
+ if (node == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
+ config_item_init_type_name(&node->nd_item, name, &r2nm_node_type);
+ spin_lock_init(&node->nd_lock);
+
+ mlog(ML_CLUSTER, "r2nm: Registering node %s\n", name);
+
+ return &node->nd_item;
+}
+
+static void r2nm_node_group_drop_item(struct config_group *group,
+ struct config_item *item)
+{
+ struct r2nm_node *node = to_r2nm_node(item);
+ struct r2nm_cluster *cluster =
+ to_r2nm_cluster(group->cg_item.ci_parent);
+
+ r2net_disconnect_node(node);
+
+ if (cluster->cl_has_local &&
+ (cluster->cl_local_node == node->nd_num)) {
+ cluster->cl_has_local = 0;
+ cluster->cl_local_node = R2NM_INVALID_NODE_NUM;
+ r2net_stop_listening(node);
+ }
+
+ /* XXX call into net to stop this node from trading messages */
+
+ write_lock(&cluster->cl_nodes_lock);
+
+ /* XXX sloppy */
+ if (node->nd_ipv4_address)
+ rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);
+
+	/* nd_num might be 0 if the node number hasn't been set */
+ if (cluster->cl_nodes[node->nd_num] == node) {
+ cluster->cl_nodes[node->nd_num] = NULL;
+ clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
+ }
+ write_unlock(&cluster->cl_nodes_lock);
+
+ mlog(ML_CLUSTER, "r2nm: Unregistered node %s\n",
+ config_item_name(&node->nd_item));
+
+ config_item_put(item);
+}
+
+static struct configfs_group_operations r2nm_node_group_group_ops = {
+ .make_item = r2nm_node_group_make_item,
+ .drop_item = r2nm_node_group_drop_item,
+};
+
+static struct config_item_type r2nm_node_group_type = {
+ .ct_group_ops = &r2nm_node_group_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+/* cluster */
+
+static void r2nm_cluster_release(struct config_item *item)
+{
+ struct r2nm_cluster *cluster = to_r2nm_cluster(item);
+
+ kfree(cluster->cl_group.default_groups);
+ kfree(cluster);
+}
+
+static struct configfs_item_operations r2nm_cluster_item_ops = {
+ .release = r2nm_cluster_release,
+ .show_attribute = r2nm_cluster_show,
+ .store_attribute = r2nm_cluster_store,
+};
+
+static struct config_item_type r2nm_cluster_type = {
+ .ct_item_ops = &r2nm_cluster_item_ops,
+ .ct_attrs = r2nm_cluster_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* cluster set */
+
+struct r2nm_cluster_group {
+ struct configfs_subsystem cs_subsys;
+ /* some stuff? */
+};
+
+#if 0
+static struct r2nm_cluster_group *
+to_r2nm_cluster_group(struct config_group *group)
+{
+ return group ?
+ container_of(to_configfs_subsystem(group),
+ struct r2nm_cluster_group, cs_subsys)
+ : NULL;
+}
+#endif
+
+static struct config_group *
+r2nm_cluster_group_make_group(struct config_group *group,
+ const char *name)
+{
+ struct r2nm_cluster *cluster = NULL;
+ struct r2nm_node_group *ns = NULL;
+ struct config_group *r2hb_group = NULL, *ret = NULL;
+ void *defs = NULL;
+
+ /* this runs under the parent dir's i_mutex; there can be only
+ * one caller in here at a time */
+ if (r2nm_single_cluster)
+ return ERR_PTR(-ENOSPC);
+
+ cluster = kzalloc(sizeof(struct r2nm_cluster), GFP_KERNEL);
+ ns = kzalloc(sizeof(struct r2nm_node_group), GFP_KERNEL);
+ defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
+ r2hb_group = r2hb_alloc_hb_set();
+ if (cluster == NULL || ns == NULL || r2hb_group == NULL || defs == NULL)
+ goto out;
+
+ config_group_init_type_name(&cluster->cl_group, name,
+ &r2nm_cluster_type);
+ config_group_init_type_name(&ns->ns_group, "node",
+ &r2nm_node_group_type);
+
+ cluster->cl_group.default_groups = defs;
+ cluster->cl_group.default_groups[0] = &ns->ns_group;
+ cluster->cl_group.default_groups[1] = r2hb_group;
+ cluster->cl_group.default_groups[2] = NULL;
+ rwlock_init(&cluster->cl_nodes_lock);
+ cluster->cl_node_ip_tree = RB_ROOT;
+ cluster->cl_reconnect_delay_ms = R2NET_RECONNECT_DELAY_MS_DEFAULT;
+ cluster->cl_idle_timeout_ms = R2NET_IDLE_TIMEOUT_MS_DEFAULT;
+ cluster->cl_keepalive_delay_ms = R2NET_KEEPALIVE_DELAY_MS_DEFAULT;
+ cluster->cl_fence_method = R2NM_FENCE_RESET;
+
+ ret = &cluster->cl_group;
+ r2nm_single_cluster = cluster;
+
+out:
+ if (ret == NULL) {
+ kfree(cluster);
+ kfree(ns);
+ r2hb_free_hb_set(r2hb_group);
+ kfree(defs);
+ ret = ERR_PTR(-ENOMEM);
+ }
+
+ return ret;
+}
+
+static void r2nm_cluster_group_drop_item(struct config_group *group,
+ struct config_item *item)
+{
+ struct r2nm_cluster *cluster = to_r2nm_cluster(item);
+ int i;
+ struct config_item *killme;
+
+ BUG_ON(r2nm_single_cluster != cluster);
+ r2nm_single_cluster = NULL;
+
+ for (i = 0; cluster->cl_group.default_groups[i]; i++) {
+ killme = &cluster->cl_group.default_groups[i]->cg_item;
+ cluster->cl_group.default_groups[i] = NULL;
+ config_item_put(killme);
+ }
+
+ config_item_put(item);
+}
+
+static struct configfs_group_operations r2nm_cluster_group_group_ops = {
+ .make_group = r2nm_cluster_group_make_group,
+ .drop_item = r2nm_cluster_group_drop_item,
+};
+
+static struct config_item_type r2nm_cluster_group_type = {
+ .ct_group_ops = &r2nm_cluster_group_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct r2nm_cluster_group r2nm_cluster_group = {
+ .cs_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "cluster",
+ .ci_type = &r2nm_cluster_group_type,
+ },
+ },
+ },
+};
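+
+/*
+ * Illustrative sketch, not part of the original patch: userspace drives
+ * this subsystem through configfs. Assuming configfs is mounted at
+ * /sys/kernel/config, a cluster and one node are set up roughly as:
+ *
+ *	mkdir /sys/kernel/config/cluster/mycluster
+ *	cd /sys/kernel/config/cluster/mycluster/node
+ *	mkdir node0
+ *	echo 10.0.0.1 > node0/ipv4_address
+ *	echo 7777 > node0/ipv4_port
+ *	echo 0 > node0/num
+ *	echo 1 > node0/local
+ *
+ * The ordering matters: r2nm_node_num_write() refuses to run before the
+ * address and port attributes are set, and r2nm_node_local_write()
+ * additionally requires the node number.
+ */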
+
+int r2nm_depend_item(struct config_item *item)
+{
+ return configfs_depend_item(&r2nm_cluster_group.cs_subsys, item);
+}
+
+void r2nm_undepend_item(struct config_item *item)
+{
+ configfs_undepend_item(&r2nm_cluster_group.cs_subsys, item);
+}
+
+int r2nm_depend_this_node(void)
+{
+ int ret = 0;
+ struct r2nm_node *local_node;
+
+ local_node = r2nm_get_node_by_num(r2nm_this_node());
+ if (!local_node) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = r2nm_depend_item(&local_node->nd_item);
+ r2nm_node_put(local_node);
+
+out:
+ return ret;
+}
+
+void r2nm_undepend_this_node(void)
+{
+ struct r2nm_node *local_node;
+
+ local_node = r2nm_get_node_by_num(r2nm_this_node());
+ BUG_ON(!local_node);
+
+ r2nm_undepend_item(&local_node->nd_item);
+ r2nm_node_put(local_node);
+}
+
+static void __exit exit_r2nm(void)
+{
+ /* XXX sync with hb callbacks and shut down hb? */
+ r2net_unregister_hb_callbacks();
+ configfs_unregister_subsystem(&r2nm_cluster_group.cs_subsys);
+
+ r2net_exit();
+ r2hb_exit();
+}
+
+static int __init init_r2nm(void)
+{
+ int ret = -1;
+
+ ret = r2hb_init();
+ if (ret)
+ goto out;
+
+ ret = r2net_init();
+ if (ret)
+ goto out_r2hb;
+
+ ret = r2net_register_hb_callbacks();
+ if (ret)
+ goto out_r2net;
+
+ config_group_init(&r2nm_cluster_group.cs_subsys.su_group);
+ mutex_init(&r2nm_cluster_group.cs_subsys.su_mutex);
+ ret = configfs_register_subsystem(&r2nm_cluster_group.cs_subsys);
+ if (ret) {
+ printk(KERN_ERR "nodemanager: Registration returned %d\n", ret);
+ goto out_callbacks;
+ }
+
+	goto out;
+
+out_callbacks:
+ r2net_unregister_hb_callbacks();
+out_r2net:
+ r2net_exit();
+out_r2hb:
+ r2hb_exit();
+out:
+ return ret;
+}
+
+MODULE_AUTHOR("Oracle");
+MODULE_LICENSE("GPL");
+
+module_init(init_r2nm)
+module_exit(exit_r2nm)
diff --git a/drivers/staging/ramster/cluster/nodemanager.h b/drivers/staging/ramster/cluster/nodemanager.h
new file mode 100644
index 000000000000..41a04df5842c
--- /dev/null
+++ b/drivers/staging/ramster/cluster/nodemanager.h
@@ -0,0 +1,88 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * nodemanager.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ */
+
+#ifndef R2CLUSTER_NODEMANAGER_H
+#define R2CLUSTER_NODEMANAGER_H
+
+#include "ramster_nodemanager.h"
+
+/* This totally doesn't belong here. */
+#include <linux/configfs.h>
+#include <linux/rbtree.h>
+
+enum r2nm_fence_method {
+ R2NM_FENCE_RESET = 0,
+ R2NM_FENCE_PANIC,
+ R2NM_FENCE_METHODS, /* Number of fence methods */
+};
+
+struct r2nm_node {
+ spinlock_t nd_lock;
+ struct config_item nd_item;
+ char nd_name[R2NM_MAX_NAME_LEN+1]; /* replace? */
+ __u8 nd_num;
+ /* only one address per node, as attributes, for now. */
+ __be32 nd_ipv4_address;
+ __be16 nd_ipv4_port;
+ struct rb_node nd_ip_node;
+ /* there can be only one local node for now */
+ int nd_local;
+
+ unsigned long nd_set_attributes;
+};
+
+struct r2nm_cluster {
+ struct config_group cl_group;
+ unsigned cl_has_local:1;
+ u8 cl_local_node;
+ rwlock_t cl_nodes_lock;
+ struct r2nm_node *cl_nodes[R2NM_MAX_NODES];
+ struct rb_root cl_node_ip_tree;
+ unsigned int cl_idle_timeout_ms;
+ unsigned int cl_keepalive_delay_ms;
+ unsigned int cl_reconnect_delay_ms;
+ enum r2nm_fence_method cl_fence_method;
+
+ /* part of a hack for disk bitmap.. will go eventually. - zab */
+ unsigned long cl_nodes_bitmap[BITS_TO_LONGS(R2NM_MAX_NODES)];
+};
+
+extern struct r2nm_cluster *r2nm_single_cluster;
+
+u8 r2nm_this_node(void);
+
+int r2nm_configured_node_map(unsigned long *map, unsigned bytes);
+struct r2nm_node *r2nm_get_node_by_num(u8 node_num);
+struct r2nm_node *r2nm_get_node_by_ip(__be32 addr);
+void r2nm_node_get(struct r2nm_node *node);
+void r2nm_node_put(struct r2nm_node *node);
+
+int r2nm_depend_item(struct config_item *item);
+void r2nm_undepend_item(struct config_item *item);
+int r2nm_depend_this_node(void);
+void r2nm_undepend_this_node(void);
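+
+/*
+ * Illustrative sketch, not part of the original patch: the lookup
+ * functions return the node with a reference held, so callers pair them
+ * with r2nm_node_put() when done, e.g.:
+ *
+ *	struct r2nm_node *node = r2nm_get_node_by_num(node_num);
+ *	if (node) {
+ *		... use node->nd_ipv4_address and node->nd_ipv4_port ...
+ *		r2nm_node_put(node);
+ *	}
+ */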
+
+#endif /* R2CLUSTER_NODEMANAGER_H */
diff --git a/drivers/staging/ramster/cluster/ramster_nodemanager.h b/drivers/staging/ramster/cluster/ramster_nodemanager.h
new file mode 100644
index 000000000000..49f879d943ab
--- /dev/null
+++ b/drivers/staging/ramster/cluster/ramster_nodemanager.h
@@ -0,0 +1,39 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ramster_nodemanager.h
+ *
+ * Header describing the interface between userspace and the kernel
+ * for the ramster_nodemanager module.
+ *
+ * Copyright (C) 2002, 2004, 2012 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ */
+
+#ifndef _RAMSTER_NODEMANAGER_H
+#define _RAMSTER_NODEMANAGER_H
+
+#define R2NM_API_VERSION 5
+
+#define R2NM_MAX_NODES 255
+#define R2NM_INVALID_NODE_NUM 255
+
+/* host name, group name, cluster name all 64 bytes */
+#define R2NM_MAX_NAME_LEN 64 /* __NEW_UTS_LEN */
+
+#endif /* _RAMSTER_NODEMANAGER_H */
diff --git a/drivers/staging/ramster/cluster/tcp.c b/drivers/staging/ramster/cluster/tcp.c
new file mode 100644
index 000000000000..3af1b2c51b78
--- /dev/null
+++ b/drivers/staging/ramster/cluster/tcp.c
@@ -0,0 +1,2256 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ *
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * ----
+ *
+ * Callers for this were originally written against a very simple synchronous
+ * API. This implementation reflects those simple callers. Some day I'm sure
+ * we'll need to move to a more robust posting/callback mechanism.
+ *
+ * Transmit calls pass in kernel virtual addresses and block while copying
+ * them into the socket's tx buffers via a usual blocking sendmsg. They'll
+ * block waiting for a failed socket to time out. TX callers can also pass
+ * in a pointer to an
+ * 'int' which gets filled with an errno off the wire in response to the
+ * message they send.
+ *
+ * Handlers for unsolicited messages are registered. Each socket has a page
+ * that incoming data is copied into. First the header, then the data.
+ * Handlers are called from only one thread with a reference to this per-socket
+ * page. This page is destroyed after the handler call, so it can't be
+ * referenced beyond the call. Handlers may block but are discouraged from
+ * doing so.
+ *
+ * Any framing errors (bad magic, large payload lengths) close a connection.
+ *
+ * Our sock_container holds the state we associate with a socket. Its current
+ * framing state is held there, as well as the refcounting we do around when
+ * it is safe to tear down the socket. The socket is only finally torn down from
+ * the container when the container loses all of its references -- so as long
+ * as you hold a ref on the container you can trust that the socket is valid
+ * for use with kernel socket APIs.
+ *
+ * Connections are initiated between a pair of nodes when the node with the
+ * higher node number gets a heartbeat callback which indicates that the lower
+ * numbered node has started heartbeating. The lower numbered node is passive
+ * and only accepts the connection if the higher numbered node is heartbeating.
+ */
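+
+/*
+ * Illustrative sketch, not part of the original patch: assuming the
+ * r2net_send_message() helper declared in tcp.h, the transmit path
+ * described above looks roughly like this to a caller (MY_TYPE, MY_KEY,
+ * payload and target_node are placeholders):
+ *
+ *	int status = 0, ret;
+ *
+ *	ret = r2net_send_message(MY_TYPE, MY_KEY, &payload,
+ *				 sizeof(payload), target_node, &status);
+ *	if (ret == 0)
+ *		ret = status;	status carries the errno off the wire
+ */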
+
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+#include <linux/net.h>
+#include <linux/export.h>
+#include <linux/uaccess.h>
+#include <net/tcp.h>
+
+#include "heartbeat.h"
+#include "tcp.h"
+#include "nodemanager.h"
+#define MLOG_MASK_PREFIX ML_TCP
+#include "masklog.h"
+
+#include "tcp_internal.h"
+
+#define SC_NODEF_FMT "node %s (num %u) at %pI4:%u"
+
+/*
+ * In the following two log macros, the whitespace after the ',' just
+ * before ##args is intentional. Otherwise, gcc 2.95 will eat the
+ * previous token if args expands to nothing.
+ */
+#define msglog(hdr, fmt, args...) do { \
+ typeof(hdr) __hdr = (hdr); \
+ mlog(ML_MSG, "[mag %u len %u typ %u stat %d sys_stat %d " \
+ "key %08x num %u] " fmt, \
+ be16_to_cpu(__hdr->magic), be16_to_cpu(__hdr->data_len), \
+ be16_to_cpu(__hdr->msg_type), be32_to_cpu(__hdr->status), \
+ be32_to_cpu(__hdr->sys_status), be32_to_cpu(__hdr->key), \
+ be32_to_cpu(__hdr->msg_num) , ##args); \
+} while (0)
+
+#define sclog(sc, fmt, args...) do { \
+ typeof(sc) __sc = (sc); \
+ mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p " \
+ "pg_off %zu] " fmt, __sc, \
+ atomic_read(&__sc->sc_kref.refcount), __sc->sc_sock, \
+ __sc->sc_node->nd_num, __sc->sc_page, __sc->sc_page_off , \
+ ##args); \
+} while (0)
+
+static DEFINE_RWLOCK(r2net_handler_lock);
+static struct rb_root r2net_handler_tree = RB_ROOT;
+
+static struct r2net_node r2net_nodes[R2NM_MAX_NODES];
+
+/* XXX someday we'll need better accounting */
+static struct socket *r2net_listen_sock;
+
+/*
+ * listen work is only queued by the listening socket callbacks on the
+ * r2net_wq. teardown detaches the callbacks before destroying the workqueue.
+ * quorum work is queued as sock containers are shut down. stop_listening
+ * tears down all the node's sock containers, preventing future shutdowns
+ * and queued quorum work, before canceling delayed quorum work and
+ * destroying the work queue.
+ */
+static struct workqueue_struct *r2net_wq;
+static struct work_struct r2net_listen_work;
+
+static struct r2hb_callback_func r2net_hb_up, r2net_hb_down;
+#define R2NET_HB_PRI 0x1
+
+static struct r2net_handshake *r2net_hand;
+static struct r2net_msg *r2net_keep_req, *r2net_keep_resp;
+
+static int r2net_sys_err_translations[R2NET_ERR_MAX] = {
+ [R2NET_ERR_NONE] = 0,
+ [R2NET_ERR_NO_HNDLR] = -ENOPROTOOPT,
+ [R2NET_ERR_OVERFLOW] = -EOVERFLOW,
+ [R2NET_ERR_DIED] = -EHOSTDOWN,};
+
+/* can't quite avoid *all* internal declarations :/ */
+static void r2net_sc_connect_completed(struct work_struct *work);
+static void r2net_rx_until_empty(struct work_struct *work);
+static void r2net_shutdown_sc(struct work_struct *work);
+static void r2net_listen_data_ready(struct sock *sk, int bytes);
+static void r2net_sc_send_keep_req(struct work_struct *work);
+static void r2net_idle_timer(unsigned long data);
+static void r2net_sc_postpone_idle(struct r2net_sock_container *sc);
+static void r2net_sc_reset_idle_timer(struct r2net_sock_container *sc);
+
+#ifdef CONFIG_DEBUG_FS
+static void r2net_init_nst(struct r2net_send_tracking *nst, u32 msgtype,
+ u32 msgkey, struct task_struct *task, u8 node)
+{
+ INIT_LIST_HEAD(&nst->st_net_debug_item);
+ nst->st_task = task;
+ nst->st_msg_type = msgtype;
+ nst->st_msg_key = msgkey;
+ nst->st_node = node;
+}
+
+static inline void r2net_set_nst_sock_time(struct r2net_send_tracking *nst)
+{
+ nst->st_sock_time = ktime_get();
+}
+
+static inline void r2net_set_nst_send_time(struct r2net_send_tracking *nst)
+{
+ nst->st_send_time = ktime_get();
+}
+
+static inline void r2net_set_nst_status_time(struct r2net_send_tracking *nst)
+{
+ nst->st_status_time = ktime_get();
+}
+
+static inline void r2net_set_nst_sock_container(struct r2net_send_tracking *nst,
+ struct r2net_sock_container *sc)
+{
+ nst->st_sc = sc;
+}
+
+static inline void r2net_set_nst_msg_id(struct r2net_send_tracking *nst,
+ u32 msg_id)
+{
+ nst->st_id = msg_id;
+}
+
+static inline void r2net_set_sock_timer(struct r2net_sock_container *sc)
+{
+ sc->sc_tv_timer = ktime_get();
+}
+
+static inline void r2net_set_data_ready_time(struct r2net_sock_container *sc)
+{
+ sc->sc_tv_data_ready = ktime_get();
+}
+
+static inline void r2net_set_advance_start_time(struct r2net_sock_container *sc)
+{
+ sc->sc_tv_advance_start = ktime_get();
+}
+
+static inline void r2net_set_advance_stop_time(struct r2net_sock_container *sc)
+{
+ sc->sc_tv_advance_stop = ktime_get();
+}
+
+static inline void r2net_set_func_start_time(struct r2net_sock_container *sc)
+{
+ sc->sc_tv_func_start = ktime_get();
+}
+
+static inline void r2net_set_func_stop_time(struct r2net_sock_container *sc)
+{
+ sc->sc_tv_func_stop = ktime_get();
+}
+
+#else /* CONFIG_DEBUG_FS */
+# define r2net_init_nst(a, b, c, d, e)
+# define r2net_set_nst_sock_time(a)
+# define r2net_set_nst_send_time(a)
+# define r2net_set_nst_status_time(a)
+# define r2net_set_nst_sock_container(a, b)
+# define r2net_set_nst_msg_id(a, b)
+# define r2net_set_sock_timer(a)
+# define r2net_set_data_ready_time(a)
+# define r2net_set_advance_start_time(a)
+# define r2net_set_advance_stop_time(a)
+# define r2net_set_func_start_time(a)
+# define r2net_set_func_stop_time(a)
+#endif /* CONFIG_DEBUG_FS */
+
+#ifdef CONFIG_RAMSTER_FS_STATS
+static ktime_t r2net_get_func_run_time(struct r2net_sock_container *sc)
+{
+ return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
+}
+
+static void r2net_update_send_stats(struct r2net_send_tracking *nst,
+ struct r2net_sock_container *sc)
+{
+ sc->sc_tv_status_total = ktime_add(sc->sc_tv_status_total,
+ ktime_sub(ktime_get(),
+ nst->st_status_time));
+ sc->sc_tv_send_total = ktime_add(sc->sc_tv_send_total,
+ ktime_sub(nst->st_status_time,
+ nst->st_send_time));
+ sc->sc_tv_acquiry_total = ktime_add(sc->sc_tv_acquiry_total,
+ ktime_sub(nst->st_send_time,
+ nst->st_sock_time));
+ sc->sc_send_count++;
+}
+
+static void r2net_update_recv_stats(struct r2net_sock_container *sc)
+{
+ sc->sc_tv_process_total = ktime_add(sc->sc_tv_process_total,
+ r2net_get_func_run_time(sc));
+ sc->sc_recv_count++;
+}
+
+#else
+
+# define r2net_update_send_stats(a, b)
+
+# define r2net_update_recv_stats(sc)
+
+#endif /* CONFIG_RAMSTER_FS_STATS */
+
+static inline int r2net_reconnect_delay(void)
+{
+ return r2nm_single_cluster->cl_reconnect_delay_ms;
+}
+
+static inline int r2net_keepalive_delay(void)
+{
+ return r2nm_single_cluster->cl_keepalive_delay_ms;
+}
+
+static inline int r2net_idle_timeout(void)
+{
+ return r2nm_single_cluster->cl_idle_timeout_ms;
+}
+
+static inline int r2net_sys_err_to_errno(enum r2net_system_error err)
+{
+ int trans;
+ BUG_ON(err >= R2NET_ERR_MAX);
+ trans = r2net_sys_err_translations[err];
+
+ /* Just in case we mess up the translation table above */
+ BUG_ON(err != R2NET_ERR_NONE && trans == 0);
+ return trans;
+}
+
+struct r2net_node *r2net_nn_from_num(u8 node_num)
+{
+ BUG_ON(node_num >= ARRAY_SIZE(r2net_nodes));
+ return &r2net_nodes[node_num];
+}
+
+static u8 r2net_num_from_nn(struct r2net_node *nn)
+{
+ BUG_ON(nn == NULL);
+ return nn - r2net_nodes;
+}
+
+/* ------------------------------------------------------------ */
+
+static int r2net_prep_nsw(struct r2net_node *nn, struct r2net_status_wait *nsw)
+{
+ int ret = 0;
+
+ do {
+ if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) {
+ ret = -EAGAIN;
+ break;
+ }
+ spin_lock(&nn->nn_lock);
+ ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id);
+ if (ret == 0)
+ list_add_tail(&nsw->ns_node_item,
+ &nn->nn_status_list);
+ spin_unlock(&nn->nn_lock);
+ } while (ret == -EAGAIN);
+
+ if (ret == 0) {
+ init_waitqueue_head(&nsw->ns_wq);
+ nsw->ns_sys_status = R2NET_ERR_NONE;
+ nsw->ns_status = 0;
+ }
+
+ return ret;
+}
+
+static void r2net_complete_nsw_locked(struct r2net_node *nn,
+ struct r2net_status_wait *nsw,
+ enum r2net_system_error sys_status,
+ s32 status)
+{
+ assert_spin_locked(&nn->nn_lock);
+
+ if (!list_empty(&nsw->ns_node_item)) {
+ list_del_init(&nsw->ns_node_item);
+ nsw->ns_sys_status = sys_status;
+ nsw->ns_status = status;
+ idr_remove(&nn->nn_status_idr, nsw->ns_id);
+ wake_up(&nsw->ns_wq);
+ }
+}
+
+static void r2net_complete_nsw(struct r2net_node *nn,
+ struct r2net_status_wait *nsw,
+ u64 id, enum r2net_system_error sys_status,
+ s32 status)
+{
+ spin_lock(&nn->nn_lock);
+ if (nsw == NULL) {
+ if (id > INT_MAX)
+ goto out;
+
+ nsw = idr_find(&nn->nn_status_idr, id);
+ if (nsw == NULL)
+ goto out;
+ }
+
+ r2net_complete_nsw_locked(nn, nsw, sys_status, status);
+
+out:
+ spin_unlock(&nn->nn_lock);
+ return;
+}
+
+static void r2net_complete_nodes_nsw(struct r2net_node *nn)
+{
+ struct r2net_status_wait *nsw, *tmp;
+ unsigned int num_kills = 0;
+
+ assert_spin_locked(&nn->nn_lock);
+
+ list_for_each_entry_safe(nsw, tmp, &nn->nn_status_list, ns_node_item) {
+ r2net_complete_nsw_locked(nn, nsw, R2NET_ERR_DIED, 0);
+ num_kills++;
+ }
+
+ mlog(0, "completed %d messages for node %u\n", num_kills,
+ r2net_num_from_nn(nn));
+}
+
+static int r2net_nsw_completed(struct r2net_node *nn,
+ struct r2net_status_wait *nsw)
+{
+ int completed;
+ spin_lock(&nn->nn_lock);
+ completed = list_empty(&nsw->ns_node_item);
+ spin_unlock(&nn->nn_lock);
+ return completed;
+}
+
+/* ------------------------------------------------------------ */
+
+static void sc_kref_release(struct kref *kref)
+{
+ struct r2net_sock_container *sc = container_of(kref,
+ struct r2net_sock_container, sc_kref);
+ BUG_ON(timer_pending(&sc->sc_idle_timeout));
+
+ sclog(sc, "releasing\n");
+
+ if (sc->sc_sock) {
+ sock_release(sc->sc_sock);
+ sc->sc_sock = NULL;
+ }
+
+ r2nm_undepend_item(&sc->sc_node->nd_item);
+ r2nm_node_put(sc->sc_node);
+ sc->sc_node = NULL;
+
+ r2net_debug_del_sc(sc);
+ kfree(sc);
+}
+
+static void sc_put(struct r2net_sock_container *sc)
+{
+ sclog(sc, "put\n");
+ kref_put(&sc->sc_kref, sc_kref_release);
+}
+static void sc_get(struct r2net_sock_container *sc)
+{
+ sclog(sc, "get\n");
+ kref_get(&sc->sc_kref);
+}
+static struct r2net_sock_container *sc_alloc(struct r2nm_node *node)
+{
+ struct r2net_sock_container *sc, *ret = NULL;
+ struct page *page = NULL;
+ int status = 0;
+
+ page = alloc_page(GFP_NOFS);
+ sc = kzalloc(sizeof(*sc), GFP_NOFS);
+ if (sc == NULL || page == NULL)
+ goto out;
+
+ kref_init(&sc->sc_kref);
+ r2nm_node_get(node);
+ sc->sc_node = node;
+
+ /* pin the node item of the remote node */
+ status = r2nm_depend_item(&node->nd_item);
+ if (status) {
+ mlog_errno(status);
+ r2nm_node_put(node);
+ goto out;
+ }
+ INIT_WORK(&sc->sc_connect_work, r2net_sc_connect_completed);
+ INIT_WORK(&sc->sc_rx_work, r2net_rx_until_empty);
+ INIT_WORK(&sc->sc_shutdown_work, r2net_shutdown_sc);
+ INIT_DELAYED_WORK(&sc->sc_keepalive_work, r2net_sc_send_keep_req);
+
+ init_timer(&sc->sc_idle_timeout);
+ sc->sc_idle_timeout.function = r2net_idle_timer;
+ sc->sc_idle_timeout.data = (unsigned long)sc;
+
+ sclog(sc, "alloced\n");
+
+ ret = sc;
+ sc->sc_page = page;
+ r2net_debug_add_sc(sc);
+ sc = NULL;
+ page = NULL;
+
+out:
+ if (page)
+ __free_page(page);
+ kfree(sc);
+
+ return ret;
+}
+
+/* ------------------------------------------------------------ */
+
+static void r2net_sc_queue_work(struct r2net_sock_container *sc,
+ struct work_struct *work)
+{
+ sc_get(sc);
+ if (!queue_work(r2net_wq, work))
+ sc_put(sc);
+}
+static void r2net_sc_queue_delayed_work(struct r2net_sock_container *sc,
+ struct delayed_work *work,
+ int delay)
+{
+ sc_get(sc);
+ if (!queue_delayed_work(r2net_wq, work, delay))
+ sc_put(sc);
+}
+static void r2net_sc_cancel_delayed_work(struct r2net_sock_container *sc,
+ struct delayed_work *work)
+{
+ if (cancel_delayed_work(work))
+ sc_put(sc);
+}
+
+static atomic_t r2net_connected_peers = ATOMIC_INIT(0);
+
+int r2net_num_connected_peers(void)
+{
+ return atomic_read(&r2net_connected_peers);
+}
+
+static void r2net_set_nn_state(struct r2net_node *nn,
+ struct r2net_sock_container *sc,
+ unsigned valid, int err)
+{
+ int was_valid = nn->nn_sc_valid;
+ int was_err = nn->nn_persistent_error;
+ struct r2net_sock_container *old_sc = nn->nn_sc;
+
+ assert_spin_locked(&nn->nn_lock);
+
+ if (old_sc && !sc)
+ atomic_dec(&r2net_connected_peers);
+ else if (!old_sc && sc)
+ atomic_inc(&r2net_connected_peers);
+
+	/* the node num comparison and single connect/accept path should stop
+	 * a non-NULL sc from being overwritten with another */
+ BUG_ON(sc && nn->nn_sc && nn->nn_sc != sc);
+ mlog_bug_on_msg(err && valid, "err %d valid %u\n", err, valid);
+ mlog_bug_on_msg(valid && !sc, "valid %u sc %p\n", valid, sc);
+
+ if (was_valid && !valid && err == 0)
+ err = -ENOTCONN;
+
+ mlog(ML_CONN, "node %u sc: %p -> %p, valid %u -> %u, err %d -> %d\n",
+ r2net_num_from_nn(nn), nn->nn_sc, sc, nn->nn_sc_valid, valid,
+ nn->nn_persistent_error, err);
+
+ nn->nn_sc = sc;
+ nn->nn_sc_valid = valid ? 1 : 0;
+ nn->nn_persistent_error = err;
+
+ /* mirrors r2net_tx_can_proceed() */
+ if (nn->nn_persistent_error || nn->nn_sc_valid)
+ wake_up(&nn->nn_sc_wq);
+
+ if (!was_err && nn->nn_persistent_error) {
+ queue_delayed_work(r2net_wq, &nn->nn_still_up,
+ msecs_to_jiffies(R2NET_QUORUM_DELAY_MS));
+ }
+
+ if (was_valid && !valid) {
+ printk(KERN_NOTICE "ramster: No longer connected to "
+ SC_NODEF_FMT "\n",
+ old_sc->sc_node->nd_name, old_sc->sc_node->nd_num,
+ &old_sc->sc_node->nd_ipv4_address,
+ ntohs(old_sc->sc_node->nd_ipv4_port));
+ r2net_complete_nodes_nsw(nn);
+ }
+
+ if (!was_valid && valid) {
+ cancel_delayed_work(&nn->nn_connect_expired);
+ printk(KERN_NOTICE "ramster: %s " SC_NODEF_FMT "\n",
+ r2nm_this_node() > sc->sc_node->nd_num ?
+ "Connected to" : "Accepted connection from",
+ sc->sc_node->nd_name, sc->sc_node->nd_num,
+ &sc->sc_node->nd_ipv4_address,
+ ntohs(sc->sc_node->nd_ipv4_port));
+ }
+
+ /* trigger the connecting worker func as long as we're not valid,
+ * it will back off if it shouldn't connect. This can be called
+ * from node config teardown and so needs to be careful about
+ * the work queue actually being up. */
+ if (!valid && r2net_wq) {
+ unsigned long delay;
+ /* delay if we're within a RECONNECT_DELAY of the
+ * last attempt */
+ delay = (nn->nn_last_connect_attempt +
+ msecs_to_jiffies(r2net_reconnect_delay()))
+ - jiffies;
+ if (delay > msecs_to_jiffies(r2net_reconnect_delay()))
+ delay = 0;
+ mlog(ML_CONN, "queueing conn attempt in %lu jiffies\n", delay);
+ queue_delayed_work(r2net_wq, &nn->nn_connect_work, delay);
+
+ /*
+ * Delay the expired work after idle timeout.
+ *
+ * We might have lots of failed connection attempts that run
+ * through here but we only cancel the connect_expired work when
+ * a connection attempt succeeds. So only the first enqueue of
+ * the connect_expired work will do anything. The rest will see
+ * that it's already queued and do nothing.
+ */
+ delay += msecs_to_jiffies(r2net_idle_timeout());
+ queue_delayed_work(r2net_wq, &nn->nn_connect_expired, delay);
+ }
+
+ /* keep track of the nn's sc ref for the caller */
+ if ((old_sc == NULL) && sc)
+ sc_get(sc);
+ if (old_sc && (old_sc != sc)) {
+ r2net_sc_queue_work(old_sc, &old_sc->sc_shutdown_work);
+ sc_put(old_sc);
+ }
+}
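+
+/*
+ * Worked example of the reconnect delay math above (illustrative numbers
+ * only): with r2net_reconnect_delay() == 2000ms, if the last attempt was
+ * 500ms ago the subtraction yields ~1500ms worth of jiffies. If the last
+ * attempt was more than 2000ms ago, the unsigned subtraction wraps past
+ * the delay window, trips the "> reconnect_delay" test and is clamped to
+ * 0, so the connect work is queued immediately.
+ */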
+
+/* see r2net_register_callbacks() */
+static void r2net_data_ready(struct sock *sk, int bytes)
+{
+ void (*ready)(struct sock *sk, int bytes);
+
+ read_lock(&sk->sk_callback_lock);
+ if (sk->sk_user_data) {
+ struct r2net_sock_container *sc = sk->sk_user_data;
+ sclog(sc, "data_ready hit\n");
+ r2net_set_data_ready_time(sc);
+ r2net_sc_queue_work(sc, &sc->sc_rx_work);
+ ready = sc->sc_data_ready;
+ } else {
+ ready = sk->sk_data_ready;
+ }
+ read_unlock(&sk->sk_callback_lock);
+
+ ready(sk, bytes);
+}
+
+/* see r2net_register_callbacks() */
+static void r2net_state_change(struct sock *sk)
+{
+ void (*state_change)(struct sock *sk);
+ struct r2net_sock_container *sc;
+
+ read_lock(&sk->sk_callback_lock);
+ sc = sk->sk_user_data;
+ if (sc == NULL) {
+ state_change = sk->sk_state_change;
+ goto out;
+ }
+
+ sclog(sc, "state_change to %d\n", sk->sk_state);
+
+ state_change = sc->sc_state_change;
+
+ switch (sk->sk_state) {
+
+ /* ignore connecting sockets as they make progress */
+ case TCP_SYN_SENT:
+ case TCP_SYN_RECV:
+ break;
+ case TCP_ESTABLISHED:
+ r2net_sc_queue_work(sc, &sc->sc_connect_work);
+ break;
+ default:
+ printk(KERN_INFO "ramster: Connection to "
+ SC_NODEF_FMT " shutdown, state %d\n",
+ sc->sc_node->nd_name, sc->sc_node->nd_num,
+ &sc->sc_node->nd_ipv4_address,
+ ntohs(sc->sc_node->nd_ipv4_port), sk->sk_state);
+ r2net_sc_queue_work(sc, &sc->sc_shutdown_work);
+ break;
+
+ }
+out:
+ read_unlock(&sk->sk_callback_lock);
+ state_change(sk);
+}
+
+/*
+ * we register callbacks so we can queue work on events before calling
+ * the original callbacks. our callbacks are careful to test user_data
+ * to discover when they've raced with r2net_unregister_callbacks().
+ */
+static void r2net_register_callbacks(struct sock *sk,
+ struct r2net_sock_container *sc)
+{
+ write_lock_bh(&sk->sk_callback_lock);
+
+ /* accepted sockets inherit the old listen socket data ready */
+ if (sk->sk_data_ready == r2net_listen_data_ready) {
+ sk->sk_data_ready = sk->sk_user_data;
+ sk->sk_user_data = NULL;
+ }
+
+ BUG_ON(sk->sk_user_data != NULL);
+ sk->sk_user_data = sc;
+ sc_get(sc);
+
+ sc->sc_data_ready = sk->sk_data_ready;
+ sc->sc_state_change = sk->sk_state_change;
+ sk->sk_data_ready = r2net_data_ready;
+ sk->sk_state_change = r2net_state_change;
+
+ mutex_init(&sc->sc_send_lock);
+
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static int r2net_unregister_callbacks(struct sock *sk,
+ struct r2net_sock_container *sc)
+{
+ int ret = 0;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ if (sk->sk_user_data == sc) {
+ ret = 1;
+ sk->sk_user_data = NULL;
+ sk->sk_data_ready = sc->sc_data_ready;
+ sk->sk_state_change = sc->sc_state_change;
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ return ret;
+}
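+
+/*
+ * A usage sketch of the pairing (illustrative; it mirrors what
+ * r2net_shutdown_sc() does below): register takes a ref on the sc that
+ * the caller drops once unregister confirms the callbacks were still
+ * attached:
+ *
+ *	r2net_register_callbacks(sc->sc_sock->sk, sc);
+ *	...
+ *	if (r2net_unregister_callbacks(sc->sc_sock->sk, sc))
+ *		sc_put(sc);	(drops the ref taken by register)
+ */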
+
+/*
+ * a little helper called by paths that have seen a problem with an sc
+ * and want to detach it from the nn if no one has beaten them to it.
+ * if an error is given then the shutdown will be persistent and pending
+ * transmits will be canceled.
+ */
+static void r2net_ensure_shutdown(struct r2net_node *nn,
+ struct r2net_sock_container *sc,
+ int err)
+{
+ spin_lock(&nn->nn_lock);
+ if (nn->nn_sc == sc)
+ r2net_set_nn_state(nn, NULL, 0, err);
+ spin_unlock(&nn->nn_lock);
+}
+
+/*
+ * This work queue function performs the blocking parts of socket shutdown. A
+ * few paths lead here. set_nn_state will trigger this callback if it sees an
+ * sc detached from the nn. state_change will also trigger this callback
+ * directly when it sees errors. In that case we need to call set_nn_state
+ * ourselves as state_change couldn't get the nn_lock and call set_nn_state
+ * itself.
+ */
+static void r2net_shutdown_sc(struct work_struct *work)
+{
+ struct r2net_sock_container *sc =
+ container_of(work, struct r2net_sock_container,
+ sc_shutdown_work);
+ struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
+
+ sclog(sc, "shutting down\n");
+
+ /* drop the callbacks ref and call shutdown only once */
+ if (r2net_unregister_callbacks(sc->sc_sock->sk, sc)) {
+ /* we shouldn't flush as we're in the thread, the
+ * races with pending sc work structs are harmless */
+ del_timer_sync(&sc->sc_idle_timeout);
+ r2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
+ sc_put(sc);
+ kernel_sock_shutdown(sc->sc_sock, SHUT_RDWR);
+ }
+
+ /* not fatal so failed connects before the other guy has our
+ * heartbeat can be retried */
+ r2net_ensure_shutdown(nn, sc, 0);
+ sc_put(sc);
+}
+
+/* ------------------------------------------------------------ */
+
+static int r2net_handler_cmp(struct r2net_msg_handler *nmh, u32 msg_type,
+ u32 key)
+{
+ int ret = memcmp(&nmh->nh_key, &key, sizeof(key));
+
+ if (ret == 0)
+ ret = memcmp(&nmh->nh_msg_type, &msg_type, sizeof(msg_type));
+
+ return ret;
+}
+
+static struct r2net_msg_handler *
+r2net_handler_tree_lookup(u32 msg_type, u32 key, struct rb_node ***ret_p,
+ struct rb_node **ret_parent)
+{
+ struct rb_node **p = &r2net_handler_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct r2net_msg_handler *nmh, *ret = NULL;
+ int cmp;
+
+ while (*p) {
+ parent = *p;
+ nmh = rb_entry(parent, struct r2net_msg_handler, nh_node);
+ cmp = r2net_handler_cmp(nmh, msg_type, key);
+
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else if (cmp > 0)
+ p = &(*p)->rb_right;
+ else {
+ ret = nmh;
+ break;
+ }
+ }
+
+ if (ret_p != NULL)
+ *ret_p = p;
+ if (ret_parent != NULL)
+ *ret_parent = parent;
+
+ return ret;
+}
+
+static void r2net_handler_kref_release(struct kref *kref)
+{
+ struct r2net_msg_handler *nmh;
+ nmh = container_of(kref, struct r2net_msg_handler, nh_kref);
+
+ kfree(nmh);
+}
+
+static void r2net_handler_put(struct r2net_msg_handler *nmh)
+{
+ kref_put(&nmh->nh_kref, r2net_handler_kref_release);
+}
+
+/* max_len is protection for the handler func. incoming messages won't
+ * be given to the handler if their payload is longer than the max. */
+int r2net_register_handler(u32 msg_type, u32 key, u32 max_len,
+ r2net_msg_handler_func *func, void *data,
+ r2net_post_msg_handler_func *post_func,
+ struct list_head *unreg_list)
+{
+ struct r2net_msg_handler *nmh = NULL;
+ struct rb_node **p, *parent;
+ int ret = 0;
+
+ if (max_len > R2NET_MAX_PAYLOAD_BYTES) {
+ mlog(0, "max_len for message handler out of range: %u\n",
+ max_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!msg_type) {
+ mlog(0, "no message type provided: %u, %p\n", msg_type, func);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!func) {
+ mlog(0, "no message handler provided: %u, %p\n",
+ msg_type, func);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ nmh = kzalloc(sizeof(struct r2net_msg_handler), GFP_NOFS);
+ if (nmh == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ nmh->nh_func = func;
+ nmh->nh_func_data = data;
+ nmh->nh_post_func = post_func;
+ nmh->nh_msg_type = msg_type;
+ nmh->nh_max_len = max_len;
+ nmh->nh_key = key;
+ /* the tree and list get this ref.. they're both removed in
+ * unregister when this ref is dropped */
+ kref_init(&nmh->nh_kref);
+ INIT_LIST_HEAD(&nmh->nh_unregister_item);
+
+ write_lock(&r2net_handler_lock);
+ if (r2net_handler_tree_lookup(msg_type, key, &p, &parent))
+ ret = -EEXIST;
+ else {
+ rb_link_node(&nmh->nh_node, parent, p);
+ rb_insert_color(&nmh->nh_node, &r2net_handler_tree);
+ list_add_tail(&nmh->nh_unregister_item, unreg_list);
+
+ mlog(ML_TCP, "registered handler func %p type %u key %08x\n",
+ func, msg_type, key);
+ /* we've had some trouble with handlers seemingly vanishing. */
+ mlog_bug_on_msg(r2net_handler_tree_lookup(msg_type, key, &p,
+ &parent) == NULL,
+ "couldn't find handler we *just* registered "
+ "for type %u key %08x\n", msg_type, key);
+ }
+ write_unlock(&r2net_handler_lock);
+
+out:
+ if (ret)
+ kfree(nmh);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(r2net_register_handler);
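+
+/*
+ * Usage sketch (the handler, message type and key names are hypothetical,
+ * not part of this driver). Handlers are registered onto a caller-owned
+ * list and torn down in one call via r2net_unregister_handler_list()
+ * below; the handler's return value is the status handed back to the
+ * sender:
+ *
+ *	static LIST_HEAD(my_unreg_list);
+ *
+ *	static int my_handler(struct r2net_msg *msg, u32 len, void *data,
+ *			      void **ret_data)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	ret = r2net_register_handler(MY_MSG_TYPE, MY_KEY,
+ *				     sizeof(struct my_req), my_handler,
+ *				     NULL, NULL, &my_unreg_list);
+ *	...
+ *	r2net_unregister_handler_list(&my_unreg_list);
+ */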
+
+void r2net_unregister_handler_list(struct list_head *list)
+{
+ struct r2net_msg_handler *nmh, *n;
+
+ write_lock(&r2net_handler_lock);
+ list_for_each_entry_safe(nmh, n, list, nh_unregister_item) {
+ mlog(ML_TCP, "unregistering handler func %p type %u key %08x\n",
+ nmh->nh_func, nmh->nh_msg_type, nmh->nh_key);
+ rb_erase(&nmh->nh_node, &r2net_handler_tree);
+ list_del_init(&nmh->nh_unregister_item);
+ kref_put(&nmh->nh_kref, r2net_handler_kref_release);
+ }
+ write_unlock(&r2net_handler_lock);
+}
+EXPORT_SYMBOL_GPL(r2net_unregister_handler_list);
+
+static struct r2net_msg_handler *r2net_handler_get(u32 msg_type, u32 key)
+{
+ struct r2net_msg_handler *nmh;
+
+ read_lock(&r2net_handler_lock);
+ nmh = r2net_handler_tree_lookup(msg_type, key, NULL, NULL);
+ if (nmh)
+ kref_get(&nmh->nh_kref);
+ read_unlock(&r2net_handler_lock);
+
+ return nmh;
+}
+
+/* ------------------------------------------------------------ */
+
+static int r2net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
+{
+ int ret;
+ mm_segment_t oldfs;
+ struct kvec vec = {
+ .iov_len = len,
+ .iov_base = data,
+ };
+ struct msghdr msg = {
+ .msg_iovlen = 1,
+ .msg_iov = (struct iovec *)&vec,
+ .msg_flags = MSG_DONTWAIT,
+ };
+
+ oldfs = get_fs();
+ set_fs(get_ds());
+ ret = sock_recvmsg(sock, &msg, len, msg.msg_flags);
+ set_fs(oldfs);
+
+ return ret;
+}
+
+static int r2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
+ size_t veclen, size_t total)
+{
+ int ret;
+ mm_segment_t oldfs;
+ struct msghdr msg = {
+ .msg_iov = (struct iovec *)vec,
+ .msg_iovlen = veclen,
+ };
+
+ if (sock == NULL) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ oldfs = get_fs();
+ set_fs(get_ds());
+ ret = sock_sendmsg(sock, &msg, total);
+ set_fs(oldfs);
+ if (ret != total) {
+ mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret,
+ total);
+ if (ret >= 0)
+ ret = -EPIPE; /* should be smarter, I bet */
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret < 0)
+ mlog(0, "returning error: %d\n", ret);
+ return ret;
+}
+
+static void r2net_sendpage(struct r2net_sock_container *sc,
+ void *kmalloced_virt,
+ size_t size)
+{
+ struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
+ ssize_t ret;
+
+ while (1) {
+ mutex_lock(&sc->sc_send_lock);
+ ret = sc->sc_sock->ops->sendpage(sc->sc_sock,
+ virt_to_page(kmalloced_virt),
+ (long)kmalloced_virt & ~PAGE_MASK,
+ size, MSG_DONTWAIT);
+ mutex_unlock(&sc->sc_send_lock);
+ if (ret == size)
+ break;
+ if (ret == (ssize_t)-EAGAIN) {
+ mlog(0, "sendpage of size %zu to " SC_NODEF_FMT
+ " returned EAGAIN\n", size, sc->sc_node->nd_name,
+ sc->sc_node->nd_num,
+ &sc->sc_node->nd_ipv4_address,
+ ntohs(sc->sc_node->nd_ipv4_port));
+ cond_resched();
+ continue;
+ }
+ mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
+ " failed with %zd\n", size, sc->sc_node->nd_name,
+ sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
+ ntohs(sc->sc_node->nd_ipv4_port), ret);
+ r2net_ensure_shutdown(nn, sc, 0);
+ break;
+ }
+}
+
+static void r2net_init_msg(struct r2net_msg *msg, u16 data_len,
+ u16 msg_type, u32 key)
+{
+ memset(msg, 0, sizeof(struct r2net_msg));
+ msg->magic = cpu_to_be16(R2NET_MSG_MAGIC);
+ msg->data_len = cpu_to_be16(data_len);
+ msg->msg_type = cpu_to_be16(msg_type);
+ msg->sys_status = cpu_to_be32(R2NET_ERR_NONE);
+ msg->status = 0;
+ msg->key = cpu_to_be32(key);
+}
+
+static int r2net_tx_can_proceed(struct r2net_node *nn,
+ struct r2net_sock_container **sc_ret,
+ int *error)
+{
+ int ret = 0;
+
+ spin_lock(&nn->nn_lock);
+ if (nn->nn_persistent_error) {
+ ret = 1;
+ *sc_ret = NULL;
+ *error = nn->nn_persistent_error;
+ } else if (nn->nn_sc_valid) {
+ kref_get(&nn->nn_sc->sc_kref);
+
+ ret = 1;
+ *sc_ret = nn->nn_sc;
+ *error = 0;
+ }
+ spin_unlock(&nn->nn_lock);
+
+ return ret;
+}
+
+/* Get a map of all nodes to which this node is currently connected */
+void r2net_fill_node_map(unsigned long *map, unsigned bytes)
+{
+ struct r2net_sock_container *sc;
+ int node, ret;
+
+ BUG_ON(bytes < (BITS_TO_LONGS(R2NM_MAX_NODES) * sizeof(unsigned long)));
+
+ memset(map, 0, bytes);
+ for (node = 0; node < R2NM_MAX_NODES; ++node) {
+ /* skip nodes for which no connect decision has been made yet,
+ * otherwise sc and ret are read uninitialized */
+ if (!r2net_tx_can_proceed(r2net_nn_from_num(node), &sc, &ret))
+ continue;
+ if (!ret) {
+ set_bit(node, map);
+ sc_put(sc);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(r2net_fill_node_map);
+
+int r2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
+ size_t caller_veclen, u8 target_node, int *status)
+{
+ int ret = 0;
+ struct r2net_msg *msg = NULL;
+ size_t veclen, caller_bytes = 0;
+ struct kvec *vec = NULL;
+ struct r2net_sock_container *sc = NULL;
+ struct r2net_node *nn = r2net_nn_from_num(target_node);
+ struct r2net_status_wait nsw = {
+ .ns_node_item = LIST_HEAD_INIT(nsw.ns_node_item),
+ };
+ struct r2net_send_tracking nst;
+
+ /* this may be a general bug fix */
+ init_waitqueue_head(&nsw.ns_wq);
+
+ r2net_init_nst(&nst, msg_type, key, current, target_node);
+
+ if (r2net_wq == NULL) {
+ mlog(0, "attempt to tx without r2netd running\n");
+ ret = -ESRCH;
+ goto out;
+ }
+
+ if (caller_veclen == 0) {
+ mlog(0, "bad kvec array length\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ caller_bytes = iov_length((struct iovec *)caller_vec, caller_veclen);
+ if (caller_bytes > R2NET_MAX_PAYLOAD_BYTES) {
+ mlog(0, "total payload len %zu too large\n", caller_bytes);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (target_node == r2nm_this_node()) {
+ ret = -ELOOP;
+ goto out;
+ }
+
+ r2net_debug_add_nst(&nst);
+
+ r2net_set_nst_sock_time(&nst);
+
+ wait_event(nn->nn_sc_wq, r2net_tx_can_proceed(nn, &sc, &ret));
+ if (ret)
+ goto out;
+
+ r2net_set_nst_sock_container(&nst, sc);
+
+ veclen = caller_veclen + 1;
+ vec = kmalloc(sizeof(struct kvec) * veclen, GFP_ATOMIC);
+ if (vec == NULL) {
+ mlog(0, "failed to %zu element kvec!\n", veclen);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ msg = kmalloc(sizeof(struct r2net_msg), GFP_ATOMIC);
+ if (!msg) {
+ mlog(0, "failed to allocate a r2net_msg!\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ r2net_init_msg(msg, caller_bytes, msg_type, key);
+
+ vec[0].iov_len = sizeof(struct r2net_msg);
+ vec[0].iov_base = msg;
+ memcpy(&vec[1], caller_vec, caller_veclen * sizeof(struct kvec));
+
+ ret = r2net_prep_nsw(nn, &nsw);
+ if (ret)
+ goto out;
+
+ msg->msg_num = cpu_to_be32(nsw.ns_id);
+ r2net_set_nst_msg_id(&nst, nsw.ns_id);
+
+ r2net_set_nst_send_time(&nst);
+
+ /* finally, convert the message header to network byte-order
+ * and send */
+ mutex_lock(&sc->sc_send_lock);
+ ret = r2net_send_tcp_msg(sc->sc_sock, vec, veclen,
+ sizeof(struct r2net_msg) + caller_bytes);
+ mutex_unlock(&sc->sc_send_lock);
+ msglog(msg, "sending returned %d\n", ret);
+ if (ret < 0) {
+ mlog(0, "error returned from r2net_send_tcp_msg=%d\n", ret);
+ goto out;
+ }
+
+ /* wait on other node's handler */
+ r2net_set_nst_status_time(&nst);
+ wait_event(nsw.ns_wq, r2net_nsw_completed(nn, &nsw));
+
+ r2net_update_send_stats(&nst, sc);
+
+ /* Note that we avoid overwriting the caller's status return
+ * variable if a system error was reported on the other
+ * side. Callers beware. */
+ ret = r2net_sys_err_to_errno(nsw.ns_sys_status);
+ if (status && !ret)
+ *status = nsw.ns_status;
+
+ mlog(0, "woken, returning system status %d, user status %d\n",
+ ret, nsw.ns_status);
+out:
+ r2net_debug_del_nst(&nst); /* must be before dropping sc and node */
+ if (sc)
+ sc_put(sc);
+ kfree(vec);
+ kfree(msg);
+ r2net_complete_nsw(nn, &nsw, 0, 0, 0);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(r2net_send_message_vec);
+
+int r2net_send_message(u32 msg_type, u32 key, void *data, u32 len,
+ u8 target_node, int *status)
+{
+ struct kvec vec = {
+ .iov_base = data,
+ .iov_len = len,
+ };
+ return r2net_send_message_vec(msg_type, key, &vec, 1,
+ target_node, status);
+}
+EXPORT_SYMBOL_GPL(r2net_send_message);
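+
+/*
+ * Usage sketch (names hypothetical): a caller typically sends a fixed
+ * struct and reads two results back. The function's return value is the
+ * local/system error; *status is whatever the remote handler returned:
+ *
+ *	struct my_req req = { ... };
+ *	int status = 0;
+ *
+ *	ret = r2net_send_message(MY_MSG_TYPE, MY_KEY, &req, sizeof(req),
+ *				 target_node, &status);
+ */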
+
+static int r2net_send_status_magic(struct socket *sock, struct r2net_msg *hdr,
+ enum r2net_system_error syserr, int err)
+{
+ struct kvec vec = {
+ .iov_base = hdr,
+ .iov_len = sizeof(struct r2net_msg),
+ };
+
+ BUG_ON(syserr >= R2NET_ERR_MAX);
+
+ /* leave other fields intact from the incoming message, msg_num
+ * in particular */
+ hdr->sys_status = cpu_to_be32(syserr);
+ hdr->status = cpu_to_be32(err);
+ /* twiddle the magic */
+ hdr->magic = cpu_to_be16(R2NET_MSG_STATUS_MAGIC);
+ hdr->data_len = 0;
+
+ msglog(hdr, "about to send status magic %d\n", err);
+ /* hdr has been in host byteorder this whole time */
+ return r2net_send_tcp_msg(sock, &vec, 1, sizeof(struct r2net_msg));
+}
+
+/*
+ * "data magic" is a long version of "status magic" where the message
+ * payload actually contains data to be passed in reply to certain messages
+ */
+static int r2net_send_data_magic(struct r2net_sock_container *sc,
+ struct r2net_msg *hdr,
+ void *data, size_t data_len,
+ enum r2net_system_error syserr, int err)
+{
+ struct kvec vec[2];
+ int ret;
+
+ vec[0].iov_base = hdr;
+ vec[0].iov_len = sizeof(struct r2net_msg);
+ vec[1].iov_base = data;
+ vec[1].iov_len = data_len;
+
+ BUG_ON(syserr >= R2NET_ERR_MAX);
+
+ /* leave other fields intact from the incoming message, msg_num
+ * in particular */
+ hdr->sys_status = cpu_to_be32(syserr);
+ hdr->status = cpu_to_be32(err);
+ hdr->magic = cpu_to_be16(R2NET_MSG_DATA_MAGIC); /* twiddle magic */
+ hdr->data_len = cpu_to_be16(data_len);
+
+ msglog(hdr, "about to send data magic %d\n", err);
+ /* hdr has been in host byteorder this whole time */
+ ret = r2net_send_tcp_msg(sc->sc_sock, vec, 2,
+ sizeof(struct r2net_msg) + data_len);
+ return ret;
+}
+
+/*
+ * called by a message handler to convert an otherwise normal reply
+ * message into a "data magic" message
+ */
+void r2net_force_data_magic(struct r2net_msg *hdr, u16 msgtype, u32 msgkey)
+{
+ hdr->magic = cpu_to_be16(R2NET_MSG_DATA_MAGIC);
+ hdr->msg_type = cpu_to_be16(msgtype);
+ hdr->key = cpu_to_be32(msgkey);
+}
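+
+/*
+ * Sketch of a handler using the data-magic reply path (hypothetical
+ * handler; the flow matches r2net_process_message() below): the handler
+ * points *ret_data at the reply payload, rewrites the incoming header's
+ * magic, and returns the payload length in bytes:
+ *
+ *	static int my_get_handler(struct r2net_msg *msg, u32 len, void *data,
+ *				  void **ret_data)
+ *	{
+ *		*ret_data = my_reply_buf;
+ *		r2net_force_data_magic(msg, MY_REPLY_TYPE, MY_KEY);
+ *		return my_reply_len;
+ *	}
+ */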
+
+/* this returns -errno if the header was unknown or too large, etc.
+ * after this is called the buffer is reused for the next message */
+static int r2net_process_message(struct r2net_sock_container *sc,
+ struct r2net_msg *hdr)
+{
+ struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
+ int ret = 0, handler_status;
+ enum r2net_system_error syserr;
+ struct r2net_msg_handler *nmh = NULL;
+ void *ret_data = NULL;
+ int data_magic = 0;
+
+ msglog(hdr, "processing message\n");
+
+ r2net_sc_postpone_idle(sc);
+
+ switch (be16_to_cpu(hdr->magic)) {
+
+ case R2NET_MSG_STATUS_MAGIC:
+ /* special type for returning message status */
+ r2net_complete_nsw(nn, NULL, be32_to_cpu(hdr->msg_num),
+ be32_to_cpu(hdr->sys_status),
+ be32_to_cpu(hdr->status));
+ goto out;
+ case R2NET_MSG_KEEP_REQ_MAGIC:
+ r2net_sendpage(sc, r2net_keep_resp, sizeof(*r2net_keep_resp));
+ goto out;
+ case R2NET_MSG_KEEP_RESP_MAGIC:
+ goto out;
+ case R2NET_MSG_MAGIC:
+ break;
+ case R2NET_MSG_DATA_MAGIC:
+ /*
+ * unlike a normal status magic, a data magic DOES
+ * (MUST) have a handler, so the control flow is
+ * a little funky here as a result
+ */
+ data_magic = 1;
+ break;
+ default:
+ msglog(hdr, "bad magic\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* find a handler for it */
+ handler_status = 0;
+ nmh = r2net_handler_get(be16_to_cpu(hdr->msg_type),
+ be32_to_cpu(hdr->key));
+ if (!nmh) {
+ mlog(ML_TCP, "couldn't find handler for type %u key %08x\n",
+ be16_to_cpu(hdr->msg_type), be32_to_cpu(hdr->key));
+ syserr = R2NET_ERR_NO_HNDLR;
+ goto out_respond;
+ }
+
+ syserr = R2NET_ERR_NONE;
+
+ if (be16_to_cpu(hdr->data_len) > nmh->nh_max_len)
+ syserr = R2NET_ERR_OVERFLOW;
+
+ if (syserr != R2NET_ERR_NONE)
+ goto out_respond;
+
+ r2net_set_func_start_time(sc);
+ sc->sc_msg_key = be32_to_cpu(hdr->key);
+ sc->sc_msg_type = be16_to_cpu(hdr->msg_type);
+ handler_status = (nmh->nh_func)(hdr, sizeof(struct r2net_msg) +
+ be16_to_cpu(hdr->data_len),
+ nmh->nh_func_data, &ret_data);
+ if (data_magic) {
+ /*
+ * handler handled data sent in reply to request
+ * so complete the transaction
+ */
+ r2net_complete_nsw(nn, NULL, be32_to_cpu(hdr->msg_num),
+ be32_to_cpu(hdr->sys_status), handler_status);
+ goto out;
+ }
+ /*
+ * handler changed magic to DATA_MAGIC to reply to request for data,
+ * implies ret_data points to data to return and handler_status
+ * is the number of bytes of data
+ */
+ if (be16_to_cpu(hdr->magic) == R2NET_MSG_DATA_MAGIC) {
+ ret = r2net_send_data_magic(sc, hdr,
+ ret_data, handler_status,
+ syserr, 0);
+ hdr = NULL;
+ mlog(0, "sending data reply %d, syserr %d returned %d\n",
+ handler_status, syserr, ret);
+ r2net_set_func_stop_time(sc);
+
+ r2net_update_recv_stats(sc);
+ goto out;
+ }
+ r2net_set_func_stop_time(sc);
+
+ r2net_update_recv_stats(sc);
+
+out_respond:
+ /* this destroys the hdr, so don't use it after this */
+ mutex_lock(&sc->sc_send_lock);
+ ret = r2net_send_status_magic(sc->sc_sock, hdr, syserr,
+ handler_status);
+ mutex_unlock(&sc->sc_send_lock);
+ hdr = NULL;
+ mlog(0, "sending handler status %d, syserr %d returned %d\n",
+ handler_status, syserr, ret);
+
+ if (nmh) {
+ BUG_ON(ret_data != NULL && nmh->nh_post_func == NULL);
+ if (nmh->nh_post_func)
+ (nmh->nh_post_func)(handler_status, nmh->nh_func_data,
+ ret_data);
+ }
+
+out:
+ if (nmh)
+ r2net_handler_put(nmh);
+ return ret;
+}
+
+static int r2net_check_handshake(struct r2net_sock_container *sc)
+{
+ struct r2net_handshake *hand = page_address(sc->sc_page);
+ struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
+
+ if (hand->protocol_version != cpu_to_be64(R2NET_PROTOCOL_VERSION)) {
+ printk(KERN_NOTICE "ramster: " SC_NODEF_FMT " Advertised net "
+ "protocol version %llu but %llu is required. "
+ "Disconnecting.\n", sc->sc_node->nd_name,
+ sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
+ ntohs(sc->sc_node->nd_ipv4_port),
+ (unsigned long long)be64_to_cpu(hand->protocol_version),
+ R2NET_PROTOCOL_VERSION);
+
+ /* don't bother reconnecting if it's the wrong version. */
+ r2net_ensure_shutdown(nn, sc, -ENOTCONN);
+ return -1;
+ }
+
+ /*
+ * Ensure timeouts are consistent with other nodes, otherwise
+ * we can end up with one node thinking that the other is down
+ * when it isn't. This can ultimately cause corruption.
+ */
+ if (be32_to_cpu(hand->r2net_idle_timeout_ms) !=
+ r2net_idle_timeout()) {
+ printk(KERN_NOTICE "ramster: " SC_NODEF_FMT " uses a network "
+ "idle timeout of %u ms, but we use %u ms locally. "
+ "Disconnecting.\n", sc->sc_node->nd_name,
+ sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
+ ntohs(sc->sc_node->nd_ipv4_port),
+ be32_to_cpu(hand->r2net_idle_timeout_ms),
+ r2net_idle_timeout());
+ r2net_ensure_shutdown(nn, sc, -ENOTCONN);
+ return -1;
+ }
+
+ if (be32_to_cpu(hand->r2net_keepalive_delay_ms) !=
+ r2net_keepalive_delay()) {
+ printk(KERN_NOTICE "ramster: " SC_NODEF_FMT " uses a keepalive "
+ "delay of %u ms, but we use %u ms locally. "
+ "Disconnecting.\n", sc->sc_node->nd_name,
+ sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
+ ntohs(sc->sc_node->nd_ipv4_port),
+ be32_to_cpu(hand->r2net_keepalive_delay_ms),
+ r2net_keepalive_delay());
+ r2net_ensure_shutdown(nn, sc, -ENOTCONN);
+ return -1;
+ }
+
+ if (be32_to_cpu(hand->r2hb_heartbeat_timeout_ms) !=
+ R2HB_MAX_WRITE_TIMEOUT_MS) {
+ printk(KERN_NOTICE "ramster: " SC_NODEF_FMT " uses a heartbeat "
+ "timeout of %u ms, but we use %u ms locally. "
+ "Disconnecting.\n", sc->sc_node->nd_name,
+ sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
+ ntohs(sc->sc_node->nd_ipv4_port),
+ be32_to_cpu(hand->r2hb_heartbeat_timeout_ms),
+ R2HB_MAX_WRITE_TIMEOUT_MS);
+ r2net_ensure_shutdown(nn, sc, -ENOTCONN);
+ return -1;
+ }
+
+ sc->sc_handshake_ok = 1;
+
+ spin_lock(&nn->nn_lock);
+ /* set valid and queue the idle timers only if it hasn't been
+ * shut down already */
+ if (nn->nn_sc == sc) {
+ r2net_sc_reset_idle_timer(sc);
+ atomic_set(&nn->nn_timeout, 0);
+ r2net_set_nn_state(nn, sc, 1, 0);
+ }
+ spin_unlock(&nn->nn_lock);
+
+ /* shift everything up as though it wasn't there */
+ sc->sc_page_off -= sizeof(struct r2net_handshake);
+ if (sc->sc_page_off)
+ memmove(hand, hand + 1, sc->sc_page_off);
+
+ return 0;
+}
+
+/* this demuxes the queued rx bytes into header or payload bits and calls
+ * handlers as each full message is read off the socket. it returns -error,
+ * 0 for eof, or > 0 for progress made. */
+static int r2net_advance_rx(struct r2net_sock_container *sc)
+{
+ struct r2net_msg *hdr;
+ int ret = 0;
+ void *data;
+ size_t datalen;
+
+ sclog(sc, "receiving\n");
+ r2net_set_advance_start_time(sc);
+
+ if (unlikely(sc->sc_handshake_ok == 0)) {
+ if (sc->sc_page_off < sizeof(struct r2net_handshake)) {
+ data = page_address(sc->sc_page) + sc->sc_page_off;
+ datalen = sizeof(struct r2net_handshake) -
+ sc->sc_page_off;
+ ret = r2net_recv_tcp_msg(sc->sc_sock, data, datalen);
+ if (ret > 0)
+ sc->sc_page_off += ret;
+ }
+
+ if (sc->sc_page_off == sizeof(struct r2net_handshake)) {
+ r2net_check_handshake(sc);
+ if (unlikely(sc->sc_handshake_ok == 0))
+ ret = -EPROTO;
+ }
+ goto out;
+ }
+
+ /* do we need more header? */
+ if (sc->sc_page_off < sizeof(struct r2net_msg)) {
+ data = page_address(sc->sc_page) + sc->sc_page_off;
+ datalen = sizeof(struct r2net_msg) - sc->sc_page_off;
+ ret = r2net_recv_tcp_msg(sc->sc_sock, data, datalen);
+ if (ret > 0) {
+ sc->sc_page_off += ret;
+ /* only swab incoming here.. we can
+ * only get here once as we cross from
+ * being under to over */
+ if (sc->sc_page_off == sizeof(struct r2net_msg)) {
+ hdr = page_address(sc->sc_page);
+ if (be16_to_cpu(hdr->data_len) >
+ R2NET_MAX_PAYLOAD_BYTES)
+ ret = -EOVERFLOW;
+ }
+ }
+ if (ret <= 0)
+ goto out;
+ }
+
+ if (sc->sc_page_off < sizeof(struct r2net_msg)) {
+ /* oof, still don't have a header */
+ goto out;
+ }
+
+ /* this was swabbed above when we first read it */
+ hdr = page_address(sc->sc_page);
+
+ msglog(hdr, "at page_off %zu\n", sc->sc_page_off);
+
+ /* do we need more payload? */
+ if (sc->sc_page_off - sizeof(struct r2net_msg) <
+ be16_to_cpu(hdr->data_len)) {
+ /* need more payload */
+ data = page_address(sc->sc_page) + sc->sc_page_off;
+ datalen = (sizeof(struct r2net_msg) +
+ be16_to_cpu(hdr->data_len)) -
+ sc->sc_page_off;
+ ret = r2net_recv_tcp_msg(sc->sc_sock, data, datalen);
+ if (ret > 0)
+ sc->sc_page_off += ret;
+ if (ret <= 0)
+ goto out;
+ }
+
+ if (sc->sc_page_off - sizeof(struct r2net_msg) ==
+ be16_to_cpu(hdr->data_len)) {
+ /* we can only get here once, the first time we read
+ * the payload.. so set ret to progress if the handler
+ * works out. after calling this the message is toast */
+ ret = r2net_process_message(sc, hdr);
+ if (ret == 0)
+ ret = 1;
+ sc->sc_page_off = 0;
+ }
+
+out:
+ sclog(sc, "ret = %d\n", ret);
+ r2net_set_advance_stop_time(sc);
+ return ret;
+}
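+
+/*
+ * Worked example of the sc_page_off accounting above (illustrative): for
+ * a message with a 16-byte payload, the 24-byte r2net_msg header may
+ * arrive in pieces. A first read of 10 bytes leaves sc_page_off at 10,
+ * still short of the header, so we return and wait for more. Once
+ * sc_page_off reaches 24 the data_len sanity check runs exactly once;
+ * further reads accumulate payload until sc_page_off - 24 == 16, at which
+ * point the message is processed and sc_page_off resets to 0 for the
+ * next message.
+ */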
+
+/* this work func is triggered by data_ready. it reads until it can read no
+ * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing
+ * our work the work struct will be marked and we'll be called again. */
+static void r2net_rx_until_empty(struct work_struct *work)
+{
+ struct r2net_sock_container *sc =
+ container_of(work, struct r2net_sock_container, sc_rx_work);
+ int ret;
+
+ do {
+ ret = r2net_advance_rx(sc);
+ } while (ret > 0);
+
+ if (ret <= 0 && ret != -EAGAIN) {
+ struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
+ sclog(sc, "saw error %d, closing\n", ret);
+ /* not permanent so read failed handshake can retry */
+ r2net_ensure_shutdown(nn, sc, 0);
+ }
+
+ sc_put(sc);
+}
+
+static int r2net_set_nodelay(struct socket *sock)
+{
+ int ret, val = 1;
+ mm_segment_t oldfs;
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+
+ /*
+ * Dear unsuspecting programmer,
+ *
+ * Don't use sock_setsockopt() for SOL_TCP. It doesn't check its level
+ * argument and assumes SOL_SOCKET so, say, your TCP_NODELAY will
+ * silently turn into SO_DEBUG.
+ *
+ * Yours,
+ * Keeper of hilariously fragile interfaces.
+ */
+ ret = sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY,
+ (char __user *)&val, sizeof(val));
+
+ set_fs(oldfs);
+ return ret;
+}
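+
+/*
+ * Illustration of the pitfall described above (do not do this): with
+ * sock_setsockopt() the SOL_TCP level is ignored and SOL_SOCKET assumed,
+ * and since TCP_NODELAY and SO_DEBUG share the value 1 the call below
+ * would quietly enable SO_DEBUG instead:
+ *
+ *	sock_setsockopt(sock, SOL_TCP, TCP_NODELAY,
+ *			(char __user *)&val, sizeof(val));
+ */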
+
+static void r2net_initialize_handshake(void)
+{
+ r2net_hand->r2hb_heartbeat_timeout_ms = cpu_to_be32(
+ R2HB_MAX_WRITE_TIMEOUT_MS);
+ r2net_hand->r2net_idle_timeout_ms = cpu_to_be32(r2net_idle_timeout());
+ r2net_hand->r2net_keepalive_delay_ms = cpu_to_be32(
+ r2net_keepalive_delay());
+ r2net_hand->r2net_reconnect_delay_ms = cpu_to_be32(
+ r2net_reconnect_delay());
+}
+
+/* ------------------------------------------------------------ */
+
+/* called when a connect completes and after a sock is accepted. the
+ * rx path will see the response and mark the sc valid */
+static void r2net_sc_connect_completed(struct work_struct *work)
+{
+ struct r2net_sock_container *sc =
+ container_of(work, struct r2net_sock_container,
+ sc_connect_work);
+
+ mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
+ (unsigned long long)R2NET_PROTOCOL_VERSION,
+ (unsigned long long)be64_to_cpu(r2net_hand->connector_id));
+
+ r2net_initialize_handshake();
+ r2net_sendpage(sc, r2net_hand, sizeof(*r2net_hand));
+ sc_put(sc);
+}
+
+/* this is called as a work_struct func. */
+static void r2net_sc_send_keep_req(struct work_struct *work)
+{
+ struct r2net_sock_container *sc =
+ container_of(work, struct r2net_sock_container,
+ sc_keepalive_work.work);
+
+ r2net_sendpage(sc, r2net_keep_req, sizeof(*r2net_keep_req));
+ sc_put(sc);
+}
+
+/* socket shutdown does a del_timer_sync against this as it tears down.
+ * we can't start this timer until we've got to the point in sc buildup
+ * where shutdown is going to be involved */
+static void r2net_idle_timer(unsigned long data)
+{
+ struct r2net_sock_container *sc = (struct r2net_sock_container *)data;
+#ifdef CONFIG_DEBUG_FS
+ unsigned long msecs = ktime_to_ms(ktime_get()) -
+ ktime_to_ms(sc->sc_tv_timer);
+#else
+ unsigned long msecs = r2net_idle_timeout();
+#endif
+
+ printk(KERN_NOTICE "ramster: Connection to " SC_NODEF_FMT " has been "
+ "idle for %lu.%lu secs, shutting it down.\n",
+ sc->sc_node->nd_name, sc->sc_node->nd_num,
+ &sc->sc_node->nd_ipv4_address, ntohs(sc->sc_node->nd_ipv4_port),
+ msecs / 1000, msecs % 1000);
+
+ /*
+ * Initialize the nn_timeout so that the next connection attempt
+ * will continue in r2net_start_connect.
+ */
+ /* Avoid spurious shutdowns... not sure if this is still necessary */
+ pr_err("ramster_idle_timer, skipping shutdown work\n");
+#if 0
+ /* old code used to do these two lines */
+ atomic_set(&nn->nn_timeout, 1);
+ r2net_sc_queue_work(sc, &sc->sc_shutdown_work);
+#endif
+}
+
+static void r2net_sc_reset_idle_timer(struct r2net_sock_container *sc)
+{
+ r2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
+ r2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work,
+ msecs_to_jiffies(r2net_keepalive_delay()));
+ r2net_set_sock_timer(sc);
+ mod_timer(&sc->sc_idle_timeout,
+ jiffies + msecs_to_jiffies(r2net_idle_timeout()));
+}
+
+static void r2net_sc_postpone_idle(struct r2net_sock_container *sc)
+{
+ /* Only push out an existing timer */
+ if (timer_pending(&sc->sc_idle_timeout))
+ r2net_sc_reset_idle_timer(sc);
+}
+
+/* this work func is kicked whenever a path sets the nn state which doesn't
+ * have valid set. This includes seeing hb come up, losing a connection,
+ * having a connect attempt fail, etc. This centralizes the logic which decides
+ * if a connect attempt should be made or if we should give up and all future
+ * transmit attempts should fail */
+static void r2net_start_connect(struct work_struct *work)
+{
+ struct r2net_node *nn =
+ container_of(work, struct r2net_node, nn_connect_work.work);
+ struct r2net_sock_container *sc = NULL;
+ struct r2nm_node *node = NULL, *mynode = NULL;
+ struct socket *sock = NULL;
+ struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
+ int ret = 0, stop;
+ unsigned int timeout;
+
+ /* if we're greater we initiate tx, otherwise we accept */
+ if (r2nm_this_node() <= r2net_num_from_nn(nn))
+ goto out;
+
+ /* watch for racing with tearing a node down */
+ node = r2nm_get_node_by_num(r2net_num_from_nn(nn));
+ if (node == NULL) {
+ ret = 0;
+ goto out;
+ }
+
+ mynode = r2nm_get_node_by_num(r2nm_this_node());
+ if (mynode == NULL) {
+ ret = 0;
+ goto out;
+ }
+
+ spin_lock(&nn->nn_lock);
+ /*
+ * see if we already have one pending or have given up.
+ * For nn_timeout, it is set when we close the connection
+ * because of the idle time out. So it means that we have
+ * at least connected to that node successfully once,
+ * now try to connect to it again.
+ */
+ timeout = atomic_read(&nn->nn_timeout);
+ stop = (nn->nn_sc ||
+ (nn->nn_persistent_error &&
+ (nn->nn_persistent_error != -ENOTCONN || timeout == 0)));
+ spin_unlock(&nn->nn_lock);
+ if (stop)
+ goto out;
+
+ nn->nn_last_connect_attempt = jiffies;
+
+ sc = sc_alloc(node);
+ if (sc == NULL) {
+ mlog(0, "couldn't allocate sc\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+ if (ret < 0) {
+ mlog(0, "can't create socket: %d\n", ret);
+ goto out;
+ }
+ sc->sc_sock = sock; /* freed by sc_kref_release */
+
+ sock->sk->sk_allocation = GFP_ATOMIC;
+
+ myaddr.sin_family = AF_INET;
+ myaddr.sin_addr.s_addr = mynode->nd_ipv4_address;
+ myaddr.sin_port = htons(0); /* any port */
+
+ ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr,
+ sizeof(myaddr));
+ if (ret) {
+ mlog(ML_ERROR, "bind failed with %d at address %pI4\n",
+ ret, &mynode->nd_ipv4_address);
+ goto out;
+ }
+
+ ret = r2net_set_nodelay(sc->sc_sock);
+ if (ret) {
+ mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
+ goto out;
+ }
+
+ r2net_register_callbacks(sc->sc_sock->sk, sc);
+
+ spin_lock(&nn->nn_lock);
+ /* handshake completion will set nn->nn_sc_valid */
+ r2net_set_nn_state(nn, sc, 0, 0);
+ spin_unlock(&nn->nn_lock);
+
+ remoteaddr.sin_family = AF_INET;
+ remoteaddr.sin_addr.s_addr = node->nd_ipv4_address;
+ remoteaddr.sin_port = node->nd_ipv4_port;
+
+ ret = sc->sc_sock->ops->connect(sc->sc_sock,
+ (struct sockaddr *)&remoteaddr,
+ sizeof(remoteaddr),
+ O_NONBLOCK);
+ if (ret == -EINPROGRESS)
+ ret = 0;
+
+out:
+ /* sc is NULL when sc_alloc() failed, so don't dereference it then */
+ if (ret && sc) {
+ printk(KERN_NOTICE "ramster: Connect attempt to " SC_NODEF_FMT
+ " failed with errno %d\n", sc->sc_node->nd_name,
+ sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
+ ntohs(sc->sc_node->nd_ipv4_port), ret);
+ /* 0 err so that another will be queued and attempted
+ * from set_nn_state */
+ r2net_ensure_shutdown(nn, sc, 0);
+ }
+ if (sc)
+ sc_put(sc);
+ if (node)
+ r2nm_node_put(node);
+ if (mynode)
+ r2nm_node_put(mynode);
+
+ return;
+}
+
+static void r2net_connect_expired(struct work_struct *work)
+{
+ struct r2net_node *nn =
+ container_of(work, struct r2net_node, nn_connect_expired.work);
+
+ spin_lock(&nn->nn_lock);
+ if (!nn->nn_sc_valid) {
+ printk(KERN_NOTICE "ramster: No connection established with "
+ "node %u after %u.%u seconds, giving up.\n",
+ r2net_num_from_nn(nn),
+ r2net_idle_timeout() / 1000,
+ r2net_idle_timeout() % 1000);
+
+ r2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
+ }
+ spin_unlock(&nn->nn_lock);
+}
+
+static void r2net_still_up(struct work_struct *work)
+{
+}
+
+/* ------------------------------------------------------------ */
+
+void r2net_disconnect_node(struct r2nm_node *node)
+{
+ struct r2net_node *nn = r2net_nn_from_num(node->nd_num);
+
+ /* don't reconnect until it's heartbeating again */
+ spin_lock(&nn->nn_lock);
+ atomic_set(&nn->nn_timeout, 0);
+ r2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
+ spin_unlock(&nn->nn_lock);
+
+ if (r2net_wq) {
+ cancel_delayed_work(&nn->nn_connect_expired);
+ cancel_delayed_work(&nn->nn_connect_work);
+ cancel_delayed_work(&nn->nn_still_up);
+ flush_workqueue(r2net_wq);
+ }
+}
+
+static void r2net_hb_node_down_cb(struct r2nm_node *node, int node_num,
+ void *data)
+{
+ if (!node)
+ return;
+
+ if (node_num != r2nm_this_node())
+ r2net_disconnect_node(node);
+
+ BUG_ON(atomic_read(&r2net_connected_peers) < 0);
+}
+
+static void r2net_hb_node_up_cb(struct r2nm_node *node, int node_num,
+ void *data)
+{
+ struct r2net_node *nn = r2net_nn_from_num(node_num);
+
+ BUG_ON(!node);
+
+ /* ensure an immediate connect attempt */
+ nn->nn_last_connect_attempt = jiffies -
+ (msecs_to_jiffies(r2net_reconnect_delay()) + 1);
+
+ if (node_num != r2nm_this_node()) {
+ /* believe it or not, accept and node heartbeat testing
+ * can succeed for this node before we get here.. so
+ * only use set_nn_state to clear the persistent error
+ * if that hasn't already happened */
+ spin_lock(&nn->nn_lock);
+ atomic_set(&nn->nn_timeout, 0);
+ if (nn->nn_persistent_error)
+ r2net_set_nn_state(nn, NULL, 0, 0);
+ spin_unlock(&nn->nn_lock);
+ }
+}
+
+void r2net_unregister_hb_callbacks(void)
+{
+ r2hb_unregister_callback(NULL, &r2net_hb_up);
+ r2hb_unregister_callback(NULL, &r2net_hb_down);
+}
+
+int r2net_register_hb_callbacks(void)
+{
+ int ret;
+
+ r2hb_setup_callback(&r2net_hb_down, R2HB_NODE_DOWN_CB,
+ r2net_hb_node_down_cb, NULL, R2NET_HB_PRI);
+ r2hb_setup_callback(&r2net_hb_up, R2HB_NODE_UP_CB,
+ r2net_hb_node_up_cb, NULL, R2NET_HB_PRI);
+
+ ret = r2hb_register_callback(NULL, &r2net_hb_up);
+ if (ret == 0)
+ ret = r2hb_register_callback(NULL, &r2net_hb_down);
+
+ if (ret)
+ r2net_unregister_hb_callbacks();
+
+ return ret;
+}
+
+/* ------------------------------------------------------------ */
+
+static int r2net_accept_one(struct socket *sock)
+{
+ int ret, slen;
+ struct sockaddr_in sin;
+ struct socket *new_sock = NULL;
+ struct r2nm_node *node = NULL;
+ struct r2nm_node *local_node = NULL;
+ struct r2net_sock_container *sc = NULL;
+ struct r2net_node *nn;
+
+ BUG_ON(sock == NULL);
+ ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
+ sock->sk->sk_protocol, &new_sock);
+ if (ret)
+ goto out;
+
+ new_sock->type = sock->type;
+ new_sock->ops = sock->ops;
+ ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
+ if (ret < 0)
+ goto out;
+
+ new_sock->sk->sk_allocation = GFP_ATOMIC;
+
+ ret = r2net_set_nodelay(new_sock);
+ if (ret) {
+ mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
+ goto out;
+ }
+
+ slen = sizeof(sin);
+ ret = new_sock->ops->getname(new_sock, (struct sockaddr *) &sin,
+ &slen, 1);
+ if (ret < 0)
+ goto out;
+
+ node = r2nm_get_node_by_ip(sin.sin_addr.s_addr);
+ if (node == NULL) {
+ printk(KERN_NOTICE "ramster: Attempt to connect from unknown "
+ "node at %pI4:%d\n", &sin.sin_addr.s_addr,
+ ntohs(sin.sin_port));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (r2nm_this_node() >= node->nd_num) {
+ local_node = r2nm_get_node_by_num(r2nm_this_node());
+ printk(KERN_NOTICE "ramster: Unexpected connect attempt seen "
+ "at node '%s' (%u, %pI4:%d) from node '%s' (%u, "
+ "%pI4:%d)\n", local_node->nd_name, local_node->nd_num,
+ &(local_node->nd_ipv4_address),
+ ntohs(local_node->nd_ipv4_port), node->nd_name,
+ node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* this happens all the time when the other node sees our heartbeat
+ * and tries to connect before we see their heartbeat */
+ if (!r2hb_check_node_heartbeating_from_callback(node->nd_num)) {
+ mlog(ML_CONN, "attempt to connect from node '%s' at "
+ "%pI4:%d but it isn't heartbeating\n",
+ node->nd_name, &sin.sin_addr.s_addr,
+ ntohs(sin.sin_port));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ nn = r2net_nn_from_num(node->nd_num);
+
+ spin_lock(&nn->nn_lock);
+ if (nn->nn_sc)
+ ret = -EBUSY;
+ else
+ ret = 0;
+ spin_unlock(&nn->nn_lock);
+ if (ret) {
+ printk(KERN_NOTICE "ramster: Attempt to connect from node '%s' "
+ "at %pI4:%d but it already has an open connection\n",
+ node->nd_name, &sin.sin_addr.s_addr,
+ ntohs(sin.sin_port));
+ goto out;
+ }
+
+ sc = sc_alloc(node);
+ if (sc == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ sc->sc_sock = new_sock;
+ new_sock = NULL;
+
+ spin_lock(&nn->nn_lock);
+ atomic_set(&nn->nn_timeout, 0);
+ r2net_set_nn_state(nn, sc, 0, 0);
+ spin_unlock(&nn->nn_lock);
+
+ r2net_register_callbacks(sc->sc_sock->sk, sc);
+ r2net_sc_queue_work(sc, &sc->sc_rx_work);
+
+ r2net_initialize_handshake();
+ r2net_sendpage(sc, r2net_hand, sizeof(*r2net_hand));
+
+out:
+ if (new_sock)
+ sock_release(new_sock);
+ if (node)
+ r2nm_node_put(node);
+ if (local_node)
+ r2nm_node_put(local_node);
+ if (sc)
+ sc_put(sc);
+ return ret;
+}
+
+static void r2net_accept_many(struct work_struct *work)
+{
+ struct socket *sock = r2net_listen_sock;
+ while (r2net_accept_one(sock) == 0)
+ cond_resched();
+}
+
+static void r2net_listen_data_ready(struct sock *sk, int bytes)
+{
+ void (*ready)(struct sock *sk, int bytes);
+
+ read_lock(&sk->sk_callback_lock);
+ ready = sk->sk_user_data;
+ if (ready == NULL) { /* check for teardown race */
+ ready = sk->sk_data_ready;
+ goto out;
+ }
+
+ /* ->sk_data_ready is also called for a newly established child socket
+ * before it has been accepted and the acceptor has set up their
+ * data_ready.. we only want to queue listen work for our listening
+ * socket */
+ if (sk->sk_state == TCP_LISTEN) {
+ mlog(ML_TCP, "bytes: %d\n", bytes);
+ queue_work(r2net_wq, &r2net_listen_work);
+ }
+
+out:
+ read_unlock(&sk->sk_callback_lock);
+ ready(sk, bytes);
+}
+
+static int r2net_open_listening_sock(__be32 addr, __be16 port)
+{
+ struct socket *sock = NULL;
+ int ret;
+ struct sockaddr_in sin = {
+ .sin_family = PF_INET,
+ .sin_addr = { .s_addr = addr },
+ .sin_port = port,
+ };
+
+ ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+ if (ret < 0) {
+ printk(KERN_ERR "ramster: Error %d while creating socket\n",
+ ret);
+ goto out;
+ }
+
+ sock->sk->sk_allocation = GFP_ATOMIC;
+
+ write_lock_bh(&sock->sk->sk_callback_lock);
+ sock->sk->sk_user_data = sock->sk->sk_data_ready;
+ sock->sk->sk_data_ready = r2net_listen_data_ready;
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+
+ r2net_listen_sock = sock;
+ INIT_WORK(&r2net_listen_work, r2net_accept_many);
+
+ sock->sk->sk_reuse = 1;
+ ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
+ if (ret < 0) {
+ printk(KERN_ERR "ramster: Error %d while binding socket at "
+ "%pI4:%u\n", ret, &addr, ntohs(port));
+ goto out;
+ }
+
+ ret = sock->ops->listen(sock, 64);
+ if (ret < 0)
+ printk(KERN_ERR "ramster: Error %d while listening on %pI4:%u\n",
+ ret, &addr, ntohs(port));
+
+out:
+ if (ret) {
+ r2net_listen_sock = NULL;
+ if (sock)
+ sock_release(sock);
+ }
+ return ret;
+}
+
+/*
+ * called from node manager when we should bring up our network listening
+ * socket. node manager handles all the serialization to only call this
+ * once and to match it with r2net_stop_listening(). note,
+ * r2nm_this_node() doesn't work yet as we're being called while it
+ * is being set up.
+ */
+int r2net_start_listening(struct r2nm_node *node)
+{
+ int ret = 0;
+
+ BUG_ON(r2net_wq != NULL);
+ BUG_ON(r2net_listen_sock != NULL);
+
+ mlog(ML_KTHREAD, "starting r2net thread...\n");
+ r2net_wq = create_singlethread_workqueue("r2net");
+ if (r2net_wq == NULL) {
+ mlog(ML_ERROR, "unable to launch r2net thread\n");
+ return -ENOMEM; /* ? */
+ }
+
+ ret = r2net_open_listening_sock(node->nd_ipv4_address,
+ node->nd_ipv4_port);
+ if (ret) {
+ destroy_workqueue(r2net_wq);
+ r2net_wq = NULL;
+ }
+
+ return ret;
+}
+
+/* again, r2nm_this_node() doesn't work here as we're involved in
+ * tearing it down */
+void r2net_stop_listening(struct r2nm_node *node)
+{
+ struct socket *sock = r2net_listen_sock;
+ size_t i;
+
+ BUG_ON(r2net_wq == NULL);
+ BUG_ON(r2net_listen_sock == NULL);
+
+ /* stop the listening socket from generating work */
+ write_lock_bh(&sock->sk->sk_callback_lock);
+ sock->sk->sk_data_ready = sock->sk->sk_user_data;
+ sock->sk->sk_user_data = NULL;
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+
+ for (i = 0; i < ARRAY_SIZE(r2net_nodes); i++) {
+ struct r2nm_node *node = r2nm_get_node_by_num(i);
+ if (node) {
+ r2net_disconnect_node(node);
+ r2nm_node_put(node);
+ }
+ }
+
+ /* finish all work and tear down the work queue */
+ mlog(ML_KTHREAD, "waiting for r2net thread to exit....\n");
+ destroy_workqueue(r2net_wq);
+ r2net_wq = NULL;
+
+ sock_release(r2net_listen_sock);
+ r2net_listen_sock = NULL;
+}
+
+void r2net_hb_node_up_manual(int node_num)
+{
+ struct r2nm_node dummy;
+ if (r2nm_single_cluster == NULL)
+ pr_err("ramster: cluster not alive, node_up_manual ignored\n");
+ else {
+ r2hb_manual_set_node_heartbeating(node_num);
+ r2net_hb_node_up_cb(&dummy, node_num, NULL);
+ }
+}
+
+/* ------------------------------------------------------------ */
+
+int r2net_init(void)
+{
+ unsigned long i;
+
+ if (r2net_debugfs_init())
+ return -ENOMEM;
+
+ r2net_hand = kzalloc(sizeof(struct r2net_handshake), GFP_KERNEL);
+ r2net_keep_req = kzalloc(sizeof(struct r2net_msg), GFP_KERNEL);
+ r2net_keep_resp = kzalloc(sizeof(struct r2net_msg), GFP_KERNEL);
+ if (!r2net_hand || !r2net_keep_req || !r2net_keep_resp) {
+ kfree(r2net_hand);
+ kfree(r2net_keep_req);
+ kfree(r2net_keep_resp);
+ return -ENOMEM;
+ }
+
+ r2net_hand->protocol_version = cpu_to_be64(R2NET_PROTOCOL_VERSION);
+ r2net_hand->connector_id = cpu_to_be64(1);
+
+ r2net_keep_req->magic = cpu_to_be16(R2NET_MSG_KEEP_REQ_MAGIC);
+ r2net_keep_resp->magic = cpu_to_be16(R2NET_MSG_KEEP_RESP_MAGIC);
+
+ for (i = 0; i < ARRAY_SIZE(r2net_nodes); i++) {
+ struct r2net_node *nn = r2net_nn_from_num(i);
+
+ atomic_set(&nn->nn_timeout, 0);
+ spin_lock_init(&nn->nn_lock);
+ INIT_DELAYED_WORK(&nn->nn_connect_work, r2net_start_connect);
+ INIT_DELAYED_WORK(&nn->nn_connect_expired,
+ r2net_connect_expired);
+ INIT_DELAYED_WORK(&nn->nn_still_up, r2net_still_up);
+ /* until we see hb from a node, tx will see -ENOTCONN */
+ nn->nn_persistent_error = -ENOTCONN;
+ init_waitqueue_head(&nn->nn_sc_wq);
+ idr_init(&nn->nn_status_idr);
+ INIT_LIST_HEAD(&nn->nn_status_list);
+ }
+
+ return 0;
+}
+
+void r2net_exit(void)
+{
+ kfree(r2net_hand);
+ kfree(r2net_keep_req);
+ kfree(r2net_keep_resp);
+ r2net_debugfs_exit();
+}
diff --git a/drivers/staging/ramster/cluster/tcp.h b/drivers/staging/ramster/cluster/tcp.h
new file mode 100644
index 000000000000..9d05833452b5
--- /dev/null
+++ b/drivers/staging/ramster/cluster/tcp.h
@@ -0,0 +1,159 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * tcp.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#ifndef R2CLUSTER_TCP_H
+#define R2CLUSTER_TCP_H
+
+#include <linux/socket.h>
+#ifdef __KERNEL__
+#include <net/sock.h>
+#include <linux/tcp.h>
+#else
+#include <sys/socket.h>
+#endif
+#include <linux/inet.h>
+#include <linux/in.h>
+
+struct r2net_msg {
+ __be16 magic;
+ __be16 data_len;
+ __be16 msg_type;
+ __be16 pad1;
+ __be32 sys_status;
+ __be32 status;
+ __be32 key;
+ __be32 msg_num;
+ __u8 buf[0];
+};
+
+typedef int (r2net_msg_handler_func)(struct r2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+typedef void (r2net_post_msg_handler_func)(int status, void *data,
+ void *ret_data);
+
+#define R2NET_MAX_PAYLOAD_BYTES (4096 - sizeof(struct r2net_msg))
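+/* with the 24-byte r2net_msg header above this works out to 4072 payload
+ * bytes, keeping header plus payload within one page on 4k-page systems */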
+
+/* same as hb delay, we're waiting for another node to recognize our hb */
+#define R2NET_RECONNECT_DELAY_MS_DEFAULT 2000
+
+#define R2NET_KEEPALIVE_DELAY_MS_DEFAULT 2000
+#define R2NET_IDLE_TIMEOUT_MS_DEFAULT 30000
+
+
+/* TODO: figure this out.... */
+static inline int r2net_link_down(int err, struct socket *sock)
+{
+ if (sock) {
+ if (sock->sk->sk_state != TCP_ESTABLISHED &&
+ sock->sk->sk_state != TCP_CLOSE_WAIT)
+ return 1;
+ }
+
+ if (err >= 0)
+ return 0;
+ switch (err) {
+
+ /* ????????????????????????? */
+ case -ERESTARTSYS:
+ case -EBADF:
+ /* When the server has died, an ICMP port unreachable
+ * message prompts ECONNREFUSED. */
+ case -ECONNREFUSED:
+ case -ENOTCONN:
+ case -ECONNRESET:
+ case -EPIPE:
+ return 1;
+
+ }
+ return 0;
+}
+
+enum {
+ R2NET_DRIVER_UNINITED,
+ R2NET_DRIVER_READY,
+};
+
+int r2net_send_message(u32 msg_type, u32 key, void *data, u32 len,
+ u8 target_node, int *status);
+int r2net_send_message_vec(u32 msg_type, u32 key, struct kvec *vec,
+ size_t veclen, u8 target_node, int *status);
+
+int r2net_register_handler(u32 msg_type, u32 key, u32 max_len,
+ r2net_msg_handler_func *func, void *data,
+ r2net_post_msg_handler_func *post_func,
+ struct list_head *unreg_list);
+void r2net_unregister_handler_list(struct list_head *list);
+
+void r2net_fill_node_map(unsigned long *map, unsigned bytes);
+
+void r2net_force_data_magic(struct r2net_msg *, u16, u32);
+void r2net_hb_node_up_manual(int);
+struct r2net_node *r2net_nn_from_num(u8);
+
+struct r2nm_node;
+int r2net_register_hb_callbacks(void);
+void r2net_unregister_hb_callbacks(void);
+int r2net_start_listening(struct r2nm_node *node);
+void r2net_stop_listening(struct r2nm_node *node);
+void r2net_disconnect_node(struct r2nm_node *node);
+int r2net_num_connected_peers(void);
+
+int r2net_init(void);
+void r2net_exit(void);
+
+struct r2net_send_tracking;
+struct r2net_sock_container;
+
+#if 0
+int r2net_debugfs_init(void);
+void r2net_debugfs_exit(void);
+void r2net_debug_add_nst(struct r2net_send_tracking *nst);
+void r2net_debug_del_nst(struct r2net_send_tracking *nst);
+void r2net_debug_add_sc(struct r2net_sock_container *sc);
+void r2net_debug_del_sc(struct r2net_sock_container *sc);
+#else
+static inline int r2net_debugfs_init(void)
+{
+ return 0;
+}
+static inline void r2net_debugfs_exit(void)
+{
+}
+static inline void r2net_debug_add_nst(struct r2net_send_tracking *nst)
+{
+}
+static inline void r2net_debug_del_nst(struct r2net_send_tracking *nst)
+{
+}
+static inline void r2net_debug_add_sc(struct r2net_sock_container *sc)
+{
+}
+static inline void r2net_debug_del_sc(struct r2net_sock_container *sc)
+{
+}
+#endif /* 0: debugfs helpers stubbed out */
+
+#endif /* R2CLUSTER_TCP_H */
diff --git a/drivers/staging/ramster/cluster/tcp_internal.h b/drivers/staging/ramster/cluster/tcp_internal.h
new file mode 100644
index 000000000000..4d8cc9f96fd2
--- /dev/null
+++ b/drivers/staging/ramster/cluster/tcp_internal.h
@@ -0,0 +1,248 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef R2CLUSTER_TCP_INTERNAL_H
+#define R2CLUSTER_TCP_INTERNAL_H
+
+#define R2NET_MSG_MAGIC ((u16)0xfa55)
+#define R2NET_MSG_STATUS_MAGIC ((u16)0xfa56)
+#define R2NET_MSG_KEEP_REQ_MAGIC ((u16)0xfa57)
+#define R2NET_MSG_KEEP_RESP_MAGIC ((u16)0xfa58)
+/*
+ * "data magic" is a long version of "status magic" where the message
+ * payload actually contains data to be passed in reply to certain messages
+ */
+#define R2NET_MSG_DATA_MAGIC ((u16)0xfa59)
+
+/* we're delaying our quorum decision so that heartbeat will have timed
+ * out truly dead nodes by the time we come around to making decisions
+ * on their number */
+#define R2NET_QUORUM_DELAY_MS \
+ ((r2hb_dead_threshold + 2) * R2HB_REGION_TIMEOUT_MS)
+
+/*
+ * This version number represents quite a lot, unfortunately. It not
+ * only represents the raw network message protocol on the wire but also
+ * locking semantics of the file system using the protocol. It should
+ * be somewhere else, I'm sure, but right now it isn't.
+ *
+ * With version 11, we separate out the filesystem locking portion. The
+ * filesystem now has a major.minor version it negotiates. Version 11
+ * introduces this negotiation to the r2dlm protocol, and as such the
+ * version here in tcp_internal.h should not need to be bumped for
+ * filesystem locking changes.
+ *
+ * New in version 11
+ * - Negotiation of filesystem locking in the dlm join.
+ *
+ * New in version 10:
+ * - Meta/data locks combined
+ *
+ * New in version 9:
+ * - All votes removed
+ *
+ * New in version 8:
+ * - Replace delete inode votes with a cluster lock
+ *
+ * New in version 7:
+ * - DLM join domain includes the live nodemap
+ *
+ * New in version 6:
+ * - DLM lockres remote refcount fixes.
+ *
+ * New in version 5:
+ * - Network timeout checking protocol
+ *
+ * New in version 4:
+ * - Remove i_generation from lock names for better stat performance.
+ *
+ * New in version 3:
+ * - Replace dentry votes with a cluster lock
+ *
+ * New in version 2:
+ * - full 64 bit i_size in the metadata lock lvbs
+ * - introduction of "rw" lock and pushing meta/data locking down
+ */
+#define R2NET_PROTOCOL_VERSION 11ULL
+struct r2net_handshake {
+ __be64 protocol_version;
+ __be64 connector_id;
+ __be32 r2hb_heartbeat_timeout_ms;
+ __be32 r2net_idle_timeout_ms;
+ __be32 r2net_keepalive_delay_ms;
+ __be32 r2net_reconnect_delay_ms;
+};
+
+struct r2net_node {
+ /* this is never called from int/bh */
+ spinlock_t nn_lock;
+
+ /* set the moment an sc is allocated and a connect is started */
+ struct r2net_sock_container *nn_sc;
+ /* _valid is only set after the handshake passes and tx can happen */
+ unsigned nn_sc_valid:1;
+ /* if this is set tx just returns it */
+ int nn_persistent_error;
+ /* It is only set to 1 after the idle timeout. */
+ atomic_t nn_timeout;
+
+ /* threads waiting for an sc to arrive wait on the wq for generation
+ * to increase. it is increased when a connecting socket succeeds
+ * or fails or when an accepted socket is attached. */
+ wait_queue_head_t nn_sc_wq;
+
+ struct idr nn_status_idr;
+ struct list_head nn_status_list;
+
+ /* connects are attempted from when heartbeat comes up until either hb
+ * goes down, the node is unconfigured, no connect attempts succeed
+ * before R2NET_CONN_IDLE_DELAY, or a connect succeeds. connect_work
+ * is queued from set_nn_state both from hb up and from itself if a
+ * connect attempt fails and so can be self-arming. shutdown is
+ * careful to first mark the nn such that no connects will be attempted
+ * before canceling delayed connect work and flushing the queue. */
+ struct delayed_work nn_connect_work;
+ unsigned long nn_last_connect_attempt;
+
+ /* this is queued as nodes come up and is canceled when a connection is
+ * established. this expiring gives up on the node and errors out
+ * transmits */
+ struct delayed_work nn_connect_expired;
+
+ /* after we give up on a socket we wait a while before deciding
+ * that it is still heartbeating and that we should do some
+ * quorum work */
+ struct delayed_work nn_still_up;
+};
+
+struct r2net_sock_container {
+ struct kref sc_kref;
+ /* the next two are valid for the life time of the sc */
+ struct socket *sc_sock;
+ struct r2nm_node *sc_node;
+
+ /* all of these sc work structs hold refs on the sc while they are
+ * queued. they should not be able to ref a freed sc. the teardown
+ * race is with r2net_wq destruction in r2net_stop_listening() */
+
+ /* rx and connect work are generated from socket callbacks. sc
+ * shutdown removes the callbacks and then flushes the work queue */
+ struct work_struct sc_rx_work;
+ struct work_struct sc_connect_work;
+ /* shutdown work is triggered in two ways. the simple way is
+	 * for a code path to call ensure_shutdown, which gets a lock, removes
+ * the sc from the nn, and queues the work. in this case the
+ * work is single-shot. the work is also queued from a sock
+ * callback, though, and in this case the work will find the sc
+	 * still on the nn and will call ensure_shutdown itself. this
+ * ends up triggering the shutdown work again, though nothing
+ * will be done in that second iteration. so work queue teardown
+ * has to be careful to remove the sc from the nn before waiting
+ * on the work queue so that the shutdown work doesn't remove the
+ * sc and rearm itself.
+ */
+ struct work_struct sc_shutdown_work;
+
+ struct timer_list sc_idle_timeout;
+ struct delayed_work sc_keepalive_work;
+
+ unsigned sc_handshake_ok:1;
+
+ struct page *sc_page;
+ size_t sc_page_off;
+
+ /* original handlers for the sockets */
+ void (*sc_state_change)(struct sock *sk);
+ void (*sc_data_ready)(struct sock *sk, int bytes);
+
+ u32 sc_msg_key;
+ u16 sc_msg_type;
+
+#ifdef CONFIG_DEBUG_FS
+ struct list_head sc_net_debug_item;
+ ktime_t sc_tv_timer;
+ ktime_t sc_tv_data_ready;
+ ktime_t sc_tv_advance_start;
+ ktime_t sc_tv_advance_stop;
+ ktime_t sc_tv_func_start;
+ ktime_t sc_tv_func_stop;
+#endif
+#ifdef CONFIG_RAMSTER_FS_STATS
+ ktime_t sc_tv_acquiry_total;
+ ktime_t sc_tv_send_total;
+ ktime_t sc_tv_status_total;
+ u32 sc_send_count;
+ u32 sc_recv_count;
+ ktime_t sc_tv_process_total;
+#endif
+ struct mutex sc_send_lock;
+};
+
+struct r2net_msg_handler {
+ struct rb_node nh_node;
+ u32 nh_max_len;
+ u32 nh_msg_type;
+ u32 nh_key;
+ r2net_msg_handler_func *nh_func;
+ r2net_msg_handler_func *nh_func_data;
+ r2net_post_msg_handler_func
+ *nh_post_func;
+ struct kref nh_kref;
+ struct list_head nh_unregister_item;
+};
+
+enum r2net_system_error {
+ R2NET_ERR_NONE = 0,
+ R2NET_ERR_NO_HNDLR,
+ R2NET_ERR_OVERFLOW,
+ R2NET_ERR_DIED,
+ R2NET_ERR_MAX
+};
+
+struct r2net_status_wait {
+ enum r2net_system_error ns_sys_status;
+ s32 ns_status;
+ int ns_id;
+ wait_queue_head_t ns_wq;
+ struct list_head ns_node_item;
+};
+
+#ifdef CONFIG_DEBUG_FS
+/* just for state dumps */
+struct r2net_send_tracking {
+ struct list_head st_net_debug_item;
+ struct task_struct *st_task;
+ struct r2net_sock_container *st_sc;
+ u32 st_id;
+ u32 st_msg_type;
+ u32 st_msg_key;
+ u8 st_node;
+ ktime_t st_sock_time;
+ ktime_t st_send_time;
+ ktime_t st_status_time;
+};
+#else
+struct r2net_send_tracking {
+ u32 dummy;
+};
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* R2CLUSTER_TCP_INTERNAL_H */
diff --git a/drivers/staging/ramster/r2net.c b/drivers/staging/ramster/r2net.c
new file mode 100644
index 000000000000..2ee02204c43d
--- /dev/null
+++ b/drivers/staging/ramster/r2net.c
@@ -0,0 +1,401 @@
+/*
+ * r2net.c
+ *
+ * Copyright (c) 2011, Dan Magenheimer, Oracle Corp.
+ *
+ * Ramster_r2net provides an interface between zcache and r2net.
+ *
+ * FIXME: support more than two nodes
+ */
+
+#include <linux/list.h>
+#include "cluster/tcp.h"
+#include "cluster/nodemanager.h"
+#include "tmem.h"
+#include "zcache.h"
+#include "ramster.h"
+
+#define RAMSTER_TESTING
+
+#define RMSTR_KEY 0x77347734
+
+enum {
+ RMSTR_TMEM_PUT_EPH = 100,
+ RMSTR_TMEM_PUT_PERS,
+ RMSTR_TMEM_ASYNC_GET_REQUEST,
+ RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST,
+ RMSTR_TMEM_ASYNC_GET_REPLY,
+ RMSTR_TMEM_FLUSH,
+ RMSTR_TMEM_FLOBJ,
+ RMSTR_TMEM_DESTROY_POOL,
+};
+
+#define RMSTR_R2NET_MAX_LEN \
+ (R2NET_MAX_PAYLOAD_BYTES - sizeof(struct tmem_xhandle))
+
+#include "cluster/tcp_internal.h"
+
+static struct r2nm_node *r2net_target_node;
+static int r2net_target_nodenum;
+
+int r2net_remote_target_node_set(int node_num)
+{
+ int ret = -1;
+
+ r2net_target_node = r2nm_get_node_by_num(node_num);
+ if (r2net_target_node != NULL) {
+ r2net_target_nodenum = node_num;
+ r2nm_node_put(r2net_target_node);
+ ret = 0;
+ }
+ return ret;
+}
+
+/* FIXME following buffer should be per-cpu, protected by preempt_disable */
+static char ramster_async_get_buf[R2NET_MAX_PAYLOAD_BYTES];
+
+static int ramster_remote_async_get_request_handler(struct r2net_msg *msg,
+ u32 len, void *data, void **ret_data)
+{
+ char *pdata;
+ struct tmem_xhandle xh;
+ int found;
+ size_t size = RMSTR_R2NET_MAX_LEN;
+ u16 msgtype = be16_to_cpu(msg->msg_type);
+ bool get_and_free = (msgtype == RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST);
+ unsigned long flags;
+
+ xh = *(struct tmem_xhandle *)msg->buf;
+ if (xh.xh_data_size > RMSTR_R2NET_MAX_LEN)
+ BUG();
+ pdata = ramster_async_get_buf;
+ *(struct tmem_xhandle *)pdata = xh;
+ pdata += sizeof(struct tmem_xhandle);
+ local_irq_save(flags);
+ found = zcache_get(xh.client_id, xh.pool_id, &xh.oid, xh.index,
+ pdata, &size, 1, get_and_free ? 1 : -1);
+ local_irq_restore(flags);
+ if (found < 0) {
+ /* a zero size indicates the get failed */
+ size = 0;
+ }
+ if (size > RMSTR_R2NET_MAX_LEN)
+ BUG();
+ *ret_data = pdata - sizeof(struct tmem_xhandle);
+ /* now make caller (r2net_process_message) handle specially */
+ r2net_force_data_magic(msg, RMSTR_TMEM_ASYNC_GET_REPLY, RMSTR_KEY);
+ return size + sizeof(struct tmem_xhandle);
+}
+
+static int ramster_remote_async_get_reply_handler(struct r2net_msg *msg,
+ u32 len, void *data, void **ret_data)
+{
+ char *in = (char *)msg->buf;
+ int datalen = len - sizeof(struct r2net_msg);
+ int ret = -1;
+ struct tmem_xhandle *xh = (struct tmem_xhandle *)in;
+
+ in += sizeof(struct tmem_xhandle);
+ datalen -= sizeof(struct tmem_xhandle);
+ BUG_ON(datalen < 0 || datalen > PAGE_SIZE);
+ ret = zcache_localify(xh->pool_id, &xh->oid, xh->index,
+ in, datalen, xh->extra);
+#ifdef RAMSTER_TESTING
+ if (ret == -EEXIST)
+ pr_err("TESTING ArrgREP, aborted overwrite on racy put\n");
+#endif
+ return ret;
+}
+
+int ramster_remote_put_handler(struct r2net_msg *msg,
+ u32 len, void *data, void **ret_data)
+{
+ struct tmem_xhandle *xh;
+ char *p = (char *)msg->buf;
+ int datalen = len - sizeof(struct r2net_msg) -
+ sizeof(struct tmem_xhandle);
+ u16 msgtype = be16_to_cpu(msg->msg_type);
+ bool ephemeral = (msgtype == RMSTR_TMEM_PUT_EPH);
+ unsigned long flags;
+ int ret;
+
+ xh = (struct tmem_xhandle *)p;
+ p += sizeof(struct tmem_xhandle);
+ zcache_autocreate_pool(xh->client_id, xh->pool_id, ephemeral);
+ local_irq_save(flags);
+ ret = zcache_put(xh->client_id, xh->pool_id, &xh->oid, xh->index,
+ p, datalen, 1, ephemeral ? 1 : -1);
+ local_irq_restore(flags);
+ return ret;
+}
+
+int ramster_remote_flush_handler(struct r2net_msg *msg,
+ u32 len, void *data, void **ret_data)
+{
+ struct tmem_xhandle *xh;
+ char *p = (char *)msg->buf;
+
+ xh = (struct tmem_xhandle *)p;
+ p += sizeof(struct tmem_xhandle);
+ (void)zcache_flush(xh->client_id, xh->pool_id, &xh->oid, xh->index);
+ return 0;
+}
+
+int ramster_remote_flobj_handler(struct r2net_msg *msg,
+ u32 len, void *data, void **ret_data)
+{
+ struct tmem_xhandle *xh;
+ char *p = (char *)msg->buf;
+
+ xh = (struct tmem_xhandle *)p;
+ p += sizeof(struct tmem_xhandle);
+ (void)zcache_flush_object(xh->client_id, xh->pool_id, &xh->oid);
+ return 0;
+}
+
+int ramster_remote_async_get(struct tmem_xhandle *xh, bool free, int remotenode,
+ size_t expect_size, uint8_t expect_cksum,
+ void *extra)
+{
+ int ret = -1, status;
+ struct r2nm_node *node = NULL;
+ struct kvec vec[1];
+ size_t veclen = 1;
+ u32 msg_type;
+
+ node = r2nm_get_node_by_num(remotenode);
+ if (node == NULL)
+ goto out;
+ xh->client_id = r2nm_this_node(); /* which node is getting */
+ xh->xh_data_cksum = expect_cksum;
+ xh->xh_data_size = expect_size;
+ xh->extra = extra;
+ vec[0].iov_len = sizeof(*xh);
+ vec[0].iov_base = xh;
+ if (free)
+ msg_type = RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST;
+ else
+ msg_type = RMSTR_TMEM_ASYNC_GET_REQUEST;
+ ret = r2net_send_message_vec(msg_type, RMSTR_KEY,
+ vec, veclen, remotenode, &status);
+ r2nm_node_put(node);
+ if (ret < 0) {
+ /* FIXME handle bad message possibilities here? */
+ pr_err("UNTESTED ret<0 in ramster_remote_async_get\n");
+ }
+ ret = status;
+out:
+ return ret;
+}
+
+#ifdef RAMSTER_TESTING
+/* leave me here to see if it catches a weird crash */
+static void ramster_check_irq_counts(void)
+{
+ static int last_hardirq_cnt, last_softirq_cnt, last_preempt_cnt;
+ int cur_hardirq_cnt, cur_softirq_cnt, cur_preempt_cnt;
+
+ cur_hardirq_cnt = hardirq_count() >> HARDIRQ_SHIFT;
+ if (cur_hardirq_cnt > last_hardirq_cnt) {
+ last_hardirq_cnt = cur_hardirq_cnt;
+ if (!(last_hardirq_cnt&(last_hardirq_cnt-1)))
+ pr_err("RAMSTER TESTING RRP hardirq_count=%d\n",
+ last_hardirq_cnt);
+ }
+ cur_softirq_cnt = softirq_count() >> SOFTIRQ_SHIFT;
+ if (cur_softirq_cnt > last_softirq_cnt) {
+ last_softirq_cnt = cur_softirq_cnt;
+ if (!(last_softirq_cnt&(last_softirq_cnt-1)))
+ pr_err("RAMSTER TESTING RRP softirq_count=%d\n",
+ last_softirq_cnt);
+ }
+ cur_preempt_cnt = preempt_count() & PREEMPT_MASK;
+ if (cur_preempt_cnt > last_preempt_cnt) {
+ last_preempt_cnt = cur_preempt_cnt;
+ if (!(last_preempt_cnt&(last_preempt_cnt-1)))
+ pr_err("RAMSTER TESTING RRP preempt_count=%d\n",
+ last_preempt_cnt);
+ }
+}
+#endif
+
+int ramster_remote_put(struct tmem_xhandle *xh, char *data, size_t size,
+ bool ephemeral, int *remotenode)
+{
+ int nodenum, ret = -1, status;
+ struct r2nm_node *node = NULL;
+ struct kvec vec[2];
+ size_t veclen = 2;
+ u32 msg_type;
+#ifdef RAMSTER_TESTING
+ struct r2net_node *nn;
+#endif
+
+ BUG_ON(size > RMSTR_R2NET_MAX_LEN);
+ xh->client_id = r2nm_this_node(); /* which node is putting */
+ vec[0].iov_len = sizeof(*xh);
+ vec[0].iov_base = xh;
+ vec[1].iov_len = size;
+ vec[1].iov_base = data;
+ node = r2net_target_node;
+ if (!node)
+ goto out;
+
+ nodenum = r2net_target_nodenum;
+
+ r2nm_node_get(node);
+
+#ifdef RAMSTER_TESTING
+ nn = r2net_nn_from_num(nodenum);
+ WARN_ON_ONCE(nn->nn_persistent_error || !nn->nn_sc_valid);
+#endif
+
+ if (ephemeral)
+ msg_type = RMSTR_TMEM_PUT_EPH;
+ else
+ msg_type = RMSTR_TMEM_PUT_PERS;
+#ifdef RAMSTER_TESTING
+ /* leave me here to see if it catches a weird crash */
+ ramster_check_irq_counts();
+#endif
+
+ ret = r2net_send_message_vec(msg_type, RMSTR_KEY, vec, veclen,
+ nodenum, &status);
+#ifdef RAMSTER_TESTING
+ if (ret != 0) {
+ static unsigned long cnt;
+ cnt++;
+ if (!(cnt&(cnt-1)))
+ pr_err("ramster_remote_put: message failed, "
+ "ret=%d, cnt=%lu\n", ret, cnt);
+ ret = -1;
+ }
+#endif
+ if (ret < 0)
+ ret = -1;
+ else {
+ ret = status;
+ *remotenode = nodenum;
+ }
+
+ r2nm_node_put(node);
+out:
+ return ret;
+}
+
+int ramster_remote_flush(struct tmem_xhandle *xh, int remotenode)
+{
+ int ret = -1, status;
+ struct r2nm_node *node = NULL;
+ struct kvec vec[1];
+ size_t veclen = 1;
+
+ node = r2nm_get_node_by_num(remotenode);
+ BUG_ON(node == NULL);
+ xh->client_id = r2nm_this_node(); /* which node is flushing */
+ vec[0].iov_len = sizeof(*xh);
+ vec[0].iov_base = xh;
+ BUG_ON(irqs_disabled());
+ BUG_ON(in_softirq());
+ ret = r2net_send_message_vec(RMSTR_TMEM_FLUSH, RMSTR_KEY,
+ vec, veclen, remotenode, &status);
+ r2nm_node_put(node);
+ return ret;
+}
+
+int ramster_remote_flush_object(struct tmem_xhandle *xh, int remotenode)
+{
+ int ret = -1, status;
+ struct r2nm_node *node = NULL;
+ struct kvec vec[1];
+ size_t veclen = 1;
+
+ node = r2nm_get_node_by_num(remotenode);
+ BUG_ON(node == NULL);
+ xh->client_id = r2nm_this_node(); /* which node is flobjing */
+ vec[0].iov_len = sizeof(*xh);
+ vec[0].iov_base = xh;
+ ret = r2net_send_message_vec(RMSTR_TMEM_FLOBJ, RMSTR_KEY,
+ vec, veclen, remotenode, &status);
+ r2nm_node_put(node);
+ return ret;
+}
+
+/*
+ * Handler registration
+ */
+
+static LIST_HEAD(r2net_unreg_list);
+
+static void r2net_unregister_handlers(void)
+{
+ r2net_unregister_handler_list(&r2net_unreg_list);
+}
+
+int r2net_register_handlers(void)
+{
+ int status;
+
+ status = r2net_register_handler(RMSTR_TMEM_PUT_EPH, RMSTR_KEY,
+ RMSTR_R2NET_MAX_LEN,
+ ramster_remote_put_handler,
+ NULL, NULL, &r2net_unreg_list);
+ if (status)
+ goto bail;
+
+ status = r2net_register_handler(RMSTR_TMEM_PUT_PERS, RMSTR_KEY,
+ RMSTR_R2NET_MAX_LEN,
+ ramster_remote_put_handler,
+ NULL, NULL, &r2net_unreg_list);
+ if (status)
+ goto bail;
+
+ status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_REQUEST, RMSTR_KEY,
+ RMSTR_R2NET_MAX_LEN,
+ ramster_remote_async_get_request_handler,
+ NULL, NULL,
+ &r2net_unreg_list);
+ if (status)
+ goto bail;
+
+ status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST,
+ RMSTR_KEY, RMSTR_R2NET_MAX_LEN,
+ ramster_remote_async_get_request_handler,
+ NULL, NULL,
+ &r2net_unreg_list);
+ if (status)
+ goto bail;
+
+ status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_REPLY, RMSTR_KEY,
+ RMSTR_R2NET_MAX_LEN,
+ ramster_remote_async_get_reply_handler,
+ NULL, NULL,
+ &r2net_unreg_list);
+ if (status)
+ goto bail;
+
+ status = r2net_register_handler(RMSTR_TMEM_FLUSH, RMSTR_KEY,
+ RMSTR_R2NET_MAX_LEN,
+ ramster_remote_flush_handler,
+ NULL, NULL,
+ &r2net_unreg_list);
+ if (status)
+ goto bail;
+
+ status = r2net_register_handler(RMSTR_TMEM_FLOBJ, RMSTR_KEY,
+ RMSTR_R2NET_MAX_LEN,
+ ramster_remote_flobj_handler,
+ NULL, NULL,
+ &r2net_unreg_list);
+ if (status)
+ goto bail;
+
+ pr_info("ramster: r2net handlers registered\n");
+
+bail:
+ if (status) {
+ r2net_unregister_handlers();
+ pr_err("ramster: couldn't register r2net handlers\n");
+ }
+ return status;
+}
diff --git a/drivers/staging/ramster/ramster.h b/drivers/staging/ramster/ramster.h
new file mode 100644
index 000000000000..0c9455e8dcd8
--- /dev/null
+++ b/drivers/staging/ramster/ramster.h
@@ -0,0 +1,118 @@
+/*
+ * ramster.h
+ *
+ * Peer-to-peer transcendent memory
+ *
+ * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
+ */
+
+#ifndef _RAMSTER_H_
+#define _RAMSTER_H_
+
+/*
+ * format of remote pampd:
+ * bit 0 == intransit
+ * bit 1 == is_remote... if this bit is set, then
+ * bit 2-9 == remotenode
+ * bit 10-22 == size
+ * bit 23-30 == cksum
+ */
+#define FAKE_PAMPD_INTRANSIT_BITS 1
+#define FAKE_PAMPD_ISREMOTE_BITS 1
+#define FAKE_PAMPD_REMOTENODE_BITS 8
+#define FAKE_PAMPD_REMOTESIZE_BITS 13
+#define FAKE_PAMPD_CHECKSUM_BITS 8
+
+#define FAKE_PAMPD_INTRANSIT_SHIFT 0
+#define FAKE_PAMPD_ISREMOTE_SHIFT (FAKE_PAMPD_INTRANSIT_SHIFT + \
+ FAKE_PAMPD_INTRANSIT_BITS)
+#define FAKE_PAMPD_REMOTENODE_SHIFT (FAKE_PAMPD_ISREMOTE_SHIFT + \
+ FAKE_PAMPD_ISREMOTE_BITS)
+#define FAKE_PAMPD_REMOTESIZE_SHIFT (FAKE_PAMPD_REMOTENODE_SHIFT + \
+ FAKE_PAMPD_REMOTENODE_BITS)
+#define FAKE_PAMPD_CHECKSUM_SHIFT (FAKE_PAMPD_REMOTESIZE_SHIFT + \
+ FAKE_PAMPD_REMOTESIZE_BITS)
+
+#define FAKE_PAMPD_MASK(x) ((1UL << (x)) - 1)
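+
+/*
+ * Worked example (values chosen purely for illustration): encoding
+ * remotenode=3, size=4096, cksum=0x5a with the layout above yields
+ *   isremote:   1UL << 1     = 0x00000002
+ *   remotenode: 3UL << 2     = 0x0000000c
+ *   size:       4096UL << 10 = 0x00400000
+ *   cksum:      0x5aUL << 23 = 0x2d000000
+ * i.e. a fake pampd of 0x2d40000e, decodable with the accessors below.
+ */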
+
+static inline void *pampd_make_remote(int remotenode, size_t size,
+ unsigned char cksum)
+{
+ unsigned long fake_pampd = 0;
+ fake_pampd |= 1UL << FAKE_PAMPD_ISREMOTE_SHIFT;
+ fake_pampd |= ((unsigned long)remotenode &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTENODE_BITS)) <<
+ FAKE_PAMPD_REMOTENODE_SHIFT;
+ fake_pampd |= ((unsigned long)size &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTESIZE_BITS)) <<
+ FAKE_PAMPD_REMOTESIZE_SHIFT;
+ fake_pampd |= ((unsigned long)cksum &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_CHECKSUM_BITS)) <<
+ FAKE_PAMPD_CHECKSUM_SHIFT;
+ return (void *)fake_pampd;
+}
+
+static inline unsigned int pampd_remote_node(void *pampd)
+{
+ unsigned long fake_pampd = (unsigned long)pampd;
+ return (fake_pampd >> FAKE_PAMPD_REMOTENODE_SHIFT) &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTENODE_BITS);
+}
+
+static inline unsigned int pampd_remote_size(void *pampd)
+{
+ unsigned long fake_pampd = (unsigned long)pampd;
+ return (fake_pampd >> FAKE_PAMPD_REMOTESIZE_SHIFT) &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTESIZE_BITS);
+}
+
+static inline unsigned char pampd_remote_cksum(void *pampd)
+{
+ unsigned long fake_pampd = (unsigned long)pampd;
+ return (fake_pampd >> FAKE_PAMPD_CHECKSUM_SHIFT) &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_CHECKSUM_BITS);
+}
+
+static inline bool pampd_is_remote(void *pampd)
+{
+ unsigned long fake_pampd = (unsigned long)pampd;
+ return (fake_pampd >> FAKE_PAMPD_ISREMOTE_SHIFT) &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_ISREMOTE_BITS);
+}
+
+static inline bool pampd_is_intransit(void *pampd)
+{
+ unsigned long fake_pampd = (unsigned long)pampd;
+ return (fake_pampd >> FAKE_PAMPD_INTRANSIT_SHIFT) &
+ FAKE_PAMPD_MASK(FAKE_PAMPD_INTRANSIT_BITS);
+}
+
+/* note that it is a BUG for intransit to be set without isremote also set */
+static inline void *pampd_mark_intransit(void *pampd)
+{
+ unsigned long fake_pampd = (unsigned long)pampd;
+
+ fake_pampd |= 1UL << FAKE_PAMPD_ISREMOTE_SHIFT;
+ fake_pampd |= 1UL << FAKE_PAMPD_INTRANSIT_SHIFT;
+ return (void *)fake_pampd;
+}
+
+static inline void *pampd_mask_intransit_and_remote(void *marked_pampd)
+{
+ unsigned long pampd = (unsigned long)marked_pampd;
+
+ pampd &= ~(1UL << FAKE_PAMPD_INTRANSIT_SHIFT);
+ pampd &= ~(1UL << FAKE_PAMPD_ISREMOTE_SHIFT);
+ return (void *)pampd;
+}
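+
+/*
+ * Round-trip sanity check: a hypothetical helper sketched only for
+ * illustration (nothing in the driver calls it).
+ */
+static inline void pampd_remote_selftest(void)
+{
+	void *pampd = pampd_make_remote(3, 4096, 0x5a);
+
+	BUG_ON(!pampd_is_remote(pampd));
+	BUG_ON(pampd_is_intransit(pampd));	/* make_remote leaves it clear */
+	BUG_ON(pampd_remote_node(pampd) != 3);
+	BUG_ON(pampd_remote_size(pampd) != 4096);
+	BUG_ON(pampd_remote_cksum(pampd) != 0x5a);
+}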
+
+extern int ramster_remote_async_get(struct tmem_xhandle *,
+ bool, int, size_t, uint8_t, void *extra);
+extern int ramster_remote_put(struct tmem_xhandle *, char *, size_t,
+ bool, int *);
+extern int ramster_remote_flush(struct tmem_xhandle *, int);
+extern int ramster_remote_flush_object(struct tmem_xhandle *, int);
+extern int r2net_register_handlers(void);
+extern int r2net_remote_target_node_set(int);
+
+#endif /* _RAMSTER_H_ */
diff --git a/drivers/staging/ramster/tmem.c b/drivers/staging/ramster/tmem.c
new file mode 100644
index 000000000000..8f2f6892d8d3
--- /dev/null
+++ b/drivers/staging/ramster/tmem.c
@@ -0,0 +1,851 @@
+/*
+ * In-kernel transcendent memory (generic implementation)
+ *
+ * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
+ *
+ * The primary purpose of Transcendent Memory ("tmem") is to map object-oriented
+ * "handles" (triples containing a pool id, an object id, and an index) to
+ * pages in a page-accessible memory (PAM). Tmem references the PAM pages via
+ * an abstract "pampd" (PAM page-descriptor), which can be operated on by a
+ * set of functions (pamops). Each pampd contains some representation of
+ * PAGE_SIZE bytes worth of data. Tmem must support potentially millions of
+ * pages and must be able to insert, find, and delete these pages at a
+ * potential frequency of thousands per second concurrently across many CPUs
+ * (and, if used with KVM, across many vcpus across many guests).
+ * Tmem is tracked with a hierarchy of data structures, organized by
+ * the elements in a handle-tuple: pool_id, object_id, and page index.
+ * One or more "clients" (e.g. guests) each provide one or more tmem_pools.
+ * Each pool contains a hash table of rb_trees of tmem_objs. Each
+ * tmem_obj contains a radix-tree-like tree of pointers, with intermediate
+ * nodes called tmem_objnodes. Each leaf pointer in this tree points to
+ * a pampd, which is accessible only through a small set of callbacks
+ * registered by the PAM implementation (see tmem_register_pamops). Tmem
+ * does all memory allocation via a set of callbacks registered by the tmem
+ * host implementation (e.g. see tmem_register_hostops).
+ */
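+
+/*
+ * The hierarchy described above, at a glance:
+ *
+ *   tmem_pool
+ *     -> hashbucket[TMEM_HASH_BUCKETS], each a lock plus an rb_root
+ *          -> rb_tree of tmem_objs, keyed by 192-bit oid
+ *               -> radix-tree-like tree of tmem_objnodes
+ *                    -> leaf pointers to pampds, keyed by 32-bit index
+ */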
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/delay.h>
+
+#include "tmem.h"
+
+/* data structure sentinels used for debugging... see tmem.h */
+#define POOL_SENTINEL 0x87658765
+#define OBJ_SENTINEL 0x12345678
+#define OBJNODE_SENTINEL 0xfedcba09
+
+/*
+ * A tmem host implementation must use this function to register callbacks
+ * for memory allocation.
+ */
+static struct tmem_hostops tmem_hostops;
+
+static void tmem_objnode_tree_init(void);
+
+void tmem_register_hostops(struct tmem_hostops *m)
+{
+ tmem_objnode_tree_init();
+ tmem_hostops = *m;
+}
+
+/*
+ * A tmem host implementation must use this function to register
+ * callbacks for a page-accessible memory (PAM) implementation
+ */
+static struct tmem_pamops tmem_pamops;
+
+void tmem_register_pamops(struct tmem_pamops *m)
+{
+ tmem_pamops = *m;
+}
+
+/*
+ * Oids are potentially very sparse and tmem_objs may have an indeterminately
+ * short life, being added and deleted at a relatively high frequency.
+ * So an rb_tree is an ideal data structure to manage tmem_objs. But because
+ * of the potentially huge number of tmem_objs, each pool manages a hashtable
+ * of rb_trees to reduce search, insert, delete, and rebalancing time.
+ * Each hashbucket also has a lock to manage concurrent access.
+ *
+ * The following routines manage tmem_objs. When any tmem_obj is accessed,
+ * the hashbucket lock must be held.
+ */
+
+/* searches for object==oid in pool, returns locked object if found */
+static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
+ struct tmem_oid *oidp)
+{
+ struct rb_node *rbnode;
+ struct tmem_obj *obj;
+
+ rbnode = hb->obj_rb_root.rb_node;
+ while (rbnode) {
+ BUG_ON(RB_EMPTY_NODE(rbnode));
+ obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
+ switch (tmem_oid_compare(oidp, &obj->oid)) {
+ case 0: /* equal */
+ goto out;
+ case -1:
+ rbnode = rbnode->rb_left;
+ break;
+ case 1:
+ rbnode = rbnode->rb_right;
+ break;
+ }
+ }
+ obj = NULL;
+out:
+ return obj;
+}
+
+static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *);
+
+/* free an object that has no more pampds in it */
+static void tmem_obj_free(struct tmem_obj *obj, struct tmem_hashbucket *hb)
+{
+ struct tmem_pool *pool;
+
+ BUG_ON(obj == NULL);
+ ASSERT_SENTINEL(obj, OBJ);
+ BUG_ON(obj->pampd_count > 0);
+ pool = obj->pool;
+ BUG_ON(pool == NULL);
+	if (obj->objnode_tree_root != NULL) /* may be a "stump" with no leaves */
+ tmem_pampd_destroy_all_in_obj(obj);
+ BUG_ON(obj->objnode_tree_root != NULL);
+ BUG_ON((long)obj->objnode_count != 0);
+ atomic_dec(&pool->obj_count);
+ BUG_ON(atomic_read(&pool->obj_count) < 0);
+ INVERT_SENTINEL(obj, OBJ);
+ obj->pool = NULL;
+ tmem_oid_set_invalid(&obj->oid);
+ rb_erase(&obj->rb_tree_node, &hb->obj_rb_root);
+}
+
+/*
+ * initialize and insert a tmem_object_root (called only if find failed)
+ */
+static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
+ struct tmem_pool *pool,
+ struct tmem_oid *oidp)
+{
+ struct rb_root *root = &hb->obj_rb_root;
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct tmem_obj *this;
+
+ BUG_ON(pool == NULL);
+ atomic_inc(&pool->obj_count);
+ obj->objnode_tree_height = 0;
+ obj->objnode_tree_root = NULL;
+ obj->pool = pool;
+ obj->oid = *oidp;
+ obj->objnode_count = 0;
+ obj->pampd_count = 0;
+ (*tmem_pamops.new_obj)(obj);
+ SET_SENTINEL(obj, OBJ);
+ while (*new) {
+ BUG_ON(RB_EMPTY_NODE(*new));
+ this = rb_entry(*new, struct tmem_obj, rb_tree_node);
+ parent = *new;
+ switch (tmem_oid_compare(oidp, &this->oid)) {
+ case 0:
+ BUG(); /* already present; should never happen! */
+ break;
+ case -1:
+ new = &(*new)->rb_left;
+ break;
+ case 1:
+ new = &(*new)->rb_right;
+ break;
+ }
+ }
+ rb_link_node(&obj->rb_tree_node, parent, new);
+ rb_insert_color(&obj->rb_tree_node, root);
+}
+
+/*
+ * Tmem is managed as a set of tmem_pools with certain attributes, such as
+ * "ephemeral" vs "persistent". These attributes apply to all tmem_objs
+ * and all pampds that belong to a tmem_pool. A tmem_pool is created
+ * or deleted relatively rarely (for example, when a filesystem is
+ * mounted or unmounted).
+ */
+
+/* flush all data from a pool and, optionally, free it */
+static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
+{
+ struct rb_node *rbnode;
+ struct tmem_obj *obj;
+ struct tmem_hashbucket *hb = &pool->hashbucket[0];
+ int i;
+
+ BUG_ON(pool == NULL);
+ for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
+ spin_lock(&hb->lock);
+ rbnode = rb_first(&hb->obj_rb_root);
+ while (rbnode != NULL) {
+ obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
+ rbnode = rb_next(rbnode);
+ tmem_pampd_destroy_all_in_obj(obj);
+ tmem_obj_free(obj, hb);
+ (*tmem_hostops.obj_free)(obj, pool);
+ }
+ spin_unlock(&hb->lock);
+ }
+ if (destroy)
+ list_del(&pool->pool_list);
+}
+
+/*
+ * A tmem_obj contains a radix-tree-like tree in which the intermediate
+ * nodes are called tmem_objnodes. (The kernel lib/radix-tree.c implementation
+ * is very specialized and tuned for specific uses and is not particularly
+ * suited for use from this code, though some code from the core algorithms has
+ * been reused, thus the copyright notices below). Each tmem_objnode contains
+ * a set of pointers which point to either a set of intermediate tmem_objnodes
+ * or a set of pampds.
+ *
+ * Portions Copyright (C) 2001 Momchil Velikov
+ * Portions Copyright (C) 2001 Christoph Hellwig
+ * Portions Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
+ */
+
+struct tmem_objnode_tree_path {
+ struct tmem_objnode *objnode;
+ int offset;
+};
+
+/* objnode height_to_maxindex translation */
+static unsigned long tmem_objnode_tree_h2max[OBJNODE_TREE_MAX_PATH + 1];
+
+static void tmem_objnode_tree_init(void)
+{
+ unsigned int ht, tmp;
+
+ for (ht = 0; ht < ARRAY_SIZE(tmem_objnode_tree_h2max); ht++) {
+ tmp = ht * OBJNODE_TREE_MAP_SHIFT;
+ if (tmp >= OBJNODE_TREE_INDEX_BITS)
+ tmem_objnode_tree_h2max[ht] = ~0UL;
+ else
+ tmem_objnode_tree_h2max[ht] =
+ (~0UL >> (OBJNODE_TREE_INDEX_BITS - tmp - 1)) >> 1;
+ }
+}
+
+static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
+{
+ struct tmem_objnode *objnode;
+
+ ASSERT_SENTINEL(obj, OBJ);
+ BUG_ON(obj->pool == NULL);
+ ASSERT_SENTINEL(obj->pool, POOL);
+ objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
+ if (unlikely(objnode == NULL))
+ goto out;
+ objnode->obj = obj;
+ SET_SENTINEL(objnode, OBJNODE);
+ memset(&objnode->slots, 0, sizeof(objnode->slots));
+ objnode->slots_in_use = 0;
+ obj->objnode_count++;
+out:
+ return objnode;
+}
+
+static void tmem_objnode_free(struct tmem_objnode *objnode)
+{
+ struct tmem_pool *pool;
+ int i;
+
+ BUG_ON(objnode == NULL);
+ for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++)
+ BUG_ON(objnode->slots[i] != NULL);
+ ASSERT_SENTINEL(objnode, OBJNODE);
+ INVERT_SENTINEL(objnode, OBJNODE);
+ BUG_ON(objnode->obj == NULL);
+ ASSERT_SENTINEL(objnode->obj, OBJ);
+ pool = objnode->obj->pool;
+ BUG_ON(pool == NULL);
+ ASSERT_SENTINEL(pool, POOL);
+ objnode->obj->objnode_count--;
+ objnode->obj = NULL;
+ (*tmem_hostops.objnode_free)(objnode, pool);
+}
+
+/*
+ * lookup index in object and return associated pampd (or NULL if not found)
+ */
+static void **__tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
+{
+ unsigned int height, shift;
+ struct tmem_objnode **slot = NULL;
+
+ BUG_ON(obj == NULL);
+ ASSERT_SENTINEL(obj, OBJ);
+ BUG_ON(obj->pool == NULL);
+ ASSERT_SENTINEL(obj->pool, POOL);
+
+ height = obj->objnode_tree_height;
+	if (index > tmem_objnode_tree_h2max[height])
+ goto out;
+ if (height == 0 && obj->objnode_tree_root) {
+ slot = &obj->objnode_tree_root;
+ goto out;
+ }
+ shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
+ slot = &obj->objnode_tree_root;
+ while (height > 0) {
+ if (*slot == NULL)
+ goto out;
+ slot = (struct tmem_objnode **)
+ ((*slot)->slots +
+ ((index >> shift) & OBJNODE_TREE_MAP_MASK));
+ shift -= OBJNODE_TREE_MAP_SHIFT;
+ height--;
+ }
+out:
+ return slot != NULL ? (void **)slot : NULL;
+}
+
+static void *tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
+{
+ struct tmem_objnode **slot;
+
+ slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
+ return slot != NULL ? *slot : NULL;
+}
+
+static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
+ void *new_pampd, bool no_free)
+{
+ struct tmem_objnode **slot;
+ void *ret = NULL;
+
+ slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
+ if ((slot != NULL) && (*slot != NULL)) {
+ void *old_pampd = *(void **)slot;
+ *(void **)slot = new_pampd;
+ if (!no_free)
+ (*tmem_pamops.free)(old_pampd, obj->pool,
+ NULL, 0, false);
+ ret = new_pampd;
+ }
+ return ret;
+}
+
+static int tmem_pampd_add_to_obj(struct tmem_obj *obj, uint32_t index,
+ void *pampd)
+{
+ int ret = 0;
+ struct tmem_objnode *objnode = NULL, *newnode, *slot;
+ unsigned int height, shift;
+ int offset = 0;
+
+ /* if necessary, extend the tree to be higher */
+ if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height]) {
+ height = obj->objnode_tree_height + 1;
+		while (index > tmem_objnode_tree_h2max[height])
+			height++;
+ if (obj->objnode_tree_root == NULL) {
+ obj->objnode_tree_height = height;
+ goto insert;
+ }
+ do {
+ newnode = tmem_objnode_alloc(obj);
+ if (!newnode) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ newnode->slots[0] = obj->objnode_tree_root;
+ newnode->slots_in_use = 1;
+ obj->objnode_tree_root = newnode;
+ obj->objnode_tree_height++;
+ } while (height > obj->objnode_tree_height);
+ }
+insert:
+ slot = obj->objnode_tree_root;
+ height = obj->objnode_tree_height;
+ shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
+ while (height > 0) {
+ if (slot == NULL) {
+ /* add a child objnode. */
+ slot = tmem_objnode_alloc(obj);
+ if (!slot) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (objnode) {
+				objnode->slots[offset] = slot;
+ objnode->slots_in_use++;
+ } else
+ obj->objnode_tree_root = slot;
+ }
+ /* go down a level */
+ offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
+ objnode = slot;
+ slot = objnode->slots[offset];
+ shift -= OBJNODE_TREE_MAP_SHIFT;
+ height--;
+ }
+ BUG_ON(slot != NULL);
+ if (objnode) {
+ objnode->slots_in_use++;
+ objnode->slots[offset] = pampd;
+ } else
+ obj->objnode_tree_root = pampd;
+ obj->pampd_count++;
+out:
+ return ret;
+}
+
+static void *tmem_pampd_delete_from_obj(struct tmem_obj *obj, uint32_t index)
+{
+ struct tmem_objnode_tree_path path[OBJNODE_TREE_MAX_PATH + 1];
+ struct tmem_objnode_tree_path *pathp = path;
+ struct tmem_objnode *slot = NULL;
+ unsigned int height, shift;
+ int offset;
+
+ BUG_ON(obj == NULL);
+ ASSERT_SENTINEL(obj, OBJ);
+ BUG_ON(obj->pool == NULL);
+ ASSERT_SENTINEL(obj->pool, POOL);
+ height = obj->objnode_tree_height;
+ if (index > tmem_objnode_tree_h2max[height])
+ goto out;
+ slot = obj->objnode_tree_root;
+ if (height == 0 && obj->objnode_tree_root) {
+ obj->objnode_tree_root = NULL;
+ goto out;
+ }
+ shift = (height - 1) * OBJNODE_TREE_MAP_SHIFT;
+ pathp->objnode = NULL;
+ do {
+ if (slot == NULL)
+ goto out;
+ pathp++;
+ offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
+ pathp->offset = offset;
+ pathp->objnode = slot;
+ slot = slot->slots[offset];
+ shift -= OBJNODE_TREE_MAP_SHIFT;
+ height--;
+ } while (height > 0);
+ if (slot == NULL)
+ goto out;
+ while (pathp->objnode) {
+ pathp->objnode->slots[pathp->offset] = NULL;
+ pathp->objnode->slots_in_use--;
+ if (pathp->objnode->slots_in_use) {
+ if (pathp->objnode == obj->objnode_tree_root) {
+ while (obj->objnode_tree_height > 0 &&
+ obj->objnode_tree_root->slots_in_use == 1 &&
+ obj->objnode_tree_root->slots[0]) {
+ struct tmem_objnode *to_free =
+ obj->objnode_tree_root;
+
+ obj->objnode_tree_root =
+ to_free->slots[0];
+ obj->objnode_tree_height--;
+ to_free->slots[0] = NULL;
+ to_free->slots_in_use = 0;
+ tmem_objnode_free(to_free);
+ }
+ }
+ goto out;
+ }
+ tmem_objnode_free(pathp->objnode); /* 0 slots used, free it */
+ pathp--;
+ }
+ obj->objnode_tree_height = 0;
+ obj->objnode_tree_root = NULL;
+
+out:
+ if (slot != NULL)
+ obj->pampd_count--;
+ BUG_ON(obj->pampd_count < 0);
+ return slot;
+}
+
+/* recursively walk the objnode_tree destroying pampds and objnodes */
+static void tmem_objnode_node_destroy(struct tmem_obj *obj,
+ struct tmem_objnode *objnode,
+ unsigned int ht)
+{
+ int i;
+
+ if (ht == 0)
+ return;
+ for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++) {
+ if (objnode->slots[i]) {
+ if (ht == 1) {
+ obj->pampd_count--;
+ (*tmem_pamops.free)(objnode->slots[i],
+ obj->pool, NULL, 0, true);
+ objnode->slots[i] = NULL;
+ continue;
+ }
+ tmem_objnode_node_destroy(obj, objnode->slots[i], ht-1);
+ tmem_objnode_free(objnode->slots[i]);
+ objnode->slots[i] = NULL;
+ }
+ }
+}
+
+static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj)
+{
+ if (obj->objnode_tree_root == NULL)
+ return;
+ if (obj->objnode_tree_height == 0) {
+ obj->pampd_count--;
+ (*tmem_pamops.free)(obj->objnode_tree_root,
+ obj->pool, NULL, 0, true);
+ } else {
+ tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
+ obj->objnode_tree_height);
+ tmem_objnode_free(obj->objnode_tree_root);
+ obj->objnode_tree_height = 0;
+ }
+ obj->objnode_tree_root = NULL;
+ (*tmem_pamops.free_obj)(obj->pool, obj);
+}
+
+/*
+ * Tmem is operated on by a set of well-defined actions:
+ * "put", "get", "flush", "flush_object", "new pool" and "destroy pool".
+ * (The tmem ABI allows for subpages and exchanges but these operations
+ * are not included in this implementation.)
+ *
+ * These "tmem core" operations are implemented in the following functions.
+ */
+
+/*
+ * "Put" a page, e.g. copy a page from the kernel into newly allocated
+ * PAM space (if such space is available). Tmem_put is complicated by
+ * a corner case: What if a page with matching handle already exists in
+ * tmem? To guarantee coherency, one of two actions is necessary: Either
+ * the data for the page must be overwritten, or the page must be
+ * "flushed" so that the data is not accessible to a subsequent "get".
+ * Since these "duplicate puts" are relatively rare, this implementation
+ * always flushes for simplicity.
+ */
+int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
+ char *data, size_t size, bool raw, int ephemeral)
+{
+ struct tmem_obj *obj = NULL, *objfound = NULL, *objnew = NULL;
+ void *pampd = NULL, *pampd_del = NULL;
+ int ret = -ENOMEM;
+ struct tmem_hashbucket *hb;
+
+ hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ spin_lock(&hb->lock);
+ obj = objfound = tmem_obj_find(hb, oidp);
+ if (obj != NULL) {
+ pampd = tmem_pampd_lookup_in_obj(objfound, index);
+ if (pampd != NULL) {
+ /* if found, is a dup put, flush the old one */
+ pampd_del = tmem_pampd_delete_from_obj(obj, index);
+ BUG_ON(pampd_del != pampd);
+ (*tmem_pamops.free)(pampd, pool, oidp, index, true);
+ if (obj->pampd_count == 0) {
+ objnew = obj;
+ objfound = NULL;
+ }
+ pampd = NULL;
+ }
+ } else {
+ obj = objnew = (*tmem_hostops.obj_alloc)(pool);
+ if (unlikely(obj == NULL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ tmem_obj_init(obj, hb, pool, oidp);
+ }
+ BUG_ON(obj == NULL);
+ BUG_ON(((objnew != obj) && (objfound != obj)) || (objnew == objfound));
+ pampd = (*tmem_pamops.create)(data, size, raw, ephemeral,
+ obj->pool, &obj->oid, index);
+ if (unlikely(pampd == NULL))
+ goto free;
+ ret = tmem_pampd_add_to_obj(obj, index, pampd);
+ if (unlikely(ret == -ENOMEM))
+ /* may have partially built objnode tree ("stump") */
+ goto delete_and_free;
+ goto out;
+
+delete_and_free:
+ (void)tmem_pampd_delete_from_obj(obj, index);
+free:
+ if (pampd)
+ (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
+ if (objnew) {
+ tmem_obj_free(objnew, hb);
+ (*tmem_hostops.obj_free)(objnew, pool);
+ }
+out:
+ spin_unlock(&hb->lock);
+ return ret;
+}
+
+void *tmem_localify_get_pampd(struct tmem_pool *pool, struct tmem_oid *oidp,
+ uint32_t index, struct tmem_obj **ret_obj,
+ void **saved_hb)
+{
+ struct tmem_hashbucket *hb;
+ struct tmem_obj *obj = NULL;
+ void *pampd = NULL;
+
+ hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ spin_lock(&hb->lock);
+ obj = tmem_obj_find(hb, oidp);
+ if (likely(obj != NULL))
+ pampd = tmem_pampd_lookup_in_obj(obj, index);
+ *ret_obj = obj;
+ *saved_hb = (void *)hb;
+ /* note, hashbucket remains locked */
+ return pampd;
+}
+
+void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
+ void *pampd, void *saved_hb, bool delete)
+{
+ struct tmem_hashbucket *hb = (struct tmem_hashbucket *)saved_hb;
+
+ BUG_ON(!spin_is_locked(&hb->lock));
+ if (pampd != NULL) {
+ BUG_ON(obj == NULL);
+ (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
+ } else if (delete) {
+ BUG_ON(obj == NULL);
+ (void)tmem_pampd_delete_from_obj(obj, index);
+ }
+ spin_unlock(&hb->lock);
+}
+
+static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
+ struct tmem_pool *pool, struct tmem_oid *oidp,
+ uint32_t index, bool free, char *data)
+{
+ void *old_pampd = *ppampd, *new_pampd = NULL;
+ bool intransit = false;
+ int ret = 0;
+
+ if (!is_ephemeral(pool))
+ new_pampd = (*tmem_pamops.repatriate_preload)(
+ old_pampd, pool, oidp, index, &intransit);
+ if (intransit)
+ ret = -EAGAIN;
+ else if (new_pampd != NULL)
+ *ppampd = new_pampd;
+ /* must release the hb->lock else repatriate can't sleep */
+ spin_unlock(&hb->lock);
+ if (!intransit)
+ ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
+ oidp, index, free, data);
+ return ret;
+}
+
+/*
+ * "Get" a page, e.g. if one can be found, copy the tmem page with the
+ * matching handle from PAM space to the kernel. By tmem definition,
+ * when a "get" is successful on an ephemeral page, the page is "flushed",
+ * and when a "get" is successful on a persistent page, the page is retained
+ * in tmem. Note that to preserve
+ * coherency, "get" can never be skipped if tmem contains the data.
+ * That is, if a get is done with a certain handle and fails, any
+ * subsequent "get" must also fail (unless of course there is a
+ * "put" done with the same handle).
+ */
+int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
+ char *data, size_t *size, bool raw, int get_and_free)
+{
+ struct tmem_obj *obj;
+ void *pampd;
+ bool ephemeral = is_ephemeral(pool);
+ int ret = -1;
+ struct tmem_hashbucket *hb;
+ bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral);
+	bool lock_held = false;
+ void **ppampd;
+
+again:
+ hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ spin_lock(&hb->lock);
+	lock_held = true;
+ obj = tmem_obj_find(hb, oidp);
+ if (obj == NULL)
+ goto out;
+ ppampd = __tmem_pampd_lookup_in_obj(obj, index);
+ if (ppampd == NULL)
+ goto out;
+ if (tmem_pamops.is_remote(*ppampd)) {
+ ret = tmem_repatriate(ppampd, hb, pool, oidp,
+ index, free, data);
+		lock_held = false; /* note hb->lock has been unlocked */
+ if (ret == -EAGAIN) {
+ /* rare I think, but should cond_resched()??? */
+ usleep_range(10, 1000);
+ goto again;
+ } else if (ret != 0) {
+ if (ret != -ENOENT)
+ pr_err("UNTESTED case in tmem_get, ret=%d\n",
+ ret);
+ ret = -1;
+ goto out;
+ }
+ goto out;
+ }
+ if (free)
+ pampd = tmem_pampd_delete_from_obj(obj, index);
+ else
+ pampd = tmem_pampd_lookup_in_obj(obj, index);
+ if (pampd == NULL)
+ goto out;
+ if (free) {
+ if (obj->pampd_count == 0) {
+ tmem_obj_free(obj, hb);
+ (*tmem_hostops.obj_free)(obj, pool);
+ obj = NULL;
+ }
+ }
+ if (free)
+ ret = (*tmem_pamops.get_data_and_free)(
+ data, size, raw, pampd, pool, oidp, index);
+ else
+ ret = (*tmem_pamops.get_data)(
+ data, size, raw, pampd, pool, oidp, index);
+ if (ret < 0)
+ goto out;
+ ret = 0;
+out:
+ if (lock_held)
+ spin_unlock(&hb->lock);
+ return ret;
+}
+
+/*
+ * If a page in tmem matches the handle, "flush" this page from tmem such
+ * that any subsequent "get" does not succeed (unless, of course, there
+ * was another "put" with the same handle).
+ */
+int tmem_flush_page(struct tmem_pool *pool,
+ struct tmem_oid *oidp, uint32_t index)
+{
+ struct tmem_obj *obj;
+ void *pampd;
+ int ret = -1;
+ struct tmem_hashbucket *hb;
+
+ hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ spin_lock(&hb->lock);
+ obj = tmem_obj_find(hb, oidp);
+ if (obj == NULL)
+ goto out;
+ pampd = tmem_pampd_delete_from_obj(obj, index);
+ if (pampd == NULL)
+ goto out;
+ (*tmem_pamops.free)(pampd, pool, oidp, index, true);
+ if (obj->pampd_count == 0) {
+ tmem_obj_free(obj, hb);
+ (*tmem_hostops.obj_free)(obj, pool);
+ }
+ ret = 0;
+
+out:
+ spin_unlock(&hb->lock);
+ return ret;
+}
+
+/*
+ * If a page in tmem matches the handle, replace the page so that any
+ * subsequent "get" gets the new page. Returns -1 if no object
+ * matches the handle; otherwise returns the result of the pamops
+ * replace_in_obj callback.
+ */
+int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
+ uint32_t index, void *new_pampd)
+{
+ struct tmem_obj *obj;
+ int ret = -1;
+ struct tmem_hashbucket *hb;
+
+ hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ spin_lock(&hb->lock);
+ obj = tmem_obj_find(hb, oidp);
+ if (obj == NULL)
+ goto out;
+ new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
+ ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
+out:
+ spin_unlock(&hb->lock);
+ return ret;
+}
+
+/*
+ * "Flush" all pages in tmem matching this oid.
+ */
+int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
+{
+ struct tmem_obj *obj;
+ struct tmem_hashbucket *hb;
+ int ret = -1;
+
+ hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ spin_lock(&hb->lock);
+ obj = tmem_obj_find(hb, oidp);
+ if (obj == NULL)
+ goto out;
+ tmem_pampd_destroy_all_in_obj(obj);
+ tmem_obj_free(obj, hb);
+ (*tmem_hostops.obj_free)(obj, pool);
+ ret = 0;
+
+out:
+ spin_unlock(&hb->lock);
+ return ret;
+}
+
+/*
+ * "Flush" all pages (and tmem_objs) from this tmem_pool and disable
+ * all subsequent access to this tmem_pool.
+ */
+int tmem_destroy_pool(struct tmem_pool *pool)
+{
+ int ret = -1;
+
+ if (pool == NULL)
+ goto out;
+ tmem_pool_flush(pool, 1);
+ ret = 0;
+out:
+ return ret;
+}
+
+static LIST_HEAD(tmem_global_pool_list);
+
+/*
+ * Initialize the given tmem_pool using the provided flags and link it
+ * into the global pool list; the pool id is assigned by the tmem host
+ * implementation.
+ */
+void tmem_new_pool(struct tmem_pool *pool, uint32_t flags)
+{
+ int persistent = flags & TMEM_POOL_PERSIST;
+ int shared = flags & TMEM_POOL_SHARED;
+ struct tmem_hashbucket *hb = &pool->hashbucket[0];
+ int i;
+
+ for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
+ hb->obj_rb_root = RB_ROOT;
+ spin_lock_init(&hb->lock);
+ }
+ INIT_LIST_HEAD(&pool->pool_list);
+ atomic_set(&pool->obj_count, 0);
+ SET_SENTINEL(pool, POOL);
+ list_add_tail(&pool->pool_list, &tmem_global_pool_list);
+ pool->persistent = persistent;
+ pool->shared = shared;
+}
diff --git a/drivers/staging/ramster/tmem.h b/drivers/staging/ramster/tmem.h
new file mode 100644
index 000000000000..47f1918c8314
--- /dev/null
+++ b/drivers/staging/ramster/tmem.h
@@ -0,0 +1,244 @@
+/*
+ * tmem.h
+ *
+ * Transcendent memory
+ *
+ * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
+ */
+
+#ifndef _TMEM_H_
+#define _TMEM_H_
+
+#include <linux/highmem.h>
+#include <linux/hash.h>
+#include <linux/atomic.h>
+
+/*
+ * These are pre-defined by the Xen<->Linux ABI
+ */
+#define TMEM_PUT_PAGE 4
+#define TMEM_GET_PAGE 5
+#define TMEM_FLUSH_PAGE 6
+#define TMEM_FLUSH_OBJECT 7
+#define TMEM_POOL_PERSIST 1
+#define TMEM_POOL_SHARED 2
+#define TMEM_POOL_PRECOMPRESSED 4
+#define TMEM_POOL_PAGESIZE_SHIFT 4
+#define TMEM_POOL_PAGESIZE_MASK 0xf
+#define TMEM_POOL_RESERVED_BITS 0x00ffff00
+
+/*
+ * sentinels have proven very useful for debugging but can be removed
+ * or disabled before final merge.
+ */
+#define SENTINELS
+#ifdef SENTINELS
+#define DECL_SENTINEL uint32_t sentinel;
+#define SET_SENTINEL(_x, _y) (_x->sentinel = _y##_SENTINEL)
+#define INVERT_SENTINEL(_x, _y) (_x->sentinel = ~_y##_SENTINEL)
+#define ASSERT_SENTINEL(_x, _y) WARN_ON(_x->sentinel != _y##_SENTINEL)
+#define ASSERT_INVERTED_SENTINEL(_x, _y) WARN_ON(_x->sentinel != ~_y##_SENTINEL)
+#else
+#define DECL_SENTINEL
+#define SET_SENTINEL(_x, _y) do { } while (0)
+#define INVERT_SENTINEL(_x, _y) do { } while (0)
+#define ASSERT_SENTINEL(_x, _y) do { } while (0)
+#define ASSERT_INVERTED_SENTINEL(_x, _y) do { } while (0)
+#endif
+
+#define ASSERT_SPINLOCK(_l) WARN_ON(!spin_is_locked(_l))
+
+/*
+ * A pool is the highest-level data structure managed by tmem and
+ * usually corresponds to a large independent set of pages such as
+ * a filesystem. Each pool has an id, and certain attributes and counters.
+ * It also contains a set of hash buckets, each of which contains an rbtree
+ * of objects and a lock to manage concurrency within the pool.
+ */
+
+#define TMEM_HASH_BUCKET_BITS 8
+#define TMEM_HASH_BUCKETS (1<<TMEM_HASH_BUCKET_BITS)
+
+struct tmem_hashbucket {
+ struct rb_root obj_rb_root;
+ spinlock_t lock;
+};
+
+struct tmem_pool {
+ void *client; /* "up" for some clients, avoids table lookup */
+ struct list_head pool_list;
+ uint32_t pool_id;
+ bool persistent;
+ bool shared;
+ atomic_t obj_count;
+ atomic_t refcount;
+ struct tmem_hashbucket hashbucket[TMEM_HASH_BUCKETS];
+ DECL_SENTINEL
+};
+
+#define is_persistent(_p) (_p->persistent)
+#define is_ephemeral(_p) (!(_p->persistent))
+
+/*
+ * An object id ("oid") is large: 192 bits (to ensure, for example, files
+ * in a modern filesystem can be uniquely identified).
+ */
+
+struct tmem_oid {
+ uint64_t oid[3];
+};
+
+struct tmem_xhandle {
+ uint8_t client_id;
+ uint8_t xh_data_cksum;
+ uint16_t xh_data_size;
+ uint16_t pool_id;
+ struct tmem_oid oid;
+ uint32_t index;
+ void *extra;
+};
+
+static inline struct tmem_xhandle tmem_xhandle_fill(uint16_t client_id,
+ struct tmem_pool *pool,
+ struct tmem_oid *oidp,
+ uint32_t index)
+{
+ struct tmem_xhandle xh;
+ xh.client_id = client_id;
+ xh.xh_data_cksum = (uint8_t)-1;
+ xh.xh_data_size = (uint16_t)-1;
+ xh.pool_id = pool->pool_id;
+ xh.oid = *oidp;
+ xh.index = index;
+ return xh;
+}
+
+static inline void tmem_oid_set_invalid(struct tmem_oid *oidp)
+{
+ oidp->oid[0] = oidp->oid[1] = oidp->oid[2] = -1UL;
+}
+
+static inline bool tmem_oid_valid(struct tmem_oid *oidp)
+{
+ return oidp->oid[0] != -1UL || oidp->oid[1] != -1UL ||
+ oidp->oid[2] != -1UL;
+}
+
+static inline int tmem_oid_compare(struct tmem_oid *left,
+ struct tmem_oid *right)
+{
+ int ret;
+
+ if (left->oid[2] == right->oid[2]) {
+ if (left->oid[1] == right->oid[1]) {
+ if (left->oid[0] == right->oid[0])
+ ret = 0;
+ else if (left->oid[0] < right->oid[0])
+ ret = -1;
+ else
+				ret = 1;
+ } else if (left->oid[1] < right->oid[1])
+ ret = -1;
+ else
+ ret = 1;
+ } else if (left->oid[2] < right->oid[2])
+ ret = -1;
+ else
+ ret = 1;
+ return ret;
+}
+
+static inline unsigned tmem_oid_hash(struct tmem_oid *oidp)
+{
+ return hash_long(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
+ TMEM_HASH_BUCKET_BITS);
+}
+
+/*
+ * A tmem_obj contains an identifier (oid), pointers to the parent
+ * pool and the rb_tree to which it belongs, counters, and an ordered
+ * set of pampds, structured in a radix-tree-like tree. The intermediate
+ * nodes of the tree are called tmem_objnodes.
+ */
+
+struct tmem_objnode;
+
+struct tmem_obj {
+ struct tmem_oid oid;
+ struct tmem_pool *pool;
+ struct rb_node rb_tree_node;
+ struct tmem_objnode *objnode_tree_root;
+ unsigned int objnode_tree_height;
+ unsigned long objnode_count;
+ long pampd_count;
+ /* for current design of ramster, all pages belonging to
+ * an object reside on the same remotenode and extra is
+ * used to record the number of the remotenode so a
+ * flush-object operation can specify it */
+ void *extra; /* for use by pampd implementation */
+ DECL_SENTINEL
+};
+
+#define OBJNODE_TREE_MAP_SHIFT 6
+#define OBJNODE_TREE_MAP_SIZE (1UL << OBJNODE_TREE_MAP_SHIFT)
+#define OBJNODE_TREE_MAP_MASK (OBJNODE_TREE_MAP_SIZE-1)
+#define OBJNODE_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
+#define OBJNODE_TREE_MAX_PATH \
+ (OBJNODE_TREE_INDEX_BITS/OBJNODE_TREE_MAP_SHIFT + 2)
+
+struct tmem_objnode {
+ struct tmem_obj *obj;
+ DECL_SENTINEL
+ void *slots[OBJNODE_TREE_MAP_SIZE];
+ unsigned int slots_in_use;
+};
+
+/* pampd abstract datatype methods provided by the PAM implementation */
+struct tmem_pamops {
+ void *(*create)(char *, size_t, bool, int,
+ struct tmem_pool *, struct tmem_oid *, uint32_t);
+ int (*get_data)(char *, size_t *, bool, void *, struct tmem_pool *,
+ struct tmem_oid *, uint32_t);
+ int (*get_data_and_free)(char *, size_t *, bool, void *,
+ struct tmem_pool *, struct tmem_oid *,
+ uint32_t);
+ void (*free)(void *, struct tmem_pool *,
+ struct tmem_oid *, uint32_t, bool);
+ void (*free_obj)(struct tmem_pool *, struct tmem_obj *);
+ bool (*is_remote)(void *);
+ void *(*repatriate_preload)(void *, struct tmem_pool *,
+ struct tmem_oid *, uint32_t, bool *);
+ int (*repatriate)(void *, void *, struct tmem_pool *,
+ struct tmem_oid *, uint32_t, bool, void *);
+ void (*new_obj)(struct tmem_obj *);
+ int (*replace_in_obj)(void *, struct tmem_obj *);
+};
+extern void tmem_register_pamops(struct tmem_pamops *m);
+
+/* memory allocation methods provided by the host implementation */
+struct tmem_hostops {
+ struct tmem_obj *(*obj_alloc)(struct tmem_pool *);
+ void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
+ struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
+ void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
+};
+extern void tmem_register_hostops(struct tmem_hostops *m);
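+
+/*
+ * Registration sketch: a host supplies its allocators once at init.
+ * The my_* names below are hypothetical placeholders, not functions in
+ * this driver (the real host lives in the zcache code):
+ *
+ *	static struct tmem_hostops my_hostops = {
+ *		.obj_alloc = my_obj_alloc,
+ *		.obj_free = my_obj_free,
+ *		.objnode_alloc = my_objnode_alloc,
+ *		.objnode_free = my_objnode_free,
+ *	};
+ *	tmem_register_hostops(&my_hostops);
+ */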
+
+/* core tmem accessor functions */
+extern int tmem_put(struct tmem_pool *, struct tmem_oid *, uint32_t index,
+ char *, size_t, bool, int);
+extern int tmem_get(struct tmem_pool *, struct tmem_oid *, uint32_t index,
+ char *, size_t *, bool, int);
+extern int tmem_replace(struct tmem_pool *, struct tmem_oid *, uint32_t index,
+ void *);
+extern void *tmem_localify_get_pampd(struct tmem_pool *, struct tmem_oid *,
+ uint32_t index, struct tmem_obj **,
+ void **);
+extern void tmem_localify_finish(struct tmem_obj *, uint32_t index,
+ void *, void *, bool);
+extern int tmem_flush_page(struct tmem_pool *, struct tmem_oid *,
+ uint32_t index);
+extern int tmem_flush_object(struct tmem_pool *, struct tmem_oid *);
+extern int tmem_destroy_pool(struct tmem_pool *);
+extern void tmem_new_pool(struct tmem_pool *, uint32_t);
+#endif /* _TMEM_H_ */
diff --git a/drivers/staging/zram/xvmalloc.c b/drivers/staging/ramster/xvmalloc.c
index 1f9c5082b6d5..93ba8e9407aa 100644
--- a/drivers/staging/zram/xvmalloc.c
+++ b/drivers/staging/ramster/xvmalloc.c
@@ -56,17 +56,17 @@ static void clear_flag(struct block_header *block, enum blockflags flag)
* This is called from xv_malloc/xv_free path, so it
* needs to be fast.
*/
-static void *get_ptr_atomic(struct page *page, u16 offset, enum km_type type)
+static void *get_ptr_atomic(struct page *page, u16 offset)
{
unsigned char *base;
- base = kmap_atomic(page, type);
+ base = kmap_atomic(page);
return base + offset;
}
-static void put_ptr_atomic(void *ptr, enum km_type type)
+static void put_ptr_atomic(void *ptr)
{
- kunmap_atomic(ptr, type);
+ kunmap_atomic(ptr);
}
static u32 get_blockprev(struct block_header *block)
@@ -202,10 +202,10 @@ static void insert_block(struct xv_pool *pool, struct page *page, u32 offset,
if (block->link.next_page) {
nextblock = get_ptr_atomic(block->link.next_page,
- block->link.next_offset, KM_USER1);
+ block->link.next_offset);
nextblock->link.prev_page = page;
nextblock->link.prev_offset = offset;
- put_ptr_atomic(nextblock, KM_USER1);
+ put_ptr_atomic(nextblock);
/* If there was a next page then the free bits are set. */
return;
}
@@ -225,18 +225,18 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
if (block->link.prev_page) {
tmpblock = get_ptr_atomic(block->link.prev_page,
- block->link.prev_offset, KM_USER1);
+ block->link.prev_offset);
tmpblock->link.next_page = block->link.next_page;
tmpblock->link.next_offset = block->link.next_offset;
- put_ptr_atomic(tmpblock, KM_USER1);
+ put_ptr_atomic(tmpblock);
}
if (block->link.next_page) {
tmpblock = get_ptr_atomic(block->link.next_page,
- block->link.next_offset, KM_USER1);
+ block->link.next_offset);
tmpblock->link.prev_page = block->link.prev_page;
tmpblock->link.prev_offset = block->link.prev_offset;
- put_ptr_atomic(tmpblock, KM_USER1);
+ put_ptr_atomic(tmpblock);
}
 	/* Is this block at the head of the freelist? */
@@ -249,11 +249,10 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
if (pool->freelist[slindex].page) {
struct block_header *tmpblock;
tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
- pool->freelist[slindex].offset,
- KM_USER1);
+ pool->freelist[slindex].offset);
tmpblock->link.prev_page = NULL;
tmpblock->link.prev_offset = 0;
- put_ptr_atomic(tmpblock, KM_USER1);
+ put_ptr_atomic(tmpblock);
} else {
/* This freelist bucket is empty */
__clear_bit(slindex % BITS_PER_LONG,
@@ -284,7 +283,7 @@ static int grow_pool(struct xv_pool *pool, gfp_t flags)
stat_inc(&pool->total_pages);
spin_lock(&pool->lock);
- block = get_ptr_atomic(page, 0, KM_USER0);
+ block = get_ptr_atomic(page, 0);
block->size = PAGE_SIZE - XV_ALIGN;
set_flag(block, BLOCK_FREE);
@@ -293,7 +292,7 @@ static int grow_pool(struct xv_pool *pool, gfp_t flags)
insert_block(pool, page, 0, block);
- put_ptr_atomic(block, KM_USER0);
+ put_ptr_atomic(block);
spin_unlock(&pool->lock);
return 0;
@@ -375,7 +374,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
return -ENOMEM;
}
- block = get_ptr_atomic(*page, *offset, KM_USER0);
+ block = get_ptr_atomic(*page, *offset);
remove_block(pool, *page, *offset, block, index);
@@ -405,7 +404,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
block->size = origsize;
clear_flag(block, BLOCK_FREE);
- put_ptr_atomic(block, KM_USER0);
+ put_ptr_atomic(block);
spin_unlock(&pool->lock);
*offset += XV_ALIGN;
@@ -426,7 +425,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
spin_lock(&pool->lock);
- page_start = get_ptr_atomic(page, 0, KM_USER0);
+ page_start = get_ptr_atomic(page, 0);
block = (struct block_header *)((char *)page_start + offset);
/* Catch double free bugs */
@@ -468,7 +467,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
/* No used objects in this page. Free it. */
if (block->size == PAGE_SIZE - XV_ALIGN) {
- put_ptr_atomic(page_start, KM_USER0);
+ put_ptr_atomic(page_start);
spin_unlock(&pool->lock);
__free_page(page);
@@ -486,7 +485,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
set_blockprev(tmpblock, offset);
}
- put_ptr_atomic(page_start, KM_USER0);
+ put_ptr_atomic(page_start);
spin_unlock(&pool->lock);
}
EXPORT_SYMBOL_GPL(xv_free);
diff --git a/drivers/staging/zram/xvmalloc.h b/drivers/staging/ramster/xvmalloc.h
index 5b1a81aa5faf..5b1a81aa5faf 100644
--- a/drivers/staging/zram/xvmalloc.h
+++ b/drivers/staging/ramster/xvmalloc.h
diff --git a/drivers/staging/zram/xvmalloc_int.h b/drivers/staging/ramster/xvmalloc_int.h
index b5f1f7febcf6..b5f1f7febcf6 100644
--- a/drivers/staging/zram/xvmalloc_int.h
+++ b/drivers/staging/ramster/xvmalloc_int.h
diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c
new file mode 100644
index 000000000000..68b2e053a0e6
--- /dev/null
+++ b/drivers/staging/ramster/zcache-main.c
@@ -0,0 +1,3320 @@
+/*
+ * zcache.c
+ *
+ * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
+ * Copyright (c) 2010,2011, Nitin Gupta
+ *
+ * Zcache provides an in-kernel "host implementation" for transcendent memory
+ * and, thus indirectly, for cleancache and frontswap. Zcache includes two
+ * page-accessible memory [1] interfaces, both utilizing lzo1x compression:
+ * 1) "compression buddies" ("zbud") is used for ephemeral pages
+ * 2) xvmalloc is used for persistent pages.
+ * Xvmalloc (based on the TLSF allocator) has very low fragmentation
+ * so it maximizes space efficiency, while zbud allows pairs (and potentially,
+ * in the future, more than a pair of) compressed pages to be closely linked
+ * so that reclaiming can be done via the kernel's physical-page-oriented
+ * "shrinker" interface.
+ *
+ * [1] For a definition of page-accessible memory (aka PAM), see:
+ * http://marc.info/?l=linux-mm&m=127811271605009
+ * RAMSTER TODO:
+ * - handle remotifying of buddied pages (see zbud_remotify_zbpg)
+ * - kernel boot params: nocleancache/nofrontswap don't always work?!?
+ */
+
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/highmem.h>
+#include <linux/list.h>
+#include <linux/lzo.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/math64.h>
+#include "tmem.h"
+#include "zcache.h"
+#include "ramster.h"
+#include "cluster/tcp.h"
+
+#include "xvmalloc.h" /* temporary until change to zsmalloc */
+
+#define RAMSTER_TESTING
+
+#if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
+#error "ramster is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
+#endif
+#ifdef CONFIG_CLEANCACHE
+#include <linux/cleancache.h>
+#endif
+#ifdef CONFIG_FRONTSWAP
+#include <linux/frontswap.h>
+#endif
+
+enum ramster_remotify_op {
+ RAMSTER_REMOTIFY_EPH_PUT,
+ RAMSTER_REMOTIFY_PERS_PUT,
+ RAMSTER_REMOTIFY_FLUSH_PAGE,
+ RAMSTER_REMOTIFY_FLUSH_OBJ,
+ RAMSTER_INTRANSIT_PERS
+};
+
+struct ramster_remotify_hdr {
+ enum ramster_remotify_op op;
+ struct list_head list;
+};
+
+#define ZBH_SENTINEL 0x43214321
+#define ZBPG_SENTINEL 0xdeadbeef
+
+#define ZBUD_MAX_BUDS 2
+
+struct zbud_hdr {
+ struct ramster_remotify_hdr rem_op;
+ uint16_t client_id;
+ uint16_t pool_id;
+ struct tmem_oid oid;
+ uint32_t index;
+ uint16_t size; /* compressed size in bytes, zero means unused */
+ DECL_SENTINEL
+};
+
+#define ZVH_SENTINEL 0x43214321
+static const int zv_max_page_size = (PAGE_SIZE / 8) * 7;
+
+struct zv_hdr {
+ struct ramster_remotify_hdr rem_op;
+ uint16_t client_id;
+ uint16_t pool_id;
+ struct tmem_oid oid;
+ uint32_t index;
+ DECL_SENTINEL
+};
+
+struct flushlist_node {
+ struct ramster_remotify_hdr rem_op;
+ struct tmem_xhandle xh;
+};
+
+union remotify_list_node {
+	struct ramster_remotify_hdr rem_op;
+	struct zv_hdr zv;
+	struct zbud_hdr zbud;
+	struct flushlist_node flist;
+};
+
+static LIST_HEAD(zcache_rem_op_list);
+static DEFINE_SPINLOCK(zcache_rem_op_list_lock);
+
+#if 0
+/* this is more aggressive but may cause other problems? */
+#define ZCACHE_GFP_MASK (GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN)
+#else
+#define ZCACHE_GFP_MASK \
+ (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
+#endif
+
+#define MAX_POOLS_PER_CLIENT 16
+
+#define MAX_CLIENTS 16
+#define LOCAL_CLIENT ((uint16_t)-1)
+
+MODULE_LICENSE("GPL");
+
+struct zcache_client {
+ struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
+ struct xv_pool *xvpool;
+ bool allocated;
+ atomic_t refcount;
+};
+
+static struct zcache_client zcache_host;
+static struct zcache_client zcache_clients[MAX_CLIENTS];
+
+static inline uint16_t get_client_id_from_client(struct zcache_client *cli)
+{
+ BUG_ON(cli == NULL);
+ if (cli == &zcache_host)
+ return LOCAL_CLIENT;
+ return cli - &zcache_clients[0];
+}
+
+static inline bool is_local_client(struct zcache_client *cli)
+{
+ return cli == &zcache_host;
+}
+
+/**********
+ * Compression buddies ("zbud") provides for packing two (or, possibly
+ * in the future, more) compressed ephemeral pages into a single "raw"
+ * (physical) page and tracking them with data structures so that
+ * the raw pages can be easily reclaimed.
+ *
+ * A zbud page ("zbpg") is an aligned page containing a list_head,
+ * a lock, and two "zbud headers". The remainder of the physical
+ * page is divided up into aligned 64-byte "chunks" which contain
+ * the compressed data for zero, one, or two zbuds. Each zbpg
+ * resides on: (1) an "unused list" if it has no zbuds; (2) a
+ * "buddied" list if it is fully populated with two zbuds; or
+ * (3) one of PAGE_SIZE/64 "unbuddied" lists indexed by how many chunks
+ * the one unbuddied zbud uses. The data inside a zbpg cannot be
+ * read or written unless the zbpg's lock is held.
+ */
+
+struct zbud_page {
+ struct list_head bud_list;
+ spinlock_t lock;
+ struct zbud_hdr buddy[ZBUD_MAX_BUDS];
+ DECL_SENTINEL
+ /* followed by NUM_CHUNK aligned CHUNK_SIZE-byte chunks */
+};
+
+#define CHUNK_SHIFT 6
+#define CHUNK_SIZE (1 << CHUNK_SHIFT)
+#define CHUNK_MASK (~(CHUNK_SIZE-1))
+#define NCHUNKS (((PAGE_SIZE - sizeof(struct zbud_page)) & \
+ CHUNK_MASK) >> CHUNK_SHIFT)
+#define MAX_CHUNK (NCHUNKS-1)
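+/*
+ * Worked example (illustrative only; real values depend on the
+ * architecture's PAGE_SIZE and on sizeof(struct zbud_page)): with
+ * PAGE_SIZE == 4096, CHUNK_SIZE == 64, and a hypothetical
+ * sizeof(struct zbud_page) == 112, NCHUNKS is
+ * ((4096 - 112) & ~63) >> 6 == 3968 >> 6 == 62,
+ * i.e. 62 chunks of compressed data per raw page and MAX_CHUNK == 61.
+ */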
+
+static struct {
+ struct list_head list;
+ unsigned count;
+} zbud_unbuddied[NCHUNKS];
+/* list N contains pages with N chunks USED and NCHUNKS-N unused */
+/* element 0 is never used but optimizing that isn't worth it */
+static unsigned long zbud_cumul_chunk_counts[NCHUNKS];
+
+struct list_head zbud_buddied_list;
+static unsigned long zcache_zbud_buddied_count;
+
+/* protects the buddied list and all unbuddied lists */
+static DEFINE_SPINLOCK(zbud_budlists_spinlock);
+
+static atomic_t zcache_zbud_curr_raw_pages;
+static atomic_t zcache_zbud_curr_zpages;
+static unsigned long zcache_zbud_curr_zbytes;
+static unsigned long zcache_zbud_cumul_zpages;
+static unsigned long zcache_zbud_cumul_zbytes;
+static unsigned long zcache_compress_poor;
+static unsigned long zcache_policy_percent_exceeded;
+static unsigned long zcache_mean_compress_poor;
+
+/*
+ * RAMster counters
+ * - Remote pages are pages with a local pampd but the data is remote
+ * - Foreign pages are pages stored locally but belonging to another node
+ */
+static atomic_t ramster_remote_pers_pages = ATOMIC_INIT(0);
+static unsigned long ramster_pers_remotify_enable;
+static unsigned long ramster_eph_remotify_enable;
+static unsigned long ramster_eph_pages_remoted;
+static unsigned long ramster_eph_pages_remote_failed;
+static unsigned long ramster_pers_pages_remoted;
+static unsigned long ramster_pers_pages_remote_failed;
+static unsigned long ramster_pers_pages_remote_nomem;
+static unsigned long ramster_remote_objects_flushed;
+static unsigned long ramster_remote_object_flushes_failed;
+static unsigned long ramster_remote_pages_flushed;
+static unsigned long ramster_remote_page_flushes_failed;
+static unsigned long ramster_remote_eph_pages_succ_get;
+static unsigned long ramster_remote_pers_pages_succ_get;
+static unsigned long ramster_remote_eph_pages_unsucc_get;
+static unsigned long ramster_remote_pers_pages_unsucc_get;
+static atomic_t ramster_curr_flnode_count = ATOMIC_INIT(0);
+static unsigned long ramster_curr_flnode_count_max;
+static atomic_t ramster_foreign_eph_pampd_count = ATOMIC_INIT(0);
+static unsigned long ramster_foreign_eph_pampd_count_max;
+static atomic_t ramster_foreign_pers_pampd_count = ATOMIC_INIT(0);
+static unsigned long ramster_foreign_pers_pampd_count_max;
+
+/* forward references */
+static void *zcache_get_free_page(void);
+static void zcache_free_page(void *p);
+
+/*
+ * zbud helper functions
+ */
+
+static inline unsigned zbud_max_buddy_size(void)
+{
+ return MAX_CHUNK << CHUNK_SHIFT;
+}
+
+static inline unsigned zbud_size_to_chunks(unsigned size)
+{
+ BUG_ON(size == 0 || size > zbud_max_buddy_size());
+ return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
+}
+
+static inline int zbud_budnum(struct zbud_hdr *zh)
+{
+ unsigned offset = (unsigned long)zh & (PAGE_SIZE - 1);
+ struct zbud_page *zbpg = NULL;
+ unsigned budnum = -1U;
+ int i;
+
+ for (i = 0; i < ZBUD_MAX_BUDS; i++)
+ if (offset == offsetof(typeof(*zbpg), buddy[i])) {
+ budnum = i;
+ break;
+ }
+ BUG_ON(budnum == -1U);
+ return budnum;
+}
+
+static char *zbud_data(struct zbud_hdr *zh, unsigned size)
+{
+ struct zbud_page *zbpg;
+ char *p;
+ unsigned budnum;
+
+ ASSERT_SENTINEL(zh, ZBH);
+ budnum = zbud_budnum(zh);
+ BUG_ON(size == 0 || size > zbud_max_buddy_size());
+ zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
+ ASSERT_SPINLOCK(&zbpg->lock);
+ p = (char *)zbpg;
+ if (budnum == 0)
+ p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
+ CHUNK_MASK);
+ else if (budnum == 1)
+ p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK);
+ return p;
+}
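+
+/*
+ * Illustrative sketch of the placement arithmetic above (compiled out,
+ * never called; the 300-byte size is arbitrary): buddy 0 grows
+ * chunk-aligned from just past the zbud_page header, while buddy 1 is
+ * placed chunk-aligned against the end of the page.
+ */
+#if 0
+static void zbud_data_example(struct zbud_page *zbpg)
+{
+	char *base = (char *)zbpg;
+	/* buddy 0: starts at the header size rounded up to a chunk */
+	char *bud0 = base + ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
+				CHUNK_MASK);
+	/* buddy 1 of 300 bytes: 300 rounds up to 320 (five chunks), so
+	 * it starts at offset PAGE_SIZE - 320 */
+	char *bud1 = base + PAGE_SIZE - ((300 + CHUNK_SIZE - 1) & CHUNK_MASK);
+
+	(void)bud0;
+	(void)bud1;
+}
+#endif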
+
+static void zbud_copy_from_pampd(char *data, size_t *size, struct zbud_hdr *zh)
+{
+ struct zbud_page *zbpg;
+ char *p;
+ unsigned budnum;
+
+ ASSERT_SENTINEL(zh, ZBH);
+ budnum = zbud_budnum(zh);
+ zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
+ spin_lock(&zbpg->lock);
+ BUG_ON(zh->size > *size);
+ p = (char *)zbpg;
+ if (budnum == 0)
+ p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
+ CHUNK_MASK);
+ else if (budnum == 1)
+ p += PAGE_SIZE - ((zh->size + CHUNK_SIZE - 1) & CHUNK_MASK);
+ /* client should be filled in by caller */
+ memcpy(data, p, zh->size);
+ *size = zh->size;
+ spin_unlock(&zbpg->lock);
+}
+
+/*
+ * zbud raw page management
+ */
+
+static struct zbud_page *zbud_alloc_raw_page(void)
+{
+ struct zbud_page *zbpg = NULL;
+ struct zbud_hdr *zh0, *zh1;
+ zbpg = zcache_get_free_page();
+	if (likely(zbpg != NULL)) {
+		INIT_LIST_HEAD(&zbpg->bud_list);
+		zh0 = &zbpg->buddy[0];
+		zh1 = &zbpg->buddy[1];
+		spin_lock_init(&zbpg->lock);
+		atomic_inc(&zcache_zbud_curr_raw_pages);
+		SET_SENTINEL(zbpg, ZBPG);
+		zh0->size = 0;
+		zh1->size = 0;
+		tmem_oid_set_invalid(&zh0->oid);
+		tmem_oid_set_invalid(&zh1->oid);
+ }
+ return zbpg;
+}
+
+static void zbud_free_raw_page(struct zbud_page *zbpg)
+{
+ struct zbud_hdr *zh0 = &zbpg->buddy[0], *zh1 = &zbpg->buddy[1];
+
+ ASSERT_SENTINEL(zbpg, ZBPG);
+ BUG_ON(!list_empty(&zbpg->bud_list));
+ ASSERT_SPINLOCK(&zbpg->lock);
+ BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
+ BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
+ INVERT_SENTINEL(zbpg, ZBPG);
+ spin_unlock(&zbpg->lock);
+ atomic_dec(&zcache_zbud_curr_raw_pages);
+ zcache_free_page(zbpg);
+}
+
+/*
+ * core zbud handling routines
+ */
+
+static unsigned zbud_free(struct zbud_hdr *zh)
+{
+ unsigned size;
+
+ ASSERT_SENTINEL(zh, ZBH);
+ BUG_ON(!tmem_oid_valid(&zh->oid));
+ size = zh->size;
+ BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
+ zh->size = 0;
+ tmem_oid_set_invalid(&zh->oid);
+ INVERT_SENTINEL(zh, ZBH);
+ zcache_zbud_curr_zbytes -= size;
+ atomic_dec(&zcache_zbud_curr_zpages);
+ return size;
+}
+
+static void zbud_free_and_delist(struct zbud_hdr *zh)
+{
+ unsigned chunks;
+ struct zbud_hdr *zh_other;
+ unsigned budnum = zbud_budnum(zh), size;
+ struct zbud_page *zbpg =
+ container_of(zh, struct zbud_page, buddy[budnum]);
+
+	/*
+	 * FIXME: this should be BUG_ON; the pool destruction path doesn't
+	 * disable interrupts: tmem_destroy_pool() ->
+	 * tmem_pampd_destroy_all_in_obj() -> tmem_objnode_node_destroy()
+	 * -> zcache_pampd_free()
+	 */
+ WARN_ON(!irqs_disabled());
+ spin_lock(&zbpg->lock);
+ if (list_empty(&zbpg->bud_list)) {
+ /* ignore zombie page... see zbud_evict_pages() */
+ spin_unlock(&zbpg->lock);
+ return;
+ }
+ size = zbud_free(zh);
+ ASSERT_SPINLOCK(&zbpg->lock);
+ zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0];
+ if (zh_other->size == 0) { /* was unbuddied: unlist and free */
+		chunks = zbud_size_to_chunks(size);
+ spin_lock(&zbud_budlists_spinlock);
+ BUG_ON(list_empty(&zbud_unbuddied[chunks].list));
+ list_del_init(&zbpg->bud_list);
+ zbud_unbuddied[chunks].count--;
+ spin_unlock(&zbud_budlists_spinlock);
+ zbud_free_raw_page(zbpg);
+ } else { /* was buddied: move remaining buddy to unbuddied list */
+		chunks = zbud_size_to_chunks(zh_other->size);
+ spin_lock(&zbud_budlists_spinlock);
+ list_del_init(&zbpg->bud_list);
+ zcache_zbud_buddied_count--;
+ list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list);
+ zbud_unbuddied[chunks].count++;
+ spin_unlock(&zbud_budlists_spinlock);
+ spin_unlock(&zbpg->lock);
+ }
+}
+
+static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id,
+ struct tmem_oid *oid,
+ uint32_t index, struct page *page,
+ void *cdata, unsigned size)
+{
+ struct zbud_hdr *zh0, *zh1, *zh = NULL;
+ struct zbud_page *zbpg = NULL, *ztmp;
+ unsigned nchunks;
+ char *to;
+ int i, found_good_buddy = 0;
+
+	nchunks = zbud_size_to_chunks(size);
+ for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) {
+ spin_lock(&zbud_budlists_spinlock);
+ if (!list_empty(&zbud_unbuddied[i].list)) {
+ list_for_each_entry_safe(zbpg, ztmp,
+ &zbud_unbuddied[i].list, bud_list) {
+ if (spin_trylock(&zbpg->lock)) {
+ found_good_buddy = i;
+ goto found_unbuddied;
+ }
+ }
+ }
+ spin_unlock(&zbud_budlists_spinlock);
+ }
+ /* didn't find a good buddy, try allocating a new page */
+ zbpg = zbud_alloc_raw_page();
+ if (unlikely(zbpg == NULL))
+ goto out;
+	/* ok, have a new page; add it to the appropriate unbuddied list */
+ spin_lock(&zbud_budlists_spinlock);
+ spin_lock(&zbpg->lock);
+ list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list);
+ zbud_unbuddied[nchunks].count++;
+ zh = &zbpg->buddy[0];
+ goto init_zh;
+
+found_unbuddied:
+ ASSERT_SPINLOCK(&zbpg->lock);
+	zh0 = &zbpg->buddy[0];
+	zh1 = &zbpg->buddy[1];
+ BUG_ON(!((zh0->size == 0) ^ (zh1->size == 0)));
+ if (zh0->size != 0) { /* buddy0 in use, buddy1 is vacant */
+ ASSERT_SENTINEL(zh0, ZBH);
+ zh = zh1;
+ } else if (zh1->size != 0) { /* buddy1 in use, buddy0 is vacant */
+ ASSERT_SENTINEL(zh1, ZBH);
+ zh = zh0;
+ } else
+ BUG();
+ list_del_init(&zbpg->bud_list);
+ zbud_unbuddied[found_good_buddy].count--;
+ list_add_tail(&zbpg->bud_list, &zbud_buddied_list);
+ zcache_zbud_buddied_count++;
+
+init_zh:
+ SET_SENTINEL(zh, ZBH);
+ zh->size = size;
+ zh->index = index;
+ zh->oid = *oid;
+ zh->pool_id = pool_id;
+ zh->client_id = client_id;
+ to = zbud_data(zh, size);
+ memcpy(to, cdata, size);
+ spin_unlock(&zbpg->lock);
+ spin_unlock(&zbud_budlists_spinlock);
+ zbud_cumul_chunk_counts[nchunks]++;
+ atomic_inc(&zcache_zbud_curr_zpages);
+ zcache_zbud_cumul_zpages++;
+ zcache_zbud_curr_zbytes += size;
+ zcache_zbud_cumul_zbytes += size;
+out:
+ return zh;
+}
+
+static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
+{
+ struct zbud_page *zbpg;
+ unsigned budnum = zbud_budnum(zh);
+ size_t out_len = PAGE_SIZE;
+ char *to_va, *from_va;
+ unsigned size;
+ int ret = 0;
+
+ zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
+ spin_lock(&zbpg->lock);
+ if (list_empty(&zbpg->bud_list)) {
+ /* ignore zombie page... see zbud_evict_pages() */
+ ret = -EINVAL;
+ goto out;
+ }
+ ASSERT_SENTINEL(zh, ZBH);
+ BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
+ to_va = kmap_atomic(page);
+ size = zh->size;
+ from_va = zbud_data(zh, size);
+ ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
+ BUG_ON(ret != LZO_E_OK);
+ BUG_ON(out_len != PAGE_SIZE);
+ kunmap_atomic(to_va);
+out:
+ spin_unlock(&zbpg->lock);
+ return ret;
+}
+
+/*
+ * The following routines handle shrinking of ephemeral pages by evicting
+ * pages "least valuable" first.
+ */
+
+static unsigned long zcache_evicted_raw_pages;
+static unsigned long zcache_evicted_buddied_pages;
+static unsigned long zcache_evicted_unbuddied_pages;
+
+static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id,
+ uint16_t poolid);
+static void zcache_put_pool(struct tmem_pool *pool);
+
+/*
+ * Flush and free all zbuds in a zbpg, then free the pageframe
+ */
+static void zbud_evict_zbpg(struct zbud_page *zbpg)
+{
+ struct zbud_hdr *zh;
+ int i, j;
+ uint32_t pool_id[ZBUD_MAX_BUDS], client_id[ZBUD_MAX_BUDS];
+ uint32_t index[ZBUD_MAX_BUDS];
+ struct tmem_oid oid[ZBUD_MAX_BUDS];
+ struct tmem_pool *pool;
+ unsigned long flags;
+
+ ASSERT_SPINLOCK(&zbpg->lock);
+ for (i = 0, j = 0; i < ZBUD_MAX_BUDS; i++) {
+ zh = &zbpg->buddy[i];
+ if (zh->size) {
+ client_id[j] = zh->client_id;
+ pool_id[j] = zh->pool_id;
+ oid[j] = zh->oid;
+ index[j] = zh->index;
+ j++;
+ }
+ }
+ spin_unlock(&zbpg->lock);
+ for (i = 0; i < j; i++) {
+ pool = zcache_get_pool_by_id(client_id[i], pool_id[i]);
+ BUG_ON(pool == NULL);
+ local_irq_save(flags);
+ /* these flushes should dispose of any local storage */
+ tmem_flush_page(pool, &oid[i], index[i]);
+ local_irq_restore(flags);
+ zcache_put_pool(pool);
+ }
+}
+
+/*
+ * Free nr pages. This code is funky because we want to hold the locks
+ * protecting various lists for as short a time as possible, and in some
+ * circumstances the list may change asynchronously when the list lock is
+ * not held. In some cases we also trylock not only to avoid waiting on a
+ * page in use by another cpu, but also to avoid potential deadlock due to
+ * lock inversion.
+ */
+static void zbud_evict_pages(int nr)
+{
+ struct zbud_page *zbpg;
+ int i, newly_unused_pages = 0;
+
+ /* now try freeing unbuddied pages, starting with least space avail */
+ for (i = 0; i < MAX_CHUNK; i++) {
+retry_unbud_list_i:
+ spin_lock_bh(&zbud_budlists_spinlock);
+ if (list_empty(&zbud_unbuddied[i].list)) {
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ continue;
+ }
+ list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) {
+ if (unlikely(!spin_trylock(&zbpg->lock)))
+ continue;
+ zbud_unbuddied[i].count--;
+ spin_unlock(&zbud_budlists_spinlock);
+ zcache_evicted_unbuddied_pages++;
+ /* want budlists unlocked when doing zbpg eviction */
+ zbud_evict_zbpg(zbpg);
+ newly_unused_pages++;
+ local_bh_enable();
+ if (--nr <= 0)
+ goto evict_unused;
+ goto retry_unbud_list_i;
+ }
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ }
+
+ /* as a last resort, free buddied pages */
+retry_bud_list:
+ spin_lock_bh(&zbud_budlists_spinlock);
+ if (list_empty(&zbud_buddied_list)) {
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ goto evict_unused;
+ }
+ list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) {
+ if (unlikely(!spin_trylock(&zbpg->lock)))
+ continue;
+ zcache_zbud_buddied_count--;
+ spin_unlock(&zbud_budlists_spinlock);
+ zcache_evicted_buddied_pages++;
+ /* want budlists unlocked when doing zbpg eviction */
+ zbud_evict_zbpg(zbpg);
+ newly_unused_pages++;
+ local_bh_enable();
+ if (--nr <= 0)
+ goto evict_unused;
+ goto retry_bud_list;
+ }
+ spin_unlock_bh(&zbud_budlists_spinlock);
+
+evict_unused:
+ return;
+}
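+
+/*
+ * Minimal sketch of the trylock pattern used above (illustrative only,
+ * compiled out): take the list lock, trylock the page so we never wait
+ * on, or deadlock against, a cpu holding the page lock, then drop the
+ * list lock before the slow eviction work.
+ */
+#if 0
+static bool zbud_evict_one_example(struct zbud_page *zbpg)
+{
+	spin_lock_bh(&zbud_budlists_spinlock);
+	if (!spin_trylock(&zbpg->lock)) {	/* page busy: skip it */
+		spin_unlock_bh(&zbud_budlists_spinlock);
+		return false;
+	}
+	spin_unlock(&zbud_budlists_spinlock);	/* keep BHs off for now */
+	zbud_evict_zbpg(zbpg);		/* drops zbpg->lock internally */
+	local_bh_enable();
+	return true;
+}
+#endif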
+
+static DEFINE_PER_CPU(unsigned char *, zcache_remoteputmem);
+
+static int zbud_remotify_zbud(struct tmem_xhandle *xh, char *data,
+ size_t size)
+{
+ struct tmem_pool *pool;
+ int i, remotenode, ret = -1;
+ unsigned char cksum, *p;
+ unsigned long flags;
+
+ for (p = data, cksum = 0, i = 0; i < size; i++)
+		cksum += *p++;
+ ret = ramster_remote_put(xh, data, size, true, &remotenode);
+ if (ret == 0) {
+ /* data was successfully remoted so change the local version
+ * to point to the remote node where it landed */
+ pool = zcache_get_pool_by_id(LOCAL_CLIENT, xh->pool_id);
+ BUG_ON(pool == NULL);
+ local_irq_save(flags);
+ /* tmem_replace will also free up any local space */
+ (void)tmem_replace(pool, &xh->oid, xh->index,
+ pampd_make_remote(remotenode, size, cksum));
+ local_irq_restore(flags);
+ zcache_put_pool(pool);
+ ramster_eph_pages_remoted++;
+ ret = 0;
+ } else
+ ramster_eph_pages_remote_failed++;
+ return ret;
+}
+
+static int zbud_remotify_zbpg(struct zbud_page *zbpg)
+{
+ struct zbud_hdr *zh1, *zh2 = NULL;
+ struct tmem_xhandle xh1, xh2 = { 0 };
+ char *data1 = NULL, *data2 = NULL;
+ size_t size1 = 0, size2 = 0;
+ int ret = 0;
+ unsigned char *tmpmem = __get_cpu_var(zcache_remoteputmem);
+
+ ASSERT_SPINLOCK(&zbpg->lock);
+ if (zbpg->buddy[0].size == 0)
+ zh1 = &zbpg->buddy[1];
+ else if (zbpg->buddy[1].size == 0)
+ zh1 = &zbpg->buddy[0];
+ else {
+ zh1 = &zbpg->buddy[0];
+ zh2 = &zbpg->buddy[1];
+ }
+ /* don't remotify pages that are already remotified */
+ if (zh1->client_id != LOCAL_CLIENT)
+ zh1 = NULL;
+ if ((zh2 != NULL) && (zh2->client_id != LOCAL_CLIENT))
+ zh2 = NULL;
+
+ /* copy the data and metadata so can release lock */
+ if (zh1 != NULL) {
+ xh1.client_id = zh1->client_id;
+ xh1.pool_id = zh1->pool_id;
+ xh1.oid = zh1->oid;
+ xh1.index = zh1->index;
+ size1 = zh1->size;
+ data1 = zbud_data(zh1, size1);
+ memcpy(tmpmem, zbud_data(zh1, size1), size1);
+ data1 = tmpmem;
+ tmpmem += size1;
+ }
+ if (zh2 != NULL) {
+ xh2.client_id = zh2->client_id;
+ xh2.pool_id = zh2->pool_id;
+ xh2.oid = zh2->oid;
+ xh2.index = zh2->index;
+ size2 = zh2->size;
+ memcpy(tmpmem, zbud_data(zh2, size2), size2);
+ data2 = tmpmem;
+ }
+ spin_unlock(&zbpg->lock);
+ preempt_enable();
+
+ /* OK, no locks held anymore, remotify one or both zbuds */
+ if (zh1 != NULL)
+ ret = zbud_remotify_zbud(&xh1, data1, size1);
+ if (zh2 != NULL)
+ ret |= zbud_remotify_zbud(&xh2, data2, size2);
+ return ret;
+}
+
+void zbud_remotify_pages(int nr)
+{
+ struct zbud_page *zbpg;
+ int i, ret;
+
+ /*
+ * for now just try remotifying unbuddied pages, starting with
+ * least space avail
+ */
+ for (i = 0; i < MAX_CHUNK; i++) {
+retry_unbud_list_i:
+ preempt_disable(); /* enable in zbud_remotify_zbpg */
+ spin_lock_bh(&zbud_budlists_spinlock);
+ if (list_empty(&zbud_unbuddied[i].list)) {
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ preempt_enable();
+ continue; /* next i in for loop */
+ }
+ list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) {
+ if (unlikely(!spin_trylock(&zbpg->lock)))
+ continue; /* next list_for_each_entry */
+ zbud_unbuddied[i].count--;
+ /* want budlists unlocked when doing zbpg remotify */
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ ret = zbud_remotify_zbpg(zbpg);
+ /* preemption is re-enabled in zbud_remotify_zbpg */
+ if (ret == 0) {
+ if (--nr <= 0)
+ goto out;
+ goto retry_unbud_list_i;
+ }
+ /* if fail to remotify any page, quit */
+ pr_err("TESTING zbud_remotify_pages failed on page,"
+ " trying to re-add\n");
+ spin_lock_bh(&zbud_budlists_spinlock);
+ spin_lock(&zbpg->lock);
+ list_add_tail(&zbpg->bud_list, &zbud_unbuddied[i].list);
+ zbud_unbuddied[i].count++;
+ spin_unlock(&zbpg->lock);
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ pr_err("TESTING zbud_remotify_pages failed on page,"
+ " finished re-add\n");
+ goto out;
+ }
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ preempt_enable();
+ }
+
+next_buddied_zbpg:
+ preempt_disable(); /* enable in zbud_remotify_zbpg */
+ spin_lock_bh(&zbud_budlists_spinlock);
+ if (list_empty(&zbud_buddied_list))
+ goto unlock_out;
+ list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) {
+ if (unlikely(!spin_trylock(&zbpg->lock)))
+ continue; /* next list_for_each_entry */
+ zcache_zbud_buddied_count--;
+ /* want budlists unlocked when doing zbpg remotify */
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ ret = zbud_remotify_zbpg(zbpg);
+ /* preemption is re-enabled in zbud_remotify_zbpg */
+ if (ret == 0) {
+ if (--nr <= 0)
+ goto out;
+ goto next_buddied_zbpg;
+ }
+ /* if fail to remotify any page, quit */
+ pr_err("TESTING zbud_remotify_pages failed on BUDDIED page,"
+ " trying to re-add\n");
+ spin_lock_bh(&zbud_budlists_spinlock);
+ spin_lock(&zbpg->lock);
+ list_add_tail(&zbpg->bud_list, &zbud_buddied_list);
+ zcache_zbud_buddied_count++;
+ spin_unlock(&zbpg->lock);
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ pr_err("TESTING zbud_remotify_pages failed on BUDDIED page,"
+ " finished re-add\n");
+ goto out;
+ }
+unlock_out:
+ spin_unlock_bh(&zbud_budlists_spinlock);
+ preempt_enable();
+out:
+ return;
+}
+
+/* the "flush list" asynchronously collects pages to remotely flush */
+#define FLUSH_ENTIRE_OBJECT ((uint32_t)-1)
+static void ramster_flnode_free(struct flushlist_node *,
+ struct tmem_pool *);
+
+static void zcache_remote_flush_page(struct flushlist_node *flnode)
+{
+ struct tmem_xhandle *xh;
+ int remotenode, ret;
+
+ preempt_disable();
+ xh = &flnode->xh;
+ remotenode = flnode->xh.client_id;
+ ret = ramster_remote_flush(xh, remotenode);
+ if (ret >= 0)
+ ramster_remote_pages_flushed++;
+ else
+ ramster_remote_page_flushes_failed++;
+ preempt_enable_no_resched();
+ ramster_flnode_free(flnode, NULL);
+}
+
+static void zcache_remote_flush_object(struct flushlist_node *flnode)
+{
+ struct tmem_xhandle *xh;
+ int remotenode, ret;
+
+ preempt_disable();
+ xh = &flnode->xh;
+ remotenode = flnode->xh.client_id;
+ ret = ramster_remote_flush_object(xh, remotenode);
+ if (ret >= 0)
+ ramster_remote_objects_flushed++;
+ else
+ ramster_remote_object_flushes_failed++;
+ preempt_enable_no_resched();
+ ramster_flnode_free(flnode, NULL);
+}
+
+static void zcache_remote_eph_put(struct zbud_hdr *zbud)
+{
+ /* FIXME */
+}
+
+static void zcache_remote_pers_put(struct zv_hdr *zv)
+{
+ struct tmem_xhandle xh;
+ uint16_t size;
+ bool ephemeral;
+ int remotenode, ret = -1;
+ char *data;
+ struct tmem_pool *pool;
+ unsigned long flags;
+ unsigned char cksum;
+ char *p;
+ int i;
+ unsigned char *tmpmem = __get_cpu_var(zcache_remoteputmem);
+
+ ASSERT_SENTINEL(zv, ZVH);
+ BUG_ON(zv->client_id != LOCAL_CLIENT);
+ local_bh_disable();
+ xh.client_id = zv->client_id;
+ xh.pool_id = zv->pool_id;
+ xh.oid = zv->oid;
+ xh.index = zv->index;
+ size = xv_get_object_size(zv) - sizeof(*zv);
+ BUG_ON(size == 0 || size > zv_max_page_size);
+ data = (char *)zv + sizeof(*zv);
+ for (p = data, cksum = 0, i = 0; i < size; i++)
+		cksum += *p++;
+ memcpy(tmpmem, data, size);
+ data = tmpmem;
+ pool = zcache_get_pool_by_id(zv->client_id, zv->pool_id);
+ ephemeral = is_ephemeral(pool);
+ zcache_put_pool(pool);
+ /* now OK to release lock set in caller */
+ spin_unlock(&zcache_rem_op_list_lock);
+ local_bh_enable();
+ preempt_disable();
+ ret = ramster_remote_put(&xh, data, size, ephemeral, &remotenode);
+ preempt_enable_no_resched();
+ if (ret != 0) {
+ /*
+ * This is some form of a memory leak... if the remote put
+ * fails, there will never be another attempt to remotify
+ * this page. But since we've dropped the zv pointer,
+ * the page may have been freed or the data replaced
+ * so we can't just "put it back" in the remote op list.
+ * Even if we could, not sure where to put it in the list
+ * because there may be flushes that must be strictly
+ * ordered vs the put. So leave this as a FIXME for now.
+ * But count them so we know if it becomes a problem.
+ */
+ ramster_pers_pages_remote_failed++;
+ goto out;
+	}
+	atomic_inc(&ramster_remote_pers_pages);
+	ramster_pers_pages_remoted++;
+ /*
+ * data was successfully remoted so change the local version to
+ * point to the remote node where it landed
+ */
+ local_bh_disable();
+ pool = zcache_get_pool_by_id(LOCAL_CLIENT, xh.pool_id);
+ local_irq_save(flags);
+ (void)tmem_replace(pool, &xh.oid, xh.index,
+ pampd_make_remote(remotenode, size, cksum));
+ local_irq_restore(flags);
+ zcache_put_pool(pool);
+ local_bh_enable();
+out:
+ return;
+}
+
+static void zcache_do_remotify_ops(int nr)
+{
+ struct ramster_remotify_hdr *rem_op;
+ union remotify_list_node *u;
+
+ while (1) {
+		if (nr-- <= 0)
+ goto out;
+ spin_lock(&zcache_rem_op_list_lock);
+ if (list_empty(&zcache_rem_op_list)) {
+ spin_unlock(&zcache_rem_op_list_lock);
+ goto out;
+ }
+ rem_op = list_first_entry(&zcache_rem_op_list,
+ struct ramster_remotify_hdr, list);
+ list_del_init(&rem_op->list);
+ if (rem_op->op != RAMSTER_REMOTIFY_PERS_PUT)
+ spin_unlock(&zcache_rem_op_list_lock);
+ u = (union remotify_list_node *)rem_op;
+ switch (rem_op->op) {
+ case RAMSTER_REMOTIFY_EPH_PUT:
+			/* not yet implemented (see zcache_remote_eph_put) */
+			BUG();
+ zcache_remote_eph_put((struct zbud_hdr *)rem_op);
+ break;
+ case RAMSTER_REMOTIFY_PERS_PUT:
+ zcache_remote_pers_put((struct zv_hdr *)rem_op);
+ break;
+ case RAMSTER_REMOTIFY_FLUSH_PAGE:
+ zcache_remote_flush_page((struct flushlist_node *)u);
+ break;
+ case RAMSTER_REMOTIFY_FLUSH_OBJ:
+ zcache_remote_flush_object((struct flushlist_node *)u);
+ break;
+ default:
+ BUG();
+ }
+ }
+out:
+ return;
+}
+
+/*
+ * Communicate interface revision with userspace
+ */
+#include "cluster/ramster_nodemanager.h"
+static unsigned long ramster_interface_revision = R2NM_API_VERSION;
+
+/*
+ * For now, just push over a few pages every few seconds to
+ * ensure that it basically works
+ */
+static struct workqueue_struct *ramster_remotify_workqueue;
+static void ramster_remotify_process(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ramster_remotify_worker,
+ ramster_remotify_process);
+
+static void ramster_remotify_queue_delayed_work(unsigned long delay)
+{
+ if (!queue_delayed_work(ramster_remotify_workqueue,
+ &ramster_remotify_worker, delay))
+ pr_err("ramster_remotify: bad workqueue\n");
+}
+
+static int use_frontswap;
+static int use_cleancache;
+static int ramster_remote_target_nodenum = -1;
+static void ramster_remotify_process(struct work_struct *work)
+{
+ static bool remotify_in_progress;
+
+ BUG_ON(irqs_disabled());
+ if (remotify_in_progress)
+ ramster_remotify_queue_delayed_work(HZ);
+ else if (ramster_remote_target_nodenum != -1) {
+ remotify_in_progress = true;
+#ifdef CONFIG_CLEANCACHE
+ if (use_cleancache && ramster_eph_remotify_enable)
+ zbud_remotify_pages(5000); /* FIXME is this a good number? */
+#endif
+#ifdef CONFIG_FRONTSWAP
+ if (use_frontswap && ramster_pers_remotify_enable)
+ zcache_do_remotify_ops(500); /* FIXME is this a good number? */
+#endif
+ remotify_in_progress = false;
+ ramster_remotify_queue_delayed_work(HZ);
+ }
+}
+
+static void ramster_remotify_init(void)
+{
+ unsigned long n = 60UL;
+ ramster_remotify_workqueue =
+ create_singlethread_workqueue("ramster_remotify");
+ ramster_remotify_queue_delayed_work(n * HZ);
+}
+
+static void zbud_init(void)
+{
+ int i;
+
+ INIT_LIST_HEAD(&zbud_buddied_list);
+ zcache_zbud_buddied_count = 0;
+ for (i = 0; i < NCHUNKS; i++) {
+ INIT_LIST_HEAD(&zbud_unbuddied[i].list);
+ zbud_unbuddied[i].count = 0;
+ }
+}
+
+#ifdef CONFIG_SYSFS
+/*
+ * These sysfs routines show a nice distribution of how many zbpg's are
+ * currently (and have ever been placed) in each unbuddied list. It's fun
+ * to watch but can probably go away before final merge.
+ */
+static int zbud_show_unbuddied_list_counts(char *buf)
+{
+ int i;
+ char *p = buf;
+
+ for (i = 0; i < NCHUNKS; i++)
+ p += sprintf(p, "%u ", zbud_unbuddied[i].count);
+ return p - buf;
+}
+
+static int zbud_show_cumul_chunk_counts(char *buf)
+{
+ unsigned long i, chunks = 0, total_chunks = 0, sum_total_chunks = 0;
+ unsigned long total_chunks_lte_21 = 0, total_chunks_lte_32 = 0;
+ unsigned long total_chunks_lte_42 = 0;
+ char *p = buf;
+
+ for (i = 0; i < NCHUNKS; i++) {
+ p += sprintf(p, "%lu ", zbud_cumul_chunk_counts[i]);
+ chunks += zbud_cumul_chunk_counts[i];
+ total_chunks += zbud_cumul_chunk_counts[i];
+ sum_total_chunks += i * zbud_cumul_chunk_counts[i];
+ if (i == 21)
+ total_chunks_lte_21 = total_chunks;
+ if (i == 32)
+ total_chunks_lte_32 = total_chunks;
+ if (i == 42)
+ total_chunks_lte_42 = total_chunks;
+ }
+ p += sprintf(p, "<=21:%lu <=32:%lu <=42:%lu, mean:%lu\n",
+ total_chunks_lte_21, total_chunks_lte_32, total_chunks_lte_42,
+ chunks == 0 ? 0 : sum_total_chunks / chunks);
+ return p - buf;
+}
+#endif
+
+/**********
+ * This "zv" PAM implementation combines the TLSF-based xvMalloc
+ * with lzo1x compression to maximize the amount of data that can
+ * be packed into a physical page.
+ *
+ * Zv represents a PAM page with the index and object (plus a "size" value
+ * necessary for decompression) immediately preceding the compressed data.
+ */
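+
+/*
+ * Illustrative layout of one zv allocation (sizes are examples, not
+ * normative): a page compressing to clen == 2000 bytes asks xvmalloc
+ * for sizeof(struct zv_hdr) + 2000 bytes, laid out as
+ *
+ *   | struct zv_hdr | 2000 bytes of lzo1x-compressed data |
+ *
+ * so readers recover the payload at (char *)zv + sizeof(*zv) and its
+ * length as xv_get_object_size(zv) - sizeof(*zv).
+ */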
+
+/* rudimentary policy limits */
+/* total number of persistent pages may not exceed this percentage */
+static unsigned int zv_page_count_policy_percent = 75;
+/*
+ * byte count defining poor compression; pages with greater zsize will be
+ * rejected
+ */
+static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7;
+/*
+ * byte count defining poor *mean* compression; pages with greater zsize
+ * will be rejected until sufficient better-compressed pages are accepted
+ * driving the mean below this threshold
+ */
+static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
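+
+/*
+ * Worked example (assuming PAGE_SIZE == 4096): zv_max_zsize is then
+ * (4096 / 8) * 7 == 3584 and zv_max_mean_zsize is (4096 / 8) * 5 ==
+ * 2560, i.e. a page compressing to more than 3584 bytes is always
+ * rejected, and one compressing to 2561..3584 bytes is rejected while
+ * the current mean zsize exceeds 2560 bytes.
+ */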
+
+static atomic_t zv_curr_dist_counts[NCHUNKS];
+static atomic_t zv_cumul_dist_counts[NCHUNKS];
+
+static struct zv_hdr *zv_create(struct zcache_client *cli, uint32_t pool_id,
+ struct tmem_oid *oid, uint32_t index,
+ void *cdata, unsigned clen)
+{
+ struct page *page;
+ struct zv_hdr *zv = NULL;
+ uint32_t offset;
+ int alloc_size = clen + sizeof(struct zv_hdr);
+ int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
+ int ret;
+
+ BUG_ON(!irqs_disabled());
+ BUG_ON(chunks >= NCHUNKS);
+ ret = xv_malloc(cli->xvpool, clen + sizeof(struct zv_hdr),
+ &page, &offset, ZCACHE_GFP_MASK);
+ if (unlikely(ret))
+ goto out;
+ atomic_inc(&zv_curr_dist_counts[chunks]);
+ atomic_inc(&zv_cumul_dist_counts[chunks]);
+ zv = kmap_atomic(page) + offset;
+ zv->index = index;
+ zv->oid = *oid;
+ zv->pool_id = pool_id;
+ SET_SENTINEL(zv, ZVH);
+ INIT_LIST_HEAD(&zv->rem_op.list);
+ zv->client_id = get_client_id_from_client(cli);
+ zv->rem_op.op = RAMSTER_REMOTIFY_PERS_PUT;
+ if (zv->client_id == LOCAL_CLIENT) {
+ spin_lock(&zcache_rem_op_list_lock);
+ list_add_tail(&zv->rem_op.list, &zcache_rem_op_list);
+ spin_unlock(&zcache_rem_op_list_lock);
+ }
+ memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
+ kunmap_atomic(zv);
+out:
+ return zv;
+}
+
+/* similar to zv_create, but just reserve space, no data yet */
+static struct zv_hdr *zv_alloc(struct tmem_pool *pool,
+ struct tmem_oid *oid, uint32_t index,
+ unsigned clen)
+{
+ struct zcache_client *cli = pool->client;
+ struct page *page;
+ struct zv_hdr *zv = NULL;
+ uint32_t offset;
+ int ret;
+
+ BUG_ON(!irqs_disabled());
+ BUG_ON(!is_local_client(pool->client));
+ ret = xv_malloc(cli->xvpool, clen + sizeof(struct zv_hdr),
+ &page, &offset, ZCACHE_GFP_MASK);
+ if (unlikely(ret))
+ goto out;
+ zv = kmap_atomic(page) + offset;
+ SET_SENTINEL(zv, ZVH);
+ INIT_LIST_HEAD(&zv->rem_op.list);
+ zv->client_id = LOCAL_CLIENT;
+ zv->rem_op.op = RAMSTER_INTRANSIT_PERS;
+ zv->index = index;
+ zv->oid = *oid;
+ zv->pool_id = pool->pool_id;
+ kunmap_atomic(zv);
+out:
+ return zv;
+}
+
+static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
+{
+ unsigned long flags;
+ struct page *page;
+ uint32_t offset;
+ uint16_t size = xv_get_object_size(zv);
+ int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
+
+ ASSERT_SENTINEL(zv, ZVH);
+ BUG_ON(chunks >= NCHUNKS);
+ atomic_dec(&zv_curr_dist_counts[chunks]);
+	spin_lock(&zcache_rem_op_list_lock);
+	size = xv_get_object_size(zv) - sizeof(*zv);
+ BUG_ON(size == 0);
+ INVERT_SENTINEL(zv, ZVH);
+ if (!list_empty(&zv->rem_op.list))
+ list_del_init(&zv->rem_op.list);
+ spin_unlock(&zcache_rem_op_list_lock);
+ page = virt_to_page(zv);
+ offset = (unsigned long)zv & ~PAGE_MASK;
+ local_irq_save(flags);
+ xv_free(xvpool, page, offset);
+ local_irq_restore(flags);
+}
+
+static void zv_decompress(struct page *page, struct zv_hdr *zv)
+{
+ size_t clen = PAGE_SIZE;
+ char *to_va;
+ unsigned size;
+ int ret;
+
+ ASSERT_SENTINEL(zv, ZVH);
+ size = xv_get_object_size(zv) - sizeof(*zv);
+ BUG_ON(size == 0);
+ to_va = kmap_atomic(page);
+ ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
+ size, to_va, &clen);
+ kunmap_atomic(to_va);
+ BUG_ON(ret != LZO_E_OK);
+ BUG_ON(clen != PAGE_SIZE);
+}
+
+static void zv_copy_from_pampd(char *data, size_t *bufsize, struct zv_hdr *zv)
+{
+ unsigned size;
+
+ ASSERT_SENTINEL(zv, ZVH);
+ size = xv_get_object_size(zv) - sizeof(*zv);
+ BUG_ON(size == 0 || size > zv_max_page_size);
+ BUG_ON(size > *bufsize);
+ memcpy(data, (char *)zv + sizeof(*zv), size);
+ *bufsize = size;
+}
+
+static void zv_copy_to_pampd(struct zv_hdr *zv, char *data, size_t size)
+{
+ unsigned zv_size;
+
+ ASSERT_SENTINEL(zv, ZVH);
+ zv_size = xv_get_object_size(zv) - sizeof(*zv);
+ BUG_ON(zv_size != size);
+ BUG_ON(zv_size == 0 || zv_size > zv_max_page_size);
+ memcpy((char *)zv + sizeof(*zv), data, size);
+}
+
+#ifdef CONFIG_SYSFS
+/*
+ * show a distribution of compression stats for zv pages.
+ */
+
+static int zv_curr_dist_counts_show(char *buf)
+{
+ unsigned long i, n, chunks = 0, sum_total_chunks = 0;
+ char *p = buf;
+
+ for (i = 0; i < NCHUNKS; i++) {
+ n = atomic_read(&zv_curr_dist_counts[i]);
+ p += sprintf(p, "%lu ", n);
+ chunks += n;
+ sum_total_chunks += i * n;
+ }
+ p += sprintf(p, "mean:%lu\n",
+ chunks == 0 ? 0 : sum_total_chunks / chunks);
+ return p - buf;
+}
+
+static int zv_cumul_dist_counts_show(char *buf)
+{
+ unsigned long i, n, chunks = 0, sum_total_chunks = 0;
+ char *p = buf;
+
+ for (i = 0; i < NCHUNKS; i++) {
+ n = atomic_read(&zv_cumul_dist_counts[i]);
+ p += sprintf(p, "%lu ", n);
+ chunks += n;
+ sum_total_chunks += i * n;
+ }
+ p += sprintf(p, "mean:%lu\n",
+ chunks == 0 ? 0 : sum_total_chunks / chunks);
+ return p - buf;
+}
+
+/*
+ * setting zv_max_zsize via sysfs causes all persistent (e.g. swap)
+ * pages that don't compress to less than this value (including metadata
+ * overhead) to be rejected. We don't allow the value to get too close
+ * to PAGE_SIZE.
+ */
+static ssize_t zv_max_zsize_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", zv_max_zsize);
+}
+
+static ssize_t zv_max_zsize_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
+ return -EINVAL;
+ zv_max_zsize = val;
+ return count;
+}
+
+/*
+ * setting zv_max_mean_zsize via sysfs causes all persistent (e.g. swap)
+ * pages that don't compress to less than this value (including metadata
+ * overhead) to be rejected UNLESS the mean compression is also smaller
+ * than this value. In other words, we are load-balancing-by-zsize the
+ * accepted pages. Again, we don't allow the value to get too close
+ * to PAGE_SIZE.
+ */
+static ssize_t zv_max_mean_zsize_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", zv_max_mean_zsize);
+}
+
+static ssize_t zv_max_mean_zsize_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
+ return -EINVAL;
+ zv_max_mean_zsize = val;
+ return count;
+}
+
+/*
+ * setting zv_page_count_policy_percent via sysfs sets an upper bound of
+ * persistent (e.g. swap) pages that will be retained according to:
+ * (zv_page_count_policy_percent * totalram_pages) / 100;
+ * when that limit is reached, further puts will be rejected (until
+ * some pages have been flushed). Note that, due to compression,
+ * this number may exceed 100; it defaults to 75 and we set an
+ * arbitrary limit of 150. A poor choice will almost certainly result
+ * in OOMs, so this value should only be changed prudently.
+ */
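+/*
+ * Worked example (hypothetical machine): with totalram_pages == 262144
+ * (1 GiB of 4 KiB pages) and the default of 75, up to 196608 persistent
+ * tmem pages are retained; at a mean zsize of ~2 KiB those occupy
+ * roughly 98304 pageframes (~384 MiB).
+ */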
+static ssize_t zv_page_count_policy_percent_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", zv_page_count_policy_percent);
+}
+
+static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err || (val == 0) || (val > 150))
+ return -EINVAL;
+ zv_page_count_policy_percent = val;
+ return count;
+}
+
+static struct kobj_attribute zcache_zv_max_zsize_attr = {
+ .attr = { .name = "zv_max_zsize", .mode = 0644 },
+ .show = zv_max_zsize_show,
+ .store = zv_max_zsize_store,
+};
+
+static struct kobj_attribute zcache_zv_max_mean_zsize_attr = {
+ .attr = { .name = "zv_max_mean_zsize", .mode = 0644 },
+ .show = zv_max_mean_zsize_show,
+ .store = zv_max_mean_zsize_store,
+};
+
+static struct kobj_attribute zcache_zv_page_count_policy_percent_attr = {
+ .attr = { .name = "zv_page_count_policy_percent",
+ .mode = 0644 },
+ .show = zv_page_count_policy_percent_show,
+ .store = zv_page_count_policy_percent_store,
+};
+#endif
+
+/*
+ * zcache core code starts here
+ */
+
+/* useful stats not collected by cleancache or frontswap */
+static unsigned long zcache_flush_total;
+static unsigned long zcache_flush_found;
+static unsigned long zcache_flobj_total;
+static unsigned long zcache_flobj_found;
+static unsigned long zcache_failed_eph_puts;
+static unsigned long zcache_nonactive_puts;
+static unsigned long zcache_failed_pers_puts;
+
+/*
+ * Tmem operations assume the poolid implies the invoking client.
+ * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
+ * RAMster has each client numbered by cluster node, and a KVM version
+ * of zcache would have one client per guest and each client might
+ * have a poolid==N.
+ */
+static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
+{
+ struct tmem_pool *pool = NULL;
+ struct zcache_client *cli = NULL;
+
+ if (cli_id == LOCAL_CLIENT)
+ cli = &zcache_host;
+ else {
+ if (cli_id >= MAX_CLIENTS)
+ goto out;
+ cli = &zcache_clients[cli_id];
+ if (cli == NULL)
+ goto out;
+ atomic_inc(&cli->refcount);
+ }
+ if (poolid < MAX_POOLS_PER_CLIENT) {
+ pool = cli->tmem_pools[poolid];
+ if (pool != NULL)
+ atomic_inc(&pool->refcount);
+ }
+out:
+ return pool;
+}
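+
+/*
+ * Example lookups (hypothetical ids): the kernel's own cleancache pool 0
+ * is zcache_get_pool_by_id(LOCAL_CLIENT, 0); a page put by remote
+ * cluster node 3 into its pool 1 is zcache_get_pool_by_id(3, 1).
+ */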
+
+static void zcache_put_pool(struct tmem_pool *pool)
+{
+ struct zcache_client *cli = NULL;
+
+ if (pool == NULL)
+ BUG();
+ cli = pool->client;
+ atomic_dec(&pool->refcount);
+ atomic_dec(&cli->refcount);
+}
+
+int zcache_new_client(uint16_t cli_id)
+{
+ struct zcache_client *cli = NULL;
+ int ret = -1;
+
+ if (cli_id == LOCAL_CLIENT)
+ cli = &zcache_host;
+ else if ((unsigned int)cli_id < MAX_CLIENTS)
+ cli = &zcache_clients[cli_id];
+ if (cli == NULL)
+ goto out;
+ if (cli->allocated)
+ goto out;
+ cli->allocated = 1;
+#ifdef CONFIG_FRONTSWAP
+ cli->xvpool = xv_create_pool();
+ if (cli->xvpool == NULL)
+ goto out;
+#endif
+ ret = 0;
+out:
+ return ret;
+}
+
+/* counters for debugging */
+static unsigned long zcache_failed_get_free_pages;
+static unsigned long zcache_failed_alloc;
+static unsigned long zcache_put_to_flush;
+
+/*
+ * for now, used named slabs so can easily track usage; later can
+ * either just use kmalloc, or perhaps add a slab-like allocator
+ * to more carefully manage total memory utilization
+ */
+static struct kmem_cache *zcache_objnode_cache;
+static struct kmem_cache *zcache_obj_cache;
+static struct kmem_cache *ramster_flnode_cache;
+static atomic_t zcache_curr_obj_count = ATOMIC_INIT(0);
+static unsigned long zcache_curr_obj_count_max;
+static atomic_t zcache_curr_objnode_count = ATOMIC_INIT(0);
+static unsigned long zcache_curr_objnode_count_max;
+
+/*
+ * to avoid memory allocation recursion (e.g. due to direct reclaim), we
+ * preload all necessary data structures so the hostops callbacks never
+ * actually do a malloc
+ */
+struct zcache_preload {
+ void *page;
+ struct tmem_obj *obj;
+ int nr;
+ struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH];
+ struct flushlist_node *flnode;
+};
+static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
+
+static int zcache_do_preload(struct tmem_pool *pool)
+{
+ struct zcache_preload *kp;
+ struct tmem_objnode *objnode;
+ struct tmem_obj *obj;
+ struct flushlist_node *flnode;
+ void *page;
+ int ret = -ENOMEM;
+
+ if (unlikely(zcache_objnode_cache == NULL))
+ goto out;
+ if (unlikely(zcache_obj_cache == NULL))
+ goto out;
+ preempt_disable();
+ kp = &__get_cpu_var(zcache_preloads);
+ while (kp->nr < ARRAY_SIZE(kp->objnodes)) {
+ preempt_enable_no_resched();
+ objnode = kmem_cache_alloc(zcache_objnode_cache,
+ ZCACHE_GFP_MASK);
+ if (unlikely(objnode == NULL)) {
+ zcache_failed_alloc++;
+ goto out;
+ }
+ preempt_disable();
+ kp = &__get_cpu_var(zcache_preloads);
+ if (kp->nr < ARRAY_SIZE(kp->objnodes))
+ kp->objnodes[kp->nr++] = objnode;
+ else
+ kmem_cache_free(zcache_objnode_cache, objnode);
+ }
+ preempt_enable_no_resched();
+ obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
+ if (unlikely(obj == NULL)) {
+ zcache_failed_alloc++;
+ goto out;
+ }
+ flnode = kmem_cache_alloc(ramster_flnode_cache, ZCACHE_GFP_MASK);
+ if (unlikely(flnode == NULL)) {
+ zcache_failed_alloc++;
+ goto out;
+ }
+ if (is_ephemeral(pool)) {
+ page = (void *)__get_free_page(ZCACHE_GFP_MASK);
+ if (unlikely(page == NULL)) {
+ zcache_failed_get_free_pages++;
+ kmem_cache_free(zcache_obj_cache, obj);
+ kmem_cache_free(ramster_flnode_cache, flnode);
+ goto out;
+ }
+ }
+ preempt_disable();
+ kp = &__get_cpu_var(zcache_preloads);
+ if (kp->obj == NULL)
+ kp->obj = obj;
+ else
+ kmem_cache_free(zcache_obj_cache, obj);
+ if (kp->flnode == NULL)
+ kp->flnode = flnode;
+ else
+ kmem_cache_free(ramster_flnode_cache, flnode);
+ if (is_ephemeral(pool)) {
+ if (kp->page == NULL)
+ kp->page = page;
+ else
+ free_page((unsigned long)page);
+ }
+ ret = 0;
+out:
+ return ret;
+}
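+
+/*
+ * Sketch of the intended put-side call sequence (illustrative only,
+ * compiled out; tmem_put() stands in for whichever tmem operation the
+ * caller performs): fill the per-cpu stash first, so the hostops
+ * callbacks below never allocate. Note zcache_do_preload() returns
+ * with preemption disabled on success, pinning us to the cpu whose
+ * stash was filled.
+ */
+#if 0
+static int zcache_put_example(struct tmem_pool *pool)
+{
+	if (zcache_do_preload(pool) != 0)
+		return -ENOMEM;
+	/* ... do the tmem operation here, e.g. tmem_put(pool, ...);
+	 * obj/objnode/page allocations come from zcache_preloads ... */
+	preempt_enable();
+	return 0;
+}
+#endif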
+
+static int ramster_do_preload_flnode_only(struct tmem_pool *pool)
+{
+ struct zcache_preload *kp;
+ struct flushlist_node *flnode;
+ int ret = -ENOMEM;
+
+ BUG_ON(!irqs_disabled());
+ if (unlikely(ramster_flnode_cache == NULL))
+ BUG();
+ kp = &__get_cpu_var(zcache_preloads);
+ flnode = kmem_cache_alloc(ramster_flnode_cache, GFP_ATOMIC);
+ if (unlikely(flnode == NULL) && kp->flnode == NULL)
+ BUG(); /* FIXME handle more gracefully, but how??? */
+ else if (kp->flnode == NULL)
+ kp->flnode = flnode;
+ else
+ kmem_cache_free(ramster_flnode_cache, flnode);
+	ret = 0;	/* a flnode is now guaranteed to be stashed */
+	return ret;
+}
+
+static void *zcache_get_free_page(void)
+{
+ struct zcache_preload *kp;
+ void *page;
+
+ kp = &__get_cpu_var(zcache_preloads);
+ page = kp->page;
+ BUG_ON(page == NULL);
+ kp->page = NULL;
+ return page;
+}
+
+static void zcache_free_page(void *p)
+{
+ free_page((unsigned long)p);
+}
+
+/*
+ * zcache implementation for tmem host ops
+ */
+
+static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
+{
+ struct tmem_objnode *objnode = NULL;
+ unsigned long count;
+ struct zcache_preload *kp;
+
+ kp = &__get_cpu_var(zcache_preloads);
+ if (kp->nr <= 0)
+ goto out;
+ objnode = kp->objnodes[kp->nr - 1];
+ BUG_ON(objnode == NULL);
+ kp->objnodes[kp->nr - 1] = NULL;
+ kp->nr--;
+ count = atomic_inc_return(&zcache_curr_objnode_count);
+ if (count > zcache_curr_objnode_count_max)
+ zcache_curr_objnode_count_max = count;
+out:
+ return objnode;
+}
+
+static void zcache_objnode_free(struct tmem_objnode *objnode,
+ struct tmem_pool *pool)
+{
+ atomic_dec(&zcache_curr_objnode_count);
+ BUG_ON(atomic_read(&zcache_curr_objnode_count) < 0);
+ kmem_cache_free(zcache_objnode_cache, objnode);
+}
+
+static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
+{
+ struct tmem_obj *obj = NULL;
+ unsigned long count;
+ struct zcache_preload *kp;
+
+ kp = &__get_cpu_var(zcache_preloads);
+ obj = kp->obj;
+ BUG_ON(obj == NULL);
+ kp->obj = NULL;
+ count = atomic_inc_return(&zcache_curr_obj_count);
+ if (count > zcache_curr_obj_count_max)
+ zcache_curr_obj_count_max = count;
+ return obj;
+}
+
+static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
+{
+ atomic_dec(&zcache_curr_obj_count);
+ BUG_ON(atomic_read(&zcache_curr_obj_count) < 0);
+ kmem_cache_free(zcache_obj_cache, obj);
+}
+
+static struct flushlist_node *ramster_flnode_alloc(struct tmem_pool *pool)
+{
+ struct flushlist_node *flnode = NULL;
+ struct zcache_preload *kp;
+ int count;
+
+ kp = &__get_cpu_var(zcache_preloads);
+ flnode = kp->flnode;
+ BUG_ON(flnode == NULL);
+ kp->flnode = NULL;
+ count = atomic_inc_return(&ramster_curr_flnode_count);
+ if (count > ramster_curr_flnode_count_max)
+ ramster_curr_flnode_count_max = count;
+ return flnode;
+}
+
+static void ramster_flnode_free(struct flushlist_node *flnode,
+ struct tmem_pool *pool)
+{
+ atomic_dec(&ramster_curr_flnode_count);
+ BUG_ON(atomic_read(&ramster_curr_flnode_count) < 0);
+ kmem_cache_free(ramster_flnode_cache, flnode);
+}
+
+static struct tmem_hostops zcache_hostops = {
+ .obj_alloc = zcache_obj_alloc,
+ .obj_free = zcache_obj_free,
+ .objnode_alloc = zcache_objnode_alloc,
+ .objnode_free = zcache_objnode_free,
+};
+
+/*
+ * zcache implementations for PAM page descriptor ops
+ */
+
+static inline void dec_and_check(atomic_t *pvar)
+{
+ atomic_dec(pvar);
+ /* later when all accounting is fixed, make this a BUG */
+ WARN_ON_ONCE(atomic_read(pvar) < 0);
+}
+
+static atomic_t zcache_curr_eph_pampd_count = ATOMIC_INIT(0);
+static unsigned long zcache_curr_eph_pampd_count_max;
+static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0);
+static unsigned long zcache_curr_pers_pampd_count_max;
+
+/* forward reference */
+static int zcache_compress(struct page *from, void **out_va, size_t *out_len);
+
+static int zcache_pampd_eph_create(char *data, size_t size, bool raw,
+ struct tmem_pool *pool, struct tmem_oid *oid,
+ uint32_t index, void **pampd)
+{
+ int ret = -1;
+ void *cdata = data;
+ size_t clen = size;
+ struct zcache_client *cli = pool->client;
+ uint16_t client_id = get_client_id_from_client(cli);
+ struct page *page = NULL;
+ unsigned long count;
+
+ if (!raw) {
+ page = virt_to_page(data);
+ ret = zcache_compress(page, &cdata, &clen);
+ if (ret == 0)
+ goto out;
+ if (clen == 0 || clen > zbud_max_buddy_size()) {
+ zcache_compress_poor++;
+ goto out;
+ }
+ }
+ *pampd = (void *)zbud_create(client_id, pool->pool_id, oid,
+ index, page, cdata, clen);
+ if (*pampd == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = 0;
+ count = atomic_inc_return(&zcache_curr_eph_pampd_count);
+ if (count > zcache_curr_eph_pampd_count_max)
+ zcache_curr_eph_pampd_count_max = count;
+ if (client_id != LOCAL_CLIENT) {
+ count = atomic_inc_return(&ramster_foreign_eph_pampd_count);
+ if (count > ramster_foreign_eph_pampd_count_max)
+ ramster_foreign_eph_pampd_count_max = count;
+ }
+out:
+ return ret;
+}
+
+static int zcache_pampd_pers_create(char *data, size_t size, bool raw,
+ struct tmem_pool *pool, struct tmem_oid *oid,
+ uint32_t index, void **pampd)
+{
+ int ret = -1;
+ void *cdata = data;
+ size_t clen = size;
+ struct zcache_client *cli = pool->client;
+ struct page *page;
+ unsigned long count;
+ unsigned long zv_mean_zsize;
+ long curr_pers_pampd_count;
+ u64 total_zsize;
+#ifdef RAMSTER_TESTING
+ static bool pampd_neg_warned;
+#endif
+
+ curr_pers_pampd_count = atomic_read(&zcache_curr_pers_pampd_count) -
+ atomic_read(&ramster_remote_pers_pages);
+#ifdef RAMSTER_TESTING
+ /* should always be positive, but warn if accounting is off */
+	if (curr_pers_pampd_count < 0 && !pampd_neg_warned) {
+ pr_warn("ramster: bad accounting for curr_pers_pampd_count\n");
+ pampd_neg_warned = true;
+ }
+#endif
+ if (curr_pers_pampd_count >
+ (zv_page_count_policy_percent * totalram_pages) / 100) {
+ zcache_policy_percent_exceeded++;
+ goto out;
+ }
+ if (raw)
+ goto ok_to_create;
+ page = virt_to_page(data);
+ if (zcache_compress(page, &cdata, &clen) == 0)
+ goto out;
+ /* reject if compression is too poor */
+ if (clen > zv_max_zsize) {
+ zcache_compress_poor++;
+ goto out;
+ }
+ /* reject if mean compression is too poor */
+ if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
+ total_zsize = xv_get_total_size_bytes(cli->xvpool);
+ zv_mean_zsize = div_u64(total_zsize, curr_pers_pampd_count);
+ if (zv_mean_zsize > zv_max_mean_zsize) {
+ zcache_mean_compress_poor++;
+ goto out;
+ }
+ }
+ok_to_create:
+ *pampd = (void *)zv_create(cli, pool->pool_id, oid, index, cdata, clen);
+ if (*pampd == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = 0;
+ count = atomic_inc_return(&zcache_curr_pers_pampd_count);
+ if (count > zcache_curr_pers_pampd_count_max)
+ zcache_curr_pers_pampd_count_max = count;
+ if (is_local_client(cli))
+ goto out;
+ count = atomic_inc_return(&ramster_foreign_pers_pampd_count);
+ if (count > ramster_foreign_pers_pampd_count_max)
+ ramster_foreign_pers_pampd_count_max = count;
+out:
+ return ret;
+}
+
+static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
+ struct tmem_pool *pool, struct tmem_oid *oid,
+ uint32_t index)
+{
+ void *pampd = NULL;
+ int ret;
+ bool ephemeral;
+
+ BUG_ON(preemptible());
+ ephemeral = (eph == 1) || ((eph == 0) && is_ephemeral(pool));
+ if (ephemeral)
+ ret = zcache_pampd_eph_create(data, size, raw, pool,
+ oid, index, &pampd);
+ else
+ ret = zcache_pampd_pers_create(data, size, raw, pool,
+ oid, index, &pampd);
+ /* FIXME add some counters here for failed creates? */
+ return pampd;
+}
+
+/*
+ * fill the pageframe corresponding to the struct page with the data
+ * from the passed pampd
+ */
+static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,
+ void *pampd, struct tmem_pool *pool,
+ struct tmem_oid *oid, uint32_t index)
+{
+ int ret = 0;
+
+ BUG_ON(preemptible());
+ BUG_ON(is_ephemeral(pool)); /* Fix later for shared pools? */
+ BUG_ON(pampd_is_remote(pampd));
+ if (raw)
+ zv_copy_from_pampd(data, bufsize, pampd);
+ else
+ zv_decompress(virt_to_page(data), pampd);
+ return ret;
+}
+
+static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
+ void *pampd, struct tmem_pool *pool,
+ struct tmem_oid *oid, uint32_t index)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct zcache_client *cli = pool->client;
+
+ BUG_ON(preemptible());
+ BUG_ON(pampd_is_remote(pampd));
+ if (is_ephemeral(pool)) {
+ local_irq_save(flags);
+ if (raw)
+ zbud_copy_from_pampd(data, bufsize, pampd);
+ else
+ ret = zbud_decompress(virt_to_page(data), pampd);
+ zbud_free_and_delist((struct zbud_hdr *)pampd);
+ local_irq_restore(flags);
+ if (!is_local_client(cli))
+ dec_and_check(&ramster_foreign_eph_pampd_count);
+ dec_and_check(&zcache_curr_eph_pampd_count);
+ } else {
+ if (is_local_client(cli))
+ BUG();
+ if (raw)
+ zv_copy_from_pampd(data, bufsize, pampd);
+ else
+ zv_decompress(virt_to_page(data), pampd);
+ zv_free(cli->xvpool, pampd);
+ if (!is_local_client(cli))
+ dec_and_check(&ramster_foreign_pers_pampd_count);
+ dec_and_check(&zcache_curr_pers_pampd_count);
+ ret = 0;
+ }
+ return ret;
+}
+
+static bool zcache_pampd_is_remote(void *pampd)
+{
+ return pampd_is_remote(pampd);
+}
+
+/*
+ * free the pampd and remove it from any zcache lists
+ * pampd must no longer be pointed to from any tmem data structures!
+ */
+static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
+ struct tmem_oid *oid, uint32_t index, bool acct)
+{
+ struct zcache_client *cli = pool->client;
+ bool eph = is_ephemeral(pool);
+ struct zv_hdr *zv;
+
+ BUG_ON(preemptible());
+ if (pampd_is_remote(pampd)) {
+ WARN_ON(acct == false);
+ if (oid == NULL) {
+ /*
+ * a NULL oid means to ignore this pampd free
+ * as the remote freeing will be handled elsewhere
+ */
+ } else if (eph) {
+ /* FIXME remote flush optional but probably good idea */
+ /* FIXME get these working properly again */
+ dec_and_check(&zcache_curr_eph_pampd_count);
+ } else if (pampd_is_intransit(pampd)) {
+ /* did a pers remote get_and_free, so just free local */
+ pampd = pampd_mask_intransit_and_remote(pampd);
+ goto local_pers;
+ } else {
+ struct flushlist_node *flnode =
+ ramster_flnode_alloc(pool);
+
+ flnode->xh.client_id = pampd_remote_node(pampd);
+ flnode->xh.pool_id = pool->pool_id;
+ flnode->xh.oid = *oid;
+ flnode->xh.index = index;
+ flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_PAGE;
+ spin_lock(&zcache_rem_op_list_lock);
+ list_add(&flnode->rem_op.list, &zcache_rem_op_list);
+ spin_unlock(&zcache_rem_op_list_lock);
+ dec_and_check(&zcache_curr_pers_pampd_count);
+ dec_and_check(&ramster_remote_pers_pages);
+ }
+ } else if (eph) {
+ zbud_free_and_delist((struct zbud_hdr *)pampd);
+ if (!is_local_client(pool->client))
+ dec_and_check(&ramster_foreign_eph_pampd_count);
+ if (acct)
+ /* FIXME get these working properly again */
+ dec_and_check(&zcache_curr_eph_pampd_count);
+ } else {
+local_pers:
+ zv = (struct zv_hdr *)pampd;
+ if (!is_local_client(pool->client))
+ dec_and_check(&ramster_foreign_pers_pampd_count);
+ zv_free(cli->xvpool, zv);
+ if (acct)
+ /* FIXME get these working properly again */
+ dec_and_check(&zcache_curr_pers_pampd_count);
+ }
+}
+
+static void zcache_pampd_free_obj(struct tmem_pool *pool,
+ struct tmem_obj *obj)
+{
+ struct flushlist_node *flnode;
+
+ BUG_ON(preemptible());
+ if (obj->extra == NULL)
+ return;
+ BUG_ON(!pampd_is_remote(obj->extra));
+ flnode = ramster_flnode_alloc(pool);
+ flnode->xh.client_id = pampd_remote_node(obj->extra);
+ flnode->xh.pool_id = pool->pool_id;
+ flnode->xh.oid = obj->oid;
+ flnode->xh.index = FLUSH_ENTIRE_OBJECT;
+ flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_OBJ;
+ spin_lock(&zcache_rem_op_list_lock);
+ list_add(&flnode->rem_op.list, &zcache_rem_op_list);
+ spin_unlock(&zcache_rem_op_list_lock);
+}
+
+void zcache_pampd_new_obj(struct tmem_obj *obj)
+{
+ obj->extra = NULL;
+}
+
+int zcache_pampd_replace_in_obj(void *new_pampd, struct tmem_obj *obj)
+{
+ int ret = -1;
+
+ if (new_pampd != NULL) {
+ if (obj->extra == NULL)
+ obj->extra = new_pampd;
+ /* enforce that all remote pages in an object reside
+ * in the same node! */
+ else if (pampd_remote_node(new_pampd) !=
+ pampd_remote_node((void *)(obj->extra)))
+ BUG();
+ ret = 0;
+ }
+ return ret;
+}
+
+/*
+ * Called by the message handler after a (still compressed) page has been
+ * fetched from the remote machine in response to an "is_remote" tmem_get
+ * or persistent tmem_localify. For a tmem_get, "extra" is the address of
+ * the page that is to be filled to successfully resolve the tmem_get; for
+ * a (persistent) tmem_localify, "extra" is NULL (as the data is placed only
+ * in the local zcache). "data" points to "size" bytes of (compressed) data
+ * passed in the message. In the case of a persistent remote get, if
+ * pre-allocation was successful (see zcache_repatriate_preload), the page
+ * is placed into both local zcache and at "extra".
+ */
+int zcache_localify(int pool_id, struct tmem_oid *oidp,
+ uint32_t index, char *data, size_t size,
+ void *extra)
+{
+ int ret = -ENOENT;
+ unsigned long flags;
+ struct tmem_pool *pool;
+ bool ephemeral, delete = false;
+ size_t clen = PAGE_SIZE;
+ void *pampd, *saved_hb;
+ struct tmem_obj *obj;
+
+ pool = zcache_get_pool_by_id(LOCAL_CLIENT, pool_id);
+ if (unlikely(pool == NULL))
+ /* pool doesn't exist anymore */
+ goto out;
+ ephemeral = is_ephemeral(pool);
+ local_irq_save(flags); /* FIXME: maybe only disable softirqs? */
+ pampd = tmem_localify_get_pampd(pool, oidp, index, &obj, &saved_hb);
+ if (pampd == NULL) {
+ /* hmmm... must have been a flush while waiting */
+#ifdef RAMSTER_TESTING
+ pr_err("UNTESTED pampd==NULL in zcache_localify\n");
+#endif
+ if (ephemeral)
+ ramster_remote_eph_pages_unsucc_get++;
+ else
+ ramster_remote_pers_pages_unsucc_get++;
+ obj = NULL;
+ goto finish;
+ } else if (unlikely(!pampd_is_remote(pampd))) {
+ /* hmmm... must have been a dup put while waiting */
+#ifdef RAMSTER_TESTING
+ pr_err("UNTESTED dup while waiting in zcache_localify\n");
+#endif
+ if (ephemeral)
+ ramster_remote_eph_pages_unsucc_get++;
+ else
+ ramster_remote_pers_pages_unsucc_get++;
+ obj = NULL;
+ pampd = NULL;
+ ret = -EEXIST;
+ goto finish;
+ } else if (size == 0) {
+ /* no remote data, delete the local is_remote pampd */
+ pampd = NULL;
+ if (ephemeral)
+ ramster_remote_eph_pages_unsucc_get++;
+ else
+ BUG();
+ delete = true;
+ goto finish;
+ }
+ if (!ephemeral && pampd_is_intransit(pampd)) {
+ /* localify to zcache */
+ pampd = pampd_mask_intransit_and_remote(pampd);
+ zv_copy_to_pampd(pampd, data, size);
+ } else {
+ pampd = NULL;
+ obj = NULL;
+ }
+ if (extra != NULL) {
+ /* decompress direct-to-memory to complete remotify */
+ ret = lzo1x_decompress_safe((char *)data, size,
+ (char *)extra, &clen);
+ BUG_ON(ret != LZO_E_OK);
+ BUG_ON(clen != PAGE_SIZE);
+ }
+ if (ephemeral)
+ ramster_remote_eph_pages_succ_get++;
+ else
+ ramster_remote_pers_pages_succ_get++;
+ ret = 0;
+finish:
+ tmem_localify_finish(obj, index, pampd, saved_hb, delete);
+ zcache_put_pool(pool);
+ local_irq_restore(flags);
+out:
+ return ret;
+}
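+
+/*
+ * Illustrative call flavors for zcache_localify() (a sketch; the names
+ * msgdata/msgsize/page_va are hypothetical, not driver identifiers).
+ * A remote get passes the destination page as "extra" so the payload is
+ * decompressed straight into it; a persistent localify passes NULL and
+ * the compressed data lands only in the local zcache:
+ *
+ *	(void)zcache_localify(pool_id, &oid, index, msgdata, msgsize, page_va);
+ *	(void)zcache_localify(pool_id, &oid, index, msgdata, msgsize, NULL);
+ */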
+
+/*
+ * Called on a remote persistent tmem_get to attempt to preallocate
+ * local storage for the data contained in the remote persistent page.
+ * If successfully preallocated, returns the pampd, marked as remote and
+ * in_transit. Else returns NULL. Note that the appropriate tmem data
+ * structure must be locked.
+ */
+static void *zcache_pampd_repatriate_preload(void *pampd,
+ struct tmem_pool *pool,
+ struct tmem_oid *oid,
+ uint32_t index,
+ bool *intransit)
+{
+ int clen = pampd_remote_size(pampd);
+ void *ret_pampd = NULL;
+ unsigned long flags;
+
+ if (!pampd_is_remote(pampd))
+ BUG();
+ if (is_ephemeral(pool))
+ BUG();
+ if (pampd_is_intransit(pampd)) {
+ /*
+ * to avoid multiple allocations (and maybe a memory leak)
+ * don't preallocate if already in the process of being
+ * repatriated
+ */
+ *intransit = true;
+ goto out;
+ }
+ *intransit = false;
+ local_irq_save(flags);
+ ret_pampd = (void *)zv_alloc(pool, oid, index, clen);
+ if (ret_pampd != NULL) {
+ /*
+ * a pampd is marked intransit if it is remote and space has
+ * been allocated for it locally (note, only happens for
+ * persistent pages, in which case the remote copy is freed)
+ */
+ ret_pampd = pampd_mark_intransit(ret_pampd);
+ dec_and_check(&ramster_remote_pers_pages);
+ } else
+ ramster_pers_pages_remote_nomem++;
+ local_irq_restore(flags);
+out:
+ return ret_pampd;
+}
+
+/*
+ * Called on a remote tmem_get to invoke a message to fetch the page.
+ * Might sleep so no tmem locks can be held. "extra" is passed
+ * all the way through the round-trip messaging to zcache_localify.
+ */
+static int zcache_pampd_repatriate(void *fake_pampd, void *real_pampd,
+ struct tmem_pool *pool,
+ struct tmem_oid *oid, uint32_t index,
+ bool free, void *extra)
+{
+ struct tmem_xhandle xh;
+ int ret;
+
+ if (pampd_is_intransit(real_pampd))
+ /* have local space pre-reserved, so free remote copy */
+ free = true;
+ xh = tmem_xhandle_fill(LOCAL_CLIENT, pool, oid, index);
+ /* unreliable request/response for now */
+ ret = ramster_remote_async_get(&xh, free,
+ pampd_remote_node(fake_pampd),
+ pampd_remote_size(fake_pampd),
+ pampd_remote_cksum(fake_pampd),
+ extra);
+#ifdef RAMSTER_TESTING
+ if (ret != 0 && ret != -ENOENT)
+ pr_err("TESTING zcache_pampd_repatriate returns, ret=%d\n",
+ ret);
+#endif
+ return ret;
+}
+
+static struct tmem_pamops zcache_pamops = {
+ .create = zcache_pampd_create,
+ .get_data = zcache_pampd_get_data,
+ .free = zcache_pampd_free,
+ .get_data_and_free = zcache_pampd_get_data_and_free,
+ .free_obj = zcache_pampd_free_obj,
+ .is_remote = zcache_pampd_is_remote,
+ .repatriate_preload = zcache_pampd_repatriate_preload,
+ .repatriate = zcache_pampd_repatriate,
+ .new_obj = zcache_pampd_new_obj,
+ .replace_in_obj = zcache_pampd_replace_in_obj,
+};
+
+/*
+ * zcache compression/decompression and related per-cpu stuff
+ */
+
+#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
+#define LZO_DSTMEM_PAGE_ORDER 1
+static DEFINE_PER_CPU(unsigned char *, zcache_workmem);
+static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
+
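+/*
+ * Compress the page at "from" into this cpu's dstmem buffer.  Returns 1
+ * and sets *out_va/*out_len on success; returns 0 without touching the
+ * outputs if the per-cpu buffers were never allocated.  Interrupts must
+ * be disabled because the per-cpu dstmem buffer is reused on each call.
+ */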
+static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
+{
+ int ret = 0;
+ unsigned char *dmem = __get_cpu_var(zcache_dstmem);
+ unsigned char *wmem = __get_cpu_var(zcache_workmem);
+ char *from_va;
+
+ BUG_ON(!irqs_disabled());
+ if (unlikely(dmem == NULL || wmem == NULL))
+ goto out; /* no buffer, so can't compress */
+ from_va = kmap_atomic(from);
+ mb();
+ ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
+ BUG_ON(ret != LZO_E_OK);
+ *out_va = dmem;
+ kunmap_atomic(from_va);
+ ret = 1;
+out:
+ return ret;
+}
+
+
+static int zcache_cpu_notifier(struct notifier_block *nb,
+ unsigned long action, void *pcpu)
+{
+ int cpu = (long)pcpu;
+ struct zcache_preload *kp;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+		per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
+			GFP_KERNEL | __GFP_REPEAT,
+			LZO_DSTMEM_PAGE_ORDER);
+		per_cpu(zcache_workmem, cpu) =
+			kzalloc(LZO_WORKMEM_BYTES,
+				GFP_KERNEL | __GFP_REPEAT);
+ per_cpu(zcache_remoteputmem, cpu) =
+ kzalloc(PAGE_SIZE, GFP_KERNEL | __GFP_REPEAT);
+ break;
+ case CPU_DEAD:
+ case CPU_UP_CANCELED:
+ kfree(per_cpu(zcache_remoteputmem, cpu));
+ per_cpu(zcache_remoteputmem, cpu) = NULL;
+ free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
+ LZO_DSTMEM_PAGE_ORDER);
+ per_cpu(zcache_dstmem, cpu) = NULL;
+ kfree(per_cpu(zcache_workmem, cpu));
+ per_cpu(zcache_workmem, cpu) = NULL;
+ kp = &per_cpu(zcache_preloads, cpu);
+ while (kp->nr) {
+ kmem_cache_free(zcache_objnode_cache,
+ kp->objnodes[kp->nr - 1]);
+ kp->objnodes[kp->nr - 1] = NULL;
+ kp->nr--;
+ }
+ if (kp->obj) {
+ kmem_cache_free(zcache_obj_cache, kp->obj);
+ kp->obj = NULL;
+ }
+ if (kp->flnode) {
+ kmem_cache_free(ramster_flnode_cache, kp->flnode);
+ kp->flnode = NULL;
+ }
+ if (kp->page) {
+ free_page((unsigned long)kp->page);
+ kp->page = NULL;
+ }
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block zcache_cpu_notifier_block = {
+ .notifier_call = zcache_cpu_notifier
+};
+
+#ifdef CONFIG_SYSFS
+#define ZCACHE_SYSFS_RO(_name) \
+ static ssize_t zcache_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ return sprintf(buf, "%lu\n", zcache_##_name); \
+ } \
+ static struct kobj_attribute zcache_##_name##_attr = { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = zcache_##_name##_show, \
+ }
+
+#define ZCACHE_SYSFS_RO_ATOMIC(_name) \
+ static ssize_t zcache_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ return sprintf(buf, "%d\n", atomic_read(&zcache_##_name)); \
+ } \
+ static struct kobj_attribute zcache_##_name##_attr = { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = zcache_##_name##_show, \
+ }
+
+#define ZCACHE_SYSFS_RO_CUSTOM(_name, _func) \
+ static ssize_t zcache_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ return _func(buf); \
+ } \
+ static struct kobj_attribute zcache_##_name##_attr = { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = zcache_##_name##_show, \
+ }
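+
+/*
+ * For reference, ZCACHE_SYSFS_RO(flush_total) below expands to roughly:
+ *
+ *	static ssize_t zcache_flush_total_show(struct kobject *kobj,
+ *			struct kobj_attribute *attr, char *buf)
+ *	{
+ *		return sprintf(buf, "%lu\n", zcache_flush_total);
+ *	}
+ *	static struct kobj_attribute zcache_flush_total_attr = { ... };
+ *
+ * i.e. one read-only sysfs file per counter, named after the counter.
+ */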
+
+ZCACHE_SYSFS_RO(curr_obj_count_max);
+ZCACHE_SYSFS_RO(curr_objnode_count_max);
+ZCACHE_SYSFS_RO(flush_total);
+ZCACHE_SYSFS_RO(flush_found);
+ZCACHE_SYSFS_RO(flobj_total);
+ZCACHE_SYSFS_RO(flobj_found);
+ZCACHE_SYSFS_RO(failed_eph_puts);
+ZCACHE_SYSFS_RO(nonactive_puts);
+ZCACHE_SYSFS_RO(failed_pers_puts);
+ZCACHE_SYSFS_RO(zbud_curr_zbytes);
+ZCACHE_SYSFS_RO(zbud_cumul_zpages);
+ZCACHE_SYSFS_RO(zbud_cumul_zbytes);
+ZCACHE_SYSFS_RO(zbud_buddied_count);
+ZCACHE_SYSFS_RO(evicted_raw_pages);
+ZCACHE_SYSFS_RO(evicted_unbuddied_pages);
+ZCACHE_SYSFS_RO(evicted_buddied_pages);
+ZCACHE_SYSFS_RO(failed_get_free_pages);
+ZCACHE_SYSFS_RO(failed_alloc);
+ZCACHE_SYSFS_RO(put_to_flush);
+ZCACHE_SYSFS_RO(compress_poor);
+ZCACHE_SYSFS_RO(mean_compress_poor);
+ZCACHE_SYSFS_RO(policy_percent_exceeded);
+ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_raw_pages);
+ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_zpages);
+ZCACHE_SYSFS_RO_ATOMIC(curr_obj_count);
+ZCACHE_SYSFS_RO_ATOMIC(curr_objnode_count);
+ZCACHE_SYSFS_RO_CUSTOM(zbud_unbuddied_list_counts,
+ zbud_show_unbuddied_list_counts);
+ZCACHE_SYSFS_RO_CUSTOM(zbud_cumul_chunk_counts,
+ zbud_show_cumul_chunk_counts);
+ZCACHE_SYSFS_RO_CUSTOM(zv_curr_dist_counts,
+ zv_curr_dist_counts_show);
+ZCACHE_SYSFS_RO_CUSTOM(zv_cumul_dist_counts,
+ zv_cumul_dist_counts_show);
+
+static struct attribute *zcache_attrs[] = {
+ &zcache_curr_obj_count_attr.attr,
+ &zcache_curr_obj_count_max_attr.attr,
+ &zcache_curr_objnode_count_attr.attr,
+ &zcache_curr_objnode_count_max_attr.attr,
+ &zcache_flush_total_attr.attr,
+ &zcache_flobj_total_attr.attr,
+ &zcache_flush_found_attr.attr,
+ &zcache_flobj_found_attr.attr,
+ &zcache_failed_eph_puts_attr.attr,
+ &zcache_nonactive_puts_attr.attr,
+ &zcache_failed_pers_puts_attr.attr,
+ &zcache_policy_percent_exceeded_attr.attr,
+ &zcache_compress_poor_attr.attr,
+ &zcache_mean_compress_poor_attr.attr,
+ &zcache_zbud_curr_raw_pages_attr.attr,
+ &zcache_zbud_curr_zpages_attr.attr,
+ &zcache_zbud_curr_zbytes_attr.attr,
+ &zcache_zbud_cumul_zpages_attr.attr,
+ &zcache_zbud_cumul_zbytes_attr.attr,
+ &zcache_zbud_buddied_count_attr.attr,
+ &zcache_evicted_raw_pages_attr.attr,
+ &zcache_evicted_unbuddied_pages_attr.attr,
+ &zcache_evicted_buddied_pages_attr.attr,
+ &zcache_failed_get_free_pages_attr.attr,
+ &zcache_failed_alloc_attr.attr,
+ &zcache_put_to_flush_attr.attr,
+ &zcache_zbud_unbuddied_list_counts_attr.attr,
+ &zcache_zbud_cumul_chunk_counts_attr.attr,
+ &zcache_zv_curr_dist_counts_attr.attr,
+ &zcache_zv_cumul_dist_counts_attr.attr,
+ &zcache_zv_max_zsize_attr.attr,
+ &zcache_zv_max_mean_zsize_attr.attr,
+ &zcache_zv_page_count_policy_percent_attr.attr,
+ NULL,
+};
+
+static struct attribute_group zcache_attr_group = {
+ .attrs = zcache_attrs,
+ .name = "zcache",
+};
+
+#define RAMSTER_SYSFS_RO(_name) \
+ static ssize_t ramster_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ return sprintf(buf, "%lu\n", ramster_##_name); \
+ } \
+ static struct kobj_attribute ramster_##_name##_attr = { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = ramster_##_name##_show, \
+ }
+
+#define RAMSTER_SYSFS_RW(_name) \
+ static ssize_t ramster_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ return sprintf(buf, "%lu\n", ramster_##_name); \
+ } \
+ static ssize_t ramster_##_name##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, const char *buf, size_t count) \
+ { \
+ int err; \
+ unsigned long enable; \
+ err = kstrtoul(buf, 10, &enable); \
+ if (err) \
+ return -EINVAL; \
+ ramster_##_name = enable; \
+ return count; \
+ } \
+ static struct kobj_attribute ramster_##_name##_attr = { \
+ .attr = { .name = __stringify(_name), .mode = 0644 }, \
+ .show = ramster_##_name##_show, \
+ .store = ramster_##_name##_store, \
+ }
+
+#define RAMSTER_SYSFS_RO_ATOMIC(_name) \
+ static ssize_t ramster_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ return sprintf(buf, "%d\n", atomic_read(&ramster_##_name)); \
+ } \
+ static struct kobj_attribute ramster_##_name##_attr = { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = ramster_##_name##_show, \
+ }
+
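+/*
+ * Sketch of userspace usage (assuming sysfs is mounted at /sys): the
+ * attribute groups below hang off mm_kobj, so an RW knob such as
+ * pers_remotify_enable appears as
+ *
+ *	/sys/kernel/mm/ramster/pers_remotify_enable
+ *
+ * and can be toggled with, e.g., "echo 1 >" that file.
+ */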
+RAMSTER_SYSFS_RO(interface_revision);
+RAMSTER_SYSFS_RO_ATOMIC(remote_pers_pages);
+RAMSTER_SYSFS_RW(pers_remotify_enable);
+RAMSTER_SYSFS_RW(eph_remotify_enable);
+RAMSTER_SYSFS_RO(eph_pages_remoted);
+RAMSTER_SYSFS_RO(eph_pages_remote_failed);
+RAMSTER_SYSFS_RO(pers_pages_remoted);
+RAMSTER_SYSFS_RO(pers_pages_remote_failed);
+RAMSTER_SYSFS_RO(pers_pages_remote_nomem);
+RAMSTER_SYSFS_RO(remote_pages_flushed);
+RAMSTER_SYSFS_RO(remote_page_flushes_failed);
+RAMSTER_SYSFS_RO(remote_objects_flushed);
+RAMSTER_SYSFS_RO(remote_object_flushes_failed);
+RAMSTER_SYSFS_RO(remote_eph_pages_succ_get);
+RAMSTER_SYSFS_RO(remote_eph_pages_unsucc_get);
+RAMSTER_SYSFS_RO(remote_pers_pages_succ_get);
+RAMSTER_SYSFS_RO(remote_pers_pages_unsucc_get);
+RAMSTER_SYSFS_RO_ATOMIC(foreign_eph_pampd_count);
+RAMSTER_SYSFS_RO(foreign_eph_pampd_count_max);
+RAMSTER_SYSFS_RO_ATOMIC(foreign_pers_pampd_count);
+RAMSTER_SYSFS_RO(foreign_pers_pampd_count_max);
+RAMSTER_SYSFS_RO_ATOMIC(curr_flnode_count);
+RAMSTER_SYSFS_RO(curr_flnode_count_max);
+
+#define MANUAL_NODES 8
+static bool ramster_nodes_manual_up[MANUAL_NODES];
+static ssize_t ramster_manual_node_up_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int i;
+ char *p = buf;
+ for (i = 0; i < MANUAL_NODES; i++)
+ if (ramster_nodes_manual_up[i])
+ p += sprintf(p, "%d ", i);
+ p += sprintf(p, "\n");
+ return p - buf;
+}
+
+static ssize_t ramster_manual_node_up_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int err;
+ unsigned long node_num;
+
+ err = kstrtoul(buf, 10, &node_num);
+ if (err) {
+		pr_err("ramster: could not parse node number\n");
+ return -EINVAL;
+ }
+ if (node_num >= MANUAL_NODES) {
+ pr_err("ramster: bad node_num=%lu?\n", node_num);
+ return -EINVAL;
+ }
+ if (ramster_nodes_manual_up[node_num]) {
+ pr_err("ramster: node %d already up, ignoring\n",
+ (int)node_num);
+ } else {
+ ramster_nodes_manual_up[node_num] = true;
+ r2net_hb_node_up_manual((int)node_num);
+ }
+ return count;
+}
+
+static struct kobj_attribute ramster_manual_node_up_attr = {
+ .attr = { .name = "manual_node_up", .mode = 0644 },
+ .show = ramster_manual_node_up_show,
+ .store = ramster_manual_node_up_store,
+};
+
+static ssize_t ramster_remote_target_nodenum_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ if (ramster_remote_target_nodenum == -1UL)
+ return sprintf(buf, "unset\n");
+ else
+ return sprintf(buf, "%d\n", ramster_remote_target_nodenum);
+}
+
+static ssize_t ramster_remote_target_nodenum_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int err;
+ unsigned long node_num;
+
+ err = kstrtoul(buf, 10, &node_num);
+ if (err) {
+		pr_err("ramster: could not parse node number\n");
+ return -EINVAL;
+ } else if (node_num == -1UL) {
+ pr_err("ramster: disabling all remotification, "
+ "data may still reside on remote nodes however\n");
+ return -EINVAL;
+ } else if (node_num >= MANUAL_NODES) {
+ pr_err("ramster: bad node_num=%lu?\n", node_num);
+ return -EINVAL;
+ } else if (!ramster_nodes_manual_up[node_num]) {
+ pr_err("ramster: node %d not up, ignoring setting "
+ "of remotification target\n", (int)node_num);
+ } else if (r2net_remote_target_node_set((int)node_num) >= 0) {
+ pr_info("ramster: node %d set as remotification target\n",
+ (int)node_num);
+ ramster_remote_target_nodenum = (int)node_num;
+ } else {
+		pr_err("ramster: failed to set node_num=%d as remotification target\n",
+			(int)node_num);
+ return -EINVAL;
+ }
+ return count;
+}
+
+static struct kobj_attribute ramster_remote_target_nodenum_attr = {
+ .attr = { .name = "remote_target_nodenum", .mode = 0644 },
+ .show = ramster_remote_target_nodenum_show,
+ .store = ramster_remote_target_nodenum_store,
+};
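+
+/*
+ * Sketch of bringing up a remote node from userspace (paths assume the
+ * default sysfs mount): the node must be marked up before it may be
+ * chosen as the remotification target, matching the check in
+ * ramster_remote_target_nodenum_store() above:
+ *
+ *	echo 1 > /sys/kernel/mm/ramster/manual_node_up
+ *	echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum
+ */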
+
+
+static struct attribute *ramster_attrs[] = {
+ &ramster_interface_revision_attr.attr,
+ &ramster_pers_remotify_enable_attr.attr,
+ &ramster_eph_remotify_enable_attr.attr,
+ &ramster_remote_pers_pages_attr.attr,
+ &ramster_eph_pages_remoted_attr.attr,
+ &ramster_eph_pages_remote_failed_attr.attr,
+ &ramster_pers_pages_remoted_attr.attr,
+ &ramster_pers_pages_remote_failed_attr.attr,
+ &ramster_pers_pages_remote_nomem_attr.attr,
+ &ramster_remote_pages_flushed_attr.attr,
+ &ramster_remote_page_flushes_failed_attr.attr,
+ &ramster_remote_objects_flushed_attr.attr,
+ &ramster_remote_object_flushes_failed_attr.attr,
+ &ramster_remote_eph_pages_succ_get_attr.attr,
+ &ramster_remote_eph_pages_unsucc_get_attr.attr,
+ &ramster_remote_pers_pages_succ_get_attr.attr,
+ &ramster_remote_pers_pages_unsucc_get_attr.attr,
+ &ramster_foreign_eph_pampd_count_attr.attr,
+ &ramster_foreign_eph_pampd_count_max_attr.attr,
+ &ramster_foreign_pers_pampd_count_attr.attr,
+ &ramster_foreign_pers_pampd_count_max_attr.attr,
+ &ramster_curr_flnode_count_attr.attr,
+ &ramster_curr_flnode_count_max_attr.attr,
+ &ramster_manual_node_up_attr.attr,
+ &ramster_remote_target_nodenum_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ramster_attr_group = {
+ .attrs = ramster_attrs,
+ .name = "ramster",
+};
+
+#endif /* CONFIG_SYSFS */
+
+/*
+ * When zcache is disabled ("frozen"), pools can be created and destroyed,
+ * but all puts (and thus all other operations that require memory allocation)
+ * must fail. If zcache is unfrozen, accepts puts, then frozen again,
+ * data consistency requires all puts while frozen to be converted into
+ * flushes.
+ */
+static bool zcache_freeze;
+
+/*
+ * zcache shrinker interface (only useful for ephemeral pages, so zbud only)
+ */
+static int shrink_zcache_memory(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ int ret = -1;
+ int nr = sc->nr_to_scan;
+ gfp_t gfp_mask = sc->gfp_mask;
+
+ if (nr >= 0) {
+ if (!(gfp_mask & __GFP_FS))
+ /* does this case really need to be skipped? */
+ goto out;
+ zbud_evict_pages(nr);
+ }
+ ret = (int)atomic_read(&zcache_zbud_curr_raw_pages);
+out:
+ return ret;
+}
+
+static struct shrinker zcache_shrinker = {
+ .shrink = shrink_zcache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
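+
+/*
+ * Note on the contract (as of this shrinker API): the VM calls
+ * shrink_zcache_memory() with sc->nr_to_scan == 0 merely to query the
+ * reclaimable count, and with a positive count to evict that many zbud
+ * (ephemeral) pages; persistent pages are never reclaimed this way.
+ */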
+
+/*
+ * zcache shims between cleancache/frontswap ops and tmem
+ */
+
+int zcache_put(int cli_id, int pool_id, struct tmem_oid *oidp,
+ uint32_t index, char *data, size_t size,
+ bool raw, int ephemeral)
+{
+ struct tmem_pool *pool;
+ int ret = -1;
+
+ BUG_ON(!irqs_disabled());
+ pool = zcache_get_pool_by_id(cli_id, pool_id);
+ if (unlikely(pool == NULL))
+ goto out;
+ if (!zcache_freeze && zcache_do_preload(pool) == 0) {
+ /* preload does preempt_disable on success */
+ ret = tmem_put(pool, oidp, index, data, size, raw, ephemeral);
+ if (ret < 0) {
+ if (is_ephemeral(pool))
+ zcache_failed_eph_puts++;
+ else
+ zcache_failed_pers_puts++;
+ }
+ zcache_put_pool(pool);
+ preempt_enable_no_resched();
+ } else {
+ zcache_put_to_flush++;
+ if (atomic_read(&pool->obj_count) > 0)
+ /* the put fails whether the flush succeeds or not */
+ (void)tmem_flush_page(pool, oidp, index);
+ zcache_put_pool(pool);
+ }
+out:
+ return ret;
+}
+
+int zcache_get(int cli_id, int pool_id, struct tmem_oid *oidp,
+ uint32_t index, char *data, size_t *sizep,
+ bool raw, int get_and_free)
+{
+ struct tmem_pool *pool;
+ int ret = -1;
+	bool eph = true;	/* default avoids the WARN below if pool is gone */
+
+ if (!raw) {
+ BUG_ON(irqs_disabled());
+ BUG_ON(in_softirq());
+ }
+	pool = zcache_get_pool_by_id(cli_id, pool_id);
+	if (likely(pool != NULL)) {
+		eph = is_ephemeral(pool);
+ if (atomic_read(&pool->obj_count) > 0)
+ ret = tmem_get(pool, oidp, index, data, sizep,
+ raw, get_and_free);
+ zcache_put_pool(pool);
+ }
+ WARN_ONCE((!eph && (ret != 0)), "zcache_get fails on persistent pool, "
+ "bad things are very likely to happen soon\n");
+#ifdef RAMSTER_TESTING
+	if (ret != 0 && ret != -1 && !(ret == -EINVAL && eph))
+ pr_err("TESTING zcache_get tmem_get returns ret=%d\n", ret);
+#endif
+ if (ret == -EAGAIN)
+ BUG(); /* FIXME... don't need this anymore??? let's ensure */
+ return ret;
+}
+
+int zcache_flush(int cli_id, int pool_id,
+ struct tmem_oid *oidp, uint32_t index)
+{
+ struct tmem_pool *pool;
+ int ret = -1;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ zcache_flush_total++;
+ pool = zcache_get_pool_by_id(cli_id, pool_id);
+ ramster_do_preload_flnode_only(pool);
+ if (likely(pool != NULL)) {
+ if (atomic_read(&pool->obj_count) > 0)
+ ret = tmem_flush_page(pool, oidp, index);
+ zcache_put_pool(pool);
+ }
+ if (ret >= 0)
+ zcache_flush_found++;
+ local_irq_restore(flags);
+ return ret;
+}
+
+int zcache_flush_object(int cli_id, int pool_id, struct tmem_oid *oidp)
+{
+ struct tmem_pool *pool;
+ int ret = -1;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ zcache_flobj_total++;
+ pool = zcache_get_pool_by_id(cli_id, pool_id);
+ ramster_do_preload_flnode_only(pool);
+ if (likely(pool != NULL)) {
+ if (atomic_read(&pool->obj_count) > 0)
+ ret = tmem_flush_object(pool, oidp);
+ zcache_put_pool(pool);
+ }
+ if (ret >= 0)
+ zcache_flobj_found++;
+ local_irq_restore(flags);
+ return ret;
+}
+
+int zcache_client_destroy_pool(int cli_id, int pool_id)
+{
+ struct tmem_pool *pool = NULL;
+ struct zcache_client *cli = NULL;
+ int ret = -1;
+
+ if (pool_id < 0)
+ goto out;
+ if (cli_id == LOCAL_CLIENT)
+ cli = &zcache_host;
+ else if ((unsigned int)cli_id < MAX_CLIENTS)
+ cli = &zcache_clients[cli_id];
+ if (cli == NULL)
+ goto out;
+	atomic_inc(&cli->refcount);
+	pool = cli->tmem_pools[pool_id];
+	if (pool == NULL) {
+		atomic_dec(&cli->refcount);
+		goto out;
+	}
+	cli->tmem_pools[pool_id] = NULL;
+	/* wait for pool activity on other cpus to quiesce */
+	while (atomic_read(&pool->refcount) != 0)
+		cpu_relax();
+	atomic_dec(&cli->refcount);
+ local_bh_disable();
+ ret = tmem_destroy_pool(pool);
+ local_bh_enable();
+ kfree(pool);
+ pr_info("ramster: destroyed pool id=%d cli_id=%d\n", pool_id, cli_id);
+out:
+ return ret;
+}
+
+static int zcache_destroy_pool(int pool_id)
+{
+ return zcache_client_destroy_pool(LOCAL_CLIENT, pool_id);
+}
+
+int zcache_new_pool(uint16_t cli_id, uint32_t flags)
+{
+ int poolid = -1;
+ struct tmem_pool *pool;
+ struct zcache_client *cli = NULL;
+
+ if (cli_id == LOCAL_CLIENT)
+ cli = &zcache_host;
+ else if ((unsigned int)cli_id < MAX_CLIENTS)
+ cli = &zcache_clients[cli_id];
+ if (cli == NULL)
+ goto out;
+ atomic_inc(&cli->refcount);
+ pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
+ if (pool == NULL) {
+ pr_info("ramster: pool creation failed: out of memory\n");
+ goto out;
+ }
+
+ for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
+ if (cli->tmem_pools[poolid] == NULL)
+ break;
+ if (poolid >= MAX_POOLS_PER_CLIENT) {
+ pr_info("ramster: pool creation failed: max exceeded\n");
+ kfree(pool);
+ poolid = -1;
+ goto out;
+ }
+ atomic_set(&pool->refcount, 0);
+ pool->client = cli;
+ pool->pool_id = poolid;
+ tmem_new_pool(pool, flags);
+ cli->tmem_pools[poolid] = pool;
+ if (cli_id == LOCAL_CLIENT)
+ pr_info("ramster: created %s tmem pool, id=%d, local client\n",
+ flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
+ poolid);
+ else
+ pr_info("ramster: created %s tmem pool, id=%d, client=%d\n",
+ flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
+ poolid, cli_id);
+out:
+ if (cli != NULL)
+ atomic_dec(&cli->refcount);
+ return poolid;
+}
+
+static int zcache_local_new_pool(uint32_t flags)
+{
+ return zcache_new_pool(LOCAL_CLIENT, flags);
+}
+
+int zcache_autocreate_pool(int cli_id, int pool_id, bool ephemeral)
+{
+ struct tmem_pool *pool;
+ struct zcache_client *cli = NULL;
+ uint32_t flags = ephemeral ? 0 : TMEM_POOL_PERSIST;
+ int ret = -1;
+
+ if (cli_id == LOCAL_CLIENT)
+ goto out;
+ if (pool_id >= MAX_POOLS_PER_CLIENT)
+ goto out;
+ else if ((unsigned int)cli_id < MAX_CLIENTS)
+ cli = &zcache_clients[cli_id];
+ if ((ephemeral && !use_cleancache) || (!ephemeral && !use_frontswap))
+ BUG(); /* FIXME, handle more gracefully later */
+ if (!cli->allocated) {
+ if (zcache_new_client(cli_id))
+ BUG(); /* FIXME, handle more gracefully later */
+ cli = &zcache_clients[cli_id];
+ }
+ atomic_inc(&cli->refcount);
+ pool = cli->tmem_pools[pool_id];
+ if (pool != NULL) {
+ if (pool->persistent && ephemeral) {
+ pr_err("zcache_autocreate_pool: type mismatch\n");
+ goto out;
+ }
+ ret = 0;
+ goto out;
+ }
+ pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL);
+ if (pool == NULL) {
+ pr_info("ramster: pool creation failed: out of memory\n");
+ goto out;
+ }
+ atomic_set(&pool->refcount, 0);
+ pool->client = cli;
+ pool->pool_id = pool_id;
+ tmem_new_pool(pool, flags);
+ cli->tmem_pools[pool_id] = pool;
+ pr_info("ramster: AUTOcreated %s tmem poolid=%d, for remote client=%d\n",
+ flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
+ pool_id, cli_id);
+ ret = 0;
+out:
+	if (cli == NULL)
+		BUG(); /* FIXME, handle more gracefully later */
+	atomic_dec(&cli->refcount);
+ return ret;
+}
+
+/**********
+ * Two kernel functionalities currently can be layered on top of tmem.
+ * These are "cleancache" which is used as a second-chance cache for clean
+ * page cache pages; and "frontswap" which is used for swap pages
+ * to avoid writes to disk. A generic "shim" is provided here for each
+ * to translate in-kernel semantics to zcache semantics.
+ */
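+
+/*
+ * Both shims rely on size-compatible handles: a cleancache_filekey is
+ * reinterpreted directly as a tmem_oid (the BUG_ONs in the init_fs
+ * hooks below enforce that the sizes match), while frontswap swizzles
+ * its (type, offset) pair into an oid/index pair (see oswiz()/iswiz()).
+ */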
+
+#ifdef CONFIG_CLEANCACHE
+static void zcache_cleancache_put_page(int pool_id,
+ struct cleancache_filekey key,
+ pgoff_t index, struct page *page)
+{
+ u32 ind = (u32) index;
+ struct tmem_oid oid = *(struct tmem_oid *)&key;
+
+#ifdef __PG_WAS_ACTIVE
+ if (!PageWasActive(page)) {
+ zcache_nonactive_puts++;
+ return;
+ }
+#endif
+ if (likely(ind == index)) {
+ char *kva = page_address(page);
+
+ (void)zcache_put(LOCAL_CLIENT, pool_id, &oid, index,
+ kva, PAGE_SIZE, 0, 1);
+ }
+}
+
+static int zcache_cleancache_get_page(int pool_id,
+ struct cleancache_filekey key,
+ pgoff_t index, struct page *page)
+{
+ u32 ind = (u32) index;
+ struct tmem_oid oid = *(struct tmem_oid *)&key;
+ int ret = -1;
+
+ preempt_disable();
+ if (likely(ind == index)) {
+ char *kva = page_address(page);
+ size_t size = PAGE_SIZE;
+
+ ret = zcache_get(LOCAL_CLIENT, pool_id, &oid, index,
+ kva, &size, 0, 0);
+#ifdef __PG_WAS_ACTIVE
+ if (ret == 0)
+ SetPageWasActive(page);
+#endif
+ }
+ preempt_enable();
+ return ret;
+}
+
+static void zcache_cleancache_flush_page(int pool_id,
+ struct cleancache_filekey key,
+ pgoff_t index)
+{
+ u32 ind = (u32) index;
+ struct tmem_oid oid = *(struct tmem_oid *)&key;
+
+ if (likely(ind == index))
+ (void)zcache_flush(LOCAL_CLIENT, pool_id, &oid, ind);
+}
+
+static void zcache_cleancache_flush_inode(int pool_id,
+ struct cleancache_filekey key)
+{
+ struct tmem_oid oid = *(struct tmem_oid *)&key;
+
+ (void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
+}
+
+static void zcache_cleancache_flush_fs(int pool_id)
+{
+ if (pool_id >= 0)
+ (void)zcache_destroy_pool(pool_id);
+}
+
+static int zcache_cleancache_init_fs(size_t pagesize)
+{
+ BUG_ON(sizeof(struct cleancache_filekey) !=
+ sizeof(struct tmem_oid));
+ BUG_ON(pagesize != PAGE_SIZE);
+ return zcache_local_new_pool(0);
+}
+
+static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
+{
+ /* shared pools are unsupported and map to private */
+ BUG_ON(sizeof(struct cleancache_filekey) !=
+ sizeof(struct tmem_oid));
+ BUG_ON(pagesize != PAGE_SIZE);
+ return zcache_local_new_pool(0);
+}
+
+static struct cleancache_ops zcache_cleancache_ops = {
+ .put_page = zcache_cleancache_put_page,
+ .get_page = zcache_cleancache_get_page,
+ .invalidate_page = zcache_cleancache_flush_page,
+ .invalidate_inode = zcache_cleancache_flush_inode,
+ .invalidate_fs = zcache_cleancache_flush_fs,
+ .init_shared_fs = zcache_cleancache_init_shared_fs,
+ .init_fs = zcache_cleancache_init_fs
+};
+
+struct cleancache_ops zcache_cleancache_register_ops(void)
+{
+ struct cleancache_ops old_ops =
+ cleancache_register_ops(&zcache_cleancache_ops);
+
+ return old_ops;
+}
+#endif
+
+#ifdef CONFIG_FRONTSWAP
+/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
+static int zcache_frontswap_poolid = -1;
+
+/*
+ * Swizzling increases objects per swaptype, increasing tmem concurrency
+ * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
+ */
+#define SWIZ_BITS 8
+#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
+#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
+#define iswiz(_ind) (_ind >> SWIZ_BITS)
+
+static inline struct tmem_oid oswiz(unsigned type, u32 ind)
+{
+ struct tmem_oid oid = { .oid = { 0 } };
+ oid.oid[0] = _oswiz(type, ind);
+ return oid;
+}
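+
+/*
+ * Worked example (illustrative): with SWIZ_BITS == 8, swaptype 1 and
+ * offset 0x12345 give oid.oid[0] == _oswiz(1, 0x45) == 0x145 and
+ * index == iswiz(0x12345) == 0x123.  Consecutive offsets thus spread
+ * across 256 tmem objects, while offsets 256 apart share an object at
+ * consecutive indices.
+ */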
+
+static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
+ struct page *page)
+{
+ u64 ind64 = (u64)offset;
+ u32 ind = (u32)offset;
+ struct tmem_oid oid = oswiz(type, ind);
+ int ret = -1;
+ unsigned long flags;
+ char *kva;
+
+ BUG_ON(!PageLocked(page));
+ if (likely(ind64 == ind)) {
+ local_irq_save(flags);
+ kva = page_address(page);
+ ret = zcache_put(LOCAL_CLIENT, zcache_frontswap_poolid,
+ &oid, iswiz(ind), kva, PAGE_SIZE, 0, 0);
+ local_irq_restore(flags);
+ }
+ return ret;
+}
+
+/* returns 0 if the page was successfully fetched from frontswap, -1 if
+ * it was not present (should never happen!) */
+static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
+ struct page *page)
+{
+ u64 ind64 = (u64)offset;
+ u32 ind = (u32)offset;
+ struct tmem_oid oid = oswiz(type, ind);
+ int ret = -1;
+
+ preempt_disable(); /* FIXME, remove this? */
+ BUG_ON(!PageLocked(page));
+ if (likely(ind64 == ind)) {
+ char *kva = page_address(page);
+ size_t size = PAGE_SIZE;
+
+ ret = zcache_get(LOCAL_CLIENT, zcache_frontswap_poolid,
+ &oid, iswiz(ind), kva, &size, 0, -1);
+ }
+ preempt_enable(); /* FIXME, remove this? */
+ return ret;
+}
+
+/* flush a single page from frontswap */
+static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
+{
+ u64 ind64 = (u64)offset;
+ u32 ind = (u32)offset;
+ struct tmem_oid oid = oswiz(type, ind);
+
+ if (likely(ind64 == ind))
+ (void)zcache_flush(LOCAL_CLIENT, zcache_frontswap_poolid,
+ &oid, iswiz(ind));
+}
+
+/* flush all pages from the passed swaptype */
+static void zcache_frontswap_flush_area(unsigned type)
+{
+ struct tmem_oid oid;
+ int ind;
+
+ for (ind = SWIZ_MASK; ind >= 0; ind--) {
+ oid = oswiz(type, ind);
+ (void)zcache_flush_object(LOCAL_CLIENT,
+ zcache_frontswap_poolid, &oid);
+ }
+}
+
+static void zcache_frontswap_init(unsigned ignored)
+{
+ /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
+ if (zcache_frontswap_poolid < 0)
+ zcache_frontswap_poolid =
+ zcache_local_new_pool(TMEM_POOL_PERSIST);
+}
+
+static struct frontswap_ops zcache_frontswap_ops = {
+ .put_page = zcache_frontswap_put_page,
+ .get_page = zcache_frontswap_get_page,
+ .invalidate_page = zcache_frontswap_flush_page,
+ .invalidate_area = zcache_frontswap_flush_area,
+ .init = zcache_frontswap_init
+};
+
+struct frontswap_ops zcache_frontswap_register_ops(void)
+{
+ struct frontswap_ops old_ops =
+ frontswap_register_ops(&zcache_frontswap_ops);
+
+ return old_ops;
+}
+#endif
+
+/*
+ * frontswap selfshrinking
+ */
+
+#ifdef CONFIG_FRONTSWAP
+/* In HZ, controls frequency of worker invocation. */
+static unsigned int selfshrink_interval __read_mostly = 5;
+
+static void selfshrink_process(struct work_struct *work);
+static DECLARE_DELAYED_WORK(selfshrink_worker, selfshrink_process);
+
+/* Enable/disable with sysfs. */
+static bool frontswap_selfshrinking __read_mostly;
+
+/* Enable/disable with kernel boot option. */
+static bool use_frontswap_selfshrink __initdata = true;
+
+/*
+ * The default values for the following parameters were deemed reasonable
+ * by experimentation, may be workload-dependent, and can all be
+ * adjusted via sysfs.
+ */
+
+/* Control rate for frontswap shrinking. Higher hysteresis is slower. */
+static unsigned int frontswap_hysteresis __read_mostly = 20;
+
+/*
+ * Number of selfshrink worker invocations to wait before observing that
+ * frontswap selfshrinking should commence. Note that selfshrinking does
+ * not use a separate worker thread.
+ */
+static unsigned int frontswap_inertia __read_mostly = 3;
+
+/* Countdown to next invocation of frontswap_shrink() */
+static unsigned long frontswap_inertia_counter;
+
+/*
+ * Invoked by the selfshrink worker thread, uses current number of pages
+ * in frontswap (frontswap_curr_pages()), previous status, and control
+ * values (hysteresis and inertia) to determine if frontswap should be
+ * shrunk and what the new frontswap size should be. Note that
+ * frontswap_shrink is essentially a partial swapoff that immediately
+ * transfers pages from the "swap device" (frontswap) back into kernel
+ * RAM; despite the name, frontswap "shrinking" is very different from
+ * the "shrinker" interface used by the kernel MM subsystem to reclaim
+ * memory.
+ */
+static void frontswap_selfshrink(void)
+{
+ static unsigned long cur_frontswap_pages;
+ static unsigned long last_frontswap_pages;
+ static unsigned long tgt_frontswap_pages;
+
+ last_frontswap_pages = cur_frontswap_pages;
+ cur_frontswap_pages = frontswap_curr_pages();
+ if (!cur_frontswap_pages ||
+ (cur_frontswap_pages > last_frontswap_pages)) {
+ frontswap_inertia_counter = frontswap_inertia;
+ return;
+ }
+ if (frontswap_inertia_counter && --frontswap_inertia_counter)
+ return;
+ if (cur_frontswap_pages <= frontswap_hysteresis)
+ tgt_frontswap_pages = 0;
+ else
+ tgt_frontswap_pages = cur_frontswap_pages -
+ (cur_frontswap_pages / frontswap_hysteresis);
+ frontswap_shrink(tgt_frontswap_pages);
+}
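+
+/*
+ * Worked example (illustrative): with frontswap_hysteresis == 20, once
+ * inertia expires and frontswap holds a non-growing 1000 pages, the
+ * target becomes 1000 - (1000 / 20) = 950, i.e. each pass asks
+ * frontswap_shrink() to give back 5% of the current pages, until the
+ * count falls to the hysteresis value or below and the target drops to 0.
+ */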
+
+static int __init ramster_nofrontswap_selfshrink_setup(char *s)
+{
+ use_frontswap_selfshrink = false;
+ return 1;
+}
+
+__setup("noselfshrink", ramster_nofrontswap_selfshrink_setup);
+
+static void selfshrink_process(struct work_struct *work)
+{
+ if (frontswap_selfshrinking && frontswap_enabled) {
+ frontswap_selfshrink();
+ schedule_delayed_work(&selfshrink_worker,
+ selfshrink_interval * HZ);
+ }
+}
+
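+/* tentative definition: the same variable is declared again further down,
+ * outside this CONFIG_FRONTSWAP block, so it exists in either config */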
+static int ramster_enabled;
+
+static int __init ramster_selfshrink_init(void)
+{
+ frontswap_selfshrinking = ramster_enabled && use_frontswap_selfshrink;
+ if (frontswap_selfshrinking)
+ pr_info("ramster: Initializing frontswap "
+ "selfshrinking driver.\n");
+ else
+ return -ENODEV;
+
+ schedule_delayed_work(&selfshrink_worker, selfshrink_interval * HZ);
+
+ return 0;
+}
+
+subsys_initcall(ramster_selfshrink_init);
+#endif
+
+/*
+ * zcache initialization
+ * NOTE: for now, "ramster" MUST be provided as a kernel boot parameter
+ * or nothing happens!
+ */
+
+static int ramster_enabled;
+
+static int __init enable_ramster(char *s)
+{
+ ramster_enabled = 1;
+ return 1;
+}
+__setup("ramster", enable_ramster);
+
+/* allow independent dynamic disabling of cleancache and frontswap */
+
+static int use_cleancache = 1;
+
+static int __init no_cleancache(char *s)
+{
+ pr_info("INIT no_cleancache called\n");
+ use_cleancache = 0;
+ return 1;
+}
+
+/*
+ * FIXME: need to guarantee this gets checked before zcache_init is called
+ * What is the correct way to achieve this?
+ */
+early_param("nocleancache", no_cleancache);
+
+static int use_frontswap = 1;
+
+static int __init no_frontswap(char *s)
+{
+ pr_info("INIT no_frontswap called\n");
+ use_frontswap = 0;
+ return 1;
+}
+
+__setup("nofrontswap", no_frontswap);
+
+static int __init zcache_init(void)
+{
+ int ret = 0;
+
+#ifdef CONFIG_SYSFS
+	ret = sysfs_create_group(mm_kobj, &zcache_attr_group);
+	if (!ret)
+		ret = sysfs_create_group(mm_kobj, &ramster_attr_group);
+ if (ret) {
+ pr_err("ramster: can't create sysfs\n");
+ goto out;
+ }
+#endif /* CONFIG_SYSFS */
+#if defined(CONFIG_CLEANCACHE) || defined(CONFIG_FRONTSWAP)
+ if (ramster_enabled) {
+ unsigned int cpu;
+
+ (void)r2net_register_handlers();
+ tmem_register_hostops(&zcache_hostops);
+ tmem_register_pamops(&zcache_pamops);
+ ret = register_cpu_notifier(&zcache_cpu_notifier_block);
+ if (ret) {
+ pr_err("ramster: can't register cpu notifier\n");
+ goto out;
+ }
+ for_each_online_cpu(cpu) {
+ void *pcpu = (void *)(long)cpu;
+ zcache_cpu_notifier(&zcache_cpu_notifier_block,
+ CPU_UP_PREPARE, pcpu);
+ }
+ }
+ zcache_objnode_cache = kmem_cache_create("zcache_objnode",
+ sizeof(struct tmem_objnode), 0, 0, NULL);
+ zcache_obj_cache = kmem_cache_create("zcache_obj",
+ sizeof(struct tmem_obj), 0, 0, NULL);
+ ramster_flnode_cache = kmem_cache_create("ramster_flnode",
+ sizeof(struct flushlist_node), 0, 0, NULL);
+#endif
+#ifdef CONFIG_CLEANCACHE
+ pr_info("INIT ramster_enabled=%d use_cleancache=%d\n",
+ ramster_enabled, use_cleancache);
+ if (ramster_enabled && use_cleancache) {
+ struct cleancache_ops old_ops;
+
+ zbud_init();
+ register_shrinker(&zcache_shrinker);
+ old_ops = zcache_cleancache_register_ops();
+ pr_info("ramster: cleancache enabled using kernel "
+ "transcendent memory and compression buddies\n");
+ if (old_ops.init_fs != NULL)
+ pr_warning("ramster: cleancache_ops overridden");
+ }
+#endif
+#ifdef CONFIG_FRONTSWAP
+ pr_info("INIT ramster_enabled=%d use_frontswap=%d\n",
+ ramster_enabled, use_frontswap);
+ if (ramster_enabled && use_frontswap) {
+ struct frontswap_ops old_ops;
+
+ zcache_new_client(LOCAL_CLIENT);
+ old_ops = zcache_frontswap_register_ops();
+ pr_info("ramster: frontswap enabled using kernel "
+ "transcendent memory and xvmalloc\n");
+ if (old_ops.init != NULL)
+ pr_warning("ramster: frontswap_ops overridden");
+ }
+ if (ramster_enabled && (use_frontswap || use_cleancache))
+ ramster_remotify_init();
+#endif
+out:
+ return ret;
+}
+
+module_init(zcache_init)
diff --git a/drivers/staging/ramster/zcache.h b/drivers/staging/ramster/zcache.h
new file mode 100644
index 000000000000..250b121c22e5
--- /dev/null
+++ b/drivers/staging/ramster/zcache.h
@@ -0,0 +1,22 @@
+/*
+ * zcache.h
+ *
+ * External zcache functions
+ *
+ * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
+ */
+
+#ifndef _ZCACHE_H_
+#define _ZCACHE_H_
+
+extern int zcache_put(int, int, struct tmem_oid *, uint32_t,
+ char *, size_t, bool, int);
+extern int zcache_autocreate_pool(int, int, bool);
+extern int zcache_get(int, int, struct tmem_oid *, uint32_t,
+ char *, size_t *, bool, int);
+extern int zcache_flush(int, int, struct tmem_oid *, uint32_t);
+extern int zcache_flush_object(int, int, struct tmem_oid *);
+extern int zcache_localify(int, struct tmem_oid *, uint32_t,
+ char *, size_t, void *);
+
+#endif /* _ZCACHE_H */
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 04c23919f4d6..e4ade550cfe5 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -439,8 +439,7 @@ void buffer_free(struct net_device *dev, struct buffer **buffer, int len, short
}
kfree(tmp);
tmp = next;
- }
- while (next != *buffer);
+ } while (next != *buffer);
*buffer = NULL;
}
@@ -1392,11 +1391,13 @@ void PerformUndecoratedSignalSmoothing8185(struct r8180_priv *priv,
priv->bCurCCKPkt = bCckRate;
if (priv->UndecoratedSmoothedSS >= 0)
- priv->UndecoratedSmoothedSS = ((priv->UndecoratedSmoothedSS * 5) + (priv->SignalStrength * 10)) / 6;
+ priv->UndecoratedSmoothedSS = ((priv->UndecoratedSmoothedSS * 5) +
+ (priv->SignalStrength * 10)) / 6;
else
priv->UndecoratedSmoothedSS = priv->SignalStrength * 10;
- priv->UndercorateSmoothedRxPower = ((priv->UndercorateSmoothedRxPower * 50) + (priv->RxPower * 11)) / 60;
+ priv->UndercorateSmoothedRxPower = ((priv->UndercorateSmoothedRxPower * 50) +
+ (priv->RxPower * 11)) / 60;
if (bCckRate)
priv->CurCCKRSSI = priv->RSSI;
@@ -1607,43 +1608,50 @@ void rtl8180_rx(struct net_device *dev)
/* printk("==========================>rx : RXAGC is %d,signalstrength is %d\n",RXAGC,stats.signalstrength); */
stats.rssi = priv->wstats.qual.qual = priv->SignalQuality;
stats.noise = priv->wstats.qual.noise = 100 - priv->wstats.qual.qual;
- bHwError = (((*(priv->rxringtail)) & (0x00000fff)) == 4080) | (((*(priv->rxringtail)) & (0x04000000)) != 0)
- | (((*(priv->rxringtail)) & (0x08000000)) != 0) | (((~(*(priv->rxringtail))) & (0x10000000)) != 0) | (((~(*(priv->rxringtail))) & (0x20000000)) != 0);
+ bHwError = (((*(priv->rxringtail)) & (0x00000fff)) == 4080) |
+ (((*(priv->rxringtail)) & (0x04000000)) != 0) |
+ (((*(priv->rxringtail)) & (0x08000000)) != 0) |
+ (((~(*(priv->rxringtail))) & (0x10000000)) != 0) |
+ (((~(*(priv->rxringtail))) & (0x20000000)) != 0);
bCRC = ((*(priv->rxringtail)) & (0x00002000)) >> 13;
bICV = ((*(priv->rxringtail)) & (0x00001000)) >> 12;
hdr = (struct ieee80211_hdr_4addr *)priv->rxbuffer->buf;
fc = le16_to_cpu(hdr->frame_ctl);
type = WLAN_FC_GET_TYPE(fc);
- if ((IEEE80211_FTYPE_CTL != type) &&
- (eqMacAddr(priv->ieee80211->current_network.bssid, (fc & IEEE80211_FCTL_TODS) ? hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : hdr->addr3))
- && (!bHwError) && (!bCRC) && (!bICV)) {
- /* Perform signal smoothing for dynamic
- * mechanism on demand. This is different
- * with PerformSignalSmoothing8185 in smoothing
- * fomula. No dramatic adjustion is apply
- * because dynamic mechanism need some degree
- * of correctness. */
- PerformUndecoratedSignalSmoothing8185(priv, bCckRate);
-
- /* For good-looking singal strength. */
- SignalStrengthIndex = NetgearSignalStrengthTranslate(
- priv->LastSignalStrengthInPercent,
- priv->SignalStrength);
-
- priv->LastSignalStrengthInPercent = SignalStrengthIndex;
- priv->Stats_SignalStrength = TranslateToDbm8185((u8)SignalStrengthIndex);
+ if (IEEE80211_FTYPE_CTL != type &&
+ !bHwError && !bCRC && !bICV &&
+ eqMacAddr(priv->ieee80211->current_network.bssid,
+ fc & IEEE80211_FCTL_TODS ? hdr->addr1 :
+ fc & IEEE80211_FCTL_FROMDS ? hdr->addr2 :
+ hdr->addr3)) {
+
+			/* Perform signal smoothing for the dynamic
+			 * mechanism on demand. This differs from
+			 * PerformSignalSmoothing8185 in the smoothing
+			 * formula. No dramatic adjustment is applied
+			 * because the dynamic mechanism needs some
+			 * degree of correctness. */
+ PerformUndecoratedSignalSmoothing8185(priv, bCckRate);
+
+			/* For good-looking signal strength. */
+ SignalStrengthIndex = NetgearSignalStrengthTranslate(
+ priv->LastSignalStrengthInPercent,
+ priv->SignalStrength);
+
+ priv->LastSignalStrengthInPercent = SignalStrengthIndex;
+ priv->Stats_SignalStrength = TranslateToDbm8185((u8)SignalStrengthIndex);
/*
* We need more correct power of received packets and the "SignalStrength" of RxStats is beautified,
* so we record the correct power here.
*/
- priv->Stats_SignalQuality = (long)(priv->Stats_SignalQuality * 5 + (long)priv->SignalQuality + 5) / 6;
- priv->Stats_RecvSignalPower = (long)(priv->Stats_RecvSignalPower * 5 + priv->RecvSignalPower - 1) / 6;
+ priv->Stats_SignalQuality = (long)(priv->Stats_SignalQuality * 5 + (long)priv->SignalQuality + 5) / 6;
+ priv->Stats_RecvSignalPower = (long)(priv->Stats_RecvSignalPower * 5 + priv->RecvSignalPower - 1) / 6;
/* Figure out which antenna that received the lasted packet. */
- priv->LastRxPktAntenna = Antenna ? 1 : 0; /* 0: aux, 1: main. */
- SwAntennaDiversityRxOk8185(dev, priv->SignalStrength);
- }
+ priv->LastRxPktAntenna = Antenna ? 1 : 0; /* 0: aux, 1: main. */
+ SwAntennaDiversityRxOk8185(dev, priv->SignalStrength);
+ }
if (first) {
if (!priv->rx_skb_complete) {
@@ -1654,7 +1662,7 @@ void rtl8180_rx(struct net_device *dev)
}
/* support for prism header has been originally added by Christian */
if (priv->prism_hdr && priv->ieee80211->iw_mode == IW_MODE_MONITOR) {
-
+
} else {
priv->rx_skb = dev_alloc_skb(len+2);
if (!priv->rx_skb)
@@ -1766,7 +1774,7 @@ void rtl8180_data_hard_resume(struct net_device *dev)
rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
}
-/*
+/*
* This function TX data frames when the ieee80211 stack requires this.
* It checks also if we need to stop the ieee tx queue, eventually do it
*/
@@ -1810,7 +1818,7 @@ rate) {
spin_unlock_irqrestore(&priv->tx_lock, flags);
}
-/*
+/*
* This is a rough attempt to TX a frame
* This is called by the ieee 80211 stack to TX management frames.
* If the ring is full packet are dropped (for data frame the queue
@@ -1916,7 +1924,7 @@ void rtl8180_prepare_beacon(struct net_device *dev)
}
}
-/*
+/*
* This function do the real dirty work: it enqueues a TX command
* descriptor in the ring buffer, copyes the frame in a TX buffer
* and kicks the NIC to ensure it does the DMA transfer.
@@ -2002,7 +2010,8 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
bRTSEnable = 0;
bCTSEnable = 0;
- ThisFrameTime = ComputeTxTime(len + sCrcLng, rtl8180_rate2rate(rate), 0, bUseShortPreamble);
+ ThisFrameTime = ComputeTxTime(len + sCrcLng, rtl8180_rate2rate(rate),
+ 0, bUseShortPreamble);
TxDescDuration = ThisFrameTime;
} else { /* Unicast packet */
u16 AckTime;
@@ -2040,7 +2049,8 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
bRTSEnable = 0;
RtsDur = 0;
- ThisFrameTime = ComputeTxTime(len + sCrcLng, rtl8180_rate2rate(rate), 0, bUseShortPreamble);
+ ThisFrameTime = ComputeTxTime(len + sCrcLng, rtl8180_rate2rate(rate),
+ 0, bUseShortPreamble);
TxDescDuration = ThisFrameTime + aSifsTime + AckTime;
}
@@ -2184,7 +2194,7 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
priv->txhpbufstail = buflist;
break;
case BEACON_PRIORITY:
- /*
+ /*
* The HW seems to be happy with the 1st
* descriptor filled and the 2nd empty...
* So always update descriptor 1 and never
@@ -2304,13 +2314,13 @@ void rtl8180_hw_sleep(struct net_device *dev, u32 th, u32 tl)
spin_lock_irqsave(&priv->ps_lock, flags);
- /*
+ /*
* Writing HW register with 0 equals to disable
* the timer, that is not really what we want
*/
tl -= MSECS(4+16+7);
- /*
+ /*
* If the interval in witch we are requested to sleep is too
* short then give up and remain awake
*/
@@ -2325,10 +2335,10 @@ void rtl8180_hw_sleep(struct net_device *dev, u32 th, u32 tl)
u32 tmp = (tl > rb) ? (tl-rb) : (rb-tl);
priv->DozePeriodInPast2Sec += jiffies_to_msecs(tmp);
-
- queue_delayed_work(priv->ieee80211->wq, &priv->ieee80211->hw_wakeup_wq, tmp); /* as tl may be less than rb */
+ /* as tl may be less than rb */
+ queue_delayed_work(priv->ieee80211->wq, &priv->ieee80211->hw_wakeup_wq, tmp);
}
- /*
+ /*
* If we suspect the TimerInt is gone beyond tl
* while setting it, then give up
*/
@@ -3086,7 +3096,8 @@ void rtl8185_set_rate(struct net_device *dev)
max_rr_rate = ieeerate2rtlrate(240);
write_nic_byte(dev, RESP_RATE,
- max_rr_rate<<MAX_RESP_RATE_SHIFT | min_rr_rate<<MIN_RESP_RATE_SHIFT);
+ max_rr_rate<<MAX_RESP_RATE_SHIFT |
+ min_rr_rate<<MIN_RESP_RATE_SHIFT);
word = read_nic_word(dev, BRSR);
word &= ~BRSR_MBR_8185;
@@ -3168,7 +3179,7 @@ void rtl8180_adapter_start(struct net_device *dev)
netif_start_queue(dev);
}
-/*
+/*
* This configures registers for beacon tx and enables it via
* rtl8180_beacon_tx_enable(). rtl8180_beacon_tx_disable() might
* be used to stop beacon transmission
@@ -3227,7 +3238,8 @@ void LeisurePSEnter(struct r8180_priv *priv)
{
if (priv->bLeisurePs) {
if (priv->ieee80211->ps == IEEE80211_PS_DISABLED)
- MgntActSet_802_11_PowerSaveMode(priv, IEEE80211_PS_MBCAST|IEEE80211_PS_UNICAST); /* IEEE80211_PS_ENABLE */
+ /* IEEE80211_PS_ENABLE */
+ MgntActSet_802_11_PowerSaveMode(priv, IEEE80211_PS_MBCAST|IEEE80211_PS_UNICAST);
}
}
@@ -3299,7 +3311,10 @@ void rtl8180_watch_dog(struct net_device *dev)
u16 SlotIndex = 0;
u16 i = 0;
if (priv->ieee80211->actscanning == false) {
- if ((priv->ieee80211->iw_mode != IW_MODE_ADHOC) && (priv->ieee80211->state == IEEE80211_NOLINK) && (priv->ieee80211->beinretry == false) && (priv->eRFPowerState == eRfOn))
+ if ((priv->ieee80211->iw_mode != IW_MODE_ADHOC) &&
+ (priv->ieee80211->state == IEEE80211_NOLINK) &&
+ (priv->ieee80211->beinretry == false) &&
+ (priv->eRFPowerState == eRfOn))
IPSEnter(dev);
}
/* YJ,add,080828,for link state check */
@@ -3732,7 +3747,7 @@ static int __init rtl8180_pci_module_init(void)
DMESG("Wireless extensions version %d", WIRELESS_EXT);
rtl8180_proc_module_init();
- if (pci_register_driver(&rtl8180_pci_driver)) {
+ if (pci_register_driver(&rtl8180_pci_driver)) {
DMESG("No device found");
return -ENODEV;
}
@@ -3839,7 +3854,7 @@ void rtl8180_tx_isr(struct net_device *dev, int pri, short error)
return;
}
- /*
+ /*
* We check all the descriptors between the head and the nic,
* but not the currently pointed by the nic (the next to be txed)
* and the previous of the pointed (might be in process ??)
@@ -3877,7 +3892,7 @@ void rtl8180_tx_isr(struct net_device *dev, int pri, short error)
head += 8;
}
- /*
+ /*
* The head has been moved to the last certainly TXed
* (or at least processed by the nic) packet.
* The driver take forcefully owning of all these packets
diff --git a/drivers/staging/rtl8187se/r8180_dm.c b/drivers/staging/rtl8187se/r8180_dm.c
index 261085d4b74a..4d7a5951486e 100644
--- a/drivers/staging/rtl8187se/r8180_dm.c
+++ b/drivers/staging/rtl8187se/r8180_dm.c
@@ -1,14 +1,8 @@
-//#include "r8180.h"
#include "r8180_dm.h"
#include "r8180_hw.h"
#include "r8180_93cx6.h"
-//{by amy 080312
-//
-// Description:
-// Return TRUE if we shall perform High Power Mecahnism, FALSE otherwise.
-//
-//+by amy 080312
+/* Return TRUE if we shall perform the High Power Mechanism, FALSE otherwise. */
#define RATE_ADAPTIVE_TIMER_PERIOD 300
bool CheckHighPower(struct net_device *dev)
@@ -17,33 +11,26 @@ bool CheckHighPower(struct net_device *dev)
struct ieee80211_device *ieee = priv->ieee80211;
if(!priv->bRegHighPowerMechanism)
- {
return false;
- }
if(ieee->state == IEEE80211_LINKED_SCANNING)
- {
return false;
- }
return true;
}
-//
-// Description:
-// Update Tx power level if necessary.
-// See also DoRxHighPower() and SetTxPowerLevel8185() for reference.
-//
-// Note:
-// The reason why we udpate Tx power level here instead of DoRxHighPower()
-// is the number of IO to change Tx power is much more than channel TR switch
-// and they are related to OFDM and MAC registers.
-// So, we don't want to update it so frequently in per-Rx packet base.
-//
-void
-DoTxHighPower(
- struct net_device *dev
- )
+/*
+ * Description:
+ * Update Tx power level if necessary.
+ * See also DoRxHighPower() and SetTxPowerLevel8185() for reference.
+ *
+ * Note:
+ * The reason why we update the Tx power level here instead of in
+ * DoRxHighPower() is that changing Tx power requires many more IO
+ * operations than a channel TR switch, and they touch OFDM and MAC
+ * registers. So we don't want to update it on a per-Rx-packet basis.
+ */
+void DoTxHighPower(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u16 HiPwrUpperTh = 0;
@@ -53,8 +40,6 @@ DoTxHighPower(
u8 u1bTmp;
char OfdmTxPwrIdx, CckTxPwrIdx;
- //printk("----> DoTxHighPower()\n");
-
HiPwrUpperTh = priv->RegHiPwrUpperTh;
HiPwrLowerTh = priv->RegHiPwrLowerTh;
@@ -63,526 +48,411 @@ DoTxHighPower(
RSSIHiPwrUpperTh = priv->RegRSSIHiPwrUpperTh;
RSSIHiPwrLowerTh = priv->RegRSSIHiPwrLowerTh;
- //lzm add 080826
+ /* lzm add 080826 */
OfdmTxPwrIdx = priv->chtxpwr_ofdm[priv->ieee80211->current_network.channel];
CckTxPwrIdx = priv->chtxpwr[priv->ieee80211->current_network.channel];
- // printk("DoTxHighPower() - UndecoratedSmoothedSS:%d, CurCCKRSSI = %d , bCurCCKPkt= %d \n", priv->UndecoratedSmoothedSS, priv->CurCCKRSSI, priv->bCurCCKPkt );
+ if ((priv->UndecoratedSmoothedSS > HiPwrUpperTh) ||
+ (priv->bCurCCKPkt && (priv->CurCCKRSSI > RSSIHiPwrUpperTh))) {
+		/* Stevenl suggested degrading by 8 dBm in the high power state. 2007-12-04 Isaiah */
- if((priv->UndecoratedSmoothedSS > HiPwrUpperTh) ||
- (priv->bCurCCKPkt && (priv->CurCCKRSSI > RSSIHiPwrUpperTh)))
- {
- // Stevenl suggested that degrade 8dbm in high power sate. 2007-12-04 Isaiah
-
- // printk("=====>DoTxHighPower() - High Power - UndecoratedSmoothedSS:%d, HiPwrUpperTh = %d \n", priv->UndecoratedSmoothedSS, HiPwrUpperTh );
priv->bToUpdateTxPwr = true;
u1bTmp= read_nic_byte(dev, CCK_TXAGC);
- // If it never enter High Power.
- if( CckTxPwrIdx == u1bTmp)
- {
- u1bTmp = (u1bTmp > 16) ? (u1bTmp -16): 0; // 8dbm
- write_nic_byte(dev, CCK_TXAGC, u1bTmp);
+		/* If it never entered High Power. */
+		if (CckTxPwrIdx == u1bTmp) {
+			u1bTmp = (u1bTmp > 16) ? (u1bTmp - 16) : 0; /* 8 dBm */
+			write_nic_byte(dev, CCK_TXAGC, u1bTmp);
- u1bTmp= read_nic_byte(dev, OFDM_TXAGC);
- u1bTmp = (u1bTmp > 16) ? (u1bTmp -16): 0; // 8dbm
- write_nic_byte(dev, OFDM_TXAGC, u1bTmp);
+			u1bTmp = read_nic_byte(dev, OFDM_TXAGC);
+			u1bTmp = (u1bTmp > 16) ? (u1bTmp - 16) : 0; /* 8 dBm */
+			write_nic_byte(dev, OFDM_TXAGC, u1bTmp);
}
- }
- else if((priv->UndecoratedSmoothedSS < HiPwrLowerTh) &&
- (!priv->bCurCCKPkt || priv->CurCCKRSSI < RSSIHiPwrLowerTh))
- {
- // printk("DoTxHighPower() - lower Power - UndecoratedSmoothedSS:%d, HiPwrUpperTh = %d \n", priv->UndecoratedSmoothedSS, HiPwrLowerTh );
- if(priv->bToUpdateTxPwr)
- {
+ } else if ((priv->UndecoratedSmoothedSS < HiPwrLowerTh) &&
+ (!priv->bCurCCKPkt || priv->CurCCKRSSI < RSSIHiPwrLowerTh)) {
+ if (priv->bToUpdateTxPwr) {
priv->bToUpdateTxPwr = false;
- //SD3 required.
+ /* SD3 required. */
u1bTmp= read_nic_byte(dev, CCK_TXAGC);
- if(u1bTmp < CckTxPwrIdx)
- {
- //u1bTmp = ((u1bTmp+16) > 35) ? 35: (u1bTmp+16); // 8dbm
- //write_nic_byte(dev, CCK_TXAGC, u1bTmp);
- write_nic_byte(dev, CCK_TXAGC, CckTxPwrIdx);
+ if (u1bTmp < CckTxPwrIdx) {
+ write_nic_byte(dev, CCK_TXAGC, CckTxPwrIdx);
}
u1bTmp= read_nic_byte(dev, OFDM_TXAGC);
- if(u1bTmp < OfdmTxPwrIdx)
- {
- //u1bTmp = ((u1bTmp+16) > 35) ? 35: (u1bTmp+16); // 8dbm
- //write_nic_byte(dev, OFDM_TXAGC, u1bTmp);
- write_nic_byte(dev, OFDM_TXAGC, OfdmTxPwrIdx);
+ if (u1bTmp < OfdmTxPwrIdx) {
+ write_nic_byte(dev, OFDM_TXAGC, OfdmTxPwrIdx);
}
}
}
-
- //printk("<---- DoTxHighPower()\n");
}
-//
-// Description:
-// Callback function of UpdateTxPowerWorkItem.
-// Because of some event happened, e.g. CCX TPC, High Power Mechanism,
-// We update Tx power of current channel again.
-//
-void rtl8180_tx_pw_wq (struct work_struct *work)
+/*
+ * Description:
+ * Callback function of UpdateTxPowerWorkItem.
+ * Because of some event happened, e.g. CCX TPC, High Power Mechanism,
+ * We update Tx power of current channel again.
+ */
+void rtl8180_tx_pw_wq(struct work_struct *work)
{
-// struct r8180_priv *priv = container_of(work, struct r8180_priv, watch_dog_wq);
-// struct ieee80211_device * ieee = (struct ieee80211_device*)
-// container_of(work, struct ieee80211_device, watch_dog_wq);
struct delayed_work *dwork = to_delayed_work(work);
- struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,tx_pw_wq);
- struct net_device *dev = ieee->dev;
-
-// printk("----> UpdateTxPowerWorkItemCallback()\n");
+	struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, tx_pw_wq);
+ struct net_device *dev = ieee->dev;
DoTxHighPower(dev);
-
-// printk("<---- UpdateTxPowerWorkItemCallback()\n");
}
-//
-// Description:
-// Return TRUE if we shall perform DIG Mecahnism, FALSE otherwise.
-//
-bool
-CheckDig(
- struct net_device *dev
- )
+/*
+ * Return TRUE if we shall perform the DIG Mechanism, FALSE otherwise.
+ */
+bool CheckDig(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee80211;
- if(!priv->bDigMechanism)
+ if (!priv->bDigMechanism)
return false;
- if(ieee->state != IEEE80211_LINKED)
+ if (ieee->state != IEEE80211_LINKED)
return false;
- //if(priv->CurrentOperaRate < 36) // Schedule Dig under all OFDM rates. By Bruce, 2007-06-01.
- if((priv->ieee80211->rate/5) < 36) // Schedule Dig under all OFDM rates. By Bruce, 2007-06-01.
+ if ((priv->ieee80211->rate / 5) < 36) /* Schedule Dig under all OFDM rates. By Bruce, 2007-06-01. */
return false;
return true;
}
-//
-// Description:
-// Implementation of DIG for Zebra and Zebra2.
-//
-void
-DIG_Zebra(
- struct net_device *dev
- )
+/*
+ * Implementation of DIG for Zebra and Zebra2.
+ */
+void DIG_Zebra(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u16 CCKFalseAlarm, OFDMFalseAlarm;
u16 OfdmFA1, OfdmFA2;
- int InitialGainStep = 7; // The number of initial gain stages.
- int LowestGainStage = 4; // The capable lowest stage of performing dig workitem.
- u32 AwakePeriodIn2Sec=0;
-
- //printk("---------> DIG_Zebra()\n");
+ int InitialGainStep = 7; /* The number of initial gain stages. */
+	int LowestGainStage = 4; /* The lowest stage to which the dig workitem may drop the gain. */
+ u32 AwakePeriodIn2Sec = 0;
CCKFalseAlarm = (u16)(priv->FalseAlarmRegValue & 0x0000ffff);
OFDMFalseAlarm = (u16)((priv->FalseAlarmRegValue >> 16) & 0x0000ffff);
OfdmFA1 = 0x15;
OfdmFA2 = ((u16)(priv->RegDigOfdmFaUpTh)) << 8;
-// printk("DIG**********CCK False Alarm: %#X \n",CCKFalseAlarm);
-// printk("DIG**********OFDM False Alarm: %#X \n",OFDMFalseAlarm);
-
- // The number of initial gain steps is different, by Bruce, 2007-04-13.
- if (priv->InitialGain == 0 ) //autoDIG
- { // Advised from SD3 DZ
- priv->InitialGain = 4; // In 87B, m74dBm means State 4 (m82dBm)
- }
- { // Advised from SD3 DZ
- OfdmFA1 = 0x20;
+ /* The number of initial gain steps is different, by Bruce, 2007-04-13. */
+ if (priv->InitialGain == 0) { /* autoDIG */
+ /* Advised from SD3 DZ */
+ priv->InitialGain = 4; /* In 87B, m74dBm means State 4 (m82dBm) */
}
-
-#if 1 //lzm reserved 080826
- AwakePeriodIn2Sec = (2000-priv ->DozePeriodInPast2Sec);
- //printk("&&& DozePeriod=%d AwakePeriod=%d\n", priv->DozePeriodInPast2Sec, AwakePeriodIn2Sec);
- priv ->DozePeriodInPast2Sec=0;
-
- if(AwakePeriodIn2Sec)
- {
- //RT_TRACE(COMP_DIG, DBG_TRACE, ("DIG: AwakePeriodIn2Sec(%d) - FATh(0x%X , 0x%X) ->",AwakePeriodIn2Sec, OfdmFA1, OfdmFA2));
- // adjuest DIG threshold.
- OfdmFA1 = (u16)((OfdmFA1*AwakePeriodIn2Sec) / 2000) ;
- OfdmFA2 = (u16)((OfdmFA2*AwakePeriodIn2Sec) / 2000) ;
- //RT_TRACE(COMP_DIG, DBG_TRACE, ("( 0x%X , 0x%X)\n", OfdmFA1, OfdmFA2));
- }
- else
- {
- ;//RT_TRACE(COMP_DIG, DBG_WARNING, ("ERROR!! AwakePeriodIn2Sec should not be ZERO!!\n"));
+ /* Advised from SD3 DZ */
+ OfdmFA1 = 0x20;
+
+#if 1 /* lzm reserved 080826 */
+ AwakePeriodIn2Sec = (2000 - priv->DozePeriodInPast2Sec);
+ priv ->DozePeriodInPast2Sec = 0;
+
+ if (AwakePeriodIn2Sec) {
+ OfdmFA1 = (u16)((OfdmFA1 * AwakePeriodIn2Sec) / 2000) ;
+ OfdmFA2 = (u16)((OfdmFA2 * AwakePeriodIn2Sec) / 2000) ;
}
#endif
InitialGainStep = 8;
- LowestGainStage = priv->RegBModeGainStage; // Lowest gain stage.
+ LowestGainStage = priv->RegBModeGainStage; /* Lowest gain stage. */
- if (OFDMFalseAlarm > OfdmFA1)
- {
- if (OFDMFalseAlarm > OfdmFA2)
- {
+ if (OFDMFalseAlarm > OfdmFA1) {
+ if (OFDMFalseAlarm > OfdmFA2) {
priv->DIG_NumberFallbackVote++;
- if (priv->DIG_NumberFallbackVote >1)
- {
- //serious OFDM False Alarm, need fallback
- if (priv->InitialGain < InitialGainStep)
- {
- priv->InitialGainBackUp= priv->InitialGain;
+ if (priv->DIG_NumberFallbackVote > 1) {
+ /* serious OFDM False Alarm, need fallback */
+ if (priv->InitialGain < InitialGainStep) {
+ priv->InitialGainBackUp = priv->InitialGain;
priv->InitialGain = (priv->InitialGain + 1);
-// printk("DIG**********OFDM False Alarm: %#X, OfdmFA1: %#X, OfdmFA2: %#X\n", OFDMFalseAlarm, OfdmFA1, OfdmFA2);
-// printk("DIG+++++++ fallback OFDM:%d \n", priv->InitialGain);
UpdateInitialGain(dev);
}
priv->DIG_NumberFallbackVote = 0;
- priv->DIG_NumberUpgradeVote=0;
+ priv->DIG_NumberUpgradeVote = 0;
}
- }
- else
- {
+ } else {
if (priv->DIG_NumberFallbackVote)
priv->DIG_NumberFallbackVote--;
}
- priv->DIG_NumberUpgradeVote=0;
- }
- else
- {
+ priv->DIG_NumberUpgradeVote = 0;
+ } else {
if (priv->DIG_NumberFallbackVote)
priv->DIG_NumberFallbackVote--;
priv->DIG_NumberUpgradeVote++;
- if (priv->DIG_NumberUpgradeVote>9)
- {
- if (priv->InitialGain > LowestGainStage) // In 87B, m78dBm means State 4 (m864dBm)
- {
- priv->InitialGainBackUp= priv->InitialGain;
+ if (priv->DIG_NumberUpgradeVote > 9) {
+ if (priv->InitialGain > LowestGainStage) { /* In 87B, m78dBm means State 4 (m864dBm) */
+ priv->InitialGainBackUp = priv->InitialGain;
priv->InitialGain = (priv->InitialGain - 1);
-// printk("DIG**********OFDM False Alarm: %#X, OfdmFA1: %#X, OfdmFA2: %#X\n", OFDMFalseAlarm, OfdmFA1, OfdmFA2);
-// printk("DIG--------- Upgrade OFDM:%d \n", priv->InitialGain);
UpdateInitialGain(dev);
}
priv->DIG_NumberFallbackVote = 0;
- priv->DIG_NumberUpgradeVote=0;
+ priv->DIG_NumberUpgradeVote = 0;
}
}
-
-// printk("DIG+++++++ OFDM:%d\n", priv->InitialGain);
- //printk("<--------- DIG_Zebra()\n");
}
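
The awake-time scaling above simply prorates the two OFDM false-alarm thresholds by the fraction of the past two seconds the NIC was actually awake, so a mostly-dozing NIC is not held to a full-time alarm budget. A sketch of the proration (illustrative only; the helper name is hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	/* Prorate a false-alarm threshold by the awake fraction of the last
	 * 2000 ms, as DIG_Zebra() does before comparing the counters. */
	static uint16_t scale_fa_threshold(uint16_t th, uint32_t awake_ms)
	{
		return (uint16_t)((th * awake_ms) / 2000);
	}

	int main(void)
	{
		/* Awake for 1500 of the last 2000 ms: 0x20 (32) scales to 24. */
		printf("%u\n", scale_fa_threshold(0x20, 1500));
		return 0;
	}
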
-//
-// Description:
-// Dispatch DIG implementation according to RF.
-//
-void
-DynamicInitGain(struct net_device *dev)
+/*
+ * Dispatch DIG implementation according to RF.
+ */
+void DynamicInitGain(struct net_device *dev)
{
DIG_Zebra(dev);
}
-void rtl8180_hw_dig_wq (struct work_struct *work)
+void rtl8180_hw_dig_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,hw_dig_wq);
- struct net_device *dev = ieee->dev;
+	struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, hw_dig_wq);
+ struct net_device *dev = ieee->dev;
struct r8180_priv *priv = ieee80211_priv(dev);
- // Read CCK and OFDM False Alarm.
+ /* Read CCK and OFDM False Alarm. */
priv->FalseAlarmRegValue = read_nic_dword(dev, CCK_FALSE_ALARM);
- // Adjust Initial Gain dynamically.
+ /* Adjust Initial Gain dynamically. */
DynamicInitGain(dev);
}
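
The dword read above packs both false-alarm counters into one register: DIG_Zebra() takes the low 16 bits as the CCK count and the high 16 bits as the OFDM count. A sketch of the unpacking with illustrative values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* CCK_FALSE_ALARM packs both counters into one 32-bit register:
		 * low half CCK, high half OFDM (as unpacked in DIG_Zebra()). */
		uint32_t reg = 0x00150203;
		uint16_t cck  = (uint16_t)(reg & 0x0000ffff);
		uint16_t ofdm = (uint16_t)((reg >> 16) & 0x0000ffff);

		printf("cck=%u ofdm=%u\n", cck, ofdm);	/* cck=515 ofdm=21 */
		return 0;
	}
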
-int
-IncludedInSupportedRates(
- struct r8180_priv *priv,
- u8 TxRate )
+int IncludedInSupportedRates(struct r8180_priv *priv, u8 TxRate)
{
- u8 rate_len;
- u8 rate_ex_len;
- u8 RateMask = 0x7F;
- u8 idx;
- unsigned short Found = 0;
- u8 NaiveTxRate = TxRate&RateMask;
-
- rate_len = priv->ieee80211->current_network.rates_len;
- rate_ex_len = priv->ieee80211->current_network.rates_ex_len;
- for( idx=0; idx< rate_len; idx++ )
- {
- if( (priv->ieee80211->current_network.rates[idx] & RateMask) == NaiveTxRate )
- {
- Found = 1;
- goto found_rate;
- }
- }
- for( idx=0; idx< rate_ex_len; idx++ )
- {
- if( (priv->ieee80211->current_network.rates_ex[idx] & RateMask) == NaiveTxRate )
- {
- Found = 1;
- goto found_rate;
- }
- }
- return Found;
- found_rate:
- return Found;
+ u8 rate_len;
+ u8 rate_ex_len;
+ u8 RateMask = 0x7F;
+ u8 idx;
+ unsigned short Found = 0;
+	u8 NaiveTxRate = TxRate & RateMask;
+
+ rate_len = priv->ieee80211->current_network.rates_len;
+ rate_ex_len = priv->ieee80211->current_network.rates_ex_len;
+	for (idx = 0; idx < rate_len; idx++) {
+ if ((priv->ieee80211->current_network.rates[idx] & RateMask) == NaiveTxRate) {
+ Found = 1;
+ goto found_rate;
+ }
+ }
+ for (idx = 0; idx < rate_ex_len; idx++) {
+ if ((priv->ieee80211->current_network.rates_ex[idx] & RateMask) == NaiveTxRate) {
+ Found = 1;
+ goto found_rate;
+ }
+ }
+ return Found;
+ found_rate:
+ return Found;
}
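
The 0x7F mask strips the top bit of each supported-rates octet, which 802.11 uses to flag basic (mandatory) rates, so a basic 11 Mbps entry (0x96) matches a plain one (0x16). An illustrative check:

	#include <stdio.h>

	int main(void)
	{
		unsigned char basic_11mbps = 0x96;	/* 11 Mbps with basic-rate bit set */
		unsigned char plain_11mbps = 0x16;	/* 11 Mbps = 22 in 500 kb/s units */

		/* Masking with 0x7F makes the comparison ignore the basic-rate flag. */
		printf("%d\n", (basic_11mbps & 0x7F) == (plain_11mbps & 0x7F));	/* 1 */
		return 0;
	}
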
-//
-// Description:
-// Get the Tx rate one degree up form the input rate in the supported rates.
-// Return the upgrade rate if it is successed, otherwise return the input rate.
-// By Bruce, 2007-06-05.
-//
-u8
-GetUpgradeTxRate(
- struct net_device *dev,
- u8 rate
- )
+/*
+ * Get the Tx rate one degree up from the input rate in the supported rates.
+ * Return the upgraded rate if it succeeds, otherwise return the input rate.
+ */
+u8 GetUpgradeTxRate(struct net_device *dev, u8 rate)
{
- struct r8180_priv *priv = ieee80211_priv(dev);
- u8 UpRate;
-
- // Upgrade 1 degree.
- switch(rate)
- {
- case 108: // Up to 54Mbps.
- UpRate = 108;
- break;
-
- case 96: // Up to 54Mbps.
- UpRate = 108;
- break;
-
- case 72: // Up to 48Mbps.
- UpRate = 96;
- break;
-
- case 48: // Up to 36Mbps.
- UpRate = 72;
- break;
-
- case 36: // Up to 24Mbps.
- UpRate = 48;
- break;
-
- case 22: // Up to 18Mbps.
- UpRate = 36;
- break;
-
- case 11: // Up to 11Mbps.
- UpRate = 22;
- break;
-
- case 4: // Up to 5.5Mbps.
- UpRate = 11;
- break;
-
- case 2: // Up to 2Mbps.
- UpRate = 4;
- break;
-
- default:
- printk("GetUpgradeTxRate(): Input Tx Rate(%d) is undefined!\n", rate);
- return rate;
- }
- // Check if the rate is valid.
- if(IncludedInSupportedRates(priv, UpRate))
- {
-// printk("GetUpgradeTxRate(): GetUpgrade Tx rate(%d) from %d !\n", UpRate, priv->CurrentOperaRate);
- return UpRate;
- }
- else
- {
- //printk("GetUpgradeTxRate(): Tx rate (%d) is not in supported rates\n", UpRate);
- return rate;
- }
- return rate;
+ struct r8180_priv *priv = ieee80211_priv(dev);
+ u8 UpRate;
+
+ /* Upgrade 1 degree. */
+ switch (rate) {
+ case 108: /* Up to 54Mbps. */
+ UpRate = 108;
+ break;
+
+ case 96: /* Up to 54Mbps. */
+ UpRate = 108;
+ break;
+
+ case 72: /* Up to 48Mbps. */
+ UpRate = 96;
+ break;
+
+ case 48: /* Up to 36Mbps. */
+ UpRate = 72;
+ break;
+
+ case 36: /* Up to 24Mbps. */
+ UpRate = 48;
+ break;
+
+ case 22: /* Up to 18Mbps. */
+ UpRate = 36;
+ break;
+
+ case 11: /* Up to 11Mbps. */
+ UpRate = 22;
+ break;
+
+ case 4: /* Up to 5.5Mbps. */
+ UpRate = 11;
+ break;
+
+ case 2: /* Up to 2Mbps. */
+ UpRate = 4;
+ break;
+
+ default:
+ printk("GetUpgradeTxRate(): Input Tx Rate(%d) is undefined!\n", rate);
+ return rate;
+ }
+ /* Check if the rate is valid. */
+	if (IncludedInSupportedRates(priv, UpRate))
+		return UpRate;
+	return rate;
}
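
The switch above walks a fixed ladder in 500 kb/s units (2, 4, 11, 22, 36, 48, 72, 96, 108); GetDegradeTxRate() below is its mirror image. A table-driven sketch of the same upgrade step (illustrative only, not the driver's implementation):

	#include <stdio.h>

	static const unsigned char ladder[] = { 2, 4, 11, 22, 36, 48, 72, 96, 108 };
	#define LADDER_LEN (sizeof(ladder) / sizeof(ladder[0]))

	/* Return the next higher rung, or the input if it is already at the
	 * top or not on the ladder at all. */
	static unsigned char upgrade_rate(unsigned char rate)
	{
		unsigned int i;

		for (i = 0; i + 1 < LADDER_LEN; i++)
			if (ladder[i] == rate)
				return ladder[i + 1];
		return rate;
	}

	int main(void)
	{
		printf("%u\n", upgrade_rate(22));	/* 11 Mbps -> 18 Mbps (36) */
		printf("%u\n", upgrade_rate(108));	/* already 54 Mbps -> 108 */
		return 0;
	}

A shared table would also keep the upgrade and degrade ladders trivially consistent with each other.
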
-//
-// Description:
-// Get the Tx rate one degree down form the input rate in the supported rates.
-// Return the degrade rate if it is successed, otherwise return the input rate.
-// By Bruce, 2007-06-05.
-//
-u8
-GetDegradeTxRate(
- struct net_device *dev,
- u8 rate
- )
+/*
+ * Get the Tx rate one degree down from the input rate in the supported rates.
+ * Return the degraded rate if it succeeds, otherwise return the input rate.
+ */
+
+u8 GetDegradeTxRate(struct net_device *dev, u8 rate)
{
- struct r8180_priv *priv = ieee80211_priv(dev);
- u8 DownRate;
-
- // Upgrade 1 degree.
- switch(rate)
- {
- case 108: // Down to 48Mbps.
- DownRate = 96;
- break;
-
- case 96: // Down to 36Mbps.
- DownRate = 72;
- break;
-
- case 72: // Down to 24Mbps.
- DownRate = 48;
- break;
-
- case 48: // Down to 18Mbps.
- DownRate = 36;
- break;
-
- case 36: // Down to 11Mbps.
- DownRate = 22;
- break;
-
- case 22: // Down to 5.5Mbps.
- DownRate = 11;
- break;
-
- case 11: // Down to 2Mbps.
- DownRate = 4;
- break;
-
- case 4: // Down to 1Mbps.
- DownRate = 2;
- break;
-
- case 2: // Down to 1Mbps.
- DownRate = 2;
- break;
-
- default:
- printk("GetDegradeTxRate(): Input Tx Rate(%d) is undefined!\n", rate);
- return rate;
- }
- // Check if the rate is valid.
- if(IncludedInSupportedRates(priv, DownRate))
- {
-// printk("GetDegradeTxRate(): GetDegrade Tx rate(%d) from %d!\n", DownRate, priv->CurrentOperaRate);
- return DownRate;
- }
- else
- {
- //printk("GetDegradeTxRate(): Tx rate (%d) is not in supported rates\n", DownRate);
- return rate;
- }
- return rate;
+ struct r8180_priv *priv = ieee80211_priv(dev);
+ u8 DownRate;
+
+	/* Downgrade 1 degree. */
+ switch (rate) {
+ case 108: /* Down to 48Mbps. */
+ DownRate = 96;
+ break;
+
+ case 96: /* Down to 36Mbps. */
+ DownRate = 72;
+ break;
+
+ case 72: /* Down to 24Mbps. */
+ DownRate = 48;
+ break;
+
+ case 48: /* Down to 18Mbps. */
+ DownRate = 36;
+ break;
+
+ case 36: /* Down to 11Mbps. */
+ DownRate = 22;
+ break;
+
+ case 22: /* Down to 5.5Mbps. */
+ DownRate = 11;
+ break;
+
+ case 11: /* Down to 2Mbps. */
+ DownRate = 4;
+ break;
+
+ case 4: /* Down to 1Mbps. */
+ DownRate = 2;
+ break;
+
+ case 2: /* Down to 1Mbps. */
+ DownRate = 2;
+ break;
+
+ default:
+ printk("GetDegradeTxRate(): Input Tx Rate(%d) is undefined!\n", rate);
+ return rate;
+ }
+ /* Check if the rate is valid. */
+	if (IncludedInSupportedRates(priv, DownRate))
+		return DownRate;
+	return rate;
}
-//
-// Helper function to determine if specified data rate is
-// CCK rate.
-// 2005.01.25, by rcnjko.
-//
-bool
-MgntIsCckRate(
- u16 rate
- )
+/*
+ * Helper function to determine if specified data rate is
+ * CCK rate.
+ */
+
+bool MgntIsCckRate(u16 rate)
{
- bool bReturn = false;
+ bool bReturn = false;
- if((rate <= 22) && (rate != 12) && (rate != 18))
- {
- bReturn = true;
- }
+	if ((rate <= 22) && (rate != 12) && (rate != 18))
+		bReturn = true;
- return bReturn;
+ return bReturn;
}
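
In 500 kb/s units the CCK rates are 2, 4, 11 and 22 (1, 2, 5.5 and 11 Mbps); among valid rate codes, the range test above excludes exactly the OFDM values 12 and 18 (6/9 Mbps) that also fall at or below 22. The equivalent explicit membership test, valid for well-formed rate codes (illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	/* Membership test against the explicit CCK set. */
	static bool is_cck_rate(unsigned int rate)
	{
		return rate == 2 || rate == 4 || rate == 11 || rate == 22;
	}

	int main(void)
	{
		printf("%d %d\n", is_cck_rate(11), is_cck_rate(12));	/* 1 0 */
		return 0;
	}
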
-//
-// Description:
-// Tx Power tracking mechanism routine on 87SE.
-// Created by Roger, 2007.12.11.
-//
-void
-TxPwrTracking87SE(
- struct net_device *dev
-)
+/*
+ * Tx Power tracking mechanism routine on 87SE.
+ */
+void TxPwrTracking87SE(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
u8 tmpu1Byte, CurrentThermal, Idx;
char CckTxPwrIdx, OfdmTxPwrIdx;
- //u32 u4bRfReg;
tmpu1Byte = read_nic_byte(dev, EN_LPF_CAL);
- CurrentThermal = (tmpu1Byte & 0xf0)>>4; //[ 7:4]: thermal meter indication.
- CurrentThermal = (CurrentThermal>0x0c)? 0x0c:CurrentThermal;//lzm add 080826
-
- //printk("TxPwrTracking87SE(): CurrentThermal(%d)\n", CurrentThermal);
+	CurrentThermal = (tmpu1Byte & 0xf0) >> 4; /* [7:4]: thermal meter indication. */
+	CurrentThermal = (CurrentThermal > 0x0c) ? 0x0c : CurrentThermal; /* lzm add 080826 */
- if( CurrentThermal != priv->ThermalMeter)
- {
-// printk("TxPwrTracking87SE(): Thermal meter changed!!!\n");
-
- // Update Tx Power level on each channel.
- for(Idx = 1; Idx<15; Idx++)
- {
+ if (CurrentThermal != priv->ThermalMeter) {
+ /* Update Tx Power level on each channel. */
+ for (Idx = 1; Idx < 15; Idx++) {
CckTxPwrIdx = priv->chtxpwr[Idx];
OfdmTxPwrIdx = priv->chtxpwr_ofdm[Idx];
- if( CurrentThermal > priv->ThermalMeter )
- { // higher thermal meter.
- CckTxPwrIdx += (CurrentThermal - priv->ThermalMeter)*2;
- OfdmTxPwrIdx += (CurrentThermal - priv->ThermalMeter)*2;
+ if (CurrentThermal > priv->ThermalMeter) {
+ /* higher thermal meter. */
+ CckTxPwrIdx += (CurrentThermal - priv->ThermalMeter) * 2;
+ OfdmTxPwrIdx += (CurrentThermal - priv->ThermalMeter) * 2;
- if(CckTxPwrIdx >35)
- CckTxPwrIdx = 35; // Force TxPower to maximal index.
- if(OfdmTxPwrIdx >35)
+ if (CckTxPwrIdx > 35)
+ CckTxPwrIdx = 35; /* Force TxPower to maximal index. */
+ if (OfdmTxPwrIdx > 35)
OfdmTxPwrIdx = 35;
- }
- else
- { // lower thermal meter.
- CckTxPwrIdx -= (priv->ThermalMeter - CurrentThermal)*2;
- OfdmTxPwrIdx -= (priv->ThermalMeter - CurrentThermal)*2;
+ } else {
+ /* lower thermal meter. */
+ CckTxPwrIdx -= (priv->ThermalMeter - CurrentThermal) * 2;
+ OfdmTxPwrIdx -= (priv->ThermalMeter - CurrentThermal) * 2;
- if(CckTxPwrIdx <0)
+ if (CckTxPwrIdx < 0)
CckTxPwrIdx = 0;
- if(OfdmTxPwrIdx <0)
+ if (OfdmTxPwrIdx < 0)
OfdmTxPwrIdx = 0;
}
- // Update TxPower level on CCK and OFDM resp.
+ /* Update TxPower level on CCK and OFDM resp. */
priv->chtxpwr[Idx] = CckTxPwrIdx;
priv->chtxpwr_ofdm[Idx] = OfdmTxPwrIdx;
}
- // Update TxPower level immediately.
+ /* Update TxPower level immediately. */
rtl8225z2_SetTXPowerLevel(dev, priv->ieee80211->current_network.channel);
}
priv->ThermalMeter = CurrentThermal;
}
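
Each step of the thermal meter above shifts both per-channel power indices by two, clamped to the 0..35 index range. A standalone sketch of the per-channel adjustment (illustrative; the helper name is hypothetical):

	#include <stdio.h>

	/* Shift a Tx power index by 2 per thermal-meter step and clamp to 0..35. */
	static int track_pwr_idx(int idx, int cur_thermal, int old_thermal)
	{
		idx += (cur_thermal - old_thermal) * 2;
		if (idx > 35)
			idx = 35;
		if (idx < 0)
			idx = 0;
		return idx;
	}

	int main(void)
	{
		printf("%d\n", track_pwr_idx(30, 12, 8));	/* +8, clamped to 35 */
		printf("%d\n", track_pwr_idx(3, 6, 9));		/* -6, clamped to 0 */
		return 0;
	}
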
-void
-StaRateAdaptive87SE(
- struct net_device *dev
- )
+void StaRateAdaptive87SE(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- unsigned long CurrTxokCnt;
- u16 CurrRetryCnt;
- u16 CurrRetryRate;
- //u16 i,idx;
- unsigned long CurrRxokCnt;
- bool bTryUp = false;
- bool bTryDown = false;
- u8 TryUpTh = 1;
- u8 TryDownTh = 2;
- u32 TxThroughput;
+ unsigned long CurrTxokCnt;
+ u16 CurrRetryCnt;
+ u16 CurrRetryRate;
+ unsigned long CurrRxokCnt;
+ bool bTryUp = false;
+ bool bTryDown = false;
+ u8 TryUpTh = 1;
+ u8 TryDownTh = 2;
+ u32 TxThroughput;
long CurrSignalStrength;
bool bUpdateInitialGain = false;
- u8 u1bOfdm=0, u1bCck = 0;
+ u8 u1bOfdm = 0, u1bCck = 0;
char OfdmTxPwrIdx, CckTxPwrIdx;
- priv->RateAdaptivePeriod= RATE_ADAPTIVE_TIMER_PERIOD;
+ priv->RateAdaptivePeriod = RATE_ADAPTIVE_TIMER_PERIOD;
CurrRetryCnt = priv->CurrRetryCnt;
@@ -591,707 +461,462 @@ StaRateAdaptive87SE(
CurrSignalStrength = priv->Stats_RecvSignalPower;
TxThroughput = (u32)(priv->NumTxOkBytesTotal - priv->LastTxOKBytes);
priv->LastTxOKBytes = priv->NumTxOkBytesTotal;
- priv->CurrentOperaRate = priv->ieee80211->rate/5;
- //printk("priv->CurrentOperaRate is %d\n",priv->CurrentOperaRate);
- //2 Compute retry ratio.
- if (CurrTxokCnt>0)
- {
- CurrRetryRate = (u16)(CurrRetryCnt*100/CurrTxokCnt);
+ priv->CurrentOperaRate = priv->ieee80211->rate / 5;
+ /* 2 Compute retry ratio. */
+ if (CurrTxokCnt > 0) {
+ CurrRetryRate = (u16)(CurrRetryCnt * 100 / CurrTxokCnt);
+ } else {
+		/* It may be a serious retry. Modified by Bruce to distinguish serious retrying from "no packets sent". */
+ CurrRetryRate = (u16)(CurrRetryCnt * 100 / 1);
}
- else
- { // It may be serious retry. To distinguish serious retry or no packets modified by Bruce
- CurrRetryRate = (u16)(CurrRetryCnt*100/1);
- }
-
-
- //
- // Added by Roger, 2007.01.02.
- // For debug information.
- //
- //printk("\n(1) pHalData->LastRetryRate: %d \n",priv->LastRetryRate);
- //printk("(2) RetryCnt = %d \n", CurrRetryCnt);
- //printk("(3) TxokCnt = %d \n", CurrTxokCnt);
- //printk("(4) CurrRetryRate = %d \n", CurrRetryRate);
- //printk("(5) CurrSignalStrength = %d \n",CurrSignalStrength);
- //printk("(6) TxThroughput is %d\n",TxThroughput);
- //printk("priv->NumTxOkBytesTotal is %d\n",priv->NumTxOkBytesTotal);
priv->LastRetryCnt = priv->CurrRetryCnt;
priv->LastTxokCnt = priv->NumTxOkTotal;
priv->LastRxokCnt = priv->ieee80211->NumRxOkTotal;
priv->CurrRetryCnt = 0;
- //2No Tx packets, return to init_rate or not?
- if (CurrRetryRate==0 && CurrTxokCnt == 0)
- {
- //
- //After 9 (30*300ms) seconds in this condition, we try to raise rate.
- //
+ /* 2No Tx packets, return to init_rate or not? */
+ if (CurrRetryRate == 0 && CurrTxokCnt == 0) {
+ /*
+ * After 9 (30*300ms) seconds in this condition, we try to raise rate.
+ */
priv->TryupingCountNoData++;
-// printk("No Tx packets, TryupingCountNoData(%d)\n", priv->TryupingCountNoData);
- //[TRC Dell Lab] Extend raised period from 4.5sec to 9sec, Isaiah 2008-02-15 18:00
- if (priv->TryupingCountNoData>30)
- {
+ /* [TRC Dell Lab] Extend raised period from 4.5sec to 9sec, Isaiah 2008-02-15 18:00 */
+ if (priv->TryupingCountNoData > 30) {
priv->TryupingCountNoData = 0;
- priv->CurrentOperaRate = GetUpgradeTxRate(dev, priv->CurrentOperaRate);
- // Reset Fail Record
+ priv->CurrentOperaRate = GetUpgradeTxRate(dev, priv->CurrentOperaRate);
+ /* Reset Fail Record */
priv->LastFailTxRate = 0;
priv->LastFailTxRateSS = -200;
priv->FailTxRateCount = 0;
}
goto SetInitialGain;
- }
- else
- {
- priv->TryupingCountNoData=0; //Reset trying up times.
+ } else {
+		priv->TryupingCountNoData = 0; /* Reset trying up times. */
}
- //
- // For Netgear case, I comment out the following signal strength estimation,
- // which can results in lower rate to transmit when sample is NOT enough (e.g. PING request).
- // 2007.04.09, by Roger.
- //
-
- //
- // Restructure rate adaptive as the following main stages:
- // (1) Add retry threshold in 54M upgrading condition with signal strength.
- // (2) Add the mechanism to degrade to CCK rate according to signal strength
- // and retry rate.
- // (3) Remove all Initial Gain Updates over OFDM rate. To avoid the complicated
- // situation, Initial Gain Update is upon on DIG mechanism except CCK rate.
- // (4) Add the mehanism of trying to upgrade tx rate.
- // (5) Record the information of upping tx rate to avoid trying upping tx rate constantly.
- // By Bruce, 2007-06-05.
- //
- //
-
- // 11Mbps or 36Mbps
- // Check more times in these rate(key rates).
- //
- if(priv->CurrentOperaRate == 22 || priv->CurrentOperaRate == 72)
- {
+	/*
+	 * For the Netgear case, I comment out the following signal strength estimation,
+	 * which can result in a lower rate to transmit when the sample is NOT enough (e.g. PING request).
+	 *
+	 * Restructure rate adaptive as the following main stages:
+	 * (1) Add retry threshold in 54M upgrading condition with signal strength.
+	 * (2) Add the mechanism to degrade to CCK rate according to signal strength
+	 *     and retry rate.
+	 * (3) Remove all Initial Gain Updates over OFDM rate. To avoid the complicated
+	 *     situation, Initial Gain Update is based on the DIG mechanism except at CCK rates.
+	 * (4) Add the mechanism of trying to upgrade the tx rate.
+	 * (5) Record the information of upping the tx rate to avoid trying to up it constantly.
+	 */
+
+ /*
+ * 11Mbps or 36Mbps
+	 * Check more times at these rates (key rates).
+ */
+ if (priv->CurrentOperaRate == 22 || priv->CurrentOperaRate == 72)
TryUpTh += 9;
- }
- //
- // Let these rates down more difficult.
- //
- if(MgntIsCckRate(priv->CurrentOperaRate) || priv->CurrentOperaRate == 36)
- {
- TryDownTh += 1;
- }
-
- //1 Adjust Rate.
- if (priv->bTryuping == true)
- {
- //2 For Test Upgrading mechanism
- // Note:
- // Sometimes the throughput is upon on the capability bwtween the AP and NIC,
- // thus the low data rate does not improve the performance.
- // We randomly upgrade the data rate and check if the retry rate is improved.
-
- // Upgrading rate did not improve the retry rate, fallback to the original rate.
- if ( (CurrRetryRate > 25) && TxThroughput < priv->LastTxThroughput)
- {
- //Not necessary raising rate, fall back rate.
+ /*
+ * Let these rates down more difficult.
+ */
+ if (MgntIsCckRate(priv->CurrentOperaRate) || priv->CurrentOperaRate == 36)
+ TryDownTh += 1;
+
+ /* 1 Adjust Rate. */
+ if (priv->bTryuping == true) {
+		/* 2 For Test Upgrading mechanism
+		 * Note:
+		 * Sometimes the throughput depends on the capability between the AP and NIC,
+		 * thus the low data rate does not improve the performance.
+		 * We randomly upgrade the data rate and check if the retry rate is improved.
+		 */
+
+ /* Upgrading rate did not improve the retry rate, fallback to the original rate. */
+ if ((CurrRetryRate > 25) && TxThroughput < priv->LastTxThroughput) {
+			/* Raising the rate is not necessary; fall back. */
bTryDown = true;
- //printk("case1-1: Not necessary raising rate, fall back rate....\n");
- //printk("case1-1: pMgntInfo->CurrentOperaRate =%d, TxThroughput = %d, LastThroughput = %d\n",
- // priv->CurrentOperaRate, TxThroughput, priv->LastTxThroughput);
- }
- else
- {
+ } else {
priv->bTryuping = false;
}
- }
- else if (CurrSignalStrength > -47 && (CurrRetryRate < 50))
- {
- //2For High Power
- //
- // Added by Roger, 2007.04.09.
- // Return to highest data rate, if signal strength is good enough.
- // SignalStrength threshold(-50dbm) is for RTL8186.
- // Revise SignalStrength threshold to -51dbm.
- //
- // Also need to check retry rate for safety, by Bruce, 2007-06-05.
- if(priv->CurrentOperaRate != priv->ieee80211->current_network.HighestOperaRate )
- {
+ } else if (CurrSignalStrength > -47 && (CurrRetryRate < 50)) {
+ /*
+ * 2For High Power
+ *
+ * Return to highest data rate, if signal strength is good enough.
+ * SignalStrength threshold(-50dbm) is for RTL8186.
+ * Revise SignalStrength threshold to -51dbm.
+ */
+ /* Also need to check retry rate for safety, by Bruce, 2007-06-05. */
+ if (priv->CurrentOperaRate != priv->ieee80211->current_network.HighestOperaRate) {
bTryUp = true;
- // Upgrade Tx Rate directly.
+ /* Upgrade Tx Rate directly. */
priv->TryupingCount += TryUpTh;
}
-// printk("case2: StaRateAdaptive87SE: Power(%d) is high enough!!. \n", CurrSignalStrength);
- }
- else if(CurrTxokCnt > 9 && CurrTxokCnt< 100 && CurrRetryRate >= 600)
- {
- //2 For Serious Retry
- //
- // Traffic is not busy but our Tx retry is serious.
- //
+ } else if (CurrTxokCnt > 9 && CurrTxokCnt < 100 && CurrRetryRate >= 600) {
+ /*
+ *2 For Serious Retry
+ *
+ * Traffic is not busy but our Tx retry is serious.
+ */
bTryDown = true;
- // Let Rate Mechanism to degrade tx rate directly.
+ /* Let Rate Mechanism to degrade tx rate directly. */
priv->TryDownCountLowData += TryDownTh;
-// printk("case3: RA: Tx Retry is serious. Degrade Tx Rate to %d directly...\n", priv->CurrentOperaRate);
- }
- else if ( priv->CurrentOperaRate == 108 )
- {
- //2For 54Mbps
- // Air Link
- if ( (CurrRetryRate>26)&&(priv->LastRetryRate>25))
-// if ( (CurrRetryRate>40)&&(priv->LastRetryRate>39))
- {
- //Down to rate 48Mbps.
+ } else if (priv->CurrentOperaRate == 108) {
+ /* 2For 54Mbps */
+ /* Air Link */
+ if ((CurrRetryRate > 26) && (priv->LastRetryRate > 25)) {
bTryDown = true;
}
- // Cable Link
- else if ( (CurrRetryRate>17)&&(priv->LastRetryRate>16) && (CurrSignalStrength > -72))
-// else if ( (CurrRetryRate>17)&&(priv->LastRetryRate>16) && (CurrSignalStrength > -72))
- {
- //Down to rate 48Mbps.
+ /* Cable Link */
+ else if ((CurrRetryRate > 17) && (priv->LastRetryRate > 16) && (CurrSignalStrength > -72)) {
bTryDown = true;
}
- if(bTryDown && (CurrSignalStrength < -75)) //cable link
- {
+ if (bTryDown && (CurrSignalStrength < -75)) /* cable link */
priv->TryDownCountLowData += TryDownTh;
- }
- //printk("case4---54M \n");
-
}
- else if ( priv->CurrentOperaRate == 96 )
- {
- //2For 48Mbps
- //Air Link
- if ( ((CurrRetryRate>48) && (priv->LastRetryRate>47)))
-// if ( ((CurrRetryRate>65) && (priv->LastRetryRate>64)))
-
- {
- //Down to rate 36Mbps.
+ else if (priv->CurrentOperaRate == 96) {
+ /* 2For 48Mbps */
+ /* Air Link */
+ if (((CurrRetryRate > 48) && (priv->LastRetryRate > 47))) {
bTryDown = true;
- }
- //Cable Link
- else if ( ((CurrRetryRate>21) && (priv->LastRetryRate>20)) && (CurrSignalStrength > -74))
- {
- //Down to rate 36Mbps.
+ } else if (((CurrRetryRate > 21) && (priv->LastRetryRate > 20)) && (CurrSignalStrength > -74)) { /* Cable Link */
+ /* Down to rate 36Mbps. */
bTryDown = true;
- }
- else if((CurrRetryRate> (priv->LastRetryRate + 50 )) && (priv->FailTxRateCount >2 ))
-// else if((CurrRetryRate> (priv->LastRetryRate + 70 )) && (priv->FailTxRateCount >2 ))
- {
+ } else if ((CurrRetryRate > (priv->LastRetryRate + 50)) && (priv->FailTxRateCount > 2)) {
bTryDown = true;
priv->TryDownCountLowData += TryDownTh;
- }
- else if ( (CurrRetryRate<8) && (priv->LastRetryRate<8) ) //TO DO: need to consider (RSSI)
-// else if ( (CurrRetryRate<28) && (priv->LastRetryRate<8) )
- {
+ } else if ((CurrRetryRate < 8) && (priv->LastRetryRate < 8)) { /* TO DO: need to consider (RSSI) */
bTryUp = true;
}
- if(bTryDown && (CurrSignalStrength < -75))
- {
+		if (bTryDown && (CurrSignalStrength < -75)) {
priv->TryDownCountLowData += TryDownTh;
}
- //printk("case5---48M \n");
- }
- else if ( priv->CurrentOperaRate == 72 )
- {
- //2For 36Mbps
- if ( (CurrRetryRate>43) && (priv->LastRetryRate>41))
-// if ( (CurrRetryRate>60) && (priv->LastRetryRate>59))
- {
- //Down to rate 24Mbps.
+ } else if (priv->CurrentOperaRate == 72) {
+ /* 2For 36Mbps */
+ if ((CurrRetryRate > 43) && (priv->LastRetryRate > 41)) {
+ /* Down to rate 24Mbps. */
bTryDown = true;
- }
- else if((CurrRetryRate> (priv->LastRetryRate + 50 )) && (priv->FailTxRateCount >2 ))
-// else if((CurrRetryRate> (priv->LastRetryRate + 70 )) && (priv->FailTxRateCount >2 ))
- {
+ } else if ((CurrRetryRate > (priv->LastRetryRate + 50)) && (priv->FailTxRateCount > 2)) {
bTryDown = true;
priv->TryDownCountLowData += TryDownTh;
- }
- else if ( (CurrRetryRate<15) && (priv->LastRetryRate<16)) //TO DO: need to consider (RSSI)
-// else if ( (CurrRetryRate<35) && (priv->LastRetryRate<36))
- {
+ } else if ((CurrRetryRate < 15) && (priv->LastRetryRate < 16)) { /* TO DO: need to consider (RSSI) */
bTryUp = true;
}
- if(bTryDown && (CurrSignalStrength < -80))
- {
+ if (bTryDown && (CurrSignalStrength < -80))
priv->TryDownCountLowData += TryDownTh;
- }
- //printk("case6---36M \n");
- }
- else if ( priv->CurrentOperaRate == 48 )
- {
- //2For 24Mbps
- // Air Link
- if ( ((CurrRetryRate>63) && (priv->LastRetryRate>62)))
-// if ( ((CurrRetryRate>83) && (priv->LastRetryRate>82)))
- {
- //Down to rate 18Mbps.
+
+ } else if (priv->CurrentOperaRate == 48) {
+ /* 2For 24Mbps */
+ /* Air Link */
+ if (((CurrRetryRate > 63) && (priv->LastRetryRate > 62))) {
bTryDown = true;
- }
- //Cable Link
- else if ( ((CurrRetryRate>33) && (priv->LastRetryRate>32)) && (CurrSignalStrength > -82) )
-// else if ( ((CurrRetryRate>50) && (priv->LastRetryRate>49)) && (CurrSignalStrength > -82) )
- {
- //Down to rate 18Mbps.
+ } else if (((CurrRetryRate > 33) && (priv->LastRetryRate > 32)) && (CurrSignalStrength > -82)) { /* Cable Link */
bTryDown = true;
- }
- else if((CurrRetryRate> (priv->LastRetryRate + 50 )) && (priv->FailTxRateCount >2 ))
-// else if((CurrRetryRate> (priv->LastRetryRate + 70 )) && (priv->FailTxRateCount >2 ))
-
- {
+		} else if ((CurrRetryRate > (priv->LastRetryRate + 50)) && (priv->FailTxRateCount > 2)) {
bTryDown = true;
priv->TryDownCountLowData += TryDownTh;
- }
- else if ( (CurrRetryRate<20) && (priv->LastRetryRate<21)) //TO DO: need to consider (RSSI)
-// else if ( (CurrRetryRate<40) && (priv->LastRetryRate<41))
- {
+ } else if ((CurrRetryRate < 20) && (priv->LastRetryRate < 21)) { /* TO DO: need to consider (RSSI) */
bTryUp = true;
}
- if(bTryDown && (CurrSignalStrength < -82))
- {
+ if (bTryDown && (CurrSignalStrength < -82))
priv->TryDownCountLowData += TryDownTh;
- }
- //printk("case7---24M \n");
- }
- else if ( priv->CurrentOperaRate == 36 )
- {
- //2For 18Mbps
- // original (109, 109)
- //[TRC Dell Lab] (90, 91), Isaiah 2008-02-18 23:24
- // (85, 86), Isaiah 2008-02-18 24:00
- if ( ((CurrRetryRate>85) && (priv->LastRetryRate>86)))
-// if ( ((CurrRetryRate>115) && (priv->LastRetryRate>116)))
- {
- //Down to rate 11Mbps.
+
+ } else if (priv->CurrentOperaRate == 36) {
+ if (((CurrRetryRate > 85) && (priv->LastRetryRate > 86))) {
bTryDown = true;
- }
- //[TRC Dell Lab] Isaiah 2008-02-18 23:24
- else if((CurrRetryRate> (priv->LastRetryRate + 50 )) && (priv->FailTxRateCount >2 ))
-// else if((CurrRetryRate> (priv->LastRetryRate + 70 )) && (priv->FailTxRateCount >2 ))
- {
+ } else if ((CurrRetryRate > (priv->LastRetryRate + 50)) && (priv->FailTxRateCount > 2)) {
bTryDown = true;
priv->TryDownCountLowData += TryDownTh;
- }
- else if ( (CurrRetryRate<22) && (priv->LastRetryRate<23)) //TO DO: need to consider (RSSI)
-// else if ( (CurrRetryRate<42) && (priv->LastRetryRate<43))
- {
+ } else if ((CurrRetryRate < 22) && (priv->LastRetryRate < 23)) { /* TO DO: need to consider (RSSI) */
bTryUp = true;
}
- //printk("case8---18M \n");
- }
- else if ( priv->CurrentOperaRate == 22 )
- {
- //2For 11Mbps
- if (CurrRetryRate>95)
-// if (CurrRetryRate>155)
- {
+ } else if (priv->CurrentOperaRate == 22) {
+ /* 2For 11Mbps */
+ if (CurrRetryRate > 95) {
bTryDown = true;
}
- else if ( (CurrRetryRate<29) && (priv->LastRetryRate <30) )//TO DO: need to consider (RSSI)
-// else if ( (CurrRetryRate<49) && (priv->LastRetryRate <50) )
- {
+		else if ((CurrRetryRate < 29) && (priv->LastRetryRate < 30)) { /* TO DO: need to consider (RSSI) */
bTryUp = true;
- }
- //printk("case9---11M \n");
}
- else if ( priv->CurrentOperaRate == 11 )
- {
- //2For 5.5Mbps
- if (CurrRetryRate>149)
-// if (CurrRetryRate>189)
- {
+ } else if (priv->CurrentOperaRate == 11) {
+ /* 2For 5.5Mbps */
+ if (CurrRetryRate > 149) {
bTryDown = true;
- }
- else if ( (CurrRetryRate<60) && (priv->LastRetryRate < 65))
-// else if ( (CurrRetryRate<80) && (priv->LastRetryRate < 85))
-
- {
+ } else if ((CurrRetryRate < 60) && (priv->LastRetryRate < 65)) {
bTryUp = true;
- }
- //printk("case10---5.5M \n");
}
- else if ( priv->CurrentOperaRate == 4 )
- {
- //2For 2 Mbps
- if((CurrRetryRate>99) && (priv->LastRetryRate>99))
-// if((CurrRetryRate>199) && (priv->LastRetryRate>199))
- {
+ } else if (priv->CurrentOperaRate == 4) {
+ /* 2For 2 Mbps */
+ if ((CurrRetryRate > 99) && (priv->LastRetryRate > 99)) {
bTryDown = true;
- }
- else if ( (CurrRetryRate < 65) && (priv->LastRetryRate < 70))
-// else if ( (CurrRetryRate < 85) && (priv->LastRetryRate < 90))
- {
+ } else if ((CurrRetryRate < 65) && (priv->LastRetryRate < 70)) {
bTryUp = true;
}
- //printk("case11---2M \n");
- }
- else if ( priv->CurrentOperaRate == 2 )
- {
- //2For 1 Mbps
- if( (CurrRetryRate<70) && (priv->LastRetryRate<75))
-// if( (CurrRetryRate<90) && (priv->LastRetryRate<95))
- {
+ } else if (priv->CurrentOperaRate == 2) {
+ /* 2For 1 Mbps */
+ if ((CurrRetryRate < 70) && (priv->LastRetryRate < 75)) {
bTryUp = true;
}
- //printk("case12---1M \n");
}
- if(bTryUp && bTryDown)
- printk("StaRateAdaptive87B(): Tx Rate tried upping and downing simultaneously!\n");
-
- //1 Test Upgrading Tx Rate
- // Sometimes the cause of the low throughput (high retry rate) is the compatibility between the AP and NIC.
- // To test if the upper rate may cause lower retry rate, this mechanism randomly occurs to test upgrading tx rate.
- if(!bTryUp && !bTryDown && (priv->TryupingCount == 0) && (priv->TryDownCountLowData == 0)
- && priv->CurrentOperaRate != priv->ieee80211->current_network.HighestOperaRate && priv->FailTxRateCount < 2)
- {
- if(jiffies% (CurrRetryRate + 101) == 0)
- {
+ if (bTryUp && bTryDown)
+ printk("StaRateAdaptive87B(): Tx Rate tried upping and downing simultaneously!\n");
+
+ /* 1 Test Upgrading Tx Rate
+ * Sometimes the cause of the low throughput (high retry rate) is the compatibility between the AP and NIC.
+ * To test if the upper rate may cause lower retry rate, this mechanism randomly occurs to test upgrading tx rate.
+ */
+ if (!bTryUp && !bTryDown && (priv->TryupingCount == 0) && (priv->TryDownCountLowData == 0)
+ && priv->CurrentOperaRate != priv->ieee80211->current_network.HighestOperaRate && priv->FailTxRateCount < 2) {
+ if (jiffies % (CurrRetryRate + 101) == 0) {
bTryUp = true;
priv->bTryuping = true;
- //printk("StaRateAdaptive87SE(): Randomly try upgrading...\n");
}
}
- //1 Rate Mechanism
- if(bTryUp)
- {
+ /* 1 Rate Mechanism */
+ if (bTryUp) {
priv->TryupingCount++;
priv->TryDownCountLowData = 0;
- {
-// printk("UP: pHalData->TryupingCount = %d\n", priv->TryupingCount);
-// printk("UP: TryUpTh(%d)+ (FailTxRateCount(%d))^2 =%d\n",
-// TryUpTh, priv->FailTxRateCount, (TryUpTh + priv->FailTxRateCount * priv->FailTxRateCount) );
-// printk("UP: pHalData->bTryuping=%d\n", priv->bTryuping);
-
- }
+ /*
+ * Check more times if we need to upgrade indeed.
+ * Because the largest value of pHalData->TryupingCount is 0xFFFF and
+ * the largest value of pHalData->FailTxRateCount is 0x14,
+ * this condition will be satisfied at most every 2 min.
+ */
- //
- // Check more times if we need to upgrade indeed.
- // Because the largest value of pHalData->TryupingCount is 0xFFFF and
- // the largest value of pHalData->FailTxRateCount is 0x14,
- // this condition will be satisfied at most every 2 min.
- //
-
- if((priv->TryupingCount > (TryUpTh + priv->FailTxRateCount * priv->FailTxRateCount)) ||
- (CurrSignalStrength > priv->LastFailTxRateSS) || priv->bTryuping)
- {
+ if ((priv->TryupingCount > (TryUpTh + priv->FailTxRateCount * priv->FailTxRateCount)) ||
+ (CurrSignalStrength > priv->LastFailTxRateSS) || priv->bTryuping) {
priv->TryupingCount = 0;
- //
- // When transferring from CCK to OFDM, DIG is an important issue.
- //
- if(priv->CurrentOperaRate == 22)
+ /*
+ * When transferring from CCK to OFDM, DIG is an important issue.
+ */
+ if (priv->CurrentOperaRate == 22)
bUpdateInitialGain = true;
- // The difference in throughput between 48Mbps and 36Mbps is 8M.
- // So, we must be carefully in this rate scale. Isaiah 2008-02-15.
- //
- if( ((priv->CurrentOperaRate == 72) || (priv->CurrentOperaRate == 48) || (priv->CurrentOperaRate == 36)) &&
- (priv->FailTxRateCount > 2) )
- priv->RateAdaptivePeriod= (RATE_ADAPTIVE_TIMER_PERIOD/2);
+ /*
+ * The difference in throughput between 48Mbps and 36Mbps is 8M.
+ * So, we must be carefully in this rate scale. Isaiah 2008-02-15.
+ */
+ if (((priv->CurrentOperaRate == 72) || (priv->CurrentOperaRate == 48) || (priv->CurrentOperaRate == 36)) &&
+ (priv->FailTxRateCount > 2))
+ priv->RateAdaptivePeriod = (RATE_ADAPTIVE_TIMER_PERIOD / 2);
- // (1)To avoid upgrade frequently to the fail tx rate, add the FailTxRateCount into the threshold.
- // (2)If the signal strength is increased, it may be able to upgrade.
+ /* (1)To avoid upgrade frequently to the fail tx rate, add the FailTxRateCount into the threshold. */
+ /* (2)If the signal strength is increased, it may be able to upgrade. */
priv->CurrentOperaRate = GetUpgradeTxRate(dev, priv->CurrentOperaRate);
-// printk("StaRateAdaptive87SE(): Upgrade Tx Rate to %d\n", priv->CurrentOperaRate);
-
- //[TRC Dell Lab] Bypass 12/9/6, Isaiah 2008-02-18 20:00
- if(priv->CurrentOperaRate ==36)
- {
- priv->bUpdateARFR=true;
- write_nic_word(dev, ARFR, 0x0F8F); //bypass 12/9/6
-// printk("UP: ARFR=0xF8F\n");
- }
- else if(priv->bUpdateARFR)
- {
- priv->bUpdateARFR=false;
- write_nic_word(dev, ARFR, 0x0FFF); //set 1M ~ 54Mbps.
-// printk("UP: ARFR=0xFFF\n");
+
+ if (priv->CurrentOperaRate == 36) {
+ priv->bUpdateARFR = true;
+ write_nic_word(dev, ARFR, 0x0F8F); /* bypass 12/9/6 */
+			} else if (priv->bUpdateARFR) {
+ priv->bUpdateARFR = false;
+ write_nic_word(dev, ARFR, 0x0FFF); /* set 1M ~ 54Mbps. */
}
- // Update Fail Tx rate and count.
- if(priv->LastFailTxRate != priv->CurrentOperaRate)
- {
+ /* Update Fail Tx rate and count. */
+ if (priv->LastFailTxRate != priv->CurrentOperaRate) {
priv->LastFailTxRate = priv->CurrentOperaRate;
priv->FailTxRateCount = 0;
- priv->LastFailTxRateSS = -200; // Set lowest power.
+ priv->LastFailTxRateSS = -200; /* Set lowest power. */
}
}
- }
- else
- {
- if(priv->TryupingCount > 0)
+ } else {
+ if (priv->TryupingCount > 0)
priv->TryupingCount --;
}
- if(bTryDown)
- {
+ if (bTryDown) {
priv->TryDownCountLowData++;
priv->TryupingCount = 0;
- {
-// printk("DN: pHalData->TryDownCountLowData = %d\n",priv->TryDownCountLowData);
-// printk("DN: TryDownTh =%d\n", TryDownTh);
-// printk("DN: pHalData->bTryuping=%d\n", priv->bTryuping);
- }
- //Check if Tx rate can be degraded or Test trying upgrading should fallback.
- if(priv->TryDownCountLowData > TryDownTh || priv->bTryuping)
- {
+ /* Check if Tx rate can be degraded or Test trying upgrading should fallback. */
+ if (priv->TryDownCountLowData > TryDownTh || priv->bTryuping) {
priv->TryDownCountLowData = 0;
priv->bTryuping = false;
- // Update fail information.
- if(priv->LastFailTxRate == priv->CurrentOperaRate)
- {
- priv->FailTxRateCount ++;
- // Record the Tx fail rate signal strength.
- if(CurrSignalStrength > priv->LastFailTxRateSS)
- {
+ /* Update fail information. */
+ if (priv->LastFailTxRate == priv->CurrentOperaRate) {
+ priv->FailTxRateCount++;
+ /* Record the Tx fail rate signal strength. */
+ if (CurrSignalStrength > priv->LastFailTxRateSS)
priv->LastFailTxRateSS = CurrSignalStrength;
- }
- }
- else
- {
+ } else {
priv->LastFailTxRate = priv->CurrentOperaRate;
priv->FailTxRateCount = 1;
priv->LastFailTxRateSS = CurrSignalStrength;
}
priv->CurrentOperaRate = GetDegradeTxRate(dev, priv->CurrentOperaRate);
- // Reduce chariot training time at weak signal strength situation. SD3 ED demand.
- //[TRC Dell Lab] Revise Signal Threshold from -75 to -80 , Isaiah 2008-02-18 20:00
- if( (CurrSignalStrength < -80) && (priv->CurrentOperaRate > 72 ))
- {
+ /* Reduce chariot training time at weak signal strength situation. SD3 ED demand. */
+			if ((CurrSignalStrength < -80) && (priv->CurrentOperaRate > 72)) {
priv->CurrentOperaRate = 72;
-// printk("DN: weak signal strength (%d), degrade to 36Mbps\n", CurrSignalStrength);
}
- //[TRC Dell Lab] Bypass 12/9/6, Isaiah 2008-02-18 20:00
- if(priv->CurrentOperaRate ==36)
- {
- priv->bUpdateARFR=true;
- write_nic_word(dev, ARFR, 0x0F8F); //bypass 12/9/6
-// printk("DN: ARFR=0xF8F\n");
- }
- else if(priv->bUpdateARFR)
- {
- priv->bUpdateARFR=false;
- write_nic_word(dev, ARFR, 0x0FFF); //set 1M ~ 54Mbps.
-// printk("DN: ARFR=0xFFF\n");
+ if (priv->CurrentOperaRate == 36) {
+ priv->bUpdateARFR = true;
+ write_nic_word(dev, ARFR, 0x0F8F); /* bypass 12/9/6 */
+ } else if (priv->bUpdateARFR) {
+ priv->bUpdateARFR = false;
+ write_nic_word(dev, ARFR, 0x0FFF); /* set 1M ~ 54Mbps. */
}
- //
- // When it is CCK rate, it may need to update initial gain to receive lower power packets.
- //
- if(MgntIsCckRate(priv->CurrentOperaRate))
- {
+ /*
+ * When it is CCK rate, it may need to update initial gain to receive lower power packets.
+ */
+ if (MgntIsCckRate(priv->CurrentOperaRate)) {
bUpdateInitialGain = true;
}
-// printk("StaRateAdaptive87SE(): Degrade Tx Rate to %d\n", priv->CurrentOperaRate);
}
- }
- else
- {
- if(priv->TryDownCountLowData > 0)
- priv->TryDownCountLowData --;
+ } else {
+ if (priv->TryDownCountLowData > 0)
+ priv->TryDownCountLowData--;
}
- // Keep the Tx fail rate count to equal to 0x15 at most.
- // Reduce the fail count at least to 10 sec if tx rate is tending stable.
- if(priv->FailTxRateCount >= 0x15 ||
- (!bTryUp && !bTryDown && priv->TryDownCountLowData == 0 && priv->TryupingCount && priv->FailTxRateCount > 0x6))
- {
- priv->FailTxRateCount --;
+ /*
+ * Keep the Tx fail rate count to equal to 0x15 at most.
+ * Reduce the fail count at least to 10 sec if tx rate is tending stable.
+ */
+ if (priv->FailTxRateCount >= 0x15 ||
+ (!bTryUp && !bTryDown && priv->TryDownCountLowData == 0 && priv->TryupingCount && priv->FailTxRateCount > 0x6)) {
+ priv->FailTxRateCount--;
}
OfdmTxPwrIdx = priv->chtxpwr_ofdm[priv->ieee80211->current_network.channel];
CckTxPwrIdx = priv->chtxpwr[priv->ieee80211->current_network.channel];
- //[TRC Dell Lab] Mac0x9e increase 2 level in 36M~18M situation, Isaiah 2008-02-18 24:00
- if((priv->CurrentOperaRate < 96) &&(priv->CurrentOperaRate > 22))
- {
+ /* Mac0x9e increase 2 level in 36M~18M situation */
+ if ((priv->CurrentOperaRate < 96) && (priv->CurrentOperaRate > 22)) {
u1bCck = read_nic_byte(dev, CCK_TXAGC);
u1bOfdm = read_nic_byte(dev, OFDM_TXAGC);
- // case 1: Never enter High power
- if(u1bCck == CckTxPwrIdx )
- {
- if(u1bOfdm != (OfdmTxPwrIdx+2) )
- {
- priv->bEnhanceTxPwr= true;
- u1bOfdm = ((u1bOfdm+2) > 35) ? 35: (u1bOfdm+2);
+ /* case 1: Never enter High power */
+ if (u1bCck == CckTxPwrIdx) {
+ if (u1bOfdm != (OfdmTxPwrIdx + 2)) {
+ priv->bEnhanceTxPwr = true;
+				u1bOfdm = ((u1bOfdm + 2) > 35) ? 35 : (u1bOfdm + 2);
write_nic_byte(dev, OFDM_TXAGC, u1bOfdm);
-// printk("Enhance OFDM_TXAGC : +++++ u1bOfdm= 0x%x\n", u1bOfdm);
}
- }
- // case 2: enter high power
- else if(u1bCck < CckTxPwrIdx)
- {
- if(!priv->bEnhanceTxPwr)
- {
- priv->bEnhanceTxPwr= true;
- u1bOfdm = ((u1bOfdm+2) > 35) ? 35: (u1bOfdm+2);
+ } else if (u1bCck < CckTxPwrIdx) {
+ /* case 2: enter high power */
+ if (!priv->bEnhanceTxPwr) {
+ priv->bEnhanceTxPwr = true;
+				u1bOfdm = ((u1bOfdm + 2) > 35) ? 35 : (u1bOfdm + 2);
write_nic_byte(dev, OFDM_TXAGC, u1bOfdm);
- //RT_TRACE(COMP_RATE, DBG_TRACE, ("Enhance OFDM_TXAGC(2) : +++++ u1bOfdm= 0x%x\n", u1bOfdm));
}
}
- }
- else if(priv->bEnhanceTxPwr) //54/48/11/5.5/2/1
- {
+ } else if (priv->bEnhanceTxPwr) { /* 54/48/11/5.5/2/1 */
u1bCck = read_nic_byte(dev, CCK_TXAGC);
u1bOfdm = read_nic_byte(dev, OFDM_TXAGC);
- // case 1: Never enter High power
- if(u1bCck == CckTxPwrIdx )
- {
- priv->bEnhanceTxPwr= false;
- write_nic_byte(dev, OFDM_TXAGC, OfdmTxPwrIdx);
- //printk("Recover OFDM_TXAGC : ===== u1bOfdm= 0x%x\n", OfdmTxPwrIdx);
+ /* case 1: Never enter High power */
+ if (u1bCck == CckTxPwrIdx) {
+ priv->bEnhanceTxPwr = false;
+ write_nic_byte(dev, OFDM_TXAGC, OfdmTxPwrIdx);
}
- // case 2: enter high power
- else if(u1bCck < CckTxPwrIdx)
- {
- priv->bEnhanceTxPwr= false;
- u1bOfdm = ((u1bOfdm-2) > 0) ? (u1bOfdm-2): 0;
+ /* case 2: enter high power */
+ else if (u1bCck < CckTxPwrIdx) {
+ priv->bEnhanceTxPwr = false;
+			u1bOfdm = ((u1bOfdm - 2) > 0) ? (u1bOfdm - 2) : 0;
write_nic_byte(dev, OFDM_TXAGC, u1bOfdm);
- //RT_TRACE(COMP_RATE, DBG_TRACE, ("Recover OFDM_TXAGC(2): ===== u1bOfdm= 0x%x\n", u1bOfdm));
-
}
}
- //
- // We need update initial gain when we set tx rate "from OFDM to CCK" or
- // "from CCK to OFDM".
- //
+ /*
+ * We need update initial gain when we set tx rate "from OFDM to CCK" or
+ * "from CCK to OFDM".
+ */
SetInitialGain:
- if(bUpdateInitialGain)
- {
- if(MgntIsCckRate(priv->CurrentOperaRate)) // CCK
- {
- if(priv->InitialGain > priv->RegBModeGainStage)
- {
- priv->InitialGainBackUp= priv->InitialGain;
-
- if(CurrSignalStrength < -85) // Low power, OFDM [0x17] = 26.
- {
- //SD3 SYs suggest that CurrSignalStrength < -65, ofdm 0x17=26.
+ if (bUpdateInitialGain) {
+ if (MgntIsCckRate(priv->CurrentOperaRate)) { /* CCK */
+ if (priv->InitialGain > priv->RegBModeGainStage) {
+ priv->InitialGainBackUp = priv->InitialGain;
+
+ if (CurrSignalStrength < -85) /* Low power, OFDM [0x17] = 26. */
+ /* SD3 SYs suggest that CurrSignalStrength < -65, ofdm 0x17=26. */
priv->InitialGain = priv->RegBModeGainStage;
- }
- else if(priv->InitialGain > priv->RegBModeGainStage + 1)
- {
+
+ else if (priv->InitialGain > priv->RegBModeGainStage + 1)
priv->InitialGain -= 2;
- }
+
else
- {
- priv->InitialGain --;
- }
+ priv->InitialGain--;
+
printk("StaRateAdaptive87SE(): update init_gain to index %d for date rate %d\n",priv->InitialGain, priv->CurrentOperaRate);
UpdateInitialGain(dev);
}
- }
- else // OFDM
- {
- if(priv->InitialGain < 4)
- {
- priv->InitialGainBackUp= priv->InitialGain;
+ } else { /* OFDM */
+ if (priv->InitialGain < 4) {
+ priv->InitialGainBackUp = priv->InitialGain;
- priv->InitialGain ++;
+ priv->InitialGain++;
printk("StaRateAdaptive87SE(): update init_gain to index %d for date rate %d\n",priv->InitialGain, priv->CurrentOperaRate);
UpdateInitialGain(dev);
}
}
}
- //Record the related info
+ /* Record the related info */
priv->LastRetryRate = CurrRetryRate;
priv->LastTxThroughput = TxThroughput;
priv->ieee80211->rate = priv->CurrentOperaRate * 5;
}
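
All of the per-rate thresholds above key off one number: retries per hundred acked frames, with the TX-OK count floored at one frame so that pure retrying still produces a large ratio. A sketch of the computation (illustrative):

	#include <stdio.h>

	/* Retries per 100 TX-OK frames; when nothing was acked, treat the
	 * count as one frame so heavy retrying still yields a large ratio. */
	static unsigned int retry_rate(unsigned long retries, unsigned long txok)
	{
		if (txok == 0)
			txok = 1;
		return (unsigned int)(retries * 100 / txok);
	}

	int main(void)
	{
		printf("%u\n", retry_rate(13, 50));	/* 26: enough to leave 54M */
		printf("%u\n", retry_rate(30, 0));	/* 3000: serious retry */
		return 0;
	}
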
-void rtl8180_rate_adapter(struct work_struct * work)
+void rtl8180_rate_adapter(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,rate_adapter_wq);
- struct net_device *dev = ieee->dev;
- //struct r8180_priv *priv = ieee80211_priv(dev);
-// DMESG("---->rtl8180_rate_adapter");
- StaRateAdaptive87SE(dev);
-// DMESG("<----rtl8180_rate_adapter");
+ struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, rate_adapter_wq);
+ struct net_device *dev = ieee->dev;
+ StaRateAdaptive87SE(dev);
}
void timer_rate_adaptive(unsigned long data)
{
- struct r8180_priv* priv = ieee80211_priv((struct net_device *)data);
- //DMESG("---->timer_rate_adaptive()\n");
- if(!priv->up)
- {
-// DMESG("<----timer_rate_adaptive():driver is not up!\n");
+ struct r8180_priv *priv = ieee80211_priv((struct net_device *)data);
+ if (!priv->up) {
return;
}
- if((priv->ieee80211->iw_mode != IW_MODE_MASTER)
+ if ((priv->ieee80211->iw_mode != IW_MODE_MASTER)
&& (priv->ieee80211->state == IEEE80211_LINKED) &&
- (priv->ForcedDataRate == 0) )
- {
-// DMESG("timer_rate_adaptive():schedule rate_adapter_wq\n");
+ (priv->ForcedDataRate == 0)) {
queue_work(priv->ieee80211->wq, (void *)&priv->ieee80211->rate_adapter_wq);
-// StaRateAdaptive87SE((struct net_device *)data);
}
priv->rateadapter_timer.expires = jiffies + MSECS(priv->RateAdaptivePeriod);
add_timer(&priv->rateadapter_timer);
- //DMESG("<----timer_rate_adaptive()\n");
}
-//by amy 080312}
-void
-SwAntennaDiversityRxOk8185(
- struct net_device *dev,
- u8 SignalStrength
- )
+
+void SwAntennaDiversityRxOk8185(struct net_device *dev, u8 SignalStrength)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
-// printk("+SwAntennaDiversityRxOk8185: RxSs: %d\n", SignalStrength);
-
priv->AdRxOkCnt++;
- if( priv->AdRxSignalStrength != -1)
- {
- priv->AdRxSignalStrength = ((priv->AdRxSignalStrength*7) + (SignalStrength*3)) / 10;
- }
- else
- { // Initialization case.
+ if (priv->AdRxSignalStrength != -1) {
+ priv->AdRxSignalStrength = ((priv->AdRxSignalStrength * 7) + (SignalStrength * 3)) / 10;
+ } else { /* Initialization case. */
priv->AdRxSignalStrength = SignalStrength;
}
-//{+by amy 080312
- if( priv->LastRxPktAntenna ) //Main antenna.
+
+ if (priv->LastRxPktAntenna) /* Main antenna. */
priv->AdMainAntennaRxOkCnt++;
- else // Aux antenna.
+ else /* Aux antenna. */
priv->AdAuxAntennaRxOkCnt++;
-//+by amy 080312
-// printk("-SwAntennaDiversityRxOk8185: AdRxOkCnt: %d AdRxSignalStrength: %d\n", priv->AdRxOkCnt, priv->AdRxSignalStrength);
}
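
The smoothing above is fixed-point exponential averaging: 70% history, 30% new sample, with -1 serving as the "no sample yet" sentinel. A standalone sketch:

	#include <stdio.h>

	/* 70/30 fixed-point smoothing of Rx signal strength, as in
	 * SwAntennaDiversityRxOk8185(); -1 marks "no sample yet". */
	static long smooth_ss(long avg, long sample)
	{
		if (avg == -1)
			return sample;
		return (avg * 7 + sample * 3) / 10;
	}

	int main(void)
	{
		long avg = -1;
		avg = smooth_ss(avg, 60);	/* first sample: 60 */
		avg = smooth_ss(avg, 30);	/* (60*7 + 30*3) / 10 = 51 */
		printf("%ld\n", avg);
		return 0;
	}
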
-//
-// Description:
-// Change Antenna Switch.
-//
-bool
-SetAntenna8185(
- struct net_device *dev,
- u8 u1bAntennaIndex
- )
+/* Change Antenna Switch. */
+bool SetAntenna8185(struct net_device *dev, u8 u1bAntennaIndex)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bAntennaSwitched = false;
-// printk("+SetAntenna8185(): Antenna is switching to: %d \n", u1bAntennaIndex);
-
- switch(u1bAntennaIndex)
- {
+ switch (u1bAntennaIndex) {
case 0:
/* Mac register, main antenna */
write_nic_byte(dev, ANTSEL, 0x03);
@@ -1319,64 +944,35 @@ SetAntenna8185(
}
if(bAntennaSwitched)
- {
priv->CurrAntennaIndex = u1bAntennaIndex;
- }
-
-// printk("-SetAntenna8185(): return (%#X)\n", bAntennaSwitched);
return bAntennaSwitched;
}
-//
-// Description:
-// Toggle Antenna switch.
-//
-bool
-SwitchAntenna(
- struct net_device *dev
- )
+/* Toggle Antenna switch. */
+bool SwitchAntenna(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bResult;
- if(priv->CurrAntennaIndex == 0)
- {
- bResult = SetAntenna8185(dev, 1);
-//by amy 080312
-// printk("SwitchAntenna(): switching to antenna 1 ......\n");
-// bResult = SetAntenna8185(dev, 1);//-by amy 080312
- }
- else
- {
- bResult = SetAntenna8185(dev, 0);
-//by amy 080312
-// printk("SwitchAntenna(): switching to antenna 0 ......\n");
-// bResult = SetAntenna8185(dev, 0);//-by amy 080312
+ if (priv->CurrAntennaIndex == 0) {
+ bResult = SetAntenna8185(dev, 1);
+ } else {
+ bResult = SetAntenna8185(dev, 0);
}
return bResult;
}
-//
-// Description:
-// Engine of SW Antenna Diversity mechanism.
-// Since 8187 has no Tx part information,
-// this implementation is only dependend on Rx part information.
-//
-// 2006.04.17, by rcnjko.
-//
-void
-SwAntennaDiversity(
- struct net_device *dev
- )
+/*
+ * Engine of SW Antenna Diversity mechanism.
+ * Since 8187 has no Tx part information,
+ * this implementation depends only on Rx part information.
+ */
+void SwAntennaDiversity(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- bool bSwCheckSS=false;
-// printk("+SwAntennaDiversity(): CurrAntennaIndex: %d\n", priv->CurrAntennaIndex);
-// printk("AdTickCount is %d\n",priv->AdTickCount);
-//by amy 080312
- if(bSwCheckSS)
- {
+ bool bSwCheckSS = false;
+ if (bSwCheckSS) {
priv->AdTickCount++;
printk("(1) AdTickCount: %d, AdCheckPeriod: %d\n",
@@ -1384,246 +980,162 @@ SwAntennaDiversity(
printk("(2) AdRxSignalStrength: %ld, AdRxSsThreshold: %ld\n",
priv->AdRxSignalStrength, priv->AdRxSsThreshold);
}
-// priv->AdTickCount++;//-by amy 080312
-
- // Case 1. No Link.
- if(priv->ieee80211->state != IEEE80211_LINKED)
- {
- // printk("SwAntennaDiversity(): Case 1. No Link.\n");
+ /* Case 1. No Link. */
+ if (priv->ieee80211->state != IEEE80211_LINKED) {
priv->bAdSwitchedChecking = false;
- // I switch antenna here to prevent any one of antenna is broken before link established, 2006.04.18, by rcnjko..
+		/* I switch antenna here to guard against one of the antennas being broken before the link is established, 2006.04.18, by rcnjko. */
SwitchAntenna(dev);
- }
- // Case 2. Linked but no packet received.
- else if(priv->AdRxOkCnt == 0)
- {
- // printk("SwAntennaDiversity(): Case 2. Linked but no packet received.\n");
+	/* Case 2. Linked but no packet received. */
+ } else if (priv->AdRxOkCnt == 0) {
priv->bAdSwitchedChecking = false;
SwitchAntenna(dev);
- }
- // Case 3. Evaluate last antenna switch action and undo it if necessary.
- else if(priv->bAdSwitchedChecking == true)
- {
- // printk("SwAntennaDiversity(): Case 3. Evaluate last antenna switch action.\n");
+ /* Case 3. Evaluate last antenna switch action and undo it if necessary. */
+	} else if (priv->bAdSwitchedChecking) {
priv->bAdSwitchedChecking = false;
- // Adjust Rx signal strength threshold.
+ /* Adjust Rx signal strength threshold. */
priv->AdRxSsThreshold = (priv->AdRxSignalStrength + priv->AdRxSsBeforeSwitched) / 2;
priv->AdRxSsThreshold = (priv->AdRxSsThreshold > priv->AdMaxRxSsThreshold) ?
priv->AdMaxRxSsThreshold: priv->AdRxSsThreshold;
- if(priv->AdRxSignalStrength < priv->AdRxSsBeforeSwitched)
- { // Rx signal strength is not improved after we swtiched antenna. => Swich back.
-// printk("SwAntennaDiversity(): Rx Signal Strength is not improved, CurrRxSs: %d, LastRxSs: %d\n",
-// priv->AdRxSignalStrength, priv->AdRxSsBeforeSwitched);
-//by amy 080312
- // Increase Antenna Diversity checking period due to bad decision.
+		if (priv->AdRxSignalStrength < priv->AdRxSsBeforeSwitched) {
+			/* Rx signal strength is not improved after we switched antenna. => Switch back. */
+ /* Increase Antenna Diversity checking period due to bad decision. */
priv->AdCheckPeriod *= 2;
-//by amy 080312
- // Increase Antenna Diversity checking period.
- if(priv->AdCheckPeriod > priv->AdMaxCheckPeriod)
+ /* Increase Antenna Diversity checking period. */
+ if (priv->AdCheckPeriod > priv->AdMaxCheckPeriod)
priv->AdCheckPeriod = priv->AdMaxCheckPeriod;
- // Wrong deceision => switch back.
+			/* Wrong decision => switch back. */
SwitchAntenna(dev);
- }
- else
- { // Rx Signal Strength is improved.
-// printk("SwAntennaDiversity(): Rx Signal Strength is improved, CurrRxSs: %d, LastRxSs: %d\n",
-// priv->AdRxSignalStrength, priv->AdRxSsBeforeSwitched);
+ } else {
+ /* Rx Signal Strength is improved. */
- // Reset Antenna Diversity checking period to its min value.
+ /* Reset Antenna Diversity checking period to its min value. */
priv->AdCheckPeriod = priv->AdMinCheckPeriod;
}
-// printk("SwAntennaDiversity(): AdRxSsThreshold: %d, AdCheckPeriod: %d\n",
-// priv->AdRxSsThreshold, priv->AdCheckPeriod);
}
- // Case 4. Evaluate if we shall switch antenna now.
- // Cause Table Speed is very fast in TRC Dell Lab, we check it every time.
- else// if(priv->AdTickCount >= priv->AdCheckPeriod)//-by amy 080312
- {
-// printk("SwAntennaDiversity(): Case 4. Evaluate if we shall switch antenna now.\n");
-
+ /* Case 4. Evaluate if we shall switch antenna now. */
+	/* Because table speed is very fast in the TRC Dell Lab, we check it every time. */
+ else {
priv->AdTickCount = 0;
- //
- // <Roger_Notes> We evaluate RxOk counts for each antenna first and than
- // evaluate signal strength.
- // The following operation can overcome the disability of CCA on both two antennas
- // When signal strength was extremely low or high.
- // 2008.01.30.
- //
-
- //
- // Evaluate RxOk count from each antenna if we shall switch default antenna now.
- // Added by Roger, 2008.02.21.
-//{by amy 080312
- if((priv->AdMainAntennaRxOkCnt < priv->AdAuxAntennaRxOkCnt)
- && (priv->CurrAntennaIndex == 0))
- { // We set Main antenna as default but RxOk count was less than Aux ones.
-
- // printk("SwAntennaDiversity(): Main antenna RxOK is poor, AdMainAntennaRxOkCnt: %d, AdAuxAntennaRxOkCnt: %d\n",
- // priv->AdMainAntennaRxOkCnt, priv->AdAuxAntennaRxOkCnt);
-
- // Switch to Aux antenna.
+		/*
+		 * <Roger_Notes> We evaluate RxOk counts for each antenna first and then
+		 * evaluate signal strength.
+		 * The following operation can overcome the inability of CCA on both antennas
+		 * when signal strength is extremely low or high.
+		 * 2008.01.30.
+		 */
+
+ /*
+ * Evaluate RxOk count from each antenna if we shall switch default antenna now.
+ */
+ if ((priv->AdMainAntennaRxOkCnt < priv->AdAuxAntennaRxOkCnt)
+ && (priv->CurrAntennaIndex == 0)) {
+ /* We set Main antenna as default but RxOk count was less than Aux ones. */
+
+ /* Switch to Aux antenna. */
SwitchAntenna(dev);
priv->bHWAdSwitched = true;
- }
- else if((priv->AdAuxAntennaRxOkCnt < priv->AdMainAntennaRxOkCnt)
- && (priv->CurrAntennaIndex == 1))
- { // We set Aux antenna as default but RxOk count was less than Main ones.
+ } else if ((priv->AdAuxAntennaRxOkCnt < priv->AdMainAntennaRxOkCnt)
+ && (priv->CurrAntennaIndex == 1)) {
+ /* We set Aux antenna as default but RxOk count was less than Main ones. */
- // printk("SwAntennaDiversity(): Aux antenna RxOK is poor, AdMainAntennaRxOkCnt: %d, AdAuxAntennaRxOkCnt: %d\n",
- // priv->AdMainAntennaRxOkCnt, priv->AdAuxAntennaRxOkCnt);
-
- // Switch to Main antenna.
+ /* Switch to Main antenna. */
SwitchAntenna(dev);
priv->bHWAdSwitched = true;
- }
- else
- {// Default antenna is better.
+ } else {
+ /* Default antenna is better. */
- // printk("SwAntennaDiversity(): Default antenna is better., AdMainAntennaRxOkCnt: %d, AdAuxAntennaRxOkCnt: %d\n",
- // priv->AdMainAntennaRxOkCnt, priv->AdAuxAntennaRxOkCnt);
-
- // Still need to check current signal strength.
+ /* Still need to check current signal strength. */
priv->bHWAdSwitched = false;
}
- //
- // <Roger_Notes> We evaluate Rx signal strength ONLY when default antenna
- // didn't changed by HW evaluation.
- // 2008.02.27.
- //
- // [TRC Dell Lab] SignalStrength is inaccuracy. Isaiah 2008-03-05
- // For example, Throughput of aux is better than main antenna(about 10M v.s 2M),
- // but AdRxSignalStrength is less than main.
- // Our guess is that main antenna have lower throughput and get many change
- // to receive more CCK packets(ex.Beacon) which have stronger SignalStrength.
- //
- if( (!priv->bHWAdSwitched) && (bSwCheckSS))
- {
-//by amy 080312}
- // Evaluate Rx signal strength if we shall switch antenna now.
- if(priv->AdRxSignalStrength < priv->AdRxSsThreshold)
- { // Rx signal strength is weak => Switch Antenna.
-// printk("SwAntennaDiversity(): Rx Signal Strength is weak, CurrRxSs: %d, RxSsThreshold: %d\n",
-// priv->AdRxSignalStrength, priv->AdRxSsThreshold);
-
- priv->AdRxSsBeforeSwitched = priv->AdRxSignalStrength;
- priv->bAdSwitchedChecking = true;
-
- SwitchAntenna(dev);
- }
- else
- { // Rx signal strength is OK.
-// printk("SwAntennaDiversity(): Rx Signal Strength is OK, CurrRxSs: %d, RxSsThreshold: %d\n",
-// priv->AdRxSignalStrength, priv->AdRxSsThreshold);
-
- priv->bAdSwitchedChecking = false;
- // Increase Rx signal strength threshold if necessary.
- if( (priv->AdRxSignalStrength > (priv->AdRxSsThreshold + 10)) && // Signal is much stronger than current threshold
- priv->AdRxSsThreshold <= priv->AdMaxRxSsThreshold) // Current threhold is not yet reach upper limit.
- {
- priv->AdRxSsThreshold = (priv->AdRxSsThreshold + priv->AdRxSignalStrength) / 2;
- priv->AdRxSsThreshold = (priv->AdRxSsThreshold > priv->AdMaxRxSsThreshold) ?
- priv->AdMaxRxSsThreshold: priv->AdRxSsThreshold;//+by amy 080312
- }
+		/*
+		 * <Roger_Notes> We evaluate Rx signal strength ONLY when the default antenna
+		 * wasn't changed by the HW evaluation.
+		 * 2008.02.27.
+		 *
+		 * [TRC Dell Lab] SignalStrength is inaccurate. Isaiah 2008-03-05
+		 * For example, throughput of aux is better than the main antenna (about 10M vs. 2M),
+		 * but AdRxSignalStrength is less than main.
+		 * Our guess is that the main antenna has lower throughput and gets many chances
+		 * to receive more CCK packets (e.g. Beacon) which have stronger SignalStrength.
+		 */
+ if ((!priv->bHWAdSwitched) && (bSwCheckSS)) {
+ /* Evaluate Rx signal strength if we shall switch antenna now. */
+ if (priv->AdRxSignalStrength < priv->AdRxSsThreshold) {
+ /* Rx signal strength is weak => Switch Antenna. */
+ priv->AdRxSsBeforeSwitched = priv->AdRxSignalStrength;
+ priv->bAdSwitchedChecking = true;
+
+ SwitchAntenna(dev);
+ } else {
+ /* Rx signal strength is OK. */
+ priv->bAdSwitchedChecking = false;
+ /* Increase Rx signal strength threshold if necessary. */
+			if ((priv->AdRxSignalStrength > (priv->AdRxSsThreshold + 10)) && /* Signal is much stronger than the current threshold */
+			    priv->AdRxSsThreshold <= priv->AdMaxRxSsThreshold) { /* Current threshold has not yet reached the upper limit. */
+
+ priv->AdRxSsThreshold = (priv->AdRxSsThreshold + priv->AdRxSignalStrength) / 2;
+ priv->AdRxSsThreshold = (priv->AdRxSsThreshold > priv->AdMaxRxSsThreshold) ?
+							priv->AdMaxRxSsThreshold : priv->AdRxSsThreshold;
+ }
- // Reduce Antenna Diversity checking period if possible.
- if( priv->AdCheckPeriod > priv->AdMinCheckPeriod )
- {
- priv->AdCheckPeriod /= 2;
+ /* Reduce Antenna Diversity checking period if possible. */
+ if (priv->AdCheckPeriod > priv->AdMinCheckPeriod)
+ priv->AdCheckPeriod /= 2;
}
}
- }
}
-//by amy 080312
- // Reset antenna diversity Rx related statistics.
+ /* Reset antenna diversity Rx related statistics. */
priv->AdRxOkCnt = 0;
priv->AdMainAntennaRxOkCnt = 0;
priv->AdAuxAntennaRxOkCnt = 0;
-//by amy 080312
-
-// priv->AdRxOkCnt = 0;//-by amy 080312
-
-// printk("-SwAntennaDiversity()\n");
}
-//
-// Description:
-// Return TRUE if we shall perform Tx Power Tracking Mecahnism, FALSE otherwise.
-//
-bool
-CheckTxPwrTracking( struct net_device *dev)
+/* Return true if we shall perform the Tx Power Tracking mechanism, false otherwise. */
+bool CheckTxPwrTracking(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- if(!priv->bTxPowerTrack)
- {
+ if (!priv->bTxPowerTrack)
return false;
- }
-//lzm reserved 080826
- //if(priv->bScanInProgress)
- //{
- // return false;
- //}
-
- //if 87SE is in High Power , don't do Tx Power Tracking. asked by SD3 ED. 2008-08-08 Isaiah
- if(priv->bToUpdateTxPwr)
- {
+	/* If the 87SE is in High Power, don't do Tx Power Tracking. Asked by SD3 ED. 2008-08-08 Isaiah */
+ if (priv->bToUpdateTxPwr)
return false;
- }
return true;
}
-//
-// Description:
-// Timer callback function of SW Antenna Diversity.
-//
-void
-SwAntennaDiversityTimerCallback(
- struct net_device *dev
- )
+/* Timer callback function of SW Antenna Diversity. */
+void SwAntennaDiversityTimerCallback(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
RT_RF_POWER_STATE rtState;
- //printk("+SwAntennaDiversityTimerCallback()\n");
-
- //
- // We do NOT need to switch antenna while RF is off.
- // 2007.05.09, added by Roger.
- //
+ /* We do NOT need to switch antenna while RF is off. */
rtState = priv->eRFPowerState;
- do{
- if (rtState == eRfOff)
- {
-// printk("SwAntennaDiversityTimer - RF is OFF.\n");
+ do {
+ if (rtState == eRfOff) {
break;
- }
- else if (rtState == eRfSleep)
- {
- // Don't access BB/RF under Disable PLL situation.
- //RT_TRACE((COMP_RF|COMP_ANTENNA), DBG_LOUD, ("SwAntennaDiversityTimerCallback(): RF is Sleep => skip it\n"));
+ } else if (rtState == eRfSleep) {
+ /* Don't access BB/RF under Disable PLL situation. */
break;
}
SwAntennaDiversity(dev);
- }while(false);
+ } while (false);
- if(priv->up)
- {
+ if (priv->up) {
priv->SwAntennaDiversityTimer.expires = jiffies + MSECS(ANTENNA_DIVERSITY_TIMER_PERIOD);
add_timer(&priv->SwAntennaDiversityTimer);
}
-
- //printk("-SwAntennaDiversityTimerCallback()\n");
}
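
The threshold-raise path in SwAntennaDiversity() above amounts to a clamped
running average: step halfway toward the observed signal, but never past the
configured maximum. A minimal sketch of that rule, assuming the driver's
u8-sized fields; the helper name is hypothetical:

	/* Clamped running average used when raising the Rx SS threshold. */
	static u8 ad_raise_rx_ss_threshold(u8 threshold, u8 rssi, u8 max_threshold)
	{
		u8 t = (threshold + rssi) / 2;	/* step halfway toward the signal */

		return (t > max_threshold) ? max_threshold : t;	/* clamp at the cap */
	}
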
diff --git a/drivers/staging/rtl8187se/r8180_wx.c b/drivers/staging/rtl8187se/r8180_wx.c
index 39ef7e0193fb..303ec691262a 100644
--- a/drivers/staging/rtl8187se/r8180_wx.c
+++ b/drivers/staging/rtl8187se/r8180_wx.c
@@ -23,24 +23,22 @@
#include "ieee80211/dot11d.h"
-/* #define RATE_COUNT 4 */
u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000,
6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000};
#define RATE_COUNT ARRAY_SIZE(rtl8180_rates)
static CHANNEL_LIST DefaultChannelPlan[] = {
-/* {{1,2,3,4,5,6,7,8,9,10,11,12,13,14},14}, */ /*Default channel plan */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40, 44, 48, 52, 56, 60, 64}, 19}, /*FCC */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11}, /*IC */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /*ETSI */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /*Spain. Change to ETSI. */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /*France. Change to ETSI. */
- {{14, 36, 40, 44, 48, 52, 56, 60, 64}, 9}, /*MKK */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52, 56, 60, 64}, 22},/*MKK1 */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /*Israel. */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 34, 38, 42, 46}, 17}, /*For 11a , TELEC */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14} /*For Global Domain. 1-11:active scan, 12-14 passive scan.*/ /* +YJ, 080626 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40, 44, 48, 52, 56, 60, 64}, 19}, /* FCC */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11}, /* IC */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /* ETSI */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /* Spain. Change to ETSI. */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /* France. Change to ETSI. */
+ {{14, 36, 40, 44, 48, 52, 56, 60, 64}, 9}, /* MKK */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52, 56, 60, 64}, 22}, /* MKK1 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /* Israel */
+	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 34, 38, 42, 46}, 17},	/* For 11a, TELEC */
+	{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14}			/* For Global Domain. 1-11: active scan, 12-14: passive scan. */
};
static int r8180_wx_get_freq(struct net_device *dev,
struct iw_request_info *a,
@@ -63,14 +61,7 @@ int r8180_wx_set_key(struct net_device *dev, struct iw_request_info *info,
if (erq->flags & IW_ENCODE_DISABLED)
-/* i = erq->flags & IW_ENCODE_INDEX;
- if (i < 1 || i > 4)
-*/
-
if (erq->length > 0) {
-
- /*int len = erq->length <= 5 ? 5 : 13; */
-
u32* tkey = (u32*) key;
priv->key0[0] = tkey[0];
priv->key0[1] = tkey[1];
@@ -192,33 +183,32 @@ static int r8180_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
return 0;
down(&priv->wx_sem);
-/* printk("set mode ENABLE_IPS\n"); */
if (priv->bInactivePs) {
if (wrqu->mode == IW_MODE_ADHOC)
IPSLeave(dev);
}
ret = ieee80211_wx_set_mode(priv->ieee80211, a, wrqu, b);
-/* rtl8180_commit(dev); */
-
up(&priv->wx_sem);
return ret;
}
/* YJ,add,080819,for hidden ap */
struct iw_range_with_scan_capa {
- /* Informative stuff (to choose between different interface) */
- __u32 throughput; /* To give an idea... */
+ /* Informative stuff (to choose between different interface) */
+
+ __u32 throughput; /* To give an idea... */
+
/* In theory this value should be the maximum benchmarked
- * TCP/IP throughput, because with most of these devices the
- * bit rate is meaningless (overhead an co) to estimate how
- * fast the connection will go and pick the fastest one.
- * I suggest people to play with Netperf or any benchmark...
- */
+ * TCP/IP throughput, because with most of these devices the
+	 * bit rate is meaningless (overhead and co.) when estimating how
+	 * fast the connection will go and picking the fastest one.
+	 * I suggest people play with Netperf or any benchmark...
+ */
/* NWID (or domain id) */
- __u32 min_nwid; /* Minimal NWID we are able to set */
- __u32 max_nwid; /* Maximal NWID we are able to set */
+ __u32 min_nwid; /* Minimal NWID we are able to set */
+ __u32 max_nwid; /* Maximal NWID we are able to set */
/* Old Frequency (backward compat - moved lower ) */
__u16 old_num_channels;
@@ -238,7 +228,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
struct r8180_priv *priv = ieee80211_priv(dev);
u16 val;
int i;
- /*struct iw_range_with_scan_capa* tmp = (struct iw_range_with_scan_capa*)range; */ /*YJ,add,080819,for hidden ap */
wrqu->data.length = sizeof(*range);
memset(range, 0, sizeof(*range));
@@ -291,14 +280,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
range->we_version_compiled = WIRELESS_EXT;
range->we_version_source = 16;
-/* range->retry_capa; */ /* What retry options are supported */
-/* range->retry_flags; */ /* How to decode max/min retry limit */
-/* range->r_time_flags;*/ /* How to decode max/min retry life */
-/* range->min_retry; */ /* Minimal number of retries */
-/* range->max_retry; */ /* Maximal number of retries */
-/* range->min_r_time; */ /* Minimal retry lifetime */
-/* range->max_r_time; */ /* Maximal retry lifetime */
-
range->num_channels = 14;
for (i = 0, val = 0; i < 14; i++) {
@@ -310,8 +291,8 @@ static int rtl8180_wx_get_range(struct net_device *dev,
range->freq[val].e = 1;
val++;
} else {
- /* FIXME: do we need to set anything for channels */
- /* we don't use ? */
+ /* FIXME: do we need to set anything for channels */
+ /* we don't use ? */
}
if (val == IW_MAX_FREQUENCIES)
@@ -322,8 +303,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
- /*tmp->scan_capa = 0x01; */ /*YJ,add,080819,for hidden ap */
-
return 0;
}
@@ -339,50 +318,29 @@ static int r8180_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
if (priv->ieee80211->bHwRadioOff)
return 0;
-/*YJ,add,080819, for hidden ap */
- /*printk("==*&*&*&==>%s in\n", __func__); */
- /*printk("=*&*&*&*===>flag:%x, %x\n", wrqu->data.flags, IW_SCAN_THIS_ESSID); */
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
struct iw_scan_req* req = (struct iw_scan_req*)b;
if (req->essid_len) {
- /*printk("==**&*&*&**===>scan set ssid:%s\n", req->essid); */
ieee->current_network.ssid_len = req->essid_len;
memcpy(ieee->current_network.ssid, req->essid, req->essid_len);
- /*printk("=====>network ssid:%s\n", ieee->current_network.ssid); */
}
}
-/*YJ,add,080819, for hidden ap, end */
down(&priv->wx_sem);
if (priv->up) {
-/* printk("set scan ENABLE_IPS\n"); */
priv->ieee80211->actscanning = true;
if (priv->bInactivePs && (priv->ieee80211->state != IEEE80211_LINKED)) {
IPSLeave(dev);
- /*down(&priv->ieee80211->wx_sem); */
-/*
- if (priv->ieee80211->iw_mode == IW_MODE_MONITOR || !(priv->ieee80211->proto_started)){
- ret = -1;
- up(&priv->ieee80211->wx_sem);
- up(&priv->wx_sem);
- return ret;
- }
-*/
- /* queue_work(priv->ieee80211->wq, &priv->ieee80211->wx_sync_scan_wq); */
- /* printk("start scan============================>\n"); */
ieee80211_softmac_ips_scan_syncro(priv->ieee80211);
-/* ieee80211_rtl_start_scan(priv->ieee80211); */
- /* intentionally forget to up sem */
-/* up(&priv->ieee80211->wx_sem); */
ret = 0;
} else {
- /* YJ,add,080828, prevent scan in BusyTraffic */
+ /* prevent scan in BusyTraffic */
/* FIXME: Need to consider last scan time */
if ((priv->link_detect.bBusyTraffic) && (true)) {
ret = 0;
printk("Now traffic is busy, please try later!\n");
} else
- /* YJ,add,080828, prevent scan in BusyTraffic,end */
+			/* prevent scan in BusyTraffic, end */
ret = ieee80211_wx_set_scan(priv->ieee80211, a, wrqu, b);
}
} else
@@ -424,10 +382,8 @@ static int r8180_wx_set_essid(struct net_device *dev,
return 0;
down(&priv->wx_sem);
- /* printk("set essid ENABLE_IPS\n"); */
if (priv->bInactivePs)
IPSLeave(dev);
-/* printk("haha:set essid %s essid_len = %d essid_flgs = %d\n",b, wrqu->essid.length, wrqu->essid.flags); */
ret = ieee80211_wx_set_essid(priv->ieee80211, a, wrqu, b);
@@ -597,28 +553,6 @@ static int r8180_wx_set_scan_type(struct net_device *dev, struct iw_request_info
return 1;
}
-
-/* added by christian */
-/*
-static int r8180_wx_set_monitor_type(struct net_device *dev, struct iw_request_info *aa, union
- iwreq_data *wrqu, char *p){
-
- struct r8180_priv *priv = ieee80211_priv(dev);
- int *parms=(int*)p;
- int mode=parms[0];
-
- if(priv->ieee80211->iw_mode != IW_MODE_MONITOR) return -1;
- priv->prism_hdr = mode;
- if(!mode)dev->type=ARPHRD_IEEE80211;
- else dev->type=ARPHRD_IEEE80211_PRISM;
- DMESG("using %s RX encap", mode ? "AVS":"80211");
- return 0;
-
-}
-*/
-/*of r8180_wx_set_monitor_type */
-/* end added christian */
-
static int r8180_wx_set_retry(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -661,14 +595,6 @@ static int r8180_wx_set_retry(struct net_device *dev,
*/
rtl8180_commit(dev);
- /*
- if(priv->up){
- rtl8180_rtx_disable(dev);
- rtl8180_rx_enable(dev);
- rtl8180_tx_enable(dev);
-
- }
- */
exit:
up(&priv->wx_sem);
@@ -695,8 +621,6 @@ static int r8180_wx_get_retry(struct net_device *dev,
wrqu->retry.flags = IW_RETRY_LIMIT & IW_RETRY_MIN;
wrqu->retry.value = priv->retry_data;
}
- /* DMESG("returning %d",wrqu->retry.value); */
-
return 0;
}
@@ -726,7 +650,6 @@ static int r8180_wx_set_sens(struct net_device *dev,
return 0;
down(&priv->wx_sem);
- /* DMESG("attempt to set sensivity to %ddb",wrqu->sens.value); */
if (priv->rf_set_sens == NULL) {
err = -1; /* we have not this support for this radio */
goto exit;
@@ -847,58 +770,6 @@ static int dummy(struct net_device *dev, struct iw_request_info *a,
return -1;
}
-/*
-static int r8180_wx_get_psmode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8180_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device *ieee;
- int ret = 0;
-
-
-
- down(&priv->wx_sem);
-
- if(priv) {
- ieee = priv->ieee80211;
- if(ieee->ps == IEEE80211_PS_DISABLED) {
- *((unsigned int *)extra) = IEEE80211_PS_DISABLED;
- goto exit;
- }
- *((unsigned int *)extra) = IW_POWER_TIMEOUT;
- if (ieee->ps & IEEE80211_PS_MBCAST)
- *((unsigned int *)extra) |= IW_POWER_ALL_R;
- else
- *((unsigned int *)extra) |= IW_POWER_UNICAST_R;
- } else
- ret = -1;
-exit:
- up(&priv->wx_sem);
-
- return ret;
-}
-static int r8180_wx_set_psmode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8180_priv *priv = ieee80211_priv(dev);
- //struct ieee80211_device *ieee;
- int ret = 0;
-
-
-
- down(&priv->wx_sem);
-
- ret = ieee80211_wx_set_power(priv->ieee80211, info, wrqu, extra);
-
- up(&priv->wx_sem);
-
- return ret;
-
-}
-*/
-
static int r8180_wx_get_iwmode(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -964,7 +835,6 @@ static int r8180_wx_set_iwmode(struct net_device *dev,
} else {
ieee->mode = mode;
ieee->modulation = modulation;
-/* ieee80211_start_protocol(ieee); */
}
up(&priv->wx_sem);
@@ -1016,7 +886,6 @@ static int r8180_wx_get_siglevel(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
- /* struct ieee80211_network *network = &(priv->ieee80211->current_network); */
int ret = 0;
@@ -1036,7 +905,6 @@ static int r8180_wx_get_sigqual(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
- /* struct ieee80211_network *network = &(priv->ieee80211->current_network); */
int ret = 0;
@@ -1150,7 +1018,6 @@ static int r8180_wx_set_channelplan(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
- /* struct ieee80211_device *ieee = netdev_priv(dev); */
int *val = (int *)extra;
int i;
printk("-----in fun %s\n", __func__);
@@ -1223,7 +1090,6 @@ static int r8180_wx_set_enc_ext(struct net_device *dev,
{
struct r8180_priv *priv = ieee80211_priv(dev);
- /* printk("===>%s()\n", __func__); */
int ret = 0;
@@ -1240,7 +1106,6 @@ static int r8180_wx_set_auth(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- /* printk("====>%s()\n", __func__); */
struct r8180_priv *priv = ieee80211_priv(dev);
int ret = 0;
@@ -1257,8 +1122,6 @@ static int r8180_wx_set_mlme(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- /* printk("====>%s()\n", __func__); */
-
int ret = 0;
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -1278,7 +1141,6 @@ static int r8180_wx_set_gen_ie(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
-/* printk("====>%s(), len:%d\n", __func__, data->length); */
int ret = 0;
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -1291,68 +1153,67 @@ static int r8180_wx_set_gen_ie(struct net_device *dev,
ret = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, wrqu->data.length);
#endif
up(&priv->wx_sem);
- /* printk("<======%s(), ret:%d\n", __func__, ret); */
return ret;
}
static iw_handler r8180_wx_handlers[] = {
- NULL, /* SIOCSIWCOMMIT */
+ NULL, /* SIOCSIWCOMMIT */
r8180_wx_get_name, /* SIOCGIWNAME */
- dummy, /* SIOCSIWNWID */
- dummy, /* SIOCGIWNWID */
+ dummy, /* SIOCSIWNWID */
+ dummy, /* SIOCGIWNWID */
r8180_wx_set_freq, /* SIOCSIWFREQ */
r8180_wx_get_freq, /* SIOCGIWFREQ */
r8180_wx_set_mode, /* SIOCSIWMODE */
r8180_wx_get_mode, /* SIOCGIWMODE */
r8180_wx_set_sens, /* SIOCSIWSENS */
r8180_wx_get_sens, /* SIOCGIWSENS */
- NULL, /* SIOCSIWRANGE */
- rtl8180_wx_get_range, /* SIOCGIWRANGE */
- NULL, /* SIOCSIWPRIV */
- NULL, /* SIOCGIWPRIV */
- NULL, /* SIOCSIWSTATS */
- NULL, /* SIOCGIWSTATS */
- dummy, /* SIOCSIWSPY */
- dummy, /* SIOCGIWSPY */
- NULL, /* SIOCGIWTHRSPY */
- NULL, /* SIOCWIWTHRSPY */
+ NULL, /* SIOCSIWRANGE */
+ rtl8180_wx_get_range, /* SIOCGIWRANGE */
+ NULL, /* SIOCSIWPRIV */
+ NULL, /* SIOCGIWPRIV */
+ NULL, /* SIOCSIWSTATS */
+ NULL, /* SIOCGIWSTATS */
+ dummy, /* SIOCSIWSPY */
+ dummy, /* SIOCGIWSPY */
+	NULL,				/* SIOCSIWTHRSPY */
+	NULL,				/* SIOCGIWTHRSPY */
r8180_wx_set_wap, /* SIOCSIWAP */
r8180_wx_get_wap, /* SIOCGIWAP */
r8180_wx_set_mlme, /* SIOCSIWMLME*/
- dummy, /* SIOCGIWAPLIST -- depricated */
+	dummy,				/* SIOCGIWAPLIST -- deprecated */
r8180_wx_set_scan, /* SIOCSIWSCAN */
r8180_wx_get_scan, /* SIOCGIWSCAN */
r8180_wx_set_essid, /* SIOCSIWESSID */
r8180_wx_get_essid, /* SIOCGIWESSID */
- dummy, /* SIOCSIWNICKN */
- dummy, /* SIOCGIWNICKN */
- NULL, /* -- hole -- */
- NULL, /* -- hole -- */
+ dummy, /* SIOCSIWNICKN */
+ dummy, /* SIOCGIWNICKN */
+ NULL, /* -- hole -- */
+ NULL, /* -- hole -- */
r8180_wx_set_rate, /* SIOCSIWRATE */
r8180_wx_get_rate, /* SIOCGIWRATE */
r8180_wx_set_rts, /* SIOCSIWRTS */
r8180_wx_get_rts, /* SIOCGIWRTS */
r8180_wx_set_frag, /* SIOCSIWFRAG */
r8180_wx_get_frag, /* SIOCGIWFRAG */
- dummy, /* SIOCSIWTXPOW */
- dummy, /* SIOCGIWTXPOW */
+ dummy, /* SIOCSIWTXPOW */
+ dummy, /* SIOCGIWTXPOW */
r8180_wx_set_retry, /* SIOCSIWRETRY */
r8180_wx_get_retry, /* SIOCGIWRETRY */
r8180_wx_set_enc, /* SIOCSIWENCODE */
r8180_wx_get_enc, /* SIOCGIWENCODE */
r8180_wx_set_power, /* SIOCSIWPOWER */
r8180_wx_get_power, /* SIOCGIWPOWER */
- NULL, /*---hole---*/
- NULL, /*---hole---*/
- r8180_wx_set_gen_ie, /* SIOCSIWGENIE */
- NULL, /* SIOCSIWGENIE */
+ NULL, /*---hole---*/
+ NULL, /*---hole---*/
+ r8180_wx_set_gen_ie, /* SIOCSIWGENIE */
+	NULL,			/* SIOCGIWGENIE */
r8180_wx_set_auth, /* SIOCSIWAUTH */
- NULL, /* SIOCSIWAUTH */
- r8180_wx_set_enc_ext, /* SIOCSIWENCODEEXT */
- NULL, /* SIOCSIWENCODEEXT */
- NULL, /* SIOCSIWPMKSA */
- NULL, /*---hole---*/
+	NULL,			/* SIOCGIWAUTH */
+	r8180_wx_set_enc_ext,	/* SIOCSIWENCODEEXT */
+	NULL,			/* SIOCGIWENCODEEXT */
+ NULL, /* SIOCSIWPMKSA */
+ NULL, /*---hole---*/
};
@@ -1373,14 +1234,6 @@ static const struct iw_priv_args r8180_private_args[] = {
0, 0, "dummy"
},
- /* added by christian */
- /*
- {
- SIOCIWFIRSTPRIV + 0x2,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "prismhdr"
- },
- */
- /* end added by christian */
{
SIOCIWFIRSTPRIV + 0x4,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "activescan"
@@ -1399,18 +1252,6 @@ static const struct iw_priv_args r8180_private_args[] = {
0, 0, "dummy"
},
-/*
- {
- SIOCIWFIRSTPRIV + 0x5,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getpsmode"
- },
- {
- SIOCIWFIRSTPRIV + 0x6,
- IW_PRIV_SIZE_FIXED, 0, "setpsmode"
- },
-*/
-/* set/get mode have been realized in public handlers */
-
{
SIOCIWFIRSTPRIV + 0x8,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "setiwmode"
@@ -1481,7 +1322,7 @@ static const struct iw_priv_args r8180_private_args[] = {
static iw_handler r8180_private_handler[] = {
- r8180_wx_set_crcmon, /*SIOCIWSECONDPRIV*/
+ r8180_wx_set_crcmon, /*SIOCIWSECONDPRIV*/
dummy,
r8180_wx_set_beaconinterval,
dummy,
@@ -1513,16 +1354,15 @@ static inline int is_same_network(struct ieee80211_network *src,
struct ieee80211_network *dst,
struct ieee80211_device *ieee)
{
- /* A network is only a duplicate if the channel, BSSID, ESSID
- * and the capability field (in particular IBSS and BSS) all match.
- * We treat all <hidden> with the same BSSID and channel
- * as one network */
- return (((src->ssid_len == dst->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && /* YJ,mod, 080819,for hidden ap */
- /* ((src->ssid_len == dst->ssid_len) && */
+	/* A network is only a duplicate if the channel, BSSID, ESSID
+	 * and the capability field (in particular IBSS and BSS) all match.
+	 * We treat all <hidden> networks with the same BSSID and channel
+	 * as one network.
+	 */
+ return (((src->ssid_len == dst->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && /* YJ,mod, 080819,for hidden ap */
(src->channel == dst->channel) &&
!memcmp(src->bssid, dst->bssid, ETH_ALEN) &&
(!memcmp(src->ssid, dst->ssid, src->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && /* YJ,mod, 080819,for hidden ap */
- /*!memcmp(src->ssid, dst->ssid, src->ssid_len) && */
((src->capability & WLAN_CAPABILITY_IBSS) ==
(dst->capability & WLAN_CAPABILITY_IBSS)) &&
((src->capability & WLAN_CAPABILITY_BSS) ==
@@ -1535,11 +1375,9 @@ static struct iw_statistics *r8180_get_wireless_stats(struct net_device *dev)
struct r8180_priv *priv = ieee80211_priv(dev);
struct ieee80211_device* ieee = priv->ieee80211;
struct iw_statistics* wstats = &priv->wstats;
- /* struct ieee80211_network* target = NULL; */
int tmp_level = 0;
int tmp_qual = 0;
int tmp_noise = 0;
- /* unsigned long flag; */
if (ieee->state < IEEE80211_LINKED) {
wstats->qual.qual = 0;
@@ -1552,9 +1390,7 @@ static struct iw_statistics *r8180_get_wireless_stats(struct net_device *dev)
tmp_level = (&ieee->current_network)->stats.signal;
tmp_qual = (&ieee->current_network)->stats.signalstrength;
tmp_noise = (&ieee->current_network)->stats.noise;
- /* printk("level:%d, qual:%d, noise:%d\n", tmp_level, tmp_qual, tmp_noise); */
-/* printk("level:%d\n", tmp_level); */
wstats->qual.level = tmp_level;
wstats->qual.qual = tmp_qual;
wstats->qual.noise = tmp_noise;
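
The SSID clause in is_same_network() above bypasses the SSID comparison in
infrastructure mode, so that <hidden> beacons (zero-length or masked SSID)
merge with probe responses sharing the same BSSID and channel. The clause,
isolated as a sketch; the helper name is hypothetical:

	/* SSID part of the duplicate test; hidden-AP workaround for infra mode. */
	static inline int ssid_matches(const struct ieee80211_network *src,
				       const struct ieee80211_network *dst,
				       int iw_mode)
	{
		if (iw_mode == IW_MODE_INFRA)	/* hidden SSIDs must still merge */
			return 1;
		return src->ssid_len == dst->ssid_len &&
		       !memcmp(src->ssid, dst->ssid, src->ssid_len);
	}
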
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 6c5061f12bad..13979b5ea32a 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -2453,7 +2453,7 @@ static inline void update_network(struct rtllib_network *dst,
if (src->wmm_param[0].ac_aci_acm_aifsn ||
src->wmm_param[1].ac_aci_acm_aifsn ||
src->wmm_param[2].ac_aci_acm_aifsn ||
- src->wmm_param[1].ac_aci_acm_aifsn)
+ src->wmm_param[3].ac_aci_acm_aifsn)
memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN);
dst->SignalStrength = src->SignalStrength;
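
The one-line fix above replaces a duplicated wmm_param[1] index with
wmm_param[3], so all four WME access categories are now inspected. A loop
removes this class of copy-paste bug entirely; a sketch assuming four
access categories, as the array usage suggests:

	/* Same check, written as a loop over the four access categories. */
	u8 any_aifsn = 0;
	int i;

	for (i = 0; i < 4; i++)
		any_aifsn |= src->wmm_param[i].ac_aci_acm_aifsn;
	if (any_aifsn)
		memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN);
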
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index 1637f1110991..c5a15dba1bf5 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -2234,7 +2234,6 @@ inline int rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
if (!network)
return 1;
- memset(network, 0, sizeof(*network));
ieee->state = RTLLIB_LINKED;
ieee->assoc_id = aid;
ieee->softmac_stats.rx_ass_ok++;
@@ -2259,8 +2258,8 @@ inline int rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
ieee->handle_assoc_response(ieee->dev,
(struct rtllib_assoc_response_frame *)header,
network);
- kfree(network);
}
+ kfree(network);
kfree(ieee->assocresp_ies);
ieee->assocresp_ies = NULL;
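
Moving kfree(network) below the conditional plugs a leak: the buffer was
previously freed only when a handle_assoc_response() callback was installed.
A condensed sketch of the corrected flow (the frame argument name is
illustrative):

	/* 'network' is freed on every path once allocated. */
	if (ieee->handle_assoc_response != NULL)
		ieee->handle_assoc_response(ieee->dev, assoc_resp, network);
	kfree(network);	/* unconditional: no leak when the callback is absent */
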
diff --git a/drivers/staging/rtl8192u/ieee80211/cipher.c b/drivers/staging/rtl8192u/ieee80211/cipher.c
index 69dcc3176ebc..d47345c4adcf 100644
--- a/drivers/staging/rtl8192u/ieee80211/cipher.c
+++ b/drivers/staging/rtl8192u/ieee80211/cipher.c
@@ -71,8 +71,8 @@ static int crypt(struct crypto_tfm *tfm,
u8 *src_p, *dst_p;
int in_place;
- scatterwalk_map(&walk_in, 0);
- scatterwalk_map(&walk_out, 1);
+ scatterwalk_map(&walk_in);
+ scatterwalk_map(&walk_out);
src_p = scatterwalk_whichbuf(&walk_in, bsize, tmp_src);
dst_p = scatterwalk_whichbuf(&walk_out, bsize, tmp_dst);
in_place = scatterwalk_samebuf(&walk_in, &walk_out,
@@ -84,10 +84,10 @@ static int crypt(struct crypto_tfm *tfm,
prfn(tfm, dst_p, src_p, crfn, enc, info, in_place);
- scatterwalk_done(&walk_in, 0, nbytes);
+ scatterwalk_done(&walk_in, nbytes);
scatterwalk_copychunks(dst_p, &walk_out, bsize, 1);
- scatterwalk_done(&walk_out, 1, nbytes);
+ scatterwalk_done(&walk_out, nbytes);
if (!nbytes)
return 0;
diff --git a/drivers/staging/rtl8192u/ieee80211/digest.c b/drivers/staging/rtl8192u/ieee80211/digest.c
index 301ed514ac9e..05e7497fd106 100644
--- a/drivers/staging/rtl8192u/ieee80211/digest.c
+++ b/drivers/staging/rtl8192u/ieee80211/digest.c
@@ -39,12 +39,12 @@ static void update(struct crypto_tfm *tfm,
unsigned int bytes_from_page = min(l, ((unsigned int)
(PAGE_SIZE)) -
offset);
- char *p = crypto_kmap(pg, 0) + offset;
+ char *p = kmap_atomic(pg) + offset;
tfm->__crt_alg->cra_digest.dia_update
(crypto_tfm_ctx(tfm), p,
bytes_from_page);
- crypto_kunmap(p, 0);
+ kunmap_atomic(p);
crypto_yield(tfm);
offset = 0;
pg++;
@@ -75,10 +75,10 @@ static void digest(struct crypto_tfm *tfm,
tfm->crt_digest.dit_init(tfm);
for (i = 0; i < nsg; i++) {
- char *p = crypto_kmap(sg[i].page, 0) + sg[i].offset;
+ char *p = kmap_atomic(sg[i].page) + sg[i].offset;
tfm->__crt_alg->cra_digest.dia_update(crypto_tfm_ctx(tfm),
p, sg[i].length);
- crypto_kunmap(p, 0);
+ kunmap_atomic(p);
crypto_yield(tfm);
}
crypto_digest_final(tfm, out);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index c9bdc7f6bdce..be2a28cf8edd 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -237,7 +237,7 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
#ifdef NOT_YET
if (ieee->iw_mode == IW_MODE_MASTER) {
- printk(KERN_DEBUG "%s: Master mode not yet suppported.\n",
+ printk(KERN_DEBUG "%s: Master mode not yet supported.\n",
ieee->dev->name);
return 0;
/*
diff --git a/drivers/staging/rtl8192u/ieee80211/internal.h b/drivers/staging/rtl8192u/ieee80211/internal.h
index a7c096eb269f..bebe13ac53b7 100644
--- a/drivers/staging/rtl8192u/ieee80211/internal.h
+++ b/drivers/staging/rtl8192u/ieee80211/internal.h
@@ -23,23 +23,6 @@
#include <asm/kmap_types.h>
-extern enum km_type crypto_km_types[];
-
-static inline enum km_type crypto_kmap_type(int out)
-{
- return crypto_km_types[(in_softirq() ? 2 : 0) + out];
-}
-
-static inline void *crypto_kmap(struct page *page, int out)
-{
- return kmap_atomic(page, crypto_kmap_type(out));
-}
-
-static inline void crypto_kunmap(void *vaddr, int out)
-{
- kunmap_atomic(vaddr, crypto_kmap_type(out));
-}
-
static inline void crypto_yield(struct crypto_tfm *tfm)
{
if (!in_softirq())
diff --git a/drivers/staging/rtl8192u/ieee80211/kmap_types.h b/drivers/staging/rtl8192u/ieee80211/kmap_types.h
deleted file mode 100644
index de67bb01b5f5..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/kmap_types.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __KMAP_TYPES_H
-
-#define __KMAP_TYPES_H
-
-
-enum km_type {
- KM_BOUNCE_READ,
- KM_SKB_SUNRPC_DATA,
- KM_SKB_DATA_SOFTIRQ,
- KM_USER0,
- KM_USER1,
- KM_BH_IRQ,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
- KM_TYPE_NR
-};
-
-#define _ASM_KMAP_TYPES_H
-
-#endif
diff --git a/drivers/staging/rtl8192u/ieee80211/scatterwalk.c b/drivers/staging/rtl8192u/ieee80211/scatterwalk.c
index 3543a6145046..8b73f6cefcf9 100644
--- a/drivers/staging/rtl8192u/ieee80211/scatterwalk.c
+++ b/drivers/staging/rtl8192u/ieee80211/scatterwalk.c
@@ -13,8 +13,6 @@
* any later version.
*
*/
-#include "kmap_types.h"
-
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
@@ -23,13 +21,6 @@
#include "internal.h"
#include "scatterwalk.h"
-enum km_type crypto_km_types[] = {
- KM_USER0,
- KM_USER1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
-};
-
void *scatterwalk_whichbuf(struct scatter_walk *walk, unsigned int nbytes, void *scratch)
{
if (nbytes <= walk->len_this_page &&
@@ -62,9 +53,9 @@ void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
walk->offset = sg->offset;
}
-void scatterwalk_map(struct scatter_walk *walk, int out)
+void scatterwalk_map(struct scatter_walk *walk)
{
- walk->data = crypto_kmap(walk->page, out) + walk->offset;
+ walk->data = kmap_atomic(walk->page) + walk->offset;
}
static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
@@ -103,7 +94,7 @@ void scatterwalk_done(struct scatter_walk *walk, int out, int more)
* has been verified as multiple of the block size.
*/
int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
- size_t nbytes, int out)
+ size_t nbytes)
{
if (buf != walk->data) {
while (nbytes > walk->len_this_page) {
@@ -111,9 +102,9 @@ int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
buf += walk->len_this_page;
nbytes -= walk->len_this_page;
- crypto_kunmap(walk->data, out);
+ kunmap_atomic(walk->data);
scatterwalk_pagedone(walk, out, 1);
- scatterwalk_map(walk, out);
+ scatterwalk_map(walk);
}
memcpy_dir(buf, walk->data, nbytes, out);
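
These scatterwalk/digest hunks track the kernel-wide kmap_atomic() API
change: the explicit km_type slot argument is gone (mappings now nest
implicitly), so the crypto_kmap()/crypto_kunmap() wrappers and their slot
tables can be deleted. A before/after sketch of the pattern:

	/* Old API: the caller picked an explicit slot, e.g.
	 *	vaddr = kmap_atomic(page, KM_SOFTIRQ0);
	 *	...
	 *	kunmap_atomic(vaddr, KM_SOFTIRQ0);
	 */

	/* New API: slots are implicit and stack-like. */
	void *vaddr = kmap_atomic(page);
	/* ... access the mapped data ... */
	kunmap_atomic(vaddr);
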
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index c09be0a66467..9c00865f302a 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -105,7 +105,6 @@ u32 rt_global_debug_component = \
static const struct usb_device_id rtl8192_usb_id_tbl[] = {
/* Realtek */
- {USB_DEVICE(0x0bda, 0x8192)},
{USB_DEVICE(0x0bda, 0x8709)},
/* Corega */
{USB_DEVICE(0x07aa, 0x0043)},
diff --git a/drivers/staging/rtl8712/Kconfig b/drivers/staging/rtl8712/Kconfig
index ea37473f71e5..6a43312380e0 100644
--- a/drivers/staging/rtl8712/Kconfig
+++ b/drivers/staging/rtl8712/Kconfig
@@ -9,13 +9,6 @@ config R8712U
This option adds the Realtek RTL8712 USB device such as the D-Link DWA-130.
If built as a module, it will be called r8712u.
-config R8712_AP
- bool "Realtek RTL8712U AP code"
- depends on R8712U
- default N
- ---help---
- This option allows the Realtek RTL8712 USB device to be an Access Point.
-
config R8712_TX_AGGR
bool "Realtek RTL8712U Transmit Aggregation code"
depends on R8712U && BROKEN
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index ed85b4415207..e83665d06020 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -140,7 +140,6 @@ struct dvobj_priv {
u8 ishighspeed;
uint(*inirp_init)(struct _adapter *adapter);
uint(*inirp_deinit)(struct _adapter *adapter);
- struct semaphore usb_suspend_sema;
struct usb_device *pusbdev;
};
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 98a3d684f9b2..7bbd53a410e3 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -330,7 +330,6 @@ u8 r8712_init_drv_sw(struct _adapter *padapter)
padapter->stapriv.padapter = padapter;
r8712_init_bcmc_stainfo(padapter);
r8712_init_pwrctrl_priv(padapter);
- sema_init(&(padapter->pwrctrlpriv.pnp_pwr_mgnt_sema), 0);
mp871xinit(padapter);
if (init_default_value(padapter) != _SUCCESS)
return _FAIL;
@@ -476,11 +475,6 @@ static int netdev_close(struct net_device *pnetdev)
r8712_free_assoc_resources(padapter);
/*s2-4.*/
r8712_free_network_queue(padapter);
- /* The interface is no longer Up: */
- padapter->bup = false;
- release_firmware(padapter->fw);
- /* never exit with a firmware callback pending */
- wait_for_completion(&padapter->rtl8712_fw_ready);
return 0;
}
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index 1ee943a58c4c..9ba603310fdc 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -72,18 +72,6 @@ static inline struct list_head *get_list_head(struct __queue *queue)
#define LIST_CONTAINOR(ptr, type, member) \
((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))
-static inline void _enter_hwio_critical(struct semaphore *prwlock,
- unsigned long *pirqL)
-{
- down(prwlock);
-}
-
-static inline void _exit_hwio_critical(struct semaphore *prwlock,
- unsigned long *pirqL)
-{
- up(prwlock);
-}
-
static inline void list_delete(struct list_head *plist)
{
list_del_init(plist);
@@ -152,11 +140,6 @@ static inline u32 _down_sema(struct semaphore *sema)
return _SUCCESS;
}
-static inline void _rtl_rwlock_init(struct semaphore *prwlock)
-{
- sema_init(prwlock, 1);
-}
-
static inline void _init_listhead(struct list_head *list)
{
INIT_LIST_HEAD(list);
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 6d692657e784..fa6dc9c09b3f 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -55,8 +55,6 @@ int r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter)
int alignment = 0;
struct sk_buff *pskb = NULL;
- sema_init(&precvpriv->recv_sema, 0);
- sema_init(&precvpriv->terminate_recvthread_sema, 0);
/*init recv_buf*/
_init_queue(&precvpriv->free_recv_buf_queue);
precvpriv->pallocated_recv_buf = _malloc(NR_RECVBUFF *
diff --git a/drivers/staging/rtl8712/rtl871x_io.c b/drivers/staging/rtl8712/rtl871x_io.c
index ca84ee02eacc..abc1c97378f7 100644
--- a/drivers/staging/rtl8712/rtl871x_io.c
+++ b/drivers/staging/rtl8712/rtl871x_io.c
@@ -131,7 +131,6 @@ uint r8712_alloc_io_queue(struct _adapter *adapter)
pio_req = (struct io_req *)(pio_queue->free_ioreqs_buf);
for (i = 0; i < NUM_IOREQ; i++) {
_init_listhead(&pio_req->list);
- sema_init(&pio_req->sema, 0);
list_insert_tail(&pio_req->list, &pio_queue->free_ioreqs);
pio_req++;
}
diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
index 86308a0093ed..d3d8727c2ec5 100644
--- a/drivers/staging/rtl8712/rtl871x_io.h
+++ b/drivers/staging/rtl8712/rtl871x_io.h
@@ -117,7 +117,6 @@ struct io_req {
u32 command;
u32 status;
u8 *pbuf;
- struct semaphore sema;
void (*_async_io_callback)(struct _adapter *padater,
struct io_req *pio_req, u8 *cnxt);
u8 *cnxt;
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 507584b837c3..ef35bc29a3fa 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -2380,13 +2380,7 @@ static struct iw_statistics *r871x_get_wireless_stats(struct net_device *dev)
tmp_qual = padapter->recvpriv.signal;
tmp_noise = padapter->recvpriv.noise;
piwstats->qual.level = tmp_level;
- /*piwstats->qual.qual = tmp_qual;
- * The NetworkManager of Fedora 10, 13 will use the link
- * quality for its display.
- * So, use the fw_rssi on link quality variable because
- * fw_rssi will be updated per 2 seconds.
- */
- piwstats->qual.qual = tmp_level;
+ piwstats->qual.qual = tmp_qual;
piwstats->qual.noise = tmp_noise;
}
piwstats->qual.updated = IW_QUAL_ALL_UPDATED;
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index 23e72a0401a8..9fd2ec7596cc 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -100,7 +100,6 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
{
struct pwrctrl_priv *pwrpriv = &(padapter->pwrctrlpriv);
struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
- struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80))
return;
@@ -110,8 +109,6 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
if (pwrpriv->cpwm >= PS_STATE_S2) {
if (pwrpriv->alives & CMD_ALIVE)
up(&(pcmdpriv->cmd_queue_sema));
- if (pwrpriv->alives & XMIT_ALIVE)
- up(&(pxmitpriv->xmit_sema));
}
pwrpriv->cpwm_tog = (preportpwrstate->state) & 0x80;
up(&pwrpriv->lock);
@@ -145,12 +142,12 @@ static void SetPSModeWorkItemCallback(struct work_struct *work)
struct pwrctrl_priv, SetPSModeWorkItem);
struct _adapter *padapter = container_of(pwrpriv,
struct _adapter, pwrctrlpriv);
- _enter_pwrlock(&pwrpriv->lock);
if (!pwrpriv->bSleep) {
+ _enter_pwrlock(&pwrpriv->lock);
if (pwrpriv->pwr_mode == PS_MODE_ACTIVE)
r8712_set_rpwm(padapter, PS_STATE_S4);
+ up(&pwrpriv->lock);
}
- up(&pwrpriv->lock);
}
static void rpwm_workitem_callback(struct work_struct *work)
@@ -160,13 +157,13 @@ static void rpwm_workitem_callback(struct work_struct *work)
struct _adapter *padapter = container_of(pwrpriv,
struct _adapter, pwrctrlpriv);
u8 cpwm = pwrpriv->cpwm;
- _enter_pwrlock(&pwrpriv->lock);
if (pwrpriv->cpwm != pwrpriv->rpwm) {
+ _enter_pwrlock(&pwrpriv->lock);
cpwm = r8712_read8(padapter, SDIO_HCPWM);
pwrpriv->rpwm_retry = 1;
r8712_set_rpwm(padapter, pwrpriv->rpwm);
+ up(&pwrpriv->lock);
}
- up(&pwrpriv->lock);
}
static void rpwm_check_handler (void *FunctionContext)
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.h b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
index b41ca2892be5..6024c4f63d5b 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.h
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
@@ -133,7 +133,6 @@ struct pwrctrl_priv {
u8 rpwm_retry;
uint bSetPSModeWorkItemInProgress;
- struct semaphore pnp_pwr_mgnt_sema;
spinlock_t pnp_pwr_mgnt_lock;
s32 pnp_current_pwr_state;
u8 pnp_bstop_trx;
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index 7069f06d9b5d..5b03b405883e 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -93,7 +93,6 @@ sint _r8712_init_recv_priv(struct recv_priv *precvpriv,
precvframe++;
}
precvpriv->rx_pending_cnt = 1;
- sema_init(&precvpriv->allrxreturnevt, 0);
return r8712_init_recv_priv(precvpriv, padapter);
}
diff --git a/drivers/staging/rtl8712/rtl871x_recv.h b/drivers/staging/rtl8712/rtl871x_recv.h
index cc7a72fee1c2..e42e6f0a15e6 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.h
+++ b/drivers/staging/rtl8712/rtl871x_recv.h
@@ -85,8 +85,6 @@ using enter_critical section to protect
*/
struct recv_priv {
spinlock_t lock;
- struct semaphore recv_sema;
- struct semaphore terminate_recvthread_sema;
struct __queue free_recv_queue;
struct __queue recv_pending_queue;
u8 *pallocated_frame_buf;
@@ -100,7 +98,6 @@ struct recv_priv {
uint rx_largepacket_crcerr;
uint rx_smallpacket_crcerr;
uint rx_middlepacket_crcerr;
- struct semaphore allrxreturnevt;
u8 rx_pending_cnt;
uint ff_hwaddr;
struct tasklet_struct recv_tasklet;
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index 81bde803c59f..1247b3d9719d 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -42,10 +42,8 @@ static void _init_stainfo(struct sta_info *psta)
_init_listhead(&psta->hash_list);
_r8712_init_sta_xmit_priv(&psta->sta_xmitpriv);
_r8712_init_sta_recv_priv(&psta->sta_recvpriv);
-#ifdef CONFIG_R8712_AP
_init_listhead(&psta->asoc_list);
_init_listhead(&psta->auth_list);
-#endif
}
u32 _r8712_init_sta_priv(struct sta_priv *pstapriv)
@@ -72,10 +70,8 @@ u32 _r8712_init_sta_priv(struct sta_priv *pstapriv)
get_list_head(&pstapriv->free_sta_queue));
psta++;
}
-#ifdef CONFIG_R8712_AP
_init_listhead(&pstapriv->asoc_list);
_init_listhead(&pstapriv->auth_list);
-#endif
return _SUCCESS;
}
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index 8bbdee70f867..aa57e7754f04 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -71,8 +71,6 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv));
spin_lock_init(&pxmitpriv->lock);
- sema_init(&pxmitpriv->xmit_sema, 0);
- sema_init(&pxmitpriv->terminate_xmitthread_sema, 0);
/*
Please insert all the queue initializaiton using _init_queue below
*/
@@ -121,7 +119,6 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
_r8712_init_hw_txqueue(&pxmitpriv->bmc_txqueue, BMC_QUEUE_INX);
pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
pxmitpriv->txirp_cnt = 1;
- sema_init(&(pxmitpriv->tx_retevt), 0);
/*per AC pending irp*/
pxmitpriv->beq_cnt = 0;
pxmitpriv->bkq_cnt = 0;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
index a034c0fec718..638b79b4c5a8 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.h
+++ b/drivers/staging/rtl8712/rtl871x_xmit.h
@@ -202,8 +202,6 @@ struct hw_txqueue {
struct xmit_priv {
spinlock_t lock;
- struct semaphore xmit_sema;
- struct semaphore terminate_xmitthread_sema;
struct __queue be_pending;
struct __queue bk_pending;
struct __queue vi_pending;
@@ -233,7 +231,6 @@ struct xmit_priv {
uint tx_drop;
struct hw_xmit *hwxmits;
u8 hwxmit_entry;
- struct semaphore tx_retevt;/*all tx return event;*/
u8 txirp_cnt;
struct tasklet_struct xmit_tasklet;
_workitem xmit_pipe4_reset_wi;
diff --git a/drivers/staging/rtl8712/sta_info.h b/drivers/staging/rtl8712/sta_info.h
index 48d6a14c8f5f..f8016e9abffd 100644
--- a/drivers/staging/rtl8712/sta_info.h
+++ b/drivers/staging/rtl8712/sta_info.h
@@ -90,7 +90,6 @@ struct sta_info {
* curr_network(mlme_priv/security_priv/qos/ht) : AP CAP/INFO
* sta_info: (AP & STA) CAP/INFO
*/
-#ifdef CONFIG_R8712_AP
struct list_head asoc_list;
struct list_head auth_list;
unsigned int expire_to;
@@ -98,7 +97,6 @@ struct sta_info {
unsigned int authalg;
unsigned char chg_txt[128];
unsigned int tx_ra_bitmap;
-#endif
};
struct sta_priv {
@@ -111,13 +109,11 @@ struct sta_priv {
struct __queue sleep_q;
struct __queue wakeup_q;
struct _adapter *padapter;
-#ifdef CONFIG_R8712_AP
struct list_head asoc_list;
struct list_head auth_list;
unsigned int auth_to; /* sec, time to expire in authenticating. */
unsigned int assoc_to; /* sec, time to expire before associating. */
unsigned int expire_to; /* sec , time to expire after associated. */
-#endif
};
static inline u32 wifi_mac_hash(u8 *mac)
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 9bade184883b..e419b4fd82b9 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -30,6 +30,7 @@
#include <linux/usb.h>
#include <linux/module.h>
+#include <linux/firmware.h>
#include "osdep_service.h"
#include "drv_types.h"
@@ -105,10 +106,10 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
/* RTL8191SU */
/* Realtek */
{USB_DEVICE(0x0BDA, 0x8172)},
+ {USB_DEVICE(0x0BDA, 0x8192)},
/* Amigo */
{USB_DEVICE(0x0EB0, 0x9061)},
/* ASUS/EKB */
- {USB_DEVICE(0x0BDA, 0x8172)},
{USB_DEVICE(0x13D3, 0x3323)},
{USB_DEVICE(0x13D3, 0x3311)}, /* 11n mode disable */
{USB_DEVICE(0x13D3, 0x3342)},
@@ -160,7 +161,6 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
/* RTL8192SU */
/* Realtek */
{USB_DEVICE(0x0BDA, 0x8174)},
- {USB_DEVICE(0x0BDA, 0x8174)},
/* Belkin */
{USB_DEVICE(0x050D, 0x845A)},
/* Corega */
@@ -281,7 +281,6 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter)
}
if ((r8712_alloc_io_queue(padapter)) == _FAIL)
status = _FAIL;
- sema_init(&(padapter->dvobjpriv.usb_suspend_sema), 0);
return status;
}
@@ -623,6 +622,10 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
usb_set_intfdata(pusb_intf, NULL);
if (padapter) {
+ if (padapter->fw_found)
+ release_firmware(padapter->fw);
+ /* never exit with a firmware callback pending */
+ wait_for_completion(&padapter->rtl8712_fw_ready);
if (drvpriv.drv_registered == true)
padapter->bSurpriseRemoved = true;
if (pnetdev != NULL) {
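
The release_firmware()/wait_for_completion() pair moves from netdev_close()
to r871xu_dev_remove(), where it runs exactly once per device instead of on
every interface down. The ordering in the hunk matters; a sketch of the
constraint (fw_found is assumed to be set by the async load callback):

	/* Free the blob only if the load callback actually delivered it ... */
	if (padapter->fw_found)
		release_firmware(padapter->fw);
	/* ... and never return with that callback still able to run. */
	wait_for_completion(&padapter->rtl8712_fw_ready);
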
diff --git a/drivers/staging/rts5139/TODO b/drivers/staging/rts5139/TODO
index 4bde726ea5fa..dd5fabb8ea70 100644
--- a/drivers/staging/rts5139/TODO
+++ b/drivers/staging/rts5139/TODO
@@ -2,4 +2,8 @@ TODO:
- support more USB card reader of Realtek family
- use kernel coding style
- checkpatch.pl fixes
-
+- stop having thousands of lines of code duplicated with staging/rts_pstor
+- This driver contains an entire SD/MMC stack -- it should use the stack in
+ drivers/mmc instead, as a host driver e.g. drivers/mmc/host/realtek-usb.c;
+ see drivers/mmc/host/ushc.c as an example.
+- This driver presents cards as SCSI devices, but they should be MMC devices.
diff --git a/drivers/staging/rts5139/ms.h b/drivers/staging/rts5139/ms.h
index f9d46d210f23..3ce1dc90f19d 100644
--- a/drivers/staging/rts5139/ms.h
+++ b/drivers/staging/rts5139/ms.h
@@ -249,9 +249,9 @@ int ms_delay_write(struct rts51x_chip *chip);
#ifdef SUPPORT_MAGIC_GATE
int ms_switch_clock(struct rts51x_chip *chip);
-int ms_write_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 * data,
+int ms_write_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 *data,
int data_len);
-int ms_read_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 * data,
+int ms_read_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 *data,
int data_len);
int ms_set_rw_reg_addr(struct rts51x_chip *chip, u8 read_start, u8 read_cnt,
u8 write_start, u8 write_cnt);
diff --git a/drivers/staging/rts5139/rts51x_chip.c b/drivers/staging/rts5139/rts51x_chip.c
index adc0d0005735..b3e0bb22b0ff 100644
--- a/drivers/staging/rts5139/rts51x_chip.c
+++ b/drivers/staging/rts5139/rts51x_chip.c
@@ -541,7 +541,7 @@ int rts51x_get_rsp(struct rts51x_chip *chip, int rsp_len, int timeout)
return STATUS_SUCCESS;
}
-int rts51x_get_card_status(struct rts51x_chip *chip, u16 * status)
+int rts51x_get_card_status(struct rts51x_chip *chip, u16 *status)
{
int retval;
u16 val;
@@ -577,7 +577,7 @@ int rts51x_write_register(struct rts51x_chip *chip, u16 addr, u8 mask, u8 data)
return STATUS_SUCCESS;
}
-int rts51x_read_register(struct rts51x_chip *chip, u16 addr, u8 * data)
+int rts51x_read_register(struct rts51x_chip *chip, u16 addr, u8 *data)
{
int retval;
@@ -620,7 +620,7 @@ int rts51x_ep0_write_register(struct rts51x_chip *chip, u16 addr, u8 mask,
return STATUS_SUCCESS;
}
-int rts51x_ep0_read_register(struct rts51x_chip *chip, u16 addr, u8 * data)
+int rts51x_ep0_read_register(struct rts51x_chip *chip, u16 addr, u8 *data)
{
int retval;
u16 value = 0;
@@ -720,7 +720,7 @@ int rts51x_seq_read_register(struct rts51x_chip *chip, u16 addr, u16 len,
return STATUS_SUCCESS;
}
-int rts51x_read_ppbuf(struct rts51x_chip *chip, u8 * buf, int buf_len)
+int rts51x_read_ppbuf(struct rts51x_chip *chip, u8 *buf, int buf_len)
{
int retval;
@@ -735,7 +735,7 @@ int rts51x_read_ppbuf(struct rts51x_chip *chip, u8 * buf, int buf_len)
return STATUS_SUCCESS;
}
-int rts51x_write_ppbuf(struct rts51x_chip *chip, u8 * buf, int buf_len)
+int rts51x_write_ppbuf(struct rts51x_chip *chip, u8 *buf, int buf_len)
{
int retval;
@@ -776,7 +776,7 @@ int rts51x_write_phy_register(struct rts51x_chip *chip, u8 addr, u8 val)
return STATUS_SUCCESS;
}
-int rts51x_read_phy_register(struct rts51x_chip *chip, u8 addr, u8 * val)
+int rts51x_read_phy_register(struct rts51x_chip *chip, u8 addr, u8 *val)
{
int retval;
@@ -921,7 +921,7 @@ void rts51x_trace_msg(struct rts51x_chip *chip, unsigned char *buf, int clear)
}
#endif
-void rts51x_pp_status(struct rts51x_chip *chip, unsigned int lun, u8 * status,
+void rts51x_pp_status(struct rts51x_chip *chip, unsigned int lun, u8 *status,
u8 status_len)
{
struct sd_info *sd_card = &(chip->sd_card);
diff --git a/drivers/staging/rts5139/rts51x_chip.h b/drivers/staging/rts5139/rts51x_chip.h
index 321ece750ede..13fc2a410d90 100644
--- a/drivers/staging/rts5139/rts51x_chip.h
+++ b/drivers/staging/rts5139/rts51x_chip.h
@@ -857,12 +857,12 @@ static inline u8 *rts51x_get_rsp_data(struct rts51x_chip *chip)
return chip->rsp_buf;
}
-int rts51x_get_card_status(struct rts51x_chip *chip, u16 * status);
+int rts51x_get_card_status(struct rts51x_chip *chip, u16 *status);
int rts51x_write_register(struct rts51x_chip *chip, u16 addr, u8 mask, u8 data);
-int rts51x_read_register(struct rts51x_chip *chip, u16 addr, u8 * data);
+int rts51x_read_register(struct rts51x_chip *chip, u16 addr, u8 *data);
int rts51x_ep0_write_register(struct rts51x_chip *chip, u16 addr, u8 mask,
u8 data);
-int rts51x_ep0_read_register(struct rts51x_chip *chip, u16 addr, u8 * data);
+int rts51x_ep0_read_register(struct rts51x_chip *chip, u16 addr, u8 *data);
int rts51x_seq_write_register(struct rts51x_chip *chip, u16 addr, u16 len,
u8 *data);
int rts51x_seq_read_register(struct rts51x_chip *chip, u16 addr, u16 len,
diff --git a/drivers/staging/rts5139/rts51x_fop.h b/drivers/staging/rts5139/rts51x_fop.h
index 0453f57d1a84..94d75f08d255 100644
--- a/drivers/staging/rts5139/rts51x_fop.h
+++ b/drivers/staging/rts5139/rts51x_fop.h
@@ -48,7 +48,7 @@ int rts51x_open(struct inode *inode, struct file *filp);
int rts51x_release(struct inode *inode, struct file *filp);
ssize_t rts51x_read(struct file *filp, char __user *buf, size_t count,
loff_t *f_pos);
-ssize_t rts51x_write(struct file *filp, const char __user * buf, size_t count,
+ssize_t rts51x_write(struct file *filp, const char __user *buf, size_t count,
loff_t *f_pos);
#if 0 /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) */
int rts51x_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
diff --git a/drivers/staging/rts5139/rts51x_transport.c b/drivers/staging/rts5139/rts51x_transport.c
index e11467acc57b..da9c83b49426 100644
--- a/drivers/staging/rts5139/rts51x_transport.c
+++ b/drivers/staging/rts5139/rts51x_transport.c
@@ -883,7 +883,7 @@ int rts51x_transfer_data_partial(struct rts51x_chip *chip, unsigned int pipe,
return result;
}
-int rts51x_get_epc_status(struct rts51x_chip *chip, u16 * status)
+int rts51x_get_epc_status(struct rts51x_chip *chip, u16 *status)
{
unsigned int pipe = RCV_INTR_PIPE(chip);
struct usb_host_endpoint *ep;
diff --git a/drivers/staging/rts5139/rts51x_transport.h b/drivers/staging/rts5139/rts51x_transport.h
index 8464c4836d5b..9dd556ea9c08 100644
--- a/drivers/staging/rts5139/rts51x_transport.h
+++ b/drivers/staging/rts5139/rts51x_transport.h
@@ -73,7 +73,7 @@ int rts51x_start_epc_transfer(struct rts51x_chip *chip);
void rts51x_cancel_epc_transfer(struct rts51x_chip *chip);
#endif
-int rts51x_get_epc_status(struct rts51x_chip *chip, u16 * status);
+int rts51x_get_epc_status(struct rts51x_chip *chip, u16 *status);
void rts51x_invoke_transport(struct scsi_cmnd *srb, struct rts51x_chip *chip);
#endif /* __RTS51X_TRANSPORT_H */
diff --git a/drivers/staging/rts5139/sd_cprm.c b/drivers/staging/rts5139/sd_cprm.c
index 407cd43ad3b1..d5969d992d84 100644
--- a/drivers/staging/rts5139/sd_cprm.c
+++ b/drivers/staging/rts5139/sd_cprm.c
@@ -233,7 +233,7 @@ RTY_SEND_CMD:
return STATUS_SUCCESS;
}
-int ext_sd_get_rsp(struct rts51x_chip *chip, int len, u8 * rsp, u8 rsp_type)
+int ext_sd_get_rsp(struct rts51x_chip *chip, int len, u8 *rsp, u8 rsp_type)
{
int retval, rsp_len;
u16 reg_addr;
diff --git a/drivers/staging/rts_pstor/TODO b/drivers/staging/rts_pstor/TODO
index 2f93a7c1b5ad..becb95e4f2cd 100644
--- a/drivers/staging/rts_pstor/TODO
+++ b/drivers/staging/rts_pstor/TODO
@@ -2,4 +2,8 @@ TODO:
- support more pcie card reader of Realtek family
- use kernel coding style
- checkpatch.pl fixes
-
+- stop having thousands of lines of code duplicated with staging/rts5139
+- This driver contains an entire SD/MMC stack -- it should use the stack in
+ drivers/mmc instead, as a host driver e.g. drivers/mmc/host/realtek-pci.c;
+ see drivers/mmc/host/via-sdmmc.c as an example.
+- This driver presents cards as SCSI devices, but they should be MMC devices.
diff --git a/drivers/staging/sbe-2t3e3/intr.c b/drivers/staging/sbe-2t3e3/intr.c
index 7ad1a8382037..1336aab11bdd 100644
--- a/drivers/staging/sbe-2t3e3/intr.c
+++ b/drivers/staging/sbe-2t3e3/intr.c
@@ -188,7 +188,7 @@ void dc_intr_rx(struct channel *sc)
}
if (sc->s.LOS) {
- error_mask &= ~(SBE_2T3E3_RX_DESC_DRIBBLING_BIT ||
+ error_mask &= ~(SBE_2T3E3_RX_DESC_DRIBBLING_BIT |
SBE_2T3E3_RX_DESC_MII_ERROR);
}
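
The sbe-2t3e3 fix swaps a logical || for a bitwise | when building the
clear mask. With ||, the nonzero flags collapse to the boolean 1, so
~(A || B) is ~1 and the statement clears only bit 0 while leaving both
intended error bits set. The bitwise form clears exactly the two flags:

	/* Wrong: error_mask &= ~(A || B);  ==  error_mask &= ~1; */
	/* Right: combine the flag bits, then clear them both. */
	error_mask &= ~(SBE_2T3E3_RX_DESC_DRIBBLING_BIT |
			SBE_2T3E3_RX_DESC_MII_ERROR);
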
diff --git a/drivers/staging/sep/Kconfig b/drivers/staging/sep/Kconfig
index 92bf16667d04..185b676d858a 100644
--- a/drivers/staging/sep/Kconfig
+++ b/drivers/staging/sep/Kconfig
@@ -3,7 +3,8 @@ config DX_SEP
depends on PCI
help
Discretix SEP driver; used for the security processor subsystem
- on bard the Intel Mobile Internet Device.
+	  on board the Intel Mobile Internet Device, and adds SEP availability
+	  to the kernel crypto infrastructure.
The driver's name is sep_driver.
diff --git a/drivers/staging/sep/Makefile b/drivers/staging/sep/Makefile
index 628d5f919414..e48a7959289e 100644
--- a/drivers/staging/sep/Makefile
+++ b/drivers/staging/sep/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_DX_SEP) := sep_driver.o
-
+ccflags-y += -I$(srctree)/$(src)
+obj-$(CONFIG_DX_SEP) += sep_driver.o
+sep_driver-objs := sep_crypto.o sep_main.o
diff --git a/drivers/staging/sep/TODO b/drivers/staging/sep/TODO
index 8f3b878ad8ae..3524d0cf84ba 100644
--- a/drivers/staging/sep/TODO
+++ b/drivers/staging/sep/TODO
@@ -1,4 +1,3 @@
Todo's so far (from Alan Cox)
-- Check whether it can be plugged into any of the kernel crypto API
- interfaces - Crypto API 'glue' is still not ready to submit
-- Clean up un-needed debug prints - Started to work on this
+- Clean up unused ioctls
+- Clean up unused fields in ioctl structures
diff --git a/drivers/staging/sep/sep_crypto.c b/drivers/staging/sep/sep_crypto.c
new file mode 100644
index 000000000000..1cc790e9fa07
--- /dev/null
+++ b/drivers/staging/sep/sep_crypto.c
@@ -0,0 +1,4058 @@
+/*
+ *
+ * sep_crypto.c - Crypto interface structures
+ *
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2010 Discretix. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * CONTACTS:
+ *
+ * Mark Allyn mark.a.allyn@intel.com
+ * Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ * CHANGES:
+ *
+ * 2009.06.26 Initial publish
+ * 2010.09.14 Upgrade to Medfield
+ * 2011.02.22 Enable Kernel Crypto
+ *
+ */
+
+/* #define DEBUG */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/kdev_t.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/crypto.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/hash.h>
+#include "sep_driver_hw_defs.h"
+#include "sep_driver_config.h"
+#include "sep_driver_api.h"
+#include "sep_dev.h"
+#include "sep_crypto.h"
+
+#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
+
+/* Globals for queuing */
+static spinlock_t queue_lock;
+static struct crypto_queue sep_queue;
+
+/* Forward declaration of the dequeuer */
+static void sep_dequeuer(void *data);
+
+/* TESTING */
+/**
+ * crypto_sep_dump_message - dump the message that is pending
+ * @sep: SEP device
+ * @msg: message to dump
+ * This will only print the dump if DEBUG is set; it does
+ * follow the kernel's debug print enabling.
+ */
+static void crypto_sep_dump_message(struct sep_device *sep, void *msg)
+{
+#if 0
+ u32 *p;
+ u32 *i;
+ int count;
+
+ p = sep->shared_addr;
+ i = (u32 *)msg;
+ for (count = 0; count < 10 * 4; count += 4)
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] Word %d of the message is %x (local)%x\n",
+ current->pid, count/4, *p++, *i++);
+#endif
+}
+
+/**
+ * sep_do_callback
+ * @work: pointer to work_struct
+ * This is what is called by the queue; it is generic so that it
+ * can be used by any type of operation as each different callback
+ * function can use the data parameter in its own way
+ */
+static void sep_do_callback(struct work_struct *work)
+{
+ struct sep_work_struct *sep_work = container_of(work,
+ struct sep_work_struct, work);
+ if (sep_work != NULL) {
+ (sep_work->callback)(sep_work->data);
+ kfree(sep_work);
+ } else {
+ pr_debug("sep crypto: do callback - NULL container\n");
+ }
+}
+
+/**
+ * sep_submit_work
+ * @work_queue: pointer to struct_workqueue
+ * @funct: pointer to function to execute
+ * @data: pointer to data; function will know
+ * how to use it
+ * This is a generic API to submit something to
+ * the queue. The callback function will depend
+ * on what operation is to be done
+ */
+static int sep_submit_work(struct workqueue_struct *work_queue,
+ void(*funct)(void *),
+ void *data)
+{
+ struct sep_work_struct *sep_work;
+ int result;
+
+ sep_work = kmalloc(sizeof(struct sep_work_struct), GFP_ATOMIC);
+
+ if (sep_work == NULL) {
+		pr_debug("sep crypto: can't allocate work structure\n");
+ return -ENOMEM;
+ }
+
+ sep_work->callback = funct;
+ sep_work->data = data;
+ INIT_WORK(&sep_work->work, sep_do_callback);
+ result = queue_work(work_queue, &sep_work->work);
+ if (!result) {
+ pr_debug("sep_crypto: queue_work failed\n");
+ return -EINVAL;
+ }
+ return 0;
+}
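+
+/*
+ * A minimal usage sketch (mirroring the call made by the hash update
+ * post-op further below): hand the dequeuer to the driver workqueue so
+ * the next queued request is processed in process context.
+ *
+ *	int err;
+ *
+ *	err = sep_submit_work(sep->workqueue, sep_dequeuer,
+ *			      (void *)&sep_queue);
+ *	if (err)
+ *		pr_debug("sep crypto: could not submit dequeuer\n");
+ */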
+
+/**
+ * sep_alloc_sg_buf -
+ * @sep: pointer to struct sep_device
+ * @size: total size of area
+ * @block_size: minimum size of chunks;
+ *	each page is trimmed to a multiple of this size
+ * @returns: pointer to struct scatterlist for new
+ * buffer
+ **/
+static struct scatterlist *sep_alloc_sg_buf(
+ struct sep_device *sep,
+ size_t size,
+ size_t block_size)
+{
+ u32 nbr_pages;
+ u32 ct1;
+ void *buf;
+ size_t current_size;
+ size_t real_page_size;
+
+ struct scatterlist *sg, *sg_temp;
+
+ if (size == 0)
+ return NULL;
+
+ dev_dbg(&sep->pdev->dev, "sep alloc sg buf\n");
+
+ current_size = 0;
+ nbr_pages = 0;
+ real_page_size = PAGE_SIZE - (PAGE_SIZE % block_size);
+	/**
+	 * The size of each page must be a multiple of the operation
+	 * block size; increment by the modified page size until
+	 * the total size is reached, then you have the number of
+	 * pages
+	 */
+ while (current_size < size) {
+ current_size += real_page_size;
+ nbr_pages += 1;
+ }
+
+ sg = kmalloc((sizeof(struct scatterlist) * nbr_pages), GFP_ATOMIC);
+ if (!sg) {
+ dev_warn(&sep->pdev->dev, "Cannot allocate page for new sg\n");
+ return NULL;
+ }
+
+ sg_init_table(sg, nbr_pages);
+
+ current_size = 0;
+ sg_temp = sg;
+ for (ct1 = 0; ct1 < nbr_pages; ct1 += 1) {
+ buf = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (!buf) {
+ dev_warn(&sep->pdev->dev,
+ "Cannot allocate page for new buffer\n");
+ kfree(sg);
+ return NULL;
+ }
+
+ sg_set_buf(sg_temp, buf, real_page_size);
+ if ((size - current_size) > real_page_size) {
+ sg_temp->length = real_page_size;
+ current_size += real_page_size;
+ } else {
+ sg_temp->length = (size - current_size);
+ current_size = size;
+ }
+ sg_temp = sg_next(sg);
+ }
+ return sg;
+}
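+
+/*
+ * Worked example of the page sizing above: with a PAGE_SIZE of 4096
+ * and a block_size of 24, 4096 % 24 == 16, so real_page_size becomes
+ * 4080 and every page holds a whole number of blocks; a 10000 byte
+ * request then needs three pages (4080 + 4080 + 1840).
+ */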
+
+/**
+ * sep_free_sg_buf -
+ * @sg: pointer to struct scatterlist; points to area to free
+ */
+static void sep_free_sg_buf(struct scatterlist *sg)
+{
+ struct scatterlist *sg_temp = sg;
+ while (sg_temp) {
+ free_page((unsigned long)sg_virt(sg_temp));
+ sg_temp = sg_next(sg_temp);
+ }
+ kfree(sg);
+}
+
+/**
+ * sep_copy_sg -
+ * @sep: pointer to struct sep_device
+ * @sg_src: pointer to struct scatterlist for source
+ * @sg_dst: pointer to struct scatterlist for destination
+ * @size: size (in bytes) of data to copy
+ *
+ * Copy data from one scatterlist to another; both must
+ * be the same size
+ */
+static void sep_copy_sg(
+ struct sep_device *sep,
+ struct scatterlist *sg_src,
+ struct scatterlist *sg_dst,
+ size_t size)
+{
+ u32 seg_size;
+ u32 in_offset, out_offset;
+
+ u32 count = 0;
+ struct scatterlist *sg_src_tmp = sg_src;
+ struct scatterlist *sg_dst_tmp = sg_dst;
+ in_offset = 0;
+ out_offset = 0;
+
+ dev_dbg(&sep->pdev->dev, "sep copy sg\n");
+
+ if ((sg_src == NULL) || (sg_dst == NULL) || (size == 0))
+ return;
+
+ dev_dbg(&sep->pdev->dev, "sep copy sg not null\n");
+
+ while (count < size) {
+ if ((sg_src_tmp->length - in_offset) >
+ (sg_dst_tmp->length - out_offset))
+ seg_size = sg_dst_tmp->length - out_offset;
+ else
+ seg_size = sg_src_tmp->length - in_offset;
+
+ if (seg_size > (size - count))
+			seg_size = size - count;
+
+ memcpy(sg_virt(sg_dst_tmp) + out_offset,
+ sg_virt(sg_src_tmp) + in_offset,
+ seg_size);
+
+ in_offset += seg_size;
+ out_offset += seg_size;
+ count += seg_size;
+
+ if (in_offset >= sg_src_tmp->length) {
+ sg_src_tmp = sg_next(sg_src_tmp);
+ in_offset = 0;
+ }
+
+ if (out_offset >= sg_dst_tmp->length) {
+ sg_dst_tmp = sg_next(sg_dst_tmp);
+ out_offset = 0;
+ }
+ }
+}
+
+/**
+ * sep_oddball_pages -
+ * @sep: pointer to struct sep_device
+ * @sg: pointer to struct scatterlist - buffer to check
+ * @data_size: total data size
+ * @block_size: minimum block size; all pages must be multiples of this size
+ * @do_copy: 1 means do copy, 0 means do not copy
+ * @new_sg: pointer to location to put pointer to new sg area
+ * @returns: 1 if new scatterlist is needed; 0 if not needed;
+ * error value if operation failed
+ *
+ * The SEP device requires all pages to be multiples of the
+ * minimum block size appropriate for the operation
+ * This function checks all pages; if any are oddball sizes
+ * (not a multiple of the block size), it creates a new scatterlist.
+ * If the do_copy parameter is set to 1, then a scatterlist
+ * copy is performed. The pointer to the new scatterlist is
+ * put into the address supplied by the new_sg parameter; if
+ * no new scatterlist is needed, then a NULL is put into
+ * the location at new_sg.
+ *
+ */
+static int sep_oddball_pages(
+ struct sep_device *sep,
+ struct scatterlist *sg,
+ size_t data_size,
+ u32 block_size,
+ struct scatterlist **new_sg,
+ u32 do_copy)
+{
+ struct scatterlist *sg_temp;
+ u32 flag;
+ u32 nbr_pages, page_count;
+
+ dev_dbg(&sep->pdev->dev, "sep oddball\n");
+ if ((sg == NULL) || (data_size == 0) || (data_size < block_size))
+ return 0;
+
+ dev_dbg(&sep->pdev->dev, "sep oddball not null\n");
+ flag = 0;
+ nbr_pages = 0;
+ page_count = 0;
+ sg_temp = sg;
+
+ while (sg_temp) {
+ nbr_pages += 1;
+ sg_temp = sg_next(sg_temp);
+ }
+
+ sg_temp = sg;
+ while ((sg_temp) && (flag == 0)) {
+ page_count += 1;
+ if (sg_temp->length % block_size)
+ flag = 1;
+ else
+ sg_temp = sg_next(sg_temp);
+ }
+
+ /* Do not process if last (or only) page is oddball */
+ if (nbr_pages == page_count)
+ flag = 0;
+
+ if (flag) {
+ dev_dbg(&sep->pdev->dev, "sep oddball processing\n");
+ *new_sg = sep_alloc_sg_buf(sep, data_size, block_size);
+ if (*new_sg == NULL) {
+ dev_warn(&sep->pdev->dev, "cannot allocate new sg\n");
+ return -ENOMEM;
+ }
+
+ if (do_copy)
+ sep_copy_sg(sep, sg, *new_sg, data_size);
+
+ return 1;
+ } else {
+ return 0;
+ }
+}
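+
+/*
+ * Usage sketch for the tri-state return above (the block and hash
+ * paths below follow this pattern); the new scatterlist is only
+ * substituted when 1 is returned:
+ *
+ *	struct scatterlist *new_sg;
+ *	int ret;
+ *
+ *	ret = sep_oddball_pages(sep, req->src, req->nbytes,
+ *				block_size, &new_sg, 1);
+ *	if (ret < 0)
+ *		return ret;
+ *	src_sg = (ret == 1) ? new_sg : req->src;
+ */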
+
+/**
+ * sep_copy_offset_sg -
+ * @sep: pointer to struct sep_device;
+ * @sg: pointer to struct scatterlist
+ * @offset: offset into scatterlist memory
+ * @dst: place to put data
+ * @len: length of data
+ * @returns: number of bytes copied
+ *
+ * This copies data from scatterlist buffer
+ * offset from beginning - it is needed for
+ * handling tail data in hash
+ */
+static size_t sep_copy_offset_sg(
+ struct sep_device *sep,
+ struct scatterlist *sg,
+ u32 offset,
+ void *dst,
+ u32 len)
+{
+ size_t page_start;
+ size_t page_end;
+ size_t offset_within_page;
+ size_t length_within_page;
+ size_t length_remaining;
+ size_t current_offset;
+
+ /* Find which page is beginning of segment */
+ page_start = 0;
+ page_end = sg->length;
+ while ((sg) && (offset > page_end)) {
+ page_start += sg->length;
+ sg = sg_next(sg);
+ if (sg)
+ page_end += sg->length;
+ }
+
+ if (sg == NULL)
+ return -ENOMEM;
+
+ offset_within_page = offset - page_start;
+ if ((sg->length - offset_within_page) >= len) {
+ /* All within this page */
+ memcpy(dst, sg_virt(sg) + offset_within_page, len);
+ return len;
+ } else {
+ /* Scattered multiple pages */
+ current_offset = 0;
+ length_remaining = len;
+ while ((sg) && (current_offset < len)) {
+ length_within_page = sg->length - offset_within_page;
+ if (length_within_page >= length_remaining) {
+ memcpy(dst+current_offset,
+ sg_virt(sg) + offset_within_page,
+ length_remaining);
+ length_remaining = 0;
+ current_offset = len;
+ } else {
+ memcpy(dst+current_offset,
+ sg_virt(sg) + offset_within_page,
+ length_within_page);
+ length_remaining -= length_within_page;
+ current_offset += length_within_page;
+ offset_within_page = 0;
+ sg = sg_next(sg);
+ }
+ }
+
+ if (sg == NULL)
+ return -ENOMEM;
+ }
+ return len;
+}
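+
+/*
+ * For illustration: with two 4096 byte pages, offset 4000 and len 200,
+ * the loop above copies the final 96 bytes of the first page and the
+ * first 104 bytes of the second page into dst.
+ */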
+
+/**
+ * partial_overlap -
+ * @src_ptr: source pointer
+ * @dst_ptr: destination pointer
+ * @nbytes: number of bytes
+ * @returns: 0 if no partial overlap; -EINVAL if there is
+ * We cannot have any partial overlap. Total overlap
+ * where src is the same as dst is okay
+ */
+static int partial_overlap(void *src_ptr, void *dst_ptr, u32 nbytes)
+{
+ /* Check for partial overlap */
+ if (src_ptr != dst_ptr) {
+ if (src_ptr < dst_ptr) {
+ if ((src_ptr + nbytes) > dst_ptr)
+ return -EINVAL;
+ } else {
+ if ((dst_ptr + nbytes) > src_ptr)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
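+
+/*
+ * For illustration: src == dst (a fully in-place operation) is allowed,
+ * but src = buf with dst = buf + 8 and nbytes = 16 returns -EINVAL,
+ * since the tail of the source would be overwritten before it is read.
+ */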
+
+/* Debug - dump body is compiled out (#if 0); when enabled it uses dev_dbg */
+static void sep_dump(struct sep_device *sep, char *stg, void *start, int len)
+{
+#if 0
+ int ct1;
+ u8 *ptt;
+
+ dev_dbg(&sep->pdev->dev,
+ "Dump of %s starting at %08lx for %08x bytes\n",
+ stg, (unsigned long)start, len);
+ for (ct1 = 0; ct1 < len; ct1 += 1) {
+ ptt = (u8 *)(start + ct1);
+ dev_dbg(&sep->pdev->dev, "%02x ", *ptt);
+ if (ct1 % 16 == 15)
+ dev_dbg(&sep->pdev->dev, "\n");
+ }
+ dev_dbg(&sep->pdev->dev, "\n");
+#endif
+}
+
+/* Debug - dump body is compiled out (#if 0); when enabled it uses dev_dbg */
+static void sep_dump_sg(struct sep_device *sep, char *stg,
+ struct scatterlist *sg)
+{
+#if 0
+ int ct1, ct2;
+ u8 *ptt;
+
+ dev_dbg(&sep->pdev->dev, "Dump of scatterlist %s\n", stg);
+
+ ct1 = 0;
+ while (sg) {
+		dev_dbg(&sep->pdev->dev, "page %x size %x\n", ct1,
+			sg->length);
+ dev_dbg(&sep->pdev->dev, "phys addr is %lx",
+ (unsigned long)sg_phys(sg));
+ ptt = sg_virt(sg);
+ for (ct2 = 0; ct2 < sg->length; ct2 += 1) {
+ dev_dbg(&sep->pdev->dev, "byte %x is %02x\n",
+ ct2, (unsigned char)*(ptt + ct2));
+ }
+
+ ct1 += 1;
+ sg = sg_next(sg);
+ }
+ dev_dbg(&sep->pdev->dev, "\n");
+#endif
+}
+
+/* Debug - prints only if DEBUG is defined */
+static void sep_dump_ivs(struct ablkcipher_request *req, char *reason)
+{
+ unsigned char *cptr;
+ struct sep_aes_internal_context *aes_internal;
+ struct sep_des_internal_context *des_internal;
+ int ct1;
+
+ struct this_task_ctx *ta_ctx;
+ struct crypto_ablkcipher *tfm;
+ struct sep_system_ctx *sctx;
+
+ ta_ctx = ablkcipher_request_ctx(req);
+ tfm = crypto_ablkcipher_reqtfm(req);
+ sctx = crypto_ablkcipher_ctx(tfm);
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "IV DUMP - %s\n", reason);
+ if ((ta_ctx->current_request == DES_CBC) &&
+ (ta_ctx->des_opmode == SEP_DES_CBC)) {
+
+ des_internal = (struct sep_des_internal_context *)
+ sctx->des_private_ctx.ctx_buf;
+ /* print vendor */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep - vendor iv for DES\n");
+ cptr = (unsigned char *)des_internal->iv_context;
+ for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "%02x\n", *(cptr + ct1));
+
+ /* print walk */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep - walk from kernel crypto iv for DES\n");
+ cptr = (unsigned char *)ta_ctx->walk.iv;
+ for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "%02x\n", *(cptr + ct1));
+ } else if ((ta_ctx->current_request == AES_CBC) &&
+ (ta_ctx->aes_opmode == SEP_AES_CBC)) {
+
+ aes_internal = (struct sep_aes_internal_context *)
+ sctx->aes_private_ctx.cbuff;
+ /* print vendor */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep - vendor iv for AES\n");
+ cptr = (unsigned char *)aes_internal->aes_ctx_iv;
+ for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "%02x\n", *(cptr + ct1));
+
+ /* print walk */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep - walk from kernel crypto iv for AES\n");
+ cptr = (unsigned char *)ta_ctx->walk.iv;
+ for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "%02x\n", *(cptr + ct1));
+ }
+}
+
+/**
+ * RFC2451: Weak key check
+ * Returns: 1 (weak), 0 (not weak)
+ */
+static int sep_weak_key(const u8 *key, unsigned int keylen)
+{
+ static const u8 parity[] = {
+		8, 1, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 2, 8,
+		0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 3,
+		0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+		8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+		0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+		8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+		8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+		0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+		0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+		8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+		8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+		0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+		8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+		0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+		4, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+		8, 5, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 6, 8,
+ };
+
+ u32 n, w;
+
+ n = parity[key[0]]; n <<= 4;
+ n |= parity[key[1]]; n <<= 4;
+ n |= parity[key[2]]; n <<= 4;
+ n |= parity[key[3]]; n <<= 4;
+ n |= parity[key[4]]; n <<= 4;
+ n |= parity[key[5]]; n <<= 4;
+ n |= parity[key[6]]; n <<= 4;
+ n |= parity[key[7]];
+ w = 0x88888888L;
+
+ /* 1 in 10^10 keys passes this test */
+ if (!((n - (w >> 3)) & w)) {
+ if (n < 0x41415151) {
+ if (n < 0x31312121) {
+ if (n < 0x14141515) {
+ /* 01 01 01 01 01 01 01 01 */
+ if (n == 0x11111111)
+ goto weak;
+ /* 01 1F 01 1F 01 0E 01 0E */
+ if (n == 0x13131212)
+ goto weak;
+ } else {
+ /* 01 E0 01 E0 01 F1 01 F1 */
+ if (n == 0x14141515)
+ goto weak;
+ /* 01 FE 01 FE 01 FE 01 FE */
+ if (n == 0x16161616)
+ goto weak;
+ }
+ } else {
+ if (n < 0x34342525) {
+ /* 1F 01 1F 01 0E 01 0E 01 */
+ if (n == 0x31312121)
+ goto weak;
+ /* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
+ if (n == 0x33332222)
+ goto weak;
+ } else {
+ /* 1F E0 1F E0 0E F1 0E F1 */
+ if (n == 0x34342525)
+ goto weak;
+ /* 1F FE 1F FE 0E FE 0E FE */
+ if (n == 0x36362626)
+ goto weak;
+ }
+ }
+ } else {
+ if (n < 0x61616161) {
+ if (n < 0x44445555) {
+ /* E0 01 E0 01 F1 01 F1 01 */
+ if (n == 0x41415151)
+ goto weak;
+ /* E0 1F E0 1F F1 0E F1 0E */
+ if (n == 0x43435252)
+ goto weak;
+ } else {
+ /* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
+ if (n == 0x44445555)
+ goto weak;
+ /* E0 FE E0 FE F1 FE F1 FE */
+ if (n == 0x46465656)
+ goto weak;
+ }
+ } else {
+ if (n < 0x64646565) {
+ /* FE 01 FE 01 FE 01 FE 01 */
+ if (n == 0x61616161)
+ goto weak;
+ /* FE 1F FE 1F FE 0E FE 0E */
+ if (n == 0x63636262)
+ goto weak;
+ } else {
+ /* FE E0 FE E0 FE F1 FE F1 */
+ if (n == 0x64646565)
+ goto weak;
+ /* FE FE FE FE FE FE FE FE */
+ if (n == 0x66666666)
+ goto weak;
+ }
+ }
+ }
+ }
+ return 0;
+weak:
+ return 1;
+}
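+
+/*
+ * How the check above works: parity[] maps each key byte to a nibble
+ * (0 or 8 for ordinary bytes; the values 1-6 mark the six bytes that
+ * occur in RFC 2451 weak keys: 01, 0E, 1F, E0, F1, FE), and the eight
+ * nibbles are packed into n.  For example, the all-0x01 weak key gives
+ * parity[0x01] == 1 eight times, so n == 0x11111111 and the first
+ * weak-key comparison fires.
+ */
+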
+/**
+ * sep_sg_nents
+ */
+static u32 sep_sg_nents(struct scatterlist *sg)
+{
+ u32 ct1 = 0;
+ while (sg) {
+ ct1 += 1;
+ sg = sg_next(sg);
+ }
+
+ return ct1;
+}
+
+/**
+ * sep_start_msg -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @returns: offset to place for the next word in the message
+ * Set up pointer in message pool for new message
+ */
+static u32 sep_start_msg(struct this_task_ctx *ta_ctx)
+{
+ u32 *word_ptr;
+ ta_ctx->msg_len_words = 2;
+ ta_ctx->msgptr = ta_ctx->msg;
+ memset(ta_ctx->msg, 0, SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+ ta_ctx->msgptr += sizeof(u32) * 2;
+ word_ptr = (u32 *)ta_ctx->msgptr;
+ *word_ptr = SEP_START_MSG_TOKEN;
+ return sizeof(u32) * 2;
+}
+
+/**
+ * sep_end_msg -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @msg_offset: current message offset
+ * End the message: set the length word and the
+ * (currently zero) CRC; sending to the SEP is
+ * done later by the caller
+ */
+static void sep_end_msg(struct this_task_ctx *ta_ctx, u32 msg_offset)
+{
+ u32 *word_ptr;
+ /* Msg size goes into msg after token */
+ ta_ctx->msg_len_words = msg_offset / sizeof(u32) + 1;
+ word_ptr = (u32 *)ta_ctx->msgptr;
+ word_ptr += 1;
+ *word_ptr = ta_ctx->msg_len_words;
+
+ /* CRC (currently 0) goes at end of msg */
+ word_ptr = (u32 *)(ta_ctx->msgptr + msg_offset);
+ *word_ptr = 0;
+}
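+
+/*
+ * Resulting message layout, in words relative to ta_ctx->msgptr (which
+ * sep_start_msg leaves two words past the start of the message area):
+ *
+ *	word 0:	SEP_START_MSG_TOKEN
+ *	word 1:	message length in words (filled in by sep_end_msg)
+ *	word 2:	opcode (filled in by sep_make_header below)
+ *	 ...	operation parameters, written via sep_write_msg
+ *	last:	CRC word (currently always zero)
+ */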
+
+/**
+ * sep_start_inbound_msg -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @msg_offset: offset to place for the next word in the message
+ * @returns: 0 for success; error value for failure
+ * Set up pointer in message pool for inbound message
+ */
+static u32 sep_start_inbound_msg(struct this_task_ctx *ta_ctx, u32 *msg_offset)
+{
+ u32 *word_ptr;
+ u32 token;
+ u32 error = SEP_OK;
+
+ *msg_offset = sizeof(u32) * 2;
+ word_ptr = (u32 *)ta_ctx->msgptr;
+ token = *word_ptr;
+ ta_ctx->msg_len_words = *(word_ptr + 1);
+
+ if (token != SEP_START_MSG_TOKEN) {
+ error = SEP_INVALID_START;
+ goto end_function;
+ }
+
+end_function:
+
+ return error;
+}
+
+/**
+ * sep_write_msg -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @in_addr: pointer to start of parameter
+ * @size: size of parameter to copy (in bytes)
+ * @max_size: size to move up offset; SEP msg is in word sizes
+ * @msg_offset: pointer to current offset (is updated)
+ * @byte_array: flag to indicate whether endianness must be changed
+ * Copies data into the message area from caller
+ */
+static void sep_write_msg(struct this_task_ctx *ta_ctx, void *in_addr,
+ u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
+{
+ u32 *word_ptr;
+ void *void_ptr;
+ void_ptr = ta_ctx->msgptr + *msg_offset;
+ word_ptr = (u32 *)void_ptr;
+ memcpy(void_ptr, in_addr, size);
+ *msg_offset += max_size;
+
+ /* Do we need to manipulate endian? */
+ if (byte_array) {
+ u32 i;
+ for (i = 0; i < ((size + 3) / 4); i += 1)
+ *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
+ }
+}
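+
+/*
+ * Byte-array parameters (keys and IVs) are swapped word by word on the
+ * way in: assuming CHG_ENDIAN is a 32-bit byte swap, the bytes
+ * 01 02 03 04 of the first word are stored as 04 03 02 01, while plain
+ * u32 parameters are written with byte_array == 0 and left untouched.
+ */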
+
+/**
+ * sep_make_header
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @msg_offset: pointer to current offset (is updated)
+ * @op_code: op code to put into message
+ * Puts op code into message and updates offset
+ */
+static void sep_make_header(struct this_task_ctx *ta_ctx, u32 *msg_offset,
+ u32 op_code)
+{
+ u32 *word_ptr;
+
+ *msg_offset = sep_start_msg(ta_ctx);
+ word_ptr = (u32 *)(ta_ctx->msgptr + *msg_offset);
+ *word_ptr = op_code;
+ *msg_offset += sizeof(u32);
+}
+
+
+
+/**
+ * sep_read_msg -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @in_addr: pointer to start of parameter
+ * @size: size of parameter to copy (in bytes)
+ * @max_size: size to move up offset; SEP msg is in word sizes
+ * @msg_offset: pointer to current offset (is updated)
+ * @byte_array: flag to indicate whether endianness must be changed
+ * Copies data out of the message area to caller
+ */
+static void sep_read_msg(struct this_task_ctx *ta_ctx, void *in_addr,
+ u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
+{
+ u32 *word_ptr;
+ void *void_ptr;
+ void_ptr = ta_ctx->msgptr + *msg_offset;
+ word_ptr = (u32 *)void_ptr;
+
+ /* Do we need to manipulate endian? */
+ if (byte_array) {
+ u32 i;
+ for (i = 0; i < ((size + 3) / 4); i += 1)
+ *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
+ }
+
+ memcpy(in_addr, void_ptr, size);
+ *msg_offset += max_size;
+}
+
+/**
+ * sep_verify_op -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @op_code: expected op_code
+ * @msg_offset: pointer to current offset (is updated)
+ * @returns: 0 for success; error for failure
+ */
+static u32 sep_verify_op(struct this_task_ctx *ta_ctx, u32 op_code,
+ u32 *msg_offset)
+{
+ u32 error;
+ u32 in_ary[2];
+
+ struct sep_device *sep = ta_ctx->sep_used;
+
+ dev_dbg(&sep->pdev->dev, "dumping return message\n");
+ error = sep_start_inbound_msg(ta_ctx, msg_offset);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "sep_start_inbound_msg error\n");
+ return error;
+ }
+
+ sep_read_msg(ta_ctx, in_ary, sizeof(u32) * 2, sizeof(u32) * 2,
+ msg_offset, 0);
+
+ if (in_ary[0] != op_code) {
+ dev_warn(&sep->pdev->dev,
+ "sep got back wrong opcode\n");
+ dev_warn(&sep->pdev->dev,
+ "got back %x; expected %x\n",
+ in_ary[0], op_code);
+ return SEP_WRONG_OPCODE;
+ }
+
+ if (in_ary[1] != SEP_OK) {
+ dev_warn(&sep->pdev->dev,
+ "sep execution error\n");
+ dev_warn(&sep->pdev->dev,
+ "got back %x; expected %x\n",
+ in_ary[1], SEP_OK);
+		return in_ary[1];
+ }
+
+	return 0;
+}
+
+/**
+ * sep_read_context -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @msg_offset: point to current place in SEP msg; is updated
+ * @dst: pointer to place to put the context
+ * @len: size of the context structure (differs for crypto/hash)
+ * This function reads the context from the msg area
+ * The maximum length is rounded up to a whole number of
+ * words so that msg_offset is advanced past any padding
+ * words that follow the context in the msg area
+ */
+static void sep_read_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
+ void *dst, u32 len)
+{
+ u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
+ sep_read_msg(ta_ctx, dst, len, max_length, msg_offset, 0);
+}
+
+/**
+ * sep_write_context -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @msg_offset: point to current place in SEP msg; is updated
+ * @src: pointer to the current context
+ * @len: size of the context structure (differs for crypto/hash)
+ * This function writes the context to the msg area
+ * The maximum length is rounded up to a whole number of
+ * words so that msg_offset is advanced past any padding
+ * words that follow the context in the msg area
+ */
+static void sep_write_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
+ void *src, u32 len)
+{
+ u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
+ sep_write_msg(ta_ctx, src, len, max_length, msg_offset, 0);
+}
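+
+/*
+ * Example of the rounding used by both context helpers: a context of,
+ * say, 43 bytes gives max_length = ((43 + 3) / 4) * 4 = 44, so
+ * msg_offset always advances by a whole number of words even when the
+ * context length is not word aligned.
+ */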
+
+/**
+ * sep_clear_out -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * Clear out crypto related values in sep device structure
+ * to enable device to be used by anyone; either kernel
+ * crypto or userspace app via middleware
+ */
+static void sep_clear_out(struct this_task_ctx *ta_ctx)
+{
+ if (ta_ctx->src_sg_hold) {
+ sep_free_sg_buf(ta_ctx->src_sg_hold);
+ ta_ctx->src_sg_hold = NULL;
+ }
+
+ if (ta_ctx->dst_sg_hold) {
+ sep_free_sg_buf(ta_ctx->dst_sg_hold);
+ ta_ctx->dst_sg_hold = NULL;
+ }
+
+ ta_ctx->src_sg = NULL;
+ ta_ctx->dst_sg = NULL;
+
+ sep_free_dma_table_data_handler(ta_ctx->sep_used, &ta_ctx->dma_ctx);
+
+ if (ta_ctx->i_own_sep) {
+ /**
+ * The following unlocks the sep and makes it available
+ * to any other application
+		 * First, null out crypto entries in sep before releasing it
+ */
+ ta_ctx->sep_used->current_hash_req = NULL;
+ ta_ctx->sep_used->current_cypher_req = NULL;
+ ta_ctx->sep_used->current_request = 0;
+ ta_ctx->sep_used->current_hash_stage = 0;
+ ta_ctx->sep_used->ta_ctx = NULL;
+ ta_ctx->sep_used->in_kernel = 0;
+
+ ta_ctx->call_status.status = 0;
+
+		/* Remove anything confidential */
+ memset(ta_ctx->sep_used->shared_addr, 0,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ sep_queue_status_remove(ta_ctx->sep_used, &ta_ctx->queue_elem);
+
+#ifdef SEP_ENABLE_RUNTIME_PM
+ ta_ctx->sep_used->in_use = 0;
+ pm_runtime_mark_last_busy(&ta_ctx->sep_used->pdev->dev);
+ pm_runtime_put_autosuspend(&ta_ctx->sep_used->pdev->dev);
+#endif
+
+ clear_bit(SEP_WORKING_LOCK_BIT,
+ &ta_ctx->sep_used->in_use_flags);
+ ta_ctx->sep_used->pid_doing_transaction = 0;
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "[PID%d] waking up next transaction\n",
+ current->pid);
+
+ clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
+ &ta_ctx->sep_used->in_use_flags);
+ wake_up(&ta_ctx->sep_used->event_transactions);
+
+ ta_ctx->i_own_sep = 0;
+ }
+}
+
+/**
+ * Release crypto infrastructure from EINPROGRESS and
+ * clear sep_dev so that SEP is available to anyone
+ */
+static void sep_crypto_release(struct sep_system_ctx *sctx,
+ struct this_task_ctx *ta_ctx, u32 error)
+{
+ struct ahash_request *hash_req = ta_ctx->current_hash_req;
+ struct ablkcipher_request *cypher_req =
+ ta_ctx->current_cypher_req;
+ struct sep_device *sep = ta_ctx->sep_used;
+
+ sep_clear_out(ta_ctx);
+
+ /**
+	 * This may not yet exist depending on when we
+ * chose to bail out. If it does exist, set
+ * it to 1
+ */
+ if (ta_ctx->are_we_done_yet != NULL)
+ *ta_ctx->are_we_done_yet = 1;
+
+ if (cypher_req != NULL) {
+ if ((sctx->key_sent == 1) ||
+ ((error != 0) && (error != -EINPROGRESS))) {
+ if (cypher_req->base.complete == NULL) {
+ dev_dbg(&sep->pdev->dev,
+ "release is null for cypher!");
+ } else {
+ cypher_req->base.complete(
+ &cypher_req->base, error);
+ }
+ }
+ }
+
+ if (hash_req != NULL) {
+ if (hash_req->base.complete == NULL) {
+ dev_dbg(&sep->pdev->dev,
+ "release is null for hash!");
+ } else {
+ hash_req->base.complete(
+ &hash_req->base, error);
+ }
+ }
+}
+
+/**
+ * This is where we grab the sep itself and tell it to do something.
+ * It will sleep if the sep is currently busy
+ * and it will return 0 if the sep is now ours, or an error value if there
+ * were problems
+ */
+static int sep_crypto_take_sep(struct this_task_ctx *ta_ctx)
+{
+ struct sep_device *sep = ta_ctx->sep_used;
+ int result;
+ struct sep_msgarea_hdr *my_msg_header;
+
+ my_msg_header = (struct sep_msgarea_hdr *)ta_ctx->msg;
+
+ /* add to status queue */
+ ta_ctx->queue_elem = sep_queue_status_add(sep, my_msg_header->opcode,
+ ta_ctx->nbytes, current->pid,
+ current->comm, sizeof(current->comm));
+
+ if (!ta_ctx->queue_elem) {
+		dev_dbg(&sep->pdev->dev,
+			"[PID%d] updating queue status error\n",
+			current->pid);
+ return -EINVAL;
+ }
+
+ /* get the device; this can sleep */
+ result = sep_wait_transaction(sep);
+ if (result)
+ return result;
+
+ if (sep_dev->power_save_setup == 1)
+ pm_runtime_get_sync(&sep_dev->pdev->dev);
+
+ /* Copy in the message */
+ memcpy(sep->shared_addr, ta_ctx->msg,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ /* Copy in the dcb information if there is any */
+ if (ta_ctx->dcb_region) {
+ result = sep_activate_dcb_dmatables_context(sep,
+ &ta_ctx->dcb_region, &ta_ctx->dmatables_region,
+ ta_ctx->dma_ctx);
+ if (result)
+ return result;
+ }
+
+ /* Mark the device so we know how to finish the job in the tasklet */
+ if (ta_ctx->current_hash_req)
+ sep->current_hash_req = ta_ctx->current_hash_req;
+ else
+ sep->current_cypher_req = ta_ctx->current_cypher_req;
+
+ sep->current_request = ta_ctx->current_request;
+ sep->current_hash_stage = ta_ctx->current_hash_stage;
+ sep->ta_ctx = ta_ctx;
+ sep->in_kernel = 1;
+ ta_ctx->i_own_sep = 1;
+
+ /* need to set bit first to avoid race condition with interrupt */
+ set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, &ta_ctx->call_status.status);
+
+ result = sep_send_command_handler(sep);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d]: sending command to the sep\n",
+ current->pid);
+
+ if (!result)
+ dev_dbg(&sep->pdev->dev, "[PID%d]: command sent okay\n",
+ current->pid);
+ else {
+		dev_dbg(&sep->pdev->dev, "[PID%d]: can't send command\n",
+ current->pid);
+ clear_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
+ &ta_ctx->call_status.status);
+ }
+
+ return result;
+}
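+
+/*
+ * Note on ownership: once sep_crypto_take_sep() succeeds, this task
+ * owns the device (i_own_sep == 1) until the tasklet-driven post-op
+ * path calls sep_crypto_release(), which funnels into sep_clear_out()
+ * above to drop the locks and wake the next waiting transaction.
+ */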
+
+/**
+ * This function sets things up for a crypto data block process
+ * This does all preparation, but does not try to grab the
+ * sep
+ * @req: pointer to struct ablkcipher_request
+ * returns: 0 if all went well; non-zero on error
+ */
+static int sep_crypto_block_data(struct ablkcipher_request *req)
+{
+
+ int int_error;
+ u32 msg_offset;
+ static u32 msg[10];
+ void *src_ptr;
+ void *dst_ptr;
+
+ static char small_buf[100];
+ ssize_t copy_result;
+ int result;
+
+ struct scatterlist *new_sg;
+ struct this_task_ctx *ta_ctx;
+ struct crypto_ablkcipher *tfm;
+ struct sep_system_ctx *sctx;
+
+ struct sep_des_internal_context *des_internal;
+ struct sep_aes_internal_context *aes_internal;
+
+ ta_ctx = ablkcipher_request_ctx(req);
+ tfm = crypto_ablkcipher_reqtfm(req);
+ sctx = crypto_ablkcipher_ctx(tfm);
+
+ /* start the walk on scatterlists */
+ ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "sep crypto block data size of %x\n",
+ req->nbytes);
+
+ int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
+ if (int_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
+ int_error);
+ return -ENOMEM;
+ }
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "crypto block: src is %lx dst is %lx\n",
+ (unsigned long)req->src, (unsigned long)req->dst);
+
+ /* Make sure all pages are even block */
+ int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
+ req->nbytes, ta_ctx->walk.blocksize, &new_sg, 1);
+
+ if (int_error < 0) {
+		dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page error\n");
+ return -ENOMEM;
+ } else if (int_error == 1) {
+ ta_ctx->src_sg = new_sg;
+ ta_ctx->src_sg_hold = new_sg;
+ } else {
+ ta_ctx->src_sg = req->src;
+ ta_ctx->src_sg_hold = NULL;
+ }
+
+ int_error = sep_oddball_pages(ta_ctx->sep_used, req->dst,
+ req->nbytes, ta_ctx->walk.blocksize, &new_sg, 0);
+
+ if (int_error < 0) {
+		dev_warn(&ta_ctx->sep_used->pdev->dev,
+			 "oddball page error on dst %x\n", int_error);
+ return -ENOMEM;
+ } else if (int_error == 1) {
+ ta_ctx->dst_sg = new_sg;
+ ta_ctx->dst_sg_hold = new_sg;
+ } else {
+ ta_ctx->dst_sg = req->dst;
+ ta_ctx->dst_sg_hold = NULL;
+ }
+
+ /* set nbytes for queue status */
+ ta_ctx->nbytes = req->nbytes;
+
+ /* Key already done; this is for data */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending data\n");
+
+ sep_dump_sg(ta_ctx->sep_used,
+ "block sg in", ta_ctx->src_sg);
+
+ /* check for valid data and proper spacing */
+ src_ptr = sg_virt(ta_ctx->src_sg);
+ dst_ptr = sg_virt(ta_ctx->dst_sg);
+
+ if (!src_ptr || !dst_ptr ||
+ (ta_ctx->current_cypher_req->nbytes %
+ crypto_ablkcipher_blocksize(tfm))) {
+
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "cipher block size odd\n");
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "cipher block size is %x\n",
+ crypto_ablkcipher_blocksize(tfm));
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "cipher data size is %x\n",
+ ta_ctx->current_cypher_req->nbytes);
+ return -EINVAL;
+ }
+
+ if (partial_overlap(src_ptr, dst_ptr,
+ ta_ctx->current_cypher_req->nbytes)) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "block partial overlap\n");
+ return -EINVAL;
+ }
+
+ /* Put together the message */
+ sep_make_header(ta_ctx, &msg_offset, ta_ctx->block_opcode);
+
+ /* If des, and size is 1 block, put directly in msg */
+ if ((ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) &&
+ (req->nbytes == crypto_ablkcipher_blocksize(tfm))) {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "writing out one block des\n");
+
+ copy_result = sg_copy_to_buffer(
+ ta_ctx->src_sg, sep_sg_nents(ta_ctx->src_sg),
+ small_buf, crypto_ablkcipher_blocksize(tfm));
+
+ if (copy_result != crypto_ablkcipher_blocksize(tfm)) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+				"des block copy failed\n");
+ return -ENOMEM;
+ }
+
+ /* Put data into message */
+ sep_write_msg(ta_ctx, small_buf,
+ crypto_ablkcipher_blocksize(tfm),
+ crypto_ablkcipher_blocksize(tfm) * 2,
+ &msg_offset, 1);
+
+ /* Put size into message */
+ sep_write_msg(ta_ctx, &req->nbytes,
+ sizeof(u32), sizeof(u32), &msg_offset, 0);
+ } else {
+ /* Otherwise, fill out dma tables */
+ ta_ctx->dcb_input_data.app_in_address = src_ptr;
+ ta_ctx->dcb_input_data.data_in_size = req->nbytes;
+ ta_ctx->dcb_input_data.app_out_address = dst_ptr;
+ ta_ctx->dcb_input_data.block_size =
+ crypto_ablkcipher_blocksize(tfm);
+ ta_ctx->dcb_input_data.tail_block_size = 0;
+ ta_ctx->dcb_input_data.is_applet = 0;
+ ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
+ ta_ctx->dcb_input_data.dst_sg = ta_ctx->dst_sg;
+
+ result = sep_create_dcb_dmatables_context_kernel(
+ ta_ctx->sep_used,
+ &ta_ctx->dcb_region,
+ &ta_ctx->dmatables_region,
+ &ta_ctx->dma_ctx,
+ &ta_ctx->dcb_input_data,
+ 1);
+ if (result) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "crypto dma table create failed\n");
+ return -EINVAL;
+ }
+
+ /* Portion of msg is nulled (no data) */
+ msg[0] = (u32)0;
+ msg[1] = (u32)0;
+ msg[2] = (u32)0;
+ msg[3] = (u32)0;
+ msg[4] = (u32)0;
+ sep_write_msg(ta_ctx, (void *)msg, sizeof(u32) * 5,
+ sizeof(u32) * 5, &msg_offset, 0);
+ }
+
+ /**
+ * Before we write the message, we need to overwrite the
+ * vendor's IV with the one from our own ablkcipher walk
+ * iv because this is needed for dm-crypt
+ */
+ sep_dump_ivs(req, "sending data block to sep\n");
+ if ((ta_ctx->current_request == DES_CBC) &&
+ (ta_ctx->des_opmode == SEP_DES_CBC)) {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "overwrite vendor iv on DES\n");
+ des_internal = (struct sep_des_internal_context *)
+ sctx->des_private_ctx.ctx_buf;
+ memcpy((void *)des_internal->iv_context,
+ ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
+ } else if ((ta_ctx->current_request == AES_CBC) &&
+ (ta_ctx->aes_opmode == SEP_AES_CBC)) {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "overwrite vendor iv on AES\n");
+ aes_internal = (struct sep_aes_internal_context *)
+ sctx->aes_private_ctx.cbuff;
+ memcpy((void *)aes_internal->aes_ctx_iv,
+ ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
+ }
+
+ /* Write context into message */
+ if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
+ sep_write_context(ta_ctx, &msg_offset,
+ &sctx->des_private_ctx,
+ sizeof(struct sep_des_private_context));
+ sep_dump(ta_ctx->sep_used, "ctx to block des",
+ &sctx->des_private_ctx, 40);
+ } else {
+ sep_write_context(ta_ctx, &msg_offset,
+ &sctx->aes_private_ctx,
+ sizeof(struct sep_aes_private_context));
+ sep_dump(ta_ctx->sep_used, "ctx to block aes",
+ &sctx->aes_private_ctx, 20);
+ }
+
+ /* conclude message */
+ sep_end_msg(ta_ctx, msg_offset);
+
+	/* Parent (caller) is now ready to tell the sep to go ahead */
+ return 0;
+}
+
+
+/**
+ * This function sets things up for a crypto key submit process
+ * This does all preparation, but does not try to grab the
+ * sep
+ * @req: pointer to struct ablkcipher_request
+ * returns: 0 if all went well; non-zero on error
+ */
+static int sep_crypto_send_key(struct ablkcipher_request *req)
+{
+
+ int int_error;
+ u32 msg_offset;
+ static u32 msg[10];
+
+ u32 max_length;
+ struct this_task_ctx *ta_ctx;
+ struct crypto_ablkcipher *tfm;
+ struct sep_system_ctx *sctx;
+
+ ta_ctx = ablkcipher_request_ctx(req);
+ tfm = crypto_ablkcipher_reqtfm(req);
+ sctx = crypto_ablkcipher_ctx(tfm);
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending key\n");
+
+ /* start the walk on scatterlists */
+ ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep crypto block data size of %x\n", req->nbytes);
+
+ int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
+ if (int_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
+ int_error);
+ return -ENOMEM;
+ }
+
+ /* check iv */
+ if ((ta_ctx->current_request == DES_CBC) &&
+ (ta_ctx->des_opmode == SEP_DES_CBC)) {
+ if (!ta_ctx->walk.iv) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
+ return -EINVAL;
+ }
+
+ memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
+ sep_dump(ta_ctx->sep_used, "iv",
+ ta_ctx->iv, SEP_DES_IV_SIZE_BYTES);
+ }
+
+ if ((ta_ctx->current_request == AES_CBC) &&
+ (ta_ctx->aes_opmode == SEP_AES_CBC)) {
+ if (!ta_ctx->walk.iv) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
+ return -EINVAL;
+ }
+
+ memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
+ sep_dump(ta_ctx->sep_used, "iv",
+ ta_ctx->iv, SEP_AES_IV_SIZE_BYTES);
+ }
+
+ /* put together message to SEP */
+ /* Start with op code */
+ sep_make_header(ta_ctx, &msg_offset, ta_ctx->init_opcode);
+
+ /* now deal with IV */
+ if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
+ if (ta_ctx->des_opmode == SEP_DES_CBC) {
+ sep_write_msg(ta_ctx, ta_ctx->iv,
+ SEP_DES_IV_SIZE_BYTES, sizeof(u32) * 4,
+ &msg_offset, 1);
+ sep_dump(ta_ctx->sep_used, "initial IV",
+ ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
+ } else {
+ /* Skip if ECB */
+ msg_offset += 4 * sizeof(u32);
+ }
+ } else {
+ max_length = ((SEP_AES_IV_SIZE_BYTES + 3) /
+ sizeof(u32)) * sizeof(u32);
+ if (ta_ctx->aes_opmode == SEP_AES_CBC) {
+ sep_write_msg(ta_ctx, ta_ctx->iv,
+ SEP_AES_IV_SIZE_BYTES, max_length,
+ &msg_offset, 1);
+ sep_dump(ta_ctx->sep_used, "initial IV",
+ ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
+ } else {
+ /* Skip if ECB */
+ msg_offset += max_length;
+ }
+ }
+
+ /* load the key */
+ if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
+ sep_write_msg(ta_ctx, (void *)&sctx->key.des.key1,
+ sizeof(u32) * 8, sizeof(u32) * 8,
+ &msg_offset, 1);
+
+ msg[0] = (u32)sctx->des_nbr_keys;
+ msg[1] = (u32)ta_ctx->des_encmode;
+ msg[2] = (u32)ta_ctx->des_opmode;
+
+ sep_write_msg(ta_ctx, (void *)msg,
+ sizeof(u32) * 3, sizeof(u32) * 3,
+ &msg_offset, 0);
+ } else {
+ sep_write_msg(ta_ctx, (void *)&sctx->key.aes,
+ sctx->keylen,
+ SEP_AES_MAX_KEY_SIZE_BYTES,
+ &msg_offset, 1);
+
+ msg[0] = (u32)sctx->aes_key_size;
+ msg[1] = (u32)ta_ctx->aes_encmode;
+ msg[2] = (u32)ta_ctx->aes_opmode;
+ msg[3] = (u32)0; /* Secret key is not used */
+ sep_write_msg(ta_ctx, (void *)msg,
+ sizeof(u32) * 4, sizeof(u32) * 4,
+ &msg_offset, 0);
+ }
+
+ /* conclude message */
+ sep_end_msg(ta_ctx, msg_offset);
+
+	/* Parent (caller) is now ready to tell the sep to go ahead */
+ return 0;
+}
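+
+/*
+ * Shape of the init (key) message built above, in words after the
+ * header: the IV (or a skipped slot for ECB), then the key material,
+ * then the mode words (number of DES keys or AES key size, encrypt
+ * mode and operation mode, plus a zero "secret key" flag for AES).
+ */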
+
+
+/* This needs to be run as a work queue as it can be put asleep */
+static void sep_crypto_block(void *data)
+{
+ unsigned long end_time;
+
+ int result;
+
+ struct ablkcipher_request *req;
+ struct this_task_ctx *ta_ctx;
+ struct crypto_ablkcipher *tfm;
+ struct sep_system_ctx *sctx;
+ int are_we_done_yet;
+
+ req = (struct ablkcipher_request *)data;
+ ta_ctx = ablkcipher_request_ctx(req);
+ tfm = crypto_ablkcipher_reqtfm(req);
+ sctx = crypto_ablkcipher_ctx(tfm);
+
+ ta_ctx->are_we_done_yet = &are_we_done_yet;
+
+ pr_debug("sep_crypto_block\n");
+ pr_debug("tfm is %p sctx is %p ta_ctx is %p\n",
+ tfm, sctx, ta_ctx);
+ pr_debug("key_sent is %d\n", sctx->key_sent);
+
+ /* do we need to send the key */
+ if (sctx->key_sent == 0) {
+ are_we_done_yet = 0;
+ result = sep_crypto_send_key(req); /* prep to send key */
+ if (result != 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "could not prep key %x\n", result);
+ sep_crypto_release(sctx, ta_ctx, result);
+ return;
+ }
+
+ result = sep_crypto_take_sep(ta_ctx);
+ if (result) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sep_crypto_take_sep for key send failed\n");
+ sep_crypto_release(sctx, ta_ctx, result);
+ return;
+ }
+
+ /* now we sit and wait up to a fixed time for completion */
+ end_time = jiffies + (WAIT_TIME * HZ);
+ while ((time_before(jiffies, end_time)) &&
+ (are_we_done_yet == 0))
+ schedule();
+
+ /* Done waiting; still not done yet? */
+ if (are_we_done_yet == 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "Send key job never got done\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+ /* Set the key sent variable so this can be skipped later */
+ sctx->key_sent = 1;
+ }
+
+ /* Key sent (or maybe not if we did not have to), now send block */
+ are_we_done_yet = 0;
+
+ result = sep_crypto_block_data(req);
+
+ if (result != 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+			"could not prep block send %x\n", result);
+ sep_crypto_release(sctx, ta_ctx, result);
+ return;
+ }
+
+ result = sep_crypto_take_sep(ta_ctx);
+ if (result) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sep_crypto_take_sep for block send failed\n");
+ sep_crypto_release(sctx, ta_ctx, result);
+ return;
+ }
+
+ /* now we sit and wait up to a fixed time for completion */
+ end_time = jiffies + (WAIT_TIME * HZ);
+ while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
+ schedule();
+
+ /* Done waiting; still not done yet? */
+ if (are_we_done_yet == 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "Send block job never got done\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+ /* That's it; entire thing done, get out of queue */
+
+ pr_debug("crypto_block leaving\n");
+ pr_debug("tfm is %p sctx is %p ta_ctx is %p\n", tfm, sctx, ta_ctx);
+}
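+
+/*
+ * Note on the waits above: completion is signalled from the interrupt
+ * tasklet, which sets *ta_ctx->are_we_done_yet via sep_crypto_release();
+ * this work queue context simply yields until that happens or the
+ * timeout expires.  The pattern, in outline:
+ *
+ *	are_we_done_yet = 0;
+ *	(kick off the operation and take the sep)
+ *	end_time = jiffies + (WAIT_TIME * HZ);
+ *	while (time_before(jiffies, end_time) && (are_we_done_yet == 0))
+ *		schedule();
+ */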
+
+/**
+ * Post operation (after interrupt) for crypto block
+ */
+static u32 crypto_post_op(struct sep_device *sep)
+{
+ u32 u32_error;
+ u32 msg_offset;
+
+ ssize_t copy_result;
+ static char small_buf[100];
+
+ struct ablkcipher_request *req;
+ struct this_task_ctx *ta_ctx;
+ struct sep_system_ctx *sctx;
+ struct crypto_ablkcipher *tfm;
+
+ struct sep_des_internal_context *des_internal;
+ struct sep_aes_internal_context *aes_internal;
+
+ if (!sep->current_cypher_req)
+ return -EINVAL;
+
+ /* hold req since we need to submit work after clearing sep */
+ req = sep->current_cypher_req;
+
+ ta_ctx = ablkcipher_request_ctx(sep->current_cypher_req);
+ tfm = crypto_ablkcipher_reqtfm(sep->current_cypher_req);
+ sctx = crypto_ablkcipher_ctx(tfm);
+
+ pr_debug("crypto_post op\n");
+ pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
+ sctx->key_sent, tfm, sctx, ta_ctx);
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op\n");
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op message dump\n");
+ crypto_sep_dump_message(ta_ctx->sep_used, ta_ctx->msg);
+
+ /* first bring msg from shared area to local area */
+ memcpy(ta_ctx->msg, sep->shared_addr,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+	/* Is this the result of performing init (sending key to SEP)? */
+ if (sctx->key_sent == 0) {
+
+ /* Did SEP do it okay */
+ u32_error = sep_verify_op(ta_ctx, ta_ctx->init_opcode,
+ &msg_offset);
+ if (u32_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "aes init error %x\n", u32_error);
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return u32_error;
+ }
+
+ /* Read Context */
+ if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
+ sep_read_context(ta_ctx, &msg_offset,
+ &sctx->des_private_ctx,
+ sizeof(struct sep_des_private_context));
+
+ sep_dump(ta_ctx->sep_used, "ctx init des",
+ &sctx->des_private_ctx, 40);
+ } else {
+ sep_read_context(ta_ctx, &msg_offset,
+ &sctx->aes_private_ctx,
+ sizeof(struct sep_aes_private_context));
+
+ sep_dump(ta_ctx->sep_used, "ctx init aes",
+ &sctx->aes_private_ctx, 20);
+ }
+
+ sep_dump_ivs(req, "after sending key to sep\n");
+
+ /* key sent went okay; release sep, and set are_we_done_yet */
+ sctx->key_sent = 1;
+ sep_crypto_release(sctx, ta_ctx, -EINPROGRESS);
+
+ } else {
+
+ /**
+ * This is the result of a block request
+ */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "crypto_post_op block response\n");
+
+ u32_error = sep_verify_op(ta_ctx, ta_ctx->block_opcode,
+ &msg_offset);
+
+ if (u32_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sep block error %x\n", u32_error);
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return -EINVAL;
+ }
+
+ if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "post op for DES\n");
+
+ /* special case for 1 block des */
+ if (sep->current_cypher_req->nbytes ==
+ crypto_ablkcipher_blocksize(tfm)) {
+
+ sep_read_msg(ta_ctx, small_buf,
+ crypto_ablkcipher_blocksize(tfm),
+ crypto_ablkcipher_blocksize(tfm) * 2,
+ &msg_offset, 1);
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "reading in block des\n");
+
+ copy_result = sg_copy_from_buffer(
+ ta_ctx->dst_sg,
+ sep_sg_nents(ta_ctx->dst_sg),
+ small_buf,
+ crypto_ablkcipher_blocksize(tfm));
+
+ if (copy_result !=
+ crypto_ablkcipher_blocksize(tfm)) {
+
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+						"des block copy failed\n");
+ sep_crypto_release(sctx, ta_ctx,
+ -ENOMEM);
+ return -ENOMEM;
+ }
+ }
+
+ /* Read Context */
+ sep_read_context(ta_ctx, &msg_offset,
+ &sctx->des_private_ctx,
+ sizeof(struct sep_des_private_context));
+ } else {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "post op for AES\n");
+
+ /* Skip the MAC Output */
+ msg_offset += (sizeof(u32) * 4);
+
+ /* Read Context */
+ sep_read_context(ta_ctx, &msg_offset,
+ &sctx->aes_private_ctx,
+ sizeof(struct sep_aes_private_context));
+ }
+
+ sep_dump_sg(ta_ctx->sep_used,
+ "block sg out", ta_ctx->dst_sg);
+
+ /* Copy to correct sg if this block had oddball pages */
+ if (ta_ctx->dst_sg_hold)
+ sep_copy_sg(ta_ctx->sep_used,
+ ta_ctx->dst_sg,
+ ta_ctx->current_cypher_req->dst,
+ ta_ctx->current_cypher_req->nbytes);
+
+ /**
+ * Copy the iv's back to the walk.iv
+ * This is required for dm_crypt
+ */
+ sep_dump_ivs(req, "got data block from sep\n");
+ if ((ta_ctx->current_request == DES_CBC) &&
+ (ta_ctx->des_opmode == SEP_DES_CBC)) {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "returning result iv to walk on DES\n");
+ des_internal = (struct sep_des_internal_context *)
+ sctx->des_private_ctx.ctx_buf;
+ memcpy(ta_ctx->walk.iv,
+ (void *)des_internal->iv_context,
+ crypto_ablkcipher_ivsize(tfm));
+ } else if ((ta_ctx->current_request == AES_CBC) &&
+ (ta_ctx->aes_opmode == SEP_AES_CBC)) {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "returning result iv to walk on AES\n");
+ aes_internal = (struct sep_aes_internal_context *)
+ sctx->aes_private_ctx.cbuff;
+ memcpy(ta_ctx->walk.iv,
+ (void *)aes_internal->aes_ctx_iv,
+ crypto_ablkcipher_ivsize(tfm));
+ }
+
+ /* finished, release everything */
+ sep_crypto_release(sctx, ta_ctx, 0);
+ }
+ pr_debug("crypto_post_op done\n");
+ pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
+ sctx->key_sent, tfm, sctx, ta_ctx);
+
+ return 0;
+}
+
+static u32 hash_init_post_op(struct sep_device *sep)
+{
+ u32 u32_error;
+ u32 msg_offset;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
+ struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash init post op\n");
+
+ /* first bring msg from shared area to local area */
+ memcpy(ta_ctx->msg, sep->shared_addr,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ u32_error = sep_verify_op(ta_ctx, SEP_HASH_INIT_OPCODE,
+ &msg_offset);
+
+ if (u32_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
+ u32_error);
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return u32_error;
+ }
+
+ /* Read Context */
+ sep_read_context(ta_ctx, &msg_offset,
+ &sctx->hash_private_ctx,
+ sizeof(struct sep_hash_private_context));
+
+ /* Signal to crypto infrastructure and clear out */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash init post op done\n");
+ sep_crypto_release(sctx, ta_ctx, 0);
+ return 0;
+}
+
+static u32 hash_update_post_op(struct sep_device *sep)
+{
+ u32 u32_error;
+ u32 msg_offset;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
+ struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash update post op\n");
+
+ /* first bring msg from shared area to local area */
+ memcpy(ta_ctx->msg, sep->shared_addr,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ u32_error = sep_verify_op(ta_ctx, SEP_HASH_UPDATE_OPCODE,
+ &msg_offset);
+
+ if (u32_error) {
+		dev_warn(&ta_ctx->sep_used->pdev->dev, "hash update error %x\n",
+ u32_error);
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return u32_error;
+ }
+
+ /* Read Context */
+ sep_read_context(ta_ctx, &msg_offset,
+ &sctx->hash_private_ctx,
+ sizeof(struct sep_hash_private_context));
+
+ /**
+	 * Following is only for finup; if we just completed the
+ * data portion of finup, we now need to kick off the
+ * finish portion of finup.
+ */
+
+ if (ta_ctx->sep_used->current_hash_stage == HASH_FINUP_DATA) {
+
+ /* first reset stage to HASH_FINUP_FINISH */
+ ta_ctx->sep_used->current_hash_stage = HASH_FINUP_FINISH;
+
+ /* now enqueue the finish operation */
+ spin_lock_irq(&queue_lock);
+ u32_error = crypto_enqueue_request(&sep_queue,
+ &ta_ctx->sep_used->current_hash_req->base);
+ spin_unlock_irq(&queue_lock);
+
+ if ((u32_error != 0) && (u32_error != -EINPROGRESS)) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+				 "sep cipher post op can't queue\n");
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return u32_error;
+ }
+
+ /* schedule the data send */
+ u32_error = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+
+ if (u32_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+				 "can't submit work sep_crypto_block\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return -EINVAL;
+ }
+ }
+
+ /* Signal to crypto infrastructure and clear out */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash update post op done\n");
+ sep_crypto_release(sctx, ta_ctx, 0);
+ return 0;
+}
+
+static u32 hash_final_post_op(struct sep_device *sep)
+{
+ int max_length;
+ u32 u32_error;
+ u32 msg_offset;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
+ struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash final post op\n");
+
+ /* first bring msg from shared area to local area */
+ memcpy(ta_ctx->msg, sep->shared_addr,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ u32_error = sep_verify_op(ta_ctx, SEP_HASH_FINISH_OPCODE,
+ &msg_offset);
+
+ if (u32_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "hash finish error %x\n",
+ u32_error);
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return u32_error;
+ }
+
+ /* Grab the result */
+ if (ta_ctx->current_hash_req->result == NULL) {
+ /* Oops, null buffer; error out here */
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "hash finish null buffer\n");
+ sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
+ return -ENOMEM;
+ }
+
+ max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
+ sizeof(u32)) * sizeof(u32);
+
+ sep_read_msg(ta_ctx,
+ ta_ctx->current_hash_req->result,
+ crypto_ahash_digestsize(tfm), max_length,
+ &msg_offset, 0);
+
+ /* Signal to crypto infrastructure and clear out */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash finish post op done\n");
+ sep_crypto_release(sctx, ta_ctx, 0);
+ return 0;
+}
+
+static u32 hash_digest_post_op(struct sep_device *sep)
+{
+ int max_length;
+ u32 u32_error;
+ u32 msg_offset;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
+ struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash digest post op\n");
+
+ /* first bring msg from shared area to local area */
+ memcpy(ta_ctx->msg, sep->shared_addr,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ u32_error = sep_verify_op(ta_ctx, SEP_HASH_SINGLE_OPCODE,
+ &msg_offset);
+
+ if (u32_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "hash digest finish error %x\n", u32_error);
+
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return u32_error;
+ }
+
+ /* Grab the result */
+ if (ta_ctx->current_hash_req->result == NULL) {
+ /* Oops, null buffer; error out here */
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "hash digest finish null buffer\n");
+ sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
+ return -ENOMEM;
+ }
+
+ max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
+ sizeof(u32)) * sizeof(u32);
+
+ sep_read_msg(ta_ctx,
+ ta_ctx->current_hash_req->result,
+ crypto_ahash_digestsize(tfm), max_length,
+ &msg_offset, 0);
+
+ /* Signal to crypto infrastructure and clear out */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash digest finish post op done\n");
+
+ sep_crypto_release(sctx, ta_ctx, 0);
+ return 0;
+}
+
+/**
+ * The sep_finish function is the function that is scheduled (via tasklet)
+ * by the interrupt service routine when the SEP sends an interrupt
+ * This is only called by the interrupt handler as a tasklet.
+ */
+static void sep_finish(unsigned long data)
+{
+ struct sep_device *sep_dev;
+ int res;
+
+ res = 0;
+
+ if (data == 0) {
+ pr_debug("sep_finish called with null data\n");
+ return;
+ }
+
+ sep_dev = (struct sep_device *)data;
+ if (sep_dev == NULL) {
+ pr_debug("sep_finish; sep_dev is NULL\n");
+ return;
+ }
+
+ if (sep_dev->in_kernel == (u32)0) {
+ dev_warn(&sep_dev->pdev->dev,
+ "sep_finish; not in kernel operation\n");
+ return;
+ }
+
+ /* Did we really do a sep command prior to this? */
+ if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
+ &sep_dev->ta_ctx->call_status.status)) {
+
+ dev_warn(&sep_dev->pdev->dev, "[PID%d] sendmsg not called\n",
+ current->pid);
+ return;
+ }
+
+ if (sep_dev->send_ct != sep_dev->reply_ct) {
+ dev_warn(&sep_dev->pdev->dev,
+ "[PID%d] poll; no message came back\n",
+ current->pid);
+ return;
+ }
+
+	/* Check for error in case time ran out; GPR3 holds the poll status */
+	res = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+	if ((res != 0x0) && (res != 0x8)) {
+ dev_warn(&sep_dev->pdev->dev,
+ "[PID%d] poll; poll error GPR3 is %x\n",
+ current->pid, res);
+ return;
+ }
+
+ /* What kind of interrupt from sep was this? */
+ res = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+
+ dev_dbg(&sep_dev->pdev->dev, "[PID%d] GPR2 at crypto finish is %x\n",
+ current->pid, res);
+
+ /* Print request? */
+ if ((res >> 30) & 0x1) {
+ dev_dbg(&sep_dev->pdev->dev, "[PID%d] sep print req\n",
+ current->pid);
+ dev_dbg(&sep_dev->pdev->dev, "[PID%d] contents: %s\n",
+ current->pid,
+ (char *)(sep_dev->shared_addr +
+ SEP_DRIVER_PRINTF_OFFSET_IN_BYTES));
+ return;
+ }
+
+ /* Request for daemon (not currently in POR)? */
+ if (res >> 31) {
+ dev_dbg(&sep_dev->pdev->dev,
+ "[PID%d] sep request; ignoring\n",
+ current->pid);
+ return;
+ }
+
+	/* If we got here, then we have a reply to a sep command */
+
+ dev_dbg(&sep_dev->pdev->dev,
+ "[PID%d] sep reply to command; processing request: %x\n",
+ current->pid, sep_dev->current_request);
+
+ switch (sep_dev->current_request) {
+ case AES_CBC:
+ case AES_ECB:
+ case DES_CBC:
+ case DES_ECB:
+ res = crypto_post_op(sep_dev);
+ break;
+ case SHA1:
+ case MD5:
+ case SHA224:
+ case SHA256:
+ switch (sep_dev->current_hash_stage) {
+ case HASH_INIT:
+ res = hash_init_post_op(sep_dev);
+ break;
+ case HASH_UPDATE:
+ case HASH_FINUP_DATA:
+ res = hash_update_post_op(sep_dev);
+ break;
+ case HASH_FINUP_FINISH:
+ case HASH_FINISH:
+ res = hash_final_post_op(sep_dev);
+ break;
+ case HASH_DIGEST:
+ res = hash_digest_post_op(sep_dev);
+ break;
+ default:
+ pr_debug("sep - invalid stage for hash finish\n");
+ }
+ break;
+ default:
+ pr_debug("sep - invalid request for finish\n");
+ }
+
+ if (res)
+ pr_debug("sep - finish returned error %x\n", res);
+}
+
+static int sep_hash_cra_init(struct crypto_tfm *tfm)
+{
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+
+ pr_debug("sep_hash_cra_init name is %s\n", alg_name);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct this_task_ctx));
+ return 0;
+}
+
+static void sep_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ pr_debug("sep_hash_cra_exit\n");
+}
+
+static void sep_hash_init(void *data)
+{
+ u32 msg_offset;
+ int result;
+ struct ahash_request *req;
+ struct crypto_ahash *tfm;
+ struct this_task_ctx *ta_ctx;
+ struct sep_system_ctx *sctx;
+ unsigned long end_time;
+ int are_we_done_yet;
+
+ req = (struct ahash_request *)data;
+ tfm = crypto_ahash_reqtfm(req);
+ sctx = crypto_ahash_ctx(tfm);
+ ta_ctx = ahash_request_ctx(req);
+ ta_ctx->sep_used = sep_dev;
+
+ ta_ctx->are_we_done_yet = &are_we_done_yet;
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_init\n");
+ ta_ctx->current_hash_stage = HASH_INIT;
+ /* opcode and mode */
+ sep_make_header(ta_ctx, &msg_offset, SEP_HASH_INIT_OPCODE);
+ sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
+ sizeof(u32), sizeof(u32), &msg_offset, 0);
+ sep_end_msg(ta_ctx, msg_offset);
+
+ are_we_done_yet = 0;
+ result = sep_crypto_take_sep(ta_ctx);
+ if (result) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_init take sep failed\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ }
+
+ /* now we sit and wait up to a fixed time for completion */
+ end_time = jiffies + (WAIT_TIME * HZ);
+ while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
+ schedule();
+
+ /* Done waiting; still not done yet? */
+ if (are_we_done_yet == 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash init never got done\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+}
+
+static void sep_hash_update(void *data)
+{
+ int int_error;
+ u32 msg_offset;
+ u32 len;
+ struct sep_hash_internal_context *int_ctx;
+ u32 block_size;
+ u32 head_len;
+ u32 tail_len;
+ int are_we_done_yet;
+
+ static u32 msg[10];
+ static char small_buf[100];
+ void *src_ptr;
+ struct scatterlist *new_sg;
+ ssize_t copy_result;
+ struct ahash_request *req;
+ struct crypto_ahash *tfm;
+ struct this_task_ctx *ta_ctx;
+ struct sep_system_ctx *sctx;
+ unsigned long end_time;
+
+ req = (struct ahash_request *)data;
+ tfm = crypto_ahash_reqtfm(req);
+ sctx = crypto_ahash_ctx(tfm);
+ ta_ctx = ahash_request_ctx(req);
+ ta_ctx->sep_used = sep_dev;
+
+ ta_ctx->are_we_done_yet = &are_we_done_yet;
+
+ /* length for queue status */
+ ta_ctx->nbytes = req->nbytes;
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_update\n");
+ ta_ctx->current_hash_stage = HASH_UPDATE;
+ len = req->nbytes;
+
+	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
+	/* Compute header/tail sizes */
+	int_ctx = (struct sep_hash_internal_context *)
+		&sctx->hash_private_ctx.internal_context;
+	head_len = (block_size - int_ctx->prev_update_bytes) % block_size;
+	tail_len = (req->nbytes - head_len) % block_size;
+
+	dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", len);
+	dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
+	dev_dbg(&ta_ctx->sep_used->pdev->dev, "head len is %x\n", head_len);
+	dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
+
+ /* Make sure all pages are even block */
+ int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
+ req->nbytes,
+ block_size, &new_sg, 1);
+
+ if (int_error < 0) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+			"oddball pages error in hash update\n");
+ sep_crypto_release(sctx, ta_ctx, -ENOMEM);
+ return;
+ } else if (int_error == 1) {
+ ta_ctx->src_sg = new_sg;
+ ta_ctx->src_sg_hold = new_sg;
+ } else {
+ ta_ctx->src_sg = req->src;
+ ta_ctx->src_sg_hold = NULL;
+ }
+
+	/* null data gets a null source pointer */
+	if ((!req->nbytes) || (!ta_ctx->src_sg))
+		src_ptr = NULL;
+	else
+		src_ptr = sg_virt(ta_ctx->src_sg);
+
+ sep_dump_sg(ta_ctx->sep_used, "hash block sg in", ta_ctx->src_sg);
+
+ ta_ctx->dcb_input_data.app_in_address = src_ptr;
+ ta_ctx->dcb_input_data.data_in_size =
+ req->nbytes - (head_len + tail_len);
+ ta_ctx->dcb_input_data.app_out_address = NULL;
+ ta_ctx->dcb_input_data.block_size = block_size;
+ ta_ctx->dcb_input_data.tail_block_size = 0;
+ ta_ctx->dcb_input_data.is_applet = 0;
+ ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
+ ta_ctx->dcb_input_data.dst_sg = NULL;
+
+ int_error = sep_create_dcb_dmatables_context_kernel(
+ ta_ctx->sep_used,
+ &ta_ctx->dcb_region,
+ &ta_ctx->dmatables_region,
+ &ta_ctx->dma_ctx,
+ &ta_ctx->dcb_input_data,
+ 1);
+ if (int_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "hash update dma table create failed\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+ /* Construct message to SEP */
+ sep_make_header(ta_ctx, &msg_offset, SEP_HASH_UPDATE_OPCODE);
+
+ msg[0] = (u32)0;
+ msg[1] = (u32)0;
+ msg[2] = (u32)0;
+
+ sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
+ &msg_offset, 0);
+
+ /* Handle remainders */
+
+ /* Head */
+ sep_write_msg(ta_ctx, &head_len, sizeof(u32),
+ sizeof(u32), &msg_offset, 0);
+
+ if (head_len) {
+ copy_result = sg_copy_to_buffer(
+ req->src,
+ sep_sg_nents(ta_ctx->src_sg),
+ small_buf, head_len);
+
+ if (copy_result != head_len) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sg head copy failure in hash block\n");
+ sep_crypto_release(sctx, ta_ctx, -ENOMEM);
+ return;
+ }
+
+ sep_write_msg(ta_ctx, small_buf, head_len,
+ sizeof(u32) * 32, &msg_offset, 1);
+ } else {
+ msg_offset += sizeof(u32) * 32;
+ }
+
+ /* Tail */
+ sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
+ sizeof(u32), &msg_offset, 0);
+
+ if (tail_len) {
+ copy_result = sep_copy_offset_sg(
+ ta_ctx->sep_used,
+ ta_ctx->src_sg,
+ req->nbytes - tail_len,
+ small_buf, tail_len);
+
+ if (copy_result != tail_len) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sg tail copy failure in hash block\n");
+ sep_crypto_release(sctx, ta_ctx, -ENOMEM);
+ return;
+ }
+
+ sep_write_msg(ta_ctx, small_buf, tail_len,
+ sizeof(u32) * 32, &msg_offset, 1);
+ } else {
+ msg_offset += sizeof(u32) * 32;
+ }
+
+ /* Context */
+ sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
+ sizeof(struct sep_hash_private_context));
+
+ sep_end_msg(ta_ctx, msg_offset);
+ are_we_done_yet = 0;
+ int_error = sep_crypto_take_sep(ta_ctx);
+	if (int_error) {
+		dev_warn(&ta_ctx->sep_used->pdev->dev,
+			"sep_hash_update take sep failed\n");
+		sep_crypto_release(sctx, ta_ctx, -EINVAL);
+		return;
+	}
+
+ /* now we sit and wait up to a fixed time for completion */
+ end_time = jiffies + (WAIT_TIME * HZ);
+ while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
+ schedule();
+
+ /* Done waiting; still not done yet? */
+ if (are_we_done_yet == 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash update never got done\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+}
+
+static void sep_hash_final(void *data)
+{
+ u32 msg_offset;
+ struct ahash_request *req;
+ struct crypto_ahash *tfm;
+ struct this_task_ctx *ta_ctx;
+ struct sep_system_ctx *sctx;
+ int result;
+ unsigned long end_time;
+ int are_we_done_yet;
+
+ req = (struct ahash_request *)data;
+ tfm = crypto_ahash_reqtfm(req);
+ sctx = crypto_ahash_ctx(tfm);
+ ta_ctx = ahash_request_ctx(req);
+ ta_ctx->sep_used = sep_dev;
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_final\n");
+ ta_ctx->current_hash_stage = HASH_FINISH;
+
+ ta_ctx->are_we_done_yet = &are_we_done_yet;
+
+ /* opcode and mode */
+ sep_make_header(ta_ctx, &msg_offset, SEP_HASH_FINISH_OPCODE);
+
+ /* Context */
+ sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
+ sizeof(struct sep_hash_private_context));
+
+ sep_end_msg(ta_ctx, msg_offset);
+ are_we_done_yet = 0;
+ result = sep_crypto_take_sep(ta_ctx);
+	if (result) {
+		dev_warn(&ta_ctx->sep_used->pdev->dev,
+			"sep_hash_final take sep failed\n");
+		sep_crypto_release(sctx, ta_ctx, -EINVAL);
+		return;
+	}
+
+ /* now we sit and wait up to a fixed time for completion */
+ end_time = jiffies + (WAIT_TIME * HZ);
+ while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
+ schedule();
+
+ /* Done waiting; still not done yet? */
+ if (are_we_done_yet == 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash final job never got done\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+}
+
+static void sep_hash_digest(void *data)
+{
+ int int_error;
+ u32 msg_offset;
+ u32 block_size;
+ u32 msg[10];
+ size_t copy_result;
+ int result;
+ int are_we_done_yet;
+ u32 tail_len;
+ static char small_buf[100];
+ struct scatterlist *new_sg;
+ void *src_ptr;
+
+ struct ahash_request *req;
+ struct crypto_ahash *tfm;
+ struct this_task_ctx *ta_ctx;
+ struct sep_system_ctx *sctx;
+ unsigned long end_time;
+
+ req = (struct ahash_request *)data;
+ tfm = crypto_ahash_reqtfm(req);
+ sctx = crypto_ahash_ctx(tfm);
+ ta_ctx = ahash_request_ctx(req);
+ ta_ctx->sep_used = sep_dev;
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_digest\n");
+ ta_ctx->current_hash_stage = HASH_DIGEST;
+
+ ta_ctx->are_we_done_yet = &are_we_done_yet;
+
+ /* length for queue status */
+ ta_ctx->nbytes = req->nbytes;
+
+ block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ tail_len = req->nbytes % block_size;
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", req->nbytes);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
+
+ /* Make sure all pages are even block */
+ int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
+ req->nbytes,
+ block_size, &new_sg, 1);
+
+ if (int_error < 0) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+			"oddball pages error in hash digest\n");
+ sep_crypto_release(sctx, ta_ctx, -ENOMEM);
+ return;
+ } else if (int_error == 1) {
+ ta_ctx->src_sg = new_sg;
+ ta_ctx->src_sg_hold = new_sg;
+ } else {
+ ta_ctx->src_sg = req->src;
+ ta_ctx->src_sg_hold = NULL;
+ }
+
+	/* null data gets a null source pointer */
+	if ((!req->nbytes) || (!ta_ctx->src_sg))
+		src_ptr = NULL;
+	else
+		src_ptr = sg_virt(ta_ctx->src_sg);
+
+ sep_dump_sg(ta_ctx->sep_used, "hash block sg in", ta_ctx->src_sg);
+
+ ta_ctx->dcb_input_data.app_in_address = src_ptr;
+ ta_ctx->dcb_input_data.data_in_size = req->nbytes - tail_len;
+ ta_ctx->dcb_input_data.app_out_address = NULL;
+ ta_ctx->dcb_input_data.block_size = block_size;
+ ta_ctx->dcb_input_data.tail_block_size = 0;
+ ta_ctx->dcb_input_data.is_applet = 0;
+ ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
+ ta_ctx->dcb_input_data.dst_sg = NULL;
+
+ int_error = sep_create_dcb_dmatables_context_kernel(
+ ta_ctx->sep_used,
+ &ta_ctx->dcb_region,
+ &ta_ctx->dmatables_region,
+ &ta_ctx->dma_ctx,
+ &ta_ctx->dcb_input_data,
+ 1);
+ if (int_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+			"hash digest dma table create failed\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+ /* Construct message to SEP */
+ sep_make_header(ta_ctx, &msg_offset, SEP_HASH_SINGLE_OPCODE);
+ sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
+ sizeof(u32), sizeof(u32), &msg_offset, 0);
+
+ msg[0] = (u32)0;
+ msg[1] = (u32)0;
+ msg[2] = (u32)0;
+
+ sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
+ &msg_offset, 0);
+
+ /* Tail */
+ sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
+ sizeof(u32), &msg_offset, 0);
+
+ if (tail_len) {
+ copy_result = sep_copy_offset_sg(
+ ta_ctx->sep_used,
+ ta_ctx->src_sg,
+ req->nbytes - tail_len,
+ small_buf, tail_len);
+
+ if (copy_result != tail_len) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sg tail copy failure in hash block\n");
+ sep_crypto_release(sctx, ta_ctx, -ENOMEM);
+ return;
+ }
+
+ sep_write_msg(ta_ctx, small_buf, tail_len,
+ sizeof(u32) * 32, &msg_offset, 1);
+ } else {
+ msg_offset += sizeof(u32) * 32;
+ }
+
+ sep_end_msg(ta_ctx, msg_offset);
+
+ are_we_done_yet = 0;
+ result = sep_crypto_take_sep(ta_ctx);
+	if (result) {
+		dev_warn(&ta_ctx->sep_used->pdev->dev,
+			"sep_hash_digest take sep failed\n");
+		sep_crypto_release(sctx, ta_ctx, -EINVAL);
+		return;
+	}
+
+ /* now we sit and wait up to a fixed time for completion */
+ end_time = jiffies + (WAIT_TIME * HZ);
+ while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
+ schedule();
+
+ /* Done waiting; still not done yet? */
+ if (are_we_done_yet == 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash digest job never got done\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+}
+
+/*
+ * This is the function invoked on behalf of each of the entry points
+ * provided in the kernel crypto descriptors. It is run in process
+ * context using the kernel workqueues and can therefore be put to
+ * sleep.
+ */
+static void sep_dequeuer(void *data)
+{
+ struct crypto_queue *this_queue;
+ struct crypto_async_request *async_req;
+ struct crypto_async_request *backlog;
+ struct ablkcipher_request *cypher_req;
+ struct ahash_request *hash_req;
+ struct sep_system_ctx *sctx;
+ struct crypto_ahash *hash_tfm;
+ struct this_task_ctx *ta_ctx;
+
+ this_queue = (struct crypto_queue *)data;
+
+ spin_lock_irq(&queue_lock);
+ backlog = crypto_get_backlog(this_queue);
+ async_req = crypto_dequeue_request(this_queue);
+ spin_unlock_irq(&queue_lock);
+
+ if (!async_req) {
+ pr_debug("sep crypto queue is empty\n");
+ return;
+ }
+
+	if (backlog) {
+		pr_debug("sep crypto backlog set\n");
+		if (backlog->complete)
+			backlog->complete(backlog, -EINPROGRESS);
+	}
+
+ if (!async_req->tfm) {
+ pr_debug("sep crypto queue null tfm\n");
+ return;
+ }
+
+ if (!async_req->tfm->__crt_alg) {
+ pr_debug("sep crypto queue null __crt_alg\n");
+ return;
+ }
+
+ if (!async_req->tfm->__crt_alg->cra_type) {
+ pr_debug("sep crypto queue null cra_type\n");
+ return;
+ }
+
+ /* we have stuff in the queue */
+ if (async_req->tfm->__crt_alg->cra_type !=
+ &crypto_ahash_type) {
+ /* This is for a cypher */
+ pr_debug("sep crypto queue doing cipher\n");
+ cypher_req = container_of(async_req,
+ struct ablkcipher_request,
+ base);
+ if (!cypher_req) {
+ pr_debug("sep crypto queue null cypher_req\n");
+ return;
+ }
+
+ sep_crypto_block((void *)cypher_req);
+ return;
+ } else {
+ /* This is a hash */
+ pr_debug("sep crypto queue doing hash\n");
+		/*
+		 * This is a bit more complex than the cipher case; we
+		 * need to figure out what type of hash operation it is.
+		 */
+ hash_req = ahash_request_cast(async_req);
+ if (!hash_req) {
+ pr_debug("sep crypto queue null hash_req\n");
+ return;
+ }
+
+ hash_tfm = crypto_ahash_reqtfm(hash_req);
+ if (!hash_tfm) {
+ pr_debug("sep crypto queue null hash_tfm\n");
+ return;
+ }
+
+ sctx = crypto_ahash_ctx(hash_tfm);
+ if (!sctx) {
+ pr_debug("sep crypto queue null sctx\n");
+ return;
+ }
+
+ ta_ctx = ahash_request_ctx(hash_req);
+
+ if (ta_ctx->current_hash_stage == HASH_INIT) {
+ pr_debug("sep crypto queue hash init\n");
+ sep_hash_init((void *)hash_req);
+ return;
+ } else if (ta_ctx->current_hash_stage == HASH_UPDATE) {
+ pr_debug("sep crypto queue hash update\n");
+ sep_hash_update((void *)hash_req);
+ return;
+ } else if (ta_ctx->current_hash_stage == HASH_FINISH) {
+ pr_debug("sep crypto queue hash final\n");
+ sep_hash_final((void *)hash_req);
+ return;
+ } else if (ta_ctx->current_hash_stage == HASH_DIGEST) {
+ pr_debug("sep crypto queue hash digest\n");
+ sep_hash_digest((void *)hash_req);
+ return;
+		} else if (ta_ctx->current_hash_stage == HASH_FINUP_DATA) {
+			pr_debug("sep crypto queue hash finup data\n");
+			sep_hash_update((void *)hash_req);
+			return;
+		} else if (ta_ctx->current_hash_stage == HASH_FINUP_FINISH) {
+			pr_debug("sep crypto queue hash finup finish\n");
+			sep_hash_final((void *)hash_req);
+			return;
+ } else {
+			pr_debug("sep crypto queue invalid hash stage\n");
+ return;
+ }
+ }
+}
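+
+/*
+ * A sketch of a table-driven alternative to the stage dispatch chain
+ * in sep_dequeuer() above; purely illustrative, assuming the
+ * hash_stage enumerators are small consecutive values.
+ */
+static void (* const sep_hash_stage_handler[])(void *) = {
+	[HASH_INIT]		= sep_hash_init,
+	[HASH_UPDATE]		= sep_hash_update,
+	[HASH_FINISH]		= sep_hash_final,
+	[HASH_DIGEST]		= sep_hash_digest,
+	[HASH_FINUP_DATA]	= sep_hash_update,
+	[HASH_FINUP_FINISH]	= sep_hash_final,
+};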
+
+static int sep_sha1_init(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing sha1 init\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA1;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA1;
+ ta_ctx->current_hash_stage = HASH_INIT;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
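+
+/*
+ * Every entry point in this file, sep_sha1_init() above included,
+ * repeats the same enqueue-and-kick sequence. A minimal sketch of a
+ * shared helper; the name sep_queue_and_submit() is hypothetical.
+ */
+static int sep_queue_and_submit(struct this_task_ctx *ta_ctx,
+	struct crypto_async_request *base)
+{
+	int error;
+	int error1;
+
+	/* lock necessary so that only one entity touches the queues */
+	spin_lock_irq(&queue_lock);
+	error = crypto_enqueue_request(&sep_queue, base);
+	if ((error != 0) && (error != -EINPROGRESS))
+		pr_debug(" sep - crypto enqueue failed: %x\n", error);
+	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+		sep_dequeuer, (void *)&sep_queue);
+	if (error1)
+		pr_debug(" sep - workqueue submit failed: %x\n", error1);
+	spin_unlock_irq(&queue_lock);
+
+	/* callers return the enqueue result, as the entry points do */
+	return error;
+}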
+
+static int sep_sha1_update(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing sha1 update\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA1;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA1;
+ ta_ctx->current_hash_stage = HASH_UPDATE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha1_final(struct ahash_request *req)
+{
+ int error;
+ int error1;
+	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+	pr_debug("sep - doing sha1 final\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA1;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA1;
+ ta_ctx->current_hash_stage = HASH_FINISH;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha1_digest(struct ahash_request *req)
+{
+ int error;
+ int error1;
+	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+	pr_debug("sep - doing sha1 digest\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA1;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA1;
+ ta_ctx->current_hash_stage = HASH_DIGEST;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha1_finup(struct ahash_request *req)
+{
+ int error;
+ int error1;
+	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+	pr_debug("sep - doing sha1 finup\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA1;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA1;
+ ta_ctx->current_hash_stage = HASH_FINUP_DATA;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_md5_init(struct ahash_request *req)
+{
+ int error;
+ int error1;
+	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+	pr_debug("sep - doing md5 init\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = MD5;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_MD5;
+ ta_ctx->current_hash_stage = HASH_INIT;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_md5_update(struct ahash_request *req)
+{
+ int error;
+ int error1;
+	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+	pr_debug("sep - doing md5 update\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = MD5;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_MD5;
+ ta_ctx->current_hash_stage = HASH_UPDATE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_md5_final(struct ahash_request *req)
+{
+ int error;
+ int error1;
+	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+	pr_debug("sep - doing md5 final\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = MD5;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_MD5;
+ ta_ctx->current_hash_stage = HASH_FINISH;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_md5_digest(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing md5 digest\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = MD5;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_MD5;
+ ta_ctx->current_hash_stage = HASH_DIGEST;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_md5_finup(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing md5 finup\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = MD5;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_MD5;
+ ta_ctx->current_hash_stage = HASH_FINUP_DATA;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha224_init(struct ahash_request *req)
+{
+ int error;
+ int error1;
+	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+	pr_debug("sep - doing sha224 init\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA224;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA224;
+ ta_ctx->current_hash_stage = HASH_INIT;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha224_update(struct ahash_request *req)
+{
+ int error;
+ int error1;
+	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+	pr_debug("sep - doing sha224 update\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA224;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA224;
+ ta_ctx->current_hash_stage = HASH_UPDATE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha224_final(struct ahash_request *req)
+{
+ int error;
+ int error1;
+	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+	pr_debug("sep - doing sha224 final\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA224;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA224;
+ ta_ctx->current_hash_stage = HASH_FINISH;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha224_digest(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing sha224 digest\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA224;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA224;
+ ta_ctx->current_hash_stage = HASH_DIGEST;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha224_finup(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing sha224 finup\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA224;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA224;
+ ta_ctx->current_hash_stage = HASH_FINUP_DATA;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha256_init(struct ahash_request *req)
+{
+ int error;
+ int error1;
+	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+	pr_debug("sep - doing sha256 init\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA256;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA256;
+ ta_ctx->current_hash_stage = HASH_INIT;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha256_update(struct ahash_request *req)
+{
+ int error;
+ int error1;
+	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+	pr_debug("sep - doing sha256 update\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA256;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA256;
+ ta_ctx->current_hash_stage = HASH_UPDATE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha256_final(struct ahash_request *req)
+{
+ int error;
+ int error1;
+	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+	pr_debug("sep - doing sha256 final\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA256;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA256;
+ ta_ctx->current_hash_stage = HASH_FINISH;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha256_digest(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing sha256 digest\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA256;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA256;
+ ta_ctx->current_hash_stage = HASH_DIGEST;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha256_finup(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing sha256 finup\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA256;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA256;
+ ta_ctx->current_hash_stage = HASH_FINUP_DATA;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_crypto_init(struct crypto_tfm *tfm)
+{
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+
+ if (alg_name == NULL)
+ pr_debug("sep_crypto_init alg is NULL\n");
+ else
+ pr_debug("sep_crypto_init alg is %s\n", alg_name);
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct this_task_ctx);
+ return 0;
+}
+
+static void sep_crypto_exit(struct crypto_tfm *tfm)
+{
+ pr_debug("sep_crypto_exit\n");
+}
+
+static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
+
+ pr_debug("sep aes setkey\n");
+
+ pr_debug("tfm is %p sctx is %p\n", tfm, sctx);
+ switch (keylen) {
+ case SEP_AES_KEY_128_SIZE:
+ sctx->aes_key_size = AES_128;
+ break;
+ case SEP_AES_KEY_192_SIZE:
+ sctx->aes_key_size = AES_192;
+ break;
+ case SEP_AES_KEY_256_SIZE:
+ sctx->aes_key_size = AES_256;
+ break;
+ case SEP_AES_KEY_512_SIZE:
+ sctx->aes_key_size = AES_512;
+ break;
+ default:
+ pr_debug("invalid sep aes key size %x\n",
+ keylen);
+ return -EINVAL;
+ }
+
+ memset(&sctx->key.aes, 0, sizeof(u32) *
+ SEP_AES_MAX_KEY_SIZE_WORDS);
+ memcpy(&sctx->key.aes, key, keylen);
+ sctx->keylen = keylen;
+ /* Indicate to encrypt/decrypt function to send key to SEP */
+ sctx->key_sent = 0;
+
+ return 0;
+}
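+
+/*
+ * A minimal sketch of reaching this setkey path from kernel code
+ * through the standard API, assuming the "cbc(aes)" algorithm
+ * registered later in this file is selected; the function name and
+ * key bytes are hypothetical.
+ */
+static int sep_aes_setkey_sketch(void)
+{
+	static const u8 key[SEP_AES_KEY_128_SIZE] = { 0x00, 0x01, 0x02 };
+	struct crypto_ablkcipher *tfm;
+	int err;
+
+	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	/* lands in sep_aes_setkey() and caches the key in sctx */
+	err = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
+
+	crypto_free_ablkcipher(tfm);
+	return err;
+}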
+
+static int sep_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+
+ pr_debug("sep - doing aes ecb encrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = AES_ECB;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
+ ta_ctx->aes_opmode = SEP_AES_ECB;
+ ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+
+ pr_debug("sep - doing aes ecb decrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = AES_ECB;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->aes_encmode = SEP_AES_DECRYPT;
+ ta_ctx->aes_opmode = SEP_AES_ECB;
+ ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+ struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+
+ pr_debug("sep - doing aes cbc encrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
+ crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = AES_CBC;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
+ ta_ctx->aes_opmode = SEP_AES_CBC;
+ ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+ struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+
+ pr_debug("sep - doing aes cbc decrypt\n");
+
+ pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
+ crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = AES_CBC;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->aes_encmode = SEP_AES_DECRYPT;
+ ta_ctx->aes_opmode = SEP_AES_CBC;
+ ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
+ struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
+ u32 *flags = &ctfm->crt_flags;
+
+ pr_debug("sep des setkey\n");
+
+ switch (keylen) {
+ case DES_KEY_SIZE:
+ sctx->des_nbr_keys = DES_KEY_1;
+ break;
+ case DES_KEY_SIZE * 2:
+ sctx->des_nbr_keys = DES_KEY_2;
+ break;
+ case DES_KEY_SIZE * 3:
+ sctx->des_nbr_keys = DES_KEY_3;
+ break;
+ default:
+ pr_debug("invalid key size %x\n",
+ keylen);
+ return -EINVAL;
+ }
+
+ if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY) &&
+ (sep_weak_key(key, keylen))) {
+
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug("weak key\n");
+ return -EINVAL;
+ }
+
+ memset(&sctx->key.des, 0, sizeof(struct sep_des_key));
+ memcpy(&sctx->key.des.key1, key, keylen);
+ sctx->keylen = keylen;
+ /* Indicate to encrypt/decrypt function to send key to SEP */
+ sctx->key_sent = 0;
+
+ return 0;
+}
+
+static int sep_des_ecb_encrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+
+ pr_debug("sep - doing des ecb encrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = DES_ECB;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->des_encmode = SEP_DES_ENCRYPT;
+ ta_ctx->des_opmode = SEP_DES_ECB;
+ ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_des_ecb_decrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+
+ pr_debug("sep - doing des ecb decrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = DES_ECB;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->des_encmode = SEP_DES_DECRYPT;
+ ta_ctx->des_opmode = SEP_DES_ECB;
+ ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_des_cbc_encrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+
+ pr_debug("sep - doing des cbc encrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = DES_CBC;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->des_encmode = SEP_DES_ENCRYPT;
+ ta_ctx->des_opmode = SEP_DES_CBC;
+ ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_des_cbc_decrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+
+	pr_debug("sep - doing des cbc decrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = DES_CBC;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->des_encmode = SEP_DES_DECRYPT;
+ ta_ctx->des_opmode = SEP_DES_CBC;
+ ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static struct ahash_alg hash_algs[] = {
+{
+ .init = sep_sha1_init,
+ .update = sep_sha1_update,
+ .final = sep_sha1_final,
+ .digest = sep_sha1_digest,
+ .finup = sep_sha1_finup,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_hash_cra_init,
+ .cra_exit = sep_hash_cra_exit,
+ }
+ }
+},
+{
+ .init = sep_md5_init,
+ .update = sep_md5_update,
+ .final = sep_md5_final,
+ .digest = sep_md5_digest,
+ .finup = sep_md5_finup,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "md5-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+		.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_hash_cra_init,
+ .cra_exit = sep_hash_cra_exit,
+ }
+ }
+},
+{
+ .init = sep_sha224_init,
+ .update = sep_sha224_update,
+ .final = sep_sha224_final,
+ .digest = sep_sha224_digest,
+ .finup = sep_sha224_finup,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_hash_cra_init,
+ .cra_exit = sep_hash_cra_exit,
+ }
+ }
+},
+{
+ .init = sep_sha256_init,
+ .update = sep_sha256_update,
+ .final = sep_sha256_final,
+ .digest = sep_sha256_digest,
+ .finup = sep_sha256_finup,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_hash_cra_init,
+ .cra_exit = sep_hash_cra_exit,
+ }
+ }
+}
+};
+
+static struct crypto_alg crypto_algs[] = {
+{
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_crypto_init,
+ .cra_exit = sep_crypto_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sep_aes_setkey,
+ .encrypt = sep_aes_ecb_encrypt,
+ .decrypt = sep_aes_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_crypto_init,
+ .cra_exit = sep_crypto_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sep_aes_setkey,
+ .encrypt = sep_aes_cbc_encrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .decrypt = sep_aes_cbc_decrypt,
+ }
+},
+{
+	.cra_name = "ecb(des)",
+	.cra_driver_name = "ecb-des-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_crypto_init,
+ .cra_exit = sep_crypto_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = sep_des_setkey,
+		.encrypt = sep_des_ecb_encrypt,
+		.decrypt = sep_des_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_crypto_init,
+ .cra_exit = sep_crypto_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = sep_des_setkey,
+ .encrypt = sep_des_cbc_encrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .decrypt = sep_des_cbc_decrypt,
+ }
+},
+{
+	.cra_name = "ecb(des3-ede)",
+	.cra_driver_name = "ecb-des3-ede-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_crypto_init,
+ .cra_exit = sep_crypto_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = sep_des_setkey,
+		.encrypt = sep_des_ecb_encrypt,
+		.decrypt = sep_des_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des3-ede)",
+	.cra_driver_name = "cbc-des3-ede-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_crypto_init,
+ .cra_exit = sep_crypto_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = sep_des_setkey,
+ .encrypt = sep_des_cbc_encrypt,
+ .decrypt = sep_des_cbc_decrypt,
+ }
+}
+};
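+
+/*
+ * A minimal sketch of driving one of the hashes above from kernel
+ * code, assuming the "sha1" lookup selects this driver; the function
+ * names and completion plumbing are hypothetical, and the final
+ * status after an asynchronous completion is ignored for brevity.
+ */
+static void sep_hash_sketch_done(struct crypto_async_request *req, int err)
+{
+	if (err != -EINPROGRESS)
+		complete(req->data);
+}
+
+static int sep_hash_sketch(const void *buf, unsigned int len, u8 *digest)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	struct crypto_ahash *tfm;
+	struct ahash_request *req;
+	struct scatterlist sg;
+	int err;
+
+	tfm = crypto_alloc_ahash("sha1", 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	req = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		crypto_free_ahash(tfm);
+		return -ENOMEM;
+	}
+
+	sg_init_one(&sg, buf, len);
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+		sep_hash_sketch_done, &done);
+	ahash_request_set_crypt(req, &sg, digest, len);
+
+	err = crypto_ahash_digest(req);
+	if (err == -EINPROGRESS || err == -EBUSY) {
+		wait_for_completion(&done);
+		err = 0;
+	}
+
+	ahash_request_free(req);
+	crypto_free_ahash(tfm);
+	return err;
+}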
+
+int sep_crypto_setup(void)
+{
+	int err, i, j, k;
+
+	tasklet_init(&sep_dev->finish_tasklet, sep_finish,
+		(unsigned long)sep_dev);
+
+ crypto_init_queue(&sep_queue, SEP_QUEUE_LENGTH);
+
+ sep_dev->workqueue = create_singlethread_workqueue(
+ "sep_crypto_workqueue");
+ if (!sep_dev->workqueue) {
+		dev_warn(&sep_dev->pdev->dev, "can't create workqueue\n");
+ return -ENOMEM;
+ }
+
+	spin_lock_init(&queue_lock);
+
+ for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
+ err = crypto_register_ahash(&hash_algs[i]);
+ if (err)
+ goto err_algs;
+ }
+
+ for (j = 0; j < ARRAY_SIZE(crypto_algs); j++) {
+ err = crypto_register_alg(&crypto_algs[j]);
+ if (err)
+ goto err_crypto_algs;
+ }
+
+	return 0;
+
+err_algs:
+ for (k = 0; k < i; k++)
+ crypto_unregister_ahash(&hash_algs[k]);
+ return err;
+
+err_crypto_algs:
+ for (k = 0; k < j; k++)
+ crypto_unregister_alg(&crypto_algs[k]);
+ goto err_algs;
+}
+
+void sep_crypto_takedown(void)
+{
+	int i;
+
+ for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
+ crypto_unregister_ahash(&hash_algs[i]);
+ for (i = 0; i < ARRAY_SIZE(crypto_algs); i++)
+ crypto_unregister_alg(&crypto_algs[i]);
+
+ tasklet_kill(&sep_dev->finish_tasklet);
+}
+
+#endif
diff --git a/drivers/staging/sep/sep_crypto.h b/drivers/staging/sep/sep_crypto.h
new file mode 100644
index 000000000000..155c3c9b87c2
--- /dev/null
+++ b/drivers/staging/sep/sep_crypto.h
@@ -0,0 +1,359 @@
+/*
+ *
+ * sep_crypto.h - Crypto interface structures
+ *
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2010 Discretix. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * CONTACTS:
+ *
+ * Mark Allyn mark.a.allyn@intel.com
+ * Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ * CHANGES:
+ *
+ * 2009.06.26 Initial publish
+ * 2011.02.22 Enable Kernel Crypto
+ *
+ */
+
+/* Constants for SEP (from vendor) */
+#define SEP_START_MSG_TOKEN 0x02558808
+
+#define SEP_DES_IV_SIZE_WORDS 2
+#define SEP_DES_IV_SIZE_BYTES (SEP_DES_IV_SIZE_WORDS * \
+ sizeof(u32))
+#define SEP_DES_KEY_SIZE_WORDS 2
+#define SEP_DES_KEY_SIZE_BYTES (SEP_DES_KEY_SIZE_WORDS * \
+ sizeof(u32))
+#define SEP_DES_BLOCK_SIZE 8
+#define SEP_DES_DUMMY_SIZE 16
+
+#define SEP_DES_INIT_OPCODE 0x10
+#define SEP_DES_BLOCK_OPCODE 0x11
+
+#define SEP_AES_BLOCK_SIZE_WORDS 4
+#define SEP_AES_BLOCK_SIZE_BYTES \
+ (SEP_AES_BLOCK_SIZE_WORDS * sizeof(u32))
+
+#define SEP_AES_DUMMY_BLOCK_SIZE 16
+#define SEP_AES_IV_SIZE_WORDS SEP_AES_BLOCK_SIZE_WORDS
+#define SEP_AES_IV_SIZE_BYTES \
+ (SEP_AES_IV_SIZE_WORDS * sizeof(u32))
+
+#define SEP_AES_KEY_128_SIZE 16
+#define SEP_AES_KEY_192_SIZE 24
+#define SEP_AES_KEY_256_SIZE 32
+#define SEP_AES_KEY_512_SIZE 64
+#define SEP_AES_MAX_KEY_SIZE_WORDS 16
+#define SEP_AES_MAX_KEY_SIZE_BYTES \
+ (SEP_AES_MAX_KEY_SIZE_WORDS * sizeof(u32))
+
+#define SEP_AES_WRAP_MIN_SIZE 8
+#define SEP_AES_WRAP_MAX_SIZE 0x10000000
+
+#define SEP_AES_WRAP_BLOCK_SIZE_WORDS 2
+#define SEP_AES_WRAP_BLOCK_SIZE_BYTES \
+ (SEP_AES_WRAP_BLOCK_SIZE_WORDS * sizeof(u32))
+
+#define SEP_AES_SECRET_RKEK1 0x1
+#define SEP_AES_SECRET_RKEK2 0x2
+
+#define SEP_AES_INIT_OPCODE 0x2
+#define SEP_AES_BLOCK_OPCODE 0x3
+#define SEP_AES_FINISH_OPCODE 0x4
+#define SEP_AES_WRAP_OPCODE 0x6
+#define SEP_AES_UNWRAP_OPCODE 0x7
+#define SEP_AES_XTS_FINISH_OPCODE 0x8
+
+#define SEP_HASH_RESULT_SIZE_WORDS 16
+#define SEP_MD5_DIGEST_SIZE_WORDS 4
+#define SEP_MD5_DIGEST_SIZE_BYTES \
+ (SEP_MD5_DIGEST_SIZE_WORDS * sizeof(u32))
+#define SEP_SHA1_DIGEST_SIZE_WORDS 5
+#define SEP_SHA1_DIGEST_SIZE_BYTES \
+ (SEP_SHA1_DIGEST_SIZE_WORDS * sizeof(u32))
+#define SEP_SHA224_DIGEST_SIZE_WORDS 7
+#define SEP_SHA224_DIGEST_SIZE_BYTES \
+ (SEP_SHA224_DIGEST_SIZE_WORDS * sizeof(u32))
+#define SEP_SHA256_DIGEST_SIZE_WORDS 8
+#define SEP_SHA256_DIGEST_SIZE_BYTES \
+ (SEP_SHA256_DIGEST_SIZE_WORDS * sizeof(u32))
+#define SEP_SHA384_DIGEST_SIZE_WORDS 12
+#define SEP_SHA384_DIGEST_SIZE_BYTES \
+ (SEP_SHA384_DIGEST_SIZE_WORDS * sizeof(u32))
+#define SEP_SHA512_DIGEST_SIZE_WORDS 16
+#define SEP_SHA512_DIGEST_SIZE_BYTES \
+ (SEP_SHA512_DIGEST_SIZE_WORDS * sizeof(u32))
+#define SEP_HASH_BLOCK_SIZE_WORDS 16
+#define SEP_HASH_BLOCK_SIZE_BYTES \
+ (SEP_HASH_BLOCK_SIZE_WORDS * sizeof(u32))
+#define SEP_SHA2_BLOCK_SIZE_WORDS 32
+#define SEP_SHA2_BLOCK_SIZE_BYTES \
+ (SEP_SHA2_BLOCK_SIZE_WORDS * sizeof(u32))
+
+#define SEP_HASH_INIT_OPCODE 0x20
+#define SEP_HASH_UPDATE_OPCODE 0x21
+#define SEP_HASH_FINISH_OPCODE 0x22
+#define SEP_HASH_SINGLE_OPCODE 0x23
+
+#define SEP_HOST_ERROR 0x0b000000
+#define SEP_OK 0x0
+#define SEP_INVALID_START (SEP_HOST_ERROR + 0x3)
+#define SEP_WRONG_OPCODE (SEP_HOST_ERROR + 0x1)
+
+#define SEP_TRANSACTION_WAIT_TIME 5
+
+#define SEP_QUEUE_LENGTH 2
+/* Macros */
+#ifndef __LITTLE_ENDIAN
+#define CHG_ENDIAN(val) \
+ (((val) >> 24) | \
+ (((val) & 0x00FF0000) >> 8) | \
+ (((val) & 0x0000FF00) << 8) | \
+ (((val) & 0x000000FF) << 24))
+#else
+#define CHG_ENDIAN(val) val
+#endif
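+/*
+ * Example: on a big-endian host, CHG_ENDIAN(0x02558808) yields
+ * 0x08885502, i.e. the little-endian byte order the SEP expects;
+ * on a little-endian host the value passes through unchanged.
+ */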
+/* Enums for SEP (from vendor) */
+enum des_numkey {
+ DES_KEY_1 = 1,
+ DES_KEY_2 = 2,
+ DES_KEY_3 = 3,
+ SEP_NUMKEY_OPTIONS,
+ SEP_NUMKEY_LAST = 0x7fffffff,
+};
+
+enum des_enc_mode {
+ SEP_DES_ENCRYPT = 0,
+ SEP_DES_DECRYPT = 1,
+ SEP_DES_ENC_OPTIONS,
+ SEP_DES_ENC_LAST = 0x7fffffff,
+};
+
+enum des_op_mode {
+ SEP_DES_ECB = 0,
+ SEP_DES_CBC = 1,
+ SEP_OP_OPTIONS,
+ SEP_OP_LAST = 0x7fffffff,
+};
+
+enum aes_keysize {
+ AES_128 = 0,
+ AES_192 = 1,
+ AES_256 = 2,
+ AES_512 = 3,
+ AES_SIZE_OPTIONS,
+	AES_SIZE_LAST = 0x7FFFFFFF,
+};
+
+enum aes_enc_mode {
+ SEP_AES_ENCRYPT = 0,
+ SEP_AES_DECRYPT = 1,
+ SEP_AES_ENC_OPTIONS,
+ SEP_AES_ENC_LAST = 0x7FFFFFFF,
+};
+
+enum aes_op_mode {
+ SEP_AES_ECB = 0,
+ SEP_AES_CBC = 1,
+ SEP_AES_MAC = 2,
+ SEP_AES_CTR = 3,
+ SEP_AES_XCBC = 4,
+ SEP_AES_CMAC = 5,
+ SEP_AES_XTS = 6,
+ SEP_AES_OP_OPTIONS,
+ SEP_AES_OP_LAST = 0x7FFFFFFF,
+};
+
+enum hash_op_mode {
+ SEP_HASH_SHA1 = 0,
+ SEP_HASH_SHA224 = 1,
+ SEP_HASH_SHA256 = 2,
+ SEP_HASH_SHA384 = 3,
+ SEP_HASH_SHA512 = 4,
+ SEP_HASH_MD5 = 5,
+ SEP_HASH_OPTIONS,
+ SEP_HASH_LAST_MODE = 0x7FFFFFFF,
+};
+
+/* Structures for SEP (from vendor) */
+struct sep_des_internal_key {
+ u32 key1[SEP_DES_KEY_SIZE_WORDS];
+ u32 key2[SEP_DES_KEY_SIZE_WORDS];
+ u32 key3[SEP_DES_KEY_SIZE_WORDS];
+};
+
+struct sep_des_internal_context {
+ u32 iv_context[SEP_DES_IV_SIZE_WORDS];
+ struct sep_des_internal_key context_key;
+ enum des_numkey nbr_keys;
+ enum des_enc_mode encryption;
+ enum des_op_mode operation;
+ u8 dummy_block[SEP_DES_DUMMY_SIZE];
+};
+
+struct sep_des_private_context {
+ u32 valid_tag;
+ u32 iv;
+ u8 ctx_buf[sizeof(struct sep_des_internal_context)];
+};
+
+/* This is the structure passed to SEP via msg area */
+struct sep_des_key {
+ u32 key1[SEP_DES_KEY_SIZE_WORDS];
+ u32 key2[SEP_DES_KEY_SIZE_WORDS];
+ u32 key3[SEP_DES_KEY_SIZE_WORDS];
+ u32 pad[SEP_DES_KEY_SIZE_WORDS];
+};
+
+struct sep_aes_internal_context {
+ u32 aes_ctx_iv[SEP_AES_IV_SIZE_WORDS];
+ u32 aes_ctx_key[SEP_AES_MAX_KEY_SIZE_WORDS / 2];
+ enum aes_keysize keysize;
+ enum aes_enc_mode encmode;
+ enum aes_op_mode opmode;
+ u8 secret_key;
+ u32 no_add_blocks;
+ u32 last_block_size;
+ u32 last_block[SEP_AES_BLOCK_SIZE_WORDS];
+ u32 prev_iv[SEP_AES_BLOCK_SIZE_WORDS];
+ u32 remaining_size;
+ union {
+ struct {
+ u32 dkey1[SEP_AES_BLOCK_SIZE_WORDS];
+ u32 dkey2[SEP_AES_BLOCK_SIZE_WORDS];
+ u32 dkey3[SEP_AES_BLOCK_SIZE_WORDS];
+ } cmac_data;
+ struct {
+ u32 xts_key[SEP_AES_MAX_KEY_SIZE_WORDS / 2];
+ u32 temp1[SEP_AES_BLOCK_SIZE_WORDS];
+ u32 temp2[SEP_AES_BLOCK_SIZE_WORDS];
+ } xtx_data;
+ } s_data;
+ u8 dummy_block[SEP_AES_DUMMY_BLOCK_SIZE];
+};
+
+struct sep_aes_private_context {
+ u32 valid_tag;
+ u32 aes_iv;
+ u32 op_mode;
+ u8 cbuff[sizeof(struct sep_aes_internal_context)];
+};
+
+struct sep_hash_internal_context {
+ u32 hash_result[SEP_HASH_RESULT_SIZE_WORDS];
+ enum hash_op_mode hash_opmode;
+ u32 previous_data[SEP_SHA2_BLOCK_SIZE_WORDS];
+ u16 prev_update_bytes;
+ u32 total_proc_128bit[4];
+ u16 op_mode_block_size;
+ u8 dummy_aes_block[SEP_AES_DUMMY_BLOCK_SIZE];
+};
+
+struct sep_hash_private_context {
+ u32 valid_tag;
+ u32 iv;
+ u8 internal_context[sizeof(struct sep_hash_internal_context)];
+};
+
+union key_t {
+ struct sep_des_key des;
+ u32 aes[SEP_AES_MAX_KEY_SIZE_WORDS];
+};
+
+/* Context structures for crypto API */
+/**
+ * Structure for this current task context
+ * This same structure is used for both hash
+ * and crypt in order to reduce duplicate code
+ * for stuff that is done for both hash operations
+ * and crypto operations. We cannot trust that the
+ * system context is not pulled out from under
+ * us from operation to operation, so all
+ * critical stuff such as data pointers must
+ * be in a context that is exclusive for this
+ * particular task at hand.
+ */
+struct this_task_ctx {
+ struct sep_device *sep_used;
+ u32 done;
+ unsigned char iv[100];
+ enum des_enc_mode des_encmode;
+ enum des_op_mode des_opmode;
+ enum aes_enc_mode aes_encmode;
+ enum aes_op_mode aes_opmode;
+ u32 init_opcode;
+ u32 block_opcode;
+ size_t data_length;
+ size_t ivlen;
+ struct ablkcipher_walk walk;
+ int i_own_sep; /* Do I have custody of the sep? */
+ struct sep_call_status call_status;
+ struct build_dcb_struct_kernel dcb_input_data;
+ struct sep_dma_context *dma_ctx;
+ void *dmatables_region;
+ size_t nbytes;
+ struct sep_dcblock *dcb_region;
+ struct sep_queue_info *queue_elem;
+ int msg_len_words;
+ unsigned char msg[SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES];
+ void *msgptr;
+ struct scatterlist *src_sg;
+ struct scatterlist *dst_sg;
+ struct scatterlist *src_sg_hold;
+ struct scatterlist *dst_sg_hold;
+ struct ahash_request *current_hash_req;
+ struct ablkcipher_request *current_cypher_req;
+ enum type_of_request current_request;
+ int digest_size_words;
+ int digest_size_bytes;
+ int block_size_words;
+ int block_size_bytes;
+ enum hash_op_mode hash_opmode;
+ enum hash_stage current_hash_stage;
+ /**
+ * Note that this is a pointer. The are_we_done_yet variable is
+ * allocated by the task function. This way, even if the kernel
+ * crypto infrastructure has grabbed the task structure out from
+ * under us, the task function can still see this variable.
+ */
+ int *are_we_done_yet;
+ unsigned long end_time;
+ };
+
+struct sep_system_ctx {
+ union key_t key;
+ size_t keylen;
+ int key_sent;
+ enum des_numkey des_nbr_keys;
+ enum aes_keysize aes_key_size;
+ unsigned long end_time;
+ struct sep_des_private_context des_private_ctx;
+ struct sep_aes_private_context aes_private_ctx;
+ struct sep_hash_private_context hash_private_ctx;
+ };
+
+/* work queue structures */
+struct sep_work_struct {
+ struct work_struct work;
+ void (*callback)(void *);
+ void *data;
+ };
+
+/* Functions */
+int sep_crypto_setup(void);
+void sep_crypto_takedown(void);
diff --git a/drivers/staging/sep/sep_dev.h b/drivers/staging/sep/sep_dev.h
index 696ab0dd2b79..5f6a07f59dd7 100644
--- a/drivers/staging/sep/sep_dev.h
+++ b/drivers/staging/sep/sep_dev.h
@@ -5,8 +5,8 @@
*
* sep_dev.h - Security Processor Device Structures
*
- * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
- * Contributions(c) 2009,2010 Discretix. All rights reserved.
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2011 Discretix. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -28,6 +28,7 @@
*
* CHANGES
* 2010.09.14 upgrade to Medfield
+ * 2011.02.22 enable kernel crypto
*/
struct sep_device {
@@ -36,33 +37,21 @@ struct sep_device {
/* character device file */
struct cdev sep_cdev;
- struct cdev sep_daemon_cdev;
- struct cdev sep_singleton_cdev;
/* devices (using misc dev) */
struct miscdevice miscdev_sep;
- struct miscdevice miscdev_singleton;
- struct miscdevice miscdev_daemon;
/* major / minor numbers of device */
dev_t sep_devno;
- dev_t sep_daemon_devno;
- dev_t sep_singleton_devno;
-
- struct mutex sep_mutex;
- struct mutex ioctl_mutex;
+ /* guards command sent counter */
spinlock_t snd_rply_lck;
+ /* guards driver memory usage in fastcall interface */
+ struct semaphore sep_doublebuf;
/* flags to indicate use and lock status of sep */
u32 pid_doing_transaction;
unsigned long in_use_flags;
- /* request daemon alread open */
- unsigned long request_daemon_open;
-
- /* 1 = Moorestown; 0 = Medfield */
- int mrst;
-
/* address of the shared memory allocated during init for SEP driver
(coherent alloc) */
dma_addr_t shared_bus;
@@ -74,36 +63,77 @@ struct sep_device {
dma_addr_t reg_physical_end;
void __iomem *reg_addr;
- /* wait queue head (event) of the driver */
- wait_queue_head_t event;
- wait_queue_head_t event_request_daemon;
- wait_queue_head_t event_mmap;
+ /* wait queue heads of the driver */
+ wait_queue_head_t event_interrupt;
+ wait_queue_head_t event_transactions;
- struct sep_caller_id_entry
- caller_id_table[SEP_CALLER_ID_TABLE_NUM_ENTRIES];
+ struct list_head sep_queue_status;
+ u32 sep_queue_num;
+ spinlock_t sep_queue_lock;
- /* access flag for singleton device */
- unsigned long singleton_access_flag;
+ /* Is this in use? */
+ u32 in_use;
+
+ /* indicates whether power save is set up */
+ u32 power_save_setup;
+
+ /* Power state */
+ u32 power_state;
/* transaction counter that coordinates the
transactions between SEP and HOST */
unsigned long send_ct;
/* counter for the messages from sep */
unsigned long reply_ct;
- /* counter for the number of bytes allocated in the pool for the
- current transaction */
- long data_pool_bytes_allocated;
- u32 num_of_data_allocations;
+ /* The following are used for kernel crypto client requests */
+ u32 in_kernel; /* Set for kernel client request */
+ struct tasklet_struct finish_tasklet;
+ enum type_of_request current_request;
+ enum hash_stage current_hash_stage;
+ struct ahash_request *current_hash_req;
+ struct ablkcipher_request *current_cypher_req;
+ struct this_task_ctx *ta_ctx;
+ struct workqueue_struct *workqueue;
+};
- /* number of the lli tables created in the current transaction */
- u32 num_lli_tables_created;
+extern struct sep_device *sep_dev;
- /* number of data control blocks */
- u32 nr_dcb_creat;
+/**
+ * struct sep_msgarea_hdr - SEP message header for a transaction
+ * @reserved: reserved memory (two words)
+ * @token: SEP message token
+ * @msg_len: message length
+ * @opcode: message opcode
+ */
+struct sep_msgarea_hdr {
+ u32 reserved[2];
+ u32 token;
+ u32 msg_len;
+ u32 opcode;
+};
- struct sep_dma_resource dma_res_arr[SEP_MAX_NUM_SYNC_DMA_OPS];
+/**
+ * struct sep_queue_data - data to be maintained in status queue for a transaction
+ * @opcode: transaction opcode
+ * @size: message size
+ * @pid: owner process
+ * @name: owner process name
+ */
+struct sep_queue_data {
+ u32 opcode;
+ u32 size;
+ s32 pid;
+ u8 name[TASK_COMM_LEN];
+};
+/**
+ * struct sep_queue_info - maintains status info of all transactions
+ * @list: head of list
+ * @data: data for this transaction
+ */
+struct sep_queue_info {
+ struct list_head list;
+ struct sep_queue_data data;
};
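+/*
+ * Illustrative usage sketch (not part of the original header): a
+ * transaction would be tracked roughly as
+ *
+ *	struct sep_queue_info *q = kzalloc(sizeof(*q), GFP_KERNEL);
+ *	q->data.opcode = opcode;
+ *	q->data.size = size;
+ *	q->data.pid = current->pid;
+ *	memcpy(q->data.name, current->comm, TASK_COMM_LEN);
+ *	spin_lock_irqsave(&sep->sep_queue_lock, flags);
+ *	list_add_tail(&q->list, &sep->sep_queue_status);
+ *	sep->sep_queue_num++;
+ *	spin_unlock_irqrestore(&sep->sep_queue_lock, flags);
+ */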
static inline void sep_write_reg(struct sep_device *dev, int reg, u32 value)
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
deleted file mode 100644
index 6b3d156d4140..000000000000
--- a/drivers/staging/sep/sep_driver.c
+++ /dev/null
@@ -1,2932 +0,0 @@
-/*
- *
- * sep_driver.c - Security Processor Driver main group of functions
- *
- * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
- * Contributions(c) 2009,2010 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Mark Allyn mark.a.allyn@intel.com
- * Jayant Mangalampalli jayant.mangalampalli@intel.com
- *
- * CHANGES:
- *
- * 2009.06.26 Initial publish
- * 2010.09.14 Upgrade to Medfield
- *
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/miscdevice.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/kdev_t.h>
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/poll.h>
-#include <linux/wait.h>
-#include <linux/pci.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <linux/ioctl.h>
-#include <asm/current.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/pagemap.h>
-#include <asm/cacheflush.h>
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/rar_register.h>
-
-#include "sep_driver_hw_defs.h"
-#include "sep_driver_config.h"
-#include "sep_driver_api.h"
-#include "sep_dev.h"
-
-/*----------------------------------------
- DEFINES
------------------------------------------*/
-
-#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
-
-/*--------------------------------------------
- GLOBAL variables
---------------------------------------------*/
-
-/* Keep this a single static object for now to keep the conversion easy */
-
-static struct sep_device *sep_dev;
-
-/**
- * sep_dump_message - dump the message that is pending
- * @sep: SEP device
- */
-static void sep_dump_message(struct sep_device *sep)
-{
- int count;
- u32 *p = sep->shared_addr;
- for (count = 0; count < 12 * 4; count += 4)
- dev_dbg(&sep->pdev->dev, "Word %d of the message is %x\n",
- count, *p++);
-}
-
-/**
- * sep_map_and_alloc_shared_area - allocate shared block
- * @sep: security processor
- */
-static int sep_map_and_alloc_shared_area(struct sep_device *sep)
-{
- sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
- sep->shared_size,
- &sep->shared_bus, GFP_KERNEL);
-
- if (!sep->shared_addr) {
- dev_warn(&sep->pdev->dev,
- "shared memory dma_alloc_coherent failed\n");
- return -ENOMEM;
- }
- dev_dbg(&sep->pdev->dev,
- "shared_addr %zx bytes @%p (bus %llx)\n",
- sep->shared_size, sep->shared_addr,
- (unsigned long long)sep->shared_bus);
- return 0;
-}
-
-/**
- * sep_unmap_and_free_shared_area - free shared block
- * @sep: security processor
- */
-static void sep_unmap_and_free_shared_area(struct sep_device *sep)
-{
- dma_free_coherent(&sep->pdev->dev, sep->shared_size,
- sep->shared_addr, sep->shared_bus);
-}
-
-/**
- * sep_shared_bus_to_virt - convert bus/virt addresses
- * @sep: pointer to struct sep_device
- * @bus_address: address to convert
- *
- * Returns virtual address inside the shared area according
- * to the bus address.
- */
-static void *sep_shared_bus_to_virt(struct sep_device *sep,
- dma_addr_t bus_address)
-{
- return sep->shared_addr + (bus_address - sep->shared_bus);
-}
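-/*
- * Added note: this is simple offset arithmetic; if shared_bus were
- * 0x1000 and shared_addr its kernel mapping, then
- * sep_shared_bus_to_virt(sep, 0x1040) would return shared_addr + 0x40.
- */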
-
-/**
- * sep_singleton_open - open the singleton device
- * @inode_ptr: struct inode *
- * @file_ptr: struct file *
- *
- * Called when the user opens the singleton device interface
- */
-static int sep_singleton_open(struct inode *inode_ptr, struct file *file_ptr)
-{
- struct sep_device *sep;
-
- /*
- * Get the SEP device structure and use it for the
- * private_data field in filp for other methods
- */
- sep = sep_dev;
-
- file_ptr->private_data = sep;
-
- if (test_and_set_bit(0, &sep->singleton_access_flag))
- return -EBUSY;
- return 0;
-}
-
-/**
- * sep_open - device open method
- * @inode: inode of SEP device
- * @filp: file handle to SEP device
- *
- * Open method for the SEP device. Called when userspace opens
- * the SEP device node.
- *
- * Returns zero on success otherwise an error code.
- */
-static int sep_open(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep;
-
- /*
- * Get the SEP device structure and use it for the
- * private_data field in filp for other methods
- */
- sep = sep_dev;
- filp->private_data = sep;
-
- /* Anyone can open; locking takes place at transaction level */
- return 0;
-}
-
-/**
- * sep_singleton_release - close a SEP singleton device
- * @inode: inode of SEP device
- * @filp: file handle being closed
- *
- * Called on the final close of a SEP device. As the open protects against
- * multiple simultaneous opens, this method is called when the
- * final reference to the open handle is dropped.
- */
-static int sep_singleton_release(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep = filp->private_data;
-
- clear_bit(0, &sep->singleton_access_flag);
- return 0;
-}
-
-/**
- * sep_request_daemon_open - request daemon open method
- * @inode: inode of SEP device
- * @filp: file handle to SEP device
- *
- * Open method for the SEP request daemon. Called when
- * request daemon in userspace opens the SEP device node.
- *
- * Returns zero on success otherwise an error code.
- */
-static int sep_request_daemon_open(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep = sep_dev;
- int error = 0;
-
- filp->private_data = sep;
-
- /* There is supposed to be only one request daemon */
- if (test_and_set_bit(0, &sep->request_daemon_open))
- error = -EBUSY;
- return error;
-}
-
-/**
- * sep_request_daemon_release - close a SEP daemon
- * @inode: inode of SEP device
- * @filp: file handle being closed
- *
- * Called on the final close of a SEP daemon.
- */
-static int sep_request_daemon_release(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep = filp->private_data;
-
- dev_dbg(&sep->pdev->dev, "Request daemon release for pid %d\n",
- current->pid);
-
- /* Clear the request_daemon_open flag */
- clear_bit(0, &sep->request_daemon_open);
- return 0;
-}
-
-/**
- * sep_req_daemon_send_reply_command_handler - poke the SEP
- * @sep: struct sep_device *
- *
- * This function raises an interrupt to the SEP signalling that it has a
- * new command from the HOST
- */
-static int sep_req_daemon_send_reply_command_handler(struct sep_device *sep)
-{
- unsigned long lck_flags;
-
- sep_dump_message(sep);
-
- /* Counters are lockable region */
- spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
- sep->send_ct++;
- sep->reply_ct++;
-
- /* Send the interrupt to SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
- sep->send_ct++;
-
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
-
- dev_dbg(&sep->pdev->dev,
- "sep_req_daemon_send_reply send_ct %lx reply_ct %lx\n",
- sep->send_ct, sep->reply_ct);
-
- return 0;
-}
-
-
-/**
- * sep_free_dma_table_data_handler - free DMA table
- * @sep: pointer to struct sep_device
- *
- * Handles the request to free the DMA table for synchronous actions
- */
-static int sep_free_dma_table_data_handler(struct sep_device *sep)
-{
- int count;
- int dcb_counter;
- /* Pointer to the current dma_resource struct */
- struct sep_dma_resource *dma;
-
- for (dcb_counter = 0; dcb_counter < sep->nr_dcb_creat; dcb_counter++) {
- dma = &sep->dma_res_arr[dcb_counter];
-
- /* Unmap and free input map array */
- if (dma->in_map_array) {
- for (count = 0; count < dma->in_num_pages; count++) {
- dma_unmap_page(&sep->pdev->dev,
- dma->in_map_array[count].dma_addr,
- dma->in_map_array[count].size,
- DMA_TO_DEVICE);
- }
- kfree(dma->in_map_array);
- }
-
- /* Unmap output map array, DON'T free it yet */
- if (dma->out_map_array) {
- for (count = 0; count < dma->out_num_pages; count++) {
- dma_unmap_page(&sep->pdev->dev,
- dma->out_map_array[count].dma_addr,
- dma->out_map_array[count].size,
- DMA_FROM_DEVICE);
- }
- kfree(dma->out_map_array);
- }
-
- /* Free page cache for output */
- if (dma->in_page_array) {
- for (count = 0; count < dma->in_num_pages; count++) {
- flush_dcache_page(dma->in_page_array[count]);
- page_cache_release(dma->in_page_array[count]);
- }
- kfree(dma->in_page_array);
- }
-
- if (dma->out_page_array) {
- for (count = 0; count < dma->out_num_pages; count++) {
- if (!PageReserved(dma->out_page_array[count]))
- SetPageDirty(dma->out_page_array[count]);
- flush_dcache_page(dma->out_page_array[count]);
- page_cache_release(dma->out_page_array[count]);
- }
- kfree(dma->out_page_array);
- }
-
- /* Reset all the values */
- dma->in_page_array = NULL;
- dma->out_page_array = NULL;
- dma->in_num_pages = 0;
- dma->out_num_pages = 0;
- dma->in_map_array = NULL;
- dma->out_map_array = NULL;
- dma->in_map_num_entries = 0;
- dma->out_map_num_entries = 0;
- }
-
- sep->nr_dcb_creat = 0;
- sep->num_lli_tables_created = 0;
-
- return 0;
-}
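-/*
- * Added note: output pages are marked dirty before page_cache_release()
- * because the device may have written to them by DMA; skipping
- * SetPageDirty() could silently lose data on file-backed mappings.
- */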
-
-/**
- * sep_request_daemon_mmap - maps the shared area to user space
- * @filp: pointer to struct file
- * @vma: pointer to vm_area_struct
- *
- * Called by the kernel when the daemon attempts an mmap() syscall
- * using our handle.
- */
-static int sep_request_daemon_mmap(struct file *filp,
- struct vm_area_struct *vma)
-{
- struct sep_device *sep = filp->private_data;
- dma_addr_t bus_address;
- int error = 0;
-
- if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
- error = -EINVAL;
- goto end_function;
- }
-
- /* Get physical address */
- bus_address = sep->shared_bus;
-
- if (remap_pfn_range(vma, vma->vm_start, bus_address >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
-
- dev_warn(&sep->pdev->dev, "remap_page_range failed\n");
- error = -EAGAIN;
- goto end_function;
- }
-
-end_function:
- return error;
-}
-
-/**
- * sep_request_daemon_poll - poll implementation
- * @sep: struct sep_device * for current SEP device
- * @filp: struct file * for open file
- * @wait: poll_table * for poll
- *
- * Called when our device is part of a poll() or select() syscall
- */
-static unsigned int sep_request_daemon_poll(struct file *filp,
- poll_table *wait)
-{
- u32 mask = 0;
- /* GPR2 register */
- u32 retval2;
- unsigned long lck_flags;
- struct sep_device *sep = filp->private_data;
-
- poll_wait(filp, &sep->event_request_daemon, wait);
-
- dev_dbg(&sep->pdev->dev, "daemon poll: send_ct is %lx reply ct is %lx\n",
- sep->send_ct, sep->reply_ct);
-
- spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
- /* Check if the data is ready */
- if (sep->send_ct == sep->reply_ct) {
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
-
- retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
- dev_dbg(&sep->pdev->dev,
- "daemon poll: data check (GPR2) is %x\n", retval2);
-
- /* Check if PRINT request */
- if ((retval2 >> 30) & 0x1) {
- dev_dbg(&sep->pdev->dev, "daemon poll: PRINTF request in\n");
- mask |= POLLIN;
- goto end_function;
- }
- /* Check if NVS request */
- if (retval2 >> 31) {
- dev_dbg(&sep->pdev->dev, "daemon poll: NVS request in\n");
- mask |= POLLPRI | POLLWRNORM;
- }
- } else {
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
- dev_dbg(&sep->pdev->dev,
- "daemon poll: no reply received; returning 0\n");
- mask = 0;
- }
-end_function:
- return mask;
-}
-
-/**
- * sep_release - close a SEP device
- * @inode: inode of SEP device
- * @filp: file handle being closed
- *
- * Called on the final close of a SEP device.
- */
-static int sep_release(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep = filp->private_data;
-
- dev_dbg(&sep->pdev->dev, "Release for pid %d\n", current->pid);
-
- mutex_lock(&sep->sep_mutex);
- /* Is this the process that has a transaction open?
- * If so, let's reset pid_doing_transaction to 0 and
- * clear the in use flags, and then wake up sep_event
- * so that other processes can do transactions
- */
- if (sep->pid_doing_transaction == current->pid) {
- clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
- clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
- sep_free_dma_table_data_handler(sep);
- wake_up(&sep->event);
- sep->pid_doing_transaction = 0;
- }
-
- mutex_unlock(&sep->sep_mutex);
- return 0;
-}
-
-/**
- * sep_mmap - maps the shared area to user space
- * @filp: pointer to struct file
- * @vma: pointer to vm_area_struct
- *
- * Called on an mmap of our space via the normal SEP device
- */
-static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- dma_addr_t bus_addr;
- struct sep_device *sep = filp->private_data;
- unsigned long error = 0;
-
- /* Set the transaction busy (own the device) */
- wait_event_interruptible(sep->event,
- test_and_set_bit(SEP_MMAP_LOCK_BIT,
- &sep->in_use_flags) == 0);
-
- if (signal_pending(current)) {
- error = -EINTR;
- goto end_function_with_error;
- }
- /*
- * The pid_doing_transaction indicates that this process
- * now owns the facilities to perform a transaction with
- * the SEP. While this process is performing a transaction,
- * no other process who has the SEP device open can perform
- * any transactions. This method allows more than one process
- * to have the device open at any given time, which provides
- * finer granularity for device utilization by multiple
- * processes.
- */
- mutex_lock(&sep->sep_mutex);
- sep->pid_doing_transaction = current->pid;
- mutex_unlock(&sep->sep_mutex);
-
- /* Zero the pools and the number of data pool allocation pointers */
- sep->data_pool_bytes_allocated = 0;
- sep->num_of_data_allocations = 0;
-
- /*
- * Check that the size of the mapped range does not exceed the size of
- * the message shared area
- */
- if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
- error = -EINVAL;
- goto end_function_with_error;
- }
-
- dev_dbg(&sep->pdev->dev, "shared_addr is %p\n", sep->shared_addr);
-
- /* Get bus address */
- bus_addr = sep->shared_bus;
-
- if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
- dev_warn(&sep->pdev->dev, "remap_page_range failed\n");
- error = -EAGAIN;
- goto end_function_with_error;
- }
- goto end_function;
-
-end_function_with_error:
- /* Clear the bit */
- clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
- mutex_lock(&sep->sep_mutex);
- sep->pid_doing_transaction = 0;
- mutex_unlock(&sep->sep_mutex);
-
- /* Raise event for stuck contexts */
-
- wake_up(&sep->event);
-
-end_function:
- return error;
-}
-
-/**
- * sep_poll - poll handler
- * @filp: pointer to struct file
- * @wait: pointer to poll_table
- *
- * Called by the OS when the kernel is asked to do a poll on
- * a SEP file handle.
- */
-static unsigned int sep_poll(struct file *filp, poll_table *wait)
-{
- u32 mask = 0;
- u32 retval = 0;
- u32 retval2 = 0;
- unsigned long lck_flags;
-
- struct sep_device *sep = filp->private_data;
-
- /* Am I the process that owns the transaction? */
- mutex_lock(&sep->sep_mutex);
- if (current->pid != sep->pid_doing_transaction) {
- dev_dbg(&sep->pdev->dev, "poll; wrong pid\n");
- mask = POLLERR;
- mutex_unlock(&sep->sep_mutex);
- goto end_function;
- }
- mutex_unlock(&sep->sep_mutex);
-
- /* Check if send command or send_reply were activated previously */
- if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
- mask = POLLERR;
- goto end_function;
- }
-
- /* Add the event to the polling wait table */
- dev_dbg(&sep->pdev->dev, "poll: calling wait sep_event\n");
-
- poll_wait(filp, &sep->event, wait);
-
- dev_dbg(&sep->pdev->dev, "poll: send_ct is %lx reply ct is %lx\n",
- sep->send_ct, sep->reply_ct);
-
- /* Check if error occurred during poll */
- retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
- if (retval2 != 0x0) {
- dev_warn(&sep->pdev->dev, "poll; poll error %x\n", retval2);
- mask |= POLLERR;
- goto end_function;
- }
-
- spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
-
- if (sep->send_ct == sep->reply_ct) {
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
- dev_dbg(&sep->pdev->dev, "poll: data ready check (GPR2) %x\n",
- retval);
-
- /* Check if printf request */
- if ((retval >> 30) & 0x1) {
- dev_dbg(&sep->pdev->dev, "poll: SEP printf request\n");
- wake_up(&sep->event_request_daemon);
- goto end_function;
- }
-
- /* Check if this is a SEP reply or a request */
- if (retval >> 31) {
- dev_dbg(&sep->pdev->dev, "poll: SEP request\n");
- wake_up(&sep->event_request_daemon);
- } else {
- dev_dbg(&sep->pdev->dev, "poll: normal return\n");
- /* In case it is set again by send_reply_command */
- clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
- sep_dump_message(sep);
- dev_dbg(&sep->pdev->dev,
- "poll; SEP reply POLLIN | POLLRDNORM\n");
- mask |= POLLIN | POLLRDNORM;
- }
- } else {
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
- dev_dbg(&sep->pdev->dev,
- "poll; no reply received; returning mask of 0\n");
- mask = 0;
- }
-
-end_function:
- return mask;
-}
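-/*
- * Added summary, inferred from the checks above: GPR2 bit 30 flags a
- * SEP printf request and bit 31 a request bound for the daemon; any
- * other value seen while send_ct == reply_ct is treated as the reply
- * to the host and reported as POLLIN | POLLRDNORM.
- */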
-
-/**
- * sep_time_address - address in SEP memory of time
- * @sep: SEP device we want the address from
- *
- * Return the address of the two dwords in memory used for time
- * setting.
- */
-static u32 *sep_time_address(struct sep_device *sep)
-{
- return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
-}
-
-/**
- * sep_set_time - set the SEP time
- * @sep: the SEP we are setting the time for
- *
- * Calculates time and sets it at the predefined address.
- * Called with the SEP mutex held.
- */
-static unsigned long sep_set_time(struct sep_device *sep)
-{
- struct timeval time;
- u32 *time_addr; /* Address of time as seen by the kernel */
-
-
- do_gettimeofday(&time);
-
- /* Set value in the SYSTEM MEMORY offset */
- time_addr = sep_time_address(sep);
-
- time_addr[0] = SEP_TIME_VAL_TOKEN;
- time_addr[1] = time.tv_sec;
-
- dev_dbg(&sep->pdev->dev, "time.tv_sec is %lu\n", time.tv_sec);
- dev_dbg(&sep->pdev->dev, "time_addr is %p\n", time_addr);
- dev_dbg(&sep->pdev->dev, "sep->shared_addr is %p\n", sep->shared_addr);
-
- return time.tv_sec;
-}
-
-/**
- * sep_set_caller_id_handler - insert caller id entry
- * @sep: SEP device
- * @arg: pointer to struct caller_id_struct
- *
- * Inserts the data into the caller id table. Note that this function
- * falls under the ioctl lock
- */
-static int sep_set_caller_id_handler(struct sep_device *sep, unsigned long arg)
-{
- void __user *hash;
- int error = 0;
- int i;
- struct caller_id_struct command_args;
-
- for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
- if (sep->caller_id_table[i].pid == 0)
- break;
- }
-
- if (i == SEP_CALLER_ID_TABLE_NUM_ENTRIES) {
- dev_dbg(&sep->pdev->dev, "no more caller id entries left\n");
- dev_dbg(&sep->pdev->dev, "maximum number is %d\n",
- SEP_CALLER_ID_TABLE_NUM_ENTRIES);
- error = -EUSERS;
- goto end_function;
- }
-
- /* Copy the data */
- if (copy_from_user(&command_args, (void __user *)arg,
- sizeof(command_args))) {
- error = -EFAULT;
- goto end_function;
- }
-
- hash = (void __user *)(unsigned long)command_args.callerIdAddress;
-
- if (!command_args.pid || !command_args.callerIdSizeInBytes) {
- error = -EINVAL;
- goto end_function;
- }
-
- dev_dbg(&sep->pdev->dev, "pid is %x\n", command_args.pid);
- dev_dbg(&sep->pdev->dev, "callerIdSizeInBytes is %x\n",
- command_args.callerIdSizeInBytes);
-
- if (command_args.callerIdSizeInBytes >
- SEP_CALLER_ID_HASH_SIZE_IN_BYTES) {
- error = -EMSGSIZE;
- goto end_function;
- }
-
- sep->caller_id_table[i].pid = command_args.pid;
-
- if (copy_from_user(sep->caller_id_table[i].callerIdHash,
- hash, command_args.callerIdSizeInBytes))
- error = -EFAULT;
-end_function:
- return error;
-}
-
-/**
- * sep_set_current_caller_id - set the caller id
- * @sep: pointer to struct_sep_device
- *
- * Set the caller ID (if it exists) to the SEP. Note that this
- * function falls under the ioctl lock
- */
-static int sep_set_current_caller_id(struct sep_device *sep)
-{
- int i;
- u32 *hash_buf_ptr;
-
- /* Zero the previous value */
- memset(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
- 0, SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
-
- for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
- if (sep->caller_id_table[i].pid == current->pid) {
- dev_dbg(&sep->pdev->dev, "Caller Id found\n");
-
- memcpy(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
- (void *)(sep->caller_id_table[i].callerIdHash),
- SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
- break;
- }
- }
- /* Ensure data is in little endian */
- hash_buf_ptr = (u32 *)sep->shared_addr +
- SEP_CALLER_ID_OFFSET_BYTES;
-
- for (i = 0; i < SEP_CALLER_ID_HASH_SIZE_IN_WORDS; i++)
- hash_buf_ptr[i] = cpu_to_le32(hash_buf_ptr[i]);
-
- return 0;
-}
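-/*
- * Added note: on little-endian hosts the cpu_to_le32() pass above is a
- * no-op; it only byte-swaps when the kernel is built big-endian, which
- * keeps the hash in the little-endian layout the 32-bit SEP expects.
- */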
-
-/**
- * sep_send_command_handler - kick off a command
- * @sep: SEP being signalled
- *
- * This function raises an interrupt to SEP that signals that it has a new
- * command from the host
- *
- * Note that this function does fall under the ioctl lock
- */
-static int sep_send_command_handler(struct sep_device *sep)
-{
- unsigned long lck_flags;
- int error = 0;
-
- if (test_and_set_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
- error = -EPROTO;
- goto end_function;
- }
- sep_set_time(sep);
-
- sep_set_current_caller_id(sep);
-
- sep_dump_message(sep);
-
- /* Update counter */
- spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
- sep->send_ct++;
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
-
- dev_dbg(&sep->pdev->dev,
- "sep_send_command_handler send_ct %lx reply_ct %lx\n",
- sep->send_ct, sep->reply_ct);
-
- /* Send interrupt to SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
-
-end_function:
- return error;
-}
-
-/**
- * sep_allocate_data_pool_memory_handler - allocate pool memory
- * @sep: pointer to struct sep_device
- * @arg: pointer to struct alloc_struct
- *
- * This function handles the allocate data pool memory request
- * This function calculates the bus address of the
- * allocated memory, and the offset of this area from the mapped address.
- * Therefore, the FVOs in user space can calculate the exact virtual
- * address of this allocated memory
- */
-static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error = 0;
- struct alloc_struct command_args;
-
- /* Holds the allocated buffer address in the system memory pool */
- u32 *token_addr;
-
- if (copy_from_user(&command_args, (void __user *)arg,
- sizeof(struct alloc_struct))) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* Allocate memory */
- if ((sep->data_pool_bytes_allocated + command_args.num_bytes) >
- SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
- error = -ENOMEM;
- goto end_function;
- }
-
- dev_dbg(&sep->pdev->dev,
- "data pool bytes_allocated: %x\n", (int)sep->data_pool_bytes_allocated);
- dev_dbg(&sep->pdev->dev,
- "offset: %x\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES);
- /* Set the virtual and bus address */
- command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
- sep->data_pool_bytes_allocated;
-
- /* Place in the shared area that is known by the SEP */
- token_addr = (u32 *)(sep->shared_addr +
- SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES +
- (sep->num_of_data_allocations)*2*sizeof(u32));
-
- token_addr[0] = SEP_DATA_POOL_POINTERS_VAL_TOKEN;
- token_addr[1] = (u32)sep->shared_bus +
- SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
- sep->data_pool_bytes_allocated;
-
- /* Write the memory back to the user space */
- error = copy_to_user((void *)arg, (void *)&command_args,
- sizeof(struct alloc_struct));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* Update the allocation */
- sep->data_pool_bytes_allocated += command_args.num_bytes;
- sep->num_of_data_allocations += 1;
-
-end_function:
- return error;
-}
-
-/**
- * sep_lock_kernel_pages - map kernel pages for DMA
- * @sep: pointer to struct sep_device
- * @kernel_virt_addr: address of data buffer in kernel
- * @data_size: size of data
- * @lli_array_ptr: lli array
- * @in_out_flag: input into device or output from device
- *
- * This function locks all the physical pages of the kernel virtual buffer
- * and construct a basic lli array, where each entry holds the physical
- * page address and the size that application data holds in this page
- * This function is used only during kernel crypto mod calls from within
- * the kernel (when ioctl is not used)
- */
-static int sep_lock_kernel_pages(struct sep_device *sep,
- unsigned long kernel_virt_addr,
- u32 data_size,
- struct sep_lli_entry **lli_array_ptr,
- int in_out_flag)
-
-{
- int error = 0;
- /* Array of lli */
- struct sep_lli_entry *lli_array;
- /* Map array */
- struct sep_dma_map *map_array;
-
- dev_dbg(&sep->pdev->dev, "lock kernel pages kernel_virt_addr is %08lx\n",
- (unsigned long)kernel_virt_addr);
- dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
-
- lli_array = kmalloc(sizeof(struct sep_lli_entry), GFP_ATOMIC);
- if (!lli_array) {
- error = -ENOMEM;
- goto end_function;
- }
- map_array = kmalloc(sizeof(struct sep_dma_map), GFP_ATOMIC);
- if (!map_array) {
- error = -ENOMEM;
- goto end_function_with_error;
- }
-
- map_array[0].dma_addr =
- dma_map_single(&sep->pdev->dev, (void *)kernel_virt_addr,
- data_size, DMA_BIDIRECTIONAL);
- map_array[0].size = data_size;
-
-
- /*
- * Set the start address of the first page - app data may not start at
- * the beginning of the page
- */
- lli_array[0].bus_address = (u32)map_array[0].dma_addr;
- lli_array[0].block_size = map_array[0].size;
-
- dev_dbg(&sep->pdev->dev,
- "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
- (unsigned long)lli_array[0].bus_address,
- lli_array[0].block_size);
-
- /* Set the output parameters */
- if (in_out_flag == SEP_DRIVER_IN_FLAG) {
- *lli_array_ptr = lli_array;
- sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 1;
- sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
- sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
- sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries = 1;
- } else {
- *lli_array_ptr = lli_array;
- sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = 1;
- sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
- sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
- sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries = 1;
- }
- goto end_function;
-
-end_function_with_error:
- kfree(lli_array);
-
-end_function:
- return error;
-}
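-/*
- * Added note: unlike the user-space path below, the kernel buffer is
- * mapped with a single dma_map_single() call, so the resulting LLI
- * array always has exactly one entry covering the whole buffer.
- */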
-
-/**
- * sep_lock_user_pages - lock and map user pages for DMA
- * @sep: pointer to struct sep_device
- * @app_virt_addr: user memory data buffer
- * @data_size: size of data buffer
- * @lli_array_ptr: lli array
- * @in_out_flag: input or output to device
- *
- * This function locks all the physical pages of the application
- * virtual buffer and construct a basic lli array, where each entry
- * holds the physical page address and the size that application
- * data holds in this physical pages
- */
-static int sep_lock_user_pages(struct sep_device *sep,
- u32 app_virt_addr,
- u32 data_size,
- struct sep_lli_entry **lli_array_ptr,
- int in_out_flag)
-
-{
- int error = 0;
- u32 count;
- int result;
- /* The page of the end address of the user space buffer */
- u32 end_page;
- /* The page of the start address of the user space buffer */
- u32 start_page;
- /* The range in pages */
- u32 num_pages;
- /* Array of pointers to page */
- struct page **page_array;
- /* Array of lli */
- struct sep_lli_entry *lli_array;
- /* Map array */
- struct sep_dma_map *map_array;
- /* Direction of the DMA mapping for locked pages */
- enum dma_data_direction dir;
-
- /* Set start and end pages and num pages */
- end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
- start_page = app_virt_addr >> PAGE_SHIFT;
- num_pages = end_page - start_page + 1;
-
- dev_dbg(&sep->pdev->dev, "lock user pages app_virt_addr is %x\n", app_virt_addr);
- dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
- dev_dbg(&sep->pdev->dev, "start_page is %x\n", start_page);
- dev_dbg(&sep->pdev->dev, "end_page is %x\n", end_page);
- dev_dbg(&sep->pdev->dev, "num_pages is %x\n", num_pages);
-
- /* Allocate array of pages structure pointers */
- page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
- if (!page_array) {
- error = -ENOMEM;
- goto end_function;
- }
- map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
- if (!map_array) {
- dev_warn(&sep->pdev->dev, "kmalloc for map_array failed\n");
- error = -ENOMEM;
- goto end_function_with_error1;
- }
-
- lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
- GFP_ATOMIC);
-
- if (!lli_array) {
- dev_warn(&sep->pdev->dev, "kmalloc for lli_array failed\n");
- error = -ENOMEM;
- goto end_function_with_error2;
- }
-
- /* Convert the application virtual address into a set of physical pages */
- down_read(&current->mm->mmap_sem);
- result = get_user_pages(current, current->mm, app_virt_addr,
- num_pages,
- ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
- 0, page_array, NULL);
-
- up_read(&current->mm->mmap_sem);
-
- /* Check the number of pages locked - if not all then exit with error */
- if (result != num_pages) {
- dev_warn(&sep->pdev->dev,
- "not all pages locked by get_user_pages\n");
- error = -ENOMEM;
- goto end_function_with_error3;
- }
-
- dev_dbg(&sep->pdev->dev, "get_user_pages succeeded\n");
-
- /* Set direction */
- if (in_out_flag == SEP_DRIVER_IN_FLAG)
- dir = DMA_TO_DEVICE;
- else
- dir = DMA_FROM_DEVICE;
-
- /*
- * Fill the array using page array data and
- * map the pages - this action will also flush the cache as needed
- */
- for (count = 0; count < num_pages; count++) {
- /* Fill the map array */
- map_array[count].dma_addr =
- dma_map_page(&sep->pdev->dev, page_array[count],
- 0, PAGE_SIZE, /*dir*/DMA_BIDIRECTIONAL);
-
- map_array[count].size = PAGE_SIZE;
-
- /* Fill the lli array entry */
- lli_array[count].bus_address = (u32)map_array[count].dma_addr;
- lli_array[count].block_size = PAGE_SIZE;
-
- dev_warn(&sep->pdev->dev, "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
- count, (unsigned long)lli_array[count].bus_address,
- count, lli_array[count].block_size);
- }
-
- /* Check the offset for the first page */
- lli_array[0].bus_address =
- lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
-
- /* Check that not all the data is in the first page only */
- if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
- lli_array[0].block_size = data_size;
- else
- lli_array[0].block_size =
- PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
-
- dev_dbg(&sep->pdev->dev,
- "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
- (unsigned long)lli_array[0].bus_address,
- lli_array[0].block_size);
-
- /* Check the size of the last page */
- if (num_pages > 1) {
- lli_array[num_pages - 1].block_size =
- (app_virt_addr + data_size) & (~PAGE_MASK);
- if (lli_array[num_pages - 1].block_size == 0)
- lli_array[num_pages - 1].block_size = PAGE_SIZE;
-
- dev_warn(&sep->pdev->dev,
- "lli_array[%x].bus_address is "
- "%08lx, lli_array[%x].block_size is %x\n",
- num_pages - 1,
- (unsigned long)lli_array[num_pages - 1].bus_address,
- num_pages - 1,
- lli_array[num_pages - 1].block_size);
- }
-
- /* Set output params according to the in_out flag */
- if (in_out_flag == SEP_DRIVER_IN_FLAG) {
- *lli_array_ptr = lli_array;
- sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = num_pages;
- sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = page_array;
- sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
- sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries =
- num_pages;
- } else {
- *lli_array_ptr = lli_array;
- sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = num_pages;
- sep->dma_res_arr[sep->nr_dcb_creat].out_page_array =
- page_array;
- sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
- sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries =
- num_pages;
- }
- goto end_function;
-
-end_function_with_error3:
- /* Free lli array */
- kfree(lli_array);
-
-end_function_with_error2:
- kfree(map_array);
-
-end_function_with_error1:
- /* Free page array */
- kfree(page_array);
-
-end_function:
- return error;
-}
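-/*
- * Worked example (added note): with 4 KiB pages, app_virt_addr =
- * 0x1000ff0 and data_size = 0x20 give start_page = 0x1000, end_page =
- * 0x1001 and num_pages = 2; lli_array[0] then covers the last 0x10
- * bytes of the first page and lli_array[1] the first 0x10 bytes of
- * the second.
- */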
-
-/**
- * sep_calculate_lli_table_max_size - size the LLI table
- * @sep: pointer to struct sep_device
- * @lli_in_array_ptr
- * @num_array_entries
- * @last_table_flag
- *
- * This function calculates the size of data that can be inserted into
- * the lli table from this array, such that either the table is full
- * (all entries are entered), or there are no more entries in the
- * lli array
- */
-static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
- struct sep_lli_entry *lli_in_array_ptr,
- u32 num_array_entries,
- u32 *last_table_flag)
-{
- u32 counter;
- /* Table data size */
- u32 table_data_size = 0;
- /* Data size for the next table */
- u32 next_table_data_size;
-
- *last_table_flag = 0;
-
- /*
- * Calculate the data in the out lli table till we fill the whole
- * table or till the data has ended
- */
- for (counter = 0;
- (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
- (counter < num_array_entries); counter++)
- table_data_size += lli_in_array_ptr[counter].block_size;
-
- /*
- * Check if we reached the last entry,
- * meaning this is the last table to build,
- * and no need to check the block alignment
- */
- if (counter == num_array_entries) {
- /* Set the last table flag */
- *last_table_flag = 1;
- goto end_function;
- }
-
- /*
- * Calculate the data size of the next table.
- * Stop if no entries left or if data size is more than the DMA restriction
- */
- next_table_data_size = 0;
- for (; counter < num_array_entries; counter++) {
- next_table_data_size += lli_in_array_ptr[counter].block_size;
- if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
- break;
- }
-
- /*
- * Check if the next table data size is less than the DMA restriction.
- * If it is - recalculate the current table size, so that the next
- * table data size will be adequate for DMA
- */
- if (next_table_data_size &&
- next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
-
- table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
- next_table_data_size);
-
-end_function:
- return table_data_size;
-}
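-/*
- * Added note: the final adjustment above shrinks the current table by
- * just enough bytes that the data left over for the next table reaches
- * SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE, so no follow-on table falls
- * below the DMA minimum.
- */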
-
-/**
- * sep_build_lli_table - build an lli array for the given table
- * @sep: pointer to struct sep_device
- * @lli_array_ptr: pointer to lli array
- * @lli_table_ptr: pointer to lli table
- * @num_processed_entries_ptr: pointer to number of entries
- * @num_table_entries_ptr: pointer to number of tables
- * @table_data_size: total data size
- *
- * Builds an lli table from the lli_array according to
- * the given size of data
- */
-static void sep_build_lli_table(struct sep_device *sep,
- struct sep_lli_entry *lli_array_ptr,
- struct sep_lli_entry *lli_table_ptr,
- u32 *num_processed_entries_ptr,
- u32 *num_table_entries_ptr,
- u32 table_data_size)
-{
- /* Current table data size */
- u32 curr_table_data_size;
- /* Counter of lli array entry */
- u32 array_counter;
-
- /* Init current table data size and lli array entry counter */
- curr_table_data_size = 0;
- array_counter = 0;
- *num_table_entries_ptr = 1;
-
- dev_dbg(&sep->pdev->dev, "build lli table table_data_size is %x\n", table_data_size);
-
- /* Fill the table till table size reaches the needed amount */
- while (curr_table_data_size < table_data_size) {
- /* Update the number of entries in table */
- (*num_table_entries_ptr)++;
-
- lli_table_ptr->bus_address =
- cpu_to_le32(lli_array_ptr[array_counter].bus_address);
-
- lli_table_ptr->block_size =
- cpu_to_le32(lli_array_ptr[array_counter].block_size);
-
- curr_table_data_size += lli_array_ptr[array_counter].block_size;
-
- dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n",
- lli_table_ptr);
- dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
- (unsigned long)lli_table_ptr->bus_address);
- dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
- lli_table_ptr->block_size);
-
- /* Check for overflow of the table data */
- if (curr_table_data_size > table_data_size) {
- dev_dbg(&sep->pdev->dev,
- "curr_table_data_size too large\n");
-
- /* Update the size of block in the table */
- lli_table_ptr->block_size -=
- cpu_to_le32((curr_table_data_size - table_data_size));
-
- /* Update the physical address in the lli array */
- lli_array_ptr[array_counter].bus_address +=
- cpu_to_le32(lli_table_ptr->block_size);
-
- /* Update the block size left in the lli array */
- lli_array_ptr[array_counter].block_size =
- (curr_table_data_size - table_data_size);
- } else
- /* Advance to the next entry in the lli_array */
- array_counter++;
-
- dev_dbg(&sep->pdev->dev,
- "lli_table_ptr->bus_address is %08lx\n",
- (unsigned long)lli_table_ptr->bus_address);
- dev_dbg(&sep->pdev->dev,
- "lli_table_ptr->block_size is %x\n",
- lli_table_ptr->block_size);
-
- /* Move to the next entry in table */
- lli_table_ptr++;
- }
-
- /* Set the info entry to default */
- lli_table_ptr->bus_address = 0xffffffff;
- lli_table_ptr->block_size = 0;
-
- /* Set the output parameter */
- *num_processed_entries_ptr += array_counter;
-
-}
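-/*
- * Added note: the 0xffffffff/0 pair written above is the "info entry"
- * sentinel that terminates a table; when tables are chained it is
- * later overwritten with the bus address of the next table and a
- * (entries << 24) | size descriptor, which is exactly what
- * sep_debug_print_lli_tables() decodes.
- */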
-
-/**
- * sep_shared_area_virt_to_bus - map shared area to bus address
- * @sep: pointer to struct sep_device
- * @virt_address: virtual address to convert
- *
- * This function returns the physical address inside the shared area according
- * to the virtual address. It can be either on the external RAM device
- * (ioremapped), or on the system RAM
- * This implementation is for the external RAM
- */
-static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
- void *virt_address)
-{
- dev_dbg(&sep->pdev->dev, "sh virt to phys v %p\n", virt_address);
- dev_dbg(&sep->pdev->dev, "sh virt to phys p %08lx\n",
- (unsigned long)
- sep->shared_bus + (virt_address - sep->shared_addr));
-
- return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
-}
-
-/**
- * sep_shared_area_bus_to_virt - map shared area bus address to kernel
- * @sep: pointer to struct sep_device
- * @bus_address: bus address to convert
- *
- * This function returns the virtual address inside the shared area
- * according to the physical address. It can be either on the
- * external RAM device (ioremapped), or on the system RAM
- * This implementation is for the external RAM
- */
-static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
- dma_addr_t bus_address)
-{
- dev_dbg(&sep->pdev->dev, "shared bus to virt b=%lx v=%lx\n",
- (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
- (size_t)(bus_address - sep->shared_bus)));
-
- return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
-}
-
-/**
- * sep_debug_print_lli_tables - dump LLI table
- * @sep: pointer to struct sep_device
- * @lli_table_ptr: pointer to sep_lli_entry
- * @num_table_entries: number of entries
- * @table_data_size: total data size
- *
- * Walk the list of the created tables and print all the data
- */
-static void sep_debug_print_lli_tables(struct sep_device *sep,
- struct sep_lli_entry *lli_table_ptr,
- unsigned long num_table_entries,
- unsigned long table_data_size)
-{
- unsigned long table_count = 1;
- unsigned long entries_count = 0;
-
- dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables start\n");
-
- while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
- dev_dbg(&sep->pdev->dev,
- "lli table %08lx, table_data_size is %lu\n",
- table_count, table_data_size);
- dev_dbg(&sep->pdev->dev, "num_table_entries is %lu\n",
- num_table_entries);
-
- /* Print entries of the table (without info entry) */
- for (entries_count = 0; entries_count < num_table_entries;
- entries_count++, lli_table_ptr++) {
-
- dev_dbg(&sep->pdev->dev,
- "lli_table_ptr address is %08lx\n",
- (unsigned long) lli_table_ptr);
-
- dev_dbg(&sep->pdev->dev,
- "phys address is %08lx block size is %x\n",
- (unsigned long)lli_table_ptr->bus_address,
- lli_table_ptr->block_size);
- }
- /* Point to the info entry */
- lli_table_ptr--;
-
- dev_dbg(&sep->pdev->dev,
- "phys lli_table_ptr->block_size is %x\n",
- lli_table_ptr->block_size);
-
- dev_dbg(&sep->pdev->dev,
- "phys lli_table_ptr->physical_address is %08lu\n",
- (unsigned long)lli_table_ptr->bus_address);
-
-
- table_data_size = lli_table_ptr->block_size & 0xffffff;
- num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
-
- dev_dbg(&sep->pdev->dev,
- "phys table_data_size is %lu num_table_entries is"
- " %lu bus_address is%lu\n", table_data_size,
- num_table_entries, (unsigned long)lli_table_ptr->bus_address);
-
- if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
- lli_table_ptr = (struct sep_lli_entry *)
- sep_shared_bus_to_virt(sep,
- (unsigned long)lli_table_ptr->bus_address);
-
- table_count++;
- }
- dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables end\n");
-}
-
-
-/**
- * sep_prepare_empty_lli_table - create a blank LLI table
- * @sep: pointer to struct sep_device
- * @lli_table_addr_ptr: pointer to lli table
- * @num_entries_ptr: pointer to number of entries
- * @table_data_size_ptr: point to table data size
- *
- * This function creates empty lli tables when there is no data
- */
-static void sep_prepare_empty_lli_table(struct sep_device *sep,
- dma_addr_t *lli_table_addr_ptr,
- u32 *num_entries_ptr,
- u32 *table_data_size_ptr)
-{
- struct sep_lli_entry *lli_table_ptr;
-
- /* Find the area for new table */
- lli_table_ptr =
- (struct sep_lli_entry *)(sep->shared_addr +
- SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
- sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
-
- lli_table_ptr->bus_address = 0;
- lli_table_ptr->block_size = 0;
-
- lli_table_ptr++;
- lli_table_ptr->bus_address = 0xFFFFFFFF;
- lli_table_ptr->block_size = 0;
-
- /* Set the output parameter value */
- *lli_table_addr_ptr = sep->shared_bus +
- SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
- sep->num_lli_tables_created *
- sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* Set the num of entries and table data size for empty table */
- *num_entries_ptr = 2;
- *table_data_size_ptr = 0;
-
- /* Update the number of created tables */
- sep->num_lli_tables_created++;
-}
-
-/**
- * sep_prepare_input_dma_table - prepare input DMA mappings
- * @sep: pointer to struct sep_device
- * @data_size:
- * @block_size:
- * @lli_table_ptr:
- * @num_entries_ptr:
- * @table_data_size_ptr:
- * @is_kva: set for kernel data (kernel cryptio call)
- *
- * This function prepares only the input DMA table for synchronous symmetric
- * operations (HASH)
- * Note that all bus addresses that are passed to the SEP
- * are in 32 bit format; the SEP is a 32 bit device
- */
-static int sep_prepare_input_dma_table(struct sep_device *sep,
- unsigned long app_virt_addr,
- u32 data_size,
- u32 block_size,
- dma_addr_t *lli_table_ptr,
- u32 *num_entries_ptr,
- u32 *table_data_size_ptr,
- bool is_kva)
-{
- int error = 0;
- /* Pointer to the info entry of the table - the last entry */
- struct sep_lli_entry *info_entry_ptr;
- /* Array of pointers to page */
- struct sep_lli_entry *lli_array_ptr;
- /* Points to the first entry to be processed in the lli_in_array */
- u32 current_entry = 0;
- /* Num entries in the virtual buffer */
- u32 sep_lli_entries = 0;
- /* Lli table pointer */
- struct sep_lli_entry *in_lli_table_ptr;
- /* The total data in one table */
- u32 table_data_size = 0;
- /* Flag for last table */
- u32 last_table_flag = 0;
- /* Number of entries in lli table */
- u32 num_entries_in_table = 0;
- /* Next table address */
- void *lli_table_alloc_addr = 0;
-
- dev_dbg(&sep->pdev->dev, "prepare intput dma table data_size is %x\n", data_size);
- dev_dbg(&sep->pdev->dev, "block_size is %x\n", block_size);
-
- /* Initialize the pages pointers */
- sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
- sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 0;
-
- /* Set the kernel address for first table to be allocated */
- lli_table_alloc_addr = (void *)(sep->shared_addr +
- SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
- sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
-
- if (data_size == 0) {
- /* Special case - create empty table - 2 entries, zero data */
- sep_prepare_empty_lli_table(sep, lli_table_ptr,
- num_entries_ptr, table_data_size_ptr);
- goto update_dcb_counter;
- }
-
- /* Check if the pages are in Kernel Virtual Address layout */
- if (is_kva == true)
- /* Lock the pages in the kernel */
- error = sep_lock_kernel_pages(sep, app_virt_addr,
- data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
- else
- /*
- * Lock the pages of the user buffer
- * and translate them to pages
- */
- error = sep_lock_user_pages(sep, app_virt_addr,
- data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
-
- if (error)
- goto end_function;
-
- dev_dbg(&sep->pdev->dev, "output sep_in_num_pages is %x\n",
- sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
-
- current_entry = 0;
- info_entry_ptr = NULL;
-
- sep_lli_entries = sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages;
-
- /* Loop until all the entries in the input array are processed */
- while (current_entry < sep_lli_entries) {
-
- /* Set the new input and output tables */
- in_lli_table_ptr =
- (struct sep_lli_entry *)lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- if (lli_table_alloc_addr >
- ((void *)sep->shared_addr +
- SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
- SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
-
- error = -ENOMEM;
- goto end_function_error;
-
- }
-
- /* Update the number of created tables */
- sep->num_lli_tables_created++;
-
- /* Calculate the maximum size of data for input table */
- table_data_size = sep_calculate_lli_table_max_size(sep,
- &lli_array_ptr[current_entry],
- (sep_lli_entries - current_entry),
- &last_table_flag);
-
- /*
- * If this is not the last table -
- * then align it to the block size
- */
- if (!last_table_flag)
- table_data_size =
- (table_data_size / block_size) * block_size;
-
- dev_dbg(&sep->pdev->dev, "output table_data_size is %x\n",
- table_data_size);
-
- /* Construct input lli table */
- sep_build_lli_table(sep, &lli_array_ptr[current_entry],
- in_lli_table_ptr,
- &current_entry, &num_entries_in_table, table_data_size);
-
- if (info_entry_ptr == NULL) {
-
- /* Set the output parameters to physical addresses */
- *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
- in_lli_table_ptr);
- *num_entries_ptr = num_entries_in_table;
- *table_data_size_ptr = table_data_size;
-
- dev_dbg(&sep->pdev->dev,
- "output lli_table_in_ptr is %08lx\n",
- (unsigned long)*lli_table_ptr);
-
- } else {
- /* Update the info entry of the previous in table */
- info_entry_ptr->bus_address =
- sep_shared_area_virt_to_bus(sep,
- in_lli_table_ptr);
- info_entry_ptr->block_size =
- ((num_entries_in_table) << 24) |
- (table_data_size);
- }
- /* Save the pointer to the info entry of the current tables */
- info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
- }
- /* Print input tables */
- sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
- sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
- *num_entries_ptr, *table_data_size_ptr);
- /* The array of the pages */
- kfree(lli_array_ptr);
-
-update_dcb_counter:
- /* Update DCB counter */
- sep->nr_dcb_creat++;
- goto end_function;
-
-end_function_error:
- /* Free all the allocated resources */
- kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
- kfree(lli_array_ptr);
- kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
-
-end_function:
- return error;
-
-}
-/**
- * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
- * @sep: pointer to struct sep_device
- * @lli_in_array:
- * @sep_in_lli_entries:
- * @lli_out_array:
- * @sep_out_lli_entries
- * @block_size
- * @lli_table_in_ptr
- * @lli_table_out_ptr
- * @in_num_entries_ptr
- * @out_num_entries_ptr
- * @table_data_size_ptr
- *
- * This function creates the input and output DMA tables for
- * symmetric operations (AES/DES) according to the block
- * size from LLI arrays
- * Note that all bus addresses that are passed to the SEP
- * are in 32 bit format; the SEP is a 32 bit device
- */
-static int sep_construct_dma_tables_from_lli(
- struct sep_device *sep,
- struct sep_lli_entry *lli_in_array,
- u32 sep_in_lli_entries,
- struct sep_lli_entry *lli_out_array,
- u32 sep_out_lli_entries,
- u32 block_size,
- dma_addr_t *lli_table_in_ptr,
- dma_addr_t *lli_table_out_ptr,
- u32 *in_num_entries_ptr,
- u32 *out_num_entries_ptr,
- u32 *table_data_size_ptr)
-{
- /* Points to the area where next lli table can be allocated */
- void *lli_table_alloc_addr = 0;
- /* Input lli table */
- struct sep_lli_entry *in_lli_table_ptr = NULL;
- /* Output lli table */
- struct sep_lli_entry *out_lli_table_ptr = NULL;
- /* Pointer to the info entry of the table - the last entry */
- struct sep_lli_entry *info_in_entry_ptr = NULL;
- /* Pointer to the info entry of the table - the last entry */
- struct sep_lli_entry *info_out_entry_ptr = NULL;
- /* Points to the first entry to be processed in the lli_in_array */
- u32 current_in_entry = 0;
- /* Points to the first entry to be processed in the lli_out_array */
- u32 current_out_entry = 0;
- /* Max size of the input table */
- u32 in_table_data_size = 0;
- /* Max size of the output table */
- u32 out_table_data_size = 0;
-	/* Flag that signifies whether this is the last table built */
- u32 last_table_flag = 0;
- /* The data size that should be in table */
- u32 table_data_size = 0;
-	/* Number of entries in the input table */
-	u32 num_entries_in_table = 0;
-	/* Number of entries in the output table */
-	u32 num_entries_out_table = 0;
-
- /* Initiate to point after the message area */
- lli_table_alloc_addr = (void *)(sep->shared_addr +
- SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
- (sep->num_lli_tables_created *
- (sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
-
-	/* Loop until all the entries in the input array are processed */
- while (current_in_entry < sep_in_lli_entries) {
- /* Set the new input and output tables */
- in_lli_table_ptr =
- (struct sep_lli_entry *)lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* Set the first output tables */
- out_lli_table_ptr =
- (struct sep_lli_entry *)lli_table_alloc_addr;
-
- /* Check if the DMA table area limit was overrun */
- if ((lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
- ((void *)sep->shared_addr +
- SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
- SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
-
- dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
- return -ENOMEM;
- }
-
- /* Update the number of the lli tables created */
- sep->num_lli_tables_created += 2;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* Calculate the maximum size of data for input table */
- in_table_data_size =
- sep_calculate_lli_table_max_size(sep,
- &lli_in_array[current_in_entry],
- (sep_in_lli_entries - current_in_entry),
- &last_table_flag);
-
- /* Calculate the maximum size of data for output table */
- out_table_data_size =
- sep_calculate_lli_table_max_size(sep,
- &lli_out_array[current_out_entry],
- (sep_out_lli_entries - current_out_entry),
- &last_table_flag);
-
- dev_dbg(&sep->pdev->dev,
- "construct tables from lli in_table_data_size is %x\n",
- in_table_data_size);
-
- dev_dbg(&sep->pdev->dev,
- "construct tables from lli out_table_data_size is %x\n",
- out_table_data_size);
-
- table_data_size = in_table_data_size;
-
- if (!last_table_flag) {
- /*
-			 * If this is not the last table,
-			 * use the smaller of the two data sizes
-			 * and align it to the block size
- */
- if (table_data_size > out_table_data_size)
- table_data_size = out_table_data_size;
-
- /*
-			 * Now calculate the table size so that
-			 * it is a multiple of the block size
- */
- table_data_size = (table_data_size / block_size) *
- block_size;
- }
-
- /* Construct input lli table */
- sep_build_lli_table(sep, &lli_in_array[current_in_entry],
- in_lli_table_ptr,
- &current_in_entry,
- &num_entries_in_table,
- table_data_size);
-
- /* Construct output lli table */
- sep_build_lli_table(sep, &lli_out_array[current_out_entry],
- out_lli_table_ptr,
- &current_out_entry,
- &num_entries_out_table,
- table_data_size);
-
- /* If info entry is null - this is the first table built */
- if (info_in_entry_ptr == NULL) {
- /* Set the output parameters to physical addresses */
- *lli_table_in_ptr =
- sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
-
- *in_num_entries_ptr = num_entries_in_table;
-
- *lli_table_out_ptr =
- sep_shared_area_virt_to_bus(sep,
- out_lli_table_ptr);
-
- *out_num_entries_ptr = num_entries_out_table;
- *table_data_size_ptr = table_data_size;
-
- dev_dbg(&sep->pdev->dev,
- "output lli_table_in_ptr is %08lx\n",
- (unsigned long)*lli_table_in_ptr);
- dev_dbg(&sep->pdev->dev,
- "output lli_table_out_ptr is %08lx\n",
- (unsigned long)*lli_table_out_ptr);
- } else {
- /* Update the info entry of the previous in table */
- info_in_entry_ptr->bus_address =
- sep_shared_area_virt_to_bus(sep,
- in_lli_table_ptr);
-
- info_in_entry_ptr->block_size =
- ((num_entries_in_table) << 24) |
- (table_data_size);
-
-			/* Update the info entry of the previous out table */
- info_out_entry_ptr->bus_address =
- sep_shared_area_virt_to_bus(sep,
- out_lli_table_ptr);
-
- info_out_entry_ptr->block_size =
- ((num_entries_out_table) << 24) |
- (table_data_size);
-
- dev_dbg(&sep->pdev->dev,
- "output lli_table_in_ptr:%08lx %08x\n",
- (unsigned long)info_in_entry_ptr->bus_address,
- info_in_entry_ptr->block_size);
-
- dev_dbg(&sep->pdev->dev,
- "output lli_table_out_ptr:%08lx %08x\n",
- (unsigned long)info_out_entry_ptr->bus_address,
- info_out_entry_ptr->block_size);
- }
-
- /* Save the pointer to the info entry of the current tables */
- info_in_entry_ptr = in_lli_table_ptr +
- num_entries_in_table - 1;
- info_out_entry_ptr = out_lli_table_ptr +
- num_entries_out_table - 1;
-
- dev_dbg(&sep->pdev->dev,
- "output num_entries_out_table is %x\n",
- (u32)num_entries_out_table);
- dev_dbg(&sep->pdev->dev,
- "output info_in_entry_ptr is %lx\n",
- (unsigned long)info_in_entry_ptr);
- dev_dbg(&sep->pdev->dev,
- "output info_out_entry_ptr is %lx\n",
- (unsigned long)info_out_entry_ptr);
- }
-
- /* Print input tables */
- sep_debug_print_lli_tables(sep,
- (struct sep_lli_entry *)
- sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
- *in_num_entries_ptr,
- *table_data_size_ptr);
-
- /* Print output tables */
- sep_debug_print_lli_tables(sep,
- (struct sep_lli_entry *)
- sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
- *out_num_entries_ptr,
- *table_data_size_ptr);
-
- return 0;
-}
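
For the symmetric in/out tables above, the usable chunk per table is the
smaller of the input and output sizes, rounded down to a whole number of
blocks unless this is the last table. A self-contained restatement of that
arithmetic (helper name is ours, not the driver's; assumes block_size != 0):

#include <stdint.h>
#include <stdio.h>

static uint32_t usable_table_size(uint32_t in_size, uint32_t out_size,
				  uint32_t block_size, int last_table)
{
	uint32_t size = (in_size < out_size) ? in_size : out_size;

	if (!last_table)
		size = (size / block_size) * block_size; /* round down */
	return size;
}

int main(void)
{
	/* 1000 bytes in, 900 bytes out, 16-byte blocks -> 896 bytes */
	printf("%u\n", (unsigned)usable_table_size(1000, 900, 16, 0));
	return 0;
}
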
-
-/**
- * sep_prepare_input_output_dma_table - prepare DMA I/O table
- * @sep: pointer to struct sep_device
- * @app_virt_in_addr: virtual address of the input buffer
- * @app_virt_out_addr: virtual address of the output buffer
- * @data_size: size of the data
- * @block_size: block size of the operation
- * @lli_table_in_ptr: returned bus address of the first input LLI table
- * @lli_table_out_ptr: returned bus address of the first output LLI table
- * @in_num_entries_ptr: returned number of entries in the first input table
- * @out_num_entries_ptr: returned number of entries in the first output table
- * @table_data_size_ptr: returned data size of the first table
- * @is_kva: set for kernel data; used only for kernel crypto module
- *
- * This function builds input and output DMA tables for synchronic
- * symmetric operations (AES, DES, HASH). It also checks that each
- * table's data size is a multiple of the block size
- * Note that all bus addresses that are passed to the SEP
- * are in 32 bit format; the SEP is a 32 bit device
- */
-static int sep_prepare_input_output_dma_table(struct sep_device *sep,
- unsigned long app_virt_in_addr,
- unsigned long app_virt_out_addr,
- u32 data_size,
- u32 block_size,
- dma_addr_t *lli_table_in_ptr,
- dma_addr_t *lli_table_out_ptr,
- u32 *in_num_entries_ptr,
- u32 *out_num_entries_ptr,
- u32 *table_data_size_ptr,
- bool is_kva)
-
-{
- int error = 0;
- /* Array of pointers of page */
- struct sep_lli_entry *lli_in_array;
- /* Array of pointers of page */
- struct sep_lli_entry *lli_out_array;
-
- if (data_size == 0) {
- /* Prepare empty table for input and output */
- sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
- in_num_entries_ptr, table_data_size_ptr);
-
- sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
- out_num_entries_ptr, table_data_size_ptr);
-
- goto update_dcb_counter;
- }
-
- /* Initialize the pages pointers */
- sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
- sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
-
- /* Lock the pages of the buffer and translate them to pages */
- if (is_kva == true) {
- error = sep_lock_kernel_pages(sep, app_virt_in_addr,
- data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
-
- if (error) {
- dev_warn(&sep->pdev->dev,
- "lock kernel for in failed\n");
- goto end_function;
- }
-
- error = sep_lock_kernel_pages(sep, app_virt_out_addr,
- data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
-
- if (error) {
- dev_warn(&sep->pdev->dev,
- "lock kernel for out failed\n");
- goto end_function;
- }
- }
-
- else {
- error = sep_lock_user_pages(sep, app_virt_in_addr,
- data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
- if (error) {
- dev_warn(&sep->pdev->dev,
- "sep_lock_user_pages for input virtual buffer failed\n");
- goto end_function;
- }
-
- error = sep_lock_user_pages(sep, app_virt_out_addr,
- data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
-
- if (error) {
- dev_warn(&sep->pdev->dev,
- "sep_lock_user_pages for output virtual buffer failed\n");
- goto end_function_free_lli_in;
- }
- }
-
- dev_dbg(&sep->pdev->dev, "prep input output dma table sep_in_num_pages is %x\n",
- sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
- dev_dbg(&sep->pdev->dev, "sep_out_num_pages is %x\n",
- sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages);
- dev_dbg(&sep->pdev->dev, "SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n",
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
-
- /* Call the function that creates table from the lli arrays */
- error = sep_construct_dma_tables_from_lli(sep, lli_in_array,
- sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages,
- lli_out_array,
- sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages,
- block_size, lli_table_in_ptr, lli_table_out_ptr,
- in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
-
- if (error) {
- dev_warn(&sep->pdev->dev,
- "sep_construct_dma_tables_from_lli failed\n");
- goto end_function_with_error;
- }
-
- kfree(lli_out_array);
- kfree(lli_in_array);
-
-update_dcb_counter:
- /* Update DCB counter */
- sep->nr_dcb_creat++;
-
- goto end_function;
-
-end_function_with_error:
- kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_map_array);
- kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_page_array);
- kfree(lli_out_array);
-
-
-end_function_free_lli_in:
- kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
- kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
- kfree(lli_in_array);
-
-end_function:
-
- return error;
-
-}
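
The error paths above use the usual cascading-label unwind: each label frees
only what was acquired before the failure. A generic sketch of the idiom
(stub functions are ours, not the driver's):

#include <stdio.h>

static int get_input(void)  { return 0; }	/* stub: acquire in pages */
static int get_output(void) { return -1; }	/* stub: fails on purpose */
static void put_input(void) { puts("input released"); }

static int acquire_both(void)
{
	int err;

	err = get_input();
	if (err)
		goto end;
	err = get_output();
	if (err)
		goto free_input;	/* unwind only what succeeded */
	return 0;

free_input:
	put_input();
end:
	return err;
}

int main(void)
{
	return acquire_both() ? 1 : 0;
}
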
-
-/**
- * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
- * @app_in_address: unsigned long; for data buffer in (user space)
- * @app_out_address: unsigned long; for data buffer out (user space)
- * @data_in_size: u32; for size of data
- * @block_size: u32; for block size
- * @tail_block_size: u32; for size of tail block
- * @isapplet: bool; to indicate external app
- * @is_kva: bool; kernel buffer; only used for kernel crypto module
- *
- * This function prepares the linked DMA tables and puts the
- * address for the linked list of tables into a DCB (data control
- * block) the address of which is known by the SEP hardware
- * Note that all bus addresses that are passed to the SEP
- * are in 32 bit format; the SEP is a 32 bit device
- */
-static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
- unsigned long app_in_address,
- unsigned long app_out_address,
- u32 data_in_size,
- u32 block_size,
- u32 tail_block_size,
- bool isapplet,
- bool is_kva)
-{
- int error = 0;
- /* Size of tail */
- u32 tail_size = 0;
- /* Address of the created DCB table */
- struct sep_dcblock *dcb_table_ptr = NULL;
- /* The physical address of the first input DMA table */
- dma_addr_t in_first_mlli_address = 0;
- /* Number of entries in the first input DMA table */
- u32 in_first_num_entries = 0;
- /* The physical address of the first output DMA table */
- dma_addr_t out_first_mlli_address = 0;
- /* Number of entries in the first output DMA table */
- u32 out_first_num_entries = 0;
- /* Data in the first input/output table */
- u32 first_data_size = 0;
-
- if (sep->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
- /* No more DCBs to allocate */
- dev_warn(&sep->pdev->dev, "no more DCBs available\n");
- error = -ENOSPC;
- goto end_function;
- }
-
- /* Allocate new DCB */
- dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
- SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
- (sep->nr_dcb_creat * sizeof(struct sep_dcblock)));
-
- /* Set the default values in the DCB */
- dcb_table_ptr->input_mlli_address = 0;
- dcb_table_ptr->input_mlli_num_entries = 0;
- dcb_table_ptr->input_mlli_data_size = 0;
- dcb_table_ptr->output_mlli_address = 0;
- dcb_table_ptr->output_mlli_num_entries = 0;
- dcb_table_ptr->output_mlli_data_size = 0;
- dcb_table_ptr->tail_data_size = 0;
- dcb_table_ptr->out_vr_tail_pt = 0;
-
- if (isapplet == true) {
-
- /* Check if there is enough data for DMA operation */
- if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
- if (is_kva == true) {
- memcpy(dcb_table_ptr->tail_data,
- (void *)app_in_address, data_in_size);
- } else {
- if (copy_from_user(dcb_table_ptr->tail_data,
- (void __user *)app_in_address,
- data_in_size)) {
- error = -EFAULT;
- goto end_function;
- }
- }
-
- dcb_table_ptr->tail_data_size = data_in_size;
-
- /* Set the output user-space address for mem2mem op */
- if (app_out_address)
- dcb_table_ptr->out_vr_tail_pt =
- (aligned_u64)app_out_address;
-
- /*
-			 * Update both data length parameters in order to
-			 * avoid a second data copy and allow building of
-			 * empty mlli tables
- */
- tail_size = 0x0;
- data_in_size = 0x0;
-
- } else {
- if (!app_out_address) {
- tail_size = data_in_size % block_size;
- if (!tail_size) {
- if (tail_block_size == block_size)
- tail_size = block_size;
- }
- } else {
- tail_size = 0;
- }
- }
- if (tail_size) {
- if (tail_size > sizeof(dcb_table_ptr->tail_data))
- return -EINVAL;
- if (is_kva == true) {
- memcpy(dcb_table_ptr->tail_data,
- (void *)(app_in_address + data_in_size -
- tail_size), tail_size);
- } else {
- /* We have tail data - copy it to DCB */
- if (copy_from_user(dcb_table_ptr->tail_data,
- (void *)(app_in_address +
- data_in_size - tail_size), tail_size)) {
- error = -EFAULT;
- goto end_function;
- }
- }
- if (app_out_address)
- /*
- * Calculate the output address
- * according to tail data size
- */
- dcb_table_ptr->out_vr_tail_pt =
- (aligned_u64)app_out_address + data_in_size
- - tail_size;
-
- /* Save the real tail data size */
- dcb_table_ptr->tail_data_size = tail_size;
- /*
-			 * Update the data size to exclude the tail;
-			 * the remainder is the data for the DMA
- */
- data_in_size = (data_in_size - tail_size);
- }
- }
- /* Check if we need to build only input table or input/output */
- if (app_out_address) {
- /* Prepare input/output tables */
- error = sep_prepare_input_output_dma_table(sep,
- app_in_address,
- app_out_address,
- data_in_size,
- block_size,
- &in_first_mlli_address,
- &out_first_mlli_address,
- &in_first_num_entries,
- &out_first_num_entries,
- &first_data_size,
- is_kva);
- } else {
- /* Prepare input tables */
- error = sep_prepare_input_dma_table(sep,
- app_in_address,
- data_in_size,
- block_size,
- &in_first_mlli_address,
- &in_first_num_entries,
- &first_data_size,
- is_kva);
- }
-
- if (error) {
- dev_warn(&sep->pdev->dev, "prepare DMA table call failed from prepare DCB call\n");
- goto end_function;
- }
-
- /* Set the DCB values */
- dcb_table_ptr->input_mlli_address = in_first_mlli_address;
- dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
- dcb_table_ptr->input_mlli_data_size = first_data_size;
- dcb_table_ptr->output_mlli_address = out_first_mlli_address;
- dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
- dcb_table_ptr->output_mlli_data_size = first_data_size;
-
-end_function:
- return error;
-
-}
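
The tail handling above reduces to a small arithmetic rule: with no output
buffer, the tail is the remainder modulo the block size, and a full trailing
block is forced when the buffer divides evenly but tail_block_size equals
block_size. A standalone sketch (function name is ours):

#include <stdint.h>
#include <stdio.h>

static uint32_t dma_tail_bytes(uint32_t data_in_size, uint32_t block_size,
			       uint32_t tail_block_size, int have_output)
{
	uint32_t tail;

	if (have_output)
		return 0;	/* in/out op: no tail is split off */

	tail = data_in_size % block_size;
	if (!tail && tail_block_size == block_size)
		tail = block_size;	/* force a full trailing block */
	return tail;
}

int main(void)
{
	printf("%u\n", (unsigned)dma_tail_bytes(100, 16, 16, 0)); /* 4 */
	printf("%u\n", (unsigned)dma_tail_bytes(96, 16, 16, 0));  /* 16 */
	return 0;
}
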
-
-/**
- * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
- * @sep: pointer to struct sep_device
- * @isapplet: indicates external application (used for kernel access)
- * @is_kva: indicates kernel addresses (only used for kernel crypto)
- *
- * This function frees the DMA tables and DCB
- */
-static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
- bool is_kva)
-{
- int i = 0;
- int error = 0;
- int error_temp = 0;
- struct sep_dcblock *dcb_table_ptr;
- unsigned long pt_hold;
- void *tail_pt;
-
- if (isapplet == true) {
- /* Set pointer to first DCB table */
- dcb_table_ptr = (struct sep_dcblock *)
- (sep->shared_addr +
- SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
-
- /* Go over each DCB and see if tail pointer must be updated */
- for (i = 0; i < sep->nr_dcb_creat; i++, dcb_table_ptr++) {
- if (dcb_table_ptr->out_vr_tail_pt) {
- pt_hold = (unsigned long)dcb_table_ptr->out_vr_tail_pt;
- tail_pt = (void *)pt_hold;
- if (is_kva == true) {
- memcpy(tail_pt,
- dcb_table_ptr->tail_data,
- dcb_table_ptr->tail_data_size);
- } else {
- error_temp = copy_to_user(
- tail_pt,
- dcb_table_ptr->tail_data,
- dcb_table_ptr->tail_data_size);
- }
- if (error_temp) {
- /* Release the DMA resource */
- error = -EFAULT;
- break;
- }
- }
- }
- }
- /* Free the output pages, if any */
- sep_free_dma_table_data_handler(sep);
-
- return error;
-}
-
-/**
- * sep_get_static_pool_addr_handler - get static pool address
- * @sep: pointer to struct sep_device
- *
- * This function sets the bus and virtual addresses of the static pool
- */
-static int sep_get_static_pool_addr_handler(struct sep_device *sep)
-{
- u32 *static_pool_addr = NULL;
-
- static_pool_addr = (u32 *)(sep->shared_addr +
- SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
-
- static_pool_addr[0] = SEP_STATIC_POOL_VAL_TOKEN;
- static_pool_addr[1] = (u32)sep->shared_bus +
- SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
-
- dev_dbg(&sep->pdev->dev, "static pool segment: physical %x\n",
- (u32)static_pool_addr[1]);
-
- return 0;
-}
-
-/**
- * sep_end_transaction_handler - end transaction
- * @sep: pointer to struct sep_device
- *
- * This API handles the end transaction request
- */
-static int sep_end_transaction_handler(struct sep_device *sep)
-{
- /* Clear the data pool pointers Token */
- memset((void *)(sep->shared_addr +
- SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES),
- 0, sep->num_of_data_allocations*2*sizeof(u32));
-
- /* Check that all the DMA resources were freed */
- sep_free_dma_table_data_handler(sep);
-
- clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
-
- /*
- * We are now through with the transaction. Let's
-	 * allow other processes that have the device open
- * to perform transactions
- */
- mutex_lock(&sep->sep_mutex);
- sep->pid_doing_transaction = 0;
- mutex_unlock(&sep->sep_mutex);
-	/* Raise event for stuck contexts */
- wake_up(&sep->event);
-
- return 0;
-}
-
-/**
- * sep_prepare_dcb_handler - prepare a control block
- * @sep: pointer to struct sep_device
- * @arg: pointer to user parameters
- *
- * This function builds the DCB (data control block) and the associated
- * DMA tables from the arguments supplied by the caller.
- */
-static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- /* Command arguments */
- struct build_dcb_struct command_args;
-
- /* Get the command arguments */
- if (copy_from_user(&command_args, (void __user *)arg,
- sizeof(struct build_dcb_struct))) {
- error = -EFAULT;
- goto end_function;
- }
-
- dev_dbg(&sep->pdev->dev, "prep dcb handler app_in_address is %08llx\n",
- command_args.app_in_address);
- dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
- command_args.app_out_address);
- dev_dbg(&sep->pdev->dev, "data_size is %x\n",
- command_args.data_in_size);
- dev_dbg(&sep->pdev->dev, "block_size is %x\n",
- command_args.block_size);
- dev_dbg(&sep->pdev->dev, "tail block_size is %x\n",
- command_args.tail_block_size);
-
- error = sep_prepare_input_output_dma_table_in_dcb(sep,
- (unsigned long)command_args.app_in_address,
- (unsigned long)command_args.app_out_address,
- command_args.data_in_size, command_args.block_size,
- command_args.tail_block_size, true, false);
-
-end_function:
- return error;
-
-}
-
-/**
- * sep_free_dcb_handler - free control block resources
- * @sep: pointer to struct sep_device
- *
- * This function frees the DCB resources and updates the needed
- * user-space buffers.
- */
-static int sep_free_dcb_handler(struct sep_device *sep)
-{
- return sep_free_dma_tables_and_dcb(sep, false, false);
-}
-
-/**
- * sep_rar_prepare_output_msg_handler - prepare an output message
- * @sep: pointer to struct sep_device
- * @arg: pointer to user parameters
- *
- * This function will retrieve the RAR buffer physical addresses, type
- * & size corresponding to the RAR handles provided in the buffers vector.
- */
-static int sep_rar_prepare_output_msg_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error = 0;
- /* Command args */
- struct rar_hndl_to_bus_struct command_args;
- /* Bus address */
- dma_addr_t rar_bus = 0;
- /* Holds the RAR address in the system memory offset */
- u32 *rar_addr;
-
- /* Copy the data */
- if (copy_from_user(&command_args, (void __user *)arg,
- sizeof(command_args))) {
- error = -EFAULT;
- goto end_function;
- }
-
-	/* RAR handle translation is not supported; reject non-NULL handles */
- if (command_args.rar_handle)
- return -EOPNOTSUPP;
- dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus);
-
- /* Set value in the SYSTEM MEMORY offset */
- rar_addr = (u32 *)(sep->shared_addr +
- SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
-
- /* Copy the physical address to the System Area for the SEP */
- rar_addr[0] = SEP_RAR_VAL_TOKEN;
- rar_addr[1] = rar_bus;
-
-end_function:
- return error;
-}
-
-/**
- * sep_ioctl - ioctl api
- * @filp: pointer to struct file
- * @cmd: command
- * @arg: pointer to argument structure
- *
- * Implement the ioctl methods available on the SEP device.
- */
-static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- int error = 0;
- struct sep_device *sep = filp->private_data;
-
- /* Make sure we own this device */
- mutex_lock(&sep->sep_mutex);
- if ((current->pid != sep->pid_doing_transaction) &&
- (sep->pid_doing_transaction != 0)) {
- dev_dbg(&sep->pdev->dev, "ioctl pid is not owner\n");
- error = -EACCES;
- }
- mutex_unlock(&sep->sep_mutex);
-
- if (error)
- return error;
-
- if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
- return -ENOTTY;
-
-	/* Lock to prevent the daemon from interfering with the operation */
- mutex_lock(&sep->ioctl_mutex);
-
- switch (cmd) {
- case SEP_IOCSENDSEPCOMMAND:
- /* Send command to SEP */
- error = sep_send_command_handler(sep);
- break;
- case SEP_IOCALLOCDATAPOLL:
- /* Allocate data pool */
- error = sep_allocate_data_pool_memory_handler(sep, arg);
- break;
- case SEP_IOCGETSTATICPOOLADDR:
- /* Inform the SEP the bus address of the static pool */
- error = sep_get_static_pool_addr_handler(sep);
- break;
- case SEP_IOCENDTRANSACTION:
- error = sep_end_transaction_handler(sep);
- break;
- case SEP_IOCRARPREPAREMESSAGE:
- error = sep_rar_prepare_output_msg_handler(sep, arg);
- break;
- case SEP_IOCPREPAREDCB:
- error = sep_prepare_dcb_handler(sep, arg);
- break;
- case SEP_IOCFREEDCB:
- error = sep_free_dcb_handler(sep);
- break;
- default:
- error = -ENOTTY;
- break;
- }
-
- mutex_unlock(&sep->ioctl_mutex);
- return error;
-}
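
From user space the transaction sequence above is driven through plain
ioctl() calls on the misc device node. A hypothetical usage sketch: the
ioctl numbers are mirrored from sep_driver_api.h, and the /dev path follows
the SEP_DEV_NAME miscdevice name, so it may differ by udev setup:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define SEP_IOC_MAGIC_NUMBER	's'
#define SEP_IOCSENDSEPCOMMAND	_IO(SEP_IOC_MAGIC_NUMBER, 0)
#define SEP_IOCENDTRANSACTION	_IO(SEP_IOC_MAGIC_NUMBER, 15)

int main(void)
{
	int fd = open("/dev/sep_sec_driver", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* the message area would normally be mmap()ed and filled first */
	if (ioctl(fd, SEP_IOCSENDSEPCOMMAND) < 0)
		perror("send command");
	if (ioctl(fd, SEP_IOCENDTRANSACTION) < 0)
		perror("end transaction");
	close(fd);
	return 0;
}
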
-
-/**
- * sep_singleton_ioctl - ioctl api for singleton interface
- * @filp: pointer to struct file
- * @cmd: command
- * @arg: pointer to argument structure
- *
- * Implement the additional ioctls for the singleton device
- */
-static long sep_singleton_ioctl(struct file *filp, u32 cmd, unsigned long arg)
-{
- long error = 0;
- struct sep_device *sep = filp->private_data;
-
- /* Check that the command is for the SEP device */
- if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
- return -ENOTTY;
-
- /* Make sure we own this device */
- mutex_lock(&sep->sep_mutex);
- if ((current->pid != sep->pid_doing_transaction) &&
- (sep->pid_doing_transaction != 0)) {
- dev_dbg(&sep->pdev->dev, "singleton ioctl pid is not owner\n");
- mutex_unlock(&sep->sep_mutex);
- return -EACCES;
- }
-
- mutex_unlock(&sep->sep_mutex);
-
- switch (cmd) {
- case SEP_IOCTLSETCALLERID:
- mutex_lock(&sep->ioctl_mutex);
- error = sep_set_caller_id_handler(sep, arg);
- mutex_unlock(&sep->ioctl_mutex);
- break;
- default:
- error = sep_ioctl(filp, cmd, arg);
- break;
- }
- return error;
-}
-
-/**
- * sep_request_daemon_ioctl - ioctl for daemon
- * @filp: pointer to struct file
- * @cmd: command
- * @arg: pointer to argument structure
- *
- * Called by the request daemon to perform ioctls on the daemon device
- */
-static long sep_request_daemon_ioctl(struct file *filp, u32 cmd,
- unsigned long arg)
-{
-
- long error;
- struct sep_device *sep = filp->private_data;
-
- /* Check that the command is for SEP device */
- if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
- return -ENOTTY;
-
- /* Only one process can access ioctl at any given time */
- mutex_lock(&sep->ioctl_mutex);
-
- switch (cmd) {
- case SEP_IOCSENDSEPRPLYCOMMAND:
- /* Send reply command to SEP */
- error = sep_req_daemon_send_reply_command_handler(sep);
- break;
- case SEP_IOCENDTRANSACTION:
- /*
-		 * End request daemon transaction; currently a no-op,
-		 * to be removed once the middleware API library
-		 * is updated
- */
- error = 0;
- break;
- default:
- error = -ENOTTY;
- }
- mutex_unlock(&sep->ioctl_mutex);
- return error;
-}
-
-/**
- * sep_inthandler - interrupt handler
- * @irq: interrupt
- * @dev_id: device id
- */
-static irqreturn_t sep_inthandler(int irq, void *dev_id)
-{
- irqreturn_t int_error = IRQ_HANDLED;
- unsigned long lck_flags;
- u32 reg_val, reg_val2 = 0;
- struct sep_device *sep = dev_id;
-
- /* Read the IRR register to check if this is SEP interrupt */
- reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
-
- if (reg_val & (0x1 << 13)) {
- /* Lock and update the counter of reply messages */
- spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
- sep->reply_ct++;
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
-
- dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
- sep->send_ct, sep->reply_ct);
-
-		/* Is this a printf or daemon request? */
- reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
- dev_dbg(&sep->pdev->dev,
- "SEP Interrupt - reg2 is %08x\n", reg_val2);
-
- if ((reg_val2 >> 30) & 0x1) {
- dev_dbg(&sep->pdev->dev, "int: printf request\n");
- wake_up(&sep->event_request_daemon);
- } else if (reg_val2 >> 31) {
- dev_dbg(&sep->pdev->dev, "int: daemon request\n");
- wake_up(&sep->event_request_daemon);
- } else {
- dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
- wake_up(&sep->event);
- }
- } else {
- dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
- int_error = IRQ_NONE;
- }
- if (int_error == IRQ_HANDLED)
- sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
-
- return int_error;
-}
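
The GPR2 dispatch above tests bit 30 before bit 31, so a printf request wins
when both happen to be set. A tiny sketch of that decision, in the handler's
order (the helper name is ours):

#include <stdint.h>
#include <stdio.h>

static const char *gpr2_event(uint32_t reg_val2)
{
	if ((reg_val2 >> 30) & 0x1)
		return "printf request";
	if (reg_val2 >> 31)
		return "daemon request";
	return "SEP reply";
}

int main(void)
{
	printf("%s\n", gpr2_event(0x40000000));	/* printf request */
	printf("%s\n", gpr2_event(0x80000000));	/* daemon request */
	printf("%s\n", gpr2_event(0x00000001));	/* SEP reply */
	return 0;
}
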
-
-/**
- * sep_reconfig_shared_area - reconfigure shared area
- * @sep: pointer to struct sep_device
- *
- * Reconfig the shared area between HOST and SEP - needed in case
- * the DX_CC_Init function was called before OS loading.
- */
-static int sep_reconfig_shared_area(struct sep_device *sep)
-{
- int ret_val;
-
-	/* used to limit waiting for SEP */
- unsigned long end_time;
-
- /* Send the new SHARED MESSAGE AREA to the SEP */
- dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
- (unsigned long long)sep->shared_bus);
-
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
-
- /* Poll for SEP response */
- ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
-
- end_time = jiffies + (WAIT_TIME * HZ);
-
- while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
- (ret_val != sep->shared_bus))
- ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
-
- /* Check the return value (register) */
- if (ret_val != sep->shared_bus) {
- dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
- dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
- ret_val = -ENOMEM;
- } else
- ret_val = 0;
-
- dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
- return ret_val;
-}
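
The reconfigure handshake is a bounded poll: keep re-reading the GPR until it
echoes the value written, reads all-ones (device gone), or the deadline
passes. A hedged user-space analogue using wall-clock time instead of
jiffies (the stub register is ours):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t fake_gpr = 0x12345678;	/* stub register */
static uint32_t read_gpr(void) { return fake_gpr; }

static int poll_gpr_until(uint32_t want, int timeout_secs)
{
	time_t deadline = time(NULL) + timeout_secs;
	uint32_t val = read_gpr();

	while (time(NULL) < deadline && val != 0xffffffff && val != want)
		val = read_gpr();

	return (val == want) ? 0 : -1;
}

int main(void)
{
	printf("%d\n", poll_gpr_until(0x12345678, 10));	/* 0: matched */
	return 0;
}
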
-
-/* File operation for singleton SEP operations */
-static const struct file_operations singleton_file_operations = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sep_singleton_ioctl,
- .poll = sep_poll,
- .open = sep_singleton_open,
- .release = sep_singleton_release,
- .mmap = sep_mmap,
-};
-
-/* File operation for daemon operations */
-static const struct file_operations daemon_file_operations = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sep_request_daemon_ioctl,
- .poll = sep_request_daemon_poll,
- .open = sep_request_daemon_open,
- .release = sep_request_daemon_release,
- .mmap = sep_request_daemon_mmap,
-};
-
-/* The files operations structure of the driver */
-static const struct file_operations sep_file_operations = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sep_ioctl,
- .poll = sep_poll,
- .open = sep_open,
- .release = sep_release,
- .mmap = sep_mmap,
-};
-
-/**
- * sep_register_driver_with_fs - register misc devices
- * @sep: pointer to struct sep_device
- *
- * This function registers the driver with the file system
- */
-static int sep_register_driver_with_fs(struct sep_device *sep)
-{
- int ret_val;
-
- sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
- sep->miscdev_sep.name = SEP_DEV_NAME;
- sep->miscdev_sep.fops = &sep_file_operations;
-
- sep->miscdev_singleton.minor = MISC_DYNAMIC_MINOR;
- sep->miscdev_singleton.name = SEP_DEV_SINGLETON;
- sep->miscdev_singleton.fops = &singleton_file_operations;
-
- sep->miscdev_daemon.minor = MISC_DYNAMIC_MINOR;
- sep->miscdev_daemon.name = SEP_DEV_DAEMON;
- sep->miscdev_daemon.fops = &daemon_file_operations;
-
- ret_val = misc_register(&sep->miscdev_sep);
- if (ret_val) {
- dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
- ret_val);
- return ret_val;
- }
-
- ret_val = misc_register(&sep->miscdev_singleton);
- if (ret_val) {
- dev_warn(&sep->pdev->dev, "misc reg fails for sing %x\n",
- ret_val);
- misc_deregister(&sep->miscdev_sep);
- return ret_val;
- }
-
- ret_val = misc_register(&sep->miscdev_daemon);
- if (ret_val) {
- dev_warn(&sep->pdev->dev, "misc reg fails for dmn %x\n",
- ret_val);
- misc_deregister(&sep->miscdev_sep);
- misc_deregister(&sep->miscdev_singleton);
-
- return ret_val;
- }
- return ret_val;
-}
-
-
-/**
- * sep_probe - probe a matching PCI device
- * @pdev: pci_device
- * @ent: pci_device_id
- *
- * Attempt to set up and configure a SEP device that has been
- * discovered by the PCI layer.
- */
-static int __devinit sep_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int error = 0;
- struct sep_device *sep;
-
- if (sep_dev != NULL) {
- dev_warn(&pdev->dev, "only one SEP supported.\n");
- return -EBUSY;
- }
-
- /* Enable the device */
- error = pci_enable_device(pdev);
- if (error) {
- dev_warn(&pdev->dev, "error enabling pci device\n");
- goto end_function;
- }
-
- /* Allocate the sep_device structure for this device */
- sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
- if (sep_dev == NULL) {
- dev_warn(&pdev->dev,
- "can't kmalloc the sep_device structure\n");
- error = -ENOMEM;
- goto end_function_disable_device;
- }
-
- /*
- * We're going to use another variable for actually
- * working with the device; this way, if we have
- * multiple devices in the future, it would be easier
- * to make appropriate changes
- */
- sep = sep_dev;
-
- sep->pdev = pci_dev_get(pdev);
-
- init_waitqueue_head(&sep->event);
- init_waitqueue_head(&sep->event_request_daemon);
- spin_lock_init(&sep->snd_rply_lck);
- mutex_init(&sep->sep_mutex);
- mutex_init(&sep->ioctl_mutex);
-
- dev_dbg(&sep->pdev->dev, "sep probe: PCI obtained, device being prepared\n");
- dev_dbg(&sep->pdev->dev, "revision is %d\n", sep->pdev->revision);
-
- /* Set up our register area */
- sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
- if (!sep->reg_physical_addr) {
- dev_warn(&sep->pdev->dev, "Error getting register start\n");
- error = -ENODEV;
- goto end_function_free_sep_dev;
- }
-
- sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
- if (!sep->reg_physical_end) {
- dev_warn(&sep->pdev->dev, "Error getting register end\n");
- error = -ENODEV;
- goto end_function_free_sep_dev;
- }
-
- sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
- (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
- if (!sep->reg_addr) {
- dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
- error = -ENODEV;
- goto end_function_free_sep_dev;
- }
-
- dev_dbg(&sep->pdev->dev,
- "Register area start %llx end %llx virtual %p\n",
- (unsigned long long)sep->reg_physical_addr,
- (unsigned long long)sep->reg_physical_end,
- sep->reg_addr);
-
- /* Allocate the shared area */
- sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
- SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
- SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
-
- if (sep_map_and_alloc_shared_area(sep)) {
- error = -ENOMEM;
- /* Allocation failed */
- goto end_function_error;
- }
-
- /* Clear ICR register */
- sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
-
- /* Set the IMR register - open only GPR 2 */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
-
- /* Read send/receive counters from SEP */
- sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
- sep->reply_ct &= 0x3FFFFFFF;
- sep->send_ct = sep->reply_ct;
-
- /* Get the interrupt line */
- error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
- "sep_driver", sep);
-
- if (error)
- goto end_function_deallocate_sep_shared_area;
-
- /* The new chip requires a shared area reconfigure */
- if (sep->pdev->revision == 4) { /* Only for new chip */
- error = sep_reconfig_shared_area(sep);
- if (error)
- goto end_function_free_irq;
- }
- /* Finally magic up the device nodes */
- /* Register driver with the fs */
- error = sep_register_driver_with_fs(sep);
- if (error == 0)
- /* Success */
- return 0;
-
-end_function_free_irq:
- free_irq(pdev->irq, sep);
-
-end_function_deallocate_sep_shared_area:
- /* De-allocate shared area */
- sep_unmap_and_free_shared_area(sep);
-
-end_function_error:
- iounmap(sep->reg_addr);
-
-end_function_free_sep_dev:
- pci_dev_put(sep_dev->pdev);
- kfree(sep_dev);
- sep_dev = NULL;
-
-end_function_disable_device:
- pci_disable_device(pdev);
-
-end_function:
- return error;
-}
-
-static void sep_remove(struct pci_dev *pdev)
-{
- struct sep_device *sep = sep_dev;
-
- /* Unregister from fs */
- misc_deregister(&sep->miscdev_sep);
- misc_deregister(&sep->miscdev_singleton);
- misc_deregister(&sep->miscdev_daemon);
-
- /* Free the irq */
- free_irq(sep->pdev->irq, sep);
-
- /* Free the shared area */
- sep_unmap_and_free_shared_area(sep_dev);
- iounmap((void *) sep_dev->reg_addr);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MFLD_PCI_DEVICE_ID)},
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
-
-/* Field for registering driver to PCI device */
-static struct pci_driver sep_pci_driver = {
- .name = "sep_sec_driver",
- .id_table = sep_pci_id_tbl,
- .probe = sep_probe,
- .remove = sep_remove
-};
-
-
-/**
- * sep_init - init function
- *
- * Module load time. Register the PCI device driver.
- */
-static int __init sep_init(void)
-{
- return pci_register_driver(&sep_pci_driver);
-}
-
-
-/**
- * sep_exit - called to unload driver
- *
- * Drop the misc devices then remove and unmap the various resources
- * that are not released by the driver remove method.
- */
-static void __exit sep_exit(void)
-{
- pci_unregister_driver(&sep_pci_driver);
-}
-
-
-module_init(sep_init);
-module_exit(sep_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_driver_api.h b/drivers/staging/sep/sep_driver_api.h
index c3aacfcc8ac6..8b797d5388bb 100644
--- a/drivers/staging/sep/sep_driver_api.h
+++ b/drivers/staging/sep/sep_driver_api.h
@@ -2,8 +2,8 @@
*
* sep_driver_api.h - Security Processor Driver api definitions
*
- * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
- * Contributions(c) 2009,2010 Discretix. All rights reserved.
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2011 Discretix. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -26,6 +26,7 @@
* CHANGES:
*
* 2010.09.14 Upgrade to Medfield
+ * 2011.02.22 Enable kernel crypto
*
*/
@@ -37,26 +38,32 @@
#define SEP_DRIVER_SRC_REQ 2
#define SEP_DRIVER_SRC_PRINTF 3
-
-/*-------------------------------------------
- TYPEDEFS
-----------------------------------------------*/
-
-struct alloc_struct {
- /* offset from start of shared pool area */
- u32 offset;
- /* number of bytes to allocate */
- u32 num_bytes;
-};
-
-/* command struct for getting caller id value and address */
-struct caller_id_struct {
- /* pid of the process */
- u32 pid;
- /* virtual address of the caller id hash */
- aligned_u64 callerIdAddress;
- /* caller id hash size in bytes */
- u32 callerIdSizeInBytes;
+/* Power state */
+#define SEP_DRIVER_POWERON 1
+#define SEP_DRIVER_POWEROFF 2
+
+/* Following enums are used only for kernel crypto api */
+enum type_of_request {
+ NO_REQUEST,
+ AES_CBC,
+ AES_ECB,
+ DES_CBC,
+ DES_ECB,
+ DES3_ECB,
+ DES3_CBC,
+ SHA1,
+ MD5,
+ SHA224,
+ SHA256
+ };
+
+enum hash_stage {
+ HASH_INIT,
+ HASH_UPDATE,
+ HASH_FINISH,
+ HASH_DIGEST,
+ HASH_FINUP_DATA,
+ HASH_FINUP_FINISH
};
/*
@@ -83,11 +90,6 @@ struct sep_dcblock {
u8 tail_data[68];
};
-struct sep_caller_id_entry {
- int pid;
- unsigned char callerIdHash[SEP_CALLER_ID_HASH_SIZE_IN_BYTES];
-};
-
/*
 command structure for building dcb block (currently for ext app only)
*/
@@ -104,6 +106,33 @@ struct build_dcb_struct {
/* the size of the block of the operation - if needed,
every table will be modulo this parameter */
u32 tail_block_size;
+
+ /* which application calls the driver DX or applet */
+ u32 is_applet;
+};
+
+/*
+ command structure for building dcb block for kernel crypto
+*/
+struct build_dcb_struct_kernel {
+ /* address value of the data in */
+ void *app_in_address;
+ /* size of data in */
+ ssize_t data_in_size;
+ /* address of the data out */
+ void *app_out_address;
+ /* the size of the block of the operation - if needed,
+ every table will be modulo this parameter */
+ u32 block_size;
+	/* the size of the tail block of the operation, if any */
+ u32 tail_block_size;
+
+ /* which application calls the driver DX or applet */
+ u32 is_applet;
+
+ struct scatterlist *src_sg;
+ struct scatterlist *dst_sg;
};
/**
@@ -147,6 +176,10 @@ struct sep_dma_resource {
/* number of entries of the output mapp array */
u32 out_map_num_entries;
+
+ /* Scatter list for kernel operations */
+ struct scatterlist *src_sg;
+ struct scatterlist *dst_sg;
};
@@ -169,47 +202,201 @@ struct sep_lli_entry {
u32 block_size;
};
-/*----------------------------------------------------------------
- IOCTL command defines
- -----------------------------------------------------------------*/
+/*
+ * header format for each fastcall write operation
+ */
+struct sep_fastcall_hdr {
+ u32 magic;
+ u32 secure_dma;
+ u32 msg_len;
+ u32 num_dcbs;
+};
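
A hedged sketch of filling this header before a fastcall write; SEP_FC_MAGIC
comes from sep_driver_config.h further down, while the length and DCB count
values here are illustrative only, as is the assumption that the header
simply leads the write() payload:

#include <stdint.h>
#include <string.h>

#define SEP_FC_MAGIC	0xFFAACCAA	/* from sep_driver_config.h */

struct sep_fastcall_hdr {
	uint32_t magic;
	uint32_t secure_dma;
	uint32_t msg_len;
	uint32_t num_dcbs;
};

int main(void)
{
	unsigned char buf[256];
	struct sep_fastcall_hdr hdr = {
		.magic      = SEP_FC_MAGIC,
		.secure_dma = 0,	/* ordinary, non-IMR DMA */
		.msg_len    = 64,	/* illustrative message length */
		.num_dcbs   = 1,	/* one data control block follows */
	};

	/* header leads the payload, message bytes would follow */
	memcpy(buf, &hdr, sizeof(hdr));
	return 0;
}
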
-/* magic number 1 of the sep IOCTL command */
-#define SEP_IOC_MAGIC_NUMBER 's'
+/*
+ * structure used in file pointer's private data field
+ * to track the status of the calls to the various
+ * driver interface
+ */
+struct sep_call_status {
+ unsigned long status;
+};
-/* sends interrupt to sep that message is ready */
-#define SEP_IOCSENDSEPCOMMAND \
- _IO(SEP_IOC_MAGIC_NUMBER, 0)
+/*
+ * format of dma context buffer used to store all DMA-related
+ * context information of a particular transaction
+ */
+struct sep_dma_context {
+ /* number of data control blocks */
+ u32 nr_dcb_creat;
+ /* number of the lli tables created in the current transaction */
+ u32 num_lli_tables_created;
+ /* size of currently allocated dma tables region */
+ u32 dmatables_len;
+ /* size of input data */
+ u32 input_data_len;
+	/* secure dma use (for IMR memory restricted area in output) */
+ bool secure_dma;
+ struct sep_dma_resource dma_res_arr[SEP_MAX_NUM_SYNC_DMA_OPS];
+ /* Scatter gather for kernel crypto */
+ struct scatterlist *src_sg;
+ struct scatterlist *dst_sg;
+};
-/* sends interrupt to sep that message is ready */
-#define SEP_IOCSENDSEPRPLYCOMMAND \
- _IO(SEP_IOC_MAGIC_NUMBER, 1)
+/*
+ * format for file pointer's private_data field
+ */
+struct sep_private_data {
+ struct sep_queue_info *my_queue_elem;
+ struct sep_device *device;
+ struct sep_call_status call_status;
+ struct sep_dma_context *dma_ctx;
+};
-/* allocate memory in data pool */
-#define SEP_IOCALLOCDATAPOLL \
- _IOW(SEP_IOC_MAGIC_NUMBER, 2, struct alloc_struct)
-/* free dynamic data allocated during table creation */
-#define SEP_IOCFREEDMATABLEDATA \
- _IO(SEP_IOC_MAGIC_NUMBER, 7)
+/* Functions used by sep_crypto */
-/* get the static pool area addresses (physical and virtual) */
-#define SEP_IOCGETSTATICPOOLADDR \
- _IO(SEP_IOC_MAGIC_NUMBER, 8)
+/**
+ * sep_queue_status_remove - Removes transaction from status queue
+ * @sep: SEP device
+ * @queue_elem: pointer to the status queue element to remove
+ *
+ * This function removes information about a transaction from the queue.
+ */
+void sep_queue_status_remove(struct sep_device *sep,
+ struct sep_queue_info **queue_elem);
+/**
+ * sep_queue_status_add - Adds transaction to status queue
+ * @sep: SEP device
+ * @opcode: transaction opcode
+ * @size: input data size
+ * @pid: pid of current process
+ * @name: current process name
+ * @name_len: length of name (current process)
+ *
+ * This function adds information about a started transaction to the
+ * status queue.
+ */
+struct sep_queue_info *sep_queue_status_add(
+ struct sep_device *sep,
+ u32 opcode,
+ u32 size,
+ u32 pid,
+ u8 *name, size_t name_len);
+
+/**
+ * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
+ * for kernel crypto
+ * @sep: SEP device
+ * @dcb_region: DCB region buf to create for current transaction
+ * @dmatables_region: MLLI/DMA tables buf to create for current transaction
+ * @dma_ctx: DMA context buf to create for current transaction
+ * @user_dcb_args: User arguments for DCB/MLLI creation
+ * @num_dcbs: Number of DCBs to create
+ */
+int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
+ struct sep_dcblock **dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context **dma_ctx,
+ const struct build_dcb_struct_kernel *dcb_data,
+ const u32 num_dcbs);
+
+/**
+ * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
+ * contexts into use
+ * @sep: SEP device
+ * @dcb_region: DCB region copy
+ * @dmatables_region: MLLI/DMA tables copy
+ * @dma_ctx: DMA context for current transaction
+ */
+ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
+ struct sep_dcblock **dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx);
+
+/**
+ * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
+ * @app_in_address: unsigned long; for data buffer in (user space)
+ * @app_out_address: unsigned long; for data buffer out (user space)
+ * @data_in_size: u32; for size of data
+ * @block_size: u32; for block size
+ * @tail_block_size: u32; for size of tail block
+ * @isapplet: bool; to indicate external app
+ * @is_kva: bool; kernel buffer; only used for kernel crypto module
+ * @secure_dma: indicates whether this is secure DMA using the IMR
+ *
+ * This function prepares the linked DMA tables and puts the
+ * address for the linked list of tables into a DCB (data control
+ * block) the address of which is known by the SEP hardware
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
+ */
+int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
+ unsigned long app_in_address,
+ unsigned long app_out_address,
+ u32 data_in_size,
+ u32 block_size,
+ u32 tail_block_size,
+ bool isapplet,
+ bool is_kva,
+ bool secure_dma,
+ struct sep_dcblock *dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context **dma_ctx,
+ struct scatterlist *src_sg,
+ struct scatterlist *dst_sg);
+
+/**
+ * sep_free_dma_table_data_handler - free DMA table
+ * @sep: pointer to struct sep_device
+ * @dma_ctx: dma context
+ *
+ * Handles the request to free DMA table for synchronic actions
+ */
+int sep_free_dma_table_data_handler(struct sep_device *sep,
+ struct sep_dma_context **dma_ctx);
+/**
+ * sep_send_command_handler - kick off a command
+ * @sep: SEP being signalled
+ *
+ * This function raises an interrupt to the SEP to signal that it has
+ * a new command from the host
+ *
+ * Note that this function does fall under the ioctl lock
+ */
+int sep_send_command_handler(struct sep_device *sep);
+
+/**
+ * sep_wait_transaction - Used for synchronizing transactions
+ * @sep: SEP device
+ */
+int sep_wait_transaction(struct sep_device *sep);
+
+/**
+ * IOCTL command defines
+ */
+/* magic number 1 of the sep IOCTL command */
+#define SEP_IOC_MAGIC_NUMBER 's'
+
+/* sends interrupt to sep that message is ready */
+#define SEP_IOCSENDSEPCOMMAND \
+ _IO(SEP_IOC_MAGIC_NUMBER, 0)
/* end transaction command */
#define SEP_IOCENDTRANSACTION \
_IO(SEP_IOC_MAGIC_NUMBER, 15)
-#define SEP_IOCRARPREPAREMESSAGE \
- _IOW(SEP_IOC_MAGIC_NUMBER, 20, struct rar_hndl_to_bus_struct)
-
-#define SEP_IOCTLSETCALLERID \
- _IOW(SEP_IOC_MAGIC_NUMBER, 34, struct caller_id_struct)
-
#define SEP_IOCPREPAREDCB \
_IOW(SEP_IOC_MAGIC_NUMBER, 35, struct build_dcb_struct)
#define SEP_IOCFREEDCB \
_IO(SEP_IOC_MAGIC_NUMBER, 36)
+struct sep_device;
+
+#define SEP_IOCPREPAREDCB_SECURE_DMA \
+ _IOW(SEP_IOC_MAGIC_NUMBER, 38, struct build_dcb_struct)
+
+#define SEP_IOCFREEDCB_SECURE_DMA \
+ _IO(SEP_IOC_MAGIC_NUMBER, 39)
+
#endif
diff --git a/drivers/staging/sep/sep_driver_config.h b/drivers/staging/sep/sep_driver_config.h
index d6bfd2455222..fa7c0d09bfa5 100644
--- a/drivers/staging/sep/sep_driver_config.h
+++ b/drivers/staging/sep/sep_driver_config.h
@@ -2,8 +2,8 @@
*
* sep_driver_config.h - Security Processor Driver configuration
*
- * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
- * Contributions(c) 2009,2010 Discretix. All rights reserved.
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2011 Discretix. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -26,6 +26,7 @@
* CHANGES:
*
* 2010.06.26 Upgrade to Medfield
+ * 2011.02.22 Enable kernel crypto
*
*/
@@ -48,6 +49,8 @@
/* the mode for running on the ARM1172 Evaluation platform (flag is 1) */
#define SEP_DRIVER_ARM_DEBUG_MODE 0
+/* Critical message area contents for sanity checking */
+#define SEP_START_MSG_TOKEN 0x02558808
/*-------------------------------------------
INTERNAL DATA CONFIGURATION
-------------------------------------------*/
@@ -65,21 +68,17 @@
#define SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE 16
 /* flag that signifies that the lock is
-currently held by the process (struct file) */
+currently held by the process (struct file) */
#define SEP_DRIVER_OWN_LOCK_FLAG 1
 /* flag that signifies that the lock is currently NOT
-held by the process (struct file) */
+held by the process (struct file) */
#define SEP_DRIVER_DISOWN_LOCK_FLAG 0
/* indicates whether driver has mapped/unmapped shared area */
#define SEP_REQUEST_DAEMON_MAPPED 1
#define SEP_REQUEST_DAEMON_UNMAPPED 0
-#define SEP_DEV_NAME "sep_sec_driver"
-#define SEP_DEV_SINGLETON "sep_sec_singleton_driver"
-#define SEP_DEV_DAEMON "sep_req_daemon_driver"
-
/*--------------------------------------------------------
SHARED AREA memory total size is 36K
it is divided is following:
@@ -90,7 +89,7 @@ held by the process (struct file) */
}
DATA_POOL_AREA 12K }
- SYNCHRONIC_DMA_TABLES_AREA 5K
+ SYNCHRONIC_DMA_TABLES_AREA 29K
 placeholder until driver changes
FLOW_DMA_TABLES_AREA 4K
@@ -109,6 +108,12 @@ held by the process (struct file) */
/*
+ the minimum length of the message - includes 2 reserved fields
+ at the start, then token, message size and opcode fields. all dwords
+*/
+#define SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES (5*sizeof(u32))
+
+/*
the maximum length of the message - the rest of the message shared
area will be dedicated to the dma lli tables
*/
@@ -124,7 +129,7 @@ held by the process (struct file) */
#define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES (16 * 1024)
/* the size of the message shared area in pages */
-#define SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES (1024 * 5)
+#define SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES (1024 * 29)
/* Placeholder until driver changes */
#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 4)
@@ -132,6 +137,9 @@ held by the process (struct file) */
/* system data (time, caller id etc') pool */
#define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES (1024 * 3)
+/* Offset of the sep printf buffer in the message area */
+#define SEP_DRIVER_PRINTF_OFFSET_IN_BYTES (5888)
+
/* the size in bytes of the time memory */
#define SEP_DRIVER_TIME_MEMORY_SIZE_IN_BYTES 8
@@ -223,10 +231,10 @@ held by the process (struct file) */
#define SEP_ALREADY_INITIALIZED_ERR 12
/* bit that locks access to the shared area */
-#define SEP_MMAP_LOCK_BIT 0
+#define SEP_TRANSACTION_STARTED_LOCK_BIT 0
/* bit that lock access to the poll - after send_command */
-#define SEP_SEND_MSG_LOCK_BIT 1
+#define SEP_WORKING_LOCK_BIT 1
 /* the token that defines the static pool address */
#define SEP_STATIC_POOL_VAL_TOKEN 0xABBAABBA
@@ -240,4 +248,51 @@ held by the process (struct file) */
/* Time limit for SEP to finish */
#define WAIT_TIME 10
+/* Delay for pm runtime suspend (reduces pm thrashing with bursty traffic) */
+#define SUSPEND_DELAY 10
+
+/* Number of delays to wait until scu boots after runtime resume */
+#define SCU_DELAY_MAX 50
+
+/* Delay for each iteration (usec) wait for scu boots after runtime resume */
+#define SCU_DELAY_ITERATION 10
+
+
+/*
+ * Bits used in struct sep_call_status to check that
+ * driver's APIs are called in valid order
+ */
+
+/* Bit offset which indicates status of sep_write() */
+#define SEP_FASTCALL_WRITE_DONE_OFFSET 0
+
+/* Bit offset which indicates status of sep_mmap() */
+#define SEP_LEGACY_MMAP_DONE_OFFSET 1
+
+/* Bit offset which indicates status of the SEP_IOCSENDSEPCOMMAND ioctl */
+#define SEP_LEGACY_SENDMSG_DONE_OFFSET 2
+
+/* Bit offset which indicates status of sep_poll() */
+#define SEP_LEGACY_POLL_DONE_OFFSET 3
+
+/* Bit offset which indicates status of the SEP_IOCENDTRANSACTION ioctl */
+#define SEP_LEGACY_ENDTRANSACTION_DONE_OFFSET 4
+
+/*
+ * Used to limit number of concurrent processes
+ * allowed to allocate dynamic buffers in the fastcall
+ * interface.
+ */
+#define SEP_DOUBLEBUF_USERS_LIMIT 3
+
+/* Identifier for valid fastcall header */
+#define SEP_FC_MAGIC 0xFFAACCAA
+
+/*
+ * Used for enabling driver runtime power management.
+ * Useful for enabling/disabling it during performance
+ * testing
+ */
+#define SEP_ENABLE_RUNTIME_PM
+
#endif /* SEP DRIVER CONFIG */
diff --git a/drivers/staging/sep/sep_driver_hw_defs.h b/drivers/staging/sep/sep_driver_hw_defs.h
index 300f90963de3..a6a448170382 100644
--- a/drivers/staging/sep/sep_driver_hw_defs.h
+++ b/drivers/staging/sep/sep_driver_hw_defs.h
@@ -2,8 +2,8 @@
*
* sep_driver_hw_defs.h - Security Processor Driver hardware definitions
*
- * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
- * Contributions(c) 2009,2010 Discretix. All rights reserved.
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2011 Discretix. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -26,15 +26,13 @@
* CHANGES:
*
* 2010.09.20 Upgrade to Medfield
+ * 2011.02.22 Enable kernel crypto
*
*/
#ifndef SEP_DRIVER_HW_DEFS__H
#define SEP_DRIVER_HW_DEFS__H
-/* PCI ID's */
-#define MFLD_PCI_DEVICE_ID 0x0826
-
/*----------------------- */
/* HW Registers Defines. */
/* */
@@ -42,181 +40,9 @@
/* cf registers */
-#define HW_R0B_ADDR_0_REG_ADDR 0x0000UL
-#define HW_R0B_ADDR_1_REG_ADDR 0x0004UL
-#define HW_R0B_ADDR_2_REG_ADDR 0x0008UL
-#define HW_R0B_ADDR_3_REG_ADDR 0x000cUL
-#define HW_R0B_ADDR_4_REG_ADDR 0x0010UL
-#define HW_R0B_ADDR_5_REG_ADDR 0x0014UL
-#define HW_R0B_ADDR_6_REG_ADDR 0x0018UL
-#define HW_R0B_ADDR_7_REG_ADDR 0x001cUL
-#define HW_R0B_ADDR_8_REG_ADDR 0x0020UL
-#define HW_R2B_ADDR_0_REG_ADDR 0x0080UL
-#define HW_R2B_ADDR_1_REG_ADDR 0x0084UL
-#define HW_R2B_ADDR_2_REG_ADDR 0x0088UL
-#define HW_R2B_ADDR_3_REG_ADDR 0x008cUL
-#define HW_R2B_ADDR_4_REG_ADDR 0x0090UL
-#define HW_R2B_ADDR_5_REG_ADDR 0x0094UL
-#define HW_R2B_ADDR_6_REG_ADDR 0x0098UL
-#define HW_R2B_ADDR_7_REG_ADDR 0x009cUL
-#define HW_R2B_ADDR_8_REG_ADDR 0x00a0UL
-#define HW_R3B_REG_ADDR 0x00C0UL
-#define HW_R4B_REG_ADDR 0x0100UL
-#define HW_CSA_ADDR_0_REG_ADDR 0x0140UL
-#define HW_CSA_ADDR_1_REG_ADDR 0x0144UL
-#define HW_CSA_ADDR_2_REG_ADDR 0x0148UL
-#define HW_CSA_ADDR_3_REG_ADDR 0x014cUL
-#define HW_CSA_ADDR_4_REG_ADDR 0x0150UL
-#define HW_CSA_ADDR_5_REG_ADDR 0x0154UL
-#define HW_CSA_ADDR_6_REG_ADDR 0x0158UL
-#define HW_CSA_ADDR_7_REG_ADDR 0x015cUL
-#define HW_CSA_ADDR_8_REG_ADDR 0x0160UL
-#define HW_CSA_REG_ADDR 0x0140UL
-#define HW_SINB_REG_ADDR 0x0180UL
-#define HW_SOUTB_REG_ADDR 0x0184UL
-#define HW_PKI_CONTROL_REG_ADDR 0x01C0UL
-#define HW_PKI_STATUS_REG_ADDR 0x01C4UL
-#define HW_PKI_BUSY_REG_ADDR 0x01C8UL
-#define HW_PKI_A_1025_REG_ADDR 0x01CCUL
-#define HW_PKI_SDMA_CTL_REG_ADDR 0x01D0UL
-#define HW_PKI_SDMA_OFFSET_REG_ADDR 0x01D4UL
-#define HW_PKI_SDMA_POINTERS_REG_ADDR 0x01D8UL
-#define HW_PKI_SDMA_DLENG_REG_ADDR 0x01DCUL
-#define HW_PKI_SDMA_EXP_POINTERS_REG_ADDR 0x01E0UL
-#define HW_PKI_SDMA_RES_POINTERS_REG_ADDR 0x01E4UL
-#define HW_PKI_CLR_REG_ADDR 0x01E8UL
-#define HW_PKI_SDMA_BUSY_REG_ADDR 0x01E8UL
-#define HW_PKI_SDMA_FIRST_EXP_N_REG_ADDR 0x01ECUL
-#define HW_PKI_SDMA_MUL_BY1_REG_ADDR 0x01F0UL
-#define HW_PKI_SDMA_RMUL_SEL_REG_ADDR 0x01F4UL
-#define HW_DES_KEY_0_REG_ADDR 0x0208UL
-#define HW_DES_KEY_1_REG_ADDR 0x020CUL
-#define HW_DES_KEY_2_REG_ADDR 0x0210UL
-#define HW_DES_KEY_3_REG_ADDR 0x0214UL
-#define HW_DES_KEY_4_REG_ADDR 0x0218UL
-#define HW_DES_KEY_5_REG_ADDR 0x021CUL
-#define HW_DES_CONTROL_0_REG_ADDR 0x0220UL
-#define HW_DES_CONTROL_1_REG_ADDR 0x0224UL
-#define HW_DES_IV_0_REG_ADDR 0x0228UL
-#define HW_DES_IV_1_REG_ADDR 0x022CUL
-#define HW_AES_KEY_0_ADDR_0_REG_ADDR 0x0400UL
-#define HW_AES_KEY_0_ADDR_1_REG_ADDR 0x0404UL
-#define HW_AES_KEY_0_ADDR_2_REG_ADDR 0x0408UL
-#define HW_AES_KEY_0_ADDR_3_REG_ADDR 0x040cUL
-#define HW_AES_KEY_0_ADDR_4_REG_ADDR 0x0410UL
-#define HW_AES_KEY_0_ADDR_5_REG_ADDR 0x0414UL
-#define HW_AES_KEY_0_ADDR_6_REG_ADDR 0x0418UL
-#define HW_AES_KEY_0_ADDR_7_REG_ADDR 0x041cUL
-#define HW_AES_KEY_0_REG_ADDR 0x0400UL
-#define HW_AES_IV_0_ADDR_0_REG_ADDR 0x0440UL
-#define HW_AES_IV_0_ADDR_1_REG_ADDR 0x0444UL
-#define HW_AES_IV_0_ADDR_2_REG_ADDR 0x0448UL
-#define HW_AES_IV_0_ADDR_3_REG_ADDR 0x044cUL
-#define HW_AES_IV_0_REG_ADDR 0x0440UL
-#define HW_AES_CTR1_ADDR_0_REG_ADDR 0x0460UL
-#define HW_AES_CTR1_ADDR_1_REG_ADDR 0x0464UL
-#define HW_AES_CTR1_ADDR_2_REG_ADDR 0x0468UL
-#define HW_AES_CTR1_ADDR_3_REG_ADDR 0x046cUL
-#define HW_AES_CTR1_REG_ADDR 0x0460UL
-#define HW_AES_SK_REG_ADDR 0x0478UL
-#define HW_AES_MAC_OK_REG_ADDR 0x0480UL
-#define HW_AES_PREV_IV_0_ADDR_0_REG_ADDR 0x0490UL
-#define HW_AES_PREV_IV_0_ADDR_1_REG_ADDR 0x0494UL
-#define HW_AES_PREV_IV_0_ADDR_2_REG_ADDR 0x0498UL
-#define HW_AES_PREV_IV_0_ADDR_3_REG_ADDR 0x049cUL
-#define HW_AES_PREV_IV_0_REG_ADDR 0x0490UL
-#define HW_AES_CONTROL_REG_ADDR 0x04C0UL
-#define HW_HASH_H0_REG_ADDR 0x0640UL
-#define HW_HASH_H1_REG_ADDR 0x0644UL
-#define HW_HASH_H2_REG_ADDR 0x0648UL
-#define HW_HASH_H3_REG_ADDR 0x064CUL
-#define HW_HASH_H4_REG_ADDR 0x0650UL
-#define HW_HASH_H5_REG_ADDR 0x0654UL
-#define HW_HASH_H6_REG_ADDR 0x0658UL
-#define HW_HASH_H7_REG_ADDR 0x065CUL
-#define HW_HASH_H8_REG_ADDR 0x0660UL
-#define HW_HASH_H9_REG_ADDR 0x0664UL
-#define HW_HASH_H10_REG_ADDR 0x0668UL
-#define HW_HASH_H11_REG_ADDR 0x066CUL
-#define HW_HASH_H12_REG_ADDR 0x0670UL
-#define HW_HASH_H13_REG_ADDR 0x0674UL
-#define HW_HASH_H14_REG_ADDR 0x0678UL
-#define HW_HASH_H15_REG_ADDR 0x067CUL
-#define HW_HASH_CONTROL_REG_ADDR 0x07C0UL
-#define HW_HASH_PAD_EN_REG_ADDR 0x07C4UL
-#define HW_HASH_PAD_CFG_REG_ADDR 0x07C8UL
-#define HW_HASH_CUR_LEN_0_REG_ADDR 0x07CCUL
-#define HW_HASH_CUR_LEN_1_REG_ADDR 0x07D0UL
-#define HW_HASH_CUR_LEN_2_REG_ADDR 0x07D4UL
-#define HW_HASH_CUR_LEN_3_REG_ADDR 0x07D8UL
-#define HW_HASH_PARAM_REG_ADDR 0x07DCUL
-#define HW_HASH_INT_BUSY_REG_ADDR 0x07E0UL
-#define HW_HASH_SW_RESET_REG_ADDR 0x07E4UL
-#define HW_HASH_ENDIANESS_REG_ADDR 0x07E8UL
-#define HW_HASH_DATA_REG_ADDR 0x07ECUL
-#define HW_DRNG_CONTROL_REG_ADDR 0x0800UL
-#define HW_DRNG_VALID_REG_ADDR 0x0804UL
-#define HW_DRNG_DATA_REG_ADDR 0x0808UL
-#define HW_RND_SRC_EN_REG_ADDR 0x080CUL
-#define HW_AES_CLK_ENABLE_REG_ADDR 0x0810UL
-#define HW_DES_CLK_ENABLE_REG_ADDR 0x0814UL
-#define HW_HASH_CLK_ENABLE_REG_ADDR 0x0818UL
-#define HW_PKI_CLK_ENABLE_REG_ADDR 0x081CUL
-#define HW_CLK_STATUS_REG_ADDR 0x0824UL
-#define HW_CLK_ENABLE_REG_ADDR 0x0828UL
-#define HW_DRNG_SAMPLE_REG_ADDR 0x0850UL
-#define HW_RND_SRC_CTL_REG_ADDR 0x0858UL
-#define HW_CRYPTO_CTL_REG_ADDR 0x0900UL
-#define HW_CRYPTO_STATUS_REG_ADDR 0x090CUL
-#define HW_CRYPTO_BUSY_REG_ADDR 0x0910UL
-#define HW_AES_BUSY_REG_ADDR 0x0914UL
-#define HW_DES_BUSY_REG_ADDR 0x0918UL
-#define HW_HASH_BUSY_REG_ADDR 0x091CUL
-#define HW_CONTENT_REG_ADDR 0x0924UL
-#define HW_VERSION_REG_ADDR 0x0928UL
-#define HW_CONTEXT_ID_REG_ADDR 0x0930UL
-#define HW_DIN_BUFFER_REG_ADDR 0x0C00UL
-#define HW_DIN_MEM_DMA_BUSY_REG_ADDR 0x0c20UL
-#define HW_SRC_LLI_MEM_ADDR_REG_ADDR 0x0c24UL
-#define HW_SRC_LLI_WORD0_REG_ADDR 0x0C28UL
-#define HW_SRC_LLI_WORD1_REG_ADDR 0x0C2CUL
-#define HW_SRAM_SRC_ADDR_REG_ADDR 0x0c30UL
-#define HW_DIN_SRAM_BYTES_LEN_REG_ADDR 0x0c34UL
-#define HW_DIN_SRAM_DMA_BUSY_REG_ADDR 0x0C38UL
-#define HW_WRITE_ALIGN_REG_ADDR 0x0C3CUL
-#define HW_OLD_DATA_REG_ADDR 0x0C48UL
-#define HW_WRITE_ALIGN_LAST_REG_ADDR 0x0C4CUL
-#define HW_DOUT_BUFFER_REG_ADDR 0x0C00UL
-#define HW_DST_LLI_WORD0_REG_ADDR 0x0D28UL
-#define HW_DST_LLI_WORD1_REG_ADDR 0x0D2CUL
-#define HW_DST_LLI_MEM_ADDR_REG_ADDR 0x0D24UL
-#define HW_DOUT_MEM_DMA_BUSY_REG_ADDR 0x0D20UL
-#define HW_SRAM_DEST_ADDR_REG_ADDR 0x0D30UL
-#define HW_DOUT_SRAM_BYTES_LEN_REG_ADDR 0x0D34UL
-#define HW_DOUT_SRAM_DMA_BUSY_REG_ADDR 0x0D38UL
-#define HW_READ_ALIGN_REG_ADDR 0x0D3CUL
-#define HW_READ_LAST_DATA_REG_ADDR 0x0D44UL
-#define HW_RC4_THRU_CPU_REG_ADDR 0x0D4CUL
-#define HW_AHB_SINGLE_REG_ADDR 0x0E00UL
-#define HW_SRAM_DATA_REG_ADDR 0x0F00UL
-#define HW_SRAM_ADDR_REG_ADDR 0x0F04UL
-#define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL
#define HW_HOST_IRR_REG_ADDR 0x0A00UL
#define HW_HOST_IMR_REG_ADDR 0x0A04UL
#define HW_HOST_ICR_REG_ADDR 0x0A08UL
-#define HW_HOST_SEP_SRAM_THRESHOLD_REG_ADDR 0x0A10UL
-#define HW_HOST_SEP_BUSY_REG_ADDR 0x0A14UL
-#define HW_HOST_SEP_LCS_REG_ADDR 0x0A18UL
-#define HW_HOST_CC_SW_RST_REG_ADDR 0x0A40UL
-#define HW_HOST_SEP_SW_RST_REG_ADDR 0x0A44UL
-#define HW_HOST_FLOW_DMA_SW_INT0_REG_ADDR 0x0A80UL
-#define HW_HOST_FLOW_DMA_SW_INT1_REG_ADDR 0x0A84UL
-#define HW_HOST_FLOW_DMA_SW_INT2_REG_ADDR 0x0A88UL
-#define HW_HOST_FLOW_DMA_SW_INT3_REG_ADDR 0x0A8cUL
-#define HW_HOST_FLOW_DMA_SW_INT4_REG_ADDR 0x0A90UL
-#define HW_HOST_FLOW_DMA_SW_INT5_REG_ADDR 0x0A94UL
-#define HW_HOST_FLOW_DMA_SW_INT6_REG_ADDR 0x0A98UL
-#define HW_HOST_FLOW_DMA_SW_INT7_REG_ADDR 0x0A9cUL
#define HW_HOST_SEP_HOST_GPR0_REG_ADDR 0x0B00UL
#define HW_HOST_SEP_HOST_GPR1_REG_ADDR 0x0B04UL
#define HW_HOST_SEP_HOST_GPR2_REG_ADDR 0x0B08UL
@@ -225,9 +51,6 @@
#define HW_HOST_HOST_SEP_GPR1_REG_ADDR 0x0B84UL
#define HW_HOST_HOST_SEP_GPR2_REG_ADDR 0x0B88UL
#define HW_HOST_HOST_SEP_GPR3_REG_ADDR 0x0B8CUL
-#define HW_HOST_HOST_ENDIAN_REG_ADDR 0x0B90UL
-#define HW_HOST_HOST_COMM_CLK_EN_REG_ADDR 0x0B94UL
-#define HW_CLR_SRAM_BUSY_REG_REG_ADDR 0x0F0CUL
-#define HW_CC_SRAM_BASE_ADDRESS 0x5800UL
+#define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL
#endif /* ifndef HW_DEFS */
diff --git a/drivers/staging/sep/sep_main.c b/drivers/staging/sep/sep_main.c
new file mode 100644
index 000000000000..ad54c2e5c932
--- /dev/null
+++ b/drivers/staging/sep/sep_main.c
@@ -0,0 +1,4518 @@
+/*
+ *
+ * sep_main.c - Security Processor Driver main group of functions
+ *
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2011 Discretix. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * CONTACTS:
+ *
+ * Mark Allyn mark.a.allyn@intel.com
+ * Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ * CHANGES:
+ *
+ * 2009.06.26 Initial publish
+ * 2010.09.14 Upgrade to Medfield
+ * 2011.01.21 Move to sep_main.c to allow for sep_crypto.c
+ * 2011.02.22 Enable kernel crypto operation
+ *
+ * Please note that this driver is based on information in the Discretix
+ * CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
+ * Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
+ * Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
+ * Overview and Integration Guide.
+ */
+/* #define DEBUG */
+/* #define SEP_PERF_DEBUG */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/kdev_t.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/ioctl.h>
+#include <asm/current.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/pagemap.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/async.h>
+#include <linux/crypto.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/hash.h>
+
+#include "sep_driver_hw_defs.h"
+#include "sep_driver_config.h"
+#include "sep_driver_api.h"
+#include "sep_dev.h"
+#include "sep_crypto.h"
+
+#define CREATE_TRACE_POINTS
+#include "sep_trace_events.h"
+
+/*
+ * Let's not spend cycles iterating over message
+ * area contents if debugging is not enabled
+ */
+#ifdef DEBUG
+#define sep_dump_message(sep) _sep_dump_message(sep)
+#else
+#define sep_dump_message(sep)
+#endif
+
+/**
+ * Currently, there is only one SEP device per platform.
+ * In the event that future platforms have more than one SEP
+ * device, this will become a linked list.
+ */
+
+struct sep_device *sep_dev;
+
+/**
+ * sep_queue_status_remove - Removes transaction from status queue
+ * @sep: SEP device
+ * @queue_elem: pointer to the queue element to remove
+ *
+ * This function removes information about a transaction from the queue.
+ */
+void sep_queue_status_remove(struct sep_device *sep,
+ struct sep_queue_info **queue_elem)
+{
+ unsigned long lck_flags;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
+ current->pid);
+
+ if (!queue_elem || !(*queue_elem)) {
+ dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
+ current->pid, __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
+ list_del(&(*queue_elem)->list);
+ sep->sep_queue_num--;
+ spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
+
+ kfree(*queue_elem);
+ *queue_elem = NULL;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
+ current->pid);
+ return;
+}
+
+/**
+ * sep_queue_status_add - Adds transaction to status queue
+ * @sep: SEP device
+ * @opcode: transaction opcode
+ * @size: input data size
+ * @pid: pid of current process
+ * @name: current process name
+ * @name_len: length of name (current process)
+ *
+ * This function adds information about a started transaction to the
+ * status queue.
+ */
+struct sep_queue_info *sep_queue_status_add(
+ struct sep_device *sep,
+ u32 opcode,
+ u32 size,
+ u32 pid,
+ u8 *name, size_t name_len)
+{
+ unsigned long lck_flags;
+ struct sep_queue_info *my_elem = NULL;
+
+ my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);
+
+ if (!my_elem)
+ return NULL;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);
+
+ my_elem->data.opcode = opcode;
+ my_elem->data.size = size;
+ my_elem->data.pid = pid;
+
+ if (name_len > TASK_COMM_LEN)
+ name_len = TASK_COMM_LEN;
+
+ memcpy(&my_elem->data.name, name, name_len);
+
+ spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
+
+ list_add_tail(&my_elem->list, &sep->sep_queue_status);
+ sep->sep_queue_num++;
+
+ spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
+
+ return my_elem;
+}
+
+/**
+ * sep_allocate_dmatables_region - Allocates buf for the MLLI/DMA tables
+ * @sep: SEP device
+ * @dmatables_region: Destination pointer for the buffer
+ * @dma_ctx: DMA context for the transaction
+ * @table_count: Number of MLLI/DMA tables to create
+ * The buffer created will not work as-is for DMA operations;
+ * it needs to be copied over to the appropriate place in the
+ * shared area.
+ */
+static int sep_allocate_dmatables_region(struct sep_device *sep,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx,
+ const u32 table_count)
+{
+ const size_t new_len =
+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
+
+ void *tmp_region = NULL;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
+ current->pid, dma_ctx);
+ dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
+ current->pid, dmatables_region);
+
+ if (!dma_ctx || !dmatables_region) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] dma context/region uninitialized\n",
+ current->pid);
+ return -EINVAL;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
+ current->pid, new_len);
+ dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
+ dma_ctx->dmatables_len);
+ tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
+ if (!tmp_region) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] no mem for dma tables region\n",
+ current->pid);
+ return -ENOMEM;
+ }
+
+ /* Were there any previous tables that need to be preserved ? */
+ if (*dmatables_region) {
+ memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
+ kfree(*dmatables_region);
+ *dmatables_region = NULL;
+ }
+
+ *dmatables_region = tmp_region;
+
+ dma_ctx->dmatables_len += new_len;
+
+ return 0;
+}
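+
+/*
+ * Editor's note - an illustrative sketch, not part of the original driver.
+ * The region above grows krealloc-style: every call appends one fixed-size
+ * chunk (new_len) while preserving any tables already built.  A caller
+ * preparing several DCBs would do roughly (hypothetical loop):
+ *
+ *	void *region = NULL;
+ *	int i, error = 0;
+ *
+ *	for (i = 0; i < nr_dcbs && !error; i++)
+ *		error = sep_allocate_dmatables_region(sep, &region,
+ *						      dma_ctx, 1);
+ *
+ * after which dma_ctx->dmatables_len has grown by new_len per call.
+ */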
+
+/**
+ * sep_wait_transaction - Used for synchronizing transactions
+ * @sep: SEP device
+ */
+int sep_wait_transaction(struct sep_device *sep)
+{
+ int error = 0;
+ DEFINE_WAIT(wait);
+
+ if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
+ &sep->in_use_flags)) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] no transactions, returning\n",
+ current->pid);
+ goto end_function_setpid;
+ }
+
+ /*
+ * Looping needed even for exclusive waitq entries
+	 * due to process wakeup latencies; a previously woken
+	 * process might have already created another transaction.
+ */
+ for (;;) {
+ /*
+ * Exclusive waitq entry, so that only one process is
+ * woken up from the queue at a time.
+ */
+ prepare_to_wait_exclusive(&sep->event_transactions,
+ &wait,
+ TASK_INTERRUPTIBLE);
+ if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
+ &sep->in_use_flags)) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] no transactions, breaking\n",
+ current->pid);
+ break;
+ }
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] transactions ongoing, sleeping\n",
+ current->pid);
+ schedule();
+ dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);
+
+ if (signal_pending(current)) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
+ current->pid);
+ error = -EINTR;
+ goto end_function;
+ }
+ }
+end_function_setpid:
+ /*
+ * The pid_doing_transaction indicates that this process
+	 * now owns the facilities to perform a transaction with
+ * the SEP. While this process is performing a transaction,
+ * no other process who has the SEP device open can perform
+ * any transactions. This method allows more than one process
+ * to have the device open at any given time, which provides
+ * finer granularity for device utilization by multiple
+ * processes.
+ */
+ /* Only one process is able to progress here at a time */
+ sep->pid_doing_transaction = current->pid;
+
+end_function:
+ finish_wait(&sep->event_transactions, &wait);
+
+ return error;
+}
+
+/**
+ * sep_check_transaction_owner - Checks if current process owns transaction
+ * @sep: SEP device
+ */
+static inline int sep_check_transaction_owner(struct sep_device *sep)
+{
+ dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
+ current->pid,
+ sep->pid_doing_transaction);
+
+ if ((sep->pid_doing_transaction == 0) ||
+ (current->pid != sep->pid_doing_transaction)) {
+ return -EACCES;
+ }
+
+ /* We own the transaction */
+ return 0;
+}
+
+#ifdef DEBUG
+
+/**
+ * sep_dump_message - dump the message that is pending
+ * @sep: SEP device
+ *
+ * This will only print the dump if DEBUG is set; it also
+ * follows the kernel debug print enabling
+ */
+static void _sep_dump_message(struct sep_device *sep)
+{
+ int count;
+
+ u32 *p = sep->shared_addr;
+
+ for (count = 0; count < 10 * 4; count += 4)
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] Word %d of the message is %x\n",
+ current->pid, count/4, *p++);
+}
+
+#endif
+
+/**
+ * sep_map_and_alloc_shared_area - allocate shared block
+ * @sep: security processor
+ *
+ * Allocates sep->shared_size bytes of coherent memory for the shared area.
+ */
+static int sep_map_and_alloc_shared_area(struct sep_device *sep)
+{
+ sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
+ sep->shared_size,
+ &sep->shared_bus, GFP_KERNEL);
+
+ if (!sep->shared_addr) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] shared memory dma_alloc_coherent failed\n",
+ current->pid);
+ return -ENOMEM;
+ }
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
+ current->pid,
+ sep->shared_size, sep->shared_addr,
+ (unsigned long long)sep->shared_bus);
+ return 0;
+}
+
+/**
+ * sep_unmap_and_free_shared_area - free shared block
+ * @sep: security processor
+ */
+static void sep_unmap_and_free_shared_area(struct sep_device *sep)
+{
+ dma_free_coherent(&sep->pdev->dev, sep->shared_size,
+ sep->shared_addr, sep->shared_bus);
+}
+
+#ifdef DEBUG
+
+/**
+ * sep_shared_bus_to_virt - convert bus/virt addresses
+ * @sep: pointer to struct sep_device
+ * @bus_address: address to convert
+ *
+ * Returns virtual address inside the shared area according
+ * to the bus address.
+ */
+static void *sep_shared_bus_to_virt(struct sep_device *sep,
+ dma_addr_t bus_address)
+{
+ return sep->shared_addr + (bus_address - sep->shared_bus);
+}
+
+#endif
+
+/**
+ * sep_open - device open method
+ * @inode: inode of SEP device
+ * @filp: file handle to SEP device
+ *
+ * Open method for the SEP device. Called when userspace opens
+ * the SEP device node.
+ *
+ * Returns zero on success, otherwise an error code.
+ */
+static int sep_open(struct inode *inode, struct file *filp)
+{
+ struct sep_device *sep;
+ struct sep_private_data *priv;
+
+ dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);
+
+ if (filp->f_flags & O_NONBLOCK)
+ return -ENOTSUPP;
+
+ /*
+ * Get the SEP device structure and use it for the
+ * private_data field in filp for other methods
+ */
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ sep = sep_dev;
+ priv->device = sep;
+ filp->private_data = priv;
+
+ dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
+ current->pid, priv);
+
+ /* Anyone can open; locking takes place at transaction level */
+ return 0;
+}
+
+/**
+ * sep_free_dma_table_data_handler - free DMA table
+ * @sep: pointer to struct sep_device
+ * @dma_ctx: dma context
+ *
+ * Handles the request to free DMA table for synchronic actions
+ */
+int sep_free_dma_table_data_handler(struct sep_device *sep,
+ struct sep_dma_context **dma_ctx)
+{
+ int count;
+ int dcb_counter;
+ /* Pointer to the current dma_resource struct */
+ struct sep_dma_resource *dma;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] sep_free_dma_table_data_handler\n",
+ current->pid);
+
+ if (!dma_ctx || !(*dma_ctx)) {
+ /* No context or context already freed */
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] no DMA context or context already freed\n",
+ current->pid);
+
+ return 0;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
+ current->pid,
+ (*dma_ctx)->nr_dcb_creat);
+
+ for (dcb_counter = 0;
+ dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
+ dma = &(*dma_ctx)->dma_res_arr[dcb_counter];
+
+ /* Unmap and free input map array */
+ if (dma->in_map_array) {
+ for (count = 0; count < dma->in_num_pages; count++) {
+ dma_unmap_page(&sep->pdev->dev,
+ dma->in_map_array[count].dma_addr,
+ dma->in_map_array[count].size,
+ DMA_TO_DEVICE);
+ }
+ kfree(dma->in_map_array);
+ }
+
+		/*
+		 * Output is handled differently. If
+ * this was a secure dma into restricted memory,
+ * then we skip this step altogether as restricted
+ * memory is not available to the o/s at all.
+ */
+ if (((*dma_ctx)->secure_dma == false) &&
+ (dma->out_map_array)) {
+
+ for (count = 0; count < dma->out_num_pages; count++) {
+ dma_unmap_page(&sep->pdev->dev,
+ dma->out_map_array[count].dma_addr,
+ dma->out_map_array[count].size,
+ DMA_FROM_DEVICE);
+ }
+ kfree(dma->out_map_array);
+ }
+
+		/* Free page cache for input */
+ if (dma->in_page_array) {
+ for (count = 0; count < dma->in_num_pages; count++) {
+ flush_dcache_page(dma->in_page_array[count]);
+ page_cache_release(dma->in_page_array[count]);
+ }
+ kfree(dma->in_page_array);
+ }
+
+ /* Again, we do this only for non secure dma */
+ if (((*dma_ctx)->secure_dma == false) &&
+ (dma->out_page_array)) {
+
+ for (count = 0; count < dma->out_num_pages; count++) {
+				if (!PageReserved(dma->out_page_array[count]))
+					SetPageDirty(dma->out_page_array[count]);
+
+ flush_dcache_page(dma->out_page_array[count]);
+ page_cache_release(dma->out_page_array[count]);
+ }
+ kfree(dma->out_page_array);
+ }
+
+		/*
+		 * Note that here we use in_map_num_entries because we
+		 * don't have a page array; the page array is generated
+		 * only in lock_user_pages, which is not called
+		 * for kernel crypto, which is what the sg (scatter
+		 * gather) lists are used for exclusively
+ */
+ if (dma->src_sg) {
+ dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
+ dma->in_map_num_entries, DMA_TO_DEVICE);
+ dma->src_sg = NULL;
+ }
+
+ if (dma->dst_sg) {
+ dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
+ dma->in_map_num_entries, DMA_FROM_DEVICE);
+ dma->dst_sg = NULL;
+ }
+
+ /* Reset all the values */
+ dma->in_page_array = NULL;
+ dma->out_page_array = NULL;
+ dma->in_num_pages = 0;
+ dma->out_num_pages = 0;
+ dma->in_map_array = NULL;
+ dma->out_map_array = NULL;
+ dma->in_map_num_entries = 0;
+ dma->out_map_num_entries = 0;
+ }
+
+ (*dma_ctx)->nr_dcb_creat = 0;
+ (*dma_ctx)->num_lli_tables_created = 0;
+
+ kfree(*dma_ctx);
+ *dma_ctx = NULL;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] sep_free_dma_table_data_handler end\n",
+ current->pid);
+
+ return 0;
+}
+
+/**
+ * sep_end_transaction_handler - end transaction
+ * @sep: pointer to struct sep_device
+ * @dma_ctx: DMA context
+ * @call_status: Call status
+ *
+ * This API handles the end transaction request.
+ */
+static int sep_end_transaction_handler(struct sep_device *sep,
+ struct sep_dma_context **dma_ctx,
+ struct sep_call_status *call_status,
+ struct sep_queue_info **my_queue_elem)
+{
+ dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);
+
+ /*
+ * Extraneous transaction clearing would mess up PM
+ * device usage counters and SEP would get suspended
+ * just before we send a command to SEP in the next
+ * transaction
+	 */
+ if (sep_check_transaction_owner(sep)) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
+ current->pid);
+ return 0;
+ }
+
+ /* Update queue status */
+ sep_queue_status_remove(sep, my_queue_elem);
+
+ /* Check that all the DMA resources were freed */
+ if (dma_ctx)
+ sep_free_dma_table_data_handler(sep, dma_ctx);
+
+ /* Reset call status for next transaction */
+ if (call_status)
+ call_status->status = 0;
+
+ /* Clear the message area to avoid next transaction reading
+ * sensitive results from previous transaction */
+ memset(sep->shared_addr, 0,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ /* start suspend delay */
+#ifdef SEP_ENABLE_RUNTIME_PM
+ if (sep->in_use) {
+ sep->in_use = 0;
+ pm_runtime_mark_last_busy(&sep->pdev->dev);
+ pm_runtime_put_autosuspend(&sep->pdev->dev);
+ }
+#endif
+
+ clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
+ sep->pid_doing_transaction = 0;
+
+ /* Now it's safe for next process to proceed */
+ dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
+ current->pid);
+ clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
+ wake_up(&sep->event_transactions);
+
+ return 0;
+}
+
+
+/**
+ * sep_release - close a SEP device
+ * @inode: inode of SEP device
+ * @filp: file handle being closed
+ *
+ * Called on the final close of a SEP device.
+ */
+static int sep_release(struct inode *inode, struct file *filp)
+{
+ struct sep_private_data * const private_data = filp->private_data;
+ struct sep_call_status *call_status = &private_data->call_status;
+ struct sep_device *sep = private_data->device;
+ struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
+ struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);
+
+ sep_end_transaction_handler(sep, dma_ctx, call_status,
+ my_queue_elem);
+
+ kfree(filp->private_data);
+
+ return 0;
+}
+
+/**
+ * sep_mmap - maps the shared area to user space
+ * @filp: pointer to struct file
+ * @vma: pointer to vm_area_struct
+ *
+ * Called on an mmap of our space via the normal SEP device
+ */
+static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct sep_private_data * const private_data = filp->private_data;
+ struct sep_call_status *call_status = &private_data->call_status;
+ struct sep_device *sep = private_data->device;
+ struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
+ dma_addr_t bus_addr;
+ unsigned long error = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);
+
+ /* Set the transaction busy (own the device) */
+ /*
+ * Problem for multithreaded applications is that here we're
+ * possibly going to sleep while holding a write lock on
+ * current->mm->mmap_sem, which will cause deadlock for ongoing
+ * transaction trying to create DMA tables
+ */
+ error = sep_wait_transaction(sep);
+ if (error)
+ /* Interrupted by signal, don't clear transaction */
+ goto end_function;
+
+ /* Clear the message area to avoid next transaction reading
+ * sensitive results from previous transaction */
+ memset(sep->shared_addr, 0,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ /*
+	 * Check that the size of the mapped range does not exceed
+	 * the size of the message shared area
+ */
+ if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
+ error = -EINVAL;
+ goto end_function_with_error;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
+ current->pid, sep->shared_addr);
+
+ /* Get bus address */
+ bus_addr = sep->shared_bus;
+
+ if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
+		dev_dbg(&sep->pdev->dev, "[PID%d] remap_pfn_range failed\n",
+ current->pid);
+ error = -EAGAIN;
+ goto end_function_with_error;
+ }
+
+ /* Update call status */
+ set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);
+
+ goto end_function;
+
+end_function_with_error:
+ /* Clear our transaction */
+ sep_end_transaction_handler(sep, NULL, call_status,
+ my_queue_elem);
+
+end_function:
+ return error;
+}
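+
+/*
+ * Editor's note - an illustrative userspace sketch (the device node name
+ * is hypothetical), not part of the driver.  A client maps the message
+ * shared area before starting a transaction:
+ *
+ *	int fd = open("/dev/sep", O_RDWR);
+ *	void *msg = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
+ *			 MAP_SHARED, fd, 0);
+ *
+ * where mmap_size must not exceed SEP_DRIVER_MMMAP_AREA_SIZE; as
+ * implemented above, the mmap call also takes transaction ownership.
+ */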
+
+/**
+ * sep_poll - poll handler
+ * @filp: pointer to struct file
+ * @wait: pointer to poll_table
+ *
+ * Called by the OS when the kernel is asked to do a poll on
+ * a SEP file handle.
+ */
+static unsigned int sep_poll(struct file *filp, poll_table *wait)
+{
+ struct sep_private_data * const private_data = filp->private_data;
+ struct sep_call_status *call_status = &private_data->call_status;
+ struct sep_device *sep = private_data->device;
+ u32 mask = 0;
+ u32 retval = 0;
+ u32 retval2 = 0;
+ unsigned long lock_irq_flag;
+
+ /* Am I the process that owns the transaction? */
+ if (sep_check_transaction_owner(sep)) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
+ current->pid);
+ mask = POLLERR;
+ goto end_function;
+ }
+
+ /* Check if send command or send_reply were activated previously */
+ if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
+ &call_status->status)) {
+ dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
+ current->pid);
+ mask = POLLERR;
+ goto end_function;
+ }
+
+
+ /* Add the event to the polling wait table */
+ dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
+ current->pid);
+
+ poll_wait(filp, &sep->event_interrupt, wait);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll: send_ct is %lx reply ct is %lx\n",
+ current->pid, sep->send_ct, sep->reply_ct);
+
+	/* Check if an error occurred during poll */
+ retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ if ((retval2 != 0x0) && (retval2 != 0x8)) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
+ current->pid, retval2);
+ mask |= POLLERR;
+ goto end_function;
+ }
+
+ spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
+
+ if (sep->send_ct == sep->reply_ct) {
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
+ retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll: data ready check (GPR2) %x\n",
+ current->pid, retval);
+
+ /* Check if printf request */
+ if ((retval >> 30) & 0x1) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll: SEP printf request\n",
+ current->pid);
+ goto end_function;
+ }
+
+		/* Check if this is a SEP reply or request */
+ if (retval >> 31) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll: SEP request\n",
+ current->pid);
+ } else {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll: normal return\n",
+ current->pid);
+ sep_dump_message(sep);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
+ current->pid);
+ mask |= POLLIN | POLLRDNORM;
+ }
+ set_bit(SEP_LEGACY_POLL_DONE_OFFSET, &call_status->status);
+ } else {
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll; no reply; returning mask of 0\n",
+ current->pid);
+ mask = 0;
+ }
+
+end_function:
+ return mask;
+}
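+
+/*
+ * Editor's note - an illustrative sketch, not part of the original driver.
+ * sep_poll() above interprets the GPR2 status word as follows: bit 31
+ * distinguishes a SEP-initiated request (1) from a reply to the host (0)
+ * and bit 30 flags a SEP printf request.  Hypothetical decode helpers:
+ */
+static inline bool sep_sketch_gpr2_is_printf(u32 gpr2)
+{
+	return (gpr2 >> 30) & 0x1;	/* bit 30: printf request */
+}
+
+static inline bool sep_sketch_gpr2_is_request(u32 gpr2)
+{
+	return gpr2 >> 31;		/* bit 31: request (1) vs reply (0) */
+}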
+
+/**
+ * sep_time_address - address in SEP memory of time
+ * @sep: SEP device we want the address from
+ *
+ * Return the address of the two dwords in memory used for time
+ * setting.
+ */
+static u32 *sep_time_address(struct sep_device *sep)
+{
+ return sep->shared_addr +
+ SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
+}
+
+/**
+ * sep_set_time - set the SEP time
+ * @sep: the SEP we are setting the time for
+ *
+ * Calculates time and sets it at the predefined address.
+ * Called with the SEP mutex held.
+ */
+static unsigned long sep_set_time(struct sep_device *sep)
+{
+ struct timeval time;
+ u32 *time_addr; /* Address of time as seen by the kernel */
+
+
+ do_gettimeofday(&time);
+
+ /* Set value in the SYSTEM MEMORY offset */
+ time_addr = sep_time_address(sep);
+
+ time_addr[0] = SEP_TIME_VAL_TOKEN;
+ time_addr[1] = time.tv_sec;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
+ current->pid, time.tv_sec);
+ dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
+ current->pid, time_addr);
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
+ current->pid, sep->shared_addr);
+
+ return time.tv_sec;
+}
+
+/**
+ * sep_send_command_handler - kick off a command
+ * @sep: SEP being signalled
+ *
+ * This function raises an interrupt to the SEP, signalling that it has a new
+ * command from the host
+ *
+ * Note that this function does fall under the ioctl lock
+ */
+int sep_send_command_handler(struct sep_device *sep)
+{
+ unsigned long lock_irq_flag;
+ u32 *msg_pool;
+ int error = 0;
+
+ /* Basic sanity check; set msg pool to start of shared area */
+ msg_pool = (u32 *)sep->shared_addr;
+ msg_pool += 2;
+
+ /* Look for start msg token */
+ if (*msg_pool != SEP_START_MSG_TOKEN) {
+ dev_warn(&sep->pdev->dev, "start message token not present\n");
+ error = -EPROTO;
+ goto end_function;
+ }
+
+ /* Do we have a reasonable size? */
+ msg_pool += 1;
+ if ((*msg_pool < 2) ||
+ (*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {
+
+ dev_warn(&sep->pdev->dev, "invalid message size\n");
+ error = -EPROTO;
+ goto end_function;
+ }
+
+ /* Does the command look reasonable? */
+ msg_pool += 1;
+ if (*msg_pool < 2) {
+ dev_warn(&sep->pdev->dev, "invalid message opcode\n");
+ error = -EPROTO;
+ goto end_function;
+ }
+
+#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
+ dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
+ current->pid,
+ sep->pdev->dev.power.runtime_status);
+ sep->in_use = 1; /* device is about to be used */
+ pm_runtime_get_sync(&sep->pdev->dev);
+#endif
+
+ if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
+ error = -EPROTO;
+ goto end_function;
+ }
+ sep->in_use = 1; /* device is about to be used */
+ sep_set_time(sep);
+
+ sep_dump_message(sep);
+
+ /* Update counter */
+ spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
+ sep->send_ct++;
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
+ current->pid, sep->send_ct, sep->reply_ct);
+
+ /* Send interrupt to SEP */
+ sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
+
+end_function:
+ return error;
+}
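+
+/*
+ * Editor's note - an illustrative sketch, not part of the original driver.
+ * The sanity checks above imply this host-to-SEP message layout in the
+ * shared area (32-bit words): words 0-1 are not checked, word 2 holds
+ * SEP_START_MSG_TOKEN, word 3 the total message size in bytes (2 up to
+ * SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES) and word 4 an opcode >= 2.
+ * A hypothetical helper that writes a header passing those checks:
+ */
+static inline void sep_sketch_fill_msg_header(u32 *shared_area,
+					      u32 msg_size, u32 opcode)
+{
+	shared_area[2] = SEP_START_MSG_TOKEN;	/* start token */
+	shared_area[3] = msg_size;		/* total size in bytes */
+	shared_area[4] = opcode;		/* command opcode */
+}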
+
+/**
+ * sep_crypto_dma - DMA map a scatterlist
+ * @sep: pointer to struct sep_device
+ * @sg: pointer to struct scatterlist
+ * @dma_maps: pointer to place a pointer to array of dma maps
+ *	This is filled in; anything previously there will be lost
+ *	The structure for dma maps is sep_dma_map
+ * @direction: DMA data direction
+ * @returns number of dma maps on success; negative on error
+ *
+ * This creates the dma map array from the scatterlist.
+ * It is used only for kernel crypto, as it works with the
+ * scatterlist representation of data buffers
+ *
+ */
+static int sep_crypto_dma(
+ struct sep_device *sep,
+ struct scatterlist *sg,
+ struct sep_dma_map **dma_maps,
+ enum dma_data_direction direction)
+{
+ struct scatterlist *temp_sg;
+
+ u32 count_segment;
+ u32 count_mapped;
+ struct sep_dma_map *sep_dma;
+ int ct1;
+
+ if (sg->length == 0)
+ return 0;
+
+ /* Count the segments */
+ temp_sg = sg;
+ count_segment = 0;
+ while (temp_sg) {
+ count_segment += 1;
+ temp_sg = scatterwalk_sg_next(temp_sg);
+ }
+ dev_dbg(&sep->pdev->dev,
+ "There are (hex) %x segments in sg\n", count_segment);
+
+ /* DMA map segments */
+ count_mapped = dma_map_sg(&sep->pdev->dev, sg,
+ count_segment, direction);
+
+ dev_dbg(&sep->pdev->dev,
+ "There are (hex) %x maps in sg\n", count_mapped);
+
+ if (count_mapped == 0) {
+ dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
+ return -ENOMEM;
+ }
+
+ sep_dma = kmalloc(sizeof(struct sep_dma_map) *
+ count_mapped, GFP_ATOMIC);
+
+ if (sep_dma == NULL) {
+ dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
+ return -ENOMEM;
+ }
+
+ for_each_sg(sg, temp_sg, count_mapped, ct1) {
+ sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
+ sep_dma[ct1].size = sg_dma_len(temp_sg);
+ dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
+ ct1, (unsigned long)sep_dma[ct1].dma_addr,
+ (unsigned long)sep_dma[ct1].size);
+ }
+
+ *dma_maps = sep_dma;
+ return count_mapped;
+
+}
+
+/**
+ * sep_crypto_lli - build an LLI array from a scatterlist
+ * @sep: pointer to struct sep_device
+ * @sg: pointer to struct scatterlist
+ * @maps: pointer to place a pointer to array of dma maps
+ *	This is filled in; anything previously there will be lost
+ *	The structure for dma maps is sep_dma_map
+ * @llis: pointer to place a pointer to array of lli entries
+ *	This is filled in; anything previously there will be lost
+ *	The structure for lli entries is sep_lli_entry
+ * @data_size: total data size
+ * @direction: DMA data direction
+ * @returns number of dma maps on success; negative on error
+ *
+ * This creates the LLI table from the scatterlist.
+ * It is only used for kernel crypto, as it works exclusively
+ * with the scatterlist (struct scatterlist) representation of
+ * data buffers
+ */
+static int sep_crypto_lli(
+ struct sep_device *sep,
+ struct scatterlist *sg,
+ struct sep_dma_map **maps,
+ struct sep_lli_entry **llis,
+ u32 data_size,
+ enum dma_data_direction direction)
+{
+
+ int ct1;
+ struct sep_lli_entry *sep_lli;
+ struct sep_dma_map *sep_map;
+
+ int nbr_ents;
+
+ nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
+ if (nbr_ents <= 0) {
+ dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
+ nbr_ents);
+ return nbr_ents;
+ }
+
+ sep_map = *maps;
+
+ sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC);
+
+ if (sep_lli == NULL) {
+ dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");
+
+ kfree(*maps);
+ *maps = NULL;
+ return -ENOMEM;
+ }
+
+ for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
+ sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;
+
+ /* Maximum for page is total data size */
+ if (sep_map[ct1].size > data_size)
+ sep_map[ct1].size = data_size;
+
+ sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
+ }
+
+ *llis = sep_lli;
+ return nbr_ents;
+}
+
+/**
+ * sep_lock_kernel_pages - map kernel pages for DMA
+ * @sep: pointer to struct sep_device
+ * @kernel_virt_addr: address of data buffer in kernel
+ * @data_size: size of data
+ * @lli_array_ptr: lli array
+ * @in_out_flag: input into device or output from device
+ *
+ * This function locks all the physical pages of the kernel virtual buffer
+ * and constructs a basic lli array, where each entry holds the physical
+ * page address and the size of the data held in that page.
+ * This function is used only during kernel crypto module calls from within
+ * the kernel (when ioctl is not used)
+ *
+ * This is used only for kernel crypto. Kernel pages
+ * are handled differently as they are done via
+ * scatter gather lists (struct scatterlist)
+ */
+static int sep_lock_kernel_pages(struct sep_device *sep,
+ unsigned long kernel_virt_addr,
+ u32 data_size,
+ struct sep_lli_entry **lli_array_ptr,
+ int in_out_flag,
+ struct sep_dma_context *dma_ctx)
+
+{
+ u32 num_pages;
+ struct scatterlist *sg;
+
+ /* Array of lli */
+ struct sep_lli_entry *lli_array;
+ /* Map array */
+ struct sep_dma_map *map_array;
+
+ enum dma_data_direction direction;
+
+ lli_array = NULL;
+ map_array = NULL;
+
+ if (in_out_flag == SEP_DRIVER_IN_FLAG) {
+ direction = DMA_TO_DEVICE;
+ sg = dma_ctx->src_sg;
+ } else {
+ direction = DMA_FROM_DEVICE;
+ sg = dma_ctx->dst_sg;
+ }
+
+ num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
+ data_size, direction);
+
+ if (num_pages <= 0) {
+ dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
+ num_pages);
+ return -ENOMEM;
+ }
+
+ /* Put mapped kernel sg into kernel resource array */
+
+	/* Set output params according to the in_out flag */
+ if (in_out_flag == SEP_DRIVER_IN_FLAG) {
+ *lli_array_ptr = lli_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
+ num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
+ NULL;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
+ map_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
+ num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
+ dma_ctx->src_sg;
+ } else {
+ *lli_array_ptr = lli_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
+ num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
+ NULL;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
+ map_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
+ out_map_num_entries = num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
+ dma_ctx->dst_sg;
+ }
+
+ return 0;
+}
+
+/**
+ * sep_lock_user_pages - lock and map user pages for DMA
+ * @sep: pointer to struct sep_device
+ * @app_virt_addr: user memory data buffer
+ * @data_size: size of data buffer
+ * @lli_array_ptr: lli array
+ * @in_out_flag: input or output to device
+ *
+ * This function locks all the physical pages of the application
+ * virtual buffer and constructs a basic lli array, where each entry
+ * holds the physical page address and the size that the application
+ * data holds in that physical page
+ */
+static int sep_lock_user_pages(struct sep_device *sep,
+ u32 app_virt_addr,
+ u32 data_size,
+ struct sep_lli_entry **lli_array_ptr,
+ int in_out_flag,
+ struct sep_dma_context *dma_ctx)
+
+{
+ int error = 0;
+ u32 count;
+ int result;
+	/* The page of the end address of the user space buffer */
+ u32 end_page;
+ /* The page of the start address of the user space buffer */
+ u32 start_page;
+ /* The range in pages */
+ u32 num_pages;
+ /* Array of pointers to page */
+ struct page **page_array;
+ /* Array of lli */
+ struct sep_lli_entry *lli_array;
+ /* Map array */
+ struct sep_dma_map *map_array;
+
+ /* Set start and end pages and num pages */
+ end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
+ start_page = app_virt_addr >> PAGE_SHIFT;
+ num_pages = end_page - start_page + 1;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lock user pages app_virt_addr is %x\n",
+ current->pid, app_virt_addr);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
+ current->pid, data_size);
+ dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
+ current->pid, start_page);
+ dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
+ current->pid, end_page);
+ dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
+ current->pid, num_pages);
+
+ /* Allocate array of pages structure pointers */
+ page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
+ if (!page_array) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+ map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
+ if (!map_array) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] kmalloc for map_array failed\n",
+ current->pid);
+ error = -ENOMEM;
+ goto end_function_with_error1;
+ }
+
+ lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
+ GFP_ATOMIC);
+
+ if (!lli_array) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] kmalloc for lli_array failed\n",
+ current->pid);
+ error = -ENOMEM;
+ goto end_function_with_error2;
+ }
+
+	/* Convert the application virtual address into physical pages */
+ down_read(&current->mm->mmap_sem);
+ result = get_user_pages(current, current->mm, app_virt_addr,
+ num_pages,
+ ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
+ 0, page_array, NULL);
+
+ up_read(&current->mm->mmap_sem);
+
+ /* Check the number of pages locked - if not all then exit with error */
+ if (result != num_pages) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] not all pages locked by get_user_pages, "
+ "result 0x%X, num_pages 0x%X\n",
+ current->pid, result, num_pages);
+ error = -ENOMEM;
+ goto end_function_with_error3;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
+ current->pid);
+
+ /*
+ * Fill the array using page array data and
+ * map the pages - this action will also flush the cache as needed
+ */
+ for (count = 0; count < num_pages; count++) {
+ /* Fill the map array */
+ map_array[count].dma_addr =
+ dma_map_page(&sep->pdev->dev, page_array[count],
+ 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ map_array[count].size = PAGE_SIZE;
+
+ /* Fill the lli array entry */
+ lli_array[count].bus_address = (u32)map_array[count].dma_addr;
+ lli_array[count].block_size = PAGE_SIZE;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_array[%x].bus_address is %08lx, "
+ "lli_array[%x].block_size is (hex) %x\n", current->pid,
+ count, (unsigned long)lli_array[count].bus_address,
+ count, lli_array[count].block_size);
+ }
+
+ /* Check the offset for the first page */
+ lli_array[0].bus_address =
+ lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
+
+ /* Check that not all the data is in the first page only */
+ if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
+ lli_array[0].block_size = data_size;
+ else
+ lli_array[0].block_size =
+ PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] After check if page 0 has all data\n",
+ current->pid);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_array[0].bus_address is (hex) %08lx, "
+ "lli_array[0].block_size is (hex) %x\n",
+ current->pid,
+ (unsigned long)lli_array[0].bus_address,
+ lli_array[0].block_size);
+
+
+ /* Check the size of the last page */
+ if (num_pages > 1) {
+ lli_array[num_pages - 1].block_size =
+ (app_virt_addr + data_size) & (~PAGE_MASK);
+ if (lli_array[num_pages - 1].block_size == 0)
+ lli_array[num_pages - 1].block_size = PAGE_SIZE;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] After last page size adjustment\n",
+ current->pid);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
+ "lli_array[%x].block_size is (hex) %x\n",
+ current->pid,
+ num_pages - 1,
+ (unsigned long)lli_array[num_pages - 1].bus_address,
+ num_pages - 1,
+ lli_array[num_pages - 1].block_size);
+ }
+
+	/* Set output params according to the in_out flag */
+ if (in_out_flag == SEP_DRIVER_IN_FLAG) {
+ *lli_array_ptr = lli_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
+ num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
+ page_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
+ map_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
+ num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
+ } else {
+ *lli_array_ptr = lli_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
+ num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
+ page_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
+ map_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
+ out_map_num_entries = num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
+ }
+ goto end_function;
+
+end_function_with_error3:
+ /* Free lli array */
+ kfree(lli_array);
+
+end_function_with_error2:
+ kfree(map_array);
+
+end_function_with_error1:
+ /* Free page array */
+ kfree(page_array);
+
+end_function:
+ return error;
+}
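+
+/*
+ * Editor's note - a worked example (illustrative only) of the first/last
+ * page adjustments above, assuming PAGE_SIZE 0x1000, app_virt_addr
+ * 0x10200 and data_size 0x2400:
+ *
+ *	start_page = 0x10, end_page = 0x12, num_pages = 3
+ *	lli_array[0]: bus address of page 0x10 plus offset 0x200,
+ *		      block_size 0xE00 (remainder of the first page)
+ *	lli_array[1]: block_size 0x1000 (a full middle page)
+ *	lli_array[2]: block_size 0x600 ((0x10200 + 0x2400) & ~PAGE_MASK)
+ *
+ * 0xE00 + 0x1000 + 0x600 = 0x2400, the original data_size.
+ */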
+
+/**
+ * sep_lli_table_secure_dma - get lli array for IMR addresses
+ * @sep: pointer to struct sep_device
+ * @app_virt_addr: user memory data buffer
+ * @data_size: size of data buffer
+ * @lli_array_ptr: lli array
+ * @in_out_flag: not used
+ * @dma_ctx: pointer to struct sep_dma_context
+ *
+ * This function creates lli tables for outputting data to
+ * IMR memory, which is memory that cannot be accessed by the
+ * x86 processor.
+ */
+static int sep_lli_table_secure_dma(struct sep_device *sep,
+ u32 app_virt_addr,
+ u32 data_size,
+ struct sep_lli_entry **lli_array_ptr,
+ int in_out_flag,
+ struct sep_dma_context *dma_ctx)
+
+{
+ int error = 0;
+ u32 count;
+	/* The page of the end address of the user space buffer */
+ u32 end_page;
+ /* The page of the start address of the user space buffer */
+ u32 start_page;
+ /* The range in pages */
+ u32 num_pages;
+ /* Array of lli */
+ struct sep_lli_entry *lli_array;
+
+ /* Set start and end pages and num pages */
+ end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
+ start_page = app_virt_addr >> PAGE_SHIFT;
+ num_pages = end_page - start_page + 1;
+
+	dev_dbg(&sep->pdev->dev,
+		"[PID%d] lock user pages app_virt_addr is %x\n",
+		current->pid, app_virt_addr);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
+ current->pid, data_size);
+ dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
+ current->pid, start_page);
+ dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
+ current->pid, end_page);
+ dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
+ current->pid, num_pages);
+
+ lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
+ GFP_ATOMIC);
+
+ if (!lli_array) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] kmalloc for lli_array failed\n",
+ current->pid);
+ return -ENOMEM;
+ }
+
+ /*
+ * Fill the lli_array
+ */
+ start_page = start_page << PAGE_SHIFT;
+ for (count = 0; count < num_pages; count++) {
+ /* Fill the lli array entry */
+ lli_array[count].bus_address = start_page;
+ lli_array[count].block_size = PAGE_SIZE;
+
+ start_page += PAGE_SIZE;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_array[%x].bus_address is %08lx, "
+ "lli_array[%x].block_size is (hex) %x\n",
+ current->pid,
+ count, (unsigned long)lli_array[count].bus_address,
+ count, lli_array[count].block_size);
+ }
+
+ /* Check the offset for the first page */
+ lli_array[0].bus_address =
+ lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
+
+ /* Check that not all the data is in the first page only */
+ if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
+ lli_array[0].block_size = data_size;
+ else
+ lli_array[0].block_size =
+ PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] After check if page 0 has all data\n"
+ "lli_array[0].bus_address is (hex) %08lx, "
+ "lli_array[0].block_size is (hex) %x\n",
+ current->pid,
+ (unsigned long)lli_array[0].bus_address,
+ lli_array[0].block_size);
+
+ /* Check the size of the last page */
+ if (num_pages > 1) {
+ lli_array[num_pages - 1].block_size =
+ (app_virt_addr + data_size) & (~PAGE_MASK);
+ if (lli_array[num_pages - 1].block_size == 0)
+ lli_array[num_pages - 1].block_size = PAGE_SIZE;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] After last page size adjustment\n"
+ "lli_array[%x].bus_address is (hex) %08lx, "
+ "lli_array[%x].block_size is (hex) %x\n",
+ current->pid, num_pages - 1,
+ (unsigned long)lli_array[num_pages - 1].bus_address,
+ num_pages - 1,
+ lli_array[num_pages - 1].block_size);
+ }
+ *lli_array_ptr = lli_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;
+
+ return error;
+}
+
+/**
+ * sep_calculate_lli_table_max_size - size the LLI table
+ * @sep: pointer to struct sep_device
+ * @lli_in_array_ptr: pointer to the input lli array
+ * @num_array_entries: number of entries in the lli array
+ * @last_table_flag: set when this is the last table to build
+ *
+ * This function calculates the size of data that can be inserted into
+ * the lli table from this array, such that either the table is full
+ * (all entries are entered), or there are no more entries in the
+ * lli array
+ */
+static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
+ struct sep_lli_entry *lli_in_array_ptr,
+ u32 num_array_entries,
+ u32 *last_table_flag)
+{
+ u32 counter;
+ /* Table data size */
+ u32 table_data_size = 0;
+ /* Data size for the next table */
+ u32 next_table_data_size;
+
+ *last_table_flag = 0;
+
+ /*
+ * Calculate the data in the out lli table till we fill the whole
+ * table or till the data has ended
+ */
+ for (counter = 0;
+ (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
+ (counter < num_array_entries); counter++)
+ table_data_size += lli_in_array_ptr[counter].block_size;
+
+ /*
+ * Check if we reached the last entry,
+	 * meaning this is the last table to build,
+ * and no need to check the block alignment
+ */
+ if (counter == num_array_entries) {
+ /* Set the last table flag */
+ *last_table_flag = 1;
+ goto end_function;
+ }
+
+ /*
+ * Calculate the data size of the next table.
+	 * Stop if no entries are left or if the data size exceeds
+	 * the DMA restriction
+ */
+ next_table_data_size = 0;
+ for (; counter < num_array_entries; counter++) {
+ next_table_data_size += lli_in_array_ptr[counter].block_size;
+ if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
+ break;
+ }
+
+ /*
+	 * Check if the next table data size is less than the DMA restriction.
+	 * If it is, recalculate the current table size, so that the next
+	 * table data size will be adequate for DMA
+ */
+ if (next_table_data_size &&
+ next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
+
+ table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
+ next_table_data_size);
+
+end_function:
+ return table_data_size;
+}
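+
+/*
+ * Editor's note - a worked example (illustrative only; the minimum used
+ * here is made up): assume SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE were 16
+ * and a full table holds 100 bytes while only 10 bytes of entries
+ * remain.  Since 10 < 16, the current table is trimmed by 16 - 10 = 6
+ * bytes to 94, so the next (last) table carries 16 bytes and meets the
+ * DMA restriction.
+ */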
+
+/**
+ * sep_build_lli_table - build an lli array for the given table
+ * @sep: pointer to struct sep_device
+ * @lli_array_ptr: pointer to lli array
+ * @lli_table_ptr: pointer to lli table
+ * @num_processed_entries_ptr: pointer to number of array entries processed
+ * @num_table_entries_ptr: pointer to number of entries in the built table
+ * @table_data_size: total data size
+ *
+ * Builds an lli table from the lli_array according to
+ * the given size of data
+ */
+static void sep_build_lli_table(struct sep_device *sep,
+ struct sep_lli_entry *lli_array_ptr,
+ struct sep_lli_entry *lli_table_ptr,
+ u32 *num_processed_entries_ptr,
+ u32 *num_table_entries_ptr,
+ u32 table_data_size)
+{
+ /* Current table data size */
+ u32 curr_table_data_size;
+ /* Counter of lli array entry */
+ u32 array_counter;
+
+ /* Init current table data size and lli array entry counter */
+ curr_table_data_size = 0;
+ array_counter = 0;
+ *num_table_entries_ptr = 1;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] build lli table table_data_size: (hex) %x\n",
+ current->pid, table_data_size);
+
+ /* Fill the table till table size reaches the needed amount */
+ while (curr_table_data_size < table_data_size) {
+ /* Update the number of entries in table */
+ (*num_table_entries_ptr)++;
+
+ lli_table_ptr->bus_address =
+ cpu_to_le32(lli_array_ptr[array_counter].bus_address);
+
+ lli_table_ptr->block_size =
+ cpu_to_le32(lli_array_ptr[array_counter].block_size);
+
+ curr_table_data_size += lli_array_ptr[array_counter].block_size;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_table_ptr is %p\n",
+ current->pid, lli_table_ptr);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_table_ptr->bus_address: %08lx\n",
+ current->pid,
+ (unsigned long)lli_table_ptr->bus_address);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
+ current->pid, lli_table_ptr->block_size);
+
+ /* Check for overflow of the table data */
+ if (curr_table_data_size > table_data_size) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] curr_table_data_size too large\n",
+ current->pid);
+
+ /* Update the size of block in the table */
+ lli_table_ptr->block_size =
+ cpu_to_le32(lli_table_ptr->block_size) -
+ (curr_table_data_size - table_data_size);
+
+ /* Update the physical address in the lli array */
+ lli_array_ptr[array_counter].bus_address +=
+ cpu_to_le32(lli_table_ptr->block_size);
+
+ /* Update the block size left in the lli array */
+ lli_array_ptr[array_counter].block_size =
+ (curr_table_data_size - table_data_size);
+ } else
+ /* Advance to the next entry in the lli_array */
+ array_counter++;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_table_ptr->bus_address is %08lx\n",
+ current->pid,
+ (unsigned long)lli_table_ptr->bus_address);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
+ current->pid,
+ lli_table_ptr->block_size);
+
+ /* Move to the next entry in table */
+ lli_table_ptr++;
+ }
+
+ /* Set the info entry to default */
+ lli_table_ptr->bus_address = 0xffffffff;
+ lli_table_ptr->block_size = 0;
+
+ /* Set the output parameter */
+ *num_processed_entries_ptr += array_counter;
+
+}
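+
+/*
+ * Editor's note - a worked example (illustrative only) of the overflow
+ * handling above: with table_data_size 0x1800 and two lli array entries
+ * of 0x1000 each, the second entry overshoots by 0x800.  Its copy in the
+ * table is trimmed to 0x800, while the array entry is advanced by 0x800
+ * and keeps block_size 0x800 for the next table.  The info entry
+ * (bus_address 0xffffffff, block_size 0) then terminates the table.
+ */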
+
+/**
+ * sep_shared_area_virt_to_bus - map shared area to bus address
+ * @sep: pointer to struct sep_device
+ * @virt_address: virtual address to convert
+ *
+ * This function returns the bus address inside the shared area according
+ * to the virtual address. It can be either on the external RAM device
+ * (ioremapped) or on the system RAM.
+ * This implementation is for the external RAM
+ */
+static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
+ void *virt_address)
+{
+ dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
+ current->pid, virt_address);
+ dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
+ current->pid,
+ (unsigned long)
+ sep->shared_bus + (virt_address - sep->shared_addr));
+
+ return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
+}
+
+/**
+ * sep_shared_area_bus_to_virt - map shared area bus address to kernel
+ * @sep: pointer to struct sep_device
+ * @bus_address: bus address to convert
+ *
+ * This function returns the virtual address inside the shared area
+ * according to the bus address. It can be either on the
+ * external RAM device (ioremapped) or on the system RAM.
+ * This implementation is for the external RAM
+ */
+static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
+ dma_addr_t bus_address)
+{
+ dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
+ current->pid,
+ (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
+ (size_t)(bus_address - sep->shared_bus)));
+
+ return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
+}
+
+/**
+ * sep_debug_print_lli_tables - dump LLI table
+ * @sep: pointer to struct sep_device
+ * @lli_table_ptr: pointer to sep_lli_entry
+ * @num_table_entries: number of entries
+ * @table_data_size: total data size
+ *
+ * Walk the list of created tables and print all the data
+ */
+static void sep_debug_print_lli_tables(struct sep_device *sep,
+ struct sep_lli_entry *lli_table_ptr,
+ unsigned long num_table_entries,
+ unsigned long table_data_size)
+{
+#ifdef DEBUG
+ unsigned long table_count = 1;
+ unsigned long entries_count = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
+ current->pid);
+ if (num_table_entries == 0) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
+ current->pid);
+ return;
+ }
+
+ while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli table %08lx, "
+ "table_data_size is (hex) %lx\n",
+ current->pid, table_count, table_data_size);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] num_table_entries is (hex) %lx\n",
+ current->pid, num_table_entries);
+
+ /* Print entries of the table (without info entry) */
+ for (entries_count = 0; entries_count < num_table_entries;
+ entries_count++, lli_table_ptr++) {
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_table_ptr address is %08lx\n",
+ current->pid,
+ (unsigned long) lli_table_ptr);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] phys address is %08lx "
+ "block size is (hex) %x\n", current->pid,
+ (unsigned long)lli_table_ptr->bus_address,
+ lli_table_ptr->block_size);
+ }
+
+ /* Point to the info entry */
+ lli_table_ptr--;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] phys lli_table_ptr->block_size "
+ "is (hex) %x\n",
+ current->pid,
+ lli_table_ptr->block_size);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] phys lli_table_ptr->physical_address "
+ "is %08lx\n",
+ current->pid,
+ (unsigned long)lli_table_ptr->bus_address);
+
+
+ table_data_size = lli_table_ptr->block_size & 0xffffff;
+ num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] phys table_data_size is "
+ "(hex) %lx num_table_entries is"
+ " %lx bus_address is%lx\n",
+ current->pid,
+ table_data_size,
+ num_table_entries,
+ (unsigned long)lli_table_ptr->bus_address);
+
+ if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
+ lli_table_ptr = (struct sep_lli_entry *)
+ sep_shared_bus_to_virt(sep,
+ (unsigned long)lli_table_ptr->bus_address);
+
+ table_count++;
+ }
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
+ current->pid);
+#endif
+}
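+
+/*
+ * Editor's note - an illustrative sketch, not part of the original driver.
+ * As the walk above shows, the info (last) entry of each table packs the
+ * next table's data size into the low 24 bits of block_size and its
+ * entry count into the high 8 bits, while bus_address points at the next
+ * table (0xffffffff terminates the chain).  Hypothetical decode helpers:
+ */
+static inline u32 sep_sketch_info_data_size(struct sep_lli_entry *info)
+{
+	return info->block_size & 0xffffff;	/* low 24 bits */
+}
+
+static inline u32 sep_sketch_info_num_entries(struct sep_lli_entry *info)
+{
+	return (info->block_size >> 24) & 0xff;	/* high 8 bits */
+}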
+
+
+/**
+ * sep_prepare_empty_lli_table - create a blank LLI table
+ * @sep: pointer to struct sep_device
+ * @lli_table_addr_ptr: pointer to lli table
+ * @num_entries_ptr: pointer to number of entries
+ * @table_data_size_ptr: pointer to table data size
+ * @dmatables_region: Optional buffer for DMA tables
+ * @dma_ctx: DMA context
+ *
+ * This function creates empty lli tables when there is no data
+ */
+static void sep_prepare_empty_lli_table(struct sep_device *sep,
+ dma_addr_t *lli_table_addr_ptr,
+ u32 *num_entries_ptr,
+ u32 *table_data_size_ptr,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx)
+{
+ struct sep_lli_entry *lli_table_ptr;
+
+ /* Find the area for new table */
+ lli_table_ptr =
+ (struct sep_lli_entry *)(sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
+ if (dmatables_region && *dmatables_region)
+ lli_table_ptr = *dmatables_region;
+
+ lli_table_ptr->bus_address = 0;
+ lli_table_ptr->block_size = 0;
+
+ lli_table_ptr++;
+ lli_table_ptr->bus_address = 0xFFFFFFFF;
+ lli_table_ptr->block_size = 0;
+
+ /* Set the output parameter value */
+ *lli_table_addr_ptr = sep->shared_bus +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ dma_ctx->num_lli_tables_created *
+ sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* Set the num of entries and table data size for empty table */
+ *num_entries_ptr = 2;
+ *table_data_size_ptr = 0;
+
+ /* Update the number of created tables */
+ dma_ctx->num_lli_tables_created++;
+}
+
+/**
+ * sep_prepare_input_dma_table - prepare input DMA mappings
+ * @sep: pointer to struct sep_device
+ * @app_virt_addr: virtual address of the data buffer
+ * @data_size: size of the data
+ * @block_size: block size of the operation
+ * @lli_table_ptr: returned bus address of the first input DMA table
+ * @num_entries_ptr: returned number of entries in the first table
+ * @table_data_size_ptr: returned data size of the first table
+ * @is_kva: set for kernel data (kernel crypto call)
+ * @dmatables_region: optional buffer for DMA tables
+ * @dma_ctx: DMA context
+ *
+ * This function prepares only the input DMA table for synchronous
+ * symmetric operations (HASH)
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
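+ *
+ * Hypothetical usage sketch (variable names are illustrative only):
+ *
+ *	dma_addr_t first_table;
+ *	u32 nents, tsize;
+ *	err = sep_prepare_input_dma_table(sep, uaddr, len, block_size,
+ *			&first_table, &nents, &tsize, false,
+ *			NULL, dma_ctx);
+ *
+ * On success, first_table holds the 32 bit bus address handed to the
+ * SEP; chained tables are reachable through each table's info entry.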
+ */
+static int sep_prepare_input_dma_table(struct sep_device *sep,
+ unsigned long app_virt_addr,
+ u32 data_size,
+ u32 block_size,
+ dma_addr_t *lli_table_ptr,
+ u32 *num_entries_ptr,
+ u32 *table_data_size_ptr,
+ bool is_kva,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx
+)
+{
+ int error = 0;
+ /* Pointer to the info entry of the table - the last entry */
+ struct sep_lli_entry *info_entry_ptr;
+ /* Array of pointers to page */
+ struct sep_lli_entry *lli_array_ptr;
+ /* Points to the first entry to be processed in the lli_in_array */
+ u32 current_entry = 0;
+ /* Num entries in the virtual buffer */
+ u32 sep_lli_entries = 0;
+ /* Lli table pointer */
+ struct sep_lli_entry *in_lli_table_ptr;
+ /* The total data in one table */
+ u32 table_data_size = 0;
+ /* Flag for last table */
+ u32 last_table_flag = 0;
+ /* Number of entries in lli table */
+ u32 num_entries_in_table = 0;
+ /* Next table address */
+ void *lli_table_alloc_addr = NULL;
+ void *dma_lli_table_alloc_addr = NULL;
+ void *dma_in_lli_table_ptr = NULL;
+
+	dev_dbg(&sep->pdev->dev, "[PID%d] prepare input dma "
+		"tbl data size: (hex) %x\n",
+ current->pid, data_size);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
+ current->pid, block_size);
+
+ /* Initialize the pages pointers */
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;
+
+ /* Set the kernel address for first table to be allocated */
+ lli_table_alloc_addr = (void *)(sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
+ if (data_size == 0) {
+ if (dmatables_region) {
+ error = sep_allocate_dmatables_region(sep,
+ dmatables_region,
+ dma_ctx,
+ 1);
+ if (error)
+ return error;
+ }
+		/* Special case - create empty table - 2 entries, zero data */
+ sep_prepare_empty_lli_table(sep, lli_table_ptr,
+ num_entries_ptr, table_data_size_ptr,
+ dmatables_region, dma_ctx);
+ goto update_dcb_counter;
+ }
+
+ /* Check if the pages are in Kernel Virtual Address layout */
+ if (is_kva == true)
+ error = sep_lock_kernel_pages(sep, app_virt_addr,
+ data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
+ dma_ctx);
+ else
+ /*
+ * Lock the pages of the user buffer
+ * and translate them to pages
+ */
+ error = sep_lock_user_pages(sep, app_virt_addr,
+ data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
+ dma_ctx);
+
+ if (error)
+ goto end_function;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output sep_in_num_pages is (hex) %x\n",
+ current->pid,
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
+
+ current_entry = 0;
+ info_entry_ptr = NULL;
+
+ sep_lli_entries =
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;
+
+ dma_lli_table_alloc_addr = lli_table_alloc_addr;
+ if (dmatables_region) {
+ error = sep_allocate_dmatables_region(sep,
+ dmatables_region,
+ dma_ctx,
+ sep_lli_entries);
+ if (error)
+ return error;
+ lli_table_alloc_addr = *dmatables_region;
+ }
+
+	/* Loop until all the entries in the input array are processed */
+ while (current_entry < sep_lli_entries) {
+
+ /* Set the new input and output tables */
+ in_lli_table_ptr =
+ (struct sep_lli_entry *)lli_table_alloc_addr;
+ dma_in_lli_table_ptr =
+ (struct sep_lli_entry *)dma_lli_table_alloc_addr;
+
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+ dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ if (dma_lli_table_alloc_addr >
+ ((void *)sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
+
+ error = -ENOMEM;
+ goto end_function_error;
+
+ }
+
+ /* Update the number of created tables */
+ dma_ctx->num_lli_tables_created++;
+
+ /* Calculate the maximum size of data for input table */
+ table_data_size = sep_calculate_lli_table_max_size(sep,
+ &lli_array_ptr[current_entry],
+ (sep_lli_entries - current_entry),
+ &last_table_flag);
+
+ /*
+ * If this is not the last table -
+		 * then align it to the block size
+ */
+ if (!last_table_flag)
+ table_data_size =
+ (table_data_size / block_size) * block_size;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output table_data_size is (hex) %x\n",
+ current->pid,
+ table_data_size);
+
+ /* Construct input lli table */
+ sep_build_lli_table(sep, &lli_array_ptr[current_entry],
+ in_lli_table_ptr,
+ &current_entry, &num_entries_in_table, table_data_size);
+
+ if (info_entry_ptr == NULL) {
+
+ /* Set the output parameters to physical addresses */
+ *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
+ dma_in_lli_table_ptr);
+ *num_entries_ptr = num_entries_in_table;
+ *table_data_size_ptr = table_data_size;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output lli_table_in_ptr is %08lx\n",
+ current->pid,
+ (unsigned long)*lli_table_ptr);
+
+ } else {
+ /* Update the info entry of the previous in table */
+ info_entry_ptr->bus_address =
+ sep_shared_area_virt_to_bus(sep,
+ dma_in_lli_table_ptr);
+ info_entry_ptr->block_size =
+ ((num_entries_in_table) << 24) |
+ (table_data_size);
+ }
+ /* Save the pointer to the info entry of the current tables */
+ info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
+ }
+ /* Print input tables */
+ if (!dmatables_region) {
+ sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
+ sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
+ *num_entries_ptr, *table_data_size_ptr);
+ }
+
+ /* The array of the pages */
+ kfree(lli_array_ptr);
+
+update_dcb_counter:
+ /* Update DCB counter */
+ dma_ctx->nr_dcb_creat++;
+ goto end_function;
+
+end_function_error:
+ /* Free all the allocated resources */
+ kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
+ kfree(lli_array_ptr);
+ kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
+
+end_function:
+ return error;
+
+}
+
+/**
+ * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
+ * @sep: pointer to struct sep_device
+ * @lli_in_array: array of input LLI entries
+ * @sep_in_lli_entries: number of entries in lli_in_array
+ * @lli_out_array: array of output LLI entries
+ * @sep_out_lli_entries: number of entries in lli_out_array
+ * @block_size: block size of the operation
+ * @lli_table_in_ptr: returned bus address of the first input DMA table
+ * @lli_table_out_ptr: returned bus address of the first output DMA table
+ * @in_num_entries_ptr: returned number of entries in the first input table
+ * @out_num_entries_ptr: returned number of entries in the first output table
+ * @table_data_size_ptr: returned data size of the first tables
+ * @dmatables_region: optional buffer for DMA tables
+ * @dma_ctx: DMA context
+ *
+ * This function creates the input and output DMA tables for
+ * symmetric operations (AES/DES) according to the block
+ * size from LLI arrays
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
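+ *
+ * Each iteration builds one input and one output table covering the
+ * same amount of data: the per-table size is the smaller of the two
+ * candidate sizes, rounded down to a whole number of blocks unless
+ * this is the last table.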
+ */
+static int sep_construct_dma_tables_from_lli(
+ struct sep_device *sep,
+ struct sep_lli_entry *lli_in_array,
+ u32 sep_in_lli_entries,
+ struct sep_lli_entry *lli_out_array,
+ u32 sep_out_lli_entries,
+ u32 block_size,
+ dma_addr_t *lli_table_in_ptr,
+ dma_addr_t *lli_table_out_ptr,
+ u32 *in_num_entries_ptr,
+ u32 *out_num_entries_ptr,
+ u32 *table_data_size_ptr,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx)
+{
+ /* Points to the area where next lli table can be allocated */
+ void *lli_table_alloc_addr = NULL;
+ /*
+ * Points to the area in shared region where next lli table
+ * can be allocated
+ */
+ void *dma_lli_table_alloc_addr = NULL;
+ /* Input lli table in dmatables_region or shared region */
+ struct sep_lli_entry *in_lli_table_ptr = NULL;
+ /* Input lli table location in the shared region */
+ struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
+ /* Output lli table in dmatables_region or shared region */
+ struct sep_lli_entry *out_lli_table_ptr = NULL;
+ /* Output lli table location in the shared region */
+ struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
+ /* Pointer to the info entry of the table - the last entry */
+ struct sep_lli_entry *info_in_entry_ptr = NULL;
+ /* Pointer to the info entry of the table - the last entry */
+ struct sep_lli_entry *info_out_entry_ptr = NULL;
+ /* Points to the first entry to be processed in the lli_in_array */
+ u32 current_in_entry = 0;
+ /* Points to the first entry to be processed in the lli_out_array */
+ u32 current_out_entry = 0;
+ /* Max size of the input table */
+ u32 in_table_data_size = 0;
+ /* Max size of the output table */
+ u32 out_table_data_size = 0;
+	/* Flag that signifies if this is the last table built */
+ u32 last_table_flag = 0;
+ /* The data size that should be in table */
+ u32 table_data_size = 0;
+	/* Number of entries in the input table */
+ u32 num_entries_in_table = 0;
+	/* Number of entries in the output table */
+ u32 num_entries_out_table = 0;
+
+ if (!dma_ctx) {
+ dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
+ return -EINVAL;
+ }
+
+	/* Initialize the pointer to just after the message area */
+ lli_table_alloc_addr = (void *)(sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ (dma_ctx->num_lli_tables_created *
+ (sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
+ dma_lli_table_alloc_addr = lli_table_alloc_addr;
+
+ if (dmatables_region) {
+ /* 2 for both in+out table */
+ if (sep_allocate_dmatables_region(sep,
+ dmatables_region,
+ dma_ctx,
+ 2*sep_in_lli_entries))
+ return -ENOMEM;
+ lli_table_alloc_addr = *dmatables_region;
+ }
+
+	/* Loop until all the entries in the input array are processed */
+ while (current_in_entry < sep_in_lli_entries) {
+ /* Set the new input and output tables */
+ in_lli_table_ptr =
+ (struct sep_lli_entry *)lli_table_alloc_addr;
+ dma_in_lli_table_ptr =
+ (struct sep_lli_entry *)dma_lli_table_alloc_addr;
+
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+ dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* Set the first output tables */
+ out_lli_table_ptr =
+ (struct sep_lli_entry *)lli_table_alloc_addr;
+ dma_out_lli_table_ptr =
+ (struct sep_lli_entry *)dma_lli_table_alloc_addr;
+
+ /* Check if the DMA table area limit was overrun */
+ if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
+ ((void *)sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
+
+ dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
+ return -ENOMEM;
+ }
+
+ /* Update the number of the lli tables created */
+ dma_ctx->num_lli_tables_created += 2;
+
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+ dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* Calculate the maximum size of data for input table */
+ in_table_data_size =
+ sep_calculate_lli_table_max_size(sep,
+ &lli_in_array[current_in_entry],
+ (sep_in_lli_entries - current_in_entry),
+ &last_table_flag);
+
+ /* Calculate the maximum size of data for output table */
+ out_table_data_size =
+ sep_calculate_lli_table_max_size(sep,
+ &lli_out_array[current_out_entry],
+ (sep_out_lli_entries - current_out_entry),
+ &last_table_flag);
+
+ if (!last_table_flag) {
+ in_table_data_size = (in_table_data_size /
+ block_size) * block_size;
+ out_table_data_size = (out_table_data_size /
+ block_size) * block_size;
+ }
+
+ table_data_size = in_table_data_size;
+ if (table_data_size > out_table_data_size)
+ table_data_size = out_table_data_size;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] construct tables from lli"
+ " in_table_data_size is (hex) %x\n", current->pid,
+ in_table_data_size);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] construct tables from lli"
+			" out_table_data_size is (hex) %x\n", current->pid,
+ out_table_data_size);
+
+ /* Construct input lli table */
+ sep_build_lli_table(sep, &lli_in_array[current_in_entry],
+ in_lli_table_ptr,
+ &current_in_entry,
+ &num_entries_in_table,
+ table_data_size);
+
+ /* Construct output lli table */
+ sep_build_lli_table(sep, &lli_out_array[current_out_entry],
+ out_lli_table_ptr,
+ &current_out_entry,
+ &num_entries_out_table,
+ table_data_size);
+
+ /* If info entry is null - this is the first table built */
+ if (info_in_entry_ptr == NULL) {
+ /* Set the output parameters to physical addresses */
+ *lli_table_in_ptr =
+ sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);
+
+ *in_num_entries_ptr = num_entries_in_table;
+
+ *lli_table_out_ptr =
+ sep_shared_area_virt_to_bus(sep,
+ dma_out_lli_table_ptr);
+
+ *out_num_entries_ptr = num_entries_out_table;
+ *table_data_size_ptr = table_data_size;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output lli_table_in_ptr is %08lx\n",
+ current->pid,
+ (unsigned long)*lli_table_in_ptr);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output lli_table_out_ptr is %08lx\n",
+ current->pid,
+ (unsigned long)*lli_table_out_ptr);
+ } else {
+ /* Update the info entry of the previous in table */
+ info_in_entry_ptr->bus_address =
+ sep_shared_area_virt_to_bus(sep,
+ dma_in_lli_table_ptr);
+
+ info_in_entry_ptr->block_size =
+ ((num_entries_in_table) << 24) |
+ (table_data_size);
+
+			/* Update the info entry of the previous out table */
+ info_out_entry_ptr->bus_address =
+ sep_shared_area_virt_to_bus(sep,
+ dma_out_lli_table_ptr);
+
+ info_out_entry_ptr->block_size =
+ ((num_entries_out_table) << 24) |
+ (table_data_size);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
+ current->pid,
+ (unsigned long)info_in_entry_ptr->bus_address,
+ info_in_entry_ptr->block_size);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output lli_table_out_ptr:"
+ "%08lx %08x\n",
+ current->pid,
+ (unsigned long)info_out_entry_ptr->bus_address,
+ info_out_entry_ptr->block_size);
+ }
+
+ /* Save the pointer to the info entry of the current tables */
+ info_in_entry_ptr = in_lli_table_ptr +
+ num_entries_in_table - 1;
+ info_out_entry_ptr = out_lli_table_ptr +
+ num_entries_out_table - 1;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output num_entries_out_table is %x\n",
+ current->pid,
+ (u32)num_entries_out_table);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output info_in_entry_ptr is %lx\n",
+ current->pid,
+ (unsigned long)info_in_entry_ptr);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output info_out_entry_ptr is %lx\n",
+ current->pid,
+ (unsigned long)info_out_entry_ptr);
+ }
+
+ /* Print input tables */
+ if (!dmatables_region) {
+ sep_debug_print_lli_tables(
+ sep,
+ (struct sep_lli_entry *)
+ sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
+ *in_num_entries_ptr,
+ *table_data_size_ptr);
+ }
+
+ /* Print output tables */
+ if (!dmatables_region) {
+ sep_debug_print_lli_tables(
+ sep,
+ (struct sep_lli_entry *)
+ sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
+ *out_num_entries_ptr,
+ *table_data_size_ptr);
+ }
+
+ return 0;
+}
+
+/**
+ * sep_prepare_input_output_dma_table - prepare DMA I/O table
+ * @sep: pointer to struct sep_device
+ * @app_virt_in_addr: virtual address of the input buffer
+ * @app_virt_out_addr: virtual address of the output buffer
+ * @data_size: size of the data
+ * @block_size: block size of the operation
+ * @lli_table_in_ptr: returned bus address of the first input DMA table
+ * @lli_table_out_ptr: returned bus address of the first output DMA table
+ * @in_num_entries_ptr: returned number of entries in the first input table
+ * @out_num_entries_ptr: returned number of entries in the first output table
+ * @table_data_size_ptr: returned data size of the first tables
+ * @is_kva: set for kernel data; used only for kernel crypto module
+ * @dmatables_region: optional buffer for DMA tables
+ * @dma_ctx: DMA context
+ *
+ * This function builds input and output DMA tables for synchronous
+ * symmetric operations (AES, DES, HASH). It also ensures that each
+ * table's data size is a multiple of the block size
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
+ */
+static int sep_prepare_input_output_dma_table(struct sep_device *sep,
+ unsigned long app_virt_in_addr,
+ unsigned long app_virt_out_addr,
+ u32 data_size,
+ u32 block_size,
+ dma_addr_t *lli_table_in_ptr,
+ dma_addr_t *lli_table_out_ptr,
+ u32 *in_num_entries_ptr,
+ u32 *out_num_entries_ptr,
+ u32 *table_data_size_ptr,
+ bool is_kva,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx)
+
+{
+ int error = 0;
+ /* Array of pointers of page */
+ struct sep_lli_entry *lli_in_array;
+ /* Array of pointers of page */
+ struct sep_lli_entry *lli_out_array;
+
+ if (!dma_ctx) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ if (data_size == 0) {
+ /* Prepare empty table for input and output */
+ if (dmatables_region) {
+ error = sep_allocate_dmatables_region(
+ sep,
+ dmatables_region,
+ dma_ctx,
+ 2);
+ if (error)
+ goto end_function;
+ }
+ sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
+ in_num_entries_ptr, table_data_size_ptr,
+ dmatables_region, dma_ctx);
+
+ sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
+ out_num_entries_ptr, table_data_size_ptr,
+ dmatables_region, dma_ctx);
+
+ goto update_dcb_counter;
+ }
+
+ /* Initialize the pages pointers */
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
+
+ /* Lock the pages of the buffer and translate them to pages */
+ if (is_kva == true) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
+ current->pid);
+ error = sep_lock_kernel_pages(sep, app_virt_in_addr,
+ data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
+ dma_ctx);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] sep_lock_kernel_pages for input "
+ "virtual buffer failed\n", current->pid);
+
+ goto end_function;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
+ current->pid);
+ error = sep_lock_kernel_pages(sep, app_virt_out_addr,
+ data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
+ dma_ctx);
+
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] sep_lock_kernel_pages for output "
+ "virtual buffer failed\n", current->pid);
+
+ goto end_function_free_lli_in;
+ }
+
+	} else {
+ dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
+ current->pid);
+ error = sep_lock_user_pages(sep, app_virt_in_addr,
+ data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
+ dma_ctx);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] sep_lock_user_pages for input "
+ "virtual buffer failed\n", current->pid);
+
+ goto end_function;
+ }
+
+ if (dma_ctx->secure_dma == true) {
+			/* secure_dma requires use of non-accessible memory */
+ dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
+ current->pid);
+ error = sep_lli_table_secure_dma(sep,
+ app_virt_out_addr, data_size, &lli_out_array,
+ SEP_DRIVER_OUT_FLAG, dma_ctx);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] secure dma table setup "
+ " for output virtual buffer failed\n",
+ current->pid);
+
+ goto end_function_free_lli_in;
+ }
+ } else {
+ /* For normal, non-secure dma */
+ dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
+ current->pid);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] Locking user output pages\n",
+ current->pid);
+
+ error = sep_lock_user_pages(sep, app_virt_out_addr,
+ data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
+ dma_ctx);
+
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] sep_lock_user_pages"
+ " for output virtual buffer failed\n",
+ current->pid);
+
+ goto end_function_free_lli_in;
+ }
+ }
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] After lock; prep input output dma "
+ "table sep_in_num_pages is (hex) %x\n", current->pid,
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
+ current->pid,
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP"
+ " is (hex) %x\n", current->pid,
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
+	/* Call the function that creates the tables from the lli arrays */
+ dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
+ current->pid);
+ error = sep_construct_dma_tables_from_lli(
+ sep, lli_in_array,
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
+ in_num_pages,
+ lli_out_array,
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
+ out_num_pages,
+ block_size, lli_table_in_ptr, lli_table_out_ptr,
+ in_num_entries_ptr, out_num_entries_ptr,
+ table_data_size_ptr, dmatables_region, dma_ctx);
+
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] sep_construct_dma_tables_from_lli failed\n",
+ current->pid);
+ goto end_function_with_error;
+ }
+
+ kfree(lli_out_array);
+ kfree(lli_in_array);
+
+update_dcb_counter:
+ /* Update DCB counter */
+ dma_ctx->nr_dcb_creat++;
+
+ goto end_function;
+
+end_function_with_error:
+ kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
+ kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
+ kfree(lli_out_array);
+
+
+end_function_free_lli_in:
+ kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
+ kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
+ kfree(lli_in_array);
+
+end_function:
+
+ return error;
+
+}
+
+/**
+ * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
+ * @sep: pointer to struct sep_device
+ * @app_in_address: unsigned long; for data buffer in (user space)
+ * @app_out_address: unsigned long; for data buffer out (user space)
+ * @data_in_size: u32; for size of data
+ * @block_size: u32; for block size
+ * @tail_block_size: u32; for size of tail block
+ * @isapplet: bool; to indicate external app
+ * @is_kva: bool; kernel buffer; only used for kernel crypto module
+ * @secure_dma: indicates whether this is secure_dma using IMR
+ * @dcb_region: DCB region copy
+ * @dmatables_region: MLLI/DMA tables copy
+ * @dma_ctx: DMA context for the transaction
+ * @src_sg: source scatterlist (kernel crypto only)
+ * @dst_sg: destination scatterlist (kernel crypto only)
+ *
+ * This function prepares the linked DMA tables and puts the
+ * address for the linked list of tables into a DCB (data control
+ * block) the address of which is known by the SEP hardware
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
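+ *
+ * For applet calls, any tail of data_in_size that is not a whole
+ * number of blocks is copied into the DCB's tail_data and removed
+ * from the DMA size, so the tables themselves always describe
+ * block-aligned data.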
+ */
+int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
+ unsigned long app_in_address,
+ unsigned long app_out_address,
+ u32 data_in_size,
+ u32 block_size,
+ u32 tail_block_size,
+ bool isapplet,
+ bool is_kva,
+ bool secure_dma,
+ struct sep_dcblock *dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context **dma_ctx,
+ struct scatterlist *src_sg,
+ struct scatterlist *dst_sg)
+{
+ int error = 0;
+ /* Size of tail */
+ u32 tail_size = 0;
+ /* Address of the created DCB table */
+ struct sep_dcblock *dcb_table_ptr = NULL;
+ /* The physical address of the first input DMA table */
+ dma_addr_t in_first_mlli_address = 0;
+ /* Number of entries in the first input DMA table */
+ u32 in_first_num_entries = 0;
+ /* The physical address of the first output DMA table */
+ dma_addr_t out_first_mlli_address = 0;
+ /* Number of entries in the first output DMA table */
+ u32 out_first_num_entries = 0;
+ /* Data in the first input/output table */
+ u32 first_data_size = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
+ current->pid, app_in_address);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
+ current->pid, app_out_address);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
+ current->pid, data_in_size);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
+ current->pid, block_size);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
+ current->pid, tail_block_size);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
+ current->pid, isapplet);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
+ current->pid, is_kva);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
+ current->pid, src_sg);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
+ current->pid, dst_sg);
+
+ if (!dma_ctx) {
+ dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
+ current->pid);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ if (*dma_ctx) {
+ /* In case there are multiple DCBs for this transaction */
+ dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
+ current->pid);
+ } else {
+ *dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
+ if (!(*dma_ctx)) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] Not enough memory for DMA context\n",
+ current->pid);
+ error = -ENOMEM;
+ goto end_function;
+ }
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] Created DMA context addr at 0x%p\n",
+ current->pid, *dma_ctx);
+ }
+
+ (*dma_ctx)->secure_dma = secure_dma;
+
+ /* these are for kernel crypto only */
+ (*dma_ctx)->src_sg = src_sg;
+ (*dma_ctx)->dst_sg = dst_sg;
+
+ if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
+ /* No more DCBs to allocate */
+ dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
+ current->pid);
+ error = -ENOSPC;
+ goto end_function_error;
+ }
+
+ /* Allocate new DCB */
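+	/*
+	 * If the caller supplied a thread-local DCB copy (fastcall
+	 * path), fill that in; otherwise write directly into this
+	 * DCB's slot in the shared area.
+	 */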
+ if (dcb_region) {
+ dcb_table_ptr = dcb_region;
+ } else {
+ dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
+ SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
+ ((*dma_ctx)->nr_dcb_creat *
+ sizeof(struct sep_dcblock)));
+ }
+
+ /* Set the default values in the DCB */
+ dcb_table_ptr->input_mlli_address = 0;
+ dcb_table_ptr->input_mlli_num_entries = 0;
+ dcb_table_ptr->input_mlli_data_size = 0;
+ dcb_table_ptr->output_mlli_address = 0;
+ dcb_table_ptr->output_mlli_num_entries = 0;
+ dcb_table_ptr->output_mlli_data_size = 0;
+ dcb_table_ptr->tail_data_size = 0;
+ dcb_table_ptr->out_vr_tail_pt = 0;
+
+ if (isapplet == true) {
+
+ /* Check if there is enough data for DMA operation */
+ if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
+ if (is_kva == true) {
+ error = -ENODEV;
+ goto end_function_error;
+ } else {
+ if (copy_from_user(dcb_table_ptr->tail_data,
+ (void __user *)app_in_address,
+ data_in_size)) {
+ error = -EFAULT;
+ goto end_function_error;
+ }
+ }
+
+ dcb_table_ptr->tail_data_size = data_in_size;
+
+ /* Set the output user-space address for mem2mem op */
+ if (app_out_address)
+ dcb_table_ptr->out_vr_tail_pt =
+ (aligned_u64)app_out_address;
+
+ /*
+ * Update both data length parameters in order to avoid
+ * second data copy and allow building of empty mlli
+ * tables
+ */
+ tail_size = 0x0;
+ data_in_size = 0x0;
+
+ } else {
+ if (!app_out_address) {
+ tail_size = data_in_size % block_size;
+ if (!tail_size) {
+ if (tail_block_size == block_size)
+ tail_size = block_size;
+ }
+ } else {
+ tail_size = 0;
+ }
+ }
+ if (tail_size) {
+ if (tail_size > sizeof(dcb_table_ptr->tail_data))
+ return -EINVAL;
+ if (is_kva == true) {
+ error = -ENODEV;
+ goto end_function_error;
+ } else {
+ /* We have tail data - copy it to DCB */
+ if (copy_from_user(dcb_table_ptr->tail_data,
+ (void __user *)(app_in_address +
+ data_in_size - tail_size), tail_size)) {
+ error = -EFAULT;
+ goto end_function_error;
+ }
+ }
+ if (app_out_address)
+ /*
+ * Calculate the output address
+ * according to tail data size
+ */
+ dcb_table_ptr->out_vr_tail_pt =
+ (aligned_u64)app_out_address +
+ data_in_size - tail_size;
+
+ /* Save the real tail data size */
+ dcb_table_ptr->tail_data_size = tail_size;
+ /*
+ * Update the data size without the tail
+ * data size AKA data for the dma
+ */
+ data_in_size = (data_in_size - tail_size);
+ }
+ }
+ /* Check if we need to build only input table or input/output */
+ if (app_out_address) {
+ /* Prepare input/output tables */
+ error = sep_prepare_input_output_dma_table(sep,
+ app_in_address,
+ app_out_address,
+ data_in_size,
+ block_size,
+ &in_first_mlli_address,
+ &out_first_mlli_address,
+ &in_first_num_entries,
+ &out_first_num_entries,
+ &first_data_size,
+ is_kva,
+ dmatables_region,
+ *dma_ctx);
+ } else {
+ /* Prepare input tables */
+ error = sep_prepare_input_dma_table(sep,
+ app_in_address,
+ data_in_size,
+ block_size,
+ &in_first_mlli_address,
+ &in_first_num_entries,
+ &first_data_size,
+ is_kva,
+ dmatables_region,
+ *dma_ctx);
+ }
+
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "prepare DMA table call failed "
+ "from prepare DCB call\n");
+ goto end_function_error;
+ }
+
+ /* Set the DCB values */
+ dcb_table_ptr->input_mlli_address = in_first_mlli_address;
+ dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
+ dcb_table_ptr->input_mlli_data_size = first_data_size;
+ dcb_table_ptr->output_mlli_address = out_first_mlli_address;
+ dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
+ dcb_table_ptr->output_mlli_data_size = first_data_size;
+
+ goto end_function;
+
+end_function_error:
+ kfree(*dma_ctx);
+ *dma_ctx = NULL;
+
+end_function:
+ return error;
+
+}
+
+
+/**
+ * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
+ * @sep: pointer to struct sep_device
+ * @isapplet: indicates an external applet call
+ * @is_kva: indicates kernel addresses (only used for kernel crypto)
+ * @dma_ctx: DMA context to free
+ *
+ * This function frees the DMA tables and DCB
+ */
+static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
+ bool is_kva, struct sep_dma_context **dma_ctx)
+{
+ struct sep_dcblock *dcb_table_ptr;
+ unsigned long pt_hold;
+ void *tail_pt;
+
+ int i = 0;
+ int error = 0;
+ int error_temp = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
+ current->pid);
+
+	if (dma_ctx && *dma_ctx &&
+	    ((*dma_ctx)->secure_dma == false) && (isapplet == true)) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
+ current->pid);
+
+ /* Tail stuff is only for non secure_dma */
+ /* Set pointer to first DCB table */
+ dcb_table_ptr = (struct sep_dcblock *)
+ (sep->shared_addr +
+ SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
+
+		/*
+ * Go over each DCB and see if
+ * tail pointer must be updated
+ */
+ for (i = 0; dma_ctx && *dma_ctx &&
+ i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) {
+ if (dcb_table_ptr->out_vr_tail_pt) {
+ pt_hold = (unsigned long)dcb_table_ptr->
+ out_vr_tail_pt;
+ tail_pt = (void *)pt_hold;
+ if (is_kva == true) {
+ error = -ENODEV;
+ break;
+ } else {
+ error_temp = copy_to_user(
+ (void __user *)tail_pt,
+ dcb_table_ptr->tail_data,
+ dcb_table_ptr->tail_data_size);
+ }
+ if (error_temp) {
+ /* Release the DMA resource */
+ error = -EFAULT;
+ break;
+ }
+ }
+ }
+ }
+
+ /* Free the output pages, if any */
+ sep_free_dma_table_data_handler(sep, dma_ctx);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
+ current->pid);
+
+ return error;
+}
+
+/**
+ * sep_prepare_dcb_handler - prepare a control block
+ * @sep: pointer to struct sep_device
+ * @arg: pointer to user parameters
+ * @secure_dma: indicate whether we are using secure_dma on IMR
+ * @dma_ctx: DMA context
+ *
+ * This function retrieves the DCB build parameters from user space
+ * and prepares the input/output DMA tables accordingly in a new DCB.
+ */
+static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
+ bool secure_dma,
+ struct sep_dma_context **dma_ctx)
+{
+ int error;
+ /* Command arguments */
+	struct build_dcb_struct command_args;
+
+ /* Get the command arguments */
+ if (copy_from_user(&command_args, (void __user *)arg,
+ sizeof(struct build_dcb_struct))) {
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] prep dcb handler app_in_address is %08llx\n",
+ current->pid, command_args.app_in_address);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] app_out_address is %08llx\n",
+ current->pid, command_args.app_out_address);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] data_size is %x\n",
+ current->pid, command_args.data_in_size);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] block_size is %x\n",
+ current->pid, command_args.block_size);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] tail block_size is %x\n",
+ current->pid, command_args.tail_block_size);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] is_applet is %x\n",
+ current->pid, command_args.is_applet);
+
+ if (!command_args.app_in_address) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] null app_in_address\n", current->pid);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ error = sep_prepare_input_output_dma_table_in_dcb(sep,
+ (unsigned long)command_args.app_in_address,
+ (unsigned long)command_args.app_out_address,
+ command_args.data_in_size, command_args.block_size,
+ command_args.tail_block_size,
+ command_args.is_applet, false,
+ secure_dma, NULL, NULL, dma_ctx, NULL, NULL);
+
+end_function:
+ return error;
+
+}
+
+/**
+ * sep_free_dcb_handler - free control block resources
+ * @sep: pointer to struct sep_device
+ * @dma_ctx: DMA context to free
+ *
+ * This function frees the DCB resources and updates the needed
+ * user-space buffers.
+ */
+static int sep_free_dcb_handler(struct sep_device *sep,
+ struct sep_dma_context **dma_ctx)
+{
+ if (!dma_ctx || !(*dma_ctx)) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] no dma context defined, nothing to free\n",
+ current->pid);
+ return -EINVAL;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
+ current->pid,
+ (*dma_ctx)->nr_dcb_creat);
+
+ return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
+}
+
+/**
+ * sep_ioctl - ioctl handler for sep device
+ * @filp: pointer to struct file
+ * @cmd: command
+ * @arg: pointer to argument structure
+ *
+ * Implement the ioctl methods available on the SEP device.
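+ *
+ * The checks below imply the expected call order: mmap() the shared
+ * area first, prepare DCBs before sending the message, then send the
+ * command and end the transaction.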
+ */
+static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct sep_private_data * const private_data = filp->private_data;
+ struct sep_call_status *call_status = &private_data->call_status;
+ struct sep_device *sep = private_data->device;
+ struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
+ struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
+ int error = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
+ current->pid, cmd);
+ dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
+ current->pid, *dma_ctx);
+
+ /* Make sure we own this device */
+ error = sep_check_transaction_owner(sep);
+ if (error) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
+ current->pid);
+ goto end_function;
+ }
+
+ /* Check that sep_mmap has been called before */
+ if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
+ &call_status->status)) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] mmap not called\n", current->pid);
+ error = -EPROTO;
+ goto end_function;
+ }
+
+ /* Check that the command is for SEP device */
+ if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
+ error = -ENOTTY;
+ goto end_function;
+ }
+
+ switch (cmd) {
+ case SEP_IOCSENDSEPCOMMAND:
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
+ current->pid);
+ if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
+ &call_status->status)) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] send msg already done\n",
+ current->pid);
+ error = -EPROTO;
+ goto end_function;
+ }
+ /* Send command to SEP */
+ error = sep_send_command_handler(sep);
+ if (!error)
+ set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
+ &call_status->status);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
+ current->pid);
+ break;
+ case SEP_IOCENDTRANSACTION:
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCENDTRANSACTION start\n",
+ current->pid);
+ error = sep_end_transaction_handler(sep, dma_ctx, call_status,
+ my_queue_elem);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCENDTRANSACTION end\n",
+ current->pid);
+ break;
+ case SEP_IOCPREPAREDCB:
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCPREPAREDCB start\n",
+ current->pid);
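+		/* Fall through - shared handling below selects secure_dma */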
+ case SEP_IOCPREPAREDCB_SECURE_DMA:
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
+ current->pid);
+ if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
+ &call_status->status)) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] dcb prep needed before send msg\n",
+ current->pid);
+ error = -EPROTO;
+ goto end_function;
+ }
+
+ if (!arg) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] dcb null arg\n", current->pid);
+			error = -EINVAL;
+ goto end_function;
+ }
+
+ if (cmd == SEP_IOCPREPAREDCB) {
+ /* No secure dma */
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
+ current->pid);
+
+ error = sep_prepare_dcb_handler(sep, arg, false,
+ dma_ctx);
+ } else {
+ /* Secure dma */
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOC_POC (with secure_dma)\n",
+ current->pid);
+
+ error = sep_prepare_dcb_handler(sep, arg, true,
+ dma_ctx);
+ }
+ dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
+ current->pid);
+ break;
+ case SEP_IOCFREEDCB:
+ dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
+ current->pid);
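+		/* Fall through - the same handler frees the DCBs */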
+ case SEP_IOCFREEDCB_SECURE_DMA:
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
+ current->pid);
+ error = sep_free_dcb_handler(sep, dma_ctx);
+ dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
+ current->pid);
+ break;
+ default:
+ error = -ENOTTY;
+ dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
+ current->pid);
+ break;
+ }
+
+end_function:
+ dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);
+
+ return error;
+}
+
+/**
+ * sep_inthandler - interrupt handler for sep device
+ * @irq: interrupt
+ * @dev_id: device id
+ */
+static irqreturn_t sep_inthandler(int irq, void *dev_id)
+{
+ unsigned long lock_irq_flag;
+ u32 reg_val, reg_val2 = 0;
+ struct sep_device *sep = dev_id;
+ irqreturn_t int_error = IRQ_HANDLED;
+
+ /* Are we in power save? */
+#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
+ if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
+ dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
+ return IRQ_NONE;
+ }
+#endif
+
+ if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
+ dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
+ return IRQ_NONE;
+ }
+
+ /* Read the IRR register to check if this is SEP interrupt */
+ reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
+
+ dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);
+
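+	/*
+	 * Bit 13 of the IRR is the interrupt source handled by this
+	 * driver; GPR2 is decoded further down to tell printf and
+	 * daemon requests apart from an ordinary SEP reply.
+	 */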
+ if (reg_val & (0x1 << 13)) {
+
+ /* Lock and update the counter of reply messages */
+ spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
+ sep->reply_ct++;
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
+
+ dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
+ sep->send_ct, sep->reply_ct);
+
+ /* Is this a kernel client request */
+ if (sep->in_kernel) {
+ tasklet_schedule(&sep->finish_tasklet);
+ goto finished_interrupt;
+ }
+
+ /* Is this printf or daemon request? */
+ reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ dev_dbg(&sep->pdev->dev,
+ "SEP Interrupt - GPR2 is %08x\n", reg_val2);
+
+ clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
+
+ if ((reg_val2 >> 30) & 0x1) {
+ dev_dbg(&sep->pdev->dev, "int: printf request\n");
+ } else if (reg_val2 >> 31) {
+ dev_dbg(&sep->pdev->dev, "int: daemon request\n");
+ } else {
+ dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
+ wake_up(&sep->event_interrupt);
+ }
+ } else {
+ dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
+ int_error = IRQ_NONE;
+ }
+
+finished_interrupt:
+
+ if (int_error == IRQ_HANDLED)
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
+
+ return int_error;
+}
+
+/**
+ * sep_reconfig_shared_area - reconfigure shared area
+ * @sep: pointer to struct sep_device
+ *
+ * Reconfig the shared area between HOST and SEP - needed in case
+ * the DX_CC_Init function was called before OS loading.
+ */
+static int sep_reconfig_shared_area(struct sep_device *sep)
+{
+ int ret_val;
+
+	/* Used to limit waiting for SEP */
+ unsigned long end_time;
+
+ /* Send the new SHARED MESSAGE AREA to the SEP */
+ dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
+ (unsigned long long)sep->shared_bus);
+
+ sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
+
+ /* Poll for SEP response */
+ ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
+
+ end_time = jiffies + (WAIT_TIME * HZ);
+
+ while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
+ (ret_val != sep->shared_bus))
+ ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
+
+ /* Check the return value (register) */
+ if (ret_val != sep->shared_bus) {
+ dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
+ dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
+ ret_val = -ENOMEM;
+ } else
+ ret_val = 0;
+
+ dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
+
+ return ret_val;
+}
+
+/**
+ * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
+ * contexts into use
+ * @sep: SEP device
+ * @dcb_region: DCB region copy
+ * @dmatables_region: MLLI/DMA tables copy
+ * @dma_ctx: DMA context for current transaction
+ */
+ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
+ struct sep_dcblock **dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx)
+{
+ void *dmaregion_free_start = NULL;
+ void *dmaregion_free_end = NULL;
+ void *dcbregion_free_start = NULL;
+ void *dcbregion_free_end = NULL;
+ ssize_t error = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
+ current->pid);
+
+ if (1 > dma_ctx->nr_dcb_creat) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid number of dcbs to activate 0x%08X\n",
+ current->pid, dma_ctx->nr_dcb_creat);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ dmaregion_free_start = sep->shared_addr
+ + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
+ dmaregion_free_end = dmaregion_free_start
+ + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
+
+ if (dmaregion_free_start
+ + dma_ctx->dmatables_len > dmaregion_free_end) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+ memcpy(dmaregion_free_start,
+ *dmatables_region,
+ dma_ctx->dmatables_len);
+ /* Free MLLI table copy */
+ kfree(*dmatables_region);
+ *dmatables_region = NULL;
+
+ /* Copy thread's DCB table copy to DCB table region */
+ dcbregion_free_start = sep->shared_addr +
+ SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
+ dcbregion_free_end = dcbregion_free_start +
+ (SEP_MAX_NUM_SYNC_DMA_OPS *
+ sizeof(struct sep_dcblock)) - 1;
+
+ if (dcbregion_free_start
+ + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
+ > dcbregion_free_end) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ memcpy(dcbregion_free_start,
+ *dcb_region,
+ dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));
+
+ /* Print the tables */
+ dev_dbg(&sep->pdev->dev, "activate: input table\n");
+ sep_debug_print_lli_tables(sep,
+ (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
+ (*dcb_region)->input_mlli_address),
+ (*dcb_region)->input_mlli_num_entries,
+ (*dcb_region)->input_mlli_data_size);
+
+ dev_dbg(&sep->pdev->dev, "activate: output table\n");
+ sep_debug_print_lli_tables(sep,
+ (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
+ (*dcb_region)->output_mlli_address),
+ (*dcb_region)->output_mlli_num_entries,
+ (*dcb_region)->output_mlli_data_size);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] printing activated tables\n", current->pid);
+
+end_function:
+ kfree(*dmatables_region);
+ *dmatables_region = NULL;
+
+ kfree(*dcb_region);
+ *dcb_region = NULL;
+
+ return error;
+}
+
+/**
+ * sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
+ * @sep: SEP device
+ * @dcb_region: DCB region buf to create for current transaction
+ * @dmatables_region: MLLI/DMA tables buf to create for current transaction
+ * @dma_ctx: DMA context buf to create for current transaction
+ * @user_dcb_args: User arguments for DCB/MLLI creation
+ * @num_dcbs: Number of DCBs to create
+ * @secure_dma: Indicate use of IMR restricted memory secure dma
+ */
+static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
+ struct sep_dcblock **dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context **dma_ctx,
+ const struct build_dcb_struct __user *user_dcb_args,
+ const u32 num_dcbs, bool secure_dma)
+{
+ int error = 0;
+ int i = 0;
+ struct build_dcb_struct *dcb_args = NULL;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
+ current->pid);
+
+ if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid number of dcbs 0x%08X\n",
+ current->pid, num_dcbs);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ dcb_args = kzalloc(num_dcbs * sizeof(struct build_dcb_struct),
+ GFP_KERNEL);
+ if (!dcb_args) {
+ dev_warn(&sep->pdev->dev, "[PID%d] no memory for dcb args\n",
+ current->pid);
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ if (copy_from_user(dcb_args,
+ user_dcb_args,
+ num_dcbs * sizeof(struct build_dcb_struct))) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ /* Allocate thread-specific memory for DCB */
+ *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
+ GFP_KERNEL);
+ if (!(*dcb_region)) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ /* Prepare DCB and MLLI table into the allocated regions */
+ for (i = 0; i < num_dcbs; i++) {
+ error = sep_prepare_input_output_dma_table_in_dcb(sep,
+ (unsigned long)dcb_args[i].app_in_address,
+ (unsigned long)dcb_args[i].app_out_address,
+ dcb_args[i].data_in_size,
+ dcb_args[i].block_size,
+ dcb_args[i].tail_block_size,
+ dcb_args[i].is_applet,
+ false, secure_dma,
+ *dcb_region, dmatables_region,
+ dma_ctx,
+ NULL,
+ NULL);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] dma table creation failed\n",
+ current->pid);
+ goto end_function;
+ }
+
+ if (dcb_args[i].app_in_address != 0)
+ (*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
+ }
+
+end_function:
+ kfree(dcb_args);
+ return error;
+
+}
+
+/**
+ * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
+ * for kernel crypto
+ * @sep: SEP device
+ * @dcb_region: DCB region buf to create for current transaction
+ * @dmatables_region: MLLI/DMA tables buf to create for current transaction
+ * @dma_ctx: DMA context buf to create for current transaction
+ * @dcb_data: Kernel crypto DCB parameters (struct build_dcb_struct_kernel)
+ * @num_dcbs: Number of DCBs to create
+ *
+ * This does the same thing as sep_create_dcb_dmatables_context
+ * except that it is used only for the kernel crypto operation. It is
+ * separate because there is no user data involved; the dcb data structure
+ * is specific for kernel crypto (build_dcb_struct_kernel)
+ */
+int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
+ struct sep_dcblock **dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context **dma_ctx,
+ const struct build_dcb_struct_kernel *dcb_data,
+ const u32 num_dcbs)
+{
+ int error = 0;
+ int i = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
+ current->pid);
+
+ if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid number of dcbs 0x%08X\n",
+ current->pid, num_dcbs);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
+ current->pid, num_dcbs);
+
+ /* Allocate thread-specific memory for DCB */
+ *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
+ GFP_KERNEL);
+ if (!(*dcb_region)) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ /* Prepare DCB and MLLI table into the allocated regions */
+ for (i = 0; i < num_dcbs; i++) {
+ error = sep_prepare_input_output_dma_table_in_dcb(sep,
+ (unsigned long)dcb_data->app_in_address,
+ (unsigned long)dcb_data->app_out_address,
+ dcb_data->data_in_size,
+ dcb_data->block_size,
+ dcb_data->tail_block_size,
+ dcb_data->is_applet,
+ true,
+ false,
+ *dcb_region, dmatables_region,
+ dma_ctx,
+ dcb_data->src_sg,
+ dcb_data->dst_sg);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] dma table creation failed\n",
+ current->pid);
+ goto end_function;
+ }
+ }
+
+end_function:
+ return error;
+
+}
+
+/**
+ * sep_activate_msgarea_context - Takes the message area context into use
+ * @sep: SEP device
+ * @msg_region: Message area context buf
+ * @msg_len: Message area context buffer size
+ */
+static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
+ void **msg_region,
+ const size_t msg_len)
+{
+ dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
+ current->pid);
+
+ if (!msg_region || !(*msg_region) ||
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid act msgarea len 0x%08zX\n",
+ current->pid, msg_len);
+ return -EINVAL;
+ }
+
+ memcpy(sep->shared_addr, *msg_region, msg_len);
+
+ return 0;
+}
+
+/**
+ * sep_create_msgarea_context - Creates message area context
+ * @sep: SEP device
+ * @msg_region: Msg area region buf to create for current transaction
+ * @msg_user: Content for msg area region from user
+ * @msg_len: Message area size
+ */
+static ssize_t sep_create_msgarea_context(struct sep_device *sep,
+ void **msg_region,
+ const void __user *msg_user,
+ const size_t msg_len)
+{
+ int error = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
+ current->pid);
+
+ if (!msg_region ||
+ !msg_user ||
+ SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
+ SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid creat msgarea len 0x%08zX\n",
+ current->pid, msg_len);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ /* Allocate thread-specific memory for message buffer */
+ *msg_region = kzalloc(msg_len, GFP_KERNEL);
+ if (!(*msg_region)) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] no mem for msgarea context\n",
+ current->pid);
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ /* Copy input data to write() to allocated message buffer */
+ if (copy_from_user(*msg_region, msg_user, msg_len)) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+end_function:
+ if (error && msg_region) {
+ kfree(*msg_region);
+ *msg_region = NULL;
+ }
+
+ return error;
+}
+
+
+/**
+ * sep_read - Returns results of an operation for fastcall interface
+ * @filp: File pointer
+ * @buf_user: User buffer for storing results
+ * @count_user: User buffer size
+ * @offset: File offset, not supported
+ *
+ * The implementation does not support reading in chunks, all data must be
+ * consumed during a single read system call.
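+ *
+ * A successful read copies the shared area message back to the user;
+ * in either case the DCBs are then freed (copying out any tail data)
+ * and the transaction is ended.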
+ */
+static ssize_t sep_read(struct file *filp,
+ char __user *buf_user, size_t count_user,
+ loff_t *offset)
+{
+ struct sep_private_data * const private_data = filp->private_data;
+ struct sep_call_status *call_status = &private_data->call_status;
+ struct sep_device *sep = private_data->device;
+ struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
+ struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
+ ssize_t error = 0, error_tmp = 0;
+
+ /* Am I the process that owns the transaction? */
+ error = sep_check_transaction_owner(sep);
+ if (error) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
+ current->pid);
+ goto end_function;
+ }
+
+	/* Check that the user has called the necessary APIs */
+ if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
+ &call_status->status)) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] fastcall write not called\n",
+ current->pid);
+ error = -EPROTO;
+ goto end_function_error;
+ }
+
+ if (!buf_user) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] null user buffer\n",
+ current->pid);
+ error = -EINVAL;
+ goto end_function_error;
+ }
+
+
+ /* Wait for SEP to finish */
+ wait_event(sep->event_interrupt,
+ test_bit(SEP_WORKING_LOCK_BIT,
+ &sep->in_use_flags) == 0);
+
+ sep_dump_message(sep);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
+ current->pid, count_user);
+
+ /* In case user has allocated bigger buffer */
+ if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
+ count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES;
+
+ if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
+ error = -EFAULT;
+ goto end_function_error;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
+ error = count_user;
+
+end_function_error:
+ /* Copy possible tail data to user and free DCB and MLLIs */
+ error_tmp = sep_free_dcb_handler(sep, dma_ctx);
+ if (error_tmp)
+ dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
+ current->pid);
+
+ /* End the transaction, wakeup pending ones */
+ error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
+ my_queue_elem);
+ if (error_tmp)
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] ending transaction failed\n",
+ current->pid);
+
+end_function:
+ return error;
+}
+
+/**
+ * sep_fastcall_args_get - Gets fastcall params from user
+ * @sep: SEP device
+ * @args: Parameters buffer
+ * @buf_user: User buffer for operation parameters
+ * @count_user: User buffer size
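+ *
+ * The user buffer is expected to be laid out as:
+ *
+ *	struct sep_fastcall_hdr
+ *	struct build_dcb_struct, repeated num_dcbs times
+ *	message payload of msg_len bytes
+ *
+ * and count_user must match that total exactly.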
+ */
+static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
+ struct sep_fastcall_hdr *args,
+ const char __user *buf_user,
+ const size_t count_user)
+{
+ ssize_t error = 0;
+ size_t actual_count = 0;
+
+ if (!buf_user) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] null user buffer\n",
+ current->pid);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ if (count_user < sizeof(struct sep_fastcall_hdr)) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] too small message size 0x%08zX\n",
+ current->pid, count_user);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+
+ if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ if (SEP_FC_MAGIC != args->magic) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid fastcall magic 0x%08X\n",
+ current->pid, args->magic);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
+ current->pid, args->num_dcbs);
+ dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
+ current->pid, args->msg_len);
+
+ if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
+ SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid message length\n",
+ current->pid);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ actual_count = sizeof(struct sep_fastcall_hdr)
+ + args->msg_len
+ + (args->num_dcbs * sizeof(struct build_dcb_struct));
+
+ if (actual_count != count_user) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] inconsistent message "
+ "sizes 0x%08zX vs 0x%08zX\n",
+ current->pid, actual_count, count_user);
+ error = -EMSGSIZE;
+ goto end_function;
+ }
+
+end_function:
+ return error;
+}
+
+/**
+ * sep_write - Starts an operation for fastcall interface
+ * @filp: File pointer
+ * @buf_user: User buffer for operation parameters
+ * @count_user: User buffer size
+ * @offset: File offset, not supported
+ *
+ * The implementation does not support writing in chunks,
+ * all data must be given during a single write system call.
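+ *
+ * The sequence is: parse the fastcall header, build thread-local
+ * copies of the DCBs/MLLI tables and the message area, wait for
+ * ownership of the transaction, activate the copies in the shared
+ * area and finally send the command to the SEP.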
+ */
+static ssize_t sep_write(struct file *filp,
+ const char __user *buf_user, size_t count_user,
+ loff_t *offset)
+{
+ struct sep_private_data * const private_data = filp->private_data;
+ struct sep_call_status *call_status = &private_data->call_status;
+ struct sep_device *sep = private_data->device;
+ struct sep_dma_context *dma_ctx = NULL;
+ struct sep_fastcall_hdr call_hdr = {0};
+ void *msg_region = NULL;
+ void *dmatables_region = NULL;
+ struct sep_dcblock *dcb_region = NULL;
+ ssize_t error = 0;
+ struct sep_queue_info *my_queue_elem = NULL;
+ bool my_secure_dma; /* are we using secure_dma (IMR)? */
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
+ current->pid, sep);
+ dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
+ current->pid, private_data);
+
+ error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
+ if (error)
+ goto end_function;
+
+ buf_user += sizeof(struct sep_fastcall_hdr);
+
+ if (call_hdr.secure_dma == 0)
+ my_secure_dma = false;
+ else
+ my_secure_dma = true;
+
+ /*
+ * Controlling driver memory usage by limiting amount of
+ * buffers created. Only SEP_DOUBLEBUF_USERS_LIMIT number
+ * of threads can progress further at a time
+ */
+ dev_dbg(&sep->pdev->dev, "[PID%d] waiting for double buffering "
+ "region access\n", current->pid);
+ error = down_interruptible(&sep->sep_doublebuf);
+ dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
+ current->pid);
+ if (error) {
+ /* Signal received */
+ goto end_function_error;
+ }
+
+
+ /*
+ * Prepare contents of the shared area regions for
+ * the operation into temporary buffers
+ */
+ if (0 < call_hdr.num_dcbs) {
+ error = sep_create_dcb_dmatables_context(sep,
+ &dcb_region,
+ &dmatables_region,
+ &dma_ctx,
+ (const struct build_dcb_struct __user *)
+ buf_user,
+ call_hdr.num_dcbs, my_secure_dma);
+ if (error)
+ goto end_function_error_doublebuf;
+
+ buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
+ }
+
+ error = sep_create_msgarea_context(sep,
+ &msg_region,
+ buf_user,
+ call_hdr.msg_len);
+ if (error)
+ goto end_function_error_doublebuf;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
+ current->pid);
+ my_queue_elem = sep_queue_status_add(sep,
+ ((struct sep_msgarea_hdr *)msg_region)->opcode,
+ (dma_ctx) ? dma_ctx->input_data_len : 0,
+ current->pid,
+ current->comm, sizeof(current->comm));
+
+ if (!my_queue_elem) {
+		dev_dbg(&sep->pdev->dev,
+			"[PID%d] updating queue status error\n",
+			current->pid);
+ error = -ENOMEM;
+ goto end_function_error_doublebuf;
+ }
+
+ /* Wait until current process gets the transaction */
+ error = sep_wait_transaction(sep);
+
+ if (error) {
+ /* Interrupted by signal, don't clear transaction */
+ dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
+ current->pid);
+ sep_queue_status_remove(sep, &my_queue_elem);
+ goto end_function_error_doublebuf;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
+ current->pid);
+ private_data->my_queue_elem = my_queue_elem;
+
+ /* Activate shared area regions for the transaction */
+ error = sep_activate_msgarea_context(sep, &msg_region,
+ call_hdr.msg_len);
+ if (error)
+ goto end_function_error_clear_transact;
+
+ sep_dump_message(sep);
+
+ if (0 < call_hdr.num_dcbs) {
+ error = sep_activate_dcb_dmatables_context(sep,
+ &dcb_region,
+ &dmatables_region,
+ dma_ctx);
+ if (error)
+ goto end_function_error_clear_transact;
+ }
+
+ /* Send command to SEP */
+ error = sep_send_command_handler(sep);
+ if (error)
+ goto end_function_error_clear_transact;
+
+ /* Store DMA context for the transaction */
+ private_data->dma_ctx = dma_ctx;
+ /* Update call status */
+ set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
+ error = count_user;
+
+ up(&sep->sep_doublebuf);
+ dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
+ current->pid);
+
+ goto end_function;
+
+end_function_error_clear_transact:
+ sep_end_transaction_handler(sep, &dma_ctx, call_status,
+ &private_data->my_queue_elem);
+
+end_function_error_doublebuf:
+ up(&sep->sep_doublebuf);
+ dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
+ current->pid);
+
+end_function_error:
+ if (dma_ctx)
+ sep_free_dma_table_data_handler(sep, &dma_ctx);
+
+end_function:
+ kfree(dcb_region);
+ kfree(dmatables_region);
+ kfree(msg_region);
+
+ return error;
+}
+
+/**
+ * sep_seek - Handler for seek system call
+ * @filp: File pointer
+ * @offset: File offset
+ * @origin: Options for offset
+ *
+ * Fastcall interface does not support seeking; all reads
+ * and writes are from/to offset zero.
+ */
+static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
+{
+ return -ENOSYS;
+}
+
+/**
+ * sep_file_operations - file operation on sep device
+ * @sep_ioctl: ioctl handler from user space call
+ * @sep_poll: poll handler
+ * @sep_open: handles sep device open request
+ * @sep_release:handles sep device release request
+ * @sep_mmap: handles memory mapping requests
+ * @sep_read: handles read request on sep device
+ * @sep_write: handles write request on sep device
+ * @sep_seek: handles seek request on sep device
+ */
+static const struct file_operations sep_file_operations = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sep_ioctl,
+ .poll = sep_poll,
+ .open = sep_open,
+ .release = sep_release,
+ .mmap = sep_mmap,
+ .read = sep_read,
+ .write = sep_write,
+ .llseek = sep_seek,
+};
+
+/**
+ * sep_sysfs_read - read sysfs entry per given arguments
+ * @filp: file pointer
+ * @kobj: kobject pointer
+ * @attr: binary file attributes
+ * @buf: read to this buffer
+ * @pos: offset to read
+ * @count: amount of data to read
+ *
+ * This function reads sysfs entries for the sep driver per the given arguments.
+ */
+static ssize_t
+sep_sysfs_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t pos, size_t count)
+{
+ unsigned long lck_flags;
+ size_t nleft = count;
+ struct sep_device *sep = sep_dev;
+ struct sep_queue_info *queue_elem = NULL;
+ u32 queue_num = 0;
+ u32 i = 1;
+
+ spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
+
+ queue_num = sep->sep_queue_num;
+ if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
+ queue_num = SEP_DOUBLEBUF_USERS_LIMIT;
+
+ if (count < sizeof(queue_num)
+ + (queue_num * sizeof(struct sep_queue_data))) {
+ spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
+ return -EINVAL;
+ }
+
+ memcpy(buf, &queue_num, sizeof(queue_num));
+ buf += sizeof(queue_num);
+ nleft -= sizeof(queue_num);
+
+ list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
+ if (i++ > queue_num)
+ break;
+
+ memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
+ nleft -= sizeof(queue_elem->data);
+ buf += sizeof(queue_elem->data);
+ }
+ spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
+
+ return count - nleft;
+}
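+
+/*
+ * Illustrative example (user-space sketch, not driver code): the
+ * "queue_status" attribute ignores the read offset and always
+ * produces a u32 element count followed by that many
+ * struct sep_queue_data records, so a reader would grab the whole
+ * blob in one call:
+ *
+ *	char buf[sizeof(u32) + SEP_DOUBLEBUF_USERS_LIMIT *
+ *		 sizeof(struct sep_queue_data)];
+ *	ssize_t n = read(fd, buf, sizeof(buf));
+ *
+ * and then parse the leading count before the records.
+ */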
+
+/**
+ * bin_attributes - defines attributes for queue_status
+ * @attr: attributes (name & permissions)
+ * @read: function pointer to read this file
+ * @size: maximum size of binary attribute
+ */
+static const struct bin_attribute queue_status = {
+ .attr = {.name = "queue_status", .mode = 0444},
+ .read = sep_sysfs_read,
+ .size = sizeof(u32)
+ + (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
+};
+
+/**
+ * sep_register_driver_with_fs - register misc devices
+ * @sep: pointer to struct sep_device
+ *
+ * This function registers the driver with the file system
+ */
+static int sep_register_driver_with_fs(struct sep_device *sep)
+{
+ int ret_val;
+
+ sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
+ sep->miscdev_sep.name = SEP_DEV_NAME;
+ sep->miscdev_sep.fops = &sep_file_operations;
+
+ ret_val = misc_register(&sep->miscdev_sep);
+ if (ret_val) {
+		dev_warn(&sep->pdev->dev, "misc_register failed for SEP %d\n",
+			ret_val);
+ return ret_val;
+ }
+
+ ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
+ &queue_status);
+ if (ret_val) {
+		dev_warn(&sep->pdev->dev,
+			"sysfs attribute registration failed for SEP %d\n",
+			ret_val);
+ return ret_val;
+ }
+
+ return ret_val;
+}
+
+/**
+ * sep_probe - probe a matching PCI device
+ * @pdev: pci_device
+ * @ent: pci_device_id
+ *
+ * Attempt to set up and configure a SEP device that has been
+ * discovered by the PCI layer. Allocates all required resources.
+ */
+static int __devinit sep_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int error = 0;
+ struct sep_device *sep = NULL;
+
+ if (sep_dev != NULL) {
+ dev_dbg(&pdev->dev, "only one SEP supported.\n");
+ return -EBUSY;
+ }
+
+ /* Enable the device */
+ error = pci_enable_device(pdev);
+ if (error) {
+ dev_warn(&pdev->dev, "error enabling pci device\n");
+ goto end_function;
+ }
+
+ /* Allocate the sep_device structure for this device */
+ sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
+ if (sep_dev == NULL) {
+		dev_warn(&pdev->dev,
+			"cannot allocate the sep_device structure\n");
+ error = -ENOMEM;
+ goto end_function_disable_device;
+ }
+
+ /*
+ * We're going to use another variable for actually
+ * working with the device; this way, if we have
+ * multiple devices in the future, it would be easier
+ * to make appropriate changes
+ */
+ sep = sep_dev;
+
+ sep->pdev = pci_dev_get(pdev);
+
+ init_waitqueue_head(&sep->event_transactions);
+ init_waitqueue_head(&sep->event_interrupt);
+ spin_lock_init(&sep->snd_rply_lck);
+ spin_lock_init(&sep->sep_queue_lock);
+ sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);
+
+ INIT_LIST_HEAD(&sep->sep_queue_status);
+
+	dev_dbg(&sep->pdev->dev,
+		"sep probe: PCI obtained, device being prepared\n");
+
+ /* Set up our register area */
+ sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
+ if (!sep->reg_physical_addr) {
+ dev_warn(&sep->pdev->dev, "Error getting register start\n");
+ error = -ENODEV;
+ goto end_function_free_sep_dev;
+ }
+
+ sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
+ if (!sep->reg_physical_end) {
+ dev_warn(&sep->pdev->dev, "Error getting register end\n");
+ error = -ENODEV;
+ goto end_function_free_sep_dev;
+ }
+
+ sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
+ (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
+ if (!sep->reg_addr) {
+ dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
+ error = -ENODEV;
+ goto end_function_free_sep_dev;
+ }
+
+ dev_dbg(&sep->pdev->dev,
+ "Register area start %llx end %llx virtual %p\n",
+ (unsigned long long)sep->reg_physical_addr,
+ (unsigned long long)sep->reg_physical_end,
+ sep->reg_addr);
+
+ /* Allocate the shared area */
+ sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
+ SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
+ SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
+ SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
+
+ if (sep_map_and_alloc_shared_area(sep)) {
+ error = -ENOMEM;
+ /* Allocation failed */
+ goto end_function_error;
+ }
+
+ /* Clear ICR register */
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
+
+ /* Set the IMR register - open only GPR 2 */
+ sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
+
+ /* Read send/receive counters from SEP */
+ sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ sep->reply_ct &= 0x3FFFFFFF;
+ sep->send_ct = sep->reply_ct;
+
+ /* Get the interrupt line */
+ error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
+ "sep_driver", sep);
+
+ if (error)
+ goto end_function_deallocate_sep_shared_area;
+
+ /* The new chip requires a shared area reconfigure */
+ error = sep_reconfig_shared_area(sep);
+ if (error)
+ goto end_function_free_irq;
+
+ sep->in_use = 1;
+
+ /* Finally magic up the device nodes */
+ /* Register driver with the fs */
+ error = sep_register_driver_with_fs(sep);
+
+ if (error) {
+ dev_err(&sep->pdev->dev, "error registering dev file\n");
+ goto end_function_free_irq;
+ }
+
+	sep->in_use = 0; /* done touching the device */
+#ifdef SEP_ENABLE_RUNTIME_PM
+ pm_runtime_put_noidle(&sep->pdev->dev);
+ pm_runtime_allow(&sep->pdev->dev);
+ pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
+ SUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&sep->pdev->dev);
+ pm_runtime_mark_last_busy(&sep->pdev->dev);
+ sep->power_save_setup = 1;
+#endif
+ /* register kernel crypto driver */
+#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
+ error = sep_crypto_setup();
+ if (error) {
+ dev_err(&sep->pdev->dev, "crypto setup failed\n");
+ goto end_function_free_irq;
+ }
+#endif
+ goto end_function;
+
+end_function_free_irq:
+ free_irq(pdev->irq, sep);
+
+end_function_deallocate_sep_shared_area:
+ /* De-allocate shared area */
+ sep_unmap_and_free_shared_area(sep);
+
+end_function_error:
+ iounmap(sep->reg_addr);
+
+end_function_free_sep_dev:
+ pci_dev_put(sep_dev->pdev);
+ kfree(sep_dev);
+ sep_dev = NULL;
+
+end_function_disable_device:
+ pci_disable_device(pdev);
+
+end_function:
+ return error;
+}
+
+/**
+ * sep_remove - handles removing device from pci subsystem
+ * @pdev: pointer to pci device
+ *
+ * This function handles removing our sep device from the PCI subsystem on
+ * exit or when unloading this module. It frees all used resources and
+ * unmaps any mapped memory regions.
+ */
+static void sep_remove(struct pci_dev *pdev)
+{
+ struct sep_device *sep = sep_dev;
+
+ /* Unregister from fs */
+ misc_deregister(&sep->miscdev_sep);
+
+ /* Unregister from kernel crypto */
+#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
+ sep_crypto_takedown();
+#endif
+ /* Free the irq */
+ free_irq(sep->pdev->irq, sep);
+
+ /* Free the shared area */
+ sep_unmap_and_free_shared_area(sep_dev);
+ iounmap(sep_dev->reg_addr);
+
+#ifdef SEP_ENABLE_RUNTIME_PM
+ if (sep->in_use) {
+ sep->in_use = 0;
+ pm_runtime_forbid(&sep->pdev->dev);
+ pm_runtime_get_noresume(&sep->pdev->dev);
+ }
+#endif
+ pci_dev_put(sep_dev->pdev);
+ kfree(sep_dev);
+ sep_dev = NULL;
+}
+
+/* Initialize struct pci_device_id for our driver */
+static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
+ {0}
+};
+
+/* Export our pci_device_id structure to user space */
+MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
+
+#ifdef SEP_ENABLE_RUNTIME_PM
+
+/**
+ * sep_pci_resume - resume routine while waking up from S3 state
+ * @dev: pointer to sep device
+ *
+ * This function is used to wake up the sep driver when the system resumes
+ * from the S3 (suspend-to-RAM) state, in which RAM contents are kept intact.
+ * Notes - revisit with more understanding of pm, ICR/IMR & counters.
+ */
+static int sep_pci_resume(struct device *dev)
+{
+ struct sep_device *sep = sep_dev;
+
+ dev_dbg(&sep->pdev->dev, "pci resume called\n");
+
+ if (sep->power_state == SEP_DRIVER_POWERON)
+ return 0;
+
+ /* Clear ICR register */
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
+
+ /* Set the IMR register - open only GPR 2 */
+ sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
+
+ /* Read send/receive counters from SEP */
+ sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ sep->reply_ct &= 0x3FFFFFFF;
+ sep->send_ct = sep->reply_ct;
+
+ sep->power_state = SEP_DRIVER_POWERON;
+
+ return 0;
+}
+
+/**
+ * sep_pci_suspend - suspend routine while going to S3 state
+ * @dev: pointer to sep device
+ *
+ * This function is used to suspend the sep driver when the system goes to
+ * the S3 (suspend-to-RAM) state; RAM contents are kept intact and powered
+ * during this suspend.
+ * Notes - revisit with more understanding of pm, ICR/IMR
+ */
+static int sep_pci_suspend(struct device *dev)
+{
+ struct sep_device *sep = sep_dev;
+
+ dev_dbg(&sep->pdev->dev, "pci suspend called\n");
+ if (sep->in_use == 1)
+ return -EAGAIN;
+
+ sep->power_state = SEP_DRIVER_POWEROFF;
+
+ /* Clear ICR register */
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
+
+ /* Set the IMR to block all */
+ sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);
+
+ return 0;
+}
+
+/**
+ * sep_pm_runtime_resume - runtime resume routine
+ * @dev: pointer to sep device
+ *
+ * Notes - revisit with more understanding of pm, ICR/IMR & counters
+ */
+static int sep_pm_runtime_resume(struct device *dev)
+{
+ u32 retval2;
+ u32 delay_count;
+ struct sep_device *sep = sep_dev;
+
+ dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");
+
+	/*
+	 * Wait until the SCU boot is ready.
+	 * This is done by polling SCU_DELAY_ITERATION (10
+	 * microseconds each) up to SCU_DELAY_MAX (50) times.
+	 * The bit can become set at any time within 500
+	 * microseconds after each power resume.
+	 */
+ retval2 = 0;
+ delay_count = 0;
+ while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
+ retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ retval2 &= 0x00000008;
+ if (!retval2) {
+ udelay(SCU_DELAY_ITERATION);
+ delay_count += 1;
+ }
+ }
+
+ if (!retval2) {
+ dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
+ return -EINVAL;
+ }
+
+ /* Clear ICR register */
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
+
+ /* Set the IMR register - open only GPR 2 */
+ sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
+
+ /* Read send/receive counters from SEP */
+ sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ sep->reply_ct &= 0x3FFFFFFF;
+ sep->send_ct = sep->reply_ct;
+
+ return 0;
+}
+
+/**
+ * sep_pm_runtime_suspend - runtime suspend routine
+ * @dev: pointer to sep device
+ *
+ * Notes - revisit with more understanding of pm
+ */
+static int sep_pm_runtime_suspend(struct device *dev)
+{
+ struct sep_device *sep = sep_dev;
+
+ dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");
+
+ /* Clear ICR register */
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
+ return 0;
+}
+
+/**
+ * sep_pm - power management for sep driver
+ * @sep_pm_runtime_resume: runtime resume - no communication with cpu & main memory
+ * @sep_pm_runtime_suspend: runtime suspend - no communication with cpu & main memory
+ * @sep_pci_suspend: suspend - main memory is still ON
+ * @sep_pci_resume: resume - main memory is still ON
+ */
+static const struct dev_pm_ops sep_pm = {
+ .runtime_resume = sep_pm_runtime_resume,
+ .runtime_suspend = sep_pm_runtime_suspend,
+ .resume = sep_pci_resume,
+ .suspend = sep_pci_suspend,
+};
+#endif /* SEP_ENABLE_RUNTIME_PM */
+
+/**
+ * sep_pci_driver - registers this driver with the pci subsystem
+ * @name: name identifier for this driver
+ * @sep_pci_id_tbl: pointer to struct pci_device_id table
+ * @sep_probe: pointer to probe function in PCI driver
+ * @sep_remove: pointer to remove function in PCI driver
+ */
+static struct pci_driver sep_pci_driver = {
+#ifdef SEP_ENABLE_RUNTIME_PM
+ .driver = {
+ .pm = &sep_pm,
+ },
+#endif
+ .name = "sep_sec_driver",
+ .id_table = sep_pci_id_tbl,
+ .probe = sep_probe,
+ .remove = sep_remove
+};
+
+/**
+ * sep_init - init function
+ *
+ * Module load time. Register the PCI device driver.
+ */
+static int __init sep_init(void)
+{
+ return pci_register_driver(&sep_pci_driver);
+}
+
+/**
+ * sep_exit - called to unload driver
+ *
+ * Unregister the driver. The device will perform all the required cleanup.
+ */
+static void __exit sep_exit(void)
+{
+ pci_unregister_driver(&sep_pci_driver);
+}
+
+module_init(sep_init);
+module_exit(sep_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_trace_events.h b/drivers/staging/sep/sep_trace_events.h
new file mode 100644
index 000000000000..2b053a93afe6
--- /dev/null
+++ b/drivers/staging/sep/sep_trace_events.h
@@ -0,0 +1,188 @@
+/*
+ * If TRACE_SYSTEM is defined, that will be the directory created
+ * in the ftrace directory under /sys/kernel/debug/tracing/events/<system>
+ *
+ * The define_trace.h below will also look for a file name of
+ * TRACE_SYSTEM.h where TRACE_SYSTEM is what is defined here.
+ * In this case, it would look for sep.h
+ *
+ * If the header name is different from the system name
+ * (as in this case), then you can override the header name that
+ * define_trace.h will look up by defining TRACE_INCLUDE_FILE
+ *
+ * This file is called sep_trace_events.h but we want the system
+ * to be called "sep". Therefore we must define the name of this
+ * file:
+ *
+ * #define TRACE_INCLUDE_FILE sep_trace_events
+ *
+ * As we do at the bottom of this file.
+ *
+ * Notice that TRACE_SYSTEM should be defined outside of #if
+ * protection, just like TRACE_INCLUDE_FILE.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sep
+
+/*
+ * Notice that this file is not protected like a normal header.
+ * We also must allow for rereading of this file. The
+ *
+ * || defined(TRACE_HEADER_MULTI_READ)
+ *
+ * serves this purpose.
+ */
+#if !defined(_TRACE_SEP_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SEP_EVENTS_H
+
+#ifdef SEP_PERF_DEBUG
+#define SEP_TRACE_FUNC_IN() trace_sep_func_start(__func__, 0)
+#define SEP_TRACE_FUNC_OUT(branch) trace_sep_func_end(__func__, branch)
+#define SEP_TRACE_EVENT(branch) trace_sep_misc_event(__func__, branch)
+#else
+#define SEP_TRACE_FUNC_IN()
+#define SEP_TRACE_FUNC_OUT(branch)
+#define SEP_TRACE_EVENT(branch)
+#endif
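+
+/*
+ * Illustrative usage (sketch): with SEP_PERF_DEBUG defined, a driver
+ * function can be bracketed as
+ *
+ *	SEP_TRACE_FUNC_IN();
+ *	...
+ *	SEP_TRACE_FUNC_OUT(0);
+ *
+ * which fires the sep_func_start/sep_func_end tracepoints defined
+ * below; without SEP_PERF_DEBUG the macros compile away to nothing.
+ */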
+
+/*
+ * All trace headers should include tracepoint.h, until we finally
+ * make it into a standard header.
+ */
+#include <linux/tracepoint.h>
+
+/*
+ * The TRACE_EVENT macro is broken up into 5 parts.
+ *
+ * name: name of the trace point. This is also how to enable the tracepoint.
+ * A function called trace_foo_bar() will be created.
+ *
+ * proto: the prototype of the function trace_foo_bar()
+ * Here it is trace_foo_bar(char *foo, int bar).
+ *
+ * args: must match the arguments in the prototype.
+ * Here it is simply "foo, bar".
+ *
+ * struct: This defines the way the data will be stored in the ring buffer.
+ * There are currently two types of elements: __field and __array.
+ * A __field is broken up into (type, name), where type can be any
+ * type but an array.
+ * For an array, there are three fields: (type, name, size) - the
+ * type of the elements in the array, the name of the field and the
+ * size of the array.
+ *
+ * __array( char, foo, 10) is the same as saying char foo[10].
+ *
+ * fast_assign: This is a C-like function that is used to store the items
+ * into the ring buffer.
+ *
+ * printk: This is a way to print out the data in pretty print. This is
+ * useful if the system crashes; when logging via a serial line, the
+ * data can be printed to the console using this "printk" method.
+ *
+ * Note, that for both the assign and the printk, __entry is the handler
+ * to the data structure in the ring buffer, and is defined by the
+ * TP_STRUCT__entry.
+ */
+TRACE_EVENT(sep_func_start,
+
+ TP_PROTO(const char *name, int branch),
+
+ TP_ARGS(name, branch),
+
+ TP_STRUCT__entry(
+ __array(char, name, 20)
+ __field(int, branch)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, name, 20);
+ __entry->branch = branch;
+ ),
+
+ TP_printk("func_start %s %d", __entry->name, __entry->branch)
+);
+
+TRACE_EVENT(sep_func_end,
+
+ TP_PROTO(const char *name, int branch),
+
+ TP_ARGS(name, branch),
+
+ TP_STRUCT__entry(
+ __array(char, name, 20)
+ __field(int, branch)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, name, 20);
+ __entry->branch = branch;
+ ),
+
+ TP_printk("func_end %s %d", __entry->name, __entry->branch)
+);
+
+TRACE_EVENT(sep_misc_event,
+
+ TP_PROTO(const char *name, int branch),
+
+ TP_ARGS(name, branch),
+
+ TP_STRUCT__entry(
+ __array(char, name, 20)
+ __field(int, branch)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, name, 20);
+ __entry->branch = branch;
+ ),
+
+ TP_printk("misc_event %s %d", __entry->name, __entry->branch)
+);
+
+
+#endif
+
+/***** NOTICE! The #if protection ends here. *****/
+
+
+/*
+ * There are several ways I could have done this. If I left out the
+ * TRACE_INCLUDE_PATH, then it would default to the kernel source
+ * include/trace/events directory.
+ *
+ * I could specify a path from the define_trace.h file back to this
+ * file.
+ *
+ * #define TRACE_INCLUDE_PATH ../../samples/trace_events
+ *
+ * But the safest and easiest way is simply to make it use the
+ * directory that the file is in, by adding in the Makefile:
+ *
+ * CFLAGS_trace-events-sample.o := -I$(src)
+ *
+ * This will make sure the current path is part of the include
+ * structure for our file so that define_trace.h can find it.
+ *
+ * I could have made only the top level directory the include:
+ *
+ * CFLAGS_trace-events-sample.o := -I$(PWD)
+ *
+ * And then let the path to this directory be the TRACE_INCLUDE_PATH:
+ *
+ * #define TRACE_INCLUDE_PATH samples/trace_events
+ *
+ * But then if something defines "samples" or "trace_events" as a macro
+ * then we could risk that being converted too, giving us an unexpected
+ * result.
+ */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+/*
+ * TRACE_INCLUDE_FILE is not needed if the filename and TRACE_SYSTEM are equal
+ */
+#define TRACE_INCLUDE_FILE sep_trace_events
+#include <trace/define_trace.h>
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index 1c5780f1571b..ae1d815e2a53 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -200,7 +200,6 @@ static struct usb_driver serqt_usb_driver = {
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = serqt_id_table,
- .no_dynamic_id = 1,
};
static int port_paranoia_check(struct usb_serial_port *port,
@@ -1590,7 +1589,6 @@ static struct usb_serial_driver quatech_device = {
.name = "serqt",
},
.description = DRIVER_DESC,
- .usb_driver = &serqt_usb_driver,
.id_table = serqt_id_table,
.num_ports = 8,
.open = qt_open,
@@ -1610,41 +1608,11 @@ static struct usb_serial_driver quatech_device = {
.release = qt_release,
};
-static int __init serqt_usb_init(void)
-{
- int retval;
-
- dbg("%s\n", __func__);
-
- /* register with usb-serial */
- retval = usb_serial_register(&quatech_device);
-
- if (retval)
- goto failed_usb_serial_register;
-
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
-
- /* register with usb */
-
- retval = usb_register(&serqt_usb_driver);
- if (retval == 0)
- return 0;
-
- /* if we're here, usb_register() failed */
- usb_serial_deregister(&quatech_device);
-failed_usb_serial_register:
- return retval;
-}
-
-static void __exit serqt_usb_exit(void)
-{
- usb_deregister(&serqt_usb_driver);
- usb_serial_deregister(&quatech_device);
-}
+static struct usb_serial_driver * const serial_drivers[] = {
+ &quatech_device, NULL
+};
-module_init(serqt_usb_init);
-module_exit(serqt_usb_exit);
+module_usb_serial_driver(serqt_usb_driver, serial_drivers);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/staging/slicoss/README b/drivers/staging/slicoss/README
index b83bba19b7f0..cb04a87b2017 100644
--- a/drivers/staging/slicoss/README
+++ b/drivers/staging/slicoss/README
@@ -42,7 +42,7 @@ TODO:
Please send patches to:
- Greg Kroah-Hartman <gregkh@suse.de>
+ Greg Kroah-Hartman <gregkh@linuxfoundation.org>
and Cc: Lior Dotan <liodot@gmail.com> and Christopher Harrer
<charrer@alacritech.com> as well as they are also able to test out any
changes.
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index ae0035f327e7..83c582ed12e5 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -41,7 +41,6 @@
#ifdef CONFIG_PM
#include <linux/pm.h>
-#include <linux/module.h>
#endif
#include "smtcfb.h"
@@ -443,7 +442,7 @@ static int smtc_setcolreg(unsigned regno, unsigned red, unsigned green,
}
#ifdef __BIG_ENDIAN
-static ssize_t smtcfb_read(struct fb_info *info, char __user * buf, size_t
+static ssize_t smtcfb_read(struct fb_info *info, char __user *buf, size_t
count, loff_t *ppos)
{
unsigned long p = *ppos;
diff --git a/drivers/staging/sm7xx/smtcfb.h b/drivers/staging/sm7xx/smtcfb.h
index c5e6989e65ab..ab95af2b9c07 100644
--- a/drivers/staging/sm7xx/smtcfb.h
+++ b/drivers/staging/sm7xx/smtcfb.h
@@ -38,7 +38,7 @@
#define dac_reg (0x3c8)
#define dac_val (0x3c9)
-extern char *smtc_RegBaseAddress;
+extern char __iomem *smtc_RegBaseAddress;
#define smtc_mmiowb(dat, reg) writeb(dat, smtc_RegBaseAddress + reg)
#define smtc_mmioww(dat, reg) writew(dat, smtc_RegBaseAddress + reg)
#define smtc_mmiowl(dat, reg) writel(dat, smtc_RegBaseAddress + reg)
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index c7b03f0ef2dd..92b34e29ad06 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -1731,15 +1731,15 @@ static void do_handle_spec(struct vc_data *vc, u_char value, char up_flag)
switch (value) {
case KVAL(K_CAPS):
label = msg_get(MSG_KEYNAME_CAPSLOCK);
- on_off = (vc_kbd_led(kbd_table + vc->vc_num, VC_CAPSLOCK));
+ on_off = vt_get_leds(fg_console, VC_CAPSLOCK);
break;
case KVAL(K_NUM):
label = msg_get(MSG_KEYNAME_NUMLOCK);
- on_off = (vc_kbd_led(kbd_table + vc->vc_num, VC_NUMLOCK));
+ on_off = vt_get_leds(fg_console, VC_NUMLOCK);
break;
case KVAL(K_HOLD):
label = msg_get(MSG_KEYNAME_SCROLLLOCK);
- on_off = (vc_kbd_led(kbd_table + vc->vc_num, VC_SCROLLOCK));
+ on_off = vt_get_leds(fg_console, VC_SCROLLOCK);
if (speakup_console[vc->vc_num])
speakup_console[vc->vc_num]->tty_stopped = on_off;
break;
@@ -2020,7 +2020,7 @@ speakup_key(struct vc_data *vc, int shift_state, int keycode, u_short keysym,
if (type >= 0xf0)
type -= 0xf0;
if (type == KT_PAD
- && (vc_kbd_led(kbd_table + fg_console, VC_NUMLOCK))) {
+ && (vt_get_leds(fg_console, VC_NUMLOCK))) {
if (up_flag) {
spk_keydown = 0;
goto out;
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index 7f3d87bf5927..a97d3d5b58a4 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -8,21 +8,20 @@
static void start_serial_interrupt(int irq);
-static struct serial_state rs_table[] = {
+static const struct old_serial_port rs_table[] = {
SERIAL_PORT_DFNS
};
-static struct serial_state *serstate;
+static const struct old_serial_port *serstate;
static int timeouts;
-struct serial_state *spk_serial_init(int index)
+const struct old_serial_port *spk_serial_init(int index)
{
int baud = 9600, quot = 0;
unsigned int cval = 0;
int cflag = CREAD | HUPCL | CLOCAL | B9600 | CS8;
- struct serial_state *ser = NULL;
+ const struct old_serial_port *ser = rs_table + index;
int err;
- ser = rs_table + index;
/* Divisor, bytesize and parity */
quot = ser->baud_base / baud;
cval = cflag & (CSIZE | CSTOPB);
@@ -41,7 +40,7 @@ struct serial_state *spk_serial_init(int index)
__release_region(&ioport_resource, ser->port, 8);
err = synth_request_region(ser->port, 8);
if (err) {
- pr_warn("Unable to allocate port at %lx, errno %i",
+ pr_warn("Unable to allocate port at %x, errno %i",
ser->port, err);
return NULL;
}
diff --git a/drivers/staging/speakup/serialio.h b/drivers/staging/speakup/serialio.h
index d785b1f6a3b3..614271f9b99f 100644
--- a/drivers/staging/speakup/serialio.h
+++ b/drivers/staging/speakup/serialio.h
@@ -4,11 +4,22 @@
#include <linux/serial.h> /* for rs_table, serial constants &
serial_uart_config */
#include <linux/serial_reg.h> /* for more serial constants */
-#include <linux/serialP.h> /* for struct serial_state */
#ifndef __sparc__
#include <asm/serial.h>
#endif
+/*
+ * this is cut&paste from 8250.h. Get rid of the structure, the definitions
+ * and this whole broken driver.
+ */
+struct old_serial_port {
+ unsigned int uart; /* unused */
+ unsigned int baud_base;
+ unsigned int port;
+ unsigned int irq;
+ unsigned int flags; /* unused */
+};
+
/* countdown values for serial timeouts in us */
#define SPK_SERIAL_TIMEOUT 100000
/* countdown values transmitter/dsr timeouts in us */
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index 16ace4af68a9..a47c5b78d57d 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -44,7 +44,7 @@
#define KT_SPKUP 15
-extern struct serial_state *spk_serial_init(int index);
+extern const struct old_serial_port *spk_serial_init(int index);
extern void stop_serial_interrupt(void);
extern int wait_for_xmitr(void);
extern unsigned char spk_serial_in(void);
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 2222d6919ef5..331eae788700 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -34,7 +34,7 @@ static int do_synth_init(struct spk_synth *in_synth);
int serial_synth_probe(struct spk_synth *synth)
{
- struct serial_state *ser;
+ const struct old_serial_port *ser;
int failed = 0;
if ((synth->ser >= SPK_LO_TTY) && (synth->ser <= SPK_HI_TTY)) {
diff --git a/drivers/staging/telephony/Kconfig b/drivers/staging/telephony/Kconfig
new file mode 100644
index 000000000000..b5f78b6ed2bd
--- /dev/null
+++ b/drivers/staging/telephony/Kconfig
@@ -0,0 +1,47 @@
+#
+# Telephony device configuration
+#
+
+menuconfig PHONE
+ tristate "Telephony support"
+ depends on HAS_IOMEM
+ ---help---
+ Say Y here if you have a telephony card, which for example allows
+ you to use a regular phone for voice-over-IP applications.
+
+ Note: this has nothing to do with modems. You do not need to say Y
+ here in order to be able to use a modem under Linux.
+
+ To compile this driver as a module, choose M here: the
+ module will be called phonedev.
+
+if PHONE
+
+config PHONE_IXJ
+ tristate "QuickNet Internet LineJack/PhoneJack support"
+ depends on ISA || PCI
+ ---help---
+ Say M if you have a telephony card manufactured by Quicknet
+ Technologies, Inc. These include the Internet PhoneJACK and
+ Internet LineJACK Telephony Cards. You will get a module called
+ ixj.
+
+ For the ISA versions of these products, you can configure the
+ cards using the isapnp tools (pnpdump/isapnp) or you can use the
+ isapnp support. Please read <file:Documentation/telephony/ixj.txt>.
+
+ For more information on these cards, see Quicknet's web site at:
+ <http://www.quicknet.net/>.
+
+ If you do not have any Quicknet telephony cards, you can safely
+ say N here.
+
+config PHONE_IXJ_PCMCIA
+ tristate "QuickNet Internet LineJack/PhoneJack PCMCIA support"
+ depends on PHONE_IXJ && PCMCIA
+ help
+ Say Y here to configure in PCMCIA service support for the Quicknet
+ cards manufactured by Quicknet Technologies, Inc. This changes the
+ card initialization code to work with the card manager daemon.
+
+endif # PHONE
diff --git a/drivers/staging/telephony/Makefile b/drivers/staging/telephony/Makefile
new file mode 100644
index 000000000000..1206615d69e4
--- /dev/null
+++ b/drivers/staging/telephony/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for drivers/telephony
+#
+
+obj-$(CONFIG_PHONE) += phonedev.o
+obj-$(CONFIG_PHONE_IXJ) += ixj.o
+obj-$(CONFIG_PHONE_IXJ_PCMCIA) += ixj_pcmcia.o
diff --git a/drivers/staging/telephony/TODO b/drivers/staging/telephony/TODO
new file mode 100644
index 000000000000..d47dec3508d7
--- /dev/null
+++ b/drivers/staging/telephony/TODO
@@ -0,0 +1,10 @@
+TODO
+. Determine if the boards are still in use
+ and move this module back to drivers/telephony if necessary
+. Coding style cleanups
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
+cc Joe Perches <joe@perches.com> if the module should be reactivated.
+
+If no module activity occurs before version 3.6 is released, this
+module should be removed.
diff --git a/drivers/staging/telephony/ixj-ver.h b/drivers/staging/telephony/ixj-ver.h
new file mode 100644
index 000000000000..2031ac6c888c
--- /dev/null
+++ b/drivers/staging/telephony/ixj-ver.h
@@ -0,0 +1,4 @@
+/* configuration management identifiers */
+#define IXJ_VER_MAJOR 1
+#define IXJ_VER_MINOR 0
+#define IXJ_BLD_VER 1
diff --git a/drivers/staging/telephony/ixj.c b/drivers/staging/telephony/ixj.c
new file mode 100644
index 000000000000..f96027921f60
--- /dev/null
+++ b/drivers/staging/telephony/ixj.c
@@ -0,0 +1,10571 @@
+/****************************************************************************
+ * ixj.c
+ *
+ * Device Driver for Quicknet Technologies, Inc.'s Telephony cards
+ * including the Internet PhoneJACK, Internet PhoneJACK Lite,
+ * Internet PhoneJACK PCI, Internet LineJACK, Internet PhoneCARD and
+ * SmartCABLE
+ *
+ * (c) Copyright 1999-2001 Quicknet Technologies, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Author: Ed Okerson, <eokerson@quicknet.net>
+ *
+ * Contributors: Greg Herlein, <gherlein@quicknet.net>
+ * David W. Erhart, <derhart@quicknet.net>
+ * John Sellers, <jsellers@quicknet.net>
+ * Mike Preston, <mpreston@quicknet.net>
+ *
+ * Fixes: David Huggins-Daines, <dhd@cepstral.com>
+ * Fabio Ferrari, <fabio.ferrari@digitro.com.br>
+ * Artis Kugevics, <artis@mt.lv>
+ * Daniele Bellucci, <bellucda@tiscali.it>
+ *
+ * More information about the hardware related to this driver can be found
+ * at our website: http://www.quicknet.net
+ *
+ * IN NO EVENT SHALL QUICKNET TECHNOLOGIES, INC. BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
+ * OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF QUICKNET
+ * TECHNOLOGIES, INC. HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * QUICKNET TECHNOLOGIES, INC. SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND QUICKNET TECHNOLOGIES, INC. HAS NO OBLIGATION
+ * TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ *
+ ***************************************************************************/
+
+/*
+ * Revision 4.8 2003/07/09 19:39:00 Daniele Bellucci
+ * Audit some copy_*_user and minor cleanup.
+ *
+ * Revision 4.7 2001/08/13 06:19:33 craigs
+ * Added additional changes from Alan Cox and John Anderson for
+ * 2.2 to 2.4 cleanup and bounds checking
+ *
+ * Revision 4.6 2001/08/13 01:05:05 craigs
+ * Really fixed PHONE_QUERY_CODEC problem this time
+ *
+ * Revision 4.5 2001/08/13 00:11:03 craigs
+ * Fixed problem in handling of PHONE_QUERY_CODEC, thanks to Shane Anderson
+ *
+ * Revision 4.4 2001/08/07 07:58:12 craigs
+ * Changed back to three digit version numbers
+ * Added tagbuild target to allow automatic and easy tagging of versions
+ *
+ * Revision 4.3 2001/08/07 07:24:47 craigs
+ * Added ixj-ver.h to allow easy configuration management of driver
+ * Added display of version number in /prox/ixj
+ *
+ * Revision 4.2 2001/08/06 07:07:19 craigs
+ * Reverted IXJCTL_DSP_TYPE and IXJCTL_DSP_VERSION files to original
+ * behaviour of returning int rather than short *
+ *
+ * Revision 4.1 2001/08/05 00:17:37 craigs
+ * More changes for correct PCMCIA installation
+ * Start of changes for backward Linux compatibility
+ *
+ * Revision 4.0 2001/08/04 12:33:12 craigs
+ * New version using GNU autoconf
+ *
+ * Revision 3.105 2001/07/20 23:14:32 eokerson
+ * More work on CallerID generation when using ring cadences.
+ *
+ * Revision 3.104 2001/07/06 01:33:55 eokerson
+ * Some bugfixes from Robert Vojta <vojta@ipex.cz> and a few mods to the Makefile.
+ *
+ * Revision 3.103 2001/07/05 19:20:16 eokerson
+ * Updated HOWTO
+ * Changed mic gain to 30dB on Internet LineJACK mic/speaker port.
+ *
+ * Revision 3.102 2001/07/03 23:51:21 eokerson
+ * Un-mute mic on Internet LineJACK when in speakerphone mode.
+ *
+ * Revision 3.101 2001/07/02 19:26:56 eokerson
+ * Removed initialiazation of ixjdebug and ixj_convert_loaded so they will go in the .bss instead of the .data
+ *
+ * Revision 3.100 2001/07/02 19:18:27 eokerson
+ * Changed driver to make dynamic allocation possible. We now pass IXJ * between functions instead of array indexes.
+ * Fixed the way the POTS and PSTN ports interact during a PSTN call to allow local answering.
+ * Fixed speaker mode on Internet LineJACK.
+ *
+ * Revision 3.99 2001/05/09 14:11:16 eokerson
+ * Fixed kmalloc error in ixj_build_filter_cadence. Thanks David Chan <cat@waulogy.stanford.edu>.
+ *
+ * Revision 3.98 2001/05/08 19:55:33 eokerson
+ * Fixed POTS hookstate detection while it is connected to PSTN port.
+ *
+ * Revision 3.97 2001/05/08 00:01:04 eokerson
+ * Fixed kernel oops when sending caller ID data.
+ *
+ * Revision 3.96 2001/05/04 23:09:30 eokerson
+ * Now uses one kernel timer for each card, instead of one for the entire driver.
+ *
+ * Revision 3.95 2001/04/25 22:06:47 eokerson
+ * Fixed squawking at beginning of some G.723.1 calls.
+ *
+ * Revision 3.94 2001/04/03 23:42:00 eokerson
+ * Added linear volume ioctls
+ * Added raw filter load ioctl
+ *
+ * Revision 3.93 2001/02/27 01:00:06 eokerson
+ * Fixed blocking in CallerID.
+ * Reduced size of ixj structure for smaller driver footprint.
+ *
+ * Revision 3.92 2001/02/20 22:02:59 eokerson
+ * Fixed isapnp and pcmcia module compatibility for 2.4.x kernels.
+ * Improved PSTN ring detection.
+ * Fixed wink generation on POTS ports.
+ *
+ * Revision 3.91 2001/02/13 00:55:44 eokerson
+ * Turn AEC back on after changing frame sizes.
+ *
+ * Revision 3.90 2001/02/12 16:42:00 eokerson
+ * Added ALAW codec, thanks to Fabio Ferrari for the table based converters to make ALAW from ULAW.
+ *
+ * Revision 3.89 2001/02/12 15:41:16 eokerson
+ * Fix from Artis Kugevics - Tone gains were not being set correctly.
+ *
+ * Revision 3.88 2001/02/05 23:25:42 eokerson
+ * Fixed lockup bugs with deregister.
+ *
+ * Revision 3.87 2001/01/29 21:00:39 eokerson
+ * Fix from Fabio Ferrari <fabio.ferrari@digitro.com.br> to properly handle EAGAIN and EINTR during non-blocking write.
+ * Updated copyright date.
+ *
+ * Revision 3.86 2001/01/23 23:53:46 eokerson
+ * Fixes to G.729 compatibility.
+ *
+ * Revision 3.85 2001/01/23 21:30:36 eokerson
+ * Added verbage about cards supported.
+ * Removed commands that put the card in low power mode at some times that it should not be in low power mode.
+ *
+ * Revision 3.84 2001/01/22 23:32:10 eokerson
+ * Some bugfixes from David Huggins-Daines, <dhd@cepstral.com> and other cleanups.
+ *
+ * Revision 3.83 2001/01/19 14:51:41 eokerson
+ * Fixed ixj_WriteDSPCommand to decrement usage counter when command fails.
+ *
+ * Revision 3.82 2001/01/19 00:34:49 eokerson
+ * Added verbosity to write overlap errors.
+ *
+ * Revision 3.81 2001/01/18 23:56:54 eokerson
+ * Fixed PSTN line test functions.
+ *
+ * Revision 3.80 2001/01/18 22:29:27 eokerson
+ * Updated AEC/AGC values for different cards.
+ *
+ * Revision 3.79 2001/01/17 02:58:54 eokerson
+ * Fixed AEC reset after Caller ID.
+ * Fixed Codec lockup after Caller ID on Call Waiting when not using 30ms frames.
+ *
+ * Revision 3.78 2001/01/16 19:43:09 eokerson
+ * Added support for Linux 2.4.x kernels.
+ *
+ * Revision 3.77 2001/01/09 04:00:52 eokerson
+ * Linetest will now test the line, even if it has previously succeeded.
+ *
+ * Revision 3.76 2001/01/08 19:27:00 eokerson
+ * Fixed problem with standard cable on Internet PhoneCARD.
+ *
+ * Revision 3.75 2000/12/22 16:52:14 eokerson
+ * Modified to allow hookstate detection on the POTS port when the PSTN port is selected.
+ *
+ * Revision 3.74 2000/12/08 22:41:50 eokerson
+ * Added capability for G729B.
+ *
+ * Revision 3.73 2000/12/07 23:35:16 eokerson
+ * Added capability to have different ring pattern before CallerID data.
+ * Added hookstate checks in CallerID routines to stop FSK.
+ *
+ * Revision 3.72 2000/12/06 19:31:31 eokerson
+ * Modified signal behavior to only send one signal per event.
+ *
+ * Revision 3.71 2000/12/06 03:23:08 eokerson
+ * Fixed CallerID on Call Waiting.
+ *
+ * Revision 3.70 2000/12/04 21:29:37 eokerson
+ * Added checking to Smart Cable gain functions.
+ *
+ * Revision 3.69 2000/12/04 21:05:20 eokerson
+ * Changed ixjdebug levels.
+ * Added ioctls to change gains in Internet Phone CARD Smart Cable.
+ *
+ * Revision 3.68 2000/12/04 00:17:21 craigs
+ * Changed mixer voice gain to +6dB rather than 0dB
+ *
+ * Revision 3.67 2000/11/30 21:25:51 eokerson
+ * Fixed write signal errors.
+ *
+ * Revision 3.66 2000/11/29 22:42:44 eokerson
+ * Fixed PSTN ring detect problems.
+ *
+ * Revision 3.65 2000/11/29 07:31:55 craigs
+ * Added new 425Hz filter co-efficients
+ * Added card-specific DTMF prescaler initialisation
+ *
+ * Revision 3.64 2000/11/28 14:03:32 craigs
+ * Changed certain mixer initialisations to be 0dB rather than 12dB
+ * Added additional information to /proc/ixj
+ *
+ * Revision 3.63 2000/11/28 11:38:41 craigs
+ * Added display of AEC modes in AUTO and AGC mode
+ *
+ * Revision 3.62 2000/11/28 04:05:44 eokerson
+ * Improved PSTN ring detection routine.
+ *
+ * Revision 3.61 2000/11/27 21:53:12 eokerson
+ * Fixed flash detection.
+ *
+ * Revision 3.60 2000/11/27 15:57:29 eokerson
+ * More work on G.729 load routines.
+ *
+ * Revision 3.59 2000/11/25 21:55:12 eokerson
+ * Fixed errors in G.729 load routine.
+ *
+ * Revision 3.58 2000/11/25 04:08:29 eokerson
+ * Added board locks around G.729 and TS85 load routines.
+ *
+ * Revision 3.57 2000/11/24 05:35:17 craigs
+ * Added ability to retrieve mixer values on LineJACK
+ * Added complete initialisation of all mixer values at startup
+ * Fixed spelling mistake
+ *
+ * Revision 3.56 2000/11/23 02:52:11 robertj
+ * Added cvs change log keyword.
+ * Fixed bug in capabilities list when using G.729 module.
+ *
+ */
+
+#include "ixj-ver.h"
+
+#define PERFMON_STATS
+#define IXJDEBUG 0
+#define MAXRINGS 5
+
+#include <linux/module.h>
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/fs.h> /* everything... */
+#include <linux/errno.h> /* error codes */
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/poll.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include <linux/isapnp.h>
+
+#include "ixj.h"
+
+#define TYPE(inode) (iminor(inode) >> 4)
+#define NUM(inode) (iminor(inode) & 0xf)
+
+static DEFINE_MUTEX(ixj_mutex);
+static int ixjdebug;
+static int hertz = HZ;
+static int samplerate = 100;
+
+module_param(ixjdebug, int, 0);
+
+static DEFINE_PCI_DEVICE_TABLE(ixj_pci_tbl) = {
+ { PCI_VENDOR_ID_QUICKNET, PCI_DEVICE_ID_QUICKNET_XJ,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { }
+};
+MODULE_DEVICE_TABLE(pci, ixj_pci_tbl);
+
+/************************************************************************
+*
+* ixjdebug meanings are now bit mapped instead of level based
+* Values can be or'ed together to turn on multiple messages
+*
+* bit 0 (0x0001) = any failure
+* bit 1 (0x0002) = general messages
+* bit 2 (0x0004) = POTS ringing related
+* bit 3 (0x0008) = PSTN events
+* bit 4 (0x0010) = PSTN Cadence state details
+* bit 5 (0x0020) = Tone detection triggers
+* bit 6 (0x0040) = Tone detection cadence details
+* bit 7 (0x0080) = ioctl tracking
+* bit 8 (0x0100) = signal tracking
+* bit 9 (0x0200) = CallerID generation details
+*
+************************************************************************/
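+
+/*
+ * Illustrative example: the bits can be OR'ed together at load time,
+ * e.g.
+ *
+ *	modprobe ixj ixjdebug=0x0009
+ *
+ * enables any-failure messages (0x0001) together with PSTN event
+ * messages (0x0008).
+ */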
+
+#ifdef IXJ_DYN_ALLOC
+
+static IXJ *ixj[IXJMAX];
+#define get_ixj(b) ixj[(b)]
+
+/*
+ * Allocate a free IXJ device
+ */
+
+static IXJ *ixj_alloc(void)
+{
+	int cnt;
+	IXJ *j;
+
+	for(cnt=0; cnt<IXJMAX; cnt++)
+	{
+		if(ixj[cnt] == NULL || !ixj[cnt]->DSPbase)
+		{
+			j = kmalloc(sizeof(IXJ), GFP_KERNEL);
+			if (j == NULL)
+				return NULL;
+			ixj[cnt] = j;
+			return j;
+		}
+	}
+	return NULL;
+}
+
+static void ixj_fsk_free(IXJ *j)
+{
+ kfree(j->fskdata);
+ j->fskdata = NULL;
+}
+
+static void ixj_fsk_alloc(IXJ *j)
+{
+ if(!j->fskdata) {
+ j->fskdata = kmalloc(8000, GFP_KERNEL);
+ if (!j->fskdata) {
+ if(ixjdebug & 0x0200) {
+				printk(KERN_ERR "IXJ phone%d - allocate failed\n", j->board);
+ }
+ return;
+ } else {
+ j->fsksize = 8000;
+ if(ixjdebug & 0x0200) {
+				printk(KERN_INFO "IXJ phone%d - allocate succeeded\n", j->board);
+ }
+ }
+ }
+}
+
+#else
+
+static IXJ ixj[IXJMAX];
+#define get_ixj(b) (&ixj[(b)])
+
+/*
+ * Allocate a free IXJ device
+ */
+
+static IXJ *ixj_alloc(void)
+{
+ int cnt;
+ for(cnt=0; cnt<IXJMAX; cnt++) {
+ if(!ixj[cnt].DSPbase)
+ return &ixj[cnt];
+ }
+ return NULL;
+}
+
+static inline void ixj_fsk_free(IXJ *j) {;}
+
+static inline void ixj_fsk_alloc(IXJ *j)
+{
+ j->fsksize = 8000;
+}
+
+#endif
+
+#ifdef PERFMON_STATS
+#define ixj_perfmon(x) ((x)++)
+#else
+#define ixj_perfmon(x) do { } while(0)
+#endif
+
+static int ixj_convert_loaded;
+
+static int ixj_WriteDSPCommand(unsigned short, IXJ *j);
+
+/************************************************************************
+*
+* These are function definitions to allow external modules to register
+* enhanced functionality call backs.
+*
+************************************************************************/
+
+static int Stub(IXJ * J, unsigned long arg)
+{
+ return 0;
+}
+
+static IXJ_REGFUNC ixj_PreRead = &Stub;
+static IXJ_REGFUNC ixj_PostRead = &Stub;
+static IXJ_REGFUNC ixj_PreWrite = &Stub;
+static IXJ_REGFUNC ixj_PostWrite = &Stub;
+
+static void ixj_read_frame(IXJ *j);
+static void ixj_write_frame(IXJ *j);
+static void ixj_init_timer(IXJ *j);
+static void ixj_add_timer(IXJ * j);
+static void ixj_timeout(unsigned long ptr);
+static int read_filters(IXJ *j);
+static int LineMonitor(IXJ *j);
+static int ixj_fasync(int fd, struct file *, int mode);
+static int ixj_set_port(IXJ *j, int arg);
+static int ixj_set_pots(IXJ *j, int arg);
+static int ixj_hookstate(IXJ *j);
+static int ixj_record_start(IXJ *j);
+static void ixj_record_stop(IXJ *j);
+static void set_rec_volume(IXJ *j, int volume);
+static int get_rec_volume(IXJ *j);
+static int set_rec_codec(IXJ *j, int rate);
+static void ixj_vad(IXJ *j, int arg);
+static int ixj_play_start(IXJ *j);
+static void ixj_play_stop(IXJ *j);
+static int ixj_set_tone_on(unsigned short arg, IXJ *j);
+static int ixj_set_tone_off(unsigned short, IXJ *j);
+static int ixj_play_tone(IXJ *j, char tone);
+static void ixj_aec_start(IXJ *j, int level);
+static int idle(IXJ *j);
+static void ixj_ring_on(IXJ *j);
+static void ixj_ring_off(IXJ *j);
+static void aec_stop(IXJ *j);
+static void ixj_ringback(IXJ *j);
+static void ixj_busytone(IXJ *j);
+static void ixj_dialtone(IXJ *j);
+static void ixj_cpt_stop(IXJ *j);
+static char daa_int_read(IXJ *j);
+static char daa_CR_read(IXJ *j, int cr);
+static int daa_set_mode(IXJ *j, int mode);
+static int ixj_linetest(IXJ *j);
+static int ixj_daa_write(IXJ *j);
+static int ixj_daa_cid_read(IXJ *j);
+static void DAA_Coeff_US(IXJ *j);
+static void DAA_Coeff_UK(IXJ *j);
+static void DAA_Coeff_France(IXJ *j);
+static void DAA_Coeff_Germany(IXJ *j);
+static void DAA_Coeff_Australia(IXJ *j);
+static void DAA_Coeff_Japan(IXJ *j);
+static int ixj_init_filter(IXJ *j, IXJ_FILTER * jf);
+static int ixj_init_filter_raw(IXJ *j, IXJ_FILTER_RAW * jfr);
+static int ixj_init_tone(IXJ *j, IXJ_TONE * ti);
+static int ixj_build_cadence(IXJ *j, IXJ_CADENCE __user * cp);
+static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user * cp);
+/* Serial Control Interface functions */
+static int SCI_Control(IXJ *j, int control);
+static int SCI_Prepare(IXJ *j);
+static int SCI_WaitHighSCI(IXJ *j);
+static int SCI_WaitLowSCI(IXJ *j);
+static DWORD PCIEE_GetSerialNumber(WORD wAddress);
+static int ixj_PCcontrol_wait(IXJ *j);
+static void ixj_pre_cid(IXJ *j);
+static void ixj_write_cid(IXJ *j);
+static void ixj_write_cid_bit(IXJ *j, int bit);
+static int set_base_frame(IXJ *j, int size);
+static int set_play_codec(IXJ *j, int rate);
+static void set_rec_depth(IXJ *j, int depth);
+static int ixj_mixer(long val, IXJ *j);
+
+/************************************************************************
+CT8020/CT8021 Host Programmer's Model
+Host address Function Access
+DSPbase +
+0-1 Aux Software Status Register (reserved) Read Only
+2-3 Software Status Register Read Only
+4-5 Aux Software Control Register (reserved) Read Write
+6-7 Software Control Register Read Write
+8-9 Hardware Status Register Read Only
+A-B Hardware Control Register Read Write
+C-D Host Transmit (Write) Data Buffer Access Port (buffer input)Write Only
+E-F Host Receive (Read) Data Buffer Access Port (buffer input) Read Only
+************************************************************************/
+
+static inline void ixj_read_HSR(IXJ *j)
+{
+ j->hsr.bytes.low = inb_p(j->DSPbase + 8);
+ j->hsr.bytes.high = inb_p(j->DSPbase + 9);
+}
+
+static inline int IsControlReady(IXJ *j)
+{
+ ixj_read_HSR(j);
+ return j->hsr.bits.controlrdy ? 1 : 0;
+}
+
+static inline int IsPCControlReady(IXJ *j)
+{
+ j->pccr1.byte = inb_p(j->XILINXbase + 3);
+ return j->pccr1.bits.crr ? 1 : 0;
+}
+
+static inline int IsStatusReady(IXJ *j)
+{
+ ixj_read_HSR(j);
+ return j->hsr.bits.statusrdy ? 1 : 0;
+}
+
+static inline int IsRxReady(IXJ *j)
+{
+ ixj_read_HSR(j);
+ ixj_perfmon(j->rxreadycheck);
+ return j->hsr.bits.rxrdy ? 1 : 0;
+}
+
+static inline int IsTxReady(IXJ *j)
+{
+ ixj_read_HSR(j);
+ ixj_perfmon(j->txreadycheck);
+ return j->hsr.bits.txrdy ? 1 : 0;
+}
+
+static inline void set_play_volume(IXJ *j, int volume)
+{
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: /dev/phone%d Setting Play Volume to 0x%4.4x\n", j->board, volume);
+ ixj_WriteDSPCommand(0xCF02, j);
+ ixj_WriteDSPCommand(volume, j);
+}
+
+static int set_play_volume_linear(IXJ *j, int volume)
+{
+ int newvolume, dspplaymax;
+
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: /dev/phone %d Setting Linear Play Volume to 0x%4.4x\n", j->board, volume);
+ if(volume > 100 || volume < 0) {
+ return -1;
+ }
+
+ /* This should normalize the perceived volumes between the different cards caused by differences in the hardware */
+ switch (j->cardtype) {
+ case QTI_PHONEJACK:
+ dspplaymax = 0x380;
+ break;
+ case QTI_LINEJACK:
+ if(j->port == PORT_PSTN) {
+ dspplaymax = 0x48;
+ } else {
+ dspplaymax = 0x100;
+ }
+ break;
+ case QTI_PHONEJACK_LITE:
+ dspplaymax = 0x380;
+ break;
+ case QTI_PHONEJACK_PCI:
+ dspplaymax = 0x6C;
+ break;
+ case QTI_PHONECARD:
+ dspplaymax = 0x50;
+ break;
+ default:
+ return -1;
+ }
+ newvolume = (dspplaymax * volume) / 100;
+ set_play_volume(j, newvolume);
+ return 0;
+}
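+
+/*
+ * Illustrative example: on a QTI_PHONEJACK (dspplaymax = 0x380), a
+ * linear volume of 50 maps to (0x380 * 50) / 100 = 0x1C0.
+ */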
+
+static inline void set_play_depth(IXJ *j, int depth)
+{
+ if (depth > 60)
+ depth = 60;
+ if (depth < 0)
+ depth = 0;
+ ixj_WriteDSPCommand(0x5280 + depth, j);
+}
+
+static inline int get_play_volume(IXJ *j)
+{
+ ixj_WriteDSPCommand(0xCF00, j);
+ return j->ssr.high << 8 | j->ssr.low;
+}
+
+static int get_play_volume_linear(IXJ *j)
+{
+ int volume, newvolume, dspplaymax;
+
+ /* This should normalize the perceived volumes between the different cards caused by differences in the hardware */
+ switch (j->cardtype) {
+ case QTI_PHONEJACK:
+ dspplaymax = 0x380;
+ break;
+ case QTI_LINEJACK:
+ if(j->port == PORT_PSTN) {
+ dspplaymax = 0x48;
+ } else {
+ dspplaymax = 0x100;
+ }
+ break;
+ case QTI_PHONEJACK_LITE:
+ dspplaymax = 0x380;
+ break;
+ case QTI_PHONEJACK_PCI:
+ dspplaymax = 0x6C;
+ break;
+ case QTI_PHONECARD:
+ dspplaymax = 100;
+ break;
+ default:
+ return -1;
+ }
+ volume = get_play_volume(j);
+ newvolume = (volume * 100) / dspplaymax;
+ if(newvolume > 100)
+ newvolume = 100;
+ return newvolume;
+}
+
+static inline BYTE SLIC_GetState(IXJ *j)
+{
+ if (j->cardtype == QTI_PHONECARD) {
+ j->pccr1.byte = 0;
+ j->psccr.bits.dev = 3;
+ j->psccr.bits.rw = 1;
+ outw_p(j->psccr.byte << 8, j->XILINXbase + 0x00);
+ ixj_PCcontrol_wait(j);
+ j->pslic.byte = inw_p(j->XILINXbase + 0x00) & 0xFF;
+ ixj_PCcontrol_wait(j);
+ if (j->pslic.bits.powerdown)
+ return PLD_SLIC_STATE_OC;
+ else if (!j->pslic.bits.ring0 && !j->pslic.bits.ring1)
+ return PLD_SLIC_STATE_ACTIVE;
+ else
+ return PLD_SLIC_STATE_RINGING;
+ } else {
+ j->pld_slicr.byte = inb_p(j->XILINXbase + 0x01);
+ }
+ return j->pld_slicr.bits.state;
+}
+
+static bool SLIC_SetState(BYTE byState, IXJ *j)
+{
+ bool fRetVal = false;
+
+ if (j->cardtype == QTI_PHONECARD) {
+ if (j->flags.pcmciasct) {
+ switch (byState) {
+ case PLD_SLIC_STATE_TIPOPEN:
+ case PLD_SLIC_STATE_OC:
+ j->pslic.bits.powerdown = 1;
+ j->pslic.bits.ring0 = j->pslic.bits.ring1 = 0;
+ fRetVal = true;
+ break;
+ case PLD_SLIC_STATE_RINGING:
+ if (j->readers || j->writers) {
+ j->pslic.bits.powerdown = 0;
+ j->pslic.bits.ring0 = 1;
+ j->pslic.bits.ring1 = 0;
+ fRetVal = true;
+ }
+ break;
+ case PLD_SLIC_STATE_OHT: /* On-hook transmit */
+
+ case PLD_SLIC_STATE_STANDBY:
+ case PLD_SLIC_STATE_ACTIVE:
+ if (j->readers || j->writers) {
+ j->pslic.bits.powerdown = 0;
+ } else {
+ j->pslic.bits.powerdown = 1;
+ }
+ j->pslic.bits.ring0 = j->pslic.bits.ring1 = 0;
+ fRetVal = true;
+ break;
+ case PLD_SLIC_STATE_APR: /* Active polarity reversal */
+
+ case PLD_SLIC_STATE_OHTPR: /* OHT polarity reversal */
+
+ default:
+ fRetVal = false;
+ break;
+ }
+ j->psccr.bits.dev = 3;
+ j->psccr.bits.rw = 0;
+ outw_p(j->psccr.byte << 8 | j->pslic.byte, j->XILINXbase + 0x00);
+ ixj_PCcontrol_wait(j);
+ }
+ } else {
+ /* Set the C1, C2, C3 & B2EN signals. */
+ switch (byState) {
+ case PLD_SLIC_STATE_OC:
+ j->pld_slicw.bits.c1 = 0;
+ j->pld_slicw.bits.c2 = 0;
+ j->pld_slicw.bits.c3 = 0;
+ j->pld_slicw.bits.b2en = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ fRetVal = true;
+ break;
+ case PLD_SLIC_STATE_RINGING:
+ j->pld_slicw.bits.c1 = 1;
+ j->pld_slicw.bits.c2 = 0;
+ j->pld_slicw.bits.c3 = 0;
+ j->pld_slicw.bits.b2en = 1;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ fRetVal = true;
+ break;
+ case PLD_SLIC_STATE_ACTIVE:
+ j->pld_slicw.bits.c1 = 0;
+ j->pld_slicw.bits.c2 = 1;
+ j->pld_slicw.bits.c3 = 0;
+ j->pld_slicw.bits.b2en = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ fRetVal = true;
+ break;
+ case PLD_SLIC_STATE_OHT: /* On-hook transmit */
+
+ j->pld_slicw.bits.c1 = 1;
+ j->pld_slicw.bits.c2 = 1;
+ j->pld_slicw.bits.c3 = 0;
+ j->pld_slicw.bits.b2en = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ fRetVal = true;
+ break;
+ case PLD_SLIC_STATE_TIPOPEN:
+ j->pld_slicw.bits.c1 = 0;
+ j->pld_slicw.bits.c2 = 0;
+ j->pld_slicw.bits.c3 = 1;
+ j->pld_slicw.bits.b2en = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ fRetVal = true;
+ break;
+ case PLD_SLIC_STATE_STANDBY:
+ j->pld_slicw.bits.c1 = 1;
+ j->pld_slicw.bits.c2 = 0;
+ j->pld_slicw.bits.c3 = 1;
+ j->pld_slicw.bits.b2en = 1;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ fRetVal = true;
+ break;
+ case PLD_SLIC_STATE_APR: /* Active polarity reversal */
+
+ j->pld_slicw.bits.c1 = 0;
+ j->pld_slicw.bits.c2 = 1;
+ j->pld_slicw.bits.c3 = 1;
+ j->pld_slicw.bits.b2en = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ fRetVal = true;
+ break;
+ case PLD_SLIC_STATE_OHTPR: /* OHT polarity reversal */
+
+ j->pld_slicw.bits.c1 = 1;
+ j->pld_slicw.bits.c2 = 1;
+ j->pld_slicw.bits.c3 = 1;
+ j->pld_slicw.bits.b2en = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ fRetVal = true;
+ break;
+ default:
+ fRetVal = false;
+ break;
+ }
+ }
+
+ return fRetVal;
+}
+
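+/*
+ * Hook-flash ("wink") toward the attached equipment: force the SLIC to
+ * open circuit for j->winktime milliseconds, then restore the previous
+ * SLIC state.
+ */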
+static int ixj_wink(IXJ *j)
+{
+ BYTE slicnow;
+
+ slicnow = SLIC_GetState(j);
+
+ j->pots_winkstart = jiffies;
+ SLIC_SetState(PLD_SLIC_STATE_OC, j);
+
+ msleep(j->winktime); /* j->winktime is in milliseconds */
+
+ SLIC_SetState(slicnow, j);
+ return 0;
+}
+
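+/*
+ * The driver is driven by a self-rearming kernel timer rather than an
+ * interrupt: ixj_timeout() runs every hertz/samplerate jiffies and polls
+ * the hardware (see ixj_add_timer() below).
+ */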
+static void ixj_init_timer(IXJ *j)
+{
+ init_timer(&j->timer);
+ j->timer.function = ixj_timeout;
+ j->timer.data = (unsigned long)j;
+}
+
+static void ixj_add_timer(IXJ *j)
+{
+ j->timer.expires = jiffies + (hertz / samplerate);
+ add_timer(&j->timer);
+}
+
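+/*
+ * Advance the tone cadence state machine.  j->tone_state cycles 0..2 from
+ * the timer; every third tick the next cadence element is loaded, and when
+ * the last element has played the cadence stops, repeats its last element,
+ * or restarts, according to j->cadence_t->termination.
+ */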
+static void ixj_tone_timeout(IXJ *j)
+{
+ IXJ_TONE ti;
+
+ j->tone_state++;
+ if (j->tone_state == 3) {
+ j->tone_state = 0;
+ if (j->cadence_t) {
+ j->tone_cadence_state++;
+ if (j->tone_cadence_state >= j->cadence_t->elements_used) {
+ switch (j->cadence_t->termination) {
+ case PLAY_ONCE:
+ ixj_cpt_stop(j);
+ break;
+ case REPEAT_LAST_ELEMENT:
+ j->tone_cadence_state--;
+ ixj_play_tone(j, j->cadence_t->ce[j->tone_cadence_state].index);
+ break;
+ case REPEAT_ALL:
+ j->tone_cadence_state = 0;
+ if (j->cadence_t->ce[j->tone_cadence_state].freq0) {
+ ti.tone_index = j->cadence_t->ce[j->tone_cadence_state].index;
+ ti.freq0 = j->cadence_t->ce[j->tone_cadence_state].freq0;
+ ti.gain0 = j->cadence_t->ce[j->tone_cadence_state].gain0;
+ ti.freq1 = j->cadence_t->ce[j->tone_cadence_state].freq1;
+ ti.gain1 = j->cadence_t->ce[j->tone_cadence_state].gain1;
+ ixj_init_tone(j, &ti);
+ }
+ ixj_set_tone_on(j->cadence_t->ce[0].tone_on_time, j);
+ ixj_set_tone_off(j->cadence_t->ce[0].tone_off_time, j);
+ ixj_play_tone(j, j->cadence_t->ce[0].index);
+ break;
+ }
+ } else {
+ if (j->cadence_t->ce[j->tone_cadence_state].gain0) {
+ ti.tone_index = j->cadence_t->ce[j->tone_cadence_state].index;
+ ti.freq0 = j->cadence_t->ce[j->tone_cadence_state].freq0;
+ ti.gain0 = j->cadence_t->ce[j->tone_cadence_state].gain0;
+ ti.freq1 = j->cadence_t->ce[j->tone_cadence_state].freq1;
+ ti.gain1 = j->cadence_t->ce[j->tone_cadence_state].gain1;
+ ixj_init_tone(j, &ti);
+ }
+ ixj_set_tone_on(j->cadence_t->ce[j->tone_cadence_state].tone_on_time, j);
+ ixj_set_tone_off(j->cadence_t->ce[j->tone_cadence_state].tone_off_time, j);
+ ixj_play_tone(j, j->cadence_t->ce[j->tone_cadence_state].index);
+ }
+ }
+ }
+}
+
+static inline void ixj_kill_fasync(IXJ *j, IXJ_SIGEVENT event, int dir)
+{
+ if(j->ixj_signals[event]) {
+ if(ixjdebug & 0x0100)
+ printk("Sending signal for event %d\n", event);
+ /* Send apps notice of change */
+ /* see config.h for macro definition */
+ kill_fasync(&(j->async_queue), j->ixj_signals[event], dir);
+ }
+}
+
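+/*
+ * Service the DAA (PSTN interface): collect ring, caller-ID, cadence and
+ * line-voltage interrupts, then run the state machine for the current DAA
+ * mode.  Ring cadence matching in cadence_f[4] accepts each on/off segment
+ * inside a +/- var% (here 10%) window around its nominal length, in jiffies.
+ */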
+static void ixj_pstn_state(IXJ *j)
+{
+ int var;
+ union XOPXR0 XR0, daaint;
+
+ var = 10;
+
+ XR0.reg = j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.reg;
+ daaint.reg = 0;
+ XR0.bitreg.RMR = j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.bitreg.RMR;
+
+ j->pld_scrr.byte = inb_p(j->XILINXbase);
+ if (j->pld_scrr.bits.daaflag) {
+ daa_int_read(j);
+ if(j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.RING) {
+ if(time_after(jiffies, j->pstn_sleeptil) && !(j->flags.pots_pstn && j->hookstate)) {
+ daaint.bitreg.RING = 1;
+ if(ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ DAA Ring Interrupt /dev/phone%d at %ld\n", j->board, jiffies);
+ }
+ } else {
+ daa_set_mode(j, SOP_PU_RESET);
+ }
+ }
+ if(j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.Caller_ID) {
+ daaint.bitreg.Caller_ID = 1;
+ j->pstn_cid_intr = 1;
+ j->pstn_cid_received = jiffies;
+ if(ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ DAA Caller_ID Interrupt /dev/phone%d at %ld\n", j->board, jiffies);
+ }
+ }
+ if(j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.Cadence) {
+ daaint.bitreg.Cadence = 1;
+ if(ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ DAA Cadence Interrupt /dev/phone%d at %ld\n", j->board, jiffies);
+ }
+ }
+ if(j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK != XR0.bitreg.VDD_OK) {
+ daaint.bitreg.VDD_OK = 1;
+ daaint.bitreg.SI_0 = j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK;
+ }
+ }
+ daa_CR_read(j, 1);
+ if(j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.bitreg.RMR != XR0.bitreg.RMR && time_after(jiffies, j->pstn_sleeptil) && !(j->flags.pots_pstn && j->hookstate)) {
+ daaint.bitreg.RMR = 1;
+ daaint.bitreg.SI_1 = j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.bitreg.RMR;
+ if(ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ DAA RMR /dev/phone%d was %s for %ld\n", j->board, XR0.bitreg.RMR?"on":"off", jiffies - j->pstn_last_rmr);
+ }
+ j->pstn_prev_rmr = j->pstn_last_rmr;
+ j->pstn_last_rmr = jiffies;
+ }
+ switch(j->daa_mode) {
+ case SOP_PU_SLEEP:
+ if (daaint.bitreg.RING) {
+ if (!j->flags.pstn_ringing) {
+ if (j->daa_mode != SOP_PU_RINGING) {
+ j->pstn_ring_int = jiffies;
+ daa_set_mode(j, SOP_PU_RINGING);
+ }
+ }
+ }
+ break;
+ case SOP_PU_RINGING:
+ if (daaint.bitreg.RMR) {
+ if (ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ Ring Cadence a state = %d /dev/phone%d at %ld\n", j->cadence_f[4].state, j->board, jiffies);
+ }
+ if (daaint.bitreg.SI_1) { /* Rising edge of RMR */
+ j->flags.pstn_rmr = 1;
+ j->pstn_ring_start = jiffies;
+ j->pstn_ring_stop = 0;
+ j->ex.bits.pstn_ring = 0;
+ if (j->cadence_f[4].state == 0) {
+ j->cadence_f[4].state = 1;
+ j->cadence_f[4].on1min = jiffies + (long)((j->cadence_f[4].on1 * hertz * (100 - var)) / 10000);
+ j->cadence_f[4].on1dot = jiffies + (long)((j->cadence_f[4].on1 * hertz * (100)) / 10000);
+ j->cadence_f[4].on1max = jiffies + (long)((j->cadence_f[4].on1 * hertz * (100 + var)) / 10000);
+ } else if (j->cadence_f[4].state == 2) {
+ if((time_after(jiffies, j->cadence_f[4].off1min) &&
+ time_before(jiffies, j->cadence_f[4].off1max))) {
+ if (j->cadence_f[4].on2) {
+ j->cadence_f[4].state = 3;
+ j->cadence_f[4].on2min = jiffies + (long)((j->cadence_f[4].on2 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[4].on2dot = jiffies + (long)((j->cadence_f[4].on2 * (hertz * (100)) / 10000));
+ j->cadence_f[4].on2max = jiffies + (long)((j->cadence_f[4].on2 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[4].state = 7;
+ }
+ } else {
+ if (ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ Ring Cadence fail state = %d /dev/phone%d at %ld should be %d\n",
+ j->cadence_f[4].state, j->board, jiffies - j->pstn_prev_rmr,
+ j->cadence_f[4].off1);
+ }
+ j->cadence_f[4].state = 0;
+ }
+ } else if (j->cadence_f[4].state == 4) {
+ if((time_after(jiffies, j->cadence_f[4].off2min) &&
+ time_before(jiffies, j->cadence_f[4].off2max))) {
+ if (j->cadence_f[4].on3) {
+ j->cadence_f[4].state = 5;
+ j->cadence_f[4].on3min = jiffies + (long)((j->cadence_f[4].on3 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[4].on3dot = jiffies + (long)((j->cadence_f[4].on3 * (hertz * (100)) / 10000));
+ j->cadence_f[4].on3max = jiffies + (long)((j->cadence_f[4].on3 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[4].state = 7;
+ }
+ } else {
+ if (ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ Ring Cadence fail state = %d /dev/phone%d at %ld should be %d\n",
+ j->cadence_f[4].state, j->board, jiffies - j->pstn_prev_rmr,
+ j->cadence_f[4].off2);
+ }
+ j->cadence_f[4].state = 0;
+ }
+ } else if (j->cadence_f[4].state == 6) {
+ if((time_after(jiffies, j->cadence_f[4].off3min) &&
+ time_before(jiffies, j->cadence_f[4].off3max))) {
+ j->cadence_f[4].state = 7;
+ } else {
+ if (ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ Ring Cadence fail state = %d /dev/phone%d at %ld should be %d\n",
+ j->cadence_f[4].state, j->board, jiffies - j->pstn_prev_rmr,
+ j->cadence_f[4].off3);
+ }
+ j->cadence_f[4].state = 0;
+ }
+ } else {
+ j->cadence_f[4].state = 0;
+ }
+ } else { /* Falling edge of RMR */
+ j->pstn_ring_start = 0;
+ j->pstn_ring_stop = jiffies;
+ if (j->cadence_f[4].state == 1) {
+ if(!j->cadence_f[4].on1) {
+ j->cadence_f[4].state = 7;
+ } else if((time_after(jiffies, j->cadence_f[4].on1min) &&
+ time_before(jiffies, j->cadence_f[4].on1max))) {
+ if (j->cadence_f[4].off1) {
+ j->cadence_f[4].state = 2;
+ j->cadence_f[4].off1min = jiffies + (long)((j->cadence_f[4].off1 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[4].off1dot = jiffies + (long)((j->cadence_f[4].off1 * (hertz * (100)) / 10000));
+ j->cadence_f[4].off1max = jiffies + (long)((j->cadence_f[4].off1 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[4].state = 7;
+ }
+ } else {
+ if (ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ Ring Cadence fail state = %d /dev/phone%d at %ld should be %d\n",
+ j->cadence_f[4].state, j->board, jiffies - j->pstn_prev_rmr,
+ j->cadence_f[4].on1);
+ }
+ j->cadence_f[4].state = 0;
+ }
+ } else if (j->cadence_f[4].state == 3) {
+ if((time_after(jiffies, j->cadence_f[4].on2min) &&
+ time_before(jiffies, j->cadence_f[4].on2max))) {
+ if (j->cadence_f[4].off2) {
+ j->cadence_f[4].state = 4;
+ j->cadence_f[4].off2min = jiffies + (long)((j->cadence_f[4].off2 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[4].off2dot = jiffies + (long)((j->cadence_f[4].off2 * (hertz * (100)) / 10000));
+ j->cadence_f[4].off2max = jiffies + (long)((j->cadence_f[4].off2 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[4].state = 7;
+ }
+ } else {
+ if (ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ Ring Cadence fail state = %d /dev/phone%d at %ld should be %d\n",
+ j->cadence_f[4].state, j->board, jiffies - j->pstn_prev_rmr,
+ j->cadence_f[4].on2);
+ }
+ j->cadence_f[4].state = 0;
+ }
+ } else if (j->cadence_f[4].state == 5) {
+ if((time_after(jiffies, j->cadence_f[4].on3min) &&
+ time_before(jiffies, j->cadence_f[4].on3max))) {
+ if (j->cadence_f[4].off3) {
+ j->cadence_f[4].state = 6;
+ j->cadence_f[4].off3min = jiffies + (long)((j->cadence_f[4].off3 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[4].off3dot = jiffies + (long)((j->cadence_f[4].off3 * (hertz * (100)) / 10000));
+ j->cadence_f[4].off3max = jiffies + (long)((j->cadence_f[4].off3 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[4].state = 7;
+ }
+ } else {
+ j->cadence_f[4].state = 0;
+ }
+ } else {
+ if (ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ Ring Cadence fail state = %d /dev/phone%d at %ld should be %d\n",
+ j->cadence_f[4].state, j->board, jiffies - j->pstn_prev_rmr,
+ j->cadence_f[4].on3);
+ }
+ j->cadence_f[4].state = 0;
+ }
+ }
+ if (ixjdebug & 0x0010) {
+ printk(KERN_INFO "IXJ Ring Cadence b state = %d /dev/phone%d at %ld\n", j->cadence_f[4].state, j->board, jiffies);
+ }
+ if (ixjdebug & 0x0010) {
+ switch(j->cadence_f[4].state) {
+ case 1:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Ring Cadence state at %u min %ld - %ld - max %ld\n", j->board,
+ j->cadence_f[4].on1, j->cadence_f[4].on1min, j->cadence_f[4].on1dot, j->cadence_f[4].on1max);
+ break;
+ case 2:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Ring Cadence state at %u min %ld - %ld - max %ld\n", j->board,
+ j->cadence_f[4].off1, j->cadence_f[4].off1min, j->cadence_f[4].off1dot, j->cadence_f[4].off1max);
+ break;
+ case 3:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Ring Cadence state at %u min %ld - %ld - max %ld\n", j->board,
+ j->cadence_f[4].on2, j->cadence_f[4].on2min, j->cadence_f[4].on2dot, j->cadence_f[4].on2max);
+ break;
+ case 4:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Ring Cadence state at %u min %ld - %ld - max %ld\n", j->board,
+ j->cadence_f[4].off2, j->cadence_f[4].off2min, j->cadence_f[4].off2dot, j->cadence_f[4].off2max);
+ break;
+ case 5:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Ring Cadence state at %u min %ld - %ld - max %ld\n", j->board,
+ j->cadence_f[4].on3, j->cadence_f[4].on3min, j->cadence_f[4].on3dot, j->cadence_f[4].on3max);
+ break;
+ case 6:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Ring Cadence state at %u min %ld - %ld - max %ld\n", j->board,
+ j->cadence_f[4].off3, j->cadence_f[4].off3min, j->cadence_f[4].off3dot, j->cadence_f[4].off3max);
+ break;
+ }
+ }
+ }
+ if (j->cadence_f[4].state == 7) {
+ j->cadence_f[4].state = 0;
+ j->pstn_ring_stop = jiffies;
+ j->ex.bits.pstn_ring = 1;
+ ixj_kill_fasync(j, SIG_PSTN_RING, POLL_IN);
+ if(ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ Ring int set /dev/phone%d at %ld\n", j->board, jiffies);
+ }
+ }
+ if((j->pstn_ring_int != 0 && time_after(jiffies, j->pstn_ring_int + (hertz * 5)) && !j->flags.pstn_rmr) ||
+ (j->pstn_ring_stop != 0 && time_after(jiffies, j->pstn_ring_stop + (hertz * 5)))) {
+ if(ixjdebug & 0x0008) {
+ printk("IXJ DAA no ring in 5 seconds /dev/phone%d at %ld\n", j->board, jiffies);
+ printk("IXJ DAA pstn ring int /dev/phone%d at %ld\n", j->board, j->pstn_ring_int);
+ printk("IXJ DAA pstn ring stop /dev/phone%d at %ld\n", j->board, j->pstn_ring_stop);
+ }
+ j->pstn_ring_stop = j->pstn_ring_int = 0;
+ daa_set_mode(j, SOP_PU_SLEEP);
+ }
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ if (j->pstn_cid_intr && time_after(jiffies, j->pstn_cid_received + hertz)) {
+ ixj_daa_cid_read(j);
+ j->ex.bits.caller_id = 1;
+ ixj_kill_fasync(j, SIG_CALLER_ID, POLL_IN);
+ j->pstn_cid_intr = 0;
+ }
+ if (daaint.bitreg.Cadence) {
+ if(ixjdebug & 0x0008) {
+ printk("IXJ DAA Cadence interrupt going to sleep /dev/phone%d\n", j->board);
+ }
+ daa_set_mode(j, SOP_PU_SLEEP);
+ j->ex.bits.pstn_ring = 0;
+ }
+ break;
+ case SOP_PU_CONVERSATION:
+ if (daaint.bitreg.VDD_OK) {
+ if(!daaint.bitreg.SI_0) {
+ if (!j->pstn_winkstart) {
+ if(ixjdebug & 0x0008) {
+ printk("IXJ DAA possible wink /dev/phone%d %ld\n", j->board, jiffies);
+ }
+ j->pstn_winkstart = jiffies;
+ }
+ } else {
+ if (j->pstn_winkstart) {
+ if(ixjdebug & 0x0008) {
+ printk("IXJ DAA possible wink end /dev/phone%d %ld\n", j->board, jiffies);
+ }
+ j->pstn_winkstart = 0;
+ }
+ }
+ }
+ if (j->pstn_winkstart && time_after(jiffies, j->pstn_winkstart + ((hertz * j->winktime) / 1000))) {
+ if(ixjdebug & 0x0008) {
+ printk("IXJ DAA wink detected going to sleep /dev/phone%d %ld\n", j->board, jiffies);
+ }
+ daa_set_mode(j, SOP_PU_SLEEP);
+ j->pstn_winkstart = 0;
+ j->ex.bits.pstn_wink = 1;
+ ixj_kill_fasync(j, SIG_PSTN_WINK, POLL_IN);
+ }
+ break;
+ }
+}
+
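+/*
+ * Periodic service routine (see ixj_init_timer()).  The per-board bit in
+ * j->busyflags acts as a non-blocking lock: if another path holds it, this
+ * pass is skipped and the timer is simply re-armed.
+ */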
+static void ixj_timeout(unsigned long ptr)
+{
+ int board;
+ unsigned long jifon;
+ IXJ *j = (IXJ *)ptr;
+ board = j->board;
+
+ if (j->DSPbase && atomic_read(&j->DSPWrite) == 0 && test_and_set_bit(board, (void *)&j->busyflags) == 0) {
+ ixj_perfmon(j->timerchecks);
+ j->hookstate = ixj_hookstate(j);
+ if (j->tone_state) {
+ if (!(j->hookstate)) {
+ ixj_cpt_stop(j);
+ if (j->m_hook) {
+ j->m_hook = 0;
+ j->ex.bits.hookstate = 1;
+ ixj_kill_fasync(j, SIG_HOOKSTATE, POLL_IN);
+ }
+ clear_bit(board, &j->busyflags);
+ ixj_add_timer(j);
+ return;
+ }
+ if (j->tone_state == 1)
+ jifon = ((hertz * j->tone_on_time) * 25 / 100000);
+ else
+ jifon = ((hertz * j->tone_on_time) * 25 / 100000) + ((hertz * j->tone_off_time) * 25 / 100000);
+ if (time_before(jiffies, j->tone_start_jif + jifon)) {
+ if (j->tone_state == 1) {
+ ixj_play_tone(j, j->tone_index);
+ if (j->dsp.low == 0x20) {
+ clear_bit(board, &j->busyflags);
+ ixj_add_timer(j);
+ return;
+ }
+ } else {
+ ixj_play_tone(j, 0);
+ if (j->dsp.low == 0x20) {
+ clear_bit(board, &j->busyflags);
+ ixj_add_timer(j);
+ return;
+ }
+ }
+ } else {
+ ixj_tone_timeout(j);
+ if (j->flags.dialtone) {
+ ixj_dialtone(j);
+ }
+ if (j->flags.busytone) {
+ ixj_busytone(j);
+ if (j->dsp.low == 0x20) {
+ clear_bit(board, &j->busyflags);
+ ixj_add_timer(j);
+ return;
+ }
+ }
+ if (j->flags.ringback) {
+ ixj_ringback(j);
+ if (j->dsp.low == 0x20) {
+ clear_bit(board, &j->busyflags);
+ ixj_add_timer(j);
+ return;
+ }
+ }
+ if (!j->tone_state) {
+ ixj_cpt_stop(j);
+ }
+ }
+ }
+ if (!(j->tone_state && j->dsp.low == 0x20)) {
+ if (IsRxReady(j)) {
+ ixj_read_frame(j);
+ }
+ if (IsTxReady(j)) {
+ ixj_write_frame(j);
+ }
+ }
+ if (j->flags.cringing) {
+ if (j->hookstate & 1) {
+ j->flags.cringing = 0;
+ ixj_ring_off(j);
+ } else if(j->cadence_f[5].enable && ((!j->cadence_f[5].en_filter) || (j->cadence_f[5].en_filter && j->flags.firstring))) {
+ switch(j->cadence_f[5].state) {
+ case 0:
+ j->cadence_f[5].on1dot = jiffies + (long)((j->cadence_f[5].on1 * (hertz * 100) / 10000));
+ if (time_before(jiffies, j->cadence_f[5].on1dot)) {
+ if(ixjdebug & 0x0004) {
+ printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
+ }
+ ixj_ring_on(j);
+ }
+ j->cadence_f[5].state = 1;
+ break;
+ case 1:
+ if (time_after(jiffies, j->cadence_f[5].on1dot)) {
+ j->cadence_f[5].off1dot = jiffies + (long)((j->cadence_f[5].off1 * (hertz * 100) / 10000));
+ if(ixjdebug & 0x0004) {
+ printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
+ }
+ ixj_ring_off(j);
+ j->cadence_f[5].state = 2;
+ }
+ break;
+ case 2:
+ if (time_after(jiffies, j->cadence_f[5].off1dot)) {
+ if(ixjdebug & 0x0004) {
+ printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
+ }
+ ixj_ring_on(j);
+ if (j->cadence_f[5].on2) {
+ j->cadence_f[5].on2dot = jiffies + (long)((j->cadence_f[5].on2 * (hertz * 100) / 10000));
+ j->cadence_f[5].state = 3;
+ } else {
+ j->cadence_f[5].state = 7;
+ }
+ }
+ break;
+ case 3:
+ if (time_after(jiffies, j->cadence_f[5].on2dot)) {
+ if(ixjdebug & 0x0004) {
+ printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
+ }
+ ixj_ring_off(j);
+ if (j->cadence_f[5].off2) {
+ j->cadence_f[5].off2dot = jiffies + (long)((j->cadence_f[5].off2 * (hertz * 100) / 10000));
+ j->cadence_f[5].state = 4;
+ } else {
+ j->cadence_f[5].state = 7;
+ }
+ }
+ break;
+ case 4:
+ if (time_after(jiffies, j->cadence_f[5].off2dot)) {
+ if(ixjdebug & 0x0004) {
+ printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
+ }
+ ixj_ring_on(j);
+ if (j->cadence_f[5].on3) {
+ j->cadence_f[5].on3dot = jiffies + (long)((j->cadence_f[5].on3 * (hertz * 100) / 10000));
+ j->cadence_f[5].state = 5;
+ } else {
+ j->cadence_f[5].state = 7;
+ }
+ }
+ break;
+ case 5:
+ if (time_after(jiffies, j->cadence_f[5].on3dot)) {
+ if(ixjdebug & 0x0004) {
+ printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
+ }
+ ixj_ring_off(j);
+ if (j->cadence_f[5].off3) {
+ j->cadence_f[5].off3dot = jiffies + (long)((j->cadence_f[5].off3 * (hertz * 100) / 10000));
+ j->cadence_f[5].state = 6;
+ } else {
+ j->cadence_f[5].state = 7;
+ }
+ }
+ break;
+ case 6:
+ if (time_after(jiffies, j->cadence_f[5].off3dot)) {
+ if(ixjdebug & 0x0004) {
+ printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
+ }
+ j->cadence_f[5].state = 7;
+ }
+ break;
+ case 7:
+ if(ixjdebug & 0x0004) {
+ printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
+ }
+ j->flags.cidring = 1;
+ j->cadence_f[5].state = 0;
+ break;
+ }
+ if (j->flags.cidring && !j->flags.cidsent) {
+ j->flags.cidsent = 1;
+ if(j->fskdcnt) {
+ SLIC_SetState(PLD_SLIC_STATE_OHT, j);
+ ixj_pre_cid(j);
+ }
+ j->flags.cidring = 0;
+ }
+ clear_bit(board, &j->busyflags);
+ ixj_add_timer(j);
+ return;
+ } else {
+ if (time_after(jiffies, j->ring_cadence_jif + (hertz / 2))) {
+ if (j->flags.cidring && !j->flags.cidsent) {
+ j->flags.cidsent = 1;
+ if(j->fskdcnt) {
+ SLIC_SetState(PLD_SLIC_STATE_OHT, j);
+ ixj_pre_cid(j);
+ }
+ j->flags.cidring = 0;
+ }
+ j->ring_cadence_t--;
+ if (j->ring_cadence_t == -1)
+ j->ring_cadence_t = 15;
+ j->ring_cadence_jif = jiffies;
+
+ if (j->ring_cadence & 1 << j->ring_cadence_t) {
+ if(j->flags.cidsent && j->cadence_f[5].en_filter)
+ j->flags.firstring = 1;
+ else
+ ixj_ring_on(j);
+ } else {
+ ixj_ring_off(j);
+ if(!j->flags.cidsent)
+ j->flags.cidring = 1;
+ }
+ }
+ clear_bit(board, &j->busyflags);
+ ixj_add_timer(j);
+ return;
+ }
+ }
+ if (!j->flags.ringing) {
+ if (j->hookstate) { /* & 1) { */
+ if (j->dsp.low != 0x20 &&
+ SLIC_GetState(j) != PLD_SLIC_STATE_ACTIVE) {
+ SLIC_SetState(PLD_SLIC_STATE_ACTIVE, j);
+ }
+ LineMonitor(j);
+ read_filters(j);
+ ixj_WriteDSPCommand(0x511B, j);
+ j->proc_load = j->ssr.high << 8 | j->ssr.low;
+ if (!j->m_hook && (j->hookstate & 1)) {
+ j->m_hook = j->ex.bits.hookstate = 1;
+ ixj_kill_fasync(j, SIG_HOOKSTATE, POLL_IN);
+ }
+ } else {
+ if (j->ex.bits.dtmf_ready) {
+ j->dtmf_wp = j->dtmf_rp = j->ex.bits.dtmf_ready = 0;
+ }
+ if (j->m_hook) {
+ j->m_hook = 0;
+ j->ex.bits.hookstate = 1;
+ ixj_kill_fasync(j, SIG_HOOKSTATE, POLL_IN);
+ }
+ }
+ }
+ if (j->cardtype == QTI_LINEJACK && !j->flags.pstncheck && j->flags.pstn_present) {
+ ixj_pstn_state(j);
+ }
+ if (j->ex.bytes) {
+ wake_up_interruptible(&j->poll_q); /* Wake any blocked selects */
+ }
+ clear_bit(board, &j->busyflags);
+ }
+ ixj_add_timer(j);
+}
+
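+/* Busy-wait, bounded to 6/10 of a second, for the DSP status register. */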
+static int ixj_status_wait(IXJ *j)
+{
+ unsigned long jif;
+
+ jif = jiffies + ((60 * hertz) / 100);
+ while (!IsStatusReady(j)) {
+ ixj_perfmon(j->statuswait);
+ if (time_after(jiffies, jif)) {
+ ixj_perfmon(j->statuswaitfail);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int ixj_PCcontrol_wait(IXJ *j)
+{
+ unsigned long jif;
+
+ jif = jiffies + ((60 * hertz) / 100);
+ while (!IsPCControlReady(j)) {
+ ixj_perfmon(j->pcontrolwait);
+ if (time_after(jiffies, jif)) {
+ ixj_perfmon(j->pcontrolwaitfail);
+ return -1;
+ }
+ }
+ return 0;
+}
+
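+/*
+ * Send a 16-bit command to the DSP: the two command bytes go to
+ * DSPbase + 6/7, then the result is read back into j->ssr from
+ * DSPbase + 2/3.  j->DSPWrite guards against overlapping writers;
+ * on any failure the SSR is left reading 0xFFFF.
+ */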
+static int ixj_WriteDSPCommand(unsigned short cmd, IXJ *j)
+{
+ BYTES bytes;
+ unsigned long jif;
+
+ atomic_inc(&j->DSPWrite);
+ if(atomic_read(&j->DSPWrite) > 1) {
+ printk("IXJ %d DSP write overlap attempting command 0x%4.4x\n", j->board, cmd);
+ return -1;
+ }
+ bytes.high = (cmd & 0xFF00) >> 8;
+ bytes.low = cmd & 0x00FF;
+ jif = jiffies + ((60 * hertz) / 100);
+ while (!IsControlReady(j)) {
+ ixj_perfmon(j->iscontrolready);
+ if (time_after(jiffies, jif)) {
+ ixj_perfmon(j->iscontrolreadyfail);
+ atomic_dec(&j->DSPWrite);
+ if(atomic_read(&j->DSPWrite) > 0) {
+ printk("IXJ %d DSP overlaped command 0x%4.4x during control ready failure.\n", j->board, cmd);
+ while(atomic_read(&j->DSPWrite) > 0) {
+ atomic_dec(&j->DSPWrite);
+ }
+ }
+ return -1;
+ }
+ }
+ outb(bytes.low, j->DSPbase + 6);
+ outb(bytes.high, j->DSPbase + 7);
+
+ if (ixj_status_wait(j)) {
+ j->ssr.low = 0xFF;
+ j->ssr.high = 0xFF;
+ atomic_dec(&j->DSPWrite);
+ if(atomic_read(&j->DSPWrite) > 0) {
+ printk("IXJ %d DSP overlaped command 0x%4.4x during status wait failure.\n", j->board, cmd);
+ while(atomic_read(&j->DSPWrite) > 0) {
+ atomic_dec(&j->DSPWrite);
+ }
+ }
+ return -1;
+ }
+/* Read Software Status Register */
+ j->ssr.low = inb_p(j->DSPbase + 2);
+ j->ssr.high = inb_p(j->DSPbase + 3);
+ atomic_dec(&j->DSPWrite);
+ if(atomic_read(&j->DSPWrite) > 0) {
+ printk("IXJ %d DSP overlaped command 0x%4.4x\n", j->board, cmd);
+ while(atomic_read(&j->DSPWrite) > 0) {
+ atomic_dec(&j->DSPWrite);
+ }
+ }
+ return 0;
+}
+
+/***************************************************************************
+*
+* General Purpose IO Register read routine
+*
+***************************************************************************/
+static inline int ixj_gpio_read(IXJ *j)
+{
+ if (ixj_WriteDSPCommand(0x5143, j))
+ return -1;
+
+ j->gpio.bytes.low = j->ssr.low;
+ j->gpio.bytes.high = j->ssr.high;
+
+ return 0;
+}
+
+static inline void LED_SetState(int state, IXJ *j)
+{
+ if (j->cardtype == QTI_LINEJACK) {
+ j->pld_scrw.bits.led1 = state & 0x1 ? 1 : 0;
+ j->pld_scrw.bits.led2 = state & 0x2 ? 1 : 0;
+ j->pld_scrw.bits.led3 = state & 0x4 ? 1 : 0;
+ j->pld_scrw.bits.led4 = state & 0x8 ? 1 : 0;
+
+ outb(j->pld_scrw.byte, j->XILINXbase);
+ }
+}
+
+/*********************************************************************
+* GPIO Pins are configured as follows on the Quicknet Internet
+* PhoneJACK Telephony Cards
+*
+* POTS Select GPIO_6=0 GPIO_7=0
+* Mic/Speaker Select GPIO_6=0 GPIO_7=1
+* Handset Select GPIO_6=1 GPIO_7=0
+*
+* SLIC Active GPIO_1=0 GPIO_2=1 GPIO_5=0
+* SLIC Ringing GPIO_1=1 GPIO_2=1 GPIO_5=0
+* SLIC Open Circuit GPIO_1=0 GPIO_2=0 GPIO_5=0
+*
+* Hook Switch changes reported on GPIO_3
+*********************************************************************/
+static int ixj_set_port(IXJ *j, int arg)
+{
+ if (j->cardtype == QTI_PHONEJACK_LITE) {
+ if (arg != PORT_POTS)
+ return 10;
+ else
+ return 0;
+ }
+ switch (arg) {
+ case PORT_POTS:
+ j->port = PORT_POTS;
+ switch (j->cardtype) {
+ case QTI_PHONECARD:
+ if (j->flags.pcmciasct == 1)
+ SLIC_SetState(PLD_SLIC_STATE_ACTIVE, j);
+ else
+ return 11;
+ break;
+ case QTI_PHONEJACK_PCI:
+ j->pld_slicw.pcib.mic = 0;
+ j->pld_slicw.pcib.spk = 0;
+ outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ break;
+ case QTI_LINEJACK:
+ ixj_set_pots(j, 0); /* Disconnect POTS/PSTN relay */
+ if (ixj_WriteDSPCommand(0xC528, j)) /* Write CODEC config to
+ Software Control Register */
+ return 2;
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+ outb(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_clock.byte = 0;
+ outb(j->pld_clock.byte, j->XILINXbase + 0x04);
+ j->pld_slicw.bits.rly1 = 1;
+ j->pld_slicw.bits.spken = 0;
+ outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ ixj_mixer(0x1200, j); /* Turn Off MIC switch on mixer left */
+ ixj_mixer(0x1401, j); /* Turn On Mono1 switch on mixer left */
+ ixj_mixer(0x1300, j); /* Turn Off MIC switch on mixer right */
+ ixj_mixer(0x1501, j); /* Turn On Mono1 switch on mixer right */
+ ixj_mixer(0x0E80, j); /* Mic mute */
+ ixj_mixer(0x0F00, j); /* Set mono out (SLIC) to 0dB */
+ ixj_mixer(0x0080, j); /* Mute Master Left volume */
+ ixj_mixer(0x0180, j); /* Mute Master Right volume */
+ SLIC_SetState(PLD_SLIC_STATE_STANDBY, j);
+/* SLIC_SetState(PLD_SLIC_STATE_ACTIVE, j); */
+ break;
+ case QTI_PHONEJACK:
+ j->gpio.bytes.high = 0x0B;
+ j->gpio.bits.gpio6 = 0;
+ j->gpio.bits.gpio7 = 0;
+ ixj_WriteDSPCommand(j->gpio.word, j);
+ break;
+ }
+ break;
+ case PORT_PSTN:
+ if (j->cardtype == QTI_LINEJACK) {
+ ixj_WriteDSPCommand(0xC534, j); /* Write CODEC config to Software Control Register */
+
+ j->pld_slicw.bits.rly3 = 0;
+ j->pld_slicw.bits.rly1 = 1;
+ j->pld_slicw.bits.spken = 0;
+ outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ j->port = PORT_PSTN;
+ } else {
+ return 4;
+ }
+ break;
+ case PORT_SPEAKER:
+ j->port = PORT_SPEAKER;
+ switch (j->cardtype) {
+ case QTI_PHONECARD:
+ if (j->flags.pcmciasct) {
+ SLIC_SetState(PLD_SLIC_STATE_OC, j);
+ }
+ break;
+ case QTI_PHONEJACK_PCI:
+ j->pld_slicw.pcib.mic = 1;
+ j->pld_slicw.pcib.spk = 1;
+ outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ break;
+ case QTI_LINEJACK:
+ ixj_set_pots(j, 0); /* Disconnect POTS/PSTN relay */
+ if (ixj_WriteDSPCommand(0xC528, j)) /* Write CODEC config to
+ Software Control Register */
+ return 2;
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+ outb(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_clock.byte = 0;
+ outb(j->pld_clock.byte, j->XILINXbase + 0x04);
+ j->pld_slicw.bits.rly1 = 1;
+ j->pld_slicw.bits.spken = 1;
+ outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ ixj_mixer(0x1201, j); /* Turn On MIC switch on mixer left */
+ ixj_mixer(0x1400, j); /* Turn Off Mono1 switch on mixer left */
+ ixj_mixer(0x1301, j); /* Turn On MIC switch on mixer right */
+ ixj_mixer(0x1500, j); /* Turn Off Mono1 switch on mixer right */
+ ixj_mixer(0x0E06, j); /* Mic un-mute 0dB */
+ ixj_mixer(0x0F80, j); /* Mute mono out (SLIC) */
+ ixj_mixer(0x0000, j); /* Set Master Left volume to 0dB */
+ ixj_mixer(0x0100, j); /* Set Master Right volume to 0dB */
+ break;
+ case QTI_PHONEJACK:
+ j->gpio.bytes.high = 0x0B;
+ j->gpio.bits.gpio6 = 0;
+ j->gpio.bits.gpio7 = 1;
+ ixj_WriteDSPCommand(j->gpio.word, j);
+ break;
+ }
+ break;
+ case PORT_HANDSET:
+ if (j->cardtype != QTI_PHONEJACK) {
+ return 5;
+ } else {
+ j->gpio.bytes.high = 0x0B;
+ j->gpio.bits.gpio6 = 1;
+ j->gpio.bits.gpio7 = 0;
+ ixj_WriteDSPCommand(j->gpio.word, j);
+ j->port = PORT_HANDSET;
+ }
+ break;
+ default:
+ return 6;
+ }
+ return 0;
+}
+
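+/*
+ * Control the LineJACK relay (rly1) that can tie the local POTS port
+ * directly to the PSTN line.  A non-zero arg connects them, but only while
+ * the PSTN port owns the line; arg == 0 breaks the connection.  Returns 1
+ * when the relay was actually driven, 0 otherwise.
+ */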
+static int ixj_set_pots(IXJ *j, int arg)
+{
+ if (j->cardtype == QTI_LINEJACK) {
+ if (arg) {
+ if (j->port == PORT_PSTN) {
+ j->pld_slicw.bits.rly1 = 0;
+ outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ j->flags.pots_pstn = 1;
+ return 1;
+ } else {
+ j->flags.pots_pstn = 0;
+ return 0;
+ }
+ } else {
+ j->pld_slicw.bits.rly1 = 1;
+ outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ j->flags.pots_pstn = 0;
+ return 1;
+ }
+ } else {
+ return 0;
+ }
+}
+
+static void ixj_ring_on(IXJ *j)
+{
+ if (j->dsp.low == 0x20) { /* Internet PhoneJACK */
+ if (ixjdebug & 0x0004)
+ printk(KERN_INFO "IXJ Ring On /dev/phone%d\n", j->board);
+
+ j->gpio.bytes.high = 0x0B;
+ j->gpio.bytes.low = 0x00;
+ j->gpio.bits.gpio1 = 1;
+ j->gpio.bits.gpio2 = 1;
+ j->gpio.bits.gpio5 = 0;
+ ixj_WriteDSPCommand(j->gpio.word, j); /* send the ring signal */
+ } else { /* Internet LineJACK, Internet PhoneJACK Lite or Internet PhoneJACK PCI */
+ if (ixjdebug & 0x0004)
+ printk(KERN_INFO "IXJ Ring On /dev/phone%d\n", j->board);
+
+ SLIC_SetState(PLD_SLIC_STATE_RINGING, j);
+ }
+}
+
+static int ixj_siadc(IXJ *j, int val)
+{
+ if(j->cardtype == QTI_PHONECARD){
+ if(j->flags.pcmciascp){
+ if(val == -1)
+ return j->siadc.bits.rxg;
+
+ if(val < 0 || val > 0x1F)
+ return -1;
+
+ j->siadc.bits.hom = 0; /* Handset Out Mute */
+ j->siadc.bits.lom = 0; /* Line Out Mute */
+ j->siadc.bits.rxg = val; /* (0xC000 - 0x41C8) / 0x4EF; RX PGA Gain */
+ j->psccr.bits.addr = 6; /* R/W Smart Cable Register Address */
+ j->psccr.bits.rw = 0; /* Read / Write flag */
+ j->psccr.bits.dev = 0;
+ outb(j->siadc.byte, j->XILINXbase + 0x00);
+ outb(j->psccr.byte, j->XILINXbase + 0x01);
+ ixj_PCcontrol_wait(j);
+ return j->siadc.bits.rxg;
+ }
+ }
+ return -1;
+}
+
+static int ixj_sidac(IXJ *j, int val)
+{
+ if(j->cardtype == QTI_PHONECARD){
+ if(j->flags.pcmciascp){
+ if(val == -1)
+ return j->sidac.bits.txg;
+
+ if(val < 0 || val > 0x1F)
+ return -1;
+
+ j->sidac.bits.srm = 1; /* Speaker Right Mute */
+ j->sidac.bits.slm = 1; /* Speaker Left Mute */
+ j->sidac.bits.txg = val; /* (0xC000 - 0x45E4) / 0x5D3; TX PGA Gain */
+ j->psccr.bits.addr = 7; /* R/W Smart Cable Register Address */
+ j->psccr.bits.rw = 0; /* Read / Write flag */
+ j->psccr.bits.dev = 0;
+ outb(j->sidac.byte, j->XILINXbase + 0x00);
+ outb(j->psccr.byte, j->XILINXbase + 0x01);
+ ixj_PCcontrol_wait(j);
+ return j->sidac.bits.txg;
+ }
+ }
+ return -1;
+}
+
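+/*
+ * Smart Cable detection for the PhoneCARD.  j->flags.pcmciastate walks
+ * 4 -> 3 -> 2 -> 1 -> 0 across successive timer ticks while the cable is
+ * reset, powered and probed; once settled, the routine reports hook state
+ * for the detected cable type.
+ */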
+static int ixj_pcmcia_cable_check(IXJ *j)
+{
+ j->pccr1.byte = inb_p(j->XILINXbase + 0x03);
+ if (!j->flags.pcmciastate) {
+ j->pccr2.byte = inb_p(j->XILINXbase + 0x02);
+ if (j->pccr1.bits.drf || j->pccr2.bits.rstc) {
+ j->flags.pcmciastate = 4;
+ return 0;
+ }
+ if (j->pccr1.bits.ed) {
+ j->pccr1.bits.ed = 0;
+ j->psccr.bits.dev = 3;
+ j->psccr.bits.rw = 1;
+ outw_p(j->psccr.byte << 8, j->XILINXbase + 0x00);
+ ixj_PCcontrol_wait(j);
+ j->pslic.byte = inw_p(j->XILINXbase + 0x00) & 0xFF;
+ j->pslic.bits.led2 = j->pslic.bits.det ? 1 : 0;
+ j->psccr.bits.dev = 3;
+ j->psccr.bits.rw = 0;
+ outw_p(j->psccr.byte << 8 | j->pslic.byte, j->XILINXbase + 0x00);
+ ixj_PCcontrol_wait(j);
+ return j->pslic.bits.led2 ? 1 : 0;
+ } else if (j->flags.pcmciasct) {
+ return j->r_hook;
+ } else {
+ return 1;
+ }
+ } else if (j->flags.pcmciastate == 4) {
+ if (!j->pccr1.bits.drf) {
+ j->flags.pcmciastate = 3;
+ }
+ return 0;
+ } else if (j->flags.pcmciastate == 3) {
+ j->pccr2.bits.pwr = 0;
+ j->pccr2.bits.rstc = 1;
+ outb(j->pccr2.byte, j->XILINXbase + 0x02);
+ j->checkwait = jiffies + (hertz * 2);
+ j->flags.incheck = 1;
+ j->flags.pcmciastate = 2;
+ return 0;
+ } else if (j->flags.pcmciastate == 2) {
+ if (j->flags.incheck) {
+ if (time_before(jiffies, j->checkwait)) {
+ return 0;
+ } else {
+ j->flags.incheck = 0;
+ }
+ }
+ j->pccr2.bits.pwr = 0;
+ j->pccr2.bits.rstc = 0;
+ outb_p(j->pccr2.byte, j->XILINXbase + 0x02);
+ j->flags.pcmciastate = 1;
+ return 0;
+ } else if (j->flags.pcmciastate == 1) {
+ j->flags.pcmciastate = 0;
+ if (!j->pccr1.bits.drf) {
+ j->psccr.bits.dev = 3;
+ j->psccr.bits.rw = 1;
+ outb_p(j->psccr.byte, j->XILINXbase + 0x01);
+ ixj_PCcontrol_wait(j);
+ j->flags.pcmciascp = 1; /* Set Cable Present Flag */
+
+ j->flags.pcmciasct = (inw_p(j->XILINXbase + 0x00) >> 8) & 0x03; /* Get Cable Type */
+
+ if (j->flags.pcmciasct == 3) {
+ j->flags.pcmciastate = 4;
+ return 0;
+ } else if (j->flags.pcmciasct == 0) {
+ j->pccr2.bits.pwr = 1;
+ j->pccr2.bits.rstc = 0;
+ outb_p(j->pccr2.byte, j->XILINXbase + 0x02);
+ j->port = PORT_SPEAKER;
+ } else {
+ j->port = PORT_POTS;
+ }
+ j->sic1.bits.cpd = 0; /* Chip Power Down */
+ j->sic1.bits.mpd = 0; /* MIC Bias Power Down */
+ j->sic1.bits.hpd = 0; /* Handset Bias Power Down */
+ j->sic1.bits.lpd = 0; /* Line Bias Power Down */
+ j->sic1.bits.spd = 1; /* Speaker Drive Power Down */
+ j->psccr.bits.addr = 1; /* R/W Smart Cable Register Address */
+ j->psccr.bits.rw = 0; /* Read / Write flag */
+ j->psccr.bits.dev = 0;
+ outb(j->sic1.byte, j->XILINXbase + 0x00);
+ outb(j->psccr.byte, j->XILINXbase + 0x01);
+ ixj_PCcontrol_wait(j);
+
+ j->sic2.bits.al = 0; /* Analog Loopback DAC analog -> ADC analog */
+ j->sic2.bits.dl2 = 0; /* Digital Loopback DAC -> ADC one bit */
+ j->sic2.bits.dl1 = 0; /* Digital Loopback ADC -> DAC one bit */
+ j->sic2.bits.pll = 0; /* 1 = div 10, 0 = div 5 */
+ j->sic2.bits.hpd = 0; /* HPF disable */
+ j->psccr.bits.addr = 2; /* R/W Smart Cable Register Address */
+ j->psccr.bits.rw = 0; /* Read / Write flag */
+ j->psccr.bits.dev = 0;
+ outb(j->sic2.byte, j->XILINXbase + 0x00);
+ outb(j->psccr.byte, j->XILINXbase + 0x01);
+ ixj_PCcontrol_wait(j);
+
+ j->psccr.bits.addr = 3; /* R/W Smart Cable Register Address */
+ j->psccr.bits.rw = 0; /* Read / Write flag */
+ j->psccr.bits.dev = 0;
+ outb(0x00, j->XILINXbase + 0x00); /* PLL Divide N1 */
+ outb(j->psccr.byte, j->XILINXbase + 0x01);
+ ixj_PCcontrol_wait(j);
+
+ j->psccr.bits.addr = 4; /* R/W Smart Cable Register Address */
+ j->psccr.bits.rw = 0; /* Read / Write flag */
+ j->psccr.bits.dev = 0;
+ outb(0x09, j->XILINXbase + 0x00); /* PLL Multiply M1 */
+ outb(j->psccr.byte, j->XILINXbase + 0x01);
+ ixj_PCcontrol_wait(j);
+
+ j->sirxg.bits.lig = 1; /* Line In Gain */
+ j->sirxg.bits.lim = 1; /* Line In Mute */
+ j->sirxg.bits.mcg = 0; /* MIC In Gain was 3 */
+ j->sirxg.bits.mcm = 0; /* MIC In Mute */
+ j->sirxg.bits.him = 0; /* Handset In Mute */
+ j->sirxg.bits.iir = 1; /* IIR */
+ j->psccr.bits.addr = 5; /* R/W Smart Cable Register Address */
+ j->psccr.bits.rw = 0; /* Read / Write flag */
+ j->psccr.bits.dev = 0;
+ outb(j->sirxg.byte, j->XILINXbase + 0x00);
+ outb(j->psccr.byte, j->XILINXbase + 0x01);
+ ixj_PCcontrol_wait(j);
+
+ ixj_siadc(j, 0x17);
+ ixj_sidac(j, 0x1D);
+
+ j->siaatt.bits.sot = 0;
+ j->psccr.bits.addr = 9; /* R/W Smart Cable Register Address */
+ j->psccr.bits.rw = 0; /* Read / Write flag */
+ j->psccr.bits.dev = 0;
+ outb(j->siaatt.byte, j->XILINXbase + 0x00);
+ outb(j->psccr.byte, j->XILINXbase + 0x01);
+ ixj_PCcontrol_wait(j);
+
+ if (j->flags.pcmciasct == 1 && !j->readers && !j->writers) {
+ j->psccr.byte = j->pslic.byte = 0;
+ j->pslic.bits.powerdown = 1;
+ j->psccr.bits.dev = 3;
+ j->psccr.bits.rw = 0;
+ outw_p(j->psccr.byte << 8 | j->pslic.byte, j->XILINXbase + 0x00);
+ ixj_PCcontrol_wait(j);
+ }
+ }
+ return 0;
+ } else {
+ j->flags.pcmciascp = 0;
+ return 0;
+ }
+ return 0;
+}
+
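+/*
+ * Returns the off-hook state as a bit mask: bit 0 is the detected hook
+ * switch, and bit 1 is forced on for ports that are always "live"
+ * (speaker, handset, or a PSTN port in conversation mode).
+ */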
+static int ixj_hookstate(IXJ *j)
+{
+ int fOffHook = 0;
+
+ switch (j->cardtype) {
+ case QTI_PHONEJACK:
+ ixj_gpio_read(j);
+ fOffHook = j->gpio.bits.gpio3read ? 1 : 0;
+ break;
+ case QTI_LINEJACK:
+ case QTI_PHONEJACK_LITE:
+ case QTI_PHONEJACK_PCI:
+ SLIC_GetState(j);
+ if(j->cardtype == QTI_LINEJACK && j->flags.pots_pstn == 1 && (j->readers || j->writers)) {
+ fOffHook = j->pld_slicr.bits.potspstn ? 1 : 0;
+ if(fOffHook != j->p_hook) {
+ if(!j->checkwait) {
+ j->checkwait = jiffies;
+ }
+ if(time_before(jiffies, j->checkwait + 2)) {
+ fOffHook ^= 1;
+ } else {
+ j->checkwait = 0;
+ }
+ j->p_hook = fOffHook;
+ printk("IXJ : /dev/phone%d pots-pstn hookstate check %d at %ld\n", j->board, fOffHook, jiffies);
+ }
+ } else {
+ if (j->pld_slicr.bits.state == PLD_SLIC_STATE_ACTIVE ||
+ j->pld_slicr.bits.state == PLD_SLIC_STATE_STANDBY) {
+ if (j->flags.ringing || j->flags.cringing) {
+ if (!in_interrupt()) {
+ msleep(20);
+ }
+ SLIC_GetState(j);
+ if (j->pld_slicr.bits.state == PLD_SLIC_STATE_RINGING) {
+ ixj_ring_on(j);
+ }
+ }
+ if (j->cardtype == QTI_PHONEJACK_PCI) {
+ j->pld_scrr.byte = inb_p(j->XILINXbase);
+ fOffHook = j->pld_scrr.pcib.det ? 1 : 0;
+ } else
+ fOffHook = j->pld_slicr.bits.det ? 1 : 0;
+ }
+ }
+ break;
+ case QTI_PHONECARD:
+ fOffHook = ixj_pcmcia_cable_check(j);
+ break;
+ }
+ if (j->r_hook != fOffHook) {
+ j->r_hook = fOffHook;
+ if (j->port == PORT_SPEAKER || j->port == PORT_HANDSET) { /* || (j->port == PORT_PSTN && j->flags.pots_pstn == 0)) */
+ j->ex.bits.hookstate = 1;
+ ixj_kill_fasync(j, SIG_HOOKSTATE, POLL_IN);
+ } else if (!fOffHook) {
+ j->flash_end = jiffies + ((60 * hertz) / 100);
+ }
+ }
+ if (fOffHook) {
+ if(time_before(jiffies, j->flash_end)) {
+ j->ex.bits.flash = 1;
+ j->flash_end = 0;
+ ixj_kill_fasync(j, SIG_FLASH, POLL_IN);
+ }
+ } else {
+ if(time_before(jiffies, j->flash_end)) {
+ fOffHook = 1;
+ }
+ }
+
+ if (j->port == PORT_PSTN && j->daa_mode == SOP_PU_CONVERSATION)
+ fOffHook |= 2;
+
+ if (j->port == PORT_SPEAKER) {
+ if(j->cardtype == QTI_PHONECARD) {
+ if(j->flags.pcmciascp && j->flags.pcmciasct) {
+ fOffHook |= 2;
+ }
+ } else {
+ fOffHook |= 2;
+ }
+ }
+
+ if (j->port == PORT_HANDSET)
+ fOffHook |= 2;
+
+ return fOffHook;
+}
+
+static void ixj_ring_off(IXJ *j)
+{
+ if (j->dsp.low == 0x20) { /* Internet PhoneJACK */
+ if (ixjdebug & 0x0004)
+ printk(KERN_INFO "IXJ Ring Off\n");
+ j->gpio.bytes.high = 0x0B;
+ j->gpio.bytes.low = 0x00;
+ j->gpio.bits.gpio1 = 0;
+ j->gpio.bits.gpio2 = 1;
+ j->gpio.bits.gpio5 = 0;
+ ixj_WriteDSPCommand(j->gpio.word, j);
+ } else { /* Internet LineJACK, Internet PhoneJACK Lite or Internet PhoneJACK PCI */
+ if (ixjdebug & 0x0004)
+ printk(KERN_INFO "IXJ Ring Off\n");
+
+ if(!j->flags.cidplay)
+ SLIC_SetState(PLD_SLIC_STATE_STANDBY, j);
+
+ SLIC_GetState(j);
+ }
+}
+
+static void ixj_ring_start(IXJ *j)
+{
+ j->flags.cringing = 1;
+ if (ixjdebug & 0x0004)
+ printk(KERN_INFO "IXJ Cadence Ringing Start /dev/phone%d\n", j->board);
+ if (ixj_hookstate(j) & 1) {
+ if (j->port == PORT_POTS)
+ ixj_ring_off(j);
+ j->flags.cringing = 0;
+ if (ixjdebug & 0x0004)
+ printk(KERN_INFO "IXJ Cadence Ringing Stopped /dev/phone%d off hook\n", j->board);
+ } else if(j->cadence_f[5].enable && (!j->cadence_f[5].en_filter)) {
+ j->ring_cadence_jif = jiffies;
+ j->flags.cidsent = j->flags.cidring = 0;
+ j->cadence_f[5].state = 0;
+ if(j->cadence_f[5].on1)
+ ixj_ring_on(j);
+ } else {
+ j->ring_cadence_jif = jiffies;
+ j->ring_cadence_t = 15;
+ if (j->ring_cadence & 1 << j->ring_cadence_t) {
+ ixj_ring_on(j);
+ } else {
+ ixj_ring_off(j);
+ }
+ j->flags.cidsent = j->flags.cidring = j->flags.firstring = 0;
+ }
+}
+
+static int ixj_ring(IXJ *j)
+{
+ char cntr;
+ unsigned long jif;
+
+ j->flags.ringing = 1;
+ if (ixj_hookstate(j) & 1) {
+ ixj_ring_off(j);
+ j->flags.ringing = 0;
+ return 1;
+ }
+ for (cntr = 0; cntr < j->maxrings; cntr++) {
+ jif = jiffies + (1 * hertz);
+ ixj_ring_on(j);
+ while (time_before(jiffies, jif)) {
+ if (ixj_hookstate(j) & 1) {
+ ixj_ring_off(j);
+ j->flags.ringing = 0;
+ return 1;
+ }
+ schedule_timeout_interruptible(1);
+ if (signal_pending(current))
+ break;
+ }
+ jif = jiffies + (3 * hertz);
+ ixj_ring_off(j);
+ while (time_before(jiffies, jif)) {
+ if (ixj_hookstate(j) & 1) {
+ msleep(10);
+ if (ixj_hookstate(j) & 1) {
+ j->flags.ringing = 0;
+ return 1;
+ }
+ }
+ schedule_timeout_interruptible(1);
+ if (signal_pending(current))
+ break;
+ }
+ }
+ ixj_ring_off(j);
+ j->flags.ringing = 0;
+ return 0;
+}
+
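+/*
+ * Opens are exclusive per direction: only one reader and one writer may
+ * have the board open at a time, so a second open for the same mode fails
+ * with -EBUSY.
+ */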
+static int ixj_open(struct phone_device *p, struct file *file_p)
+{
+ IXJ *j = get_ixj(p->board);
+ file_p->private_data = j;
+
+ if (!j->DSPbase)
+ return -ENODEV;
+
+ if (file_p->f_mode & FMODE_READ) {
+ if(!j->readers) {
+ j->readers++;
+ } else {
+ return -EBUSY;
+ }
+ }
+
+ if (file_p->f_mode & FMODE_WRITE) {
+ if(!j->writers) {
+ j->writers++;
+ } else {
+ if (file_p->f_mode & FMODE_READ){
+ j->readers--;
+ }
+ return -EBUSY;
+ }
+ }
+
+ if (j->cardtype == QTI_PHONECARD) {
+ j->pslic.bits.powerdown = 0;
+ j->psccr.bits.dev = 3;
+ j->psccr.bits.rw = 0;
+ outw_p(j->psccr.byte << 8 | j->pslic.byte, j->XILINXbase + 0x00);
+ ixj_PCcontrol_wait(j);
+ }
+
+ j->flags.cidplay = 0;
+ j->flags.cidcw_ack = 0;
+
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Opening board %d\n", p->board);
+
+ j->framesread = j->frameswritten = 0;
+ return 0;
+}
+
+static int ixj_release(struct inode *inode, struct file *file_p)
+{
+ IXJ_TONE ti;
+ int cnt;
+ IXJ *j = file_p->private_data;
+ int board = j->p.board;
+
+ /*
+ * Set up locks to ensure that only one process is talking to the DSP at a time.
+ * This is necessary to keep the DSP from locking up.
+ */
+ while(test_and_set_bit(board, (void *)&j->busyflags) != 0)
+ schedule_timeout_interruptible(1);
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Closing board %d\n", NUM(inode));
+
+ if (j->cardtype == QTI_PHONECARD)
+ ixj_set_port(j, PORT_SPEAKER);
+ else
+ ixj_set_port(j, PORT_POTS);
+
+ aec_stop(j);
+ ixj_play_stop(j);
+ ixj_record_stop(j);
+ set_play_volume(j, 0x100);
+ set_rec_volume(j, 0x100);
+ ixj_ring_off(j);
+
+ /* Restore the tone table to default settings. */
+ ti.tone_index = 10;
+ ti.gain0 = 1;
+ ti.freq0 = hz941;
+ ti.gain1 = 0;
+ ti.freq1 = hz1209;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 11;
+ ti.gain0 = 1;
+ ti.freq0 = hz941;
+ ti.gain1 = 0;
+ ti.freq1 = hz1336;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 12;
+ ti.gain0 = 1;
+ ti.freq0 = hz941;
+ ti.gain1 = 0;
+ ti.freq1 = hz1477;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 13;
+ ti.gain0 = 1;
+ ti.freq0 = hz800;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 14;
+ ti.gain0 = 1;
+ ti.freq0 = hz1000;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 15;
+ ti.gain0 = 1;
+ ti.freq0 = hz1250;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 16;
+ ti.gain0 = 1;
+ ti.freq0 = hz950;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 17;
+ ti.gain0 = 1;
+ ti.freq0 = hz1100;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 18;
+ ti.gain0 = 1;
+ ti.freq0 = hz1400;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 19;
+ ti.gain0 = 1;
+ ti.freq0 = hz1500;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 20;
+ ti.gain0 = 1;
+ ti.freq0 = hz1600;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 21;
+ ti.gain0 = 1;
+ ti.freq0 = hz1800;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 22;
+ ti.gain0 = 1;
+ ti.freq0 = hz2100;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 23;
+ ti.gain0 = 1;
+ ti.freq0 = hz1300;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 24;
+ ti.gain0 = 1;
+ ti.freq0 = hz2450;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 25;
+ ti.gain0 = 1;
+ ti.freq0 = hz350;
+ ti.gain1 = 0;
+ ti.freq1 = hz440;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 26;
+ ti.gain0 = 1;
+ ti.freq0 = hz440;
+ ti.gain1 = 0;
+ ti.freq1 = hz480;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 27;
+ ti.gain0 = 1;
+ ti.freq0 = hz480;
+ ti.gain1 = 0;
+ ti.freq1 = hz620;
+ ixj_init_tone(j, &ti);
+
+ set_rec_depth(j, 2); /* Set Record Channel Limit to 2 frames */
+
+ set_play_depth(j, 2); /* Set Playback Channel Limit to 2 frames */
+
+ j->ex.bits.dtmf_ready = 0;
+ j->dtmf_state = 0;
+ j->dtmf_wp = j->dtmf_rp = 0;
+ j->rec_mode = j->play_mode = -1;
+ j->flags.ringing = 0;
+ j->maxrings = MAXRINGS;
+ j->ring_cadence = USA_RING_CADENCE;
+ if(j->cadence_f[5].enable) {
+ j->cadence_f[5].enable = j->cadence_f[5].en_filter = j->cadence_f[5].state = 0;
+ }
+ j->drybuffer = 0;
+ j->winktime = 320;
+ j->flags.dtmf_oob = 0;
+ for (cnt = 0; cnt < 4; cnt++)
+ j->cadence_f[cnt].enable = 0;
+
+ idle(j);
+
+ if(j->cardtype == QTI_PHONECARD) {
+ SLIC_SetState(PLD_SLIC_STATE_OC, j);
+ }
+
+ if (file_p->f_mode & FMODE_READ)
+ j->readers--;
+ if (file_p->f_mode & FMODE_WRITE)
+ j->writers--;
+
+ if (j->read_buffer && !j->readers) {
+ kfree(j->read_buffer);
+ j->read_buffer = NULL;
+ j->read_buffer_size = 0;
+ }
+ if (j->write_buffer && !j->writers) {
+ kfree(j->write_buffer);
+ j->write_buffer = NULL;
+ j->write_buffer_size = 0;
+ }
+ j->rec_codec = j->play_codec = 0;
+ j->rec_frame_size = j->play_frame_size = 0;
+ j->flags.cidsent = j->flags.cidring = 0;
+
+ if(j->cardtype == QTI_LINEJACK && !j->readers && !j->writers) {
+ ixj_set_port(j, PORT_PSTN);
+ daa_set_mode(j, SOP_PU_SLEEP);
+ ixj_set_pots(j, 1);
+ }
+ ixj_WriteDSPCommand(0x0FE3, j); /* Put the DSP in 1/5 power mode. */
+
+ /* Set up the default signals for events */
+ for (cnt = 0; cnt < 35; cnt++)
+ j->ixj_signals[cnt] = SIGIO;
+
+ /* Set the exception signal enable flags */
+ j->ex_sig.bits.dtmf_ready = j->ex_sig.bits.hookstate = j->ex_sig.bits.flash = j->ex_sig.bits.pstn_ring =
+ j->ex_sig.bits.caller_id = j->ex_sig.bits.pstn_wink = j->ex_sig.bits.f0 = j->ex_sig.bits.f1 = j->ex_sig.bits.f2 =
+ j->ex_sig.bits.f3 = j->ex_sig.bits.fc0 = j->ex_sig.bits.fc1 = j->ex_sig.bits.fc2 = j->ex_sig.bits.fc3 = 1;
+
+ file_p->private_data = NULL;
+ clear_bit(board, &j->busyflags);
+ return 0;
+}
+
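+/*
+ * Poll the four DSP tone filters.  In each filter history word the low
+ * two bits appear to flag a tone currently present and the next two bits
+ * a tone that has just ended; a rising or falling edge advances the
+ * matching cadence_f[] state machine and raises the filter signal.
+ */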
+static int read_filters(IXJ *j)
+{
+ unsigned short fc, cnt, trg;
+ int var;
+
+ trg = 0;
+ if (ixj_WriteDSPCommand(0x5144, j)) {
+ if(ixjdebug & 0x0001) {
+ printk(KERN_INFO "Read Frame Counter failed!\n");
+ }
+ return -1;
+ }
+ fc = j->ssr.high << 8 | j->ssr.low;
+ if (fc == j->frame_count)
+ return 1;
+
+ j->frame_count = fc;
+
+ if (j->dtmf_proc)
+ return 1;
+
+ var = 10;
+
+ for (cnt = 0; cnt < 4; cnt++) {
+ if (ixj_WriteDSPCommand(0x5154 + cnt, j)) {
+ if(ixjdebug & 0x0001) {
+ printk(KERN_INFO "Select Filter %d failed!\n", cnt);
+ }
+ return -1;
+ }
+ if (ixj_WriteDSPCommand(0x515C, j)) {
+ if(ixjdebug & 0x0001) {
+ printk(KERN_INFO "Read Filter History %d failed!\n", cnt);
+ }
+ return -1;
+ }
+ j->filter_hist[cnt] = j->ssr.high << 8 | j->ssr.low;
+
+ if (j->cadence_f[cnt].enable) {
+ if (j->filter_hist[cnt] & 3 && !(j->filter_hist[cnt] & 12)) {
+ if (j->cadence_f[cnt].state == 0) {
+ j->cadence_f[cnt].state = 1;
+ j->cadence_f[cnt].on1min = jiffies + (long)((j->cadence_f[cnt].on1 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[cnt].on1dot = jiffies + (long)((j->cadence_f[cnt].on1 * (hertz * (100)) / 10000));
+ j->cadence_f[cnt].on1max = jiffies + (long)((j->cadence_f[cnt].on1 * (hertz * (100 + var)) / 10000));
+ } else if (j->cadence_f[cnt].state == 2 &&
+ (time_after(jiffies, j->cadence_f[cnt].off1min) &&
+ time_before(jiffies, j->cadence_f[cnt].off1max))) {
+ if (j->cadence_f[cnt].on2) {
+ j->cadence_f[cnt].state = 3;
+ j->cadence_f[cnt].on2min = jiffies + (long)((j->cadence_f[cnt].on2 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[cnt].on2dot = jiffies + (long)((j->cadence_f[cnt].on2 * (hertz * (100)) / 10000));
+ j->cadence_f[cnt].on2max = jiffies + (long)((j->cadence_f[cnt].on2 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[cnt].state = 7;
+ }
+ } else if (j->cadence_f[cnt].state == 4 &&
+ (time_after(jiffies, j->cadence_f[cnt].off2min) &&
+ time_before(jiffies, j->cadence_f[cnt].off2max))) {
+ if (j->cadence_f[cnt].on3) {
+ j->cadence_f[cnt].state = 5;
+ j->cadence_f[cnt].on3min = jiffies + (long)((j->cadence_f[cnt].on3 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[cnt].on3dot = jiffies + (long)((j->cadence_f[cnt].on3 * (hertz * (100)) / 10000));
+ j->cadence_f[cnt].on3max = jiffies + (long)((j->cadence_f[cnt].on3 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[cnt].state = 7;
+ }
+ } else {
+ j->cadence_f[cnt].state = 0;
+ }
+ } else if (j->filter_hist[cnt] & 12 && !(j->filter_hist[cnt] & 3)) {
+ if (j->cadence_f[cnt].state == 1) {
+ if(!j->cadence_f[cnt].on1) {
+ j->cadence_f[cnt].state = 7;
+ } else if((time_after(jiffies, j->cadence_f[cnt].on1min) &&
+ time_before(jiffies, j->cadence_f[cnt].on1max))) {
+ if(j->cadence_f[cnt].off1) {
+ j->cadence_f[cnt].state = 2;
+ j->cadence_f[cnt].off1min = jiffies + (long)((j->cadence_f[cnt].off1 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[cnt].off1dot = jiffies + (long)((j->cadence_f[cnt].off1 * (hertz * (100)) / 10000));
+ j->cadence_f[cnt].off1max = jiffies + (long)((j->cadence_f[cnt].off1 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[cnt].state = 7;
+ }
+ } else {
+ j->cadence_f[cnt].state = 0;
+ }
+ } else if (j->cadence_f[cnt].state == 3) {
+ if((time_after(jiffies, j->cadence_f[cnt].on2min) &&
+ time_before(jiffies, j->cadence_f[cnt].on2max))) {
+ if(j->cadence_f[cnt].off2) {
+ j->cadence_f[cnt].state = 4;
+ j->cadence_f[cnt].off2min = jiffies + (long)((j->cadence_f[cnt].off2 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[cnt].off2dot = jiffies + (long)((j->cadence_f[cnt].off2 * (hertz * (100)) / 10000));
+ j->cadence_f[cnt].off2max = jiffies + (long)((j->cadence_f[cnt].off2 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[cnt].state = 7;
+ }
+ } else {
+ j->cadence_f[cnt].state = 0;
+ }
+ } else if (j->cadence_f[cnt].state == 5) {
+ if ((time_after(jiffies, j->cadence_f[cnt].on3min) &&
+ time_before(jiffies, j->cadence_f[cnt].on3max))) {
+ if(j->cadence_f[cnt].off3) {
+ j->cadence_f[cnt].state = 6;
+ j->cadence_f[cnt].off3min = jiffies + (long)((j->cadence_f[cnt].off3 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[cnt].off3dot = jiffies + (long)((j->cadence_f[cnt].off3 * (hertz * (100)) / 10000));
+ j->cadence_f[cnt].off3max = jiffies + (long)((j->cadence_f[cnt].off3 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[cnt].state = 7;
+ }
+ } else {
+ j->cadence_f[cnt].state = 0;
+ }
+ } else {
+ j->cadence_f[cnt].state = 0;
+ }
+ } else {
+ switch(j->cadence_f[cnt].state) {
+ case 1:
+ if(time_after(jiffies, j->cadence_f[cnt].on1dot) &&
+ !j->cadence_f[cnt].off1 &&
+ !j->cadence_f[cnt].on2 && !j->cadence_f[cnt].off2 &&
+ !j->cadence_f[cnt].on3 && !j->cadence_f[cnt].off3) {
+ j->cadence_f[cnt].state = 7;
+ }
+ break;
+ case 3:
+ if(time_after(jiffies, j->cadence_f[cnt].on2dot) &&
+ !j->cadence_f[cnt].off2 &&
+ !j->cadence_f[cnt].on3 && !j->cadence_f[cnt].off3) {
+ j->cadence_f[cnt].state = 7;
+ }
+ break;
+ case 5:
+ if(time_after(jiffies, j->cadence_f[cnt].on3dot) &&
+ !j->cadence_f[cnt].off3) {
+ j->cadence_f[cnt].state = 7;
+ }
+ break;
+ }
+ }
+
+ if (ixjdebug & 0x0040) {
+ printk(KERN_INFO "IXJ Tone Cadence state = %d /dev/phone%d at %ld\n", j->cadence_f[cnt].state, j->board, jiffies);
+ switch(j->cadence_f[cnt].state) {
+ case 0:
+ printk(KERN_INFO "IXJ /dev/phone%d No Tone detected\n", j->board);
+ break;
+ case 1:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Tone Cadence state at %u %ld - %ld - %ld\n", j->board,
+ j->cadence_f[cnt].on1, j->cadence_f[cnt].on1min, j->cadence_f[cnt].on1dot, j->cadence_f[cnt].on1max);
+ break;
+ case 2:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Tone Cadence state at %ld - %ld\n", j->board, j->cadence_f[cnt].off1min,
+ j->cadence_f[cnt].off1max);
+ break;
+ case 3:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Tone Cadence state at %ld - %ld\n", j->board, j->cadence_f[cnt].on2min,
+ j->cadence_f[cnt].on2max);
+ break;
+ case 4:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Tone Cadence state at %ld - %ld\n", j->board, j->cadence_f[cnt].off2min,
+ j->cadence_f[cnt].off2max);
+ break;
+ case 5:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Tone Cadence state at %ld - %ld\n", j->board, j->cadence_f[cnt].on3min,
+ j->cadence_f[cnt].on3max);
+ break;
+ case 6:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Tone Cadence state at %ld - %ld\n", j->board, j->cadence_f[cnt].off3min,
+ j->cadence_f[cnt].off3max);
+ break;
+ }
+ }
+ }
+ if (j->cadence_f[cnt].state == 7) {
+ j->cadence_f[cnt].state = 0;
+ if (j->cadence_f[cnt].enable == 1)
+ j->cadence_f[cnt].enable = 0;
+ switch (cnt) {
+ case 0:
+ if(ixjdebug & 0x0020) {
+ printk(KERN_INFO "Filter Cadence 0 triggered %ld\n", jiffies);
+ }
+ j->ex.bits.fc0 = 1;
+ ixj_kill_fasync(j, SIG_FC0, POLL_IN);
+ break;
+ case 1:
+ if(ixjdebug & 0x0020) {
+ printk(KERN_INFO "Filter Cadence 1 triggered %ld\n", jiffies);
+ }
+ j->ex.bits.fc1 = 1;
+ ixj_kill_fasync(j, SIG_FC1, POLL_IN);
+ break;
+ case 2:
+ if(ixjdebug & 0x0020) {
+ printk(KERN_INFO "Filter Cadence 2 triggered %ld\n", jiffies);
+ }
+ j->ex.bits.fc2 = 1;
+ ixj_kill_fasync(j, SIG_FC2, POLL_IN);
+ break;
+ case 3:
+ if(ixjdebug & 0x0020) {
+ printk(KERN_INFO "Filter Cadence 3 triggered %ld\n", jiffies);
+ }
+ j->ex.bits.fc3 = 1;
+ ixj_kill_fasync(j, SIG_FC3, POLL_IN);
+ break;
+ }
+ }
+ if (j->filter_en[cnt] && ((j->filter_hist[cnt] & 3 && !(j->filter_hist[cnt] & 12)) ||
+ (j->filter_hist[cnt] & 12 && !(j->filter_hist[cnt] & 3)))) {
+ if((j->filter_hist[cnt] & 3 && !(j->filter_hist[cnt] & 12))) {
+ trg = 1;
+ } else if((j->filter_hist[cnt] & 12 && !(j->filter_hist[cnt] & 3))) {
+ trg = 0;
+ }
+ switch (cnt) {
+ case 0:
+ if(ixjdebug & 0x0020) {
+ printk(KERN_INFO "Filter 0 triggered %d at %ld\n", trg, jiffies);
+ }
+ j->ex.bits.f0 = 1;
+ ixj_kill_fasync(j, SIG_F0, POLL_IN);
+ break;
+ case 1:
+ if(ixjdebug & 0x0020) {
+ printk(KERN_INFO "Filter 1 triggered %d at %ld\n", trg, jiffies);
+ }
+ j->ex.bits.f1 = 1;
+ ixj_kill_fasync(j, SIG_F1, POLL_IN);
+ break;
+ case 2:
+ if(ixjdebug & 0x0020) {
+ printk(KERN_INFO "Filter 2 triggered %d at %ld\n", trg, jiffies);
+ }
+ j->ex.bits.f2 = 1;
+ ixj_kill_fasync(j, SIG_F2, POLL_IN);
+ break;
+ case 3:
+ if(ixjdebug & 0x0020) {
+ printk(KERN_INFO "Filter 3 triggered %d at %ld\n", trg, jiffies);
+ }
+ j->ex.bits.f3 = 1;
+ ixj_kill_fasync(j, SIG_F3, POLL_IN);
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+static int LineMonitor(IXJ *j)
+{
+ if (j->dtmf_proc) {
+ return -1;
+ }
+ j->dtmf_proc = 1;
+
+ if (ixj_WriteDSPCommand(0x7000, j)) /* Line Monitor */
+ return -1;
+
+ j->dtmf.bytes.high = j->ssr.high;
+ j->dtmf.bytes.low = j->ssr.low;
+ if (!j->dtmf_state && j->dtmf.bits.dtmf_valid) {
+ j->dtmf_state = 1;
+ j->dtmf_current = j->dtmf.bits.digit;
+ }
+ if (j->dtmf_state && !j->dtmf.bits.dtmf_valid) { /* && j->dtmf_wp != j->dtmf_rp) */
+ if(!j->cidcw_wait) {
+ j->dtmfbuffer[j->dtmf_wp] = j->dtmf_current;
+ j->dtmf_wp++;
+ if (j->dtmf_wp == 79)
+ j->dtmf_wp = 0;
+ j->ex.bits.dtmf_ready = 1;
+ if(j->ex_sig.bits.dtmf_ready) {
+ ixj_kill_fasync(j, SIG_DTMF_READY, POLL_IN);
+ }
+ } else if (j->dtmf_current == 0x00 || j->dtmf_current == 0x0D) {
+ if(ixjdebug & 0x0020) {
+ printk("IXJ phone%d saw CIDCW Ack DTMF %d from display at %ld\n", j->board, j->dtmf_current, jiffies);
+ }
+ j->flags.cidcw_ack = 1;
+ }
+ j->dtmf_state = 0;
+ }
+ j->dtmf_proc = 0;
+
+ return 0;
+}
+
+/************************************************************************
+*
+* Functions to allow alaw <-> ulaw conversions.
+*
+************************************************************************/
+
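+/*
+ * Both converters translate the buffer in place, one byte per sample,
+ * through a 256-entry lookup table.  ixj_read() converts the record
+ * buffer with ulaw2alaw() when the selected codec is ALAW.
+ */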
+static void ulaw2alaw(unsigned char *buff, unsigned long len)
+{
+ static unsigned char table_ulaw2alaw[] =
+ {
+ 0x2A, 0x2B, 0x28, 0x29, 0x2E, 0x2F, 0x2C, 0x2D,
+ 0x22, 0x23, 0x20, 0x21, 0x26, 0x27, 0x24, 0x25,
+ 0x3A, 0x3B, 0x38, 0x39, 0x3E, 0x3F, 0x3C, 0x3D,
+ 0x32, 0x33, 0x30, 0x31, 0x36, 0x37, 0x34, 0x35,
+ 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D, 0x02,
+ 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05, 0x1A,
+ 0x1B, 0x18, 0x19, 0x1E, 0x1F, 0x1C, 0x1D, 0x12,
+ 0x13, 0x10, 0x11, 0x16, 0x17, 0x14, 0x15, 0x6B,
+ 0x68, 0x69, 0x6E, 0x6F, 0x6C, 0x6D, 0x62, 0x63,
+ 0x60, 0x61, 0x66, 0x67, 0x64, 0x65, 0x7B, 0x79,
+ 0x7E, 0x7F, 0x7C, 0x7D, 0x72, 0x73, 0x70, 0x71,
+ 0x76, 0x77, 0x74, 0x75, 0x4B, 0x49, 0x4F, 0x4D,
+ 0x42, 0x43, 0x40, 0x41, 0x46, 0x47, 0x44, 0x45,
+ 0x5A, 0x5B, 0x58, 0x59, 0x5E, 0x5F, 0x5C, 0x5D,
+ 0x52, 0x52, 0x53, 0x53, 0x50, 0x50, 0x51, 0x51,
+ 0x56, 0x56, 0x57, 0x57, 0x54, 0x54, 0x55, 0xD5,
+ 0xAA, 0xAB, 0xA8, 0xA9, 0xAE, 0xAF, 0xAC, 0xAD,
+ 0xA2, 0xA3, 0xA0, 0xA1, 0xA6, 0xA7, 0xA4, 0xA5,
+ 0xBA, 0xBB, 0xB8, 0xB9, 0xBE, 0xBF, 0xBC, 0xBD,
+ 0xB2, 0xB3, 0xB0, 0xB1, 0xB6, 0xB7, 0xB4, 0xB5,
+ 0x8B, 0x88, 0x89, 0x8E, 0x8F, 0x8C, 0x8D, 0x82,
+ 0x83, 0x80, 0x81, 0x86, 0x87, 0x84, 0x85, 0x9A,
+ 0x9B, 0x98, 0x99, 0x9E, 0x9F, 0x9C, 0x9D, 0x92,
+ 0x93, 0x90, 0x91, 0x96, 0x97, 0x94, 0x95, 0xEB,
+ 0xE8, 0xE9, 0xEE, 0xEF, 0xEC, 0xED, 0xE2, 0xE3,
+ 0xE0, 0xE1, 0xE6, 0xE7, 0xE4, 0xE5, 0xFB, 0xF9,
+ 0xFE, 0xFF, 0xFC, 0xFD, 0xF2, 0xF3, 0xF0, 0xF1,
+ 0xF6, 0xF7, 0xF4, 0xF5, 0xCB, 0xC9, 0xCF, 0xCD,
+ 0xC2, 0xC3, 0xC0, 0xC1, 0xC6, 0xC7, 0xC4, 0xC5,
+ 0xDA, 0xDB, 0xD8, 0xD9, 0xDE, 0xDF, 0xDC, 0xDD,
+ 0xD2, 0xD2, 0xD3, 0xD3, 0xD0, 0xD0, 0xD1, 0xD1,
+ 0xD6, 0xD6, 0xD7, 0xD7, 0xD4, 0xD4, 0xD5, 0xD5
+ };
+
+ while (len--) {
+ *buff = table_ulaw2alaw[*(unsigned char *)buff];
+ buff++;
+ }
+}
+
+static void alaw2ulaw(unsigned char *buff, unsigned long len)
+{
+ static unsigned char table_alaw2ulaw[] =
+ {
+ 0x29, 0x2A, 0x27, 0x28, 0x2D, 0x2E, 0x2B, 0x2C,
+ 0x21, 0x22, 0x1F, 0x20, 0x25, 0x26, 0x23, 0x24,
+ 0x39, 0x3A, 0x37, 0x38, 0x3D, 0x3E, 0x3B, 0x3C,
+ 0x31, 0x32, 0x2F, 0x30, 0x35, 0x36, 0x33, 0x34,
+ 0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D,
+ 0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
+ 0x1A, 0x1B, 0x18, 0x19, 0x1E, 0x1F, 0x1C, 0x1D,
+ 0x12, 0x13, 0x10, 0x11, 0x16, 0x17, 0x14, 0x15,
+ 0x62, 0x63, 0x60, 0x61, 0x66, 0x67, 0x64, 0x65,
+ 0x5D, 0x5D, 0x5C, 0x5C, 0x5F, 0x5F, 0x5E, 0x5E,
+ 0x74, 0x76, 0x70, 0x72, 0x7C, 0x7E, 0x78, 0x7A,
+ 0x6A, 0x6B, 0x68, 0x69, 0x6E, 0x6F, 0x6C, 0x6D,
+ 0x48, 0x49, 0x46, 0x47, 0x4C, 0x4D, 0x4A, 0x4B,
+ 0x40, 0x41, 0x3F, 0x3F, 0x44, 0x45, 0x42, 0x43,
+ 0x56, 0x57, 0x54, 0x55, 0x5A, 0x5B, 0x58, 0x59,
+ 0x4F, 0x4F, 0x4E, 0x4E, 0x52, 0x53, 0x50, 0x51,
+ 0xA9, 0xAA, 0xA7, 0xA8, 0xAD, 0xAE, 0xAB, 0xAC,
+ 0xA1, 0xA2, 0x9F, 0xA0, 0xA5, 0xA6, 0xA3, 0xA4,
+ 0xB9, 0xBA, 0xB7, 0xB8, 0xBD, 0xBE, 0xBB, 0xBC,
+ 0xB1, 0xB2, 0xAF, 0xB0, 0xB5, 0xB6, 0xB3, 0xB4,
+ 0x8A, 0x8B, 0x88, 0x89, 0x8E, 0x8F, 0x8C, 0x8D,
+ 0x82, 0x83, 0x80, 0x81, 0x86, 0x87, 0x84, 0x85,
+ 0x9A, 0x9B, 0x98, 0x99, 0x9E, 0x9F, 0x9C, 0x9D,
+ 0x92, 0x93, 0x90, 0x91, 0x96, 0x97, 0x94, 0x95,
+ 0xE2, 0xE3, 0xE0, 0xE1, 0xE6, 0xE7, 0xE4, 0xE5,
+ 0xDD, 0xDD, 0xDC, 0xDC, 0xDF, 0xDF, 0xDE, 0xDE,
+ 0xF4, 0xF6, 0xF0, 0xF2, 0xFC, 0xFE, 0xF8, 0xFA,
+ 0xEA, 0xEB, 0xE8, 0xE9, 0xEE, 0xEF, 0xEC, 0xED,
+ 0xC8, 0xC9, 0xC6, 0xC7, 0xCC, 0xCD, 0xCA, 0xCB,
+ 0xC0, 0xC1, 0xBF, 0xBF, 0xC4, 0xC5, 0xC2, 0xC3,
+ 0xD6, 0xD7, 0xD4, 0xD5, 0xDA, 0xDB, 0xD8, 0xD9,
+ 0xCF, 0xCF, 0xCE, 0xCE, 0xD2, 0xD3, 0xD0, 0xD1
+ };
+
+	while (len--) {
+		*buff = table_alaw2ulaw[*buff];
+		buff++;
+	}
+}
+
+static ssize_t ixj_read(struct file * file_p, char __user *buf, size_t length, loff_t * ppos)
+{
+ unsigned long i = *ppos;
+ IXJ * j = get_ixj(NUM(file_p->f_path.dentry->d_inode));
+
+ DECLARE_WAITQUEUE(wait, current);
+
+ if (j->flags.inread)
+ return -EALREADY;
+
+ j->flags.inread = 1;
+
+ add_wait_queue(&j->read_q, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ mb();
+
+ while (!j->read_buffer_ready || (j->dtmf_state && j->flags.dtmf_oob)) {
+ ++j->read_wait;
+ if (file_p->f_flags & O_NONBLOCK) {
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&j->read_q, &wait);
+ j->flags.inread = 0;
+ return -EAGAIN;
+ }
+ if (!ixj_hookstate(j)) {
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&j->read_q, &wait);
+ j->flags.inread = 0;
+ return 0;
+ }
+ interruptible_sleep_on(&j->read_q);
+ if (signal_pending(current)) {
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&j->read_q, &wait);
+ j->flags.inread = 0;
+ return -EINTR;
+ }
+ }
+
+ remove_wait_queue(&j->read_q, &wait);
+ set_current_state(TASK_RUNNING);
+ /* Don't ever copy more than the user asks */
+ if(j->rec_codec == ALAW)
+ ulaw2alaw(j->read_buffer, min(length, j->read_buffer_size));
+ i = copy_to_user(buf, j->read_buffer, min(length, j->read_buffer_size));
+ j->read_buffer_ready = 0;
+ if (i) {
+ j->flags.inread = 0;
+ return -EFAULT;
+ } else {
+ j->flags.inread = 0;
+ return min(length, j->read_buffer_size);
+ }
+}
+
+static ssize_t ixj_enhanced_read(struct file * file_p, char __user *buf, size_t length,
+ loff_t * ppos)
+{
+ int pre_retval;
+ ssize_t read_retval = 0;
+ IXJ *j = get_ixj(NUM(file_p->f_path.dentry->d_inode));
+
+ pre_retval = ixj_PreRead(j, 0L);
+ switch (pre_retval) {
+ case NORMAL:
+ read_retval = ixj_read(file_p, buf, length, ppos);
+ ixj_PostRead(j, 0L);
+ break;
+ case NOPOST:
+ read_retval = ixj_read(file_p, buf, length, ppos);
+ break;
+ case POSTONLY:
+ ixj_PostRead(j, 0L);
+ break;
+ default:
+ read_retval = pre_retval;
+ }
+ return read_retval;
+}
+
+static ssize_t ixj_write(struct file *file_p, const char __user *buf, size_t count, loff_t * ppos)
+{
+ unsigned long i = *ppos;
+ IXJ *j = file_p->private_data;
+
+ DECLARE_WAITQUEUE(wait, current);
+
+ if (j->flags.inwrite)
+ return -EALREADY;
+
+ j->flags.inwrite = 1;
+
+ add_wait_queue(&j->write_q, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ mb();
+
+
+ while (!j->write_buffers_empty) {
+ ++j->write_wait;
+ if (file_p->f_flags & O_NONBLOCK) {
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&j->write_q, &wait);
+ j->flags.inwrite = 0;
+ return -EAGAIN;
+ }
+ if (!ixj_hookstate(j)) {
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&j->write_q, &wait);
+ j->flags.inwrite = 0;
+ return 0;
+ }
+ interruptible_sleep_on(&j->write_q);
+ if (signal_pending(current)) {
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&j->write_q, &wait);
+ j->flags.inwrite = 0;
+ return -EINTR;
+ }
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&j->write_q, &wait);
+ if (j->write_buffer_wp + count >= j->write_buffer_end)
+ j->write_buffer_wp = j->write_buffer;
+ i = copy_from_user(j->write_buffer_wp, buf, min(count, j->write_buffer_size));
+ if (i) {
+ j->flags.inwrite = 0;
+ return -EFAULT;
+ }
+ if(j->play_codec == ALAW)
+ alaw2ulaw(j->write_buffer_wp, min(count, j->write_buffer_size));
+ j->flags.inwrite = 0;
+ return min(count, j->write_buffer_size);
+}
+
+static ssize_t ixj_enhanced_write(struct file * file_p, const char __user *buf, size_t count, loff_t * ppos)
+{
+ int pre_retval;
+ ssize_t write_retval = 0;
+
+ IXJ *j = get_ixj(NUM(file_p->f_path.dentry->d_inode));
+
+ pre_retval = ixj_PreWrite(j, 0L);
+ switch (pre_retval) {
+ case NORMAL:
+ write_retval = ixj_write(file_p, buf, count, ppos);
+ if (write_retval > 0) {
+ ixj_PostWrite(j, 0L);
+ j->write_buffer_wp += write_retval;
+ j->write_buffers_empty--;
+ }
+ break;
+ case NOPOST:
+ write_retval = ixj_write(file_p, buf, count, ppos);
+ if (write_retval > 0) {
+ j->write_buffer_wp += write_retval;
+ j->write_buffers_empty--;
+ }
+ break;
+ case POSTONLY:
+ ixj_PostWrite(j, 0L);
+ break;
+ default:
+ write_retval = pre_retval;
+ }
+ return write_retval;
+}
+
+static void ixj_read_frame(IXJ *j)
+{
+ int cnt, dly;
+
+ if (j->read_buffer) {
+ for (cnt = 0; cnt < j->rec_frame_size * 2; cnt += 2) {
+ if (!(cnt % 16) && !IsRxReady(j)) {
+ dly = 0;
+ while (!IsRxReady(j)) {
+ if (dly++ > 5) {
+ dly = 0;
+ break;
+ }
+ udelay(10);
+ }
+ }
+ /* Throw away word 0 of the 8021 compressed format to get standard G.729. */
+ if (j->rec_codec == G729 && (cnt == 0 || cnt == 10 || cnt == 20)) {
+ inb_p(j->DSPbase + 0x0E);
+ inb_p(j->DSPbase + 0x0F);
+ }
+ *(j->read_buffer + cnt) = inb_p(j->DSPbase + 0x0E);
+ *(j->read_buffer + cnt + 1) = inb_p(j->DSPbase + 0x0F);
+ }
+ ++j->framesread;
+ if (j->intercom != -1) {
+ if (IsTxReady(get_ixj(j->intercom))) {
+ for (cnt = 0; cnt < j->rec_frame_size * 2; cnt += 2) {
+ if (!(cnt % 16) && !IsTxReady(j)) {
+ dly = 0;
+ while (!IsTxReady(j)) {
+ if (dly++ > 5) {
+ dly = 0;
+ break;
+ }
+ udelay(10);
+ }
+ }
+ outb_p(*(j->read_buffer + cnt), get_ixj(j->intercom)->DSPbase + 0x0C);
+ outb_p(*(j->read_buffer + cnt + 1), get_ixj(j->intercom)->DSPbase + 0x0D);
+ }
+ get_ixj(j->intercom)->frameswritten++;
+ }
+ } else {
+ j->read_buffer_ready = 1;
+ wake_up_interruptible(&j->read_q); /* Wake any blocked readers */
+
+ wake_up_interruptible(&j->poll_q); /* Wake any blocked selects */
+
+ if(j->ixj_signals[SIG_READ_READY])
+			ixj_kill_fasync(j, SIG_READ_READY, POLL_IN);	/* read data is available: POLL_IN, not POLL_OUT */
+ }
+ }
+}
+
+static short fsk[][6][20] =
+{
+ {
+ {
+ 0, 17846, 29934, 32364, 24351, 8481, -10126, -25465, -32587, -29196,
+ -16384, 1715, 19260, 30591, 32051, 23170, 6813, -11743, -26509, -32722
+ },
+ {
+ -28377, -14876, 3425, 20621, 31163, 31650, 21925, 5126, -13328, -27481,
+ -32767, -27481, -13328, 5126, 21925, 31650, 31163, 20621, 3425, -14876
+ },
+ {
+ -28377, -32722, -26509, -11743, 6813, 23170, 32051, 30591, 19260, 1715,
+ -16384, -29196, -32587, -25465, -10126, 8481, 24351, 32364, 29934, 17846
+ },
+ {
+ 0, -17846, -29934, -32364, -24351, -8481, 10126, 25465, 32587, 29196,
+ 16384, -1715, -19260, -30591, -32051, -23170, -6813, 11743, 26509, 32722
+ },
+ {
+ 28377, 14876, -3425, -20621, -31163, -31650, -21925, -5126, 13328, 27481,
+ 32767, 27481, 13328, -5126, -21925, -31650, -31163, -20621, -3425, 14876
+ },
+ {
+ 28377, 32722, 26509, 11743, -6813, -23170, -32051, -30591, -19260, -1715,
+ 16384, 29196, 32587, 25465, 10126, -8481, -24351, -32364, -29934, -17846
+ }
+ },
+ {
+ {
+ 0, 10126, 19260, 26509, 31163, 32767, 31163, 26509, 19260, 10126,
+ 0, -10126, -19260, -26509, -31163, -32767, -31163, -26509, -19260, -10126
+ },
+ {
+ -28377, -21925, -13328, -3425, 6813, 16384, 24351, 29934, 32587, 32051,
+ 28377, 21925, 13328, 3425, -6813, -16384, -24351, -29934, -32587, -32051
+ },
+ {
+ -28377, -32051, -32587, -29934, -24351, -16384, -6813, 3425, 13328, 21925,
+ 28377, 32051, 32587, 29934, 24351, 16384, 6813, -3425, -13328, -21925
+ },
+ {
+ 0, -10126, -19260, -26509, -31163, -32767, -31163, -26509, -19260, -10126,
+ 0, 10126, 19260, 26509, 31163, 32767, 31163, 26509, 19260, 10126
+ },
+ {
+ 28377, 21925, 13328, 3425, -6813, -16383, -24351, -29934, -32587, -32051,
+ -28377, -21925, -13328, -3425, 6813, 16383, 24351, 29934, 32587, 32051
+ },
+ {
+ 28377, 32051, 32587, 29934, 24351, 16384, 6813, -3425, -13328, -21925,
+ -28377, -32051, -32587, -29934, -24351, -16384, -6813, 3425, 13328, 21925
+ }
+ }
+};
+
+
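+/*
+ * fsk[bit][phase][sample]: pre-computed 16-bit sine segments used to
+ * render the caller-ID FSK bit stream in software.  Each bit value has
+ * six 20-sample segments; ixj_write_cid_bit() below walks them,
+ * advancing the phase index on space bits, so consecutive bits splice
+ * together without a phase jump.  (The two tones appear to be the
+ * Bell 202-style mark/space frequencies used for on-hook caller ID.)
+ */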
+static void ixj_write_cid_bit(IXJ *j, int bit)
+{
+ while (j->fskcnt < 20) {
+ if(j->fskdcnt < (j->fsksize - 1))
+ j->fskdata[j->fskdcnt++] = fsk[bit][j->fskz][j->fskcnt];
+
+ j->fskcnt += 3;
+ }
+ j->fskcnt %= 20;
+
+ if (!bit)
+ j->fskz++;
+ if (j->fskz >= 6)
+ j->fskz = 0;
+
+}
+
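+/*
+ * Emit one byte in asynchronous serial framing: a 0 start bit, the
+ * eight data bits least-significant first, then a 1 stop bit.
+ */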
+static void ixj_write_cid_byte(IXJ *j, char byte)
+{
+ IXJ_CBYTE cb;
+
+ cb.cbyte = byte;
+ ixj_write_cid_bit(j, 0);
+ ixj_write_cid_bit(j, cb.cbits.b0 ? 1 : 0);
+ ixj_write_cid_bit(j, cb.cbits.b1 ? 1 : 0);
+ ixj_write_cid_bit(j, cb.cbits.b2 ? 1 : 0);
+ ixj_write_cid_bit(j, cb.cbits.b3 ? 1 : 0);
+ ixj_write_cid_bit(j, cb.cbits.b4 ? 1 : 0);
+ ixj_write_cid_bit(j, cb.cbits.b5 ? 1 : 0);
+ ixj_write_cid_bit(j, cb.cbits.b6 ? 1 : 0);
+ ixj_write_cid_bit(j, cb.cbits.b7 ? 1 : 0);
+ ixj_write_cid_bit(j, 1);
+}
+
+static void ixj_write_cid_seize(IXJ *j)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < 150; cnt++) {
+ ixj_write_cid_bit(j, 0);
+ ixj_write_cid_bit(j, 1);
+ }
+ for (cnt = 0; cnt < 180; cnt++) {
+ ixj_write_cid_bit(j, 1);
+ }
+}
+
+static void ixj_write_cidcw_seize(IXJ *j)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < 80; cnt++) {
+ ixj_write_cid_bit(j, 1);
+ }
+}
+
+static int ixj_write_cid_string(IXJ *j, char *s, int checksum)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < strlen(s); cnt++) {
+ ixj_write_cid_byte(j, s[cnt]);
+ checksum = (checksum + s[cnt]);
+ }
+ return checksum;
+}
+
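+/*
+ * Pad the rendered FSK data out to the requested boundary and append
+ * 720 words of silence, presumably so the trailing bits drain through
+ * the DSP before playback stops (720 samples is 90 ms at the 8 kHz
+ * rate used for CID playback).
+ */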
+static void ixj_pad_fsk(IXJ *j, int pad)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < pad; cnt++) {
+ if(j->fskdcnt < (j->fsksize - 1))
+ j->fskdata[j->fskdcnt++] = 0x0000;
+ }
+ for (cnt = 0; cnt < 720; cnt++) {
+ if(j->fskdcnt < (j->fsksize - 1))
+ j->fskdata[j->fskdcnt++] = 0x0000;
+ }
+}
+
+static void ixj_pre_cid(IXJ *j)
+{
+ j->cid_play_codec = j->play_codec;
+ j->cid_play_frame_size = j->play_frame_size;
+ j->cid_play_volume = get_play_volume(j);
+ j->cid_play_flag = j->flags.playing;
+
+ j->cid_rec_codec = j->rec_codec;
+ j->cid_rec_volume = get_rec_volume(j);
+ j->cid_rec_flag = j->flags.recording;
+
+ j->cid_play_aec_level = j->aec_level;
+
+ switch(j->baseframe.low) {
+ case 0xA0:
+ j->cid_base_frame_size = 20;
+ break;
+ case 0x50:
+ j->cid_base_frame_size = 10;
+ break;
+ case 0xF0:
+ j->cid_base_frame_size = 30;
+ break;
+ }
+
+ ixj_play_stop(j);
+ ixj_cpt_stop(j);
+
+ j->flags.cidplay = 1;
+
+ set_base_frame(j, 30);
+ set_play_codec(j, LINEAR16);
+ set_play_volume(j, 0x1B);
+ ixj_play_start(j);
+}
+
+static void ixj_post_cid(IXJ *j)
+{
+ ixj_play_stop(j);
+
+ if(j->cidsize > 5000) {
+ SLIC_SetState(PLD_SLIC_STATE_STANDBY, j);
+ }
+ j->flags.cidplay = 0;
+ if(ixjdebug & 0x0200) {
+ printk("IXJ phone%d Finished Playing CallerID data %ld\n", j->board, jiffies);
+ }
+
+ ixj_fsk_free(j);
+
+ j->fskdcnt = 0;
+ set_base_frame(j, j->cid_base_frame_size);
+ set_play_codec(j, j->cid_play_codec);
+ ixj_aec_start(j, j->cid_play_aec_level);
+ set_play_volume(j, j->cid_play_volume);
+
+ set_rec_codec(j, j->cid_rec_codec);
+ set_rec_volume(j, j->cid_rec_volume);
+
+ if(j->cid_rec_flag)
+ ixj_record_start(j);
+
+ if(j->cid_play_flag)
+ ixj_play_start(j);
+
+ if(j->cid_play_flag) {
+ wake_up_interruptible(&j->write_q); /* Wake any blocked writers */
+ }
+}
+
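+/*
+ * Render a full on-hook caller-ID burst: channel seizure, then an MDMF
+ * message -- type 0x80, length, then TLV parameters 0x01 (date/time),
+ * 0x02 (calling number) and 0x07 (calling name) -- and finally the
+ * two's-complement checksum byte.
+ */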
+static void ixj_write_cid(IXJ *j)
+{
+ char sdmf1[50];
+ char sdmf2[50];
+ char sdmf3[80];
+ char mdmflen, len1, len2, len3;
+ int pad;
+
+ int checksum = 0;
+
+ if (j->dsp.low == 0x20 || j->flags.cidplay)
+ return;
+
+ j->fskz = j->fskphase = j->fskcnt = j->fskdcnt = 0;
+ j->cidsize = j->cidcnt = 0;
+
+ ixj_fsk_alloc(j);
+
+ strcpy(sdmf1, j->cid_send.month);
+ strcat(sdmf1, j->cid_send.day);
+ strcat(sdmf1, j->cid_send.hour);
+ strcat(sdmf1, j->cid_send.min);
+ strcpy(sdmf2, j->cid_send.number);
+ strcpy(sdmf3, j->cid_send.name);
+
+ len1 = strlen(sdmf1);
+ len2 = strlen(sdmf2);
+ len3 = strlen(sdmf3);
+ mdmflen = len1 + len2 + len3 + 6;
+
+ while(1){
+ ixj_write_cid_seize(j);
+
+ ixj_write_cid_byte(j, 0x80);
+ checksum = 0x80;
+ ixj_write_cid_byte(j, mdmflen);
+ checksum = checksum + mdmflen;
+
+ ixj_write_cid_byte(j, 0x01);
+ checksum = checksum + 0x01;
+ ixj_write_cid_byte(j, len1);
+ checksum = checksum + len1;
+ checksum = ixj_write_cid_string(j, sdmf1, checksum);
+ if(ixj_hookstate(j) & 1)
+ break;
+
+ ixj_write_cid_byte(j, 0x02);
+ checksum = checksum + 0x02;
+ ixj_write_cid_byte(j, len2);
+ checksum = checksum + len2;
+ checksum = ixj_write_cid_string(j, sdmf2, checksum);
+ if(ixj_hookstate(j) & 1)
+ break;
+
+ ixj_write_cid_byte(j, 0x07);
+ checksum = checksum + 0x07;
+ ixj_write_cid_byte(j, len3);
+ checksum = checksum + len3;
+ checksum = ixj_write_cid_string(j, sdmf3, checksum);
+ if(ixj_hookstate(j) & 1)
+ break;
+
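+		/* The checksum byte is the two's complement of the byte sum,
+		   so the complete message sums to zero modulo 256. */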
+ checksum %= 256;
+ checksum ^= 0xFF;
+ checksum += 1;
+
+ ixj_write_cid_byte(j, (char) checksum);
+
+ pad = j->fskdcnt % 240;
+ if (pad) {
+ pad = 240 - pad;
+ }
+ ixj_pad_fsk(j, pad);
+ break;
+ }
+
+ ixj_write_frame(j);
+}
+
+static void ixj_write_cidcw(IXJ *j)
+{
+ IXJ_TONE ti;
+
+ char sdmf1[50];
+ char sdmf2[50];
+ char sdmf3[80];
+ char mdmflen, len1, len2, len3;
+ int pad;
+
+ int checksum = 0;
+
+ if (j->dsp.low == 0x20 || j->flags.cidplay)
+ return;
+
+ j->fskz = j->fskphase = j->fskcnt = j->fskdcnt = 0;
+ j->cidsize = j->cidcnt = 0;
+
+ ixj_fsk_alloc(j);
+
+ j->flags.cidcw_ack = 0;
+
+ ti.tone_index = 23;
+ ti.gain0 = 1;
+ ti.freq0 = hz440;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+
+ ixj_set_tone_on(1500, j);
+ ixj_set_tone_off(32, j);
+ if(ixjdebug & 0x0200) {
+ printk("IXJ cidcw phone%d first tone start at %ld\n", j->board, jiffies);
+ }
+ ixj_play_tone(j, 23);
+
+ clear_bit(j->board, &j->busyflags);
+ while(j->tone_state)
+ schedule_timeout_interruptible(1);
+ while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0)
+ schedule_timeout_interruptible(1);
+ if(ixjdebug & 0x0200) {
+ printk("IXJ cidcw phone%d first tone end at %ld\n", j->board, jiffies);
+ }
+
+ ti.tone_index = 24;
+ ti.gain0 = 1;
+ ti.freq0 = hz2130;
+ ti.gain1 = 0;
+ ti.freq1 = hz2750;
+ ixj_init_tone(j, &ti);
+
+ ixj_set_tone_off(10, j);
+ ixj_set_tone_on(600, j);
+ if(ixjdebug & 0x0200) {
+ printk("IXJ cidcw phone%d second tone start at %ld\n", j->board, jiffies);
+ }
+ ixj_play_tone(j, 24);
+
+ clear_bit(j->board, &j->busyflags);
+ while(j->tone_state)
+ schedule_timeout_interruptible(1);
+ while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0)
+ schedule_timeout_interruptible(1);
+ if(ixjdebug & 0x0200) {
+ printk("IXJ cidcw phone%d sent second tone at %ld\n", j->board, jiffies);
+ }
+
+ j->cidcw_wait = jiffies + ((50 * hertz) / 100);
+
+ clear_bit(j->board, &j->busyflags);
+ while(!j->flags.cidcw_ack && time_before(jiffies, j->cidcw_wait))
+ schedule_timeout_interruptible(1);
+ while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0)
+ schedule_timeout_interruptible(1);
+ j->cidcw_wait = 0;
+ if(!j->flags.cidcw_ack) {
+ if(ixjdebug & 0x0200) {
+ printk("IXJ cidcw phone%d did not receive ACK from display %ld\n", j->board, jiffies);
+ }
+ ixj_post_cid(j);
+ if(j->cid_play_flag) {
+			wake_up_interruptible(&j->write_q);	/* Wake any blocked writers */
+ }
+ return;
+ } else {
+ ixj_pre_cid(j);
+ }
+ j->flags.cidcw_ack = 0;
+ strcpy(sdmf1, j->cid_send.month);
+ strcat(sdmf1, j->cid_send.day);
+ strcat(sdmf1, j->cid_send.hour);
+ strcat(sdmf1, j->cid_send.min);
+ strcpy(sdmf2, j->cid_send.number);
+ strcpy(sdmf3, j->cid_send.name);
+
+ len1 = strlen(sdmf1);
+ len2 = strlen(sdmf2);
+ len3 = strlen(sdmf3);
+ mdmflen = len1 + len2 + len3 + 6;
+
+ ixj_write_cidcw_seize(j);
+
+ ixj_write_cid_byte(j, 0x80);
+ checksum = 0x80;
+ ixj_write_cid_byte(j, mdmflen);
+ checksum = checksum + mdmflen;
+
+ ixj_write_cid_byte(j, 0x01);
+ checksum = checksum + 0x01;
+ ixj_write_cid_byte(j, len1);
+ checksum = checksum + len1;
+ checksum = ixj_write_cid_string(j, sdmf1, checksum);
+
+ ixj_write_cid_byte(j, 0x02);
+ checksum = checksum + 0x02;
+ ixj_write_cid_byte(j, len2);
+ checksum = checksum + len2;
+ checksum = ixj_write_cid_string(j, sdmf2, checksum);
+
+ ixj_write_cid_byte(j, 0x07);
+ checksum = checksum + 0x07;
+ ixj_write_cid_byte(j, len3);
+ checksum = checksum + len3;
+ checksum = ixj_write_cid_string(j, sdmf3, checksum);
+
+ checksum %= 256;
+ checksum ^= 0xFF;
+ checksum += 1;
+
+ ixj_write_cid_byte(j, (char) checksum);
+
+ pad = j->fskdcnt % 240;
+ if (pad) {
+ pad = 240 - pad;
+ }
+ ixj_pad_fsk(j, pad);
+ if(ixjdebug & 0x0200) {
+ printk("IXJ cidcw phone%d sent FSK data at %ld\n", j->board, jiffies);
+ }
+}
+
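+/*
+ * Send a Visual Message Waiting Indicator burst: message type 0x82
+ * with a single 0x0B parameter byte -- 0xFF to light the indicator,
+ * 0x00 to clear it.
+ */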
+static void ixj_write_vmwi(IXJ *j, int msg)
+{
+ char mdmflen;
+ int pad;
+
+ int checksum = 0;
+
+ if (j->dsp.low == 0x20 || j->flags.cidplay)
+ return;
+
+ j->fskz = j->fskphase = j->fskcnt = j->fskdcnt = 0;
+ j->cidsize = j->cidcnt = 0;
+
+ ixj_fsk_alloc(j);
+
+ mdmflen = 3;
+
+ if (j->port == PORT_POTS)
+ SLIC_SetState(PLD_SLIC_STATE_OHT, j);
+
+ ixj_write_cid_seize(j);
+
+ ixj_write_cid_byte(j, 0x82);
+ checksum = 0x82;
+ ixj_write_cid_byte(j, mdmflen);
+ checksum = checksum + mdmflen;
+
+ ixj_write_cid_byte(j, 0x0B);
+ checksum = checksum + 0x0B;
+ ixj_write_cid_byte(j, 1);
+ checksum = checksum + 1;
+
+ if(msg) {
+ ixj_write_cid_byte(j, 0xFF);
+ checksum = checksum + 0xFF;
+ }
+ else {
+ ixj_write_cid_byte(j, 0x00);
+ checksum = checksum + 0x00;
+ }
+
+ checksum %= 256;
+ checksum ^= 0xFF;
+ checksum += 1;
+
+ ixj_write_cid_byte(j, (char) checksum);
+
+ pad = j->fskdcnt % 240;
+ if (pad) {
+ pad = 240 - pad;
+ }
+ ixj_pad_fsk(j, pad);
+}
+
+static void ixj_write_frame(IXJ *j)
+{
+ int cnt, frame_count, dly;
+ IXJ_WORD dat;
+
+ frame_count = 0;
+ if(j->flags.cidplay) {
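+		/* Feed one base frame (240 16-bit samples) of pre-rendered FSK
+		   data to the DSP in place of the normal audio path. */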
+ for(cnt = 0; cnt < 480; cnt++) {
+ if (!(cnt % 16) && !IsTxReady(j)) {
+ dly = 0;
+ while (!IsTxReady(j)) {
+ if (dly++ > 5) {
+ dly = 0;
+ break;
+ }
+ udelay(10);
+ }
+ }
+ dat.word = j->fskdata[j->cidcnt++];
+ outb_p(dat.bytes.low, j->DSPbase + 0x0C);
+ outb_p(dat.bytes.high, j->DSPbase + 0x0D);
+			cnt++;	/* the 480 limit counts bytes, so step cnt a second time per 16-bit word written */
+ }
+ if(j->cidcnt >= j->fskdcnt) {
+ ixj_post_cid(j);
+ }
+		/* This may seem rude, but if we just played one frame of FSK data
+		   for CallerID and there is real audio data in the buffer, we need
+		   to throw it away because we just used its time slot */
+ if (j->write_buffer_rp > j->write_buffer_wp) {
+ j->write_buffer_rp += j->cid_play_frame_size * 2;
+ if (j->write_buffer_rp >= j->write_buffer_end) {
+ j->write_buffer_rp = j->write_buffer;
+ }
+ j->write_buffers_empty++;
+ wake_up_interruptible(&j->write_q); /* Wake any blocked writers */
+
+ wake_up_interruptible(&j->poll_q); /* Wake any blocked selects */
+ }
+ } else if (j->write_buffer && j->write_buffers_empty < 1) {
+ if (j->write_buffer_wp > j->write_buffer_rp) {
+ frame_count =
+ (j->write_buffer_wp - j->write_buffer_rp) / (j->play_frame_size * 2);
+ }
+ if (j->write_buffer_rp > j->write_buffer_wp) {
+ frame_count =
+ (j->write_buffer_wp - j->write_buffer) / (j->play_frame_size * 2) +
+ (j->write_buffer_end - j->write_buffer_rp) / (j->play_frame_size * 2);
+ }
+ if (frame_count >= 1) {
+ if (j->ver.low == 0x12 && j->play_mode && j->flags.play_first_frame) {
+ BYTES blankword;
+
+ switch (j->play_mode) {
+ case PLAYBACK_MODE_ULAW:
+ case PLAYBACK_MODE_ALAW:
+ blankword.low = blankword.high = 0xFF;
+ break;
+ case PLAYBACK_MODE_8LINEAR:
+ case PLAYBACK_MODE_16LINEAR:
+ default:
+ blankword.low = blankword.high = 0x00;
+ break;
+ case PLAYBACK_MODE_8LINEAR_WSS:
+ blankword.low = blankword.high = 0x80;
+ break;
+ }
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (!(cnt % 16) && !IsTxReady(j)) {
+ dly = 0;
+ while (!IsTxReady(j)) {
+ if (dly++ > 5) {
+ dly = 0;
+ break;
+ }
+ udelay(10);
+ }
+ }
+ outb_p((blankword.low), j->DSPbase + 0x0C);
+ outb_p((blankword.high), j->DSPbase + 0x0D);
+ }
+ j->flags.play_first_frame = 0;
+ } else if (j->play_codec == G723_63 && j->flags.play_first_frame) {
+ for (cnt = 0; cnt < 24; cnt++) {
+ BYTES blankword;
+
+ if(cnt == 12) {
+ blankword.low = 0x02;
+ blankword.high = 0x00;
+ }
+ else {
+ blankword.low = blankword.high = 0x00;
+ }
+ if (!(cnt % 16) && !IsTxReady(j)) {
+ dly = 0;
+ while (!IsTxReady(j)) {
+ if (dly++ > 5) {
+ dly = 0;
+ break;
+ }
+ udelay(10);
+ }
+ }
+ outb_p((blankword.low), j->DSPbase + 0x0C);
+ outb_p((blankword.high), j->DSPbase + 0x0D);
+ }
+ j->flags.play_first_frame = 0;
+ }
+ for (cnt = 0; cnt < j->play_frame_size * 2; cnt += 2) {
+ if (!(cnt % 16) && !IsTxReady(j)) {
+ dly = 0;
+ while (!IsTxReady(j)) {
+ if (dly++ > 5) {
+ dly = 0;
+ break;
+ }
+ udelay(10);
+ }
+ }
+ /* Add word 0 to G.729 frames for the 8021. Right now we don't do VAD/CNG */
+ if (j->play_codec == G729 && (cnt == 0 || cnt == 10 || cnt == 20)) {
+ if (j->write_buffer_rp[cnt] == 0 &&
+ j->write_buffer_rp[cnt + 1] == 0 &&
+ j->write_buffer_rp[cnt + 2] == 0 &&
+ j->write_buffer_rp[cnt + 3] == 0 &&
+ j->write_buffer_rp[cnt + 4] == 0 &&
+ j->write_buffer_rp[cnt + 5] == 0 &&
+ j->write_buffer_rp[cnt + 6] == 0 &&
+ j->write_buffer_rp[cnt + 7] == 0 &&
+ j->write_buffer_rp[cnt + 8] == 0 &&
+ j->write_buffer_rp[cnt + 9] == 0) {
+						/* someone is trying to write silence, so let's make this a type 0 frame */
+ outb_p(0x00, j->DSPbase + 0x0C);
+ outb_p(0x00, j->DSPbase + 0x0D);
+ } else {
+ /* so all other frames are type 1. */
+ outb_p(0x01, j->DSPbase + 0x0C);
+ outb_p(0x00, j->DSPbase + 0x0D);
+ }
+ }
+ outb_p(*(j->write_buffer_rp + cnt), j->DSPbase + 0x0C);
+ outb_p(*(j->write_buffer_rp + cnt + 1), j->DSPbase + 0x0D);
+ *(j->write_buffer_rp + cnt) = 0;
+ *(j->write_buffer_rp + cnt + 1) = 0;
+ }
+ j->write_buffer_rp += j->play_frame_size * 2;
+ if (j->write_buffer_rp >= j->write_buffer_end) {
+ j->write_buffer_rp = j->write_buffer;
+ }
+ j->write_buffers_empty++;
+ wake_up_interruptible(&j->write_q); /* Wake any blocked writers */
+
+ wake_up_interruptible(&j->poll_q); /* Wake any blocked selects */
+
+ ++j->frameswritten;
+ }
+ } else {
+ j->drybuffer++;
+ }
+ if(j->ixj_signals[SIG_WRITE_READY]) {
+ ixj_kill_fasync(j, SIG_WRITE_READY, POLL_OUT);
+ }
+}
+
+static int idle(IXJ *j)
+{
+ if (ixj_WriteDSPCommand(0x0000, j)) /* DSP Idle */
+
+ return 0;
+
+ if (j->ssr.high || j->ssr.low) {
+ return 0;
+ } else {
+ j->play_mode = -1;
+ j->flags.playing = 0;
+ j->rec_mode = -1;
+ j->flags.recording = 0;
+ return 1;
+ }
+}
+
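+/*
+ * The base frame size sets the DSP's working unit: 80, 160 or 240
+ * samples per frame, i.e. 10, 20 or 30 ms at the 8 kHz sample rate.
+ */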
+static int set_base_frame(IXJ *j, int size)
+{
+ unsigned short cmd;
+ int cnt;
+
+ idle(j);
+ j->cid_play_aec_level = j->aec_level;
+ aec_stop(j);
+ for (cnt = 0; cnt < 10; cnt++) {
+ if (idle(j))
+ break;
+ }
+ if (j->ssr.high || j->ssr.low)
+ return -1;
+ if (j->dsp.low != 0x20) {
+ switch (size) {
+ case 30:
+ cmd = 0x07F0;
+ /* Set Base Frame Size to 240 pg9-10 8021 */
+ break;
+ case 20:
+ cmd = 0x07A0;
+ /* Set Base Frame Size to 160 pg9-10 8021 */
+ break;
+ case 10:
+ cmd = 0x0750;
+ /* Set Base Frame Size to 80 pg9-10 8021 */
+ break;
+ default:
+ return -1;
+ }
+ } else {
+ if (size == 30)
+ return size;
+ else
+ return -1;
+ }
+ if (ixj_WriteDSPCommand(cmd, j)) {
+ j->baseframe.high = j->baseframe.low = 0xFF;
+ return -1;
+ } else {
+ j->baseframe.high = j->ssr.high;
+ j->baseframe.low = j->ssr.low;
+ /* If the status returned is 0x0000 (pg9-9 8021) the call failed */
+ if(j->baseframe.high == 0x00 && j->baseframe.low == 0x00) {
+ return -1;
+ }
+ }
+ ixj_aec_start(j, j->cid_play_aec_level);
+ return size;
+}
+
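+/*
+ * rec_frame_size is kept in 16-bit words per base frame (the read
+ * buffer is later allocated as rec_frame_size * 2 bytes), so the value
+ * depends on both the codec and the current base frame size.
+ */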
+static int set_rec_codec(IXJ *j, int rate)
+{
+ int retval = 0;
+
+ j->rec_codec = rate;
+
+ switch (rate) {
+ case G723_63:
+ if (j->ver.low != 0x12 || ixj_convert_loaded) {
+ j->rec_frame_size = 12;
+ j->rec_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case G723_53:
+ if (j->ver.low != 0x12 || ixj_convert_loaded) {
+ j->rec_frame_size = 10;
+ j->rec_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case TS85:
+ if (j->dsp.low == 0x20 || j->flags.ts85_loaded) {
+ j->rec_frame_size = 16;
+ j->rec_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case TS48:
+ if (j->ver.low != 0x12 || ixj_convert_loaded) {
+ j->rec_frame_size = 9;
+ j->rec_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case TS41:
+ if (j->ver.low != 0x12 || ixj_convert_loaded) {
+ j->rec_frame_size = 8;
+ j->rec_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case G728:
+ if (j->dsp.low != 0x20) {
+ j->rec_frame_size = 48;
+ j->rec_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case G729:
+ if (j->dsp.low != 0x20) {
+ if (!j->flags.g729_loaded) {
+ retval = 1;
+ break;
+ }
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->rec_frame_size = 10;
+ break;
+ case 0x50:
+ j->rec_frame_size = 5;
+ break;
+ default:
+ j->rec_frame_size = 15;
+ break;
+ }
+ j->rec_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case G729B:
+ if (j->dsp.low != 0x20) {
+ if (!j->flags.g729_loaded) {
+ retval = 1;
+ break;
+ }
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->rec_frame_size = 12;
+ break;
+ case 0x50:
+ j->rec_frame_size = 6;
+ break;
+ default:
+ j->rec_frame_size = 18;
+ break;
+ }
+ j->rec_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case ULAW:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->rec_frame_size = 80;
+ break;
+ case 0x50:
+ j->rec_frame_size = 40;
+ break;
+ default:
+ j->rec_frame_size = 120;
+ break;
+ }
+ j->rec_mode = 4;
+ break;
+ case ALAW:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->rec_frame_size = 80;
+ break;
+ case 0x50:
+ j->rec_frame_size = 40;
+ break;
+ default:
+ j->rec_frame_size = 120;
+ break;
+ }
+ j->rec_mode = 4;
+ break;
+ case LINEAR16:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->rec_frame_size = 160;
+ break;
+ case 0x50:
+ j->rec_frame_size = 80;
+ break;
+ default:
+ j->rec_frame_size = 240;
+ break;
+ }
+ j->rec_mode = 5;
+ break;
+ case LINEAR8:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->rec_frame_size = 80;
+ break;
+ case 0x50:
+ j->rec_frame_size = 40;
+ break;
+ default:
+ j->rec_frame_size = 120;
+ break;
+ }
+ j->rec_mode = 6;
+ break;
+ case WSS:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->rec_frame_size = 80;
+ break;
+ case 0x50:
+ j->rec_frame_size = 40;
+ break;
+ default:
+ j->rec_frame_size = 120;
+ break;
+ }
+ j->rec_mode = 7;
+ break;
+ default:
+ kfree(j->read_buffer);
+ j->rec_frame_size = 0;
+ j->rec_mode = -1;
+ j->read_buffer = NULL;
+ j->read_buffer_size = 0;
+ retval = 1;
+ break;
+ }
+ return retval;
+}
+
+static int ixj_record_start(IXJ *j)
+{
+ unsigned short cmd = 0x0000;
+
+ if (j->read_buffer) {
+ ixj_record_stop(j);
+ }
+ j->flags.recording = 1;
+ ixj_WriteDSPCommand(0x0FE0, j); /* Put the DSP in full power mode. */
+
+ if(ixjdebug & 0x0002)
+ printk("IXJ %d Starting Record Codec %d at %ld\n", j->board, j->rec_codec, jiffies);
+
+ if (!j->rec_mode) {
+ switch (j->rec_codec) {
+ case G723_63:
+ cmd = 0x5131;
+ break;
+ case G723_53:
+ cmd = 0x5132;
+ break;
+ case TS85:
+ cmd = 0x5130; /* TrueSpeech 8.5 */
+
+ break;
+ case TS48:
+ cmd = 0x5133; /* TrueSpeech 4.8 */
+
+ break;
+ case TS41:
+ cmd = 0x5134; /* TrueSpeech 4.1 */
+
+ break;
+ case G728:
+ cmd = 0x5135;
+ break;
+ case G729:
+ case G729B:
+ cmd = 0x5136;
+ break;
+ default:
+ return 1;
+ }
+ if (ixj_WriteDSPCommand(cmd, j))
+ return -1;
+ }
+	if (!j->read_buffer) {
+		j->read_buffer = kmalloc(j->rec_frame_size * 2, GFP_ATOMIC);
+		if (!j->read_buffer) {
+			printk("Read buffer allocation for ixj board %d failed!\n", j->board);
+			return -ENOMEM;
+		}
+	}
+ j->read_buffer_size = j->rec_frame_size * 2;
+
+ if (ixj_WriteDSPCommand(0x5102, j)) /* Set Poll sync mode */
+
+ return -1;
+
+ switch (j->rec_mode) {
+ case 0:
+ cmd = 0x1C03; /* Record C1 */
+
+ break;
+ case 4:
+ if (j->ver.low == 0x12) {
+ cmd = 0x1E03; /* Record C1 */
+
+ } else {
+ cmd = 0x1E01; /* Record C1 */
+
+ }
+ break;
+ case 5:
+ if (j->ver.low == 0x12) {
+ cmd = 0x1E83; /* Record C1 */
+
+ } else {
+ cmd = 0x1E81; /* Record C1 */
+
+ }
+ break;
+ case 6:
+ if (j->ver.low == 0x12) {
+ cmd = 0x1F03; /* Record C1 */
+
+ } else {
+ cmd = 0x1F01; /* Record C1 */
+
+ }
+ break;
+ case 7:
+ if (j->ver.low == 0x12) {
+ cmd = 0x1F83; /* Record C1 */
+ } else {
+ cmd = 0x1F81; /* Record C1 */
+ }
+ break;
+ }
+ if (ixj_WriteDSPCommand(cmd, j))
+ return -1;
+
+ if (j->flags.playing) {
+ ixj_aec_start(j, j->aec_level);
+ }
+ return 0;
+}
+
+static void ixj_record_stop(IXJ *j)
+{
+ if (ixjdebug & 0x0002)
+ printk("IXJ %d Stopping Record Codec %d at %ld\n", j->board, j->rec_codec, jiffies);
+
+ kfree(j->read_buffer);
+ j->read_buffer = NULL;
+ j->read_buffer_size = 0;
+ if (j->rec_mode > -1) {
+ ixj_WriteDSPCommand(0x5120, j);
+ j->rec_mode = -1;
+ }
+ j->flags.recording = 0;
+}
+static void ixj_vad(IXJ *j, int arg)
+{
+ if (arg)
+ ixj_WriteDSPCommand(0x513F, j);
+ else
+ ixj_WriteDSPCommand(0x513E, j);
+}
+
+static void set_rec_depth(IXJ *j, int depth)
+{
+ if (depth > 60)
+ depth = 60;
+ if (depth < 0)
+ depth = 0;
+ ixj_WriteDSPCommand(0x5180 + depth, j);
+}
+
+static void set_dtmf_prescale(IXJ *j, int volume)
+{
+ ixj_WriteDSPCommand(0xCF07, j);
+ ixj_WriteDSPCommand(volume, j);
+}
+
+static int get_dtmf_prescale(IXJ *j)
+{
+ ixj_WriteDSPCommand(0xCF05, j);
+ return j->ssr.high << 8 | j->ssr.low;
+}
+
+static void set_rec_volume(IXJ *j, int volume)
+{
+ if(j->aec_level == AEC_AGC) {
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: /dev/phone%d Setting AGC Threshold to 0x%4.4x\n", j->board, volume);
+ ixj_WriteDSPCommand(0xCF96, j);
+ ixj_WriteDSPCommand(volume, j);
+ } else {
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: /dev/phone %d Setting Record Volume to 0x%4.4x\n", j->board, volume);
+ ixj_WriteDSPCommand(0xCF03, j);
+ ixj_WriteDSPCommand(volume, j);
+ }
+}
+
+static int set_rec_volume_linear(IXJ *j, int volume)
+{
+ int newvolume, dsprecmax;
+
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: /dev/phone %d Setting Linear Record Volume to 0x%4.4x\n", j->board, volume);
+ if(volume > 100 || volume < 0) {
+ return -1;
+ }
+
+	/* Normalize the perceived volume across the different cards by compensating for their hardware differences */
+ switch (j->cardtype) {
+ case QTI_PHONEJACK:
+ dsprecmax = 0x440;
+ break;
+ case QTI_LINEJACK:
+ dsprecmax = 0x180;
+		ixj_mixer(0x0203, j);	/* Voice Left Volume unmute 6dB */
+		ixj_mixer(0x0303, j);	/* Voice Right Volume unmute 6dB */
+		ixj_mixer(0x0C00, j);	/* Mono1 unmute 12dB */
+ break;
+ case QTI_PHONEJACK_LITE:
+ dsprecmax = 0x4C0;
+ break;
+ case QTI_PHONEJACK_PCI:
+ dsprecmax = 0x100;
+ break;
+ case QTI_PHONECARD:
+ dsprecmax = 0x400;
+ break;
+ default:
+ return -1;
+ }
+ newvolume = (dsprecmax * volume) / 100;
+ set_rec_volume(j, newvolume);
+ return 0;
+}
+
+static int get_rec_volume(IXJ *j)
+{
+ if(j->aec_level == AEC_AGC) {
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Getting AGC Threshold\n");
+ ixj_WriteDSPCommand(0xCF86, j);
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "AGC Threshold is 0x%2.2x%2.2x\n", j->ssr.high, j->ssr.low);
+ return j->ssr.high << 8 | j->ssr.low;
+ } else {
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Getting Record Volume\n");
+ ixj_WriteDSPCommand(0xCF01, j);
+ return j->ssr.high << 8 | j->ssr.low;
+ }
+}
+
+static int get_rec_volume_linear(IXJ *j)
+{
+ int volume, newvolume, dsprecmax;
+
+ switch (j->cardtype) {
+ case QTI_PHONEJACK:
+ dsprecmax = 0x440;
+ break;
+ case QTI_LINEJACK:
+ dsprecmax = 0x180;
+ break;
+ case QTI_PHONEJACK_LITE:
+ dsprecmax = 0x4C0;
+ break;
+ case QTI_PHONEJACK_PCI:
+ dsprecmax = 0x100;
+ break;
+ case QTI_PHONECARD:
+ dsprecmax = 0x400;
+ break;
+ default:
+ return -1;
+ }
+ volume = get_rec_volume(j);
+ newvolume = (volume * 100) / dsprecmax;
+ if(newvolume > 100)
+ newvolume = 100;
+ return newvolume;
+}
+
+static int get_rec_level(IXJ *j)
+{
+ int retval;
+
+ ixj_WriteDSPCommand(0xCF88, j);
+
+ retval = j->ssr.high << 8 | j->ssr.low;
+ retval = (retval * 256) / 240;
+ return retval;
+}
+
+static void ixj_aec_start(IXJ *j, int level)
+{
+ j->aec_level = level;
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "AGC set = 0x%2.2x\n", j->aec_level);
+ if (!level) {
+ aec_stop(j);
+ } else {
+ if (j->rec_codec == G729 || j->play_codec == G729 || j->rec_codec == G729B || j->play_codec == G729B) {
+ ixj_WriteDSPCommand(0xE022, j); /* Move AEC filter buffer */
+
+ ixj_WriteDSPCommand(0x0300, j);
+ }
+ ixj_WriteDSPCommand(0xB001, j); /* AEC On */
+
+ ixj_WriteDSPCommand(0xE013, j); /* Advanced AEC C1 */
+
+ switch (level) {
+ case AEC_LOW:
+ ixj_WriteDSPCommand(0x0000, j); /* Advanced AEC C2 = off */
+
+ ixj_WriteDSPCommand(0xE011, j);
+ ixj_WriteDSPCommand(0xFFFF, j);
+
+ ixj_WriteDSPCommand(0xCF97, j); /* Set AGC Enable */
+ ixj_WriteDSPCommand(0x0000, j); /* to off */
+
+ break;
+
+ case AEC_MED:
+ ixj_WriteDSPCommand(0x0600, j); /* Advanced AEC C2 = on medium */
+
+ ixj_WriteDSPCommand(0xE011, j);
+ ixj_WriteDSPCommand(0x0080, j);
+
+ ixj_WriteDSPCommand(0xCF97, j); /* Set AGC Enable */
+ ixj_WriteDSPCommand(0x0000, j); /* to off */
+
+ break;
+
+ case AEC_HIGH:
+ ixj_WriteDSPCommand(0x0C00, j); /* Advanced AEC C2 = on high */
+
+ ixj_WriteDSPCommand(0xE011, j);
+ ixj_WriteDSPCommand(0x0080, j);
+
+ ixj_WriteDSPCommand(0xCF97, j); /* Set AGC Enable */
+ ixj_WriteDSPCommand(0x0000, j); /* to off */
+
+ break;
+
+ case AEC_AGC:
+			/* First we have to put the AEC into advanced auto mode so that AGC will not conflict with it */
+ ixj_WriteDSPCommand(0x0002, j); /* Attenuation scaling factor of 2 */
+
+ ixj_WriteDSPCommand(0xE011, j);
+ ixj_WriteDSPCommand(0x0100, j); /* Higher Threshold Floor */
+
+ ixj_WriteDSPCommand(0xE012, j); /* Set Train and Lock */
+
+ if(j->cardtype == QTI_LINEJACK || j->cardtype == QTI_PHONECARD)
+ ixj_WriteDSPCommand(0x0224, j);
+ else
+ ixj_WriteDSPCommand(0x1224, j);
+
+ ixj_WriteDSPCommand(0xE014, j);
+ ixj_WriteDSPCommand(0x0003, j); /* Lock threshold at 3dB */
+
+ ixj_WriteDSPCommand(0xE338, j); /* Set Echo Suppresser Attenuation to 0dB */
+
+ /* Now we can set the AGC initial parameters and turn it on */
+ ixj_WriteDSPCommand(0xCF90, j); /* Set AGC Minimum gain */
+ ixj_WriteDSPCommand(0x0020, j); /* to 0.125 (-18dB) */
+
+ ixj_WriteDSPCommand(0xCF91, j); /* Set AGC Maximum gain */
+ ixj_WriteDSPCommand(0x1000, j); /* to 16 (24dB) */
+
+ ixj_WriteDSPCommand(0xCF92, j); /* Set AGC start gain */
+ ixj_WriteDSPCommand(0x0800, j); /* to 8 (+18dB) */
+
+ ixj_WriteDSPCommand(0xCF93, j); /* Set AGC hold time */
+ ixj_WriteDSPCommand(0x1F40, j); /* to 2 seconds (units are 250us) */
+
+ ixj_WriteDSPCommand(0xCF94, j); /* Set AGC Attack Time Constant */
+ ixj_WriteDSPCommand(0x0005, j); /* to 8ms */
+
+ ixj_WriteDSPCommand(0xCF95, j); /* Set AGC Decay Time Constant */
+ ixj_WriteDSPCommand(0x000D, j); /* to 4096ms */
+
+ ixj_WriteDSPCommand(0xCF96, j); /* Set AGC Attack Threshold */
+ ixj_WriteDSPCommand(0x1200, j); /* to 25% */
+
+ ixj_WriteDSPCommand(0xCF97, j); /* Set AGC Enable */
+ ixj_WriteDSPCommand(0x0001, j); /* to on */
+
+ break;
+
+ case AEC_AUTO:
+ ixj_WriteDSPCommand(0x0002, j); /* Attenuation scaling factor of 2 */
+
+ ixj_WriteDSPCommand(0xE011, j);
+ ixj_WriteDSPCommand(0x0100, j); /* Higher Threshold Floor */
+
+ ixj_WriteDSPCommand(0xE012, j); /* Set Train and Lock */
+
+ if(j->cardtype == QTI_LINEJACK || j->cardtype == QTI_PHONECARD)
+ ixj_WriteDSPCommand(0x0224, j);
+ else
+ ixj_WriteDSPCommand(0x1224, j);
+
+ ixj_WriteDSPCommand(0xE014, j);
+ ixj_WriteDSPCommand(0x0003, j); /* Lock threshold at 3dB */
+
+ ixj_WriteDSPCommand(0xE338, j); /* Set Echo Suppresser Attenuation to 0dB */
+
+ break;
+ }
+ }
+}
+
+static void aec_stop(IXJ *j)
+{
+ j->aec_level = AEC_OFF;
+ if (j->rec_codec == G729 || j->play_codec == G729 || j->rec_codec == G729B || j->play_codec == G729B) {
+ ixj_WriteDSPCommand(0xE022, j); /* Move AEC filter buffer back */
+
+ ixj_WriteDSPCommand(0x0700, j);
+ }
+	if (j->play_mode != -1 && j->rec_mode != -1) {
+		ixj_WriteDSPCommand(0xB002, j);	/* AEC Stop */
+	}
+}
+
+static int set_play_codec(IXJ *j, int rate)
+{
+ int retval = 0;
+
+ j->play_codec = rate;
+
+ switch (rate) {
+ case G723_63:
+ if (j->ver.low != 0x12 || ixj_convert_loaded) {
+ j->play_frame_size = 12;
+ j->play_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case G723_53:
+ if (j->ver.low != 0x12 || ixj_convert_loaded) {
+ j->play_frame_size = 10;
+ j->play_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case TS85:
+ if (j->dsp.low == 0x20 || j->flags.ts85_loaded) {
+ j->play_frame_size = 16;
+ j->play_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case TS48:
+ if (j->ver.low != 0x12 || ixj_convert_loaded) {
+ j->play_frame_size = 9;
+ j->play_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case TS41:
+ if (j->ver.low != 0x12 || ixj_convert_loaded) {
+ j->play_frame_size = 8;
+ j->play_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case G728:
+ if (j->dsp.low != 0x20) {
+ j->play_frame_size = 48;
+ j->play_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case G729:
+ if (j->dsp.low != 0x20) {
+ if (!j->flags.g729_loaded) {
+ retval = 1;
+ break;
+ }
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->play_frame_size = 10;
+ break;
+ case 0x50:
+ j->play_frame_size = 5;
+ break;
+ default:
+ j->play_frame_size = 15;
+ break;
+ }
+ j->play_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case G729B:
+ if (j->dsp.low != 0x20) {
+ if (!j->flags.g729_loaded) {
+ retval = 1;
+ break;
+ }
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->play_frame_size = 12;
+ break;
+ case 0x50:
+ j->play_frame_size = 6;
+ break;
+ default:
+ j->play_frame_size = 18;
+ break;
+ }
+ j->play_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case ULAW:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->play_frame_size = 80;
+ break;
+ case 0x50:
+ j->play_frame_size = 40;
+ break;
+ default:
+ j->play_frame_size = 120;
+ break;
+ }
+ j->play_mode = 2;
+ break;
+ case ALAW:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->play_frame_size = 80;
+ break;
+ case 0x50:
+ j->play_frame_size = 40;
+ break;
+ default:
+ j->play_frame_size = 120;
+ break;
+ }
+ j->play_mode = 2;
+ break;
+ case LINEAR16:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->play_frame_size = 160;
+ break;
+ case 0x50:
+ j->play_frame_size = 80;
+ break;
+ default:
+ j->play_frame_size = 240;
+ break;
+ }
+ j->play_mode = 6;
+ break;
+ case LINEAR8:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->play_frame_size = 80;
+ break;
+ case 0x50:
+ j->play_frame_size = 40;
+ break;
+ default:
+ j->play_frame_size = 120;
+ break;
+ }
+ j->play_mode = 4;
+ break;
+ case WSS:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->play_frame_size = 80;
+ break;
+ case 0x50:
+ j->play_frame_size = 40;
+ break;
+ default:
+ j->play_frame_size = 120;
+ break;
+ }
+ j->play_mode = 5;
+ break;
+ default:
+ kfree(j->write_buffer);
+ j->play_frame_size = 0;
+ j->play_mode = -1;
+ j->write_buffer = NULL;
+ j->write_buffer_size = 0;
+ retval = 1;
+ break;
+ }
+ return retval;
+}
+
+static int ixj_play_start(IXJ *j)
+{
+ unsigned short cmd = 0x0000;
+
+ if (j->write_buffer) {
+ ixj_play_stop(j);
+ }
+
+ if(ixjdebug & 0x0002)
+ printk("IXJ %d Starting Play Codec %d at %ld\n", j->board, j->play_codec, jiffies);
+
+ j->flags.playing = 1;
+ ixj_WriteDSPCommand(0x0FE0, j); /* Put the DSP in full power mode. */
+
+ j->flags.play_first_frame = 1;
+ j->drybuffer = 0;
+
+ if (!j->play_mode) {
+ switch (j->play_codec) {
+ case G723_63:
+ cmd = 0x5231;
+ break;
+ case G723_53:
+ cmd = 0x5232;
+ break;
+ case TS85:
+ cmd = 0x5230; /* TrueSpeech 8.5 */
+
+ break;
+ case TS48:
+ cmd = 0x5233; /* TrueSpeech 4.8 */
+
+ break;
+ case TS41:
+ cmd = 0x5234; /* TrueSpeech 4.1 */
+
+ break;
+ case G728:
+ cmd = 0x5235;
+ break;
+ case G729:
+ case G729B:
+ cmd = 0x5236;
+ break;
+ default:
+ return 1;
+ }
+ if (ixj_WriteDSPCommand(cmd, j))
+ return -1;
+ }
+ j->write_buffer = kmalloc(j->play_frame_size * 2, GFP_ATOMIC);
+ if (!j->write_buffer) {
+ printk("Write buffer allocation for ixj board %d failed!\n", j->board);
+ return -ENOMEM;
+ }
+/* j->write_buffers_empty = 2; */
+ j->write_buffers_empty = 1;
+ j->write_buffer_size = j->play_frame_size * 2;
+ j->write_buffer_end = j->write_buffer + j->play_frame_size * 2;
+ j->write_buffer_rp = j->write_buffer_wp = j->write_buffer;
+
+ if (ixj_WriteDSPCommand(0x5202, j)) /* Set Poll sync mode */
+
+ return -1;
+
+ switch (j->play_mode) {
+ case 0:
+ cmd = 0x2C03;
+ break;
+ case 2:
+ if (j->ver.low == 0x12) {
+ cmd = 0x2C23;
+ } else {
+ cmd = 0x2C21;
+ }
+ break;
+ case 4:
+ if (j->ver.low == 0x12) {
+ cmd = 0x2C43;
+ } else {
+ cmd = 0x2C41;
+ }
+ break;
+ case 5:
+ if (j->ver.low == 0x12) {
+ cmd = 0x2C53;
+ } else {
+ cmd = 0x2C51;
+ }
+ break;
+ case 6:
+ if (j->ver.low == 0x12) {
+ cmd = 0x2C63;
+ } else {
+ cmd = 0x2C61;
+ }
+ break;
+ }
+ if (ixj_WriteDSPCommand(cmd, j))
+ return -1;
+
+ if (ixj_WriteDSPCommand(0x2000, j)) /* Playback C2 */
+ return -1;
+
+ if (ixj_WriteDSPCommand(0x2000 + j->play_frame_size, j)) /* Playback C3 */
+ return -1;
+
+ if (j->flags.recording) {
+ ixj_aec_start(j, j->aec_level);
+ }
+
+ return 0;
+}
+
+static void ixj_play_stop(IXJ *j)
+{
+ if (ixjdebug & 0x0002)
+ printk("IXJ %d Stopping Play Codec %d at %ld\n", j->board, j->play_codec, jiffies);
+
+ kfree(j->write_buffer);
+ j->write_buffer = NULL;
+ j->write_buffer_size = 0;
+ if (j->play_mode > -1) {
+ ixj_WriteDSPCommand(0x5221, j); /* Stop playback and flush buffers. 8022 reference page 9-40 */
+
+ j->play_mode = -1;
+ }
+ j->flags.playing = 0;
+}
+
+static inline int get_play_level(IXJ *j)
+{
+	int retval;
+
+	ixj_WriteDSPCommand(0xCF8F, j); /* 8022 Reference page 9-38 */
+	retval = j->ssr.high << 8 | j->ssr.low;
+	retval = (retval * 256) / 240;	/* scale to match get_rec_level() */
+	return retval;
+}
+
+static unsigned int ixj_poll(struct file *file_p, poll_table * wait)
+{
+ unsigned int mask = 0;
+
+ IXJ *j = get_ixj(NUM(file_p->f_path.dentry->d_inode));
+
+ poll_wait(file_p, &(j->poll_q), wait);
+ if (j->read_buffer_ready > 0)
+ mask |= POLLIN | POLLRDNORM; /* readable */
+ if (j->write_buffers_empty > 0)
+ mask |= POLLOUT | POLLWRNORM; /* writable */
+ if (j->ex.bytes)
+ mask |= POLLPRI;
+ return mask;
+}
+
+static int ixj_play_tone(IXJ *j, char tone)
+{
+ if (!j->tone_state) {
+ if(ixjdebug & 0x0002) {
+ printk("IXJ %d starting tone %d at %ld\n", j->board, tone, jiffies);
+ }
+ if (j->dsp.low == 0x20) {
+ idle(j);
+ }
+ j->tone_start_jif = jiffies;
+
+ j->tone_state = 1;
+ }
+
+ j->tone_index = tone;
+ if (ixj_WriteDSPCommand(0x6000 + j->tone_index, j))
+ return -1;
+
+ return 0;
+}
+
+static int ixj_set_tone_on(unsigned short arg, IXJ *j)
+{
+ j->tone_on_time = arg;
+
+ if (ixj_WriteDSPCommand(0x6E04, j)) /* Set Tone On Period */
+
+ return -1;
+
+ if (ixj_WriteDSPCommand(arg, j))
+ return -1;
+
+ return 0;
+}
+
+static int SCI_WaitHighSCI(IXJ *j)
+{
+ int cnt;
+
+ j->pld_scrr.byte = inb_p(j->XILINXbase);
+ if (!j->pld_scrr.bits.sci) {
+ for (cnt = 0; cnt < 10; cnt++) {
+ udelay(32);
+ j->pld_scrr.byte = inb_p(j->XILINXbase);
+
+ if ((j->pld_scrr.bits.sci))
+ return 1;
+ }
+ if (ixjdebug & 0x0001)
+ printk(KERN_INFO "SCI Wait High failed %x\n", j->pld_scrr.byte);
+ return 0;
+ } else
+ return 1;
+}
+
+static int SCI_WaitLowSCI(IXJ *j)
+{
+ int cnt;
+
+ j->pld_scrr.byte = inb_p(j->XILINXbase);
+ if (j->pld_scrr.bits.sci) {
+ for (cnt = 0; cnt < 10; cnt++) {
+ udelay(32);
+ j->pld_scrr.byte = inb_p(j->XILINXbase);
+
+ if (!(j->pld_scrr.bits.sci))
+ return 1;
+ }
+ if (ixjdebug & 0x0001)
+ printk(KERN_INFO "SCI Wait Low failed %x\n", j->pld_scrr.byte);
+ return 0;
+ } else
+ return 1;
+}
+
+static int SCI_Control(IXJ *j, int control)
+{
+ switch (control) {
+ case SCI_End:
+ j->pld_scrw.bits.c0 = 0; /* Set PLD Serial control interface */
+
+ j->pld_scrw.bits.c1 = 0; /* to no selection */
+
+ break;
+ case SCI_Enable_DAA:
+ j->pld_scrw.bits.c0 = 1; /* Set PLD Serial control interface */
+
+ j->pld_scrw.bits.c1 = 0; /* to write to DAA */
+
+ break;
+ case SCI_Enable_Mixer:
+ j->pld_scrw.bits.c0 = 0; /* Set PLD Serial control interface */
+
+ j->pld_scrw.bits.c1 = 1; /* to write to mixer */
+
+ break;
+ case SCI_Enable_EEPROM:
+ j->pld_scrw.bits.c0 = 1; /* Set PLD Serial control interface */
+
+ j->pld_scrw.bits.c1 = 1; /* to write to EEPROM */
+
+ break;
+ default:
+ return 0;
+ }
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+
+ switch (control) {
+ case SCI_End:
+ return 1;
+ case SCI_Enable_DAA:
+ case SCI_Enable_Mixer:
+ case SCI_Enable_EEPROM:
+ if (!SCI_WaitHighSCI(j))
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ return 1;
+}
+
+static int SCI_Prepare(IXJ *j)
+{
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ return 1;
+}
+
+static int ixj_get_mixer(long val, IXJ *j)
+{
+ int reg = (val & 0x1F00) >> 8;
+ return j->mix.vol[reg];
+}
+
+static int ixj_mixer(long val, IXJ *j)
+{
+ BYTES bytes;
+
+ bytes.high = (val & 0x1F00) >> 8;
+ bytes.low = val & 0x00FF;
+
+ /* save mixer value so we can get back later on */
+ j->mix.vol[bytes.high] = bytes.low;
+
+ outb_p(bytes.high & 0x1F, j->XILINXbase + 0x03); /* Load Mixer Address */
+
+ outb_p(bytes.low, j->XILINXbase + 0x02); /* Load Mixer Data */
+
+ SCI_Control(j, SCI_Enable_Mixer);
+
+ SCI_Control(j, SCI_End);
+
+ return 0;
+}
+
+static int daa_load(BYTES * p_bytes, IXJ *j)
+{
+ outb_p(p_bytes->high, j->XILINXbase + 0x03);
+ outb_p(p_bytes->low, j->XILINXbase + 0x02);
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+ else
+ return 1;
+}
+
+static int ixj_daa_cr4(IXJ *j, char reg)
+{
+ BYTES bytes;
+
+ switch (j->daa_mode) {
+ case SOP_PU_SLEEP:
+ bytes.high = 0x14;
+ break;
+ case SOP_PU_RINGING:
+ bytes.high = 0x54;
+ break;
+ case SOP_PU_CONVERSATION:
+ bytes.high = 0x94;
+ break;
+ case SOP_PU_PULSEDIALING:
+ bytes.high = 0xD4;
+ break;
+ }
+
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = reg;
+
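+	/* Copy the 2-bit AGX gain setting into AGR_Z with the bit order
+	   swapped (0->0, 1->2, 2->1, 3->3). */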
+ switch (j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.bitreg.AGX) {
+ case 0:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.bitreg.AGR_Z = 0;
+ break;
+ case 1:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.bitreg.AGR_Z = 2;
+ break;
+ case 2:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.bitreg.AGR_Z = 1;
+ break;
+ case 3:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.bitreg.AGR_Z = 3;
+ break;
+ }
+
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg;
+
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ return 1;
+}
+
+static char daa_int_read(IXJ *j)
+{
+ BYTES bytes;
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ bytes.high = 0x38;
+ bytes.low = 0x00;
+ outb_p(bytes.high, j->XILINXbase + 0x03);
+ outb_p(bytes.low, j->XILINXbase + 0x02);
+
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+
+ bytes.high = inb_p(j->XILINXbase + 0x03);
+ bytes.low = inb_p(j->XILINXbase + 0x02);
+ if (bytes.low != ALISDAA_ID_BYTE) {
+ if (ixjdebug & 0x0001)
+ printk("Cannot read DAA ID Byte high = %d low = %d\n", bytes.high, bytes.low);
+ return 0;
+ }
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+
+ bytes.high = inb_p(j->XILINXbase + 0x03);
+ bytes.low = inb_p(j->XILINXbase + 0x02);
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.reg = bytes.high;
+
+ return 1;
+}
+
+static char daa_CR_read(IXJ *j, int cr)
+{
+ IXJ_WORD wdata;
+ BYTES bytes;
+
+ if (!SCI_Prepare(j))
+ return 0;
+
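+	/* The high nibble of the address sent to the DAA appears to encode
+	   the current power-up mode; pick the base that matches. */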
+ switch (j->daa_mode) {
+ case SOP_PU_SLEEP:
+ bytes.high = 0x30 + cr;
+ break;
+ case SOP_PU_RINGING:
+ bytes.high = 0x70 + cr;
+ break;
+ case SOP_PU_CONVERSATION:
+ bytes.high = 0xB0 + cr;
+ break;
+ case SOP_PU_PULSEDIALING:
+ default:
+ bytes.high = 0xF0 + cr;
+ break;
+ }
+
+ bytes.low = 0x00;
+
+ outb_p(bytes.high, j->XILINXbase + 0x03);
+ outb_p(bytes.low, j->XILINXbase + 0x02);
+
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+
+ bytes.high = inb_p(j->XILINXbase + 0x03);
+ bytes.low = inb_p(j->XILINXbase + 0x02);
+ if (bytes.low != ALISDAA_ID_BYTE) {
+ if (ixjdebug & 0x0001)
+ printk("Cannot read DAA ID Byte high = %d low = %d\n", bytes.high, bytes.low);
+ return 0;
+ }
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+
+ wdata.word = inw_p(j->XILINXbase + 0x02);
+
+ switch(cr){
+ case 5:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr5.reg = wdata.bytes.high;
+ break;
+ case 4:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = wdata.bytes.high;
+ break;
+ case 3:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = wdata.bytes.high;
+ break;
+ case 2:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = wdata.bytes.high;
+ break;
+ case 1:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = wdata.bytes.high;
+ break;
+ case 0:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = wdata.bytes.high;
+ break;
+ default:
+ return 0;
+ }
+ return 1;
+}
+
+static int ixj_daa_cid_reset(IXJ *j)
+{
+ int i;
+ BYTES bytes;
+
+ if (ixjdebug & 0x0002)
+ printk("DAA Clearing CID ram\n");
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ bytes.high = 0x58;
+ bytes.low = 0x00;
+ outb_p(bytes.high, j->XILINXbase + 0x03);
+ outb_p(bytes.low, j->XILINXbase + 0x02);
+
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+
+ if (!SCI_WaitHighSCI(j))
+ return 0;
+
+ for (i = 0; i < ALISDAA_CALLERID_SIZE - 1; i += 2) {
+ bytes.high = bytes.low = 0x00;
+ outb_p(bytes.high, j->XILINXbase + 0x03);
+
+ if (i < ALISDAA_CALLERID_SIZE - 1)
+ outb_p(bytes.low, j->XILINXbase + 0x02);
+
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+
+ if (!SCI_WaitHighSCI(j))
+ return 0;
+
+ }
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+
+ if (ixjdebug & 0x0002)
+ printk("DAA CID ram cleared\n");
+
+ return 1;
+}
+
+static int ixj_daa_cid_read(IXJ *j)
+{
+ int i;
+ BYTES bytes;
+ char CID[ALISDAA_CALLERID_SIZE];
+ bool mContinue;
+ char *pIn, *pOut;
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ bytes.high = 0x78;
+ bytes.low = 0x00;
+ outb_p(bytes.high, j->XILINXbase + 0x03);
+ outb_p(bytes.low, j->XILINXbase + 0x02);
+
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+
+ if (!SCI_WaitHighSCI(j))
+ return 0;
+
+ bytes.high = inb_p(j->XILINXbase + 0x03);
+ bytes.low = inb_p(j->XILINXbase + 0x02);
+ if (bytes.low != ALISDAA_ID_BYTE) {
+ if (ixjdebug & 0x0001)
+ printk("DAA Get Version Cannot read DAA ID Byte high = %d low = %d\n", bytes.high, bytes.low);
+ return 0;
+ }
+ for (i = 0; i < ALISDAA_CALLERID_SIZE; i += 2) {
+ bytes.high = bytes.low = 0x00;
+ outb_p(bytes.high, j->XILINXbase + 0x03);
+ outb_p(bytes.low, j->XILINXbase + 0x02);
+
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+
+ if (!SCI_WaitHighSCI(j))
+ return 0;
+
+ CID[i + 0] = inb_p(j->XILINXbase + 0x03);
+ CID[i + 1] = inb_p(j->XILINXbase + 0x02);
+ }
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+
+ pIn = CID;
+ pOut = j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID;
+ mContinue = true;
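+	/*
+	 * Unpack the raw CID ram: each five input bytes appear to carry
+	 * four data bytes split across 10-bit fields, with two-bit markers
+	 * indicating which fields are valid; stop at the first group whose
+	 * marker does not match.
+	 */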
+ while (mContinue) {
+ if ((pIn[1] & 0x03) == 0x01) {
+ pOut[0] = pIn[0];
+ }
+ if ((pIn[2] & 0x0c) == 0x04) {
+ pOut[1] = ((pIn[2] & 0x03) << 6) | ((pIn[1] & 0xfc) >> 2);
+ }
+ if ((pIn[3] & 0x30) == 0x10) {
+ pOut[2] = ((pIn[3] & 0x0f) << 4) | ((pIn[2] & 0xf0) >> 4);
+ }
+ if ((pIn[4] & 0xc0) == 0x40) {
+ pOut[3] = ((pIn[4] & 0x3f) << 2) | ((pIn[3] & 0xc0) >> 6);
+ } else {
+ mContinue = false;
+ }
+ pIn += 5, pOut += 4;
+ }
+ memset(&j->cid, 0, sizeof(PHONE_CID));
+ pOut = j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID;
+ pOut += 4;
+ strncpy(j->cid.month, pOut, 2);
+ pOut += 2;
+ strncpy(j->cid.day, pOut, 2);
+ pOut += 2;
+ strncpy(j->cid.hour, pOut, 2);
+ pOut += 2;
+ strncpy(j->cid.min, pOut, 2);
+ pOut += 3;
+ j->cid.numlen = *pOut;
+ pOut += 1;
+ strncpy(j->cid.number, pOut, j->cid.numlen);
+ pOut += j->cid.numlen + 1;
+ j->cid.namelen = *pOut;
+ pOut += 1;
+ strncpy(j->cid.name, pOut, j->cid.namelen);
+
+ ixj_daa_cid_reset(j);
+ return 1;
+}
+
+static char daa_get_version(IXJ *j)
+{
+ BYTES bytes;
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ bytes.high = 0x35;
+ bytes.low = 0x00;
+ outb_p(bytes.high, j->XILINXbase + 0x03);
+ outb_p(bytes.low, j->XILINXbase + 0x02);
+
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+
+ bytes.high = inb_p(j->XILINXbase + 0x03);
+ bytes.low = inb_p(j->XILINXbase + 0x02);
+ if (bytes.low != ALISDAA_ID_BYTE) {
+ if (ixjdebug & 0x0001)
+ printk("DAA Get Version Cannot read DAA ID Byte high = %d low = %d\n", bytes.high, bytes.low);
+ return 0;
+ }
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+
+ bytes.high = inb_p(j->XILINXbase + 0x03);
+ bytes.low = inb_p(j->XILINXbase + 0x02);
+ if (ixjdebug & 0x0002)
+ printk("DAA CR5 Byte high = 0x%x low = 0x%x\n", bytes.high, bytes.low);
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr5.reg = bytes.high;
+ return bytes.high;
+}
+
+static int daa_set_mode(IXJ *j, int mode)
+{
+ /* NOTE:
+ The DAA *MUST* be in the conversation mode if the
+ PSTN line is to be seized (PSTN line off-hook).
+ Taking the PSTN line off-hook while the DAA is in
+ a mode other than conversation mode will cause a
+ hardware failure of the ALIS-A part.
+
+ NOTE:
+ The DAA can only go to SLEEP, RINGING or PULSEDIALING modes
+ if the PSTN line is on-hook. Failure to have the PSTN line
+ in the on-hook state WILL CAUSE A HARDWARE FAILURE OF THE
+ ALIS-A part.
+ */
+
+ BYTES bytes;
+
+ j->flags.pstn_rmr = 0;
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ switch (mode) {
+ case SOP_PU_RESET:
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_slicw.bits.rly2 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ bytes.high = 0x10;
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
+ daa_load(&bytes, j);
+ if (!SCI_Prepare(j))
+ return 0;
+
+ j->daa_mode = SOP_PU_SLEEP;
+ break;
+ case SOP_PU_SLEEP:
+ if(j->daa_mode == SOP_PU_SLEEP)
+ {
+ break;
+ }
+ if (ixjdebug & 0x0008)
+ printk(KERN_INFO "phone DAA: SOP_PU_SLEEP at %ld\n", jiffies);
+/* if(j->daa_mode == SOP_PU_CONVERSATION) */
+ {
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_slicw.bits.rly2 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ bytes.high = 0x10;
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
+ daa_load(&bytes, j);
+ if (!SCI_Prepare(j))
+ return 0;
+ }
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_slicw.bits.rly2 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ bytes.high = 0x10;
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
+ daa_load(&bytes, j);
+ if (!SCI_Prepare(j))
+ return 0;
+
+ j->daa_mode = SOP_PU_SLEEP;
+ j->flags.pstn_ringing = 0;
+ j->ex.bits.pstn_ring = 0;
+ j->pstn_sleeptil = jiffies + (hertz / 4);
+ wake_up_interruptible(&j->read_q); /* Wake any blocked readers */
+ wake_up_interruptible(&j->write_q); /* Wake any blocked writers */
+ wake_up_interruptible(&j->poll_q); /* Wake any blocked selects */
+ break;
+ case SOP_PU_RINGING:
+ if (ixjdebug & 0x0008)
+ printk(KERN_INFO "phone DAA: SOP_PU_RINGING at %ld\n", jiffies);
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_slicw.bits.rly2 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ bytes.high = 0x50;
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
+ daa_load(&bytes, j);
+ if (!SCI_Prepare(j))
+ return 0;
+ j->daa_mode = SOP_PU_RINGING;
+ break;
+ case SOP_PU_CONVERSATION:
+ if (ixjdebug & 0x0008)
+ printk(KERN_INFO "phone DAA: SOP_PU_CONVERSATION at %ld\n", jiffies);
+ bytes.high = 0x90;
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
+ daa_load(&bytes, j);
+ if (!SCI_Prepare(j))
+ return 0;
+ j->pld_slicw.bits.rly2 = 1;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ j->pld_scrw.bits.daafsyncen = 1; /* Turn on DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ j->daa_mode = SOP_PU_CONVERSATION;
+ j->flags.pstn_ringing = 0;
+ j->ex.bits.pstn_ring = 0;
+ j->pstn_sleeptil = jiffies;
+ j->pstn_ring_start = j->pstn_ring_stop = j->pstn_ring_int = 0;
+ break;
+ case SOP_PU_PULSEDIALING:
+ if (ixjdebug & 0x0008)
+ printk(KERN_INFO "phone DAA: SOP_PU_PULSEDIALING at %ld\n", jiffies);
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_slicw.bits.rly2 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ bytes.high = 0xD0;
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
+ daa_load(&bytes, j);
+ if (!SCI_Prepare(j))
+ return 0;
+ j->daa_mode = SOP_PU_PULSEDIALING;
+ break;
+ default:
+ break;
+ }
+ return 1;
+}
+
+static int ixj_daa_write(IXJ *j)
+{
+ BYTES bytes;
+
+ j->flags.pstncheck = 1;
+
+ daa_set_mode(j, SOP_PU_SLEEP);
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+
+ bytes.high = 0x14;
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg;
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg;
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ bytes.high = 0x1F;
+ bytes.low = j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.XOP_xr6_W.reg;
+ bytes.low = j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg;
+ bytes.low = j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg;
+ bytes.low = j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.XOP_xr0_W.reg;
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ bytes.high = 0x00;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x01;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x02;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x03;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x04;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x05;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x06;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x07;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x08;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x09;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x0A;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x0B;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x0C;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x0D;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x0E;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x0F;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ udelay(32);
+ j->pld_scrr.byte = inb_p(j->XILINXbase);
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+
+ if (ixjdebug & 0x0002)
+ printk("DAA Coefficients Loaded\n");
+
+ j->flags.pstncheck = 0;
+ return 1;
+}
+
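+/*
+ * Tone on/off periods live in the DSP.  Going by the command comments
+ * below, 0x6E05 sets the off period (the value follows as a second
+ * word) while 0x6E06/0x6E07 only issue the query; the result is left
+ * in j->ssr for the caller to pick up, which is exactly what the
+ * PHONE_GET_TONE_ON_TIME/PHONE_GET_TONE_OFF_TIME ioctls do.
+ */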
+static int ixj_set_tone_off(unsigned short arg, IXJ *j)
+{
+ j->tone_off_time = arg;
+ if (ixj_WriteDSPCommand(0x6E05, j)) /* Set Tone Off Period */
+ return -1;
+ if (ixj_WriteDSPCommand(arg, j))
+ return -1;
+ return 0;
+}
+
+static int ixj_get_tone_on(IXJ *j)
+{
+ if (ixj_WriteDSPCommand(0x6E06, j)) /* Get Tone On Period */
+ return -1;
+ return 0;
+}
+
+static int ixj_get_tone_off(IXJ *j)
+{
+ if (ixj_WriteDSPCommand(0x6E07, j)) /* Get Tone Off Period */
+ return -1;
+ return 0;
+}
+
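+/*
+ * Call-progress tone helpers.  Each marks exactly one of the
+ * busytone/dialtone/ringback flags, programs an on/off cadence and
+ * starts a predefined tone index (27 = busy, 25 = dial, 26 =
+ * ringback).  Dial tone is skipped when dsp.low == 0x20, which
+ * elsewhere in the driver corresponds to the 8020 DSP.
+ */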
+static void ixj_busytone(IXJ *j)
+{
+ j->flags.ringback = 0;
+ j->flags.dialtone = 0;
+ j->flags.busytone = 1;
+ ixj_set_tone_on(0x07D0, j);
+ ixj_set_tone_off(0x07D0, j);
+ ixj_play_tone(j, 27);
+}
+
+static void ixj_dialtone(IXJ *j)
+{
+ j->flags.ringback = 0;
+ j->flags.dialtone = 1;
+ j->flags.busytone = 0;
+ if (j->dsp.low == 0x20) {
+ return;
+ } else {
+ ixj_set_tone_on(0xFFFF, j);
+ ixj_set_tone_off(0x0000, j);
+ ixj_play_tone(j, 25);
+ }
+}
+
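+/*
+ * Tear down any call-progress tone: clear the tone flags, cancel the
+ * tone and cadence state, free a user-loaded cadence if one is
+ * installed, and then either idle the DSP or restart whatever
+ * play/record activity was in progress.
+ */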
+static void ixj_cpt_stop(IXJ *j)
+{
+ if (j->tone_state || j->tone_cadence_state) {
+ j->flags.dialtone = 0;
+ j->flags.busytone = 0;
+ j->flags.ringback = 0;
+ ixj_set_tone_on(0x0001, j);
+ ixj_set_tone_off(0x0000, j);
+ ixj_play_tone(j, 0);
+ j->tone_state = j->tone_cadence_state = 0;
+ if (j->cadence_t) {
+ kfree(j->cadence_t->ce);
+ kfree(j->cadence_t);
+ j->cadence_t = NULL;
+ }
+ }
+ if (j->play_mode == -1 && j->rec_mode == -1)
+ idle(j);
+ if (j->play_mode != -1 && j->dsp.low == 0x20)
+ ixj_play_start(j);
+ if (j->rec_mode != -1 && j->dsp.low == 0x20)
+ ixj_record_start(j);
+}
+
+static void ixj_ringback(IXJ *j)
+{
+ j->flags.busytone = 0;
+ j->flags.dialtone = 0;
+ j->flags.ringback = 1;
+ ixj_set_tone_on(0x0FA0, j);
+ ixj_set_tone_off(0x2EE0, j);
+ ixj_play_tone(j, 26);
+}
+
+static void ixj_testram(IXJ *j)
+{
+ ixj_WriteDSPCommand(0x3001, j); /* Test External SRAM */
+}
+
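+/*
+ * Build a tone cadence from user space (IXJCTL_TONE_CADENCE).  The
+ * copy-in is done in three steps because IXJ_CADENCE ends in a user
+ * pointer: the element count and termination mode come over first,
+ * the ce pointer is fetched with get_user(), and only then is the
+ * element array itself copied.  A minimal user-space sketch, assuming
+ * the ixj ioctl headers (the timings and the REPEAT_ALL termination
+ * here are illustrative, not values this driver requires):
+ *
+ *	IXJ_CADENCE_ELEMENT ce[2] = {
+ *		{ .index = 1, .tone_on_time = 480, .tone_off_time = 480 },
+ *		{ .index = 1, .tone_on_time = 480, .tone_off_time = 1920 },
+ *	};
+ *	IXJ_CADENCE cad = { .elements_used = 2, .termination = REPEAT_ALL, .ce = ce };
+ *	ioctl(fd, IXJCTL_TONE_CADENCE, &cad);
+ */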
+static int ixj_build_cadence(IXJ *j, IXJ_CADENCE __user * cp)
+{
+ ixj_cadence *lcp;
+ IXJ_CADENCE_ELEMENT __user *cep;
+ IXJ_CADENCE_ELEMENT *lcep;
+ IXJ_TONE ti;
+ int err;
+
+ lcp = kmalloc(sizeof(ixj_cadence), GFP_KERNEL);
+ if (lcp == NULL)
+ return -ENOMEM;
+
+ err = -EFAULT;
+ if (copy_from_user(&lcp->elements_used,
+ &cp->elements_used, sizeof(int)))
+ goto out;
+ if (copy_from_user(&lcp->termination,
+ &cp->termination, sizeof(IXJ_CADENCE_TERM)))
+ goto out;
+ if (get_user(cep, &cp->ce))
+ goto out;
+
+ err = -EINVAL;
+ if ((unsigned)lcp->elements_used >= ~0U/sizeof(IXJ_CADENCE_ELEMENT))
+ goto out;
+
+ err = -ENOMEM;
+ lcep = kmalloc(sizeof(IXJ_CADENCE_ELEMENT) * lcp->elements_used, GFP_KERNEL);
+ if (!lcep)
+ goto out;
+
+ err = -EFAULT;
+ if (copy_from_user(lcep, cep, sizeof(IXJ_CADENCE_ELEMENT) * lcp->elements_used))
+ goto out1;
+
+ if (j->cadence_t) {
+ kfree(j->cadence_t->ce);
+ kfree(j->cadence_t);
+ }
+ lcp->ce = (void *) lcep;
+ j->cadence_t = lcp;
+ j->tone_cadence_state = 0;
+ ixj_set_tone_on(lcp->ce[0].tone_on_time, j);
+ ixj_set_tone_off(lcp->ce[0].tone_off_time, j);
+ if (j->cadence_t->ce[j->tone_cadence_state].freq0) {
+ ti.tone_index = j->cadence_t->ce[j->tone_cadence_state].index;
+ ti.freq0 = j->cadence_t->ce[j->tone_cadence_state].freq0;
+ ti.gain0 = j->cadence_t->ce[j->tone_cadence_state].gain0;
+ ti.freq1 = j->cadence_t->ce[j->tone_cadence_state].freq1;
+ ti.gain1 = j->cadence_t->ce[j->tone_cadence_state].gain1;
+ ixj_init_tone(j, &ti);
+ }
+ ixj_play_tone(j, lcp->ce[0].index);
+ return 1;
+out1:
+ kfree(lcep);
+out:
+ kfree(lcp);
+ return err;
+}
+
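+/*
+ * Load one of the six cadence filter slots (0-5) from user space
+ * (IXJCTL_FILTER_CADENCE).  Only the enable bits and the on/off
+ * target times come from the caller; the min/max windows are zeroed
+ * here and left for the cadence-matching code to fill in.  Elsewhere
+ * in the driver slot 4 carries the PSTN ring cadence and slot 5 is
+ * reset when ringing is stopped.
+ */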
+static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user * cp)
+{
+ IXJ_FILTER_CADENCE *lcp;
+ lcp = memdup_user(cp, sizeof(IXJ_FILTER_CADENCE));
+ if (IS_ERR(lcp)) {
+ if(ixjdebug & 0x0001) {
+ printk(KERN_INFO "Could not allocate memory for cadence or could not copy cadence to kernel\n");
+ }
+ return PTR_ERR(lcp);
+ }
+ if (lcp->filter > 5) {
+ if(ixjdebug & 0x0001) {
+ printk(KERN_INFO "Cadence out of range\n");
+ }
+ kfree(lcp);
+ return -EINVAL;
+ }
+ j->cadence_f[lcp->filter].state = 0;
+ j->cadence_f[lcp->filter].enable = lcp->enable;
+ j->filter_en[lcp->filter] = j->cadence_f[lcp->filter].en_filter = lcp->en_filter;
+ j->cadence_f[lcp->filter].on1 = lcp->on1;
+ j->cadence_f[lcp->filter].on1min = 0;
+ j->cadence_f[lcp->filter].on1max = 0;
+ j->cadence_f[lcp->filter].off1 = lcp->off1;
+ j->cadence_f[lcp->filter].off1min = 0;
+ j->cadence_f[lcp->filter].off1max = 0;
+ j->cadence_f[lcp->filter].on2 = lcp->on2;
+ j->cadence_f[lcp->filter].on2min = 0;
+ j->cadence_f[lcp->filter].on2max = 0;
+ j->cadence_f[lcp->filter].off2 = lcp->off2;
+ j->cadence_f[lcp->filter].off2min = 0;
+ j->cadence_f[lcp->filter].off2max = 0;
+ j->cadence_f[lcp->filter].on3 = lcp->on3;
+ j->cadence_f[lcp->filter].on3min = 0;
+ j->cadence_f[lcp->filter].on3max = 0;
+ j->cadence_f[lcp->filter].off3 = lcp->off3;
+ j->cadence_f[lcp->filter].off3min = 0;
+ j->cadence_f[lcp->filter].off3max = 0;
+ if(ixjdebug & 0x0002) {
+ printk(KERN_INFO "Cadence %d loaded\n", lcp->filter);
+ }
+ kfree(lcp);
+ return 0;
+}
+
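+/*
+ * Rebuild j->caplist from scratch: the vendor entry first, then the
+ * device, the ports this card type actually has, and finally the
+ * codecs.  The G.723.1/TrueSpeech entries are withheld from the
+ * version 0x12 8020 parts (flagged as broken below), and the
+ * loadable codecs (TS8.5, G.728, G.729) are gated on DSP type and
+ * firmware-load flags, so the PHONE_CAPABILITIES* ioctls always see
+ * the current state.
+ */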
+static void add_caps(IXJ *j)
+{
+ j->caps = 0;
+ j->caplist[j->caps].cap = PHONE_VENDOR_QUICKNET;
+ strcpy(j->caplist[j->caps].desc, "Quicknet Technologies, Inc. (www.quicknet.net)");
+ j->caplist[j->caps].captype = vendor;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+ j->caplist[j->caps].captype = device;
+ switch (j->cardtype) {
+ case QTI_PHONEJACK:
+ strcpy(j->caplist[j->caps].desc, "Quicknet Internet PhoneJACK");
+ break;
+ case QTI_LINEJACK:
+ strcpy(j->caplist[j->caps].desc, "Quicknet Internet LineJACK");
+ break;
+ case QTI_PHONEJACK_LITE:
+ strcpy(j->caplist[j->caps].desc, "Quicknet Internet PhoneJACK Lite");
+ break;
+ case QTI_PHONEJACK_PCI:
+ strcpy(j->caplist[j->caps].desc, "Quicknet Internet PhoneJACK PCI");
+ break;
+ case QTI_PHONECARD:
+ strcpy(j->caplist[j->caps].desc, "Quicknet Internet PhoneCARD");
+ break;
+ }
+ j->caplist[j->caps].cap = j->cardtype;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+ strcpy(j->caplist[j->caps].desc, "POTS");
+ j->caplist[j->caps].captype = port;
+ j->caplist[j->caps].cap = pots;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+
+ /* add devices that can do speaker/mic */
+ switch (j->cardtype) {
+ case QTI_PHONEJACK:
+ case QTI_LINEJACK:
+ case QTI_PHONEJACK_PCI:
+ case QTI_PHONECARD:
+ strcpy(j->caplist[j->caps].desc, "SPEAKER");
+ j->caplist[j->caps].captype = port;
+ j->caplist[j->caps].cap = speaker;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+ default:
+ break;
+ }
+
+ /* add devices that can do handset */
+ switch (j->cardtype) {
+ case QTI_PHONEJACK:
+ strcpy(j->caplist[j->caps].desc, "HANDSET");
+ j->caplist[j->caps].captype = port;
+ j->caplist[j->caps].cap = handset;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+ break;
+ default:
+ break;
+ }
+
+ /* add devices that can do PSTN */
+ switch (j->cardtype) {
+ case QTI_LINEJACK:
+ strcpy(j->caplist[j->caps].desc, "PSTN");
+ j->caplist[j->caps].captype = port;
+ j->caplist[j->caps].cap = pstn;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+ break;
+ default:
+ break;
+ }
+
+ /* add codecs - all cards can do uLaw, linear 8/16, and Windows sound system */
+ strcpy(j->caplist[j->caps].desc, "ULAW");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = ULAW;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+
+ strcpy(j->caplist[j->caps].desc, "LINEAR 16 bit");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = LINEAR16;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+
+ strcpy(j->caplist[j->caps].desc, "LINEAR 8 bit");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = LINEAR8;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+
+ strcpy(j->caplist[j->caps].desc, "Windows Sound System");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = WSS;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+
+ /* software ALAW codec, made from ULAW */
+ strcpy(j->caplist[j->caps].desc, "ALAW");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = ALAW;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+
+ /* version 12 of the 8020 does the following codecs in a broken way */
+ if (j->dsp.low != 0x20 || j->ver.low != 0x12) {
+ strcpy(j->caplist[j->caps].desc, "G.723.1 6.3kbps");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = G723_63;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+
+ strcpy(j->caplist[j->caps].desc, "G.723.1 5.3kbps");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = G723_53;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+
+ strcpy(j->caplist[j->caps].desc, "TrueSpeech 4.8kbps");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = TS48;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+
+ strcpy(j->caplist[j->caps].desc, "TrueSpeech 4.1kbps");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = TS41;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+ }
+
+ /* 8020 chips can do TS8.5 native, and 8021/8022 can load it */
+ if (j->dsp.low == 0x20 || j->flags.ts85_loaded) {
+ strcpy(j->caplist[j->caps].desc, "TrueSpeech 8.5kbps");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = TS85;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+ }
+
+ /* 8021 chips can do G728 */
+ if (j->dsp.low == 0x21) {
+ strcpy(j->caplist[j->caps].desc, "G.728 16kbps");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = G728;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+ }
+
+ /* 8021/8022 chips can do G729 if loaded */
+ if (j->dsp.low != 0x20 && j->flags.g729_loaded) {
+ strcpy(j->caplist[j->caps].desc, "G.729A 8kbps");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = G729;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+ }
+ if (j->dsp.low != 0x20 && j->flags.g729_loaded) {
+ strcpy(j->caplist[j->caps].desc, "G.729B 8kbps");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = G729B;
+ j->caplist[j->caps].handle = j->caps;
+ j->caps++;
+ }
+}
+
+static int capabilities_check(IXJ *j, struct phone_capability *pcreq)
+{
+ int cnt;
+ int retval = 0;
+ for (cnt = 0; cnt < j->caps; cnt++) {
+ if (pcreq->captype == j->caplist[cnt].captype
+ && pcreq->cap == j->caplist[cnt].cap) {
+ retval = 1;
+ break;
+ }
+ }
+ return retval;
+}
+
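+/*
+ * Main ioctl dispatcher.  Two levels of serialization apply: the
+ * driver-wide ixj_mutex taken in the ixj_ioctl() wrapper below, and a
+ * per-board busy bit spun on here so the DSP only ever sees one
+ * command stream.  Every path out of the big switch is expected to
+ * fall through to the clear_bit() at the bottom; returning directly
+ * would leave the board marked busy forever.
+ */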
+static long do_ixj_ioctl(struct file *file_p, unsigned int cmd, unsigned long arg)
+{
+ IXJ_TONE ti;
+ IXJ_FILTER jf;
+ IXJ_FILTER_RAW jfr;
+ void __user *argp = (void __user *)arg;
+ struct inode *inode = file_p->f_path.dentry->d_inode;
+ unsigned int minor = iminor(inode);
+ unsigned int raise, mant;
+ int board = NUM(inode);
+
+ IXJ *j = get_ixj(NUM(inode));
+
+ int retval = 0;
+
+ /*
+ * Set up locks to ensure that only one process is talking to the DSP at a time.
+ * This is necessary to keep the DSP from locking up.
+ */
+ while(test_and_set_bit(board, (void *)&j->busyflags) != 0)
+ schedule_timeout_interruptible(1);
+ if (ixjdebug & 0x0040)
+ printk("phone%d ioctl, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg);
+ if (minor >= IXJMAX) {
+ clear_bit(board, &j->busyflags);
+ return -ENODEV;
+ }
+ /*
+ * Check ioctls only root can use.
+ */
+ if (!capable(CAP_SYS_ADMIN)) {
+ switch (cmd) {
+ case IXJCTL_TESTRAM:
+ case IXJCTL_HZ:
+ clear_bit(board, &j->busyflags);
+ return -EPERM;
+ }
+ }
+ switch (cmd) {
+ case IXJCTL_TESTRAM:
+ ixj_testram(j);
+ retval = (j->ssr.high << 8) + j->ssr.low;
+ break;
+ case IXJCTL_CARDTYPE:
+ retval = j->cardtype;
+ break;
+ case IXJCTL_SERIAL:
+ retval = j->serial;
+ break;
+ case IXJCTL_VERSION:
+ {
+ char arg_str[100];
+ snprintf(arg_str, sizeof(arg_str),
+ "\nDriver version %i.%i.%i", IXJ_VER_MAJOR,
+ IXJ_VER_MINOR, IXJ_BLD_VER);
+ if (copy_to_user(argp, arg_str, strlen(arg_str) + 1))
+ retval = -EFAULT;
+ }
+ break;
+ case PHONE_RING_CADENCE:
+ j->ring_cadence = arg;
+ break;
+ case IXJCTL_CIDCW:
+ if(arg) {
+ if (copy_from_user(&j->cid_send, argp, sizeof(PHONE_CID))) {
+ retval = -EFAULT;
+ break;
+ }
+ } else {
+ memset(&j->cid_send, 0, sizeof(PHONE_CID));
+ }
+ ixj_write_cidcw(j);
+ break;
+ /* Binary compatbility */
+ case OLD_PHONE_RING_START:
+ arg = 0;
+ /* Fall through */
+ case PHONE_RING_START:
+ if(arg) {
+ if (copy_from_user(&j->cid_send, argp, sizeof(PHONE_CID))) {
+ retval = -EFAULT;
+ break;
+ }
+ ixj_write_cid(j);
+ } else {
+ memset(&j->cid_send, 0, sizeof(PHONE_CID));
+ }
+ ixj_ring_start(j);
+ break;
+ case PHONE_RING_STOP:
+ j->flags.cringing = 0;
+ if(j->cadence_f[5].enable) {
+ j->cadence_f[5].state = 0;
+ }
+ ixj_ring_off(j);
+ break;
+ case PHONE_RING:
+ retval = ixj_ring(j);
+ break;
+ case PHONE_EXCEPTION:
+ retval = j->ex.bytes;
+ if(j->ex.bits.flash) {
+ j->flash_end = 0;
+ j->ex.bits.flash = 0;
+ }
+ j->ex.bits.pstn_ring = 0;
+ j->ex.bits.caller_id = 0;
+ j->ex.bits.pstn_wink = 0;
+ j->ex.bits.f0 = 0;
+ j->ex.bits.f1 = 0;
+ j->ex.bits.f2 = 0;
+ j->ex.bits.f3 = 0;
+ j->ex.bits.fc0 = 0;
+ j->ex.bits.fc1 = 0;
+ j->ex.bits.fc2 = 0;
+ j->ex.bits.fc3 = 0;
+ j->ex.bits.reserved = 0;
+ break;
+ case PHONE_HOOKSTATE:
+ j->ex.bits.hookstate = 0;
+ retval = j->hookstate; /* j->r_hook; */
+ break;
+ case IXJCTL_SET_LED:
+ LED_SetState(arg, j);
+ break;
+ case PHONE_FRAME:
+ retval = set_base_frame(j, arg);
+ break;
+ case PHONE_REC_CODEC:
+ retval = set_rec_codec(j, arg);
+ break;
+ case PHONE_VAD:
+ ixj_vad(j, arg);
+ break;
+ case PHONE_REC_START:
+ ixj_record_start(j);
+ break;
+ case PHONE_REC_STOP:
+ ixj_record_stop(j);
+ break;
+ case PHONE_REC_DEPTH:
+ set_rec_depth(j, arg);
+ break;
+ case PHONE_REC_VOLUME:
+ if(arg == -1) {
+ retval = get_rec_volume(j);
+ }
+ else {
+ set_rec_volume(j, arg);
+ retval = arg;
+ }
+ break;
+ case PHONE_REC_VOLUME_LINEAR:
+ if(arg == -1) {
+ retval = get_rec_volume_linear(j);
+ }
+ else {
+ set_rec_volume_linear(j, arg);
+ retval = arg;
+ }
+ break;
+ case IXJCTL_DTMF_PRESCALE:
+ if(arg == -1) {
+ retval = get_dtmf_prescale(j);
+ }
+ else {
+ set_dtmf_prescale(j, arg);
+ retval = arg;
+ }
+ break;
+ case PHONE_REC_LEVEL:
+ retval = get_rec_level(j);
+ break;
+ case IXJCTL_SC_RXG:
+ retval = ixj_siadc(j, arg);
+ break;
+ case IXJCTL_SC_TXG:
+ retval = ixj_sidac(j, arg);
+ break;
+ case IXJCTL_AEC_START:
+ ixj_aec_start(j, arg);
+ break;
+ case IXJCTL_AEC_STOP:
+ aec_stop(j);
+ break;
+ case IXJCTL_AEC_GET_LEVEL:
+ retval = j->aec_level;
+ break;
+ case PHONE_PLAY_CODEC:
+ retval = set_play_codec(j, arg);
+ break;
+ case PHONE_PLAY_START:
+ retval = ixj_play_start(j);
+ break;
+ case PHONE_PLAY_STOP:
+ ixj_play_stop(j);
+ break;
+ case PHONE_PLAY_DEPTH:
+ set_play_depth(j, arg);
+ break;
+ case PHONE_PLAY_VOLUME:
+ if(arg == -1) {
+ retval = get_play_volume(j);
+ }
+ else {
+ set_play_volume(j, arg);
+ retval = arg;
+ }
+ break;
+ case PHONE_PLAY_VOLUME_LINEAR:
+ if(arg == -1) {
+ retval = get_play_volume_linear(j);
+ }
+ else {
+ set_play_volume_linear(j, arg);
+ retval = arg;
+ }
+ break;
+ case PHONE_PLAY_LEVEL:
+ retval = get_play_level(j);
+ break;
+ case IXJCTL_DSP_TYPE:
+ retval = (j->dsp.high << 8) + j->dsp.low;
+ break;
+ case IXJCTL_DSP_VERSION:
+ retval = (j->ver.high << 8) + j->ver.low;
+ break;
+ case IXJCTL_HZ:
+ hertz = arg;
+ break;
+ case IXJCTL_RATE:
+ if (arg > hertz)
+ retval = -1;
+ else
+ samplerate = arg;
+ break;
+ case IXJCTL_DRYBUFFER_READ:
+ if (put_user(j->drybuffer, (unsigned long __user *) argp))
+ retval = -EFAULT;
+ break;
+ case IXJCTL_DRYBUFFER_CLEAR:
+ j->drybuffer = 0;
+ break;
+ case IXJCTL_FRAMES_READ:
+ if (put_user(j->framesread, (unsigned long __user *) argp))
+ retval = -EFAULT;
+ break;
+ case IXJCTL_FRAMES_WRITTEN:
+ if (put_user(j->frameswritten, (unsigned long __user *) argp))
+ retval = -EFAULT;
+ break;
+ case IXJCTL_READ_WAIT:
+ if (put_user(j->read_wait, (unsigned long __user *) argp))
+ retval = -EFAULT;
+ break;
+ case IXJCTL_WRITE_WAIT:
+ if (put_user(j->write_wait, (unsigned long __user *) argp))
+ retval = -EFAULT;
+ break;
+ case PHONE_MAXRINGS:
+ j->maxrings = arg;
+ break;
+ case PHONE_SET_TONE_ON_TIME:
+ ixj_set_tone_on(arg, j);
+ break;
+ case PHONE_SET_TONE_OFF_TIME:
+ ixj_set_tone_off(arg, j);
+ break;
+ case PHONE_GET_TONE_ON_TIME:
+ if (ixj_get_tone_on(j)) {
+ retval = -1;
+ } else {
+ retval = (j->ssr.high << 8) + j->ssr.low;
+ }
+ break;
+ case PHONE_GET_TONE_OFF_TIME:
+ if (ixj_get_tone_off(j)) {
+ retval = -1;
+ } else {
+ retval = (j->ssr.high << 8) + j->ssr.low;
+ }
+ break;
+ case PHONE_PLAY_TONE:
+ if (!j->tone_state)
+ retval = ixj_play_tone(j, arg);
+ else
+ retval = -1;
+ break;
+ case PHONE_GET_TONE_STATE:
+ retval = j->tone_state;
+ break;
+ case PHONE_DTMF_READY:
+ retval = j->ex.bits.dtmf_ready;
+ break;
+ case PHONE_GET_DTMF:
+ if (ixj_hookstate(j)) {
+ if (j->dtmf_rp != j->dtmf_wp) {
+ retval = j->dtmfbuffer[j->dtmf_rp];
+ j->dtmf_rp++;
+ if (j->dtmf_rp == 79)
+ j->dtmf_rp = 0;
+ if (j->dtmf_rp == j->dtmf_wp) {
+ j->ex.bits.dtmf_ready = j->dtmf_rp = j->dtmf_wp = 0;
+ }
+ }
+ }
+ break;
+ case PHONE_GET_DTMF_ASCII:
+ if (ixj_hookstate(j)) {
+ if (j->dtmf_rp != j->dtmf_wp) {
+ switch (j->dtmfbuffer[j->dtmf_rp]) {
+ case 10:
+ retval = '*';
+ break;
+ case 11:
+ retval = '0';
+ break;
+ case 12:
+ retval = '#';
+ break;
+ case 28:
+ retval = 'A';
+ break;
+ case 29:
+ retval = 'B';
+ break;
+ case 30:
+ retval = 'C';
+ break;
+ case 31:
+ retval = 'D';
+ break;
+ default:
+ retval = '0' + j->dtmfbuffer[j->dtmf_rp];
+ break;
+ }
+ j->dtmf_rp++;
+ if (j->dtmf_rp == 79)
+ j->dtmf_rp = 0;
+ if (j->dtmf_rp == j->dtmf_wp) {
+ j->ex.bits.dtmf_ready = j->dtmf_rp = j->dtmf_wp = 0;
+ }
+ }
+ }
+ break;
+ case PHONE_DTMF_OOB:
+ j->flags.dtmf_oob = arg;
+ break;
+ case PHONE_DIALTONE:
+ ixj_dialtone(j);
+ break;
+ case PHONE_BUSY:
+ ixj_busytone(j);
+ break;
+ case PHONE_RINGBACK:
+ ixj_ringback(j);
+ break;
+ case PHONE_WINK:
+ if(j->cardtype == QTI_PHONEJACK)
+ retval = -1;
+ else
+ retval = ixj_wink(j);
+ break;
+ case PHONE_CPT_STOP:
+ ixj_cpt_stop(j);
+ break;
+ case PHONE_QUERY_CODEC:
+ {
+ struct phone_codec_data pd;
+ int val;
+ int proto_size[] = {
+ -1,
+ 12, 10, 16, 9, 8, 48, 5,
+ 40, 40, 80, 40, 40, 6
+ };
+ if(copy_from_user(&pd, argp, sizeof(pd))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (pd.type < 1 || pd.type > 13) {
+ retval = -EPROTONOSUPPORT;
+ break;
+ }
+ if (pd.type < G729)
+ val = proto_size[pd.type];
+ else switch (j->baseframe.low) {
+ case 0xA0:
+ val = 2 * proto_size[pd.type];
+ break;
+ case 0x50:
+ val = proto_size[pd.type];
+ break;
+ default:
+ val = proto_size[pd.type] * 3;
+ break;
+ }
+ pd.buf_min = pd.buf_max = pd.buf_opt = val;
+ if(copy_to_user(argp, &pd, sizeof(pd)))
+ retval = -EFAULT;
+ break;
+ }
+ case IXJCTL_DSP_IDLE:
+ idle(j);
+ break;
+ case IXJCTL_MIXER:
+ if ((arg & 0xff) == 0xff)
+ retval = ixj_get_mixer(arg, j);
+ else
+ ixj_mixer(arg, j);
+ break;
+ case IXJCTL_DAA_COEFF_SET:
+ switch (arg) {
+ case DAA_US:
+ DAA_Coeff_US(j);
+ retval = ixj_daa_write(j);
+ break;
+ case DAA_UK:
+ DAA_Coeff_UK(j);
+ retval = ixj_daa_write(j);
+ break;
+ case DAA_FRANCE:
+ DAA_Coeff_France(j);
+ retval = ixj_daa_write(j);
+ break;
+ case DAA_GERMANY:
+ DAA_Coeff_Germany(j);
+ retval = ixj_daa_write(j);
+ break;
+ case DAA_AUSTRALIA:
+ DAA_Coeff_Australia(j);
+ retval = ixj_daa_write(j);
+ break;
+ case DAA_JAPAN:
+ DAA_Coeff_Japan(j);
+ retval = ixj_daa_write(j);
+ break;
+ default:
+ retval = 1;
+ break;
+ }
+ break;
+ case IXJCTL_DAA_AGAIN:
+ ixj_daa_cr4(j, arg | 0x02);
+ break;
+ case IXJCTL_PSTN_LINETEST:
+ retval = ixj_linetest(j);
+ break;
+ case IXJCTL_VMWI:
+ ixj_write_vmwi(j, arg);
+ break;
+ case IXJCTL_CID:
+ if (copy_to_user(argp, &j->cid, sizeof(PHONE_CID)))
+ retval = -EFAULT;
+ j->ex.bits.caller_id = 0;
+ break;
+ case IXJCTL_WINK_DURATION:
+ j->winktime = arg;
+ break;
+ case IXJCTL_PORT:
+ if (arg)
+ retval = ixj_set_port(j, arg);
+ else
+ retval = j->port;
+ break;
+ case IXJCTL_POTS_PSTN:
+ retval = ixj_set_pots(j, arg);
+ break;
+ case PHONE_CAPABILITIES:
+ add_caps(j);
+ retval = j->caps;
+ break;
+ case PHONE_CAPABILITIES_LIST:
+ add_caps(j);
+ if (copy_to_user(argp, j->caplist, sizeof(struct phone_capability) * j->caps))
+ retval = -EFAULT;
+ break;
+ case PHONE_CAPABILITIES_CHECK:
+ {
+ struct phone_capability cap;
+ if (copy_from_user(&cap, argp, sizeof(cap)))
+ retval = -EFAULT;
+ else {
+ add_caps(j);
+ retval = capabilities_check(j, &cap);
+ }
+ }
+ break;
+ case PHONE_PSTN_SET_STATE:
+ daa_set_mode(j, arg);
+ break;
+ case PHONE_PSTN_GET_STATE:
+ retval = j->daa_mode;
+ j->ex.bits.pstn_ring = 0;
+ break;
+ case IXJCTL_SET_FILTER:
+ if (copy_from_user(&jf, argp, sizeof(jf)))
+ retval = -EFAULT;
+ else
+ retval = ixj_init_filter(j, &jf);
+ break;
+ case IXJCTL_SET_FILTER_RAW:
+ if (copy_from_user(&jfr, argp, sizeof(jfr)))
+ retval = -EFAULT;
+ else
+ retval = ixj_init_filter_raw(j, &jfr);
+ break;
+ case IXJCTL_GET_FILTER_HIST:
+ if (arg < 0 || arg > 3)
+ retval = -EINVAL;
+ else
+ retval = j->filter_hist[arg];
+ break;
+ case IXJCTL_INIT_TONE:
+ if (copy_from_user(&ti, argp, sizeof(ti)))
+ retval = -EFAULT;
+ else
+ retval = ixj_init_tone(j, &ti);
+ break;
+ case IXJCTL_TONE_CADENCE:
+ retval = ixj_build_cadence(j, argp);
+ break;
+ case IXJCTL_FILTER_CADENCE:
+ retval = ixj_build_filter_cadence(j, argp);
+ break;
+ case IXJCTL_SIGCTL:
+ if (copy_from_user(&j->sigdef, argp, sizeof(IXJ_SIGDEF))) {
+ retval = -EFAULT;
+ break;
+ }
+ j->ixj_signals[j->sigdef.event] = j->sigdef.signal;
+ if(j->sigdef.event < 33) {
+ raise = 1;
+ for(mant = 0; mant < j->sigdef.event; mant++){
+ raise *= 2;
+ }
+ if(j->sigdef.signal)
+ j->ex_sig.bytes |= raise;
+ else
+ j->ex_sig.bytes &= (raise^0xffff);
+ }
+ break;
+ case IXJCTL_INTERCOM_STOP:
+ if (arg < 0 || arg >= IXJMAX) {
+ retval = -EINVAL;
+ break;
+ }
+ j->intercom = -1;
+ ixj_record_stop(j);
+ ixj_play_stop(j);
+ idle(j);
+ get_ixj(arg)->intercom = -1;
+ ixj_record_stop(get_ixj(arg));
+ ixj_play_stop(get_ixj(arg));
+ idle(get_ixj(arg));
+ break;
+ case IXJCTL_INTERCOM_START:
+ if (arg < 0 || arg >= IXJMAX) {
+ retval = -EINVAL;
+ break;
+ }
+ j->intercom = arg;
+ ixj_record_start(j);
+ ixj_play_start(j);
+ get_ixj(arg)->intercom = board;
+ ixj_play_start(get_ixj(arg));
+ ixj_record_start(get_ixj(arg));
+ break;
+ }
+ if (ixjdebug & 0x0040)
+ printk("phone%d ioctl end, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg);
+ clear_bit(board, &j->busyflags);
+ return retval;
+}
+
+static long ixj_ioctl(struct file *file_p, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+ mutex_lock(&ixj_mutex);
+ ret = do_ixj_ioctl(file_p, cmd, arg);
+ mutex_unlock(&ixj_mutex);
+ return ret;
+}
+
+static int ixj_fasync(int fd, struct file *file_p, int mode)
+{
+ IXJ *j = get_ixj(NUM(file_p->f_path.dentry->d_inode));
+
+ return fasync_helper(fd, file_p, mode, &j->async_queue);
+}
+
+static const struct file_operations ixj_fops =
+{
+ .owner = THIS_MODULE,
+ .read = ixj_enhanced_read,
+ .write = ixj_enhanced_write,
+ .poll = ixj_poll,
+ .unlocked_ioctl = ixj_ioctl,
+ .release = ixj_release,
+ .fasync = ixj_fasync,
+ .llseek = default_llseek,
+};
+
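+/*
+ * LineJACK port self-test.  With all relays released it first checks
+ * the POTS-to-PSTN short detect, then briefly brings the DAA up into
+ * conversation mode and samples VDD_OK to decide where line voltage
+ * is present.  The verdict lands in flags.pots_correct and
+ * flags.pstn_present and is echoed on the card LEDs (0x8/0x9/0xA for
+ * a good POTS port, 0x4/0x5/0x6 otherwise).
+ */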
+static int ixj_linetest(IXJ *j)
+{
+ j->flags.pstncheck = 1; /* Testing */
+ j->flags.pstn_present = 0; /* Assume the line is not there */
+
+ daa_int_read(j); /* Clear DAA interrupt flags */
+
+ /* Hold all relays in the normally de-energized position. */
+
+ j->pld_slicw.bits.rly1 = 0;
+ j->pld_slicw.bits.rly2 = 0;
+ j->pld_slicw.bits.rly3 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_slicr.byte = inb_p(j->XILINXbase + 0x01);
+ if (j->pld_slicr.bits.potspstn) {
+ j->flags.pots_pstn = 1;
+ j->flags.pots_correct = 0;
+ LED_SetState(0x4, j);
+ } else {
+ j->flags.pots_pstn = 0;
+ j->pld_slicw.bits.rly1 = 0;
+ j->pld_slicw.bits.rly2 = 0;
+ j->pld_slicw.bits.rly3 = 1;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ daa_set_mode(j, SOP_PU_CONVERSATION);
+ msleep(1000);
+ daa_int_read(j);
+ daa_set_mode(j, SOP_PU_RESET);
+ if (j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK) {
+ j->flags.pots_correct = 0; /* Should not be line voltage on POTS port. */
+ LED_SetState(0x4, j);
+ j->pld_slicw.bits.rly3 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ } else {
+ j->flags.pots_correct = 1;
+ LED_SetState(0x8, j);
+ j->pld_slicw.bits.rly1 = 1;
+ j->pld_slicw.bits.rly2 = 0;
+ j->pld_slicw.bits.rly3 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ }
+ }
+ j->pld_slicw.bits.rly3 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ daa_set_mode(j, SOP_PU_CONVERSATION);
+ msleep(1000);
+ daa_int_read(j);
+ daa_set_mode(j, SOP_PU_RESET);
+ if (j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK) {
+ j->pstn_sleeptil = jiffies + (hertz / 4);
+ j->flags.pstn_present = 1;
+ } else {
+ j->flags.pstn_present = 0;
+ }
+ if (j->flags.pstn_present) {
+ if (j->flags.pots_correct) {
+ LED_SetState(0xA, j);
+ } else {
+ LED_SetState(0x6, j);
+ }
+ } else {
+ if (j->flags.pots_correct) {
+ LED_SetState(0x9, j);
+ } else {
+ LED_SetState(0x5, j);
+ }
+ }
+ j->flags.pstncheck = 0; /* Testing */
+ return j->flags.pstn_present;
+}
+
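+/*
+ * One-time probe and initialization for a board whose I/O bases are
+ * already known.  Wakes the DSP, reads the device ID and version to
+ * pin down the card type (unless the caller forced one), claims the
+ * XILINX region, runs the per-card analog setup (SLIC, DAA, mixer),
+ * and finally registers with the phone subsystem and arms the
+ * polling timer.
+ */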
+static int ixj_selfprobe(IXJ *j)
+{
+ unsigned short cmd;
+ int cnt;
+ BYTES bytes;
+
+ init_waitqueue_head(&j->poll_q);
+ init_waitqueue_head(&j->read_q);
+ init_waitqueue_head(&j->write_q);
+
+ while(atomic_read(&j->DSPWrite) > 0)
+ atomic_dec(&j->DSPWrite);
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Write IDLE to Software Control Register\n");
+ ixj_WriteDSPCommand(0x0FE0, j); /* Put the DSP in full power mode. */
+
+ if (ixj_WriteDSPCommand(0x0000, j)) /* Write IDLE to Software Control Register */
+ return -1;
+/* The read values of the SSR should be 0x00 for the IDLE command */
+ if (j->ssr.low || j->ssr.high)
+ return -1;
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Get Device ID Code\n");
+ if (ixj_WriteDSPCommand(0x3400, j)) /* Get Device ID Code */
+ return -1;
+ j->dsp.low = j->ssr.low;
+ j->dsp.high = j->ssr.high;
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Get Device Version Code\n");
+ if (ixj_WriteDSPCommand(0x3800, j)) /* Get Device Version Code */
+ return -1;
+ j->ver.low = j->ssr.low;
+ j->ver.high = j->ssr.high;
+ if (!j->cardtype) {
+ if (j->dsp.low == 0x21) {
+ bytes.high = bytes.low = inb_p(j->XILINXbase + 0x02);
+ outb_p(bytes.low ^ 0xFF, j->XILINXbase + 0x02);
+/* Test for Internet LineJACK or Internet PhoneJACK Lite */
+ bytes.low = inb_p(j->XILINXbase + 0x02);
+ /* The register is read-only on the Internet PhoneJACK Lite */
+ if (bytes.low == bytes.high) {
+ j->cardtype = QTI_PHONEJACK_LITE;
+ if (!request_region(j->XILINXbase, 4, "ixj control")) {
+ printk(KERN_INFO "ixj: can't get I/O address 0x%x\n", j->XILINXbase);
+ return -1;
+ }
+ j->pld_slicw.pcib.e1 = 1;
+ outb_p(j->pld_slicw.byte, j->XILINXbase);
+ } else {
+ j->cardtype = QTI_LINEJACK;
+
+ if (!request_region(j->XILINXbase, 8, "ixj control")) {
+ printk(KERN_INFO "ixj: can't get I/O address 0x%x\n", j->XILINXbase);
+ return -1;
+ }
+ }
+ } else if (j->dsp.low == 0x22) {
+ j->cardtype = QTI_PHONEJACK_PCI;
+ if (!request_region(j->XILINXbase, 4, "ixj control")) {
+ printk(KERN_INFO "ixj: can't get I/O address 0x%x\n", j->XILINXbase);
+ return -1;
+ }
+ j->pld_slicw.pcib.e1 = 1;
+ outb_p(j->pld_slicw.byte, j->XILINXbase);
+ } else
+ j->cardtype = QTI_PHONEJACK;
+ } else {
+ switch (j->cardtype) {
+ case QTI_PHONEJACK:
+ if (j->dsp.low != 0x20) {
+ j->dsp.high = 0x80;
+ j->dsp.low = 0x20;
+ ixj_WriteDSPCommand(0x3800, j);
+ j->ver.low = j->ssr.low;
+ j->ver.high = j->ssr.high;
+ }
+ break;
+ case QTI_LINEJACK:
+ if (!request_region(j->XILINXbase, 8, "ixj control")) {
+ printk(KERN_INFO "ixj: can't get I/O address 0x%x\n", j->XILINXbase);
+ return -1;
+ }
+ break;
+ case QTI_PHONEJACK_LITE:
+ case QTI_PHONEJACK_PCI:
+ if (!request_region(j->XILINXbase, 4, "ixj control")) {
+ printk(KERN_INFO "ixj: can't get I/O address 0x%x\n", j->XILINXbase);
+ return -1;
+ }
+ j->pld_slicw.pcib.e1 = 1;
+ outb_p(j->pld_slicw.byte, j->XILINXbase);
+ break;
+ case QTI_PHONECARD:
+ break;
+ }
+ }
+ if (j->dsp.low == 0x20 || j->cardtype == QTI_PHONEJACK_LITE || j->cardtype == QTI_PHONEJACK_PCI) {
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Write CODEC config to Software Control Register\n");
+ if (ixj_WriteDSPCommand(0xC462, j)) /* Write CODEC config to Software Control Register */
+ return -1;
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Write CODEC timing to Software Control Register\n");
+ if (j->cardtype == QTI_PHONEJACK) {
+ cmd = 0x9FF2;
+ } else {
+ cmd = 0x9FF5;
+ }
+ if (ixj_WriteDSPCommand(cmd, j)) /* Write CODEC timing to Software Control Register */
+ return -1;
+ } else {
+ if (set_base_frame(j, 30) != 30)
+ return -1;
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Write CODEC config to Software Control Register\n");
+ if (j->cardtype == QTI_PHONECARD) {
+ if (ixj_WriteDSPCommand(0xC528, j)) /* Write CODEC config to Software Control Register */
+ return -1;
+ }
+ if (j->cardtype == QTI_LINEJACK) {
+ if (ixj_WriteDSPCommand(0xC528, j)) /* Write CODEC config to Software Control Register */
+ return -1;
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Turn on the PLD Clock at 8Khz\n");
+ j->pld_clock.byte = 0;
+ outb_p(j->pld_clock.byte, j->XILINXbase + 0x04);
+ }
+ }
+
+ if (j->dsp.low == 0x20) {
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Configure GPIO pins\n");
+ j->gpio.bytes.high = 0x09;
+/* bytes.low = 0xEF; 0xF7 */
+ j->gpio.bits.gpio1 = 1;
+ j->gpio.bits.gpio2 = 1;
+ j->gpio.bits.gpio3 = 0;
+ j->gpio.bits.gpio4 = 1;
+ j->gpio.bits.gpio5 = 1;
+ j->gpio.bits.gpio6 = 1;
+ j->gpio.bits.gpio7 = 1;
+ ixj_WriteDSPCommand(j->gpio.word, j); /* Set GPIO pin directions */
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Enable SLIC\n");
+ j->gpio.bytes.high = 0x0B;
+ j->gpio.bytes.low = 0x00;
+ j->gpio.bits.gpio1 = 0;
+ j->gpio.bits.gpio2 = 1;
+ j->gpio.bits.gpio5 = 0;
+ ixj_WriteDSPCommand(j->gpio.word, j); /* send the ring stop signal */
+ j->port = PORT_POTS;
+ } else {
+ if (j->cardtype == QTI_LINEJACK) {
+ LED_SetState(0x1, j);
+ msleep(100);
+ LED_SetState(0x2, j);
+ msleep(100);
+ LED_SetState(0x4, j);
+ msleep(100);
+ LED_SetState(0x8, j);
+ msleep(100);
+ LED_SetState(0x0, j);
+ daa_get_version(j);
+ if (ixjdebug & 0x0002)
+ printk("Loading DAA Coefficients\n");
+ DAA_Coeff_US(j);
+ if (!ixj_daa_write(j)) {
+ printk("DAA write failed on board %d\n", j->board);
+ return -1;
+ }
+ if(!ixj_daa_cid_reset(j)) {
+ printk("DAA CID reset failed on board %d\n", j->board);
+ return -1;
+ }
+ j->flags.pots_correct = 0;
+ j->flags.pstn_present = 0;
+ ixj_linetest(j);
+ if (j->flags.pots_correct) {
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_slicw.bits.rly1 = 1;
+ j->pld_slicw.bits.spken = 1;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ SLIC_SetState(PLD_SLIC_STATE_STANDBY, j);
+/* SLIC_SetState(PLD_SLIC_STATE_ACTIVE, j); */
+ j->port = PORT_POTS;
+ }
+ ixj_set_port(j, PORT_PSTN);
+ ixj_set_pots(j, 1);
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Enable Mixer\n");
+ ixj_mixer(0x0000, j); /* Master Volume Left unmute 0 dB */
+ ixj_mixer(0x0100, j); /* Master Volume Right unmute 0 dB */
+ ixj_mixer(0x0203, j); /* Voice Left Volume unmute 6 dB */
+ ixj_mixer(0x0303, j); /* Voice Right Volume unmute 6 dB */
+ ixj_mixer(0x0480, j); /* FM Left mute */
+ ixj_mixer(0x0580, j); /* FM Right mute */
+ ixj_mixer(0x0680, j); /* CD Left mute */
+ ixj_mixer(0x0780, j); /* CD Right mute */
+ ixj_mixer(0x0880, j); /* Line Left mute */
+ ixj_mixer(0x0980, j); /* Line Right mute */
+ ixj_mixer(0x0A80, j); /* Aux left mute */
+ ixj_mixer(0x0B80, j); /* Aux right mute */
+ ixj_mixer(0x0C00, j); /* Mono1 unmute 12 dB */
+ ixj_mixer(0x0D80, j); /* Mono2 mute */
+ ixj_mixer(0x0E80, j); /* Mic mute */
+ ixj_mixer(0x0F00, j); /* Mono Out Volume unmute 0 dB */
+ ixj_mixer(0x1000, j); /* Voice Left and Right out only */
+ ixj_mixer(0x110C, j);
+ ixj_mixer(0x1200, j); /* Mono1 switch on mixer left */
+ ixj_mixer(0x1401, j);
+ ixj_mixer(0x1300, j); /* Mono1 switch on mixer right */
+ ixj_mixer(0x1501, j);
+ ixj_mixer(0x1700, j); /* Clock select */
+ ixj_mixer(0x1800, j); /* ADC input from mixer */
+ ixj_mixer(0x1901, j); /* Mic gain 30 dB */
+
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Setting Default US Ring Cadence Detection\n");
+ j->cadence_f[4].state = 0;
+ j->cadence_f[4].on1 = 0; /* Cadence Filter 4 is used for PSTN ring cadence */
+ j->cadence_f[4].off1 = 0;
+ j->cadence_f[4].on2 = 0;
+ j->cadence_f[4].off2 = 0;
+ j->cadence_f[4].on3 = 0;
+ j->cadence_f[4].off3 = 0; /* These should represent standard US ring pulse. */
+ j->pstn_last_rmr = jiffies;
+
+ } else {
+ if (j->cardtype == QTI_PHONECARD) {
+ ixj_WriteDSPCommand(0xCF07, j);
+ ixj_WriteDSPCommand(0x00B0, j);
+ ixj_set_port(j, PORT_SPEAKER);
+ } else {
+ ixj_set_port(j, PORT_POTS);
+ SLIC_SetState(PLD_SLIC_STATE_STANDBY, j);
+/* SLIC_SetState(PLD_SLIC_STATE_ACTIVE, j); */
+ }
+ }
+ }
+
+ j->intercom = -1;
+ j->framesread = j->frameswritten = 0;
+ j->read_wait = j->write_wait = 0;
+ j->rxreadycheck = j->txreadycheck = 0;
+
+ /* initialise the DTMF prescale to a sensible value */
+ if (j->cardtype == QTI_LINEJACK) {
+ set_dtmf_prescale(j, 0x10);
+ } else {
+ set_dtmf_prescale(j, 0x40);
+ }
+ set_play_volume(j, 0x100);
+ set_rec_volume(j, 0x100);
+
+ if (ixj_WriteDSPCommand(0x0000, j)) /* Write IDLE to Software Control Register */
+ return -1;
+/* The read values of the SSR should be 0x00 for the IDLE command */
+ if (j->ssr.low || j->ssr.high)
+ return -1;
+
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Enable Line Monitor\n");
+
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Set Line Monitor to Asyncronous Mode\n");
+
+ if (ixj_WriteDSPCommand(0x7E01, j)) /* Asynchronous Line Monitor */
+ return -1;
+
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Enable DTMF Detectors\n");
+
+ if (ixj_WriteDSPCommand(0x5151, j)) /* Enable DTMF detection */
+ return -1;
+
+ if (ixj_WriteDSPCommand(0x6E01, j)) /* Set Asynchronous Tone Generation */
+ return -1;
+
+ set_rec_depth(j, 2); /* Set Record Channel Limit to 2 frames */
+
+ set_play_depth(j, 2); /* Set Playback Channel Limit to 2 frames */
+
+ j->ex.bits.dtmf_ready = 0;
+ j->dtmf_state = 0;
+ j->dtmf_wp = j->dtmf_rp = 0;
+ j->rec_mode = j->play_mode = -1;
+ j->flags.ringing = 0;
+ j->maxrings = MAXRINGS;
+ j->ring_cadence = USA_RING_CADENCE;
+ j->drybuffer = 0;
+ j->winktime = 320;
+ j->flags.dtmf_oob = 0;
+ for (cnt = 0; cnt < 4; cnt++)
+ j->cadence_f[cnt].enable = 0;
+ /* must be a device on the specified address */
+ ixj_WriteDSPCommand(0x0FE3, j); /* Put the DSP in 1/5 power mode. */
+
+ /* Set up the default signals for events */
+ for (cnt = 0; cnt < 35; cnt++)
+ j->ixj_signals[cnt] = SIGIO;
+
+ /* Set the exception signal enable flags */
+ j->ex_sig.bits.dtmf_ready = j->ex_sig.bits.hookstate = j->ex_sig.bits.flash = j->ex_sig.bits.pstn_ring =
+ j->ex_sig.bits.caller_id = j->ex_sig.bits.pstn_wink = j->ex_sig.bits.f0 = j->ex_sig.bits.f1 = j->ex_sig.bits.f2 =
+ j->ex_sig.bits.f3 = j->ex_sig.bits.fc0 = j->ex_sig.bits.fc1 = j->ex_sig.bits.fc2 = j->ex_sig.bits.fc3 = 1;
+#ifdef IXJ_DYN_ALLOC
+ j->fskdata = NULL;
+#endif
+ j->fskdcnt = 0;
+ j->cidcw_wait = 0;
+
+ /* Register with the Telephony for Linux subsystem */
+ j->p.f_op = &ixj_fops;
+ j->p.open = ixj_open;
+ j->p.board = j->board;
+ phone_register_device(&j->p, PHONE_UNIT_ANY);
+
+ ixj_init_timer(j);
+ ixj_add_timer(j);
+ return 0;
+}
+
+/*
+ * Exported service for pcmcia card handling
+ */
+
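+/*
+ * A PCMCIA socket driver is expected to pass in the DSP and XILINX
+ * I/O bases from the card's resource setup, along the lines of this
+ * sketch (io_base and the 0x10 offset are illustrative; the real
+ * values come from the companion PCMCIA module):
+ *
+ *	IXJ *j = ixj_pcmcia_probe(io_base, io_base + 0x10);
+ */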
+IXJ *ixj_pcmcia_probe(unsigned long dsp, unsigned long xilinx)
+{
+ IXJ *j = ixj_alloc();
+
+ j->board = 0;
+
+ j->DSPbase = dsp;
+ j->XILINXbase = xilinx;
+ j->cardtype = QTI_PHONECARD;
+ ixj_selfprobe(j);
+ return j;
+}
+
+EXPORT_SYMBOL(ixj_pcmcia_probe); /* For PCMCIA */
+
+static int ixj_get_status_proc(char *buf)
+{
+ int len;
+ int cnt;
+ IXJ *j;
+ len = 0;
+ len += sprintf(buf + len, "\nDriver version %i.%i.%i", IXJ_VER_MAJOR, IXJ_VER_MINOR, IXJ_BLD_VER);
+ len += sprintf(buf + len, "\nsizeof IXJ struct %Zd bytes", sizeof(IXJ));
+ len += sprintf(buf + len, "\nsizeof DAA struct %Zd bytes", sizeof(DAA_REGS));
+ len += sprintf(buf + len, "\nUsing old telephony API");
+ len += sprintf(buf + len, "\nDebug Level %d\n", ixjdebug);
+
+ for (cnt = 0; cnt < IXJMAX; cnt++) {
+ j = get_ixj(cnt);
+ if(j==NULL)
+ continue;
+ if (j->DSPbase) {
+ len += sprintf(buf + len, "\nCard Num %d", cnt);
+ len += sprintf(buf + len, "\nDSP Base Address 0x%4.4x", j->DSPbase);
+ if (j->cardtype != QTI_PHONEJACK)
+ len += sprintf(buf + len, "\nXILINX Base Address 0x%4.4x", j->XILINXbase);
+ len += sprintf(buf + len, "\nDSP Type %2.2x%2.2x", j->dsp.high, j->dsp.low);
+ len += sprintf(buf + len, "\nDSP Version %2.2x.%2.2x", j->ver.high, j->ver.low);
+ len += sprintf(buf + len, "\nSerial Number %8.8x", j->serial);
+ switch (j->cardtype) {
+ case (QTI_PHONEJACK):
+ len += sprintf(buf + len, "\nCard Type = Internet PhoneJACK");
+ break;
+ case (QTI_LINEJACK):
+ len += sprintf(buf + len, "\nCard Type = Internet LineJACK");
+ if (j->flags.g729_loaded)
+ len += sprintf(buf + len, " w/G.729 A/B");
+ len += sprintf(buf + len, " Country = %d", j->daa_country);
+ break;
+ case (QTI_PHONEJACK_LITE):
+ len += sprintf(buf + len, "\nCard Type = Internet PhoneJACK Lite");
+ if (j->flags.g729_loaded)
+ len += sprintf(buf + len, " w/G.729 A/B");
+ break;
+ case (QTI_PHONEJACK_PCI):
+ len += sprintf(buf + len, "\nCard Type = Internet PhoneJACK PCI");
+ if (j->flags.g729_loaded)
+ len += sprintf(buf + len, " w/G.729 A/B");
+ break;
+ case (QTI_PHONECARD):
+ len += sprintf(buf + len, "\nCard Type = Internet PhoneCARD");
+ if (j->flags.g729_loaded)
+ len += sprintf(buf + len, " w/G.729 A/B");
+ len += sprintf(buf + len, "\nSmart Cable %spresent", j->pccr1.bits.drf ? "not " : "");
+ if (!j->pccr1.bits.drf)
+ len += sprintf(buf + len, "\nSmart Cable type %d", j->flags.pcmciasct);
+ len += sprintf(buf + len, "\nSmart Cable state %d", j->flags.pcmciastate);
+ break;
+ default:
+ len += sprintf(buf + len, "\nCard Type = %d", j->cardtype);
+ break;
+ }
+ len += sprintf(buf + len, "\nReaders %d", j->readers);
+ len += sprintf(buf + len, "\nWriters %d", j->writers);
+ add_caps(j);
+ len += sprintf(buf + len, "\nCapabilities %d", j->caps);
+ if (j->dsp.low != 0x20)
+ len += sprintf(buf + len, "\nDSP Processor load %d", j->proc_load);
+ if (j->flags.cidsent)
+ len += sprintf(buf + len, "\nCaller ID data sent");
+ else
+ len += sprintf(buf + len, "\nCaller ID data not sent");
+
+ len += sprintf(buf + len, "\nPlay CODEC ");
+ switch (j->play_codec) {
+ case G723_63:
+ len += sprintf(buf + len, "G.723.1 6.3");
+ break;
+ case G723_53:
+ len += sprintf(buf + len, "G.723.1 5.3");
+ break;
+ case TS85:
+ len += sprintf(buf + len, "TrueSpeech 8.5");
+ break;
+ case TS48:
+ len += sprintf(buf + len, "TrueSpeech 4.8");
+ break;
+ case TS41:
+ len += sprintf(buf + len, "TrueSpeech 4.1");
+ break;
+ case G728:
+ len += sprintf(buf + len, "G.728");
+ break;
+ case G729:
+ len += sprintf(buf + len, "G.729");
+ break;
+ case G729B:
+ len += sprintf(buf + len, "G.729B");
+ break;
+ case ULAW:
+ len += sprintf(buf + len, "uLaw");
+ break;
+ case ALAW:
+ len += sprintf(buf + len, "aLaw");
+ break;
+ case LINEAR16:
+ len += sprintf(buf + len, "16 bit Linear");
+ break;
+ case LINEAR8:
+ len += sprintf(buf + len, "8 bit Linear");
+ break;
+ case WSS:
+ len += sprintf(buf + len, "Windows Sound System");
+ break;
+ default:
+ len += sprintf(buf + len, "NO CODEC CHOSEN");
+ break;
+ }
+ len += sprintf(buf + len, "\nRecord CODEC ");
+ switch (j->rec_codec) {
+ case G723_63:
+ len += sprintf(buf + len, "G.723.1 6.3");
+ break;
+ case G723_53:
+ len += sprintf(buf + len, "G.723.1 5.3");
+ break;
+ case TS85:
+ len += sprintf(buf + len, "TrueSpeech 8.5");
+ break;
+ case TS48:
+ len += sprintf(buf + len, "TrueSpeech 4.8");
+ break;
+ case TS41:
+ len += sprintf(buf + len, "TrueSpeech 4.1");
+ break;
+ case G728:
+ len += sprintf(buf + len, "G.728");
+ break;
+ case G729:
+ len += sprintf(buf + len, "G.729");
+ break;
+ case G729B:
+ len += sprintf(buf + len, "G.729B");
+ break;
+ case ULAW:
+ len += sprintf(buf + len, "uLaw");
+ break;
+ case ALAW:
+ len += sprintf(buf + len, "aLaw");
+ break;
+ case LINEAR16:
+ len += sprintf(buf + len, "16 bit Linear");
+ break;
+ case LINEAR8:
+ len += sprintf(buf + len, "8 bit Linear");
+ break;
+ case WSS:
+ len += sprintf(buf + len, "Windows Sound System");
+ break;
+ default:
+ len += sprintf(buf + len, "NO CODEC CHOSEN");
+ break;
+ }
+ len += sprintf(buf + len, "\nAEC ");
+ switch (j->aec_level) {
+ case AEC_OFF:
+ len += sprintf(buf + len, "Off");
+ break;
+ case AEC_LOW:
+ len += sprintf(buf + len, "Low");
+ break;
+ case AEC_MED:
+ len += sprintf(buf + len, "Med");
+ break;
+ case AEC_HIGH:
+ len += sprintf(buf + len, "High");
+ break;
+ case AEC_AUTO:
+ len += sprintf(buf + len, "Auto");
+ break;
+ case AEC_AGC:
+ len += sprintf(buf + len, "AEC/AGC");
+ break;
+ default:
+ len += sprintf(buf + len, "unknown(%i)", j->aec_level);
+ break;
+ }
+
+ len += sprintf(buf + len, "\nRec volume 0x%x", get_rec_volume(j));
+ len += sprintf(buf + len, "\nPlay volume 0x%x", get_play_volume(j));
+ len += sprintf(buf + len, "\nDTMF prescale 0x%x", get_dtmf_prescale(j));
+
+ len += sprintf(buf + len, "\nHook state %d", j->hookstate); /* j->r_hook); */
+
+ if (j->cardtype == QTI_LINEJACK) {
+ len += sprintf(buf + len, "\nPOTS Correct %d", j->flags.pots_correct);
+ len += sprintf(buf + len, "\nPSTN Present %d", j->flags.pstn_present);
+ len += sprintf(buf + len, "\nPSTN Check %d", j->flags.pstncheck);
+ len += sprintf(buf + len, "\nPOTS to PSTN %d", j->flags.pots_pstn);
+ switch (j->daa_mode) {
+ case SOP_PU_SLEEP:
+ len += sprintf(buf + len, "\nDAA PSTN On Hook");
+ break;
+ case SOP_PU_RINGING:
+ len += sprintf(buf + len, "\nDAA PSTN Ringing");
+ len += sprintf(buf + len, "\nRinging state = %d", j->cadence_f[4].state);
+ break;
+ case SOP_PU_CONVERSATION:
+ len += sprintf(buf + len, "\nDAA PSTN Off Hook");
+ break;
+ case SOP_PU_PULSEDIALING:
+ len += sprintf(buf + len, "\nDAA PSTN Pulse Dialing");
+ break;
+ }
+ len += sprintf(buf + len, "\nDAA RMR = %d", j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.bitreg.RMR);
+ len += sprintf(buf + len, "\nDAA VDD OK = %d", j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK);
+ len += sprintf(buf + len, "\nDAA CR0 = 0x%02x", j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg);
+ len += sprintf(buf + len, "\nDAA CR1 = 0x%02x", j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg);
+ len += sprintf(buf + len, "\nDAA CR2 = 0x%02x", j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg);
+ len += sprintf(buf + len, "\nDAA CR3 = 0x%02x", j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg);
+ len += sprintf(buf + len, "\nDAA CR4 = 0x%02x", j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg);
+ len += sprintf(buf + len, "\nDAA CR5 = 0x%02x", j->m_DAAShadowRegs.SOP_REGS.SOP.cr5.reg);
+ len += sprintf(buf + len, "\nDAA XR0 = 0x%02x", j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.reg);
+ len += sprintf(buf + len, "\nDAA ringstop %ld - jiffies %ld", j->pstn_ring_stop, jiffies);
+ }
+ switch (j->port) {
+ case PORT_POTS:
+ len += sprintf(buf + len, "\nPort POTS");
+ break;
+ case PORT_PSTN:
+ len += sprintf(buf + len, "\nPort PSTN");
+ break;
+ case PORT_SPEAKER:
+ len += sprintf(buf + len, "\nPort SPEAKER/MIC");
+ break;
+ case PORT_HANDSET:
+ len += sprintf(buf + len, "\nPort HANDSET");
+ break;
+ }
+ if (j->dsp.low == 0x21 || j->dsp.low == 0x22) {
+ len += sprintf(buf + len, "\nSLIC state ");
+ switch (SLIC_GetState(j)) {
+ case PLD_SLIC_STATE_OC:
+ len += sprintf(buf + len, "OC");
+ break;
+ case PLD_SLIC_STATE_RINGING:
+ len += sprintf(buf + len, "RINGING");
+ break;
+ case PLD_SLIC_STATE_ACTIVE:
+ len += sprintf(buf + len, "ACTIVE");
+ break;
+ case PLD_SLIC_STATE_OHT: /* On-hook transmit */
+ len += sprintf(buf + len, "OHT");
+ break;
+ case PLD_SLIC_STATE_TIPOPEN:
+ len += sprintf(buf + len, "TIPOPEN");
+ break;
+ case PLD_SLIC_STATE_STANDBY:
+ len += sprintf(buf + len, "STANDBY");
+ break;
+ case PLD_SLIC_STATE_APR: /* Active polarity reversal */
+ len += sprintf(buf + len, "APR");
+ break;
+ case PLD_SLIC_STATE_OHTPR: /* OHT polarity reversal */
+ len += sprintf(buf + len, "OHTPR");
+ break;
+ default:
+ len += sprintf(buf + len, "%d", SLIC_GetState(j));
+ break;
+ }
+ }
+ len += sprintf(buf + len, "\nBase Frame %2.2x.%2.2x", j->baseframe.high, j->baseframe.low);
+ len += sprintf(buf + len, "\nCID Base Frame %2d", j->cid_base_frame_size);
+#ifdef PERFMON_STATS
+ len += sprintf(buf + len, "\nTimer Checks %ld", j->timerchecks);
+ len += sprintf(buf + len, "\nRX Ready Checks %ld", j->rxreadycheck);
+ len += sprintf(buf + len, "\nTX Ready Checks %ld", j->txreadycheck);
+ len += sprintf(buf + len, "\nFrames Read %ld", j->framesread);
+ len += sprintf(buf + len, "\nFrames Written %ld", j->frameswritten);
+ len += sprintf(buf + len, "\nDry Buffer %ld", j->drybuffer);
+ len += sprintf(buf + len, "\nRead Waits %ld", j->read_wait);
+ len += sprintf(buf + len, "\nWrite Waits %ld", j->write_wait);
+ len += sprintf(buf + len, "\nStatus Waits %ld", j->statuswait);
+ len += sprintf(buf + len, "\nStatus Wait Fails %ld", j->statuswaitfail);
+ len += sprintf(buf + len, "\nPControl Waits %ld", j->pcontrolwait);
+ len += sprintf(buf + len, "\nPControl Wait Fails %ld", j->pcontrolwaitfail);
+ len += sprintf(buf + len, "\nIs Control Ready Checks %ld", j->iscontrolready);
+ len += sprintf(buf + len, "\nIs Control Ready Check failures %ld", j->iscontrolreadyfail);
+
+#endif
+ len += sprintf(buf + len, "\n");
+ }
+ }
+ return len;
+}
+
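+/*
+ * Old-style procfs read callback: ixj_get_status_proc() renders the whole
+ * status report into the single page we are handed, and the arithmetic
+ * below windows that buffer through *start, off and count, setting *eof
+ * once everything up to off + count has been produced.
+ */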
+static int ixj_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len = ixj_get_status_proc(page);
+
+	if (len <= off + count)
+		*eof = 1;
+	*start = page + off;
+	len -= off;
+	if (len > count)
+		len = count;
+	if (len < 0)
+		len = 0;
+ return len;
+}
+
+static void cleanup(void)
+{
+ int cnt;
+ IXJ *j;
+
+ for (cnt = 0; cnt < IXJMAX; cnt++) {
+ j = get_ixj(cnt);
+		if (j != NULL && j->DSPbase) {
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: Deleting timer for /dev/phone%d\n", cnt);
+ del_timer(&j->timer);
+ if (j->cardtype == QTI_LINEJACK) {
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_slicw.bits.rly1 = 0;
+ j->pld_slicw.bits.rly2 = 0;
+ j->pld_slicw.bits.rly3 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ LED_SetState(0x0, j);
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: Releasing XILINX address for /dev/phone%d\n", cnt);
+ release_region(j->XILINXbase, 8);
+ } else if (j->cardtype == QTI_PHONEJACK_LITE || j->cardtype == QTI_PHONEJACK_PCI) {
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: Releasing XILINX address for /dev/phone%d\n", cnt);
+ release_region(j->XILINXbase, 4);
+ }
+ kfree(j->read_buffer);
+ kfree(j->write_buffer);
+ if (j->dev)
+ pnp_device_detach(j->dev);
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: Unregistering /dev/phone%d from LTAPI\n", cnt);
+ phone_unregister_device(&j->p);
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: Releasing DSP address for /dev/phone%d\n", cnt);
+ release_region(j->DSPbase, 16);
+#ifdef IXJ_DYN_ALLOC
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: Freeing memory for /dev/phone%d\n", cnt);
+ kfree(j);
+ ixj[cnt] = NULL;
+#endif
+ }
+ }
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: Removing /proc/ixj\n");
+	remove_proc_entry("ixj", NULL);
+}
+
+/* Typedefs */
+typedef struct {
+ BYTE length;
+ DWORD bits;
+} DATABLOCK;
+
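+/*
+ * Bit-banged serial EEPROM access via the card's EEPROM control port.
+ * Reading the accesses below back: bit 0 of the port is the SK clock,
+ * bit 1 is chip select, bit 2 is data written to the EEPROM, and data
+ * from the EEPROM is sampled on bit 3.
+ */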
+static void PCIEE_WriteBit(WORD wEEPROMAddress, BYTE lastLCC, BYTE byData)
+{
+ lastLCC = lastLCC & 0xfb;
+ lastLCC = lastLCC | (byData ? 4 : 0);
+ outb(lastLCC, wEEPROMAddress); /*set data out bit as appropriate */
+
+ mdelay(1);
+ lastLCC = lastLCC | 0x01;
+ outb(lastLCC, wEEPROMAddress); /*SK rising edge */
+
+ lastLCC = lastLCC & 0xfe;
+ mdelay(1);
+ outb(lastLCC, wEEPROMAddress); /*after delay, SK falling edge */
+
+}
+
+static BYTE PCIEE_ReadBit(WORD wEEPROMAddress, BYTE lastLCC)
+{
+ mdelay(1);
+ lastLCC = lastLCC | 0x01;
+ outb(lastLCC, wEEPROMAddress); /*SK rising edge */
+
+ lastLCC = lastLCC & 0xfe;
+ mdelay(1);
+ outb(lastLCC, wEEPROMAddress); /*after delay, SK falling edge */
+
+ return ((inb(wEEPROMAddress) >> 3) & 1);
+}
+
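+/*
+ * Read one 16-bit word from the EEPROM.  The sequence clocked out below
+ * (a start bit, the 1,0 READ opcode, an 8-bit address MSB first, then
+ * 16 data bits clocked back in) matches the READ command of the 93CXX
+ * family of serial EEPROMs, which is presumably what the card carries.
+ */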
+static bool PCIEE_ReadWord(WORD wAddress, WORD wLoc, WORD * pwResult)
+{
+ BYTE lastLCC;
+ WORD wEEPROMAddress = wAddress + 3;
+ DWORD i;
+ BYTE byResult;
+ *pwResult = 0;
+ lastLCC = inb(wEEPROMAddress);
+ lastLCC = lastLCC | 0x02;
+ lastLCC = lastLCC & 0xfe;
+ outb(lastLCC, wEEPROMAddress); /* CS hi, SK lo */
+
+ mdelay(1); /* delay */
+
+ PCIEE_WriteBit(wEEPROMAddress, lastLCC, 1);
+ PCIEE_WriteBit(wEEPROMAddress, lastLCC, 1);
+ PCIEE_WriteBit(wEEPROMAddress, lastLCC, 0);
+ for (i = 0; i < 8; i++) {
+ PCIEE_WriteBit(wEEPROMAddress, lastLCC, wLoc & 0x80 ? 1 : 0);
+ wLoc <<= 1;
+ }
+
+ for (i = 0; i < 16; i++) {
+ byResult = PCIEE_ReadBit(wEEPROMAddress, lastLCC);
+ *pwResult = (*pwResult << 1) | byResult;
+ }
+
+ mdelay(1); /* another delay */
+
+ lastLCC = lastLCC & 0xfd;
+ outb(lastLCC, wEEPROMAddress); /* negate CS */
+
+	return false;
+}
+
+static DWORD PCIEE_GetSerialNumber(WORD wAddress)
+{
+ WORD wLo, wHi;
+ if (PCIEE_ReadWord(wAddress, 62, &wLo))
+ return 0;
+ if (PCIEE_ReadWord(wAddress, 63, &wHi))
+ return 0;
+ return (((DWORD) wHi << 16) | wLo);
+}
+
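+/*
+ * dspio/xio name the DSP and XILINX I/O bases for cards that cannot be
+ * discovered via PnP (consumed by ixj_probe_isa() below).  A sketch of
+ * typical usage, with purely illustrative addresses:
+ *
+ *	modprobe ixj dspio=0x340,0x300 xio=0x330,0x320
+ */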
+static int dspio[IXJMAX + 1];
+static int xio[IXJMAX + 1];
+
+module_param_array(dspio, int, NULL, 0);
+module_param_array(xio, int, NULL, 0);
+MODULE_DESCRIPTION("Quicknet VoIP Telephony card module - www.quicknet.net");
+MODULE_AUTHOR("Ed Okerson <eokerson@quicknet.net>");
+MODULE_LICENSE("GPL");
+
+static void __exit ixj_exit(void)
+{
+ cleanup();
+}
+
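+/*
+ * Reserve the 16-byte DSP I/O window and allocate the per-card IXJ
+ * structure; on any failure the region is released again and NULL is
+ * returned, so callers only need a NULL check.
+ */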
+static IXJ *new_ixj(unsigned long port)
+{
+ IXJ *res;
+ if (!request_region(port, 16, "ixj DSP")) {
+ printk(KERN_INFO "ixj: can't get I/O address 0x%lx\n", port);
+ return NULL;
+ }
+ res = ixj_alloc();
+ if (!res) {
+ release_region(port, 16);
+ printk(KERN_INFO "ixj: out of memory\n");
+ return NULL;
+ }
+ res->DSPbase = port;
+ return res;
+}
+
+static int __init ixj_probe_isapnp(int *cnt)
+{
+ int probe = 0;
+ int func = 0x110;
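+	/*
+	 * QTI ISAPnP function IDs, probed in turn: 0x110 is the Internet
+	 * PhoneJACK, 0x310 the Internet LineJACK and 0x410 the Internet
+	 * PhoneJACK Lite (see the cardtype switch below).
+	 */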
+ struct pnp_dev *dev = NULL, *old_dev = NULL;
+
+ while (1) {
+ do {
+ IXJ *j;
+ int result;
+
+ old_dev = dev;
+ dev = pnp_find_dev(NULL, ISAPNP_VENDOR('Q', 'T', 'I'),
+ ISAPNP_FUNCTION(func), old_dev);
+ if (!dev || !dev->card)
+ break;
+ result = pnp_device_attach(dev);
+ if (result < 0) {
+				printk(KERN_ERR "ixj: pnp attach failed %d\n", result);
+ break;
+ }
+ if (pnp_activate_dev(dev) < 0) {
+				printk(KERN_ERR "ixj: pnp activate failed (out of resources?)\n");
+ pnp_device_detach(dev);
+ return -ENOMEM;
+ }
+
+ if (!pnp_port_valid(dev, 0)) {
+ pnp_device_detach(dev);
+ return -ENODEV;
+ }
+
+ j = new_ixj(pnp_port_start(dev, 0));
+ if (!j)
+ break;
+
+ if (func != 0x110)
+ j->XILINXbase = pnp_port_start(dev, 1); /* get real port */
+
+ switch (func) {
+ case (0x110):
+ j->cardtype = QTI_PHONEJACK;
+ break;
+ case (0x310):
+ j->cardtype = QTI_LINEJACK;
+ break;
+ case (0x410):
+ j->cardtype = QTI_PHONEJACK_LITE;
+ break;
+ }
+ j->board = *cnt;
+ probe = ixj_selfprobe(j);
+			if (!probe) {
+ j->serial = dev->card->serial;
+ j->dev = dev;
+ switch (func) {
+ case 0x110:
+ printk(KERN_INFO "ixj: found Internet PhoneJACK at 0x%x\n", j->DSPbase);
+ break;
+ case 0x310:
+ printk(KERN_INFO "ixj: found Internet LineJACK at 0x%x\n", j->DSPbase);
+ break;
+ case 0x410:
+ printk(KERN_INFO "ixj: found Internet PhoneJACK Lite at 0x%x\n", j->DSPbase);
+ break;
+ }
+ }
+ ++*cnt;
+ } while (dev);
+ if (func == 0x410)
+ break;
+ if (func == 0x310)
+ func = 0x410;
+ if (func == 0x110)
+ func = 0x310;
+ dev = NULL;
+ }
+ return probe;
+}
+
+static int __init ixj_probe_isa(int *cnt)
+{
+	int i;
+
+ /* Use passed parameters for older kernels without PnP */
+ for (i = 0; i < IXJMAX; i++) {
+ if (dspio[i]) {
+ IXJ *j = new_ixj(dspio[i]);
+
+ if (!j)
+ break;
+
+ j->XILINXbase = xio[i];
+ j->cardtype = 0;
+
+ j->board = *cnt;
+			ixj_selfprobe(j);
+ j->dev = NULL;
+ ++*cnt;
+ }
+ }
+ return 0;
+}
+
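+/*
+ * BAR 0 holds the DSP I/O window and, per the offset used below, the
+ * XILINX registers sit 0x10 above it; BAR 2 is the EEPROM control port
+ * from which PCIEE_GetSerialNumber() pulls the card's serial number.
+ */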
+static int __init ixj_probe_pci(int *cnt)
+{
+ struct pci_dev *pci = NULL;
+ int i, probe = 0;
+ IXJ *j = NULL;
+
+ for (i = 0; i < IXJMAX - *cnt; i++) {
+ pci = pci_get_device(PCI_VENDOR_ID_QUICKNET,
+ PCI_DEVICE_ID_QUICKNET_XJ, pci);
+ if (!pci)
+ break;
+
+ if (pci_enable_device(pci))
+ break;
+ j = new_ixj(pci_resource_start(pci, 0));
+ if (!j)
+ break;
+
+		j->serial = PCIEE_GetSerialNumber(pci_resource_start(pci, 2));
+ j->XILINXbase = j->DSPbase + 0x10;
+ j->cardtype = QTI_PHONEJACK_PCI;
+ j->board = *cnt;
+ probe = ixj_selfprobe(j);
+ if (!probe)
+ printk(KERN_INFO "ixj: found Internet PhoneJACK PCI at 0x%x\n", j->DSPbase);
+ ++*cnt;
+ }
+ pci_dev_put(pci);
+ return probe;
+}
+
+static int __init ixj_init(void)
+{
+ int cnt = 0;
+ int probe = 0;
+
+ /* These might be no-ops, see above. */
+ if ((probe = ixj_probe_isapnp(&cnt)) < 0) {
+ return probe;
+ }
+ if ((probe = ixj_probe_isa(&cnt)) < 0) {
+ return probe;
+ }
+ if ((probe = ixj_probe_pci(&cnt)) < 0) {
+ return probe;
+ }
+ printk(KERN_INFO "ixj driver initialized.\n");
+	create_proc_read_entry("ixj", 0, NULL, ixj_read_proc, NULL);
+ return probe;
+}
+
+module_init(ixj_init);
+module_exit(ixj_exit);
+
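+/*
+ * Per-country coefficient tables for the DAA (the Data Access Arrangement
+ * that fronts the PSTN line on the LineJACK).  Each DAA_Coeff_*() routine
+ * only fills in the m_DAAShadowRegs cache; the shadow copies are written
+ * to the chip elsewhere in the driver.  A dispatcher is expected to pick
+ * the table from the configured country, along the lines of this sketch
+ * (illustrative only; the real selection logic lives outside this hunk):
+ *
+ *	switch (country) {
+ *	case DAA_US:		DAA_Coeff_US(j);	break;
+ *	case DAA_UK:		DAA_Coeff_UK(j);	break;
+ *	case DAA_FRANCE:	DAA_Coeff_France(j);	break;
+ *	case DAA_GERMANY:	DAA_Coeff_Germany(j);	break;
+ *	case DAA_AUSTRALIA:	DAA_Coeff_Australia(j);	break;
+ *	case DAA_JAPAN:		DAA_Coeff_Japan(j);	break;
+ *	}
+ */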
+static void DAA_Coeff_US(IXJ *j)
+{
+ int i;
+
+ j->daa_country = DAA_US;
+ /*----------------------------------------------- */
+ /* CAO */
+ for (i = 0; i < ALISDAA_CALLERID_SIZE; i++) {
+ j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID[i] = 0;
+ }
+
+/* Bytes for IM-filter part 1 (04): 0E,32,E2,2F,C2,5A,C0,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7] = 0x03;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6] = 0x4B;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5] = 0x5D;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4] = 0xCD;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3] = 0x24;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2] = 0xC5;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1] = 0xA0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0] = 0x00;
+/* Bytes for IM-filter part 2 (05): 72,85,00,0E,2B,3A,D0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7] = 0x71;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6] = 0x1A;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4] = 0x0A;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2] = 0x33;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1] = 0xE0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0] = 0x08;
+/* Bytes for FRX-filter (08): 03,8F,48,F2,8F,48,70,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7] = 0x05;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6] = 0xA3;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5] = 0x72;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4] = 0x34;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3] = 0x3F;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2] = 0x3B;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1] = 0x30;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0] = 0x08;
+/* Bytes for FRR-filter (07): 04,8F,38,7F,9B,EA,B0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7] = 0x05;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6] = 0x87;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5] = 0xF9;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4] = 0x3E;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2] = 0xDA;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1] = 0xB0;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0] = 0x08;
+/* Bytes for AX-filter (0A): 16,55,DD,CA */
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3] = 0x41;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1] = 0xDD;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0] = 0xCA;
+/* Bytes for AR-filter (09): 52,D3,11,42 */
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3] = 0x25;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2] = 0xC7;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1] = 0x10;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0] = 0xD6;
+/* Bytes for TH-filter part 1 (00): 00,42,48,81,B3,80,00,98 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6] = 0x42;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5] = 0x48;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4] = 0x81;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3] = 0xA5;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2] = 0x80;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0] = 0x98;
+/* Bytes for TH-filter part 2 (01): 02,F2,33,A0,68,AB,8A,AD */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7] = 0x02;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6] = 0xA2;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5] = 0x2B;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4] = 0xB0;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3] = 0xE8;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2] = 0xAB;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1] = 0x81;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0] = 0xCC;
+/* Bytes for TH-filter part 3 (02): 00,88,DA,54,A4,BA,2D,BB */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6] = 0x88;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5] = 0xD2;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4] = 0x24;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2] = 0xA9;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1] = 0x3B;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0] = 0xA6;
+/* ; (10K, 0.68uF) */
+ /* */
+ /* Bytes for Ringing part 1 (03):1B,3B,9B,BA,D4,1C,B3,23 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1B;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0x3C;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0x93;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0x3A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0xA3;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x23;
+ /* Bytes for Ringing part 2 (06):13,42,A6,BA,D4,73,CA,D5 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0xA2;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x7A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0x0A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD5;
+
+ /* Levelmetering Ringing (0D):B2,45,0F,8E */
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0xAA;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x35;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0x0F;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x8E;
+
+ /* Bytes for Ringing part 1 (03):1B,3B,9B,BA,D4,1C,B3,23 */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1C; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0xB3; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0xAB; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0xAB; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0x54; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x2D; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0x62; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x2D; */
+ /* Bytes for Ringing part 2 (06):13,42,A6,BA,D4,73,CA,D5 */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x2D; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0x62; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBB; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0x2A; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x7D; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0x0A; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD4; */
+/* */
+ /* Levelmetering Ringing (0D):B2,45,0F,8E */
+/* j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0xAA; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x05; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0x0F; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x8E; */
+
+ /* Caller ID 1st Tone (0E):CA,0E,CA,09,99,99,99,99 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6] = 0x0E;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4] = 0x09;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0] = 0x99;
+/* Caller ID 2nd Tone (0F):FD,B5,BA,07,DA,00,00,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7] = 0xFD;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3] = 0xDA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0] = 0x00;
+/* */
+ /* ;CR Registers */
+ /* Config. Reg. 0 (filters) (cr0):FE ; CLK gen. by crystal */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = 0xFF;
+/* Config. Reg. 1 (dialing) (cr1):05 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = 0x05;
+/* Config. Reg. 2 (caller ID) (cr2):04 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = 0x04;
+/* Config. Reg. 3 (testloops) (cr3):03 ; SEL Bit==0, HP-disabled */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = 0x00;
+/* Config. Reg. 4 (analog gain) (cr4):02 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = 0x02;
+ /* Config. Reg. 5 (Version) (cr5):02 */
+ /* Config. Reg. 6 (Reserved) (cr6):00 */
+ /* Config. Reg. 7 (Reserved) (cr7):00 */
+ /* */
+ /* ;xr Registers */
+ /* Ext. Reg. 0 (Interrupt Reg.) (xr0):02 */
+
+ j->m_DAAShadowRegs.XOP_xr0_W.reg = 0x02; /* SO_1 set to '1' because it is inverted. */
+ /* Ext. Reg. 1 (Interrupt enable) (xr1):3C Cadence, RING, Caller ID, VDD_OK */
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg = 0x3C;
+/* Ext. Reg. 2 (Cadence Time Out) (xr2):7D */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg = 0x7D;
+/* Ext. Reg. 3 (DC Char) (xr3):32 ; B-Filter Off == 1 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg = 0x3B; /*0x32; */
+ /* Ext. Reg. 4 (Cadence) (xr4):00 */
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg = 0x00;
+/* Ext. Reg. 5 (Ring timer) (xr5):22 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg = 0x22;
+/* Ext. Reg. 6 (Power State) (xr6):00 */
+ j->m_DAAShadowRegs.XOP_xr6_W.reg = 0x00;
+/* Ext. Reg. 7 (Vdd) (xr7):40 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg = 0x40; /* 0x40 ??? Should it be 0x00? */
+ /* */
+ /* DTMF Tone 1 (0B): 11,B3,5A,2C ; 697 Hz */
+ /* 12,33,5A,C3 ; 770 Hz */
+ /* 13,3C,5B,32 ; 852 Hz */
+ /* 1D,1B,5C,CC ; 941 Hz */
+
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3] = 0x11;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2] = 0xB3;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1] = 0x5A;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0] = 0x2C;
+/* DTMF Tone 2 (0C): 32,32,52,B3 ; 1209 Hz */
+ /* EC,1D,52,22 ; 1336 Hz */
+ /* AA,AC,51,D2 ; 1477 Hz */
+ /* 9B,3B,51,25 ; 1633 Hz */
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0] = 0xB3;
+}
+
+static void DAA_Coeff_UK(IXJ *j)
+{
+ int i;
+
+ j->daa_country = DAA_UK;
+ /*----------------------------------------------- */
+ /* CAO */
+ for (i = 0; i < ALISDAA_CALLERID_SIZE; i++) {
+ j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID[i] = 0;
+ }
+
+/* Bytes for IM-filter part 1 (04): 00,C2,BB,A8,CB,81,A0,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6] = 0xC2;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5] = 0xBB;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4] = 0xA8;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3] = 0xCB;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2] = 0x81;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1] = 0xA0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0] = 0x00;
+/* Bytes for IM-filter part 2 (05): 40,00,00,0A,A4,33,E0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7] = 0x40;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4] = 0x0A;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3] = 0xA4;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2] = 0x33;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1] = 0xE0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0] = 0x08;
+/* Bytes for FRX-filter (08): 07,9B,ED,24,B2,A2,A0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6] = 0x9B;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5] = 0xED;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4] = 0x24;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3] = 0xB2;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2] = 0xA2;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1] = 0xA0;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0] = 0x08;
+/* Bytes for FRR-filter (07): 0F,92,F2,B2,87,D2,30,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7] = 0x0F;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6] = 0x92;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5] = 0xF2;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4] = 0xB2;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3] = 0x87;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2] = 0xD2;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1] = 0x30;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0] = 0x08;
+/* Bytes for AX-filter (0A): 1B,A5,DD,CA */
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3] = 0x1B;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2] = 0xA5;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1] = 0xDD;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0] = 0xCA;
+/* Bytes for AR-filter (09): E2,27,10,D6 */
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3] = 0xE2;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2] = 0x27;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1] = 0x10;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0] = 0xD6;
+/* Bytes for TH-filter part 1 (00): 80,2D,38,8B,D0,00,00,98 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7] = 0x80;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6] = 0x2D;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5] = 0x38;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4] = 0x8B;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3] = 0xD0;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0] = 0x98;
+/* Bytes for TH-filter part 2 (01): 02,5A,53,F0,0B,5F,84,D4 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7] = 0x02;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6] = 0x5A;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5] = 0x53;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4] = 0xF0;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3] = 0x0B;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2] = 0x5F;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1] = 0x84;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0] = 0xD4;
+/* Bytes for TH-filter part 3 (02): 00,88,6A,A4,8F,52,F5,32 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6] = 0x88;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5] = 0x6A;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4] = 0xA4;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3] = 0x8F;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1] = 0xF5;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0] = 0x32;
+/* ; idle */
+ /* Bytes for Ringing part 1 (03):1B,3C,93,3A,22,12,A3,23 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1B;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0x3C;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0x93;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0x3A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0xA3;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x23;
+/* Bytes for Ringing part 2 (06):12,A2,A6,BA,22,7A,0A,D5 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0xA2;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x7A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0x0A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD5;
+/* Levelmetering Ringing (0D):AA,35,0F,8E ; 25Hz 30V less possible? */
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0xAA;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x35;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0x0F;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x8E;
+/* Caller ID 1st Tone (0E):CA,0E,CA,09,99,99,99,99 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6] = 0x0E;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4] = 0x09;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0] = 0x99;
+/* Caller ID 2nd Tone (0F):FD,B5,BA,07,DA,00,00,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7] = 0xFD;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3] = 0xDA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0] = 0x00;
+/* ;CR Registers */
+ /* Config. Reg. 0 (filters) (cr0):FF */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = 0xFF;
+/* Config. Reg. 1 (dialing) (cr1):05 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = 0x05;
+/* Config. Reg. 2 (caller ID) (cr2):04 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = 0x04;
+/* Config. Reg. 3 (testloops) (cr3):00 ; */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = 0x00;
+/* Config. Reg. 4 (analog gain) (cr4):02 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = 0x02;
+ /* Config. Reg. 5 (Version) (cr5):02 */
+ /* Config. Reg. 6 (Reserved) (cr6):00 */
+ /* Config. Reg. 7 (Reserved) (cr7):00 */
+ /* ;xr Registers */
+ /* Ext. Reg. 0 (Interrupt Reg.) (xr0):02 */
+
+ j->m_DAAShadowRegs.XOP_xr0_W.reg = 0x02; /* SO_1 set to '1' because it is inverted. */
+ /* Ext. Reg. 1 (Interrupt enable) (xr1):1C */
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg = 0x1C; /* RING, Caller ID, VDD_OK */
+ /* Ext. Reg. 2 (Cadence Time Out) (xr2):7D */
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg = 0x7D;
+/* Ext. Reg. 3 (DC Char) (xr3):36 ; */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg = 0x36;
+/* Ext. Reg. 4 (Cadence) (xr4):00 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg = 0x00;
+/* Ext. Reg. 5 (Ring timer) (xr5):22 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg = 0x22;
+/* Ext. Reg. 6 (Power State) (xr6):00 */
+ j->m_DAAShadowRegs.XOP_xr6_W.reg = 0x00;
+/* Ext. Reg. 7 (Vdd) (xr7):46 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg = 0x46; /* 0x46 ??? Should it be 0x00? */
+ /* DTMF Tone 1 (0B): 11,B3,5A,2C ; 697 Hz */
+ /* 12,33,5A,C3 ; 770 Hz */
+ /* 13,3C,5B,32 ; 852 Hz */
+ /* 1D,1B,5C,CC ; 941 Hz */
+
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3] = 0x11;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2] = 0xB3;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1] = 0x5A;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0] = 0x2C;
+/* DTMF Tone 2 (0C): 32,32,52,B3 ; 1209 Hz */
+ /* EC,1D,52,22 ; 1336 Hz */
+ /* AA,AC,51,D2 ; 1477 Hz */
+ /* 9B,3B,51,25 ; 1633 Hz */
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0] = 0xB3;
+}
+
+static void DAA_Coeff_France(IXJ *j)
+{
+ int i;
+
+ j->daa_country = DAA_FRANCE;
+ /*----------------------------------------------- */
+ /* CAO */
+ for (i = 0; i < ALISDAA_CALLERID_SIZE; i++) {
+ j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID[i] = 0;
+ }
+
+/* Bytes for IM-filter part 1 (04): 02,A2,43,2C,22,AF,A0,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7] = 0x02;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6] = 0xA2;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5] = 0x43;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4] = 0x2C;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2] = 0xAF;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1] = 0xA0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0] = 0x00;
+/* Bytes for IM-filter part 2 (05): 67,CE,00,0C,22,33,E0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7] = 0x67;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6] = 0xCE;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4] = 0x2C;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2] = 0x33;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1] = 0xE0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0] = 0x08;
+/* Bytes for FRX-filter (08): 07,9A,28,F6,23,4A,B0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6] = 0x9A;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5] = 0x28;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4] = 0xF6;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3] = 0x23;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2] = 0x4A;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1] = 0xB0;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0] = 0x08;
+/* Bytes for FRR-filter (07): 03,8F,F9,2F,9E,FA,20,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7] = 0x03;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6] = 0x8F;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5] = 0xF9;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4] = 0x2F;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3] = 0x9E;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2] = 0xFA;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1] = 0x20;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0] = 0x08;
+/* Bytes for AX-filter (0A): 16,B5,DD,CA */
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3] = 0x16;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1] = 0xDD;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0] = 0xCA;
+/* Bytes for AR-filter (09): 52,C7,10,D6 */
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3] = 0xE2;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2] = 0xC7;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1] = 0x10;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0] = 0xD6;
+/* Bytes for TH-filter part 1 (00): 00,42,48,81,A6,80,00,98 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6] = 0x42;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5] = 0x48;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4] = 0x81;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3] = 0xA6;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2] = 0x80;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0] = 0x98;
+/* Bytes for TH-filter part 2 (01): 02,AC,2A,30,78,AC,8A,2C */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7] = 0x02;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6] = 0xAC;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5] = 0x2A;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4] = 0x30;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3] = 0x78;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2] = 0xAC;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1] = 0x8A;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0] = 0x2C;
+/* Bytes for TH-filter part 3 (02): 00,88,DA,A5,22,BA,2C,45 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6] = 0x88;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5] = 0xDA;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4] = 0xA5;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1] = 0x2C;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0] = 0x45;
+/* ; idle */
+ /* Bytes for Ringing part 1 (03):1B,3C,93,3A,22,12,A3,23 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1B;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0x3C;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0x93;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0x3A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0xA3;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x23;
+/* Bytes for Ringing part 2 (06):12,A2,A6,BA,22,7A,0A,D5 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0xA2;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x7A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0x0A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD5;
+/* Levelmetering Ringing (0D):32,45,B5,84 ; 50Hz 20V */
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x45;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x84;
+/* Caller ID 1st Tone (0E):CA,0E,CA,09,99,99,99,99 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6] = 0x0E;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4] = 0x09;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0] = 0x99;
+/* Caller ID 2nd Tone (0F):FD,B5,BA,07,DA,00,00,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7] = 0xFD;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3] = 0xDA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0] = 0x00;
+/* ;CR Registers */
+ /* Config. Reg. 0 (filters) (cr0):FF */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = 0xFF;
+/* Config. Reg. 1 (dialing) (cr1):05 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = 0x05;
+/* Config. Reg. 2 (caller ID) (cr2):04 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = 0x04;
+/* Config. Reg. 3 (testloops) (cr3):00 ; */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = 0x00;
+/* Config. Reg. 4 (analog gain) (cr4):02 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = 0x02;
+ /* Config. Reg. 5 (Version) (cr5):02 */
+ /* Config. Reg. 6 (Reserved) (cr6):00 */
+ /* Config. Reg. 7 (Reserved) (cr7):00 */
+ /* ;xr Registers */
+ /* Ext. Reg. 0 (Interrupt Reg.) (xr0):02 */
+
+ j->m_DAAShadowRegs.XOP_xr0_W.reg = 0x02; /* SO_1 set to '1' because it is inverted. */
+ /* Ext. Reg. 1 (Interrupt enable) (xr1):1C */
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg = 0x1C; /* RING, Caller ID, VDD_OK */
+ /* Ext. Reg. 2 (Cadence Time Out) (xr2):7D */
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg = 0x7D;
+/* Ext. Reg. 3 (DC Char) (xr3):36 ; */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg = 0x36;
+/* Ext. Reg. 4 (Cadence) (xr4):00 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg = 0x00;
+/* Ext. Reg. 5 (Ring timer) (xr5):22 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg = 0x22;
+/* Ext. Reg. 6 (Power State) (xr6):00 */
+ j->m_DAAShadowRegs.XOP_xr6_W.reg = 0x00;
+/* Ext. Reg. 7 (Vdd) (xr7):46 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg = 0x46; /* 0x46 ??? Should it be 0x00? */
+ /* DTMF Tone 1 (0B): 11,B3,5A,2C ; 697 Hz */
+ /* 12,33,5A,C3 ; 770 Hz */
+ /* 13,3C,5B,32 ; 852 Hz */
+ /* 1D,1B,5C,CC ; 941 Hz */
+
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3] = 0x11;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2] = 0xB3;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1] = 0x5A;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0] = 0x2C;
+/* DTMF Tone 2 (0C): 32,32,52,B3 ; 1209 Hz */
+ /* EC,1D,52,22 ; 1336 Hz */
+ /* AA,AC,51,D2 ; 1477 Hz */
+ /* 9B,3B,51,25 ; 1633 Hz */
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0] = 0xB3;
+}
+
+static void DAA_Coeff_Germany(IXJ *j)
+{
+ int i;
+
+ j->daa_country = DAA_GERMANY;
+ /*----------------------------------------------- */
+ /* CAO */
+ for (i = 0; i < ALISDAA_CALLERID_SIZE; i++) {
+ j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID[i] = 0;
+ }
+
+/* Bytes for IM-filter part 1 (04): 00,CE,BB,B8,D2,81,B0,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6] = 0xCE;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5] = 0xBB;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4] = 0xB8;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3] = 0xD2;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2] = 0x81;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1] = 0xB0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0] = 0x00;
+/* Bytes for IM-filter part 2 (05): 45,8F,00,0C,D2,3A,D0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7] = 0x45;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6] = 0x8F;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4] = 0x0C;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3] = 0xD2;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2] = 0x3A;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1] = 0xD0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0] = 0x08;
+/* Bytes for FRX-filter (08): 07,AA,E2,34,24,89,20,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6] = 0xAA;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5] = 0xE2;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4] = 0x34;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3] = 0x24;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2] = 0x89;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1] = 0x20;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0] = 0x08;
+/* Bytes for FRR-filter (07): 02,87,FA,37,9A,CA,B0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7] = 0x02;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6] = 0x87;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5] = 0xFA;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4] = 0x37;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3] = 0x9A;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1] = 0xB0;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0] = 0x08;
+/* Bytes for AX-filter (0A): 72,D5,DD,CA */
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3] = 0x72;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2] = 0xD5;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1] = 0xDD;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0] = 0xCA;
+/* Bytes for AR-filter (09): 72,42,13,4B */
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3] = 0x72;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2] = 0x42;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1] = 0x13;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0] = 0x4B;
+/* Bytes for TH-filter part 1 (00): 80,52,48,81,AD,80,00,98 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7] = 0x80;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5] = 0x48;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4] = 0x81;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3] = 0xAD;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2] = 0x80;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0] = 0x98;
+/* Bytes for TH-filter part 2 (01): 02,42,5A,20,E8,1A,81,27 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7] = 0x02;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6] = 0x42;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5] = 0x5A;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4] = 0x20;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3] = 0xE8;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2] = 0x1A;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1] = 0x81;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0] = 0x27;
+/* Bytes for TH-filter part 3 (02): 00,88,63,26,BD,4B,A3,C2 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6] = 0x88;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5] = 0x63;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4] = 0x26;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3] = 0xBD;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2] = 0x4B;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1] = 0xA3;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0] = 0xC2;
+/* ; (10K, 0.68uF) */
+ /* Bytes for Ringing part 1 (03):1B,3B,9B,BA,D4,1C,B3,23 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1B;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0x3B;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0x9B;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0xD4;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x1C;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0xB3;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x23;
+/* Bytes for Ringing part 2 (06):13,42,A6,BA,D4,73,CA,D5 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x13;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0x42;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0xD4;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x73;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD5;
+/* Levelmetering Ringing (0D):B2,45,0F,8E */
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0xB2;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x45;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0x0F;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x8E;
+/* Caller ID 1st Tone (0E):CA,0E,CA,09,99,99,99,99 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6] = 0x0E;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4] = 0x09;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0] = 0x99;
+/* Caller ID 2nd Tone (0F):FD,B5,BA,07,DA,00,00,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7] = 0xFD;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3] = 0xDA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0] = 0x00;
+/* ;CR Registers */
+ /* Config. Reg. 0 (filters) (cr0):FF ; all Filters enabled, CLK from ext. source */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = 0xFF;
+/* Config. Reg. 1 (dialing) (cr1):05 ; Manual Ring, Ring metering enabled */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = 0x05;
+/* Config. Reg. 2 (caller ID) (cr2):04 ; Analog Gain 0dB, FSC internal */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = 0x04;
+/* Config. Reg. 3 (testloops) (cr3):00 ; SEL Bit==0, HP-enabled */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = 0x00;
+/* Config. Reg. 4 (analog gain) (cr4):02 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = 0x02;
+ /* Config. Reg. 5 (Version) (cr5):02 */
+ /* Config. Reg. 6 (Reserved) (cr6):00 */
+ /* Config. Reg. 7 (Reserved) (cr7):00 */
+ /* ;xr Registers */
+ /* Ext. Reg. 0 (Interrupt Reg.) (xr0):02 */
+
+ j->m_DAAShadowRegs.XOP_xr0_W.reg = 0x02; /* SO_1 set to '1' because it is inverted. */
+ /* Ext. Reg. 1 (Interrupt enable) (xr1):1C ; Ring, CID, VDDOK Interrupts enabled */
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg = 0x1C; /* RING, Caller ID, VDD_OK */
+ /* Ext. Reg. 2 (Cadence Time Out) (xr2):7D */
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg = 0x7D;
+/* Ext. Reg. 3 (DC Char) (xr3):32 ; B-Filter Off==1, U0=3.5V, R=200Ohm */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg = 0x32;
+/* Ext. Reg. 4 (Cadence) (xr4):00 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg = 0x00;
+/* Ext. Reg. 5 (Ring timer) (xr5):22 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg = 0x22;
+/* Ext. Reg. 6 (Power State) (xr6):00 */
+ j->m_DAAShadowRegs.XOP_xr6_W.reg = 0x00;
+/* Ext. Reg. 7 (Vdd) (xr7):40 ; VDD=4.25 V */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg = 0x40; /* 0x40 ??? Should it be 0x00? */
+ /* DTMF Tone 1 (0B): 11,B3,5A,2C ; 697 Hz */
+ /* 12,33,5A,C3 ; 770 Hz */
+ /* 13,3C,5B,32 ; 852 Hz */
+ /* 1D,1B,5C,CC ; 941 Hz */
+
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3] = 0x11;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2] = 0xB3;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1] = 0x5A;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0] = 0x2C;
+/* DTMF Tone 2 (0C): 32,32,52,B3 ; 1209 Hz */
+ /* EC,1D,52,22 ; 1336 Hz */
+ /* AA,AC,51,D2 ; 1477 Hz */
+ /* 9B,3B,51,25 ; 1633 Hz */
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0] = 0xB3;
+}
+
+static void DAA_Coeff_Australia(IXJ *j)
+{
+ int i;
+
+ j->daa_country = DAA_AUSTRALIA;
+ /*----------------------------------------------- */
+ /* CAO */
+ for (i = 0; i < ALISDAA_CALLERID_SIZE; i++) {
+ j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID[i] = 0;
+ }
+
+/* Bytes for IM-filter part 1 (04): 00,A3,AA,28,B3,82,D0,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6] = 0xA3;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5] = 0xAA;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4] = 0x28;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3] = 0xB3;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2] = 0x82;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1] = 0xD0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0] = 0x00;
+/* Bytes for IM-filter part 2 (05): 70,96,00,09,32,6B,C0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7] = 0x70;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6] = 0x96;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4] = 0x09;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2] = 0x6B;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1] = 0xC0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0] = 0x08;
+/* Bytes for FRX-filter (08): 07,96,E2,34,32,9B,30,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6] = 0x96;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5] = 0xE2;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4] = 0x34;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2] = 0x9B;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1] = 0x30;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0] = 0x08;
+/* Bytes for FRR-filter (07): 0F,9A,E9,2F,22,CC,A0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7] = 0x0F;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6] = 0x9A;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5] = 0xE9;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4] = 0x2F;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2] = 0xCC;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1] = 0xA0;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0] = 0x08;
+/* Bytes for AX-filter (0A): CB,45,DD,CA */
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3] = 0xCB;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2] = 0x45;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1] = 0xDD;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0] = 0xCA;
+/* Bytes for AR-filter (09): 1B,67,10,D6 */
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3] = 0x1B;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2] = 0x67;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1] = 0x10;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0] = 0xD6;
+/* Bytes for TH-filter part 1 (00): 80,52,48,81,AF,80,00,98 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7] = 0x80;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5] = 0x48;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4] = 0x81;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3] = 0xAF;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2] = 0x80;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0] = 0x98;
+/* Bytes for TH-filter part 2 (01): 02,DB,52,B0,38,01,82,AC */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7] = 0x02;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6] = 0xDB;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4] = 0xB0;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3] = 0x38;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2] = 0x01;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1] = 0x82;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0] = 0xAC;
+/* Bytes for TH-filter part 3 (02): 00,88,4A,3E,2C,3B,24,46 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6] = 0x88;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5] = 0x4A;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4] = 0x3E;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3] = 0x2C;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2] = 0x3B;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1] = 0x24;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0] = 0x46;
+/* ; idle */
+ /* Bytes for Ringing part 1 (03):1B,3C,93,3A,22,12,A3,23 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1B;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0x3C;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0x93;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0x3A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0xA3;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x23;
+/* Bytes for Ringing part 2 (06):12,A2,A6,BA,22,7A,0A,D5 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0xA2;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x7A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0x0A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD5;
+/* Levelmetering Ringing (0D):32,45,B5,84 ; 50Hz 20V */
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x45;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x84;
+/* Caller ID 1st Tone (0E):CA,0E,CA,09,99,99,99,99 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6] = 0x0E;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4] = 0x09;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0] = 0x99;
+/* Caller ID 2nd Tone (0F):FD,B5,BA,07,DA,00,00,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7] = 0xFD;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3] = 0xDA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0] = 0x00;
+/* ;CR Registers */
+ /* Config. Reg. 0 (filters) (cr0):FF */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = 0xFF;
+/* Config. Reg. 1 (dialing) (cr1):05 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = 0x05;
+/* Config. Reg. 2 (caller ID) (cr2):04 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = 0x04;
+/* Config. Reg. 3 (testloops) (cr3):00 ; */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = 0x00;
+/* Config. Reg. 4 (analog gain) (cr4):02 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = 0x02;
+ /* Config. Reg. 5 (Version) (cr5):02 */
+ /* Config. Reg. 6 (Reserved) (cr6):00 */
+ /* Config. Reg. 7 (Reserved) (cr7):00 */
+ /* ;xr Registers */
+ /* Ext. Reg. 0 (Interrupt Reg.) (xr0):02 */
+
+ j->m_DAAShadowRegs.XOP_xr0_W.reg = 0x02; /* SO_1 set to '1' because it is inverted. */
+ /* Ext. Reg. 1 (Interrupt enable) (xr1):1C */
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg = 0x1C; /* RING, Caller ID, VDD_OK */
+ /* Ext. Reg. 2 (Cadence Time Out) (xr2):7D */
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg = 0x7D;
+/* Ext. Reg. 3 (DC Char) (xr3):2B ; */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg = 0x2B;
+/* Ext. Reg. 4 (Cadence) (xr4):00 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg = 0x00;
+/* Ext. Reg. 5 (Ring timer) (xr5):22 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg = 0x22;
+/* Ext. Reg. 6 (Power State) (xr6):00 */
+ j->m_DAAShadowRegs.XOP_xr6_W.reg = 0x00;
+/* Ext. Reg. 7 (Vdd) (xr7):40 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg = 0x40; /* 0x40 ??? Should it be 0x00? */
+
+ /* DTMF Tone 1 (0B): 11,B3,5A,2C ; 697 Hz */
+ /* 12,33,5A,C3 ; 770 Hz */
+ /* 13,3C,5B,32 ; 852 Hz */
+ /* 1D,1B,5C,CC ; 941 Hz */
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3] = 0x11;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2] = 0xB3;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1] = 0x5A;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0] = 0x2C;
+
+ /* DTMF Tone 2 (0C): 32,32,52,B3 ; 1209 Hz */
+ /* EC,1D,52,22 ; 1336 Hz */
+ /* AA,AC,51,D2 ; 1477 Hz */
+ /* 9B,3B,51,25 ; 1633 Hz */
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0] = 0xB3;
+}
+
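+/*
+ * Note on the shadow register groups filled above and below (inferred
+ * from the field names and per-register comments, not from a datasheet):
+ * SOP_REGS holds the cr0-cr7 configuration registers, XOP_REGS the
+ * xr0-xr7 extended registers, COP_REGS the coefficient RAM (filters,
+ * ringing, caller ID and DTMF tones) and CAO_REGS the caller ID buffer.
+ * Each DAA_Coeff_* routine simply loads this shadow copy with the
+ * country-specific byte values listed in its comments.
+ */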
+static void DAA_Coeff_Japan(IXJ *j)
+{
+ int i;
+
+ j->daa_country = DAA_JAPAN;
+ /*----------------------------------------------- */
+ /* CAO */
+ for (i = 0; i < ALISDAA_CALLERID_SIZE; i++) {
+ j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID[i] = 0;
+ }
+
+/* Bytes for IM-filter part 1 (04): 06,BD,E2,2D,BA,F9,A0,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7] = 0x06;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6] = 0xBD;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5] = 0xE2;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4] = 0x2D;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2] = 0xF9;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1] = 0xA0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0] = 0x00;
+/* Bytes for IM-filter part 2 (05): 6F,F7,00,0E,34,33,E0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7] = 0x6F;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6] = 0xF7;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4] = 0x0E;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3] = 0x34;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2] = 0x33;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1] = 0xE0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0] = 0x08;
+/* Bytes for FRX-filter (08): 02,8F,68,77,9C,58,F0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7] = 0x02;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6] = 0x8F;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5] = 0x68;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4] = 0x77;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3] = 0x9C;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2] = 0x58;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1] = 0xF0;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0] = 0x08;
+/* Bytes for FRR-filter (07): 03,8F,38,73,87,EA,20,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7] = 0x03;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6] = 0x8F;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5] = 0x38;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4] = 0x73;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3] = 0x87;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2] = 0xEA;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1] = 0x20;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0] = 0x08;
+/* Bytes for AX-filter (0A): 51,C5,DD,CA */
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3] = 0x51;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2] = 0xC5;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1] = 0xDD;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0] = 0xCA;
+/* Bytes for AR-filter (09): 25,A7,10,D6 */
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3] = 0x25;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2] = 0xA7;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1] = 0x10;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0] = 0xD6;
+/* Bytes for TH-filter part 1 (00): 00,42,48,81,AE,80,00,98 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6] = 0x42;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5] = 0x48;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4] = 0x81;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3] = 0xAE;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2] = 0x80;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0] = 0x98;
+/* Bytes for TH-filter part 2 (01): 02,AB,2A,20,99,5B,89,28 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7] = 0x02;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6] = 0xAB;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5] = 0x2A;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4] = 0x20;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2] = 0x5B;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1] = 0x89;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0] = 0x28;
+/* Bytes for TH-filter part 3 (02): 00,88,DA,25,34,C5,4C,BA */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6] = 0x88;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5] = 0xDA;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4] = 0x25;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3] = 0x34;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2] = 0xC5;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1] = 0x4C;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0] = 0xBA;
+/* ; idle */
+ /* Bytes for Ringing part 1 (03):1B,3C,93,3A,22,12,A3,23 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1B;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0x3C;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0x93;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0x3A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0xA3;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x23;
+/* Bytes for Ringing part 2 (06):12,A2,A6,BA,22,7A,0A,D5 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0xA2;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x7A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0x0A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD5;
+/* Levelmetering Ringing (0D):AA,35,0F,8E ; 25Hz 30V ????????? */
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0xAA;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x35;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0x0F;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x8E;
+/* Caller ID 1st Tone (0E):CA,0E,CA,09,99,99,99,99 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6] = 0x0E;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4] = 0x09;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0] = 0x99;
+/* Caller ID 2nd Tone (0F):FD,B5,BA,07,DA,00,00,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7] = 0xFD;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3] = 0xDA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0] = 0x00;
+/* ;CR Registers */
+ /* Config. Reg. 0 (filters) (cr0):FF */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = 0xFF;
+/* Config. Reg. 1 (dialing) (cr1):05 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = 0x05;
+/* Config. Reg. 2 (caller ID) (cr2):04 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = 0x04;
+/* Config. Reg. 3 (testloops) (cr3):00 ; */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = 0x00;
+/* Config. Reg. 4 (analog gain) (cr4):02 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = 0x02;
+ /* Config. Reg. 5 (Version) (cr5):02 */
+ /* Config. Reg. 6 (Reserved) (cr6):00 */
+ /* Config. Reg. 7 (Reserved) (cr7):00 */
+ /* ;xr Registers */
+ /* Ext. Reg. 0 (Interrupt Reg.) (xr0):02 */
+ j->m_DAAShadowRegs.XOP_xr0_W.reg = 0x02; /* SO_1 set to '1' because it is inverted. */
+ /* Ext. Reg. 1 (Interrupt enable) (xr1):1C */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg = 0x1C; /* RING, Caller ID, VDD_OK */
+ /* Ext. Reg. 2 (Cadence Time Out) (xr2):7D */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg = 0x7D;
+/* Ext. Reg. 3 (DC Char) (xr3):22 ; */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg = 0x22;
+/* Ext. Reg. 4 (Cadence) (xr4):00 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg = 0x00;
+/* Ext. Reg. 5 (Ring timer) (xr5):22 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg = 0x22;
+/* Ext. Reg. 6 (Power State) (xr6):00 */
+ j->m_DAAShadowRegs.XOP_xr6_W.reg = 0x00;
+/* Ext. Reg. 7 (Vdd) (xr7):40 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg = 0x40; /* 0x40 ??? Should it be 0x00? */
+ /* DTMF Tone 1 (0B): 11,B3,5A,2C ; 697 Hz */
+ /* 12,33,5A,C3 ; 770 Hz */
+ /* 13,3C,5B,32 ; 852 Hz */
+ /* 1D,1B,5C,CC ; 941 Hz */
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3] = 0x11;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2] = 0xB3;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1] = 0x5A;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0] = 0x2C;
+/* DTMF Tone 2 (0C): 32,32,52,B3 ; 1209 Hz */
+ /* EC,1D,52,22 ; 1336 Hz */
+ /* AA,AC,51,D2 ; 1477 Hz */
+ /* 9B,3B,51,25 ; 1633 Hz */
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0] = 0xB3;
+}
+
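+/*
+ * Layout of each tone_table[] row, as implied by the per-line comments:
+ * 19 s16 words forming three cascaded second-order (biquad) filter
+ * sections, each contributing A1, A2, B2, B1 and B0 in that order,
+ * followed by the internal filter scaling word, the minimum in-band
+ * energy threshold, the in-band to broad-band ratio and the
+ * shift-mask/bit-count word. The fractional values in the comments
+ * imply mixed fixed-point formats: words that can exceed +/-1 (A1, B1)
+ * appear to scale by 16384 (Q2.14), the rest by 32768 (Q1.15).
+ */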
+static s16 tone_table[][19] =
+{
+ { /* f20_50[] 11 */
+ 32538, /* A1 = 1.985962 */
+ -32325, /* A2 = -0.986511 */
+ -343, /* B2 = -0.010493 */
+ 0, /* B1 = 0 */
+ 343, /* B0 = 0.010493 */
+ 32619, /* A1 = 1.990906 */
+ -32520, /* A2 = -0.992462 */
+ 19179, /* B2 = 0.585327 */
+ -19178, /* B1 = -1.170593 */
+ 19179, /* B0 = 0.585327 */
+ 32723, /* A1 = 1.997314 */
+ -32686, /* A2 = -0.997528 */
+ 9973, /* B2 = 0.304352 */
+ -9955, /* B1 = -0.607605 */
+ 9973, /* B0 = 0.304352 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f133_200[] 12 */
+ 32072, /* A1 = 1.95752 */
+ -31896, /* A2 = -0.973419 */
+ -435, /* B2 = -0.013294 */
+ 0, /* B1 = 0 */
+ 435, /* B0 = 0.013294 */
+ 32188, /* A1 = 1.9646 */
+ -32400, /* A2 = -0.98877 */
+ 15139, /* B2 = 0.462036 */
+ -14882, /* B1 = -0.908356 */
+ 15139, /* B0 = 0.462036 */
+ 32473, /* A1 = 1.981995 */
+ -32524, /* A2 = -0.992584 */
+ 23200, /* B2 = 0.708008 */
+ -23113, /* B1 = -1.410706 */
+ 23200, /* B0 = 0.708008 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f300 13 */
+ 31769, /* A1 = -1.939026 */
+ -32584, /* A2 = 0.994385 */
+ -475, /* B2 = -0.014522 */
+ 0, /* B1 = 0.000000 */
+ 475, /* B0 = 0.014522 */
+ 31789, /* A1 = -1.940247 */
+ -32679, /* A2 = 0.997284 */
+ 17280, /* B2 = 0.527344 */
+ -16865, /* B1 = -1.029358 */
+ 17280, /* B0 = 0.527344 */
+ 31841, /* A1 = -1.943481 */
+ -32681, /* A2 = 0.997345 */
+ 543, /* B2 = 0.016579 */
+ -525, /* B1 = -0.032097 */
+ 543, /* B0 = 0.016579 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f300_420[] 14 */
+ 30750, /* A1 = 1.876892 */
+ -31212, /* A2 = -0.952515 */
+ -804, /* B2 = -0.024541 */
+ 0, /* B1 = 0 */
+ 804, /* B0 = 0.024541 */
+ 30686, /* A1 = 1.872925 */
+ -32145, /* A2 = -0.980988 */
+ 14747, /* B2 = 0.450043 */
+ -13703, /* B1 = -0.836395 */
+ 14747, /* B0 = 0.450043 */
+ 31651, /* A1 = 1.931824 */
+ -32321, /* A2 = -0.986389 */
+ 24425, /* B2 = 0.745422 */
+ -23914, /* B1 = -1.459595 */
+ 24427, /* B0 = 0.745483 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f330 15 */
+ 31613, /* A1 = -1.929565 */
+ -32646, /* A2 = 0.996277 */
+ -185, /* B2 = -0.005657 */
+ 0, /* B1 = 0.000000 */
+ 185, /* B0 = 0.005657 */
+ 31620, /* A1 = -1.929932 */
+ -32713, /* A2 = 0.998352 */
+ 19253, /* B2 = 0.587585 */
+ -18566, /* B1 = -1.133179 */
+ 19253, /* B0 = 0.587585 */
+ 31674, /* A1 = -1.933228 */
+ -32715, /* A2 = 0.998413 */
+ 2575, /* B2 = 0.078590 */
+ -2495, /* B1 = -0.152283 */
+ 2575, /* B0 = 0.078590 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f300_425[] 16 */
+ 30741, /* A1 = 1.876282 */
+ -31475, /* A2 = -0.960541 */
+ -703, /* B2 = -0.021484 */
+ 0, /* B1 = 0 */
+ 703, /* B0 = 0.021484 */
+ 30688, /* A1 = 1.873047 */
+ -32248, /* A2 = -0.984161 */
+ 14542, /* B2 = 0.443787 */
+ -13523, /* B1 = -0.825439 */
+ 14542, /* B0 = 0.443817 */
+ 31494, /* A1 = 1.922302 */
+ -32366, /* A2 = -0.987762 */
+ 21577, /* B2 = 0.658508 */
+ -21013, /* B1 = -1.282532 */
+ 21577, /* B0 = 0.658508 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f330_440[] 17 */
+ 30627, /* A1 = 1.869324 */
+ -31338, /* A2 = -0.95636 */
+ -843, /* B2 = -0.025749 */
+ 0, /* B1 = 0 */
+ 843, /* B0 = 0.025749 */
+ 30550, /* A1 = 1.864685 */
+ -32221, /* A2 = -0.983337 */
+ 13594, /* B2 = 0.414886 */
+ -12589, /* B1 = -0.768402 */
+ 13594, /* B0 = 0.414886 */
+ 31488, /* A1 = 1.921936 */
+ -32358, /* A2 = -0.987518 */
+ 24684, /* B2 = 0.753296 */
+ -24029, /* B1 = -1.466614 */
+ 24684, /* B0 = 0.753296 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f340 18 */
+ 31546, /* A1 = -1.925476 */
+ -32646, /* A2 = 0.996277 */
+ -445, /* B2 = -0.013588 */
+ 0, /* B1 = 0.000000 */
+ 445, /* B0 = 0.013588 */
+ 31551, /* A1 = -1.925781 */
+ -32713, /* A2 = 0.998352 */
+ 23884, /* B2 = 0.728882 */
+ -22979, /* B1 = -1.402527 */
+ 23884, /* B0 = 0.728882 */
+ 31606, /* A1 = -1.929138 */
+ -32715, /* A2 = 0.998413 */
+ 863, /* B2 = 0.026367 */
+ -835, /* B1 = -0.050985 */
+ 863, /* B0 = 0.026367 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f350_400[] 19 */
+ 31006, /* A1 = 1.892517 */
+ -32029, /* A2 = -0.977448 */
+ -461, /* B2 = -0.014096 */
+ 0, /* B1 = 0 */
+ 461, /* B0 = 0.014096 */
+ 30999, /* A1 = 1.892029 */
+ -32487, /* A2 = -0.991455 */
+ 11325, /* B2 = 0.345612 */
+ -10682, /* B1 = -0.651978 */
+ 11325, /* B0 = 0.345612 */
+ 31441, /* A1 = 1.919067 */
+ -32526, /* A2 = -0.992615 */
+ 24324, /* B2 = 0.74231 */
+ -23535, /* B1 = -1.436523 */
+ 24324, /* B0 = 0.74231 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f350_440[] */
+ 30634, /* A1 = 1.869751 */
+ -31533, /* A2 = -0.962341 */
+ -680, /* B2 = -0.020782 */
+ 0, /* B1 = 0 */
+ 680, /* B0 = 0.020782 */
+ 30571, /* A1 = 1.865906 */
+ -32277, /* A2 = -0.985016 */
+ 12894, /* B2 = 0.393524 */
+ -11945, /* B1 = -0.729065 */
+ 12894, /* B0 = 0.393524 */
+ 31367, /* A1 = 1.91449 */
+ -32379, /* A2 = -0.988129 */
+ 23820, /* B2 = 0.726929 */
+ -23104, /* B1 = -1.410217 */
+ 23820, /* B0 = 0.726929 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f350_450[] */
+ 30552, /* A1 = 1.864807 */
+ -31434, /* A2 = -0.95929 */
+ -690, /* B2 = -0.021066 */
+ 0, /* B1 = 0 */
+ 690, /* B0 = 0.021066 */
+ 30472, /* A1 = 1.859924 */
+ -32248, /* A2 = -0.984161 */
+ 13385, /* B2 = 0.408478 */
+ -12357, /* B1 = -0.754242 */
+ 13385, /* B0 = 0.408478 */
+ 31358, /* A1 = 1.914001 */
+ -32366, /* A2 = -0.987732 */
+ 26488, /* B2 = 0.80835 */
+ -25692, /* B1 = -1.568176 */
+ 26490, /* B0 = 0.808411 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f360 */
+ 31397, /* A1 = -1.916321 */
+ -32623, /* A2 = 0.995605 */
+ -117, /* B2 = -0.003598 */
+ 0, /* B1 = 0.000000 */
+ 117, /* B0 = 0.003598 */
+ 31403, /* A1 = -1.916687 */
+ -32700, /* A2 = 0.997925 */
+ 3388, /* B2 = 0.103401 */
+ -3240, /* B1 = -0.197784 */
+ 3388, /* B0 = 0.103401 */
+ 31463, /* A1 = -1.920410 */
+ -32702, /* A2 = 0.997986 */
+ 13346, /* B2 = 0.407288 */
+ -12863, /* B1 = -0.785126 */
+ 13346, /* B0 = 0.407288 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f380_420[] */
+ 30831, /* A1 = 1.881775 */
+ -32064, /* A2 = -0.978546 */
+ -367, /* B2 = -0.01122 */
+ 0, /* B1 = 0 */
+ 367, /* B0 = 0.01122 */
+ 30813, /* A1 = 1.880737 */
+ -32456, /* A2 = -0.990509 */
+ 11068, /* B2 = 0.337769 */
+ -10338, /* B1 = -0.631042 */
+ 11068, /* B0 = 0.337769 */
+ 31214, /* A1 = 1.905212 */
+ -32491, /* A2 = -0.991577 */
+ 16374, /* B2 = 0.499695 */
+ -15781, /* B1 = -0.963196 */
+ 16374, /* B0 = 0.499695 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f392 */
+ 31152, /* A1 = -1.901428 */
+ -32613, /* A2 = 0.995300 */
+ -314, /* B2 = -0.009605 */
+ 0, /* B1 = 0.000000 */
+ 314, /* B0 = 0.009605 */
+ 31156, /* A1 = -1.901672 */
+ -32694, /* A2 = 0.997742 */
+ 28847, /* B2 = 0.880371 */
+ -2734, /* B1 = -0.166901 */
+ 28847, /* B0 = 0.880371 */
+ 31225, /* A1 = -1.905823 */
+ -32696, /* A2 = 0.997803 */
+ 462, /* B2 = 0.014108 */
+ -442, /* B1 = -0.027019 */
+ 462, /* B0 = 0.014108 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f400_425[] */
+ 30836, /* A1 = 1.882141 */
+ -32296, /* A2 = -0.985596 */
+ -324, /* B2 = -0.009903 */
+ 0, /* B1 = 0 */
+ 324, /* B0 = 0.009903 */
+ 30825, /* A1 = 1.881409 */
+ -32570, /* A2 = -0.993958 */
+ 16847, /* B2 = 0.51416 */
+ -15792, /* B1 = -0.963898 */
+ 16847, /* B0 = 0.51416 */
+ 31106, /* A1 = 1.89856 */
+ -32584, /* A2 = -0.994415 */
+ 9579, /* B2 = 0.292328 */
+ -9164, /* B1 = -0.559357 */
+ 9579, /* B0 = 0.292328 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f400_440[] */
+ 30702, /* A1 = 1.873962 */
+ -32134, /* A2 = -0.980682 */
+ -517, /* B2 = -0.015793 */
+ 0, /* B1 = 0 */
+ 517, /* B0 = 0.015793 */
+ 30676, /* A1 = 1.872375 */
+ -32520, /* A2 = -0.992462 */
+ 8144, /* B2 = 0.24855 */
+ -7596, /* B1 = -0.463684 */
+ 8144, /* B0 = 0.24855 */
+ 31084, /* A1 = 1.897217 */
+ -32547, /* A2 = -0.993256 */
+ 22713, /* B2 = 0.693176 */
+ -21734, /* B1 = -1.326599 */
+ 22713, /* B0 = 0.693176 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f400_450[] */
+ 30613, /* A1 = 1.86853 */
+ -32031, /* A2 = -0.977509 */
+ -618, /* B2 = -0.018866 */
+ 0, /* B1 = 0 */
+ 618, /* B0 = 0.018866 */
+ 30577, /* A1 = 1.866272 */
+ -32491, /* A2 = -0.991577 */
+ 9612, /* B2 = 0.293335 */
+ -8935, /* B1 = -0.54541 */
+ 9612, /* B0 = 0.293335 */
+ 31071, /* A1 = 1.896484 */
+ -32524, /* A2 = -0.992584 */
+ 21596, /* B2 = 0.659058 */
+ -20667, /* B1 = -1.261414 */
+ 21596, /* B0 = 0.659058 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f420 */
+ 30914, /* A1 = -1.886841 */
+ -32584, /* A2 = 0.994385 */
+ -426, /* B2 = -0.013020 */
+ 0, /* B1 = 0.000000 */
+ 426, /* B0 = 0.013020 */
+ 30914, /* A1 = -1.886841 */
+ -32679, /* A2 = 0.997314 */
+ 17520, /* B2 = 0.534668 */
+ -16471, /* B1 = -1.005310 */
+ 17520, /* B0 = 0.534668 */
+ 31004, /* A1 = -1.892334 */
+ -32683, /* A2 = 0.997406 */
+ 819, /* B2 = 0.025023 */
+ -780, /* B1 = -0.047619 */
+ 819, /* B0 = 0.025023 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+#if 0
+ { /* f425 */
+ 30881, /* A1 = -1.884827 */
+ -32603, /* A2 = 0.994965 */
+ -496, /* B2 = -0.015144 */
+ 0, /* B1 = 0.000000 */
+ 496, /* B0 = 0.015144 */
+ 30880, /* A1 = -1.884766 */
+ -32692, /* A2 = 0.997711 */
+ 24767, /* B2 = 0.755859 */
+ -23290, /* B1 = -1.421509 */
+ 24767, /* B0 = 0.755859 */
+ 30967, /* A1 = -1.890076 */
+ -32694, /* A2 = 0.997772 */
+ 728, /* B2 = 0.022232 */
+ -691, /* B1 = -0.042194 */
+ 728, /* B0 = 0.022232 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+#else
+ { /* f425 (active replacement for the disabled set above; field roles follow the common row layout) */
+ 30850, /* A1 */
+ -32534, /* A2 */
+ -504, /* B2 */
+ 0, /* B1 */
+ 504, /* B0 */
+ 30831, /* A1 */
+ -32669, /* A2 */
+ 24303, /* B2 */
+ -22080, /* B1 */
+ 24303, /* B0 */
+ 30994, /* A1 */
+ -32673, /* A2 */
+ 1905, /* B2 */
+ -1811, /* B1 */
+ 1905, /* B0 */
+ 5, /* Internal filter scaling */
+ 129, /* Minimum in-band energy threshold */
+ 17, /* 17/32 in-band to broad-band ratio */
+ 0xff5 /* shift-mask 0xFF (look at 16 half-frames) bit count = 5 */
+ },
+#endif
+ { /* f425_450[] */
+ 30646, /* A1 = 1.870544 */
+ -32327, /* A2 = -0.986572 */
+ -287, /* B2 = -0.008769 */
+ 0, /* B1 = 0 */
+ 287, /* B0 = 0.008769 */
+ 30627, /* A1 = 1.869324 */
+ -32607, /* A2 = -0.995087 */
+ 13269, /* B2 = 0.404968 */
+ -12376, /* B1 = -0.755432 */
+ 13269, /* B0 = 0.404968 */
+ 30924, /* A1 = 1.887512 */
+ -32619, /* A2 = -0.995453 */
+ 19950, /* B2 = 0.608826 */
+ -18940, /* B1 = -1.156006 */
+ 19950, /* B0 = 0.608826 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f425_475[] */
+ 30396, /* A1 = 1.855225 */
+ -32014, /* A2 = -0.97699 */
+ -395, /* B2 = -0.012055 */
+ 0, /* B1 = 0 */
+ 395, /* B0 = 0.012055 */
+ 30343, /* A1 = 1.85199 */
+ -32482, /* A2 = -0.991302 */
+ 17823, /* B2 = 0.543945 */
+ -16431, /* B1 = -1.002869 */
+ 17823, /* B0 = 0.543945 */
+ 30872, /* A1 = 1.884338 */
+ -32516, /* A2 = -0.99231 */
+ 18124, /* B2 = 0.553101 */
+ -17246, /* B1 = -1.052673 */
+ 18124, /* B0 = 0.553101 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f435 */
+ 30796, /* A1 = -1.879639 */
+ -32603, /* A2 = 0.994965 */
+ -254, /* B2 = -0.007762 */
+ 0, /* B1 = 0.000000 */
+ 254, /* B0 = 0.007762 */
+ 30793, /* A1 = -1.879456 */
+ -32692, /* A2 = 0.997711 */
+ 18934, /* B2 = 0.577820 */
+ -17751, /* B1 = -1.083496 */
+ 18934, /* B0 = 0.577820 */
+ 30882, /* A1 = -1.884888 */
+ -32694, /* A2 = 0.997772 */
+ 1858, /* B2 = 0.056713 */
+ -1758, /* B1 = -0.107357 */
+ 1858, /* B0 = 0.056713 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f440_450[] */
+ 30641, /* A1 = 1.870239 */
+ -32458, /* A2 = -0.99057 */
+ -155, /* B2 = -0.004735 */
+ 0, /* B1 = 0 */
+ 155, /* B0 = 0.004735 */
+ 30631, /* A1 = 1.869568 */
+ -32630, /* A2 = -0.995789 */
+ 11453, /* B2 = 0.349548 */
+ -10666, /* B1 = -0.651001 */
+ 11453, /* B0 = 0.349548 */
+ 30810, /* A1 = 1.880554 */
+ -32634, /* A2 = -0.995941 */
+ 12237, /* B2 = 0.373474 */
+ -11588, /* B1 = -0.707336 */
+ 12237, /* B0 = 0.373474 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f440_480[] */
+ 30367, /* A1 = 1.853455 */
+ -32147, /* A2 = -0.981079 */
+ -495, /* B2 = -0.015113 */
+ 0, /* B1 = 0 */
+ 495, /* B0 = 0.015113 */
+ 30322, /* A1 = 1.850769 */
+ -32543, /* A2 = -0.993134 */
+ 10031, /* B2 = 0.306152 */
+ -9252, /* B1 = -0.564728 */
+ 10031, /* B0 = 0.306152 */
+ 30770, /* A1 = 1.878052 */
+ -32563, /* A2 = -0.993774 */
+ 22674, /* B2 = 0.691956 */
+ -21465, /* B1 = -1.31012 */
+ 22674, /* B0 = 0.691956 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f445 */
+ 30709, /* A1 = -1.874329 */
+ -32603, /* A2 = 0.994965 */
+ -83, /* B2 = -0.002545 */
+ 0, /* B1 = 0.000000 */
+ 83, /* B0 = 0.002545 */
+ 30704, /* A1 = -1.874084 */
+ -32692, /* A2 = 0.997711 */
+ 10641, /* B2 = 0.324738 */
+ -9947, /* B1 = -0.607147 */
+ 10641, /* B0 = 0.324738 */
+ 30796, /* A1 = -1.879639 */
+ -32694, /* A2 = 0.997772 */
+ 10079, /* B2 = 0.307587 */
+ 9513, /* B1 = 0.580688 */
+ 10079, /* B0 = 0.307587 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f450 */
+ 30664, /* A1 = -1.871643 */
+ -32603, /* A2 = 0.994965 */
+ -164, /* B2 = -0.005029 */
+ 0, /* B1 = 0.000000 */
+ 164, /* B0 = 0.005029 */
+ 30661, /* A1 = -1.871399 */
+ -32692, /* A2 = 0.997711 */
+ 15294, /* B2 = 0.466736 */
+ -14275, /* B1 = -0.871307 */
+ 15294, /* B0 = 0.466736 */
+ 30751, /* A1 = -1.876953 */
+ -32694, /* A2 = 0.997772 */
+ 3548, /* B2 = 0.108284 */
+ -3344, /* B1 = -0.204155 */
+ 3548, /* B0 = 0.108284 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f452 */
+ 30653, /* A1 = -1.870911 */
+ -32615, /* A2 = 0.995361 */
+ -209, /* B2 = -0.006382 */
+ 0, /* B1 = 0.000000 */
+ 209, /* B0 = 0.006382 */
+ 30647, /* A1 = -1.870605 */
+ -32702, /* A2 = 0.997986 */
+ 18971, /* B2 = 0.578979 */
+ -17716, /* B1 = -1.081299 */
+ 18971, /* B0 = 0.578979 */
+ 30738, /* A1 = -1.876099 */
+ -32702, /* A2 = 0.998016 */
+ 2967, /* B2 = 0.090561 */
+ -2793, /* B1 = -0.170502 */
+ 2967, /* B0 = 0.090561 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f475 */
+ 30437, /* A1 = -1.857727 */
+ -32603, /* A2 = 0.994965 */
+ -264, /* B2 = -0.008062 */
+ 0, /* B1 = 0.000000 */
+ 264, /* B0 = 0.008062 */
+ 30430, /* A1 = -1.857300 */
+ -32692, /* A2 = 0.997711 */
+ 21681, /* B2 = 0.661682 */
+ -20082, /* B1 = -1.225708 */
+ 21681, /* B0 = 0.661682 */
+ 30526, /* A1 = -1.863220 */
+ -32694, /* A2 = 0.997742 */
+ 1559, /* B2 = 0.047600 */
+ -1459, /* B1 = -0.089096 */
+ 1559, /* B0 = 0.047600 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f480_620[] */
+ 28975, /* A1 = 1.768494 */
+ -30955, /* A2 = -0.944672 */
+ -1026, /* B2 = -0.03133 */
+ 0, /* B1 = 0 */
+ 1026, /* B0 = 0.03133 */
+ 28613, /* A1 = 1.746399 */
+ -32089, /* A2 = -0.979309 */
+ 14214, /* B2 = 0.433807 */
+ -12202, /* B1 = -0.744812 */
+ 14214, /* B0 = 0.433807 */
+ 30243, /* A1 = 1.845947 */
+ -32238, /* A2 = -0.983856 */
+ 24825, /* B2 = 0.757629 */
+ -23402, /* B1 = -1.428345 */
+ 24825, /* B0 = 0.757629 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f494 */
+ 30257, /* A1 = -1.846741 */
+ -32605, /* A2 = 0.995056 */
+ -249, /* B2 = -0.007625 */
+ 0, /* B1 = 0.000000 */
+ 249, /* B0 = 0.007625 */
+ 30247, /* A1 = -1.846191 */
+ -32694, /* A2 = 0.997772 */
+ 18088, /* B2 = 0.552002 */
+ -16652, /* B1 = -1.016418 */
+ 18088, /* B0 = 0.552002 */
+ 30348, /* A1 = -1.852295 */
+ -32696, /* A2 = 0.997803 */
+ 2099, /* B2 = 0.064064 */
+ -1953, /* B1 = -0.119202 */
+ 2099, /* B0 = 0.064064 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f500 */
+ 30202, /* A1 = -1.843431 */
+ -32624, /* A2 = 0.995622 */
+ -413, /* B2 = -0.012622 */
+ 0, /* B1 = 0.000000 */
+ 413, /* B0 = 0.012622 */
+ 30191, /* A1 = -1.842721 */
+ -32714, /* A2 = 0.998364 */
+ 25954, /* B2 = 0.792057 */
+ -23890, /* B1 = -1.458131 */
+ 25954, /* B0 = 0.792057 */
+ 30296, /* A1 = -1.849172 */
+ -32715, /* A2 = 0.998397 */
+ 2007, /* B2 = 0.061264 */
+ -1860, /* B1 = -0.113568 */
+ 2007, /* B0 = 0.061264 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f520 */
+ 30001, /* A1 = -1.831116 */
+ -32613, /* A2 = 0.995270 */
+ -155, /* B2 = -0.004750 */
+ 0, /* B1 = 0.000000 */
+ 155, /* B0 = 0.004750 */
+ 29985, /* A1 = -1.830200 */
+ -32710, /* A2 = 0.998260 */
+ 6584, /* B2 = 0.200928 */
+ -6018, /* B1 = -0.367355 */
+ 6584, /* B0 = 0.200928 */
+ 30105, /* A1 = -1.837524 */
+ -32712, /* A2 = 0.998291 */
+ 23812, /* B2 = 0.726685 */
+ -21936, /* B1 = -1.338928 */
+ 23812, /* B0 = 0.726685 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f523 */
+ 29964, /* A1 = -1.828918 */
+ -32601, /* A2 = 0.994904 */
+ -101, /* B2 = -0.003110 */
+ 0, /* B1 = 0.000000 */
+ 101, /* B0 = 0.003110 */
+ 29949, /* A1 = -1.827942 */
+ -32700, /* A2 = 0.997925 */
+ 11041, /* B2 = 0.336975 */
+ -10075, /* B1 = -0.614960 */
+ 11041, /* B0 = 0.336975 */
+ 30070, /* A1 = -1.835388 */
+ -32702, /* A2 = 0.997986 */
+ 16762, /* B2 = 0.511536 */
+ -15437, /* B1 = -0.942230 */
+ 16762, /* B0 = 0.511536 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f525 */
+ 29936, /* A1 = -1.827209 */
+ -32584, /* A2 = 0.994415 */
+ -91, /* B2 = -0.002806 */
+ 0, /* B1 = 0.000000 */
+ 91, /* B0 = 0.002806 */
+ 29921, /* A1 = -1.826233 */
+ -32688, /* A2 = 0.997559 */
+ 11449, /* B2 = 0.349396 */
+ -10426, /* B1 = -0.636383 */
+ 11449, /* B0 = 0.349396 */
+ 30045, /* A1 = -1.833862 */
+ -32688, /* A2 = 0.997589 */
+ 13055, /* B2 = 0.398407 */
+ -12028, /* B1 = -0.734161 */
+ 13055, /* B0 = 0.398407 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f540_660[] */
+ 28499, /* A1 = 1.739441 */
+ -31129, /* A2 = -0.949982 */
+ -849, /* B2 = -0.025922 */
+ 0, /* B1 = 0 */
+ 849, /* B0 = 0.025922 */
+ 28128, /* A1 = 1.716797 */
+ -32130, /* A2 = -0.98056 */
+ 14556, /* B2 = 0.444214 */
+ -12251, /* B1 = -0.747772 */
+ 14556, /* B0 = 0.444244 */
+ 29667, /* A1 = 1.81073 */
+ -32244, /* A2 = -0.984039 */
+ 23038, /* B2 = 0.703064 */
+ -21358, /* B1 = -1.303589 */
+ 23040, /* B0 = 0.703125 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f587 */
+ 29271, /* A1 = -1.786560 */
+ -32599, /* A2 = 0.994873 */
+ -490, /* B2 = -0.014957 */
+ 0, /* B1 = 0.000000 */
+ 490, /* B0 = 0.014957 */
+ 29246, /* A1 = -1.785095 */
+ -32700, /* A2 = 0.997925 */
+ 28961, /* B2 = 0.883850 */
+ -25796, /* B1 = -1.574463 */
+ 28961, /* B0 = 0.883850 */
+ 29383, /* A1 = -1.793396 */
+ -32700, /* A2 = 0.997955 */
+ 1299, /* B2 = 0.039650 */
+ -1169, /* B1 = -0.071396 */
+ 1299, /* B0 = 0.039650 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f590 */
+ 29230, /* A1 = -1.784058 */
+ -32584, /* A2 = 0.994415 */
+ -418, /* B2 = -0.012757 */
+ 0, /* B1 = 0.000000 */
+ 418, /* B0 = 0.012757 */
+ 29206, /* A1 = -1.782593 */
+ -32688, /* A2 = 0.997559 */
+ 36556, /* B2 = 1.115601; note: 36556 overflows the s16 element type */
+ -32478, /* B1 = -1.982300 */
+ 36556, /* B0 = 1.115601 */
+ 29345, /* A1 = -1.791077 */
+ -32688, /* A2 = 0.997589 */
+ 897, /* B2 = 0.027397 */
+ -808, /* B1 = -0.049334 */
+ 897, /* B0 = 0.027397 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f600 */
+ 29116, /* A1 = -1.777100 */
+ -32603, /* A2 = 0.994965 */
+ -165, /* B2 = -0.005039 */
+ 0, /* B1 = 0.000000 */
+ 165, /* B0 = 0.005039 */
+ 29089, /* A1 = -1.775452 */
+ -32708, /* A2 = 0.998199 */
+ 6963, /* B2 = 0.212494 */
+ -6172, /* B1 = -0.376770 */
+ 6963, /* B0 = 0.212494 */
+ 29237, /* A1 = -1.784485 */
+ -32710, /* A2 = 0.998230 */
+ 24197, /* B2 = 0.738464 */
+ -21657, /* B1 = -1.321899 */
+ 24197, /* B0 = 0.738464 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f660 */
+ 28376, /* A1 = -1.731934 */
+ -32567, /* A2 = 0.993896 */
+ -363, /* B2 = -0.011102 */
+ 0, /* B1 = 0.000000 */
+ 363, /* B0 = 0.011102 */
+ 28337, /* A1 = -1.729614 */
+ -32683, /* A2 = 0.997434 */
+ 21766, /* B2 = 0.664246 */
+ -18761, /* B1 = -1.145081 */
+ 21766, /* B0 = 0.664246 */
+ 28513, /* A1 = -1.740356 */
+ -32686, /* A2 = 0.997498 */
+ 2509, /* B2 = 0.076584 */
+ -2196, /* B1 = -0.134041 */
+ 2509, /* B0 = 0.076584 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f700 */
+ 27844, /* A1 = -1.699463 */
+ -32563, /* A2 = 0.993744 */
+ -366, /* B2 = -0.011187 */
+ 0, /* B1 = 0.000000 */
+ 366, /* B0 = 0.011187 */
+ 27797, /* A1 = -1.696655 */
+ -32686, /* A2 = 0.997498 */
+ 22748, /* B2 = 0.694214 */
+ -19235, /* B1 = -1.174072 */
+ 22748, /* B0 = 0.694214 */
+ 27995, /* A1 = -1.708740 */
+ -32688, /* A2 = 0.997559 */
+ 2964, /* B2 = 0.090477 */
+ -2546, /* B1 = -0.155449 */
+ 2964, /* B0 = 0.090477 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f740 */
+ 27297, /* A1 = -1.666077 */
+ -32551, /* A2 = 0.993408 */
+ -345, /* B2 = -0.010540 */
+ 0, /* B1 = 0.000000 */
+ 345, /* B0 = 0.010540 */
+ 27240, /* A1 = -1.662598 */
+ -32683, /* A2 = 0.997406 */
+ 22560, /* B2 = 0.688477 */
+ -18688, /* B1 = -1.140625 */
+ 22560, /* B0 = 0.688477 */
+ 27461, /* A1 = -1.676147 */
+ -32684, /* A2 = 0.997467 */
+ 3541, /* B2 = 0.108086 */
+ -2985, /* B1 = -0.182220 */
+ 3541, /* B0 = 0.108086 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f750 */
+ 27155, /* A1 = -1.657410 */
+ -32551, /* A2 = 0.993408 */
+ -462, /* B2 = -0.014117 */
+ 0, /* B1 = 0.000000 */
+ 462, /* B0 = 0.014117 */
+ 27097, /* A1 = -1.653870 */
+ -32683, /* A2 = 0.997406 */
+ 32495, /* B2 = 0.991699 */
+ -26776, /* B1 = -1.634338 */
+ 32495, /* B0 = 0.991699 */
+ 27321, /* A1 = -1.667542 */
+ -32684, /* A2 = 0.997467 */
+ 1835, /* B2 = 0.056007 */
+ -1539, /* B1 = -0.093948 */
+ 1835, /* B0 = 0.056007 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f750_1450[] */
+ 19298, /* A1 = 1.177917 */
+ -24471, /* A2 = -0.746796 */
+ -4152, /* B2 = -0.126709 */
+ 0, /* B1 = 0 */
+ 4152, /* B0 = 0.126709 */
+ 12902, /* A1 = 0.787476 */
+ -29091, /* A2 = -0.887817 */
+ 12491, /* B2 = 0.38121 */
+ -1794, /* B1 = -0.109528 */
+ 12494, /* B0 = 0.381317 */
+ 26291, /* A1 = 1.604736 */
+ -30470, /* A2 = -0.929901 */
+ 28859, /* B2 = 0.880737 */
+ -26084, /* B1 = -1.592102 */
+ 28861, /* B0 = 0.880798 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f770 */
+ 26867, /* A1 = -1.639832 */
+ -32551, /* A2 = 0.993408 */
+ -123, /* B2 = -0.003755 */
+ 0, /* B1 = 0.000000 */
+ 123, /* B0 = 0.003755 */
+ 26805, /* A1 = -1.636108 */
+ -32683, /* A2 = 0.997406 */
+ 17297, /* B2 = 0.527863 */
+ -14096, /* B1 = -0.860382 */
+ 17297, /* B0 = 0.527863 */
+ 27034, /* A1 = -1.650085 */
+ -32684, /* A2 = 0.997467 */
+ 12958, /* B2 = 0.395477 */
+ -10756, /* B1 = -0.656525 */
+ 12958, /* B0 = 0.395477 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f800 */
+ 26413, /* A1 = -1.612122 */
+ -32547, /* A2 = 0.993286 */
+ -223, /* B2 = -0.006825 */
+ 0, /* B1 = 0.000000 */
+ 223, /* B0 = 0.006825 */
+ 26342, /* A1 = -1.607849 */
+ -32686, /* A2 = 0.997498 */
+ 6391, /* B2 = 0.195053 */
+ -5120, /* B1 = -0.312531 */
+ 6391, /* B0 = 0.195053 */
+ 26593, /* A1 = -1.623108 */
+ -32688, /* A2 = 0.997559 */
+ 23681, /* B2 = 0.722717 */
+ -19328, /* B1 = -1.179688 */
+ 23681, /* B0 = 0.722717 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f816 */
+ 26168, /* A1 = -1.597209 */
+ -32528, /* A2 = 0.992706 */
+ -235, /* B2 = -0.007182 */
+ 0, /* B1 = 0.000000 */
+ 235, /* B0 = 0.007182 */
+ 26092, /* A1 = -1.592590 */
+ -32675, /* A2 = 0.997192 */
+ 20823, /* B2 = 0.635498 */
+ -16510, /* B1 = -1.007751 */
+ 20823, /* B0 = 0.635498 */
+ 26363, /* A1 = -1.609070 */
+ -32677, /* A2 = 0.997253 */
+ 6739, /* B2 = 0.205688 */
+ -5459, /* B1 = -0.333206 */
+ 6739, /* B0 = 0.205688 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f850 */
+ 25641, /* A1 = -1.565063 */
+ -32536, /* A2 = 0.992950 */
+ -121, /* B2 = -0.003707 */
+ 0, /* B1 = 0.000000 */
+ 121, /* B0 = 0.003707 */
+ 25560, /* A1 = -1.560059 */
+ -32684, /* A2 = 0.997437 */
+ 18341, /* B2 = 0.559753 */
+ -14252, /* B1 = -0.869904 */
+ 18341, /* B0 = 0.559753 */
+ 25837, /* A1 = -1.577026 */
+ -32684, /* A2 = 0.997467 */
+ 16679, /* B2 = 0.509003 */
+ -13232, /* B1 = -0.807648 */
+ 16679, /* B0 = 0.509003 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f857_1645[] */
+ 16415, /* A1 = 1.001953 */
+ -23669, /* A2 = -0.722321 */
+ -4549, /* B2 = -0.138847 */
+ 0, /* B1 = 0 */
+ 4549, /* B0 = 0.138847 */
+ 8456, /* A1 = 0.516174 */
+ -28996, /* A2 = -0.884918 */
+ 13753, /* B2 = 0.419724 */
+ -12, /* B1 = -0.000763 */
+ 13757, /* B0 = 0.419846 */
+ 24632, /* A1 = 1.503418 */
+ -30271, /* A2 = -0.923828 */
+ 29070, /* B2 = 0.887146 */
+ -25265, /* B1 = -1.542114 */
+ 29073, /* B0 = 0.887268 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f900 */
+ 24806, /* A1 = -1.514099 */
+ -32501, /* A2 = 0.991852 */
+ -326, /* B2 = -0.009969 */
+ 0, /* B1 = 0.000000 */
+ 326, /* B0 = 0.009969 */
+ 24709, /* A1 = -1.508118 */
+ -32659, /* A2 = 0.996674 */
+ 20277, /* B2 = 0.618835 */
+ -15182, /* B1 = -0.926636 */
+ 20277, /* B0 = 0.618835 */
+ 25022, /* A1 = -1.527222 */
+ -32661, /* A2 = 0.996735 */
+ 4320, /* B2 = 0.131836 */
+ -3331, /* B1 = -0.203339 */
+ 4320, /* B0 = 0.131836 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f900_1300[] */
+ 19776, /* A1 = 1.207092 */
+ -27437, /* A2 = -0.837341 */
+ -2666, /* B2 = -0.081371 */
+ 0, /* B1 = 0 */
+ 2666, /* B0 = 0.081371 */
+ 16302, /* A1 = 0.995026 */
+ -30354, /* A2 = -0.926361 */
+ 10389, /* B2 = 0.317062 */
+ -3327, /* B1 = -0.203064 */
+ 10389, /* B0 = 0.317062 */
+ 24299, /* A1 = 1.483154 */
+ -30930, /* A2 = -0.943909 */
+ 25016, /* B2 = 0.763428 */
+ -21171, /* B1 = -1.292236 */
+ 25016, /* B0 = 0.763428 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f935_1215[] */
+ 20554, /* A1 = 1.254517 */
+ -28764, /* A2 = -0.877838 */
+ -2048, /* B2 = -0.062515 */
+ 0, /* B1 = 0 */
+ 2048, /* B0 = 0.062515 */
+ 18209, /* A1 = 1.11145 */
+ -30951, /* A2 = -0.94458 */
+ 9390, /* B2 = 0.286575 */
+ -3955, /* B1 = -0.241455 */
+ 9390, /* B0 = 0.286575 */
+ 23902, /* A1 = 1.458923 */
+ -31286, /* A2 = -0.954803 */
+ 23252, /* B2 = 0.709595 */
+ -19132, /* B1 = -1.167725 */
+ 23252, /* B0 = 0.709595 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f941_1477[] */
+ 17543, /* A1 = 1.07074 */
+ -26220, /* A2 = -0.800201 */
+ -3298, /* B2 = -0.100647 */
+ 0, /* B1 = 0 */
+ 3298, /* B0 = 0.100647 */
+ 12423, /* A1 = 0.75827 */
+ -30036, /* A2 = -0.916626 */
+ 12651, /* B2 = 0.386078 */
+ -2444, /* B1 = -0.14917 */
+ 12653, /* B0 = 0.386154 */
+ 23518, /* A1 = 1.435425 */
+ -30745, /* A2 = -0.938293 */
+ 27282, /* B2 = 0.832581 */
+ -22529, /* B1 = -1.375122 */
+ 27286, /* B0 = 0.832703 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f942 */
+ 24104, /* A1 = -1.471252 */
+ -32507, /* A2 = 0.992065 */
+ -351, /* B2 = -0.010722 */
+ 0, /* B1 = 0.000000 */
+ 351, /* B0 = 0.010722 */
+ 23996, /* A1 = -1.464600 */
+ -32671, /* A2 = 0.997040 */
+ 22848, /* B2 = 0.697266 */
+ -16639, /* B1 = -1.015564 */
+ 22848, /* B0 = 0.697266 */
+ 24332, /* A1 = -1.485168 */
+ -32673, /* A2 = 0.997101 */
+ 4906, /* B2 = 0.149727 */
+ -3672, /* B1 = -0.224174 */
+ 4906, /* B0 = 0.149727 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f950 */
+ 23967, /* A1 = -1.462830 */
+ -32507, /* A2 = 0.992065 */
+ -518, /* B2 = -0.015821 */
+ 0, /* B1 = 0.000000 */
+ 518, /* B0 = 0.015821 */
+ 23856, /* A1 = -1.456055 */
+ -32671, /* A2 = 0.997040 */
+ 26287, /* B2 = 0.802246 */
+ -19031, /* B1 = -1.161560 */
+ 26287, /* B0 = 0.802246 */
+ 24195, /* A1 = -1.476746 */
+ -32673, /* A2 = 0.997101 */
+ 2890, /* B2 = 0.088196 */
+ -2151, /* B1 = -0.131317 */
+ 2890, /* B0 = 0.088196 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f950_1400[] */
+ 18294, /* A1 = 1.116638 */
+ -26962, /* A2 = -0.822845 */
+ -2914, /* B2 = -0.088936 */
+ 0, /* B1 = 0 */
+ 2914, /* B0 = 0.088936 */
+ 14119, /* A1 = 0.861786 */
+ -30227, /* A2 = -0.922455 */
+ 11466, /* B2 = 0.349945 */
+ -2833, /* B1 = -0.172943 */
+ 11466, /* B0 = 0.349945 */
+ 23431, /* A1 = 1.430115 */
+ -30828, /* A2 = -0.940796 */
+ 25331, /* B2 = 0.773071 */
+ -20911, /* B1 = -1.276367 */
+ 25331, /* B0 = 0.773071 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f975 */
+ 23521, /* A1 = -1.435608 */
+ -32489, /* A2 = 0.991516 */
+ -193, /* B2 = -0.005915 */
+ 0, /* B1 = 0.000000 */
+ 193, /* B0 = 0.005915 */
+ 23404, /* A1 = -1.428467 */
+ -32655, /* A2 = 0.996582 */
+ 17740, /* B2 = 0.541412 */
+ -12567, /* B1 = -0.767029 */
+ 17740, /* B0 = 0.541412 */
+ 23753, /* A1 = -1.449829 */
+ -32657, /* A2 = 0.996613 */
+ 9090, /* B2 = 0.277405 */
+ -6662, /* B1 = -0.406647 */
+ 9090, /* B0 = 0.277405 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1000 */
+ 23071, /* A1 = -1.408203 */
+ -32489, /* A2 = 0.991516 */
+ -293, /* B2 = -0.008965 */
+ 0, /* B1 = 0.000000 */
+ 293, /* B0 = 0.008965 */
+ 22951, /* A1 = -1.400818 */
+ -32655, /* A2 = 0.996582 */
+ 5689, /* B2 = 0.173645 */
+ -3951, /* B1 = -0.241150 */
+ 5689, /* B0 = 0.173645 */
+ 23307, /* A1 = -1.422607 */
+ -32657, /* A2 = 0.996613 */
+ 18692, /* B2 = 0.570435 */
+ -13447, /* B1 = -0.820770 */
+ 18692, /* B0 = 0.570435 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1020 */
+ 22701, /* A1 = -1.385620 */
+ -32474, /* A2 = 0.991058 */
+ -292, /* B2 = -0.008933 */
+ 0, /* B1 = 0.000000 */
+ 292, /* B0 = 0.008933 */
+ 22564, /* A1 = -1.377258 */
+ -32655, /* A2 = 0.996552 */
+ 20756, /* B2 = 0.633423 */
+ -14176, /* B1 = -0.865295 */
+ 20756, /* B0 = 0.633423 */
+ 22960, /* A1 = -1.401428 */
+ -32657, /* A2 = 0.996613 */
+ 6520, /* B2 = 0.198990 */
+ -4619, /* B1 = -0.281937 */
+ 6520, /* B0 = 0.198990 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1050 */
+ 22142, /* A1 = -1.351501 */
+ -32474, /* A2 = 0.991058 */
+ -147, /* B2 = -0.004493 */
+ 0, /* B1 = 0.000000 */
+ 147, /* B0 = 0.004493 */
+ 22000, /* A1 = -1.342834 */
+ -32655, /* A2 = 0.996552 */
+ 15379, /* B2 = 0.469360 */
+ -10237, /* B1 = -0.624847 */
+ 15379, /* B0 = 0.469360 */
+ 22406, /* A1 = -1.367554 */
+ -32657, /* A2 = 0.996613 */
+ 17491, /* B2 = 0.533783 */
+ -12096, /* B1 = -0.738312 */
+ 17491, /* B0 = 0.533783 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1100_1750[] */
+ 12973, /* A1 = 0.79184 */
+ -24916, /* A2 = -0.760376 */
+ 6655, /* B2 = 0.203102 */
+ 367, /* B1 = 0.0224 */
+ 6657, /* B0 = 0.203171 */
+ 5915, /* A1 = 0.361053 */
+ -29560, /* A2 = -0.90213 */
+ -7777, /* B2 = -0.23735 */
+ 0, /* B1 = 0 */
+ 7777, /* B0 = 0.23735 */
+ 20510, /* A1 = 1.251892 */
+ -30260, /* A2 = -0.923462 */
+ 26662, /* B2 = 0.81366 */
+ -20573, /* B1 = -1.255737 */
+ 26668, /* B0 = 0.813843 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1140 */
+ 20392, /* A1 = -1.244629 */
+ -32460, /* A2 = 0.990601 */
+ -270, /* B2 = -0.008240 */
+ 0, /* B1 = 0.000000 */
+ 270, /* B0 = 0.008240 */
+ 20218, /* A1 = -1.234009 */
+ -32655, /* A2 = 0.996582 */
+ 21337, /* B2 = 0.651154 */
+ -13044, /* B1 = -0.796143 */
+ 21337, /* B0 = 0.651154 */
+ 20684, /* A1 = -1.262512 */
+ -32657, /* A2 = 0.996643 */
+ 8572, /* B2 = 0.261612 */
+ -5476, /* B1 = -0.334244 */
+ 8572, /* B0 = 0.261612 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1200 */
+ 19159, /* A1 = -1.169373 */
+ -32456, /* A2 = 0.990509 */
+ -335, /* B2 = -0.010252 */
+ 0, /* B1 = 0.000000 */
+ 335, /* B0 = 0.010252 */
+ 18966, /* A1 = -1.157593 */
+ -32661, /* A2 = 0.996735 */
+ 6802, /* B2 = 0.207588 */
+ -3900, /* B1 = -0.238098 */
+ 6802, /* B0 = 0.207588 */
+ 19467, /* A1 = -1.188232 */
+ -32661, /* A2 = 0.996765 */
+ 25035, /* B2 = 0.764008 */
+ -15049, /* B1 = -0.918579 */
+ 25035, /* B0 = 0.764008 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1209 */
+ 18976, /* A1 = -1.158264 */
+ -32439, /* A2 = 0.989990 */
+ -183, /* B2 = -0.005588 */
+ 0, /* B1 = 0.000000 */
+ 183, /* B0 = 0.005588 */
+ 18774, /* A1 = -1.145874 */
+ -32650, /* A2 = 0.996429 */
+ 15468, /* B2 = 0.472076 */
+ -8768, /* B1 = -0.535217 */
+ 15468, /* B0 = 0.472076 */
+ 19300, /* A1 = -1.177979 */
+ -32652, /* A2 = 0.996490 */
+ 19840, /* B2 = 0.605499 */
+ -11842, /* B1 = -0.722809 */
+ 19840, /* B0 = 0.605499 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1330 */
+ 16357, /* A1 = -0.998413 */
+ -32368, /* A2 = 0.987793 */
+ -217, /* B2 = -0.006652 */
+ 0, /* B1 = 0.000000 */
+ 217, /* B0 = 0.006652 */
+ 16107, /* A1 = -0.983126 */
+ -32601, /* A2 = 0.994904 */
+ 11602, /* B2 = 0.354065 */
+ -5555, /* B1 = -0.339111 */
+ 11602, /* B0 = 0.354065 */
+ 16722, /* A1 = -1.020630 */
+ -32603, /* A2 = 0.994965 */
+ 15574, /* B2 = 0.475311 */
+ -8176, /* B1 = -0.499069 */
+ 15574, /* B0 = 0.475311 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1336 */
+ 16234, /* A1 = -0.990875 */
+ 32404, /* A2 = -0.988922; note: sign of value and comment is inverted relative to neighbouring entries (cf. f1366) */
+ -193, /* B2 = -0.005908 */
+ 0, /* B1 = 0.000000 */
+ 193, /* B0 = 0.005908 */
+ 15986, /* A1 = -0.975769 */
+ -32632, /* A2 = 0.995880 */
+ 18051, /* B2 = 0.550903 */
+ -8658, /* B1 = -0.528473 */
+ 18051, /* B0 = 0.550903 */
+ 16591, /* A1 = -1.012695 */
+ -32634, /* A2 = 0.995941 */
+ 15736, /* B2 = 0.480240 */
+ -8125, /* B1 = -0.495926 */
+ 15736, /* B0 = 0.480240 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1366 */
+ 15564, /* A1 = -0.949982 */
+ -32404, /* A2 = 0.988922 */
+ -269, /* B2 = -0.008216 */
+ 0, /* B1 = 0.000000 */
+ 269, /* B0 = 0.008216 */
+ 15310, /* A1 = -0.934479 */
+ -32632, /* A2 = 0.995880 */
+ 10815, /* B2 = 0.330063 */
+ -4962, /* B1 = -0.302887 */
+ 10815, /* B0 = 0.330063 */
+ 15924, /* A1 = -0.971924 */
+ -32634, /* A2 = 0.995941 */
+ 18880, /* B2 = 0.576172 */
+ -9364, /* B1 = -0.571594 */
+ 18880, /* B0 = 0.576172 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1380 */
+ 15247, /* A1 = -0.930603 */
+ -32397, /* A2 = 0.988708 */
+ -244, /* B2 = -0.007451 */
+ 0, /* B1 = 0.000000 */
+ 244, /* B0 = 0.007451 */
+ 14989, /* A1 = -0.914886 */
+ -32627, /* A2 = 0.995697 */
+ 18961, /* B2 = 0.578644 */
+ -8498, /* B1 = -0.518707 */
+ 18961, /* B0 = 0.578644 */
+ 15608, /* A1 = -0.952667 */
+ -32628, /* A2 = 0.995758 */
+ 11145, /* B2 = 0.340134 */
+ -5430, /* B1 = -0.331467 */
+ 11145, /* B0 = 0.340134 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1400 */
+ 14780, /* A1 = -0.902130 */
+ -32393, /* A2 = 0.988586 */
+ -396, /* B2 = -0.012086 */
+ 0, /* B1 = 0.000000 */
+ 396, /* B0 = 0.012086 */
+ 14510, /* A1 = -0.885651 */
+ -32630, /* A2 = 0.995819 */
+ 6326, /* B2 = 0.193069 */
+ -2747, /* B1 = -0.167671 */
+ 6326, /* B0 = 0.193069 */
+ 15154, /* A1 = -0.924957 */
+ -32632, /* A2 = 0.995850 */
+ 23235, /* B2 = 0.709076 */
+ -10983, /* B1 = -0.670380 */
+ 23235, /* B0 = 0.709076 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1477 */
+ 13005, /* A1 = -0.793793 */
+ -32368, /* A2 = 0.987823 */
+ -500, /* B2 = -0.015265 */
+ 0, /* B1 = 0.000000 */
+ 500, /* B0 = 0.015265 */
+ 12708, /* A1 = -0.775665 */
+ -32615, /* A2 = 0.995331 */
+ 11420, /* B2 = 0.348526 */
+ -4306, /* B1 = -0.262833 */
+ 11420, /* B0 = 0.348526 */
+ 13397, /* A1 = -0.817688 */
+ -32615, /* A2 = 0.995361 */
+ 9454, /* B2 = 0.288528 */
+ -3981, /* B1 = -0.243027 */
+ 9454, /* B0 = 0.288528 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1600 */
+ 10046, /* A1 = -0.613190 */
+ -32331, /* A2 = 0.986694 */
+ -455, /* B2 = -0.013915 */
+ 0, /* B1 = 0.000000 */
+ 455, /* B0 = 0.013915 */
+ 9694, /* A1 = -0.591705 */
+ -32601, /* A2 = 0.994934 */
+ 6023, /* B2 = 0.183815 */
+ -1708, /* B1 = -0.104279 */
+ 6023, /* B0 = 0.183815 */
+ 10478, /* A1 = -0.639587 */
+ -32603, /* A2 = 0.994965 */
+ 22031, /* B2 = 0.672333 */
+ -7342, /* B1 = -0.448151 */
+ 22031, /* B0 = 0.672333 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1633_1638 */
+ 9181, /* A1 = 0.560394 */
+ -32256, /* A2 = -0.984375 */
+ -556, /* B2 = -0.016975 */
+ 0, /* B1 = 0.000000 */
+ 556, /* B0 = 0.016975 */
+ 8757, /* A1 = 0.534515 */
+ -32574, /* A2 = -0.994080 */
+ 8443, /* B2 = 0.257690 */
+ -2135, /* B1 = -0.130341 */
+ 8443, /* B0 = 0.257690 */
+ 9691, /* A1 = 0.591522 */
+ -32574, /* A2 = -0.994110 */
+ 15446, /* B2 = 0.471375 */
+ -4809, /* B1 = -0.293579 */
+ 15446, /* B0 = 0.471375 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1800 */
+ 5076, /* A1 = -0.309875 */
+ -32304, /* A2 = 0.985840 */
+ -508, /* B2 = -0.015503 */
+ 0, /* B1 = 0.000000 */
+ 508, /* B0 = 0.015503 */
+ 4646, /* A1 = -0.283600 */
+ -32605, /* A2 = 0.995026 */
+ 6742, /* B2 = 0.205780 */
+ -878, /* B1 = -0.053635 */
+ 6742, /* B0 = 0.205780 */
+ 5552, /* A1 = -0.338928 */
+ -32605, /* A2 = 0.995056 */
+ 23667, /* B2 = 0.722260 */
+ -4297, /* B1 = -0.262329 */
+ 23667, /* B0 = 0.722260 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1860 */
+ 3569, /* A1 = -0.217865 */
+ -32292, /* A2 = 0.985504 */
+ -239, /* B2 = -0.007322 */
+ 0, /* B1 = 0.000000 */
+ 239, /* B0 = 0.007322 */
+ 3117, /* A1 = -0.190277 */
+ -32603, /* A2 = 0.994965 */
+ 18658, /* B2 = 0.569427 */
+ -1557, /* B1 = -0.095032 */
+ 18658, /* B0 = 0.569427 */
+ 4054, /* A1 = -0.247437 */
+ -32603, /* A2 = 0.994965 */
+ 18886, /* B2 = 0.576385 */
+ -2566, /* B1 = -0.156647 */
+ 18886, /* B0 = 0.576385 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+};
+static int ixj_init_filter(IXJ *j, IXJ_FILTER *jf)
+{
+ unsigned short cmd;
+ int cnt, max;
+
+ if (jf->filter > 3)
+ return -1;
+ if (ixj_WriteDSPCommand(0x5154 + jf->filter, j)) /* Select Filter */
+ return -1;
+ if (!jf->enable) {
+ if (ixj_WriteDSPCommand(0x5152, j)) /* Disable Filter */
+ return -1;
+ else
+ return 0;
+ } else {
+ if (ixj_WriteDSPCommand(0x5153, j)) /* Enable Filter */
+ return -1;
+ /* Select the filter (f0 - f3) to use. */
+ if (ixj_WriteDSPCommand(0x5154 + jf->filter, j))
+ return -1;
+ }
+ if (jf->freq < 12 && jf->freq > 3) {
+ /* Select one of the preset frequencies (codes 4 - 11) for the filter. */
+ if (ixj_WriteDSPCommand(0x5170 + jf->freq, j))
+ return -1;
+ } else if (jf->freq > 11) {
+ /* We need to load a programmable filter set for undefined */
+ /* frequencies. So we will point the filter to a programmable set. */
+ /* Since there are only 4 filters and 4 programmable sets, we will */
+ /* just point the filter to the same number set and program it for the */
+ /* frequency we want. */
+ if (ixj_WriteDSPCommand(0x5170 + jf->filter, j))
+ return -1;
+ if (j->ver.low != 0x12) {
+ cmd = 0x515B;
+ max = 19;
+ } else {
+ cmd = 0x515E;
+ max = 15;
+ }
+ if (ixj_WriteDSPCommand(cmd, j))
+ return -1;
+ for (cnt = 0; cnt < max; cnt++) {
+ if (ixj_WriteDSPCommand(tone_table[jf->freq - 12][cnt], j))
+ return -1;
+ }
+ }
+ j->filter_en[jf->filter] = jf->enable;
+ return 0;
+}
+
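+/*
+ * Illustrative sketch (editor's addition, not part of the original
+ * driver): how a caller inside ixj.c might use ixj_init_filter() for
+ * both a preset frequency code (4 - 11) and a programmable one (12 and
+ * up), which streams a tone_table[] row - 15 biquad coefficients plus
+ * four control words (19 words on most DSP versions, 15 when ver.low
+ * == 0x12) - to the DSP as described above. The helper name and the
+ * particular filter/freq values are hypothetical.
+ */
+static int example_enable_filters(IXJ *j)
+{
+ IXJ_FILTER jf;
+
+ jf.filter = 0; /* hardware filter f0 */
+ jf.freq = 4; /* a preset frequency code */
+ jf.enable = 1;
+ if (ixj_init_filter(j, &jf))
+ return -1;
+
+ jf.filter = 1; /* hardware filter f1 */
+ jf.freq = 12; /* first tone_table[] entry */
+ jf.enable = 1;
+ return ixj_init_filter(j, &jf);
+}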
+static int ixj_init_filter_raw(IXJ *j, IXJ_FILTER_RAW *jfr)
+{
+ unsigned short cmd;
+ int cnt, max;
+ if (jfr->filter > 3)
+ return -1;
+ if (ixj_WriteDSPCommand(0x5154 + jfr->filter, j)) /* Select Filter */
+ return -1;
+
+ if (!jfr->enable) {
+ if (ixj_WriteDSPCommand(0x5152, j)) /* Disable Filter */
+ return -1;
+ else
+ return 0;
+ } else {
+ if (ixj_WriteDSPCommand(0x5153, j)) /* Enable Filter */
+ return -1;
+ /* Select the filter (f0 - f3) to use. */
+ if (ixj_WriteDSPCommand(0x5154 + jfr->filter, j))
+ return -1;
+ }
+ /* We need to load a programmable filter set for undefined */
+ /* frequencies. So we will point the filter to a programmable set. */
+ /* Since there are only 4 filters and 4 programmable sets, we will */
+ /* just point the filter to the same number set and program it for the */
+ /* frequency we want. */
+ if (ixj_WriteDSPCommand(0x5170 + jfr->filter, j))
+ return -1;
+ if (j->ver.low != 0x12) {
+ cmd = 0x515B;
+ max = 19;
+ } else {
+ cmd = 0x515E;
+ max = 15;
+ }
+ if (ixj_WriteDSPCommand(cmd, j))
+ return -1;
+ for (cnt = 0; cnt < max; cnt++) {
+ if (ixj_WriteDSPCommand(jfr->coeff[cnt], j))
+ return -1;
+ }
+ j->filter_en[jfr->filter] = jfr->enable;
+ return 0;
+}
+
+static int ixj_init_tone(IXJ *j, IXJ_TONE *ti)
+{
+ int freq0, freq1;
+ unsigned short data;
+ if (ti->freq0) {
+ freq0 = ti->freq0;
+ } else {
+ freq0 = 0x7FFF;
+ }
+
+ if (ti->freq1) {
+ freq1 = ti->freq1;
+ } else {
+ freq1 = 0x7FFF;
+ }
+
+ if (ti->tone_index > 12 && ti->tone_index < 28) {
+ if (ixj_WriteDSPCommand(0x6800 + ti->tone_index, j))
+ return -1;
+ if (ixj_WriteDSPCommand(0x6000 + (ti->gain1 << 4) + ti->gain0, j))
+ return -1;
+ data = freq0;
+ if (ixj_WriteDSPCommand(data, j))
+ return -1;
+ data = freq1;
+ if (ixj_WriteDSPCommand(data, j))
+ return -1;
+ }
+ /* Note: on success this returns freq0 (always non-zero here), not 0. */
+ return freq0;
+}
+
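+/*
+ * Illustrative sketch (editor's addition): loading one of the
+ * user-definable tone slots (indexes 13 - 27) accepted by
+ * ixj_init_tone() above. The slot, gains and raw frequency words below
+ * are arbitrary example values, not driver defaults; a zero frequency
+ * field makes the function write the 0x7FFF "unused" marker instead.
+ */
+static int example_define_tone(IXJ *j)
+{
+ IXJ_TONE ti;
+
+ ti.tone_index = 13; /* first user-definable slot */
+ ti.gain0 = 1; /* gain nibbles are packed into command 0x6000 */
+ ti.gain1 = 0;
+ ti.freq0 = 0x0F00; /* raw DSP frequency word */
+ ti.freq1 = 0; /* unused; 0x7FFF gets written */
+ return ixj_init_tone(j, &ti) < 0 ? -1 : 0;
+}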
diff --git a/drivers/staging/telephony/ixj.h b/drivers/staging/telephony/ixj.h
new file mode 100644
index 000000000000..2c841134f61c
--- /dev/null
+++ b/drivers/staging/telephony/ixj.h
@@ -0,0 +1,1322 @@
+/******************************************************************************
+ * ixj.h
+ *
+ *
+ * Device Driver for Quicknet Technologies, Inc.'s Telephony cards
+ * including the Internet PhoneJACK, Internet PhoneJACK Lite,
+ * Internet PhoneJACK PCI, Internet LineJACK, Internet PhoneCARD and
+ * SmartCABLE
+ *
+ * (c) Copyright 1999-2001 Quicknet Technologies, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Author: Ed Okerson, <eokerson@quicknet.net>
+ *
+ * Contributors: Greg Herlein, <gherlein@quicknet.net>
+ * David W. Erhart, <derhart@quicknet.net>
+ * John Sellers, <jsellers@quicknet.net>
+ * Mike Preston, <mpreston@quicknet.net>
+ *
+ * More information about the hardware related to this driver can be found
+ * at our website: http://www.quicknet.net
+ *
+ * Fixes:
+ *
+ * IN NO EVENT SHALL QUICKNET TECHNOLOGIES, INC. BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
+ * OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF QUICKNET
+ * TECHNOLOGIES, INC. HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * QUICKNET TECHNOLOGIES, INC. SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND QUICKNET TECHNOLOGIES, INC. HAS NO OBLIGATION
+ * TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ *
+ *****************************************************************************/
+#define IXJ_VERSION 3031
+
+#include <linux/types.h>
+
+#include <linux/ixjuser.h>
+#include <linux/phonedev.h>
+
+typedef __u16 WORD;
+typedef __u32 DWORD;
+typedef __u8 BYTE;
+
+#ifndef IXJMAX
+#define IXJMAX 16
+#endif
+
+/******************************************************************************
+*
+* When unioned with the structures below, this structure makes simple
+* byte access to the registers easier.
+*
+******************************************************************************/
+typedef struct {
+ unsigned char low;
+ unsigned char high;
+} BYTES;
+
+typedef union {
+ BYTES bytes;
+ short word;
+} IXJ_WORD;
+
+typedef struct {
+ unsigned int b0:1;
+ unsigned int b1:1;
+ unsigned int b2:1;
+ unsigned int b3:1;
+ unsigned int b4:1;
+ unsigned int b5:1;
+ unsigned int b6:1;
+ unsigned int b7:1;
+} IXJ_CBITS;
+
+typedef union {
+ IXJ_CBITS cbits;
+ char cbyte;
+} IXJ_CBYTE;
+
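+/*
+ * Illustrative sketch (editor's addition, not in the original header):
+ * the unions above let a 16-bit DSP word be stored whole and then read
+ * byte by byte without shifts or masks. The literal is arbitrary; the
+ * high/low split assumes the little-endian hosts these cards sit in.
+ */
+static inline unsigned char ixj_word_high_example(void)
+{
+ IXJ_WORD w;
+
+ w.word = 0x1234; /* pretend this was read from the DSP */
+ return w.bytes.high; /* 0x12 on little-endian */
+}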
+/******************************************************************************
+*
+* This structure represents the Hardware Control Register of the CT8020/8021
+* The CT8020 is used in the Internet PhoneJACK, and the 8021 in the
+* Internet LineJACK
+*
+******************************************************************************/
+typedef struct {
+ unsigned int rxrdy:1;
+ unsigned int txrdy:1;
+ unsigned int status:1;
+ unsigned int auxstatus:1;
+ unsigned int rxdma:1;
+ unsigned int txdma:1;
+ unsigned int rxburst:1;
+ unsigned int txburst:1;
+ unsigned int dmadir:1;
+ unsigned int cont:1;
+ unsigned int irqn:1;
+ unsigned int t:5;
+} HCRBIT;
+
+typedef union {
+ HCRBIT bits;
+ BYTES bytes;
+} HCR;
+
+/******************************************************************************
+*
+* This structure represents the Hardware Status Register of the CT8020/8021
+* The CT8020 is used in the Internet PhoneJACK, and the 8021 in the
+* Internet LineJACK
+*
+******************************************************************************/
+typedef struct {
+ unsigned int controlrdy:1;
+ unsigned int auxctlrdy:1;
+ unsigned int statusrdy:1;
+ unsigned int auxstatusrdy:1;
+ unsigned int rxrdy:1;
+ unsigned int txrdy:1;
+ unsigned int restart:1;
+ unsigned int irqn:1;
+ unsigned int rxdma:1;
+ unsigned int txdma:1;
+ unsigned int cohostshutdown:1;
+ unsigned int t:5;
+} HSRBIT;
+
+typedef union {
+ HSRBIT bits;
+ BYTES bytes;
+} HSR;
+
+/******************************************************************************
+*
+* This structure represents the General Purpose IO Register of the CT8020/8021
+* The CT8020 is used in the Internet PhoneJACK, and the 8021 in the
+* Internet LineJACK
+*
+******************************************************************************/
+typedef struct {
+ unsigned int x:1;
+ unsigned int gpio1:1;
+ unsigned int gpio2:1;
+ unsigned int gpio3:1;
+ unsigned int gpio4:1;
+ unsigned int gpio5:1;
+ unsigned int gpio6:1;
+ unsigned int gpio7:1;
+ unsigned int xread:1;
+ unsigned int gpio1read:1;
+ unsigned int gpio2read:1;
+ unsigned int gpio3read:1;
+ unsigned int gpio4read:1;
+ unsigned int gpio5read:1;
+ unsigned int gpio6read:1;
+ unsigned int gpio7read:1;
+} GPIOBIT;
+
+typedef union {
+ GPIOBIT bits;
+ BYTES bytes;
+ unsigned short word;
+} GPIO;
+
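+/*
+ * Illustrative sketch (editor's addition): the GPIO union is built so
+ * individual lines can be flipped through the bitfield view and the
+ * whole register then pushed to the DSP in one 16-bit write, roughly
+ * as it would appear in ixj.c (where ixj_WriteDSPCommand() lives):
+ *
+ * j->gpio.bits.gpio1 = 1;
+ * j->gpio.bits.gpio2 = 0;
+ * ixj_WriteDSPCommand(j->gpio.word, j);
+ */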
+/******************************************************************************
+*
+* This structure represents the Line Monitor status response
+*
+******************************************************************************/
+typedef struct {
+ unsigned int digit:4;
+ unsigned int cpf_valid:1;
+ unsigned int dtmf_valid:1;
+ unsigned int peak:1;
+ unsigned int z:1;
+ unsigned int f0:1;
+ unsigned int f1:1;
+ unsigned int f2:1;
+ unsigned int f3:1;
+ unsigned int frame:4;
+} LMON;
+
+typedef union {
+ LMON bits;
+ BYTES bytes;
+} DTMF;
+
+typedef struct {
+ unsigned int z:7;
+ unsigned int dtmf_en:1;
+ unsigned int y:4;
+ unsigned int F3:1;
+ unsigned int F2:1;
+ unsigned int F1:1;
+ unsigned int F0:1;
+} CP;
+
+typedef union {
+ CP bits;
+ BYTES bytes;
+} CPTF;
+
+/******************************************************************************
+*
+* This structure represents the Status Control Register on the Internet
+* LineJACK
+*
+******************************************************************************/
+typedef struct {
+ unsigned int c0:1;
+ unsigned int c1:1;
+ unsigned int stereo:1;
+ unsigned int daafsyncen:1;
+ unsigned int led1:1;
+ unsigned int led2:1;
+ unsigned int led3:1;
+ unsigned int led4:1;
+} PSCRWI; /* Internet LineJACK and Internet PhoneJACK Lite */
+
+typedef struct {
+ unsigned int eidp:1;
+ unsigned int eisd:1;
+ unsigned int x:6;
+} PSCRWP; /* Internet PhoneJACK PCI */
+
+typedef union {
+ PSCRWI bits;
+ PSCRWP pcib;
+ char byte;
+} PLD_SCRW;
+
+typedef struct {
+ unsigned int c0:1;
+ unsigned int c1:1;
+ unsigned int x:1;
+ unsigned int d0ee:1;
+ unsigned int mixerbusy:1;
+ unsigned int sci:1;
+ unsigned int dspflag:1;
+ unsigned int daaflag:1;
+} PSCRRI;
+
+typedef struct {
+ unsigned int eidp:1;
+ unsigned int eisd:1;
+ unsigned int x:4;
+ unsigned int dspflag:1;
+ unsigned int det:1;
+} PSCRRP;
+
+typedef union {
+ PSCRRI bits;
+ PSCRRP pcib;
+ char byte;
+} PLD_SCRR;
+
+/******************************************************************************
+*
+* These structures represent the SLIC Control Register on the
+* Internet LineJACK
+*
+******************************************************************************/
+typedef struct {
+ unsigned int c1:1;
+ unsigned int c2:1;
+ unsigned int c3:1;
+ unsigned int b2en:1;
+ unsigned int spken:1;
+ unsigned int rly1:1;
+ unsigned int rly2:1;
+ unsigned int rly3:1;
+} PSLICWRITE;
+
+typedef struct {
+ unsigned int state:3;
+ unsigned int b2en:1;
+ unsigned int spken:1;
+ unsigned int c3:1;
+ unsigned int potspstn:1;
+ unsigned int det:1;
+} PSLICREAD;
+
+typedef struct {
+ unsigned int c1:1;
+ unsigned int c2:1;
+ unsigned int c3:1;
+ unsigned int b2en:1;
+ unsigned int e1:1;
+ unsigned int mic:1;
+ unsigned int spk:1;
+ unsigned int x:1;
+} PSLICPCI;
+
+typedef union {
+ PSLICPCI pcib;
+ PSLICWRITE bits;
+ PSLICREAD slic;
+ char byte;
+} PLD_SLICW;
+
+typedef union {
+ PSLICPCI pcib;
+ PSLICREAD bits;
+ char byte;
+} PLD_SLICR;
+
+/******************************************************************************
+*
+* These structures represent the Clock Control Register on the
+* Internet LineJACK
+*
+******************************************************************************/
+typedef struct {
+ unsigned int clk0:1;
+ unsigned int clk1:1;
+ unsigned int clk2:1;
+ unsigned int x0:1;
+ unsigned int slic_e1:1;
+ unsigned int x1:1;
+ unsigned int x2:1;
+ unsigned int x3:1;
+} PCLOCK;
+
+typedef union {
+ PCLOCK bits;
+ char byte;
+} PLD_CLOCK;
+
+/******************************************************************************
+*
+* These structures deal with the mixer on the Internet LineJACK
+*
+******************************************************************************/
+
+typedef struct {
+ unsigned short vol[10];
+ unsigned int recsrc;
+ unsigned int modcnt;
+ unsigned short micpreamp;
+} MIX;
+
+/******************************************************************************
+*
+* These structures deal with the control logic on the Internet PhoneCARD
+*
+******************************************************************************/
+typedef struct {
+ unsigned int x0:4; /* unused bits */
+
+ unsigned int ed:1; /* Event Detect */
+
+ unsigned int drf:1; /* SmartCABLE Removal Flag 1=no cable */
+
+ unsigned int dspf:1; /* DSP Flag 1=DSP Ready */
+
+ unsigned int crr:1; /* Control Register Ready */
+
+} COMMAND_REG1;
+
+typedef union {
+ COMMAND_REG1 bits;
+ unsigned char byte;
+} PCMCIA_CR1;
+
+typedef struct {
+ unsigned int x0:4; /* unused bits */
+
+ unsigned int rstc:1; /* SmartCABLE Reset */
+
+ unsigned int pwr:1; /* SmartCABLE Power */
+
+ unsigned int x1:2; /* unused bits */
+
+} COMMAND_REG2;
+
+typedef union {
+ COMMAND_REG2 bits;
+ unsigned char byte;
+} PCMCIA_CR2;
+
+typedef struct {
+ unsigned int addr:5; /* R/W SmartCABLE Register Address */
+
+ unsigned int rw:1; /* Read / Write flag */
+
+ unsigned int dev:2; /* 2 bit SmartCABLE Device Address */
+
+} CONTROL_REG;
+
+typedef union {
+ CONTROL_REG bits;
+ unsigned char byte;
+} PCMCIA_SCCR;
+
+typedef struct {
+ unsigned int hsw:1;
+ unsigned int det:1;
+ unsigned int led2:1;
+ unsigned int led1:1;
+ unsigned int ring1:1;
+ unsigned int ring0:1;
+ unsigned int x:1;
+ unsigned int powerdown:1;
+} PCMCIA_SLIC_REG;
+
+typedef union {
+ PCMCIA_SLIC_REG bits;
+ unsigned char byte;
+} PCMCIA_SLIC;
+
+typedef struct {
+ unsigned int cpd:1; /* Chip Power Down */
+
+ unsigned int mpd:1; /* MIC Bias Power Down */
+
+ unsigned int hpd:1; /* Handset Drive Power Down */
+
+ unsigned int lpd:1; /* Line Drive Power Down */
+
+ unsigned int spd:1; /* Speaker Drive Power Down */
+
+ unsigned int x:2; /* unused bits */
+
+ unsigned int sr:1; /* Software Reset */
+
+} Si3CONTROL1;
+
+typedef union {
+ Si3CONTROL1 bits;
+ unsigned char byte;
+} Si3C1;
+
+typedef struct {
+ unsigned int al:1; /* Analog Loopback DAC analog -> ADC analog */
+
+ unsigned int dl2:1; /* Digital Loopback DAC -> ADC one bit */
+
+ unsigned int dl1:1; /* Digital Loopback ADC -> DAC one bit */
+
+ unsigned int pll:1; /* 1 = div 10, 0 = div 5 */
+
+ unsigned int hpd:1; /* HPF disable */
+
+ unsigned int x:3; /* unused bits */
+
+} Si3CONTROL2;
+
+typedef union {
+ Si3CONTROL2 bits;
+ unsigned char byte;
+} Si3C2;
+
+typedef struct {
+ unsigned int iir:1; /* 1 enables IIR, 0 enables FIR */
+
+ unsigned int him:1; /* Handset Input Mute */
+
+ unsigned int mcm:1; /* MIC In Mute */
+
+ unsigned int mcg:2; /* MIC In Gain */
+
+ unsigned int lim:1; /* Line In Mute */
+
+ unsigned int lig:2; /* Line In Gain */
+
+} Si3RXGAIN;
+
+typedef union {
+ Si3RXGAIN bits;
+ unsigned char byte;
+} Si3RXG;
+
+typedef struct {
+ unsigned int hom:1; /* Handset Out Mute */
+
+ unsigned int lom:1; /* Line Out Mute */
+
+ unsigned int rxg:5; /* RX PGA Gain */
+
+ unsigned int x:1; /* unused bit */
+
+} Si3ADCVOLUME;
+
+typedef union {
+ Si3ADCVOLUME bits;
+ unsigned char byte;
+} Si3ADC;
+
+typedef struct {
+ unsigned int srm:1; /* Speaker Right Mute */
+
+ unsigned int slm:1; /* Speaker Left Mute */
+
+ unsigned int txg:5; /* TX PGA Gain */
+
+ unsigned int x:1; /* unused bit */
+
+} Si3DACVOLUME;
+
+typedef union {
+ Si3DACVOLUME bits;
+ unsigned char byte;
+} Si3DAC;
+
+typedef struct {
+ unsigned int x:5; /* unused bit */
+
+ unsigned int losc:1; /* Line Out Short Circuit */
+
+ unsigned int srsc:1; /* Speaker Right Short Circuit */
+
+ unsigned int slsc:1; /* Speaker Left Short Circuit */
+
+} Si3STATUSREPORT;
+
+typedef union {
+ Si3STATUSREPORT bits;
+ unsigned char byte;
+} Si3STAT;
+
+typedef struct {
+ unsigned int sot:2; /* Speaker Out Attenuation */
+
+ unsigned int lot:2; /* Line Out Attenuation */
+
+ unsigned int x:4; /* unused bits */
+
+} Si3ANALOGATTN;
+
+typedef union {
+ Si3ANALOGATTN bits;
+ unsigned char byte;
+} Si3AATT;
+
+/******************************************************************************
+*
+* These structures deal with the DAA on the Internet LineJACK
+*
+******************************************************************************/
+
+typedef struct _DAA_REGS {
+ /*----------------------------------------------- */
+ /* SOP Registers */
+ /* */
+ BYTE bySOP;
+
+ union _SOP_REGS {
+ struct _SOP {
+ union /* SOP - CR0 Register */
+ {
+ BYTE reg;
+ struct _CR0_BITREGS {
+ BYTE CLK_EXT:1; /* cr0[0:0] */
+
+ BYTE RIP:1; /* cr0[1:1] */
+
+ BYTE AR:1; /* cr0[2:2] */
+
+ BYTE AX:1; /* cr0[3:3] */
+
+ BYTE FRR:1; /* cr0[4:4] */
+
+ BYTE FRX:1; /* cr0[5:5] */
+
+ BYTE IM:1; /* cr0[6:6] */
+
+ BYTE TH:1; /* cr0[7:7] */
+
+ } bitreg;
+ } cr0;
+
+ union /* SOP - CR1 Register */
+ {
+ BYTE reg;
+ struct _CR1_REGS {
+ BYTE RM:1; /* cr1[0:0] */
+
+ BYTE RMR:1; /* cr1[1:1] */
+
+ BYTE No_auto:1; /* cr1[2:2] */
+
+ BYTE Pulse:1; /* cr1[3:3] */
+
+ BYTE P_Tone1:1; /* cr1[4:4] */
+
+ BYTE P_Tone2:1; /* cr1[5:5] */
+
+ BYTE E_Tone1:1; /* cr1[6:6] */
+
+ BYTE E_Tone2:1; /* cr1[7:7] */
+
+ } bitreg;
+ } cr1;
+
+ union /* SOP - CR2 Register */
+ {
+ BYTE reg;
+ struct _CR2_REGS {
+ BYTE Call_II:1; /* CR2[0:0] */
+
+ BYTE Call_I:1; /* CR2[1:1] */
+
+ BYTE Call_en:1; /* CR2[2:2] */
+
+ BYTE Call_pon:1; /* CR2[3:3] */
+
+ BYTE IDR:1; /* CR2[4:4] */
+
+ BYTE COT_R:3; /* CR2[5:7] */
+
+ } bitreg;
+ } cr2;
+
+ union /* SOP - CR3 Register */
+ {
+ BYTE reg;
+ struct _CR3_REGS {
+ BYTE DHP_X:1; /* CR3[0:0] */
+
+ BYTE DHP_R:1; /* CR3[1:1] */
+
+ BYTE Cal_pctl:1; /* CR3[2:2] */
+
+ BYTE SEL:1; /* CR3[3:3] */
+
+ BYTE TestLoops:4; /* CR3[4:7] */
+
+ } bitreg;
+ } cr3;
+
+ union /* SOP - CR4 Register */
+ {
+ BYTE reg;
+ struct _CR4_REGS {
+ BYTE Fsc_en:1; /* CR4[0:0] */
+
+ BYTE Int_en:1; /* CR4[1:1] */
+
+ BYTE AGX:2; /* CR4[2:3] */
+
+ BYTE AGR_R:2; /* CR4[4:5] */
+
+ BYTE AGR_Z:2; /* CR4[6:7] */
+
+ } bitreg;
+ } cr4;
+
+ union /* SOP - CR5 Register */
+ {
+ BYTE reg;
+ struct _CR5_REGS {
+ BYTE V_0:1; /* CR5[0:0] */
+
+ BYTE V_1:1; /* CR5[1:1] */
+
+ BYTE V_2:1; /* CR5[2:2] */
+
+ BYTE V_3:1; /* CR5[3:3] */
+
+ BYTE V_4:1; /* CR5[4:4] */
+
+ BYTE V_5:1; /* CR5[5:5] */
+
+ BYTE V_6:1; /* CR5[6:6] */
+
+ BYTE V_7:1; /* CR5[7:7] */
+
+ } bitreg;
+ } cr5;
+
+ union /* SOP - CR6 Register */
+ {
+ BYTE reg;
+ struct _CR6_REGS {
+ BYTE reserved:8; /* CR6[0:7] */
+
+ } bitreg;
+ } cr6;
+
+ union /* SOP - CR7 Register */
+ {
+ BYTE reg;
+ struct _CR7_REGS {
+ BYTE reserved:8; /* CR7[0:7] */
+
+ } bitreg;
+ } cr7;
+ } SOP;
+
+ BYTE ByteRegs[sizeof(struct _SOP)];
+
+ } SOP_REGS;
+
+ /* DAA_REGS.SOP_REGS.SOP.cr5.reg */
+ /* DAA_REGS.SOP_REGS.SOP.cr5.bitreg */
+ /* DAA_REGS.SOP_REGS.SOP.cr5.bitreg.V_2 */
+ /* DAA_REGS.SOP_REGS.ByteRegs[5] */
+
+ /*----------------------------------------------- */
+ /* XOP Registers */
+ /* */
+ BYTE byXOP;
+
+ union _XOP_REGS {
+ struct _XOP {
+ union XOPXR0/* XOP - XR0 Register - Read values */
+ {
+ BYTE reg;
+ struct _XR0_BITREGS {
+ BYTE SI_0:1; /* XR0[0:0] - Read */
+
+ BYTE SI_1:1; /* XR0[1:1] - Read */
+
+ BYTE VDD_OK:1; /* XR0[2:2] - Read */
+
+ BYTE Caller_ID:1; /* XR0[3:3] - Read */
+
+ BYTE RING:1; /* XR0[4:4] - Read */
+
+ BYTE Cadence:1; /* XR0[5:5] - Read */
+
+ BYTE Wake_up:1; /* XR0[6:6] - Read */
+
+ BYTE RMR:1; /* XR0[7:7] - Read */
+
+ } bitreg;
+ } xr0;
+
+ union /* XOP - XR1 Register */
+ {
+ BYTE reg;
+ struct _XR1_BITREGS {
+ BYTE M_SI_0:1; /* XR1[0:0] */
+
+ BYTE M_SI_1:1; /* XR1[1:1] */
+
+ BYTE M_VDD_OK:1; /* XR1[2:2] */
+
+ BYTE M_Caller_ID:1; /* XR1[3:3] */
+
+ BYTE M_RING:1; /* XR1[4:4] */
+
+ BYTE M_Cadence:1; /* XR1[5:5] */
+
+ BYTE M_Wake_up:1; /* XR1[6:6] */
+
+ BYTE unused:1; /* XR1[7:7] */
+
+ } bitreg;
+ } xr1;
+
+ union /* XOP - XR2 Register */
+ {
+ BYTE reg;
+ struct _XR2_BITREGS {
+ BYTE CTO0:1; /* XR2[0:0] */
+
+ BYTE CTO1:1; /* XR2[1:1] */
+
+ BYTE CTO2:1; /* XR2[2:2] */
+
+ BYTE CTO3:1; /* XR2[3:3] */
+
+ BYTE CTO4:1; /* XR2[4:4] */
+
+ BYTE CTO5:1; /* XR2[5:5] */
+
+ BYTE CTO6:1; /* XR2[6:6] */
+
+ BYTE CTO7:1; /* XR2[7:7] */
+
+ } bitreg;
+ } xr2;
+
+ union /* XOP - XR3 Register */
+ {
+ BYTE reg;
+ struct _XR3_BITREGS {
+ BYTE DCR0:1; /* XR3[0:0] */
+
+ BYTE DCR1:1; /* XR3[1:1] */
+
+ BYTE DCI:1; /* XR3[2:2] */
+
+ BYTE DCU0:1; /* XR3[3:3] */
+
+ BYTE DCU1:1; /* XR3[4:4] */
+
+ BYTE B_off:1; /* XR3[5:5] */
+
+ BYTE AGB0:1; /* XR3[6:6] */
+
+ BYTE AGB1:1; /* XR3[7:7] */
+
+ } bitreg;
+ } xr3;
+
+ union /* XOP - XR4 Register */
+ {
+ BYTE reg;
+ struct _XR4_BITREGS {
+ BYTE C_0:1; /* XR4[0:0] */
+
+ BYTE C_1:1; /* XR4[1:1] */
+
+ BYTE C_2:1; /* XR4[2:2] */
+
+ BYTE C_3:1; /* XR4[3:3] */
+
+ BYTE C_4:1; /* XR4[4:4] */
+
+ BYTE C_5:1; /* XR4[5:5] */
+
+ BYTE C_6:1; /* XR4[6:6] */
+
+ BYTE C_7:1; /* XR4[7:7] */
+
+ } bitreg;
+ } xr4;
+
+ union /* XOP - XR5 Register */
+ {
+ BYTE reg;
+ struct _XR5_BITREGS {
+ BYTE T_0:1; /* XR5[0:0] */
+
+ BYTE T_1:1; /* XR5[1:1] */
+
+ BYTE T_2:1; /* XR5[2:2] */
+
+ BYTE T_3:1; /* XR5[3:3] */
+
+ BYTE T_4:1; /* XR5[4:4] */
+
+ BYTE T_5:1; /* XR5[5:5] */
+
+ BYTE T_6:1; /* XR5[6:6] */
+
+ BYTE T_7:1; /* XR5[7:7] */
+
+ } bitreg;
+ } xr5;
+
+ union /* XOP - XR6 Register - Read Values */
+ {
+ BYTE reg;
+ struct _XR6_BITREGS {
+ BYTE CPS0:1; /* XR6[0:0] */
+
+ BYTE CPS1:1; /* XR6[1:1] */
+
+ BYTE unused1:2; /* XR6[2:3] */
+
+ BYTE CLK_OFF:1; /* XR6[4:4] */
+
+ BYTE unused2:3; /* XR6[5:7] */
+
+ } bitreg;
+ } xr6;
+
+ union /* XOP - XR7 Register */
+ {
+ BYTE reg;
+ struct _XR7_BITREGS {
+ BYTE unused1:1; /* XR7[0:0] */
+
+ BYTE Vdd0:1; /* XR7[1:1] */
+
+ BYTE Vdd1:1; /* XR7[2:2] */
+
+ BYTE unused2:5; /* XR7[3:7] */
+
+ } bitreg;
+ } xr7;
+ } XOP;
+
+ BYTE ByteRegs[sizeof(struct _XOP)];
+
+ } XOP_REGS;
+
+ /* DAA_REGS.XOP_REGS.XOP.xr7.reg */
+ /* DAA_REGS.XOP_REGS.XOP.xr7.bitreg */
+ /* DAA_REGS.XOP_REGS.XOP.xr7.bitreg.Vdd0 */
+ /* DAA_REGS.XOP_REGS.ByteRegs[7] */
+
+ /*----------------------------------------------- */
+ /* COP Registers */
+ /* */
+ BYTE byCOP;
+
+ union _COP_REGS {
+ struct _COP {
+ BYTE THFilterCoeff_1[8]; /* COP - TH Filter Coefficients, CODE=0, Part 1 */
+
+ BYTE THFilterCoeff_2[8]; /* COP - TH Filter Coefficients, CODE=1, Part 2 */
+
+ BYTE THFilterCoeff_3[8]; /* COP - TH Filter Coefficients, CODE=2, Part 3 */
+
+ BYTE RingerImpendance_1[8]; /* COP - Ringer Impedance Coefficients, CODE=3, Part 1 */
+
+ BYTE IMFilterCoeff_1[8]; /* COP - IM Filter Coefficients, CODE=4, Part 1 */
+
+ BYTE IMFilterCoeff_2[8]; /* COP - IM Filter Coefficients, CODE=5, Part 2 */
+
+ BYTE RingerImpendance_2[8]; /* COP - Ringer Impedance Coefficients, CODE=6, Part 2 */
+
+ BYTE FRRFilterCoeff[8]; /* COP - FRR Filter Coefficients, CODE=7 */
+
+ BYTE FRXFilterCoeff[8]; /* COP - FRX Filter Coefficients, CODE=8 */
+
+ BYTE ARFilterCoeff[4]; /* COP - AR Filter Coefficients, CODE=9 */
+
+ BYTE AXFilterCoeff[4]; /* COP - AX Filter Coefficients, CODE=10 */
+
+ BYTE Tone1Coeff[4]; /* COP - Tone1 Coefficients, CODE=11 */
+
+ BYTE Tone2Coeff[4]; /* COP - Tone2 Coefficients, CODE=12 */
+
+ BYTE LevelmeteringRinging[4]; /* COP - Levelmetering Ringing, CODE=13 */
+
+ BYTE CallerID1stTone[8]; /* COP - Caller ID 1st Tone, CODE=14 */
+
+ BYTE CallerID2ndTone[8]; /* COP - Caller ID 2nd Tone, CODE=15 */
+
+ } COP;
+
+ BYTE ByteRegs[sizeof(struct _COP)];
+
+ } COP_REGS;
+
+ /* DAA_REGS.COP_REGS.COP.Tone1Coeff[3] */
+ /* DAA_REGS.COP_REGS.COP.CallerID1stTone[0] */
+ /* DAA_REGS.COP_REGS.ByteRegs[57] */
+
+ /*----------------------------------------------- */
+ /* CAO Registers */
+ /* */
+ BYTE byCAO;
+
+ union _CAO_REGS {
+ struct _CAO {
+ BYTE CallerID[512]; /* CAO - Caller ID Bytes */
+
+ } CAO;
+
+ BYTE ByteRegs[sizeof(struct _CAO)];
+ } CAO_REGS;
+
+ union /* XOP - XR0 Register - Write values */
+ {
+ BYTE reg;
+ struct _XR0_BITREGSW {
+ BYTE SO_0:1; /* XR0[0:0] - Write */
+
+ BYTE SO_1:1; /* XR0[1:1] - Write */
+
+ BYTE SO_2:1; /* XR0[2:2] - Write */
+
+ BYTE unused:5; /* XR0[3:7] - Write */
+
+ } bitreg;
+ } XOP_xr0_W;
+
+ union /* XOP - XR6 Register - Write values */
+ {
+ BYTE reg;
+ struct _XR6_BITREGSW {
+ BYTE unused1:4; /* XR6[0:3] */
+
+ BYTE CLK_OFF:1; /* XR6[4:4] */
+
+ BYTE unused2:3; /* XR6[5:7] */
+
+ } bitreg;
+ } XOP_xr6_W;
+
+} DAA_REGS;
+
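+/*
+ * Illustrative sketch (editor's addition): reading one bit of the DAA
+ * shadow registers through the union paths noted in the comments
+ * above; the helper name is hypothetical.
+ */
+static inline int example_daa_ring_bit(const DAA_REGS *r)
+{
+ return r->XOP_REGS.XOP.xr0.bitreg.RING;
+}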
+#define ALISDAA_ID_BYTE 0x81
+#define ALISDAA_CALLERID_SIZE 512
+
+/*------------------------------ */
+/* */
+/* Misc definitions */
+/* */
+
+/* Power Up Operation */
+#define SOP_PU_SLEEP 0
+#define SOP_PU_RINGING 1
+#define SOP_PU_CONVERSATION 2
+#define SOP_PU_PULSEDIALING 3
+#define SOP_PU_RESET 4
+
+#define PLAYBACK_MODE_COMPRESSED 0 /* Selects: Compressed modes, TrueSpeech 8.5-4.1, G.723.1, G.722, G.728, G.729 */
+#define PLAYBACK_MODE_TRUESPEECH_V40 0 /* Selects: TrueSpeech 8.5, 6.3, 5.3, 4.8 or 4.1 Kbps */
+#define PLAYBACK_MODE_TRUESPEECH 8 /* Selects: TrueSpeech 8.5, 6.3, 5.3, 4.8 or 4.1 Kbps Version 5.1 */
+#define PLAYBACK_MODE_ULAW 2 /* Selects: 64 Kbit/sec Mu-law PCM */
+#define PLAYBACK_MODE_ALAW 10 /* Selects: 64 Kbit/sec A-law PCM */
+#define PLAYBACK_MODE_16LINEAR 6 /* Selects: 128 Kbit/sec 16-bit linear */
+#define PLAYBACK_MODE_8LINEAR 4 /* Selects: 64 Kbit/sec 8-bit signed linear */
+#define PLAYBACK_MODE_8LINEAR_WSS 5 /* Selects: 64 Kbit/sec WSS 8-bit unsigned linear */
+
+#define RECORD_MODE_COMPRESSED 0 /* Selects: Compressed modes, TrueSpeech 8.5-4.1, G.723.1, G.722, G.728, G.729 */
+#define RECORD_MODE_TRUESPEECH 0 /* Selects: TrueSpeech 8.5, 6.3, 5.3, 4.8 or 4.1 Kbps */
+#define RECORD_MODE_ULAW 4 /* Selects: 64 Kbit/sec Mu-law PCM */
+#define RECORD_MODE_ALAW 12 /* Selects: 64 Kbit/sec A-law PCM */
+#define RECORD_MODE_16LINEAR 5 /* Selects: 128 Kbit/sec 16-bit linear */
+#define RECORD_MODE_8LINEAR 6 /* Selects: 64 Kbit/sec 8-bit signed linear */
+#define RECORD_MODE_8LINEAR_WSS 7 /* Selects: 64 Kbit/sec WSS 8-bit unsigned linear */
+
+enum SLIC_STATES {
+ PLD_SLIC_STATE_OC = 0,
+ PLD_SLIC_STATE_RINGING,
+ PLD_SLIC_STATE_ACTIVE,
+ PLD_SLIC_STATE_OHT,
+ PLD_SLIC_STATE_TIPOPEN,
+ PLD_SLIC_STATE_STANDBY,
+ PLD_SLIC_STATE_APR,
+ PLD_SLIC_STATE_OHTPR
+};
+
+enum SCI_CONTROL {
+ SCI_End = 0,
+ SCI_Enable_DAA,
+ SCI_Enable_Mixer,
+ SCI_Enable_EEPROM
+};
+
+enum Mode {
+ T63, T53, T48, T40
+};
+enum Dir {
+ V3_TO_V4, V4_TO_V3, V4_TO_V5, V5_TO_V4
+};
+
+typedef struct Proc_Info_Tag {
+ enum Mode convert_mode;
+ enum Dir convert_dir;
+ int Prev_Frame_Type;
+ int Current_Frame_Type;
+} Proc_Info_Type;
+
+enum PREVAL {
+ NORMAL = 0,
+ NOPOST,
+ POSTONLY,
+ PREERROR
+};
+
+enum IXJ_EXTENSIONS {
+ G729LOADER = 0,
+ TS85LOADER,
+ PRE_READ,
+ POST_READ,
+ PRE_WRITE,
+ POST_WRITE,
+ PRE_IOCTL,
+ POST_IOCTL
+};
+
+typedef struct {
+ char enable;
+ char en_filter;
+ unsigned int filter;
+ unsigned int state; /* State 0 when cadence has not started. */
+
+ unsigned int on1; /* State 1 */
+
+ unsigned long on1min; /* State 1 - 10% + jiffies */
+ unsigned long on1dot; /* State 1 + jiffies */
+
+ unsigned long on1max; /* State 1 + 10% + jiffies */
+
+ unsigned int off1; /* State 2 */
+
+ unsigned long off1min;
+ unsigned long off1dot; /* State 2 + jiffies */
+ unsigned long off1max;
+ unsigned int on2; /* State 3 */
+
+ unsigned long on2min;
+ unsigned long on2dot;
+ unsigned long on2max;
+ unsigned int off2; /* State 4 */
+
+ unsigned long off2min;
+ unsigned long off2dot; /* State 4 + jiffies */
+ unsigned long off2max;
+ unsigned int on3; /* State 5 */
+
+ unsigned long on3min;
+ unsigned long on3dot;
+ unsigned long on3max;
+ unsigned int off3; /* State 6 */
+
+ unsigned long off3min;
+ unsigned long off3dot; /* State 6 + jiffies */
+ unsigned long off3max;
+} IXJ_CADENCE_F;
+
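+/*
+ * Illustrative sketch (editor's addition): one plausible way the
+ * min/dot/max jiffy windows above are derived from an on-time value
+ * with the +/- 10% tolerance the "State 1 - 10%" comments suggest.
+ * "hertz" stands for a codec-dependent scaling factor and the helper
+ * name is hypothetical; jiffies comes from <linux/jiffies.h>.
+ */
+static inline void example_cadence_window(IXJ_CADENCE_F *cf,
+ unsigned long hertz)
+{
+ cf->on1min = jiffies + (long)((cf->on1 * hertz * 90) / 10000);
+ cf->on1dot = jiffies + (long)((cf->on1 * hertz * 100) / 10000);
+ cf->on1max = jiffies + (long)((cf->on1 * hertz * 110) / 10000);
+}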
+typedef struct {
+ unsigned int busytone:1;
+ unsigned int dialtone:1;
+ unsigned int ringback:1;
+ unsigned int ringing:1;
+ unsigned int playing:1;
+ unsigned int recording:1;
+ unsigned int cringing:1;
+ unsigned int play_first_frame:1;
+ unsigned int pstn_present:1;
+ unsigned int pstn_ringing:1;
+ unsigned int pots_correct:1;
+ unsigned int pots_pstn:1;
+ unsigned int g729_loaded:1;
+ unsigned int ts85_loaded:1;
+ unsigned int dtmf_oob:1; /* DTMF Out-Of-Band */
+
+ unsigned int pcmciascp:1; /* SmartCABLE Present */
+
+ unsigned int pcmciasct:2; /* SmartCABLE Type */
+
+ unsigned int pcmciastate:3; /* SmartCABLE Init State */
+
+ unsigned int inwrite:1; /* Currently writing */
+
+ unsigned int inread:1; /* Currently reading */
+
+ unsigned int incheck:1; /* Currently checking the SmartCABLE */
+
+ unsigned int cidplay:1; /* Currently playing Caller ID */
+
+ unsigned int cidring:1; /* This is the ring for Caller ID */
+
+ unsigned int cidsent:1; /* Caller ID has been sent */
+
+ unsigned int cidcw_ack:1; /* Caller ID CW ACK (from CPE) */
+ unsigned int firstring:1; /* First ring cadence is complete */
+ unsigned int pstncheck:1; /* Currently checking the PSTN Line */
+ unsigned int pstn_rmr:1;
+ unsigned int x:3; /* unused bits */
+
+} IXJ_FLAGS;
+
+/******************************************************************************
+*
+* This structure holds the state of all of the Quicknet cards
+*
+******************************************************************************/
+
+typedef struct {
+ int elements_used;
+ IXJ_CADENCE_TERM termination;
+ IXJ_CADENCE_ELEMENT *ce;
+} ixj_cadence;
+
+typedef struct {
+ struct phone_device p;
+ struct timer_list timer;
+ unsigned int board;
+ unsigned int DSPbase;
+ unsigned int XILINXbase;
+ unsigned int serial;
+ atomic_t DSPWrite;
+ struct phone_capability caplist[30];
+ unsigned int caps;
+ struct pnp_dev *dev;
+ unsigned int cardtype;
+ unsigned int rec_codec;
+ unsigned int cid_rec_codec;
+ unsigned int cid_rec_volume;
+ unsigned char cid_rec_flag;
+ signed char rec_mode;
+ unsigned int play_codec;
+ unsigned int cid_play_codec;
+ unsigned int cid_play_volume;
+ unsigned char cid_play_flag;
+ signed char play_mode;
+ IXJ_FLAGS flags;
+ unsigned long busyflags;
+ unsigned int rec_frame_size;
+ unsigned int play_frame_size;
+ unsigned int cid_play_frame_size;
+ unsigned int cid_base_frame_size;
+ unsigned long cidcw_wait;
+ int aec_level;
+ int cid_play_aec_level;
+ int readers, writers;
+ wait_queue_head_t poll_q;
+ wait_queue_head_t read_q;
+ char *read_buffer, *read_buffer_end;
+ char *read_convert_buffer;
+ size_t read_buffer_size;
+ unsigned int read_buffer_ready;
+ wait_queue_head_t write_q;
+ char *write_buffer, *write_buffer_end;
+ char *write_convert_buffer;
+ size_t write_buffer_size;
+ unsigned int write_buffers_empty;
+ unsigned long drybuffer;
+ char *write_buffer_rp, *write_buffer_wp;
+ char dtmfbuffer[80];
+ char dtmf_current;
+ int dtmf_wp, dtmf_rp, dtmf_state, dtmf_proc;
+ int tone_off_time, tone_on_time;
+ struct fasync_struct *async_queue;
+ unsigned long tone_start_jif;
+ char tone_index;
+ char tone_state;
+ char maxrings;
+ ixj_cadence *cadence_t;
+ ixj_cadence *cadence_r;
+ int tone_cadence_state;
+ IXJ_CADENCE_F cadence_f[6];
+ DTMF dtmf;
+ CPTF cptf;
+ BYTES dsp;
+ BYTES ver;
+ BYTES scr;
+ BYTES ssr;
+ BYTES baseframe;
+ HSR hsr;
+ GPIO gpio;
+ PLD_SCRR pld_scrr;
+ PLD_SCRW pld_scrw;
+ PLD_SLICW pld_slicw;
+ PLD_SLICR pld_slicr;
+ PLD_CLOCK pld_clock;
+ PCMCIA_CR1 pccr1;
+ PCMCIA_CR2 pccr2;
+ PCMCIA_SCCR psccr;
+ PCMCIA_SLIC pslic;
+ char pscdd;
+ Si3C1 sic1;
+ Si3C2 sic2;
+ Si3RXG sirxg;
+ Si3ADC siadc;
+ Si3DAC sidac;
+ Si3STAT sistat;
+ Si3AATT siaatt;
+ MIX mix;
+ unsigned short ring_cadence;
+ int ring_cadence_t;
+ unsigned long ring_cadence_jif;
+ unsigned long checkwait;
+ int intercom;
+ int m_hook;
+ int r_hook;
+ int p_hook;
+ char pstn_envelope;
+ char pstn_cid_intr;
+ unsigned char fskz;
+ unsigned char fskphase;
+ unsigned char fskcnt;
+ unsigned int cidsize;
+ unsigned int cidcnt;
+ unsigned long pstn_cid_received;
+ PHONE_CID cid;
+ PHONE_CID cid_send;
+ unsigned long pstn_ring_int;
+ unsigned long pstn_ring_start;
+ unsigned long pstn_ring_stop;
+ unsigned long pstn_winkstart;
+ unsigned long pstn_last_rmr;
+ unsigned long pstn_prev_rmr;
+ unsigned long pots_winkstart;
+ unsigned int winktime;
+ unsigned long flash_end;
+ char port;
+ char hookstate;
+ union telephony_exception ex;
+ union telephony_exception ex_sig;
+ int ixj_signals[35];
+ IXJ_SIGDEF sigdef;
+ char daa_mode;
+ char daa_country;
+ unsigned long pstn_sleeptil;
+ DAA_REGS m_DAAShadowRegs;
+ Proc_Info_Type Info_read;
+ Proc_Info_Type Info_write;
+ unsigned short frame_count;
+ unsigned int filter_hist[4];
+ unsigned char filter_en[6];
+ unsigned short proc_load;
+ unsigned long framesread;
+ unsigned long frameswritten;
+ unsigned long read_wait;
+ unsigned long write_wait;
+ unsigned long timerchecks;
+ unsigned long txreadycheck;
+ unsigned long rxreadycheck;
+ unsigned long statuswait;
+ unsigned long statuswaitfail;
+ unsigned long pcontrolwait;
+ unsigned long pcontrolwaitfail;
+ unsigned long iscontrolready;
+ unsigned long iscontrolreadyfail;
+ unsigned long pstnstatecheck;
+#ifdef IXJ_DYN_ALLOC
+ short *fskdata;
+#else
+ short fskdata[8000];
+#endif
+ int fsksize;
+ int fskdcnt;
+} IXJ;
+
+typedef int (*IXJ_REGFUNC) (IXJ *j, unsigned long arg);
+
+extern IXJ *ixj_pcmcia_probe(unsigned long, unsigned long);
+
diff --git a/drivers/staging/telephony/ixj_pcmcia.c b/drivers/staging/telephony/ixj_pcmcia.c
new file mode 100644
index 000000000000..05032e2cc954
--- /dev/null
+++ b/drivers/staging/telephony/ixj_pcmcia.c
@@ -0,0 +1,187 @@
+#include "ixj-ver.h"
+
+#include <linux/module.h>
+
+#include <linux/init.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/fs.h> /* everything... */
+#include <linux/errno.h> /* error codes */
+#include <linux/slab.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include "ixj.h"
+
+/*
+ * PCMCIA service support for Quicknet cards
+ */
+
+
+typedef struct ixj_info_t {
+ int ndev;
+ struct ixj *port; /* not used in this file */
+} ixj_info_t;
+
+static void ixj_detach(struct pcmcia_device *p_dev);
+static int ixj_config(struct pcmcia_device *link);
+static void ixj_cs_release(struct pcmcia_device *link);
+
+static int ixj_probe(struct pcmcia_device *p_dev)
+{
+ dev_dbg(&p_dev->dev, "ixj_probe()\n");
+ /* Create new ixj device */
+ p_dev->priv = kzalloc(sizeof(struct ixj_info_t), GFP_KERNEL);
+ if (!p_dev->priv) {
+ return -ENOMEM;
+ }
+
+ return ixj_config(p_dev);
+}
+
+static void ixj_detach(struct pcmcia_device *link)
+{
+ dev_dbg(&link->dev, "ixj_detach\n");
+
+ ixj_cs_release(link);
+
+ kfree(link->priv);
+}
+
+static void ixj_get_serial(struct pcmcia_device *link, IXJ *j)
+{
+ char *str;
+ int i, place;
+ dev_dbg(&link->dev, "ixj_get_serial\n");
+
+ str = link->prod_id[0];
+ if (!str)
+ goto failed;
+ printk(KERN_INFO "%s", str);
+ str = link->prod_id[1];
+ if (!str)
+ goto failed;
+ printk(KERN_CONT " %s", str);
+ str = link->prod_id[2];
+ if (!str)
+ goto failed;
+ /* The serial number is a hexadecimal string; accumulate it from the
+ * least significant digit, scaling the place value by 16 each step. */
+ place = 1;
+ for (i = strlen(str) - 1; i >= 0; i--) {
+ switch (str[i]) {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ j->serial += (str[i] - '0') * place;
+ break;
+ case 'A':
+ case 'B':
+ case 'C':
+ case 'D':
+ case 'E':
+ case 'F':
+ j->serial += (str[i] - 'A' + 10) * place;
+ break;
+ case 'a':
+ case 'b':
+ case 'c':
+ case 'd':
+ case 'e':
+ case 'f':
+ j->serial += (str[i] - 'a' + 10) * place;
+ break;
+ }
+ place = place * 0x10;
+ }
+ str = link->prod_id[3];
+ if (!str)
+ goto failed;
+ printk(KERN_CONT " version %s\n", str);
+failed:
+ return;
+}
+
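+/*
+ * Editor's note (illustrative): the digit loop above amounts to
+ * parsing prod_id[2] as a hexadecimal number. If the string held only
+ * hex digits, the same result could be had with
+ *
+ * j->serial = simple_strtoul(str, NULL, 16);
+ *
+ * the hand-rolled loop differs only in treating a non-hex character
+ * as a zero digit while still advancing the place value.
+ */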
+static int ixj_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
+ p_dev->io_lines = 3;
+
+ return pcmcia_request_io(p_dev);
+}
+
+static int ixj_config(struct pcmcia_device *link)
+{
+ IXJ *j;
+ ixj_info_t *info;
+
+ info = link->priv;
+ dev_dbg(&link->dev, "ixj_config\n");
+
+ link->config_flags = CONF_AUTO_SET_IO;
+
+ if (pcmcia_loop_config(link, ixj_config_check, NULL))
+ goto failed;
+
+ if (pcmcia_enable_device(link))
+ goto failed;
+
+ /*
+ * Register the card with the core.
+ */
+ j = ixj_pcmcia_probe(link->resource[0]->start,
+ link->resource[0]->start + 0x10);
+
+ info->ndev = 1;
+ ixj_get_serial(link, j);
+ return 0;
+
+failed:
+ ixj_cs_release(link);
+ return -ENODEV;
+}
+
+static void ixj_cs_release(struct pcmcia_device *link)
+{
+ ixj_info_t *info = link->priv;
+ dev_dbg(&link->dev, "ixj_cs_release\n");
+ info->ndev = 0;
+ pcmcia_disable_device(link);
+}
+
+static const struct pcmcia_device_id ixj_ids[] = {
+ PCMCIA_DEVICE_MANF_CARD(0x0257, 0x0600),
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, ixj_ids);
+
+static struct pcmcia_driver ixj_driver = {
+ .owner = THIS_MODULE,
+ .name = "ixj_cs",
+ .probe = ixj_probe,
+ .remove = ixj_detach,
+ .id_table = ixj_ids,
+};
+
+static int __init ixj_pcmcia_init(void)
+{
+ return pcmcia_register_driver(&ixj_driver);
+}
+
+static void ixj_pcmcia_exit(void)
+{
+ pcmcia_unregister_driver(&ixj_driver);
+}
+
+module_init(ixj_pcmcia_init);
+module_exit(ixj_pcmcia_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/telephony/phonedev.c b/drivers/staging/telephony/phonedev.c
new file mode 100644
index 000000000000..1915af201175
--- /dev/null
+++ b/drivers/staging/telephony/phonedev.c
@@ -0,0 +1,167 @@
+/*
+ * Telephony registration for Linux
+ *
+ * (c) Copyright 1999 Red Hat Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Author: Alan Cox, <alan@lxorguk.ukuu.org.uk>
+ *
+ * Fixes: Mar 01 2000 Thomas Sparr, <thomas.l.sparr@telia.com>
+ * phone_register_device now works with unit!=PHONE_UNIT_ANY
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/phonedev.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/kmod.h>
+#include <linux/sem.h>
+#include <linux/mutex.h>
+
+#define PHONE_NUM_DEVICES 256
+
+/*
+ * Active devices
+ */
+
+static struct phone_device *phone_device[PHONE_NUM_DEVICES];
+static DEFINE_MUTEX(phone_lock);
+
+/*
+ * Open a phone device.
+ */
+
+static int phone_open(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+ int err = 0;
+ struct phone_device *p;
+ const struct file_operations *old_fops, *new_fops = NULL;
+
+ if (minor >= PHONE_NUM_DEVICES)
+ return -ENODEV;
+
+ mutex_lock(&phone_lock);
+ p = phone_device[minor];
+ if (p)
+ new_fops = fops_get(p->f_op);
+ if (!new_fops) {
+ mutex_unlock(&phone_lock);
+ request_module("char-major-%d-%d", PHONE_MAJOR, minor);
+ mutex_lock(&phone_lock);
+ p = phone_device[minor];
+ if (p == NULL || (new_fops = fops_get(p->f_op)) == NULL) {
+ err = -ENODEV;
+ goto end;
+ }
+ }
+ old_fops = file->f_op;
+ file->f_op = new_fops;
+ if (p->open)
+ err = p->open(p, file); /* Tell the device it is open */
+ if (err) {
+ fops_put(file->f_op);
+ file->f_op = fops_get(old_fops);
+ }
+ fops_put(old_fops);
+end:
+ mutex_unlock(&phone_lock);
+ return err;
+}
+
+/*
+ * Telephony For Linux device drivers request registration here.
+ */
+
+int phone_register_device(struct phone_device *p, int unit)
+{
+ int base;
+ int end;
+ int i;
+
+ base = 0;
+ end = PHONE_NUM_DEVICES - 1;
+
+ if (unit != PHONE_UNIT_ANY) {
+ base = unit;
+ end = unit + 1; /* so the loop runs exactly once */
+ }
+
+ mutex_lock(&phone_lock);
+ for (i = base; i < end; i++) {
+ if (phone_device[i] == NULL) {
+ phone_device[i] = p;
+ p->minor = i;
+ mutex_unlock(&phone_lock);
+ return 0;
+ }
+ }
+ mutex_unlock(&phone_lock);
+ return -ENFILE;
+}
+
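+/*
+ * Illustrative sketch (editor's addition): a driver asking for one
+ * specific minor first and falling back to any free slot. The helper
+ * and the minor value are hypothetical; PHONE_UNIT_ANY comes from
+ * <linux/phonedev.h>.
+ */
+static int example_register(struct phone_device *p)
+{
+ if (phone_register_device(p, 3) == 0) /* try /dev/phone3 */
+ return 0;
+ return phone_register_device(p, PHONE_UNIT_ANY);
+}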
+/*
+ * Unregister an unused Telephony for Linux device
+ */
+
+void phone_unregister_device(struct phone_device *pfd)
+{
+ mutex_lock(&phone_lock);
+ if (likely(phone_device[pfd->minor] == pfd))
+ phone_device[pfd->minor] = NULL;
+ mutex_unlock(&phone_lock);
+}
+
+
+static const struct file_operations phone_fops =
+{
+ .owner = THIS_MODULE,
+ .open = phone_open,
+ .llseek = noop_llseek,
+};
+
+/*
+ * Board init functions
+ */
+
+
+/*
+ * Initialise Telephony for Linux
+ */
+
+static int __init telephony_init(void)
+{
+ printk(KERN_INFO "Linux telephony interface: v1.00\n");
+ if (register_chrdev(PHONE_MAJOR, "telephony", &phone_fops)) {
+ printk("phonedev: unable to get major %d\n", PHONE_MAJOR);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void __exit telephony_exit(void)
+{
+ unregister_chrdev(PHONE_MAJOR, "telephony");
+}
+
+module_init(telephony_init);
+module_exit(telephony_exit);
+
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(phone_register_device);
+EXPORT_SYMBOL(phone_unregister_device);
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index 21a559ecbbb1..0dd479f5638d 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -31,12 +31,6 @@ config TIDSPBRIDGE_MEMPOOL_SIZE
Allocate specified size of memory at booting time to avoid allocation
failure under heavy memory fragmentation after some use time.
-config TIDSPBRIDGE_DEBUG
- bool "Debug Support"
- depends on TIDSPBRIDGE
- help
- Say Y to enable Bridge debugging capabilities
-
config TIDSPBRIDGE_RECOVERY
bool "Recovery Support"
depends on TIDSPBRIDGE
@@ -58,22 +52,6 @@ config TIDSPBRIDGE_CACHE_LINE_CHECK
This can lead to heap corruption. Say Y, to enforce the check for 128
byte alignment, buffers failing this check will be rejected.
-config TIDSPBRIDGE_WDT3
- bool "Enable watchdog timer"
- depends on TIDSPBRIDGE
- help
- WTD3 is managed by DSP and once it is enabled, DSP side bridge is in
- charge of refreshing the timer before overflow, if the DSP hangs MPU
- will caught the interrupt and try to recover DSP.
-
-config TIDSPBRIDGE_WDT_TIMEOUT
- int "Watchdog timer timeout (in secs)"
- depends on TIDSPBRIDGE && TIDSPBRIDGE_WDT3
- default 5
- help
- Watchdog timer timeout value, after that time if the watchdog timer
- counter is not reset the wdt overflow interrupt will be triggered
-
config TIDSPBRIDGE_NTFY_PWRERR
bool "Notify power errors"
depends on TIDSPBRIDGE
diff --git a/drivers/staging/tidspbridge/Makefile b/drivers/staging/tidspbridge/Makefile
index fd6a2761cc3b..8c8c92a9083f 100644
--- a/drivers/staging/tidspbridge/Makefile
+++ b/drivers/staging/tidspbridge/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_TIDSPBRIDGE) += bridgedriver.o
+obj-$(CONFIG_TIDSPBRIDGE) += tidspbridge.o
libgen = gen/gh.o gen/uuidutil.o
libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \
@@ -13,7 +13,7 @@ libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \
dynload/tramp.o
libhw = hw/hw_mmu.o
-bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \
+tidspbridge-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \
$(libdload) $(libhw)
#Machine dependent
diff --git a/drivers/staging/tidspbridge/core/chnl_sm.c b/drivers/staging/tidspbridge/core/chnl_sm.c
index 6d66e7d0fba8..e0c7e4c470c8 100644
--- a/drivers/staging/tidspbridge/core/chnl_sm.c
+++ b/drivers/staging/tidspbridge/core/chnl_sm.c
@@ -50,9 +50,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -123,7 +120,6 @@ int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
CHNL_IS_OUTPUT(pchnl->chnl_mode))
return -EPIPE;
/* No other possible states left */
- DBC_ASSERT(0);
}
dev_obj = dev_get_first();
@@ -190,7 +186,6 @@ func_cont:
* Note: for dma chans dw_dsp_addr contains dsp address
* of SM buffer.
*/
- DBC_ASSERT(chnl_mgr_obj->word_size != 0);
/* DSP address */
chnl_packet_obj->dsp_tx_addr = dw_dsp_addr / chnl_mgr_obj->word_size;
chnl_packet_obj->byte_size = byte_size;
@@ -201,7 +196,6 @@ func_cont:
CHNL_IOCSTATCOMPLETE);
list_add_tail(&chnl_packet_obj->link, &pchnl->io_requests);
pchnl->cio_reqs++;
- DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
/*
* If end of stream, update the channel state to prevent
* more IOR's.
@@ -209,8 +203,6 @@ func_cont:
if (is_eos)
pchnl->state |= CHNL_STATEEOS;
- /* Legacy DSM Processor-Copy */
- DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
/* Request IO from the DSP */
io_request_chnl(chnl_mgr_obj->iomgr, pchnl,
(CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
@@ -283,7 +275,6 @@ int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
list_add_tail(&chirp->link, &pchnl->io_completions);
pchnl->cio_cs++;
pchnl->cio_reqs--;
- DBC_ASSERT(pchnl->cio_reqs >= 0);
}
spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
@@ -311,8 +302,6 @@ int bridge_chnl_close(struct chnl_object *chnl_obj)
status = bridge_chnl_cancel_io(chnl_obj);
if (status)
return status;
- /* Assert I/O on this channel is now cancelled: Protects from io_dpc */
- DBC_ASSERT((pchnl->state & CHNL_STATECANCEL));
/* Invalidate channel object: Protects from CHNL_GetIOCompletion() */
/* Free the slot in the channel manager: */
pchnl->chnl_mgr_obj->channels[pchnl->chnl_id] = NULL;
@@ -358,13 +347,6 @@ int bridge_chnl_create(struct chnl_mgr **channel_mgr,
struct chnl_mgr *chnl_mgr_obj = NULL;
u8 max_channels;
- /* Check DBC requirements: */
- DBC_REQUIRE(channel_mgr != NULL);
- DBC_REQUIRE(mgr_attrts != NULL);
- DBC_REQUIRE(mgr_attrts->max_channels > 0);
- DBC_REQUIRE(mgr_attrts->max_channels <= CHNL_MAXCHANNELS);
- DBC_REQUIRE(mgr_attrts->word_size != 0);
-
/* Allocate channel manager object */
chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL);
if (chnl_mgr_obj) {
@@ -374,7 +356,6 @@ int bridge_chnl_create(struct chnl_mgr **channel_mgr,
* mgr_attrts->max_channels = CHNL_MAXCHANNELS =
* DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
*/
- DBC_ASSERT(mgr_attrts->max_channels == CHNL_MAXCHANNELS);
max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
/* Create array of channels */
chnl_mgr_obj->channels = kzalloc(sizeof(struct chnl_object *)
@@ -491,7 +472,6 @@ int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
pchnl->state &= ~CHNL_STATECANCEL;
}
}
- DBC_ENSURE(status || list_empty(&pchnl->io_requests));
return status;
}
@@ -592,7 +572,6 @@ int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
if (dequeue_ioc) {
/* Dequeue IOC and set chan_ioc; */
- DBC_ASSERT(!list_empty(&pchnl->io_completions));
chnl_packet_obj = list_first_entry(&pchnl->io_completions,
struct chnl_irp, link);
list_del(&chnl_packet_obj->link);
@@ -705,8 +684,6 @@ int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
struct chnl_mgr *chnl_mgr_obj;
int status = 0;
- DBC_REQUIRE(chnl_obj);
-
chnl_mode = chnl_obj->chnl_mode;
chnl_mgr_obj = chnl_obj->chnl_mgr_obj;
@@ -736,10 +713,7 @@ int bridge_chnl_open(struct chnl_object **chnl,
struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
struct chnl_object *pchnl = NULL;
struct sync_object *sync_event = NULL;
- /* Ensure DBC requirements: */
- DBC_REQUIRE(chnl != NULL);
- DBC_REQUIRE(pattrs != NULL);
- DBC_REQUIRE(hchnl_mgr != NULL);
+
*chnl = NULL;
/* Validate Args: */
@@ -761,7 +735,6 @@ int bridge_chnl_open(struct chnl_object **chnl,
return status;
}
- DBC_ASSERT(ch_id < chnl_mgr_obj->max_channels);
/* Create channel object: */
pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
@@ -850,7 +823,6 @@ int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
{
int status = 0;
- DBC_ASSERT(!(event_mask & ~(DSP_STREAMDONE | DSP_STREAMIOCOMPLETION)));
if (event_mask)
status = ntfy_register(chnl_obj->ntfy_obj, hnotification,
@@ -906,8 +878,6 @@ static void free_chirp_list(struct list_head *chirp_list)
{
struct chnl_irp *chirp, *tmp;
- DBC_REQUIRE(chirp_list != NULL);
-
list_for_each_entry_safe(chirp, tmp, chirp_list, link) {
list_del(&chirp->link);
kfree(chirp);
@@ -924,8 +894,6 @@ static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
int status = -ENOSR;
u32 i;
- DBC_REQUIRE(chnl_mgr_obj);
-
for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
if (chnl_mgr_obj->channels[i] == NULL) {
status = 0;
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index 7eb56178fb64..c7df34e6b60b 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -29,9 +29,6 @@
#include <dspbridge/dev.h>
#include "_tiomap.h"
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- This */
#include <dspbridge/clk.h>
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 694c0e5e55cc..9b50b5bd4edb 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -33,9 +33,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* Services Layer */
#include <dspbridge/ntfy.h>
#include <dspbridge/sync.h>
@@ -114,7 +111,7 @@ struct io_mgr {
struct mgr_processorextinfo ext_proc_info;
struct cmm_object *cmm_mgr; /* Shared Mem Mngr */
struct work_struct io_workq; /* workqueue */
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
u32 trace_buffer_begin; /* Trace message start address */
u32 trace_buffer_end; /* Trace message end address */
u32 trace_buffer_current; /* Trace message current address */
@@ -246,7 +243,7 @@ int bridge_io_destroy(struct io_mgr *hio_mgr)
/* Free IO DPC object */
tasklet_kill(&hio_mgr->dpc_tasklet);
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
kfree(hio_mgr->msg);
#endif
dsp_wdt_exit();
@@ -386,7 +383,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
status = -EFAULT;
}
if (!status) {
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
status =
cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
#else
@@ -731,7 +728,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
hmsg_mgr->max_msgs);
memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm));
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
/* Get the start address of trace buffer */
status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
&hio_mgr->trace_buffer_begin);
@@ -910,7 +907,7 @@ void io_dpc(unsigned long ref_data)
}
#endif
-#ifdef CONFIG_TIDSPBRIDGE_DEBUG
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) {
/* Notify DSP Trace message */
print_dsp_debug_trace(pio_mgr);
@@ -973,29 +970,16 @@ void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl,
chnl_mgr_obj = io_manager->chnl_mgr;
sm = io_manager->shared_mem;
if (io_mode == IO_INPUT) {
- /*
- * Assertion fires if CHNL_AddIOReq() called on a stream
- * which was cancelled, or attached to a dead board.
- */
- DBC_ASSERT((pchnl->state == CHNL_STATEREADY) ||
- (pchnl->state == CHNL_STATEEOS));
/* Indicate to the DSP we have a buffer available for input */
set_chnl_busy(sm, pchnl->chnl_id);
*mbx_val = MBX_PCPY_CLASS;
} else if (io_mode == IO_OUTPUT) {
/*
- * This assertion fails if CHNL_AddIOReq() was called on a
- * stream which was cancelled, or attached to a dead board.
- */
- DBC_ASSERT((pchnl->state & ~CHNL_STATEEOS) ==
- CHNL_STATEREADY);
- /*
* Record the fact that we have a buffer available for
* output.
*/
chnl_mgr_obj->output_mask |= (1 << pchnl->chnl_id);
} else {
- DBC_ASSERT(io_mode); /* Shouldn't get here. */
}
func_end:
return;
@@ -1087,7 +1071,6 @@ static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
dw_arg = sm->arg;
if (chnl_id >= CHNL_MAXCHANNELS) {
/* Shouldn't be here: would indicate corrupted shm. */
- DBC_ASSERT(chnl_id);
goto func_end;
}
pchnl = chnl_mgr_obj->channels[chnl_id];
@@ -1683,7 +1666,7 @@ int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
}
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
void print_dsp_debug_trace(struct io_mgr *hio_mgr)
{
u32 ul_new_message_length = 0, ul_gpp_cur_pointer;
diff --git a/drivers/staging/tidspbridge/core/msg_sm.c b/drivers/staging/tidspbridge/core/msg_sm.c
index 94d9e04a22fa..ce9557e16eb0 100644
--- a/drivers/staging/tidspbridge/core/msg_sm.c
+++ b/drivers/staging/tidspbridge/core/msg_sm.c
@@ -20,9 +20,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index dde559d06c43..7862513cc295 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -27,9 +27,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/drv.h>
#include <dspbridge/sync.h>
@@ -256,9 +253,6 @@ static void bad_page_dump(u32 pa, struct page *pg)
void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
const char *driver_file_name)
{
-
- DBC_REQUIRE(driver_file_name != NULL);
-
if (strcmp(driver_file_name, "UMA") == 0)
*drv_intf = &drv_interface_fxns;
else
@@ -389,6 +383,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
u32 clk_cmd;
struct io_mgr *hio_mgr;
u32 ul_load_monitor_timer;
+ u32 wdt_en = 0;
struct omap_dsp_platform_data *pdata =
omap_dspbridge_dev->dev.platform_data;
@@ -399,16 +394,13 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
(void)dev_get_symbol(dev_context->dev_obj, SHMBASENAME,
&ul_shm_base_virt);
ul_shm_base_virt *= DSPWORDSIZE;
- DBC_ASSERT(ul_shm_base_virt != 0);
/* DSP Virtual address */
ul_tlb_base_virt = dev_context->atlb_entry[0].dsp_va;
- DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
ul_shm_offset_virt =
ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
/* Kernel logical address */
ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt;
- DBC_ASSERT(ul_shm_base != 0);
/* 2nd wd is used as sync field */
dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
/* Write a signature into the shm base + offset; this will
@@ -603,9 +595,12 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
if (!wait_for_start(dev_context, dw_sync_addr))
status = -ETIMEDOUT;
- /* Start wdt */
- dsp_wdt_sm_set((void *)ul_shm_base);
- dsp_wdt_enable(true);
+ dev_get_symbol(dev_context->dev_obj, "_WDT_enable", &wdt_en);
+ if (wdt_en) {
+ /* Start wdt */
+ dsp_wdt_sm_set((void *)ul_shm_base);
+ dsp_wdt_enable(true);
+ }
status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
if (hio_mgr) {
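The rewritten start-up path above arms the watchdog only when the loaded DSP image exports a non-zero _WDT_enable symbol. A condensed sketch of that pattern (function and field names as used in the patch; the helper itself and the header path are assumptions for illustration):

#include <dspbridge/dev.h>

/* Condensed illustration of the new arming logic; error handling and
 * the enclosing bridge_brd_start() context are omitted. */
static void arm_wdt_if_requested(struct bridge_dev_context *dev_context,
				 u32 shm_base)
{
	u32 wdt_en = 0;

	/* dev_get_symbol() leaves wdt_en at 0 if the image lacks the symbol */
	dev_get_symbol(dev_context->dev_obj, "_WDT_enable", &wdt_en);
	if (wdt_en) {
		dsp_wdt_sm_set((void *)shm_base);	/* share overflow field */
		dsp_wdt_enable(true);
	}
}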
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
index 02dd4391309a..16a4aafa86ae 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
@@ -303,7 +303,6 @@ int dsp_peripheral_clk_ctrl(struct bridge_dev_context *dev_context,
}
	/* TODO -- Assert may be too hard a restriction here. Maybe we should
	 * just return failure when the CLK ID does not match */
- /* DBC_ASSERT(clk_id_index < MBX_PM_MAX_RESOURCES); */
if (clk_id_index == MBX_PM_MAX_RESOURCES) {
		/* return a more meaningful error code */
return -EPERM;
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c
index dfb356eb6723..7fda10c36862 100644
--- a/drivers/staging/tidspbridge/core/tiomap_io.c
+++ b/drivers/staging/tidspbridge/core/tiomap_io.c
@@ -21,9 +21,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/drv.h>
@@ -68,20 +65,17 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
status = dev_get_symbol(dev_context->dev_obj,
SHMBASENAME, &ul_shm_base_virt);
}
- DBC_ASSERT(ul_shm_base_virt != 0);
/* Check if it is a read of Trace section */
if (!status && !ul_trace_sec_beg) {
status = dev_get_symbol(dev_context->dev_obj,
DSP_TRACESEC_BEG, &ul_trace_sec_beg);
}
- DBC_ASSERT(ul_trace_sec_beg != 0);
if (!status && !ul_trace_sec_end) {
status = dev_get_symbol(dev_context->dev_obj,
DSP_TRACESEC_END, &ul_trace_sec_end);
}
- DBC_ASSERT(ul_trace_sec_end != 0);
if (!status) {
if ((dsp_addr <= ul_trace_sec_end) &&
@@ -105,19 +99,16 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
status = dev_get_symbol(dev_context->dev_obj,
DYNEXTBASE, &ul_dyn_ext_base);
}
- DBC_ASSERT(ul_dyn_ext_base != 0);
if (!status) {
status = dev_get_symbol(dev_context->dev_obj,
EXTBASE, &ul_ext_base);
}
- DBC_ASSERT(ul_ext_base != 0);
if (!status) {
status = dev_get_symbol(dev_context->dev_obj,
EXTEND, &ul_ext_end);
}
- DBC_ASSERT(ul_ext_end != 0);
/* Trace buffer is right after the shm SEG0,
* so set the base address to SHMBASE */
@@ -126,8 +117,6 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
ul_ext_end = ul_trace_sec_end;
}
- DBC_ASSERT(ul_ext_end != 0);
- DBC_ASSERT(ul_ext_end > ul_ext_base);
if (ul_ext_end < ul_ext_base)
status = -EPERM;
@@ -135,7 +124,6 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
if (!status) {
ul_tlb_base_virt =
dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE;
- DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
dw_ext_prog_virt_mem =
dev_context->atlb_entry[0].gpp_va;
@@ -271,7 +259,6 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
/* Get SHM_BEG EXT_BEG and EXT_END. */
ret = dev_get_symbol(dev_context->dev_obj,
SHMBASENAME, &ul_shm_base_virt);
- DBC_ASSERT(ul_shm_base_virt != 0);
if (dynamic_load) {
if (!ret) {
if (symbols_reloaded)
@@ -280,7 +267,6 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
(dev_context->dev_obj, DYNEXTBASE,
&ul_ext_base);
}
- DBC_ASSERT(ul_ext_base != 0);
if (!ret) {
/* DR OMAPS00013235 : DLModules array may be
* in EXTMEM. It is expected that DYNEXTMEM and
@@ -299,7 +285,6 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
dev_get_symbol
(dev_context->dev_obj, EXTBASE,
&ul_ext_base);
- DBC_ASSERT(ul_ext_base != 0);
if (!ret)
ret =
dev_get_symbol
@@ -312,15 +297,12 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
if (trace_load)
ul_ext_base = ul_shm_base_virt;
- DBC_ASSERT(ul_ext_end != 0);
- DBC_ASSERT(ul_ext_end > ul_ext_base);
if (ul_ext_end < ul_ext_base)
ret = -EPERM;
if (!ret) {
ul_tlb_base_virt =
dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE;
- DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
if (symbols_reloaded) {
ret = dev_get_symbol
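The DBC_ASSERT calls deleted above compiled to nothing outside CONFIG_TIDSPBRIDGE_DEBUG, so their removal changes no release behavior. Where a zero symbol value is genuinely fatal, an explicit status check is the runtime-safe replacement, matching the ul_ext_end < ul_ext_base test these functions already perform. A hypothetical example of such a check (the patch itself does not add one):

/* Hypothetical helper, not in the patch: turn a "symbol must be
 * non-zero" precondition into a real error code instead of a
 * debug-only assertion that vanishes in release builds. */
static int require_symbol(u32 value)
{
	return value ? 0 : -EFAULT;
}

/* e.g.:  if (!status) status = require_symbol(ul_ext_base); */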
diff --git a/drivers/staging/tidspbridge/core/wdt.c b/drivers/staging/tidspbridge/core/wdt.c
index 2126f5977530..70055c8111ed 100644
--- a/drivers/staging/tidspbridge/core/wdt.c
+++ b/drivers/staging/tidspbridge/core/wdt.c
@@ -25,8 +25,6 @@
#include <dspbridge/host_os.h>
-#ifdef CONFIG_TIDSPBRIDGE_WDT3
-
#define OMAP34XX_WDT3_BASE (L4_PER_34XX_BASE + 0x30000)
static struct dsp_wdt_setting dsp_wdt;
@@ -84,7 +82,7 @@ int dsp_wdt_init(void)
void dsp_wdt_sm_set(void *data)
{
dsp_wdt.sm_wdt = data;
- dsp_wdt.sm_wdt->wdt_overflow = CONFIG_TIDSPBRIDGE_WDT_TIMEOUT;
+ dsp_wdt.sm_wdt->wdt_overflow = 5; /* in seconds */
}
@@ -128,23 +126,3 @@ void dsp_wdt_enable(bool enable)
clk_disable(dsp_wdt.fclk);
}
}
-
-#else
-void dsp_wdt_enable(bool enable)
-{
-}
-
-void dsp_wdt_sm_set(void *data)
-{
-}
-
-int dsp_wdt_init(void)
-{
- return 0;
-}
-
-void dsp_wdt_exit(void)
-{
-}
-#endif
-
diff --git a/drivers/staging/tidspbridge/gen/gh.c b/drivers/staging/tidspbridge/gen/gh.c
index 60aa7b063c91..25eaef782aaa 100644
--- a/drivers/staging/tidspbridge/gen/gh.c
+++ b/drivers/staging/tidspbridge/gen/gh.c
@@ -95,15 +95,6 @@ void gh_delete(struct gh_t_hash_tab *hash_tab)
}
/*
- * ======== gh_exit ========
- */
-
-void gh_exit(void)
-{
- /* Do nothing */
-}
-
-/*
* ======== gh_find ========
*/
@@ -122,15 +113,6 @@ void *gh_find(struct gh_t_hash_tab *hash_tab, void *key)
}
/*
- * ======== gh_init ========
- */
-
-void gh_init(void)
-{
- /* Do nothing */
-}
-
-/*
* ======== gh_insert ========
*/
diff --git a/drivers/staging/tidspbridge/gen/uuidutil.c b/drivers/staging/tidspbridge/gen/uuidutil.c
index ff6ebadf98f4..b44656cf7858 100644
--- a/drivers/staging/tidspbridge/gen/uuidutil.c
+++ b/drivers/staging/tidspbridge/gen/uuidutil.c
@@ -23,9 +23,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- This */
#include <dspbridge/uuidutil.h>
@@ -41,8 +38,6 @@ void uuid_uuid_to_string(struct dsp_uuid *uuid_obj, char *sz_uuid,
{
s32 i; /* return result from snprintf. */
- DBC_REQUIRE(uuid_obj && sz_uuid);
-
i = snprintf(sz_uuid, size,
"%.8X_%.4X_%.4X_%.2X%.2X_%.2X%.2X%.2X%.2X%.2X%.2X",
uuid_obj->data1, uuid_obj->data2, uuid_obj->data3,
@@ -50,8 +45,6 @@ void uuid_uuid_to_string(struct dsp_uuid *uuid_obj, char *sz_uuid,
uuid_obj->data6[0], uuid_obj->data6[1],
uuid_obj->data6[2], uuid_obj->data6[3],
uuid_obj->data6[4], uuid_obj->data6[5]);
-
- DBC_ENSURE(i != -1);
}
static s32 uuid_hex_to_bin(char *buf, s32 len)
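The DBC_ENSURE deleted here was the only consumer of snprintf()'s return value; since the buffer is caller-supplied, a caller that cares can detect truncation directly. A hedged sketch of that check (hypothetical helper, not what the patch does):

#include <linux/kernel.h>

/* Hypothetical truncation check: snprintf() returns the length that
 * would have been written, so a result >= size means sz_uuid was cut
 * short. */
static int format_checked(char *sz_uuid, size_t size, u32 data1)
{
	int i = snprintf(sz_uuid, size, "%.8X", data1);

	if (i < 0 || (size_t)i >= size)
		return -EINVAL;
	return 0;
}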
diff --git a/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h b/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
index 6e7ab4fd8c39..cc95a18f1db9 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
@@ -99,14 +99,10 @@ struct shm {
struct opp_rqst_struct opp_request;
/* load monitor information structure */
struct load_mon_struct load_mon_info;
-#ifdef CONFIG_TIDSPBRIDGE_WDT3
/* Flag for WDT enable/disable F/I clocks */
u32 wdt_setclocks;
u32 wdt_overflow; /* WDT overflow time */
char dummy[176]; /* padding to 256 byte boundary */
-#else
- char dummy[184]; /* padding to 256 byte boundary */
-#endif
u32 shm_dbg_var[64]; /* shared memory debug variables */
};
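The padding arithmetic in this hunk: the two u32 WDT fields are now unconditional, adding 8 bytes, so the pad shrinks from 184 to 176 bytes and shm_dbg_var keeps the same 256-byte alignment both configurations had before. A hypothetical compile-time guard for that invariant (not in the patch; header path as in this diff):

#include <linux/bug.h>
#include <linux/stddef.h>
#include <dspbridge/_chnl_sm.h>

/* Hypothetical layout guard: break the build if a future field change
 * silently moves shm_dbg_var off the promised 256-byte boundary. */
static inline void shm_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct shm, shm_dbg_var) % 256 != 0);
}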
diff --git a/drivers/staging/tidspbridge/include/dspbridge/chnl.h b/drivers/staging/tidspbridge/include/dspbridge/chnl.h
index 92f6a13424f2..9b018b1f9bf3 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/chnl.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/chnl.h
@@ -48,7 +48,6 @@
* -ECHRNG: This manager cannot handle this many channels.
* -EEXIST: Channel manager already exists for this device.
* Requires:
- * chnl_init(void) called.
* channel_mgr != NULL.
* mgr_attrts != NULL.
* Ensures:
@@ -70,7 +69,6 @@ extern int chnl_create(struct chnl_mgr **channel_mgr,
* 0: Success.
* -EFAULT: hchnl_mgr was invalid.
* Requires:
- * chnl_init(void) called.
* Ensures:
* 0: Cancels I/O on each open channel.
* Closes each open channel.
@@ -79,31 +77,4 @@ extern int chnl_create(struct chnl_mgr **channel_mgr,
*/
extern int chnl_destroy(struct chnl_mgr *hchnl_mgr);
-/*
- * ======== chnl_exit ========
- * Purpose:
- * Discontinue usage of the CHNL module.
- * Parameters:
- * Returns:
- * Requires:
- * chnl_init(void) previously called.
- * Ensures:
- * Resources, if any acquired in chnl_init(void), are freed when the last
- * client of CHNL calls chnl_exit(void).
- */
-extern void chnl_exit(void);
-
-/*
- * ======== chnl_init ========
- * Purpose:
- * Initialize the CHNL module's private state.
- * Parameters:
- * Returns:
- * TRUE if initialized; FALSE if error occurred.
- * Requires:
- * Ensures:
- * A requirement for each of the other public CHNL functions.
- */
-extern bool chnl_init(void);
-
#endif /* CHNL_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cmm.h b/drivers/staging/tidspbridge/include/dspbridge/cmm.h
index aff22051cf57..c66bcf7ea90c 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cmm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cmm.h
@@ -79,7 +79,6 @@ extern void *cmm_calloc_buf(struct cmm_object *hcmm_mgr,
* -EPERM: Failed to initialize critical sect sync object.
*
* Requires:
- * cmm_init(void) called.
* ph_cmm_mgr != NULL.
* mgr_attrts->min_block_size >= 4 bytes.
* Ensures:
@@ -111,20 +110,6 @@ extern int cmm_create(struct cmm_object **ph_cmm_mgr,
extern int cmm_destroy(struct cmm_object *hcmm_mgr, bool force);
/*
- * ======== cmm_exit ========
- * Purpose:
- * Discontinue usage of module. Cleanup CMM module if CMM cRef reaches zero.
- * Parameters:
- * n/a
- * Returns:
- * n/a
- * Requires:
- * CMM is initialized.
- * Ensures:
- */
-extern void cmm_exit(void);
-
-/*
* ======== cmm_free_buf ========
* Purpose:
* Free the given buffer.
@@ -185,19 +170,6 @@ extern int cmm_get_info(struct cmm_object *hcmm_mgr,
struct cmm_info *cmm_info_obj);
/*
- * ======== cmm_init ========
- * Purpose:
- * Initializes private state of CMM module.
- * Parameters:
- * Returns:
- * TRUE if initialized; FALSE if error occurred.
- * Requires:
- * Ensures:
- * CMM initialized.
- */
-extern bool cmm_init(void);
-
-/*
* ======== cmm_register_gppsm_seg ========
* Purpose:
* Register a block of SM with the CMM.
@@ -333,7 +305,6 @@ extern int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator,
* 0: Success.
* -EFAULT: Bad translator handle.
* Requires:
- * (refs > 0)
* (paddr != NULL)
* (ul_size > 0)
* Ensures:
@@ -355,7 +326,6 @@ extern int cmm_xlator_info(struct cmm_xlatorobject *xlator,
* Returns:
* Valid address on success, else NULL.
* Requires:
- * refs > 0
* paddr != NULL
* xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA)
* Ensures:
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cod.h b/drivers/staging/tidspbridge/include/dspbridge/cod.h
index cb684c11b302..ba2005d02422 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cod.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cod.h
@@ -100,21 +100,6 @@ extern int cod_create(struct cod_manager **mgr,
extern void cod_delete(struct cod_manager *cod_mgr_obj);
/*
- * ======== cod_exit ========
- * Purpose:
- * Discontinue usage of the COD module.
- * Parameters:
- * None.
- * Returns:
- * None.
- * Requires:
- * COD initialized.
- * Ensures:
- * Resources acquired in cod_init(void) are freed.
- */
-extern void cod_exit(void);
-
-/*
* ======== cod_get_base_lib ========
* Purpose:
* Get handle to the base image DBL library.
@@ -243,20 +228,6 @@ extern int cod_get_sym_value(struct cod_manager *cod_mgr_obj,
char *str_sym, u32 * pul_value);
/*
- * ======== cod_init ========
- * Purpose:
- * Initialize the COD module's private state.
- * Parameters:
- * None.
- * Returns:
- * TRUE if initialized; FALSE if error occurred.
- * Requires:
- * Ensures:
- * A requirement for each of the other public COD functions.
- */
-extern bool cod_init(void);
-
-/*
* ======== cod_load_base ========
* Purpose:
* Load the initial program image, optionally with command-line arguments,
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dbc.h b/drivers/staging/tidspbridge/include/dspbridge/dbc.h
deleted file mode 100644
index 463760f499a4..000000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/dbc.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * dbc.h
- *
- * DSP-BIOS Bridge driver support functions for TI OMAP processors.
- *
- * "Design by Contract" programming macros.
- *
- * Notes:
- * Requires that the GT->ERROR function has been defaulted to a valid
- * error handler for the given execution environment.
- *
- * Does not require that GT_init() be called.
- *
- * Copyright (C) 2008 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#ifndef DBC_
-#define DBC_
-
-/* Assertion Macros: */
-#ifdef CONFIG_TIDSPBRIDGE_DEBUG
-
-#define DBC_ASSERT(exp) \
- if (!(exp)) \
- pr_err("%s, line %d: Assertion (" #exp ") failed.\n", \
- __FILE__, __LINE__)
-#define DBC_REQUIRE DBC_ASSERT /* Function Precondition. */
-#define DBC_ENSURE DBC_ASSERT /* Function Postcondition. */
-
-#else
-
-#define DBC_ASSERT(exp) {}
-#define DBC_REQUIRE(exp) {}
-#define DBC_ENSURE(exp) {}
-
-#endif /* DEBUG */
-
-#endif /* DBC_ */
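For reference, the deleted macros printed via pr_err() only under CONFIG_TIDSPBRIDGE_DEBUG and expanded to nothing otherwise. Where a check is still worth keeping after this removal, the kernel's standard WARN_ON() gives equivalent (and richer) diagnostics without a private macro layer; a small illustration, not part of the patch:

#include <linux/bug.h>
#include <linux/errno.h>

/* Open-coded precondition using a standard kernel helper: WARN_ON()
 * logs file/line plus a stack trace and evaluates to the condition,
 * so the failure can also be propagated as an error. */
static int example_precondition(void *handle)
{
	if (WARN_ON(!handle))
		return -EFAULT;
	return 0;
}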
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dev.h b/drivers/staging/tidspbridge/include/dspbridge/dev.h
index f92b4be0b413..fa2d79ef6cc8 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dev.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dev.h
@@ -478,33 +478,6 @@ extern int dev_get_bridge_context(struct dev_object *hdev_obj,
**phbridge_context);
/*
- * ======== dev_exit ========
- * Purpose:
- * Decrement reference count, and free resources when reference count is
- * 0.
- * Parameters:
- * Returns:
- * Requires:
- * DEV is initialized.
- * Ensures:
- * When reference count == 0, DEV's private resources are freed.
- */
-extern void dev_exit(void);
-
-/*
- * ======== dev_init ========
- * Purpose:
- * Initialize DEV's private state, keeping a reference count on each call.
- * Parameters:
- * Returns:
- * TRUE if initialized; FALSE if error occurred.
- * Requires:
- * Ensures:
- * TRUE: A requirement for the other public DEV functions.
- */
-extern bool dev_init(void);
-
-/*
* ======== dev_insert_proc_object ========
* Purpose:
* Inserts the Processor Object into the List of PROC Objects
diff --git a/drivers/staging/tidspbridge/include/dspbridge/disp.h b/drivers/staging/tidspbridge/include/dspbridge/disp.h
index 5dfdc8cfb937..39d3cea9ca8b 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/disp.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/disp.h
@@ -53,7 +53,6 @@ struct disp_attr {
* -ENOMEM: Insufficient memory for requested resources.
* -EPERM: Unable to create dispatcher.
* Requires:
- * disp_init(void) called.
* disp_attrs != NULL.
* hdev_obj != NULL.
* dispatch_obj != NULL.
@@ -73,7 +72,6 @@ extern int disp_create(struct disp_object **dispatch_obj,
* disp_obj: Node Dispatcher object.
* Returns:
* Requires:
- * disp_init(void) called.
* Valid disp_obj.
* Ensures:
* disp_obj is invalid.
@@ -81,31 +79,6 @@ extern int disp_create(struct disp_object **dispatch_obj,
extern void disp_delete(struct disp_object *disp_obj);
/*
- * ======== disp_exit ========
- * Discontinue usage of DISP module.
- *
- * Parameters:
- * Returns:
- * Requires:
- * disp_init(void) previously called.
- * Ensures:
- * Any resources acquired in disp_init(void) will be freed when last DISP
- * client calls disp_exit(void).
- */
-extern void disp_exit(void);
-
-/*
- * ======== disp_init ========
- * Initialize the DISP module.
- *
- * Parameters:
- * Returns:
- * TRUE if initialization succeeded, FALSE otherwise.
- * Ensures:
- */
-extern bool disp_init(void);
-
-/*
* ======== disp_node_change_priority ========
* Change the priority of a node currently running on the target.
*
@@ -120,7 +93,6 @@ extern bool disp_init(void);
* 0: Success.
* -ETIME: A timeout occurred before the DSP responded.
* Requires:
- * disp_init(void) called.
* Valid disp_obj.
* hnode != NULL.
* Ensures:
@@ -148,7 +120,6 @@ extern int disp_node_change_priority(struct disp_object
* -ETIME: A timeout occurred before the DSP responded.
* -EPERM: A failure occurred, unable to create node.
* Requires:
- * disp_init(void) called.
* Valid disp_obj.
* pargs != NULL.
* hnode != NULL.
@@ -178,7 +149,6 @@ extern int disp_node_create(struct disp_object *disp_obj,
* 0: Success.
* -ETIME: A timeout occurred before the DSP responded.
* Requires:
- * disp_init(void) called.
* Valid disp_obj.
* hnode != NULL.
* Ensures:
@@ -204,7 +174,6 @@ extern int disp_node_delete(struct disp_object *disp_obj,
* 0: Success.
* -ETIME: A timeout occurred before the DSP responded.
* Requires:
- * disp_init(void) called.
* Valid disp_obj.
* hnode != NULL.
* Ensures:
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dmm.h b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
index 6c58335c5f60..c3487be8fcf5 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dmm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
@@ -61,10 +61,6 @@ extern int dmm_create(struct dmm_object **dmm_manager,
struct dev_object *hdev_obj,
const struct dmm_mgrattrs *mgr_attrts);
-extern bool dmm_init(void);
-
-extern void dmm_exit(void);
-
extern int dmm_create_tables(struct dmm_object *dmm_mgr,
u32 addr, u32 size);
diff --git a/drivers/staging/tidspbridge/include/dspbridge/drv.h b/drivers/staging/tidspbridge/include/dspbridge/drv.h
index 9cdbd955dce9..b0c7708321b2 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/drv.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/drv.h
@@ -199,17 +199,6 @@ extern int drv_create(struct drv_object **drv_obj);
extern int drv_destroy(struct drv_object *driver_obj);
/*
- * ======== drv_exit ========
- * Purpose:
- * Exit the DRV module, freeing any modules initialized in drv_init.
- * Parameters:
- * Returns:
- * Requires:
- * Ensures:
- */
-extern void drv_exit(void);
-
-/*
* ======== drv_get_first_dev_object ========
* Purpose:
 * Returns the pointer to the first DEV object in the list
@@ -294,18 +283,6 @@ extern u32 drv_get_next_dev_object(u32 hdev_obj);
extern u32 drv_get_next_dev_extension(u32 dev_extension);
/*
- * ======== drv_init ========
- * Purpose:
- * Initialize the DRV module.
- * Parameters:
- * Returns:
- * TRUE if success; FALSE otherwise.
- * Requires:
- * Ensures:
- */
-extern int drv_init(void);
-
-/*
* ======== drv_insert_dev_object ========
* Purpose:
 * Insert a DeviceObject into the Driver object's device list.
diff --git a/drivers/staging/tidspbridge/include/dspbridge/gh.h b/drivers/staging/tidspbridge/include/dspbridge/gh.h
index 9de291d1f566..da85079dbfb6 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/gh.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/gh.h
@@ -23,9 +23,7 @@ extern struct gh_t_hash_tab *gh_create(u16 max_bucket, u16 val_size,
bool(*match) (void *, void *),
void (*delete) (void *));
extern void gh_delete(struct gh_t_hash_tab *hash_tab);
-extern void gh_exit(void);
extern void *gh_find(struct gh_t_hash_tab *hash_tab, void *key);
-extern void gh_init(void);
extern void *gh_insert(struct gh_t_hash_tab *hash_tab, void *key, void *value);
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
void gh_iterate(struct gh_t_hash_tab *hash_tab,
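Since gh_init() and gh_exit() were empty, all gh state is per-table and the whole lifecycle is gh_create()/gh_delete(). A usage sketch under the declarations above (the hash-callback parameter is abridged in this hunk; the callbacks below are placeholders):

#include <linux/types.h>
#include <dspbridge/gh.h>

static u16 demo_hash(void *key, u16 max_bucket)
{
	return (u16)((uintptr_t)key % max_bucket);	/* placeholder hash */
}

static bool demo_match(void *key, void *value)
{
	return key == value;	/* placeholder comparison */
}

/* All state lives in the table object, so no module-wide setup remains. */
static void gh_lifecycle_demo(void)
{
	struct gh_t_hash_tab *tab;

	tab = gh_create(64, sizeof(void *), demo_hash, demo_match, NULL);
	if (tab)
		gh_delete(tab);
}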
diff --git a/drivers/staging/tidspbridge/include/dspbridge/io.h b/drivers/staging/tidspbridge/include/dspbridge/io.h
index 500bbd71684d..750571856908 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/io.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/io.h
@@ -55,7 +55,6 @@ struct io_attrs {
* -EINVAL: Invalid DSP word size (must be > 0).
* Invalid base address for DSP communications.
* Requires:
- * io_init(void) called.
* io_man != NULL.
* mgr_attrts != NULL.
* Ensures:
@@ -74,36 +73,8 @@ extern int io_create(struct io_mgr **io_man,
* 0: Success.
* -EFAULT: hio_mgr was invalid.
* Requires:
- * io_init(void) called.
* Ensures:
*/
extern int io_destroy(struct io_mgr *hio_mgr);
-/*
- * ======== io_exit ========
- * Purpose:
- * Discontinue usage of the IO module.
- * Parameters:
- * Returns:
- * Requires:
- * io_init(void) previously called.
- * Ensures:
- * Resources, if any acquired in io_init(void), are freed when the last
- * client of IO calls io_exit(void).
- */
-extern void io_exit(void);
-
-/*
- * ======== io_init ========
- * Purpose:
- * Initialize the IO module's private state.
- * Parameters:
- * Returns:
- * TRUE if initialized; FALSE if error occurred.
- * Requires:
- * Ensures:
- * A requirement for each of the other public CHNL functions.
- */
-extern bool io_init(void);
-
#endif /* CHNL_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
index a054dad21333..903ff12b14de 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
@@ -154,8 +154,6 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context);
void dump_dl_modules(struct bridge_dev_context *bridge_context);
-#endif
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
void print_dsp_debug_trace(struct io_mgr *hio_mgr);
#endif
diff --git a/drivers/staging/tidspbridge/include/dspbridge/msg.h b/drivers/staging/tidspbridge/include/dspbridge/msg.h
index 95778bcb5aae..2c8712c933fc 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/msg.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/msg.h
@@ -34,7 +34,6 @@
* msg_callback: Called whenever an RMS_EXIT message is received.
* Returns:
* Requires:
- * msg_mod_init(void) called.
* msg_man != NULL.
* hdev_obj != NULL.
* msg_callback != NULL.
@@ -52,35 +51,9 @@ extern int msg_create(struct msg_mgr **msg_man,
* hmsg_mgr: Handle returned from msg_create().
* Returns:
* Requires:
- * msg_mod_init(void) called.
* Valid hmsg_mgr.
* Ensures:
*/
extern void msg_delete(struct msg_mgr *hmsg_mgr);
-/*
- * ======== msg_exit ========
- * Purpose:
- * Discontinue usage of msg_ctrl module.
- * Parameters:
- * Returns:
- * Requires:
- * msg_mod_init(void) successfully called before.
- * Ensures:
- * Any resources acquired in msg_mod_init(void) will be freed when last
- * msg_ctrl client calls msg_exit(void).
- */
-extern void msg_exit(void);
-
-/*
- * ======== msg_mod_init ========
- * Purpose:
- * Initialize the msg_ctrl module.
- * Parameters:
- * Returns:
- * TRUE if initialization succeeded, FALSE otherwise.
- * Ensures:
- */
-extern bool msg_mod_init(void);
-
#endif /* MSG_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nldr.h b/drivers/staging/tidspbridge/include/dspbridge/nldr.h
index d9653ee667e1..c5e48ca6c548 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/nldr.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/nldr.h
@@ -36,7 +36,6 @@ extern int nldr_create(struct nldr_object **nldr,
const struct nldr_attrs *pattrs);
extern void nldr_delete(struct nldr_object *nldr_obj);
-extern void nldr_exit(void);
extern int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
char *str_fxn, u32 * addr);
@@ -44,7 +43,6 @@ extern int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
extern int nldr_get_rmm_manager(struct nldr_object *nldr,
struct rmm_target_obj **rmm_mgr);
-extern bool nldr_init(void);
extern int nldr_load(struct nldr_nodeobject *nldr_node_obj,
enum nldr_phase phase);
extern int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h b/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h
index ee3a85f08fc3..7e3c7f58b496 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h
@@ -119,7 +119,6 @@ enum nldr_phase {
* 0: Success.
* -ENOMEM: Insufficient memory on GPP.
* Requires:
- * nldr_init(void) called.
* Valid nldr_obj.
* node_props != NULL.
* nldr_nodeobj != NULL.
@@ -148,7 +147,6 @@ typedef int(*nldr_allocatefxn) (struct nldr_object *nldr_obj,
* 0: Success;
* -ENOMEM: Insufficient memory for requested resources.
* Requires:
- * nldr_init(void) called.
* nldr != NULL.
* hdev_obj != NULL.
* pattrs != NULL.
@@ -168,7 +166,6 @@ typedef int(*nldr_createfxn) (struct nldr_object **nldr,
* nldr_obj: Node manager object.
* Returns:
* Requires:
- * nldr_init(void) called.
* Valid nldr_obj.
* Ensures:
* nldr_obj invalid
@@ -176,20 +173,6 @@ typedef int(*nldr_createfxn) (struct nldr_object **nldr,
typedef void (*nldr_deletefxn) (struct nldr_object *nldr_obj);
/*
- * ======== nldr_exit ========
- * Discontinue usage of NLDR module.
- *
- * Parameters:
- * Returns:
- * Requires:
- * nldr_init(void) successfully called before.
- * Ensures:
- * Any resources acquired in nldr_init(void) will be freed when last NLDR
- * client calls nldr_exit(void).
- */
-typedef void (*nldr_exitfxn) (void);
-
-/*
* ======== NLDR_Free ========
* Free resources allocated in nldr_allocate.
*
@@ -197,7 +180,6 @@ typedef void (*nldr_exitfxn) (void);
* nldr_node_obj: Handle returned from nldr_allocate().
* Returns:
* Requires:
- * nldr_init(void) called.
* Valid nldr_node_obj.
* Ensures:
*/
@@ -216,7 +198,6 @@ typedef void (*nldr_freefxn) (struct nldr_nodeobject *nldr_node_obj);
* 0: Success.
* -ESPIPE: Address of function not found.
* Requires:
- * nldr_init(void) called.
* Valid nldr_node_obj.
* addr != NULL;
* str_fxn != NULL;
@@ -227,17 +208,6 @@ typedef int(*nldr_getfxnaddrfxn) (struct nldr_nodeobject
char *str_fxn, u32 * addr);
/*
- * ======== nldr_init ========
- * Initialize the NLDR module.
- *
- * Parameters:
- * Returns:
- * TRUE if initialization succeeded, FALSE otherwise.
- * Ensures:
- */
-typedef bool(*nldr_initfxn) (void);
-
-/*
* ======== nldr_load ========
* Load create, delete, or execute phase function of a node on the DSP.
*
@@ -251,7 +221,6 @@ typedef bool(*nldr_initfxn) (void);
* is already in use.
* -EILSEQ: Failure in dynamic loader library.
* Requires:
- * nldr_init(void) called.
* Valid nldr_node_obj.
* Ensures:
*/
@@ -269,7 +238,6 @@ typedef int(*nldr_loadfxn) (struct nldr_nodeobject *nldr_node_obj,
* 0: Success.
* -ENOMEM: Insufficient memory on GPP.
* Requires:
- * nldr_init(void) called.
* Valid nldr_node_obj.
* Ensures:
*/
@@ -283,9 +251,7 @@ struct node_ldr_fxns {
nldr_allocatefxn allocate;
nldr_createfxn create;
nldr_deletefxn delete;
- nldr_exitfxn exit;
nldr_getfxnaddrfxn get_fxn_addr;
- nldr_initfxn init;
nldr_loadfxn load;
nldr_unloadfxn unload;
};
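After this hunk the loader dispatch table carries six entries. A sketch of the trimmed table using designated initializers (entry functions follow the nldr.h declarations earlier in this diff; nldr_allocate is assumed from the same header, and the driver itself may use positional initializers):

#include <dspbridge/nldr.h>
#include <dspbridge/nldrdefs.h>

/* Trimmed dispatch table: the init/exit slots are gone, everything
 * else keeps its position and type. */
static struct node_ldr_fxns demo_ldr_fxns = {
	.allocate	= nldr_allocate,
	.create		= nldr_create,
	.delete		= nldr_delete,
	.get_fxn_addr	= nldr_get_fxn_addr,
	.load		= nldr_load,
	.unload		= nldr_unload,
};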
diff --git a/drivers/staging/tidspbridge/include/dspbridge/node.h b/drivers/staging/tidspbridge/include/dspbridge/node.h
index 16371d818e3d..7397b7a12f7a 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/node.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/node.h
@@ -47,7 +47,6 @@
* -EPERM: A failure occurred, unable to allocate node.
 * -EBADR: Processor is not in the running state.
* Requires:
- * node_init(void) called.
* hprocessor != NULL.
* node_uuid != NULL.
* noderes != NULL.
@@ -81,7 +80,6 @@ extern int node_allocate(struct proc_object *hprocessor,
* -EPERM: General Failure.
* -EINVAL: Invalid Size.
* Requires:
- * node_init(void) called.
* pbuffer != NULL.
* Ensures:
*/
@@ -106,7 +104,6 @@ extern int node_alloc_msg_buf(struct node_object *hnode,
* or NODE_RUNNING state.
* -ETIME: A timeout occurred before the DSP responded.
* Requires:
- * node_init(void) called.
* Ensures:
* 0 && (Node's current priority == prio)
*/
@@ -157,7 +154,6 @@ extern int node_change_priority(struct node_object *hnode, s32 prio);
* Device node to device node, or device node to
* GPP), the two nodes are on different DSPs.
* Requires:
- * node_init(void) called.
* Ensures:
*/
extern int node_connect(struct node_object *node1,
@@ -185,7 +181,6 @@ extern int node_connect(struct node_object *node1,
* -ETIME: A timeout occurred before the DSP responded.
* -EPERM: A failure occurred, unable to create node.
* Requires:
- * node_init(void) called.
* Ensures:
*/
extern int node_create(struct node_object *hnode);
@@ -206,7 +201,6 @@ extern int node_create(struct node_object *hnode);
* -ENOMEM: Insufficient memory for requested resources.
* -EPERM: General failure.
* Requires:
- * node_init(void) called.
* node_man != NULL.
* hdev_obj != NULL.
* Ensures:
@@ -234,7 +228,6 @@ extern int node_create_mgr(struct node_mgr **node_man,
* -EPERM: A failure occurred in deleting the node.
* -ESPIPE: Delete function not found in the COFF file.
* Requires:
- * node_init(void) called.
* Ensures:
* 0: hnode is invalid.
*/
@@ -250,7 +243,6 @@ extern int node_delete(struct node_res_object *noderes,
* Returns:
* 0: Success.
* Requires:
- * node_init(void) called.
* Valid hnode_mgr.
* Ensures:
*/
@@ -287,20 +279,6 @@ extern int node_enum_nodes(struct node_mgr *hnode_mgr,
u32 *pu_allocated);
/*
- * ======== node_exit ========
- * Purpose:
- * Discontinue usage of NODE module.
- * Parameters:
- * Returns:
- * Requires:
- * node_init(void) successfully called before.
- * Ensures:
- * Any resources acquired in node_init(void) will be freed when last NODE
- * client calls node_exit(void).
- */
-extern void node_exit(void);
-
-/*
* ======== node_free_msg_buf ========
* Purpose:
* Free a message buffer previously allocated with node_alloc_msg_buf.
@@ -313,7 +291,6 @@ extern void node_exit(void);
* -EFAULT: Invalid node handle.
* -EPERM: Failure to free the buffer.
* Requires:
- * node_init(void) called.
* pbuffer != NULL.
* Ensures:
*/
@@ -336,7 +313,6 @@ extern int node_free_msg_buf(struct node_object *hnode,
* 0: Success.
* -EFAULT: Invalid hnode.
* Requires:
- * node_init(void) called.
* pattr != NULL.
* Ensures:
* 0: *pattrs contains the node's current attributes.
@@ -363,7 +339,6 @@ extern int node_get_attr(struct node_object *hnode,
* Error occurred while trying to retrieve a message.
* -ETIME: Timeout occurred and no message is available.
* Requires:
- * node_init(void) called.
* message != NULL.
* Ensures:
*/
@@ -386,17 +361,6 @@ extern int node_get_nldr_obj(struct node_mgr *hnode_mgr,
struct nldr_object **nldr_ovlyobj);
/*
- * ======== node_init ========
- * Purpose:
- * Initialize the NODE module.
- * Parameters:
- * Returns:
- * TRUE if initialization succeeded, FALSE otherwise.
- * Ensures:
- */
-extern bool node_init(void);
-
-/*
* ======== node_on_exit ========
* Purpose:
* Gets called when RMS_EXIT is received for a node. PROC needs to pass
@@ -425,7 +389,6 @@ void node_on_exit(struct node_object *hnode, s32 node_status);
* -ETIME: A timeout occurred before the DSP responded.
* DSP_EWRONGSTSATE: Node is not in NODE_RUNNING state.
* Requires:
- * node_init(void) called.
* Ensures:
*/
extern int node_pause(struct node_object *hnode);
@@ -449,7 +412,6 @@ extern int node_pause(struct node_object *hnode);
* -ETIME: Timeout occurred before message could be set.
* -EBADR: Node is in invalid state for sending messages.
* Requires:
- * node_init(void) called.
* pmsg != NULL.
* Ensures:
*/
@@ -473,7 +435,6 @@ extern int node_put_message(struct node_object *hnode,
* -ENOSYS: Notification type specified by notify_type is not
* supported.
* Requires:
- * node_init(void) called.
* hnotification != NULL.
* Ensures:
*/
@@ -500,7 +461,6 @@ extern int node_register_notify(struct node_object *hnode,
* DSP_EWRONGSTSATE: Node is not in NODE_PAUSED or NODE_CREATED state.
* -ESPIPE: Execute function not found in the COFF file.
* Requires:
- * node_init(void) called.
* Ensures:
*/
extern int node_run(struct node_object *hnode);
@@ -523,7 +483,6 @@ extern int node_run(struct node_object *hnode);
* Unable to terminate the node.
* -EBADR: Operation not valid for the current node state.
* Requires:
- * node_init(void) called.
* pstatus != NULL.
* Ensures:
*/
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h b/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h
index 9c1e06758c89..d5b54bb81e8e 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h
@@ -96,7 +96,6 @@ struct node_createargs {
* -EINVAL: The node's stream corresponding to index and dir
* is not a stream to or from the host.
* Requires:
- * node_init(void) called.
* Valid dir.
* chan_id != NULL.
* Ensures:
diff --git a/drivers/staging/tidspbridge/include/dspbridge/proc.h b/drivers/staging/tidspbridge/include/dspbridge/proc.h
index f00dffd51989..a82380ebc041 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/proc.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/proc.h
@@ -189,20 +189,6 @@ extern int proc_get_resource_info(void *hprocessor,
u32 resource_info_size);
/*
- * ======== proc_exit ========
- * Purpose:
- * Decrement reference count, and free resources when reference count is
- * 0.
- * Parameters:
- * Returns:
- * Requires:
- * PROC is initialized.
- * Ensures:
- * When reference count == 0, PROC's private resources are freed.
- */
-extern void proc_exit(void);
-
-/*
* ======== proc_get_dev_object =========
* Purpose:
 * Returns the DEV Handle for a given Processor handle
@@ -223,20 +209,6 @@ extern int proc_get_dev_object(void *hprocessor,
struct dev_object **device_obj);
/*
- * ======== proc_init ========
- * Purpose:
- * Initialize PROC's private state, keeping a reference count on each
- * call.
- * Parameters:
- * Returns:
- * TRUE if initialized; FALSE if error occurred.
- * Requires:
- * Ensures:
- * TRUE: A requirement for the other public PROC functions.
- */
-extern bool proc_init(void);
-
-/*
* ======== proc_get_state ========
* Purpose:
* Report the state of the specified DSP processor.
diff --git a/drivers/staging/tidspbridge/include/dspbridge/rmm.h b/drivers/staging/tidspbridge/include/dspbridge/rmm.h
index baea536681e9..f7a4dc8ecb4f 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/rmm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/rmm.h
@@ -115,18 +115,6 @@ extern int rmm_create(struct rmm_target_obj **target_obj,
extern void rmm_delete(struct rmm_target_obj *target);
/*
- * ======== rmm_exit ========
- * Exit the RMM module
- *
- * Parameters:
- * Returns:
- * Requires:
- * rmm_init successfully called.
- * Ensures:
- */
-extern void rmm_exit(void);
-
-/*
* ======== rmm_free ========
* Free or unreserve memory allocated through rmm_alloc().
*
@@ -148,19 +136,6 @@ extern bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr,
u32 size, bool reserved);
/*
- * ======== rmm_init ========
- * Initialize the RMM module
- *
- * Parameters:
- * Returns:
- * TRUE: Success.
- * FALSE: Failure.
- * Requires:
- * Ensures:
- */
-extern bool rmm_init(void);
-
-/*
* ======== rmm_stat ========
* Obtain memory segment status
*
diff --git a/drivers/staging/tidspbridge/include/dspbridge/strm.h b/drivers/staging/tidspbridge/include/dspbridge/strm.h
index 613fe53dd239..dacf0c234fd1 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/strm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/strm.h
@@ -40,7 +40,6 @@
* -EPERM: Failure occurred, unable to allocate buffers.
* -EINVAL: usize must be > 0 bytes.
* Requires:
- * strm_init(void) called.
* ap_buffer != NULL.
* Ensures:
*/
@@ -63,7 +62,6 @@ extern int strm_allocate_buffer(struct strm_res_object *strmres,
* been reclaimed.
* -EPERM: Failure to close stream.
* Requires:
- * strm_init(void) called.
* Ensures:
*/
extern int strm_close(struct strm_res_object *strmres,
@@ -83,7 +81,6 @@ extern int strm_close(struct strm_res_object *strmres,
* -ENOMEM: Insufficient memory for requested resources.
* -EPERM: General failure.
* Requires:
- * strm_init(void) called.
* strm_man != NULL.
* dev_obj != NULL.
* Ensures:
@@ -101,7 +98,6 @@ extern int strm_create(struct strm_mgr **strm_man,
* strm_mgr_obj: Handle to STRM manager object from strm_create.
* Returns:
* Requires:
- * strm_init(void) called.
* Valid strm_mgr_obj.
* Ensures:
* strm_mgr_obj is not valid.
@@ -109,18 +105,6 @@ extern int strm_create(struct strm_mgr **strm_man,
extern void strm_delete(struct strm_mgr *strm_mgr_obj);
/*
- * ======== strm_exit ========
- * Purpose:
- * Discontinue usage of STRM module.
- * Parameters:
- * Returns:
- * Requires:
- * strm_init(void) successfully called before.
- * Ensures:
- */
-extern void strm_exit(void);
-
-/*
* ======== strm_free_buffer ========
* Purpose:
* Free buffer(s) allocated with strm_allocate_buffer.
@@ -133,7 +117,6 @@ extern void strm_exit(void);
* -EFAULT: Invalid stream handle.
* -EPERM: Failure occurred, unable to free buffers.
* Requires:
- * strm_init(void) called.
* ap_buffer != NULL.
* Ensures:
*/
@@ -156,7 +139,6 @@ extern int strm_free_buffer(struct strm_res_object *strmres,
* -EINVAL: stream_info_size < sizeof(dsp_streaminfo).
* -EPERM: Unable to get stream info.
* Requires:
- * strm_init(void) called.
* stream_info != NULL.
* Ensures:
*/
@@ -184,24 +166,11 @@ extern int strm_get_info(struct strm_object *stream_obj,
* -ETIME: A timeout occurred before the stream could be idled.
* -EPERM: Unable to idle stream.
* Requires:
- * strm_init(void) called.
* Ensures:
*/
extern int strm_idle(struct strm_object *stream_obj, bool flush_data);
/*
- * ======== strm_init ========
- * Purpose:
- * Initialize the STRM module.
- * Parameters:
- * Returns:
- * TRUE if initialization succeeded, FALSE otherwise.
- * Requires:
- * Ensures:
- */
-extern bool strm_init(void);
-
-/*
* ======== strm_issue ========
* Purpose:
* Send a buffer of data to a stream.
@@ -217,8 +186,7 @@ extern bool strm_init(void);
* -ENOSR: The stream is full.
* -EPERM: Failure occurred, unable to issue buffer.
* Requires:
- * strm_init(void) called.
- * pbuf != NULL.
+* pbuf != NULL.
* Ensures:
*/
extern int strm_issue(struct strm_object *stream_obj, u8 * pbuf,
@@ -244,7 +212,6 @@ extern int strm_issue(struct strm_object *stream_obj, u8 * pbuf,
* Unable to open stream.
* -EINVAL: Invalid index.
* Requires:
- * strm_init(void) called.
* strmres != NULL.
* pattr != NULL.
* Ensures:
@@ -275,7 +242,6 @@ extern int strm_open(struct node_object *hnode, u32 dir,
* retrieved.
* -EPERM: Failure occurred, unable to reclaim buffer.
* Requires:
- * strm_init(void) called.
* buf_ptr != NULL.
* nbytes != NULL.
* pdw_arg != NULL.
@@ -302,7 +268,6 @@ extern int strm_reclaim(struct strm_object *stream_obj,
* -ENOSYS: Notification type specified by notify_type is not
* supported.
* Requires:
- * strm_init(void) called.
* hnotification != NULL.
* Ensures:
*/
@@ -328,7 +293,6 @@ extern int strm_register_notify(struct strm_object *stream_obj,
* -ETIME: A timeout occurred before a stream became ready.
* -EPERM: Failure occurred, unable to select a stream.
* Requires:
- * strm_init(void) called.
* strm_tab != NULL.
* strms > 0.
* pmask != NULL.
diff --git a/drivers/staging/tidspbridge/pmgr/chnl.c b/drivers/staging/tidspbridge/pmgr/chnl.c
index 245de82e2d67..4bd8686f2355 100644
--- a/drivers/staging/tidspbridge/pmgr/chnl.c
+++ b/drivers/staging/tidspbridge/pmgr/chnl.c
@@ -24,9 +24,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -41,9 +38,6 @@
/* ----------------------------------- This */
#include <dspbridge/chnl.h>
-/* ----------------------------------- Globals */
-static u32 refs;
-
/*
* ======== chnl_create ========
* Purpose:
@@ -58,10 +52,6 @@ int chnl_create(struct chnl_mgr **channel_mgr,
struct chnl_mgr *hchnl_mgr;
struct chnl_mgr_ *chnl_mgr_obj = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(channel_mgr != NULL);
- DBC_REQUIRE(mgr_attrts != NULL);
-
*channel_mgr = NULL;
/* Validate args: */
@@ -99,8 +89,6 @@ int chnl_create(struct chnl_mgr **channel_mgr,
}
}
- DBC_ENSURE(status || chnl_mgr_obj);
-
return status;
}
@@ -115,8 +103,6 @@ int chnl_destroy(struct chnl_mgr *hchnl_mgr)
struct bridge_drv_interface *intf_fxns;
int status;
- DBC_REQUIRE(refs > 0);
-
if (chnl_mgr_obj) {
intf_fxns = chnl_mgr_obj->intf_fxns;
/* Let Bridge channel module destroy the chnl_mgr: */
@@ -127,36 +113,3 @@ int chnl_destroy(struct chnl_mgr *hchnl_mgr)
return status;
}
-
-/*
- * ======== chnl_exit ========
- * Purpose:
- * Discontinue usage of the CHNL module.
- */
-void chnl_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
- * ======== chnl_init ========
- * Purpose:
- * Initialize the CHNL module's private state.
- */
-bool chnl_init(void)
-{
- bool ret = true;
-
- DBC_REQUIRE(refs >= 0);
-
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
- return ret;
-}
diff --git a/drivers/staging/tidspbridge/pmgr/cmm.c b/drivers/staging/tidspbridge/pmgr/cmm.c
index e6b2c8962f81..4a800dadd703 100644
--- a/drivers/staging/tidspbridge/pmgr/cmm.c
+++ b/drivers/staging/tidspbridge/pmgr/cmm.c
@@ -35,9 +35,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -134,9 +131,6 @@ struct cmm_mnode {
u32 client_proc; /* Process that allocated this mem block */
};
-/* ----------------------------------- Globals */
-static u32 refs; /* module reference count */
-
/* ----------------------------------- Function Prototypes */
static void add_to_free_list(struct cmm_allocator *allocator,
struct cmm_mnode *pnode);
@@ -244,9 +238,6 @@ int cmm_create(struct cmm_object **ph_cmm_mgr,
struct cmm_object *cmm_obj = NULL;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(ph_cmm_mgr != NULL);
-
*ph_cmm_mgr = NULL;
/* create, zero, and tag a cmm mgr object */
cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
@@ -256,8 +247,6 @@ int cmm_create(struct cmm_object **ph_cmm_mgr,
if (mgr_attrts == NULL)
mgr_attrts = &cmm_dfltmgrattrs; /* set defaults */
- /* 4 bytes minimum */
- DBC_ASSERT(mgr_attrts->min_block_size >= 4);
/* save away smallest block allocation for this cmm mgr */
cmm_obj->min_block_size = mgr_attrts->min_block_size;
cmm_obj->page_size = PAGE_SIZE;
@@ -283,7 +272,6 @@ int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
s32 slot_seg;
struct cmm_mnode *node, *tmp;
- DBC_REQUIRE(refs > 0);
if (!hcmm_mgr) {
status = -EFAULT;
return status;
@@ -326,19 +314,6 @@ int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
}
/*
- * ======== cmm_exit ========
- * Purpose:
- * Discontinue usage of module; free resources when reference count
- * reaches 0.
- */
-void cmm_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-}
-
-/*
* ======== cmm_free_buf ========
* Purpose:
* Free the given buffer.
@@ -351,9 +326,6 @@ int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id)
struct cmm_allocator *allocator;
struct cmm_attrs *pattrs;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(buf_pa != NULL);
-
if (ul_seg_id == 0) {
pattrs = &cmm_dfltalctattrs;
ul_seg_id = pattrs->seg_id;
@@ -392,8 +364,6 @@ int cmm_get_handle(void *hprocessor, struct cmm_object ** ph_cmm_mgr)
int status = 0;
struct dev_object *hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(ph_cmm_mgr != NULL);
if (hprocessor != NULL)
status = proc_get_dev_object(hprocessor, &hdev_obj);
else
@@ -419,8 +389,6 @@ int cmm_get_info(struct cmm_object *hcmm_mgr,
struct cmm_allocator *altr;
struct cmm_mnode *curr;
- DBC_REQUIRE(cmm_info_obj != NULL);
-
if (!hcmm_mgr) {
status = -EFAULT;
return status;
@@ -464,24 +432,6 @@ int cmm_get_info(struct cmm_object *hcmm_mgr,
}
/*
- * ======== cmm_init ========
- * Purpose:
- * Initializes private state of CMM module.
- */
-bool cmm_init(void)
-{
- bool ret = true;
-
- DBC_REQUIRE(refs >= 0);
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
- return ret;
-}
-
-/*
* ======== cmm_register_gppsm_seg ========
* Purpose:
* Register a block of SM with the CMM to be used for later GPP SM
@@ -499,13 +449,6 @@ int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
struct cmm_mnode *new_node;
s32 slot_seg;
- DBC_REQUIRE(ul_size > 0);
- DBC_REQUIRE(sgmt_id != NULL);
- DBC_REQUIRE(dw_gpp_base_pa != 0);
- DBC_REQUIRE(gpp_base_va != 0);
- DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
- (c_factor >= CMM_SUBFROMDSPPA));
-
dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
"dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n",
__func__, dw_gpp_base_pa, ul_size, dsp_addr_offset,
@@ -589,7 +532,6 @@ int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
struct cmm_allocator *psma;
u32 ul_id = ul_seg_id;
- DBC_REQUIRE(ul_seg_id > 0);
if (!hcmm_mgr)
return -EFAULT;
@@ -635,8 +577,6 @@ static void un_register_gppsm_seg(struct cmm_allocator *psma)
{
struct cmm_mnode *curr, *tmp;
- DBC_REQUIRE(psma != NULL);
-
/* free nodes on free list */
list_for_each_entry_safe(curr, tmp, &psma->free_list, link) {
list_del(&curr->link);
@@ -664,7 +604,6 @@ static void un_register_gppsm_seg(struct cmm_allocator *psma)
static s32 get_slot(struct cmm_object *cmm_mgr_obj)
{
s32 slot_seg = -1; /* neg on failure */
- DBC_REQUIRE(cmm_mgr_obj != NULL);
/* get first available slot in cmm mgr SMSegTab[] */
for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
@@ -687,11 +626,6 @@ static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
{
struct cmm_mnode *pnode;
- DBC_REQUIRE(cmm_mgr_obj != NULL);
- DBC_REQUIRE(dw_pa != 0);
- DBC_REQUIRE(dw_va != 0);
- DBC_REQUIRE(ul_size != 0);
-
/* Check cmm mgr's node freelist */
if (list_empty(&cmm_mgr_obj->node_free_list)) {
pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
@@ -719,7 +653,6 @@ static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
*/
static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
{
- DBC_REQUIRE(pnode != NULL);
list_add_tail(&pnode->link, &cmm_mgr_obj->node_free_list);
}
@@ -794,9 +727,6 @@ static void add_to_free_list(struct cmm_allocator *allocator,
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
u32 ul_seg_id)
{
- DBC_REQUIRE(cmm_mgr_obj != NULL);
- DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS));
-
return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
}
@@ -818,10 +748,6 @@ int cmm_xlator_create(struct cmm_xlatorobject **xlator,
struct cmm_xlator *xlator_object = NULL;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(xlator != NULL);
- DBC_REQUIRE(hcmm_mgr != NULL);
-
*xlator = NULL;
if (xlator_attrs == NULL)
xlator_attrs = &cmm_dfltxlatorattrs; /* set defaults */
@@ -851,13 +777,6 @@ void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
void *tmp_va_buff;
struct cmm_attrs attrs;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(xlator != NULL);
- DBC_REQUIRE(xlator_obj->cmm_mgr != NULL);
- DBC_REQUIRE(va_buf != NULL);
- DBC_REQUIRE(pa_size > 0);
- DBC_REQUIRE(xlator_obj->seg_id > 0);
-
if (xlator_obj) {
attrs.seg_id = xlator_obj->seg_id;
__raw_writel(0, va_buf);
@@ -887,10 +806,6 @@ int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
int status = -EPERM;
void *buf_pa = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(buf_va != NULL);
- DBC_REQUIRE(xlator_obj->seg_id > 0);
-
if (xlator_obj) {
/* convert Va to Pa so we can free it. */
buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
@@ -900,7 +815,8 @@ int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
if (status) {
/* Uh oh, this shouldn't happen. Descriptor
* gone! */
- DBC_ASSERT(false); /* CMM is leaking mem */
+ pr_err("%s, line %d: Assertion failed\n",
+ __FILE__, __LINE__);
}
}
}
@@ -918,10 +834,6 @@ int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 ** paddr,
struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(paddr != NULL);
- DBC_REQUIRE((segm_id > 0) && (segm_id <= CMM_MAXGPPSEGS));
-
if (xlator_obj) {
if (set_info) {
/* set translators virtual address range */
@@ -948,16 +860,11 @@ void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
struct cmm_allocator *allocator = NULL;
u32 dw_offset = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(paddr != NULL);
- DBC_REQUIRE((xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA));
-
if (!xlator_obj)
goto loop_cont;
cmm_mgr_obj = (struct cmm_object *)xlator_obj->cmm_mgr;
/* get this translator's default SM allocator */
- DBC_ASSERT(xlator_obj->seg_id > 0);
allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->seg_id - 1];
if (!allocator)
goto loop_cont;
diff --git a/drivers/staging/tidspbridge/pmgr/cod.c b/drivers/staging/tidspbridge/pmgr/cod.c
index 1a29264b5853..4007826f7abc 100644
--- a/drivers/staging/tidspbridge/pmgr/cod.c
+++ b/drivers/staging/tidspbridge/pmgr/cod.c
@@ -30,9 +30,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- Platform Manager */
/* Include appropriate loader header file */
#include <dspbridge/dbll.h>
@@ -61,8 +58,6 @@ struct cod_libraryobj {
struct cod_manager *cod_mgr;
};
-static u32 refs = 0L;
-
static struct dbll_fxns ldr_fxns = {
(dbll_close_fxn) dbll_close,
(dbll_create_fxn) dbll_create,
@@ -183,10 +178,6 @@ void cod_close(struct cod_libraryobj *lib)
{
struct cod_manager *hmgr;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(lib != NULL);
- DBC_REQUIRE(lib->cod_mgr);
-
hmgr = lib->cod_mgr;
hmgr->fxns.close_fxn(lib->dbll_lib);
@@ -208,9 +199,6 @@ int cod_create(struct cod_manager **mgr, char *str_zl_file)
struct dbll_attrs zl_attrs;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(mgr != NULL);
-
/* assume failure */
*mgr = NULL;
@@ -263,9 +251,6 @@ int cod_create(struct cod_manager **mgr, char *str_zl_file)
*/
void cod_delete(struct cod_manager *cod_mgr_obj)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
-
if (cod_mgr_obj->base_lib) {
if (cod_mgr_obj->loaded)
cod_mgr_obj->fxns.unload_fxn(cod_mgr_obj->base_lib,
@@ -281,21 +266,6 @@ void cod_delete(struct cod_manager *cod_mgr_obj)
}
/*
- * ======== cod_exit ========
- * Purpose:
- * Discontinue usage of the COD module.
- *
- */
-void cod_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
* ======== cod_get_base_lib ========
* Purpose:
* Get handle to the base image DBL library.
@@ -305,10 +275,6 @@ int cod_get_base_lib(struct cod_manager *cod_mgr_obj,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
- DBC_REQUIRE(plib != NULL);
-
*plib = (struct dbll_library_obj *)cod_mgr_obj->base_lib;
return status;
@@ -322,10 +288,6 @@ int cod_get_base_name(struct cod_manager *cod_mgr_obj, char *sz_name,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
- DBC_REQUIRE(sz_name != NULL);
-
if (usize <= COD_MAXPATHLENGTH)
strncpy(sz_name, cod_mgr_obj->sz_zl_file, usize);
else
@@ -342,10 +304,6 @@ int cod_get_base_name(struct cod_manager *cod_mgr_obj, char *sz_name,
*/
int cod_get_entry(struct cod_manager *cod_mgr_obj, u32 *entry_pt)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
- DBC_REQUIRE(entry_pt != NULL);
-
*entry_pt = cod_mgr_obj->entry;
return 0;
@@ -361,10 +319,6 @@ int cod_get_loader(struct cod_manager *cod_mgr_obj,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
- DBC_REQUIRE(loader != NULL);
-
*loader = (struct dbll_tar_obj *)cod_mgr_obj->target;
return status;
@@ -382,13 +336,6 @@ int cod_get_section(struct cod_libraryobj *lib, char *str_sect,
struct cod_manager *cod_mgr_obj;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(lib != NULL);
- DBC_REQUIRE(lib->cod_mgr);
- DBC_REQUIRE(str_sect != NULL);
- DBC_REQUIRE(addr != NULL);
- DBC_REQUIRE(len != NULL);
-
*addr = 0;
*len = 0;
if (lib != NULL) {
@@ -399,8 +346,6 @@ int cod_get_section(struct cod_libraryobj *lib, char *str_sect,
status = -ESPIPE;
}
- DBC_ENSURE(!status || ((*addr == 0) && (*len == 0)));
-
return status;
}
@@ -417,11 +362,6 @@ int cod_get_sym_value(struct cod_manager *cod_mgr_obj, char *str_sym,
{
struct dbll_sym_val *dbll_sym;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
- DBC_REQUIRE(str_sym != NULL);
- DBC_REQUIRE(pul_value != NULL);
-
dev_dbg(bridge, "%s: cod_mgr_obj: %p str_sym: %s pul_value: %p\n",
__func__, cod_mgr_obj, str_sym, pul_value);
if (cod_mgr_obj->base_lib) {
@@ -442,25 +382,6 @@ int cod_get_sym_value(struct cod_manager *cod_mgr_obj, char *str_sym,
}
/*
- * ======== cod_init ========
- * Purpose:
- * Initialize the COD module's private state.
- *
- */
-bool cod_init(void)
-{
- bool ret = true;
-
- DBC_REQUIRE(refs >= 0);
-
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && refs > 0) || (!ret && refs >= 0));
- return ret;
-}
-
-/*
* ======== cod_load_base ========
* Purpose:
* Load the initial program image, optionally with command-line arguments,
@@ -482,14 +403,6 @@ int cod_load_base(struct cod_manager *cod_mgr_obj, u32 num_argc, char *args[],
int status;
u32 i;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
- DBC_REQUIRE(num_argc > 0);
- DBC_REQUIRE(args != NULL);
- DBC_REQUIRE(args[0] != NULL);
- DBC_REQUIRE(pfn_write != NULL);
- DBC_REQUIRE(cod_mgr_obj->base_lib != NULL);
-
/*
	 * Make sure every argv[] entry counted by argc has a value, or adjust
	 * argc to reflect the true number of entries in the NULL-terminated
	 * argv array.
@@ -538,12 +451,6 @@ int cod_open(struct cod_manager *hmgr, char *sz_coff_path,
int status = 0;
struct cod_libraryobj *lib = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hmgr);
- DBC_REQUIRE(sz_coff_path != NULL);
- DBC_REQUIRE(flags == COD_NOLOAD || flags == COD_SYMB);
- DBC_REQUIRE(lib_obj != NULL);
-
*lib_obj = NULL;
lib = kzalloc(sizeof(struct cod_libraryobj), GFP_KERNEL);
@@ -575,10 +482,6 @@ int cod_open_base(struct cod_manager *hmgr, char *sz_coff_path,
int status = 0;
struct dbll_library_obj *lib;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hmgr);
- DBC_REQUIRE(sz_coff_path != NULL);
-
/* if we previously opened a base image, close it now */
if (hmgr->base_lib) {
if (hmgr->loaded) {
@@ -612,12 +515,6 @@ int cod_read_section(struct cod_libraryobj *lib, char *str_sect,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(lib != NULL);
- DBC_REQUIRE(lib->cod_mgr);
- DBC_REQUIRE(str_sect != NULL);
- DBC_REQUIRE(str_content != NULL);
-
if (lib != NULL)
status =
lib->cod_mgr->fxns.read_sect_fxn(lib->dbll_lib, str_sect,
diff --git a/drivers/staging/tidspbridge/pmgr/dbll.c b/drivers/staging/tidspbridge/pmgr/dbll.c
index 31da62b14bc9..9f07036cd411 100644
--- a/drivers/staging/tidspbridge/pmgr/dbll.c
+++ b/drivers/staging/tidspbridge/pmgr/dbll.c
@@ -21,8 +21,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
#include <dspbridge/gh.h>
/* ----------------------------------- OS Adaptation Layer */
@@ -189,8 +187,6 @@ static u16 name_hash(void *key, u16 max_bucket);
static bool name_match(void *key, void *sp);
static void sym_delete(void *value);
-static u32 refs; /* module reference count */
-
/* Symbol Redefinition */
static int redefined_symbol;
static int gbl_search = 1;
@@ -202,9 +198,6 @@ void dbll_close(struct dbll_library_obj *zl_lib)
{
struct dbll_tar_obj *zl_target;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_lib);
- DBC_REQUIRE(zl_lib->open_ref > 0);
zl_target = zl_lib->target_obj;
zl_lib->open_ref--;
if (zl_lib->open_ref == 0) {
@@ -241,10 +234,6 @@ int dbll_create(struct dbll_tar_obj **target_obj,
struct dbll_tar_obj *pzl_target;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pattrs != NULL);
- DBC_REQUIRE(target_obj != NULL);
-
/* Allocate DBL target object */
pzl_target = kzalloc(sizeof(struct dbll_tar_obj), GFP_KERNEL);
if (target_obj != NULL) {
@@ -255,8 +244,6 @@ int dbll_create(struct dbll_tar_obj **target_obj,
pzl_target->attrs = *pattrs;
*target_obj = (struct dbll_tar_obj *)pzl_target;
}
- DBC_ENSURE((!status && *target_obj) ||
- (status && *target_obj == NULL));
}
return status;
@@ -269,9 +256,6 @@ void dbll_delete(struct dbll_tar_obj *target)
{
struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_target);
-
kfree(zl_target);
}
@@ -282,14 +266,7 @@ void dbll_delete(struct dbll_tar_obj *target)
*/
void dbll_exit(void)
{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- if (refs == 0)
- gh_exit();
-
- DBC_ENSURE(refs >= 0);
+ /* do nothing */
}
/*
@@ -302,12 +279,6 @@ bool dbll_get_addr(struct dbll_library_obj *zl_lib, char *name,
struct dbll_symbol *sym;
bool status = false;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_lib);
- DBC_REQUIRE(name != NULL);
- DBC_REQUIRE(sym_val != NULL);
- DBC_REQUIRE(zl_lib->sym_tab != NULL);
-
sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, name);
if (sym != NULL) {
*sym_val = &sym->value;
@@ -327,10 +298,6 @@ void dbll_get_attrs(struct dbll_tar_obj *target, struct dbll_attrs *pattrs)
{
struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_target);
- DBC_REQUIRE(pattrs != NULL);
-
if ((pattrs != NULL) && (zl_target != NULL))
*pattrs = zl_target->attrs;
@@ -347,12 +314,6 @@ bool dbll_get_c_addr(struct dbll_library_obj *zl_lib, char *name,
char cname[MAXEXPR + 1];
bool status = false;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_lib);
- DBC_REQUIRE(sym_val != NULL);
- DBC_REQUIRE(zl_lib->sym_tab != NULL);
- DBC_REQUIRE(name != NULL);
-
cname[0] = '_';
strncpy(cname + 1, name, sizeof(cname) - 2);
@@ -382,12 +343,6 @@ int dbll_get_sect(struct dbll_library_obj *lib, char *name, u32 *paddr,
struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(name != NULL);
- DBC_REQUIRE(paddr != NULL);
- DBC_REQUIRE(psize != NULL);
- DBC_REQUIRE(zl_lib);
-
/* If the DOFF file is not open, open it now. */
if (zl_lib != NULL) {
if (zl_lib->fp == NULL) {
@@ -434,12 +389,7 @@ int dbll_get_sect(struct dbll_library_obj *lib, char *name, u32 *paddr,
*/
bool dbll_init(void)
{
- DBC_REQUIRE(refs >= 0);
-
- if (refs == 0)
- gh_init();
-
- refs++;
+ /* do nothing */
return true;
}
@@ -456,10 +406,6 @@ int dbll_load(struct dbll_library_obj *lib, dbll_flags flags,
s32 err;
int status = 0;
bool opened_doff = false;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_lib);
- DBC_REQUIRE(entry != NULL);
- DBC_REQUIRE(attrs != NULL);
/*
* Load if not already loaded.
@@ -558,8 +504,6 @@ int dbll_load(struct dbll_library_obj *lib, dbll_flags flags,
if (opened_doff)
dof_close(zl_lib);
- DBC_ENSURE(status || zl_lib->load_ref > 0);
-
dev_dbg(bridge, "%s: lib: %p flags: 0x%x entry: %p, status 0x%x\n",
__func__, lib, flags, entry, status);
@@ -577,12 +521,6 @@ int dbll_open(struct dbll_tar_obj *target, char *file, dbll_flags flags,
s32 err;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_target);
- DBC_REQUIRE(zl_target->attrs.fopen != NULL);
- DBC_REQUIRE(file != NULL);
- DBC_REQUIRE(lib_obj != NULL);
-
zl_lib = zl_target->head;
while (zl_lib != NULL) {
if (strcmp(zl_lib->file_name, file) == 0) {
@@ -699,8 +637,6 @@ func_cont:
dbll_close((struct dbll_library_obj *)zl_lib);
}
- DBC_ENSURE((!status && (zl_lib->open_ref > 0) && *lib_obj)
- || (status && *lib_obj == NULL));
dev_dbg(bridge, "%s: target: %p file: %s lib_obj: %p, status 0x%x\n",
__func__, target, file, lib_obj, status);
@@ -722,12 +658,6 @@ int dbll_read_sect(struct dbll_library_obj *lib, char *name,
const struct ldr_section_info *sect = NULL;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_lib);
- DBC_REQUIRE(name != NULL);
- DBC_REQUIRE(buf != NULL);
- DBC_REQUIRE(size != 0);
-
/* If the DOFF file is not open, open it now. */
if (zl_lib != NULL) {
if (zl_lib->fp == NULL) {
@@ -788,14 +718,11 @@ void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs)
struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
s32 err = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_lib);
- DBC_REQUIRE(zl_lib->load_ref > 0);
dev_dbg(bridge, "%s: lib: %p\n", __func__, lib);
zl_lib->load_ref--;
/* Unload only if reference count is 0 */
if (zl_lib->load_ref != 0)
- goto func_end;
+ return;
zl_lib->target_obj->attrs = *attrs;
if (zl_lib->dload_mod_obj) {
@@ -814,8 +741,6 @@ void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs)
/* delete DOFF desc since it holds *lots* of host OS
* resources */
dof_close(zl_lib);
-func_end:
- DBC_ENSURE(zl_lib->load_ref >= 0);
}
/*
@@ -874,8 +799,6 @@ static u16 name_hash(void *key, u16 max_bucket)
u16 hash;
char *name = (char *)key;
- DBC_REQUIRE(name != NULL);
-
hash = 0;
while (*name) {
@@ -893,9 +816,6 @@ static u16 name_hash(void *key, u16 max_bucket)
*/
static bool name_match(void *key, void *sp)
{
- DBC_REQUIRE(key != NULL);
- DBC_REQUIRE(sp != NULL);
-
if ((key != NULL) && (sp != NULL)) {
if (strcmp((char *)key, ((struct dbll_symbol *)sp)->name) ==
0)
@@ -938,10 +858,7 @@ static int dbll_read_buffer(struct dynamic_loader_stream *this, void *buffer,
struct dbll_library_obj *lib;
int bytes_read = 0;
- DBC_REQUIRE(this != NULL);
lib = pstream->lib;
- DBC_REQUIRE(lib);
-
if (lib != NULL) {
bytes_read =
(*(lib->target_obj->attrs.fread)) (buffer, 1, bufsize,
@@ -960,10 +877,7 @@ static int dbll_set_file_posn(struct dynamic_loader_stream *this,
struct dbll_library_obj *lib;
int status = 0; /* Success */
- DBC_REQUIRE(this != NULL);
lib = pstream->lib;
- DBC_REQUIRE(lib);
-
if (lib != NULL) {
status = (*(lib->target_obj->attrs.fseek)) (lib->fp, (long)pos,
SEEK_SET);
@@ -986,10 +900,7 @@ static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this,
struct dbll_sym_val *dbll_sym = NULL;
bool status = false; /* Symbol not found yet */
- DBC_REQUIRE(this != NULL);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
-
if (lib != NULL) {
if (lib->target_obj->attrs.sym_lookup) {
/* Check current lib + base lib + dep lib +
@@ -1015,9 +926,6 @@ static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this,
if (!status && gbl_search)
dev_dbg(bridge, "%s: Symbol not found: %s\n", __func__, name);
- DBC_ASSERT((status && (dbll_sym != NULL))
- || (!status && (dbll_sym == NULL)));
-
ret_sym = (struct dynload_symbol *)dbll_sym;
return ret_sym;
}
@@ -1034,11 +942,7 @@ static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym
struct dbll_library_obj *lib;
struct dbll_symbol *sym;
- DBC_REQUIRE(this != NULL);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
- DBC_REQUIRE(lib->sym_tab != NULL);
-
sym = (struct dbll_symbol *)gh_find(lib->sym_tab, (char *)name);
ret_sym = (struct dynload_symbol *)&sym->value;
@@ -1059,10 +963,7 @@ static struct dynload_symbol *dbll_add_to_symbol_table(struct dynamic_loader_sym
struct dbll_library_obj *lib;
struct dynload_symbol *ret;
- DBC_REQUIRE(this != NULL);
- DBC_REQUIRE(name);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
/* Check to see if symbol is already defined in symbol table */
if (!(lib->target_obj->attrs.base_image)) {
@@ -1111,10 +1012,7 @@ static void dbll_purge_symbol_table(struct dynamic_loader_sym *this,
struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
struct dbll_library_obj *lib;
- DBC_REQUIRE(this != NULL);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
-
/* May not need to do anything */
}
@@ -1127,9 +1025,7 @@ static void *allocate(struct dynamic_loader_sym *this, unsigned memsize)
struct dbll_library_obj *lib;
void *buf;
- DBC_REQUIRE(this != NULL);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
buf = kzalloc(memsize, GFP_KERNEL);
@@ -1144,9 +1040,7 @@ static void deallocate(struct dynamic_loader_sym *this, void *mem_ptr)
struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
struct dbll_library_obj *lib;
- DBC_REQUIRE(this != NULL);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
kfree(mem_ptr);
}
@@ -1161,9 +1055,7 @@ static void dbll_err_report(struct dynamic_loader_sym *this, const char *errstr,
struct dbll_library_obj *lib;
char temp_buf[MAXEXPR];
- DBC_REQUIRE(this != NULL);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
vsnprintf((char *)temp_buf, MAXEXPR, (char *)errstr, args);
dev_dbg(bridge, "%s\n", temp_buf);
}
@@ -1195,9 +1087,7 @@ static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
u32 alloc_size = 0;
u32 run_addr_flag = 0;
- DBC_REQUIRE(this != NULL);
lib = dbll_alloc_obj->lib;
- DBC_REQUIRE(lib);
mem_sect_type =
(stype == DLOAD_TEXT) ? DBLL_CODE : (stype ==
@@ -1206,7 +1096,6 @@ static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
/* Attempt to extract the segment ID and requirement information from
the name of the section */
- DBC_REQUIRE(info->name);
token_len = strlen((char *)(info->name)) + 1;
sz_sect_name = kzalloc(token_len, GFP_KERNEL);
@@ -1307,9 +1196,7 @@ static void rmm_dealloc(struct dynamic_loader_allocate *this,
(stype == DLOAD_TEXT) ? DBLL_CODE : (stype ==
DLOAD_BSS) ? DBLL_BSS :
DBLL_DATA;
- DBC_REQUIRE(this != NULL);
lib = dbll_alloc_obj->lib;
- DBC_REQUIRE(lib);
/* segid was set by alloc function */
segid = (u32) info->context;
if (mem_sect_type == DBLL_CODE)
@@ -1347,9 +1234,7 @@ static int read_mem(struct dynamic_loader_initialize *this, void *buf,
struct dbll_library_obj *lib;
int bytes_read = 0;
- DBC_REQUIRE(this != NULL);
lib = init_obj->lib;
- DBC_REQUIRE(lib);
/* Need bridge_brd_read function */
return bytes_read;
}
@@ -1368,7 +1253,6 @@ static int write_mem(struct dynamic_loader_initialize *this, void *buf,
u32 mem_sect_type;
bool ret = true;
- DBC_REQUIRE(this != NULL);
lib = init_obj->lib;
if (!lib)
return false;
@@ -1415,7 +1299,6 @@ static int fill_mem(struct dynamic_loader_initialize *this, ldr_addr addr,
struct dbll_library_obj *lib;
struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
- DBC_REQUIRE(this != NULL);
lib = init_obj->lib;
pbuf = NULL;
/* Pass the NULL pointer to write_mem to get the start address of Shared
@@ -1439,9 +1322,7 @@ static int execute(struct dynamic_loader_initialize *this, ldr_addr start)
struct dbll_library_obj *lib;
bool ret = true;
- DBC_REQUIRE(this != NULL);
lib = init_obj->lib;
- DBC_REQUIRE(lib);
/* Save entry point */
if (lib != NULL)
lib->entry = (u32) start;
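The dbll_init()/dbll_exit() bodies gutted above implemented the
module-reference-count idiom this driver used everywhere; distilled to
its shape (names taken from the removed code):

static u32 refs;	/* module reference count */

bool mod_init(void)
{
	if (refs == 0)
		gh_init();	/* first user performs the global setup */
	refs++;
	return true;
}

void mod_exit(void)
{
	refs--;
	if (refs == 0)
		gh_exit();	/* last user tears it down */
}

This patch drops the counter entirely and leaves both entry points as
stubs.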
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c
index 522810bc7427..6234ffb5e8a3 100644
--- a/drivers/staging/tidspbridge/pmgr/dev.c
+++ b/drivers/staging/tidspbridge/pmgr/dev.c
@@ -24,9 +24,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- Platform Manager */
#include <dspbridge/cod.h>
#include <dspbridge/drv.h>
@@ -84,9 +81,6 @@ struct drv_ext {
char sz_string[MAXREGPATHLENGTH];
};
-/* ----------------------------------- Globals */
-static u32 refs; /* Module reference count */
-
/* ----------------------------------- Function Prototypes */
static int fxn_not_implemented(int arg, ...);
static int init_cod_mgr(struct dev_object *dev_obj);
@@ -106,11 +100,8 @@ u32 dev_brd_write_fxn(void *arb, u32 dsp_add, void *host_buf,
u32 ul_written = 0;
int status;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(host_buf != NULL); /* Required of BrdWrite(). */
if (dev_obj) {
/* Require of BrdWrite() */
- DBC_ASSERT(dev_obj->bridge_context != NULL);
status = (*dev_obj->bridge_interface.brd_write) (
dev_obj->bridge_context, host_buf,
dsp_add, ul_num_bytes, mem_space);
@@ -143,9 +134,6 @@ int dev_create_device(struct dev_object **device_obj,
struct drv_object *hdrv_obj = NULL;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(device_obj != NULL);
- DBC_REQUIRE(driver_file_name != NULL);
status = drv_request_bridge_res_dsp((void *)&host_res);
@@ -169,7 +157,6 @@ int dev_create_device(struct dev_object **device_obj,
/* Create the device object, and pass a handle to the Bridge driver for
* storage. */
if (!status) {
- DBC_ASSERT(drv_fxns);
dev_obj = kzalloc(sizeof(struct dev_object), GFP_KERNEL);
if (dev_obj) {
/* Fill out the rest of the Dev Object structure: */
@@ -191,9 +178,6 @@ int dev_create_device(struct dev_object **device_obj,
status = (dev_obj->bridge_interface.dev_create)
(&dev_obj->bridge_context, dev_obj,
host_res);
- /* Assert bridge_dev_create()'s ensure clause: */
- DBC_ASSERT(status
- || (dev_obj->bridge_context != NULL));
} else {
status = -ENOMEM;
}
@@ -271,7 +255,6 @@ leave:
*device_obj = NULL;
}
- DBC_ENSURE((!status && *device_obj) || (status && !*device_obj));
return status;
}
@@ -287,17 +270,11 @@ int dev_create2(struct dev_object *hdev_obj)
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hdev_obj);
-
/* There can be only one Node Manager per DEV object */
- DBC_ASSERT(!dev_obj->node_mgr);
status = node_create_mgr(&dev_obj->node_mgr, hdev_obj);
if (status)
dev_obj->node_mgr = NULL;
- DBC_ENSURE((!status && dev_obj->node_mgr != NULL)
- || (status && dev_obj->node_mgr == NULL));
return status;
}
@@ -311,9 +288,6 @@ int dev_destroy2(struct dev_object *hdev_obj)
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hdev_obj);
-
if (dev_obj->node_mgr) {
if (node_delete_mgr(dev_obj->node_mgr))
status = -EPERM;
@@ -322,7 +296,6 @@ int dev_destroy2(struct dev_object *hdev_obj)
}
- DBC_ENSURE((!status && dev_obj->node_mgr == NULL) || status);
return status;
}
@@ -337,8 +310,6 @@ int dev_destroy_device(struct dev_object *hdev_obj)
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
-
if (hdev_obj) {
if (dev_obj->cod_mgr) {
cod_delete(dev_obj->cod_mgr);
@@ -415,9 +386,6 @@ int dev_get_chnl_mgr(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(mgr != NULL);
-
if (hdev_obj) {
*mgr = dev_obj->chnl_mgr;
} else {
@@ -425,7 +393,6 @@ int dev_get_chnl_mgr(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
return status;
}
@@ -441,9 +408,6 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(mgr != NULL);
-
if (hdev_obj) {
*mgr = dev_obj->cmm_mgr;
} else {
@@ -451,7 +415,6 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
return status;
}
@@ -467,9 +430,6 @@ int dev_get_dmm_mgr(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(mgr != NULL);
-
if (hdev_obj) {
*mgr = dev_obj->dmm_mgr;
} else {
@@ -477,7 +437,6 @@ int dev_get_dmm_mgr(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
return status;
}
@@ -492,9 +451,6 @@ int dev_get_cod_mgr(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr != NULL);
-
if (hdev_obj) {
*cod_mgr = dev_obj->cod_mgr;
} else {
@@ -502,7 +458,6 @@ int dev_get_cod_mgr(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || (cod_mgr != NULL && *cod_mgr == NULL));
return status;
}
@@ -514,9 +469,6 @@ int dev_get_deh_mgr(struct dev_object *hdev_obj,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(deh_manager != NULL);
- DBC_REQUIRE(hdev_obj);
if (hdev_obj) {
*deh_manager = hdev_obj->deh_mgr;
} else {
@@ -537,9 +489,6 @@ int dev_get_dev_node(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dev_nde != NULL);
-
if (hdev_obj) {
*dev_nde = dev_obj->dev_node_obj;
} else {
@@ -547,7 +496,6 @@ int dev_get_dev_node(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || (dev_nde != NULL && *dev_nde == NULL));
return status;
}
@@ -578,9 +526,6 @@ int dev_get_intf_fxns(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(if_fxns != NULL);
-
if (hdev_obj) {
*if_fxns = &dev_obj->bridge_interface;
} else {
@@ -588,7 +533,6 @@ int dev_get_intf_fxns(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || ((if_fxns != NULL) && (*if_fxns == NULL)));
return status;
}
@@ -600,10 +544,6 @@ int dev_get_io_mgr(struct dev_object *hdev_obj,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(io_man != NULL);
- DBC_REQUIRE(hdev_obj);
-
if (hdev_obj) {
*io_man = hdev_obj->iomgr;
} else {
@@ -638,10 +578,6 @@ struct dev_object *dev_get_next(struct dev_object *hdev_obj)
*/
void dev_get_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr **msg_man)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(msg_man != NULL);
- DBC_REQUIRE(hdev_obj);
-
*msg_man = hdev_obj->msg_mgr;
}
@@ -656,9 +592,6 @@ int dev_get_node_manager(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(node_man != NULL);
-
if (hdev_obj) {
*node_man = dev_obj->node_mgr;
} else {
@@ -666,7 +599,6 @@ int dev_get_node_manager(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || (node_man != NULL && *node_man == NULL));
return status;
}
@@ -679,9 +611,6 @@ int dev_get_symbol(struct dev_object *hdev_obj,
int status = 0;
struct cod_manager *cod_mgr;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(str_sym != NULL && pul_value != NULL);
-
if (hdev_obj) {
status = dev_get_cod_mgr(hdev_obj, &cod_mgr);
if (cod_mgr)
@@ -706,9 +635,6 @@ int dev_get_bridge_context(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(phbridge_context != NULL);
-
if (hdev_obj) {
*phbridge_context = dev_obj->bridge_context;
} else {
@@ -716,67 +642,10 @@ int dev_get_bridge_context(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || ((phbridge_context != NULL) &&
- (*phbridge_context == NULL)));
return status;
}
/*
- * ======== dev_exit ========
- * Purpose:
- * Decrement reference count, and free resources when reference count is
- * 0.
- */
-void dev_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- if (refs == 0) {
- cmm_exit();
- dmm_exit();
- }
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
- * ======== dev_init ========
- * Purpose:
- * Initialize DEV's private state, keeping a reference count on each call.
- */
-bool dev_init(void)
-{
- bool cmm_ret, dmm_ret, ret = true;
-
- DBC_REQUIRE(refs >= 0);
-
- if (refs == 0) {
- cmm_ret = cmm_init();
- dmm_ret = dmm_init();
-
- ret = cmm_ret && dmm_ret;
-
- if (!ret) {
- if (cmm_ret)
- cmm_exit();
-
- if (dmm_ret)
- dmm_exit();
-
- }
- }
-
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
- return ret;
-}
-
-/*
* ======== dev_notify_clients ========
* Purpose:
* Notify all clients of this device of a change in device status.
@@ -841,14 +710,11 @@ int dev_set_chnl_mgr(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
-
if (hdev_obj)
dev_obj->chnl_mgr = hmgr;
else
status = -EFAULT;
- DBC_ENSURE(status || (dev_obj->chnl_mgr == hmgr));
return status;
}
@@ -859,9 +725,6 @@ int dev_set_chnl_mgr(struct dev_object *hdev_obj,
*/
void dev_set_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr *hmgr)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hdev_obj);
-
hdev_obj->msg_mgr = hmgr;
}
@@ -879,8 +742,6 @@ int dev_start_device(struct cfg_devnode *dev_node_obj)
struct mgr_object *hmgr_obj = NULL;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(refs > 0);
-
/* Given all resources, create a device object. */
status = dev_create_device(&hdev_obj, bridge_file_name,
dev_node_obj);
@@ -944,9 +805,6 @@ static int init_cod_mgr(struct dev_object *dev_obj)
int status = 0;
char *sz_dummy_file = "dummy";
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(!dev_obj || (dev_obj->cod_mgr == NULL));
-
status = cod_create(&dev_obj->cod_mgr, sz_dummy_file);
return status;
@@ -976,10 +834,6 @@ int dev_insert_proc_object(struct dev_object *hdev_obj,
{
struct dev_object *dev_obj = (struct dev_object *)hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dev_obj);
- DBC_REQUIRE(proc_obj != 0);
- DBC_REQUIRE(already_attached != NULL);
if (!list_empty(&dev_obj->proc_list))
*already_attached = true;
@@ -1017,10 +871,6 @@ int dev_remove_proc_object(struct dev_object *hdev_obj, u32 proc_obj)
struct list_head *cur_elem;
struct dev_object *dev_obj = (struct dev_object *)hdev_obj;
- DBC_REQUIRE(dev_obj);
- DBC_REQUIRE(proc_obj != 0);
- DBC_REQUIRE(!list_empty(&dev_obj->proc_list));
-
/* Search list for dev_obj: */
list_for_each(cur_elem, &dev_obj->proc_list) {
if ((u32) cur_elem == proc_obj) {
@@ -1069,10 +919,6 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
(intf_fxns->pfn = ((drv_fxns->pfn != NULL) ? drv_fxns->pfn : \
(cast)fxn_not_implemented))
- DBC_REQUIRE(intf_fxns != NULL);
- DBC_REQUIRE(drv_fxns != NULL);
- DBC_REQUIRE(MAKEVERSION(drv_fxns->brd_api_major_version,
- drv_fxns->brd_api_minor_version) <= BRD_API_VERSION);
bridge_version = MAKEVERSION(drv_fxns->brd_api_major_version,
drv_fxns->brd_api_minor_version);
intf_fxns->brd_api_major_version = drv_fxns->brd_api_major_version;
@@ -1119,33 +965,5 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
STORE_FXN(fxn_msg_setqueueid, msg_set_queue_id);
}
/* Add code for any additional functions in newer Bridge versions here */
- /* Ensure postcondition: */
- DBC_ENSURE(intf_fxns->dev_create != NULL);
- DBC_ENSURE(intf_fxns->dev_destroy != NULL);
- DBC_ENSURE(intf_fxns->dev_cntrl != NULL);
- DBC_ENSURE(intf_fxns->brd_monitor != NULL);
- DBC_ENSURE(intf_fxns->brd_start != NULL);
- DBC_ENSURE(intf_fxns->brd_stop != NULL);
- DBC_ENSURE(intf_fxns->brd_status != NULL);
- DBC_ENSURE(intf_fxns->brd_read != NULL);
- DBC_ENSURE(intf_fxns->brd_write != NULL);
- DBC_ENSURE(intf_fxns->chnl_create != NULL);
- DBC_ENSURE(intf_fxns->chnl_destroy != NULL);
- DBC_ENSURE(intf_fxns->chnl_open != NULL);
- DBC_ENSURE(intf_fxns->chnl_close != NULL);
- DBC_ENSURE(intf_fxns->chnl_add_io_req != NULL);
- DBC_ENSURE(intf_fxns->chnl_get_ioc != NULL);
- DBC_ENSURE(intf_fxns->chnl_cancel_io != NULL);
- DBC_ENSURE(intf_fxns->chnl_flush_io != NULL);
- DBC_ENSURE(intf_fxns->chnl_get_info != NULL);
- DBC_ENSURE(intf_fxns->chnl_get_mgr_info != NULL);
- DBC_ENSURE(intf_fxns->chnl_idle != NULL);
- DBC_ENSURE(intf_fxns->chnl_register_notify != NULL);
- DBC_ENSURE(intf_fxns->io_create != NULL);
- DBC_ENSURE(intf_fxns->io_destroy != NULL);
- DBC_ENSURE(intf_fxns->io_on_loaded != NULL);
- DBC_ENSURE(intf_fxns->io_get_proc_load != NULL);
- DBC_ENSURE(intf_fxns->msg_set_queue_id != NULL);
-
#undef STORE_FXN
}
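The long DBC_ENSURE tail deleted above was redundant: the STORE_FXN
macro earlier in this function never stores NULL, substituting a stub
for every entry point the driver does not provide. In isolation (the
stub's -ENOSYS return is illustrative, not taken from the driver):

static int fxn_not_implemented(int arg, ...)
{
	return -ENOSYS;	/* calls through a missing fxn fail cleanly */
}

#define STORE_FXN(cast, pfn) \
	(intf_fxns->pfn = ((drv_fxns->pfn != NULL) ? drv_fxns->pfn : \
			   (cast)fxn_not_implemented))

Every interface pointer is therefore non-NULL by construction, which is
exactly what the removed postconditions re-checked.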
diff --git a/drivers/staging/tidspbridge/pmgr/dmm.c b/drivers/staging/tidspbridge/pmgr/dmm.c
index 8685233d7627..7c9f83916068 100644
--- a/drivers/staging/tidspbridge/pmgr/dmm.c
+++ b/drivers/staging/tidspbridge/pmgr/dmm.c
@@ -28,9 +28,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -54,8 +51,6 @@ struct dmm_object {
spinlock_t dmm_lock; /* Lock to access dmm mgr */
};
-/* ----------------------------------- Globals */
-static u32 refs; /* module reference count */
struct map_page {
u32 region_size:15;
u32 mapped_size:15;
@@ -123,8 +118,6 @@ int dmm_create(struct dmm_object **dmm_manager,
{
struct dmm_object *dmm_obj = NULL;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dmm_manager != NULL);
*dmm_manager = NULL;
/* create, zero, and tag a cmm mgr object */
@@ -149,7 +142,6 @@ int dmm_destroy(struct dmm_object *dmm_mgr)
struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
int status = 0;
- DBC_REQUIRE(refs > 0);
if (dmm_mgr) {
status = dmm_delete_tables(dmm_obj);
if (!status)
@@ -169,7 +161,6 @@ int dmm_delete_tables(struct dmm_object *dmm_mgr)
{
int status = 0;
- DBC_REQUIRE(refs > 0);
/* Delete all DMM tables */
if (dmm_mgr)
vfree(virtual_mapping_table);
@@ -179,19 +170,6 @@ int dmm_delete_tables(struct dmm_object *dmm_mgr)
}
/*
- * ======== dmm_exit ========
- * Purpose:
- * Discontinue usage of module; free resources when reference count
- * reaches 0.
- */
-void dmm_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-}
-
-/*
* ======== dmm_get_handle ========
* Purpose:
* Return the dynamic memory manager object for this device.
@@ -202,8 +180,6 @@ int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
int status = 0;
struct dev_object *hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dmm_manager != NULL);
if (hprocessor != NULL)
status = proc_get_dev_object(hprocessor, &hdev_obj);
else
@@ -216,28 +192,6 @@ int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
}
/*
- * ======== dmm_init ========
- * Purpose:
- * Initializes private state of DMM module.
- */
-bool dmm_init(void)
-{
- bool ret = true;
-
- DBC_REQUIRE(refs >= 0);
-
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
- virtual_mapping_table = NULL;
- table_size = 0;
-
- return ret;
-}
-
-/*
* ======== dmm_map_memory ========
* Purpose:
* Add a mapping block to the reserved chunk. DMM assumes that this block
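Note that the deleted dmm_init() also carried the assignments
`virtual_mapping_table = NULL; table_size = 0;`. Dropping them is safe
because both objects have static storage duration, and C guarantees
such objects start out zero-initialized; a minimal illustration of the
declarations involved (abridged from this file):

static struct map_page *virtual_mapping_table;	/* NULL before first use */
static u32 table_size;				/* 0 before first use */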
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c
index 767ffe270ed6..9ef1ad9527af 100644
--- a/drivers/staging/tidspbridge/pmgr/dspapi.c
+++ b/drivers/staging/tidspbridge/pmgr/dspapi.c
@@ -24,9 +24,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/ntfy.h>
@@ -266,25 +263,10 @@ err:
*/
void api_exit(void)
{
- DBC_REQUIRE(api_c_refs > 0);
api_c_refs--;
- if (api_c_refs == 0) {
- /* Release all modules initialized in api_init(). */
- cod_exit();
- dev_exit();
- chnl_exit();
- msg_exit();
- io_exit();
- strm_exit();
- disp_exit();
- node_exit();
- proc_exit();
+ if (api_c_refs == 0)
mgr_exit();
- rmm_exit();
- drv_exit();
- }
- DBC_ENSURE(api_c_refs >= 0);
}
/*
@@ -295,64 +277,10 @@ void api_exit(void)
bool api_init(void)
{
bool ret = true;
- bool fdrv, fdev, fcod, fchnl, fmsg, fio;
- bool fmgr, fproc, fnode, fdisp, fstrm, frmm;
-
- if (api_c_refs == 0) {
- /* initialize driver and other modules */
- fdrv = drv_init();
- fmgr = mgr_init();
- fproc = proc_init();
- fnode = node_init();
- fdisp = disp_init();
- fstrm = strm_init();
- frmm = rmm_init();
- fchnl = chnl_init();
- fmsg = msg_mod_init();
- fio = io_init();
- fdev = dev_init();
- fcod = cod_init();
- ret = fdrv && fdev && fchnl && fcod && fmsg && fio;
- ret = ret && fmgr && fproc && frmm;
- if (!ret) {
- if (fdrv)
- drv_exit();
-
- if (fmgr)
- mgr_exit();
-
- if (fstrm)
- strm_exit();
-
- if (fproc)
- proc_exit();
-
- if (fnode)
- node_exit();
-
- if (fdisp)
- disp_exit();
-
- if (fchnl)
- chnl_exit();
-
- if (fmsg)
- msg_exit();
-
- if (fio)
- io_exit();
-
- if (fdev)
- dev_exit();
-
- if (fcod)
- cod_exit();
-
- if (frmm)
- rmm_exit();
- }
- }
+ if (api_c_refs == 0)
+ ret = mgr_init();
+
if (ret)
api_c_refs++;
@@ -382,8 +310,6 @@ int api_init_complete2(void)
struct drv_data *drv_datap;
u8 dev_type;
- DBC_REQUIRE(api_c_refs > 0);
-
/* Walk the list of DevObjects, get each devnode, and attempt to
* autostart the board. Note that this requires COF loading, which
* requires KFILE. */
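The api_init() rollback chain removed above (one flag per module plus a
cascade of `if (fX) fX_exit()` calls) is the situation the kernel's
usual goto-unwind idiom is meant for. A comparison sketch using three
of the removed module names, assuming each init returns true on
success:

bool api_init_sketch(void)
{
	if (!drv_init())
		goto err_drv;
	if (!mgr_init())
		goto err_mgr;
	if (!proc_init())
		goto err_proc;
	return true;

err_proc:
	mgr_exit();
err_mgr:
	drv_exit();
err_drv:
	return false;
}

After this patch the question is moot: only mgr_init() remains, so
there is nothing to unwind.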
diff --git a/drivers/staging/tidspbridge/pmgr/io.c b/drivers/staging/tidspbridge/pmgr/io.c
index 65245f310f89..4073c9c672fd 100644
--- a/drivers/staging/tidspbridge/pmgr/io.c
+++ b/drivers/staging/tidspbridge/pmgr/io.c
@@ -23,9 +23,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
@@ -33,9 +30,6 @@
#include <ioobj.h>
#include <dspbridge/io.h>
-/* ----------------------------------- Globals */
-static u32 refs;
-
/*
* ======== io_create ========
* Purpose:
@@ -50,10 +44,6 @@ int io_create(struct io_mgr **io_man, struct dev_object *hdev_obj,
struct io_mgr_ *pio_mgr = NULL;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(io_man != NULL);
- DBC_REQUIRE(mgr_attrts != NULL);
-
*io_man = NULL;
/* A memory base of 0 implies no memory base: */
@@ -94,8 +84,6 @@ int io_destroy(struct io_mgr *hio_mgr)
struct io_mgr_ *pio_mgr = (struct io_mgr_ *)hio_mgr;
int status;
- DBC_REQUIRE(refs > 0);
-
intf_fxns = pio_mgr->intf_fxns;
/* Let Bridge channel module destroy the io_mgr: */
@@ -103,36 +91,3 @@ int io_destroy(struct io_mgr *hio_mgr)
return status;
}
-
-/*
- * ======== io_exit ========
- * Purpose:
- * Discontinue usage of the IO module.
- */
-void io_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
- * ======== io_init ========
- * Purpose:
- * Initialize the IO module's private state.
- */
-bool io_init(void)
-{
- bool ret = true;
-
- DBC_REQUIRE(refs >= 0);
-
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
- return ret;
-}
diff --git a/drivers/staging/tidspbridge/pmgr/msg.c b/drivers/staging/tidspbridge/pmgr/msg.c
index a6916039eed6..f093cfb51c00 100644
--- a/drivers/staging/tidspbridge/pmgr/msg.c
+++ b/drivers/staging/tidspbridge/pmgr/msg.c
@@ -23,9 +23,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- Bridge Driver */
#include <dspbridge/dspdefs.h>
@@ -36,9 +33,6 @@
#include <msgobj.h>
#include <dspbridge/msg.h>
-/* ----------------------------------- Globals */
-static u32 refs; /* module reference count */
-
/*
* ======== msg_create ========
* Purpose:
@@ -53,11 +47,6 @@ int msg_create(struct msg_mgr **msg_man,
struct msg_mgr *hmsg_mgr;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(msg_man != NULL);
- DBC_REQUIRE(msg_callback != NULL);
- DBC_REQUIRE(hdev_obj != NULL);
-
*msg_man = NULL;
dev_get_intf_fxns(hdev_obj, &intf_fxns);
@@ -90,8 +79,6 @@ void msg_delete(struct msg_mgr *hmsg_mgr)
struct msg_mgr_ *msg_mgr_obj = (struct msg_mgr_ *)hmsg_mgr;
struct bridge_drv_interface *intf_fxns;
- DBC_REQUIRE(refs > 0);
-
if (msg_mgr_obj) {
intf_fxns = msg_mgr_obj->intf_fxns;
@@ -102,28 +89,3 @@ void msg_delete(struct msg_mgr *hmsg_mgr)
__func__, hmsg_mgr);
}
}
-
-/*
- * ======== msg_exit ========
- */
-void msg_exit(void)
-{
- DBC_REQUIRE(refs > 0);
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
- * ======== msg_mod_init ========
- */
-bool msg_mod_init(void)
-{
- DBC_REQUIRE(refs >= 0);
-
- refs++;
-
- DBC_ENSURE(refs >= 0);
-
- return true;
-}
diff --git a/drivers/staging/tidspbridge/rmgr/dbdcd.c b/drivers/staging/tidspbridge/rmgr/dbdcd.c
index fda240214cd6..12a1d34b3954 100644
--- a/drivers/staging/tidspbridge/rmgr/dbdcd.c
+++ b/drivers/staging/tidspbridge/rmgr/dbdcd.c
@@ -29,8 +29,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/cod.h>
@@ -85,8 +83,6 @@ int dcd_auto_register(struct dcd_manager *hdcd_mgr,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
-
if (hdcd_mgr)
status = dcd_get_objects(hdcd_mgr, sz_coff_path,
(dcd_registerfxn) dcd_register_object,
@@ -107,8 +103,6 @@ int dcd_auto_unregister(struct dcd_manager *hdcd_mgr,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
-
if (hdcd_mgr)
status = dcd_get_objects(hdcd_mgr, sz_coff_path,
(dcd_registerfxn) dcd_register_object,
@@ -131,9 +125,6 @@ int dcd_create_manager(char *sz_zl_dll_name,
struct dcd_manager *dcd_mgr_obj = NULL; /* DCD Manager pointer */
int status = 0;
- DBC_REQUIRE(refs >= 0);
- DBC_REQUIRE(dcd_mgr);
-
status = cod_create(&cod_mgr, sz_zl_dll_name);
if (status)
goto func_end;
@@ -156,9 +147,6 @@ int dcd_create_manager(char *sz_zl_dll_name,
cod_delete(cod_mgr);
}
- DBC_ENSURE((!status) ||
- ((dcd_mgr_obj == NULL) && (status == -ENOMEM)));
-
func_end:
return status;
}
@@ -173,8 +161,6 @@ int dcd_destroy_manager(struct dcd_manager *hdcd_mgr)
struct dcd_manager *dcd_mgr_obj = hdcd_mgr;
int status = -EFAULT;
- DBC_REQUIRE(refs >= 0);
-
if (hdcd_mgr) {
/* Delete the COD manager. */
cod_delete(dcd_mgr_obj->cod_mgr);
@@ -205,10 +191,6 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
struct dcd_key_elem *dcd_key;
int len;
- DBC_REQUIRE(refs >= 0);
- DBC_REQUIRE(index >= 0);
- DBC_REQUIRE(uuid_obj != NULL);
-
if ((index != 0) && (enum_refs == 0)) {
/*
* If an enumeration is being performed on an index greater
@@ -222,7 +204,6 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
* "_\0" + length of sz_obj_type string + terminating NULL.
*/
dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
- DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
/* Create proper REG key; concatenate DCD_REGKEY with
* obj_type. */
@@ -294,8 +275,6 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
}
}
- DBC_ENSURE(uuid_obj || (status == -EPERM));
-
return status;
}
@@ -307,11 +286,9 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
void dcd_exit(void)
{
struct dcd_key_elem *rv, *rv_tmp;
- DBC_REQUIRE(refs > 0);
refs--;
if (refs == 0) {
- cod_exit();
list_for_each_entry_safe(rv, rv_tmp, &reg_key_list, link) {
list_del(&rv->link);
kfree(rv->path);
@@ -319,7 +296,6 @@ void dcd_exit(void)
}
}
- DBC_ENSURE(refs >= 0);
}
/*
@@ -333,12 +309,6 @@ int dcd_get_dep_libs(struct dcd_manager *hdcd_mgr,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hdcd_mgr);
- DBC_REQUIRE(uuid_obj != NULL);
- DBC_REQUIRE(dep_lib_uuids != NULL);
- DBC_REQUIRE(prstnt_dep_libs != NULL);
-
status =
get_dep_lib_info(hdcd_mgr, uuid_obj, &num_libs, NULL, dep_lib_uuids,
prstnt_dep_libs, phase);
@@ -356,12 +326,6 @@ int dcd_get_num_dep_libs(struct dcd_manager *hdcd_mgr,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hdcd_mgr);
- DBC_REQUIRE(num_libs != NULL);
- DBC_REQUIRE(num_pers_libs != NULL);
- DBC_REQUIRE(uuid_obj != NULL);
-
status = get_dep_lib_info(hdcd_mgr, uuid_obj, num_libs, num_pers_libs,
NULL, NULL, phase);
@@ -393,10 +357,6 @@ int dcd_get_object_def(struct dcd_manager *hdcd_mgr,
u32 dw_key_len; /* Len of REG key. */
char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(obj_def != NULL);
- DBC_REQUIRE(obj_uuid != NULL);
-
sz_uuid = kzalloc(MAXUUIDLEN, GFP_KERNEL);
if (!sz_uuid) {
status = -ENOMEM;
@@ -411,7 +371,6 @@ int dcd_get_object_def(struct dcd_manager *hdcd_mgr,
/* Pre-determine final key length. It's length of DCD_REGKEY +
* "_\0" + length of sz_obj_type string + terminating NULL */
dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
- DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
/* Create proper REG key; concatenate DCD_REGKEY with obj_type. */
strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
@@ -470,7 +429,6 @@ int dcd_get_object_def(struct dcd_manager *hdcd_mgr,
}
/* Ensure strlen(sz_uuid) + 1 stays below sizeof(sz_sect_name). */
- DBC_ASSERT((strlen(sz_uuid) + 1) < sizeof(sz_sect_name));
/* Create section name based on node UUID. A period is
* pre-pended to the UUID string to form the section name.
@@ -553,7 +511,6 @@ int dcd_get_objects(struct dcd_manager *hdcd_mgr,
struct dsp_uuid dsp_uuid_obj;
s32 object_type;
- DBC_REQUIRE(refs > 0);
if (!hdcd_mgr) {
status = -EFAULT;
goto func_end;
@@ -663,11 +620,6 @@ int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
int status = 0;
struct dcd_key_elem *dcd_key = NULL;
- DBC_REQUIRE(uuid_obj != NULL);
- DBC_REQUIRE(str_lib_name != NULL);
- DBC_REQUIRE(buff_size != NULL);
- DBC_REQUIRE(hdcd_mgr);
-
dev_dbg(bridge, "%s: hdcd_mgr %p, uuid_obj %p, str_lib_name %p,"
" buff_size %p\n", __func__, hdcd_mgr, uuid_obj, str_lib_name,
buff_size);
@@ -677,7 +629,6 @@ int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
* "_\0" + length of sz_obj_type string + terminating NULL.
*/
dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
- DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
/* Create proper REG key; concatenate DCD_REGKEY with obj_type. */
strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
@@ -705,7 +656,6 @@ int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
break;
default:
status = -EINVAL;
- DBC_ASSERT(false);
}
if (!status) {
if ((strlen(sz_reg_key) + strlen(sz_obj_type)) <
@@ -787,30 +737,14 @@ int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
*/
bool dcd_init(void)
{
- bool init_cod;
bool ret = true;
- DBC_REQUIRE(refs >= 0);
-
- if (refs == 0) {
- /* Initialize required modules. */
- init_cod = cod_init();
-
- if (!init_cod) {
- ret = false;
- /* Exit initialized modules. */
- if (init_cod)
- cod_exit();
- }
-
+ if (refs == 0)
INIT_LIST_HEAD(&reg_key_list);
- }
if (ret)
refs++;
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs == 0)));
-
return ret;
}
@@ -832,15 +766,6 @@ int dcd_register_object(struct dsp_uuid *uuid_obj,
char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */
struct dcd_key_elem *dcd_key = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(uuid_obj != NULL);
- DBC_REQUIRE((obj_type == DSP_DCDNODETYPE) ||
- (obj_type == DSP_DCDPROCESSORTYPE) ||
- (obj_type == DSP_DCDLIBRARYTYPE) ||
- (obj_type == DSP_DCDCREATELIBTYPE) ||
- (obj_type == DSP_DCDEXECUTELIBTYPE) ||
- (obj_type == DSP_DCDDELETELIBTYPE));
-
dev_dbg(bridge, "%s: object UUID %p, obj_type %d, szPathName %s\n",
__func__, uuid_obj, obj_type, psz_path_name);
@@ -849,7 +774,6 @@ int dcd_register_object(struct dsp_uuid *uuid_obj,
* "_\0" + length of sz_obj_type string + terminating NULL.
*/
dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
- DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
/* Create proper REG key; concatenate DCD_REGKEY with obj_type. */
strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
@@ -987,15 +911,6 @@ int dcd_unregister_object(struct dsp_uuid *uuid_obj,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(uuid_obj != NULL);
- DBC_REQUIRE((obj_type == DSP_DCDNODETYPE) ||
- (obj_type == DSP_DCDPROCESSORTYPE) ||
- (obj_type == DSP_DCDLIBRARYTYPE) ||
- (obj_type == DSP_DCDCREATELIBTYPE) ||
- (obj_type == DSP_DCDEXECUTELIBTYPE) ||
- (obj_type == DSP_DCDDELETELIBTYPE));
-
/*
* When dcd_register_object is called with NULL as pathname,
* it indicates an unregister object operation.
@@ -1055,12 +970,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
s32 entry_id;
#endif
- DBC_REQUIRE(psz_buf != NULL);
- DBC_REQUIRE(ul_buf_size != 0);
- DBC_REQUIRE((obj_type == DSP_DCDNODETYPE)
- || (obj_type == DSP_DCDPROCESSORTYPE));
- DBC_REQUIRE(gen_obj != NULL);
-
switch (obj_type) {
case DSP_DCDNODETYPE:
/*
@@ -1082,7 +991,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
token = strsep(&psz_cur, seps);
/* ac_name */
- DBC_REQUIRE(token);
token_len = strlen(token);
if (token_len > DSP_MAXNAMELEN - 1)
token_len = DSP_MAXNAMELEN - 1;
@@ -1167,7 +1075,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
token = strsep(&psz_cur, seps);
/* char *str_create_phase_fxn */
- DBC_REQUIRE(token);
token_len = strlen(token);
gen_obj->obj_data.node_obj.str_create_phase_fxn =
kzalloc(token_len + 1, GFP_KERNEL);
@@ -1178,7 +1085,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
token = strsep(&psz_cur, seps);
/* char *str_execute_phase_fxn */
- DBC_REQUIRE(token);
token_len = strlen(token);
gen_obj->obj_data.node_obj.str_execute_phase_fxn =
kzalloc(token_len + 1, GFP_KERNEL);
@@ -1189,7 +1095,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
token = strsep(&psz_cur, seps);
/* char *str_delete_phase_fxn */
- DBC_REQUIRE(token);
token_len = strlen(token);
gen_obj->obj_data.node_obj.str_delete_phase_fxn =
kzalloc(token_len + 1, GFP_KERNEL);
@@ -1421,12 +1326,6 @@ static int get_dep_lib_info(struct dcd_manager *hdcd_mgr,
u16 dep_libs = 0;
int status = 0;
- DBC_REQUIRE(refs > 0);
-
- DBC_REQUIRE(hdcd_mgr);
- DBC_REQUIRE(num_libs != NULL);
- DBC_REQUIRE(uuid_obj != NULL);
-
/* If we are only counting dependent libraries, initialize the
 * count to 0 */
if (!get_uuids) {
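dbdcd.c computes dw_key_len = strlen(DCD_REGKEY) + 1 +
sizeof(sz_obj_type) + 1 in several places, and the deleted DBC_ASSERTs
compared it against DCD_MAXPATHLENGTH at runtime. Since DCD_REGKEY is a
string-literal macro and sz_obj_type a fixed char[MAX_INT2CHAR_LENGTH]
array, the same bound could be enforced at compile time instead; a
hypothetical replacement:

#include <linux/bug.h>

static void dcd_check_key_bound(void)
{
	/* strlen(DCD_REGKEY) == sizeof(DCD_REGKEY) - 1 for a literal */
	BUILD_BUG_ON(sizeof(DCD_REGKEY) - 1 + 1 +
		     MAX_INT2CHAR_LENGTH + 1 >= DCD_MAXPATHLENGTH);
}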
diff --git a/drivers/staging/tidspbridge/rmgr/disp.c b/drivers/staging/tidspbridge/rmgr/disp.c
index a9aa22f3b4f6..4af51b75aeab 100644
--- a/drivers/staging/tidspbridge/rmgr/disp.c
+++ b/drivers/staging/tidspbridge/rmgr/disp.c
@@ -24,9 +24,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -72,8 +69,6 @@ struct disp_object {
u32 data_mau_size; /* Size of DSP Data MAU */
};
-static u32 refs;
-
static void delete_disp(struct disp_object *disp_obj);
static int fill_stream_def(rms_word *pdw_buf, u32 *ptotal, u32 offset,
struct node_strmdef strm_def, u32 max,
@@ -96,11 +91,6 @@ int disp_create(struct disp_object **dispatch_obj,
int status = 0;
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dispatch_obj != NULL);
- DBC_REQUIRE(disp_attrs != NULL);
- DBC_REQUIRE(hdev_obj != NULL);
-
*dispatch_obj = NULL;
/* Allocate Node Dispatcher object */
@@ -168,8 +158,6 @@ func_cont:
else
delete_disp(disp_obj);
- DBC_ENSURE((status && *dispatch_obj == NULL) ||
- (!status && *dispatch_obj));
return status;
}
@@ -179,43 +167,10 @@ func_cont:
*/
void disp_delete(struct disp_object *disp_obj)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(disp_obj);
-
delete_disp(disp_obj);
}
/*
- * ======== disp_exit ========
- * Discontinue usage of DISP module.
- */
-void disp_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
- * ======== disp_init ========
- * Initialize the DISP module.
- */
-bool disp_init(void)
-{
- bool ret = true;
-
- DBC_REQUIRE(refs >= 0);
-
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
- return ret;
-}
-
-/*
* ======== disp_node_change_priority ========
* Change the priority of a node currently running on the target.
*/
@@ -227,10 +182,6 @@ int disp_node_change_priority(struct disp_object *disp_obj,
struct rms_command *rms_cmd;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(disp_obj);
- DBC_REQUIRE(hnode != NULL);
-
/* Send message to RMS to change priority */
rms_cmd = (struct rms_command *)(disp_obj->buf);
rms_cmd->fxn = (rms_word) (rms_fxn);
@@ -276,12 +227,6 @@ int disp_node_create(struct disp_object *disp_obj,
struct dsp_nodeinfo node_info;
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(disp_obj);
- DBC_REQUIRE(hnode != NULL);
- DBC_REQUIRE(node_get_type(hnode) != NODE_DEVICE);
- DBC_REQUIRE(node_env != NULL);
-
status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);
if (status)
@@ -292,11 +237,9 @@ int disp_node_create(struct disp_object *disp_obj,
__func__, dev_type);
goto func_end;
}
- DBC_REQUIRE(pargs != NULL);
node_type = node_get_type(hnode);
node_msg_args = pargs->asa.node_msg_args;
max = disp_obj->bufsize_rms; /* Max # of RMS words that can be sent */
- DBC_ASSERT(max == RMS_COMMANDBUFSIZE);
chars_in_rms_word = sizeof(rms_word) / disp_obj->char_size;
/* Number of RMS words needed to hold arg data */
dw_length =
@@ -457,7 +400,6 @@ int disp_node_create(struct disp_object *disp_obj,
}
if (!status) {
ul_bytes = total * sizeof(rms_word);
- DBC_ASSERT(ul_bytes < (RMS_COMMANDBUFSIZE * sizeof(rms_word)));
status = send_message(disp_obj, node_get_timeout(hnode),
ul_bytes, node_env);
}
@@ -480,10 +422,6 @@ int disp_node_delete(struct disp_object *disp_obj,
int status = 0;
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(disp_obj);
- DBC_REQUIRE(hnode != NULL);
-
status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);
if (!status) {
@@ -521,9 +459,6 @@ int disp_node_run(struct disp_object *disp_obj,
struct rms_command *rms_cmd;
int status = 0;
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(disp_obj);
- DBC_REQUIRE(hnode != NULL);
status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);
@@ -620,7 +555,6 @@ static int fill_stream_def(rms_word *pdw_buf, u32 *ptotal, u32 offset,
* 1 from total.
*/
total += sizeof(struct rms_strm_def) / sizeof(rms_word) - 1;
- DBC_REQUIRE(strm_def.sz_device);
dw_length = strlen(strm_def.sz_device) + 1;
/* Number of RMS_WORDS needed to hold device name */
@@ -659,8 +593,6 @@ static int send_message(struct disp_object *disp_obj, u32 timeout,
struct chnl_ioc chnl_ioc_obj;
int status = 0;
- DBC_REQUIRE(pdw_arg != NULL);
-
*pdw_arg = (u32) NULL;
intf_fxns = disp_obj->intf_fxns;
chnl_obj = disp_obj->chnl_to_dsp;
@@ -703,7 +635,6 @@ static int send_message(struct disp_object *disp_obj, u32 timeout,
status = -EPERM;
} else {
if (CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
- DBC_ASSERT(chnl_ioc_obj.buf == pbuf);
if (*((int *)chnl_ioc_obj.buf) < 0) {
/* Translate DSP's to kernel error */
status = -EREMOTEIO;
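fill_stream_def() above repeatedly converts byte lengths such as
strlen(strm_def.sz_device) + 1 into a count of rms_word slots. The
rounding involved is ordinary ceiling division, sketched here with
illustrative names:

#include <linux/types.h>

/* Number of rms_word slots needed to hold dw_length characters */
static u32 bytes_to_rms_words(u32 dw_length, u32 chars_in_rms_word)
{
	return (dw_length + chars_in_rms_word - 1) / chars_in_rms_word;
}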
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c
index db8215f540d8..6795205b0155 100644
--- a/drivers/staging/tidspbridge/rmgr/drv.c
+++ b/drivers/staging/tidspbridge/rmgr/drv.c
@@ -24,9 +24,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- This */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>
@@ -54,7 +51,6 @@ struct drv_ext {
};
/* ----------------------------------- Globals */
-static s32 refs;
static bool ext_phys_mem_pool_enabled;
struct ext_phys_mem_pool {
u32 phys_mem_base;
@@ -172,7 +168,6 @@ void drv_proc_node_update_status(void *node_resource, s32 status)
{
struct node_res_object *node_res_obj =
(struct node_res_object *)node_resource;
- DBC_ASSERT(node_resource != NULL);
node_res_obj->node_allocated = status;
}
@@ -181,7 +176,6 @@ void drv_proc_node_update_heap_status(void *node_resource, s32 status)
{
struct node_res_object *node_res_obj =
(struct node_res_object *)node_resource;
- DBC_ASSERT(node_resource != NULL);
node_res_obj->heap_allocated = status;
}
@@ -308,9 +302,6 @@ int drv_create(struct drv_object **drv_obj)
struct drv_object *pdrv_object = NULL;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(drv_obj != NULL);
- DBC_REQUIRE(refs > 0);
-
pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
if (pdrv_object) {
/* Create and Initialize List of device objects */
@@ -336,25 +327,10 @@ int drv_create(struct drv_object **drv_obj)
kfree(pdrv_object);
}
- DBC_ENSURE(status || pdrv_object);
return status;
}
/*
- * ======== drv_exit ========
- * Purpose:
- * Discontinue usage of the DRV module.
- */
-void drv_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
* ======== = drv_destroy ======== =
* purpose:
* Invoked during bridge de-initialization
@@ -365,9 +341,6 @@ int drv_destroy(struct drv_object *driver_obj)
struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pdrv_object);
-
kfree(pdrv_object);
/* Update the DRV Object in the driver data */
if (drv_datap) {
@@ -389,17 +362,8 @@ int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
struct dev_object **device_obj)
{
int status = 0;
-#ifdef CONFIG_TIDSPBRIDGE_DEBUG
- /* used only for Assertions and debug messages */
- struct drv_object *pdrv_obj = (struct drv_object *)hdrv_obj;
-#endif
struct dev_object *dev_obj;
u32 i;
- DBC_REQUIRE(pdrv_obj);
- DBC_REQUIRE(device_obj != NULL);
- DBC_REQUIRE(index >= 0);
- DBC_REQUIRE(refs > 0);
- DBC_ASSERT(!(list_empty(&pdrv_obj->dev_list)));
dev_obj = (struct dev_object *)drv_get_first_dev_object();
for (i = 0; i < index; i++) {
@@ -524,25 +488,6 @@ u32 drv_get_next_dev_extension(u32 dev_extension)
}
/*
- * ======== drv_init ========
- * Purpose:
- * Initialize DRV module private state.
- */
-int drv_init(void)
-{
- s32 ret = 1; /* function return value */
-
- DBC_REQUIRE(refs >= 0);
-
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
- return ret;
-}
-
-/*
* ======== drv_insert_dev_object ========
* Purpose:
* Insert a DevObject into the list of Manager object.
@@ -552,10 +497,6 @@ int drv_insert_dev_object(struct drv_object *driver_obj,
{
struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hdev_obj != NULL);
- DBC_REQUIRE(pdrv_object);
-
list_add_tail((struct list_head *)hdev_obj, &pdrv_object->dev_list);
return 0;
@@ -574,12 +515,6 @@ int drv_remove_dev_object(struct drv_object *driver_obj,
struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
struct list_head *cur_elem;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pdrv_object);
- DBC_REQUIRE(hdev_obj != NULL);
-
- DBC_REQUIRE(!list_empty(&pdrv_object->dev_list));
-
/* Search list for p_proc_object: */
list_for_each(cur_elem, &pdrv_object->dev_list) {
/* If found, remove it. */
@@ -605,9 +540,6 @@ int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
struct drv_ext *pszdev_node;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(dw_context != 0);
- DBC_REQUIRE(dev_node_strg != NULL);
-
/*
* Allocate memory to hold the string. This will live until
* it is freed in the Release resources. Update the driver object
@@ -639,10 +571,6 @@ int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
*dev_node_strg = 0;
}
- DBC_ENSURE((!status && dev_node_strg != NULL &&
- !list_empty(&pdrv_object->dev_node_string)) ||
- (status && *dev_node_strg == 0));
-
return status;
}
@@ -900,8 +828,6 @@ void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
void mem_free_phys_mem(void *virtual_address, u32 physical_address,
u32 byte_size)
{
- DBC_REQUIRE(virtual_address != NULL);
-
if (!ext_phys_mem_pool_enabled)
dma_free_coherent(NULL, byte_size, virtual_address,
physical_address);
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 385740bad0de..3cac01492063 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -16,11 +16,8 @@
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
-/* ----------------------------------- Host OS */
-
#include <plat/dsp.h>
-#include <dspbridge/host_os.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
@@ -33,36 +30,25 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/clk.h>
-#include <dspbridge/sync.h>
/* ----------------------------------- Platform Manager */
-#include <dspbridge/dspapi-ioctl.h>
#include <dspbridge/dspapi.h>
#include <dspbridge/dspdrv.h>
/* ----------------------------------- Resource Manager */
#include <dspbridge/pwr.h>
-/* ----------------------------------- This */
-#include <drv_interface.h>
-
#include <dspbridge/resourcecleanup.h>
-#include <dspbridge/chnl.h>
#include <dspbridge/proc.h>
#include <dspbridge/dev.h>
-#include <dspbridge/drv.h>
#ifdef CONFIG_TIDSPBRIDGE_DVFS
#include <mach-omap2/omap3-opp.h>
#endif
/* ----------------------------------- Globals */
-#define DRIVER_NAME "DspBridge"
#define DSPBRIDGE_VERSION "0.3"
s32 dsp_debug;
@@ -131,7 +117,166 @@ MODULE_AUTHOR("Texas Instruments");
MODULE_LICENSE("GPL");
MODULE_VERSION(DSPBRIDGE_VERSION);
-static char *driver_name = DRIVER_NAME;
+/*
+ * This function is called when an application opens handle to the
+ * bridge driver.
+ */
+static int bridge_open(struct inode *ip, struct file *filp)
+{
+ int status = 0;
+ struct process_context *pr_ctxt = NULL;
+
+ /*
+ * Allocate a new process context and insert it into global
+ * process context list.
+ */
+
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ if (recover) {
+ if (filp->f_flags & O_NONBLOCK ||
+ wait_for_completion_interruptible(&bridge_open_comp))
+ return -EBUSY;
+ }
+#endif
+ pr_ctxt = kzalloc(sizeof(struct process_context), GFP_KERNEL);
+ if (!pr_ctxt)
+ return -ENOMEM;
+
+ pr_ctxt->res_state = PROC_RES_ALLOCATED;
+ spin_lock_init(&pr_ctxt->dmm_map_lock);
+ INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
+ spin_lock_init(&pr_ctxt->dmm_rsv_lock);
+ INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
+
+ pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
+ if (!pr_ctxt->node_id) {
+ status = -ENOMEM;
+ goto err1;
+ }
+
+ idr_init(pr_ctxt->node_id);
+
+ pr_ctxt->stream_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
+ if (!pr_ctxt->stream_id) {
+ status = -ENOMEM;
+ goto err2;
+ }
+
+ idr_init(pr_ctxt->stream_id);
+
+ filp->private_data = pr_ctxt;
+
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ atomic_inc(&bridge_cref);
+#endif
+ return 0;
+
+err2:
+ kfree(pr_ctxt->node_id);
+err1:
+ kfree(pr_ctxt);
+ return status;
+}
+
+/*
+ * This function is called when an application closes a handle to the bridge
+ * driver.
+ */
+static int bridge_release(struct inode *ip, struct file *filp)
+{
+ int status = 0;
+ struct process_context *pr_ctxt;
+
+ if (!filp->private_data) {
+ status = -EIO;
+ goto err;
+ }
+
+ pr_ctxt = filp->private_data;
+ flush_signals(current);
+ drv_remove_all_resources(pr_ctxt);
+ proc_detach(pr_ctxt);
+ kfree(pr_ctxt->node_id);
+ kfree(pr_ctxt->stream_id);
+ kfree(pr_ctxt);
+
+ filp->private_data = NULL;
+
+err:
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ if (!atomic_dec_return(&bridge_cref))
+ complete(&bridge_comp);
+#endif
+ return status;
+}
+
+/* This function provides IO interface to the bridge driver. */
+static long bridge_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ int status;
+ u32 retval = 0;
+ union trapped_args buf_in;
+
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ if (recover) {
+ status = -EIO;
+ goto err;
+ }
+#endif
+#ifdef CONFIG_PM
+ status = omap34_xxbridge_suspend_lockout(&bridge_suspend_data, filp);
+ if (status != 0)
+ return status;
+#endif
+
+ if (!filp->private_data) {
+ status = -EIO;
+ goto err;
+ }
+
+ status = copy_from_user(&buf_in, (union trapped_args *)args,
+ sizeof(union trapped_args));
+
+ if (!status) {
+ status = api_call_dev_ioctl(code, &buf_in, &retval,
+ filp->private_data);
+
+ if (!status) {
+ status = retval;
+ } else {
+ dev_dbg(bridge, "%s: IOCTL Failed, code: 0x%x "
+ "status 0x%x\n", __func__, code, status);
+ status = -1;
+ }
+
+ }
+
+err:
+ return status;
+}
+
+/* This function maps kernel space memory to user space memory. */
+static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ u32 status;
+
+ vma->vm_flags |= VM_RESERVED | VM_IO;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	dev_dbg(bridge, "%s: vm filp %p start %lx end %lx page_prot %lx "
+			"flags %lx\n", __func__, filp,
+			vma->vm_start, vma->vm_end,
+			pgprot_val(vma->vm_page_prot),
+			vma->vm_flags);
+
+ status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ if (status != 0)
+ status = -EAGAIN;
+
+ return status;
+}
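From user space, bridge_mmap() is reached through an ordinary mmap() on
the bridge character device. A hypothetical call sequence (the /dev
node name is an assumption; it depends on how the "DspBridge" chrdev is
surfaced by udev on a given system):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_bridge_shm(size_t len, off_t offset)
{
	int fd = open("/dev/DspBridge", O_RDWR);	/* assumed node name */
	void *shm;

	if (fd < 0)
		return NULL;

	/* the page-aligned offset becomes vma->vm_pgoff in bridge_mmap() */
	shm = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, offset);
	close(fd);	/* an established mapping survives the close */

	return shm == MAP_FAILED ? NULL : shm;
}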
static const struct file_operations bridge_fops = {
.open = bridge_open,
@@ -211,10 +356,10 @@ void bridge_recover_schedule(void)
#endif
#ifdef CONFIG_TIDSPBRIDGE_DVFS
static int dspbridge_scale_notification(struct notifier_block *op,
- unsigned long val, void *ptr)
+ unsigned long val, void *ptr)
{
struct omap_dsp_platform_data *pdata =
- omap_dspbridge_dev->dev.platform_data;
+ omap_dspbridge_dev->dev.platform_data;
if (CPUFREQ_POSTCHANGE == val && pdata->dsp_get_opp)
pwr_pm_post_scale(PRCM_VDD1, pdata->dsp_get_opp());
@@ -319,7 +464,7 @@ err2:
err1:
#ifdef CONFIG_TIDSPBRIDGE_DVFS
cpufreq_unregister_notifier(&iva_clk_notifier,
- CPUFREQ_TRANSITION_NOTIFIER);
+ CPUFREQ_TRANSITION_NOTIFIER);
#endif
dsp_clk_exit();
@@ -345,7 +490,7 @@ static int __devinit omap34_xx_bridge_probe(struct platform_device *pdev)
goto err1;
/* use 2.6 device model */
- err = alloc_chrdev_region(&dev, 0, 1, driver_name);
+ err = alloc_chrdev_region(&dev, 0, 1, "DspBridge");
if (err) {
pr_err("%s: Can't get major %d\n", __func__, driver_major);
goto err1;
@@ -385,7 +530,6 @@ err1:
static int __devexit omap34_xx_bridge_remove(struct platform_device *pdev)
{
dev_t devno;
- bool ret;
int status = 0;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
@@ -398,16 +542,15 @@ static int __devexit omap34_xx_bridge_remove(struct platform_device *pdev)
#ifdef CONFIG_TIDSPBRIDGE_DVFS
if (cpufreq_unregister_notifier(&iva_clk_notifier,
- CPUFREQ_TRANSITION_NOTIFIER))
+ CPUFREQ_TRANSITION_NOTIFIER))
pr_err("%s: cpufreq_unregister_notifier failed for iva2_ck\n",
__func__);
#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
if (driver_context) {
/* Put the DSP in reset state */
- ret = dsp_deinit(driver_context);
+ dsp_deinit(driver_context);
driver_context = 0;
- DBC_ASSERT(ret == true);
}
kfree(drv_datap);
@@ -431,7 +574,7 @@ func_cont:
}
#ifdef CONFIG_PM
-static int BRIDGE_SUSPEND(struct platform_device *pdev, pm_message_t state)
+static int bridge_suspend(struct platform_device *pdev, pm_message_t state)
{
u32 status;
u32 command = PWR_EMERGENCYDEEPSLEEP;
@@ -444,7 +587,7 @@ static int BRIDGE_SUSPEND(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int BRIDGE_RESUME(struct platform_device *pdev)
+static int bridge_resume(struct platform_device *pdev)
{
u32 status;
@@ -456,9 +599,6 @@ static int BRIDGE_RESUME(struct platform_device *pdev)
wake_up(&bridge_suspend_data.suspend_wq);
return 0;
}
-#else
-#define BRIDGE_SUSPEND NULL
-#define BRIDGE_RESUME NULL
#endif
static struct platform_driver bridge_driver = {
@@ -467,8 +607,10 @@ static struct platform_driver bridge_driver = {
},
.probe = omap34_xx_bridge_probe,
.remove = __devexit_p(omap34_xx_bridge_remove),
- .suspend = BRIDGE_SUSPEND,
- .resume = BRIDGE_RESUME,
+#ifdef CONFIG_PM
+ .suspend = bridge_suspend,
+ .resume = bridge_resume,
+#endif
};
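Compiling the .suspend/.resume initializers out under CONFIG_PM
replaces the old approach of defining BRIDGE_SUSPEND/BRIDGE_RESUME to
NULL in an #else branch (removed above). The resulting pattern, as a
self-contained sketch with hypothetical demo_* names:

#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev) { return 0; }
static int demo_remove(struct platform_device *pdev) { return 0; }

#ifdef CONFIG_PM
static int demo_suspend(struct platform_device *pdev, pm_message_t state)
{
	return 0;	/* quiesce the hardware here */
}

static int demo_resume(struct platform_device *pdev)
{
	return 0;	/* bring the hardware back up here */
}
#endif

static struct platform_driver demo_driver = {
	.driver = { .name = "demo" },
	.probe  = demo_probe,
	.remove = demo_remove,
#ifdef CONFIG_PM
	.suspend = demo_suspend,
	.resume  = demo_resume,
#endif
};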
static int __init bridge_init(void)
@@ -481,170 +623,6 @@ static void __exit bridge_exit(void)
platform_driver_unregister(&bridge_driver);
}
-/*
- * This function is called when an application opens handle to the
- * bridge driver.
- */
-static int bridge_open(struct inode *ip, struct file *filp)
-{
- int status = 0;
- struct process_context *pr_ctxt = NULL;
-
- /*
- * Allocate a new process context and insert it into global
- * process context list.
- */
-
-#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
- if (recover) {
- if (filp->f_flags & O_NONBLOCK ||
- wait_for_completion_interruptible(&bridge_open_comp))
- return -EBUSY;
- }
-#endif
- pr_ctxt = kzalloc(sizeof(struct process_context), GFP_KERNEL);
- if (!pr_ctxt)
- return -ENOMEM;
-
- pr_ctxt->res_state = PROC_RES_ALLOCATED;
- spin_lock_init(&pr_ctxt->dmm_map_lock);
- INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
- spin_lock_init(&pr_ctxt->dmm_rsv_lock);
- INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
-
- pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
- if (!pr_ctxt->node_id) {
- status = -ENOMEM;
- goto err1;
- }
-
- idr_init(pr_ctxt->node_id);
-
- pr_ctxt->stream_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
- if (!pr_ctxt->stream_id) {
- status = -ENOMEM;
- goto err2;
- }
-
- idr_init(pr_ctxt->stream_id);
-
- filp->private_data = pr_ctxt;
-
-#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
- atomic_inc(&bridge_cref);
-#endif
- return 0;
-
-err2:
- kfree(pr_ctxt->node_id);
-err1:
- kfree(pr_ctxt);
- return status;
-}
-
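bridge_open() (moved near the top of the file by this patch) builds a
per-open process context holding two separately kzalloc'd struct idr
allocators for node and stream handles, which is what forces the
err1/err2 unwind. Embedding the idr in the context would remove the
extra allocation and its error path; a sketch under that assumption
(hypothetical demo_* names):

#include <linux/idr.h>
#include <linux/slab.h>

struct demo_ctx {
	struct idr node_id;	/* embedded instead of kzalloc'd */
};

static int demo_open_ctx(struct demo_ctx **out)
{
	struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	idr_init(&ctx->node_id);
	*out = ctx;
	return 0;
}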
-/*
- * This function is called when an application closes handle to the bridge
- * driver.
- */
-static int bridge_release(struct inode *ip, struct file *filp)
-{
- int status = 0;
- struct process_context *pr_ctxt;
-
- if (!filp->private_data) {
- status = -EIO;
- goto err;
- }
-
- pr_ctxt = filp->private_data;
- flush_signals(current);
- drv_remove_all_resources(pr_ctxt);
- proc_detach(pr_ctxt);
- kfree(pr_ctxt->node_id);
- kfree(pr_ctxt->stream_id);
- kfree(pr_ctxt);
-
- filp->private_data = NULL;
-
-err:
-#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
- if (!atomic_dec_return(&bridge_cref))
- complete(&bridge_comp);
-#endif
- return status;
-}
-
-/* This function provides IO interface to the bridge driver. */
-static long bridge_ioctl(struct file *filp, unsigned int code,
- unsigned long args)
-{
- int status;
- u32 retval = 0;
- union trapped_args buf_in;
-
- DBC_REQUIRE(filp != NULL);
-#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
- if (recover) {
- status = -EIO;
- goto err;
- }
-#endif
-#ifdef CONFIG_PM
- status = omap34_xxbridge_suspend_lockout(&bridge_suspend_data, filp);
- if (status != 0)
- return status;
-#endif
-
- if (!filp->private_data) {
- status = -EIO;
- goto err;
- }
-
- status = copy_from_user(&buf_in, (union trapped_args *)args,
- sizeof(union trapped_args));
-
- if (!status) {
- status = api_call_dev_ioctl(code, &buf_in, &retval,
- filp->private_data);
-
- if (!status) {
- status = retval;
- } else {
- dev_dbg(bridge, "%s: IOCTL Failed, code: 0x%x "
- "status 0x%x\n", __func__, code, status);
- status = -1;
- }
-
- }
-
-err:
- return status;
-}
-
-/* This function maps kernel space memory to user space memory. */
-static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- u32 offset = vma->vm_pgoff << PAGE_SHIFT;
- u32 status;
-
- DBC_ASSERT(vma->vm_start < vma->vm_end);
-
- vma->vm_flags |= VM_RESERVED | VM_IO;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- dev_dbg(bridge, "%s: vm filp %p offset %x start %lx end %lx page_prot "
- "%lx flags %lx\n", __func__, filp, offset,
- vma->vm_start, vma->vm_end, vma->vm_page_prot, vma->vm_flags);
-
- status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
- if (status != 0)
- status = -EAGAIN;
-
- return status;
-}
-
/* Remove all process resources before removing the process from the
 * process context list */
int drv_remove_all_resources(void *process_ctxt)
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.h b/drivers/staging/tidspbridge/rmgr/drv_interface.h
deleted file mode 100644
index ab070602adc2..000000000000
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * drv_interface.h
- *
- * DSP-BIOS Bridge driver support functions for TI OMAP processors.
- *
- * Copyright (C) 2005-2006 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#ifndef _DRV_INTERFACE_H_
-#define _DRV_INTERFACE_H_
-
-/* Prototypes for all functions in this bridge */
-static int __init bridge_init(void); /* Initialize bridge */
-static void __exit bridge_exit(void); /* Opposite of initialize */
-static int bridge_open(struct inode *ip, struct file *filp); /* Open */
-static int bridge_release(struct inode *ip, struct file *filp); /* Release */
-static long bridge_ioctl(struct file *filp, unsigned int code,
- unsigned long args);
-static int bridge_mmap(struct file *filp, struct vm_area_struct *vma);
-#endif /* ifndef _DRV_INTERFACE_H_ */
diff --git a/drivers/staging/tidspbridge/rmgr/dspdrv.c b/drivers/staging/tidspbridge/rmgr/dspdrv.c
index 7a6fc737872c..dc767b183cdf 100644
--- a/drivers/staging/tidspbridge/rmgr/dspdrv.c
+++ b/drivers/staging/tidspbridge/rmgr/dspdrv.c
@@ -23,9 +23,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- Platform Manager */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>
@@ -102,8 +99,6 @@ func_cont:
} else {
dev_dbg(bridge, "%s: Failed\n", __func__);
} /* End api_init_complete2 */
- DBC_ENSURE((!status && drv_obj != NULL) ||
- (status && drv_obj == NULL));
*init_status = status;
/* Return the Driver Object */
return (u32) drv_obj;
diff --git a/drivers/staging/tidspbridge/rmgr/mgr.c b/drivers/staging/tidspbridge/rmgr/mgr.c
index d635c01c015e..8a1e9287cff6 100644
--- a/drivers/staging/tidspbridge/rmgr/mgr.c
+++ b/drivers/staging/tidspbridge/rmgr/mgr.c
@@ -26,9 +26,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -62,9 +59,6 @@ int mgr_create(struct mgr_object **mgr_obj,
struct mgr_object *pmgr_obj = NULL;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(mgr_obj != NULL);
- DBC_REQUIRE(refs > 0);
-
pmgr_obj = kzalloc(sizeof(struct mgr_object), GFP_KERNEL);
if (pmgr_obj) {
status = dcd_create_manager(ZLDLLNAME, &pmgr_obj->dcd_mgr);
@@ -92,7 +86,6 @@ int mgr_create(struct mgr_object **mgr_obj,
status = -ENOMEM;
}
- DBC_ENSURE(status || pmgr_obj);
return status;
}
@@ -106,9 +99,6 @@ int mgr_destroy(struct mgr_object *hmgr_obj)
struct mgr_object *pmgr_obj = (struct mgr_object *)hmgr_obj;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hmgr_obj);
-
/* Free resources */
if (hmgr_obj->dcd_mgr)
dcd_destroy_manager(hmgr_obj->dcd_mgr);
@@ -140,11 +130,6 @@ int mgr_enum_node_info(u32 node_id, struct dsp_ndbprops *pndb_props,
struct mgr_object *pmgr_obj = NULL;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(pndb_props != NULL);
- DBC_REQUIRE(pu_num_nodes != NULL);
- DBC_REQUIRE(undb_props_size >= sizeof(struct dsp_ndbprops));
- DBC_REQUIRE(refs > 0);
-
*pu_num_nodes = 0;
/* Get the Manager Object from the driver data */
if (!drv_datap || !drv_datap->mgr_object) {
@@ -153,7 +138,6 @@ int mgr_enum_node_info(u32 node_id, struct dsp_ndbprops *pndb_props,
}
pmgr_obj = drv_datap->mgr_object;
- DBC_ASSERT(pmgr_obj);
/* Loop until a call fails or the enumeration runs out of items;
* any nonzero status exits the loop. */
while (!status) {
@@ -205,11 +189,6 @@ int mgr_enum_processor_info(u32 processor_id,
struct drv_data *drv_datap = dev_get_drvdata(bridge);
bool proc_detect = false;
- DBC_REQUIRE(processor_info != NULL);
- DBC_REQUIRE(pu_num_procs != NULL);
- DBC_REQUIRE(processor_info_size >= sizeof(struct dsp_processorinfo));
- DBC_REQUIRE(refs > 0);
-
*pu_num_procs = 0;
/* Retrieve the Object handle from the driver data */
@@ -242,7 +221,6 @@ int mgr_enum_processor_info(u32 processor_id,
dev_dbg(bridge, "%s: Failed to get MGR Object\n", __func__);
goto func_end;
}
- DBC_ASSERT(pmgr_obj);
/* Loop until the enumeration runs out of items;
* any nonzero status exits the loop. */
while (status1 == 0) {
@@ -310,12 +288,9 @@ func_end:
*/
void mgr_exit(void)
{
- DBC_REQUIRE(refs > 0);
refs--;
if (refs == 0)
dcd_exit();
-
- DBC_ENSURE(refs >= 0);
}
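mgr_exit() is one of several refcounted init/exit shims this series
strips of their DBC macros (nldr, node, proc, rmm and strm get the
same treatment below, with some of the shims deleted outright). The
recurring pattern, in sketch form with hypothetical demo_* names:

static u32 refs;	/* module reference count */

static void demo_bring_up_deps(void) { }
static void demo_tear_down_deps(void) { }

static bool demo_init(void)
{
	if (refs == 0)
		demo_bring_up_deps();	/* first user */
	refs++;
	return true;
}

static void demo_exit(void)
{
	refs--;
	if (refs == 0)
		demo_tear_down_deps();	/* last user */
}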
/*
@@ -328,16 +303,11 @@ int mgr_get_dcd_handle(struct mgr_object *mgr_handle,
int status = -EPERM;
struct mgr_object *pmgr_obj = (struct mgr_object *)mgr_handle;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dcd_handle != NULL);
-
*dcd_handle = (u32) NULL;
if (pmgr_obj) {
*dcd_handle = (u32) pmgr_obj->dcd_mgr;
status = 0;
}
- DBC_ENSURE((!status && *dcd_handle != (u32) NULL) ||
- (status && *dcd_handle == (u32) NULL));
return status;
}
@@ -349,22 +319,13 @@ int mgr_get_dcd_handle(struct mgr_object *mgr_handle,
bool mgr_init(void)
{
bool ret = true;
- bool init_dcd = false;
- DBC_REQUIRE(refs >= 0);
-
- if (refs == 0) {
- init_dcd = dcd_init(); /* DCD Module */
-
- if (!init_dcd)
- ret = false;
- }
+ if (refs == 0)
+ ret = dcd_init(); /* DCD Module */
if (ret)
refs++;
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
return ret;
}
@@ -380,8 +341,6 @@ int mgr_wait_for_bridge_events(struct dsp_notification **anotifications,
struct sync_object *sync_events[MAX_EVENTS];
u32 i;
- DBC_REQUIRE(count < MAX_EVENTS);
-
for (i = 0; i < count; i++)
sync_events[i] = anotifications[i]->handle;
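The deleted DBC_REQUIRE(count < MAX_EVENTS) was the only bound on this
sync_events[] fill, and it was a debug-only check to begin with. A
real runtime check would be a possible follow-up (not part of this
patch); inside the function body it might read:

	if (count >= MAX_EVENTS)
		return -EINVAL;
	for (i = 0; i < count; i++)
		sync_events[i] = anotifications[i]->handle;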
diff --git a/drivers/staging/tidspbridge/rmgr/nldr.c b/drivers/staging/tidspbridge/rmgr/nldr.c
index 0e70cba15ebc..30d5480fcdcc 100644
--- a/drivers/staging/tidspbridge/rmgr/nldr.c
+++ b/drivers/staging/tidspbridge/rmgr/nldr.c
@@ -22,8 +22,6 @@
#include <dspbridge/dbdefs.h>
-#include <dspbridge/dbc.h>
-
/* Platform manager */
#include <dspbridge/cod.h>
#include <dspbridge/dev.h>
@@ -265,8 +263,6 @@ static struct dbll_fxns ldr_fxns = {
(dbll_unload_fxn) dbll_unload,
};
-static u32 refs; /* module reference count */
-
static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
u32 addr, u32 bytes);
static int add_ovly_node(struct dsp_uuid *uuid_obj,
@@ -313,11 +309,6 @@ int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
struct nldr_nodeobject *nldr_node_obj = NULL;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(node_props != NULL);
- DBC_REQUIRE(nldr_nodeobj != NULL);
- DBC_REQUIRE(nldr_obj);
-
/* Initialize handle in case of failure */
*nldr_nodeobj = NULL;
/* Allocate node object */
@@ -398,8 +389,6 @@ int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
if (status && nldr_node_obj)
kfree(nldr_node_obj);
- DBC_ENSURE((!status && *nldr_nodeobj)
- || (status && *nldr_nodeobj == NULL));
return status;
}
@@ -425,12 +414,6 @@ int nldr_create(struct nldr_object **nldr,
struct rmm_segment *rmm_segs = NULL;
u16 i;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(nldr != NULL);
- DBC_REQUIRE(hdev_obj != NULL);
- DBC_REQUIRE(pattrs != NULL);
- DBC_REQUIRE(pattrs->ovly != NULL);
- DBC_REQUIRE(pattrs->write != NULL);
/* Allocate dynamic loader object */
nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL);
@@ -440,13 +423,10 @@ int nldr_create(struct nldr_object **nldr,
dev_get_cod_mgr(hdev_obj, &cod_mgr);
if (cod_mgr) {
status = cod_get_loader(cod_mgr, &nldr_obj->dbll);
- DBC_ASSERT(!status);
status = cod_get_base_lib(cod_mgr, &nldr_obj->base_lib);
- DBC_ASSERT(!status);
status =
cod_get_base_name(cod_mgr, sz_zl_file,
COD_MAXPATHLENGTH);
- DBC_ASSERT(!status);
}
status = 0;
/* end lazy status checking */
@@ -547,7 +527,6 @@ int nldr_create(struct nldr_object **nldr,
status =
cod_get_base_name(cod_mgr, sz_zl_file, COD_MAXPATHLENGTH);
/* lazy check */
- DBC_ASSERT(!status);
/* First count number of overlay nodes */
status =
dcd_get_objects(nldr_obj->dcd_mgr, sz_zl_file,
@@ -583,7 +562,6 @@ int nldr_create(struct nldr_object **nldr,
*nldr = NULL;
}
/* FIXME:Temp. Fix. Must be removed */
- DBC_ENSURE((!status && *nldr) || (status && *nldr == NULL));
return status;
}
@@ -595,8 +573,6 @@ void nldr_delete(struct nldr_object *nldr_obj)
struct ovly_sect *ovly_section;
struct ovly_sect *next;
u16 i;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(nldr_obj);
nldr_obj->ldr_fxns.exit_fxn();
if (nldr_obj->rmm)
@@ -644,22 +620,6 @@ void nldr_delete(struct nldr_object *nldr_obj)
}
/*
- * ======== nldr_exit ========
- * Discontinue usage of NLDR module.
- */
-void nldr_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- if (refs == 0)
- rmm_exit();
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
* ======== nldr_get_fxn_addr ========
*/
int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
@@ -671,10 +631,6 @@ int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
bool status1 = false;
s32 i = 0;
struct lib_node root = { NULL, 0, NULL };
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(nldr_node_obj);
- DBC_REQUIRE(addr != NULL);
- DBC_REQUIRE(str_fxn != NULL);
nldr_obj = nldr_node_obj->nldr_obj;
/* Called from node_create(), node_delete(), or node_run(). */
@@ -690,7 +646,6 @@ int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
root = nldr_node_obj->delete_lib;
break;
default:
- DBC_ASSERT(false);
break;
}
} else {
@@ -760,7 +715,6 @@ int nldr_get_rmm_manager(struct nldr_object *nldr,
{
int status = 0;
struct nldr_object *nldr_obj = nldr;
- DBC_REQUIRE(rmm_mgr != NULL);
if (nldr) {
*rmm_mgr = nldr_obj->rmm;
@@ -769,29 +723,10 @@ int nldr_get_rmm_manager(struct nldr_object *nldr,
status = -EFAULT;
}
- DBC_ENSURE(!status || (rmm_mgr != NULL && *rmm_mgr == NULL));
-
return status;
}
/*
- * ======== nldr_init ========
- * Initialize the NLDR module.
- */
-bool nldr_init(void)
-{
- DBC_REQUIRE(refs >= 0);
-
- if (refs == 0)
- rmm_init();
-
- refs++;
-
- DBC_ENSURE(refs > 0);
- return true;
-}
-
-/*
* ======== nldr_load ========
*/
int nldr_load(struct nldr_nodeobject *nldr_node_obj,
@@ -801,9 +736,6 @@ int nldr_load(struct nldr_nodeobject *nldr_node_obj,
struct dsp_uuid lib_uuid;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(nldr_node_obj);
-
nldr_obj = nldr_node_obj->nldr_obj;
if (nldr_node_obj->dynamic) {
@@ -839,7 +771,6 @@ int nldr_load(struct nldr_nodeobject *nldr_node_obj,
break;
default:
- DBC_ASSERT(false);
break;
}
}
@@ -863,9 +794,6 @@ int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
struct lib_node *root_lib = NULL;
s32 i = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(nldr_node_obj);
-
if (nldr_node_obj != NULL) {
if (nldr_node_obj->dynamic) {
if (*nldr_node_obj->phase_split) {
@@ -889,7 +817,6 @@ int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
nldr_node_obj->pers_libs = 0;
break;
default:
- DBC_ASSERT(false);
break;
}
} else {
@@ -929,7 +856,6 @@ static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
/* Find the node it belongs to */
for (i = 0; i < nldr_obj->ovly_nodes; i++) {
node_name = nldr_obj->ovly_table[i].node_name;
- DBC_REQUIRE(node_name);
if (strncmp(node_name, sect_name + 1, strlen(node_name)) == 0) {
/* Found the node */
break;
@@ -1018,8 +944,6 @@ static int add_ovly_node(struct dsp_uuid *uuid_obj,
/* Add node to table */
nldr_obj->ovly_table[nldr_obj->ovly_nid].uuid =
*uuid_obj;
- DBC_REQUIRE(obj_def.obj_data.node_obj.ndb_props.
- ac_name);
len =
strlen(obj_def.obj_data.node_obj.ndb_props.ac_name);
node_name = obj_def.obj_data.node_obj.ndb_props.ac_name;
@@ -1129,7 +1053,6 @@ static void free_sects(struct nldr_object *nldr_obj,
ret =
rmm_free(nldr_obj->rmm, 0, ovly_section->sect_run_addr,
ovly_section->size, true);
- DBC_ASSERT(ret);
ovly_section = ovly_section->next_sect;
i++;
}
@@ -1249,7 +1172,6 @@ static int load_lib(struct nldr_nodeobject *nldr_node_obj,
if (depth > MAXDEPTH) {
/* Error */
- DBC_ASSERT(false);
}
root->lib = NULL;
/* Allocate a buffer for library file name of size DBL_MAXPATHLENGTH */
@@ -1312,7 +1234,6 @@ static int load_lib(struct nldr_nodeobject *nldr_node_obj,
dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->dcd_mgr,
&uuid, &nd_libs, &np_libs, phase);
}
- DBC_ASSERT(nd_libs >= np_libs);
if (!status) {
if (!(*nldr_node_obj->phase_split))
np_libs = 0;
@@ -1474,7 +1395,6 @@ static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
}
}
- DBC_ASSERT(i < nldr_obj->ovly_nodes);
if (!po_node) {
status = -ENOENT;
@@ -1500,7 +1420,6 @@ static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
break;
default:
- DBC_ASSERT(false);
break;
}
@@ -1623,9 +1542,6 @@ static int remote_alloc(void **ref, u16 mem_sect, u32 size,
struct rmm_addr *rmm_addr_obj = (struct rmm_addr *)dsp_address;
bool mem_load_req = false;
int status = -ENOMEM; /* Set to fail */
- DBC_REQUIRE(hnode);
- DBC_REQUIRE(mem_sect == DBLL_CODE || mem_sect == DBLL_DATA ||
- mem_sect == DBLL_BSS);
nldr_obj = hnode->nldr_obj;
rmm = nldr_obj->rmm;
/* Convert size to DSP words */
@@ -1651,7 +1567,6 @@ static int remote_alloc(void **ref, u16 mem_sect, u32 size,
mem_phase_bit = EXECUTEDATAFLAGBIT;
break;
default:
- DBC_ASSERT(false);
break;
}
if (mem_sect == DBLL_CODE)
@@ -1670,11 +1585,9 @@ static int remote_alloc(void **ref, u16 mem_sect, u32 size,
/* Find an appropriate segment based on mem_sect */
if (segid == NULLID) {
/* No memory requirements or preferences */
- DBC_ASSERT(!mem_load_req);
goto func_cont;
}
if (segid <= MAXSEGID) {
- DBC_ASSERT(segid < nldr_obj->dload_segs);
/* Attempt to allocate from segid first. */
rmm_addr_obj->segid = segid;
status =
@@ -1685,7 +1598,6 @@ static int remote_alloc(void **ref, u16 mem_sect, u32 size,
}
} else {
/* segid > MAXSEGID ==> Internal or external memory */
- DBC_ASSERT(segid == MEMINTERNALID || segid == MEMEXTERNALID);
/* Check for any internal or external memory segment,
* depending on segid. */
mem_sect_type |= segid == MEMINTERNALID ?
@@ -1736,8 +1648,6 @@ static int remote_free(void **ref, u16 space, u32 dsp_address,
u32 word_size;
int status = -ENOMEM; /* Set to fail */
- DBC_REQUIRE(nldr_obj);
-
rmm = nldr_obj->rmm;
/* Convert size to DSP words */
@@ -1761,7 +1671,6 @@ static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
u16 i;
- DBC_ASSERT(root != NULL);
/* Unload dependent libraries */
for (i = 0; i < root->dep_libs; i++)
@@ -1812,7 +1721,6 @@ static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
}
}
- DBC_ASSERT(i < nldr_obj->ovly_nodes);
if (!po_node)
/* TODO: Should we print warning here? */
@@ -1839,14 +1747,11 @@ static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
other_alloc = po_node->other_sects;
break;
default:
- DBC_ASSERT(false);
break;
}
- DBC_ASSERT(ref_count && (*ref_count > 0));
if (ref_count && (*ref_count > 0)) {
*ref_count -= 1;
if (other_ref) {
- DBC_ASSERT(*other_ref > 0);
*other_ref -= 1;
}
}
@@ -1897,9 +1802,6 @@ int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
bool status1 = false;
s32 i = 0;
struct lib_node root = { NULL, 0, NULL };
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(offset_output != NULL);
- DBC_REQUIRE(sym_name != NULL);
pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__, (u32) nldr_node,
sym_addr, offset_range, (u32) offset_output, sym_name);
@@ -1915,7 +1817,6 @@ int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
root = nldr_node->delete_lib;
break;
default:
- DBC_ASSERT(false);
break;
}
} else {
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
index 5dadaa445ad9..7fb426c5251c 100644
--- a/drivers/staging/tidspbridge/rmgr/node.c
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -26,9 +26,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/memdefs.h>
#include <dspbridge/proc.h>
@@ -162,7 +159,6 @@ struct node_mgr {
/* Loader properties */
struct nldr_object *nldr_obj; /* Handle to loader */
struct node_ldr_fxns nldr_fxns; /* Handle to loader functions */
- bool loader_init; /* Loader Init function succeeded? */
};
/*
@@ -264,16 +260,12 @@ static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
u32 ul_num_bytes, u32 mem_space);
-static u32 refs; /* module reference count */
-
/* Dynamic loader functions. */
static struct node_ldr_fxns nldr_fxns = {
nldr_allocate,
nldr_create,
nldr_delete,
- nldr_exit,
nldr_get_fxn_addr,
- nldr_init,
nldr_load,
nldr_unload,
};
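With nldr_init/nldr_exit gone from this table, the node_ldr_fxns ops
struct (declared in a header outside this hunk) loses the matching
function-pointer members. An illustrative sketch only, with the real
signatures elided:

/* a loader ops table after dropping its init/exit slots */
struct demo_ldr_fxns {
	int  (*allocate)(void);
	int  (*create)(void);
	void (*delete)(void);
	int  (*get_fxn_addr)(void);
	int  (*load)(void);
	int  (*unload)(void);
};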
@@ -326,11 +318,6 @@ int node_allocate(struct proc_object *hprocessor,
void *node_res;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hprocessor != NULL);
- DBC_REQUIRE(noderes != NULL);
- DBC_REQUIRE(node_uuid != NULL);
-
*noderes = NULL;
status = proc_get_processor_id(hprocessor, &proc_id);
@@ -673,7 +660,6 @@ func_cont:
drv_proc_node_update_heap_status(node_res, true);
drv_proc_node_update_status(node_res, true);
}
- DBC_ENSURE((status && *noderes == NULL) || (!status && *noderes));
func_end:
dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
"node_res: %p status: 0x%x\n", __func__, hprocessor,
@@ -696,11 +682,6 @@ DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
bool set_info;
u32 proc_id;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pbuffer != NULL);
-
- DBC_REQUIRE(usize > 0);
-
if (!pnode)
status = -EFAULT;
else if (node_get_type(pnode) == NODE_DEVICE)
@@ -714,7 +695,6 @@ DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
status = proc_get_processor_id(pnode->processor, &proc_id);
if (proc_id != DSP_UNIT) {
- DBC_ASSERT(NULL);
goto func_end;
}
/* If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
@@ -782,8 +762,6 @@ int node_change_priority(struct node_object *hnode, s32 prio)
int status = 0;
u32 proc_id;
- DBC_REQUIRE(refs > 0);
-
if (!hnode || !hnode->node_mgr) {
status = -EFAULT;
} else {
@@ -854,7 +832,6 @@ int node_connect(struct node_object *node1, u32 stream1,
s8 chnl_mode;
u32 dw_length;
int status = 0;
- DBC_REQUIRE(refs > 0);
if (!node1 || !node2)
return -EFAULT;
@@ -903,7 +880,6 @@ int node_connect(struct node_object *node1, u32 stream1,
if (node1_type != NODE_GPP) {
hnode_mgr = node1->node_mgr;
} else {
- DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
hnode_mgr = node2->node_mgr;
}
@@ -982,9 +958,6 @@ int node_connect(struct node_object *node1, u32 stream1,
goto out_unlock;
}
- DBC_ASSERT((node1_type == NODE_GPP) ||
- (node2_type == NODE_GPP));
-
chnl_mode = (node1_type == NODE_GPP) ?
CHNL_MODETODSP : CHNL_MODEFROMDSP;
@@ -1139,7 +1112,6 @@ int node_create(struct node_object *hnode)
omap_dspbridge_dev->dev.platform_data;
#endif
- DBC_REQUIRE(refs > 0);
if (!pnode) {
status = -EFAULT;
goto func_end;
@@ -1291,10 +1263,6 @@ int node_create_mgr(struct node_mgr **node_man,
int status = 0;
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(node_man != NULL);
- DBC_REQUIRE(hdev_obj != NULL);
-
*node_man = NULL;
/* Allocate Node manager object */
node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
@@ -1366,7 +1334,6 @@ int node_create_mgr(struct node_mgr **node_man,
nldr_attrs_obj.write = mem_write;
nldr_attrs_obj.dsp_word_size = node_mgr_obj->dsp_word_size;
nldr_attrs_obj.dsp_mau_size = node_mgr_obj->dsp_mau_size;
- node_mgr_obj->loader_init = node_mgr_obj->nldr_fxns.init();
status = node_mgr_obj->nldr_fxns.create(&node_mgr_obj->nldr_obj,
hdev_obj,
&nldr_attrs_obj);
@@ -1375,8 +1342,6 @@ int node_create_mgr(struct node_mgr **node_man,
*node_man = node_mgr_obj;
- DBC_ENSURE((status && *node_man == NULL) || (!status && *node_man));
-
return status;
out_err:
delete_node_mgr(node_mgr_obj);
@@ -1409,7 +1374,6 @@ int node_delete(struct node_res_object *noderes,
void *node_res = noderes;
struct dsp_processorstate proc_state;
- DBC_REQUIRE(refs > 0);
if (!pnode) {
status = -EFAULT;
@@ -1554,8 +1518,6 @@ func_end:
*/
int node_delete_mgr(struct node_mgr *hnode_mgr)
{
- DBC_REQUIRE(refs > 0);
-
if (!hnode_mgr)
return -EFAULT;
@@ -1576,10 +1538,6 @@ int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
struct node_object *hnode;
u32 i = 0;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
- DBC_REQUIRE(pu_num_nodes != NULL);
- DBC_REQUIRE(pu_allocated != NULL);
if (!hnode_mgr) {
status = -EFAULT;
@@ -1605,20 +1563,6 @@ func_end:
}
/*
- * ======== node_exit ========
- * Purpose:
- * Discontinue usage of NODE module.
- */
-void node_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
* ======== node_free_msg_buf ========
* Purpose:
* Frees the message buffer.
@@ -1629,10 +1573,6 @@ int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
struct node_object *pnode = (struct node_object *)hnode;
int status = 0;
u32 proc_id;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pbuffer != NULL);
- DBC_REQUIRE(pnode != NULL);
- DBC_REQUIRE(pnode->xlator != NULL);
if (!hnode) {
status = -EFAULT;
@@ -1653,7 +1593,6 @@ int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
}
} else {
- DBC_ASSERT(NULL); /* BUG */
}
func_end:
return status;
@@ -1669,9 +1608,6 @@ int node_get_attr(struct node_object *hnode,
struct dsp_nodeattr *pattr, u32 attr_size)
{
struct node_mgr *hnode_mgr;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pattr != NULL);
- DBC_REQUIRE(attr_size >= sizeof(struct dsp_nodeattr));
if (!hnode)
return -EFAULT;
@@ -1713,9 +1649,6 @@ int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
{
enum node_type node_type;
int status = -EINVAL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dir == DSP_TONODE || dir == DSP_FROMNODE);
- DBC_REQUIRE(chan_id != NULL);
if (!hnode) {
status = -EFAULT;
@@ -1734,7 +1667,6 @@ int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
}
}
} else {
- DBC_ASSERT(dir == DSP_FROMNODE);
if (index < MAX_OUTPUTS(hnode)) {
if (hnode->outputs[index].type == HOSTCONNECT) {
*chan_id = hnode->outputs[index].dev_id;
@@ -1761,9 +1693,6 @@ int node_get_message(struct node_object *hnode,
struct dsp_processorstate proc_state;
struct proc_object *hprocessor;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(message != NULL);
-
if (!hnode) {
status = -EFAULT;
goto func_end;
@@ -1831,14 +1760,12 @@ int node_get_nldr_obj(struct node_mgr *hnode_mgr,
{
int status = 0;
struct node_mgr *node_mgr_obj = hnode_mgr;
- DBC_REQUIRE(nldr_ovlyobj != NULL);
if (!hnode_mgr)
status = -EFAULT;
else
*nldr_ovlyobj = node_mgr_obj->nldr_obj;
- DBC_ENSURE(!status || (nldr_ovlyobj != NULL && *nldr_ovlyobj == NULL));
return status;
}
@@ -1852,8 +1779,6 @@ int node_get_strm_mgr(struct node_object *hnode,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
-
if (!hnode)
status = -EFAULT;
else
@@ -1867,8 +1792,6 @@ int node_get_strm_mgr(struct node_object *hnode,
*/
enum nldr_loadtype node_get_load_type(struct node_object *hnode)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hnode);
if (!hnode) {
dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
return -1;
@@ -1884,8 +1807,6 @@ enum nldr_loadtype node_get_load_type(struct node_object *hnode)
*/
u32 node_get_timeout(struct node_object *hnode)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hnode);
if (!hnode) {
dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
return 0;
@@ -1915,20 +1836,6 @@ enum node_type node_get_type(struct node_object *hnode)
}
/*
- * ======== node_init ========
- * Purpose:
- * Initialize the NODE module.
- */
-bool node_init(void)
-{
- DBC_REQUIRE(refs >= 0);
-
- refs++;
-
- return true;
-}
-
-/*
* ======== node_on_exit ========
* Purpose:
* Gets called when RMS_EXIT is received for a node.
@@ -1970,8 +1877,6 @@ int node_pause(struct node_object *hnode)
struct dsp_processorstate proc_state;
struct proc_object *hprocessor;
- DBC_REQUIRE(refs > 0);
-
if (!hnode) {
status = -EFAULT;
} else {
@@ -2054,9 +1959,6 @@ int node_put_message(struct node_object *hnode,
struct dsp_processorstate proc_state;
struct proc_object *hprocessor;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pmsg != NULL);
-
if (!hnode) {
status = -EFAULT;
goto func_end;
@@ -2146,9 +2048,6 @@ int node_register_notify(struct node_object *hnode, u32 event_mask,
struct bridge_drv_interface *intf_fxns;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hnotification != NULL);
-
if (!hnode) {
status = -EFAULT;
} else {
@@ -2207,8 +2106,6 @@ int node_run(struct node_object *hnode)
struct dsp_processorstate proc_state;
struct proc_object *hprocessor;
- DBC_REQUIRE(refs > 0);
-
if (!hnode) {
status = -EFAULT;
goto func_end;
@@ -2287,7 +2184,6 @@ int node_run(struct node_object *hnode)
NODE_GET_PRIORITY(hnode));
} else {
/* We should never get here */
- DBC_ASSERT(false);
}
func_cont1:
/* Update node state. */
@@ -2326,9 +2222,6 @@ int node_terminate(struct node_object *hnode, int *pstatus)
struct deh_mgr *hdeh_mgr;
struct dsp_processorstate proc_state;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pstatus != NULL);
-
if (!hnode || !hnode->node_mgr) {
status = -EFAULT;
goto func_end;
@@ -2610,9 +2503,6 @@ static void delete_node_mgr(struct node_mgr *hnode_mgr)
if (hnode_mgr->nldr_obj)
hnode_mgr->nldr_fxns.delete(hnode_mgr->nldr_obj);
- if (hnode_mgr->loader_init)
- hnode_mgr->nldr_fxns.exit();
-
kfree(hnode_mgr);
}
}
@@ -2668,7 +2558,6 @@ static void fill_stream_connect(struct node_object *node1,
strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
} else {
/* GPP == > NODE */
- DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
strm_index = node2->num_inputs + node2->num_outputs - 1;
strm2 = &(node2->stream_connect[strm_index]);
strm2->cb_struct = sizeof(struct dsp_streamconnect);
@@ -2748,9 +2637,6 @@ static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
char *pstr_fxn_name = NULL;
struct node_mgr *hnode_mgr = hnode->node_mgr;
int status = 0;
- DBC_REQUIRE(node_get_type(hnode) == NODE_TASK ||
- node_get_type(hnode) == NODE_DAISSOCKET ||
- node_get_type(hnode) == NODE_MESSAGE);
switch (phase) {
case CREATEPHASE:
@@ -2767,7 +2653,6 @@ static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
break;
default:
/* Should never get here */
- DBC_ASSERT(false);
break;
}
@@ -2787,9 +2672,6 @@ void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info)
{
u32 i;
- DBC_REQUIRE(hnode);
- DBC_REQUIRE(node_info != NULL);
-
node_info->cb_struct = sizeof(struct dsp_nodeinfo);
node_info->nb_node_database_props =
hnode->dcd_props.obj_data.node_obj.ndb_props;
@@ -2848,9 +2730,7 @@ static int get_node_props(struct dcd_manager *hdcd_mgr,
pmsg_args->max_msgs);
} else {
/* Copy device name */
- DBC_REQUIRE(pndb_props->ac_name);
len = strlen(pndb_props->ac_name);
- DBC_ASSERT(len < MAXDEVNAMELEN);
hnode->str_dev_name = kzalloc(len + 1, GFP_KERNEL);
if (hnode->str_dev_name == NULL) {
status = -ENOMEM;
@@ -2938,10 +2818,6 @@ int node_get_uuid_props(void *hprocessor,
struct dcd_nodeprops dcd_node_props;
struct dsp_processorstate proc_state;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hprocessor != NULL);
- DBC_REQUIRE(node_uuid != NULL);
-
if (hprocessor == NULL || node_uuid == NULL) {
status = -EFAULT;
goto func_end;
@@ -3063,8 +2939,6 @@ static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
/* Function interface to Bridge driver*/
struct bridge_drv_interface *intf_fxns;
- DBC_REQUIRE(hnode);
-
hnode_mgr = hnode->node_mgr;
ul_size = ul_num_bytes / hnode_mgr->dsp_word_size;
@@ -3106,9 +2980,6 @@ static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
/* Function interface to Bridge driver */
struct bridge_drv_interface *intf_fxns;
- DBC_REQUIRE(hnode);
- DBC_REQUIRE(mem_space & DBLL_CODE || mem_space & DBLL_DATA);
-
hnode_mgr = hnode->node_mgr;
ul_timeout = hnode->timeout;
diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c
index 242dd1399996..7e4f12f6be42 100644
--- a/drivers/staging/tidspbridge/rmgr/proc.c
+++ b/drivers/staging/tidspbridge/rmgr/proc.c
@@ -25,9 +25,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/ntfy.h>
#include <dspbridge/sync.h>
@@ -101,8 +98,6 @@ struct proc_object {
struct list_head proc_list;
};
-static u32 refs;
-
DEFINE_MUTEX(proc_lock); /* For critical sections */
/* ----------------------------------- Function Prototypes */
@@ -281,9 +276,6 @@ proc_attach(u32 processor_id,
struct drv_data *drv_datap = dev_get_drvdata(bridge);
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(ph_processor != NULL);
-
if (pr_ctxt->processor) {
*ph_processor = pr_ctxt->processor;
return status;
@@ -382,10 +374,6 @@ proc_attach(u32 processor_id,
kfree(p_proc_object);
}
func_end:
- DBC_ENSURE((status == -EPERM && *ph_processor == NULL) ||
- (!status && p_proc_object) ||
- (status == 0 && p_proc_object));
-
return status;
}
@@ -445,10 +433,6 @@ int proc_auto_start(struct cfg_devnode *dev_node_obj,
struct drv_data *drv_datap = dev_get_drvdata(bridge);
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dev_node_obj != NULL);
- DBC_REQUIRE(hdev_obj != NULL);
-
/* Create a Dummy PROC Object */
if (!drv_datap || !drv_datap->mgr_object) {
status = -ENODATA;
@@ -516,8 +500,6 @@ int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata * arg)
struct proc_object *p_proc_object = hprocessor;
u32 timeout = 0;
- DBC_REQUIRE(refs > 0);
-
if (p_proc_object) {
/* intercept PWR deep sleep command */
if (dw_cmd == BRDIOCTL_DEEPSLEEP) {
@@ -565,8 +547,6 @@ int proc_detach(struct process_context *pr_ctxt)
int status = 0;
struct proc_object *p_proc_object = NULL;
- DBC_REQUIRE(refs > 0);
-
p_proc_object = (struct proc_object *)pr_ctxt->processor;
if (p_proc_object) {
@@ -607,11 +587,6 @@ int proc_enum_nodes(void *hprocessor, void **node_tab,
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
struct node_mgr *hnode_mgr = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
- DBC_REQUIRE(pu_num_nodes != NULL);
- DBC_REQUIRE(pu_allocated != NULL);
-
if (p_proc_object) {
if (!(dev_get_node_manager(p_proc_object->dev_obj,
&hnode_mgr))) {
@@ -768,8 +743,6 @@ int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
struct process_context *pr_ctxt = (struct process_context *) hprocessor;
struct dmm_map_object *map_obj;
- DBC_REQUIRE(refs > 0);
-
if (!pr_ctxt) {
status = -EFAULT;
goto err_out;
@@ -810,8 +783,6 @@ int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
struct process_context *pr_ctxt = (struct process_context *) hprocessor;
struct dmm_map_object *map_obj;
- DBC_REQUIRE(refs > 0);
-
if (!pr_ctxt) {
status = -EFAULT;
goto err_out;
@@ -884,10 +855,6 @@ int proc_get_resource_info(void *hprocessor, u32 resource_type,
struct rmm_target_obj *rmm = NULL;
struct io_mgr *hio_mgr = NULL; /* IO manager handle */
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(resource_info != NULL);
- DBC_REQUIRE(resource_info_size >= sizeof(struct dsp_resourceinfo));
-
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
@@ -940,21 +907,6 @@ func_end:
}
/*
- * ======== proc_exit ========
- * Purpose:
- * Decrement reference count, and free resources when reference count is
- * 0.
- */
-void proc_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
* ======== proc_get_dev_object ========
* Purpose:
* Return the Dev Object handle for a given Processor.
@@ -966,9 +918,6 @@ int proc_get_dev_object(void *hprocessor,
int status = -EPERM;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(device_obj != NULL);
-
if (p_proc_object) {
*device_obj = p_proc_object->dev_obj;
status = 0;
@@ -977,9 +926,6 @@ int proc_get_dev_object(void *hprocessor,
status = -EFAULT;
}
- DBC_ENSURE((!status && *device_obj != NULL) ||
- (status && *device_obj == NULL));
-
return status;
}
@@ -996,10 +942,6 @@ int proc_get_state(void *hprocessor,
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
int brd_status;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(proc_state_obj != NULL);
- DBC_REQUIRE(state_info_size >= sizeof(struct dsp_processorstate));
-
if (p_proc_object) {
/* First, retrieve BRD state information */
status = (*p_proc_object->intf_fxns->brd_status)
@@ -1055,25 +997,6 @@ int proc_get_trace(void *hprocessor, u8 * pbuf, u32 max_size)
}
/*
- * ======== proc_init ========
- * Purpose:
- * Initialize PROC's private state, keeping a reference count on each call
- */
-bool proc_init(void)
-{
- bool ret = true;
-
- DBC_REQUIRE(refs >= 0);
-
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
- return ret;
-}
-
-/*
* ======== proc_load ========
* Purpose:
* Reset a processor and load a new base program image.
@@ -1111,10 +1034,6 @@ int proc_load(void *hprocessor, const s32 argc_index,
omap_dspbridge_dev->dev.platform_data;
#endif
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(argc_index > 0);
- DBC_REQUIRE(user_args != NULL);
-
#ifdef OPT_LOAD_TIME_INSTRUMENTATION
do_gettimeofday(&tv1);
#endif
@@ -1202,8 +1121,6 @@ int proc_load(void *hprocessor, const s32 argc_index,
if (status) {
status = -EPERM;
} else {
- DBC_ASSERT(p_proc_object->last_coff ==
- NULL);
/* Allocate memory for pszLastCoff */
p_proc_object->last_coff =
kzalloc((strlen(user_args[0]) +
@@ -1226,7 +1143,6 @@ int proc_load(void *hprocessor, const s32 argc_index,
if (!hmsg_mgr) {
status = msg_create(&hmsg_mgr, p_proc_object->dev_obj,
(msg_onexit) node_on_exit);
- DBC_ASSERT(!status);
dev_set_msg_mgr(p_proc_object->dev_obj, hmsg_mgr);
}
}
@@ -1322,7 +1238,6 @@ int proc_load(void *hprocessor, const s32 argc_index,
strlen(pargv0) + 1);
else
status = -ENOMEM;
- DBC_ASSERT(brd_state == BRD_LOADED);
}
}
@@ -1331,9 +1246,6 @@ func_end:
pr_err("%s: Processor failed to load\n", __func__);
proc_stop(p_proc_object);
}
- DBC_ENSURE((!status
- && p_proc_object->proc_state == PROC_LOADED)
- || status);
#ifdef OPT_LOAD_TIME_INSTRUMENTATION
do_gettimeofday(&tv2);
if (tv2.tv_usec < tv1.tv_usec) {
@@ -1443,9 +1355,6 @@ int proc_register_notify(void *hprocessor, u32 event_mask,
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
struct deh_mgr *hdeh_mgr;
- DBC_REQUIRE(hnotification != NULL);
- DBC_REQUIRE(refs > 0);
-
/* Check processor handle */
if (!p_proc_object) {
status = -EFAULT;
@@ -1567,7 +1476,6 @@ int proc_start(void *hprocessor)
u32 dw_dsp_addr; /* Loaded code's entry point. */
int brd_state;
- DBC_REQUIRE(refs > 0);
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
@@ -1616,7 +1524,6 @@ func_cont:
if (!((*p_proc_object->intf_fxns->brd_status)
(p_proc_object->bridge_context, &brd_state))) {
pr_info("%s: dsp in running state\n", __func__);
- DBC_ASSERT(brd_state != BRD_HIBERNATION);
}
} else {
pr_err("%s: Failed to start the dsp\n", __func__);
@@ -1624,8 +1531,6 @@ func_cont:
}
func_end:
- DBC_ENSURE((!status && p_proc_object->proc_state ==
- PROC_RUNNING) || status);
return status;
}
@@ -1644,9 +1549,7 @@ int proc_stop(void *hprocessor)
u32 node_tab_size = 1;
u32 num_nodes = 0;
u32 nodes_allocated = 0;
- int brd_state;
- DBC_REQUIRE(refs > 0);
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
@@ -1678,11 +1581,6 @@ int proc_stop(void *hprocessor)
msg_delete(hmsg_mgr);
dev_set_msg_mgr(p_proc_object->dev_obj, NULL);
}
- if (!((*p_proc_object->
- intf_fxns->brd_status) (p_proc_object->
- bridge_context,
- &brd_state)))
- DBC_ASSERT(brd_state == BRD_STOPPED);
}
} else {
pr_err("%s: Failed to stop the processor\n", __func__);
@@ -1820,10 +1718,6 @@ static int proc_monitor(struct proc_object *proc_obj)
{
int status = -EPERM;
struct msg_mgr *hmsg_mgr;
- int brd_state;
-
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(proc_obj);
/* This is needed only when the device is loaded while it is
* already 'ACTIVE' */
@@ -1840,13 +1734,8 @@ static int proc_monitor(struct proc_object *proc_obj)
if (!((*proc_obj->intf_fxns->brd_monitor)
(proc_obj->bridge_context))) {
status = 0;
- if (!((*proc_obj->intf_fxns->brd_status)
- (proc_obj->bridge_context, &brd_state)))
- DBC_ASSERT(brd_state == BRD_IDLE);
}
- DBC_ENSURE((!status && brd_state == BRD_IDLE) ||
- status);
return status;
}
@@ -1880,8 +1769,6 @@ static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
{
char **pp_envp = new_envp;
- DBC_REQUIRE(new_envp);
-
/* Prepend new environ var=value string */
*new_envp++ = sz_var;
@@ -1906,9 +1793,6 @@ int proc_notify_clients(void *proc, u32 events)
int status = 0;
struct proc_object *p_proc_object = (struct proc_object *)proc;
- DBC_REQUIRE(p_proc_object);
- DBC_REQUIRE(is_valid_proc_event(events));
- DBC_REQUIRE(refs > 0);
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
@@ -1930,9 +1814,6 @@ int proc_notify_all_clients(void *proc, u32 events)
int status = 0;
struct proc_object *p_proc_object = (struct proc_object *)proc;
- DBC_REQUIRE(is_valid_proc_event(events));
- DBC_REQUIRE(refs > 0);
-
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
diff --git a/drivers/staging/tidspbridge/rmgr/rmm.c b/drivers/staging/tidspbridge/rmgr/rmm.c
index f3dc0ddbfacc..52187bd97729 100644
--- a/drivers/staging/tidspbridge/rmgr/rmm.c
+++ b/drivers/staging/tidspbridge/rmgr/rmm.c
@@ -46,9 +46,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- This */
#include <dspbridge/rmm.h>
@@ -83,8 +80,6 @@ struct rmm_target_obj {
struct list_head ovly_list; /* List of overlay memory in use */
};
-static u32 refs; /* module reference count */
-
static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
u32 align, u32 *dsp_address);
static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
@@ -101,12 +96,6 @@ int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
u32 addr;
int status = 0;
- DBC_REQUIRE(target);
- DBC_REQUIRE(dsp_address != NULL);
- DBC_REQUIRE(size > 0);
- DBC_REQUIRE(reserve || (target->num_segs > 0));
- DBC_REQUIRE(refs > 0);
-
if (!reserve) {
if (!alloc_block(target, segid, size, align, dsp_address)) {
status = -ENOMEM;
@@ -170,9 +159,6 @@ int rmm_create(struct rmm_target_obj **target_obj,
s32 i;
int status = 0;
- DBC_REQUIRE(target_obj != NULL);
- DBC_REQUIRE(num_segs == 0 || seg_tab != NULL);
-
/* Allocate DBL target object */
target = kzalloc(sizeof(struct rmm_target_obj), GFP_KERNEL);
@@ -235,9 +221,6 @@ func_cont:
}
- DBC_ENSURE((!status && *target_obj)
- || (status && *target_obj == NULL));
-
return status;
}
@@ -251,8 +234,6 @@ void rmm_delete(struct rmm_target_obj *target)
struct rmm_header *next;
u32 i;
- DBC_REQUIRE(target);
-
kfree(target->seg_tab);
list_for_each_entry_safe(sect, tmp, &target->ovly_list, list_elem) {
@@ -277,18 +258,6 @@ void rmm_delete(struct rmm_target_obj *target)
}
/*
- * ======== rmm_exit ========
- */
-void rmm_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
* ======== rmm_free ========
*/
bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
@@ -297,15 +266,6 @@ bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
struct rmm_ovly_sect *sect, *tmp;
bool ret = false;
- DBC_REQUIRE(target);
-
- DBC_REQUIRE(reserved || segid < target->num_segs);
- DBC_REQUIRE(reserved || (dsp_addr >= target->seg_tab[segid].base &&
- (dsp_addr + size) <= (target->seg_tab[segid].
- base +
- target->seg_tab[segid].
- length)));
-
/*
* Free or unreserve memory.
*/
@@ -319,7 +279,6 @@ bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
list_for_each_entry_safe(sect, tmp, &target->ovly_list,
list_elem) {
if (dsp_addr == sect->addr) {
- DBC_ASSERT(size == sect->size);
/* Remove from list */
list_del(&sect->list_elem);
kfree(sect);
@@ -331,18 +290,6 @@ bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
}
/*
- * ======== rmm_init ========
- */
-bool rmm_init(void)
-{
- DBC_REQUIRE(refs >= 0);
-
- refs++;
-
- return true;
-}
-
-/*
* ======== rmm_stat ========
*/
bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
@@ -354,9 +301,6 @@ bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
u32 total_free_size = 0;
u32 free_blocks = 0;
- DBC_REQUIRE(mem_stat_buf != NULL);
- DBC_ASSERT(target != NULL);
-
if ((u32) segid < target->num_segs) {
head = target->free_list[segid];
diff --git a/drivers/staging/tidspbridge/rmgr/strm.c b/drivers/staging/tidspbridge/rmgr/strm.c
index 3fae0e9f511e..34cc934e0c3d 100644
--- a/drivers/staging/tidspbridge/rmgr/strm.c
+++ b/drivers/staging/tidspbridge/rmgr/strm.c
@@ -24,9 +24,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -84,9 +81,6 @@ struct strm_object {
struct cmm_xlatorobject *xlator;
};
-/* ----------------------------------- Globals */
-static u32 refs; /* module reference count */
-
/* ----------------------------------- Function Prototypes */
static int delete_strm(struct strm_object *stream_obj);
@@ -104,9 +98,6 @@ int strm_allocate_buffer(struct strm_res_object *strmres, u32 usize,
u32 i;
struct strm_object *stream_obj = strmres->stream;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(ap_buffer != NULL);
-
if (stream_obj) {
/*
* Allocate from segment specified at time of stream open.
@@ -122,7 +113,6 @@ int strm_allocate_buffer(struct strm_res_object *strmres, u32 usize,
goto func_end;
for (i = 0; i < num_bufs; i++) {
- DBC_ASSERT(stream_obj->xlator != NULL);
(void)cmm_xlator_alloc_buf(stream_obj->xlator, &ap_buffer[i],
usize);
if (ap_buffer[i] == NULL) {
@@ -156,8 +146,6 @@ int strm_close(struct strm_res_object *strmres,
int status = 0;
struct strm_object *stream_obj = strmres->stream;
- DBC_REQUIRE(refs > 0);
-
if (!stream_obj) {
status = -EFAULT;
} else {
@@ -167,7 +155,6 @@ int strm_close(struct strm_res_object *strmres,
status =
(*intf_fxns->chnl_get_info) (stream_obj->chnl_obj,
&chnl_info_obj);
- DBC_ASSERT(!status);
if (chnl_info_obj.cio_cs > 0 || chnl_info_obj.cio_reqs > 0)
status = -EPIPE;
@@ -180,9 +167,6 @@ int strm_close(struct strm_res_object *strmres,
idr_remove(pr_ctxt->stream_id, strmres->id);
func_end:
- DBC_ENSURE(status == 0 || status == -EFAULT ||
- status == -EPIPE || status == -EPERM);
-
dev_dbg(bridge, "%s: stream_obj: %p, status 0x%x\n", __func__,
stream_obj, status);
return status;
@@ -199,10 +183,6 @@ int strm_create(struct strm_mgr **strm_man,
struct strm_mgr *strm_mgr_obj;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(strm_man != NULL);
- DBC_REQUIRE(dev_obj != NULL);
-
*strm_man = NULL;
/* Allocate STRM manager object */
strm_mgr_obj = kzalloc(sizeof(struct strm_mgr), GFP_KERNEL);
@@ -217,7 +197,6 @@ int strm_create(struct strm_mgr **strm_man,
if (!status) {
(void)dev_get_intf_fxns(dev_obj,
&(strm_mgr_obj->intf_fxns));
- DBC_ASSERT(strm_mgr_obj->intf_fxns != NULL);
}
}
@@ -226,8 +205,6 @@ int strm_create(struct strm_mgr **strm_man,
else
kfree(strm_mgr_obj);
- DBC_ENSURE((!status && *strm_man) || (status && *strm_man == NULL));
-
return status;
}
@@ -238,27 +215,10 @@ int strm_create(struct strm_mgr **strm_man,
*/
void strm_delete(struct strm_mgr *strm_mgr_obj)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(strm_mgr_obj);
-
kfree(strm_mgr_obj);
}
/*
- * ======== strm_exit ========
- * Purpose:
- * Discontinue usage of STRM module.
- */
-void strm_exit(void)
-{
- DBC_REQUIRE(refs > 0);
-
- refs--;
-
- DBC_ENSURE(refs >= 0);
-}
-
-/*
* ======== strm_free_buffer ========
* Purpose:
* Frees the buffers allocated for a stream.
@@ -270,15 +230,11 @@ int strm_free_buffer(struct strm_res_object *strmres, u8 ** ap_buffer,
u32 i = 0;
struct strm_object *stream_obj = strmres->stream;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(ap_buffer != NULL);
-
if (!stream_obj)
status = -EFAULT;
if (!status) {
for (i = 0; i < num_bufs; i++) {
- DBC_ASSERT(stream_obj->xlator != NULL);
status =
cmm_xlator_free_buf(stream_obj->xlator,
ap_buffer[i]);
@@ -306,10 +262,6 @@ int strm_get_info(struct strm_object *stream_obj,
int status = 0;
void *virt_base = NULL; /* NULL if no SM used */
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(stream_info != NULL);
- DBC_REQUIRE(stream_info_size >= sizeof(struct stream_info));
-
if (!stream_obj) {
status = -EFAULT;
} else {
@@ -330,7 +282,6 @@ int strm_get_info(struct strm_object *stream_obj,
if (stream_obj->xlator) {
/* We have a translator */
- DBC_ASSERT(stream_obj->segment_id > 0);
cmm_xlator_info(stream_obj->xlator, (u8 **) &virt_base, 0,
stream_obj->segment_id, false);
}
@@ -370,8 +321,6 @@ int strm_idle(struct strm_object *stream_obj, bool flush_data)
struct bridge_drv_interface *intf_fxns;
int status = 0;
- DBC_REQUIRE(refs > 0);
-
if (!stream_obj) {
status = -EFAULT;
} else {
@@ -388,25 +337,6 @@ int strm_idle(struct strm_object *stream_obj, bool flush_data)
}
/*
- * ======== strm_init ========
- * Purpose:
- * Initialize the STRM module.
- */
-bool strm_init(void)
-{
- bool ret = true;
-
- DBC_REQUIRE(refs >= 0);
-
- if (ret)
- refs++;
-
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
- return ret;
-}
-
-/*
* ======== strm_issue ========
* Purpose:
* Issues a buffer on a stream
@@ -418,9 +348,6 @@ int strm_issue(struct strm_object *stream_obj, u8 *pbuf, u32 ul_bytes,
int status = 0;
void *tmp_buf = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pbuf != NULL);
-
if (!stream_obj) {
status = -EFAULT;
} else {
@@ -471,9 +398,6 @@ int strm_open(struct node_object *hnode, u32 dir, u32 index,
void *stream_res;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(strmres != NULL);
- DBC_REQUIRE(pattr != NULL);
*strmres = NULL;
if (dir != DSP_TONODE && dir != DSP_FROMNODE) {
status = -EPERM;
@@ -536,14 +460,12 @@ int strm_open(struct node_object *hnode, u32 dir, u32 index,
goto func_cont;
/* No System DMA */
- DBC_ASSERT(strm_obj->strm_mode != STRMMODE_LDMA);
/* Get the shared mem mgr for this streams dev object */
status = dev_get_cmm_mgr(strm_mgr_obj->dev_obj, &hcmm_mgr);
if (!status) {
/* Allocate an SM addr translator for this strm. */
status = cmm_xlator_create(&strm_obj->xlator, hcmm_mgr, NULL);
if (!status) {
- DBC_ASSERT(strm_obj->segment_id > 0);
/* Set translators Virt Addr attributes */
status = cmm_xlator_info(strm_obj->xlator,
(u8 **) &pattr->virt_base,
@@ -575,10 +497,6 @@ func_cont:
* strm_mgr_obj->chnl_mgr better be valid or we
* would not get here), and then return -EPERM.
*/
- DBC_ASSERT(status == -ENOSR ||
- status == -ECHRNG ||
- status == -EALREADY ||
- status == -EIO);
status = -EPERM;
}
}
@@ -594,12 +512,6 @@ func_cont:
(void)delete_strm(strm_obj);
}
- /* ensure we return a documented error code */
- DBC_ENSURE((!status && strm_obj) ||
- (*strmres == NULL && (status == -EFAULT ||
- status == -EPERM
- || status == -EINVAL)));
-
dev_dbg(bridge, "%s: hnode: %p dir: 0x%x index: 0x%x pattr: %p "
"strmres: %p status: 0x%x\n", __func__,
hnode, dir, index, pattr, strmres, status);
@@ -619,11 +531,6 @@ int strm_reclaim(struct strm_object *stream_obj, u8 ** buf_ptr,
int status = 0;
void *tmp_buf = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(buf_ptr != NULL);
- DBC_REQUIRE(nbytes != NULL);
- DBC_REQUIRE(pdw_arg != NULL);
-
if (!stream_obj) {
status = -EFAULT;
goto func_end;
@@ -679,11 +586,6 @@ int strm_reclaim(struct strm_object *stream_obj, u8 ** buf_ptr,
*buf_ptr = chnl_ioc_obj.buf;
}
func_end:
- /* ensure we return a documented return code */
- DBC_ENSURE(!status || status == -EFAULT ||
- status == -ETIME || status == -ESRCH ||
- status == -EPERM);
-
dev_dbg(bridge, "%s: stream_obj: %p buf_ptr: %p nbytes: %p "
"pdw_arg: %p status 0x%x\n", __func__, stream_obj,
buf_ptr, nbytes, pdw_arg, status);
@@ -702,9 +604,6 @@ int strm_register_notify(struct strm_object *stream_obj, u32 event_mask,
struct bridge_drv_interface *intf_fxns;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hnotification != NULL);
-
if (!stream_obj) {
status = -EFAULT;
} else if ((event_mask & ~((DSP_STREAMIOCOMPLETION) |
@@ -725,10 +624,7 @@ int strm_register_notify(struct strm_object *stream_obj, u32 event_mask,
notify_type,
hnotification);
}
- /* ensure we return a documented return code */
- DBC_ENSURE(!status || status == -EFAULT ||
- status == -ETIME || status == -ESRCH ||
- status == -ENOSYS || status == -EPERM);
+
return status;
}
@@ -747,11 +643,6 @@ int strm_select(struct strm_object **strm_tab, u32 strms,
u32 i;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(strm_tab != NULL);
- DBC_REQUIRE(pmask != NULL);
- DBC_REQUIRE(strms > 0);
-
*pmask = 0;
for (i = 0; i < strms; i++) {
if (!strm_tab[i]) {
@@ -811,9 +702,6 @@ int strm_select(struct strm_object **strm_tab, u32 strms,
func_end:
kfree(sync_events);
- DBC_ENSURE((!status && (*pmask != 0 || utimeout == 0)) ||
- (status && *pmask == 0));
-
return status;
}
diff --git a/drivers/staging/usbip/stub.h b/drivers/staging/usbip/stub.h
index d4073684eacd..a73e437ec215 100644
--- a/drivers/staging/usbip/stub.h
+++ b/drivers/staging/usbip/stub.h
@@ -35,7 +35,6 @@
struct stub_device {
struct usb_interface *interface;
struct usb_device *udev;
- struct list_head list;
struct usbip_device ud;
__u32 devid;
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 03420e25d9c6..fa870e3f7f6a 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -297,7 +297,6 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev,
sdev->devid = (busnum << 16) | devnum;
sdev->ud.side = USBIP_STUB;
sdev->ud.status = SDEV_ST_AVAILABLE;
- /* sdev->ud.lock = SPIN_LOCK_UNLOCKED; */
spin_lock_init(&sdev->ud.lock);
sdev->ud.tcp_socket = NULL;
@@ -306,7 +305,6 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev,
INIT_LIST_HEAD(&sdev->priv_free);
INIT_LIST_HEAD(&sdev->unlink_free);
INIT_LIST_HEAD(&sdev->unlink_tx);
- /* sdev->priv_lock = SPIN_LOCK_UNLOCKED; */
spin_lock_init(&sdev->priv_lock);
init_waitqueue_head(&sdev->tx_waitq);
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index 27ac363d1cfa..1d5b3fc62160 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -367,15 +367,6 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
}
epd = &ep->desc;
-#if 0
- /* epnum 0 is always control */
- if (epnum == 0) {
- if (dir == USBIP_DIR_OUT)
- return usb_sndctrlpipe(udev, 0);
- else
- return usb_rcvctrlpipe(udev, 0);
- }
-#endif
if (usb_endpoint_xfer_control(epd)) {
if (dir == USBIP_DIR_OUT)
return usb_sndctrlpipe(udev, epnum);
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index d93e7f1f7973..70f230269329 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -735,26 +735,25 @@ EXPORT_SYMBOL_GPL(usbip_recv_iso);
* buffer and iso packets need to be stored and be in proper endian in urb
* before calling this function
*/
-int usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
+void usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
{
int np = urb->number_of_packets;
int i;
- int ret;
int actualoffset = urb->actual_length;
if (!usb_pipeisoc(urb->pipe))
- return 0;
+ return;
/* if no packets or length of data is 0, then nothing to unpack */
if (np == 0 || urb->actual_length == 0)
- return 0;
+ return;
/*
* if actual_length is transfer_buffer_length then no padding is
* present.
*/
if (urb->actual_length == urb->transfer_buffer_length)
- return 0;
+ return;
/*
* loop over all packets from last to first (to prevent overwriting
@@ -766,8 +765,6 @@ int usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
urb->transfer_buffer + actualoffset,
urb->iso_frame_desc[i].actual_length);
}
-
- return ret;
}
EXPORT_SYMBOL_GPL(usbip_pad_iso);
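
[Editor's note] Beyond simplifying the callers, the int-to-void conversion fixes a real bug visible in the removed lines: 'ret' was declared but never assigned before the final 'return ret', so the old function could hand back stack garbage. What survives is the restore loop: walk the ISO descriptors from last to first and move each payload out to its nominal offset, recreating the inter-packet padding. Condensed from the function above (field names per struct urb; memmove() because source and destination can overlap):

	for (i = np - 1; i > 0; i--) {
		actualoffset -= urb->iso_frame_desc[i].actual_length;
		memmove(urb->transfer_buffer + urb->iso_frame_desc[i].offset,
			urb->transfer_buffer + actualoffset,
			urb->iso_frame_desc[i].actual_length);
	}
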
diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
index b8f8c48b8a72..c7b888ca54f5 100644
--- a/drivers/staging/usbip/usbip_common.h
+++ b/drivers/staging/usbip/usbip_common.h
@@ -306,7 +306,7 @@ void usbip_header_correct_endian(struct usbip_header *pdu, int send);
void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen);
/* some members of urb must be substituted before. */
int usbip_recv_iso(struct usbip_device *ud, struct urb *urb);
-int usbip_pad_iso(struct usbip_device *ud, struct urb *urb);
+void usbip_pad_iso(struct usbip_device *ud, struct urb *urb);
int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb);
/* usbip_event.c */
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index 2ee97e2095b0..dca9bf11f0c2 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -386,29 +386,6 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
dum->port_status[rhport] |=
USB_PORT_STAT_ENABLE;
}
-#if 0
- if (dum->driver) {
- dum->port_status[rhport] |=
- USB_PORT_STAT_ENABLE;
- /* give it the best speed we agree on */
- dum->gadget.speed = dum->driver->speed;
- dum->gadget.ep0->maxpacket = 64;
- switch (dum->gadget.speed) {
- case USB_SPEED_HIGH:
- dum->port_status[rhport] |=
- USB_PORT_STAT_HIGH_SPEED;
- break;
- case USB_SPEED_LOW:
- dum->gadget.ep0->maxpacket = 8;
- dum->port_status[rhport] |=
- USB_PORT_STAT_LOW_SPEED;
- break;
- default:
- dum->gadget.speed = USB_SPEED_FULL;
- break;
- }
- }
-#endif
}
((u16 *) buf)[0] = cpu_to_le16(dum->port_status[rhport]);
((u16 *) buf)[1] = cpu_to_le16(dum->port_status[rhport] >> 16);
@@ -425,15 +402,6 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_SUSPEND:
usbip_dbg_vhci_rh(" SetPortFeature: "
"USB_PORT_FEAT_SUSPEND\n");
-#if 0
- dum->port_status[rhport] |=
- (1 << USB_PORT_FEAT_SUSPEND);
- if (dum->driver->suspend) {
- spin_unlock(&dum->lock);
- dum->driver->suspend(&dum->gadget);
- spin_lock(&dum->lock);
- }
-#endif
break;
case USB_PORT_FEAT_RESET:
usbip_dbg_vhci_rh(" SetPortFeature: "
@@ -444,13 +412,6 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
~(USB_PORT_STAT_ENABLE |
USB_PORT_STAT_LOW_SPEED |
USB_PORT_STAT_HIGH_SPEED);
-#if 0
- if (dum->driver) {
- dev_dbg(hardware, "disconnect\n");
- stop_activity(dum, dum->driver);
- }
-#endif
-
/* FIXME test that code path! */
}
/* 50msec reset signaling */
@@ -934,14 +895,12 @@ static void vhci_device_init(struct vhci_device *vdev)
vdev->ud.side = USBIP_VHCI;
vdev->ud.status = VDEV_ST_NULL;
- /* vdev->ud.lock = SPIN_LOCK_UNLOCKED; */
spin_lock_init(&vdev->ud.lock);
INIT_LIST_HEAD(&vdev->priv_rx);
INIT_LIST_HEAD(&vdev->priv_tx);
INIT_LIST_HEAD(&vdev->unlink_tx);
INIT_LIST_HEAD(&vdev->unlink_rx);
- /* vdev->priv_lock = SPIN_LOCK_UNLOCKED; */
spin_lock_init(&vdev->priv_lock);
init_waitqueue_head(&vdev->waitq_tx);
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 3f511b47563d..f5fba7320c5a 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -94,8 +94,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
return;
/* restore the padding in iso packets */
- if (usbip_pad_iso(ud, urb) < 0)
- return;
+ usbip_pad_iso(ud, urb);
if (usbip_dbg_flag_vhci_rx)
usbip_dump_urb(urb);
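
[Editor's note] With usbip_pad_iso() now unable to report failure, the early return in the receive path goes away; schematically:

	/* before:
	 *	if (usbip_pad_iso(ud, urb) < 0)
	 *		return;
	 * after: the padding restore cannot fail, call unconditionally */
	usbip_pad_iso(ud, urb);
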
diff --git a/drivers/staging/vme/devices/vme_pio2.h b/drivers/staging/vme/devices/vme_pio2.h
index 3c5931364535..72d9ce0bcb45 100644
--- a/drivers/staging/vme/devices/vme_pio2.h
+++ b/drivers/staging/vme/devices/vme_pio2.h
@@ -243,7 +243,7 @@ struct pio2_card {
int pio2_cntr_reset(struct pio2_card *);
int pio2_gpio_reset(struct pio2_card *);
-int __init pio2_gpio_init(struct pio2_card *);
-void __exit pio2_gpio_exit(struct pio2_card *);
+int __devinit pio2_gpio_init(struct pio2_card *);
+void pio2_gpio_exit(struct pio2_card *);
#endif /* _VME_PIO2_H_ */
diff --git a/drivers/staging/vme/devices/vme_pio2_gpio.c b/drivers/staging/vme/devices/vme_pio2_gpio.c
index dc837deb99dd..858484915f08 100644
--- a/drivers/staging/vme/devices/vme_pio2_gpio.c
+++ b/drivers/staging/vme/devices/vme_pio2_gpio.c
@@ -187,7 +187,7 @@ int pio2_gpio_reset(struct pio2_card *card)
return 0;
}
-int __init pio2_gpio_init(struct pio2_card *card)
+int __devinit pio2_gpio_init(struct pio2_card *card)
{
int retval = 0;
char *label;
@@ -220,7 +220,7 @@ int __init pio2_gpio_init(struct pio2_card *card)
return retval;
};
-void __exit pio2_gpio_exit(struct pio2_card *card)
+void pio2_gpio_exit(struct pio2_card *card)
{
const char *label = card->gc.label;
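
[Editor's note] The annotation change matters for hotplug: __init code is discarded once boot finishes, but a VME probe routine can run any time a device appears, so pio2_gpio_init() must be __devinit (kept while hotplug is possible), and pio2_gpio_exit(), reachable from the device remove path, cannot be __exit at all. A schematic probe/remove pair under those rules — pio2_probe()/pio2_remove() and the drvdata lookup are hypothetical, signatures approximate for this kernel era:

	static int __devinit pio2_probe(struct vme_dev *vdev)
	{
		struct pio2_card *card = dev_get_drvdata(&vdev->dev); /* assumed */

		return pio2_gpio_init(card);  /* __devinit callee: OK from probe */
	}

	static int __devexit pio2_remove(struct vme_dev *vdev)
	{
		struct pio2_card *card = dev_get_drvdata(&vdev->dev);

		pio2_gpio_exit(card);         /* must not be __exit: runs at unplug */
		return 0;
	}
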
diff --git a/drivers/staging/vme/vme.h b/drivers/staging/vme/vme.h
index 9d38ceed60e2..c9d65bf14cec 100644
--- a/drivers/staging/vme/vme.h
+++ b/drivers/staging/vme/vme.h
@@ -156,7 +156,7 @@ int vme_irq_request(struct vme_dev *, int, int,
void vme_irq_free(struct vme_dev *, int, int);
int vme_irq_generate(struct vme_dev *, int, int);
-struct vme_resource * vme_lm_request(struct vme_dev *);
+struct vme_resource *vme_lm_request(struct vme_dev *);
int vme_lm_count(struct vme_resource *);
int vme_lm_set(struct vme_resource *, unsigned long long, u32, u32);
int vme_lm_get(struct vme_resource *, unsigned long long *, u32 *, u32 *);
diff --git a/drivers/staging/vt6655/bssdb.c b/drivers/staging/vt6655/bssdb.c
index 577599ed70ad..1368e8cc9add 100644
--- a/drivers/staging/vt6655/bssdb.c
+++ b/drivers/staging/vt6655/bssdb.c
@@ -1327,13 +1327,13 @@ start:
}
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- // if adhoc started which essid is NULL string, rescaning.
+ // if adhoc started which essid is NULL string, rescanning.
if ((pMgmt->eCurrState == WMAC_STATE_STARTED) && (pCurrSSID->len == 0)) {
if (pDevice->uAutoReConnectTime < 10) {
pDevice->uAutoReConnectTime++;
}
else {
- DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Adhoc re-scaning ...\n");
+ DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Adhoc re-scanning ...\n");
pMgmt->eScanType = WMAC_SCAN_ACTIVE;
bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
diff --git a/drivers/staging/vt6655/ioctl.c b/drivers/staging/vt6655/ioctl.c
index 7fd5cc5a55f6..ef197efab049 100644
--- a/drivers/staging/vt6655/ioctl.c
+++ b/drivers/staging/vt6655/ioctl.c
@@ -324,16 +324,16 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
memset(pList->sBSSIDList[ii].abySSID, 0, WLAN_SSID_MAXLEN + 1);
memcpy(pList->sBSSIDList[ii].abySSID, pItemSSID->abySSID, pItemSSID->len);
- if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo)) {
+ if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo))
pList->sBSSIDList[ii].byNetType = INFRA;
- } else {
+ else
pList->sBSSIDList[ii].byNetType = ADHOC;
- }
- if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo)) {
+
+ if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo))
pList->sBSSIDList[ii].bWEPOn = true;
- } else {
+ else
pList->sBSSIDList[ii].bWEPOn = false;
- }
+
ii++;
if (ii >= pList->uItem)
break;
@@ -367,9 +367,9 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
netif_stop_queue(pDevice->dev);
spin_lock_irq(&pDevice->lock);
- if (pDevice->bRadioOff == false) {
+ if (pDevice->bRadioOff == false)
CARDbRadioPowerOff(pDevice);
- }
+
pDevice->bLinkPass = false;
memset(pMgmt->abyCurrBSSID, 0, 6);
pMgmt->eCurrState = WMAC_STATE_IDLE;
@@ -489,13 +489,12 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
break;
}
- if (sStartAPCmd.wBBPType == PHY80211g) {
+ if (sStartAPCmd.wBBPType == PHY80211g)
pMgmt->byAPBBType = PHY_TYPE_11G;
- } else if (sStartAPCmd.wBBPType == PHY80211a) {
+ else if (sStartAPCmd.wBBPType == PHY80211a)
pMgmt->byAPBBType = PHY_TYPE_11A;
- } else {
+ else
pMgmt->byAPBBType = PHY_TYPE_11B;
- }
pItemSSID = (PWLAN_IE_SSID)sStartAPCmd.ssid;
if (pItemSSID->len > WLAN_SSID_MAXLEN + 1)
diff --git a/drivers/staging/vt6656/bssdb.c b/drivers/staging/vt6656/bssdb.c
index 32c67ed8435a..619c257e8773 100644
--- a/drivers/staging/vt6656/bssdb.c
+++ b/drivers/staging/vt6656/bssdb.c
@@ -1195,13 +1195,13 @@ else {
}
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- // if adhoc started which essid is NULL string, rescaning.
+ // if adhoc started which essid is NULL string, rescanning.
if ((pMgmt->eCurrState == WMAC_STATE_STARTED) && (pCurrSSID->len == 0)) {
if (pDevice->uAutoReConnectTime < 10) {
pDevice->uAutoReConnectTime++;
}
else {
- DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Adhoc re-scaning ...\n");
+ DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Adhoc re-scanning ...\n");
pMgmt->eScanType = WMAC_SCAN_ACTIVE;
bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index ecfda5272fa1..b24e5314a6af 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -46,9 +46,6 @@
#include <net/iw_handler.h>
-
-/*--------------------- Static Definitions -------------------------*/
-
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
#define SUPPORTED_WIRELESS_EXT 18
#else
@@ -63,19 +60,8 @@ static const long frequency_list[] = {
5700, 5745, 5765, 5785, 5805, 5825
};
-
-/*--------------------- Static Classes ----------------------------*/
-
-
-//static int msglevel =MSG_LEVEL_DEBUG;
static int msglevel =MSG_LEVEL_INFO;
-
-/*--------------------- Static Variables --------------------------*/
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
{
PSDevice pDevice = netdev_priv(dev);
@@ -87,7 +73,6 @@ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
pDevice->wstats.qual.qual =(BYTE) pDevice->scStatistic.LinkQuality;
RFvRSSITodBm(pDevice, (BYTE)(pDevice->uCurrRSSI), &ldBm);
pDevice->wstats.qual.level = ldBm;
- //pDevice->wstats.qual.level = 0x100 - pDevice->uCurrRSSI;
pDevice->wstats.qual.noise = 0;
pDevice->wstats.qual.updated = 1;
pDevice->wstats.discard.nwid = 0;
@@ -100,21 +85,6 @@ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
return &pDevice->wstats;
}
-
-
-/*------------------------------------------------------------------*/
-
-
-static int iwctl_commit(struct net_device *dev,
- struct iw_request_info *info,
- void *wrq,
- char *extra)
-{
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWCOMMIT\n");
-
- return 0;
-}
-
/*
* Wireless Handler : get protocol name
*/
@@ -197,14 +167,12 @@ if(pDevice->byReAssocCount > 0) { //reject scan when re-associating!
}
pMgmt->eScanType = WMAC_SCAN_PASSIVE;
- //printk("SIOCSIWSCAN:WLAN_CMD_BSSID_SCAN\n");
bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
spin_unlock_irq(&pDevice->lock);
return 0;
}
-
/*
* Wireless Handler : get scan results
*/
@@ -503,7 +471,7 @@ int iwctl_siwmode(struct net_device *dev,
* Wireless Handler : get operation mode
*/
-int iwctl_giwmode(struct net_device *dev,
+void iwctl_giwmode(struct net_device *dev,
struct iw_request_info *info,
__u32 *wmode,
char *extra)
@@ -530,8 +498,6 @@ int iwctl_giwmode(struct net_device *dev,
default:
*wmode = IW_MODE_ADHOC;
}
-
- return 0;
}
@@ -539,7 +505,7 @@ int iwctl_giwmode(struct net_device *dev,
* Wireless Handler : get capability range
*/
-int iwctl_giwrange(struct net_device *dev,
+void iwctl_giwrange(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *wrq,
char *extra)
@@ -634,9 +600,6 @@ int iwctl_giwrange(struct net_device *dev,
range->avg_qual.level = 176; // -80 dBm
range->avg_qual.noise = 0;
}
-
-
- return 0;
}
@@ -708,9 +671,7 @@ int iwctl_giwap(struct net_device *dev,
memcpy(wrq->sa_data, pMgmt->abyCurrBSSID, 6);
-//20080123-02,<Modify> by Einsn Liu
if ((pDevice->bLinkPass == FALSE) && (pMgmt->eCurrMode != WMAC_MODE_ESS_AP))
- // if ((pDevice->bLinkPass == FALSE) && (pMgmt->eCurrMode == WMAC_MODE_ESS_STA))
memset(wrq->sa_data, 0, 6);
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
@@ -895,8 +856,7 @@ int iwctl_siwessid(struct net_device *dev,
/*
* Wireless Handler : get essid
*/
-
-int iwctl_giwessid(struct net_device *dev,
+void iwctl_giwessid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *wrq,
char *extra)
@@ -913,14 +873,11 @@ int iwctl_giwessid(struct net_device *dev,
// Get the current SSID
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- //pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
memcpy(extra, pItemSSID->abySSID , pItemSSID->len);
extra[pItemSSID->len] = '\0';
wrq->length = pItemSSID->len;
wrq->flags = 1; // active
-
- return 0;
}
/*
@@ -1008,8 +965,7 @@ int iwctl_siwrate(struct net_device *dev,
/*
* Wireless Handler : get data rate
*/
-
-int iwctl_giwrate(struct net_device *dev,
+void iwctl_giwrate(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *wrq,
char *extra)
@@ -1047,9 +1003,6 @@ int iwctl_giwrate(struct net_device *dev,
if (pDevice->bFixRate == TRUE)
wrq->fixed = TRUE;
}
-
-
- return 0;
}
@@ -1057,27 +1010,19 @@ int iwctl_giwrate(struct net_device *dev,
/*
* Wireless Handler : set rts threshold
*/
-
int iwctl_siwrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
+ struct iw_param *wrq)
{
- PSDevice pDevice = (PSDevice)netdev_priv(dev);
- int rc = 0;
+ PSDevice pDevice = (PSDevice)netdev_priv(dev);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWRTS \n");
+ if ((wrq->value < 0 || wrq->value > 2312) && !wrq->disabled)
+ return -EINVAL;
- {
- int rthr = wrq->value;
- if(wrq->disabled)
- rthr = 2312;
- if((rthr < 0) || (rthr > 2312)) {
- rc = -EINVAL;
- }else {
- pDevice->wRTSThreshold = rthr;
- }
- }
+ else if (wrq->disabled)
+ pDevice->wRTSThreshold = 2312;
+
+ else
+ pDevice->wRTSThreshold = wrq->value;
return 0;
}
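
[Editor's note] The rewritten handler collapses the old temporary-plus-nested-braces version into three cases. Distilled into a standalone helper (names hypothetical; 2312 is the maximum 802.11 frame size, so parking the threshold there effectively disables RTS/CTS, which is what the wireless-extensions 'disabled' flag requests):

	static int set_rts_threshold(u16 *threshold, int value, int disabled)
	{
		if (disabled) {            /* e.g. "iwconfig wlanX rts off" */
			*threshold = 2312; /* above any frame: RTS never fires */
			return 0;
		}
		if (value < 0 || value > 2312)
			return -EINVAL;
		*threshold = value;
		return 0;
	}
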
@@ -1327,55 +1272,6 @@ int iwctl_siwencode(struct net_device *dev,
return rc;
}
-/*
- * Wireless Handler : get encode mode
- */
-//2008-0409-06, <Mark> by Einsn Liu
- /*
-int iwctl_giwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- PSDevice pDevice = (PSDevice)netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int rc = 0;
- char abyKey[WLAN_WEP232_KEYLEN];
- unsigned int index = (unsigned int)(wrq->flags & IW_ENCODE_INDEX);
- PSKeyItem pKey = NULL;
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWENCODE\n");
-
-
- memset(abyKey, 0, sizeof(abyKey));
- // Check encryption mode
- wrq->flags = IW_ENCODE_NOKEY;
- // Is WEP enabled ???
- if (pDevice->bEncryptionEnable)
- wrq->flags |= IW_ENCODE_ENABLED;
- else
- wrq->flags |= IW_ENCODE_DISABLED;
-
- if (pMgmt->bShareKeyAlgorithm)
- wrq->flags |= IW_ENCODE_RESTRICTED;
- else
- wrq->flags |= IW_ENCODE_OPEN;
-
- if (KeybGetKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, (BYTE)index , &pKey)){
- wrq->length = pKey->uKeyLength;
- memcpy(abyKey, pKey->abyKey, pKey->uKeyLength);
- }
- else {
- rc = -EINVAL;
- return rc;
- }
- wrq->flags |= index;
- // Copy the key to the user buffer
- memcpy(extra, abyKey, WLAN_WEP232_KEYLEN);
- return 0;
-}
-*/
-
int iwctl_giwencode(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *wrq,
@@ -1562,7 +1458,6 @@ int iwctl_siwauth(struct net_device *dev,
wpa_version = wrq->value;
if(wrq->value == IW_AUTH_WPA_VERSION_DISABLED) {
PRINT_K("iwctl_siwauth:set WPADEV to disable at 1??????\n");
- //pDevice->bWPADEVUp = FALSE;
}
else if(wrq->value == IW_AUTH_WPA_VERSION_WPA) {
PRINT_K("iwctl_siwauth:set WPADEV to WPA1******\n");
@@ -1570,7 +1465,6 @@ int iwctl_siwauth(struct net_device *dev,
else {
PRINT_K("iwctl_siwauth:set WPADEV to WPA2******\n");
}
- //pDevice->bWPASuppWextEnabled =TRUE;
break;
case IW_AUTH_CIPHER_PAIRWISE:
pairwise = wrq->value;
@@ -1627,11 +1521,6 @@ int iwctl_siwauth(struct net_device *dev,
}
break;
case IW_AUTH_WPA_ENABLED:
- //pDevice->bWPADEVUp = !! wrq->value;
- //if(pDevice->bWPADEVUp==TRUE)
- // printk("iwctl_siwauth:set WPADEV to enable successful*******\n");
- //else
- // printk("iwctl_siwauth:set WPADEV to enable fail?????\n");
break;
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
break;
@@ -1646,7 +1535,6 @@ int iwctl_siwauth(struct net_device *dev,
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
pMgmt->bShareKeyAlgorithm = FALSE;
pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
- //pDevice->bWPADEVUp = FALSE;
PRINT_K("iwctl_siwauth:set WPADEV to disaable at 2?????\n");
}
@@ -1655,15 +1543,6 @@ int iwctl_siwauth(struct net_device *dev,
ret = -EOPNOTSUPP;
break;
}
-/*
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_version = %d\n",wpa_version);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise = %d\n",pairwise);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->eEncryptionStatus = %d\n",pDevice->eEncryptionStatus);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pMgmt->eAuthenMode = %d\n",pMgmt->eAuthenMode);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pMgmt->bShareKeyAlgorithm = %s\n",pMgmt->bShareKeyAlgorithm?"TRUE":"FALSE");
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->bEncryptionEnable = %s\n",pDevice->bEncryptionEnable?"TRUE":"FALSE");
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->bWPADEVUp = %s\n",pDevice->bWPADEVUp?"TRUE":"FALSE");
-*/
return ret;
}
@@ -1752,8 +1631,6 @@ int iwctl_siwencodeext(struct net_device *dev,
u8 seq[IW_ENCODE_SEQ_MAX_SIZE];
u8 key[64];
size_t seq_len=0,key_len=0;
-//
- // int ii;
u8 *buf;
size_t blen;
u8 key_array[64];
@@ -1883,7 +1760,6 @@ int iwctl_siwmlme(struct net_device *dev,
PSDevice pDevice = (PSDevice)netdev_priv(dev);
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
struct iw_mlme *mlme = (struct iw_mlme *)extra;
- //u16 reason = cpu_to_le16(mlme->reason_code);
int ret = 0;
if(memcmp(pMgmt->abyCurrBSSID, mlme->addr.sa_data, ETH_ALEN)){
@@ -1892,12 +1768,6 @@ int iwctl_siwmlme(struct net_device *dev,
}
switch(mlme->cmd){
case IW_MLME_DEAUTH:
- //this command seems to be not complete,please test it --einsnliu
- //printk("iwctl_siwmlme--->send DEAUTH\n");
- /* bScheduleCommand((void *) pDevice,
- WLAN_CMD_DEAUTH,
- (PBYTE)&reason); */
- //break;
case IW_MLME_DISASSOC:
if(pDevice->bLinkPass == TRUE){
PRINT_K("iwctl_siwmlme--->send DISASSOCIATE\n");
@@ -1916,77 +1786,9 @@ int iwctl_siwmlme(struct net_device *dev,
#endif
-/*------------------------------------------------------------------*/
-/*
- * Structures to export the Wireless Handlers
- */
-
-
-/*
-static const iw_handler iwctl_handler[] =
-{
- (iw_handler) iwctl_commit, // SIOCSIWCOMMIT
- (iw_handler) iwctl_giwname, // SIOCGIWNAME
- (iw_handler) NULL, // SIOCSIWNWID
- (iw_handler) iwctl_siwfreq, // SIOCSIWFREQ
- (iw_handler) iwctl_giwfreq, // SIOCGIWFREQ
- (iw_handler) iwctl_siwmode, // SIOCSIWMODE
- (iw_handler) iwctl_giwmode, // SIOCGIWMODE
- (iw_handler) NULL, // SIOCSIWSENS
- (iw_handler) iwctl_giwsens, // SIOCGIWSENS
- (iw_handler) NULL, // SIOCSIWRANGE
- (iw_handler) iwctl_giwrange, // SIOCGIWRANGE
- (iw_handler) NULL, // SIOCSIWPRIV
- (iw_handler) NULL, // SIOCGIWPRIV
- (iw_handler) NULL, // SIOCSIWSTATS
- (iw_handler) NULL, // SIOCGIWSTATS
- (iw_handler) NULL, // SIOCSIWSPY
- (iw_handler) NULL, // SIOCGIWSPY
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // -- hole --
- (iw_handler) iwctl_siwap, // SIOCSIWAP
- (iw_handler) iwctl_giwap, // SIOCGIWAP
- (iw_handler) NULL, // -- hole -- 0x16
- (iw_handler) iwctl_giwaplist, // SIOCGIWAPLIST
- (iw_handler) iwctl_siwscan, // SIOCSIWSCAN
- (iw_handler) iwctl_giwscan, // SIOCGIWSCAN
- (iw_handler) iwctl_siwessid, // SIOCSIWESSID
- (iw_handler) iwctl_giwessid, // SIOCGIWESSID
- (iw_handler) NULL, // SIOCSIWNICKN
- (iw_handler) NULL, // SIOCGIWNICKN
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // -- hole --
- (iw_handler) iwctl_siwrate, // SIOCSIWRATE 0x20
- (iw_handler) iwctl_giwrate, // SIOCGIWRATE
- (iw_handler) iwctl_siwrts, // SIOCSIWRTS
- (iw_handler) iwctl_giwrts, // SIOCGIWRTS
- (iw_handler) iwctl_siwfrag, // SIOCSIWFRAG
- (iw_handler) iwctl_giwfrag, // SIOCGIWFRAG
- (iw_handler) NULL, // SIOCSIWTXPOW
- (iw_handler) NULL, // SIOCGIWTXPOW
- (iw_handler) iwctl_siwretry, // SIOCSIWRETRY
- (iw_handler) iwctl_giwretry, // SIOCGIWRETRY
- (iw_handler) iwctl_siwencode, // SIOCSIWENCODE
- (iw_handler) iwctl_giwencode, // SIOCGIWENCODE
- (iw_handler) iwctl_siwpower, // SIOCSIWPOWER
- (iw_handler) iwctl_giwpower, // SIOCGIWPOWER
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // -- hole --
- (iw_handler) iwctl_siwgenie, // SIOCSIWGENIE
- (iw_handler) iwctl_giwgenie, // SIOCGIWGENIE
- (iw_handler) iwctl_siwauth, // SIOCSIWAUTH
- (iw_handler) iwctl_giwauth, // SIOCGIWAUTH
- (iw_handler) iwctl_siwencodeext, // SIOCSIWENCODEEXT
- (iw_handler) iwctl_giwencodeext, // SIOCGIWENCODEEXT
- (iw_handler) NULL, // SIOCSIWPMKSA
- (iw_handler) NULL, // -- hole --
-
-};
-*/
-
static const iw_handler iwctl_handler[] =
{
- (iw_handler) iwctl_commit, // SIOCSIWCOMMIT
+ (iw_handler) NULL, /* SIOCSIWCOMMIT */
(iw_handler) NULL, // SIOCGIWNAME
(iw_handler) NULL, // SIOCSIWNWID
(iw_handler) NULL, // SIOCGIWNWID
@@ -2063,13 +1865,9 @@ const struct iw_handler_def iwctl_handler_def =
{
.get_wireless_stats = &iwctl_get_wireless_stats,
.num_standard = sizeof(iwctl_handler)/sizeof(iw_handler),
-// .num_private = sizeof(iwctl_private_handler)/sizeof(iw_handler),
-// .num_private_args = sizeof(iwctl_private_args)/sizeof(struct iw_priv_args),
.num_private = 0,
.num_private_args = 0,
.standard = (iw_handler *) iwctl_handler,
-// .private = (iw_handler *) iwctl_private_handler,
-// .private_args = (struct iw_priv_args *)iwctl_private_args,
.private = NULL,
.private_args = NULL,
};
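
[Editor's note] In the wireless-extensions core the standard handler array is indexed by ioctl number, and a NULL slot simply means the request is unsupported; since the deleted iwctl_commit() only printed a debug message and returned 0, replacing its slot with NULL loses nothing. Schematic of the dispatch, as I understand the wext core of this era:

	/* roughly: handler = table[cmd - SIOCIWFIRST]; */
	static const iw_handler example_handlers[] = {
		(iw_handler) NULL,    /* SIOCSIWCOMMIT - nothing to commit */
		(iw_handler) NULL,    /* SIOCGIWNAME   - not implemented   */
	};
	/* a NULL entry makes the core fail that ioctl (-EOPNOTSUPP) */
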
diff --git a/drivers/staging/vt6656/iwctl.h b/drivers/staging/vt6656/iwctl.h
index 10a240e65012..0c6e0496779b 100644
--- a/drivers/staging/vt6656/iwctl.h
+++ b/drivers/staging/vt6656/iwctl.h
@@ -46,13 +46,13 @@ int iwctl_siwap(struct net_device *dev,
struct sockaddr *wrq,
char *extra);
-int iwctl_giwrange(struct net_device *dev,
+void iwctl_giwrange(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *wrq,
char *extra);
-int iwctl_giwmode(struct net_device *dev,
+void iwctl_giwmode(struct net_device *dev,
struct iw_request_info *info,
__u32 *wmode,
char *extra);
@@ -97,7 +97,7 @@ int iwctl_siwessid(struct net_device *dev,
struct iw_point *wrq,
char *extra);
-int iwctl_giwessid(struct net_device *dev,
+void iwctl_giwessid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *wrq,
char *extra);
@@ -107,16 +107,13 @@ int iwctl_siwrate(struct net_device *dev,
struct iw_param *wrq,
char *extra);
-int iwctl_giwrate(struct net_device *dev,
+void iwctl_giwrate(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *wrq,
char *extra);
int iwctl_siwrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
+ struct iw_param *wrq);
int iwctl_giwrts(struct net_device *dev,
struct iw_request_info *info,
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 6a708f447651..763e028a5cc5 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1657,8 +1657,8 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
{
char essid[IW_ESSID_MAX_SIZE+1];
if (wrq->u.essid.pointer) {
- rc = iwctl_giwessid(dev, NULL,
- &(wrq->u.essid), essid);
+ iwctl_giwessid(dev, NULL,
+ &(wrq->u.essid), essid);
if (copy_to_user(wrq->u.essid.pointer,
essid,
wrq->u.essid.length) )
@@ -1698,14 +1698,13 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
// Get the current bit-rate
case SIOCGIWRATE:
-
- rc = iwctl_giwrate(dev, NULL, &(wrq->u.bitrate), NULL);
+ iwctl_giwrate(dev, NULL, &(wrq->u.bitrate), NULL);
break;
// Set the desired RTS threshold
case SIOCSIWRTS:
- rc = iwctl_siwrts(dev, NULL, &(wrq->u.rts), NULL);
+ rc = iwctl_siwrts(dev, &(wrq->u.rts));
break;
// Get the current RTS threshold
@@ -1733,7 +1732,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
// Get mode of operation
case SIOCGIWMODE:
- rc = iwctl_giwmode(dev, NULL, &(wrq->u.mode), NULL);
+ iwctl_giwmode(dev, NULL, &(wrq->u.mode), NULL);
break;
// Set WEP keys and mode
@@ -1811,7 +1810,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
{
struct iw_range range;
- rc = iwctl_giwrange(dev, NULL, &(wrq->u.data), (char *) &range);
+ iwctl_giwrange(dev, NULL, &(wrq->u.data), (char *) &range);
if (copy_to_user(wrq->u.data.pointer, &range, sizeof(struct iw_range)))
rc = -EFAULT;
}
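
[Editor's note] The same int-to-void pattern recurs across iwctl_giwmode(), iwctl_giwrange(), iwctl_giwessid() and iwctl_giwrate(): each could only ever return 0, so the return type goes away and every device_ioctl() call site drops its dead 'rc =' assignment within the same patch, keeping each intermediate tree buildable. In outline:

	/* before: int iwctl_giwmode(struct net_device *, struct iw_request_info *,
	 *                           __u32 *, char *);  -- could only return 0 */
	void iwctl_giwmode(struct net_device *dev, struct iw_request_info *info,
			   __u32 *wmode, char *extra);
	/* callers updated in the same commit:
	 *	rc = iwctl_giwmode(dev, NULL, &(wrq->u.mode), NULL);
	 * becomes
	 *	     iwctl_giwmode(dev, NULL, &(wrq->u.mode), NULL);          */
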
diff --git a/drivers/staging/vt6656/wpactl.c b/drivers/staging/vt6656/wpactl.c
index 2fa4f845a755..5435e8205b2c 100644
--- a/drivers/staging/vt6656/wpactl.c
+++ b/drivers/staging/vt6656/wpactl.c
@@ -46,23 +46,18 @@
#define VIAWGET_WPA_MAX_BUF_SIZE 1024
-
-
static const int frequency_list[] = {
2412, 2417, 2422, 2427, 2432, 2437, 2442,
2447, 2452, 2457, 2462, 2467, 2472, 2484
};
+
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
-//static int msglevel =MSG_LEVEL_DEBUG;
-static int msglevel =MSG_LEVEL_INFO;
+static int msglevel = MSG_LEVEL_INFO;
/*--------------------- Static Functions --------------------------*/
-
-
-
/*--------------------- Export Variables --------------------------*/
static void wpadev_setup(struct net_device *dev)
{
@@ -72,9 +67,9 @@ static void wpadev_setup(struct net_device *dev)
dev->addr_len = ETH_ALEN;
dev->tx_queue_len = 1000;
- memset(dev->broadcast,0xFF, ETH_ALEN);
+ memset(dev->broadcast, 0xFF, ETH_ALEN);
- dev->flags = IFF_BROADCAST|IFF_MULTICAST;
+ dev->flags = IFF_BROADCAST | IFF_MULTICAST;
}
/*
@@ -90,45 +85,43 @@ static void wpadev_setup(struct net_device *dev)
* Return Value:
*
*/
-
static int wpa_init_wpadev(PSDevice pDevice)
{
- PSDevice wpadev_priv;
+ PSDevice wpadev_priv;
struct net_device *dev = pDevice->dev;
- int ret=0;
+ int ret = 0;
pDevice->wpadev = alloc_netdev(sizeof(PSDevice), "vntwpa", wpadev_setup);
if (pDevice->wpadev == NULL)
return -ENOMEM;
- wpadev_priv = netdev_priv(pDevice->wpadev);
- *wpadev_priv = *pDevice;
+ wpadev_priv = netdev_priv(pDevice->wpadev);
+ *wpadev_priv = *pDevice;
memcpy(pDevice->wpadev->dev_addr, dev->dev_addr, ETH_ALEN);
- pDevice->wpadev->base_addr = dev->base_addr;
+ pDevice->wpadev->base_addr = dev->base_addr;
pDevice->wpadev->irq = dev->irq;
pDevice->wpadev->mem_start = dev->mem_start;
pDevice->wpadev->mem_end = dev->mem_end;
ret = register_netdev(pDevice->wpadev);
if (ret) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: register_netdev(WPA) failed!\n",
- dev->name);
+ dev->name);
free_netdev(pDevice->wpadev);
return -1;
}
if (pDevice->skb == NULL) {
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- if (pDevice->skb == NULL)
- return -ENOMEM;
- }
+ pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
+ if (pDevice->skb == NULL)
+ return -ENOMEM;
+ }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Registered netdev %s for WPA management\n",
- dev->name, pDevice->wpadev->name);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Registered netdev %s for WPA management\n",
+ dev->name, pDevice->wpadev->name);
return 0;
}
-
/*
* Description:
* unregister net_device (wpadev)
@@ -141,29 +134,24 @@ static int wpa_init_wpadev(PSDevice pDevice)
* Return Value:
*
*/
-
static int wpa_release_wpadev(PSDevice pDevice)
{
- if (pDevice->skb) {
- dev_kfree_skb(pDevice->skb);
- pDevice->skb = NULL;
- }
+ if (pDevice->skb) {
+ dev_kfree_skb(pDevice->skb);
+ pDevice->skb = NULL;
+ }
- if (pDevice->wpadev) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
- pDevice->dev->name, pDevice->wpadev->name);
- unregister_netdev(pDevice->wpadev);
- free_netdev(pDevice->wpadev);
- pDevice->wpadev = NULL;
- }
+ if (pDevice->wpadev) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
+ pDevice->dev->name, pDevice->wpadev->name);
+ unregister_netdev(pDevice->wpadev);
+ free_netdev(pDevice->wpadev);
+ pDevice->wpadev = NULL;
+ }
return 0;
}
-
-
-
-
/*
* Description:
 * Set enable/disable dev for wpa supplicant daemon
@@ -177,13 +165,11 @@ static int wpa_release_wpadev(PSDevice pDevice)
* Return Value:
*
*/
-
int wpa_set_wpadev(PSDevice pDevice, int val)
{
if (val)
return wpa_init_wpadev(pDevice);
- else
- return wpa_release_wpadev(pDevice);
+ return wpa_release_wpadev(pDevice);
}
/*
@@ -199,245 +185,217 @@ int wpa_set_wpadev(PSDevice pDevice, int val)
* Return Value:
*
*/
-
int wpa_set_keys(PSDevice pDevice, void *ctx, BOOL fcpfkernel)
{
- struct viawget_wpa_param *param=ctx;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- DWORD dwKeyIndex = 0;
- BYTE abyKey[MAX_KEY_LEN];
- BYTE abySeq[MAX_KEY_LEN];
- QWORD KeyRSC;
-// NDIS_802_11_KEY_RSC KeyRSC;
- BYTE byKeyDecMode = KEY_CTL_WEP;
+ struct viawget_wpa_param *param = ctx;
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ DWORD dwKeyIndex = 0;
+ BYTE abyKey[MAX_KEY_LEN];
+ BYTE abySeq[MAX_KEY_LEN];
+ QWORD KeyRSC;
+ BYTE byKeyDecMode = KEY_CTL_WEP;
int ret = 0;
- int uu, ii;
-
+ int uu;
+ int ii;
if (param->u.wpa_key.alg_name > WPA_ALG_CCMP)
return -EINVAL;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.wpa_key.alg_name = %d \n", param->u.wpa_key.alg_name);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.wpa_key.alg_name = %d \n",
+ param->u.wpa_key.alg_name);
if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- pDevice->bEncryptionEnable = FALSE;
- pDevice->byKeyIndex = 0;
- pDevice->bTransmitKey = FALSE;
- for (uu=0; uu<MAX_KEY_TABLE; uu++) {
- MACvDisableKeyEntry(pDevice, uu);
- }
- return ret;
- }
+ pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
+ pDevice->bEncryptionEnable = FALSE;
+ pDevice->byKeyIndex = 0;
+ pDevice->bTransmitKey = FALSE;
+		for (uu = 0; uu < MAX_KEY_TABLE; uu++) {
+ MACvDisableKeyEntry(pDevice, uu);
+ }
+ return ret;
+ }
if (param->u.wpa_key.key && param->u.wpa_key.key_len > sizeof(abyKey))
return -EINVAL;
- spin_unlock_irq(&pDevice->lock);
- if(param->u.wpa_key.key && fcpfkernel) {
- memcpy(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len);
- }
- else {
- if (param->u.wpa_key.key &&
- copy_from_user(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len)) {
- spin_lock_irq(&pDevice->lock);
- return -EINVAL;
+ spin_unlock_irq(&pDevice->lock);
+ if (param->u.wpa_key.key && fcpfkernel) {
+ memcpy(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len);
+ } else {
+ if (param->u.wpa_key.key &&
+ copy_from_user(&abyKey[0], param->u.wpa_key.key,
+ param->u.wpa_key.key_len)) {
+ spin_lock_irq(&pDevice->lock);
+ return -EINVAL;
+ }
}
- }
- spin_lock_irq(&pDevice->lock);
+ spin_lock_irq(&pDevice->lock);
- dwKeyIndex = (DWORD)(param->u.wpa_key.key_index);
+ dwKeyIndex = (DWORD)(param->u.wpa_key.key_index);
if (param->u.wpa_key.alg_name == WPA_ALG_WEP) {
- if (dwKeyIndex > 3) {
- return -EINVAL;
- }
- else {
- if (param->u.wpa_key.set_tx) {
- pDevice->byKeyIndex = (BYTE)dwKeyIndex;
- pDevice->bTransmitKey = TRUE;
- dwKeyIndex |= (1 << 31);
- }
- KeybSetDefaultKey( pDevice,
- &(pDevice->sKey),
- dwKeyIndex & ~(BIT30 | USE_KEYRSC),
- param->u.wpa_key.key_len,
- NULL,
- abyKey,
- KEY_CTL_WEP
- );
-
- }
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- pDevice->bEncryptionEnable = TRUE;
- return ret;
+ if (dwKeyIndex > 3) {
+ return -EINVAL;
+ } else {
+ if (param->u.wpa_key.set_tx) {
+ pDevice->byKeyIndex = (BYTE)dwKeyIndex;
+ pDevice->bTransmitKey = TRUE;
+ dwKeyIndex |= (1 << 31);
+ }
+ KeybSetDefaultKey( pDevice,
+ &(pDevice->sKey),
+ dwKeyIndex & ~(BIT30 | USE_KEYRSC),
+ param->u.wpa_key.key_len,
+ NULL,
+ abyKey,
+ KEY_CTL_WEP
+ );
+
+ }
+ pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
+ pDevice->bEncryptionEnable = TRUE;
+ return ret;
}
if (param->u.wpa_key.seq && param->u.wpa_key.seq_len > sizeof(abySeq))
return -EINVAL;
- spin_unlock_irq(&pDevice->lock);
- if(param->u.wpa_key.seq && fcpfkernel) {
- memcpy(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len);
- }
- else {
- if (param->u.wpa_key.seq &&
- copy_from_user(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len)) {
- spin_lock_irq(&pDevice->lock);
- return -EINVAL;
- }
+ spin_unlock_irq(&pDevice->lock);
+ if (param->u.wpa_key.seq && fcpfkernel) {
+ memcpy(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len);
+ } else {
+ if (param->u.wpa_key.seq &&
+ copy_from_user(&abySeq[0], param->u.wpa_key.seq,
+ param->u.wpa_key.seq_len)) {
+ spin_lock_irq(&pDevice->lock);
+ return -EINVAL;
+ }
}
spin_lock_irq(&pDevice->lock);
if (param->u.wpa_key.seq_len > 0) {
for (ii = 0 ; ii < param->u.wpa_key.seq_len ; ii++) {
- if (ii < 4)
- LODWORD(KeyRSC) |= (abySeq[ii] << (ii * 8));
- else
- HIDWORD(KeyRSC) |= (abySeq[ii] << ((ii-4) * 8));
- //KeyRSC |= (abySeq[ii] << (ii * 8));
+ if (ii < 4)
+ LODWORD(KeyRSC) |= (abySeq[ii] << (ii * 8));
+ else
+ HIDWORD(KeyRSC) |= (abySeq[ii] << ((ii-4) * 8));
}
dwKeyIndex |= 1 << 29;
}
- if (param->u.wpa_key.key_index >= MAX_GROUP_KEY) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return dwKeyIndex > 3\n");
- return -EINVAL;
- }
+ if (param->u.wpa_key.key_index >= MAX_GROUP_KEY) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return dwKeyIndex > 3\n");
+ return -EINVAL;
+ }
if (param->u.wpa_key.alg_name == WPA_ALG_TKIP) {
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
- }
+ pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
+ }
if (param->u.wpa_key.alg_name == WPA_ALG_CCMP) {
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
- }
+ pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
+ }
if (param->u.wpa_key.set_tx)
dwKeyIndex |= (1 << 31);
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)
- byKeyDecMode = KEY_CTL_CCMP;
- else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled)
- byKeyDecMode = KEY_CTL_TKIP;
- else
- byKeyDecMode = KEY_CTL_WEP;
-
- // Fix HCT test that set 256 bits KEY and Ndis802_11Encryption3Enabled
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- if (param->u.wpa_key.key_len == MAX_KEY_LEN)
- byKeyDecMode = KEY_CTL_TKIP;
- else if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- }
-
- // Check TKIP key length
- if ((byKeyDecMode == KEY_CTL_TKIP) &&
- (param->u.wpa_key.key_len != MAX_KEY_LEN)) {
- // TKIP Key must be 256 bits
- //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA - TKIP Key must be 256 bits\n"));
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return- TKIP Key must be 256 bits!\n");
- return -EINVAL;
- }
- // Check AES key length
- if ((byKeyDecMode == KEY_CTL_CCMP) &&
- (param->u.wpa_key.key_len != AES_KEY_LEN)) {
- // AES Key must be 128 bits
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return - AES Key must be 128 bits\n");
- return -EINVAL;
- }
+ if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)
+ byKeyDecMode = KEY_CTL_CCMP;
+ else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled)
+ byKeyDecMode = KEY_CTL_TKIP;
+ else
+ byKeyDecMode = KEY_CTL_WEP;
+
+ // Fix HCT test that set 256 bits KEY and Ndis802_11Encryption3Enabled
+ if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
+ if (param->u.wpa_key.key_len == MAX_KEY_LEN)
+ byKeyDecMode = KEY_CTL_TKIP;
+ else if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
+ byKeyDecMode = KEY_CTL_WEP;
+ else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
+ byKeyDecMode = KEY_CTL_WEP;
+ } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
+ if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
+ byKeyDecMode = KEY_CTL_WEP;
+ else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
+ byKeyDecMode = KEY_CTL_WEP;
+ }
- if (is_broadcast_ether_addr(&param->addr[0]) || (param->addr == NULL)) {
- /* if broadcast, set the key as every key entry's group key */
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Groupe Key Assign.\n");
-
- if ((KeybSetAllGroupKey(pDevice,
- &(pDevice->sKey),
- dwKeyIndex,
- param->u.wpa_key.key_len,
- (PQWORD) &(KeyRSC),
- (PBYTE)abyKey,
- byKeyDecMode
- ) == TRUE) &&
- (KeybSetDefaultKey(pDevice,
- &(pDevice->sKey),
- dwKeyIndex,
- param->u.wpa_key.key_len,
- (PQWORD) &(KeyRSC),
- (PBYTE)abyKey,
- byKeyDecMode
- ) == TRUE) ) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "GROUP Key Assign.\n");
-
- } else {
- //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -KeybSetDefaultKey Fail.0\n"));
- return -EINVAL;
- }
-
- } else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Assign.\n");
- // BSSID not 0xffffffffffff
- // Pairwise Key can't be WEP
- if (byKeyDecMode == KEY_CTL_WEP) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key can't be WEP\n");
- return -EINVAL;
- }
-
- dwKeyIndex |= (1 << 30); // set pairwise key
- if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) {
- //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA - WMAC_CONFIG_IBSS_STA\n"));
- return -EINVAL;
- }
- if (KeybSetKey(pDevice,
- &(pDevice->sKey),
- &param->addr[0],
- dwKeyIndex,
- param->u.wpa_key.key_len,
- (PQWORD) &(KeyRSC),
- (PBYTE)abyKey,
- byKeyDecMode
- ) == TRUE) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Set\n");
-
- } else {
- // Key Table Full
- if (!compare_ether_addr(&param->addr[0], pDevice->abyBSSID)) {
- //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -Key Table Full.2\n"));
- return -EINVAL;
-
- } else {
- // Save Key and configure just before associate/reassociate to BSSID
- // we do not implement now
- return -EINVAL;
- }
- }
- } // BSSID not 0xffffffffffff
- if ((ret == 0) && ((param->u.wpa_key.set_tx) != 0)) {
- pDevice->byKeyIndex = (BYTE)param->u.wpa_key.key_index;
- pDevice->bTransmitKey = TRUE;
+ // Check TKIP key length
+ if ((byKeyDecMode == KEY_CTL_TKIP) &&
+ (param->u.wpa_key.key_len != MAX_KEY_LEN)) {
+ // TKIP Key must be 256 bits
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return- TKIP Key must be 256 bits!\n");
+ return -EINVAL;
}
- pDevice->bEncryptionEnable = TRUE;
+ // Check AES key length
+ if ((byKeyDecMode == KEY_CTL_CCMP) &&
+ (param->u.wpa_key.key_len != AES_KEY_LEN)) {
+ // AES Key must be 128 bits
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return - AES Key must be 128 bits\n");
+ return -EINVAL;
+ }
-/*
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " key=%x-%x-%x-%x-%x-xxxxx \n",
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][0],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][1],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][2],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][3],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][4]
- );
-*/
+ if (is_broadcast_ether_addr(&param->addr[0]) || (param->addr == NULL)) {
+ /* if broadcast, set the key as every key entry's group key */
+		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Group Key Assign.\n");
+
+ if ((KeybSetAllGroupKey(pDevice, &(pDevice->sKey), dwKeyIndex,
+ param->u.wpa_key.key_len,
+ (PQWORD) &(KeyRSC),
+ (PBYTE)abyKey,
+ byKeyDecMode
+ ) == TRUE) &&
+ (KeybSetDefaultKey(pDevice,
+ &(pDevice->sKey),
+ dwKeyIndex,
+ param->u.wpa_key.key_len,
+ (PQWORD) &(KeyRSC),
+ (PBYTE)abyKey,
+ byKeyDecMode
+ ) == TRUE) ) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "GROUP Key Assign.\n");
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Assign.\n");
+ // BSSID not 0xffffffffffff
+ // Pairwise Key can't be WEP
+ if (byKeyDecMode == KEY_CTL_WEP) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key can't be WEP\n");
+ return -EINVAL;
+ }
+ dwKeyIndex |= (1 << 30); // set pairwise key
+ if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) {
+ //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA - WMAC_CONFIG_IBSS_STA\n"));
+ return -EINVAL;
+ }
+ if (KeybSetKey(pDevice, &(pDevice->sKey), &param->addr[0],
+ dwKeyIndex, param->u.wpa_key.key_len,
+ (PQWORD) &(KeyRSC), (PBYTE)abyKey, byKeyDecMode
+ ) == TRUE) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Set\n");
+ } else {
+ // Key Table Full
+ if (!compare_ether_addr(&param->addr[0], pDevice->abyBSSID)) {
+ //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -Key Table Full.2\n"));
+ return -EINVAL;
+ } else {
+ // Save Key and configure just before associate/reassociate to BSSID
+ // we do not implement now
+ return -EINVAL;
+ }
+ }
+ } // BSSID not 0xffffffffffff
+ if ((ret == 0) && ((param->u.wpa_key.set_tx) != 0)) {
+ pDevice->byKeyIndex = (BYTE)param->u.wpa_key.key_index;
+ pDevice->bTransmitKey = TRUE;
+ }
+ pDevice->bEncryptionEnable = TRUE;
return ret;
-
}
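
[Editor's note] One detail worth keeping in view through the reindent above: copy_from_user() can fault and sleep, so it must not run inside spin_lock_irq(). The code therefore drops the lock around each user copy and retakes it afterwards, returning -EINVAL (rather than the more conventional -EFAULT) if the copy fails. The pattern, isolated:

	spin_unlock_irq(&pDevice->lock);
	if (param->u.wpa_key.key &&
	    copy_from_user(&abyKey[0], param->u.wpa_key.key,
			   param->u.wpa_key.key_len)) {
		spin_lock_irq(&pDevice->lock);
		return -EINVAL;   /* driver's choice; -EFAULT would be usual */
	}
	spin_lock_irq(&pDevice->lock);
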
@@ -454,23 +412,17 @@ int wpa_set_wpadev(PSDevice pDevice, int val)
* Return Value:
*
*/
-
-static int wpa_set_wpa(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_set_wpa(PSDevice pDevice, struct viawget_wpa_param *param)
{
-
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
int ret = 0;
- pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
- pMgmt->bShareKeyAlgorithm = FALSE;
+ pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
+ pMgmt->bShareKeyAlgorithm = FALSE;
- return ret;
+ return ret;
}
-
-
-
/*
* Description:
* set disassociate
@@ -484,25 +436,21 @@ static int wpa_set_wpa(PSDevice pDevice,
* Return Value:
*
*/
-
-static int wpa_set_disassociate(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_set_disassociate(PSDevice pDevice, struct viawget_wpa_param *param)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
int ret = 0;
- spin_lock_irq(&pDevice->lock);
- if (pDevice->bLinkPass) {
- if (!memcmp(param->addr, pMgmt->abyCurrBSSID, 6))
- bScheduleCommand((void *) pDevice, WLAN_CMD_DISASSOCIATE, NULL);
- }
- spin_unlock_irq(&pDevice->lock);
+ spin_lock_irq(&pDevice->lock);
+ if (pDevice->bLinkPass) {
+ if (!memcmp(param->addr, pMgmt->abyCurrBSSID, 6))
+ bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
+ }
+ spin_unlock_irq(&pDevice->lock);
- return ret;
+ return ret;
}
-
-
/*
* Description:
* enable scan process
@@ -516,36 +464,30 @@ static int wpa_set_disassociate(PSDevice pDevice,
* Return Value:
*
*/
-
-static int wpa_set_scan(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_set_scan(PSDevice pDevice, struct viawget_wpa_param *param)
{
int ret = 0;
/**set ap_scan=1&&scan_ssid=1 under hidden ssid mode**/
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PWLAN_IE_SSID pItemSSID;
-printk("wpa_set_scan-->desired [ssid=%s,ssid_len=%d]\n",
- param->u.scan_req.ssid,param->u.scan_req.ssid_len);
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ PWLAN_IE_SSID pItemSSID;
+ printk("wpa_set_scan-->desired [ssid=%s,ssid_len=%d]\n",
+ param->u.scan_req.ssid,param->u.scan_req.ssid_len);
// Set the SSID
-memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
-pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
-pItemSSID->byElementID = WLAN_EID_SSID;
-memcpy(pItemSSID->abySSID, param->u.scan_req.ssid, param->u.scan_req.ssid_len);
-pItemSSID->len = param->u.scan_req.ssid_len;
-
- spin_lock_irq(&pDevice->lock);
- BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
- /* bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL); */
- bScheduleCommand((void *) pDevice,
- WLAN_CMD_BSSID_SCAN,
- pMgmt->abyDesireSSID);
- spin_unlock_irq(&pDevice->lock);
-
- return ret;
-}
+ memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
+ pItemSSID->byElementID = WLAN_EID_SSID;
+ memcpy(pItemSSID->abySSID, param->u.scan_req.ssid, param->u.scan_req.ssid_len);
+ pItemSSID->len = param->u.scan_req.ssid_len;
+ spin_lock_irq(&pDevice->lock);
+ BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
+ spin_unlock_irq(&pDevice->lock);
+ return ret;
+}
/*
* Description:
@@ -560,19 +502,15 @@ pItemSSID->len = param->u.scan_req.ssid_len;
* Return Value:
*
*/
-
-static int wpa_get_bssid(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_get_bssid(PSDevice pDevice, struct viawget_wpa_param *param)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int ret = 0;
- memcpy(param->u.wpa_associate.bssid, pMgmt->abyCurrBSSID , 6);
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ int ret = 0;
+ memcpy(param->u.wpa_associate.bssid, pMgmt->abyCurrBSSID, 6);
return ret;
-
}
-
/*
* Description:
* get bssid
@@ -586,24 +524,20 @@ static int wpa_get_bssid(PSDevice pDevice,
* Return Value:
*
*/
-
-static int wpa_get_ssid(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_get_ssid(PSDevice pDevice, struct viawget_wpa_param *param)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PWLAN_IE_SSID pItemSSID;
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ PWLAN_IE_SSID pItemSSID;
int ret = 0;
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- memcpy(param->u.wpa_associate.ssid, pItemSSID->abySSID , pItemSSID->len);
+ memcpy(param->u.wpa_associate.ssid, pItemSSID->abySSID, pItemSSID->len);
param->u.wpa_associate.ssid_len = pItemSSID->len;
- return ret;
+ return ret;
}
-
-
/*
* Description:
* get scan results
@@ -617,135 +551,114 @@ static int wpa_get_ssid(PSDevice pDevice,
* Return Value:
*
*/
-
-static int wpa_get_scan(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_get_scan(PSDevice pDevice, struct viawget_wpa_param *param)
{
struct viawget_scan_result *scan_buf;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PWLAN_IE_SSID pItemSSID;
- PKnownBSS pBSS;
- PBYTE pBuf;
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ PWLAN_IE_SSID pItemSSID;
+ PKnownBSS pBSS;
+ PBYTE pBuf;
int ret = 0;
u16 count = 0;
- u16 ii, jj;
- long ldBm;//James //add
+ u16 ii;
+ u16 jj;
+ long ldBm; //James //add
//******mike:bubble sort by stronger RSSI*****//
+ PBYTE ptempBSS;
- PBYTE ptempBSS;
-
+ ptempBSS = kmalloc(sizeof(KnownBSS), GFP_ATOMIC);
+ if (ptempBSS == NULL) {
+ printk("bubble sort kmalloc memory fail@@@\n");
+ ret = -ENOMEM;
+ return ret;
+ }
- ptempBSS = kmalloc(sizeof(KnownBSS), (int)GFP_ATOMIC);
-
- if (ptempBSS == NULL) {
-
- printk("bubble sort kmalloc memory fail@@@\n");
-
- ret = -ENOMEM;
-
- return ret;
-
- }
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
-
- for (jj = 0; jj < MAX_BSS_NUM - ii - 1; jj++) {
-
- if ((pMgmt->sBSSList[jj].bActive != TRUE) ||
-
- ((pMgmt->sBSSList[jj].uRSSI>pMgmt->sBSSList[jj+1].uRSSI) &&(pMgmt->sBSSList[jj+1].bActive!=FALSE))) {
-
- memcpy(ptempBSS,&pMgmt->sBSSList[jj],sizeof(KnownBSS));
-
- memcpy(&pMgmt->sBSSList[jj],&pMgmt->sBSSList[jj+1],sizeof(KnownBSS));
-
- memcpy(&pMgmt->sBSSList[jj+1],ptempBSS,sizeof(KnownBSS));
-
- }
-
- }
-
- }
-
- kfree(ptempBSS);
-
- // printk("bubble sort result:\n");
+ for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ for (jj = 0; jj < MAX_BSS_NUM - ii - 1; jj++) {
+ if ((pMgmt->sBSSList[jj].bActive != TRUE)
+ || ((pMgmt->sBSSList[jj].uRSSI > pMgmt->sBSSList[jj + 1].uRSSI)
+ && (pMgmt->sBSSList[jj + 1].bActive != FALSE))) {
+				memcpy(ptempBSS, &pMgmt->sBSSList[jj], sizeof(KnownBSS));
+ memcpy(&pMgmt->sBSSList[jj], &pMgmt->sBSSList[jj + 1],
+ sizeof(KnownBSS));
+ memcpy(&pMgmt->sBSSList[jj + 1], ptempBSS, sizeof(KnownBSS));
+ }
+ }
+ }
+ kfree(ptempBSS);
count = 0;
pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSS = &(pMgmt->sBSSList[ii]);
- if (!pBSS->bActive)
- continue;
- count++;
- }
+ for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ pBSS = &(pMgmt->sBSSList[ii]);
+ if (!pBSS->bActive)
+ continue;
+ count++;
+ }
- pBuf = kcalloc(count, sizeof(struct viawget_scan_result), (int)GFP_ATOMIC);
+ pBuf = kcalloc(count, sizeof(struct viawget_scan_result), GFP_ATOMIC);
- if (pBuf == NULL) {
- ret = -ENOMEM;
- return ret;
- }
- scan_buf = (struct viawget_scan_result *)pBuf;
+ if (pBuf == NULL) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ scan_buf = (struct viawget_scan_result *)pBuf;
pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0, jj = 0; ii < MAX_BSS_NUM ; ii++) {
- pBSS = &(pMgmt->sBSSList[ii]);
- if (pBSS->bActive) {
- if (jj >= count)
- break;
- memcpy(scan_buf->bssid, pBSS->abyBSSID, WLAN_BSSID_LEN);
- pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
- memcpy(scan_buf->ssid, pItemSSID->abySSID, pItemSSID->len);
- scan_buf->ssid_len = pItemSSID->len;
- scan_buf->freq = frequency_list[pBSS->uChannel-1];
- scan_buf->caps = pBSS->wCapInfo; //DavidWang for sharemode
-
- RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm);
- if(-ldBm<50){
+ for (ii = 0, jj = 0; ii < MAX_BSS_NUM; ii++) {
+ pBSS = &(pMgmt->sBSSList[ii]);
+ if (pBSS->bActive) {
+ if (jj >= count)
+ break;
+ memcpy(scan_buf->bssid, pBSS->abyBSSID, WLAN_BSSID_LEN);
+ pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
+ memcpy(scan_buf->ssid, pItemSSID->abySSID, pItemSSID->len);
+ scan_buf->ssid_len = pItemSSID->len;
+ scan_buf->freq = frequency_list[pBSS->uChannel-1];
+ scan_buf->caps = pBSS->wCapInfo; // DavidWang for sharemode
+
+ RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm);
+ if (-ldBm < 50)
scan_buf->qual = 100;
- }else if(-ldBm > 90) {
- scan_buf->qual = 0;
- }else {
+ else if (-ldBm > 90)
+ scan_buf->qual = 0;
+ else
scan_buf->qual=(40-(-ldBm-50))*100/40;
- }
//James
- //scan_buf->caps = pBSS->wCapInfo;
- //scan_buf->qual =
- scan_buf->noise = 0;
- scan_buf->level = ldBm;
-
- //scan_buf->maxrate =
- if (pBSS->wWPALen != 0) {
- scan_buf->wpa_ie_len = pBSS->wWPALen;
- memcpy(scan_buf->wpa_ie, pBSS->byWPAIE, pBSS->wWPALen);
- }
- if (pBSS->wRSNLen != 0) {
- scan_buf->rsn_ie_len = pBSS->wRSNLen;
- memcpy(scan_buf->rsn_ie, pBSS->byRSNIE, pBSS->wRSNLen);
- }
- scan_buf = (struct viawget_scan_result *)((PBYTE)scan_buf + sizeof(struct viawget_scan_result));
- jj ++;
- }
- }
+ //scan_buf->caps = pBSS->wCapInfo;
+ //scan_buf->qual =
+ scan_buf->noise = 0;
+ scan_buf->level = ldBm;
+
+ //scan_buf->maxrate =
+ if (pBSS->wWPALen != 0) {
+ scan_buf->wpa_ie_len = pBSS->wWPALen;
+ memcpy(scan_buf->wpa_ie, pBSS->byWPAIE, pBSS->wWPALen);
+ }
+ if (pBSS->wRSNLen != 0) {
+ scan_buf->rsn_ie_len = pBSS->wRSNLen;
+ memcpy(scan_buf->rsn_ie, pBSS->byRSNIE, pBSS->wRSNLen);
+ }
+ scan_buf = (struct viawget_scan_result *)((PBYTE)scan_buf + sizeof(struct viawget_scan_result));
+			jj++;
+ }
+ }
- if (jj < count)
- count = jj;
+ if (jj < count)
+ count = jj;
- if (copy_to_user(param->u.scan_results.buf, pBuf, sizeof(struct viawget_scan_result) * count)) {
+ if (copy_to_user(param->u.scan_results.buf, pBuf, sizeof(struct viawget_scan_result) * count))
ret = -EFAULT;
- }
+
param->u.scan_results.scan_count = count;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " param->u.scan_results.scan_count = %d\n", count)
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " param->u.scan_results.scan_count = %d\n", count);
- kfree(pBuf);
- return ret;
+ kfree(pBuf);
+ return ret;
}
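
[Editor's note] Two things happen in this hunk besides reindenting. First, the '(int)GFP_ATOMIC' and '(int)GFP_KERNEL' casts on kmalloc()/kcalloc() are dropped: gfp_t is a distinct, sparse-checked type, and laundering it through int defeats that checking. Second, the bubble sort that orders scan results "by stronger RSSI" becomes readable; condensed sketch (swap_bss() is a hypothetical helper standing in for the three memcpy()s through the ptempBSS bounce buffer):

	for (ii = 0; ii < MAX_BSS_NUM; ii++)
		for (jj = 0; jj < MAX_BSS_NUM - ii - 1; jj++)
			/* push inactive and weaker-RSSI entries toward the end */
			if (!pMgmt->sBSSList[jj].bActive ||
			    (pMgmt->sBSSList[jj].uRSSI > pMgmt->sBSSList[jj + 1].uRSSI &&
			     pMgmt->sBSSList[jj + 1].bActive))
				swap_bss(&pMgmt->sBSSList[jj], &pMgmt->sBSSList[jj + 1]);
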
-
-
/*
* Description:
* set associate with AP
@@ -759,25 +672,23 @@ static int wpa_get_scan(PSDevice pDevice,
* Return Value:
*
*/
-
-static int wpa_set_associate(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_set_associate(PSDevice pDevice, struct viawget_wpa_param *param)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PWLAN_IE_SSID pItemSSID;
- BYTE abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- BYTE abyWPAIE[64];
- int ret = 0;
- BOOL bwepEnabled=FALSE;
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ PWLAN_IE_SSID pItemSSID;
+ BYTE abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ BYTE abyWPAIE[64];
+ int ret = 0;
+ BOOL bwepEnabled=FALSE;
// set key type & algorithm
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise_suite = %d\n", param->u.wpa_associate.pairwise_suite);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "group_suite = %d\n", param->u.wpa_associate.group_suite);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "key_mgmt_suite = %d\n", param->u.wpa_associate.key_mgmt_suite);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "auth_alg = %d\n", param->u.wpa_associate.auth_alg);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "mode = %d\n", param->u.wpa_associate.mode);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming dBm = %d\n", param->u.wpa_associate.roam_dbm); //Davidwang
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise_suite = %d\n", param->u.wpa_associate.pairwise_suite);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "group_suite = %d\n", param->u.wpa_associate.group_suite);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "key_mgmt_suite = %d\n", param->u.wpa_associate.key_mgmt_suite);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "auth_alg = %d\n", param->u.wpa_associate.auth_alg);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "mode = %d\n", param->u.wpa_associate.mode);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming dBm = %d\n", param->u.wpa_associate.roam_dbm); // Davidwang
if (param->u.wpa_associate.wpa_ie) {
if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
@@ -789,25 +700,25 @@ static int wpa_set_associate(PSDevice pDevice,
}
if (param->u.wpa_associate.mode == 1)
- pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
+ pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
else
- pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
+ pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
// set bssid
- if (memcmp(param->u.wpa_associate.bssid, &abyNullAddr[0], 6) != 0)
- memcpy(pMgmt->abyDesireBSSID, param->u.wpa_associate.bssid, 6);
- // set ssid
+ if (memcmp(param->u.wpa_associate.bssid, &abyNullAddr[0], 6) != 0)
+ memcpy(pMgmt->abyDesireBSSID, param->u.wpa_associate.bssid, 6);
+ // set ssid
memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pItemSSID->byElementID = WLAN_EID_SSID;
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
+ pItemSSID->byElementID = WLAN_EID_SSID;
pItemSSID->len = param->u.wpa_associate.ssid_len;
memcpy(pItemSSID->abySSID, param->u.wpa_associate.ssid, pItemSSID->len);
- if (param->u.wpa_associate.wpa_ie_len == 0) {
- if (param->u.wpa_associate.auth_alg & AUTH_ALG_SHARED_KEY)
- pMgmt->eAuthenMode = WMAC_AUTH_SHAREKEY;
- else
- pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
+ if (param->u.wpa_associate.wpa_ie_len == 0) {
+ if (param->u.wpa_associate.auth_alg & AUTH_ALG_SHARED_KEY)
+ pMgmt->eAuthenMode = WMAC_AUTH_SHAREKEY;
+ else
+ pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
} else if (abyWPAIE[0] == RSN_INFO_ELEM) {
if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK)
pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
@@ -817,9 +728,9 @@ static int wpa_set_associate(PSDevice pDevice,
if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_WPA_NONE)
pMgmt->eAuthenMode = WMAC_AUTH_WPANONE;
else if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK)
- pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
+ pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
else
- pMgmt->eAuthenMode = WMAC_AUTH_WPA;
+ pMgmt->eAuthenMode = WMAC_AUTH_WPA;
}
switch (param->u.wpa_associate.pairwise_suite) {
@@ -833,7 +744,6 @@ static int wpa_set_associate(PSDevice pDevice,
case CIPHER_WEP104:
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
bwepEnabled = TRUE;
- // printk("****************wpa_set_associate:set CIPHER_WEP40_104\n");
break;
case CIPHER_NONE:
if (param->u.wpa_associate.group_suite == CIPHER_CCMP)
@@ -845,70 +755,64 @@ static int wpa_set_associate(PSDevice pDevice,
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
}
- pMgmt->Roam_dbm = param->u.wpa_associate.roam_dbm;
- // if ((pMgmt->Roam_dbm > 40)&&(pMgmt->Roam_dbm<80))
- // pDevice->bEnableRoaming = TRUE;
-
- if (pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) { //@wep-sharekey
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- pMgmt->bShareKeyAlgorithm = TRUE;
- }
- else if (pMgmt->eAuthenMode == WMAC_AUTH_OPEN) {
- if(bwepEnabled==TRUE) { //@open-wep
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- }
- else { //@only open
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
+ pMgmt->Roam_dbm = param->u.wpa_associate.roam_dbm;
+ if (pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) { // @wep-sharekey
+ pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
+ pMgmt->bShareKeyAlgorithm = TRUE;
+ } else if (pMgmt->eAuthenMode == WMAC_AUTH_OPEN) {
+		if (bwepEnabled == TRUE) { // @open-wep
+ pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
+ } else {
+ // @only open
+ pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
}
- }
-//mike save old encryption status
+ }
+ // mike save old encryption status
pDevice->eOldEncryptionStatus = pDevice->eEncryptionStatus;
- if (pDevice->eEncryptionStatus != Ndis802_11EncryptionDisabled)
- pDevice->bEncryptionEnable = TRUE;
- else
- pDevice->bEncryptionEnable = FALSE;
-
- if ((pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) ||
- ((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && (bwepEnabled==TRUE))) {
- //mike re-comment:open-wep && sharekey-wep needn't do initial key!!
-
- }
- else
- KeyvInitTable(pDevice,&pDevice->sKey);
+ if (pDevice->eEncryptionStatus != Ndis802_11EncryptionDisabled)
+ pDevice->bEncryptionEnable = TRUE;
+ else
+ pDevice->bEncryptionEnable = FALSE;
- spin_lock_irq(&pDevice->lock);
- pDevice->bLinkPass = FALSE;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
- memset(pMgmt->abyCurrBSSID, 0, 6);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- netif_stop_queue(pDevice->dev);
+ if ((pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) ||
+     ((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && (bwepEnabled == TRUE))) {
+ // mike re-comment: open-wep and sharekey-wep needn't initialize the key table
+ } else {
+ KeyvInitTable(pDevice, &pDevice->sKey);
+ }
-/*******search if ap_scan=2 ,which is associating request in hidden ssid mode ****/
-{
- PKnownBSS pCurr = NULL;
- pCurr = BSSpSearchBSSList(pDevice,
- pMgmt->abyDesireBSSID,
- pMgmt->abyDesireSSID,
- pDevice->eConfigPHYMode
- );
-
- if (pCurr == NULL){
- printk("wpa_set_associate---->hidden mode site survey before associate.......\n");
- bScheduleCommand((void *) pDevice,
- WLAN_CMD_BSSID_SCAN,
- pMgmt->abyDesireSSID);
- }
-}
+ spin_lock_irq(&pDevice->lock);
+ pDevice->bLinkPass = FALSE;
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
+ memset(pMgmt->abyCurrBSSID, 0, 6);
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ netif_stop_queue(pDevice->dev);
+
+/******* search if ap_scan=2, i.e. an associate request in hidden-SSID mode ****/
+ {
+ PKnownBSS pCurr = NULL;
+ pCurr = BSSpSearchBSSList(pDevice,
+ pMgmt->abyDesireBSSID,
+ pMgmt->abyDesireSSID,
+ pDevice->eConfigPHYMode
+ );
+
+ if (pCurr == NULL) {
+ printk("wpa_set_associate: hidden-SSID mode, site survey before associate\n");
+ bScheduleCommand((void *)pDevice,
+ WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
+ }
+ }
/****************************************************************/
- bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
- spin_unlock_irq(&pDevice->lock);
+ bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
+ spin_unlock_irq(&pDevice->lock);
- return ret;
+ return ret;
}
-
/*
* Description:
* wpa_ioctl main function supported for wpa supplicant
@@ -922,7 +826,6 @@ static int wpa_set_associate(PSDevice pDevice,
* Return Value:
*
*/
-
int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
{
struct viawget_wpa_param *param;
@@ -930,10 +833,10 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
int wpa_ioctl = 0;
if (p->length < sizeof(struct viawget_wpa_param) ||
- p->length > VIAWGET_WPA_MAX_BUF_SIZE || !p->pointer)
+ p->length > VIAWGET_WPA_MAX_BUF_SIZE || !p->pointer)
return -EINVAL;
- param = kmalloc((int)p->length, (int)GFP_KERNEL);
+ param = kmalloc((int)p->length, GFP_KERNEL);
if (param == NULL)
return -ENOMEM;
@@ -944,63 +847,63 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
switch (param->cmd) {
case VIAWGET_SET_WPA:
- ret = wpa_set_wpa(pDevice, param);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_WPA \n");
+ ret = wpa_set_wpa(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_WPA \n");
break;
case VIAWGET_SET_KEY:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_KEY \n");
- spin_lock_irq(&pDevice->lock);
- ret = wpa_set_keys(pDevice, param, FALSE);
- spin_unlock_irq(&pDevice->lock);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_KEY \n");
+ spin_lock_irq(&pDevice->lock);
+ ret = wpa_set_keys(pDevice, param, FALSE);
+ spin_unlock_irq(&pDevice->lock);
break;
case VIAWGET_SET_SCAN:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_SCAN \n");
- ret = wpa_set_scan(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_SCAN \n");
+ ret = wpa_set_scan(pDevice, param);
break;
case VIAWGET_GET_SCAN:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SCAN\n");
- ret = wpa_get_scan(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SCAN\n");
+ ret = wpa_get_scan(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_GET_SSID:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SSID \n");
- ret = wpa_get_ssid(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SSID \n");
+ ret = wpa_get_ssid(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_GET_BSSID:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_BSSID \n");
- ret = wpa_get_bssid(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_BSSID \n");
+ ret = wpa_get_bssid(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_SET_ASSOCIATE:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_ASSOCIATE \n");
- ret = wpa_set_associate(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_ASSOCIATE \n");
+ ret = wpa_set_associate(pDevice, param);
break;
case VIAWGET_SET_DISASSOCIATE:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DISASSOCIATE \n");
- ret = wpa_set_disassociate(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DISASSOCIATE \n");
+ ret = wpa_set_disassociate(pDevice, param);
break;
case VIAWGET_SET_DROP_UNENCRYPT:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DROP_UNENCRYPT \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DROP_UNENCRYPT \n");
break;
- case VIAWGET_SET_DEAUTHENTICATE:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DEAUTHENTICATE \n");
+ case VIAWGET_SET_DEAUTHENTICATE:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DEAUTHENTICATE \n");
break;
default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ioctl: unknown cmd=%d\n",
- param->cmd);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ioctl: unknown cmd=%d\n",
+ param->cmd);
+ kfree(param);
return -EOPNOTSUPP;
- break;
}
if ((ret == 0) && wpa_ioctl) {
@@ -1012,7 +915,5 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
out:
kfree(param);
-
return ret;
}
-
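
The wpa_ioctl hunks above add the missing kfree(param) on the unknown-command path, closing a leak of the buffer copied in from userspace. A minimal sketch of the underlying rule — every exit path must free that buffer — assuming a simplified ioctl; the names here are illustrative, not the driver's actual API:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

struct demo_param {
	int cmd;
};

static int demo_ioctl(void __user *uptr, size_t len)
{
	struct demo_param *param;
	int ret = 0;

	param = kmalloc(len, GFP_KERNEL);
	if (param == NULL)
		return -ENOMEM;

	if (copy_from_user(param, uptr, len)) {
		ret = -EFAULT;
		goto out;
	}

	switch (param->cmd) {
	case 0:
		/* handle a known command */
		break;
	default:
		ret = -EOPNOTSUPP;	/* still reaches the kfree below */
		break;
	}
out:
	kfree(param);	/* one release point covers every path */
	return ret;
}
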
diff --git a/drivers/staging/wlags49_h2/wl_cs.c b/drivers/staging/wlags49_h2/wl_cs.c
index 2faee2dd4bb1..a2cbb29c3f59 100644
--- a/drivers/staging/wlags49_h2/wl_cs.c
+++ b/drivers/staging/wlags49_h2/wl_cs.c
@@ -229,7 +229,6 @@ static int wl_adapter_resume(struct pcmcia_device *link)
void wl_adapter_insert(struct pcmcia_device *link)
{
struct net_device *dev;
- int i;
int ret;
/*--------------------------------------------------------------------*/
@@ -266,10 +265,8 @@ void wl_adapter_insert(struct pcmcia_device *link)
register_wlags_sysfs(dev);
- printk(KERN_INFO "%s: Wireless, io_addr %#03lx, irq %d, ""mac_address ",
- dev->name, dev->base_addr, dev->irq);
- for (i = 0; i < ETH_ALEN; i++)
- printk("%02X%c", dev->dev_addr[i], ((i < (ETH_ALEN-1)) ? ':' : '\n'));
+ printk(KERN_INFO "%s: Wireless, io_addr %#03lx, irq %d, mac_address"
+ " %pM\n", dev->name, dev->base_addr, dev->irq, dev->dev_addr);
DBG_LEAVE(DbgInfo);
return;
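
The wl_cs.c hunk above replaces a per-byte MAC printing loop with the kernel's %pM printk extension, which formats a 6-byte address as aa:bb:cc:dd:ee:ff in a single call. A minimal sketch; the message text is illustrative:

#include <linux/kernel.h>
#include <linux/types.h>

static void demo_print_mac(const u8 *mac)
{
	/* %pM takes a pointer to a 6-byte MAC and prints aa:bb:cc:dd:ee:ff */
	printk(KERN_INFO "demo: Wireless, mac_address %pM\n", mac);
}
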
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index fb466f4c92e0..4cd3ba5d5646 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -356,7 +356,7 @@ int prism2_scan(struct wiphy *wiphy, struct net_device *dev,
msg1.msgcode = DIDmsg_dot11req_scan;
msg1.bsstype.data = P80211ENUM_bsstype_any;
- memset(&(msg1.bssid.data), 0xFF, sizeof(p80211item_pstr6_t));
+ memset(&msg1.bssid.data.data, 0xFF, sizeof(msg1.bssid.data.data));
msg1.bssid.data.len = 6;
if (request->n_ssids > 0) {
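
The memset fix above sizes the 0xFF fill by the field actually being written — sizeof on the object itself — rather than an unrelated type, so the fill can no longer spill into neighbouring members. A self-contained sketch with stand-in types:

#include <string.h>

struct item {
	unsigned short len;
	unsigned char data[6];
};

struct msg {
	struct item bssid;
	int other_field;	/* must not be clobbered by the fill */
};

static void fill_bssid(struct msg *m)
{
	/* bounded by the member itself, not by a containing type */
	memset(m->bssid.data, 0xFF, sizeof(m->bssid.data));
	m->bssid.len = sizeof(m->bssid.data);
}
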
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 14bfeb2e704c..0f51b4ab3631 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -150,7 +150,7 @@ static int p80211knetdev_init(netdevice_t *netdev)
* Returns:
* the address of the statistics structure
----------------------------------------------------------------*/
-static struct net_device_stats *p80211knetdev_get_stats(netdevice_t * netdev)
+static struct net_device_stats *p80211knetdev_get_stats(netdevice_t *netdev)
{
wlandevice_t *wlandev = netdev->ml_priv;
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 6675c8226cef..c3bb05dd744f 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -406,6 +406,7 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
/* SSID */
req->ssid.status = P80211ENUM_msgitem_status_data_ok;
req->ssid.data.len = le16_to_cpu(item->ssid.len);
+ req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_BSSID_LEN);
memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len);
/* supported rates */
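
The added min_t() line bounds a device-supplied length against the destination buffer before the memcpy, so a malformed scan result cannot overflow req->ssid.data. A hedged sketch of the clamp, with illustrative sizes rather than the driver's constants:

#include <linux/kernel.h>	/* min_t() */
#include <linux/string.h>
#include <linux/types.h>

#define DEMO_SSID_MAX 32	/* illustrative bound */

struct demo_ssid {
	u16 len;
	u8 data[DEMO_SSID_MAX];
};

static void demo_copy_ssid(struct demo_ssid *dst, const u8 *src, u16 wire_len)
{
	/* clamp the wire-supplied length to the destination size */
	dst->len = min_t(u16, wire_len, sizeof(dst->data));
	memcpy(dst->data, src, dst->len);
}
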
diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h
index 35f7b2a485e1..e828fd403c35 100644
--- a/drivers/staging/xgifb/XGI_main.h
+++ b/drivers/staging/xgifb/XGI_main.h
@@ -7,47 +7,32 @@
#include "XGIfb.h"
#include "vb_struct.h"
+#include "../../video/sis/sis.h"
#include "vb_def.h"
#define XGIFAIL(x) do { printk(x "\n"); return -EINVAL; } while (0)
-#ifndef PCI_VENDOR_ID_XG
-#define PCI_VENDOR_ID_XG 0x18CA
+#ifndef PCI_DEVICE_ID_XGI_41
+#define PCI_DEVICE_ID_XGI_41 0x041
#endif
-
-#ifndef PCI_DEVICE_ID_XG_40
-#define PCI_DEVICE_ID_XG_40 0x040
-#endif
-#ifndef PCI_DEVICE_ID_XG_41
-#define PCI_DEVICE_ID_XG_41 0x041
-#endif
-#ifndef PCI_DEVICE_ID_XG_42
-#define PCI_DEVICE_ID_XG_42 0x042
+#ifndef PCI_DEVICE_ID_XGI_42
+#define PCI_DEVICE_ID_XGI_42 0x042
#endif
-#ifndef PCI_DEVICE_ID_XG_20
-#define PCI_DEVICE_ID_XG_20 0x020
-#endif
-#ifndef PCI_DEVICE_ID_XG_27
-#define PCI_DEVICE_ID_XG_27 0x027
+#ifndef PCI_DEVICE_ID_XGI_27
+#define PCI_DEVICE_ID_XGI_27 0x027
#endif
static DEFINE_PCI_DEVICE_TABLE(xgifb_pci_table) = {
- {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_20)},
- {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_27)},
- {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_40)},
- {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_42)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_20)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_27)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_40)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_42)},
{0}
};
MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
/* To be included in fb.h */
-#ifndef FB_ACCEL_XGI_XABRE
-#define FB_ACCEL_XGI_XABRE 41 /* XGI 330 ("Xabre") */
-#endif
-
-#define SEQ_DATA 0x15
-
#define XGISR (xgifb_info->dev_info.P3c4)
#define XGICR (xgifb_info->dev_info.P3d4)
#define XGIDACA (xgifb_info->dev_info.P3c8)
@@ -60,12 +45,6 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
#define XGIDAC2A XGIPART5
#define XGIDAC2D (XGIPART5 + 1)
-#define IND_XGI_PASSWORD 0x05 /* SRs */
-#define IND_XGI_RAMDAC_CONTROL 0x07
-#define IND_XGI_DRAM_SIZE 0x14
-#define IND_XGI_MODULE_ENABLE 0x1E
-#define IND_XGI_PCI_ADDRESS_SET 0x20
-
#define IND_XGI_SCRATCH_REG_CR30 0x30 /* CRs */
#define IND_XGI_SCRATCH_REG_CR31 0x31
#define IND_XGI_SCRATCH_REG_CR32 0x32
@@ -73,10 +52,6 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
#define IND_XGI_LCD_PANEL 0x36
#define IND_XGI_SCRATCH_REG_CR37 0x37
-#define IND_XGI_CRT2_WRITE_ENABLE_315 0x2F
-
-#define XGI_PASSWORD 0x86 /* SR05 */
-
#define XGI_DRAM_SIZE_MASK 0xF0 /*SR14 */
#define XGI_DRAM_SIZE_1MB 0x00
#define XGI_DRAM_SIZE_2MB 0x01
@@ -88,37 +63,6 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
#define XGI_DRAM_SIZE_128MB 0x07
#define XGI_DRAM_SIZE_256MB 0x08
-#define XGI_ENABLE_2D 0x40 /* SR1E */
-
-#define XGI_MEM_MAP_IO_ENABLE 0x01 /* SR20 */
-#define XGI_PCI_ADDR_ENABLE 0x80
-
-#define XGI_SIMULTANEOUS_VIEW_ENABLE 0x01 /* CR30 */
-#define XGI_VB_OUTPUT_COMPOSITE 0x04
-#define XGI_VB_OUTPUT_SVIDEO 0x08
-#define XGI_VB_OUTPUT_SCART 0x10
-#define XGI_VB_OUTPUT_LCD 0x20
-#define XGI_VB_OUTPUT_CRT2 0x40
-#define XGI_VB_OUTPUT_HIVISION 0x80
-
-#define XGI_VB_OUTPUT_DISABLE 0x20 /* CR31 */
-#define XGI_DRIVER_MODE 0x40
-
-#define XGI_VB_COMPOSITE 0x01 /* CR32 */
-#define XGI_VB_SVIDEO 0x02
-#define XGI_VB_SCART 0x04
-#define XGI_VB_LCD 0x08
-#define XGI_VB_CRT2 0x10
-#define XGI_CRT1 0x20
-#define XGI_VB_HIVISION 0x40
-#define XGI_VB_YPBPR 0x80
-#define XGI_VB_TV (XGI_VB_COMPOSITE | XGI_VB_SVIDEO | \
- XGI_VB_SCART | XGI_VB_HIVISION|XGI_VB_YPBPR)
-
-#define XGI_EXTERNAL_CHIP_MASK 0x0E /* CR37 */
-#define XGI310_EXTERNAL_CHIP_LVDS 0x02 /* in CR37 << 1 ! */
-#define XGI310_EXTERNAL_CHIP_LVDS_CHRONTEL 0x03 /* in CR37 << 1 ! */
-
/* ------------------- Global Variables ----------------------------- */
/* display status */
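
The XGI_main.h cleanup above drops defines now provided by the shared sis header and keeps only #ifndef-guarded fallbacks, while the PCI id table switches to the shared PCI_VENDOR_ID_XGI names. A minimal sketch of the two idioms; the vendor and device values below are illustrative:

#include <linux/module.h>
#include <linux/pci.h>

#ifndef DEMO_VENDOR_ID
#define DEMO_VENDOR_ID 0x18CA	/* fallback; a shared header may already define it */
#endif

static DEFINE_PCI_DEVICE_TABLE(demo_pci_table) = {
	{PCI_DEVICE(DEMO_VENDOR_ID, 0x0040)},
	{0}
};
MODULE_DEVICE_TABLE(pci, demo_pci_table);	/* exports the id list for autoloading */
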
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 2502c49c9c5b..21c037827de4 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -4,6 +4,8 @@
* Base on TW's sis fbdev code.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
/* #include <linux/config.h> */
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -55,7 +57,7 @@ static unsigned int refresh_rate;
#undef XGIFBDEBUG
#ifdef XGIFBDEBUG
-#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+#define DPRINTK(fmt, args...) pr_debug("%s: " fmt, __func__, ## args)
#else
#define DPRINTK(fmt, args...)
#endif
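
The pr_fmt() definition added at the top of XGI_main_26.c makes every pr_info()/pr_debug() in the file prefix its message automatically, which is why the literal "XGIfb: " prefixes disappear from the call sites in the hunks below. A minimal sketch of the convention; KBUILD_MODNAME is supplied by kbuild:

/* must precede the printk include, or the default pr_fmt wins */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/printk.h>

static void demo(void)
{
	pr_info("probe ok\n");	/* prints "<modname>: probe ok" */
}
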
@@ -142,7 +144,7 @@ static inline void dumpVGAReg(void)
#if 1
#define DEBUGPRN(x)
#else
-#define DEBUGPRN(x) printk(KERN_INFO x "\n");
+#define DEBUGPRN(x) pr_info(x "\n");
#endif
/* --------------- Hardware Access Routines -------------------------- */
@@ -369,15 +371,15 @@ static void XGIRegInit(struct vb_device_info *XGI_Pr, unsigned long BaseAddr)
XGI_Pr->P3c9 = BaseAddr + 0x19;
XGI_Pr->P3da = BaseAddr + 0x2A;
/* Digital video interface registers (LCD) */
- XGI_Pr->Part1Port = BaseAddr + XGI_CRT2_PORT_04;
+ XGI_Pr->Part1Port = BaseAddr + SIS_CRT2_PORT_04;
/* 301 TV Encoder registers */
- XGI_Pr->Part2Port = BaseAddr + XGI_CRT2_PORT_10;
+ XGI_Pr->Part2Port = BaseAddr + SIS_CRT2_PORT_10;
/* 301 Macrovision registers */
- XGI_Pr->Part3Port = BaseAddr + XGI_CRT2_PORT_12;
+ XGI_Pr->Part3Port = BaseAddr + SIS_CRT2_PORT_12;
/* 301 VGA2 (and LCD) registers */
- XGI_Pr->Part4Port = BaseAddr + XGI_CRT2_PORT_14;
+ XGI_Pr->Part4Port = BaseAddr + SIS_CRT2_PORT_14;
/* 301 palette address port registers */
- XGI_Pr->Part5Port = BaseAddr + XGI_CRT2_PORT_14 + 2;
+ XGI_Pr->Part5Port = BaseAddr + SIS_CRT2_PORT_14 + 2;
}
@@ -424,7 +426,7 @@ static void XGIfb_search_mode(struct xgifb_video_info *xgifb_info,
i++;
}
if (!j)
- printk(KERN_INFO "XGIfb: Invalid mode '%s'\n", name);
+ pr_info("Invalid mode '%s'\n", name);
}
static void XGIfb_search_vesamode(struct xgifb_video_info *xgifb_info,
@@ -449,7 +451,7 @@ static void XGIfb_search_vesamode(struct xgifb_video_info *xgifb_info,
invalid:
if (!j)
- printk(KERN_INFO "XGIfb: Invalid VESA mode 0x%x'\n", vesamode);
+ pr_info("Invalid VESA mode 0x%x\n", vesamode);
}
static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
@@ -526,12 +528,6 @@ static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
xres = 1600;
yres = 1200;
break;
- /* case LCD_320x480: */ /* TW: FSTN */
- /*
- xres = 320;
- yres = 480;
- break;
- */
default:
xres = 0;
yres = 0;
@@ -692,7 +688,7 @@ static void XGIfb_search_crt2type(const char *name)
i++;
}
if (XGIfb_crt2type < 0)
- printk(KERN_INFO "XGIfb: Invalid CRT2 type: %s\n", name);
+ pr_info("Invalid CRT2 type: %s\n", name);
}
static u8 XGIfb_search_refresh_rate(struct xgifb_video_info *xgifb_info,
@@ -742,7 +738,7 @@ static u8 XGIfb_search_refresh_rate(struct xgifb_video_info *xgifb_info,
if (xgifb_info->rate_idx > 0) {
return xgifb_info->rate_idx;
} else {
- printk(KERN_INFO "XGIfb: Unsupported rate %d for %dx%d\n",
+ pr_info("Unsupported rate %d for %dx%d\n",
rate, xres, yres);
return 0;
}
@@ -811,27 +807,27 @@ static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info)
switch (xgifb_info->display2) {
case XGIFB_DISP_CRT:
- cr30 = (XGI_VB_OUTPUT_CRT2 | XGI_SIMULTANEOUS_VIEW_ENABLE);
- cr31 |= XGI_DRIVER_MODE;
+ cr30 = (SIS_VB_OUTPUT_CRT2 | SIS_SIMULTANEOUS_VIEW_ENABLE);
+ cr31 |= SIS_DRIVER_MODE;
break;
case XGIFB_DISP_LCD:
- cr30 = (XGI_VB_OUTPUT_LCD | XGI_SIMULTANEOUS_VIEW_ENABLE);
- cr31 |= XGI_DRIVER_MODE;
+ cr30 = (SIS_VB_OUTPUT_LCD | SIS_SIMULTANEOUS_VIEW_ENABLE);
+ cr31 |= SIS_DRIVER_MODE;
break;
case XGIFB_DISP_TV:
if (xgifb_info->TV_type == TVMODE_HIVISION)
- cr30 = (XGI_VB_OUTPUT_HIVISION
- | XGI_SIMULTANEOUS_VIEW_ENABLE);
+ cr30 = (SIS_VB_OUTPUT_HIVISION
+ | SIS_SIMULTANEOUS_VIEW_ENABLE);
else if (xgifb_info->TV_plug == TVPLUG_SVIDEO)
- cr30 = (XGI_VB_OUTPUT_SVIDEO
- | XGI_SIMULTANEOUS_VIEW_ENABLE);
+ cr30 = (SIS_VB_OUTPUT_SVIDEO
+ | SIS_SIMULTANEOUS_VIEW_ENABLE);
else if (xgifb_info->TV_plug == TVPLUG_COMPOSITE)
- cr30 = (XGI_VB_OUTPUT_COMPOSITE
- | XGI_SIMULTANEOUS_VIEW_ENABLE);
+ cr30 = (SIS_VB_OUTPUT_COMPOSITE
+ | SIS_SIMULTANEOUS_VIEW_ENABLE);
else if (xgifb_info->TV_plug == TVPLUG_SCART)
- cr30 = (XGI_VB_OUTPUT_SCART
- | XGI_SIMULTANEOUS_VIEW_ENABLE);
- cr31 |= XGI_DRIVER_MODE;
+ cr30 = (SIS_VB_OUTPUT_SCART
+ | SIS_SIMULTANEOUS_VIEW_ENABLE);
+ cr31 |= SIS_DRIVER_MODE;
if (XGIfb_tvmode == 1 || xgifb_info->TV_type == TVMODE_PAL)
cr31 |= 0x01;
@@ -840,7 +836,7 @@ static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info)
break;
default: /* disable CRT2 */
cr30 = 0x00;
- cr31 |= (XGI_DRIVER_MODE | XGI_VB_OUTPUT_DISABLE);
+ cr31 |= (SIS_DRIVER_MODE | SIS_VB_OUTPUT_DISABLE);
}
xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR30, cr30);
@@ -854,7 +850,7 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
u8 reg;
unsigned char doit = 1;
/*
- xgifb_reg_set(XGISR,IND_XGI_PASSWORD,XGI_PASSWORD);
+ xgifb_reg_set(XGISR,IND_SIS_PASSWORD,SIS_PASSWORD);
xgifb_reg_set(XGICR, 0x13, 0x00);
xgifb_reg_and_or(XGISR,0x0E, 0xF0, 0x01);
*test*
@@ -890,7 +886,7 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
reg |= 0x80;
xgifb_reg_set(XGICR, 0x17, reg);
- xgifb_reg_and(XGISR, IND_XGI_RAMDAC_CONTROL, ~0x04);
+ xgifb_reg_and(XGISR, IND_SIS_RAMDAC_CONTROL, ~0x04);
if (xgifb_info->display2 == XGIFB_DISP_TV &&
xgifb_info->hasVB == HASVB_301) {
@@ -923,7 +919,7 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
break;
}
xgifb_reg_or(XGIPART1,
- IND_XGI_CRT2_WRITE_ENABLE_315,
+ SIS_CRT2_WENABLE_315,
0x01);
if (xgifb_info->TV_type == TVMODE_NTSC) {
@@ -1118,7 +1114,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
if (!htotal || !vtotal) {
DPRINTK("XGIfb: Invalid 'var' information\n");
return -EINVAL;
- } printk(KERN_DEBUG "XGIfb: var->pixclock=%d, htotal=%d, vtotal=%d\n",
+ } pr_debug("var->pixclock=%d, htotal=%d, vtotal=%d\n",
var->pixclock, htotal, vtotal);
if (var->pixclock && htotal && vtotal) {
@@ -1130,7 +1126,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
xgifb_info->refresh_rate = 60;
}
- printk(KERN_DEBUG "XGIfb: Change mode to %dx%dx%d-%dHz\n",
+ pr_debug("Change mode to %dx%dx%d-%dHz\n",
var->xres,
var->yres,
var->bits_per_pixel,
@@ -1158,7 +1154,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
xgifb_info->mode_idx = -1;
if (xgifb_info->mode_idx < 0) {
- printk(KERN_ERR "XGIfb: Mode %dx%dx%d not supported\n",
+ pr_err("Mode %dx%dx%d not supported\n",
var->xres, var->yres, var->bits_per_pixel);
xgifb_info->mode_idx = old_mode;
return -EINVAL;
@@ -1177,14 +1173,14 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
if (XGISetModeNew(xgifb_info, hw_info,
XGIbios_mode[xgifb_info->mode_idx].mode_no)
== 0) {
- printk(KERN_ERR "XGIfb: Setting mode[0x%x] failed\n",
+ pr_err("Setting mode[0x%x] failed\n",
XGIbios_mode[xgifb_info->mode_idx].mode_no);
return -EINVAL;
}
info->fix.line_length = ((info->var.xres_virtual
* info->var.bits_per_pixel) >> 6);
- xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD);
+ xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
xgifb_reg_set(XGICR, 0x13, (info->fix.line_length & 0x00ff));
xgifb_reg_set(XGISR,
@@ -1239,7 +1235,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
break;
default:
xgifb_info->video_cmap_len = 16;
- printk(KERN_ERR "XGIfb: Unsupported depth %d",
+ pr_err("Unsupported depth %d\n",
xgifb_info->video_bpp);
break;
}
@@ -1273,7 +1269,7 @@ static int XGIfb_pan_var(struct fb_var_screeninfo *var, struct fb_info *info)
break;
}
- xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD);
+ xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
xgifb_reg_set(XGICR, 0x0D, base & 0xFF);
xgifb_reg_set(XGICR, 0x0C, (base >> 8) & 0xFF);
@@ -1282,7 +1278,7 @@ static int XGIfb_pan_var(struct fb_var_screeninfo *var, struct fb_info *info)
xgifb_reg_and_or(XGISR, 0x37, 0xDF, (base >> 21) & 0x04);
if (xgifb_info->display2 != XGIFB_DISP_NONE) {
- xgifb_reg_or(XGIPART1, IND_XGI_CRT2_WRITE_ENABLE_315, 0x01);
+ xgifb_reg_or(XGIPART1, SIS_CRT2_WENABLE_315, 0x01);
xgifb_reg_set(XGIPART1, 0x06, (base & 0xFF));
xgifb_reg_set(XGIPART1, 0x05, ((base >> 8) & 0xFF));
xgifb_reg_set(XGIPART1, 0x04, ((base >> 16) & 0xFF));
@@ -1387,7 +1383,7 @@ static int XGIfb_get_fix(struct fb_fix_screeninfo *fix, int con,
fix->line_length = xgifb_info->video_linelength;
fix->mmio_start = xgifb_info->mmio_base;
fix->mmio_len = xgifb_info->mmio_size;
- fix->accel = FB_ACCEL_XGI_XABRE;
+ fix->accel = FB_ACCEL_SIS_XABRE;
DEBUGPRN("end of get_fix");
return 0;
@@ -1441,7 +1437,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
hrate = (drate * 1000) / htotal;
xgifb_info->refresh_rate =
(unsigned int) (hrate * 2 / vtotal);
- printk(KERN_DEBUG
+ pr_debug(
"%s: pixclock = %d ,htotal=%d, vtotal=%d\n"
"%s: drate=%d, hrate=%d, refresh_rate=%d\n",
__func__, var->pixclock, htotal, vtotal,
@@ -1479,7 +1475,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
if (!found_mode) {
- printk(KERN_ERR "XGIfb: %dx%dx%d is no valid mode\n",
+ pr_err("%dx%dx%d is no valid mode\n",
var->xres, var->yres, var->bits_per_pixel);
search_idx = 0;
while (XGIbios_mode[search_idx].mode_no != 0) {
@@ -1498,11 +1494,11 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
if (found_mode) {
var->xres = XGIbios_mode[search_idx].xres;
var->yres = XGIbios_mode[search_idx].yres;
- printk(KERN_DEBUG "XGIfb: Adapted to mode %dx%dx%d\n",
+ pr_debug("Adapted to mode %dx%dx%d\n",
var->xres, var->yres, var->bits_per_pixel);
} else {
- printk(KERN_ERR "XGIfb: Failed to find similar mode to %dx%dx%d\n",
+ pr_err("Failed to find similar mode to %dx%dx%d\n",
var->xres, var->yres, var->bits_per_pixel);
return -EINVAL;
}
@@ -1634,9 +1630,9 @@ static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
/* xorg driver sets 32MB * 1 channel */
if (xgifb_info->chip == XG27)
- xgifb_reg_set(XGISR, IND_XGI_DRAM_SIZE, 0x51);
+ xgifb_reg_set(XGISR, IND_SIS_DRAM_SIZE, 0x51);
- reg = xgifb_reg_get(XGISR, IND_XGI_DRAM_SIZE);
+ reg = xgifb_reg_get(XGISR, IND_SIS_DRAM_SIZE);
switch ((reg & XGI_DRAM_SIZE_MASK) >> 4) {
case XGI_DRAM_SIZE_1MB:
xgifb_info->video_size = 0x100000;
@@ -1711,7 +1707,7 @@ static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
/* xgifb_info->video_size = 0x200000; */ /* 1024x768x16 */
/* xgifb_info->video_size = 0x1000000; */ /* benchmark */
- printk("XGIfb: SR14=%x DramSzie %x ChannelNum %x\n",
+ pr_info("SR14=%x DramSize %x ChannelNum %x\n",
reg,
xgifb_info->video_size, ChannelNum);
return 0;
@@ -1736,7 +1732,7 @@ static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
cr32 = xgifb_reg_get(XGICR, IND_XGI_SCRATCH_REG_CR32);
- if ((cr32 & XGI_CRT1) && !XGIfb_crt1off)
+ if ((cr32 & SIS_CRT1) && !XGIfb_crt1off)
XGIfb_crt1off = 0;
else {
if (cr32 & 0x5F)
@@ -1746,11 +1742,11 @@ static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
}
if (!xgifb_info->display2_force) {
- if (cr32 & XGI_VB_TV)
+ if (cr32 & SIS_VB_TV)
xgifb_info->display2 = XGIFB_DISP_TV;
- else if (cr32 & XGI_VB_LCD)
+ else if (cr32 & SIS_VB_LCD)
xgifb_info->display2 = XGIFB_DISP_LCD;
- else if (cr32 & XGI_VB_CRT2)
+ else if (cr32 & SIS_VB_CRT2)
xgifb_info->display2 = XGIFB_DISP_CRT;
else
xgifb_info->display2 = XGIFB_DISP_NONE;
@@ -1759,14 +1755,14 @@ static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
if (XGIfb_tvplug != -1)
/* PR/TW: Override with option */
xgifb_info->TV_plug = XGIfb_tvplug;
- else if (cr32 & XGI_VB_HIVISION) {
+ else if (cr32 & SIS_VB_HIVISION) {
xgifb_info->TV_type = TVMODE_HIVISION;
xgifb_info->TV_plug = TVPLUG_SVIDEO;
- } else if (cr32 & XGI_VB_SVIDEO)
+ } else if (cr32 & SIS_VB_SVIDEO)
xgifb_info->TV_plug = TVPLUG_SVIDEO;
- else if (cr32 & XGI_VB_COMPOSITE)
+ else if (cr32 & SIS_VB_COMPOSITE)
xgifb_info->TV_plug = TVPLUG_COMPOSITE;
- else if (cr32 & XGI_VB_SCART)
+ else if (cr32 & SIS_VB_SCART)
xgifb_info->TV_plug = TVPLUG_SCART;
if (xgifb_info->TV_type == 0) {
@@ -1811,11 +1807,11 @@ static void XGIfb_get_VB_type(struct xgifb_video_info *xgifb_info)
if (!XGIfb_has_VB(xgifb_info)) {
reg = xgifb_reg_get(XGICR, IND_XGI_SCRATCH_REG_CR37);
- switch ((reg & XGI_EXTERNAL_CHIP_MASK) >> 1) {
- case XGI310_EXTERNAL_CHIP_LVDS:
+ switch ((reg & SIS_EXTERNAL_CHIP_MASK) >> 1) {
+ case SIS_EXTERNAL_CHIP_LVDS:
xgifb_info->hasVB = HASVB_LVDS;
break;
- case XGI310_EXTERNAL_CHIP_LVDS_CHRONTEL:
+ case SIS_EXTERNAL_CHIP_LVDS_CHRONTEL:
xgifb_info->hasVB = HASVB_LVDS_CHRONTEL;
break;
default:
@@ -1917,7 +1913,7 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
xgifb_info->vga_base = pci_resource_start(pdev, 2) + 0x30;
hw_info->pjIOAddress = (unsigned char *)xgifb_info->vga_base;
/* XGI_Pr.RelIO = ioremap(pci_resource_start(pdev, 2), 128) + 0x30; */
- printk("XGIfb: Relocate IO address: %lx [%08lx]\n",
+ pr_info("Relocate IO address: %lx [%08lx]\n",
(unsigned long)pci_resource_start(pdev, 2),
xgifb_info->dev_info.RelIO);
@@ -1933,17 +1929,17 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
XGIRegInit(&xgifb_info->dev_info, (unsigned long)hw_info->pjIOAddress);
- xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD);
- reg1 = xgifb_reg_get(XGISR, IND_XGI_PASSWORD);
+ xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
+ reg1 = xgifb_reg_get(XGISR, IND_SIS_PASSWORD);
if (reg1 != 0xa1) { /*I/O error */
- printk("\nXGIfb: I/O error!!!");
+ pr_err("I/O error\n");
ret = -EIO;
goto error;
}
switch (xgifb_info->chip_id) {
- case PCI_DEVICE_ID_XG_20:
+ case PCI_DEVICE_ID_XGI_20:
xgifb_reg_or(XGICR, Index_CR_GPIO_Reg3, GPIOG_EN);
CR48 = xgifb_reg_get(XGICR, Index_CR_GPIO_Reg1);
if (CR48&GPIOG_READ)
@@ -1951,16 +1947,16 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
else
xgifb_info->chip = XG20;
break;
- case PCI_DEVICE_ID_XG_40:
+ case PCI_DEVICE_ID_XGI_40:
xgifb_info->chip = XG40;
break;
- case PCI_DEVICE_ID_XG_41:
+ case PCI_DEVICE_ID_XGI_41:
xgifb_info->chip = XG41;
break;
- case PCI_DEVICE_ID_XG_42:
+ case PCI_DEVICE_ID_XGI_42:
xgifb_info->chip = XG42;
break;
- case PCI_DEVICE_ID_XG_27:
+ case PCI_DEVICE_ID_XGI_27:
xgifb_info->chip = XG27;
break;
default:
@@ -1968,31 +1964,31 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
goto error;
}
- printk("XGIfb:chipid = %x\n", xgifb_info->chip);
+ pr_info("chipid = %x\n", xgifb_info->chip);
hw_info->jChipType = xgifb_info->chip;
if (XGIfb_get_dram_size(xgifb_info)) {
- printk(KERN_INFO "XGIfb: Fatal error: Unable to determine RAM size.\n");
+ pr_err("Fatal error: Unable to determine RAM size.\n");
ret = -ENODEV;
goto error;
}
/* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */
xgifb_reg_or(XGISR,
- IND_XGI_PCI_ADDRESS_SET,
- (XGI_PCI_ADDR_ENABLE | XGI_MEM_MAP_IO_ENABLE));
+ IND_SIS_PCI_ADDRESS_SET,
+ (SIS_PCI_ADDR_ENABLE | SIS_MEM_MAP_IO_ENABLE));
/* Enable 2D accelerator engine */
- xgifb_reg_or(XGISR, IND_XGI_MODULE_ENABLE, XGI_ENABLE_2D);
+ xgifb_reg_or(XGISR, IND_SIS_MODULE_ENABLE, SIS_ENABLE_2D);
hw_info->ulVideoMemorySize = xgifb_info->video_size;
if (!request_mem_region(xgifb_info->video_base,
xgifb_info->video_size,
"XGIfb FB")) {
- printk("unable request memory size %x",
+ pr_err("unable to request memory region of size %x\n",
xgifb_info->video_size);
- printk(KERN_ERR "XGIfb: Fatal error: Unable to reserve frame buffer memory\n");
- printk(KERN_ERR "XGIfb: Is there another framebuffer driver active?\n");
+ pr_err("Fatal error: Unable to reserve frame buffer memory\n");
+ pr_err("Is there another framebuffer driver active?\n");
ret = -ENODEV;
goto error;
}
@@ -2000,7 +1996,7 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
if (!request_mem_region(xgifb_info->mmio_base,
xgifb_info->mmio_size,
"XGIfb MMIO")) {
- printk(KERN_ERR "XGIfb: Fatal error: Unable to reserve MMIO region\n");
+ pr_err("Fatal error: Unable to reserve MMIO region\n");
ret = -ENODEV;
goto error_0;
}
@@ -2010,20 +2006,18 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
xgifb_info->mmio_vbase = ioremap(xgifb_info->mmio_base,
xgifb_info->mmio_size);
- printk(KERN_INFO "XGIfb: Framebuffer at 0x%lx, mapped to 0x%p, size %dk\n",
+ pr_info("Framebuffer at 0x%lx, mapped to 0x%p, size %dk\n",
xgifb_info->video_base,
xgifb_info->video_vbase,
xgifb_info->video_size / 1024);
- printk(KERN_INFO "XGIfb: MMIO at 0x%lx, mapped to 0x%p, size %ldk\n",
+ pr_info("MMIO at 0x%lx, mapped to 0x%p, size %ldk\n",
xgifb_info->mmio_base, xgifb_info->mmio_vbase,
xgifb_info->mmio_size / 1024);
- printk("XGIfb: XGIInitNew() ...");
+
pci_set_drvdata(pdev, xgifb_info);
- if (XGIInitNew(pdev))
- printk("OK\n");
- else
- printk("Fail\n");
+ if (!XGIInitNew(pdev))
+ pr_err("XGIInitNew() failed!\n");
xgifb_info->mtrr = (unsigned int) 0;
@@ -2033,13 +2027,12 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
xgifb_info->hasVB = HASVB_NONE;
} else if (xgifb_info->chip == XG21) {
CR38 = xgifb_reg_get(XGICR, 0x38);
- if ((CR38&0xE0) == 0xC0) {
+ if ((CR38&0xE0) == 0xC0)
xgifb_info->display2 = XGIFB_DISP_LCD;
- } else if ((CR38&0xE0) == 0x60) {
+ else if ((CR38&0xE0) == 0x60)
xgifb_info->hasVB = HASVB_CHRONTEL;
- } else {
+ else
xgifb_info->hasVB = HASVB_NONE;
- }
} else {
XGIfb_get_VB_type(xgifb_info);
}
@@ -2053,10 +2046,10 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
reg = xgifb_reg_get(XGIPART4, 0x01);
if (reg >= 0xE0) {
hw_info->ujVBChipID = VB_CHIP_302LV;
- printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg);
+ pr_info("XGI302LV bridge detected (revision 0x%02x)\n", reg);
} else if (reg >= 0xD0) {
hw_info->ujVBChipID = VB_CHIP_301LV;
- printk(KERN_INFO "XGIfb: XGI301LV bridge detected (revision 0x%02x)\n", reg);
+ pr_info("XGI301LV bridge detected (revision 0x%02x)\n", reg);
}
/* else if (reg >= 0xB0) {
hw_info->ujVBChipID = VB_CHIP_301B;
@@ -2065,17 +2058,17 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
} */
else {
hw_info->ujVBChipID = VB_CHIP_301;
- printk("XGIfb: XGI301 bridge detected\n");
+ pr_info("XGI301 bridge detected\n");
}
break;
case HASVB_302:
reg = xgifb_reg_get(XGIPART4, 0x01);
if (reg >= 0xE0) {
hw_info->ujVBChipID = VB_CHIP_302LV;
- printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg);
+ pr_info("XGI302LV bridge detected (revision 0x%02x)\n", reg);
} else if (reg >= 0xD0) {
hw_info->ujVBChipID = VB_CHIP_301LV;
- printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg);
+ pr_info("XGI302LV bridge detected (revision 0x%02x)\n", reg);
} else if (reg >= 0xB0) {
reg1 = xgifb_reg_get(XGIPART4, 0x23);
@@ -2083,27 +2076,27 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
} else {
hw_info->ujVBChipID = VB_CHIP_302;
- printk(KERN_INFO "XGIfb: XGI302 bridge detected\n");
+ pr_info("XGI302 bridge detected\n");
}
break;
case HASVB_LVDS:
hw_info->ulExternalChip = 0x1;
- printk(KERN_INFO "XGIfb: LVDS transmitter detected\n");
+ pr_info("LVDS transmitter detected\n");
break;
case HASVB_TRUMPION:
hw_info->ulExternalChip = 0x2;
- printk(KERN_INFO "XGIfb: Trumpion Zurac LVDS scaler detected\n");
+ pr_info("Trumpion Zurac LVDS scaler detected\n");
break;
case HASVB_CHRONTEL:
hw_info->ulExternalChip = 0x4;
- printk(KERN_INFO "XGIfb: Chrontel TV encoder detected\n");
+ pr_info("Chrontel TV encoder detected\n");
break;
case HASVB_LVDS_CHRONTEL:
hw_info->ulExternalChip = 0x5;
- printk(KERN_INFO "XGIfb: LVDS transmitter and Chrontel TV encoder detected\n");
+ pr_info("LVDS transmitter and Chrontel TV encoder detected\n");
break;
default:
- printk(KERN_INFO "XGIfb: No or unknown bridge type detected\n");
+ pr_info("No or unknown bridge type detected\n");
break;
}
@@ -2117,10 +2110,6 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
reg = xgifb_reg_get(XGICR, IND_XGI_LCD_PANEL);
reg &= 0x0f;
hw_info->ulCRT2LCDType = XGI310paneltype[reg];
-
- } else {
- /* TW: FSTN/DSTN */
- hw_info->ulCRT2LCDType = LCD_320x480;
}
}
@@ -2147,9 +2136,6 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
if (tmp & 0x20) {
tmp = xgifb_reg_get(
XGIPART1, 0x13);
- if (tmp & 0x04) {
- /* XGI_Pr.XGI_UseLCDA = 1; */
- }
}
}
}
@@ -2222,12 +2208,12 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
break;
default:
xgifb_info->video_cmap_len = 16;
- printk(KERN_INFO "XGIfb: Unsupported depth %d",
+ pr_info("Unsupported depth %d\n",
xgifb_info->video_bpp);
break;
}
- printk(KERN_INFO "XGIfb: Default mode is %dx%dx%d (%dHz)\n",
+ pr_info("Default mode is %dx%dx%d (%dHz)\n",
xgifb_info->video_width,
xgifb_info->video_height,
xgifb_info->video_bpp,
@@ -2404,7 +2390,7 @@ MODULE_PARM_DESC(filter,
static void __exit xgifb_remove_module(void)
{
pci_unregister_driver(&xgifb_driver);
- printk(KERN_DEBUG "xgifb: Module unloaded\n");
+ pr_debug("Module unloaded\n");
}
module_exit(xgifb_remove_module);
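
The probe path converted above acquires the framebuffer and MMIO regions in order and unwinds through its error labels on failure. A hedged sketch of that reverse-order unwind, with illustrative region names:

#include <linux/errno.h>
#include <linux/ioport.h>

static int demo_probe(unsigned long fb_base, unsigned long fb_size,
		      unsigned long mmio_base, unsigned long mmio_size)
{
	int ret;

	if (!request_mem_region(fb_base, fb_size, "demo FB"))
		return -ENODEV;

	if (!request_mem_region(mmio_base, mmio_size, "demo MMIO")) {
		ret = -ENODEV;
		goto err_fb;	/* release only what was taken */
	}

	return 0;

err_fb:
	release_mem_region(fb_base, fb_size);
	return ret;
}
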
diff --git a/drivers/staging/xgifb/XGIfb.h b/drivers/staging/xgifb/XGIfb.h
index 2c866bb65a00..37bb730de047 100644
--- a/drivers/staging/xgifb/XGIfb.h
+++ b/drivers/staging/xgifb/XGIfb.h
@@ -3,8 +3,8 @@
#include <linux/ioctl.h>
#include <linux/types.h>
-#include "vb_struct.h"
#include "vgatypes.h"
+#include "vb_struct.h"
enum xgifb_display_type {
XGIFB_DISP_NONE = 0,
diff --git a/drivers/staging/xgifb/vb_def.h b/drivers/staging/xgifb/vb_def.h
index 5beeef99bb14..c7317931f671 100644
--- a/drivers/staging/xgifb/vb_def.h
+++ b/drivers/staging/xgifb/vb_def.h
@@ -1,153 +1,48 @@
/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/xgi/initdef.h
* ,v 1.4 2000/12/02 01:16:17 dawes Exp $*/
-#ifndef _INITDEF_
-#define _INITDEF_
+#ifndef _VB_DEF_
+#define _VB_DEF_
+#include "../../video/sis/initdef.h"
#define VB_XGI301C 0x0020 /* for 301C */
-/*end 301b*/
-
-#define VB_YPbPr525p 0x01
-#define VB_YPbPr750p 0x02
#define VB_YPbPr1080i 0x03
#define LVDSCRT1Len 15
-
-#define SupportCHTV 0x0800
#define SupportCRT2in301C 0x0100 /* for 301C */
#define SetCHTVOverScan 0x8000
-#define PanelRGB18Bit 0x0100
-#define PanelRGB24Bit 0x0000
-#define Panel320x480 0x07 /*fstn*/
+#define Panel_320x480 0x07 /*fstn*/
/* [ycchen] 02/12/03 Modify for Multi-Sync. LCD Support */
#define PanelResInfo 0x1F /* CR36 Panel Type/LCDResInfo */
-#define Panel800x600 0x01
-#define Panel1024x768 0x02
-#define Panel1024x768x75 0x22
-#define Panel1280x1024 0x03
-#define Panel1280x1024x75 0x23
-#define Panel640x480 0x04
-#define Panel1280x960 0x07
-#define Panel1400x1050 0x09
-#define Panel1600x1200 0x0B
+#define Panel_1024x768x75 0x22
+#define Panel_1280x1024x75 0x23
#define PanelRef60Hz 0x00
#define PanelRef75Hz 0x20
-#define CRT2DisplayFlag 0x2000
-
#define YPbPr525iVCLK 0x03B
#define YPbPr525iVCLK_2 0x03A
#define XGI_CRT2_PORT_00 (0x00 - 0x030)
-#define XGI_CRT2_PORT_04 (0x04 - 0x030)
-#define XGI_CRT2_PORT_10 (0x10 - 0x30)
-#define XGI_CRT2_PORT_12 (0x12 - 0x30)
-#define XGI_CRT2_PORT_14 (0x14 - 0x30)
-
-#define _PanelType00 0x00
-#define _PanelType01 0x08
-#define _PanelType02 0x10
-#define _PanelType03 0x18
-#define _PanelType04 0x20
-#define _PanelType05 0x28
-#define _PanelType06 0x30
-#define _PanelType07 0x38
-#define _PanelType08 0x40
-#define _PanelType09 0x48
-#define _PanelType0A 0x50
-#define _PanelType0B 0x58
-#define _PanelType0C 0x60
-#define _PanelType0D 0x68
-#define _PanelType0E 0x70
-#define _PanelType0F 0x78
/* =============================================================
for 310
============================================================== */
-/* add LCDDataList for GetLCDPtr */
-#define LCDDataList (VBIOSTablePointerStart+0x22)
-/* */
-/* Modify from 310.inc */
-/* */
-/* */
-
#define ModeSoftSetting 0x04
-#define BoardTVType 0x02
-
-#define SoftDRAMType 0x80 /* DRAMSetting */
-
/* ---------------- SetMode Stack */
#define CRT1Len 15
#define VCLKLen 4
-#define VGA_XGI340 0x0001 /* 340 series */
-
-#define VB_XGI301 0x0001 /* VB Type Info */
-#define VB_XGI301B 0x0002 /* 301 series */
-#define VB_XGI302B 0x0004
-#define VB_NoLCD 0x8000
-#define VB_XGI301LV 0x0008
-#define VB_XGI302LV 0x0010
-#define VB_LVDS_NS 0x0001 /* 3rd party chip */
-
-#define ModeInfoFlag 0x0007
-#define ModeText 0x0000
-#define ModeEGA 0x0002 /* 16 colors mode */
-#define ModeVGA 0x0003 /* 256 colors mode */
-
-#define DACInfoFlag 0x0018
-
-#define MemoryInfoFlag 0x01e0
-#define MemorySizeShift 5
-
-#define Charx8Dot 0x0200
-#define LineCompareOff 0x0400
-#define CRT2Mode 0x0800
-#define HalfDCLK 0x1000
-#define NoSupportSimuTV 0x2000
-#define DoubleScanMode 0x8000
-
-/* -------------- Ext_InfoFlag */
-#define Support16Bpp 0x0005
-#define Support32Bpp 0x0007
#define SupportAllCRT2 0x0078
-#define SupportTV 0x0008
-#define SupportHiVisionTV 0x0010
-#define SupportLCD 0x0020
-#define SupportRAMDAC2 0x0040
#define NoSupportTV 0x0070
#define NoSupportHiVisionTV 0x0060
#define NoSupportLCD 0x0058
-#define SupportTV1024 0x0800 /* 301btest */
-#define SupportYPbPr 0x1000 /* 301lv */
-#define InterlaceMode 0x0080
-#define SyncPP 0x0000
-#define SyncPN 0x4000
-#define SyncNP 0x8000
-#define SyncNN 0xC000
/* -------------- SetMode Stack/Scratch */
-#define SetSimuScanMode 0x0001 /* VBInfo/CR30 & CR31 */
-#define SwitchToCRT2 0x0002
-#define SetCRT2ToTV 0x089C
-#define SetCRT2ToAVIDEO 0x0004
-#define SetCRT2ToSVIDEO 0x0008
-#define SetCRT2ToSCART 0x0010
-#define SetCRT2ToLCD 0x0020
-#define SetCRT2ToRAMDAC 0x0040
-#define SetCRT2ToHiVisionTV 0x0080
-#define SetCRT2ToLCDA 0x0100
-#define SetInSlaveMode 0x0200
-#define SetNotSimuMode 0x0400
-#define SetCRT2ToYPbPr 0x0800
-#define LoadDACFlag 0x1000
-#define DisableCRT2Display 0x2000
-#define DriverMode 0x4000
+#define XGI_SetCRT2ToLCDA 0x0100
#define SetCRT2ToDualEdge 0x8000
-#define ProgrammingCRT2 0x0001 /* Set Flag */
#define ReserveTVOption 0x0008
#define GatingCRT 0x0800
#define DisableChB 0x1000
@@ -155,23 +50,14 @@
#define DisableChA 0x4000
#define EnableChA 0x8000
-#define SetNTSCTV 0x0000 /* TV Info */
-#define SetPALTV 0x0001
-#define SetNTSCJ 0x0002
-#define SetPALMTV 0x0004
-#define SetPALNTV 0x0008
-#define SetYPbPrMode525i 0x0020
-#define SetYPbPrMode525p 0x0040
-#define SetYPbPrMode750p 0x0080
-#define SetYPbPrMode1080i 0x0100
#define SetTVLowResolution 0x0400
#define TVSimuMode 0x0800
#define RPLLDIV2XO 0x1000
#define NTSC1024x768 0x2000
#define SetTVLockMode 0x4000
-#define LCDVESATiming 0x0001 /* LCD Info/CR37 */
-#define EnableLVDSDDA 0x0002
+#define XGI_LCDVESATiming 0x0001 /* LCD Info/CR37 */
+#define XGI_EnableLVDSDDA 0x0002
#define EnableScalingLCD 0x0008
#define SetPWDEnable 0x0004
#define SetLCDtoNonExpanding 0x0010
@@ -184,7 +70,7 @@
#define EnableLCD24bpp 0x0004 /* default */
#define DisableLCD24bpp 0x0000
#define LCDPolarity 0x00c0 /* default: SyncNN */
-#define LCDDualLink 0x0100
+#define XGI_LCDDualLink 0x0100
#define EnableSpectrum 0x0200
#define PWDEnable 0x0400
#define EnableVBCLKDRVLOW 0x4000
@@ -206,31 +92,21 @@
#define TVSense 0xc7
-#define TVOverScan 0x10 /* CR35 */
-
#define YPbPrMode 0xe0
#define YPbPrMode525i 0x00
#define YPbPrMode525p 0x20
#define YPbPrMode750p 0x40
#define YPbPrMode1080i 0x60
-
-#define LCDRGB18Bit 0x01 /* CR37 */
-#define LCDNonExpanding 0x10
-#define LCDSync 0x20
-#define LCDSyncBit 0xe0 /* H/V polarity & sync ID */
-
#define ScalingLCD 0x08
-#define EnableDualEdge 0x01 /* CR38 */
-#define SetToLCDA 0x02
#define SetYPbPr 0x04
/* ---------------------- VUMA Information */
#define DisplayDeviceFromCMOS 0x10
/* ---------------------- HK Evnet Definition */
-#define ModeSwitchStatus 0xf0
+#define XGI_ModeSwitchStatus 0xf0
#define ActiveCRT1 0x10
#define ActiveLCD 0x0020
#define ActiveTV 0x40
@@ -246,28 +122,13 @@
/* translated from asm code 301def.h */
/* */
/* --------------------------------------------------------- */
-#define LCDDataLen 8
-#define TVDataLen 12
#define LVDSCRT1Len_H 8
#define LVDSCRT1Len_V 7
-#define LVDSDataLen 6
-#define LVDSDesDataLen 6
#define LCDDesDataLen 6
#define LVDSDesDataLen2 8
#define LCDDesDataLen2 8
-#define CHTVRegLen 16
-#define StHiTVHT 892
-#define StHiTVVT 1126
-#define StHiTextTVHT 1000
-#define StHiTextTVVT 1126
-#define ExtHiTVHT 2100
-#define ExtHiTVVT 1125
-#define NTSCHT 1716
-#define NTSCVT 525
#define NTSC1024x768HT 1908
-#define PALHT 1728
-#define PALVT 625
#define YPbPrTV525iHT 1716 /* YPbPr */
#define YPbPrTV525iVT 525
@@ -276,22 +137,16 @@
#define YPbPrTV750pHT 1650
#define YPbPrTV750pVT 750
-#define CRT2Delay1 0x04 /* XGI301 */
-#define CRT2Delay2 0x0A /* 301B,302 */
-
-
#define VCLK25_175 0x00
#define VCLK28_322 0x01
#define VCLK31_5 0x02
#define VCLK36 0x03
-#define VCLK40 0x04
#define VCLK43_163 0x05
#define VCLK44_9 0x06
#define VCLK49_5 0x07
#define VCLK50 0x08
#define VCLK52_406 0x09
#define VCLK56_25 0x0A
-#define VCLK65 0x0B
#define VCLK68_179 0x0D
#define VCLK72_852 0x0E
#define VCLK75 0x0F
@@ -300,7 +155,6 @@
#define VCLK83_95 0x13
#define VCLK86_6 0x15
#define VCLK94_5 0x16
-#define VCLK108_2 0x19
#define VCLK113_309 0x1B
#define VCLK116_406 0x1C
#define VCLK135_5 0x1E
@@ -327,16 +181,10 @@
#define VCLK125_999 0x51
#define VCLK148_5 0x52
#define VCLK217_325 0x55
-#define YPbPr750pVCLK 0x57
+#define XGI_YPbPr750pVCLK 0x57
-#define TVVCLKDIV2 0x3A
-#define TVVCLK 0x3B
-#define HiTVVCLKDIV2 0x3C
-#define HiTVVCLK 0x3D
-#define HiTVSimuVCLK 0x3E
-#define HiTVTextVCLK 0x3F
#define VCLK39_77 0x40
-#define YPbPr525pVCLK 0x3A
+#define YPbPr525pVCLK 0x3A
#define NTSC1024VCLK 0x41
#define VCLK35_2 0x49 /* ; 800x480 */
#define VCLK122_61 0x4A
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 4ccd988ffd7c..94d5c35e22fb 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -3,8 +3,8 @@
#include <linux/pci.h>
#include <linux/vmalloc.h>
-#include "vgatypes.h"
#include "XGIfb.h"
+#include "vgatypes.h"
#include "vb_def.h"
#include "vb_struct.h"
@@ -1268,7 +1268,7 @@ static void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension,
if (pVBInfo->IF_DEF_HiVision == 1) {
if ((temp >> 8) & ActiveHiTV)
- tempcl |= SetCRT2ToHiVisionTV;
+ tempcl |= SetCRT2ToHiVision;
}
if (pVBInfo->IF_DEF_YPbPr == 1) {
@@ -1287,7 +1287,7 @@ static void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension,
if (pVBInfo->IF_DEF_HiVision == 1) {
if ((temp >> 8) & ActiveHiTV)
- tempcl |= SetCRT2ToHiVisionTV;
+ tempcl |= SetCRT2ToHiVision;
}
if (pVBInfo->IF_DEF_YPbPr == 1) {
@@ -1299,9 +1299,9 @@ static void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension,
tempcl |= SetSimuScanMode;
if ((!(temp & ActiveCRT1)) && ((temp & ActiveLCD) || (temp & ActiveTV)
|| (temp & ActiveCRT2)))
- tempcl ^= (SetSimuScanMode | SwitchToCRT2);
+ tempcl ^= (SetSimuScanMode | SwitchCRT2);
if ((temp & ActiveLCD) && (temp & ActiveTV))
- tempcl ^= (SetSimuScanMode | SwitchToCRT2);
+ tempcl ^= (SetSimuScanMode | SwitchCRT2);
xgifb_reg_set(pVBInfo->P3d4, 0x30, tempcl);
CR31Data = xgifb_reg_get(pVBInfo->P3d4, 0x31);
@@ -1516,11 +1516,11 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
pVBInfo->P3c9 = pVBInfo->BaseAddr + 0x19;
pVBInfo->P3da = pVBInfo->BaseAddr + 0x2A;
pVBInfo->Part0Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_00;
- pVBInfo->Part1Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_04;
- pVBInfo->Part2Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_10;
- pVBInfo->Part3Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_12;
- pVBInfo->Part4Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14;
- pVBInfo->Part5Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14 + 2;
+ pVBInfo->Part1Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_04;
+ pVBInfo->Part2Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_10;
+ pVBInfo->Part3Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_12;
+ pVBInfo->Part4Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14;
+ pVBInfo->Part5Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14 + 2;
printk("5");
if (HwDeviceExtension->jChipType < XG20) /* kuku 2004/06/25 */
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index 67a316c3c108..2919924213c4 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -61,20 +61,20 @@ static const unsigned short XGINew_VGA_DAC[] = {
void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
{
pVBInfo->SModeIDTable = (struct XGI_StStruct *) XGI330_SModeIDTable;
- pVBInfo->StandTable = (struct XGI_StandTableStruct *) XGI330_StandTable;
+ pVBInfo->StandTable = (struct SiS_StandTable_S *) XGI330_StandTable;
pVBInfo->EModeIDTable = (struct XGI_ExtStruct *) XGI330_EModeIDTable;
pVBInfo->RefIndex = (struct XGI_Ext2Struct *) XGI330_RefIndex;
pVBInfo->XGINEWUB_CRT1Table
= (struct XGI_CRT1TableStruct *) XGI_CRT1Table;
- pVBInfo->MCLKData = (struct XGI_MCLKDataStruct *) XGI340New_MCLKData;
+ pVBInfo->MCLKData = (struct SiS_MCLKData *) XGI340New_MCLKData;
pVBInfo->ECLKData = (struct XGI_ECLKDataStruct *) XGI340_ECLKData;
- pVBInfo->VCLKData = (struct XGI_VCLKDataStruct *) XGI_VCLKData;
- pVBInfo->VBVCLKData = (struct XGI_VBVCLKDataStruct *) XGI_VBVCLKData;
+ pVBInfo->VCLKData = (struct SiS_VCLKData *) XGI_VCLKData;
+ pVBInfo->VBVCLKData = (struct SiS_VBVCLKData *) XGI_VBVCLKData;
pVBInfo->ScreenOffset = XGI330_ScreenOffset;
- pVBInfo->StResInfo = (struct XGI_StResInfoStruct *) XGI330_StResInfo;
+ pVBInfo->StResInfo = (struct SiS_StResInfo_S *) XGI330_StResInfo;
pVBInfo->ModeResInfo
- = (struct XGI_ModeResInfoStruct *) XGI330_ModeResInfo;
+ = (struct SiS_ModeResInfo_S *) XGI330_ModeResInfo;
pVBInfo->pOutputSelect = &XGI330_OutputSelect;
pVBInfo->pSoftSetting = &XGI330_SoftSetting;
@@ -138,7 +138,7 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
pVBInfo->UpdateCRT1 = (struct XGI_XG21CRT1Struct *) XGI_UpdateCRT1Table;
/* 310 customization related */
- if ((pVBInfo->VBType & VB_XGI301LV) || (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) || (pVBInfo->VBType & VB_SIS302LV))
pVBInfo->LCDCapList = XGI_LCDDLCapList;
else
pVBInfo->LCDCapList = XGI_LCDCapList;
@@ -153,7 +153,7 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
if (ChipType == XG27) {
pVBInfo->MCLKData
- = (struct XGI_MCLKDataStruct *) XGI27New_MCLKData;
+ = (struct SiS_MCLKData *) XGI27New_MCLKData;
pVBInfo->CR40 = XGI27_cr41;
pVBInfo->pXGINew_CR97 = &XG27_CR97;
pVBInfo->pSR36 = &XG27_SR36;
@@ -208,8 +208,8 @@ static void XGI_SetSeqRegs(unsigned short ModeNo,
xgifb_reg_set(pVBInfo->P3c4, 0x00, 0x03); /* Set SR0 */
tempah = pVBInfo->StandTable[StandTableIndex].SR[0];
- i = SetCRT2ToLCDA;
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ i = XGI_SetCRT2ToLCDA;
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
tempah |= 0x01;
} else {
if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToLCD)) {
@@ -263,7 +263,7 @@ static void XGI_SetATTRegs(unsigned short ModeNo,
ARdata = pVBInfo->StandTable[StandTableIndex].ATTR[i];
if (modeflag & Charx8Dot) { /* ifndef Dot9 */
if (i == 0x13) {
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
ARdata = 0;
} else {
if (pVBInfo->VBInfo & (SetCRT2ToTV
@@ -356,11 +356,11 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
}
/* 301b */
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
tempax |= SupportLCD;
- if (pVBInfo->LCDResInfo != Panel1280x1024) {
- if (pVBInfo->LCDResInfo != Panel1280x960) {
+ if (pVBInfo->LCDResInfo != Panel_1280x1024) {
+ if (pVBInfo->LCDResInfo != Panel_1280x960) {
if (pVBInfo->LCDInfo &
LCDNonExpanding) {
if (resinfo >= 9) {
@@ -372,10 +372,10 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
}
}
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) { /* for HiTV */
- if ((pVBInfo->VBType & VB_XGI301LV) &&
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) { /* for HiTV */
+ if ((pVBInfo->VBType & VB_SIS301LV) &&
(pVBInfo->VBExtInfo == VB_YPbPr1080i)) {
- tempax |= SupportYPbPr;
+ tempax |= SupportYPbPr750p;
if (pVBInfo->VBInfo & SetInSlaveMode) {
if (resinfo == 4)
return 0;
@@ -387,7 +387,7 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
return 0;
}
} else {
- tempax |= SupportHiVisionTV;
+ tempax |= SupportHiVision;
if (pVBInfo->VBInfo & SetInSlaveMode) {
if (resinfo == 4)
return 0;
@@ -406,17 +406,17 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
if (pVBInfo->VBInfo & (SetCRT2ToAVIDEO |
SetCRT2ToSVIDEO |
SetCRT2ToSCART |
- SetCRT2ToYPbPr |
- SetCRT2ToHiVisionTV)) {
+ SetCRT2ToYPbPr525750 |
+ SetCRT2ToHiVision)) {
tempax |= SupportTV;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
- | VB_XGI301LV | VB_XGI302LV
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
+ | VB_SIS301LV | VB_SIS302LV
| VB_XGI301C)) {
tempax |= SupportTV1024;
}
- if (!(pVBInfo->VBInfo & SetPALTV)) {
+ if (!(pVBInfo->VBInfo & TVSetPAL)) {
if (modeflag & NoSupportSimuTV) {
if (pVBInfo->VBInfo &
SetInSlaveMode) {
@@ -436,7 +436,7 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
if (resinfo > 0x08)
return 0; /* 1024x768 */
- if (pVBInfo->LCDResInfo < Panel1024x768) {
+ if (pVBInfo->LCDResInfo < Panel_1024x768) {
if (resinfo > 0x07)
return 0; /* 800x600 */
@@ -1230,23 +1230,23 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
- unsigned short LCDXlat1VCLK[4] = { VCLK65 + 2,
- VCLK65 + 2,
- VCLK65 + 2,
- VCLK65 + 2 };
- unsigned short LCDXlat2VCLK[4] = { VCLK108_2 + 5,
- VCLK108_2 + 5,
- VCLK108_2 + 5,
- VCLK108_2 + 5 };
+ unsigned short LCDXlat1VCLK[4] = { VCLK65_315 + 2,
+ VCLK65_315 + 2,
+ VCLK65_315 + 2,
+ VCLK65_315 + 2 };
+ unsigned short LCDXlat2VCLK[4] = { VCLK108_2_315 + 5,
+ VCLK108_2_315 + 5,
+ VCLK108_2_315 + 5,
+ VCLK108_2_315 + 5 };
unsigned short LVDSXlat1VCLK[4] = { VCLK40, VCLK40, VCLK40, VCLK40 };
- unsigned short LVDSXlat2VCLK[4] = { VCLK65 + 2,
- VCLK65 + 2,
- VCLK65 + 2,
- VCLK65 + 2 };
- unsigned short LVDSXlat3VCLK[4] = { VCLK65 + 2,
- VCLK65 + 2,
- VCLK65 + 2,
- VCLK65 + 2 };
+ unsigned short LVDSXlat2VCLK[4] = { VCLK65_315 + 2,
+ VCLK65_315 + 2,
+ VCLK65_315 + 2,
+ VCLK65_315 + 2 };
+ unsigned short LVDSXlat3VCLK[4] = { VCLK65_315 + 2,
+ VCLK65_315 + 2,
+ VCLK65_315 + 2,
+ VCLK65_315 + 2 };
unsigned short CRT2Index, VCLKIndex;
unsigned short modeflag, resinfo;
@@ -1266,36 +1266,36 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
if (pVBInfo->IF_DEF_LVDS == 0) {
CRT2Index = CRT2Index >> 6; /* for LCD */
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { /*301b*/
- if (pVBInfo->LCDResInfo != Panel1024x768)
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) { /*301b*/
+ if (pVBInfo->LCDResInfo != Panel_1024x768)
VCLKIndex = LCDXlat2VCLK[CRT2Index];
else
VCLKIndex = LCDXlat1VCLK[CRT2Index];
- } else if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ } else if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
if (pVBInfo->SetFlag & RPLLDIV2XO) {
- VCLKIndex = HiTVVCLKDIV2;
+ VCLKIndex = TVCLKBASE_315 + HiTVVCLKDIV2;
VCLKIndex += 25;
} else {
- VCLKIndex = HiTVVCLK;
+ VCLKIndex = TVCLKBASE_315 + HiTVVCLK;
VCLKIndex += 25;
}
if (pVBInfo->SetFlag & TVSimuMode) {
if (modeflag & Charx8Dot) {
- VCLKIndex = HiTVSimuVCLK;
+ VCLKIndex = TVCLKBASE_315 + HiTVSimuVCLK;
VCLKIndex += 25;
} else {
- VCLKIndex = HiTVTextVCLK;
+ VCLKIndex = TVCLKBASE_315 + HiTVTextVCLK;
VCLKIndex += 25;
}
}
/* 301lv */
- if ((pVBInfo->VBType & VB_XGI301LV) &&
+ if ((pVBInfo->VBType & VB_SIS301LV) &&
!(pVBInfo->VBExtInfo == VB_YPbPr1080i)) {
- if (pVBInfo->VBExtInfo == VB_YPbPr750p)
- VCLKIndex = YPbPr750pVCLK;
- else if (pVBInfo->VBExtInfo == VB_YPbPr525p)
+ if (pVBInfo->VBExtInfo == YPbPr750p)
+ VCLKIndex = XGI_YPbPr750pVCLK;
+ else if (pVBInfo->VBExtInfo == YPbPr525p)
VCLKIndex = YPbPr525pVCLK;
else if (pVBInfo->SetFlag & RPLLDIV2XO)
VCLKIndex = YPbPr525iVCLK_2;
@@ -1304,10 +1304,10 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
}
} else if (pVBInfo->VBInfo & SetCRT2ToTV) {
if (pVBInfo->SetFlag & RPLLDIV2XO) {
- VCLKIndex = TVVCLKDIV2;
+ VCLKIndex = TVCLKBASE_315 + TVVCLKDIV2;
VCLKIndex += 25;
} else {
- VCLKIndex = TVVCLK;
+ VCLKIndex = TVCLKBASE_315 + TVVCLK;
VCLKIndex += 25;
}
} else { /* for CRT2 */
@@ -1329,11 +1329,11 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
VCLKIndex = CRT2Index;
VCLKIndex = VCLKIndex >> 6;
- if ((pVBInfo->LCDResInfo == Panel800x600) ||
- (pVBInfo->LCDResInfo == Panel320x480))
+ if ((pVBInfo->LCDResInfo == Panel_800x600) ||
+ (pVBInfo->LCDResInfo == Panel_320x480))
VCLKIndex = LVDSXlat1VCLK[VCLKIndex];
- else if ((pVBInfo->LCDResInfo == Panel1024x768) ||
- (pVBInfo->LCDResInfo == Panel1024x768x75))
+ else if ((pVBInfo->LCDResInfo == Panel_1024x768) ||
+ (pVBInfo->LCDResInfo == Panel_1024x768x75))
VCLKIndex = LVDSXlat2VCLK[VCLKIndex];
else
VCLKIndex = LVDSXlat3VCLK[VCLKIndex];
@@ -1360,9 +1360,9 @@ static void XGI_SetCRT1VCLK(unsigned short ModeNo,
xgifb_reg_set(pVBInfo->P3c4, 0x2C,
pVBInfo->VCLKData[index].SR2C);
xgifb_reg_set(pVBInfo->P3c4, 0x2D, 0x01);
- } else if ((pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) && (pVBInfo->VBInfo
- & SetCRT2ToLCDA)) {
+ } else if ((pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) && (pVBInfo->VBInfo
+ & XGI_SetCRT2ToLCDA)) {
vclkindex = XGI_GetVCLK2Ptr(ModeNo, ModeIdIndex,
RefreshRateTableIndex, HwDeviceExtension,
pVBInfo);
@@ -1801,7 +1801,7 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
Ext_CRT2CRTC;
}
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
if (ModeNo <= 0x13)
tempal = pVBInfo->SModeIDTable[ModeIdIndex].
St_CRT2CRTC2;
@@ -2128,30 +2128,30 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
return &XGI_CetLCDDes1024x768Data[tempal];
break;
case 3:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &XGI_ExtLCDDLDes1280x1024Data[tempal];
else
return &XGI_ExtLCDDes1280x1024Data[tempal];
break;
case 4:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &XGI_StLCDDLDes1280x1024Data[tempal];
else
return &XGI_StLCDDes1280x1024Data[tempal];
break;
case 5:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &XGI_CetLCDDLDes1280x1024Data[tempal];
else
return &XGI_CetLCDDes1280x1024Data[tempal];
break;
case 6:
case 7:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &xgifb_lcddldes_1400x1050[tempal];
else
return &xgifb_lcddes_1400x1050[tempal];
@@ -2163,15 +2163,15 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
return &XGI_CetLCDDes1400x1050Data2[tempal];
break;
case 10:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &XGI_ExtLCDDLDes1600x1200Data[tempal];
else
return &XGI_ExtLCDDes1600x1200Data[tempal];
break;
case 11:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &XGI_StLCDDLDes1600x1200Data[tempal];
else
return &XGI_StLCDDes1600x1200Data[tempal];
@@ -2188,15 +2188,15 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
break;
case 16:
case 17:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &xgifb_lcddldes_1280x1024x75[tempal];
else
return &xgifb_lcddes_1280x1024x75[tempal];
break;
case 18:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &XGI_CetLCDDLDes1280x1024x75Data[tempal];
else
return &XGI_CetLCDDes1280x1024x75Data[tempal];
@@ -2364,7 +2364,7 @@ static void XGI_GetLVDSData(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 2;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
LCDPtr = (struct XGI330_LVDSDataStruct *) XGI_GetLcdPtr(tempbx,
ModeNo, ModeIdIndex, RefreshRateTableIndex,
pVBInfo);
@@ -2374,18 +2374,18 @@ static void XGI_GetLVDSData(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->VT = LCDPtr->LCDVT;
}
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
if (!(pVBInfo->LCDInfo & (SetLCDtoNonExpanding
| EnableScalingLCD))) {
- if ((pVBInfo->LCDResInfo == Panel1024x768) ||
- (pVBInfo->LCDResInfo == Panel1024x768x75)) {
+ if ((pVBInfo->LCDResInfo == Panel_1024x768) ||
+ (pVBInfo->LCDResInfo == Panel_1024x768x75)) {
pVBInfo->HDE = 1024;
pVBInfo->VDE = 768;
- } else if ((pVBInfo->LCDResInfo == Panel1280x1024) ||
- (pVBInfo->LCDResInfo == Panel1280x1024x75)) {
+ } else if ((pVBInfo->LCDResInfo == Panel_1280x1024) ||
+ (pVBInfo->LCDResInfo == Panel_1280x1024x75)) {
pVBInfo->HDE = 1280;
pVBInfo->VDE = 1024;
- } else if (pVBInfo->LCDResInfo == Panel1400x1050) {
+ } else if (pVBInfo->LCDResInfo == Panel_1400x1050) {
pVBInfo->HDE = 1400;
pVBInfo->VDE = 1050;
} else {
@@ -2415,7 +2415,7 @@ static void XGI_ModCRT1Regs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 0;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
LCDPtr = (struct XGI_LVDSCRT1HDataStruct *)
XGI_GetLcdPtr(tempbx, ModeNo,
ModeIdIndex,
@@ -2430,7 +2430,7 @@ static void XGI_ModCRT1Regs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 1;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
LCDPtr1 = (struct XGI_LVDSCRT1VDataStruct *)
XGI_GetLcdPtr(
tempbx,
@@ -2496,7 +2496,7 @@ static unsigned short XGI_GetLCDCapPtr1(struct vb_device_info *pVBInfo)
}
if (tempbl == 0xFF) {
- pVBInfo->LCDResInfo = Panel1024x768;
+ pVBInfo->LCDResInfo = Panel_1024x768;
pVBInfo->LCDTypeInfo = 0;
i = 0;
}
@@ -2556,15 +2556,15 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
push2 = tempax;
/* GetLCDResInfo */
- if ((pVBInfo->LCDResInfo == Panel1024x768) ||
- (pVBInfo->LCDResInfo == Panel1024x768x75)) {
+ if ((pVBInfo->LCDResInfo == Panel_1024x768) ||
+ (pVBInfo->LCDResInfo == Panel_1024x768x75)) {
tempax = 1024;
tempbx = 768;
- } else if ((pVBInfo->LCDResInfo == Panel1280x1024) ||
- (pVBInfo->LCDResInfo == Panel1280x1024x75)) {
+ } else if ((pVBInfo->LCDResInfo == Panel_1280x1024) ||
+ (pVBInfo->LCDResInfo == Panel_1280x1024x75)) {
tempax = 1280;
tempbx = 1024;
- } else if (pVBInfo->LCDResInfo == Panel1400x1050) {
+ } else if (pVBInfo->LCDResInfo == Panel_1400x1050) {
tempax = 1400;
tempbx = 1050;
} else {
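[Note: the renamed Panel_* ids above select the panel's native timing. A hedged sketch of that dispatch as one helper; the panel id values are assumed, and the trailing else branch — cut off by the hunk — is assumed to fall back to 1600x1200.]

#include <stdio.h>

/* Assumed panel ids, for illustration only. */
#define Panel_1024x768     0x01
#define Panel_1024x768x75  0x02
#define Panel_1280x1024    0x03
#define Panel_1280x1024x75 0x04
#define Panel_1400x1050    0x05

static void panel_native_size(int res_info, int *w, int *h)
{
    switch (res_info) {
    case Panel_1024x768:
    case Panel_1024x768x75:
        *w = 1024; *h = 768;
        break;
    case Panel_1280x1024:
    case Panel_1280x1024x75:
        *w = 1280; *h = 1024;
        break;
    case Panel_1400x1050:
        *w = 1400; *h = 1050;
        break;
    default:
        /* Assumed fallback; the hunk's else branch is not shown. */
        *w = 1600; *h = 1200;
    }
}

int main(void)
{
    int w, h;

    panel_native_size(Panel_1400x1050, &w, &h);
    printf("%dx%d\n", w, h);
    return 0;
}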
@@ -2682,7 +2682,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
if (tempbx != pVBInfo->VDE)
tempax |= 0x40;
- if (pVBInfo->LCDInfo & EnableLVDSDDA)
+ if (pVBInfo->LCDInfo & XGI_EnableLVDSDDA)
tempax |= 0x40;
xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07,
@@ -2768,7 +2768,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp1 = temp1 / push3;
tempbx = (unsigned short) (temp1 & 0xffff);
- if (pVBInfo->LCDResInfo == Panel1024x768)
+ if (pVBInfo->LCDResInfo == Panel_1024x768)
tempbx -= 1;
tempax = ((tempbx >> 8) & 0xff) << 3;
@@ -2800,7 +2800,7 @@ static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
{
unsigned short index;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
index = XGI_GetLCDCapPtr1(pVBInfo);
if (pVBInfo->VBInfo & SetCRT2ToLCD) { /* LCDB */
@@ -2834,35 +2834,35 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
index = XGI_GetLCDCapPtr(pVBInfo);
tempal = pVBInfo->LCDCapList[index].LCD_VCLK;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA))
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA))
return tempal;
/* {TV} */
if (pVBInfo->VBType &
- (VB_XGI301B |
- VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
+ (VB_SIS301B |
+ VB_SIS302B |
+ VB_SIS301LV |
+ VB_SIS302LV |
VB_XGI301C)) {
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
- tempal = HiTVVCLKDIV2;
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
+ tempal = TVCLKBASE_315 + HiTVVCLKDIV2;
if (!(pVBInfo->TVInfo & RPLLDIV2XO))
- tempal = HiTVVCLK;
+ tempal = TVCLKBASE_315 + HiTVVCLK;
if (pVBInfo->TVInfo & TVSimuMode) {
- tempal = HiTVSimuVCLK;
+ tempal = TVCLKBASE_315 + HiTVSimuVCLK;
if (!(modeflag & Charx8Dot))
- tempal = HiTVTextVCLK;
+ tempal = TVCLKBASE_315 + HiTVTextVCLK;
}
return tempal;
}
- if (pVBInfo->TVInfo & SetYPbPrMode750p) {
- tempal = YPbPr750pVCLK;
+ if (pVBInfo->TVInfo & TVSetYPbPr750p) {
+ tempal = XGI_YPbPr750pVCLK;
return tempal;
}
- if (pVBInfo->TVInfo & SetYPbPrMode525p) {
+ if (pVBInfo->TVInfo & TVSetYPbPr525p) {
tempal = YPbPr525pVCLK;
return tempal;
}
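[Note: after this change the 315-series TV clock indices are written as TVCLKBASE_315 plus a per-mode constant instead of bare values. An illustrative sketch of the HiVision branch above; the base and offsets below are assumed values, not the driver's.]

#include <stdio.h>

/* Assumed base and offsets, for illustration only. */
#define TVCLKBASE_315 0x50
#define HiTVVCLKDIV2  0x00
#define HiTVVCLK      0x01
#define HiTVSimuVCLK  0x02
#define HiTVTextVCLK  0x03

static unsigned char hitv_vclk_index(int rpll_div2, int simu_mode, int charx8)
{
    unsigned char idx = TVCLKBASE_315 + HiTVVCLKDIV2;

    if (!rpll_div2)
        idx = TVCLKBASE_315 + HiTVVCLK;
    if (simu_mode) {
        idx = TVCLKBASE_315 + HiTVSimuVCLK;
        if (!charx8)
            idx = TVCLKBASE_315 + HiTVTextVCLK;
    }
    return idx;
}

int main(void)
{
    printf("0x%02x\n", (unsigned int)hitv_vclk_index(1, 0, 1));
    return 0;
}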
@@ -2870,9 +2870,9 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
tempal = NTSC1024VCLK;
if (!(pVBInfo->TVInfo & NTSC1024x768)) {
- tempal = TVVCLKDIV2;
+ tempal = TVCLKBASE_315 + TVVCLKDIV2;
if (!(pVBInfo->TVInfo & RPLLDIV2XO))
- tempal = TVVCLK;
+ tempal = TVCLKBASE_315 + TVVCLK;
}
if (pVBInfo->VBInfo & SetCRT2ToTV)
@@ -2898,9 +2898,9 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
unsigned char *di_1, struct vb_device_info *pVBInfo)
{
- if (pVBInfo->VBType & (VB_XGI301 | VB_XGI301B | VB_XGI302B
- | VB_XGI301LV | VB_XGI302LV | VB_XGI301C)) {
- if ((!(pVBInfo->VBInfo & SetCRT2ToLCDA)) && (pVBInfo->SetFlag
+ if (pVBInfo->VBType & (VB_SIS301 | VB_SIS301B | VB_SIS302B
+ | VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
+ if ((!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) && (pVBInfo->SetFlag
& ProgrammingCRT2)) {
*di_0 = (unsigned char) XGI_VBVCLKData[tempal].SR2B;
*di_1 = XGI_VBVCLKData[tempal].SR2C;
@@ -2926,7 +2926,7 @@ static void XGI_SetCRT2ECLK(unsigned short ModeNo, unsigned short ModeIdIndex,
for (i = 0; i < 4; i++) {
xgifb_reg_and_or(pVBInfo->P3d4, 0x31, ~0x30,
(unsigned short) (0x10 * i));
- if ((!(pVBInfo->VBInfo & SetCRT2ToLCDA))
+ if ((!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA))
&& (!(pVBInfo->VBInfo & SetInSlaveMode))) {
xgifb_reg_set(pVBInfo->P3c4, 0x2e, di_0);
xgifb_reg_set(pVBInfo->P3c4, 0x2f, di_1);
@@ -2942,8 +2942,8 @@ static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
{
unsigned short tempcl, tempch, temp, tempbl, tempax;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
tempcl = 0;
tempch = 0;
temp = xgifb_reg_get(pVBInfo->P3c4, 0x01);
@@ -2987,12 +2987,12 @@ static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
if (temp & 0x02)
tempch |= ActiveSCART;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
if (temp & 0x01)
tempch |= ActiveHiTV;
}
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
+ if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
temp = xgifb_reg_get(
pVBInfo->Part2Port,
0x4d);
@@ -3014,7 +3014,7 @@ static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
}
}
temp = tempcl;
- tempbl = ~ModeSwitchStatus;
+ tempbl = ~XGI_ModeSwitchStatus;
xgifb_reg_and_or(pVBInfo->P3d4, 0x3d, tempbl, temp);
if (!(pVBInfo->SetFlag & ReserveTVOption))
@@ -3029,19 +3029,19 @@ void XGI_GetVBType(struct vb_device_info *pVBInfo)
unsigned short flag, tempbx, tempah;
if (pVBInfo->IF_DEF_LVDS == 0) {
- tempbx = VB_XGI302B;
+ tempbx = VB_SIS302B;
flag = xgifb_reg_get(pVBInfo->Part4Port, 0x00);
if (flag != 0x02) {
- tempbx = VB_XGI301;
+ tempbx = VB_SIS301;
flag = xgifb_reg_get(pVBInfo->Part4Port, 0x01);
if (flag >= 0xB0) {
- tempbx = VB_XGI301B;
+ tempbx = VB_SIS301B;
if (flag >= 0xC0) {
tempbx = VB_XGI301C;
if (flag >= 0xD0) {
- tempbx = VB_XGI301LV;
+ tempbx = VB_SIS301LV;
if (flag >= 0xE0) {
- tempbx = VB_XGI302LV;
+ tempbx = VB_SIS302LV;
tempah = xgifb_reg_get(
pVBInfo->Part4Port,
0x39);
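[Note: the detection ladder above reads the bridge revision from Part4Port register 0x01 and picks the highest matching type; the nesting means later thresholds override earlier ones. The same logic as a flat sketch — revision thresholds copied from the hunk, names illustrative, and the separate 302B path via register 0x00 omitted.]

#include <stdio.h>

enum vb_type { SIS301, SIS301B, XGI301C, SIS301LV, SIS302LV };

/* Revision thresholds copied from the hunk; highest match wins. */
static enum vb_type classify_bridge(unsigned char rev)
{
    if (rev >= 0xE0)
        return SIS302LV;
    if (rev >= 0xD0)
        return SIS301LV;
    if (rev >= 0xC0)
        return XGI301C;
    if (rev >= 0xB0)
        return SIS301B;
    return SIS301;
}

int main(void)
{
    /* 0xC5 falls in the [0xC0, 0xD0) band, so XGI301C. */
    printf("%d\n", classify_bridge(0xC5));
    return 0;
}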
@@ -3052,7 +3052,7 @@ void XGI_GetVBType(struct vb_device_info *pVBInfo)
}
}
- if (tempbx & (VB_XGI301B | VB_XGI302B)) {
+ if (tempbx & (VB_SIS301B | VB_SIS302B)) {
flag = xgifb_reg_get(
pVBInfo->Part4Port,
0x23);
@@ -3078,7 +3078,7 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
pVBInfo->SetFlag = 0;
- pVBInfo->ModeType = modeflag & ModeInfoFlag;
+ pVBInfo->ModeType = modeflag & ModeTypeMask;
tempbx = 0;
if (pVBInfo->VBType & 0xFFFF) {
@@ -3090,7 +3090,7 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
push = push << 8;
tempax = temp << 8;
tempbx = tempbx | tempax;
- temp = (SetCRT2ToDualEdge | SetCRT2ToYPbPr | SetCRT2ToLCDA
+ temp = (SetCRT2ToDualEdge | SetCRT2ToYPbPr525750 | XGI_SetCRT2ToLCDA
| SetInSlaveMode | DisableCRT2Display);
temp = 0xFFFF ^ temp;
tempbx &= temp;
@@ -3103,9 +3103,9 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
(HwDeviceExtension->jChipType >= XG40)) {
if (pVBInfo->IF_DEF_LVDS == 0) {
if (pVBInfo->VBType &
- (VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
+ (VB_SIS302B |
+ VB_SIS301LV |
+ VB_SIS302LV |
VB_XGI301C)) {
if (temp & EnableDualEdge) {
tempbx |=
@@ -3113,7 +3113,7 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (temp & SetToLCDA)
tempbx |=
- SetCRT2ToLCDA;
+ XGI_SetCRT2ToLCDA;
}
}
}
@@ -3123,8 +3123,8 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->IF_DEF_YPbPr == 1) {
/* [Billy] 07/05/04 */
if (((pVBInfo->IF_DEF_LVDS == 0) &&
- ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV) ||
+ ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV) ||
(pVBInfo->VBType & VB_XGI301C)))) {
if (temp & SetYPbPr) {
if (pVBInfo->IF_DEF_HiVision == 1) {
@@ -3134,13 +3134,13 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->P3d4,
0x35);
temp &= YPbPrMode;
- tempbx |= SetCRT2ToHiVisionTV;
+ tempbx |= SetCRT2ToHiVision;
if (temp != YPbPrMode1080i) {
tempbx &=
- (~SetCRT2ToHiVisionTV);
+ (~SetCRT2ToHiVision);
tempbx |=
- SetCRT2ToYPbPr;
+ SetCRT2ToYPbPr525750;
}
}
}
@@ -3172,30 +3172,30 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->IF_DEF_LCDA == 1) { /* Select Display Device */
if (!(pVBInfo->VBType & VB_NoLCD)) {
- if (tempbx & SetCRT2ToLCDA) {
+ if (tempbx & XGI_SetCRT2ToLCDA) {
if (tempbx & SetSimuScanMode)
tempbx &= (~(SetCRT2ToLCD |
SetCRT2ToRAMDAC |
- SwitchToCRT2));
+ SwitchCRT2));
else
tempbx &= (~(SetCRT2ToLCD |
SetCRT2ToRAMDAC |
SetCRT2ToTV |
- SwitchToCRT2));
+ SwitchCRT2));
}
}
}
/* shampoo add */
/* for driver abnormal */
- if (!(tempbx & (SwitchToCRT2 | SetSimuScanMode))) {
+ if (!(tempbx & (SwitchCRT2 | SetSimuScanMode))) {
if (pVBInfo->IF_DEF_CRT2Monitor == 1) {
if (tempbx & SetCRT2ToRAMDAC) {
tempbx &= (0xFF00 |
SetCRT2ToRAMDAC |
- SwitchToCRT2 |
+ SwitchCRT2 |
SetSimuScanMode);
- tempbx &= (0x00FF | (~SetCRT2ToYPbPr));
+ tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
}
} else {
tempbx &= (~(SetCRT2ToRAMDAC |
@@ -3208,37 +3208,37 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (tempbx & SetCRT2ToLCD) {
tempbx &= (0xFF00 |
SetCRT2ToLCD |
- SwitchToCRT2 |
+ SwitchCRT2 |
SetSimuScanMode);
- tempbx &= (0x00FF | (~SetCRT2ToYPbPr));
+ tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
}
}
if (tempbx & SetCRT2ToSCART) {
tempbx &= (0xFF00 |
SetCRT2ToSCART |
- SwitchToCRT2 |
+ SwitchCRT2 |
SetSimuScanMode);
- tempbx &= (0x00FF | (~SetCRT2ToYPbPr));
+ tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
}
if (pVBInfo->IF_DEF_YPbPr == 1) {
- if (tempbx & SetCRT2ToYPbPr)
+ if (tempbx & SetCRT2ToYPbPr525750)
tempbx &= (0xFF00 |
- SwitchToCRT2 |
+ SwitchCRT2 |
SetSimuScanMode);
}
if (pVBInfo->IF_DEF_HiVision == 1) {
- if (tempbx & SetCRT2ToHiVisionTV)
+ if (tempbx & SetCRT2ToHiVision)
tempbx &= (0xFF00 |
- SetCRT2ToHiVisionTV |
- SwitchToCRT2 |
+ SetCRT2ToHiVision |
+ SwitchCRT2 |
SetSimuScanMode);
}
if (tempax & DisableCRT2Display) { /* Set Display Device Info */
- if (!(tempbx & (SwitchToCRT2 | SetSimuScanMode)))
+ if (!(tempbx & (SwitchCRT2 | SetSimuScanMode)))
tempbx = DisableCRT2Display;
}
@@ -3246,7 +3246,7 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if ((!(tempbx & DriverMode)) ||
(!(modeflag & CRT2Mode))) {
if (pVBInfo->IF_DEF_LCDA == 1) {
- if (!(tempbx & SetCRT2ToLCDA))
+ if (!(tempbx & XGI_SetCRT2ToLCDA))
tempbx |= (SetInSlaveMode |
SetSimuScanMode);
}
@@ -3255,9 +3255,9 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
/* LCD+TV can't support in slave mode
* (Force LCDA+TV->LCDB) */
if ((tempbx & SetInSlaveMode) &&
- (tempbx & SetCRT2ToLCDA)) {
+ (tempbx & XGI_SetCRT2ToLCDA)) {
tempbx ^= (SetCRT2ToLCD |
- SetCRT2ToLCDA |
+ XGI_SetCRT2ToLCDA |
SetCRT2ToDualEdge);
pVBInfo->SetFlag |= ReserveTVOption;
}
@@ -3291,43 +3291,43 @@ static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & SetCRT2ToTV) {
temp = xgifb_reg_get(pVBInfo->P3d4, 0x35);
tempbx = temp;
- if (tempbx & SetPALTV) {
+ if (tempbx & TVSetPAL) {
tempbx &= (SetCHTVOverScan |
- SetPALMTV |
- SetPALNTV |
- SetPALTV);
- if (tempbx & SetPALMTV)
+ TVSetPALM |
+ TVSetPALN |
+ TVSetPAL);
+ if (tempbx & TVSetPALM)
/* set to NTSC if PAL-M */
- tempbx &= ~SetPALTV;
+ tempbx &= ~TVSetPAL;
} else
tempbx &= (SetCHTVOverScan |
- SetNTSCJ |
- SetPALTV);
+ TVSetNTSCJ |
+ TVSetPAL);
}
if (pVBInfo->IF_DEF_LVDS == 0) {
if (pVBInfo->VBInfo & SetCRT2ToSCART)
- tempbx |= SetPALTV;
+ tempbx |= TVSetPAL;
}
if (pVBInfo->IF_DEF_YPbPr == 1) {
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
+ if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
index1 = xgifb_reg_get(pVBInfo->P3d4, 0x35);
index1 &= YPbPrMode;
if (index1 == YPbPrMode525i)
- tempbx |= SetYPbPrMode525i;
+ tempbx |= TVSetYPbPr525i;
if (index1 == YPbPrMode525p)
- tempbx = tempbx | SetYPbPrMode525p;
+ tempbx = tempbx | TVSetYPbPr525p;
if (index1 == YPbPrMode750p)
- tempbx = tempbx | SetYPbPrMode750p;
+ tempbx = tempbx | TVSetYPbPr750p;
}
}
if (pVBInfo->IF_DEF_HiVision == 1) {
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
- tempbx = tempbx | SetYPbPrMode1080i | SetPALTV;
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
+ tempbx = tempbx | TVSetHiVision | TVSetPAL;
}
if (pVBInfo->IF_DEF_LVDS == 0) { /* shampoo */
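[Note: the PAL handling above keeps the PAL-M/PAL-N modifier bits but clears the base PAL bit for PAL-M, since PAL-M runs on NTSC timing. A small sketch of that normalization with assumed flag values.]

#include <stdio.h>

/* Assumed flag values, for illustration only. */
#define TVSetPAL  0x0001
#define TVSetPALM 0x0002

static unsigned short normalize_tv_flags(unsigned short tv_info)
{
    /* PAL-M uses NTSC timing, so drop the base PAL bit. */
    if ((tv_info & TVSetPAL) && (tv_info & TVSetPALM))
        tv_info &= ~TVSetPAL;
    return tv_info;
}

int main(void)
{
    printf("0x%04x\n", normalize_tv_flags(TVSetPAL | TVSetPALM));
    return 0;
}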
@@ -3335,25 +3335,25 @@ static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
(!(pVBInfo->VBInfo & SetNotSimuMode)))
tempbx |= TVSimuMode;
- if (!(tempbx & SetPALTV) &&
+ if (!(tempbx & TVSetPAL) &&
(modeflag > 13) &&
(resinfo == 8)) /* NTSC 1024x768, */
tempbx |= NTSC1024x768;
tempbx |= RPLLDIV2XO;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
if (pVBInfo->VBInfo & SetInSlaveMode)
tempbx &= (~RPLLDIV2XO);
} else {
if (tempbx &
- (SetYPbPrMode525p | SetYPbPrMode750p))
+ (TVSetYPbPr525p | TVSetYPbPr750p))
tempbx &= (~RPLLDIV2XO);
else if (!(pVBInfo->VBType &
- (VB_XGI301B |
- VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
+ (VB_SIS301B |
+ VB_SIS302B |
+ VB_SIS301LV |
+ VB_SIS302LV |
VB_XGI301C))) {
if (tempbx & TVSimuMode)
tempbx &= (~RPLLDIV2XO);
@@ -3386,13 +3386,13 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
tempbx = temp & 0x0F;
if (tempbx == 0)
- tempbx = Panel1024x768; /* default */
+ tempbx = Panel_1024x768; /* default */
/* LCD75 [2003/8/22] Vicent */
- if ((tempbx == Panel1024x768) || (tempbx == Panel1280x1024)) {
+ if ((tempbx == Panel_1024x768) || (tempbx == Panel_1280x1024)) {
if (pVBInfo->VBInfo & DriverMode) {
tempax = xgifb_reg_get(pVBInfo->P3d4, 0x33);
- if (pVBInfo->VBInfo & SetCRT2ToLCDA)
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
tempax &= 0x0F;
else
tempax = tempax >> 4;
@@ -3411,7 +3411,7 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
/* End of LCD75 */
- if (!(pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)))
+ if (!(pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
return 0;
tempbx = 0;
@@ -3427,30 +3427,30 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
tempax = pVBInfo->LCDCapList[LCDIdIndex].LCD_Capability;
if (pVBInfo->IF_DEF_LVDS == 0) { /* shampoo */
- if (((pVBInfo->VBType & VB_XGI302LV) || (pVBInfo->VBType
- & VB_XGI301C)) && (tempax & LCDDualLink)) {
+ if (((pVBInfo->VBType & VB_SIS302LV) || (pVBInfo->VBType
+ & VB_XGI301C)) && (tempax & XGI_LCDDualLink)) {
tempbx |= SetLCDDualLink;
}
}
if (pVBInfo->IF_DEF_LVDS == 0) {
- if ((pVBInfo->LCDResInfo == Panel1400x1050) && (pVBInfo->VBInfo
+ if ((pVBInfo->LCDResInfo == Panel_1400x1050) && (pVBInfo->VBInfo
& SetCRT2ToLCD) && (ModeNo > 0x13) && (resinfo
== 9) && (!(tempbx & EnableScalingLCD)))
- /* set to center in 1280x1024 LCDB for Panel1400x1050 */
+ /* set to center in 1280x1024 LCDB for Panel_1400x1050 */
tempbx |= SetLCDtoNonExpanding;
}
if (pVBInfo->IF_DEF_ExpLink == 1) {
if (modeflag & HalfDCLK) {
if (!(tempbx & SetLCDtoNonExpanding)) {
- tempbx |= EnableLVDSDDA;
+ tempbx |= XGI_EnableLVDSDDA;
} else {
if (ModeNo > 0x13) {
if (pVBInfo->LCDResInfo
- == Panel1024x768) {
+ == Panel_1024x768) {
if (resinfo == 4) {/* 512x384 */
- tempbx |= EnableLVDSDDA;
+ tempbx |= XGI_EnableLVDSDDA;
}
}
}
@@ -3460,9 +3460,9 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
if (pVBInfo->VBInfo & SetInSlaveMode) {
if (pVBInfo->VBInfo & SetNotSimuMode)
- tempbx |= LCDVESATiming;
+ tempbx |= XGI_LCDVESATiming;
} else {
- tempbx |= LCDVESATiming;
+ tempbx |= XGI_LCDVESATiming;
}
pVBInfo->LCDInfo = tempbx;
@@ -3477,7 +3477,7 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
SetInSlaveMode |
SetCRT2ToLCD);
pVBInfo->VBInfo |=
- SetCRT2ToLCDA |
+ XGI_SetCRT2ToLCDA |
SetCRT2ToDualEdge;
}
}
@@ -3801,27 +3801,27 @@ static void XGI_GetCRT2ResInfo(unsigned short ModeNo,
if (pVBInfo->VBInfo & SetCRT2ToLCD) {
if (pVBInfo->IF_DEF_LVDS == 0) {
- if (pVBInfo->LCDResInfo == Panel1600x1200) {
- if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+ if (pVBInfo->LCDResInfo == Panel_1600x1200) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
if (yres == 1024)
yres = 1056;
}
}
- if (pVBInfo->LCDResInfo == Panel1280x1024) {
+ if (pVBInfo->LCDResInfo == Panel_1280x1024) {
if (yres == 400)
yres = 405;
else if (yres == 350)
yres = 360;
- if (pVBInfo->LCDInfo & LCDVESATiming) {
+ if (pVBInfo->LCDInfo & XGI_LCDVESATiming) {
if (yres == 360)
yres = 375;
}
}
- if (pVBInfo->LCDResInfo == Panel1024x768) {
- if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+ if (pVBInfo->LCDResInfo == Panel_1024x768) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
if (!(pVBInfo->LCDInfo
& LCDNonExpanding)) {
if (yres == 350)
@@ -3848,7 +3848,7 @@ static void XGI_GetCRT2ResInfo(unsigned short ModeNo,
static unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
{
- if ((pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) &&
+ if ((pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) &&
(pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
return 1;
@@ -3918,8 +3918,8 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
{
unsigned short tempax = 0, tempbx, modeflag, resinfo;
- struct XGI_LCDDataStruct *LCDPtr = NULL;
- struct XGI_TVDataStruct *TVPtr = NULL;
+ struct SiS_LCDData *LCDPtr = NULL;
+ struct SiS_TVData *TVPtr = NULL;
if (ModeNo <= 0x13) {
/* si+St_ResInfo */
@@ -3942,8 +3942,8 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 4;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
- LCDPtr = (struct XGI_LCDDataStruct *) XGI_GetLcdPtr(tempbx,
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
+ LCDPtr = (struct SiS_LCDData *) XGI_GetLcdPtr(tempbx,
ModeNo, ModeIdIndex, RefreshRateTableIndex,
pVBInfo);
@@ -3954,11 +3954,11 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->HT = LCDPtr->LCDHT;
pVBInfo->VT = LCDPtr->LCDVT;
- if (pVBInfo->LCDResInfo == Panel1024x768) {
+ if (pVBInfo->LCDResInfo == Panel_1024x768) {
tempax = 1024;
tempbx = 768;
- if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
if (pVBInfo->VGAVDE == 357)
tempbx = 527;
else if (pVBInfo->VGAVDE == 420)
@@ -3971,10 +3971,10 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 768;
} else
tempbx = 768;
- } else if (pVBInfo->LCDResInfo == Panel1024x768x75) {
+ } else if (pVBInfo->LCDResInfo == Panel_1024x768x75) {
tempax = 1024;
tempbx = 768;
- } else if (pVBInfo->LCDResInfo == Panel1280x1024) {
+ } else if (pVBInfo->LCDResInfo == Panel_1280x1024) {
tempax = 1280;
if (pVBInfo->VGAVDE == 360)
tempbx = 768;
@@ -3984,10 +3984,10 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 864;
else
tempbx = 1024;
- } else if (pVBInfo->LCDResInfo == Panel1280x1024x75) {
+ } else if (pVBInfo->LCDResInfo == Panel_1280x1024x75) {
tempax = 1280;
tempbx = 1024;
- } else if (pVBInfo->LCDResInfo == Panel1280x960) {
+ } else if (pVBInfo->LCDResInfo == Panel_1280x960) {
tempax = 1280;
if (pVBInfo->VGAVDE == 350)
tempbx = 700;
@@ -3997,7 +3997,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 960;
else
tempbx = 960;
- } else if (pVBInfo->LCDResInfo == Panel1400x1050) {
+ } else if (pVBInfo->LCDResInfo == Panel_1400x1050) {
tempax = 1400;
tempbx = 1050;
@@ -4005,10 +4005,10 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax = 1280;
tempbx = 1024;
}
- } else if (pVBInfo->LCDResInfo == Panel1600x1200) {
+ } else if (pVBInfo->LCDResInfo == Panel_1600x1200) {
tempax = 1600;
tempbx = 1200; /* alan 10/14/2003 */
- if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
if (pVBInfo->VGAVDE == 350)
tempbx = 875;
else if (pVBInfo->VGAVDE == 400)
@@ -4028,7 +4028,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & (SetCRT2ToTV)) {
tempbx = 4;
- TVPtr = (struct XGI_TVDataStruct *) XGI_GetTVPtr(tempbx,
+ TVPtr = (struct SiS_TVData *) XGI_GetTVPtr(tempbx,
ModeNo, ModeIdIndex, RefreshRateTableIndex,
pVBInfo);
@@ -4041,7 +4041,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->RVBHRS = TVPtr->RVBHRS;
pVBInfo->NewFlickerMode = TVPtr->FlickerMode;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
if (resinfo == 0x08)
pVBInfo->NewFlickerMode = 0x40;
else if (resinfo == 0x09)
@@ -4066,16 +4066,16 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
}
- } else if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
- if (pVBInfo->TVInfo & SetYPbPrMode750p) {
+ } else if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
+ if (pVBInfo->TVInfo & TVSetYPbPr750p) {
tempax = YPbPrTV750pHT; /* Ext750pTVHT */
tempbx = YPbPrTV750pVT; /* Ext750pTVVT */
}
- if (pVBInfo->TVInfo & SetYPbPrMode525p) {
+ if (pVBInfo->TVInfo & TVSetYPbPr525p) {
tempax = YPbPrTV525pHT; /* Ext525pTVHT */
tempbx = YPbPrTV525pVT; /* Ext525pTVVT */
- } else if (pVBInfo->TVInfo & SetYPbPrMode525i) {
+ } else if (pVBInfo->TVInfo & TVSetYPbPr525i) {
tempax = YPbPrTV525iHT; /* Ext525iTVHT */
tempbx = YPbPrTV525iVT; /* Ext525iTVVT */
if (pVBInfo->TVInfo & NTSC1024x768)
@@ -4084,7 +4084,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
} else {
tempax = PALHT;
tempbx = PALVT;
- if (!(pVBInfo->TVInfo & SetPALTV)) {
+ if (!(pVBInfo->TVInfo & TVSetPAL)) {
tempax = NTSCHT;
tempbx = NTSCVT;
if (pVBInfo->TVInfo & NTSC1024x768)
@@ -4109,7 +4109,7 @@ static void XGI_SetCRT2VCLK(unsigned short ModeNo, unsigned short ModeIdIndex,
XGI_GetVCLKLen(tempal, &di_0, &di_1, pVBInfo);
XGI_GetLCDVCLKPtr(&di_0, &di_1, pVBInfo);
- if (pVBInfo->VBType & VB_XGI301) { /* shampoo 0129 */
+ if (pVBInfo->VBType & VB_SIS301) { /* shampoo 0129 */
/* 301 */
xgifb_reg_set(pVBInfo->Part4Port, 0x0A, 0x10);
xgifb_reg_set(pVBInfo->Part4Port, 0x0B, di_1);
@@ -4139,7 +4139,7 @@ static unsigned short XGI_GetColorDepth(unsigned short ModeNo,
else
modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- index = (modeflag & ModeInfoFlag) - ModeEGA;
+ index = (modeflag & ModeTypeMask) - ModeEGA;
if (index < 0)
index = 0;
@@ -4435,7 +4435,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part1Port, 0x03, temp);
tempcx = 0x08;
- if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV | VB_XGI301C))
+ if (pVBInfo->VBType & (VB_SIS301LV | VB_SIS302LV | VB_XGI301C))
modeflag |= Charx8Dot;
tempax = pVBInfo->VGAHDE; /* 0x04 Horizontal Display End */
@@ -4451,12 +4451,12 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (tempbx & 0xFF00) >> 8;
if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (!(pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)))
+ if (!(pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)))
temp += 2;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
- if (pVBInfo->VBType & VB_XGI301LV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
+ if (pVBInfo->VBType & VB_SIS301LV) {
if (pVBInfo->VBExtInfo == VB_YPbPr1080i) {
if (resinfo == 7)
temp -= 2;
@@ -4487,7 +4487,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax = (tempax / tempcx) - 5;
tempcx = tempax; /* 20030401 0x07 horizontal Retrace Start */
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
temp = (tempbx & 0x00FF) - 1;
if (!(modeflag & HalfDCLK)) {
temp -= 6;
@@ -4513,19 +4513,19 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
}
} else if (!(modeflag & HalfDCLK)) {
temp -= 4;
- if (pVBInfo->LCDResInfo != Panel1280x960 &&
+ if (pVBInfo->LCDResInfo != Panel_1280x960 &&
pVBInfo->VGAHDE >= 800) {
temp -= 7;
if (pVBInfo->ModeType == ModeEGA &&
pVBInfo->VGAVDE == 1024) {
temp += 15;
if (pVBInfo->LCDResInfo !=
- Panel1280x1024)
+ Panel_1280x1024)
temp += 7;
}
if (pVBInfo->VGAHDE >= 1280 &&
- pVBInfo->LCDResInfo != Panel1280x960 &&
+ pVBInfo->LCDResInfo != Panel_1280x960 &&
(pVBInfo->LCDInfo & LCDNonExpanding))
temp += 28;
}
@@ -4619,8 +4619,8 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
push2 = tempbx;
if (pVBInfo->VBInfo & SetCRT2ToLCD) {
- if (pVBInfo->LCDResInfo == Panel1024x768) {
- if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+ if (pVBInfo->LCDResInfo == Panel_1024x768) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
if (tempbx == 350)
tempbx += 5;
if (tempbx == 480)
@@ -4669,19 +4669,19 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx += tempax;
}
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
- if (pVBInfo->VBType & VB_XGI301LV) {
- if (pVBInfo->TVInfo & SetYPbPrMode1080i) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
+ if (pVBInfo->VBType & VB_SIS301LV) {
+ if (pVBInfo->TVInfo & TVSetHiVision) {
tempbx -= 10;
} else {
if (pVBInfo->TVInfo & TVSimuMode) {
- if (pVBInfo->TVInfo & SetPALTV) {
+ if (pVBInfo->TVInfo & TVSetPAL) {
if (pVBInfo->VBType &
- VB_XGI301LV) {
+ VB_SIS301LV) {
if (!(pVBInfo->TVInfo &
- (SetYPbPrMode525p |
- SetYPbPrMode750p |
- SetYPbPrMode1080i)))
+ (TVSetYPbPr525p |
+ TVSetYPbPr750p |
+ TVSetHiVision)))
tempbx += 40;
} else {
tempbx += 40;
@@ -4694,12 +4694,12 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
}
} else {
if (pVBInfo->TVInfo & TVSimuMode) {
- if (pVBInfo->TVInfo & SetPALTV) {
- if (pVBInfo->VBType & VB_XGI301LV) {
+ if (pVBInfo->TVInfo & TVSetPAL) {
+ if (pVBInfo->VBType & VB_SIS301LV) {
if (!(pVBInfo->TVInfo &
- (SetYPbPrMode525p |
- SetYPbPrMode750p |
- SetYPbPrMode1080i)))
+ (TVSetYPbPr525p |
+ TVSetYPbPr750p |
+ TVSetHiVision)))
tempbx += 40;
} else {
tempbx += 40;
@@ -4713,7 +4713,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax += tempbx;
push1 = tempax; /* push ax */
- if ((pVBInfo->TVInfo & SetPALTV)) {
+ if ((pVBInfo->TVInfo & TVSetPAL)) {
if (tempbx <= 513) {
if (tempax >= 513)
tempbx = 513;
@@ -4761,7 +4761,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (temp >> 1) & 0x09;
- if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV | VB_XGI301C))
+ if (pVBInfo->VBType & (VB_SIS301LV | VB_SIS302LV | VB_XGI301C))
temp |= 0x01;
xgifb_reg_set(pVBInfo->Part1Port, 0x16, temp); /* 0x16 SR01 */
@@ -4813,13 +4813,13 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & SetCRT2ToSCART)
tempax |= 0x0200;
- if (!(pVBInfo->TVInfo & SetPALTV))
+ if (!(pVBInfo->TVInfo & TVSetPAL))
tempax |= 0x1000;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
tempax |= 0x0100;
- if (pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p))
+ if (pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))
tempax &= 0xfe00;
tempax = (tempax & 0xff00) >> 8;
@@ -4827,10 +4827,10 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part2Port, 0x0, tempax);
TimingPoint = pVBInfo->NTSCTiming;
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
TimingPoint = pVBInfo->PALTiming;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
TimingPoint = pVBInfo->HiTVExtTiming;
if (pVBInfo->VBInfo & SetInSlaveMode)
@@ -4843,14 +4843,14 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
TimingPoint = pVBInfo->HiTVTextTiming;
}
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
- if (pVBInfo->TVInfo & SetYPbPrMode525i)
+ if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
+ if (pVBInfo->TVInfo & TVSetYPbPr525i)
TimingPoint = pVBInfo->YPbPr525iTiming;
- if (pVBInfo->TVInfo & SetYPbPrMode525p)
+ if (pVBInfo->TVInfo & TVSetYPbPr525p)
TimingPoint = pVBInfo->YPbPr525pTiming;
- if (pVBInfo->TVInfo & SetYPbPrMode750p)
+ if (pVBInfo->TVInfo & TVSetYPbPr750p)
TimingPoint = pVBInfo->YPbPr750pTiming;
}
@@ -4868,10 +4868,10 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp &= 0x80;
xgifb_reg_and_or(pVBInfo->Part2Port, 0x0A, 0xFF, temp);
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
tempax = 950;
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
tempax = 520;
else
tempax = 440;
@@ -4884,15 +4884,15 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (tempax & 0xFF00) >> 8;
temp += (unsigned short) TimingPoint[0];
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
if (pVBInfo->VBInfo & (SetCRT2ToAVIDEO
| SetCRT2ToSVIDEO | SetCRT2ToSCART
- | SetCRT2ToYPbPr)) {
+ | SetCRT2ToYPbPr525750)) {
tempcx = pVBInfo->VGAHDE;
if (tempcx >= 1024) {
temp = 0x17; /* NTSC */
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
temp = 0x19; /* PAL */
}
}
@@ -4903,15 +4903,15 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (tempax & 0xFF00) >> 8;
temp += TimingPoint[1];
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
if ((pVBInfo->VBInfo & (SetCRT2ToAVIDEO
| SetCRT2ToSVIDEO | SetCRT2ToSCART
- | SetCRT2ToYPbPr))) {
+ | SetCRT2ToYPbPr525750))) {
tempcx = pVBInfo->VGAHDE;
if (tempcx >= 1024) {
temp = 0x1D; /* NTSC */
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
temp = 0x52; /* PAL */
}
}
@@ -4936,7 +4936,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
push1 = tempcx; /* push cx */
tempcx += 7;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
tempcx -= 4;
temp = tempcx & 0x00FF;
@@ -4954,7 +4954,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = push2;
tempbx = tempbx + 8;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
tempbx = tempbx - 4;
tempcx = tempbx;
}
@@ -4970,7 +4970,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_and_or(pVBInfo->Part2Port, 0x28, 0x0F, temp);
tempcx += 8;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
tempcx -= 4;
temp = tempcx & 0xFF;
@@ -5005,9 +5005,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & SetCRT2ToTV) {
if (pVBInfo->VBType &
- (VB_XGI301LV | VB_XGI302LV | VB_XGI301C)) {
+ (VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
if (!(pVBInfo->TVInfo &
- (SetYPbPrMode525p | SetYPbPrMode750p)))
+ (TVSetYPbPr525p | TVSetYPbPr750p)))
tempbx = tempbx >> 1;
} else
tempbx = tempbx >> 1;
@@ -5016,9 +5016,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx -= 2;
temp = tempbx & 0x00FF;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
- if (pVBInfo->VBType & VB_XGI301LV) {
- if (pVBInfo->TVInfo & SetYPbPrMode1080i) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
+ if (pVBInfo->VBType & VB_SIS301LV) {
+ if (pVBInfo->TVInfo & TVSetHiVision) {
if (pVBInfo->VBInfo & SetInSlaveMode) {
if (ModeNo == 0x2f)
temp += 1;
@@ -5037,9 +5037,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (tempcx & 0xFF00) >> 8;
temp |= ((tempbx & 0xFF00) >> 8) << 6;
- if (!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV)) {
- if (pVBInfo->VBType & VB_XGI301LV) {
- if (pVBInfo->TVInfo & SetYPbPrMode1080i) {
+ if (!(pVBInfo->VBInfo & SetCRT2ToHiVision)) {
+ if (pVBInfo->VBType & VB_SIS301LV) {
+ if (pVBInfo->TVInfo & TVSetHiVision) {
temp |= 0x10;
if (!(pVBInfo->VBInfo & SetCRT2ToSVIDEO))
@@ -5054,18 +5054,18 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part2Port, 0x30, temp);
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) { /* TV gatingno */
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) { /* TV gatingno */
tempbx = pVBInfo->VDE;
tempcx = tempbx - 2;
if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (!(pVBInfo->TVInfo & (SetYPbPrMode525p
- | SetYPbPrMode750p)))
+ if (!(pVBInfo->TVInfo & (TVSetYPbPr525p
+ | TVSetYPbPr750p)))
tempbx = tempbx >> 1;
}
- if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
temp = 0;
if (tempcx & 0x0400)
temp |= 0x20;
@@ -5118,8 +5118,8 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
/* 301b */
tempecx = 8 * 1024;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
tempecx = tempecx * 8;
}
@@ -5133,8 +5133,8 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax = (unsigned short) tempeax;
/* 301b */
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
tempcx = ((tempax & 0xFF00) >> 5) >> 8;
}
/* end 301b */
@@ -5161,7 +5161,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp |= 0x18;
xgifb_reg_and_or(pVBInfo->Part2Port, 0x46, ~0x1F, temp);
- if (pVBInfo->TVInfo & SetPALTV) {
+ if (pVBInfo->TVInfo & TVSetPAL) {
tempbx = 0x0382;
tempcx = 0x007e;
} else {
@@ -5178,13 +5178,13 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = temp << 2;
temp |= ((tempbx & 0xFF00) >> 8) & 0x03;
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
+ if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
temp |= 0x10;
- if (pVBInfo->TVInfo & SetYPbPrMode525p)
+ if (pVBInfo->TVInfo & TVSetYPbPr525p)
temp |= 0x20;
- if (pVBInfo->TVInfo & SetYPbPrMode750p)
+ if (pVBInfo->TVInfo & TVSetYPbPr750p)
temp |= 0x60;
}
@@ -5192,7 +5192,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = xgifb_reg_get(pVBInfo->Part2Port, 0x43); /* 301b change */
xgifb_reg_set(pVBInfo->Part2Port, 0x43, (unsigned short) (temp - 3));
- if (!(pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p))) {
+ if (!(pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))) {
if (pVBInfo->TVInfo & NTSC1024x768) {
TimingPoint = XGI_NTSC1024AdjTime;
for (i = 0x1c, j = 0; i <= 0x30; i++, j++) {
@@ -5205,12 +5205,12 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
/* [ycchen] 01/14/03 Modify for 301C PALM Support */
if (pVBInfo->VBType & VB_XGI301C) {
- if (pVBInfo->TVInfo & SetPALMTV)
+ if (pVBInfo->TVInfo & TVSetPALM)
xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x08,
0x08); /* PALM Mode */
}
- if (pVBInfo->TVInfo & SetPALMTV) {
+ if (pVBInfo->TVInfo & TVSetPALM) {
tempax = (unsigned char) xgifb_reg_get(pVBInfo->Part2Port,
0x01);
tempax--;
@@ -5219,7 +5219,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_and(pVBInfo->Part2Port, 0x00, 0xEF);
}
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
if (!(pVBInfo->VBInfo & SetInSlaveMode))
xgifb_reg_set(pVBInfo->Part2Port, 0x0B, 0x00);
}
@@ -5267,11 +5267,11 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_and_or(pVBInfo->Part2Port, 0x2B, 0x0F, temp);
temp = 0x01;
- if (pVBInfo->LCDResInfo == Panel1280x1024) {
+ if (pVBInfo->LCDResInfo == Panel_1280x1024) {
if (pVBInfo->ModeType == ModeEGA) {
if (pVBInfo->VGAHDE >= 1024) {
temp = 0x02;
- if (pVBInfo->LCDInfo & LCDVESATiming)
+ if (pVBInfo->LCDInfo & XGI_LCDVESATiming)
temp = 0x01;
}
}
@@ -5305,14 +5305,14 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempah = pVBInfo->LCDResInfo;
tempah &= PanelResInfo;
- if ((tempah == Panel1024x768) || (tempah == Panel1024x768x75)) {
+ if ((tempah == Panel_1024x768) || (tempah == Panel_1024x768x75)) {
tempbx = 1024;
tempcx = 768;
- } else if ((tempah == Panel1280x1024) ||
- (tempah == Panel1280x1024x75)) {
+ } else if ((tempah == Panel_1280x1024) ||
+ (tempah == Panel_1280x1024x75)) {
tempbx = 1280;
tempcx = 1024;
- } else if (tempah == Panel1400x1050) {
+ } else if (tempah == Panel_1400x1050) {
tempbx = 1400;
tempcx = 1050;
} else {
@@ -5375,7 +5375,7 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempcx = tempcx >> 1;
}
- if (pVBInfo->VBType & VB_XGI302LV)
+ if (pVBInfo->VBType & VB_SIS302LV)
tempbx += 1;
if (pVBInfo->VBType & VB_XGI301C) /* tap4 */
@@ -5405,7 +5405,7 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempcx = tempcx >> 1;
}
- if (pVBInfo->VBType & VB_XGI302LV)
+ if (pVBInfo->VBType & VB_SIS302LV)
tempbx += 1;
tempcx += tempbx;
@@ -5422,10 +5422,10 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = tempcx & 0x00FF; /* RHSYEXP2S=lcdhre */
xgifb_reg_set(pVBInfo->Part2Port, 0x21, temp);
- if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
if (pVBInfo->VGAVDE == 525) {
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
- | VB_XGI301LV | VB_XGI302LV
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
+ | VB_SIS301LV | VB_SIS302LV
| VB_XGI301C)) {
temp = 0xC6;
} else
@@ -5436,8 +5436,8 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
}
if (pVBInfo->VGAVDE == 420) {
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
- | VB_XGI301LV | VB_XGI302LV
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
+ | VB_SIS301LV | VB_SIS302LV
| VB_XGI301C)) {
temp = 0x4F;
} else
@@ -5473,18 +5473,18 @@ static struct XGI301C_Tap4TimingStruct *XGI_GetTap4Ptr(unsigned short tempcx,
else
Tap4TimingPtr = xgifb_ntsc_525_tap4_timing; /* NTSC */
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
Tap4TimingPtr = PALTap4Timing;
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
- if ((pVBInfo->TVInfo & SetYPbPrMode525i) ||
- (pVBInfo->TVInfo & SetYPbPrMode525p))
+ if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
+ if ((pVBInfo->TVInfo & TVSetYPbPr525i) ||
+ (pVBInfo->TVInfo & TVSetYPbPr525p))
Tap4TimingPtr = xgifb_ntsc_525_tap4_timing;
- if (pVBInfo->TVInfo & SetYPbPrMode750p)
+ if (pVBInfo->TVInfo & TVSetYPbPr750p)
Tap4TimingPtr = YPbPr750pTap4Timing;
}
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
Tap4TimingPtr = xgifb_tap4_timing;
i = 0;
@@ -5510,7 +5510,7 @@ static void XGI_SetTap4Regs(struct vb_device_info *pVBInfo)
xgifb_reg_set(pVBInfo->Part2Port, i, Tap4TimingPtr->Reg[j]);
if ((pVBInfo->VBInfo & SetCRT2ToTV) &&
- (!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV))) {
+ (!(pVBInfo->VBInfo & SetCRT2ToHiVision))) {
/* Set Vertical Scaling */
Tap4TimingPtr = XGI_GetTap4Ptr(1, pVBInfo);
for (i = 0xC0, j = 0; i < 0xFF; i++, j++)
@@ -5520,7 +5520,7 @@ static void XGI_SetTap4Regs(struct vb_device_info *pVBInfo)
}
if ((pVBInfo->VBInfo & SetCRT2ToTV) &&
- (!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV)))
+ (!(pVBInfo->VBInfo & SetCRT2ToHiVision)))
/* Enable V.Scaling */
xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x14, 0x04);
else
@@ -5543,7 +5543,7 @@ static void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex,
modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
xgifb_reg_set(pVBInfo->Part3Port, 0x00, 0x00);
- if (pVBInfo->TVInfo & SetPALTV) {
+ if (pVBInfo->TVInfo & TVSetPAL) {
xgifb_reg_set(pVBInfo->Part3Port, 0x13, 0xFA);
xgifb_reg_set(pVBInfo->Part3Port, 0x14, 0xC8);
} else {
@@ -5554,15 +5554,15 @@ static void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex,
if (!(pVBInfo->VBInfo & SetCRT2ToTV))
return;
- if (pVBInfo->TVInfo & SetPALMTV) {
+ if (pVBInfo->TVInfo & TVSetPALM) {
xgifb_reg_set(pVBInfo->Part3Port, 0x13, 0xFA);
xgifb_reg_set(pVBInfo->Part3Port, 0x14, 0xC8);
xgifb_reg_set(pVBInfo->Part3Port, 0x3D, 0xA8);
}
- if ((pVBInfo->VBInfo & SetCRT2ToHiVisionTV) || (pVBInfo->VBInfo
- & SetCRT2ToYPbPr)) {
- if (pVBInfo->TVInfo & SetYPbPrMode525i)
+ if ((pVBInfo->VBInfo & SetCRT2ToHiVision) || (pVBInfo->VBInfo
+ & SetCRT2ToYPbPr525750)) {
+ if (pVBInfo->TVInfo & TVSetYPbPr525i)
return;
tempdi = pVBInfo->HiTVGroup3Data;
@@ -5572,17 +5572,17 @@ static void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex,
tempdi = pVBInfo->HiTVGroup3Text;
}
- if (pVBInfo->TVInfo & SetYPbPrMode525p)
+ if (pVBInfo->TVInfo & TVSetYPbPr525p)
tempdi = pVBInfo->Ren525pGroup3;
- if (pVBInfo->TVInfo & SetYPbPrMode750p)
+ if (pVBInfo->TVInfo & TVSetYPbPr750p)
tempdi = pVBInfo->Ren750pGroup3;
for (i = 0; i <= 0x3E; i++)
xgifb_reg_set(pVBInfo->Part3Port, i, tempdi[i]);
if (pVBInfo->VBType & VB_XGI301C) { /* Marcovision */
- if (pVBInfo->TVInfo & SetYPbPrMode525p)
+ if (pVBInfo->TVInfo & TVSetYPbPr525p)
xgifb_reg_set(pVBInfo->Part3Port, 0x28, 0x3f);
}
}
@@ -5637,7 +5637,7 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
if (XGI_IsLCDDualLink(pVBInfo))
tempbx = tempbx >> 1;
- if (tempcx & SetCRT2ToHiVisionTV) {
+ if (tempcx & SetCRT2ToHiVision) {
temp = 0;
if (tempbx <= 1024)
temp = 0xA0;
@@ -5656,7 +5656,7 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
- if (pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p)) {
+ if (pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p)) {
temp = 0x00;
if (pVBInfo->VGAHDE == 1280)
temp = 0x40;
@@ -5667,7 +5667,7 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
tempebx = pVBInfo->VDE;
- if (tempcx & SetCRT2ToHiVisionTV) {
+ if (tempcx & SetCRT2ToHiVision) {
if (!(temp & 0xE000))
tempbx = tempbx >> 1;
}
@@ -5705,8 +5705,8 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part4Port, 0x19, temp);
/* 301b */
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
temp = 0x0028;
xgifb_reg_set(pVBInfo->Part4Port, 0x1C, temp);
tempax = pVBInfo->VGAHDE;
@@ -5735,7 +5735,7 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (tempax & 0x00FF);
xgifb_reg_set(pVBInfo->Part4Port, 0x1D, temp);
- if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToHiVisionTV)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToHiVision)) {
if (pVBInfo->VGAHDE > 800)
xgifb_reg_or(pVBInfo->Part4Port, 0x1E, 0x08);
@@ -5744,8 +5744,8 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & SetCRT2ToTV) {
if (!(pVBInfo->TVInfo & (NTSC1024x768
- | SetYPbPrMode525p | SetYPbPrMode750p
- | SetYPbPrMode1080i))) {
+ | TVSetYPbPr525p | TVSetYPbPr750p
+ | TVSetHiVision))) {
temp |= 0x0001;
if ((pVBInfo->VBInfo & SetInSlaveMode)
&& (!(pVBInfo->TVInfo
@@ -5785,7 +5785,7 @@ static void XGI_SetGroup5(unsigned short ModeNo, unsigned short ModeIdIndex,
Pdata = pVBInfo->Part5Port + 1;
if (pVBInfo->ModeType == ModeVGA) {
if (!(pVBInfo->VBInfo & (SetInSlaveMode | LoadDACFlag
- | CRT2DisplayFlag))) {
+ | DisableCRT2Display))) {
XGINew_EnableCRT2(pVBInfo);
}
}
@@ -6074,7 +6074,7 @@ static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo)
tempax = pVBInfo->VBInfo;
if (tempax & SetCRT2ToDualEdge)
return 0;
- else if (tempax & (DisableCRT2Display | SwitchToCRT2 | SetSimuScanMode))
+ else if (tempax & (DisableCRT2Display | SwitchCRT2 | SetSimuScanMode))
return 1;
return 0;
@@ -6140,15 +6140,15 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
{
unsigned short tempah = 0;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
tempah = 0x3F;
if (!(pVBInfo->VBInfo &
(DisableCRT2Display | SetSimuScanMode))) {
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
if (pVBInfo->VBInfo & SetCRT2ToDualEdge) {
tempah = 0x7F; /* Disable Channel A */
- if (!(pVBInfo->VBInfo & SetCRT2ToLCDA))
+ if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA))
/* Disable Channel B */
tempah = 0xBF;
@@ -6166,8 +6166,8 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
/* disable part4_1f */
xgifb_reg_and(pVBInfo->Part4Port, 0x1F, tempah);
- if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
- if (((pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)))
+ if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
+ if (((pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
|| (XGI_DisableChISLCD(pVBInfo))
|| (XGI_IsLCDON(pVBInfo)))
/* LVDS Driver power down */
@@ -6175,16 +6175,16 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
}
if ((pVBInfo->SetFlag & DisableChA) || (pVBInfo->VBInfo
- & (DisableCRT2Display | SetCRT2ToLCDA
+ & (DisableCRT2Display | XGI_SetCRT2ToLCDA
| SetSimuScanMode))) {
if (pVBInfo->SetFlag & GatingCRT)
XGI_EnableGatingCRT(HwDeviceExtension, pVBInfo);
XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
}
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
if ((pVBInfo->SetFlag & DisableChA) || (pVBInfo->VBInfo
- & SetCRT2ToLCDA))
+ & XGI_SetCRT2ToLCDA))
/* Power down */
xgifb_reg_and(pVBInfo->Part1Port, 0x1e, 0xdf);
}
@@ -6198,7 +6198,7 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
if ((pVBInfo->SetFlag & DisableChB) ||
(pVBInfo->VBInfo &
(DisableCRT2Display | SetSimuScanMode)) ||
- ((!(pVBInfo->VBInfo & SetCRT2ToLCDA)) &&
+ ((!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) &&
(pVBInfo->VBInfo &
(SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV))))
xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x80);
@@ -6206,7 +6206,7 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
if ((pVBInfo->SetFlag & DisableChB) ||
(pVBInfo->VBInfo &
(DisableCRT2Display | SetSimuScanMode)) ||
- (!(pVBInfo->VBInfo & SetCRT2ToLCDA)) ||
+ (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) ||
(pVBInfo->VBInfo &
(SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV))) {
/* save Part1 index 0 */
@@ -6227,7 +6227,7 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
xgifb_reg_and(pVBInfo->P3c4, 0x32, 0xDF);
}
- if (pVBInfo->VBInfo & (DisableCRT2Display | SetCRT2ToLCDA
+ if (pVBInfo->VBInfo & (DisableCRT2Display | XGI_SetCRT2ToLCDA
| SetSimuScanMode))
XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
}
@@ -6254,15 +6254,15 @@ static unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo)
{
unsigned short tempbx = 0;
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
tempbx = 2;
- if (pVBInfo->TVInfo & SetYPbPrMode1080i)
+ if (pVBInfo->TVInfo & TVSetHiVision)
tempbx = 4;
- if (pVBInfo->TVInfo & SetYPbPrMode525i)
+ if (pVBInfo->TVInfo & TVSetYPbPr525i)
tempbx = 6;
- if (pVBInfo->TVInfo & SetYPbPrMode525p)
+ if (pVBInfo->TVInfo & TVSetYPbPr525p)
tempbx = 8;
- if (pVBInfo->TVInfo & SetYPbPrMode750p)
+ if (pVBInfo->TVInfo & TVSetYPbPr750p)
tempbx = 10;
if (pVBInfo->TVInfo & TVSimuMode)
tempbx++;
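[Note: XGI_GetTVPtrIndex, shown above, maps the TV standard to an even table index and adds one in simulation mode. A standalone sketch with assumed flag values; only the indexing mirrors the hunk.]

#include <stdio.h>

/* Assumed flag values, for illustration only. */
#define TVSetPAL       0x0001
#define TVSetHiVision  0x0002
#define TVSetYPbPr525i 0x0004
#define TVSetYPbPr525p 0x0008
#define TVSetYPbPr750p 0x0010
#define TVSimuMode     0x0020

static int tv_ptr_index(unsigned short tv_info)
{
    int idx = 0; /* NTSC default */

    if (tv_info & TVSetPAL)
        idx = 2;
    if (tv_info & TVSetHiVision)
        idx = 4;
    if (tv_info & TVSetYPbPr525i)
        idx = 6;
    if (tv_info & TVSetYPbPr525p)
        idx = 8;
    if (tv_info & TVSetYPbPr750p)
        idx = 10;
    if (tv_info & TVSimuMode)
        idx++; /* odd index selects the simu-mode entry */
    return idx;
}

int main(void)
{
    printf("%d\n", tv_ptr_index(TVSetYPbPr750p | TVSimuMode)); /* 11 */
    return 0;
}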
@@ -6293,23 +6293,23 @@ static void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char *tempcl,
*tempcl = 0;
*tempch = 0;
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
*tempbx = 1;
- if (pVBInfo->TVInfo & SetPALMTV)
+ if (pVBInfo->TVInfo & TVSetPALM)
*tempbx = 2;
- if (pVBInfo->TVInfo & SetPALNTV)
+ if (pVBInfo->TVInfo & TVSetPALN)
*tempbx = 3;
if (pVBInfo->TVInfo & NTSC1024x768) {
*tempbx = 4;
- if (pVBInfo->TVInfo & SetPALMTV)
+ if (pVBInfo->TVInfo & TVSetPALM)
*tempbx = 5;
}
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
if ((!(pVBInfo->VBInfo & SetInSlaveMode)) || (pVBInfo->TVInfo
& TVSimuMode)) {
*tempbx += 8;
@@ -6317,8 +6317,8 @@ static void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char *tempcl,
}
}
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C))
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C))
(*tempch)++;
}
@@ -6328,9 +6328,9 @@ static void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
unsigned char tempah, tempbl, tempbh;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA
| SetCRT2ToTV | SetCRT2ToRAMDAC)) {
tempbl = 0;
tempbh = 0;
@@ -6338,20 +6338,20 @@ static void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
index = XGI_GetTVPtrIndex(pVBInfo); /* Get TV Delay */
tempbl = pVBInfo->XGI_TVDelayList[index];
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
- | VB_XGI301LV | VB_XGI302LV
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
+ | VB_SIS301LV | VB_SIS302LV
| VB_XGI301C))
tempbl = pVBInfo->XGI_TVDelayList2[index];
if (pVBInfo->VBInfo & SetCRT2ToDualEdge)
tempbl = tempbl >> 4;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
/* Get LCD Delay */
index = XGI_GetLCDCapPtr(pVBInfo);
tempbh = pVBInfo->LCDCapList[index].
LCD_DelayCompensation;
- if (!(pVBInfo->VBInfo & SetCRT2ToLCDA))
+ if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA))
tempbl = tempbh;
}
@@ -6365,7 +6365,7 @@ static void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
tempah |= tempbl;
}
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) { /* Channel A */
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) { /* Channel A */
tempah &= 0x0F;
tempah |= tempbh;
}
@@ -6475,13 +6475,13 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
tempcx = pVBInfo->LCDCapList[XGI_GetLCDCapPtr(pVBInfo)].LCD_Capability;
if (pVBInfo->VBType &
- (VB_XGI301B |
- VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
+ (VB_SIS301B |
+ VB_SIS302B |
+ VB_SIS301LV |
+ VB_SIS302LV |
VB_XGI301C)) { /* 301LV/302LV only */
if (pVBInfo->VBType &
- (VB_XGI301LV | VB_XGI302LV | VB_XGI301C)) {
+ (VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
/* Set 301LV Capability */
xgifb_reg_set(pVBInfo->Part4Port, 0x24,
(unsigned char) (tempcx & 0x1F));
@@ -6493,14 +6493,14 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
| EnablePLLSPLOW)) >> 8));
}
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
if (pVBInfo->VBInfo & SetCRT2ToLCD)
XGI_SetLCDCap_B(tempcx, pVBInfo);
- else if (pVBInfo->VBInfo & SetCRT2ToLCDA)
+ else if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
XGI_SetLCDCap_A(tempcx, pVBInfo);
- if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
if (tempcx & EnableSpectrum)
SetSpectrum(pVBInfo);
}
@@ -6524,7 +6524,7 @@ static void XGI_SetAntiFlicker(unsigned short ModeNo,
unsigned char tempah;
- if (pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p))
+ if (pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))
return;
tempbx = XGI_GetTVPtrIndex(pVBInfo);
@@ -6648,8 +6648,8 @@ static void XGI_SetYFilter(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part2Port, 0x38, filterPtr[index++]);
}
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
xgifb_reg_set(pVBInfo->Part2Port, 0x48, filterPtr[index++]);
xgifb_reg_set(pVBInfo->Part2Port, 0x49, filterPtr[index++]);
xgifb_reg_set(pVBInfo->Part2Port, 0x4A, filterPtr[index++]);
@@ -6668,7 +6668,7 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
{
XGI_SetDelayComp(pVBInfo);
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA))
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA))
XGI_SetLCDCap(pVBInfo);
if (pVBInfo->VBInfo & SetCRT2ToTV) {
@@ -6676,7 +6676,7 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
XGI_SetYFilter(ModeNo, ModeIdIndex, pVBInfo);
XGI_SetAntiFlicker(ModeNo, ModeIdIndex, pVBInfo);
- if (pVBInfo->VBType & VB_XGI301)
+ if (pVBInfo->VBType & VB_SIS301)
XGI_SetEdgeEnhance(ModeNo, ModeIdIndex, pVBInfo);
}
}
@@ -6732,15 +6732,15 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
tempbl = 0xff;
if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV
- | SetCRT2ToLCD | SetCRT2ToLCDA)) {
- if ((pVBInfo->VBInfo & SetCRT2ToLCDA) &&
+ | SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
+ if ((pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) &&
(!(pVBInfo->VBInfo & SetSimuScanMode))) {
tempbl &= 0xf7;
tempah |= 0x01;
xgifb_reg_and_or(pVBInfo->Part1Port, 0x2e,
tempbl, tempah);
} else {
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
tempbl &= 0xf7;
tempah |= 0x01;
}
@@ -6780,7 +6780,7 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
}
if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV | SetCRT2ToLCD
- | SetCRT2ToLCDA)) {
+ | XGI_SetCRT2ToLCDA)) {
tempah &= (~0x08);
if ((pVBInfo->ModeType == ModeVGA) && (!(pVBInfo->VBInfo
& SetInSlaveMode))) {
@@ -6807,24 +6807,24 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
tempah |= 0x40;
}
- if ((pVBInfo->LCDResInfo == Panel1280x1024)
- || (pVBInfo->LCDResInfo == Panel1280x1024x75))
+ if ((pVBInfo->LCDResInfo == Panel_1280x1024)
+ || (pVBInfo->LCDResInfo == Panel_1280x1024x75))
tempah |= 0x80;
- if (pVBInfo->LCDResInfo == Panel1280x960)
+ if (pVBInfo->LCDResInfo == Panel_1280x960)
tempah |= 0x80;
xgifb_reg_set(pVBInfo->Part4Port, 0x0C, tempah);
}
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
tempah = 0;
tempbl = 0xfb;
if (pVBInfo->VBInfo & SetCRT2ToDualEdge) {
tempbl = 0xff;
- if (pVBInfo->VBInfo & SetCRT2ToLCDA)
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
tempah |= 0x04; /* shampoo 0129 */
}
@@ -6849,7 +6849,7 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
tempah = 0;
tempbl = 0x7f;
- if (!(pVBInfo->VBInfo & SetCRT2ToLCDA)) {
+ if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) {
tempbl = 0xff;
if (!(pVBInfo->VBInfo & SetCRT2ToDualEdge))
tempah |= 0x80;
@@ -6857,7 +6857,7 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
xgifb_reg_and_or(pVBInfo->Part4Port, 0x23, tempbl, tempah);
- if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
if (pVBInfo->LCDInfo & SetLCDDualLink) {
xgifb_reg_or(pVBInfo->Part4Port, 0x27, 0x20);
xgifb_reg_or(pVBInfo->Part4Port, 0x34, 0x10);
@@ -6872,7 +6872,7 @@ static void XGI_CloseCRTC(struct xgi_hw_device_info *HwDeviceExtension,
tempbx = 0;
- if (pVBInfo->VBInfo & SetCRT2ToLCDA)
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
tempbx = 0x08A0;
}
@@ -6937,10 +6937,10 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
index--;
if (pVBInfo->SetFlag & ProgrammingCRT2) {
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
if (pVBInfo->IF_DEF_LVDS == 0) {
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
- | VB_XGI301LV | VB_XGI302LV
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
+ | VB_SIS301LV | VB_SIS302LV
| VB_XGI301C))
/* 301b */
temp = LCDARefreshIndex[
@@ -6983,7 +6983,7 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
break;
temp = pVBInfo->RefIndex[RefreshRateTableIndex + i].
Ext_InfoFlag;
- temp &= ModeInfoFlag;
+ temp &= ModeTypeMask;
if (temp < pVBInfo->ModeType)
break;
i++;
@@ -7163,8 +7163,8 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
{
unsigned short tempah;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
if (!(pVBInfo->SetFlag & DisableChA)) {
if (pVBInfo->SetFlag & EnableChA) {
/* Power on */
@@ -7207,11 +7207,11 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
|| (!(pVBInfo->VBInfo & DisableCRT2Display))) {
xgifb_reg_and_or(pVBInfo->Part2Port, 0x00, ~0xE0,
0x20); /* shampoo 0129 */
- if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
if (!XGI_DisableChISLCD(pVBInfo)) {
if (XGI_EnableChISLCD(pVBInfo) ||
(pVBInfo->VBInfo &
- (SetCRT2ToLCD | SetCRT2ToLCDA)))
+ (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
/* LVDS PLL power on */
xgifb_reg_and(
pVBInfo->Part4Port,
@@ -7229,12 +7229,12 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
tempah = 0xc0;
if (!(pVBInfo->VBInfo & SetSimuScanMode)) {
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
if (pVBInfo->VBInfo &
SetCRT2ToDualEdge) {
tempah = tempah & 0x40;
if (pVBInfo->VBInfo &
- SetCRT2ToLCDA)
+ XGI_SetCRT2ToLCDA)
tempah = tempah ^ 0xC0;
if (pVBInfo->SetFlag &
@@ -7271,7 +7271,7 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
} /* 301 */
else { /* LVDS */
if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToLCD
- | SetCRT2ToLCDA))
+ | XGI_SetCRT2ToLCDA))
/* enable CRT2 */
xgifb_reg_or(pVBInfo->Part1Port, 0x1E, 0x20);
@@ -7311,9 +7311,9 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
pVBInfo->SetFlag &= temp;
pVBInfo->SelectCRT2Rate = 0;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
- if (pVBInfo->VBInfo & (SetSimuScanMode | SetCRT2ToLCDA
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
+ if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA
| SetInSlaveMode)) {
pVBInfo->SetFlag |= ProgrammingCRT2;
}
@@ -7415,11 +7415,11 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
pVBInfo->P3c9 = pVBInfo->BaseAddr + 0x19;
pVBInfo->P3da = pVBInfo->BaseAddr + 0x2A;
pVBInfo->Part0Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_00;
- pVBInfo->Part1Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_04;
- pVBInfo->Part2Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_10;
- pVBInfo->Part3Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_12;
- pVBInfo->Part4Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14;
- pVBInfo->Part5Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14 + 2;
+ pVBInfo->Part1Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_04;
+ pVBInfo->Part2Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_10;
+ pVBInfo->Part3Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_12;
+ pVBInfo->Part4Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14;
+ pVBInfo->Part5Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14 + 2;
/* for x86 Linux, XG21 LVDS */
if (HwDeviceExtension->jChipType == XG21) {
@@ -7452,20 +7452,20 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
XGI_GetLCDInfo(ModeNo, ModeIdIndex, pVBInfo);
XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
- if (pVBInfo->VBInfo & (SetSimuScanMode | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA)) {
XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
ModeIdIndex, pVBInfo);
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
HwDeviceExtension, pVBInfo);
}
} else {
- if (!(pVBInfo->VBInfo & SwitchToCRT2)) {
+ if (!(pVBInfo->VBInfo & SwitchCRT2)) {
XGI_SetCRT1Group(xgifb_info,
HwDeviceExtension, ModeNo,
ModeIdIndex, pVBInfo);
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
HwDeviceExtension,
pVBInfo);
@@ -7473,7 +7473,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
}
}
- if (pVBInfo->VBInfo & (SetSimuScanMode | SwitchToCRT2)) {
+ if (pVBInfo->VBInfo & (SetSimuScanMode | SwitchCRT2)) {
switch (HwDeviceExtension->ujVBChipID) {
case VB_CHIP_301:
XGI_SetCRT2Group301(ModeNo, HwDeviceExtension,
@@ -7504,10 +7504,10 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
if (ModeNo <= 0x13) {
pVBInfo->ModeType = pVBInfo->SModeIDTable[ModeIdIndex].
- St_ModeFlag & ModeInfoFlag;
+ St_ModeFlag & ModeTypeMask;
} else {
pVBInfo->ModeType = pVBInfo->EModeIDTable[ModeIdIndex].
- Ext_ModeFlag & ModeInfoFlag;
+ Ext_ModeFlag & ModeTypeMask;
}
pVBInfo->SetFlag = 0;
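
The vb_setmode.c hunks above are pure identifier renames onto the namespace shared with drivers/video/sis: XGI-specific bits gain an XGI_ prefix (SetCRT2ToLCDA becomes XGI_SetCRT2ToLCDA) so they cannot collide with SiS definitions once the sis headers are included, VB_XGI* bridge types become the shared VB_SIS* names, and ModeInfoFlag gives way to ModeTypeMask. A minimal sketch of the collision the prefix avoids; the values below are illustrative, not the driver's real flag bits:

#define SetCRT2ToLCD		0x0200	/* name shared with the sis headers */
#define XGI_SetCRT2ToLCDA	0x8000	/* XGI-only flag, hence the prefix */

static int wants_lcda(unsigned short vbinfo)
{
	/* the same bitmask test pattern used throughout vb_setmode.c */
	return (vbinfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) != 0;
}
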
diff --git a/drivers/staging/xgifb/vb_struct.h b/drivers/staging/xgifb/vb_struct.h
index 6556a0d6ff82..a5bd56af92b1 100644
--- a/drivers/staging/xgifb/vb_struct.h
+++ b/drivers/staging/xgifb/vb_struct.h
@@ -1,15 +1,6 @@
#ifndef _VB_STRUCT_
#define _VB_STRUCT_
-
-struct XGI_LCDDataStruct {
- unsigned short RVBHCMAX;
- unsigned short RVBHCFACT;
- unsigned short VGAHT;
- unsigned short VGAVT;
- unsigned short LCDHT;
- unsigned short LCDVT;
-};
-
+#include "../../video/sis/vstruct.h"
struct XGI_LVDSCRT1HDataStruct {
unsigned char Reg[8];
@@ -19,22 +10,6 @@ struct XGI_LVDSCRT1VDataStruct {
unsigned char Reg[7];
};
-struct XGI_TVDataStruct {
- unsigned short RVBHCMAX;
- unsigned short RVBHCFACT;
- unsigned short VGAHT;
- unsigned short VGAVT;
- unsigned short TVHDE;
- unsigned short TVVDE;
- unsigned short RVBHRS;
- unsigned char FlickerMode;
- unsigned short HALFRVBHRS;
- unsigned char RY1COE;
- unsigned char RY2COE;
- unsigned char RY3COE;
- unsigned char RY4COE;
-};
-
struct XGI_StStruct {
unsigned char St_ModeID;
unsigned short St_ModeFlag;
@@ -47,18 +22,6 @@ struct XGI_StStruct {
unsigned char VB_StTVYFilterIndex;
};
-struct XGI_StandTableStruct {
- unsigned char CRT_COLS;
- unsigned char ROWS;
- unsigned char CHAR_HEIGHT;
- unsigned short CRT_LEN;
- unsigned char SR[4];
- unsigned char MISC;
- unsigned char CRTC[0x19];
- unsigned char ATTR[0x14];
- unsigned char GRC[9];
-};
-
struct XGI_ExtStruct {
unsigned char Ext_ModeID;
unsigned short Ext_ModeFlag;
@@ -85,39 +48,11 @@ struct XGI_Ext2Struct {
/* unsigned short ROM_OFFSET; */
};
-
-struct XGI_MCLKDataStruct {
- unsigned char SR28, SR29, SR2A;
- unsigned short CLOCK;
-};
-
struct XGI_ECLKDataStruct {
unsigned char SR2E, SR2F, SR30;
unsigned short CLOCK;
};
-struct XGI_VCLKDataStruct {
- unsigned char SR2B, SR2C;
- unsigned short CLOCK;
-};
-
-struct XGI_VBVCLKDataStruct {
- unsigned char Part4_A, Part4_B;
- unsigned short CLOCK;
-};
-
-struct XGI_StResInfoStruct {
- unsigned short HTotal;
- unsigned short VTotal;
-};
-
-struct XGI_ModeResInfoStruct {
- unsigned short HTotal;
- unsigned short VTotal;
- unsigned char XChar;
- unsigned char YChar;
-};
-
/*add for new UNIVGABIOS*/
struct XGI_LCDDesStruct {
unsigned short LCDHDES;
@@ -350,7 +285,7 @@ struct vb_device_info {
unsigned char *pCRT2Data_4_D;
unsigned char *pCRT2Data_4_E;
unsigned char *pCRT2Data_4_10;
- struct XGI_MCLKDataStruct *MCLKData;
+ struct SiS_MCLKData *MCLKData;
struct XGI_ECLKDataStruct *ECLKData;
unsigned char *XGI_TVDelayList;
@@ -380,15 +315,15 @@ struct vb_device_info {
struct XGI_TimingVStruct *TimingV;
struct XGI_StStruct *SModeIDTable;
- struct XGI_StandTableStruct *StandTable;
+ struct SiS_StandTable_S *StandTable;
struct XGI_ExtStruct *EModeIDTable;
struct XGI_Ext2Struct *RefIndex;
/* XGINew_CRT1TableStruct *CRT1Table; */
struct XGI_CRT1TableStruct *XGINEWUB_CRT1Table;
- struct XGI_VCLKDataStruct *VCLKData;
- struct XGI_VBVCLKDataStruct *VBVCLKData;
- struct XGI_StResInfoStruct *StResInfo;
- struct XGI_ModeResInfoStruct *ModeResInfo;
+ struct SiS_VCLKData *VCLKData;
+ struct SiS_VBVCLKData *VBVCLKData;
+ struct SiS_StResInfo_S *StResInfo;
+ struct SiS_ModeResInfo_S *ModeResInfo;
struct XGI_XG21CRT1Struct *UpdateCRT1;
int ram_type;
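
The vb_struct.h swap only works because the deleted XGI_* structs are layout-identical to their SiS_* counterparts in drivers/video/sis/vstruct.h; the initializers in vb_table.h below (for example {0x16, 0x01, 0x01, 166}) are left untouched, which is only safe if member order and widths match. A compile-time sketch of that equivalence; the SiS_MCLKData mirror here is reconstructed from the deleted XGI definition and should be read as an assumption about the shared header:

#include <stddef.h>

struct XGI_MCLKDataStruct {		/* the definition removed above */
	unsigned char SR28, SR29, SR2A;
	unsigned short CLOCK;
};

struct SiS_MCLKData {			/* assumed shared layout */
	unsigned char SR28, SR29, SR2A;
	unsigned short CLOCK;
};

/* If either check failed, initializers such as {0x16, 0x01, 0x01, 166}
 * would silently change meaning after the type swap. */
_Static_assert(sizeof(struct XGI_MCLKDataStruct) ==
	       sizeof(struct SiS_MCLKData), "size mismatch");
_Static_assert(offsetof(struct XGI_MCLKDataStruct, CLOCK) ==
	       offsetof(struct SiS_MCLKData, CLOCK), "CLOCK offset mismatch");
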
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index e7946f1c1143..dddf261ed53d 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -1,5 +1,5 @@
/* yilin modify for xgi20 */
-static struct XGI_MCLKDataStruct XGI340New_MCLKData[] = {
+static struct SiS_MCLKData XGI340New_MCLKData[] = {
{0x16, 0x01, 0x01, 166},
{0x19, 0x02, 0x01, 124},
{0x7C, 0x08, 0x01, 200},
@@ -10,7 +10,7 @@ static struct XGI_MCLKDataStruct XGI340New_MCLKData[] = {
{0x5c, 0x23, 0x01, 166}
};
-static struct XGI_MCLKDataStruct XGI27New_MCLKData[] = {
+static struct SiS_MCLKData XGI27New_MCLKData[] = {
{0x5c, 0x23, 0x01, 166},
{0x19, 0x02, 0x01, 124},
{0x7C, 0x08, 0x80, 200},
@@ -296,7 +296,7 @@ static struct XGI_ExtStruct XGI330_EModeIDTable[] = {
0x00, 0x00, 0x00, 0x00, 0x00}
};
-static struct XGI_StandTableStruct XGI330_StandTable[] = {
+static struct SiS_StandTable_S XGI330_StandTable[] = {
/* MD_0_200 */
{
0x28, 0x18, 0x08, 0x0800,
@@ -2353,109 +2353,109 @@ static struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_2_Vx75[] = {
/*add for new UNIVGABIOS*/
static struct XGI330_LCDDataTablStruct XGI_LCDDataTable[] = {
- {Panel1024x768, 0x0019, 0x0001, 0}, /* XGI_ExtLCD1024x768Data */
- {Panel1024x768, 0x0019, 0x0000, 1}, /* XGI_StLCD1024x768Data */
- {Panel1024x768, 0x0018, 0x0010, 2}, /* XGI_CetLCD1024x768Data */
- {Panel1280x1024, 0x0019, 0x0001, 3}, /* XGI_ExtLCD1280x1024Data */
- {Panel1280x1024, 0x0019, 0x0000, 4}, /* XGI_StLCD1280x1024Data */
- {Panel1280x1024, 0x0018, 0x0010, 5}, /* XGI_CetLCD1280x1024Data */
- {Panel1400x1050, 0x0019, 0x0001, 6}, /* XGI_ExtLCD1400x1050Data */
- {Panel1400x1050, 0x0019, 0x0000, 7}, /* XGI_StLCD1400x1050Data */
- {Panel1400x1050, 0x0018, 0x0010, 8}, /* XGI_CetLCD1400x1050Data */
- {Panel1600x1200, 0x0019, 0x0001, 9}, /* XGI_ExtLCD1600x1200Data */
- {Panel1600x1200, 0x0019, 0x0000, 10}, /* XGI_StLCD1600x1200Data */
+ {Panel_1024x768, 0x0019, 0x0001, 0}, /* XGI_ExtLCD1024x768Data */
+ {Panel_1024x768, 0x0019, 0x0000, 1}, /* XGI_StLCD1024x768Data */
+ {Panel_1024x768, 0x0018, 0x0010, 2}, /* XGI_CetLCD1024x768Data */
+ {Panel_1280x1024, 0x0019, 0x0001, 3}, /* XGI_ExtLCD1280x1024Data */
+ {Panel_1280x1024, 0x0019, 0x0000, 4}, /* XGI_StLCD1280x1024Data */
+ {Panel_1280x1024, 0x0018, 0x0010, 5}, /* XGI_CetLCD1280x1024Data */
+ {Panel_1400x1050, 0x0019, 0x0001, 6}, /* XGI_ExtLCD1400x1050Data */
+ {Panel_1400x1050, 0x0019, 0x0000, 7}, /* XGI_StLCD1400x1050Data */
+ {Panel_1400x1050, 0x0018, 0x0010, 8}, /* XGI_CetLCD1400x1050Data */
+ {Panel_1600x1200, 0x0019, 0x0001, 9}, /* XGI_ExtLCD1600x1200Data */
+ {Panel_1600x1200, 0x0019, 0x0000, 10}, /* XGI_StLCD1600x1200Data */
{PanelRef60Hz, 0x0008, 0x0008, 11}, /* XGI_NoScalingData */
- {Panel1024x768x75, 0x0019, 0x0001, 12}, /* XGI_ExtLCD1024x768x75Data */
- {Panel1024x768x75, 0x0019, 0x0000, 13}, /* XGI_StLCD1024x768x75Data */
- {Panel1024x768x75, 0x0018, 0x0010, 14}, /* XGI_CetLCD1024x768x75Data */
- {Panel1280x1024x75, 0x0019, 0x0001, 15}, /* XGI_ExtLCD1280x1024x75Data*/
- {Panel1280x1024x75, 0x0019, 0x0000, 16}, /* XGI_StLCD1280x1024x75Data */
- {Panel1280x1024x75, 0x0018, 0x0010, 17}, /* XGI_CetLCD1280x1024x75Data*/
+ {Panel_1024x768x75, 0x0019, 0x0001, 12}, /* XGI_ExtLCD1024x768x75Data */
+ {Panel_1024x768x75, 0x0019, 0x0000, 13}, /* XGI_StLCD1024x768x75Data */
+ {Panel_1024x768x75, 0x0018, 0x0010, 14}, /* XGI_CetLCD1024x768x75Data */
+ {Panel_1280x1024x75, 0x0019, 0x0001, 15}, /* XGI_ExtLCD1280x1024x75Data*/
+ {Panel_1280x1024x75, 0x0019, 0x0000, 16}, /* XGI_StLCD1280x1024x75Data */
+ {Panel_1280x1024x75, 0x0018, 0x0010, 17}, /* XGI_CetLCD1280x1024x75Data*/
{PanelRef75Hz, 0x0008, 0x0008, 18}, /* XGI_NoScalingDatax75 */
{0xFF, 0x0000, 0x0000, 0} /* End of table */
};
static struct XGI330_LCDDataTablStruct XGI_LCDDesDataTable[] = {
- {Panel1024x768, 0x0019, 0x0001, 0}, /* XGI_ExtLCDDes1024x768Data */
- {Panel1024x768, 0x0019, 0x0000, 1}, /* XGI_StLCDDes1024x768Data */
- {Panel1024x768, 0x0018, 0x0010, 2}, /* XGI_CetLCDDes1024x768Data */
- {Panel1280x1024, 0x0019, 0x0001, 3}, /* XGI_ExtLCDDes1280x1024Data */
- {Panel1280x1024, 0x0019, 0x0000, 4}, /* XGI_StLCDDes1280x1024Data */
- {Panel1280x1024, 0x0018, 0x0010, 5}, /* XGI_CetLCDDes1280x1024Data */
- {Panel1400x1050, 0x0019, 0x0001, 6}, /* XGI_ExtLCDDes1400x1050Data */
- {Panel1400x1050, 0x0019, 0x0000, 7}, /* XGI_StLCDDes1400x1050Data */
- {Panel1400x1050, 0x0418, 0x0010, 8}, /* XGI_CetLCDDes1400x1050Data */
- {Panel1400x1050, 0x0418, 0x0410, 9}, /* XGI_CetLCDDes1400x1050Data2 */
- {Panel1600x1200, 0x0019, 0x0001, 10}, /* XGI_ExtLCDDes1600x1200Data */
- {Panel1600x1200, 0x0019, 0x0000, 11}, /* XGI_StLCDDes1600x1200Data */
+ {Panel_1024x768, 0x0019, 0x0001, 0}, /* XGI_ExtLCDDes1024x768Data */
+ {Panel_1024x768, 0x0019, 0x0000, 1}, /* XGI_StLCDDes1024x768Data */
+ {Panel_1024x768, 0x0018, 0x0010, 2}, /* XGI_CetLCDDes1024x768Data */
+ {Panel_1280x1024, 0x0019, 0x0001, 3}, /* XGI_ExtLCDDes1280x1024Data */
+ {Panel_1280x1024, 0x0019, 0x0000, 4}, /* XGI_StLCDDes1280x1024Data */
+ {Panel_1280x1024, 0x0018, 0x0010, 5}, /* XGI_CetLCDDes1280x1024Data */
+ {Panel_1400x1050, 0x0019, 0x0001, 6}, /* XGI_ExtLCDDes1400x1050Data */
+ {Panel_1400x1050, 0x0019, 0x0000, 7}, /* XGI_StLCDDes1400x1050Data */
+ {Panel_1400x1050, 0x0418, 0x0010, 8}, /* XGI_CetLCDDes1400x1050Data */
+ {Panel_1400x1050, 0x0418, 0x0410, 9}, /* XGI_CetLCDDes1400x1050Data2 */
+ {Panel_1600x1200, 0x0019, 0x0001, 10}, /* XGI_ExtLCDDes1600x1200Data */
+ {Panel_1600x1200, 0x0019, 0x0000, 11}, /* XGI_StLCDDes1600x1200Data */
{PanelRef60Hz, 0x0008, 0x0008, 12}, /* XGI_NoScalingDesData */
- {Panel1024x768x75, 0x0019, 0x0001, 13}, /*XGI_ExtLCDDes1024x768x75Data*/
- {Panel1024x768x75, 0x0019, 0x0000, 14}, /* XGI_StLCDDes1024x768x75Data*/
- {Panel1024x768x75, 0x0018, 0x0010, 15}, /*XGI_CetLCDDes1024x768x75Data*/
+ {Panel_1024x768x75, 0x0019, 0x0001, 13}, /*XGI_ExtLCDDes1024x768x75Data*/
+ {Panel_1024x768x75, 0x0019, 0x0000, 14}, /* XGI_StLCDDes1024x768x75Data*/
+ {Panel_1024x768x75, 0x0018, 0x0010, 15}, /*XGI_CetLCDDes1024x768x75Data*/
/* XGI_ExtLCDDes1280x1024x75Data */
- {Panel1280x1024x75, 0x0019, 0x0001, 16},
+ {Panel_1280x1024x75, 0x0019, 0x0001, 16},
/* XGI_StLCDDes1280x1024x75Data */
- {Panel1280x1024x75, 0x0019, 0x0000, 17},
+ {Panel_1280x1024x75, 0x0019, 0x0000, 17},
/* XGI_CetLCDDes1280x1024x75Data */
- {Panel1280x1024x75, 0x0018, 0x0010, 18},
+ {Panel_1280x1024x75, 0x0018, 0x0010, 18},
{PanelRef75Hz, 0x0008, 0x0008, 19}, /* XGI_NoScalingDesDatax75 */
{0xFF, 0x0000, 0x0000, 0}
};
static struct XGI330_LCDDataTablStruct xgifb_epllcd_crt1[] = {
- {Panel1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDSCRT11024x768_1 */
- {Panel1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDSCRT11024x768_2 */
- {Panel1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDSCRT11280x1024_1 */
- {Panel1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDSCRT11280x1024_2 */
- {Panel1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDSCRT11400x1050_1 */
- {Panel1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDSCRT11400x1050_2 */
- {Panel1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDSCRT11600x1200_1 */
- {Panel1024x768x75, 0x0018, 0x0000, 7}, /* XGI_LVDSCRT11024x768_1x75 */
- {Panel1024x768x75, 0x0018, 0x0010, 8}, /* XGI_LVDSCRT11024x768_2x75 */
- {Panel1280x1024x75, 0x0018, 0x0000, 9}, /*XGI_LVDSCRT11280x1024_1x75*/
- {Panel1280x1024x75, 0x0018, 0x0010, 10},/*XGI_LVDSCRT11280x1024_2x75*/
+ {Panel_1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDSCRT11024x768_1 */
+ {Panel_1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDSCRT11024x768_2 */
+ {Panel_1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDSCRT11280x1024_1 */
+ {Panel_1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDSCRT11280x1024_2 */
+ {Panel_1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDSCRT11400x1050_1 */
+ {Panel_1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDSCRT11400x1050_2 */
+ {Panel_1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDSCRT11600x1200_1 */
+ {Panel_1024x768x75, 0x0018, 0x0000, 7}, /* XGI_LVDSCRT11024x768_1x75 */
+ {Panel_1024x768x75, 0x0018, 0x0010, 8}, /* XGI_LVDSCRT11024x768_2x75 */
+ {Panel_1280x1024x75, 0x0018, 0x0000, 9}, /*XGI_LVDSCRT11280x1024_1x75*/
+ {Panel_1280x1024x75, 0x0018, 0x0010, 10},/*XGI_LVDSCRT11280x1024_2x75*/
{0xFF, 0x0000, 0x0000, 0}
};
static struct XGI330_LCDDataTablStruct XGI_EPLLCDDataPtr[] = {
- {Panel1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDS1024x768Data_1 */
- {Panel1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDS1024x768Data_2 */
- {Panel1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDS1280x1024Data_1 */
- {Panel1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDS1280x1024Data_2 */
- {Panel1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDS1400x1050Data_1 */
- {Panel1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDS1400x1050Data_2 */
- {Panel1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDS1600x1200Data_1 */
+ {Panel_1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDS1024x768Data_1 */
+ {Panel_1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDS1024x768Data_2 */
+ {Panel_1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDS1280x1024Data_1 */
+ {Panel_1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDS1280x1024Data_2 */
+ {Panel_1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDS1400x1050Data_1 */
+ {Panel_1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDS1400x1050Data_2 */
+ {Panel_1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDS1600x1200Data_1 */
{PanelRef60Hz, 0x0008, 0x0008, 7}, /* XGI_LVDSNoScalingData */
- {Panel1024x768x75, 0x0018, 0x0000, 8}, /* XGI_LVDS1024x768Data_1x75 */
- {Panel1024x768x75, 0x0018, 0x0010, 9}, /* XGI_LVDS1024x768Data_2x75 */
- {Panel1280x1024x75, 0x0018, 0x0000, 10}, /* XGI_LVDS1280x1024Data_1x75*/
- {Panel1280x1024x75, 0x0018, 0x0010, 11}, /*XGI_LVDS1280x1024Data_2x75*/
+ {Panel_1024x768x75, 0x0018, 0x0000, 8}, /* XGI_LVDS1024x768Data_1x75 */
+ {Panel_1024x768x75, 0x0018, 0x0010, 9}, /* XGI_LVDS1024x768Data_2x75 */
+ {Panel_1280x1024x75, 0x0018, 0x0000, 10}, /* XGI_LVDS1280x1024Data_1x75*/
+ {Panel_1280x1024x75, 0x0018, 0x0010, 11}, /*XGI_LVDS1280x1024Data_2x75*/
{PanelRef75Hz, 0x0008, 0x0008, 12}, /* XGI_LVDSNoScalingDatax75 */
{0xFF, 0x0000, 0x0000, 0}
};
static struct XGI330_LCDDataTablStruct XGI_EPLLCDDesDataPtr[] = {
- {Panel1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDS1024x768Des_1 */
- {Panel1024x768, 0x0618, 0x0410, 1}, /* XGI_LVDS1024x768Des_3 */
- {Panel1024x768, 0x0018, 0x0010, 2}, /* XGI_LVDS1024x768Des_2 */
- {Panel1280x1024, 0x0018, 0x0000, 3}, /* XGI_LVDS1280x1024Des_1 */
- {Panel1280x1024, 0x0018, 0x0010, 4}, /* XGI_LVDS1280x1024Des_2 */
- {Panel1400x1050, 0x0018, 0x0000, 5}, /* XGI_LVDS1400x1050Des_1 */
- {Panel1400x1050, 0x0018, 0x0010, 6}, /* XGI_LVDS1400x1050Des_2 */
- {Panel1600x1200, 0x0018, 0x0000, 7}, /* XGI_LVDS1600x1200Des_1 */
+ {Panel_1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDS1024x768Des_1 */
+ {Panel_1024x768, 0x0618, 0x0410, 1}, /* XGI_LVDS1024x768Des_3 */
+ {Panel_1024x768, 0x0018, 0x0010, 2}, /* XGI_LVDS1024x768Des_2 */
+ {Panel_1280x1024, 0x0018, 0x0000, 3}, /* XGI_LVDS1280x1024Des_1 */
+ {Panel_1280x1024, 0x0018, 0x0010, 4}, /* XGI_LVDS1280x1024Des_2 */
+ {Panel_1400x1050, 0x0018, 0x0000, 5}, /* XGI_LVDS1400x1050Des_1 */
+ {Panel_1400x1050, 0x0018, 0x0010, 6}, /* XGI_LVDS1400x1050Des_2 */
+ {Panel_1600x1200, 0x0018, 0x0000, 7}, /* XGI_LVDS1600x1200Des_1 */
{PanelRef60Hz, 0x0008, 0x0008, 8}, /* XGI_LVDSNoScalingDesData */
- {Panel1024x768x75, 0x0018, 0x0000, 9}, /* XGI_LVDS1024x768Des_1x75 */
- {Panel1024x768x75, 0x0618, 0x0410, 10}, /* XGI_LVDS1024x768Des_3x75 */
- {Panel1024x768x75, 0x0018, 0x0010, 11}, /* XGI_LVDS1024x768Des_2x75 */
- {Panel1280x1024x75, 0x0018, 0x0000, 12}, /* XGI_LVDS1280x1024Des_1x75 */
- {Panel1280x1024x75, 0x0018, 0x0010, 13}, /* XGI_LVDS1280x1024Des_2x75 */
+ {Panel_1024x768x75, 0x0018, 0x0000, 9}, /* XGI_LVDS1024x768Des_1x75 */
+ {Panel_1024x768x75, 0x0618, 0x0410, 10}, /* XGI_LVDS1024x768Des_3x75 */
+ {Panel_1024x768x75, 0x0018, 0x0010, 11}, /* XGI_LVDS1024x768Des_2x75 */
+ {Panel_1280x1024x75, 0x0018, 0x0000, 12}, /* XGI_LVDS1280x1024Des_1x75 */
+ {Panel_1280x1024x75, 0x0018, 0x0010, 13}, /* XGI_LVDS1280x1024Des_2x75 */
{PanelRef75Hz, 0x0008, 0x0008, 14}, /* XGI_LVDSNoScalingDesDatax75 */
{0xFF, 0x0000, 0x0000, 0}
};
static struct XGI330_LCDDataTablStruct XGI_EPLCHLCDRegPtr[] = {
- {Panel1024x768, 0x0000, 0x0000, 0}, /* XGI_CH7017LV1024x768 */
- {Panel1400x1050, 0x0000, 0x0000, 1}, /* XGI_CH7017LV1400x1050 */
+ {Panel_1024x768, 0x0000, 0x0000, 0}, /* XGI_CH7017LV1024x768 */
+ {Panel_1400x1050, 0x0000, 0x0000, 1}, /* XGI_CH7017LV1400x1050 */
{0xFF, 0x0000, 0x0000, 0}
};
@@ -2501,225 +2501,225 @@ static unsigned short LCDLenList[] = {
/* Dual link only */
static struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = {
/* LCDCap1024x768 */
- {Panel1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65,
+ {Panel_1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65_315,
0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
/* LCDCap1280x1024 */
- {Panel1280x1024, LCDDualLink+DefaultLCDCap, StLCDBToA,
- 0x012, 0x70, 0x03, VCLK108_2,
+ {Panel_1280x1024, XGI_LCDDualLink+DefaultLCDCap, StLCDBToA,
+ 0x012, 0x70, 0x03, VCLK108_2_315,
0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1400x1050 */
- {Panel1400x1050, LCDDualLink+DefaultLCDCap, StLCDBToA,
- 0x012, 0x70, 0x03, VCLK108_2,
+ {Panel_1400x1050, XGI_LCDDualLink+DefaultLCDCap, StLCDBToA,
+ 0x012, 0x70, 0x03, VCLK108_2_315,
0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1600x1200 */
- {Panel1600x1200, LCDDualLink+DefaultLCDCap, LCDToFull,
+ {Panel_1600x1200, XGI_LCDDualLink+DefaultLCDCap, LCDToFull,
0x012, 0xC0, 0x03, VCLK162,
0x43, 0x22, 0x70, 0x24, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1024x768x75 */
- {Panel1024x768x75, DefaultLCDCap, 0, 0x012, 0x60, 0, VCLK78_75,
+ {Panel_1024x768x75, DefaultLCDCap, 0, 0x012, 0x60, 0, VCLK78_75,
0x2B, 0x61, 0x2B, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
/* LCDCap1280x1024x75 */
- {Panel1280x1024x75, LCDDualLink+DefaultLCDCap, StLCDBToA,
+ {Panel_1280x1024x75, XGI_LCDDualLink+DefaultLCDCap, StLCDBToA,
0x012, 0x90, 0x03, VCLK135_5,
0x54, 0x42, 0x4A, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCapDefault */
- {0xFF, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65,
+ {0xFF, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65_315,
0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}
};
static struct XGI330_LCDCapStruct XGI_LCDCapList[] = {
/* LCDCap1024x768 */
- {Panel1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65,
+ {Panel_1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65_315,
0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
/* LCDCap1280x1024 */
- {Panel1280x1024, DefaultLCDCap, StLCDBToA,
- 0x012, 0x70, 0x03, VCLK108_2,
+ {Panel_1280x1024, DefaultLCDCap, StLCDBToA,
+ 0x012, 0x70, 0x03, VCLK108_2_315,
0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1400x1050 */
- {Panel1400x1050, DefaultLCDCap, StLCDBToA,
- 0x012, 0x70, 0x03, VCLK108_2,
+ {Panel_1400x1050, DefaultLCDCap, StLCDBToA,
+ 0x012, 0x70, 0x03, VCLK108_2_315,
0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1600x1200 */
- {Panel1600x1200, DefaultLCDCap, LCDToFull,
+ {Panel_1600x1200, DefaultLCDCap, LCDToFull,
0x012, 0xC0, 0x03, VCLK162,
0x5A, 0x23, 0x5A, 0x23, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1024x768x75 */
- {Panel1024x768x75, DefaultLCDCap, 0, 0x012, 0x60, 0, VCLK78_75,
+ {Panel_1024x768x75, DefaultLCDCap, 0, 0x012, 0x60, 0, VCLK78_75,
0x2B, 0x61, 0x2B, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
/* LCDCap1280x1024x75 */
- {Panel1280x1024x75, DefaultLCDCap, StLCDBToA,
+ {Panel_1280x1024x75, DefaultLCDCap, StLCDBToA,
0x012, 0x90, 0x03, VCLK135_5,
0x54, 0x42, 0x4A, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCapDefault */
- {0xFF, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65,
+ {0xFF, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65_315,
0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}
};
static struct XGI_Ext2Struct XGI330_RefIndex[] = {
- {Support32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
+ {Mode32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
0x00, 0x10, 0x59, 320, 200},/* 00 */
- {Support32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
+ {Mode32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
0x00, 0x10, 0x00, 320, 400},/* 01 */
- {Support32Bpp + SupportAllCRT2 + SyncNN, RES320x240, VCLK25_175,
+ {Mode32Bpp + SupportAllCRT2 + SyncNN, RES320x240, VCLK25_175,
0x04, 0x20, 0x50, 320, 240},/* 02 */
- {Support32Bpp + SupportAllCRT2 + SyncPP, RES400x300, VCLK40,
+ {Mode32Bpp + SupportAllCRT2 + SyncPP, RES400x300, VCLK40,
0x05, 0x32, 0x51, 400, 300},/* 03 */
- {Support32Bpp + NoSupportTV + SyncNN + SupportTV1024, RES512x384,
- VCLK65, 0x06, 0x43, 0x52, 512, 384},/* 04 */
- {Support32Bpp + SupportAllCRT2 + SyncPN, RES640x400, VCLK25_175,
+ {Mode32Bpp + NoSupportTV + SyncNN + SupportTV1024, RES512x384,
+ VCLK65_315, 0x06, 0x43, 0x52, 512, 384},/* 04 */
+ {Mode32Bpp + SupportAllCRT2 + SyncPN, RES640x400, VCLK25_175,
0x00, 0x14, 0x2f, 640, 400},/* 05 */
- {Support32Bpp + SupportAllCRT2 + SyncNN, RES640x480x60, VCLK25_175,
+ {Mode32Bpp + SupportAllCRT2 + SyncNN, RES640x480x60, VCLK25_175,
	0x04, 0x24, 0x2e, 640, 480},/* 06 640x480x60Hz (LCD 640x480x60Hz) */
- {Support32Bpp + NoSupportHiVisionTV + SyncNN, RES640x480x72, VCLK31_5,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncNN, RES640x480x72, VCLK31_5,
0x04, 0x24, 0x2e, 640, 480},/* 07 640x480x72Hz (LCD 640x480x70Hz) */
- {Support32Bpp + NoSupportHiVisionTV + SyncNN, RES640x480x75, VCLK31_5,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncNN, RES640x480x75, VCLK31_5,
0x47, 0x24, 0x2e, 640, 480},/* 08 640x480x75Hz (LCD 640x480x75Hz) */
- {Support32Bpp + SupportRAMDAC2 + SyncNN, RES640x480x85, VCLK36,
+ {Mode32Bpp + SupportRAMDAC2 + SyncNN, RES640x480x85, VCLK36,
0x8A, 0x24, 0x2e, 640, 480},/* 09 640x480x85Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x100, VCLK43_163,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x100, VCLK43_163,
0x00, 0x24, 0x2e, 640, 480},/* 0a 640x480x100Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x120, VCLK52_406,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x120, VCLK52_406,
0x00, 0x24, 0x2e, 640, 480},/* 0b 640x480x120Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x160, VCLK72_852,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x160, VCLK72_852,
0x00, 0x24, 0x2e, 640, 480},/* 0c 640x480x160Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncNN, RES640x480x200, VCLK86_6,
+ {Mode32Bpp + SupportRAMDAC2 + SyncNN, RES640x480x200, VCLK86_6,
0x00, 0x24, 0x2e, 640, 480},/* 0d 640x480x200Hz */
- {Support32Bpp + NoSupportLCD + SyncPP, RES800x600x56, VCLK36,
+ {Mode32Bpp + NoSupportLCD + SyncPP, RES800x600x56, VCLK36,
0x05, 0x36, 0x6a, 800, 600},/* 0e 800x600x56Hz */
- {Support32Bpp + NoSupportTV + SyncPP, RES800x600x60, VCLK40,
+ {Mode32Bpp + NoSupportTV + SyncPP, RES800x600x60, VCLK40,
0x05, 0x36, 0x6a, 800, 600},/* 0f 800x600x60Hz (LCD 800x600x60Hz) */
- {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES800x600x72, VCLK50,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES800x600x72, VCLK50,
0x48, 0x36, 0x6a, 800, 600},/* 10 800x600x72Hz (LCD 800x600x70Hz) */
- {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES800x600x75, VCLK49_5,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES800x600x75, VCLK49_5,
0x8B, 0x36, 0x6a, 800, 600},/* 11 800x600x75Hz (LCD 800x600x75Hz) */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES800x600x85, VCLK56_25,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES800x600x85, VCLK56_25,
0x00, 0x36, 0x6a, 800, 600},/* 12 800x600x85Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x100, VCLK68_179,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x100, VCLK68_179,
0x00, 0x36, 0x6a, 800, 600},/* 13 800x600x100Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x120, VCLK83_95,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x120, VCLK83_95,
0x00, 0x36, 0x6a, 800, 600},/* 14 800x600x120Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x160, VCLK116_406,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x160, VCLK116_406,
0x00, 0x36, 0x6a, 800, 600},/* 15 800x600x160Hz */
- {Support32Bpp + InterlaceMode + SyncPP, RES1024x768x43, VCLK44_9,
+ {Mode32Bpp + InterlaceMode + SyncPP, RES1024x768x43, VCLK44_9,
0x00, 0x47, 0x37, 1024, 768},/* 16 1024x768x43Hz */
/* 17 1024x768x60Hz (LCD 1024x768x60Hz) */
- {Support32Bpp + NoSupportTV + SyncNN + SupportTV1024, RES1024x768x60,
- VCLK65, 0x06, 0x47, 0x37, 1024, 768},
- {Support32Bpp + NoSupportHiVisionTV + SyncNN, RES1024x768x70, VCLK75,
+ {Mode32Bpp + NoSupportTV + SyncNN + SupportTV1024, RES1024x768x60,
+ VCLK65_315, 0x06, 0x47, 0x37, 1024, 768},
+ {Mode32Bpp + NoSupportHiVisionTV + SyncNN, RES1024x768x70, VCLK75,
0x49, 0x47, 0x37, 1024, 768},/* 18 1024x768x70Hz (LCD 1024x768x70Hz) */
- {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES1024x768x75, VCLK78_75,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES1024x768x75, VCLK78_75,
0x00, 0x47, 0x37, 1024, 768},/* 19 1024x768x75Hz (LCD 1024x768x75Hz) */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1024x768x85, VCLK94_5,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1024x768x85, VCLK94_5,
0x8C, 0x47, 0x37, 1024, 768},/* 1a 1024x768x85Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x100, VCLK113_309,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x100, VCLK113_309,
0x00, 0x47, 0x37, 1024, 768},/* 1b 1024x768x100Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x120, VCLK139_054,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x120, VCLK139_054,
0x00, 0x47, 0x37, 1024, 768},/* 1c 1024x768x120Hz */
- {Support32Bpp + SupportLCD + SyncPP, RES1280x960x60, VCLK108_2,
+ {Mode32Bpp + SupportLCD + SyncPP, RES1280x960x60, VCLK108_2_315,
0x08, 0x58, 0x7b, 1280, 960},/* 1d 1280x960x60Hz */
- {Support32Bpp + InterlaceMode + SyncPP, RES1280x1024x43, VCLK78_75,
+ {Mode32Bpp + InterlaceMode + SyncPP, RES1280x1024x43, VCLK78_75,
0x00, 0x58, 0x3a, 1280, 1024},/* 1e 1280x1024x43Hz */
- {Support32Bpp + NoSupportTV + SyncPP, RES1280x1024x60, VCLK108_2,
+ {Mode32Bpp + NoSupportTV + SyncPP, RES1280x1024x60, VCLK108_2_315,
0x07, 0x58, 0x3a, 1280, 1024},/*1f 1280x1024x60Hz (LCD 1280x1024x60Hz)*/
- {Support32Bpp + NoSupportTV + SyncPP, RES1280x1024x75, VCLK135_5,
+ {Mode32Bpp + NoSupportTV + SyncPP, RES1280x1024x75, VCLK135_5,
0x00, 0x58, 0x3a, 1280, 1024},/*20 1280x1024x75Hz (LCD 1280x1024x75Hz)*/
- {Support32Bpp + SyncPP, RES1280x1024x85, VCLK157_5,
+ {Mode32Bpp + SyncPP, RES1280x1024x85, VCLK157_5,
0x00, 0x58, 0x3a, 1280, 1024},/* 21 1280x1024x85Hz */
/* 22 1600x1200x60Hz */
- {Support32Bpp + SupportLCD + SyncPP + SupportCRT2in301C,
+ {Mode32Bpp + SupportLCD + SyncPP + SupportCRT2in301C,
RES1600x1200x60, VCLK162, 0x09, 0x7A, 0x3c, 1600, 1200},
- {Support32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x65, VCLK175,
+ {Mode32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x65, VCLK175,
0x00, 0x69, 0x3c, 1600, 1200},/* 23 1600x1200x65Hz */
- {Support32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x70, VCLK189,
+ {Mode32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x70, VCLK189,
0x00, 0x69, 0x3c, 1600, 1200},/* 24 1600x1200x70Hz */
- {Support32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x75, VCLK202_5,
+ {Mode32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x75, VCLK202_5,
0x00, 0x69, 0x3c, 1600, 1200},/* 25 1600x1200x75Hz */
- {Support32Bpp + SyncPP, RES1600x1200x85, VCLK229_5,
+ {Mode32Bpp + SyncPP, RES1600x1200x85, VCLK229_5,
0x00, 0x69, 0x3c, 1600, 1200},/* 26 1600x1200x85Hz */
- {Support32Bpp + SyncPP, RES1600x1200x100, VCLK269_655,
+ {Mode32Bpp + SyncPP, RES1600x1200x100, VCLK269_655,
0x00, 0x69, 0x3c, 1600, 1200},/* 27 1600x1200x100Hz */
- {Support32Bpp + SyncPP, RES1600x1200x120, VCLK323_586,
+ {Mode32Bpp + SyncPP, RES1600x1200x120, VCLK323_586,
0x00, 0x69, 0x3c, 1600, 1200},/* 28 1600x1200x120Hz */
- {Support32Bpp + SupportLCD + SyncNP, RES1920x1440x60, VCLK234,
+ {Mode32Bpp + SupportLCD + SyncNP, RES1920x1440x60, VCLK234,
0x00, 0x00, 0x68, 1920, 1440},/* 29 1920x1440x60Hz */
- {Support32Bpp + SyncPN, RES1920x1440x65, VCLK254_817,
+ {Mode32Bpp + SyncPN, RES1920x1440x65, VCLK254_817,
0x00, 0x00, 0x68, 1920, 1440},/* 2a 1920x1440x65Hz */
- {Support32Bpp + SyncPN, RES1920x1440x70, VCLK277_015,
+ {Mode32Bpp + SyncPN, RES1920x1440x70, VCLK277_015,
0x00, 0x00, 0x68, 1920, 1440},/* 2b 1920x1440x70Hz */
- {Support32Bpp + SyncPN, RES1920x1440x75, VCLK291_132,
+ {Mode32Bpp + SyncPN, RES1920x1440x75, VCLK291_132,
0x00, 0x00, 0x68, 1920, 1440},/* 2c 1920x1440x75Hz */
- {Support32Bpp + SyncPN, RES1920x1440x85, VCLK330_615,
+ {Mode32Bpp + SyncPN, RES1920x1440x85, VCLK330_615,
0x00, 0x00, 0x68, 1920, 1440},/* 2d 1920x1440x85Hz */
- {Support16Bpp + SyncPN, RES1920x1440x100, VCLK388_631,
+ {Mode16Bpp + SyncPN, RES1920x1440x100, VCLK388_631,
0x00, 0x00, 0x68, 1920, 1440},/* 2e 1920x1440x100Hz */
- {Support32Bpp + SupportLCD + SyncPN, RES2048x1536x60, VCLK266_952,
+ {Mode32Bpp + SupportLCD + SyncPN, RES2048x1536x60, VCLK266_952,
0x00, 0x00, 0x6c, 2048, 1536},/* 2f 2048x1536x60Hz */
- {Support32Bpp + SyncPN, RES2048x1536x65, VCLK291_766,
+ {Mode32Bpp + SyncPN, RES2048x1536x65, VCLK291_766,
0x00, 0x00, 0x6c, 2048, 1536},/* 30 2048x1536x65Hz */
- {Support32Bpp + SyncPN, RES2048x1536x70, VCLK315_195,
+ {Mode32Bpp + SyncPN, RES2048x1536x70, VCLK315_195,
0x00, 0x00, 0x6c, 2048, 1536},/* 31 2048x1536x70Hz */
- {Support32Bpp + SyncPN, RES2048x1536x75, VCLK340_477,
+ {Mode32Bpp + SyncPN, RES2048x1536x75, VCLK340_477,
0x00, 0x00, 0x6c, 2048, 1536},/* 32 2048x1536x75Hz */
- {Support16Bpp + SyncPN, RES2048x1536x85, VCLK375_847,
+ {Mode16Bpp + SyncPN, RES2048x1536x85, VCLK375_847,
0x00, 0x00, 0x6c, 2048, 1536},/* 33 2048x1536x85Hz */
- {Support32Bpp + SupportHiVisionTV + SupportRAMDAC2 +
- SyncPP + SupportYPbPr, RES800x480x60, VCLK39_77,
+ {Mode32Bpp + SupportHiVision + SupportRAMDAC2 +
+ SyncPP + SupportYPbPr750p, RES800x480x60, VCLK39_77,
0x08, 0x00, 0x70, 800, 480},/* 34 800x480x60Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES800x480x75, VCLK49_5,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES800x480x75, VCLK49_5,
0x08, 0x00, 0x70, 800, 480},/* 35 800x480x75Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES800x480x85, VCLK56_25,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES800x480x85, VCLK56_25,
0x08, 0x00, 0x70, 800, 480},/* 36 800x480x85Hz */
- {Support32Bpp + SupportHiVisionTV + SupportRAMDAC2 +
- SyncPP + SupportYPbPr, RES1024x576x60, VCLK65,
+ {Mode32Bpp + SupportHiVision + SupportRAMDAC2 +
+ SyncPP + SupportYPbPr750p, RES1024x576x60, VCLK65_315,
0x09, 0x00, 0x71, 1024, 576},/* 37 1024x576x60Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1024x576x75, VCLK78_75,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1024x576x75, VCLK78_75,
0x09, 0x00, 0x71, 1024, 576},/* 38 1024x576x75Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1024x576x85, VCLK94_5,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1024x576x85, VCLK94_5,
0x09, 0x00, 0x71, 1024, 576},/* 39 1024x576x85Hz */
- {Support32Bpp + SupportHiVisionTV + SupportRAMDAC2 +
- SyncPP + SupportYPbPr, RES1280x720x60, VCLK108_2,
+ {Mode32Bpp + SupportHiVision + SupportRAMDAC2 +
+ SyncPP + SupportYPbPr750p, RES1280x720x60, VCLK108_2_315,
0x0A, 0x00, 0x75, 1280, 720},/* 3a 1280x720x60Hz*/
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1280x720x75, VCLK135_5,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1280x720x75, VCLK135_5,
0x0A, 0x00, 0x75, 1280, 720},/* 3b 1280x720x75Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1280x720x85, VCLK157_5,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1280x720x85, VCLK157_5,
0x0A, 0x00, 0x75, 1280, 720},/* 3c 1280x720x85Hz */
- {Support32Bpp + SupportTV + SyncNN, RES720x480x60, VCLK28_322,
+ {Mode32Bpp + SupportTV + SyncNN, RES720x480x60, VCLK28_322,
0x06, 0x00, 0x31, 720, 480},/* 3d 720x480x60Hz */
- {Support32Bpp + SupportTV + SyncPP, RES720x576x56, VCLK36,
+ {Mode32Bpp + SupportTV + SyncPP, RES720x576x56, VCLK36,
0x06, 0x00, 0x32, 720, 576},/* 3e 720x576x56Hz */
- {Support32Bpp + InterlaceMode + NoSupportLCD + SyncPP, RES856x480x79I,
+ {Mode32Bpp + InterlaceMode + NoSupportLCD + SyncPP, RES856x480x79I,
VCLK35_2, 0x00, 0x00, 0x00, 856, 480},/* 3f 856x480x79I */
- {Support32Bpp + NoSupportLCD + SyncNN, RES856x480x60, VCLK35_2,
+ {Mode32Bpp + NoSupportLCD + SyncNN, RES856x480x60, VCLK35_2,
0x00, 0x00, 0x00, 856, 480},/* 40 856x480x60Hz */
- {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES1280x768x60,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES1280x768x60,
VCLK79_411, 0x08, 0x48, 0x23, 1280, 768},/* 41 1280x768x60Hz */
- {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES1400x1050x60,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES1400x1050x60,
VCLK122_61, 0x08, 0x69, 0x26, 1400, 1050},/* 42 1400x1050x60Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1152x864x60, VCLK80_350,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1152x864x60, VCLK80_350,
0x37, 0x00, 0x20, 1152, 864},/* 43 1152x864x60Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1152x864x75, VCLK107_385,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1152x864x75, VCLK107_385,
0x37, 0x00, 0x20, 1152, 864},/* 44 1152x864x75Hz */
- {Support32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x75,
+ {Mode32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x75,
VCLK125_999, 0x3A, 0x88, 0x7b, 1280, 960},/* 45 1280x960x75Hz */
- {Support32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x85,
+ {Mode32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x85,
VCLK148_5, 0x0A, 0x88, 0x7b, 1280, 960},/* 46 1280x960x85Hz */
- {Support32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x120,
+ {Mode32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x120,
VCLK217_325, 0x3A, 0x88, 0x7b, 1280, 960},/* 47 1280x960x120Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x160, VCLK139_054,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x160, VCLK139_054,
0x30, 0x47, 0x37, 1024, 768},/* 48 1024x768x160Hz */
};
@@ -2729,7 +2729,7 @@ static unsigned char XGI330_ScreenOffset[] = {
0x57, 0x48
};
-static struct XGI_StResInfoStruct XGI330_StResInfo[] = {
+static struct SiS_StResInfo_S XGI330_StResInfo[] = {
{640, 400},
{640, 350},
{720, 400},
@@ -2737,7 +2737,7 @@ static struct XGI_StResInfoStruct XGI330_StResInfo[] = {
{640, 480}
};
-static struct XGI_ModeResInfoStruct XGI330_ModeResInfo[] = {
+static struct SiS_ModeResInfo_S XGI330_ModeResInfo[] = {
{ 320, 200, 8, 8},
{ 320, 240, 8, 8},
{ 320, 400, 8, 8},
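
The Panel1024x768 -> Panel_1024x768 style renames touch every row of these sentinel-terminated tables but leave the control words and data indices untouched, so lookups resolve exactly as before. The consumption pattern is a linear scan that stops at the 0xFF sentinel; a sketch with illustrative names modeled on XGI330_LCDDataTablStruct (the real field names may differ):

struct lcd_tabl_row {
	unsigned char  panel_id;	/* Panel_1024x768 ... or 0xFF sentinel */
	unsigned short mask;		/* which flag bits to compare */
	unsigned short cap;		/* required value of those bits */
	unsigned short index;		/* slot in the per-panel data arrays */
};

static int lcd_tabl_lookup(const struct lcd_tabl_row *tbl,
			   unsigned char panel, unsigned short flags)
{
	int i;

	for (i = 0; tbl[i].panel_id != 0xFF; i++)
		if (tbl[i].panel_id == panel &&
		    (flags & tbl[i].mask) == tbl[i].cap)
			return tbl[i].index;
	return -1;			/* caller falls back to defaults */
}
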
diff --git a/drivers/staging/xgifb/vgatypes.h b/drivers/staging/xgifb/vgatypes.h
index 9e166bbb00c4..a7208e315815 100644
--- a/drivers/staging/xgifb/vgatypes.h
+++ b/drivers/staging/xgifb/vgatypes.h
@@ -2,6 +2,9 @@
#define _VGATYPES_
#include <linux/ioctl.h>
+#include <linux/fb.h> /* for struct fb_var_screeninfo for sis.h */
+#include "../../video/sis/vgatypes.h"
+#include "../../video/sis/sis.h" /* for LCD_TYPE */
#ifndef XGI_VB_CHIP_TYPE
enum XGI_VB_CHIP_TYPE {
@@ -19,6 +22,12 @@ enum XGI_VB_CHIP_TYPE {
};
#endif
+
+#define XGI_LCD_TYPE
+/* Since the merge with video/sis the LCD_TYPEs are used from
   drivers/video/sis/sis.h. Nevertheless we keep this (for the moment) for
   future reference until the code is merged completely and we are sure
   none of this needs to be added to the sis.h header */
#ifndef XGI_LCD_TYPE
enum XGI_LCD_TYPE {
LCD_INVALID = 0,
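
The vgatypes.h hunk retires the local enum without deleting it: pre-defining XGI_LCD_TYPE makes the #ifndef guard false, so the block compiles out and the LCD_TYPE values come from drivers/video/sis/sis.h while the old list stays visible for reference. The pattern in isolation, as a sketch:

#define XGI_LCD_TYPE		/* pre-define the guard: use the sis.h values */

#ifndef XGI_LCD_TYPE		/* now always false */
enum XGI_LCD_TYPE {
	LCD_INVALID = 0,
	/* ... legacy values kept only for comparison ... */
};
#endif
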
diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig
index 7fabcb2bc80d..3ed2c8f656a5 100644
--- a/drivers/staging/zcache/Kconfig
+++ b/drivers/staging/zcache/Kconfig
@@ -1,13 +1,14 @@
config ZCACHE
- tristate "Dynamic compression of swap pages and clean pagecache pages"
- depends on CLEANCACHE || FRONTSWAP
- select XVMALLOC
- select LZO_COMPRESS
- select LZO_DECOMPRESS
+ bool "Dynamic compression of swap pages and clean pagecache pages"
+ # X86 dependency is because zsmalloc uses non-portable pte/tlb
+ # functions
+ depends on (CLEANCACHE || FRONTSWAP) && CRYPTO && X86
+ select ZSMALLOC
+ select CRYPTO_LZO
default n
help
Zcache doubles RAM efficiency while providing a significant
- performance boosts on many workloads. Zcache uses lzo1x
+ performance boosts on many workloads. Zcache uses
compression and an in-kernel implementation of transcendent
memory to store clean page cache pages and swap in RAM,
providing a noticeable reduction in disk I/O.
diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
index ed147c4b110d..0d4aa82706b3 100644
--- a/drivers/staging/zcache/tmem.h
+++ b/drivers/staging/zcache/tmem.h
@@ -47,7 +47,7 @@
#define ASSERT_INVERTED_SENTINEL(_x, _y) do { } while (0)
#endif
-#define ASSERT_SPINLOCK(_l) WARN_ON(!spin_is_locked(_l))
+#define ASSERT_SPINLOCK(_l) lockdep_assert_held(_l)
/*
* A pool is the highest-level data structure managed by tmem and
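
The ASSERT_SPINLOCK rewrite above is more than a style change: spin_is_locked() only says that *some* context holds the lock (and can return false on non-debug uniprocessor builds, where spinlocks compile away), while lockdep_assert_held() verifies that the current context holds it and costs nothing when CONFIG_LOCKDEP is off. A simplified sketch of the distinction, paraphrased from memory of the era's <linux/lockdep.h> rather than quoted from it:

/* Simplified; see <linux/lockdep.h> for the real definitions. */
#ifdef CONFIG_LOCKDEP
#define lockdep_assert_held(l) \
	WARN_ON(debug_locks && !lockdep_is_held(l))	/* held by *this* task? */
#else
#define lockdep_assert_held(l)	do { } while (0)	/* compiles out */
#endif
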
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index ef7c52bb1df9..2734dacacbaf 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -6,9 +6,10 @@
*
* Zcache provides an in-kernel "host implementation" for transcendent memory
* and, thus indirectly, for cleancache and frontswap. Zcache includes two
- * page-accessible memory [1] interfaces, both utilizing lzo1x compression:
+ * page-accessible memory [1] interfaces, both utilizing the crypto compression
+ * API:
* 1) "compression buddies" ("zbud") is used for ephemeral pages
- * 2) xvmalloc is used for persistent pages.
+ * 2) zsmalloc is used for persistent pages.
* Xvmalloc (based on the TLSF allocator) has very low fragmentation
* so maximizes space efficiency, while zbud allows pairs (and potentially,
* in the future, more than a pair of) compressed pages to be closely linked
@@ -23,15 +24,16 @@
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/list.h>
-#include <linux/lzo.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/math64.h>
+#include <linux/crypto.h>
+#include <linux/string.h>
#include "tmem.h"
-#include "../zram/xvmalloc.h" /* if built in drivers/staging */
+#include "../zsmalloc/zsmalloc.h"
#if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
#error "zcache is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
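
With <linux/lzo.h> gone and <linux/crypto.h> in, compression now goes through the synchronous crypto_comp interface that zcache_comp_op() wraps below. The detail worth noting is that dlen is an in/out parameter: callers must preload it with the destination capacity, which is why zcache_compress() later sets *out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER before compressing. A minimal standalone usage sketch (error handling trimmed):

#include <linux/crypto.h>
#include <linux/err.h>

static int compress_sketch(const u8 *src, unsigned int slen,
			   u8 *dst, unsigned int dst_cap)
{
	struct crypto_comp *tfm = crypto_alloc_comp("lzo", 0, 0);
	unsigned int dlen = dst_cap;	/* in: capacity, out: bytes written */
	int ret;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	ret = crypto_comp_compress(tfm, src, slen, dst, &dlen);
	crypto_free_comp(tfm);
	return ret ? ret : (int)dlen;
}
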
@@ -60,7 +62,7 @@ MODULE_LICENSE("GPL");
struct zcache_client {
struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
- struct xv_pool *xvpool;
+ struct zs_pool *zspool;
bool allocated;
atomic_t refcount;
};
@@ -81,6 +83,38 @@ static inline bool is_local_client(struct zcache_client *cli)
return cli == &zcache_host;
}
+/* crypto API for zcache */
+#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
+static char zcache_comp_name[ZCACHE_COMP_NAME_SZ];
+static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms;
+
+enum comp_op {
+ ZCACHE_COMPOP_COMPRESS,
+ ZCACHE_COMPOP_DECOMPRESS
+};
+
+static inline int zcache_comp_op(enum comp_op op,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ struct crypto_comp *tfm;
+ int ret;
+
+ BUG_ON(!zcache_comp_pcpu_tfms);
+ tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
+ BUG_ON(!tfm);
+ switch (op) {
+ case ZCACHE_COMPOP_COMPRESS:
+ ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
+ break;
+ case ZCACHE_COMPOP_DECOMPRESS:
+ ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
+ break;
+ }
+ put_cpu();
+ return ret;
+}
+
/**********
* Compression buddies ("zbud") provides for packing two (or, possibly
* in the future, more) compressed ephemeral pages into a single "raw"
@@ -299,10 +333,12 @@ static void zbud_free_and_delist(struct zbud_hdr *zh)
struct zbud_page *zbpg =
container_of(zh, struct zbud_page, buddy[budnum]);
+ spin_lock(&zbud_budlists_spinlock);
spin_lock(&zbpg->lock);
if (list_empty(&zbpg->bud_list)) {
/* ignore zombie page... see zbud_evict_pages() */
spin_unlock(&zbpg->lock);
+ spin_unlock(&zbud_budlists_spinlock);
return;
}
size = zbud_free(zh);
@@ -310,7 +346,6 @@ static void zbud_free_and_delist(struct zbud_hdr *zh)
zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0];
if (zh_other->size == 0) { /* was unbuddied: unlist and free */
chunks = zbud_size_to_chunks(size) ;
- spin_lock(&zbud_budlists_spinlock);
BUG_ON(list_empty(&zbud_unbuddied[chunks].list));
list_del_init(&zbpg->bud_list);
zbud_unbuddied[chunks].count--;
@@ -318,7 +353,6 @@ static void zbud_free_and_delist(struct zbud_hdr *zh)
zbud_free_raw_page(zbpg);
} else { /* was buddied: move remaining buddy to unbuddied list */
chunks = zbud_size_to_chunks(zh_other->size) ;
- spin_lock(&zbud_budlists_spinlock);
list_del_init(&zbpg->bud_list);
zcache_zbud_buddied_count--;
list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list);
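
The zbud_free_and_delist() hunks are a lock-ordering fix: zbud_budlists_spinlock used to be taken *inside* the two branches, after zbpg->lock, leaving the list_empty() zombie check unprotected; taking the list lock first, once, covers that check and establishes a single consistent order. The shape of the fix, reduced to a sketch:

#include <linux/list.h>
#include <linux/spinlock.h>

struct bud_page {
	spinlock_t lock;
	struct list_head bud_list;
};

static DEFINE_SPINLOCK(budlists_lock);

static void free_and_delist_sketch(struct bud_page *pg)
{
	spin_lock(&budlists_lock);	/* list lock first, always */
	spin_lock(&pg->lock);
	if (list_empty(&pg->bud_list)) {	/* zombie check, now protected */
		spin_unlock(&pg->lock);
		spin_unlock(&budlists_lock);
		return;
	}
	/* ... delist and free with both locks held ... */
	spin_unlock(&pg->lock);
	spin_unlock(&budlists_lock);
}
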
@@ -407,7 +441,7 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
{
struct zbud_page *zbpg;
unsigned budnum = zbud_budnum(zh);
- size_t out_len = PAGE_SIZE;
+ unsigned int out_len = PAGE_SIZE;
char *to_va, *from_va;
unsigned size;
int ret = 0;
@@ -421,13 +455,14 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
}
ASSERT_SENTINEL(zh, ZBH);
BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
- to_va = kmap_atomic(page, KM_USER0);
+ to_va = kmap_atomic(page);
size = zh->size;
from_va = zbud_data(zh, size);
- ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
- BUG_ON(ret != LZO_E_OK);
+ ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
+ to_va, &out_len);
+ BUG_ON(ret);
BUG_ON(out_len != PAGE_SIZE);
- kunmap_atomic(to_va, KM_USER0);
+ kunmap_atomic(to_va);
out:
spin_unlock(&zbpg->lock);
return ret;
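
The kmap_atomic(page, KM_USER0) -> kmap_atomic(page) conversions that recur through the rest of this diff track a tree-wide API change of the period: atomic kmap slots became implicit, so neither kmap_atomic() nor kunmap_atomic() takes a KM_* argument any more. The new idiom:

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_from_page_sketch(struct page *page, void *buf)
{
	void *va = kmap_atomic(page);	/* was: kmap_atomic(page, KM_USER0) */

	memcpy(buf, va, PAGE_SIZE);
	kunmap_atomic(va);		/* was: kunmap_atomic(va, KM_USER0) */
}
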
@@ -622,8 +657,8 @@ static int zbud_show_cumul_chunk_counts(char *buf)
#endif
/**********
- * This "zv" PAM implementation combines the TLSF-based xvMalloc
- * with lzo1x compression to maximize the amount of data that can
+ * This "zv" PAM implementation combines the slab-based zsmalloc
+ * with the crypto compression API to maximize the amount of data that can
* be packed into a physical page.
*
* Zv represents a PAM page with the index and object (plus a "size" value
@@ -636,6 +671,7 @@ struct zv_hdr {
uint32_t pool_id;
struct tmem_oid oid;
uint32_t index;
+ size_t size;
DECL_SENTINEL
};
@@ -657,72 +693,72 @@ static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
static atomic_t zv_curr_dist_counts[NCHUNKS];
static atomic_t zv_cumul_dist_counts[NCHUNKS];
-static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id,
+static struct zv_hdr *zv_create(struct zs_pool *pool, uint32_t pool_id,
struct tmem_oid *oid, uint32_t index,
void *cdata, unsigned clen)
{
- struct page *page;
- struct zv_hdr *zv = NULL;
- uint32_t offset;
- int alloc_size = clen + sizeof(struct zv_hdr);
- int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
- int ret;
+ struct zv_hdr *zv;
+ u32 size = clen + sizeof(struct zv_hdr);
+ int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
+ void *handle = NULL;
BUG_ON(!irqs_disabled());
BUG_ON(chunks >= NCHUNKS);
- ret = xv_malloc(xvpool, alloc_size,
- &page, &offset, ZCACHE_GFP_MASK);
- if (unlikely(ret))
+ handle = zs_malloc(pool, size);
+ if (!handle)
goto out;
atomic_inc(&zv_curr_dist_counts[chunks]);
atomic_inc(&zv_cumul_dist_counts[chunks]);
- zv = kmap_atomic(page, KM_USER0) + offset;
+ zv = zs_map_object(pool, handle);
zv->index = index;
zv->oid = *oid;
zv->pool_id = pool_id;
+ zv->size = clen;
SET_SENTINEL(zv, ZVH);
memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
- kunmap_atomic(zv, KM_USER0);
+ zs_unmap_object(pool, handle);
out:
- return zv;
+ return handle;
}
-static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
+static void zv_free(struct zs_pool *pool, void *handle)
{
unsigned long flags;
- struct page *page;
- uint32_t offset;
- uint16_t size = xv_get_object_size(zv);
- int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
+ struct zv_hdr *zv;
+ uint16_t size;
+ int chunks;
+ zv = zs_map_object(pool, handle);
ASSERT_SENTINEL(zv, ZVH);
+ size = zv->size + sizeof(struct zv_hdr);
+ INVERT_SENTINEL(zv, ZVH);
+ zs_unmap_object(pool, handle);
+
+ chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
BUG_ON(chunks >= NCHUNKS);
atomic_dec(&zv_curr_dist_counts[chunks]);
- size -= sizeof(*zv);
- BUG_ON(size == 0);
- INVERT_SENTINEL(zv, ZVH);
- page = virt_to_page(zv);
- offset = (unsigned long)zv & ~PAGE_MASK;
+
local_irq_save(flags);
- xv_free(xvpool, page, offset);
+ zs_free(pool, handle);
local_irq_restore(flags);
}
-static void zv_decompress(struct page *page, struct zv_hdr *zv)
+static void zv_decompress(struct page *page, void *handle)
{
- size_t clen = PAGE_SIZE;
+ unsigned int clen = PAGE_SIZE;
char *to_va;
- unsigned size;
int ret;
+ struct zv_hdr *zv;
+ zv = zs_map_object(zcache_host.zspool, handle);
+ BUG_ON(zv->size == 0);
ASSERT_SENTINEL(zv, ZVH);
- size = xv_get_object_size(zv) - sizeof(*zv);
- BUG_ON(size == 0);
- to_va = kmap_atomic(page, KM_USER0);
- ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
- size, to_va, &clen);
- kunmap_atomic(to_va, KM_USER0);
- BUG_ON(ret != LZO_E_OK);
+ to_va = kmap_atomic(page);
+ ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv),
+ zv->size, to_va, &clen);
+ kunmap_atomic(to_va);
+ zs_unmap_object(zcache_host.zspool, handle);
+ BUG_ON(ret);
BUG_ON(clen != PAGE_SIZE);
}
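
The zv rework turns the pampd from a direct struct zv_hdr pointer into an opaque zsmalloc handle: the object is only addressable between zs_map_object() and zs_unmap_object(), and since zsmalloc offers no xv_get_object_size() equivalent, the compressed length moves into the header itself (the new size field). The access discipline in miniature, assuming the zv_hdr layout from the hunk above:

#include "../zsmalloc/zsmalloc.h"

static size_t read_zv_size_sketch(struct zs_pool *pool, void *handle)
{
	struct zv_hdr *zv = zs_map_object(pool, handle);
	size_t size = zv->size;		/* recorded at zv_create() time */

	zs_unmap_object(pool, handle);	/* zv is invalid after this */
	return size;
}
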
@@ -948,8 +984,8 @@ int zcache_new_client(uint16_t cli_id)
goto out;
cli->allocated = 1;
#ifdef CONFIG_FRONTSWAP
- cli->xvpool = xv_create_pool();
- if (cli->xvpool == NULL)
+ cli->zspool = zs_create_pool("zcache", ZCACHE_GFP_MASK);
+ if (cli->zspool == NULL)
goto out;
#endif
ret = 0;
@@ -1132,14 +1168,14 @@ static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0);
static unsigned long zcache_curr_pers_pampd_count_max;
/* forward reference */
-static int zcache_compress(struct page *from, void **out_va, size_t *out_len);
+static int zcache_compress(struct page *from, void **out_va, unsigned *out_len);
static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
struct tmem_pool *pool, struct tmem_oid *oid,
uint32_t index)
{
void *pampd = NULL, *cdata;
- size_t clen;
+ unsigned clen;
int ret;
unsigned long count;
struct page *page = (struct page *)(data);
@@ -1180,7 +1216,7 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
}
/* reject if mean compression is too poor */
if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
- total_zsize = xv_get_total_size_bytes(cli->xvpool);
+ total_zsize = zs_get_total_size_bytes(cli->zspool);
zv_mean_zsize = div_u64(total_zsize,
curr_pers_pampd_count);
if (zv_mean_zsize > zv_max_mean_zsize) {
@@ -1188,7 +1224,7 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
goto out;
}
}
- pampd = (void *)zv_create(cli->xvpool, pool->pool_id,
+ pampd = (void *)zv_create(cli->zspool, pool->pool_id,
oid, index, cdata, clen);
if (pampd == NULL)
goto out;
@@ -1246,7 +1282,7 @@ static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
atomic_dec(&zcache_curr_eph_pampd_count);
BUG_ON(atomic_read(&zcache_curr_eph_pampd_count) < 0);
} else {
- zv_free(cli->xvpool, (struct zv_hdr *)pampd);
+ zv_free(cli->zspool, pampd);
atomic_dec(&zcache_curr_pers_pampd_count);
BUG_ON(atomic_read(&zcache_curr_pers_pampd_count) < 0);
}
@@ -1285,55 +1321,73 @@ static struct tmem_pamops zcache_pamops = {
* zcache compression/decompression and related per-cpu stuff
*/
-#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
-#define LZO_DSTMEM_PAGE_ORDER 1
-static DEFINE_PER_CPU(unsigned char *, zcache_workmem);
static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
+#define ZCACHE_DSTMEM_ORDER 1
-static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
+static int zcache_compress(struct page *from, void **out_va, unsigned *out_len)
{
int ret = 0;
unsigned char *dmem = __get_cpu_var(zcache_dstmem);
- unsigned char *wmem = __get_cpu_var(zcache_workmem);
char *from_va;
BUG_ON(!irqs_disabled());
- if (unlikely(dmem == NULL || wmem == NULL))
- goto out; /* no buffer, so can't compress */
- from_va = kmap_atomic(from, KM_USER0);
+ if (unlikely(dmem == NULL))
+ goto out; /* no buffer or no compressor so can't compress */
+ *out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
+ from_va = kmap_atomic(from);
mb();
- ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
- BUG_ON(ret != LZO_E_OK);
+ ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
+ out_len);
+ BUG_ON(ret);
*out_va = dmem;
- kunmap_atomic(from_va, KM_USER0);
+ kunmap_atomic(from_va);
ret = 1;
out:
return ret;
}
+static int zcache_comp_cpu_up(int cpu)
+{
+ struct crypto_comp *tfm;
+
+ tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
+ if (IS_ERR(tfm))
+ return NOTIFY_BAD;
+ *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
+ return NOTIFY_OK;
+}
+
+static void zcache_comp_cpu_down(int cpu)
+{
+ struct crypto_comp *tfm;
+
+ tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
+ crypto_free_comp(tfm);
+ *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
+}
static int zcache_cpu_notifier(struct notifier_block *nb,
unsigned long action, void *pcpu)
{
- int cpu = (long)pcpu;
+ int ret, cpu = (long)pcpu;
struct zcache_preload *kp;
switch (action) {
case CPU_UP_PREPARE:
+ ret = zcache_comp_cpu_up(cpu);
+ if (ret != NOTIFY_OK) {
+ pr_err("zcache: can't allocate compressor transform\n");
+ return ret;
+ }
per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_REPEAT,
- LZO_DSTMEM_PAGE_ORDER),
- per_cpu(zcache_workmem, cpu) =
- kzalloc(LZO1X_MEM_COMPRESS,
- GFP_KERNEL | __GFP_REPEAT);
+ GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
break;
case CPU_DEAD:
case CPU_UP_CANCELED:
+ zcache_comp_cpu_down(cpu);
free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
- LZO_DSTMEM_PAGE_ORDER);
+ ZCACHE_DSTMEM_ORDER);
per_cpu(zcache_dstmem, cpu) = NULL;
- kfree(per_cpu(zcache_workmem, cpu));
- per_cpu(zcache_workmem, cpu) = NULL;
kp = &per_cpu(zcache_preloads, cpu);
while (kp->nr) {
kmem_cache_free(zcache_objnode_cache,
@@ -1757,9 +1811,9 @@ static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
static struct cleancache_ops zcache_cleancache_ops = {
.put_page = zcache_cleancache_put_page,
.get_page = zcache_cleancache_get_page,
- .flush_page = zcache_cleancache_flush_page,
- .flush_inode = zcache_cleancache_flush_inode,
- .flush_fs = zcache_cleancache_flush_fs,
+ .invalidate_page = zcache_cleancache_flush_page,
+ .invalidate_inode = zcache_cleancache_flush_inode,
+ .invalidate_fs = zcache_cleancache_flush_fs,
.init_shared_fs = zcache_cleancache_init_shared_fs,
.init_fs = zcache_cleancache_init_fs
};
@@ -1867,8 +1921,8 @@ static void zcache_frontswap_init(unsigned ignored)
static struct frontswap_ops zcache_frontswap_ops = {
.put_page = zcache_frontswap_put_page,
.get_page = zcache_frontswap_get_page,
- .flush_page = zcache_frontswap_flush_page,
- .flush_area = zcache_frontswap_flush_area,
+ .invalidate_page = zcache_frontswap_flush_page,
+ .invalidate_area = zcache_frontswap_flush_area,
.init = zcache_frontswap_init
};
@@ -1918,6 +1972,44 @@ static int __init no_frontswap(char *s)
__setup("nofrontswap", no_frontswap);
+static int __init enable_zcache_compressor(char *s)
+{
+ strncpy(zcache_comp_name, s, ZCACHE_COMP_NAME_SZ);
+ zcache_enabled = 1;
+ return 1;
+}
+__setup("zcache=", enable_zcache_compressor);
+
+
+static int zcache_comp_init(void)
+{
+ int ret = 0;
+
+ /* check crypto algorithm */
+ if (*zcache_comp_name != '\0') {
+ ret = crypto_has_comp(zcache_comp_name, 0, 0);
+ if (!ret)
+ pr_info("zcache: %s not supported\n",
+ zcache_comp_name);
+ }
+ if (!ret)
+ strcpy(zcache_comp_name, "lzo");
+ ret = crypto_has_comp(zcache_comp_name, 0, 0);
+ if (!ret) {
+ ret = 1;
+ goto out;
+ }
+ pr_info("zcache: using %s compressor\n", zcache_comp_name);
+
+ /* alloc percpu transforms */
+ ret = 0;
+ zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
+ if (!zcache_comp_pcpu_tfms)
+ ret = 1;
+out:
+ return ret;
+}
+
static int __init zcache_init(void)
{
int ret = 0;
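
With these pieces in place, booting with, say, zcache=deflate both enables zcache (enable_zcache_compressor() sets zcache_enabled) and names the compressor; zcache_comp_init() then verifies the algorithm with crypto_has_comp(), falls back to "lzo" when the named one is absent, and fails initialization only if even lzo is unavailable or the per-CPU transform array cannot be allocated.
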
@@ -1940,6 +2032,11 @@ static int __init zcache_init(void)
pr_err("zcache: can't register cpu notifier\n");
goto out;
}
+ ret = zcache_comp_init();
+ if (ret) {
+ pr_err("zcache: compressor initialization failed\n");
+ goto out;
+ }
for_each_online_cpu(cpu) {
void *pcpu = (void *)(long)cpu;
zcache_cpu_notifier(&zcache_cpu_notifier_block,
@@ -1975,7 +2072,7 @@ static int __init zcache_init(void)
old_ops = zcache_frontswap_register_ops();
pr_info("zcache: frontswap enabled using kernel "
- "transcendent memory and xvmalloc\n");
+ "transcendent memory and zsmalloc\n");
if (old_ops.init != NULL)
pr_warning("zcache: frontswap_ops overridden");
}
diff --git a/drivers/staging/zram/Kconfig b/drivers/staging/zram/Kconfig
index 3bec4dba3fe5..9d11a4cb99b7 100644
--- a/drivers/staging/zram/Kconfig
+++ b/drivers/staging/zram/Kconfig
@@ -1,11 +1,9 @@
-config XVMALLOC
- bool
- default n
-
config ZRAM
tristate "Compressed RAM block device support"
- depends on BLOCK && SYSFS
- select XVMALLOC
+ # X86 dependency is because zsmalloc uses non-portable pte/tlb
+ # functions
+ depends on BLOCK && SYSFS && X86
+ select ZSMALLOC
select LZO_COMPRESS
select LZO_DECOMPRESS
default n
diff --git a/drivers/staging/zram/Makefile b/drivers/staging/zram/Makefile
index 2a6d3213a756..7f4a3019e9c4 100644
--- a/drivers/staging/zram/Makefile
+++ b/drivers/staging/zram/Makefile
@@ -1,4 +1,3 @@
zram-y := zram_drv.o zram_sysfs.o
obj-$(CONFIG_ZRAM) += zram.o
-obj-$(CONFIG_XVMALLOC) += xvmalloc.o \ No newline at end of file
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 2a2a92d389e6..685d612a627b 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -40,7 +40,7 @@ static int zram_major;
struct zram *zram_devices;
/* Module params (documentation at end) */
-unsigned int zram_num_devices;
+static unsigned int num_devices;
static void zram_stat_inc(u32 *v)
{
@@ -135,13 +135,9 @@ static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
static void zram_free_page(struct zram *zram, size_t index)
{
- u32 clen;
- void *obj;
+ void *handle = zram->table[index].handle;
- struct page *page = zram->table[index].page;
- u32 offset = zram->table[index].offset;
-
- if (unlikely(!page)) {
+ if (unlikely(!handle)) {
/*
* No memory is allocated for zero filled pages.
* Simply clear zero page flag.
@@ -154,27 +150,24 @@ static void zram_free_page(struct zram *zram, size_t index)
}
if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- clen = PAGE_SIZE;
- __free_page(page);
+ __free_page(handle);
zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
zram_stat_dec(&zram->stats.pages_expand);
goto out;
}
- obj = kmap_atomic(page, KM_USER0) + offset;
- clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
- kunmap_atomic(obj, KM_USER0);
+ zs_free(zram->mem_pool, handle);
- xv_free(zram->mem_pool, page, offset);
- if (clen <= PAGE_SIZE / 2)
+ if (zram->table[index].size <= PAGE_SIZE / 2)
zram_stat_dec(&zram->stats.good_compress);
out:
- zram_stat64_sub(zram, &zram->stats.compr_size, clen);
+ zram_stat64_sub(zram, &zram->stats.compr_size,
+ zram->table[index].size);
zram_stat_dec(&zram->stats.pages_stored);
- zram->table[index].page = NULL;
- zram->table[index].offset = 0;
+ zram->table[index].handle = NULL;
+ zram->table[index].size = 0;
}
static void handle_zero_page(struct bio_vec *bvec)
@@ -182,9 +175,9 @@ static void handle_zero_page(struct bio_vec *bvec)
struct page *page = bvec->bv_page;
void *user_mem;
- user_mem = kmap_atomic(page, KM_USER0);
+ user_mem = kmap_atomic(page);
memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
- kunmap_atomic(user_mem, KM_USER0);
+ kunmap_atomic(user_mem);
flush_dcache_page(page);
}
@@ -195,12 +188,12 @@ static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
struct page *page = bvec->bv_page;
unsigned char *user_mem, *cmem;
- user_mem = kmap_atomic(page, KM_USER0);
- cmem = kmap_atomic(zram->table[index].page, KM_USER1);
+ user_mem = kmap_atomic(page);
+ cmem = kmap_atomic(zram->table[index].handle);
memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
- kunmap_atomic(cmem, KM_USER1);
- kunmap_atomic(user_mem, KM_USER0);
+ kunmap_atomic(cmem);
+ kunmap_atomic(user_mem);
flush_dcache_page(page);
}
@@ -227,7 +220,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
}
/* Requested page is not present in compressed area */
- if (unlikely(!zram->table[index].page)) {
+ if (unlikely(!zram->table[index].handle)) {
pr_debug("Read before write: sector=%lu, size=%u",
(ulong)(bio->bi_sector), bio->bi_size);
handle_zero_page(bvec);
@@ -249,16 +242,15 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
}
}
- user_mem = kmap_atomic(page, KM_USER0);
+ user_mem = kmap_atomic(page);
if (!is_partial_io(bvec))
uncmem = user_mem;
clen = PAGE_SIZE;
- cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
- zram->table[index].offset;
+ cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
- xv_get_object_size(cmem) - sizeof(*zheader),
+ zram->table[index].size,
uncmem, &clen);
if (is_partial_io(bvec)) {
@@ -267,8 +259,8 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
kfree(uncmem);
}
- kunmap_atomic(cmem, KM_USER1);
- kunmap_atomic(user_mem, KM_USER0);
+ zs_unmap_object(zram->mem_pool, zram->table[index].handle);
+ kunmap_atomic(user_mem);
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret != LZO_E_OK)) {
@@ -290,25 +282,24 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
unsigned char *cmem;
if (zram_test_flag(zram, index, ZRAM_ZERO) ||
- !zram->table[index].page) {
+ !zram->table[index].handle) {
memset(mem, 0, PAGE_SIZE);
return 0;
}
- cmem = kmap_atomic(zram->table[index].page, KM_USER0) +
- zram->table[index].offset;
+ cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
/* Page is stored uncompressed since it's incompressible */
if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
memcpy(mem, cmem, PAGE_SIZE);
- kunmap_atomic(cmem, KM_USER0);
+ kunmap_atomic(cmem);
return 0;
}
ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
- xv_get_object_size(cmem) - sizeof(*zheader),
+ zram->table[index].size,
mem, &clen);
- kunmap_atomic(cmem, KM_USER0);
+ zs_unmap_object(zram->mem_pool, zram->table[index].handle);
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret != LZO_E_OK)) {
@@ -326,6 +317,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
int ret;
u32 store_offset;
size_t clen;
+ void *handle;
struct zobj_header *zheader;
struct page *page, *page_store;
unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
@@ -355,11 +347,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
* System overwrites unused sectors. Free memory associated
* with this sector now.
*/
- if (zram->table[index].page ||
+ if (zram->table[index].handle ||
zram_test_flag(zram, index, ZRAM_ZERO))
zram_free_page(zram, index);
- user_mem = kmap_atomic(page, KM_USER0);
+ user_mem = kmap_atomic(page);
if (is_partial_io(bvec))
memcpy(uncmem + offset, user_mem + bvec->bv_offset,
@@ -368,7 +360,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
uncmem = user_mem;
if (page_zero_filled(uncmem)) {
- kunmap_atomic(user_mem, KM_USER0);
+ kunmap_atomic(user_mem);
if (is_partial_io(bvec))
kfree(uncmem);
zram_stat_inc(&zram->stats.pages_zero);
@@ -380,7 +372,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
zram->compress_workmem);
- kunmap_atomic(user_mem, KM_USER0);
+ kunmap_atomic(user_mem);
if (is_partial_io(bvec))
kfree(uncmem);
@@ -407,26 +399,22 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
store_offset = 0;
zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
zram_stat_inc(&zram->stats.pages_expand);
- zram->table[index].page = page_store;
- src = kmap_atomic(page, KM_USER0);
+ handle = page_store;
+ src = kmap_atomic(page);
+ cmem = kmap_atomic(page_store);
goto memstore;
}
- if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
- &zram->table[index].page, &store_offset,
- GFP_NOIO | __GFP_HIGHMEM)) {
+ handle = zs_malloc(zram->mem_pool, clen + sizeof(*zheader));
+ if (!handle) {
pr_info("Error allocating memory for compressed "
"page: %u, size=%zu\n", index, clen);
ret = -ENOMEM;
goto out;
}
+ cmem = zs_map_object(zram->mem_pool, handle);
memstore:
- zram->table[index].offset = store_offset;
-
- cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
- zram->table[index].offset;
-
#if 0
/* Back-reference needed for memory defragmentation */
if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
@@ -438,9 +426,15 @@ memstore:
memcpy(cmem, src, clen);
- kunmap_atomic(cmem, KM_USER1);
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
- kunmap_atomic(src, KM_USER0);
+ if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
+ kunmap_atomic(cmem);
+ kunmap_atomic(src);
+ } else {
+ zs_unmap_object(zram->mem_pool, handle);
+ }
+
+ zram->table[index].handle = handle;
+ zram->table[index].size = clen;
/* Update stats */
zram_stat64_add(zram, &zram->stats.compr_size, clen);
@@ -598,25 +592,20 @@ void __zram_reset_device(struct zram *zram)
/* Free all pages that are still in this zram device */
for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
- struct page *page;
- u16 offset;
-
- page = zram->table[index].page;
- offset = zram->table[index].offset;
-
- if (!page)
+ void *handle = zram->table[index].handle;
+ if (!handle)
continue;
if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
- __free_page(page);
+ __free_page(handle);
else
- xv_free(zram->mem_pool, page, offset);
+ zs_free(zram->mem_pool, handle);
}
vfree(zram->table);
zram->table = NULL;
- xv_destroy_pool(zram->mem_pool);
+ zs_destroy_pool(zram->mem_pool);
zram->mem_pool = NULL;
/* Reset stats */
@@ -674,7 +663,7 @@ int zram_init_device(struct zram *zram)
/* zram devices sort of resembles non-rotational disks */
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
- zram->mem_pool = xv_create_pool();
+ zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
if (!zram->mem_pool) {
pr_err("Error creating memory pool\n");
ret = -ENOMEM;
@@ -790,13 +779,18 @@ static void destroy_device(struct zram *zram)
blk_cleanup_queue(zram->queue);
}
+unsigned int zram_get_num_devices(void)
+{
+ return num_devices;
+}
+
static int __init zram_init(void)
{
int ret, dev_id;
- if (zram_num_devices > max_num_devices) {
+ if (num_devices > max_num_devices) {
pr_warning("Invalid value for num_devices: %u\n",
- zram_num_devices);
+ num_devices);
ret = -EINVAL;
goto out;
}
@@ -808,20 +802,20 @@ static int __init zram_init(void)
goto out;
}
- if (!zram_num_devices) {
+ if (!num_devices) {
pr_info("num_devices not specified. Using default: 1\n");
- zram_num_devices = 1;
+ num_devices = 1;
}
/* Allocate the device array and initialize each one */
- pr_info("Creating %u devices ...\n", zram_num_devices);
- zram_devices = kzalloc(zram_num_devices * sizeof(struct zram), GFP_KERNEL);
+ pr_info("Creating %u devices ...\n", num_devices);
+ zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
if (!zram_devices) {
ret = -ENOMEM;
goto unregister;
}
- for (dev_id = 0; dev_id < zram_num_devices; dev_id++) {
+ for (dev_id = 0; dev_id < num_devices; dev_id++) {
ret = create_device(&zram_devices[dev_id], dev_id);
if (ret)
goto free_devices;
@@ -844,7 +838,7 @@ static void __exit zram_exit(void)
int i;
struct zram *zram;
- for (i = 0; i < zram_num_devices; i++) {
+ for (i = 0; i < num_devices; i++) {
zram = &zram_devices[i];
destroy_device(zram);
@@ -858,8 +852,8 @@ static void __exit zram_exit(void)
pr_debug("Cleanup done!\n");
}
-module_param(zram_num_devices, uint, 0);
-MODULE_PARM_DESC(zram_num_devices, "Number of zram devices");
+module_param(num_devices, uint, 0);
+MODULE_PARM_DESC(num_devices, "Number of zram devices");
module_init(zram_init);
module_exit(zram_exit);
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index e5cd2469b6a0..fbe8ac98704c 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -18,7 +18,7 @@
#include <linux/spinlock.h>
#include <linux/mutex.h>
-#include "xvmalloc.h"
+#include "../zsmalloc/zsmalloc.h"
/*
* Some arbitrary value. This is just to catch
@@ -51,7 +51,7 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/*
* NOTE: max_zpage_size must be less than or equal to:
- * XV_MAX_ALLOC_SIZE - sizeof(struct zobj_header)
+ * ZS_MAX_ALLOC_SIZE - sizeof(struct zobj_header)
- * otherwise, xv_malloc() would always return failure.
+ * otherwise, zs_malloc() would always return failure.
*/
@@ -81,8 +81,8 @@ enum zram_pageflags {
/* Allocated for each disk page */
struct table {
- struct page *page;
- u16 offset;
+ void *handle;
+ u16 size; /* object size (excluding header) */
u8 count; /* object ref count (not yet used) */
u8 flags;
} __attribute__((aligned(4)));
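A rough cost check, not from the patch and assuming a 32-bit kernel: the
new entry packs a 4-byte handle, a 2-byte size and two 1-byte fields,
i.e. 8 bytes of table metadata per PAGE_SIZE of disk, the same footprint
as the page-pointer/offset pair it replaces.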
@@ -102,7 +102,7 @@ struct zram_stats {
};
struct zram {
- struct xv_pool *mem_pool;
+ struct zs_pool *mem_pool;
void *compress_workmem;
void *compress_buffer;
struct table *table;
@@ -124,7 +124,7 @@ struct zram {
};
extern struct zram *zram_devices;
-extern unsigned int zram_num_devices;
+unsigned int zram_get_num_devices(void);
#ifdef CONFIG_SYSFS
extern struct attribute_group zram_disk_attr_group;
#endif
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
index d521122826f6..a7f377175525 100644
--- a/drivers/staging/zram/zram_sysfs.c
+++ b/drivers/staging/zram/zram_sysfs.c
@@ -34,7 +34,7 @@ static struct zram *dev_to_zram(struct device *dev)
int i;
struct zram *zram = NULL;
- for (i = 0; i < zram_num_devices; i++) {
+ for (i = 0; i < zram_get_num_devices(); i++) {
zram = &zram_devices[i];
if (disk_to_dev(zram->disk) == dev)
break;
@@ -187,7 +187,7 @@ static ssize_t mem_used_total_show(struct device *dev,
struct zram *zram = dev_to_zram(dev);
if (zram->init_done) {
- val = xv_get_total_size_bytes(zram->mem_pool) +
+ val = zs_get_total_size_bytes(zram->mem_pool) +
((u64)(zram->stats.pages_expand) << PAGE_SHIFT);
}
diff --git a/drivers/staging/zsmalloc/Kconfig b/drivers/staging/zsmalloc/Kconfig
new file mode 100644
index 000000000000..a5ab7200626f
--- /dev/null
+++ b/drivers/staging/zsmalloc/Kconfig
@@ -0,0 +1,14 @@
+config ZSMALLOC
+ tristate "Memory allocator for compressed pages"
+ # X86 dependency is because of the use of __flush_tlb_one and set_pte
+ # in zsmalloc-main.c.
+ # TODO: convert these to portable functions
+ depends on X86
+ default n
+ help
+ zsmalloc is a slab-based memory allocator designed to store
+ compressed RAM pages. zsmalloc uses virtual memory mapping
+ in order to reduce fragmentation. However, this results in a
+ non-standard allocator interface where a handle, not a pointer, is
+ returned by an alloc(). This handle must be mapped in order to
+ access the allocated space.
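The help text above describes zsmalloc's handle-based interface. A
minimal sketch of the resulting call pattern, using the zs_* API this
patch adds (the pool name, flags, and object size are illustrative, and
error handling is trimmed to the essentials):

    #include <linux/gfp.h>
    #include <linux/string.h>
    #include "zsmalloc.h"

    static int zs_usage_sketch(void)
    {
        struct zs_pool *pool;
        void *handle, *vaddr;

        pool = zs_create_pool("example", GFP_KERNEL);
        if (!pool)
            return -ENOMEM;

        /* alloc() hands back an opaque handle, not a pointer */
        handle = zs_malloc(pool, 128);
        if (!handle) {
            zs_destroy_pool(pool);
            return -ENOMEM;
        }

        /* the handle must be mapped before the memory is touched */
        vaddr = zs_map_object(pool, handle);
        memset(vaddr, 0, 128);
        zs_unmap_object(pool, handle);

        zs_free(pool, handle);
        zs_destroy_pool(pool);
        return 0;
    }

Note that zs_map_object() uses a per-CPU mapping area (get_cpu_var), so
the caller must not sleep between map and unmap.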
diff --git a/drivers/staging/zsmalloc/Makefile b/drivers/staging/zsmalloc/Makefile
new file mode 100644
index 000000000000..b134848a590d
--- /dev/null
+++ b/drivers/staging/zsmalloc/Makefile
@@ -0,0 +1,3 @@
+zsmalloc-y := zsmalloc-main.o
+
+obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
new file mode 100644
index 000000000000..09caa4f2687e
--- /dev/null
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -0,0 +1,745 @@
+/*
+ * zsmalloc memory allocator
+ *
+ * Copyright (C) 2011 Nitin Gupta
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the license that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ */
+
+#ifdef CONFIG_ZSMALLOC_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+#include <linux/cpumask.h>
+#include <linux/cpu.h>
+#include <linux/vmalloc.h>
+
+#include "zsmalloc.h"
+#include "zsmalloc_int.h"
+
+/*
+ * A zspage's class index and fullness group
+ * are encoded in its (first)page->mapping
+ */
+#define CLASS_IDX_BITS 28
+#define FULLNESS_BITS 4
+#define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1)
+#define FULLNESS_MASK ((1 << FULLNESS_BITS) - 1)
+
+/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
+static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
+
+static int is_first_page(struct page *page)
+{
+ return test_bit(PG_private, &page->flags);
+}
+
+static int is_last_page(struct page *page)
+{
+ return test_bit(PG_private_2, &page->flags);
+}
+
+static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
+ enum fullness_group *fullness)
+{
+ unsigned long m;
+ BUG_ON(!is_first_page(page));
+
+ m = (unsigned long)page->mapping;
+ *fullness = m & FULLNESS_MASK;
+ *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
+}
+
+static void set_zspage_mapping(struct page *page, unsigned int class_idx,
+ enum fullness_group fullness)
+{
+ unsigned long m;
+ BUG_ON(!is_first_page(page));
+
+ m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
+ (fullness & FULLNESS_MASK);
+ page->mapping = (struct address_space *)m;
+}
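The pack/unpack above can be exercised in plain user-space C; the
constants mirror the defines earlier in this file, and the class index
(10) and fullness value (1) are arbitrary example inputs:

    #include <assert.h>

    #define FULLNESS_BITS  4
    #define FULLNESS_MASK  ((1 << FULLNESS_BITS) - 1)
    #define CLASS_IDX_BITS 28
    #define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1)

    int main(void)
    {
        /* pack as in set_zspage_mapping() */
        unsigned long m = ((10UL & CLASS_IDX_MASK) << FULLNESS_BITS) |
                          (1 & FULLNESS_MASK);

        /* unpack as in get_zspage_mapping() */
        assert((m & FULLNESS_MASK) == 1);
        assert(((m >> FULLNESS_BITS) & CLASS_IDX_MASK) == 10);
        return 0;
    }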
+
+static int get_size_class_index(int size)
+{
+ int idx = 0;
+
+ if (likely(size > ZS_MIN_ALLOC_SIZE))
+ idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
+ ZS_SIZE_CLASS_DELTA);
+
+ return idx;
+}
+
+static enum fullness_group get_fullness_group(struct page *page)
+{
+ int inuse, max_objects;
+ enum fullness_group fg;
+ BUG_ON(!is_first_page(page));
+
+ inuse = page->inuse;
+ max_objects = page->objects;
+
+ if (inuse == 0)
+ fg = ZS_EMPTY;
+ else if (inuse == max_objects)
+ fg = ZS_FULL;
+ else if (inuse <= max_objects / fullness_threshold_frac)
+ fg = ZS_ALMOST_EMPTY;
+ else
+ fg = ZS_ALMOST_FULL;
+
+ return fg;
+}
+
+static void insert_zspage(struct page *page, struct size_class *class,
+ enum fullness_group fullness)
+{
+ struct page **head;
+
+ BUG_ON(!is_first_page(page));
+
+ if (fullness >= _ZS_NR_FULLNESS_GROUPS)
+ return;
+
+ head = &class->fullness_list[fullness];
+ if (*head)
+ list_add_tail(&page->lru, &(*head)->lru);
+
+ *head = page;
+}
+
+static void remove_zspage(struct page *page, struct size_class *class,
+ enum fullness_group fullness)
+{
+ struct page **head;
+
+ BUG_ON(!is_first_page(page));
+
+ if (fullness >= _ZS_NR_FULLNESS_GROUPS)
+ return;
+
+ head = &class->fullness_list[fullness];
+ BUG_ON(!*head);
+ if (list_empty(&(*head)->lru))
+ *head = NULL;
+ else if (*head == page)
+ *head = (struct page *)list_entry((*head)->lru.next,
+ struct page, lru);
+
+ list_del_init(&page->lru);
+}
+
+static enum fullness_group fix_fullness_group(struct zs_pool *pool,
+ struct page *page)
+{
+ int class_idx;
+ struct size_class *class;
+ enum fullness_group currfg, newfg;
+
+ BUG_ON(!is_first_page(page));
+
+ get_zspage_mapping(page, &class_idx, &currfg);
+ newfg = get_fullness_group(page);
+ if (newfg == currfg)
+ goto out;
+
+ class = &pool->size_class[class_idx];
+ remove_zspage(page, class, currfg);
+ insert_zspage(page, class, newfg);
+ set_zspage_mapping(page, class_idx, newfg);
+
+out:
+ return newfg;
+}
+
+/*
+ * We have to decide on how many pages to link together
+ * to form a zspage for each size class. This is important
+ * to reduce wastage due to unusable space left at end of
+ * each zspage which is given as:
+ * wastage = Zp % size_class
+ * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
+ *
+ * For example, for size class of 3/8 * PAGE_SIZE, we should
+ * link together 3 PAGE_SIZE sized pages to form a zspage
+ * since then we can perfectly fit in 8 such objects.
+ */
+static int get_zspage_order(int class_size)
+{
+ int i, max_usedpc = 0;
+ /* zspage order which gives maximum used size per KB */
+ int max_usedpc_order = 1;
+
+ for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
+ int zspage_size;
+ int waste, usedpc;
+
+ zspage_size = i * PAGE_SIZE;
+ waste = zspage_size % class_size;
+ usedpc = (zspage_size - waste) * 100 / zspage_size;
+
+ if (usedpc > max_usedpc) {
+ max_usedpc = usedpc;
+ max_usedpc_order = i;
+ }
+ }
+
+ return max_usedpc_order;
+}
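A worked pass through the loop above, assuming PAGE_SIZE = 4096 and a
hypothetical class_size of 1536 (i.e. 3/8 of a page):

    i = 1: waste =  4096 % 1536 = 1024, usedpc =  75
    i = 2: waste =  8192 % 1536 =  512, usedpc =  93
    i = 3: waste = 12288 % 1536 =    0, usedpc = 100  <-- chosen order
    i = 4: waste = 16384 % 1536 = 1024, usedpc =  93

so get_zspage_order(1536) returns 3, matching the 3/8 * PAGE_SIZE example
given in the comment.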
+
+/*
+ * A single 'zspage' is composed of many system pages which are
+ * linked together using fields in struct page. This function finds
+ * the first/head page, given any component page of a zspage.
+ */
+static struct page *get_first_page(struct page *page)
+{
+ if (is_first_page(page))
+ return page;
+ else
+ return page->first_page;
+}
+
+static struct page *get_next_page(struct page *page)
+{
+ struct page *next;
+
+ if (is_last_page(page))
+ next = NULL;
+ else if (is_first_page(page))
+ next = (struct page *)page->private;
+ else
+ next = list_entry(page->lru.next, struct page, lru);
+
+ return next;
+}
+
+/* Encode <page, obj_idx> as a single handle value */
+static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
+{
+ unsigned long handle;
+
+ if (!page) {
+ BUG_ON(obj_idx);
+ return NULL;
+ }
+
+ handle = page_to_pfn(page) << OBJ_INDEX_BITS;
+ handle |= (obj_idx & OBJ_INDEX_MASK);
+
+ return (void *)handle;
+}
+
+/* Decode <page, obj_idx> pair from the given object handle */
+static void obj_handle_to_location(void *handle, struct page **page,
+ unsigned long *obj_idx)
+{
+ unsigned long hval = (unsigned long)handle;
+
+ *page = pfn_to_page(hval >> OBJ_INDEX_BITS);
+ *obj_idx = hval & OBJ_INDEX_MASK;
+}
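A quick round trip with assumed values (OBJ_INDEX_BITS = 12, as on x86
with 4K pages and MAX_PHYSMEM_BITS = BITS_PER_LONG; see zsmalloc_int.h
below): pfn = 0x1234 and obj_idx = 5 encode to

    handle = (0x1234 << 12) | 5 = 0x1234005

and decoding recovers pfn = 0x1234005 >> 12 = 0x1234 and
obj_idx = 0x1234005 & 0xfff = 5.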
+
+static unsigned long obj_idx_to_offset(struct page *page,
+ unsigned long obj_idx, int class_size)
+{
+ unsigned long off = 0;
+
+ if (!is_first_page(page))
+ off = page->index;
+
+ return off + obj_idx * class_size;
+}
+
+static void free_zspage(struct page *first_page)
+{
+ struct page *nextp, *tmp;
+
+ BUG_ON(!is_first_page(first_page));
+ BUG_ON(first_page->inuse);
+
+ nextp = (struct page *)page_private(first_page);
+
+ clear_bit(PG_private, &first_page->flags);
+ clear_bit(PG_private_2, &first_page->flags);
+ set_page_private(first_page, 0);
+ first_page->mapping = NULL;
+ first_page->freelist = NULL;
+ reset_page_mapcount(first_page);
+ __free_page(first_page);
+
+ /* zspage with only 1 system page */
+ if (!nextp)
+ return;
+
+ list_for_each_entry_safe(nextp, tmp, &nextp->lru, lru) {
+ list_del(&nextp->lru);
+ clear_bit(PG_private_2, &nextp->flags);
+ nextp->index = 0;
+ __free_page(nextp);
+ }
+}
+
+/* Initialize a newly allocated zspage */
+static void init_zspage(struct page *first_page, struct size_class *class)
+{
+ unsigned long off = 0;
+ struct page *page = first_page;
+
+ BUG_ON(!is_first_page(first_page));
+ while (page) {
+ struct page *next_page;
+ struct link_free *link;
+ unsigned int i, objs_on_page;
+
+ /*
+ * page->index stores offset of first object starting
+ * in the page. For the first page, this is always 0,
+ * so we use first_page->index (aka ->freelist) to store
+ * head of corresponding zspage's freelist.
+ */
+ if (page != first_page)
+ page->index = off;
+
+ link = (struct link_free *)kmap_atomic(page) +
+ off / sizeof(*link);
+ objs_on_page = (PAGE_SIZE - off) / class->size;
+
+ for (i = 1; i <= objs_on_page; i++) {
+ off += class->size;
+ if (off < PAGE_SIZE) {
+ link->next = obj_location_to_handle(page, i);
+ link += class->size / sizeof(*link);
+ }
+ }
+
+ /*
+ * We now come to the last (full or partial) object on this
+ * page, which must point to the first object on the next
+ * page (if present)
+ */
+ next_page = get_next_page(page);
+ link->next = obj_location_to_handle(next_page, 0);
+ kunmap_atomic(link);
+ page = next_page;
+ off = (off + class->size) % PAGE_SIZE;
+ }
+}
+
+/*
+ * Allocate a zspage for the given size class
+ */
+static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
+{
+ int i, error;
+ struct page *first_page = NULL;
+
+ /*
+ * Allocate individual pages and link them together as:
+ * 1. first page->private = first sub-page
+ * 2. all sub-pages are linked together using page->lru
+ * 3. each sub-page is linked to the first page using page->first_page
+ *
+ * For each size class, First/Head pages are linked together using
+ * page->lru. Also, we set PG_private to identify the first page
+ * (i.e. no other sub-page has this flag set) and PG_private_2 to
+ * identify the last page.
+ */
+ error = -ENOMEM;
+ for (i = 0; i < class->zspage_order; i++) {
+ struct page *page, *prev_page;
+
+ page = alloc_page(flags);
+ if (!page)
+ goto cleanup;
+
+ INIT_LIST_HEAD(&page->lru);
+ if (i == 0) { /* first page */
+ set_bit(PG_private, &page->flags);
+ set_page_private(page, 0);
+ first_page = page;
+ first_page->inuse = 0;
+ }
+ if (i == 1)
+ first_page->private = (unsigned long)page;
+ if (i >= 1)
+ page->first_page = first_page;
+ if (i >= 2)
+ list_add(&page->lru, &prev_page->lru);
+ if (i == class->zspage_order - 1) /* last page */
+ set_bit(PG_private_2, &page->flags);
+
+ prev_page = page;
+ }
+
+ init_zspage(first_page, class);
+
+ first_page->freelist = obj_location_to_handle(first_page, 0);
+ /* Maximum number of objects we can store in this zspage */
+ first_page->objects = class->zspage_order * PAGE_SIZE / class->size;
+
+ error = 0; /* Success */
+
+cleanup:
+ if (unlikely(error) && first_page) {
+ free_zspage(first_page);
+ first_page = NULL;
+ }
+
+ return first_page;
+}
+
+static struct page *find_get_zspage(struct size_class *class)
+{
+ int i;
+ struct page *page;
+
+ for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
+ page = class->fullness_list[i];
+ if (page)
+ break;
+ }
+
+ return page;
+}
+
+
+/*
+ * If this becomes a separate module, register zs_init() with
+ * module_init(), zs_exit() with module_exit(), and remove zs_initialized
+ */
+static int zs_initialized;
+
+static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
+ void *pcpu)
+{
+ int cpu = (long)pcpu;
+ struct mapping_area *area;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ area = &per_cpu(zs_map_area, cpu);
+ if (area->vm)
+ break;
+ area->vm = alloc_vm_area(2 * PAGE_SIZE, area->vm_ptes);
+ if (!area->vm)
+ return notifier_from_errno(-ENOMEM);
+ break;
+ case CPU_DEAD:
+ case CPU_UP_CANCELED:
+ area = &per_cpu(zs_map_area, cpu);
+ if (area->vm)
+ free_vm_area(area->vm);
+ area->vm = NULL;
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block zs_cpu_nb = {
+ .notifier_call = zs_cpu_notifier
+};
+
+static void zs_exit(void)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
+ unregister_cpu_notifier(&zs_cpu_nb);
+}
+
+static int zs_init(void)
+{
+ int cpu, ret;
+
+ register_cpu_notifier(&zs_cpu_nb);
+ for_each_online_cpu(cpu) {
+ ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
+ if (notifier_to_errno(ret))
+ goto fail;
+ }
+ return 0;
+fail:
+ zs_exit();
+ return notifier_to_errno(ret);
+}
+
+struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
+{
+ int i, error, ovhd_size;
+ struct zs_pool *pool;
+
+ if (!name)
+ return NULL;
+
+ ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
+ pool = kzalloc(ovhd_size, GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ for (i = 0; i < ZS_SIZE_CLASSES; i++) {
+ int size;
+ struct size_class *class;
+
+ size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
+ if (size > ZS_MAX_ALLOC_SIZE)
+ size = ZS_MAX_ALLOC_SIZE;
+
+ class = &pool->size_class[i];
+ class->size = size;
+ class->index = i;
+ spin_lock_init(&class->lock);
+ class->zspage_order = get_zspage_order(size);
+ }
+
+ /*
+ * If this becomes a separate module, register zs_init with
+ * module_init, and remove this block
+ */
+ if (!zs_initialized) {
+ error = zs_init();
+ if (error)
+ goto cleanup;
+ zs_initialized = 1;
+ }
+
+ pool->flags = flags;
+ pool->name = name;
+
+ error = 0; /* Success */
+
+cleanup:
+ if (error) {
+ zs_destroy_pool(pool);
+ pool = NULL;
+ }
+
+ return pool;
+}
+EXPORT_SYMBOL_GPL(zs_create_pool);
+
+void zs_destroy_pool(struct zs_pool *pool)
+{
+ int i;
+
+ for (i = 0; i < ZS_SIZE_CLASSES; i++) {
+ int fg;
+ struct size_class *class = &pool->size_class[i];
+
+ for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
+ if (class->fullness_list[fg]) {
+ pr_info("Freeing non-empty class with size "
+ "%db, fullness group %d\n",
+ class->size, fg);
+ }
+ }
+ }
+ kfree(pool);
+}
+EXPORT_SYMBOL_GPL(zs_destroy_pool);
+
+/**
+ * zs_malloc - Allocate block of given size from pool.
+ * @pool: pool to allocate from
+ * @size: size of block to allocate
+ *
+ * On success, returns an opaque handle identifying the allocated
+ * block; pass it to zs_map_object() to access the memory.
+ * On failure, returns NULL.
+ *
+ * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
+ */
+void *zs_malloc(struct zs_pool *pool, size_t size)
+{
+ void *obj;
+ struct link_free *link;
+ int class_idx;
+ struct size_class *class;
+
+ struct page *first_page, *m_page;
+ unsigned long m_objidx, m_offset;
+
+ if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
+ return NULL;
+
+ class_idx = get_size_class_index(size);
+ class = &pool->size_class[class_idx];
+ BUG_ON(class_idx != class->index);
+
+ spin_lock(&class->lock);
+ first_page = find_get_zspage(class);
+
+ if (!first_page) {
+ spin_unlock(&class->lock);
+ first_page = alloc_zspage(class, pool->flags);
+ if (unlikely(!first_page))
+ return NULL;
+
+ set_zspage_mapping(first_page, class->index, ZS_EMPTY);
+ spin_lock(&class->lock);
+ class->pages_allocated += class->zspage_order;
+ }
+
+ obj = first_page->freelist;
+ obj_handle_to_location(obj, &m_page, &m_objidx);
+ m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
+
+ link = (struct link_free *)kmap_atomic(m_page) +
+ m_offset / sizeof(*link);
+ first_page->freelist = link->next;
+ memset(link, POISON_INUSE, sizeof(*link));
+ kunmap_atomic(link);
+
+ first_page->inuse++;
+ /* Now move the zspage to another fullness group, if required */
+ fix_fullness_group(pool, first_page);
+ spin_unlock(&class->lock);
+
+ return obj;
+}
+EXPORT_SYMBOL_GPL(zs_malloc);
+
+void zs_free(struct zs_pool *pool, void *obj)
+{
+ struct link_free *link;
+ struct page *first_page, *f_page;
+ unsigned long f_objidx, f_offset;
+
+ int class_idx;
+ struct size_class *class;
+ enum fullness_group fullness;
+
+ if (unlikely(!obj))
+ return;
+
+ obj_handle_to_location(obj, &f_page, &f_objidx);
+ first_page = get_first_page(f_page);
+
+ get_zspage_mapping(first_page, &class_idx, &fullness);
+ class = &pool->size_class[class_idx];
+ f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);
+
+ spin_lock(&class->lock);
+
+ /* Insert this object in containing zspage's freelist */
+ link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
+ + f_offset);
+ link->next = first_page->freelist;
+ kunmap_atomic(link);
+ first_page->freelist = obj;
+
+ first_page->inuse--;
+ fullness = fix_fullness_group(pool, first_page);
+
+ if (fullness == ZS_EMPTY)
+ class->pages_allocated -= class->zspage_order;
+
+ spin_unlock(&class->lock);
+
+ if (fullness == ZS_EMPTY)
+ free_zspage(first_page);
+}
+EXPORT_SYMBOL_GPL(zs_free);
+
+void *zs_map_object(struct zs_pool *pool, void *handle)
+{
+ struct page *page;
+ unsigned long obj_idx, off;
+
+ unsigned int class_idx;
+ enum fullness_group fg;
+ struct size_class *class;
+ struct mapping_area *area;
+
+ BUG_ON(!handle);
+
+ obj_handle_to_location(handle, &page, &obj_idx);
+ get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+ class = &pool->size_class[class_idx];
+ off = obj_idx_to_offset(page, obj_idx, class->size);
+
+ area = &get_cpu_var(zs_map_area);
+ if (off + class->size <= PAGE_SIZE) {
+ /* this object is contained entirely within a page */
+ area->vm_addr = kmap_atomic(page);
+ } else {
+ /* this object spans two pages */
+ struct page *nextp;
+
+ nextp = get_next_page(page);
+ BUG_ON(!nextp);
+
+ set_pte(area->vm_ptes[0], mk_pte(page, PAGE_KERNEL));
+ set_pte(area->vm_ptes[1], mk_pte(nextp, PAGE_KERNEL));
+
+ /* We pre-allocated VM area so mapping can never fail */
+ area->vm_addr = area->vm->addr;
+ }
+
+ return area->vm_addr + off;
+}
+EXPORT_SYMBOL_GPL(zs_map_object);
+
+void zs_unmap_object(struct zs_pool *pool, void *handle)
+{
+ struct page *page;
+ unsigned long obj_idx, off;
+
+ unsigned int class_idx;
+ enum fullness_group fg;
+ struct size_class *class;
+ struct mapping_area *area;
+
+ BUG_ON(!handle);
+
+ obj_handle_to_location(handle, &page, &obj_idx);
+ get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+ class = &pool->size_class[class_idx];
+ off = obj_idx_to_offset(page, obj_idx, class->size);
+
+ area = &__get_cpu_var(zs_map_area);
+ if (off + class->size <= PAGE_SIZE) {
+ kunmap_atomic(area->vm_addr);
+ } else {
+ set_pte(area->vm_ptes[0], __pte(0));
+ set_pte(area->vm_ptes[1], __pte(0));
+ __flush_tlb_one((unsigned long)area->vm_addr);
+ __flush_tlb_one((unsigned long)area->vm_addr + PAGE_SIZE);
+ }
+ put_cpu_var(zs_map_area);
+}
+EXPORT_SYMBOL_GPL(zs_unmap_object);
+
+u64 zs_get_total_size_bytes(struct zs_pool *pool)
+{
+ int i;
+ u64 npages = 0;
+
+ for (i = 0; i < ZS_SIZE_CLASSES; i++)
+ npages += pool->size_class[i].pages_allocated;
+
+ return npages << PAGE_SHIFT;
+}
+EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);
diff --git a/drivers/staging/zsmalloc/zsmalloc.h b/drivers/staging/zsmalloc/zsmalloc.h
new file mode 100644
index 000000000000..949384ee7491
--- /dev/null
+++ b/drivers/staging/zsmalloc/zsmalloc.h
@@ -0,0 +1,31 @@
+/*
+ * zsmalloc memory allocator
+ *
+ * Copyright (C) 2011 Nitin Gupta
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the license that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ */
+
+#ifndef _ZS_MALLOC_H_
+#define _ZS_MALLOC_H_
+
+#include <linux/types.h>
+
+struct zs_pool;
+
+struct zs_pool *zs_create_pool(const char *name, gfp_t flags);
+void zs_destroy_pool(struct zs_pool *pool);
+
+void *zs_malloc(struct zs_pool *pool, size_t size);
+void zs_free(struct zs_pool *pool, void *obj);
+
+void *zs_map_object(struct zs_pool *pool, void *handle);
+void zs_unmap_object(struct zs_pool *pool, void *handle);
+
+u64 zs_get_total_size_bytes(struct zs_pool *pool);
+
+#endif
diff --git a/drivers/staging/zsmalloc/zsmalloc_int.h b/drivers/staging/zsmalloc/zsmalloc_int.h
new file mode 100644
index 000000000000..92eefc663afc
--- /dev/null
+++ b/drivers/staging/zsmalloc/zsmalloc_int.h
@@ -0,0 +1,155 @@
+/*
+ * zsmalloc memory allocator
+ *
+ * Copyright (C) 2011 Nitin Gupta
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the license that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ */
+
+#ifndef _ZS_MALLOC_INT_H_
+#define _ZS_MALLOC_INT_H_
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/*
+ * This must be a power of 2 and greater than or equal to sizeof(link_free).
+ * These two conditions ensure that any 'struct link_free' itself doesn't
+ * span more than 1 page, which avoids the complex case of mapping 2 pages
+ * simply to restore link_free pointer values.
+ */
+#define ZS_ALIGN 8
+
+/*
+ * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
+ * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
+ */
+#define ZS_MAX_ZSPAGE_ORDER 2
+#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
+
+/*
+ * Object location (<PFN>, <obj_idx>) is encoded
+ * as a single (void *) handle value.
+ *
+ * Note that object index <obj_idx> is relative to system
+ * page <PFN> it is stored in, so for each sub-page belonging
+ * to a zspage, obj_idx starts with 0.
+ *
+ * This is made more complicated by various memory models and PAE.
+ */
+
+#ifndef MAX_PHYSMEM_BITS
+#ifdef CONFIG_HIGHMEM64G
+#define MAX_PHYSMEM_BITS 36
+#else /* !CONFIG_HIGHMEM64G */
+/*
+ * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
+ * be PAGE_SHIFT
+ */
+#define MAX_PHYSMEM_BITS BITS_PER_LONG
+#endif
+#endif
+#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
+#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS)
+#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
+
+#define MAX(a, b) ((a) >= (b) ? (a) : (b))
+/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
+#define ZS_MIN_ALLOC_SIZE \
+ MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
+#define ZS_MAX_ALLOC_SIZE PAGE_SIZE
+
+/*
+ * On systems with 4K page size, this gives 255 size classes! There is a
+ * trade-off here:
+ *  - Large number of size classes is potentially wasteful as free pages are
+ *    spread across these classes
+ *  - Small number of size classes causes large internal fragmentation
+ *  - Probably it's better to use specific size classes (empirically
+ * determined). NOTE: all those class sizes must be set as multiple of
+ * ZS_ALIGN to make sure link_free itself never has to span 2 pages.
+ *
+ * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
+ * (reason above)
+ */
+#define ZS_SIZE_CLASS_DELTA 16
+#define ZS_SIZE_CLASSES ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
+ ZS_SIZE_CLASS_DELTA + 1)
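Plugging in the common case (4K pages, MAX_PHYSMEM_BITS = BITS_PER_LONG,
so OBJ_INDEX_BITS = 12) as a sanity check:

    ZS_MIN_ALLOC_SIZE = MAX(32, (4 << 12) >> 12) = MAX(32, 4) = 32
    ZS_SIZE_CLASSES   = (4096 - 32) / 16 + 1 = 255

Under CONFIG_HIGHMEM64G the minimum rises to 64 and the class count
drops to 253.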
+
+/*
+ * We do not maintain any list for completely empty or full pages
+ */
+enum fullness_group {
+ ZS_ALMOST_FULL,
+ ZS_ALMOST_EMPTY,
+ _ZS_NR_FULLNESS_GROUPS,
+
+ ZS_EMPTY,
+ ZS_FULL
+};
+
+/*
+ * We assign a page to ZS_ALMOST_EMPTY fullness group when:
+ * n <= N / f, where
+ * n = number of allocated objects
+ * N = total number of objects zspage can store
+ * f = 1/fullness_threshold_frac
+ *
+ * Similarly, we assign zspage to:
+ * ZS_ALMOST_FULL when n > N / f
+ * ZS_EMPTY when n == 0
+ * ZS_FULL when n == N
+ *
+ * (see: fix_fullness_group())
+ */
+static const int fullness_threshold_frac = 4;
+
+struct mapping_area {
+ struct vm_struct *vm;
+ pte_t *vm_ptes[2];
+ char *vm_addr;
+};
+
+struct size_class {
+ /*
+ * Size of objects stored in this class. Must be multiple
+ * of ZS_ALIGN.
+ */
+ int size;
+ unsigned int index;
+
+ /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
+ int zspage_order;
+
+ spinlock_t lock;
+
+ /* stats */
+ u64 pages_allocated;
+
+ struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
+};
+
+/*
+ * Placed within free objects to form a singly linked list.
+ * For every zspage, first_page->freelist gives head of this list.
+ *
+ * This must be power of 2 and less than or equal to ZS_ALIGN
+ */
+struct link_free {
+ /* Handle of next free chunk (encodes <PFN, obj_idx>) */
+ void *next;
+};
+
+struct zs_pool {
+ struct size_class size_class[ZS_SIZE_CLASSES];
+
+ gfp_t flags; /* allocation flags used when growing pool */
+ const char *name;
+};
+
+#endif