-rw-r--r--  Documentation/ABI/stable/sysfs-bus-w1 | 11
-rw-r--r--  Documentation/ABI/stable/sysfs-driver-w1_ds28ea00 | 6
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x | 450
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-mei | 16
-rw-r--r--  Documentation/devicetree/bindings/arm/coresight.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/mfd/arizona.txt | 6
-rw-r--r--  Documentation/mic/mic_overview.txt | 28
-rwxr-xr-x  Documentation/mic/mpssd/mpss | 24
-rw-r--r--  Documentation/mic/scif_overview.txt | 98
-rw-r--r--  Documentation/w1/slaves/w1_therm | 11
-rw-r--r--  Documentation/w1/w1.generic | 30
-rw-r--r--  MAINTAINERS | 14
-rw-r--r--  arch/um/os-Linux/drivers/ethertap_user.c | 2
-rw-r--r--  arch/x86/Kconfig | 25
-rw-r--r--  drivers/block/paride/paride.c | 57
-rw-r--r--  drivers/block/paride/paride.h | 2
-rw-r--r--  drivers/block/paride/pcd.c | 9
-rw-r--r--  drivers/block/paride/pd.c | 12
-rw-r--r--  drivers/block/paride/pf.c | 7
-rw-r--r--  drivers/block/paride/pg.c | 8
-rw-r--r--  drivers/block/paride/pt.c | 8
-rw-r--r--  drivers/char/Kconfig | 8
-rw-r--r--  drivers/char/Makefile | 2
-rw-r--r--  drivers/char/misc.c | 23
-rw-r--r--  drivers/char/msm_smd_pkt.c | 465
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 2
-rw-r--r--  drivers/char/snsc.c | 4
-rw-r--r--  drivers/char/virtio_console.c | 4
-rw-r--r--  drivers/char/xilinx_hwicap/buffer_icap.c | 6
-rw-r--r--  drivers/char/xillybus/Kconfig | 2
-rw-r--r--  drivers/extcon/Kconfig | 25
-rw-r--r--  drivers/extcon/Makefile | 1
-rw-r--r--  drivers/extcon/extcon-adc-jack.c | 15
-rw-r--r--  drivers/extcon/extcon-arizona.c | 88
-rw-r--r--  drivers/extcon/extcon-axp288.c | 381
-rw-r--r--  drivers/extcon/extcon-gpio.c | 1
-rw-r--r--  drivers/extcon/extcon-max14577.c | 60
-rw-r--r--  drivers/extcon/extcon-max77693.c | 140
-rw-r--r--  drivers/extcon/extcon-max77843.c | 89
-rw-r--r--  drivers/extcon/extcon-max8997.c | 64
-rw-r--r--  drivers/extcon/extcon-palmas.c | 23
-rw-r--r--  drivers/extcon/extcon-rt8973a.c | 55
-rw-r--r--  drivers/extcon/extcon-sm5502.c | 33
-rw-r--r--  drivers/extcon/extcon-usb-gpio.c | 35
-rw-r--r--  drivers/extcon/extcon.c | 316
-rw-r--r--  drivers/hv/Makefile | 2
-rw-r--r--  drivers/hv/channel.c | 27
-rw-r--r--  drivers/hv/channel_mgmt.c | 156
-rw-r--r--  drivers/hv/connection.c | 13
-rw-r--r--  drivers/hv/hv_balloon.c | 4
-rw-r--r--  drivers/hv/hv_fcopy.c | 287
-rw-r--r--  drivers/hv/hv_kvp.c | 192
-rw-r--r--  drivers/hv/hv_snapshot.c | 168
-rw-r--r--  drivers/hv/hv_utils_transport.c | 276
-rw-r--r--  drivers/hv/hv_utils_transport.h | 51
-rw-r--r--  drivers/hv/hyperv_vmbus.h | 31
-rw-r--r--  drivers/hv/vmbus_drv.c | 21
-rw-r--r--  drivers/hwmon/Kconfig | 11
-rw-r--r--  drivers/hwmon/Makefile | 1
-rw-r--r--  drivers/hwmon/dell-smm-hwmon.c (renamed from drivers/char/i8k.c) | 156
-rw-r--r--  drivers/hwtracing/coresight/Kconfig | 19
-rw-r--r--  drivers/hwtracing/coresight/Makefile | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-etb10.c | 79
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm.h | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x.c | 112
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c | 2702
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.h | 391
-rw-r--r--  drivers/hwtracing/coresight/coresight-funnel.c | 61
-rw-r--r--  drivers/hwtracing/coresight/coresight-replicator-qcom.c | 215
-rw-r--r--  drivers/hwtracing/coresight/coresight-replicator.c | 71
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.c | 31
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpiu.c | 60
-rw-r--r--  drivers/hwtracing/coresight/of_coresight.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-parport.c | 38
-rw-r--r--  drivers/misc/Kconfig | 1
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/carma/Kconfig | 15
-rw-r--r--  drivers/misc/carma/Makefile | 2
-rw-r--r--  drivers/misc/carma/carma-fpga-program.c | 1182
-rw-r--r--  drivers/misc/carma/carma-fpga.c | 1507
-rw-r--r--  drivers/misc/mei/amthif.c | 28
-rw-r--r--  drivers/misc/mei/bus.c | 150
-rw-r--r--  drivers/misc/mei/client.c | 473
-rw-r--r--  drivers/misc/mei/client.h | 114
-rw-r--r--  drivers/misc/mei/debugfs.c | 15
-rw-r--r--  drivers/misc/mei/hbm.c | 16
-rw-r--r--  drivers/misc/mei/hw-me.c | 59
-rw-r--r--  drivers/misc/mei/hw-txe.c | 33
-rw-r--r--  drivers/misc/mei/init.c | 8
-rw-r--r--  drivers/misc/mei/interrupt.c | 95
-rw-r--r--  drivers/misc/mei/main.c | 57
-rw-r--r--  drivers/misc/mei/mei_dev.h | 102
-rw-r--r--  drivers/misc/mei/nfc.c | 223
-rw-r--r--  drivers/misc/mei/pci-txe.c | 2
-rw-r--r--  drivers/misc/mei/wd.c | 22
-rw-r--r--  drivers/misc/mic/Kconfig | 40
-rw-r--r--  drivers/misc/mic/Makefile | 3
-rw-r--r--  drivers/misc/mic/bus/Makefile | 1
-rw-r--r--  drivers/misc/mic/bus/scif_bus.c | 210
-rw-r--r--  drivers/misc/mic/bus/scif_bus.h | 129
-rw-r--r--  drivers/misc/mic/card/mic_device.c | 132
-rw-r--r--  drivers/misc/mic/card/mic_device.h | 11
-rw-r--r--  drivers/misc/mic/card/mic_x100.c | 61
-rw-r--r--  drivers/misc/mic/card/mic_x100.h | 1
-rw-r--r--  drivers/misc/mic/common/mic_dev.h | 3
-rw-r--r--  drivers/misc/mic/host/mic_boot.c | 264
-rw-r--r--  drivers/misc/mic/host/mic_debugfs.c | 13
-rw-r--r--  drivers/misc/mic/host/mic_device.h | 11
-rw-r--r--  drivers/misc/mic/host/mic_intr.h | 3
-rw-r--r--  drivers/misc/mic/host/mic_main.c | 6
-rw-r--r--  drivers/misc/mic/host/mic_smpt.c | 7
-rw-r--r--  drivers/misc/mic/host/mic_smpt.h | 1
-rw-r--r--  drivers/misc/mic/host/mic_virtio.c | 6
-rw-r--r--  drivers/misc/mic/host/mic_x100.c | 3
-rw-r--r--  drivers/misc/mic/scif/Makefile | 15
-rw-r--r--  drivers/misc/mic/scif/scif_api.c | 1276
-rw-r--r--  drivers/misc/mic/scif/scif_debugfs.c | 85
-rw-r--r--  drivers/misc/mic/scif/scif_epd.c | 353
-rw-r--r--  drivers/misc/mic/scif/scif_epd.h | 160
-rw-r--r--  drivers/misc/mic/scif/scif_fd.c | 303
-rw-r--r--  drivers/misc/mic/scif/scif_main.c | 388
-rw-r--r--  drivers/misc/mic/scif/scif_main.h | 254
-rw-r--r--  drivers/misc/mic/scif/scif_map.h | 113
-rw-r--r--  drivers/misc/mic/scif/scif_nm.c | 237
-rw-r--r--  drivers/misc/mic/scif/scif_nodeqp.c | 1312
-rw-r--r--  drivers/misc/mic/scif/scif_nodeqp.h | 183
-rw-r--r--  drivers/misc/mic/scif/scif_peer_bus.c | 124
-rw-r--r--  drivers/misc/mic/scif/scif_peer_bus.h | 65
-rw-r--r--  drivers/misc/mic/scif/scif_ports.c | 124
-rw-r--r--  drivers/misc/mic/scif/scif_rb.c | 249
-rw-r--r--  drivers/misc/mic/scif/scif_rb.h | 100
-rw-r--r--  drivers/misc/sram.c | 137
-rw-r--r--  drivers/misc/ti-st/st_kim.c | 3
-rw-r--r--  drivers/nfc/mei_phy.c | 295
-rw-r--r--  drivers/nfc/mei_phy.h | 38
-rw-r--r--  drivers/nfc/microread/mei.c | 2
-rw-r--r--  drivers/nfc/pn544/mei.c | 2
-rw-r--r--  drivers/parport/parport_pc.c | 4
-rw-r--r--  drivers/parport/procfs.c | 15
-rw-r--r--  drivers/parport/share.c | 373
-rw-r--r--  drivers/pcmcia/cs.c | 10
-rw-r--r--  drivers/platform/goldfish/goldfish_pipe.c | 2
-rw-r--r--  drivers/spmi/Kconfig | 1
-rw-r--r--  drivers/staging/panel/panel.c | 14
-rw-r--r--  drivers/uio/Kconfig | 2
-rw-r--r--  drivers/uio/uio.c | 3
-rw-r--r--  drivers/uio/uio_pruss.c | 1
-rw-r--r--  drivers/usb/phy/phy-tahvo.c | 9
-rw-r--r--  drivers/w1/masters/ds2482.c | 1
-rw-r--r--  drivers/w1/slaves/w1_therm.c | 162
-rw-r--r--  drivers/w1/w1.c | 17
-rw-r--r--  include/dt-bindings/mfd/arizona.h | 4
-rw-r--r--  include/linux/extcon.h | 134
-rw-r--r--  include/linux/extcon/extcon-adc-jack.h | 5
-rw-r--r--  include/linux/hyperv.h | 48
-rw-r--r--  include/linux/mei_cl_bus.h | 38
-rw-r--r--  include/linux/mfd/arizona/pdata.h | 3
-rw-r--r--  include/linux/mfd/axp20x.h | 5
-rw-r--r--  include/linux/mod_devicetable.h | 13
-rw-r--r--  include/linux/parport.h | 43
-rw-r--r--  include/linux/scif.h | 993
-rw-r--r--  include/uapi/linux/Kbuild | 1
-rw-r--r--  include/uapi/linux/hyperv.h | 8
-rw-r--r--  include/uapi/linux/mic_common.h | 12
-rw-r--r--  include/uapi/linux/scif_ioctl.h | 130
-rw-r--r--  lib/lz4/lz4_decompress.c | 12
-rwxr-xr-x  scripts/checkkconfigsymbols.py | 34
-rw-r--r--  scripts/mod/devicetable-offsets.c | 1
-rw-r--r--  scripts/mod/file2alias.c | 21
-rw-r--r--  tools/hv/hv_fcopy_daemon.c | 15
-rw-r--r--  tools/hv/hv_kvp_daemon.c | 166
-rw-r--r--  tools/hv/hv_vss_daemon.c | 149
172 files changed, 15485 insertions, 5653 deletions
diff --git a/Documentation/ABI/stable/sysfs-bus-w1 b/Documentation/ABI/stable/sysfs-bus-w1
new file mode 100644
index 000000000000..140d85b4ae92
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-bus-w1
@@ -0,0 +1,11 @@
+What: /sys/bus/w1/devices/.../w1_master_timeout_us
+Date: April 2015
+Contact: Dmitry Khromov <dk@icelogic.net>
+Description: Bus scanning interval, microseconds component.
+		Some 1-Wire devices commonly associated with physical access
+		control systems are attached, or generate presence, for as
+		little as 100 ms - hence scan intervals of tens to hundreds
+		of milliseconds are required.
+		See Documentation/w1/w1.generic for detailed information.
+Users:		any user space application which wants to know the bus
+		scanning interval
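
From user space the interval component is a plain sysfs read; a minimal C
sketch (the master name "w1_bus_master1" is an assumption, list
/sys/bus/w1/devices to find the master actually present on the system):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/w1/devices/w1_bus_master1/w1_master_timeout_us",
			"r");
	long us;

	if (!f)
		return 1;
	/* The attribute holds the microseconds component of the interval. */
	if (fscanf(f, "%ld", &us) == 1)
		printf("scan interval, microseconds component: %ld\n", us);
	fclose(f);
	return 0;
}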
diff --git a/Documentation/ABI/stable/sysfs-driver-w1_ds28ea00 b/Documentation/ABI/stable/sysfs-driver-w1_ds28ea00
new file mode 100644
index 000000000000..e928def14f28
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-driver-w1_ds28ea00
@@ -0,0 +1,6 @@
+What: /sys/bus/w1/devices/.../w1_seq
+Date: Apr 2015
+Contact: Matt Campbell <mattrcampbell@gmail.com>
+Description:	Support for the DS28EA00 chain sequence function.
+		See Documentation/w1/slaves/w1_therm for detailed information.
+Users: any user space application which wants to communicate with DS28EA00
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x
new file mode 100644
index 000000000000..2fe2e3dae487
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x
@@ -0,0 +1,450 @@
+What: /sys/bus/coresight/devices/<memory_map>.etm/enable_source
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:	(RW) Enable/disable tracing on this specific trace entity.
+		Enabling a source implies the source has been configured
+		properly and a sink has been identified for it. The path
+ of coresight components linking the source to the sink is
+ configured and managed automatically by the coresight framework.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/cpu
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) The CPU this tracing entity is associated with.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/nr_pe_cmp
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Indicates the number of PE comparator inputs that are
+ available for tracing.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/nr_addr_cmp
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Indicates the number of address comparator pairs that are
+ available for tracing.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/nr_cntr
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Indicates the number of counters that are available for
+ tracing.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/nr_ext_inp
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Indicates how many external inputs are implemented.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/numcidc
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Indicates the number of Context ID comparators that are
+ available for tracing.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/numvmidc
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Indicates the number of VMID comparators that are available
+ for tracing.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/nrseqstate
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Indicates the number of sequencer states that are
+ implemented.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/nr_resource
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Indicates the number of resource selection pairs that are
+ available for tracing.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/nr_ss_cmp
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Indicates the number of single-shot comparator controls that
+ are available for tracing.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/reset
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:	(W) Cancels all configuration on a trace unit and sets it back
+ to its boot configuration.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/mode
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Controls various modes supported by this ETM, for example
+ P0 instruction tracing, branch broadcast, cycle counting and
+ context ID tracing.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/pe
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Controls which PE to trace.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/event
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Controls the tracing of arbitrary events from bank 0 to 3.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/event_instren
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Controls the behavior of the events in bank 0 to 3.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/event_ts
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Controls the insertion of global timestamps in the trace
+ streams.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/syncfreq
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Controls how often trace synchronization requests occur.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/cyc_threshold
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Sets the threshold value for cycle counting.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/bb_ctrl
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Controls which regions in the memory map are enabled to
+ use branch broadcasting.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/event_vinst
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Controls instruction trace filtering.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/s_exlevel_vinst
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) In Secure state, each bit controls whether instruction
+ tracing is enabled for the corresponding exception level.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/ns_exlevel_vinst
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) In non-secure state, each bit controls whether instruction
+ tracing is enabled for the corresponding exception level.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/addr_idx
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Select which address comparator or pair (of comparators) to
+ work with.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/addr_instdatatype
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Controls what type of comparison the trace unit performs.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/addr_single
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:	(RW) Used to set up single address comparator values.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/addr_range
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:	(RW) Used to set up address range comparator values.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/seq_idx
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:	(RW) Select which sequencer to work with.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/seq_state
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Use this to set, or read, the sequencer state.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/seq_event
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:	(RW) Moves the sequencer to a specific state.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/seq_reset_event
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Moves the sequencer to state 0 when a programmed event
+ occurs.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/cntr_idx
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Select which counter unit to work with.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/cntrldvr
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) This sets or returns the reload count value of the
+ specific counter.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/cntr_val
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) This sets or returns the current count value of the
+ specific counter.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/cntr_ctrl
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Controls the operation of the selected counter.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/res_idx
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Select which resource selection unit to work with.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/res_ctrl
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Controls the selection of the resources in the trace unit.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/ctxid_idx
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Select which context ID comparator to work with.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/ctxid_val
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Get/Set the context ID comparator value to trigger on.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/ctxid_masks
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Mask for all 8 context ID comparator value
+ registers (if implemented).
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/vmid_idx
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Select which virtual machine ID comparator to work with.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/vmid_val
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Get/Set the virtual machine ID comparator value to
+ trigger on.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/vmid_masks
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Mask for all 8 virtual machine ID comparator value
+ registers (if implemented).
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcoslsr
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Print the content of the OS Lock Status Register (0x304).
+		The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcpdcr
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Print the content of the Power Down Control Register
+ (0x310). The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcpdsr
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Print the content of the Power Down Status Register
+ (0x314). The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trclsr
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Print the content of the SW Lock Status Register
+ (0xFB4). The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcauthstatus
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Print the content of the Authentication Status Register
+ (0xFB8). The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcdevid
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Print the content of the Device ID Register
+ (0xFC8). The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcdevtype
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Print the content of the Device Type Register
+ (0xFCC). The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcpidr0
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Print the content of the Peripheral ID0 Register
+ (0xFE0). The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcpidr1
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Print the content of the Peripheral ID1 Register
+ (0xFE4). The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcpidr2
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Print the content of the Peripheral ID2 Register
+ (0xFE8). The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcpidr3
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Print the content of the Peripheral ID3 Register
+ (0xFEC). The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr0
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Returns the tracing capabilities of the trace unit (0x1E0).
+ The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr1
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Returns the tracing capabilities of the trace unit (0x1E4).
+ The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr2
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Returns the maximum size of the data value, data address,
+		VMID, context ID and instruction address in the trace unit
+ (0x1E8). The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr3
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Returns the value associated with various resources
+ available to the trace unit. See the Trace Macrocell
+		architecture specification for more details (0x1EC).
+ The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr4
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Returns how many resources the trace unit supports (0x1F0).
+ The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr5
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Returns how many resources the trace unit supports (0x1F4).
+ The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr8
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Returns the maximum speculation depth of the instruction
+		trace stream (0x180). The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr9
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Returns the number of P0 right-hand keys that the trace unit
+ can use (0x184). The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr10
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Returns the number of P1 right-hand keys that the trace unit
+ can use (0x188). The value is taken directly from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr11
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Returns the number of special P1 right-hand keys that the
+ trace unit can use (0x18C). The value is taken directly from
+ the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr12
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Returns the number of conditional P1 right-hand keys that
+ the trace unit can use (0x190). The value is taken directly
+ from the HW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr13
+Date: April 2015
+KernelVersion: 4.01
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Returns the number of special conditional P1 right-hand keys
+ that the trace unit can use (0x194). The value is taken
+ directly from the HW.
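
The attributes above are driven from user space with ordinary file writes; a
hedged C sketch of turning an already-configured source on (the device name
"20010000.etm" is a placeholder, real names are the memory-mapped address of
the component on a given SoC):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/bus/coresight/devices/20010000.etm/enable_source",
		      O_WRONLY);

	if (fd < 0)
		return 1;
	/* Writing 1 enables the source; the coresight framework then builds
	 * and manages the path from this source to the selected sink. */
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}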
diff --git a/Documentation/ABI/testing/sysfs-bus-mei b/Documentation/ABI/testing/sysfs-bus-mei
index 2066f0bbd453..20e4d1638bac 100644
--- a/Documentation/ABI/testing/sysfs-bus-mei
+++ b/Documentation/ABI/testing/sysfs-bus-mei
@@ -4,4 +4,18 @@ KernelVersion: 3.10
Contact: Samuel Ortiz <sameo@linux.intel.com>
linux-mei@linux.intel.com
Description: Stores the same MODALIAS value emitted by uevent
- Format: mei:<mei device name>
+ Format: mei:<mei device name>:<device uuid>:
+
+What: /sys/bus/mei/devices/.../name
+Date: May 2015
+KernelVersion: 4.2
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Stores mei client device name
+ Format: string
+
+What: /sys/bus/mei/devices/.../uuid
+Date: May 2015
+KernelVersion: 4.2
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Stores mei client device uuid
+ Format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index 88602b75418e..65a6db2271a2 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -17,15 +17,19 @@ its hardware characteristcs.
- "arm,coresight-tmc", "arm,primecell";
- "arm,coresight-funnel", "arm,primecell";
- "arm,coresight-etm3x", "arm,primecell";
+ - "qcom,coresight-replicator1x", "arm,primecell";
* reg: physical base address and length of the register
set(s) of the component.
- * clocks: the clock associated to this component.
+	 * clocks: the clocks associated with this component.
- * clock-names: the name of the clock as referenced by the code.
- Since we are using the AMBA framework, the name should be
- "apb_pclk".
+	 * clock-names: the names of the clocks referenced by the code.
+ Since we are using the AMBA framework, the name of the clock
+ providing the interconnect should be "apb_pclk", and some
+ coresight blocks also have an additional clock "atclk", which
+ clocks the core of that coresight component. The latter clock
+ is optional.
* port or ports: The representation of the component's port
layout using the generic DT graph presentation found in
diff --git a/Documentation/devicetree/bindings/mfd/arizona.txt b/Documentation/devicetree/bindings/mfd/arizona.txt
index 64fa3b2de6cd..a8fee60dc20d 100644
--- a/Documentation/devicetree/bindings/mfd/arizona.txt
+++ b/Documentation/devicetree/bindings/mfd/arizona.txt
@@ -67,6 +67,12 @@ Optional properties:
present, the number of values should be less than or equal to the
number of inputs, unspecified inputs will use the chip default.
+ - wlf,hpdet-channel : Headphone detection channel.
+ ARIZONA_ACCDET_MODE_HPL or 1 - Headphone detect mode is set to HPDETL
+ ARIZONA_ACCDET_MODE_HPR or 2 - Headphone detect mode is set to HPDETR
+    If this property is not specified, or the value is not recognised,
+    then headphone detection mode is set to HPDETL.
+
- DCVDD-supply, MICVDD-supply : Power supplies, only need to be specified if
they are being externally supplied. As covered in
Documentation/devicetree/bindings/regulator/regulator.txt
diff --git a/Documentation/mic/mic_overview.txt b/Documentation/mic/mic_overview.txt
index 77c541802ad9..1a2f2c8ec59e 100644
--- a/Documentation/mic/mic_overview.txt
+++ b/Documentation/mic/mic_overview.txt
@@ -24,6 +24,10 @@ a virtual bus called mic bus is created and virtual dma devices are
created on it by the host/card drivers. On host the channels are private
and used only by the host driver to transfer data for the virtio devices.
+The Symmetric Communication Interface (SCIF, pronounced "skiff") is a
+low-level communications API across PCIe, currently implemented for MIC.
+More details are available in scif_overview.txt.
+
Here is a block diagram of the various components described above. The
virtio backends are situated on the host rather than the card given better
single threaded performance for the host compared to MIC, the ability of
@@ -47,18 +51,18 @@ the fact that the virtio block storage backend can only be on the host.
| | | Virtio over PCIe IOCTLs |
| | +--------------------------+
+-----------+ | | | +-----------+
-| MIC DMA | | | | | MIC DMA |
-| Driver | | | | | Driver |
-+-----------+ | | | +-----------+
- | | | | |
-+---------------+ | | | +----------------+
-|MIC virtual Bus| | | | |MIC virtual Bus |
-+---------------+ | | | +----------------+
- | | | | |
- | +--------------+ | +---------------+ |
- | |Intel MIC | | |Intel MIC | |
- +---|Card Driver | | |Host Driver | |
- +--------------+ | +---------------+-----+
+| MIC DMA | | +----------+ | +-----------+ | | MIC DMA |
+| Driver | | | SCIF | | | SCIF | | | Driver |
++-----------+ | +----------+ | +-----------+ | +-----------+
+ | | | | | | |
++---------------+ | +-----+-----+ | +-----+-----+ | +---------------+
+|MIC virtual Bus| | |SCIF HW Bus| | |SCIF HW BUS| | |MIC virtual Bus|
++---------------+ | +-----------+ | +-----+-----+ | +---------------+
+ | | | | | | |
+ | +--------------+ | | | +---------------+ |
+ | |Intel MIC | | | | |Intel MIC | |
+ +---|Card Driver +----+ | | |Host Driver | |
+ +--------------+ | +----+---------------+-----+
| | |
+-------------------------------------------------------------+
| |
diff --git a/Documentation/mic/mpssd/mpss b/Documentation/mic/mpssd/mpss
index cacbdb0aefb9..582aad4811ae 100755
--- a/Documentation/mic/mpssd/mpss
+++ b/Documentation/mic/mpssd/mpss
@@ -35,6 +35,7 @@
exec=/usr/sbin/mpssd
sysfs="/sys/class/mic"
+mic_modules="mic_host mic_x100_dma scif"
start()
{
@@ -48,18 +49,15 @@ start()
fi
echo -e $"Starting MPSS Stack"
- echo -e $"Loading MIC_X100_DMA & MIC_HOST Modules"
+ echo -e $"Loading MIC drivers:" $mic_modules
- for f in "mic_host" "mic_x100_dma"
- do
- modprobe $f
- RETVAL=$?
- if [ $RETVAL -ne 0 ]; then
- failure
- echo
- return $RETVAL
- fi
- done
+ modprobe -a $mic_modules
+ RETVAL=$?
+ if [ $RETVAL -ne 0 ]; then
+ failure
+ echo
+ return $RETVAL
+ fi
# Start the daemon
echo -n $"Starting MPSSD "
@@ -170,8 +168,8 @@ unload()
stop
sleep 5
- echo -n $"Removing MIC_HOST & MIC_X100_DMA Modules: "
- modprobe -r mic_host mic_x100_dma
+ echo -n $"Removing MIC drivers:" $mic_modules
+ modprobe -r $mic_modules
RETVAL=$?
[ $RETVAL -ne 0 ] && failure || success
echo
diff --git a/Documentation/mic/scif_overview.txt b/Documentation/mic/scif_overview.txt
new file mode 100644
index 000000000000..0a280d986731
--- /dev/null
+++ b/Documentation/mic/scif_overview.txt
@@ -0,0 +1,98 @@
+The Symmetric Communication Interface (SCIF, pronounced "skiff") is a
+low-level communications API across PCIe, currently implemented for MIC.
+SCIF provides inter-node communication within a single host platform, where
+a node is a MIC coprocessor or a Xeon-based host. SCIF abstracts the details
+of communicating over the PCIe bus while providing an API that is symmetric
+across all the nodes in the PCIe network. An important design objective for
+SCIF is to deliver the maximum possible performance given the communication
+abilities of the hardware. SCIF has been used to implement an offload
+compiler runtime and OFED support for MPI implementations for MIC coprocessors.
+
+==== SCIF API Components ====
+The SCIF API has the following parts:
+1. Connection establishment using a client server model
+2. Byte stream messaging intended for short messages
+3. Node enumeration to determine online nodes
+4. Poll semantics for detection of incoming connections and messages
+5. Memory registration to pin down pages
+6. Remote memory mapping for low latency CPU accesses via mmap
+7. Remote DMA (RDMA) for high bandwidth DMA transfers
+8. Fence APIs for RDMA synchronization
+
+SCIF exposes the notion of a connection which can be used by peer processes on
+nodes in a SCIF PCIe "network" to share memory "windows" and to communicate. A
+process in a SCIF node initiates a SCIF connection to a peer process on a
+different node via a SCIF "endpoint". SCIF endpoints support messaging APIs
+which are similar to connection oriented socket APIs. Connected SCIF endpoints
+can also register local memory which is followed by data transfer using either
+DMA, CPU copies or remote memory mapping via mmap. SCIF supports both user and
+kernel mode clients which are functionally equivalent.
+
+==== SCIF Performance for MIC ====
+DMA bandwidth comparison between the TCP (over ethernet over PCIe) stack versus
+SCIF shows the performance advantages of SCIF for HPC applications and runtimes.
+
+ Comparison of TCP and SCIF based BW
+
+ Throughput (GB/sec)
+ 8 + PCIe Bandwidth ******
+ + TCP ######
+ 7 + ************************************** SCIF %%%%%%
+ | %%%%%%%%%%%%%%%%%%%
+ 6 + %%%%
+ | %%
+ | %%%
+ 5 + %%
+ | %%
+ 4 + %%
+ | %%
+ 3 + %%
+ | %
+ 2 + %%
+ | %%
+ | %
+ 1 +
+ + ######################################
+ 0 +++---+++--+--+-+--+--+-++-+--+-++-+--+-++-+-
+ 1 10 100 1000 10000 100000
+ Transfer Size (KBytes)
+
+SCIF allows memory sharing via mmap(..) between processes on different PCIe
+nodes and thus provides bare-metal PCIe latency. The round trip SCIF mmap
+latency from the host to an x100 MIC for an 8 byte message is 0.44 usecs.
+
+SCIF has a user space library which is a thin IOCTL wrapper providing a user
+space API similar to the kernel API in scif.h. The SCIF user space library
+is distributed at https://software.intel.com/en-us/mic-developer
+
+Here is some pseudo code for an example of how two applications on two PCIe
+nodes would typically use the SCIF API:
+
+Process A (on node A) Process B (on node B)
+
+/* get online node information */
+scif_get_node_ids(..) scif_get_node_ids(..)
+scif_open(..) scif_open(..)
+scif_bind(..) scif_bind(..)
+scif_listen(..)
+scif_accept(..) scif_connect(..)
+/* SCIF connection established */
+
+/* Send and receive short messages */
+scif_send(..)/scif_recv(..) scif_send(..)/scif_recv(..)
+
+/* Register memory */
+scif_register(..) scif_register(..)
+
+/* RDMA */
+scif_readfrom(..)/scif_writeto(..) scif_readfrom(..)/scif_writeto(..)
+
+/* Fence DMAs */
+scif_fence_signal(..) scif_fence_signal(..)
+
+mmap(..) mmap(..)
+
+/* Access remote registered memory */
+
+/* Close the endpoints */
+scif_close(..) scif_close(..)
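
A hedged kernel-mode C sketch of process B's side of the flow above, using
the API declared in include/linux/scif.h (node 0 and port 2000 are
illustrative values, not reserved numbers; error handling is abbreviated):

#include <linux/errno.h>
#include <linux/scif.h>

/* Minimal connecting-side sketch: open an endpoint, connect to a peer
 * listening on dst, exchange one short message, close. */
static int scif_client_demo(void)
{
	struct scif_port_id dst = { .node = 0, .port = 2000 };
	scif_epd_t epd;
	char msg[8] = "ping";
	int ret;

	epd = scif_open();
	if (!epd)
		return -ENOMEM;

	ret = scif_connect(epd, &dst);	/* returns port number on success */
	if (ret < 0)
		goto out;

	ret = scif_send(epd, msg, sizeof(msg), SCIF_SEND_BLOCK);
	if (ret < 0)
		goto out;

	ret = scif_recv(epd, msg, sizeof(msg), SCIF_RECV_BLOCK);
out:
	scif_close(epd);
	return ret < 0 ? ret : 0;
}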
diff --git a/Documentation/w1/slaves/w1_therm b/Documentation/w1/slaves/w1_therm
index cc62a95e4776..13411fe52f7f 100644
--- a/Documentation/w1/slaves/w1_therm
+++ b/Documentation/w1/slaves/w1_therm
@@ -11,12 +11,14 @@ Author: Evgeniy Polyakov <johnpol@2ka.mipt.ru>
Description
-----------
-w1_therm provides basic temperature conversion for ds18*20 devices.
+w1_therm provides basic temperature conversion for ds18*20 devices, and the
+ds28ea00 device.
supported family codes:
W1_THERM_DS18S20 0x10
W1_THERM_DS1822 0x22
W1_THERM_DS18B20 0x28
W1_THERM_DS1825 0x3B
+W1_THERM_DS28EA00 0x42
Support is provided through the sysfs w1_slave file. Each open and
read sequence will initiate a temperature conversion then provide two
@@ -48,3 +50,10 @@ resistor). The DS18b20 temperature sensor specification lists a
maximum current draw of 1.5mA and that a 5k pullup resistor is not
sufficient. The strong pullup is designed to provide the additional
current required.
+
+The DS28EA00 provides an additional two pins for implementing a sequence
+detection algorithm. This feature allows you to determine the physical
+location of the chip in the 1-wire bus without needing pre-existing
+knowledge of the bus ordering. Support is provided through the sysfs
+w1_seq file. The file will contain a single line with an integer value
+representing the device index in the bus starting at 0.
diff --git a/Documentation/w1/w1.generic b/Documentation/w1/w1.generic
index b2033c64c7da..b3ffaf8cfab2 100644
--- a/Documentation/w1/w1.generic
+++ b/Documentation/w1/w1.generic
@@ -76,21 +76,24 @@ See struct w1_bus_master definition in w1.h for details.
w1 master sysfs interface
------------------------------------------------------------------
-<xx-xxxxxxxxxxxxx> - a directory for a found device. The format is family-serial
+<xx-xxxxxxxxxxxxx> - A directory for a found device. The format is family-serial
bus - (standard) symlink to the w1 bus
driver - (standard) symlink to the w1 driver
-w1_master_add - Manually register a slave device
-w1_master_attempts - the number of times a search was attempted
+w1_master_add - (rw) manually register a slave device
+w1_master_attempts - (ro) the number of times a search was attempted
w1_master_max_slave_count
- - maximum number of slaves to search for at a time
-w1_master_name - the name of the device (w1_bus_masterX)
-w1_master_pullup - 5V strong pullup 0 enabled, 1 disabled
-w1_master_remove - Manually remove a slave device
-w1_master_search - the number of searches left to do, -1=continual (default)
+ - (rw) maximum number of slaves to search for at a time
+w1_master_name - (ro) the name of the device (w1_bus_masterX)
+w1_master_pullup - (rw) 5V strong pullup 0 enabled, 1 disabled
+w1_master_remove - (rw) manually remove a slave device
+w1_master_search - (rw) the number of searches left to do,
+ -1=continual (default)
w1_master_slave_count
- - the number of slaves found
-w1_master_slaves - the names of the slaves, one per line
-w1_master_timeout - the delay in seconds between searches
+ - (ro) the number of slaves found
+w1_master_slaves - (ro) the names of the slaves, one per line
+w1_master_timeout - (ro) the delay in seconds between searches
+w1_master_timeout_us
+		- (ro) the delay in microseconds between searches
If you have a w1 bus that never changes (you don't add or remove devices),
you can set the module parameter search_count to a small positive number
@@ -101,6 +104,11 @@ generally only make sense when searching is disabled, as a search will
redetect manually removed devices that are present and timeout manually
added devices that aren't on the bus.
+Bus searches occur at an interval specified as the sum of the timeout and
+timeout_us module parameters (either of which may be 0), for as long as
+w1_master_search remains greater than 0 or is -1. Each search attempt
+decrements w1_master_search by 1 (down to 0) and increments
+w1_master_attempts by 1.
w1 slave sysfs interface
------------------------------------------------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index cae04966af4b..161747bdecf3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -732,7 +732,7 @@ ANDROID DRIVERS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: Arve Hjønnevåg <arve@android.com>
M: Riley Andrews <riandrews@android.com>
-T: git git://git.kernel.org/pub/scm/linux/kernel/gregkh/staging.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
L: devel@driverdev.osuosl.org
S: Supported
F: drivers/android/
@@ -3207,9 +3207,9 @@ S: Maintained
F: drivers/platform/x86/dell-smo8800.c
DELL LAPTOP SMM DRIVER
-M: Guenter Roeck <linux@roeck-us.net>
+M: Pali Rohár <pali.rohar@gmail.com>
S: Maintained
-F: drivers/char/i8k.c
+F: drivers/hwmon/dell-smm-hwmon.c
F: include/uapi/linux/i8k.h
DELL SYSTEMS MANAGEMENT BASE DRIVER (dcdbas)
@@ -5444,6 +5444,7 @@ M: Tomas Winkler <tomas.winkler@intel.com>
L: linux-kernel@vger.kernel.org
S: Supported
F: include/uapi/linux/mei.h
+F: include/linux/mei_cl_bus.h
F: drivers/misc/mei/*
F: Documentation/misc-devices/mei/*
@@ -7572,13 +7573,16 @@ S: Maintained
F: Documentation/mn10300/
F: arch/mn10300/
-PARALLEL PORT SUPPORT
+PARALLEL PORT SUBSYSTEM
+M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+M: Sudip Mukherjee <sudip@vectorindia.org>
L: linux-parport@lists.infradead.org (subscribers-only)
-S: Orphan
+S: Maintained
F: drivers/parport/
F: include/linux/parport*.h
F: drivers/char/ppdev.c
F: include/uapi/linux/ppdev.h
+F: Documentation/parport*.txt
PARAVIRT_OPS INTERFACE
M: Jeremy Fitzhardinge <jeremy@goop.org>
diff --git a/arch/um/os-Linux/drivers/ethertap_user.c b/arch/um/os-Linux/drivers/ethertap_user.c
index b39b6696ac58..6d4918246ffe 100644
--- a/arch/um/os-Linux/drivers/ethertap_user.c
+++ b/arch/um/os-Linux/drivers/ethertap_user.c
@@ -105,7 +105,7 @@ static int etap_tramp(char *dev, char *gate, int control_me,
sprintf(data_fd_buf, "%d", data_remote);
sprintf(version_buf, "%d", UML_NET_VERSION);
if (gate != NULL) {
- strcpy(gate_buf, gate);
+ strncpy(gate_buf, gate, 15);
args = setup_args;
}
else args = nosetup_args;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8e0b76ad8350..4fcf0ade7e91 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1055,24 +1055,19 @@ config TOSHIBA
Say N otherwise.
config I8K
- tristate "Dell laptop support"
+ tristate "Dell i8k legacy laptop support"
select HWMON
+ select SENSORS_DELL_SMM
---help---
- This adds a driver to safely access the System Management Mode
- of the CPU on the Dell Inspiron 8000. The System Management Mode
- is used to read cpu temperature and cooling fan status and to
- control the fans on the I8K portables.
+	  This option enables the legacy /proc/i8k userspace interface of the
+	  hwmon dell-smm-hwmon driver. The character file /proc/i8k reports the
+	  BIOS version and temperature, and allows controlling the fan speeds
+	  of Dell laptops via System Management Mode. On old Dell laptops (such
+	  as the Dell Inspiron 8000) it also reports power and hotkey status.
+	  Fan speed control requires the userspace package i8kutils.
- This driver has been tested only on the Inspiron 8000 but it may
- also work with other Dell laptops. You can force loading on other
- models by passing the parameter `force=1' to the module. Use at
- your own risk.
-
- For information on utilities to make use of this driver see the
- I8K Linux utilities web site at:
- <http://people.debian.org/~dz/i8k/>
-
- Say Y if you intend to run this kernel on a Dell Inspiron 8000.
+	  Say Y if you intend to run this kernel on an old Dell laptop or want
+	  to use the userspace package i8kutils.
Say N otherwise.
config X86_REBOOTFIXUPS
diff --git a/drivers/block/paride/paride.c b/drivers/block/paride/paride.c
index 48c50f11f63b..0e287993b778 100644
--- a/drivers/block/paride/paride.c
+++ b/drivers/block/paride/paride.c
@@ -30,6 +30,7 @@
#include <linux/wait.h>
#include <linux/sched.h> /* TASK_* */
#include <linux/parport.h>
+#include <linux/slab.h>
#include "paride.h"
@@ -244,17 +245,19 @@ void paride_unregister(PIP * pr)
EXPORT_SYMBOL(paride_unregister);
-static int pi_register_parport(PIA * pi, int verbose)
+static int pi_register_parport(PIA *pi, int verbose, int unit)
{
struct parport *port;
+ struct pardev_cb par_cb;
port = parport_find_base(pi->port);
if (!port)
return 0;
-
- pi->pardev = parport_register_device(port,
- pi->device, NULL,
- pi_wake_up, NULL, 0, (void *) pi);
+ memset(&par_cb, 0, sizeof(par_cb));
+ par_cb.wakeup = pi_wake_up;
+ par_cb.private = (void *)pi;
+ pi->pardev = parport_register_dev_model(port, pi->device, &par_cb,
+ unit);
parport_put_port(port);
if (!pi->pardev)
return 0;
@@ -311,7 +314,7 @@ static int pi_probe_unit(PIA * pi, int unit, char *scratch, int verbose)
e = pi->proto->max_units;
}
- if (!pi_register_parport(pi, verbose))
+ if (!pi_register_parport(pi, verbose, s))
return 0;
if (pi->proto->test_port) {
@@ -432,3 +435,45 @@ int pi_init(PIA * pi, int autoprobe, int port, int mode,
}
EXPORT_SYMBOL(pi_init);
+
+static int pi_probe(struct pardevice *par_dev)
+{
+ struct device_driver *drv = par_dev->dev.driver;
+ int len = strlen(drv->name);
+
+ if (strncmp(par_dev->name, drv->name, len))
+ return -ENODEV;
+
+ return 0;
+}
+
+void *pi_register_driver(char *name)
+{
+ struct parport_driver *parp_drv;
+ int ret;
+
+ parp_drv = kzalloc(sizeof(*parp_drv), GFP_KERNEL);
+ if (!parp_drv)
+ return NULL;
+
+ parp_drv->name = name;
+ parp_drv->probe = pi_probe;
+ parp_drv->devmodel = true;
+
+ ret = parport_register_driver(parp_drv);
+ if (ret) {
+ kfree(parp_drv);
+ return NULL;
+ }
+ return (void *)parp_drv;
+}
+EXPORT_SYMBOL(pi_register_driver);
+
+void pi_unregister_driver(void *_drv)
+{
+ struct parport_driver *drv = _drv;
+
+ parport_unregister_driver(drv);
+ kfree(drv);
+}
+EXPORT_SYMBOL(pi_unregister_driver);
diff --git a/drivers/block/paride/paride.h b/drivers/block/paride/paride.h
index 2bddbf45518b..ddb9e589da7f 100644
--- a/drivers/block/paride/paride.h
+++ b/drivers/block/paride/paride.h
@@ -165,6 +165,8 @@ typedef struct pi_protocol PIP;
extern int paride_register( PIP * );
extern void paride_unregister ( PIP * );
+void *pi_register_driver(char *);
+void pi_unregister_driver(void *);
#endif /* __DRIVERS_PARIDE_H__ */
/* end of paride.h */
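
The front-end changes that follow (pcd, pd, pf, pg, pt) all use these two
helpers in the same way; a hedged sketch of the pattern (the name "pexample"
and the detect function are hypothetical):

static void *par_drv;	/* reference of parport driver */

static int pexample_detect(void)
{
	int found = 0;

	/* Register with the parport core before probing any unit. */
	par_drv = pi_register_driver("pexample");
	if (!par_drv) {
		pr_err("failed to register pexample driver\n");
		return -1;
	}

	/* ... probe units with pi_init(), setting found on success ... */

	/* Drop the registration again if no unit was detected. */
	if (!found) {
		pi_unregister_driver(par_drv);
		return -1;
	}
	return 0;
}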
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 3b7c9f1be484..93362362aa55 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -221,6 +221,7 @@ static int pcd_busy; /* request being processed ? */
static int pcd_sector; /* address of next requested sector */
static int pcd_count; /* number of blocks still to do */
static char *pcd_buf; /* buffer for request in progress */
+static void *par_drv; /* reference of parport driver */
/* kernel glue structures */
@@ -690,6 +691,12 @@ static int pcd_detect(void)
printk("%s: %s version %s, major %d, nice %d\n",
name, name, PCD_VERSION, major, nice);
+ par_drv = pi_register_driver(name);
+ if (!par_drv) {
+ pr_err("failed to register %s driver\n", name);
+ return -1;
+ }
+
k = 0;
if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
cd = pcd;
@@ -723,6 +730,7 @@ static int pcd_detect(void)
printk("%s: No CD-ROM drive found\n", name);
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
put_disk(cd->disk);
+ pi_unregister_driver(par_drv);
return -1;
}
@@ -984,6 +992,7 @@ static void __exit pcd_exit(void)
}
blk_cleanup_queue(pcd_queue);
unregister_blkdev(major, name);
+ pi_unregister_driver(par_drv);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index dbb4da1cdca8..b9242d78283d 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -247,6 +247,8 @@ static char *pd_errs[17] = { "ERR", "INDEX", "ECC", "DRQ", "SEEK", "WRERR",
"IDNF", "MC", "UNC", "???", "TMO"
};
+static void *par_drv; /* reference of parport driver */
+
static inline int status_reg(struct pd_unit *disk)
{
return pi_read_regr(disk->pi, 1, 6);
@@ -872,6 +874,12 @@ static int pd_detect(void)
pd_drive_count++;
}
+ par_drv = pi_register_driver(name);
+ if (!par_drv) {
+ pr_err("failed to register %s driver\n", name);
+ return -1;
+ }
+
if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
disk = pd;
if (pi_init(disk->pi, 1, -1, -1, -1, -1, -1, pd_scratch,
@@ -902,8 +910,10 @@ static int pd_detect(void)
found = 1;
}
}
- if (!found)
+ if (!found) {
printk("%s: no valid drive found\n", name);
+ pi_unregister_driver(par_drv);
+ }
return found;
}
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 9a15fd3c9349..7a7d977a76c5 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -264,6 +264,7 @@ static int pf_cmd; /* current command READ/WRITE */
static struct pf_unit *pf_current;/* unit of current request */
static int pf_mask; /* stopper for pseudo-int */
static char *pf_buf; /* buffer for request in progress */
+static void *par_drv; /* reference of parport driver */
/* kernel glue structures */
@@ -703,6 +704,11 @@ static int pf_detect(void)
printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
name, name, PF_VERSION, major, cluster, nice);
+ par_drv = pi_register_driver(name);
+ if (!par_drv) {
+ pr_err("failed to register %s driver\n", name);
+ return -1;
+ }
k = 0;
if (pf_drive_count == 0) {
if (pi_init(pf->pi, 1, -1, -1, -1, -1, -1, pf_scratch, PI_PF,
@@ -735,6 +741,7 @@ static int pf_detect(void)
printk("%s: No ATAPI disk detected\n", name);
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
put_disk(pf->disk);
+ pi_unregister_driver(par_drv);
return -1;
}
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 876d0c3eaf58..bfbd4c852dd9 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -227,6 +227,7 @@ static int pg_identify(struct pg *dev, int log);
static char pg_scratch[512]; /* scratch block buffer */
static struct class *pg_class;
+static void *par_drv; /* reference of parport driver */
/* kernel glue structures */
@@ -481,6 +482,12 @@ static int pg_detect(void)
printk("%s: %s version %s, major %d\n", name, name, PG_VERSION, major);
+ par_drv = pi_register_driver(name);
+ if (!par_drv) {
+ pr_err("failed to register %s driver\n", name);
+ return -1;
+ }
+
k = 0;
if (pg_drive_count == 0) {
if (pi_init(dev->pi, 1, -1, -1, -1, -1, -1, pg_scratch,
@@ -511,6 +518,7 @@ static int pg_detect(void)
if (k)
return 0;
+ pi_unregister_driver(par_drv);
printk("%s: No ATAPI device detected\n", name);
return -1;
}
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 2596042eb987..1740d75e8a32 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -232,6 +232,7 @@ static int pt_identify(struct pt_unit *tape);
static struct pt_unit pt[PT_UNITS];
static char pt_scratch[512]; /* scratch block buffer */
+static void *par_drv; /* reference of parport driver */
/* kernel glue structures */
@@ -605,6 +606,12 @@ static int pt_detect(void)
printk("%s: %s version %s, major %d\n", name, name, PT_VERSION, major);
+ par_drv = pi_register_driver(name);
+ if (!par_drv) {
+ pr_err("failed to register %s driver\n", name);
+ return -1;
+ }
+
specified = 0;
for (unit = 0; unit < PT_UNITS; unit++) {
struct pt_unit *tape = &pt[unit];
@@ -644,6 +651,7 @@ static int pt_detect(void)
if (found)
return 0;
+ pi_unregister_driver(par_drv);
printk("%s: No ATAPI tape drive detected\n", name);
return -1;
}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index a4af8221751e..a043107da2af 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -590,14 +590,6 @@ config DEVPORT
source "drivers/s390/char/Kconfig"
-config MSM_SMD_PKT
- bool "Enable device interface for some SMD packet ports"
- default n
- depends on MSM_SMD
- help
- Enables userspace clients to read and write to some packet SMD
- ports via device interface for MSM chipset.
-
config TILE_SROM
bool "Character-device access via hypervisor to the Tilera SPI ROM"
depends on TILE
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index d06cde26031b..d8a7579300d2 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
-obj-$(CONFIG_MSM_SMD_PKT) += msm_smd_pkt.o
obj-$(CONFIG_MSPEC) += mspec.o
obj-$(CONFIG_MMTIMER) += mmtimer.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
@@ -36,7 +35,6 @@ else
obj-$(CONFIG_NVRAM) += nvram.o
endif
obj-$(CONFIG_TOSHIBA) += toshiba.o
-obj-$(CONFIG_I8K) += i8k.o
obj-$(CONFIG_DS1620) += ds1620.o
obj-$(CONFIG_HW_RANDOM) += hw_random/
obj-$(CONFIG_PPDEV) += ppdev.o
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 9fd5a91e0d81..fdb0f9b3fe45 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -117,14 +117,14 @@ static int misc_open(struct inode * inode, struct file * file)
const struct file_operations *new_fops = NULL;
mutex_lock(&misc_mtx);
-
+
list_for_each_entry(c, &misc_list, list) {
if (c->minor == minor) {
- new_fops = fops_get(c->fops);
+ new_fops = fops_get(c->fops);
break;
}
}
-
+
if (!new_fops) {
mutex_unlock(&misc_mtx);
request_module("char-major-%d-%d", MISC_MAJOR, minor);
@@ -167,7 +167,7 @@ static const struct file_operations misc_fops = {
/**
* misc_register - register a miscellaneous device
* @misc: device structure
- *
+ *
* Register a miscellaneous device with the kernel. If the minor
* number is set to %MISC_DYNAMIC_MINOR a minor number is assigned
* and placed in the minor field of the structure. For other cases
@@ -181,17 +181,18 @@ static const struct file_operations misc_fops = {
* A zero is returned on success and a negative errno code for
* failure.
*/
-
+
int misc_register(struct miscdevice * misc)
{
dev_t dev;
int err = 0;
+ bool is_dynamic = (misc->minor == MISC_DYNAMIC_MINOR);
INIT_LIST_HEAD(&misc->list);
mutex_lock(&misc_mtx);
- if (misc->minor == MISC_DYNAMIC_MINOR) {
+ if (is_dynamic) {
int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS);
if (i >= DYNAMIC_MINORS) {
err = -EBUSY;
@@ -216,9 +217,13 @@ int misc_register(struct miscdevice * misc)
device_create_with_groups(misc_class, misc->parent, dev,
misc, misc->groups, "%s", misc->name);
if (IS_ERR(misc->this_device)) {
- int i = DYNAMIC_MINORS - misc->minor - 1;
- if (i < DYNAMIC_MINORS && i >= 0)
- clear_bit(i, misc_minors);
+ if (is_dynamic) {
+ int i = DYNAMIC_MINORS - misc->minor - 1;
+
+ if (i < DYNAMIC_MINORS && i >= 0)
+ clear_bit(i, misc_minors);
+ misc->minor = MISC_DYNAMIC_MINOR;
+ }
err = PTR_ERR(misc->this_device);
goto out;
}
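
For context, a minimal sketch of a misc_register() caller that relies on the
behaviour fixed above (the name "example" and the empty fops are
placeholders): with the fix, a failed dynamic registration leaves
misc->minor set back to MISC_DYNAMIC_MINOR, so the same structure can simply
be registered again later.

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice example_misc = {
	.minor = MISC_DYNAMIC_MINOR,	/* request a dynamic minor */
	.name  = "example",
	.fops  = &example_fops,
};

static int __init example_init(void)
{
	/* On failure the minor is released and reset, so a retry is safe. */
	return misc_register(&example_misc);
}

static void __exit example_exit(void)
{
	misc_deregister(&example_misc);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");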
diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c
deleted file mode 100644
index ba82a06d9684..000000000000
--- a/drivers/char/msm_smd_pkt.c
+++ /dev/null
@@ -1,465 +0,0 @@
-/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-/*
- * SMD Packet Driver -- Provides userspace interface to SMD packet ports.
- */
-
-#include <linux/slab.h>
-#include <linux/cdev.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/device.h>
-#include <linux/sched.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/uaccess.h>
-#include <linux/workqueue.h>
-#include <linux/poll.h>
-
-#include <mach/msm_smd.h>
-
-#define NUM_SMD_PKT_PORTS 9
-#define DEVICE_NAME "smdpkt"
-#define MAX_BUF_SIZE 2048
-
-struct smd_pkt_dev {
- struct cdev cdev;
- struct device *devicep;
-
- struct smd_channel *ch;
- int open_count;
- struct mutex ch_lock;
- struct mutex rx_lock;
- struct mutex tx_lock;
- wait_queue_head_t ch_read_wait_queue;
- wait_queue_head_t ch_opened_wait_queue;
-
- int i;
-
- unsigned char tx_buf[MAX_BUF_SIZE];
- unsigned char rx_buf[MAX_BUF_SIZE];
- int remote_open;
-
-} *smd_pkt_devp[NUM_SMD_PKT_PORTS];
-
-struct class *smd_pkt_classp;
-static dev_t smd_pkt_number;
-
-static int msm_smd_pkt_debug_enable;
-module_param_named(debug_enable, msm_smd_pkt_debug_enable,
- int, S_IRUGO | S_IWUSR | S_IWGRP);
-
-#ifdef DEBUG
-#define D_DUMP_BUFFER(prestr, cnt, buf) do { \
- int i; \
- if (msm_smd_pkt_debug_enable) { \
- pr_debug("%s", prestr); \
- for (i = 0; i < cnt; i++) \
- pr_debug("%.2x", buf[i]); \
- pr_debug("\n"); \
- } \
- } while (0)
-#else
-#define D_DUMP_BUFFER(prestr, cnt, buf) do {} while (0)
-#endif
-
-#ifdef DEBUG
-#define DBG(x...) do { \
- if (msm_smd_pkt_debug_enable) \
- pr_debug(x); \
- } while (0)
-#else
-#define DBG(x...) do {} while (0)
-#endif
-
-static void check_and_wakeup_reader(struct smd_pkt_dev *smd_pkt_devp)
-{
- int sz;
-
- if (!smd_pkt_devp || !smd_pkt_devp->ch)
- return;
-
- sz = smd_cur_packet_size(smd_pkt_devp->ch);
- if (sz == 0) {
- DBG("no packet\n");
- return;
- }
- if (sz > smd_read_avail(smd_pkt_devp->ch)) {
- DBG("incomplete packet\n");
- return;
- }
-
- DBG("waking up reader\n");
- wake_up_interruptible(&smd_pkt_devp->ch_read_wait_queue);
-}
-
-static int smd_pkt_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- int r, bytes_read;
- struct smd_pkt_dev *smd_pkt_devp;
- struct smd_channel *chl;
-
- DBG("read %d bytes\n", count);
- if (count > MAX_BUF_SIZE)
- return -EINVAL;
-
- smd_pkt_devp = file->private_data;
- if (!smd_pkt_devp || !smd_pkt_devp->ch)
- return -EINVAL;
-
- chl = smd_pkt_devp->ch;
-wait_for_packet:
- r = wait_event_interruptible(smd_pkt_devp->ch_read_wait_queue,
- (smd_cur_packet_size(chl) > 0 &&
- smd_read_avail(chl) >=
- smd_cur_packet_size(chl)));
-
- if (r < 0) {
- if (r != -ERESTARTSYS)
- pr_err("wait returned %d\n", r);
- return r;
- }
-
- mutex_lock(&smd_pkt_devp->rx_lock);
-
- bytes_read = smd_cur_packet_size(smd_pkt_devp->ch);
- if (bytes_read == 0 ||
- bytes_read < smd_read_avail(smd_pkt_devp->ch)) {
- mutex_unlock(&smd_pkt_devp->rx_lock);
- DBG("Nothing to read\n");
- goto wait_for_packet;
- }
-
- if (bytes_read > count) {
- mutex_unlock(&smd_pkt_devp->rx_lock);
- pr_info("packet size %d > buffer size %d", bytes_read, count);
- return -EINVAL;
- }
-
- r = smd_read(smd_pkt_devp->ch, smd_pkt_devp->rx_buf, bytes_read);
- if (r != bytes_read) {
- mutex_unlock(&smd_pkt_devp->rx_lock);
- pr_err("smd_read failed to read %d bytes: %d\n", bytes_read, r);
- return -EIO;
- }
-
- D_DUMP_BUFFER("read: ", bytes_read, smd_pkt_devp->rx_buf);
- r = copy_to_user(buf, smd_pkt_devp->rx_buf, bytes_read);
- mutex_unlock(&smd_pkt_devp->rx_lock);
- if (r) {
- pr_err("copy_to_user failed %d\n", r);
- return -EFAULT;
- }
-
- DBG("read complete %d bytes\n", bytes_read);
- check_and_wakeup_reader(smd_pkt_devp);
-
- return bytes_read;
-}
-
-static int smd_pkt_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- int r;
- struct smd_pkt_dev *smd_pkt_devp;
-
- if (count > MAX_BUF_SIZE)
- return -EINVAL;
-
- DBG("writing %d bytes\n", count);
-
- smd_pkt_devp = file->private_data;
- if (!smd_pkt_devp || !smd_pkt_devp->ch)
- return -EINVAL;
-
- mutex_lock(&smd_pkt_devp->tx_lock);
- if (smd_write_avail(smd_pkt_devp->ch) < count) {
- mutex_unlock(&smd_pkt_devp->tx_lock);
- DBG("Not enough space to write\n");
- return -ENOMEM;
- }
-
- D_DUMP_BUFFER("write: ", count, buf);
- r = copy_from_user(smd_pkt_devp->tx_buf, buf, count);
- if (r) {
- mutex_unlock(&smd_pkt_devp->tx_lock);
- pr_err("copy_from_user failed %d\n", r);
- return -EFAULT;
- }
-
- r = smd_write(smd_pkt_devp->ch, smd_pkt_devp->tx_buf, count);
- if (r != count) {
- mutex_unlock(&smd_pkt_devp->tx_lock);
- pr_err("smd_write failed to write %d bytes: %d.\n", count, r);
- return -EIO;
- }
- mutex_unlock(&smd_pkt_devp->tx_lock);
-
- DBG("wrote %d bytes\n", count);
- return count;
-}
-
-static unsigned int smd_pkt_poll(struct file *file, poll_table *wait)
-{
- struct smd_pkt_dev *smd_pkt_devp;
- unsigned int mask = 0;
-
- smd_pkt_devp = file->private_data;
- if (!smd_pkt_devp)
- return POLLERR;
-
- DBG("poll waiting\n");
- poll_wait(file, &smd_pkt_devp->ch_read_wait_queue, wait);
- if (smd_read_avail(smd_pkt_devp->ch))
- mask |= POLLIN | POLLRDNORM;
-
- DBG("poll return\n");
- return mask;
-}
-
-static void smd_pkt_ch_notify(void *priv, unsigned event)
-{
- struct smd_pkt_dev *smd_pkt_devp = priv;
-
- if (smd_pkt_devp->ch == 0)
- return;
-
- switch (event) {
- case SMD_EVENT_DATA:
- DBG("data\n");
- check_and_wakeup_reader(smd_pkt_devp);
- break;
-
- case SMD_EVENT_OPEN:
- DBG("remote open\n");
- smd_pkt_devp->remote_open = 1;
- wake_up_interruptible(&smd_pkt_devp->ch_opened_wait_queue);
- break;
-
- case SMD_EVENT_CLOSE:
- smd_pkt_devp->remote_open = 0;
- pr_info("remote closed\n");
- break;
-
- default:
- pr_err("unknown event %d\n", event);
- break;
- }
-}
-
-static char *smd_pkt_dev_name[] = {
- "smdcntl0",
- "smdcntl1",
- "smdcntl2",
- "smdcntl3",
- "smdcntl4",
- "smdcntl5",
- "smdcntl6",
- "smdcntl7",
- "smd22",
-};
-
-static char *smd_ch_name[] = {
- "DATA5_CNTL",
- "DATA6_CNTL",
- "DATA7_CNTL",
- "DATA8_CNTL",
- "DATA9_CNTL",
- "DATA12_CNTL",
- "DATA13_CNTL",
- "DATA14_CNTL",
- "DATA22",
-};
-
-static int smd_pkt_open(struct inode *inode, struct file *file)
-{
- int r = 0;
- struct smd_pkt_dev *smd_pkt_devp;
-
- smd_pkt_devp = container_of(inode->i_cdev, struct smd_pkt_dev, cdev);
- if (!smd_pkt_devp)
- return -EINVAL;
-
- file->private_data = smd_pkt_devp;
-
- mutex_lock(&smd_pkt_devp->ch_lock);
- if (smd_pkt_devp->open_count == 0) {
- r = smd_open(smd_ch_name[smd_pkt_devp->i],
- &smd_pkt_devp->ch, smd_pkt_devp,
- smd_pkt_ch_notify);
- if (r < 0) {
- pr_err("smd_open failed for %s, %d\n",
- smd_ch_name[smd_pkt_devp->i], r);
- goto out;
- }
-
- r = wait_event_interruptible_timeout(
- smd_pkt_devp->ch_opened_wait_queue,
- smd_pkt_devp->remote_open,
- msecs_to_jiffies(2 * HZ));
- if (r == 0)
- r = -ETIMEDOUT;
-
- if (r < 0) {
- pr_err("wait returned %d\n", r);
- smd_close(smd_pkt_devp->ch);
- smd_pkt_devp->ch = 0;
- } else {
- smd_pkt_devp->open_count++;
- r = 0;
- }
- }
-out:
- mutex_unlock(&smd_pkt_devp->ch_lock);
- return r;
-}
-
-static int smd_pkt_release(struct inode *inode, struct file *file)
-{
- int r = 0;
- struct smd_pkt_dev *smd_pkt_devp = file->private_data;
-
- if (!smd_pkt_devp)
- return -EINVAL;
-
- mutex_lock(&smd_pkt_devp->ch_lock);
- if (--smd_pkt_devp->open_count == 0) {
- r = smd_close(smd_pkt_devp->ch);
- smd_pkt_devp->ch = 0;
- }
- mutex_unlock(&smd_pkt_devp->ch_lock);
-
- return r;
-}
-
-static const struct file_operations smd_pkt_fops = {
- .owner = THIS_MODULE,
- .open = smd_pkt_open,
- .release = smd_pkt_release,
- .read = smd_pkt_read,
- .write = smd_pkt_write,
- .poll = smd_pkt_poll,
-};
-
-static int __init smd_pkt_init(void)
-{
- int i;
- int r;
-
- r = alloc_chrdev_region(&smd_pkt_number, 0,
- NUM_SMD_PKT_PORTS, DEVICE_NAME);
- if (r) {
- pr_err("alloc_chrdev_region() failed %d\n", r);
- return r;
- }
-
- smd_pkt_classp = class_create(THIS_MODULE, DEVICE_NAME);
- if (IS_ERR(smd_pkt_classp)) {
- r = PTR_ERR(smd_pkt_classp);
- pr_err("class_create() failed %d\n", r);
- goto unreg_chardev;
- }
-
- for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) {
- smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev),
- GFP_KERNEL);
- if (!smd_pkt_devp[i]) {
- pr_err("kmalloc() failed\n");
- goto clean_cdevs;
- }
-
- smd_pkt_devp[i]->i = i;
-
- init_waitqueue_head(&smd_pkt_devp[i]->ch_read_wait_queue);
- smd_pkt_devp[i]->remote_open = 0;
- init_waitqueue_head(&smd_pkt_devp[i]->ch_opened_wait_queue);
-
- mutex_init(&smd_pkt_devp[i]->ch_lock);
- mutex_init(&smd_pkt_devp[i]->rx_lock);
- mutex_init(&smd_pkt_devp[i]->tx_lock);
-
- cdev_init(&smd_pkt_devp[i]->cdev, &smd_pkt_fops);
- smd_pkt_devp[i]->cdev.owner = THIS_MODULE;
-
- r = cdev_add(&smd_pkt_devp[i]->cdev,
- (smd_pkt_number + i), 1);
- if (r) {
- pr_err("cdev_add() failed %d\n", r);
- kfree(smd_pkt_devp[i]);
- goto clean_cdevs;
- }
-
- smd_pkt_devp[i]->devicep =
- device_create(smd_pkt_classp, NULL,
- (smd_pkt_number + i), NULL,
- smd_pkt_dev_name[i]);
- if (IS_ERR(smd_pkt_devp[i]->devicep)) {
- r = PTR_ERR(smd_pkt_devp[i]->devicep);
- pr_err("device_create() failed %d\n", r);
- cdev_del(&smd_pkt_devp[i]->cdev);
- kfree(smd_pkt_devp[i]);
- goto clean_cdevs;
- }
-
- }
-
- pr_info("SMD Packet Port Driver Initialized.\n");
- return 0;
-
-clean_cdevs:
- if (i > 0) {
- while (--i >= 0) {
- mutex_destroy(&smd_pkt_devp[i]->ch_lock);
- mutex_destroy(&smd_pkt_devp[i]->rx_lock);
- mutex_destroy(&smd_pkt_devp[i]->tx_lock);
- cdev_del(&smd_pkt_devp[i]->cdev);
- kfree(smd_pkt_devp[i]);
- device_destroy(smd_pkt_classp,
- MKDEV(MAJOR(smd_pkt_number), i));
- }
- }
-
- class_destroy(smd_pkt_classp);
-unreg_chardev:
- unregister_chrdev_region(MAJOR(smd_pkt_number), NUM_SMD_PKT_PORTS);
- return r;
-}
-module_init(smd_pkt_init);
-
-static void __exit smd_pkt_cleanup(void)
-{
- int i;
-
- for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) {
- mutex_destroy(&smd_pkt_devp[i]->ch_lock);
- mutex_destroy(&smd_pkt_devp[i]->rx_lock);
- mutex_destroy(&smd_pkt_devp[i]->tx_lock);
- cdev_del(&smd_pkt_devp[i]->cdev);
- kfree(smd_pkt_devp[i]);
- device_destroy(smd_pkt_classp,
- MKDEV(MAJOR(smd_pkt_number), i));
- }
-
- class_destroy(smd_pkt_classp);
- unregister_chrdev_region(MAJOR(smd_pkt_number), NUM_SMD_PKT_PORTS);
-}
-module_exit(smd_pkt_cleanup);
-
-MODULE_DESCRIPTION("MSM Shared Memory Packet Port");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 0ea9986059af..7680d5213ff8 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -437,7 +437,7 @@ static int mgslpc_device_count = 0;
* .text section address and breakpoint on module load.
* This is useful for use with gdb and add-symbol-file command.
*/
-static bool break_on_load=0;
+static bool break_on_load;
/*
* Driver major number, defaults to zero to get auto
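
The initializer removal above is safe by the C language, not just by style: objects with static storage duration are zero-initialized before any code runs, so "= 0" on a static bool is redundant, and kernel style (checkpatch) flags such initializers. A two-line userspace illustration:

#include <stdbool.h>
#include <stdio.h>

static bool break_on_load;	/* static storage: starts as false with no initializer */

int main(void)
{
	printf("%d\n", break_on_load);	/* prints 0 */
	return 0;
}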
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index 8bab59292a0d..8a80ead8d316 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -198,7 +198,7 @@ scdrv_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
add_wait_queue(&sd->sd_rq, &wait);
spin_unlock_irqrestore(&sd->sd_rlock, flags);
- schedule_timeout(SCDRV_TIMEOUT);
+ schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
remove_wait_queue(&sd->sd_rq, &wait);
if (signal_pending(current)) {
@@ -294,7 +294,7 @@ scdrv_write(struct file *file, const char __user *buf,
add_wait_queue(&sd->sd_wq, &wait);
spin_unlock_irqrestore(&sd->sd_wlock, flags);
- schedule_timeout(SCDRV_TIMEOUT);
+ schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
remove_wait_queue(&sd->sd_wq, &wait);
if (signal_pending(current)) {
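
Both hunks above fix the same unit bug: schedule_timeout() expects a count of jiffies, so passing SCDRV_TIMEOUT (a plain constant meant as milliseconds) made the sleep length scale with CONFIG_HZ. Routing it through msecs_to_jiffies() pins the timeout to wall-clock time. A userspace model of the arithmetic, with HZ and SCDRV_TIMEOUT assumed to be 250 and 1000 for illustration:

#include <stdio.h>

#define HZ 250			/* assumed kernel configuration */
#define SCDRV_TIMEOUT 1000	/* assumed millisecond budget */

/* userspace model of the kernel helper (rounds up) */
static unsigned long msecs_to_jiffies(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;
}

int main(void)
{
	/* old call slept SCDRV_TIMEOUT jiffies: 1000 / 250 = 4.00 s */
	printf("raw jiffies: %.2f s\n", (double)SCDRV_TIMEOUT / HZ);
	/* new call sleeps 1000 ms on any HZ: 250 jiffies here = 1.00 s */
	printf("converted:   %.2f s\n",
	       (double)msecs_to_jiffies(SCDRV_TIMEOUT) / HZ);
	return 0;
}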
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 50754d203310..d2406fe25533 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1492,8 +1492,8 @@ static int add_port(struct ports_device *portdev, u32 id)
* Finally, create the debugfs file that we can use to
* inspect a port's state at any time
*/
- sprintf(debugfs_name, "vport%up%u",
- port->portdev->vdev->index, id);
+ snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u",
+ port->portdev->vdev->index, id);
port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
pdrvdata.debugfs_dir,
port,
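
The sprintf() here was an overflow waiting to happen: with two u32 values, "vport%up%u" can expand to 26 characters plus the terminator, more than the 16-byte debugfs_name buffer declared earlier in add_port(). snprintf() bounds the write to the buffer size and still NUL-terminates, truncating the name instead of overrunning the buffer. A small standalone sketch (buffer size assumed to be 16 as in the driver):

#include <stdio.h>

int main(void)
{
	char debugfs_name[16];	/* assumed size, as in add_port() */

	/* stores at most 15 characters plus the NUL; returns the full length */
	int want = snprintf(debugfs_name, sizeof(debugfs_name),
			    "vport%up%u", 4294967295u, 4294967295u);
	printf("needed %d chars, kept \"%s\"\n", want, debugfs_name);
	return 0;
}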
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c
index 05d897764f02..53c3882e4981 100644
--- a/drivers/char/xilinx_hwicap/buffer_icap.c
+++ b/drivers/char/xilinx_hwicap/buffer_icap.c
@@ -270,7 +270,7 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
int status;
s32 buffer_count = 0;
s32 num_writes = 0;
- bool dirty = 0;
+ bool dirty = false;
u32 i;
void __iomem *base_address = drvdata->base_address;
@@ -279,7 +279,7 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
/* Copy data to bram */
buffer_icap_set_bram(base_address, buffer_count, data[i]);
- dirty = 1;
+ dirty = true;
if (buffer_count < XHI_MAX_BUFFER_INTS - 1) {
buffer_count++;
@@ -299,7 +299,7 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
buffer_count = 0;
num_writes++;
- dirty = 0;
+ dirty = false;
}
/* Write unwritten data to ICAP */
diff --git a/drivers/char/xillybus/Kconfig b/drivers/char/xillybus/Kconfig
index b53bdf12da0d..b302684d86c1 100644
--- a/drivers/char/xillybus/Kconfig
+++ b/drivers/char/xillybus/Kconfig
@@ -24,7 +24,7 @@ config XILLYBUS_PCIE
config XILLYBUS_OF
tristate "Xillybus over Device Tree"
- depends on OF_ADDRESS && OF_IRQ
+ depends on OF_ADDRESS && OF_IRQ && HAS_DMA
help
Set to M if you want Xillybus to find its resources from the
Open Firmware Flattened Device Tree. If the target is an embedded
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index fdc0bf0543ce..0cebbf668886 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -28,15 +28,22 @@ config EXTCON_ARIZONA
with Wolfson Arizona devices. These are audio CODECs with
advanced audio accessory detection support.
+config EXTCON_AXP288
+ tristate "X-Power AXP288 EXTCON support"
+ depends on MFD_AXP20X && USB_PHY
+ help
+ Say Y here to enable support for USB peripheral detection
+	  and USB MUX switching by the X-Powers AXP288 PMIC.
+
config EXTCON_GPIO
tristate "GPIO extcon support"
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
help
Say Y here to enable GPIO based extcon support. Note that GPIO
extcon supports single state per extcon instance.
config EXTCON_MAX14577
- tristate "MAX14577/77836 EXTCON Support"
+ tristate "Maxim MAX14577/77836 EXTCON Support"
depends on MFD_MAX14577
select IRQ_DOMAIN
select REGMAP_I2C
@@ -46,7 +53,7 @@ config EXTCON_MAX14577
detector and switch.
config EXTCON_MAX77693
- tristate "MAX77693 EXTCON Support"
+ tristate "Maxim MAX77693 EXTCON Support"
depends on MFD_MAX77693 && INPUT
select IRQ_DOMAIN
select REGMAP_I2C
@@ -56,7 +63,7 @@ config EXTCON_MAX77693
detector and switch.
config EXTCON_MAX77843
- tristate "MAX77843 EXTCON Support"
+ tristate "Maxim MAX77843 EXTCON Support"
depends on MFD_MAX77843
select IRQ_DOMAIN
select REGMAP_I2C
@@ -66,7 +73,7 @@ config EXTCON_MAX77843
	  detector and switch.
config EXTCON_MAX8997
- tristate "MAX8997 EXTCON Support"
+ tristate "Maxim MAX8997 EXTCON Support"
depends on MFD_MAX8997 && IRQ_DOMAIN
help
If you say yes here you get support for the MUIC device of
@@ -81,7 +88,7 @@ config EXTCON_PALMAS
detection by palmas usb.
config EXTCON_RT8973A
- tristate "RT8973A EXTCON support"
+ tristate "Richtek RT8973A EXTCON support"
depends on I2C
select IRQ_DOMAIN
select REGMAP_I2C
@@ -93,7 +100,7 @@ config EXTCON_RT8973A
from abnormal high input voltage (up to 28V).
config EXTCON_SM5502
- tristate "SM5502 EXTCON support"
+ tristate "Silicon Mitus SM5502 EXTCON support"
depends on I2C
select IRQ_DOMAIN
select REGMAP_I2C
@@ -105,9 +112,9 @@ config EXTCON_SM5502
config EXTCON_USB_GPIO
tristate "USB GPIO extcon support"
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
help
Say Y here to enable GPIO based USB cable detection extcon support.
Used typically if GPIO is used for USB ID pin detection.
-endif # MULTISTATE_SWITCH
+endif
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 9204114791a3..ba787d04295b 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_EXTCON) += extcon.o
obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o
obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o
+obj-$(CONFIG_EXTCON_AXP288) += extcon-axp288.o
obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o
obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 2bb82e55065a..7fc0ae1912f8 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -29,7 +29,6 @@
* struct adc_jack_data - internal data for adc_jack device driver
* @edev: extcon device.
* @cable_names: list of supported cables.
- * @num_cables: size of cable_names.
* @adc_conditions: list of adc value conditions.
* @num_conditions: size of adc_conditions.
* @irq: irq number of attach/detach event (0 if not exist).
@@ -41,8 +40,7 @@
struct adc_jack_data {
struct extcon_dev *edev;
- const char **cable_names;
- int num_cables;
+ const unsigned int **cable_names;
struct adc_jack_cond *adc_conditions;
int num_conditions;
@@ -112,17 +110,6 @@ static int adc_jack_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to allocate extcon device\n");
return -ENOMEM;
}
- data->edev->name = pdata->name;
-
- /* Check the length of array and set num_cables */
- for (i = 0; data->edev->supported_cable[i]; i++)
- ;
- if (i == 0 || i > SUPPORTED_CABLE_MAX) {
- dev_err(&pdev->dev, "error: pdata->cable_names size = %d\n",
- i - 1);
- return -EINVAL;
- }
- data->num_cables = i;
if (!pdata->adc_conditions ||
!pdata->adc_conditions[0].state) {
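
The deleted counting loop reflects the extcon rework running through the rest of this series: drivers now describe their cables as an EXTCON_NONE-terminated array of unsigned int IDs instead of a NULL-terminated array of strings, the core derives the cable count itself, and attach/detach is reported by ID through extcon_set_cable_state_(). A minimal sketch of the resulting driver pattern against the in-tree API (the demo_* names are illustrative, not part of any real driver):

#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/platform_device.h>

/* illustrative cable table: terminated by EXTCON_NONE, not NULL */
static const unsigned int demo_cables[] = {
	EXTCON_USB,
	EXTCON_USB_HOST,
	EXTCON_NONE,
};

static int demo_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;
	int ret;

	edev = devm_extcon_dev_allocate(&pdev->dev, demo_cables);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	ret = devm_extcon_dev_register(&pdev->dev, edev);
	if (ret)
		return ret;

	/* report attach/detach by ID rather than by string */
	return extcon_set_cable_state_(edev, EXTCON_USB, true);
}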
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index a0ed35b336e4..ad87f263056f 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -32,13 +32,10 @@
#include <linux/mfd/arizona/core.h>
#include <linux/mfd/arizona/pdata.h>
#include <linux/mfd/arizona/registers.h>
+#include <dt-bindings/mfd/arizona.h>
#define ARIZONA_MAX_MICD_RANGE 8
-#define ARIZONA_ACCDET_MODE_MIC 0
-#define ARIZONA_ACCDET_MODE_HPL 1
-#define ARIZONA_ACCDET_MODE_HPR 2
-
#define ARIZONA_MICD_CLAMP_MODE_JDL 0x4
#define ARIZONA_MICD_CLAMP_MODE_JDH 0x5
#define ARIZONA_MICD_CLAMP_MODE_JDL_GP5H 0x9
@@ -94,7 +91,7 @@ struct arizona_extcon_info {
bool detecting;
int jack_flips;
- int hpdet_ip;
+ int hpdet_ip_version;
struct extcon_dev *edev;
};
@@ -121,17 +118,12 @@ static const int arizona_micd_levels[] = {
1257,
};
-#define ARIZONA_CABLE_MECHANICAL 0
-#define ARIZONA_CABLE_MICROPHONE 1
-#define ARIZONA_CABLE_HEADPHONE 2
-#define ARIZONA_CABLE_LINEOUT 3
-
-static const char *arizona_cable[] = {
- "Mechanical",
- "Microphone",
- "Headphone",
- "Line-out",
- NULL,
+static const unsigned int arizona_cable[] = {
+ EXTCON_MECHANICAL,
+ EXTCON_MICROPHONE,
+ EXTCON_HEADPHONE,
+ EXTCON_LINE_OUT,
+ EXTCON_NONE,
};
static void arizona_start_hpdet_acc_id(struct arizona_extcon_info *info);
@@ -145,6 +137,7 @@ static void arizona_extcon_hp_clamp(struct arizona_extcon_info *info,
switch (arizona->type) {
case WM5110:
+ case WM8280:
mask = ARIZONA_HP1L_SHRTO | ARIZONA_HP1L_FLWR |
ARIZONA_HP1L_SHRTI;
if (clamp)
@@ -380,7 +373,7 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info)
return ret;
}
- switch (info->hpdet_ip) {
+ switch (info->hpdet_ip_version) {
case 0:
if (!(val & ARIZONA_HP_DONE)) {
dev_err(arizona->dev, "HPDET did not complete: %x\n",
@@ -441,7 +434,7 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info)
default:
dev_warn(arizona->dev, "Unknown HPDET IP revision %d\n",
- info->hpdet_ip);
+ info->hpdet_ip_version);
case 2:
if (!(val & ARIZONA_HP_DONE_B)) {
dev_err(arizona->dev, "HPDET did not complete: %x\n",
@@ -559,7 +552,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
struct arizona_extcon_info *info = data;
struct arizona *arizona = info->arizona;
int id_gpio = arizona->pdata.hpdet_id_gpio;
- int report = ARIZONA_CABLE_HEADPHONE;
+ unsigned int report = EXTCON_HEADPHONE;
int ret, reading;
bool mic = false;
@@ -573,7 +566,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
}
/* If the cable was removed while measuring ignore the result */
- ret = extcon_get_cable_state_(info->edev, ARIZONA_CABLE_MECHANICAL);
+ ret = extcon_get_cable_state_(info->edev, EXTCON_MECHANICAL);
if (ret < 0) {
dev_err(arizona->dev, "Failed to check cable state: %d\n",
ret);
@@ -604,9 +597,9 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
	/* Report high impedance cables as line outputs */
if (reading >= 5000)
- report = ARIZONA_CABLE_LINEOUT;
+ report = EXTCON_LINE_OUT;
else
- report = ARIZONA_CABLE_HEADPHONE;
+ report = EXTCON_HEADPHONE;
ret = extcon_set_cable_state_(info->edev, report, true);
if (ret != 0)
@@ -670,9 +663,9 @@ static void arizona_identify_headphone(struct arizona_extcon_info *info)
ret = regmap_update_bits(arizona->regmap,
ARIZONA_ACCESSORY_DETECT_MODE_1,
ARIZONA_ACCDET_MODE_MASK,
- ARIZONA_ACCDET_MODE_HPL);
+ arizona->pdata.hpdet_channel);
if (ret != 0) {
- dev_err(arizona->dev, "Failed to set HPDETL mode: %d\n", ret);
+ dev_err(arizona->dev, "Failed to set HPDET mode: %d\n", ret);
goto err;
}
@@ -691,8 +684,7 @@ err:
ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
/* Just report headphone */
- ret = extcon_set_cable_state_(info->edev,
- ARIZONA_CABLE_HEADPHONE, true);
+ ret = extcon_set_cable_state_(info->edev, EXTCON_HEADPHONE, true);
if (ret != 0)
dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
@@ -722,9 +714,9 @@ static void arizona_start_hpdet_acc_id(struct arizona_extcon_info *info)
ARIZONA_ACCESSORY_DETECT_MODE_1,
ARIZONA_ACCDET_SRC | ARIZONA_ACCDET_MODE_MASK,
info->micd_modes[0].src |
- ARIZONA_ACCDET_MODE_HPL);
+ arizona->pdata.hpdet_channel);
if (ret != 0) {
- dev_err(arizona->dev, "Failed to set HPDETL mode: %d\n", ret);
+ dev_err(arizona->dev, "Failed to set HPDET mode: %d\n", ret);
goto err;
}
@@ -749,8 +741,7 @@ err:
ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
/* Just report headphone */
- ret = extcon_set_cable_state_(info->edev,
- ARIZONA_CABLE_HEADPHONE, true);
+ ret = extcon_set_cable_state_(info->edev, EXTCON_HEADPHONE, true);
if (ret != 0)
dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
@@ -789,7 +780,7 @@ static void arizona_micd_detect(struct work_struct *work)
mutex_lock(&info->lock);
/* If the cable was removed while measuring ignore the result */
- ret = extcon_get_cable_state_(info->edev, ARIZONA_CABLE_MECHANICAL);
+ ret = extcon_get_cable_state_(info->edev, EXTCON_MECHANICAL);
if (ret < 0) {
dev_err(arizona->dev, "Failed to check cable state: %d\n",
ret);
@@ -838,8 +829,7 @@ static void arizona_micd_detect(struct work_struct *work)
arizona_identify_headphone(info);
ret = extcon_set_cable_state_(info->edev,
- ARIZONA_CABLE_MICROPHONE, true);
-
+ EXTCON_MICROPHONE, true);
if (ret != 0)
dev_err(arizona->dev, "Headset report failed: %d\n",
ret);
@@ -1030,7 +1020,7 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
if (info->last_jackdet == present) {
dev_dbg(arizona->dev, "Detected jack\n");
ret = extcon_set_cable_state_(info->edev,
- ARIZONA_CABLE_MECHANICAL, true);
+ EXTCON_MECHANICAL, true);
if (ret != 0)
dev_err(arizona->dev, "Mechanical report failed: %d\n",
@@ -1120,6 +1110,26 @@ static void arizona_micd_set_level(struct arizona *arizona, int index,
regmap_update_bits(arizona->regmap, reg, mask, level);
}
+static int arizona_extcon_of_get_pdata(struct arizona *arizona)
+{
+ struct arizona_pdata *pdata = &arizona->pdata;
+ unsigned int val = ARIZONA_ACCDET_MODE_HPL;
+
+ of_property_read_u32(arizona->dev->of_node, "wlf,hpdet-channel", &val);
+ switch (val) {
+ case ARIZONA_ACCDET_MODE_HPL:
+ case ARIZONA_ACCDET_MODE_HPR:
+ pdata->hpdet_channel = val;
+ break;
+ default:
+ dev_err(arizona->dev,
+ "Wrong wlf,hpdet-channel DT value %d\n", val);
+ pdata->hpdet_channel = ARIZONA_ACCDET_MODE_HPL;
+ }
+
+ return 0;
+}
+
static int arizona_extcon_probe(struct platform_device *pdev)
{
struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
@@ -1137,6 +1147,11 @@ static int arizona_extcon_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
+ if (IS_ENABLED(CONFIG_OF)) {
+ if (!dev_get_platdata(arizona->dev))
+ arizona_extcon_of_get_pdata(arizona);
+ }
+
info->micvdd = devm_regulator_get(&pdev->dev, "MICVDD");
if (IS_ERR(info->micvdd)) {
ret = PTR_ERR(info->micvdd);
@@ -1161,7 +1176,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
break;
default:
info->micd_clamp = true;
- info->hpdet_ip = 1;
+ info->hpdet_ip_version = 1;
break;
}
break;
@@ -1172,7 +1187,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
break;
default:
info->micd_clamp = true;
- info->hpdet_ip = 2;
+ info->hpdet_ip_version = 2;
break;
}
break;
@@ -1185,7 +1200,6 @@ static int arizona_extcon_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to allocate extcon device\n");
return -ENOMEM;
}
- info->edev->name = "Headset Jack";
ret = devm_extcon_dev_register(&pdev->dev, info->edev);
if (ret < 0) {
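
One detail worth noting in arizona_extcon_of_get_pdata() above: of_property_read_u32() leaves its output untouched when the property is absent, which is why val is preseeded with ARIZONA_ACCDET_MODE_HPL (value 1, per the defines this patch moves into dt-bindings/mfd/arizona.h) and the call's return value can be ignored. A userspace model of that default-then-validate pattern:

#include <stdio.h>

/* model: writes *out and returns 0 only when the property exists */
static int read_u32_model(const unsigned int *prop, unsigned int *out)
{
	if (!prop)
		return -22;	/* -EINVAL; *out is left untouched */
	*out = *prop;
	return 0;
}

int main(void)
{
	unsigned int val = 1;	/* preset default: ARIZONA_ACCDET_MODE_HPL */

	read_u32_model(NULL, &val);	/* property absent in this example */
	printf("hpdet channel = %u\n", val);	/* still the default, 1 */
	return 0;
}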
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
new file mode 100644
index 000000000000..ea962bc547b8
--- /dev/null
+++ b/drivers/extcon/extcon-axp288.c
@@ -0,0 +1,381 @@
+/*
+ * extcon-axp288.c - X-Power AXP288 PMIC extcon cable detection driver
+ *
+ * Copyright (C) 2015 Intel Corporation
+ * Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/usb/phy.h>
+#include <linux/notifier.h>
+#include <linux/extcon.h>
+#include <linux/regmap.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mfd/axp20x.h>
+
+/* Power source status register */
+#define PS_STAT_VBUS_TRIGGER BIT(0)
+#define PS_STAT_BAT_CHRG_DIR BIT(2)
+#define PS_STAT_VBUS_ABOVE_VHOLD BIT(3)
+#define PS_STAT_VBUS_VALID BIT(4)
+#define PS_STAT_VBUS_PRESENT BIT(5)
+
+/* BC module global register */
+#define BC_GLOBAL_RUN BIT(0)
+#define BC_GLOBAL_DET_STAT BIT(2)
+#define BC_GLOBAL_DBP_TOUT BIT(3)
+#define BC_GLOBAL_VLGC_COM_SEL BIT(4)
+#define BC_GLOBAL_DCD_TOUT_MASK (BIT(6)|BIT(5))
+#define BC_GLOBAL_DCD_TOUT_300MS 0
+#define BC_GLOBAL_DCD_TOUT_100MS 1
+#define BC_GLOBAL_DCD_TOUT_500MS 2
+#define BC_GLOBAL_DCD_TOUT_900MS 3
+#define BC_GLOBAL_DCD_DET_SEL BIT(7)
+
+/* BC module vbus control and status register */
+#define VBUS_CNTL_DPDM_PD_EN BIT(4)
+#define VBUS_CNTL_DPDM_FD_EN BIT(5)
+#define VBUS_CNTL_FIRST_PO_STAT BIT(6)
+
+/* BC USB status register */
+#define USB_STAT_BUS_STAT_MASK (BIT(3)|BIT(2)|BIT(1)|BIT(0))
+#define USB_STAT_BUS_STAT_SHIFT 0
+#define USB_STAT_BUS_STAT_ATHD 0
+#define USB_STAT_BUS_STAT_CONN 1
+#define USB_STAT_BUS_STAT_SUSP 2
+#define USB_STAT_BUS_STAT_CONF 3
+#define USB_STAT_USB_SS_MODE BIT(4)
+#define USB_STAT_DEAD_BAT_DET BIT(6)
+#define USB_STAT_DBP_UNCFG BIT(7)
+
+/* BC detect status register */
+#define DET_STAT_MASK (BIT(7)|BIT(6)|BIT(5))
+#define DET_STAT_SHIFT 5
+#define DET_STAT_SDP 1
+#define DET_STAT_CDP 2
+#define DET_STAT_DCP 3
+
+/* IRQ enable-1 register */
+#define PWRSRC_IRQ_CFG_MASK (BIT(4)|BIT(3)|BIT(2))
+
+/* IRQ enable-6 register */
+#define BC12_IRQ_CFG_MASK BIT(1)
+
+enum axp288_extcon_reg {
+ AXP288_PS_STAT_REG = 0x00,
+ AXP288_PS_BOOT_REASON_REG = 0x02,
+ AXP288_BC_GLOBAL_REG = 0x2c,
+ AXP288_BC_VBUS_CNTL_REG = 0x2d,
+ AXP288_BC_USB_STAT_REG = 0x2e,
+ AXP288_BC_DET_STAT_REG = 0x2f,
+ AXP288_PWRSRC_IRQ_CFG_REG = 0x40,
+ AXP288_BC12_IRQ_CFG_REG = 0x45,
+};
+
+enum axp288_mux_select {
+ EXTCON_GPIO_MUX_SEL_PMIC = 0,
+ EXTCON_GPIO_MUX_SEL_SOC,
+};
+
+enum axp288_extcon_irq {
+ VBUS_FALLING_IRQ = 0,
+ VBUS_RISING_IRQ,
+ MV_CHNG_IRQ,
+ BC_USB_CHNG_IRQ,
+ EXTCON_IRQ_END,
+};
+
+static const unsigned int axp288_extcon_cables[] = {
+ EXTCON_SLOW_CHARGER,
+ EXTCON_CHARGE_DOWNSTREAM,
+ EXTCON_FAST_CHARGER,
+ EXTCON_NONE,
+};
+
+struct axp288_extcon_info {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *regmap_irqc;
+ struct axp288_extcon_pdata *pdata;
+ int irq[EXTCON_IRQ_END];
+ struct extcon_dev *edev;
+ struct notifier_block extcon_nb;
+ struct usb_phy *otg;
+};
+
+/* Power up/down reason string array */
+static char *axp288_pwr_up_down_info[] = {
+ "Last wake caused by user pressing the power button",
+ "Last wake caused by a charger insertion",
+ "Last wake caused by a battery insertion",
+ "Last wake caused by SOC initiated global reset",
+ "Last wake caused by cold reset",
+ "Last shutdown caused by PMIC UVLO threshold",
+ "Last shutdown caused by SOC initiated cold off",
+ "Last shutdown caused by user pressing the power button",
+ NULL,
+};
+
+/*
+ * Decode and log the given "reset source indicator" (rsi)
+ * register and then clear it.
+ */
+static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)
+{
+ char **rsi;
+ unsigned int val, i, clear_mask = 0;
+ int ret;
+
+	ret = regmap_read(info->regmap, AXP288_PS_BOOT_REASON_REG, &val);
+	if (ret < 0) {
+		dev_err(info->dev, "failed to read the reset source indicator\n");
+		return;
+	}
+
+ for (i = 0, rsi = axp288_pwr_up_down_info; *rsi; rsi++, i++) {
+ if (val & BIT(i)) {
+ dev_dbg(info->dev, "%s\n", *rsi);
+ clear_mask |= BIT(i);
+ }
+ }
+
+ /* Clear the register value for next reboot (write 1 to clear bit) */
+ regmap_write(info->regmap, AXP288_PS_BOOT_REASON_REG, clear_mask);
+}
+
+static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
+{
+ static bool notify_otg, notify_charger;
+ static unsigned int cable;
+ int ret, stat, cfg, pwr_stat;
+ u8 chrg_type;
+ bool vbus_attach = false;
+
+ ret = regmap_read(info->regmap, AXP288_PS_STAT_REG, &pwr_stat);
+ if (ret < 0) {
+ dev_err(info->dev, "failed to read vbus status\n");
+ return ret;
+ }
+
+ vbus_attach = (pwr_stat & PS_STAT_VBUS_PRESENT);
+ if (!vbus_attach)
+ goto notify_otg;
+
+ /* Check charger detection completion status */
+ ret = regmap_read(info->regmap, AXP288_BC_GLOBAL_REG, &cfg);
+ if (ret < 0)
+ goto dev_det_ret;
+ if (cfg & BC_GLOBAL_DET_STAT) {
+ dev_dbg(info->dev, "can't complete the charger detection\n");
+ goto dev_det_ret;
+ }
+
+ ret = regmap_read(info->regmap, AXP288_BC_DET_STAT_REG, &stat);
+ if (ret < 0)
+ goto dev_det_ret;
+
+ chrg_type = (stat & DET_STAT_MASK) >> DET_STAT_SHIFT;
+
+ switch (chrg_type) {
+ case DET_STAT_SDP:
+		dev_dbg(info->dev, "sdp cable is connected\n");
+ notify_otg = true;
+ notify_charger = true;
+ cable = EXTCON_SLOW_CHARGER;
+ break;
+ case DET_STAT_CDP:
+		dev_dbg(info->dev, "cdp cable is connected\n");
+ notify_otg = true;
+ notify_charger = true;
+ cable = EXTCON_CHARGE_DOWNSTREAM;
+ break;
+ case DET_STAT_DCP:
+		dev_dbg(info->dev, "dcp cable is connected\n");
+ notify_charger = true;
+ cable = EXTCON_FAST_CHARGER;
+ break;
+ default:
+ dev_warn(info->dev,
+			"disconnect, unknown or ID event\n");
+ }
+
+notify_otg:
+ if (notify_otg) {
+ /*
+		 * If VBUS is absent, connect D+/D- lines to PMIC for BC
+		 * detection; else connect them to SoC for USB communication.
+ */
+ if (info->pdata->gpio_mux_cntl)
+ gpiod_set_value(info->pdata->gpio_mux_cntl,
+ vbus_attach ? EXTCON_GPIO_MUX_SEL_SOC
+ : EXTCON_GPIO_MUX_SEL_PMIC);
+
+ atomic_notifier_call_chain(&info->otg->notifier,
+ vbus_attach ? USB_EVENT_VBUS : USB_EVENT_NONE, NULL);
+ }
+
+ if (notify_charger)
+ extcon_set_cable_state_(info->edev, cable, vbus_attach);
+
+ /* Clear the flags on disconnect event */
+ if (!vbus_attach)
+ notify_otg = notify_charger = false;
+
+ return 0;
+
+dev_det_ret:
+ if (ret < 0)
+		dev_err(info->dev, "failed to detect BC module\n");
+
+ return ret;
+}
+
+static irqreturn_t axp288_extcon_isr(int irq, void *data)
+{
+ struct axp288_extcon_info *info = data;
+ int ret;
+
+ ret = axp288_handle_chrg_det_event(info);
+ if (ret < 0)
+ dev_err(info->dev, "failed to handle the interrupt\n");
+
+ return IRQ_HANDLED;
+}
+
+static void axp288_extcon_enable_irq(struct axp288_extcon_info *info)
+{
+ /* Unmask VBUS interrupt */
+ regmap_write(info->regmap, AXP288_PWRSRC_IRQ_CFG_REG,
+ PWRSRC_IRQ_CFG_MASK);
+ regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
+ BC_GLOBAL_RUN, 0);
+ /* Unmask the BC1.2 complete interrupts */
+ regmap_write(info->regmap, AXP288_BC12_IRQ_CFG_REG, BC12_IRQ_CFG_MASK);
+ /* Enable the charger detection logic */
+ regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
+ BC_GLOBAL_RUN, BC_GLOBAL_RUN);
+}
+
+static int axp288_extcon_probe(struct platform_device *pdev)
+{
+ struct axp288_extcon_info *info;
+ struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
+ int ret, i, pirq, gpio;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = &pdev->dev;
+ info->regmap = axp20x->regmap;
+ info->regmap_irqc = axp20x->regmap_irqc;
+ info->pdata = pdev->dev.platform_data;
+
+ if (!info->pdata) {
+ /* Try ACPI provided pdata via device properties */
+ if (!device_property_present(&pdev->dev,
+					"axp288_extcon_data"))
+ dev_err(&pdev->dev, "failed to get platform data\n");
+ return -ENODEV;
+ }
+ platform_set_drvdata(pdev, info);
+
+ axp288_extcon_log_rsi(info);
+
+ /* Initialize extcon device */
+ info->edev = devm_extcon_dev_allocate(&pdev->dev,
+ axp288_extcon_cables);
+ if (IS_ERR(info->edev)) {
+ dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
+ return PTR_ERR(info->edev);
+ }
+
+ /* Register extcon device */
+ ret = devm_extcon_dev_register(&pdev->dev, info->edev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register extcon device\n");
+ return ret;
+ }
+
+ /* Get otg transceiver phy */
+ info->otg = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (IS_ERR(info->otg)) {
+ dev_err(&pdev->dev, "failed to get otg transceiver\n");
+ return PTR_ERR(info->otg);
+ }
+
+ /* Set up gpio control for USB Mux */
+ if (info->pdata->gpio_mux_cntl) {
+ gpio = desc_to_gpio(info->pdata->gpio_mux_cntl);
+ ret = gpio_request(gpio, "USB_MUX");
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to request the gpio=%d\n", gpio);
+ goto gpio_req_failed;
+ }
+ gpiod_direction_output(info->pdata->gpio_mux_cntl,
+ EXTCON_GPIO_MUX_SEL_PMIC);
+ }
+
+ for (i = 0; i < EXTCON_IRQ_END; i++) {
+ pirq = platform_get_irq(pdev, i);
+ info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
+ if (info->irq[i] < 0) {
+ dev_err(&pdev->dev,
+ "failed to get virtual interrupt=%d\n", pirq);
+ ret = info->irq[i];
+ goto gpio_req_failed;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, info->irq[i],
+ NULL, axp288_extcon_isr,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ pdev->name, info);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request interrupt=%d\n",
+ info->irq[i]);
+ goto gpio_req_failed;
+ }
+ }
+
+ /* Enable interrupts */
+ axp288_extcon_enable_irq(info);
+
+ return 0;
+
+gpio_req_failed:
+ usb_put_phy(info->otg);
+ return ret;
+}
+
+static int axp288_extcon_remove(struct platform_device *pdev)
+{
+ struct axp288_extcon_info *info = platform_get_drvdata(pdev);
+
+ usb_put_phy(info->otg);
+ return 0;
+}
+
+static struct platform_driver axp288_extcon_driver = {
+ .probe = axp288_extcon_probe,
+ .remove = axp288_extcon_remove,
+ .driver = {
+ .name = "axp288_extcon",
+ },
+};
+module_platform_driver(axp288_extcon_driver);
+
+MODULE_AUTHOR("Ramakrishna Pallala <ramakrishna.pallala@intel.com>");
+MODULE_DESCRIPTION("X-Powers AXP288 extcon driver");
+MODULE_LICENSE("GPL v2");
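
For reference, the charger-type handling in axp288_handle_chrg_det_event() is a plain field decode: DET_STAT occupies bits 7:5 of the BC detect status register, and values 1, 2 and 3 select the BC1.2 port types SDP, CDP and DCP, which the driver maps to EXTCON_SLOW_CHARGER, EXTCON_CHARGE_DOWNSTREAM and EXTCON_FAST_CHARGER. A userspace sketch of the decode (the register value is hypothetical):

#include <stdio.h>

#define DET_STAT_MASK	0xe0	/* BIT(7)|BIT(6)|BIT(5), as in the driver */
#define DET_STAT_SHIFT	5
enum { DET_STAT_SDP = 1, DET_STAT_CDP = 2, DET_STAT_DCP = 3 };

static const char *decode(unsigned int stat)
{
	switch ((stat & DET_STAT_MASK) >> DET_STAT_SHIFT) {
	case DET_STAT_SDP: return "SDP -> EXTCON_SLOW_CHARGER";
	case DET_STAT_CDP: return "CDP -> EXTCON_CHARGE_DOWNSTREAM";
	case DET_STAT_DCP: return "DCP -> EXTCON_FAST_CHARGER";
	default:	   return "disconnect, unknown or ID event";
	}
}

int main(void)
{
	printf("%s\n", decode(0x40));	/* hypothetical read: bits 7:5 = 2 -> CDP */
	return 0;
}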
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 7af33fc433cd..355459a54e8b 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -104,7 +104,6 @@ static int gpio_extcon_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to allocate extcon device\n");
return -ENOMEM;
}
- extcon_data->edev->name = pdata->name;
extcon_data->gpio = pdata->gpio;
extcon_data->gpio_active_low = pdata->gpio_active_low;
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index 3823aa4a3a80..df0659d98e5a 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -148,33 +148,14 @@ enum max14577_muic_acc_type {
MAX14577_MUIC_ADC_OPEN,
};
-/* max14577 MUIC device support below list of accessories(external connector) */
-enum {
- EXTCON_CABLE_USB = 0,
- EXTCON_CABLE_TA,
- EXTCON_CABLE_FAST_CHARGER,
- EXTCON_CABLE_SLOW_CHARGER,
- EXTCON_CABLE_CHARGE_DOWNSTREAM,
- EXTCON_CABLE_JIG_USB_ON,
- EXTCON_CABLE_JIG_USB_OFF,
- EXTCON_CABLE_JIG_UART_OFF,
- EXTCON_CABLE_JIG_UART_ON,
-
- _EXTCON_CABLE_NUM,
-};
-
-static const char *max14577_extcon_cable[] = {
- [EXTCON_CABLE_USB] = "USB",
- [EXTCON_CABLE_TA] = "TA",
- [EXTCON_CABLE_FAST_CHARGER] = "Fast-charger",
- [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
- [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
- [EXTCON_CABLE_JIG_USB_ON] = "JIG-USB-ON",
- [EXTCON_CABLE_JIG_USB_OFF] = "JIG-USB-OFF",
- [EXTCON_CABLE_JIG_UART_OFF] = "JIG-UART-OFF",
- [EXTCON_CABLE_JIG_UART_ON] = "JIG-UART-ON",
-
- NULL,
+static const unsigned int max14577_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_TA,
+ EXTCON_FAST_CHARGER,
+ EXTCON_SLOW_CHARGER,
+ EXTCON_CHARGE_DOWNSTREAM,
+ EXTCON_JIG,
+ EXTCON_NONE,
};
/*
@@ -348,7 +329,6 @@ static int max14577_muic_get_cable_type(struct max14577_muic_info *info,
static int max14577_muic_jig_handler(struct max14577_muic_info *info,
int cable_type, bool attached)
{
- char cable_name[32];
int ret = 0;
u8 path = CTRL1_SW_OPEN;
@@ -358,18 +338,12 @@ static int max14577_muic_jig_handler(struct max14577_muic_info *info,
switch (cable_type) {
case MAX14577_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */
- /* PATH:AP_USB */
- strcpy(cable_name, "JIG-USB-OFF");
- path = CTRL1_SW_USB;
- break;
case MAX14577_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */
/* PATH:AP_USB */
- strcpy(cable_name, "JIG-USB-ON");
path = CTRL1_SW_USB;
break;
case MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */
/* PATH:AP_UART */
- strcpy(cable_name, "JIG-UART-OFF");
path = CTRL1_SW_UART;
break;
default:
@@ -382,7 +356,7 @@ static int max14577_muic_jig_handler(struct max14577_muic_info *info,
if (ret < 0)
return ret;
- extcon_set_cable_state(info->edev, cable_name, attached);
+ extcon_set_cable_state_(info->edev, EXTCON_JIG, attached);
return 0;
}
@@ -479,20 +453,22 @@ static int max14577_muic_chg_handler(struct max14577_muic_info *info)
if (ret < 0)
return ret;
- extcon_set_cable_state(info->edev, "USB", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
break;
case MAX14577_CHARGER_TYPE_DEDICATED_CHG:
- extcon_set_cable_state(info->edev, "TA", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_TA, attached);
break;
case MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT:
- extcon_set_cable_state(info->edev,
- "Charge-downstream", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_CHARGE_DOWNSTREAM,
+ attached);
break;
case MAX14577_CHARGER_TYPE_SPECIAL_500MA:
- extcon_set_cable_state(info->edev, "Slow-charger", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_SLOW_CHARGER,
+ attached);
break;
case MAX14577_CHARGER_TYPE_SPECIAL_1A:
- extcon_set_cable_state(info->edev, "Fast-charger", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_FAST_CHARGER,
+ attached);
break;
case MAX14577_CHARGER_TYPE_NONE:
case MAX14577_CHARGER_TYPE_DEAD_BATTERY:
@@ -742,8 +718,6 @@ static int max14577_muic_probe(struct platform_device *pdev)
return -ENOMEM;
}
- info->edev->name = dev_name(&pdev->dev);
-
ret = devm_extcon_dev_register(&pdev->dev, info->edev);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index a66bec8f6252..f4f3b3d53928 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -200,44 +200,17 @@ enum max77693_muic_acc_type {
/*
* MAX77693 MUIC device support below list of accessories(external connector)
*/
-enum {
- EXTCON_CABLE_USB = 0,
- EXTCON_CABLE_USB_HOST,
- EXTCON_CABLE_TA,
- EXTCON_CABLE_FAST_CHARGER,
- EXTCON_CABLE_SLOW_CHARGER,
- EXTCON_CABLE_CHARGE_DOWNSTREAM,
- EXTCON_CABLE_MHL,
- EXTCON_CABLE_MHL_TA,
- EXTCON_CABLE_JIG_USB_ON,
- EXTCON_CABLE_JIG_USB_OFF,
- EXTCON_CABLE_JIG_UART_OFF,
- EXTCON_CABLE_JIG_UART_ON,
- EXTCON_CABLE_DOCK_SMART,
- EXTCON_CABLE_DOCK_DESK,
- EXTCON_CABLE_DOCK_AUDIO,
-
- _EXTCON_CABLE_NUM,
-};
-
-static const char *max77693_extcon_cable[] = {
- [EXTCON_CABLE_USB] = "USB",
- [EXTCON_CABLE_USB_HOST] = "USB-Host",
- [EXTCON_CABLE_TA] = "TA",
- [EXTCON_CABLE_FAST_CHARGER] = "Fast-charger",
- [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
- [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
- [EXTCON_CABLE_MHL] = "MHL",
- [EXTCON_CABLE_MHL_TA] = "MHL-TA",
- [EXTCON_CABLE_JIG_USB_ON] = "JIG-USB-ON",
- [EXTCON_CABLE_JIG_USB_OFF] = "JIG-USB-OFF",
- [EXTCON_CABLE_JIG_UART_OFF] = "JIG-UART-OFF",
- [EXTCON_CABLE_JIG_UART_ON] = "JIG-UART-ON",
- [EXTCON_CABLE_DOCK_SMART] = "Dock-Smart",
- [EXTCON_CABLE_DOCK_DESK] = "Dock-Desk",
- [EXTCON_CABLE_DOCK_AUDIO] = "Dock-Audio",
-
- NULL,
+static const unsigned int max77693_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_TA,
+ EXTCON_FAST_CHARGER,
+ EXTCON_SLOW_CHARGER,
+ EXTCON_CHARGE_DOWNSTREAM,
+ EXTCON_MHL,
+ EXTCON_JIG,
+ EXTCON_DOCK,
+ EXTCON_NONE,
};
/*
@@ -484,7 +457,7 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
int ret = 0;
int vbvolt;
bool cable_attached;
- char dock_name[CABLE_NAME_MAX];
+ unsigned int dock_id;
dev_info(info->dev,
"external connector is %s (adc:0x%02x)\n",
@@ -507,15 +480,15 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
}
/*
- * Notify Dock-Smart/MHL state.
- * - Dock-Smart device include three type of cable which
+ * Notify Dock/MHL state.
+	 * - Dock device includes three types of cable which
* are HDMI, USB for mouse/keyboard and micro-usb port
- * for USB/TA cable. Dock-Smart device need always exteranl
- * power supply(USB/TA cable through micro-usb cable). Dock-
- * Smart device support screen output of target to separate
+	 * for USB/TA cable. Dock device always needs an external
+	 * power supply (USB/TA cable through micro-usb cable). Dock
+	 * device supports screen output of the target to a separate
* monitor and mouse/keyboard for desktop mode.
*
- * Features of 'USB/TA cable with Dock-Smart device'
+ * Features of 'USB/TA cable with Dock device'
* - Support MHL
* - Support external output feature of audio
* - Support charging through micro-usb port without data
@@ -529,16 +502,16 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
if (ret < 0)
return ret;
- extcon_set_cable_state(info->edev, "Dock-Smart", attached);
- extcon_set_cable_state(info->edev, "MHL", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_DOCK, attached);
+ extcon_set_cable_state_(info->edev, EXTCON_MHL, attached);
goto out;
case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */
- strcpy(dock_name, "Dock-Desk");
+ dock_id = EXTCON_DOCK;
break;
case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
- strcpy(dock_name, "Dock-Audio");
+ dock_id = EXTCON_DOCK;
if (!attached)
- extcon_set_cable_state(info->edev, "USB", false);
+ extcon_set_cable_state_(info->edev, EXTCON_USB, false);
break;
default:
dev_err(info->dev, "failed to detect %s dock device\n",
@@ -550,7 +523,7 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
if (ret < 0)
return ret;
- extcon_set_cable_state(info->edev, dock_name, attached);
+ extcon_set_cable_state_(info->edev, dock_id, attached);
out:
return 0;
@@ -615,20 +588,19 @@ static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
if (ret < 0)
return ret;
- extcon_set_cable_state(info->edev, "USB-Host", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, attached);
break;
case MAX77693_MUIC_GND_AV_CABLE_LOAD:
/* Audio Video Cable with load, PATH:AUDIO */
ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
if (ret < 0)
return ret;
- extcon_set_cable_state(info->edev,
- "Audio-video-load", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
break;
case MAX77693_MUIC_GND_MHL:
case MAX77693_MUIC_GND_MHL_VB:
/* MHL or MHL with USB/TA cable */
- extcon_set_cable_state(info->edev, "MHL", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_MHL, attached);
break;
default:
dev_err(info->dev, "failed to detect %s cable of gnd type\n",
@@ -642,7 +614,6 @@ static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
static int max77693_muic_jig_handler(struct max77693_muic_info *info,
int cable_type, bool attached)
{
- char cable_name[32];
int ret = 0;
u8 path = CONTROL1_SW_OPEN;
@@ -652,23 +623,13 @@ static int max77693_muic_jig_handler(struct max77693_muic_info *info,
switch (cable_type) {
case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */
- /* PATH:AP_USB */
- strcpy(cable_name, "JIG-USB-OFF");
- path = CONTROL1_SW_USB;
- break;
case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */
/* PATH:AP_USB */
- strcpy(cable_name, "JIG-USB-ON");
path = CONTROL1_SW_USB;
break;
case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */
- /* PATH:AP_UART */
- strcpy(cable_name, "JIG-UART-OFF");
- path = CONTROL1_SW_UART;
- break;
case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* ADC_JIG_UART_ON */
/* PATH:AP_UART */
- strcpy(cable_name, "JIG-UART-ON");
path = CONTROL1_SW_UART;
break;
default:
@@ -681,7 +642,7 @@ static int max77693_muic_jig_handler(struct max77693_muic_info *info,
if (ret < 0)
return ret;
- extcon_set_cable_state(info->edev, cable_name, attached);
+ extcon_set_cable_state_(info->edev, EXTCON_JIG, attached);
return 0;
}
@@ -823,22 +784,22 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
case MAX77693_MUIC_GND_MHL:
case MAX77693_MUIC_GND_MHL_VB:
/*
- * MHL cable with MHL-TA(USB/TA) cable
+ * MHL cable with USB/TA cable
* - MHL cable include two port(HDMI line and separate
* micro-usb port. When the target connect MHL cable,
- * extcon driver check whether MHL-TA(USB/TA) cable is
- * connected. If MHL-TA cable is connected, extcon
+	 * extcon driver checks whether a USB/TA cable is
+	 * connected. If a USB/TA cable is connected, extcon
* driver notify state to notifiee for charging battery.
*
- * Features of 'MHL-TA(USB/TA) with MHL cable'
+ * Features of 'USB/TA with MHL cable'
* - Support MHL
* - Support charging through micro-usb port without
* data connection
*/
- extcon_set_cable_state(info->edev, "MHL-TA", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_TA, attached);
if (!cable_attached)
- extcon_set_cable_state(info->edev,
- "MHL", cable_attached);
+ extcon_set_cable_state_(info->edev, EXTCON_MHL,
+ cable_attached);
break;
}
@@ -861,11 +822,12 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
* - Support charging through micro-usb port without
* data connection.
*/
- extcon_set_cable_state(info->edev, "USB", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_USB,
+ attached);
if (!cable_attached)
- extcon_set_cable_state(info->edev, "Dock-Audio",
- cable_attached);
+ extcon_set_cable_state_(info->edev, EXTCON_DOCK,
+ cable_attached);
break;
case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
/*
@@ -893,10 +855,10 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
if (ret < 0)
return ret;
- extcon_set_cable_state(info->edev, "Dock-Smart",
- attached);
- extcon_set_cable_state(info->edev, "MHL", attached);
-
+ extcon_set_cable_state_(info->edev, EXTCON_DOCK,
+ attached);
+ extcon_set_cable_state_(info->edev, EXTCON_MHL,
+ attached);
break;
}
@@ -929,23 +891,26 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
if (ret < 0)
return ret;
- extcon_set_cable_state(info->edev, "USB", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_USB,
+ attached);
break;
case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
/* Only TA cable */
- extcon_set_cable_state(info->edev, "TA", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_TA, attached);
break;
}
break;
case MAX77693_CHARGER_TYPE_DOWNSTREAM_PORT:
- extcon_set_cable_state(info->edev,
- "Charge-downstream", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_CHARGE_DOWNSTREAM,
+ attached);
break;
case MAX77693_CHARGER_TYPE_APPLE_500MA:
- extcon_set_cable_state(info->edev, "Slow-charger", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_SLOW_CHARGER,
+ attached);
break;
case MAX77693_CHARGER_TYPE_APPLE_1A_2A:
- extcon_set_cable_state(info->edev, "Fast-charger", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_FAST_CHARGER,
+ attached);
break;
case MAX77693_CHARGER_TYPE_DEAD_BATTERY:
break;
@@ -1182,7 +1147,6 @@ static int max77693_muic_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
return -ENOMEM;
}
- info->edev->name = DEV_NAME;
ret = devm_extcon_dev_register(&pdev->dev, info->edev);
if (ret) {
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index 8db6a926ea07..fac2f1417a79 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -118,36 +118,16 @@ enum max77843_muic_charger_type {
MAX77843_MUIC_CHG_GND,
};
-enum {
- MAX77843_CABLE_USB = 0,
- MAX77843_CABLE_USB_HOST,
- MAX77843_CABLE_TA,
- MAX77843_CABLE_CHARGE_DOWNSTREAM,
- MAX77843_CABLE_FAST_CHARGER,
- MAX77843_CABLE_SLOW_CHARGER,
- MAX77843_CABLE_MHL,
- MAX77843_CABLE_MHL_TA,
- MAX77843_CABLE_JIG_USB_ON,
- MAX77843_CABLE_JIG_USB_OFF,
- MAX77843_CABLE_JIG_UART_ON,
- MAX77843_CABLE_JIG_UART_OFF,
-
- MAX77843_CABLE_NUM,
-};
-
-static const char *max77843_extcon_cable[] = {
- [MAX77843_CABLE_USB] = "USB",
- [MAX77843_CABLE_USB_HOST] = "USB-HOST",
- [MAX77843_CABLE_TA] = "TA",
- [MAX77843_CABLE_CHARGE_DOWNSTREAM] = "CHARGER-DOWNSTREAM",
- [MAX77843_CABLE_FAST_CHARGER] = "FAST-CHARGER",
- [MAX77843_CABLE_SLOW_CHARGER] = "SLOW-CHARGER",
- [MAX77843_CABLE_MHL] = "MHL",
- [MAX77843_CABLE_MHL_TA] = "MHL-TA",
- [MAX77843_CABLE_JIG_USB_ON] = "JIG-USB-ON",
- [MAX77843_CABLE_JIG_USB_OFF] = "JIG-USB-OFF",
- [MAX77843_CABLE_JIG_UART_ON] = "JIG-UART-ON",
- [MAX77843_CABLE_JIG_UART_OFF] = "JIG-UART-OFF",
+static const unsigned int max77843_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_TA,
+ EXTCON_CHARGE_DOWNSTREAM,
+ EXTCON_FAST_CHARGER,
+ EXTCON_SLOW_CHARGER,
+ EXTCON_MHL,
+ EXTCON_JIG,
+ EXTCON_NONE,
};
struct max77843_muic_irq {
@@ -362,7 +342,7 @@ static int max77843_muic_adc_gnd_handler(struct max77843_muic_info *info)
if (ret < 0)
return ret;
- extcon_set_cable_state(info->edev, "USB-HOST", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, attached);
break;
case MAX77843_MUIC_GND_MHL_VB:
case MAX77843_MUIC_GND_MHL:
@@ -370,7 +350,7 @@ static int max77843_muic_adc_gnd_handler(struct max77843_muic_info *info)
if (ret < 0)
return ret;
- extcon_set_cable_state(info->edev, "MHL", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_MHL, attached);
break;
default:
dev_err(info->dev, "failed to detect %s accessory(gnd:0x%x)\n",
@@ -385,36 +365,29 @@ static int max77843_muic_jig_handler(struct max77843_muic_info *info,
int cable_type, bool attached)
{
int ret;
+ u8 path = CONTROL1_SW_OPEN;
dev_dbg(info->dev, "external connector is %s (adc:0x%02x)\n",
attached ? "attached" : "detached", cable_type);
switch (cable_type) {
case MAX77843_MUIC_ADC_FACTORY_MODE_USB_OFF:
- ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached);
- if (ret < 0)
- return ret;
- extcon_set_cable_state(info->edev, "JIG-USB-OFF", attached);
- break;
case MAX77843_MUIC_ADC_FACTORY_MODE_USB_ON:
- ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached);
- if (ret < 0)
- return ret;
- extcon_set_cable_state(info->edev, "JIG-USB-ON", attached);
+ path = CONTROL1_SW_USB;
break;
case MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF:
- ret = max77843_muic_set_path(info, CONTROL1_SW_UART, attached);
- if (ret < 0)
- return ret;
- extcon_set_cable_state(info->edev, "JIG-UART-OFF", attached);
+ path = CONTROL1_SW_UART;
break;
default:
- ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
- if (ret < 0)
- return ret;
- break;
+ return -EINVAL;
}
+ ret = max77843_muic_set_path(info, path, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state_(info->edev, EXTCON_JIG, attached);
+
return 0;
}
@@ -505,36 +478,38 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
if (ret < 0)
return ret;
- extcon_set_cable_state(info->edev, "USB", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
break;
case MAX77843_MUIC_CHG_DOWNSTREAM:
ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
if (ret < 0)
return ret;
- extcon_set_cable_state(info->edev,
- "CHARGER-DOWNSTREAM", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_CHARGE_DOWNSTREAM,
+ attached);
break;
case MAX77843_MUIC_CHG_DEDICATED:
ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
if (ret < 0)
return ret;
- extcon_set_cable_state(info->edev, "TA", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_TA, attached);
break;
case MAX77843_MUIC_CHG_SPECIAL_500MA:
ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
if (ret < 0)
return ret;
- extcon_set_cable_state(info->edev, "SLOW-CHAREGER", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_SLOW_CHARGER,
+ attached);
break;
case MAX77843_MUIC_CHG_SPECIAL_1A:
ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
if (ret < 0)
return ret;
- extcon_set_cable_state(info->edev, "FAST-CHARGER", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_FAST_CHARGER,
+ attached);
break;
case MAX77843_MUIC_CHG_GND:
gnd_type = max77843_muic_get_cable_type(info,
@@ -542,9 +517,9 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
/* Charger cable on MHL accessory is attach or detach */
if (gnd_type == MAX77843_MUIC_GND_MHL_VB)
- extcon_set_cable_state(info->edev, "MHL-TA", true);
+ extcon_set_cable_state_(info->edev, EXTCON_TA, true);
else if (gnd_type == MAX77843_MUIC_GND_MHL)
- extcon_set_cable_state(info->edev, "MHL-TA", false);
+ extcon_set_cable_state_(info->edev, EXTCON_TA, false);
break;
case MAX77843_MUIC_CHG_NONE:
break;
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 5774e56c6422..7b1ef200b121 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -145,34 +145,17 @@ struct max8997_muic_info {
int path_uart;
};
-enum {
- EXTCON_CABLE_USB = 0,
- EXTCON_CABLE_USB_HOST,
- EXTCON_CABLE_TA,
- EXTCON_CABLE_FAST_CHARGER,
- EXTCON_CABLE_SLOW_CHARGER,
- EXTCON_CABLE_CHARGE_DOWNSTREAM,
- EXTCON_CABLE_MHL,
- EXTCON_CABLE_DOCK_DESK,
- EXTCON_CABLE_DOCK_CARD,
- EXTCON_CABLE_JIG,
-
- _EXTCON_CABLE_NUM,
-};
-
-static const char *max8997_extcon_cable[] = {
- [EXTCON_CABLE_USB] = "USB",
- [EXTCON_CABLE_USB_HOST] = "USB-Host",
- [EXTCON_CABLE_TA] = "TA",
- [EXTCON_CABLE_FAST_CHARGER] = "Fast-charger",
- [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
- [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
- [EXTCON_CABLE_MHL] = "MHL",
- [EXTCON_CABLE_DOCK_DESK] = "Dock-Desk",
- [EXTCON_CABLE_DOCK_CARD] = "Dock-Card",
- [EXTCON_CABLE_JIG] = "JIG",
-
- NULL,
+static const unsigned int max8997_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_TA,
+ EXTCON_FAST_CHARGER,
+ EXTCON_SLOW_CHARGER,
+ EXTCON_CHARGE_DOWNSTREAM,
+ EXTCON_MHL,
+ EXTCON_DOCK,
+ EXTCON_JIG,
+ EXTCON_NONE,
};
/*
@@ -347,10 +330,10 @@ static int max8997_muic_handle_usb(struct max8997_muic_info *info,
switch (usb_type) {
case MAX8997_USB_HOST:
- extcon_set_cable_state(info->edev, "USB-Host", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, attached);
break;
case MAX8997_USB_DEVICE:
- extcon_set_cable_state(info->edev, "USB", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
break;
default:
dev_err(info->dev, "failed to detect %s usb cable\n",
@@ -374,10 +357,8 @@ static int max8997_muic_handle_dock(struct max8997_muic_info *info,
switch (cable_type) {
case MAX8997_MUIC_ADC_AV_CABLE_NOLOAD:
- extcon_set_cable_state(info->edev, "Dock-desk", attached);
- break;
case MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON:
- extcon_set_cable_state(info->edev, "Dock-card", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_DOCK, attached);
break;
default:
dev_err(info->dev, "failed to detect %s dock device\n",
@@ -400,7 +381,7 @@ static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info,
return ret;
}
- extcon_set_cable_state(info->edev, "JIG", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_JIG, attached);
return 0;
}
@@ -422,7 +403,7 @@ static int max8997_muic_adc_handler(struct max8997_muic_info *info)
return ret;
break;
case MAX8997_MUIC_ADC_MHL:
- extcon_set_cable_state(info->edev, "MHL", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_MHL, attached);
break;
case MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF:
case MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON:
@@ -505,17 +486,19 @@ static int max8997_muic_chg_handler(struct max8997_muic_info *info)
}
break;
case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
- extcon_set_cable_state(info->edev,
- "Charge-downstream", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_CHARGE_DOWNSTREAM,
+ attached);
break;
case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
- extcon_set_cable_state(info->edev, "TA", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_TA, attached);
break;
case MAX8997_CHARGER_TYPE_500MA:
- extcon_set_cable_state(info->edev, "Slow-charger", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_SLOW_CHARGER,
+ attached);
break;
case MAX8997_CHARGER_TYPE_1A:
- extcon_set_cable_state(info->edev, "Fast-charger", attached);
+ extcon_set_cable_state_(info->edev, EXTCON_FAST_CHARGER,
+ attached);
break;
default:
dev_err(info->dev,
@@ -700,7 +683,6 @@ static int max8997_muic_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err_irq;
}
- info->edev->name = DEV_NAME;
ret = devm_extcon_dev_register(&pdev->dev, info->edev);
if (ret) {
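
The max8997 hunks above show the pattern repeated by every driver conversion in this series: the per-driver index enum and string table are replaced by an array of unique cable IDs terminated by EXTCON_NONE, and cable state is reported through extcon_set_cable_state_() by ID instead of by name. A minimal sketch of a converted driver, assuming the extcon API as reworked later in this patch; the foo_* names are hypothetical:

static const unsigned int foo_extcon_cable[] = {
	EXTCON_USB,
	EXTCON_USB_HOST,
	EXTCON_NONE,
};

static int foo_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;
	int ret;

	/* Allocate and register against the ID table, not strings. */
	edev = devm_extcon_dev_allocate(&pdev->dev, foo_extcon_cable);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	ret = devm_extcon_dev_register(&pdev->dev, edev);
	if (ret)
		return ret;

	/* Report attach/detach by unique connector ID. */
	return extcon_set_cable_state_(edev, EXTCON_USB, true);
}
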
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 11c6757b6c40..080d5cc27055 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -29,10 +29,10 @@
#include <linux/of.h>
#include <linux/of_platform.h>
-static const char *palmas_extcon_cable[] = {
- [0] = "USB",
- [1] = "USB-HOST",
- NULL,
+static const unsigned int palmas_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_NONE,
};
static const int mutually_exclusive[] = {0x3, 0x0};
@@ -49,6 +49,7 @@ static void palmas_usb_wakeup(struct palmas *palmas, int enable)
static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
{
struct palmas_usb *palmas_usb = _palmas_usb;
+ struct extcon_dev *edev = palmas_usb->edev;
unsigned int vbus_line_state;
palmas_read(palmas_usb->palmas, PALMAS_INTERRUPT_BASE,
@@ -57,7 +58,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
if (vbus_line_state & PALMAS_INT3_LINE_STATE_VBUS) {
if (palmas_usb->linkstat != PALMAS_USB_STATE_VBUS) {
palmas_usb->linkstat = PALMAS_USB_STATE_VBUS;
- extcon_set_cable_state(palmas_usb->edev, "USB", true);
+ extcon_set_cable_state_(edev, EXTCON_USB, true);
dev_info(palmas_usb->dev, "USB cable is attached\n");
} else {
dev_dbg(palmas_usb->dev,
@@ -66,7 +67,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
} else if (!(vbus_line_state & PALMAS_INT3_LINE_STATE_VBUS)) {
if (palmas_usb->linkstat == PALMAS_USB_STATE_VBUS) {
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
- extcon_set_cable_state(palmas_usb->edev, "USB", false);
+ extcon_set_cable_state_(edev, EXTCON_USB, false);
dev_info(palmas_usb->dev, "USB cable is detached\n");
} else {
dev_dbg(palmas_usb->dev,
@@ -81,6 +82,7 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
{
unsigned int set, id_src;
struct palmas_usb *palmas_usb = _palmas_usb;
+ struct extcon_dev *edev = palmas_usb->edev;
palmas_read(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
PALMAS_USB_ID_INT_LATCH_SET, &set);
@@ -93,7 +95,7 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
PALMAS_USB_ID_INT_LATCH_CLR,
PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND);
palmas_usb->linkstat = PALMAS_USB_STATE_ID;
- extcon_set_cable_state(palmas_usb->edev, "USB-HOST", true);
+ extcon_set_cable_state_(edev, EXTCON_USB_HOST, true);
dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
} else if ((set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) &&
(id_src & PALMAS_USB_ID_INT_SRC_ID_FLOAT)) {
@@ -101,17 +103,17 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
PALMAS_USB_ID_INT_LATCH_CLR,
PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT);
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
- extcon_set_cable_state(palmas_usb->edev, "USB-HOST", false);
+ extcon_set_cable_state_(edev, EXTCON_USB_HOST, false);
dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
} else if ((palmas_usb->linkstat == PALMAS_USB_STATE_ID) &&
(!(set & PALMAS_USB_ID_INT_SRC_ID_GND))) {
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
- extcon_set_cable_state(palmas_usb->edev, "USB-HOST", false);
+ extcon_set_cable_state_(edev, EXTCON_USB_HOST, false);
dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
} else if ((palmas_usb->linkstat == PALMAS_USB_STATE_DISCONNECT) &&
(id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) {
palmas_usb->linkstat = PALMAS_USB_STATE_ID;
- extcon_set_cable_state(palmas_usb->edev, "USB-HOST", true);
+ extcon_set_cable_state_(edev, EXTCON_USB_HOST, true);
dev_info(palmas_usb->dev, " USB-HOST cable is attached\n");
}
@@ -193,7 +195,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to allocate extcon device\n");
return -ENOMEM;
}
- palmas_usb->edev->name = kstrdup(node->name, GFP_KERNEL);
palmas_usb->edev->mutually_exclusive = mutually_exclusive;
status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev);
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
index 9ccd5af89d1c..92c939221a41 100644
--- a/drivers/extcon/extcon-rt8973a.c
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -90,27 +90,12 @@ static struct reg_data rt8973a_reg_data[] = {
};
/* List of detectable cables */
-enum {
- EXTCON_CABLE_USB = 0,
- EXTCON_CABLE_USB_HOST,
- EXTCON_CABLE_TA,
- EXTCON_CABLE_JIG_OFF_USB,
- EXTCON_CABLE_JIG_ON_USB,
- EXTCON_CABLE_JIG_OFF_UART,
- EXTCON_CABLE_JIG_ON_UART,
-
- EXTCON_CABLE_END,
-};
-
-static const char *rt8973a_extcon_cable[] = {
- [EXTCON_CABLE_USB] = "USB",
- [EXTCON_CABLE_USB_HOST] = "USB-Host",
- [EXTCON_CABLE_TA] = "TA",
- [EXTCON_CABLE_JIG_OFF_USB] = "JIG-USB-OFF",
- [EXTCON_CABLE_JIG_ON_USB] = "JIG-USB-ON",
- [EXTCON_CABLE_JIG_OFF_UART] = "JIG-UART-OFF",
- [EXTCON_CABLE_JIG_ON_UART] = "JIG-UART-ON",
- NULL,
+static const unsigned int rt8973a_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_TA,
+ EXTCON_JIG,
+ EXTCON_NONE,
};
/* Define OVP (Over Voltage Protection), OTP (Over Temperature Protection) */
@@ -313,14 +298,11 @@ static int rt8973a_muic_cable_handler(struct rt8973a_muic_info *info,
enum rt8973a_event_type event)
{
static unsigned int prev_cable_type;
- const char **cable_names = info->edev->supported_cable;
unsigned int con_sw = DM_DP_SWITCH_UART;
- int ret, idx = 0, cable_type;
+ int ret, cable_type;
+ unsigned int id;
bool attached = false;
- if (!cable_names)
- return 0;
-
switch (event) {
case RT8973A_EVENT_ATTACH:
cable_type = rt8973a_muic_get_cable_type(info);
@@ -347,31 +329,25 @@ static int rt8973a_muic_cable_handler(struct rt8973a_muic_info *info,
switch (cable_type) {
case RT8973A_MUIC_ADC_OTG:
- idx = EXTCON_CABLE_USB_HOST;
+ id = EXTCON_USB_HOST;
con_sw = DM_DP_SWITCH_USB;
break;
case RT8973A_MUIC_ADC_TA:
- idx = EXTCON_CABLE_TA;
+ id = EXTCON_TA;
con_sw = DM_DP_SWITCH_OPEN;
break;
case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_OFF_USB:
- idx = EXTCON_CABLE_JIG_OFF_USB;
- con_sw = DM_DP_SWITCH_UART;
- break;
case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_ON_USB:
- idx = EXTCON_CABLE_JIG_ON_USB;
- con_sw = DM_DP_SWITCH_UART;
+ id = EXTCON_JIG;
+ con_sw = DM_DP_SWITCH_USB;
break;
case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_OFF_UART:
- idx = EXTCON_CABLE_JIG_OFF_UART;
- con_sw = DM_DP_SWITCH_UART;
- break;
case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_ON_UART:
- idx = EXTCON_CABLE_JIG_ON_UART;
+ id = EXTCON_JIG;
con_sw = DM_DP_SWITCH_UART;
break;
case RT8973A_MUIC_ADC_USB:
- idx = EXTCON_CABLE_USB;
+ id = EXTCON_USB;
con_sw = DM_DP_SWITCH_USB;
break;
case RT8973A_MUIC_ADC_OPEN:
@@ -421,7 +397,7 @@ static int rt8973a_muic_cable_handler(struct rt8973a_muic_info *info,
return ret;
/* Change the state of external accessory */
- extcon_set_cable_state(info->edev, cable_names[idx], attached);
+ extcon_set_cable_state_(info->edev, id, attached);
return 0;
}
@@ -643,7 +619,6 @@ static int rt8973a_muic_i2c_probe(struct i2c_client *i2c,
dev_err(info->dev, "failed to allocate memory for extcon\n");
return -ENOMEM;
}
- info->edev->name = np->name;
/* Register extcon device */
ret = devm_extcon_dev_register(info->dev, info->edev);
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index 2f93cf307852..817dece23b4c 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -92,19 +92,11 @@ static struct reg_data sm5502_reg_data[] = {
};
/* List of detectable cables */
-enum {
- EXTCON_CABLE_USB = 0,
- EXTCON_CABLE_USB_HOST,
- EXTCON_CABLE_TA,
-
- EXTCON_CABLE_END,
-};
-
-static const char *sm5502_extcon_cable[] = {
- [EXTCON_CABLE_USB] = "USB",
- [EXTCON_CABLE_USB_HOST] = "USB-Host",
- [EXTCON_CABLE_TA] = "TA",
- NULL,
+static const unsigned int sm5502_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_TA,
+ EXTCON_NONE,
};
/* Define supported accessory type */
@@ -377,16 +369,12 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
bool attached)
{
static unsigned int prev_cable_type = SM5502_MUIC_ADC_GROUND;
- const char **cable_names = info->edev->supported_cable;
unsigned int cable_type = SM5502_MUIC_ADC_GROUND;
unsigned int con_sw = DM_DP_SWITCH_OPEN;
unsigned int vbus_sw = VBUSIN_SWITCH_OPEN;
- unsigned int idx = 0;
+ unsigned int id;
int ret;
- if (!cable_names)
- return 0;
-
/* Get the type of attached or detached cable */
if (attached)
cable_type = sm5502_muic_get_cable_type(info);
@@ -396,17 +384,17 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
switch (cable_type) {
case SM5502_MUIC_ADC_OPEN_USB:
- idx = EXTCON_CABLE_USB;
+ id = EXTCON_USB;
con_sw = DM_DP_SWITCH_USB;
vbus_sw = VBUSIN_SWITCH_VBUSOUT_WITH_USB;
break;
case SM5502_MUIC_ADC_OPEN_TA:
- idx = EXTCON_CABLE_TA;
+ id = EXTCON_TA;
con_sw = DM_DP_SWITCH_OPEN;
vbus_sw = VBUSIN_SWITCH_VBUSOUT;
break;
case SM5502_MUIC_ADC_OPEN_USB_OTG:
- idx = EXTCON_CABLE_USB_HOST;
+ id = EXTCON_USB_HOST;
con_sw = DM_DP_SWITCH_USB;
vbus_sw = VBUSIN_SWITCH_OPEN;
break;
@@ -422,7 +410,7 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
return ret;
/* Change the state of external accessory */
- extcon_set_cable_state(info->edev, cable_names[idx], attached);
+ extcon_set_cable_state_(info->edev, id, attached);
return 0;
}
@@ -623,7 +611,6 @@ static int sm5022_muic_i2c_probe(struct i2c_client *i2c,
dev_err(info->dev, "failed to allocate memory for extcon\n");
return -ENOMEM;
}
- info->edev->name = np->name;
/* Register extcon device */
ret = devm_extcon_dev_register(info->dev, info->edev);
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index e45d1f13f445..a2a44536a608 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -15,6 +15,7 @@
*/
#include <linux/extcon.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -38,18 +39,10 @@ struct usb_extcon_info {
struct delayed_work wq_detcable;
};
-/* List of detectable cables */
-enum {
- EXTCON_CABLE_USB = 0,
- EXTCON_CABLE_USB_HOST,
-
- EXTCON_CABLE_END,
-};
-
-static const char *usb_extcon_cable[] = {
- [EXTCON_CABLE_USB] = "USB",
- [EXTCON_CABLE_USB_HOST] = "USB-HOST",
- NULL,
+static const unsigned int usb_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_NONE,
};
static void usb_extcon_detect_cable(struct work_struct *work)
@@ -67,24 +60,16 @@ static void usb_extcon_detect_cable(struct work_struct *work)
* As we don't have event for USB peripheral cable attached,
* we simulate USB peripheral attach here.
*/
- extcon_set_cable_state(info->edev,
- usb_extcon_cable[EXTCON_CABLE_USB_HOST],
- false);
- extcon_set_cable_state(info->edev,
- usb_extcon_cable[EXTCON_CABLE_USB],
- true);
+ extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, false);
+ extcon_set_cable_state_(info->edev, EXTCON_USB, true);
} else {
/*
* ID = 0 means USB HOST cable attached.
* As we don't have event for USB peripheral cable detached,
* we simulate USB peripheral detach here.
*/
- extcon_set_cable_state(info->edev,
- usb_extcon_cable[EXTCON_CABLE_USB],
- false);
- extcon_set_cable_state(info->edev,
- usb_extcon_cable[EXTCON_CABLE_USB_HOST],
- true);
+ extcon_set_cable_state_(info->edev, EXTCON_USB, false);
+ extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, true);
}
}
@@ -113,7 +98,7 @@ static int usb_extcon_probe(struct platform_device *pdev)
return -ENOMEM;
info->dev = dev;
- info->id_gpiod = devm_gpiod_get(&pdev->dev, "id");
+ info->id_gpiod = devm_gpiod_get(&pdev->dev, "id", GPIOD_IN);
if (IS_ERR(info->id_gpiod)) {
dev_err(dev, "failed to get ID GPIO\n");
return PTR_ERR(info->id_gpiod);
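
The extra GPIOD_IN argument tracks the gpiod consumer interface, where the initial direction is set at request time rather than with a separate gpiod_direction_input() call; the new include pulls in the consumer declarations. A sketch of the request-and-read sequence under that API; foo_read_id_pin is a hypothetical helper:

#include <linux/gpio/consumer.h>

static int foo_read_id_pin(struct device *dev)
{
	struct gpio_desc *id_gpiod;

	/* Request the "id" line and configure it as an input in one step. */
	id_gpiod = devm_gpiod_get(dev, "id", GPIOD_IN);
	if (IS_ERR(id_gpiod))
		return PTR_ERR(id_gpiod);

	/* 1: ID floating (peripheral side), 0: ID grounded (host side). */
	return gpiod_get_value_cansleep(id_gpiod);
}
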
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 4c9f165e4a04..76157ab9faf3 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -1,8 +1,11 @@
/*
- * drivers/extcon/extcon_class.c
+ * drivers/extcon/extcon.c - External Connector (extcon) framework.
*
* External connector (extcon) class driver
*
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
* Copyright (C) 2012 Samsung Electronics
* Author: Donggeun Kim <dg77.kim@samsung.com>
* Author: MyungJoo Ham <myungjoo.ham@samsung.com>
@@ -19,8 +22,7 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
-*/
+ */
#include <linux/module.h>
#include <linux/types.h>
@@ -33,36 +35,43 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
-/*
- * extcon_cable_name suggests the standard cable names for commonly used
- * cable types.
- *
- * However, please do not use extcon_cable_name directly for extcon_dev
- * struct's supported_cable pointer unless your device really supports
- * every single port-type of the following cable names. Please choose cable
- * names that are actually used in your extcon device.
- */
-const char extcon_cable_name[][CABLE_NAME_MAX + 1] = {
+#define SUPPORTED_CABLE_MAX 32
+#define CABLE_NAME_MAX 30
+
+static const char *extcon_name[] = {
+ [EXTCON_NONE] = "NONE",
+
+ /* USB external connector */
[EXTCON_USB] = "USB",
- [EXTCON_USB_HOST] = "USB-Host",
+ [EXTCON_USB_HOST] = "USB-HOST",
+
+ /* Charger external connector */
[EXTCON_TA] = "TA",
- [EXTCON_FAST_CHARGER] = "Fast-charger",
- [EXTCON_SLOW_CHARGER] = "Slow-charger",
- [EXTCON_CHARGE_DOWNSTREAM] = "Charge-downstream",
+ [EXTCON_FAST_CHARGER] = "FAST-CHARGER",
+ [EXTCON_SLOW_CHARGER] = "SLOW-CHARGER",
+ [EXTCON_CHARGE_DOWNSTREAM] = "CHARGE-DOWNSTREAM",
+
+ /* Audio/Video external connector */
+ [EXTCON_LINE_IN] = "LINE-IN",
+ [EXTCON_LINE_OUT] = "LINE-OUT",
+ [EXTCON_MICROPHONE] = "MICROPHONE",
+ [EXTCON_HEADPHONE] = "HEADPHONE",
+
[EXTCON_HDMI] = "HDMI",
[EXTCON_MHL] = "MHL",
[EXTCON_DVI] = "DVI",
[EXTCON_VGA] = "VGA",
- [EXTCON_DOCK] = "Dock",
- [EXTCON_LINE_IN] = "Line-in",
- [EXTCON_LINE_OUT] = "Line-out",
- [EXTCON_MIC_IN] = "Microphone",
- [EXTCON_HEADPHONE_OUT] = "Headphone",
- [EXTCON_SPDIF_IN] = "SPDIF-in",
- [EXTCON_SPDIF_OUT] = "SPDIF-out",
- [EXTCON_VIDEO_IN] = "Video-in",
- [EXTCON_VIDEO_OUT] = "Video-out",
- [EXTCON_MECHANICAL] = "Mechanical",
+ [EXTCON_SPDIF_IN] = "SPDIF-IN",
+ [EXTCON_SPDIF_OUT] = "SPDIF-OUT",
+ [EXTCON_VIDEO_IN] = "VIDEO-IN",
+ [EXTCON_VIDEO_OUT] = "VIDEO-OUT",
+
+ /* Etc external connector */
+ [EXTCON_DOCK] = "DOCK",
+ [EXTCON_JIG] = "JIG",
+ [EXTCON_MECHANICAL] = "MECHANICAL",
+
+ NULL,
};
static struct class *extcon_class;
@@ -102,6 +111,51 @@ static int check_mutually_exclusive(struct extcon_dev *edev, u32 new_state)
return 0;
}
+static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id)
+{
+ int i;
+
+	/* Find the index of the extcon cable in edev->supported_cable */
+ for (i = 0; i < edev->max_supported; i++) {
+ if (edev->supported_cable[i] == id)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
+{
+ unsigned int id = EXTCON_NONE;
+ int i = 0;
+
+ if (edev->max_supported == 0)
+ return -EINVAL;
+
+	/* Find the extcon id of the cable matching the given name */
+	while (extcon_name[i]) {
+		if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) {
+			id = i;
+			break;
+		}
+		i++;
+	}
+
+ if (id == EXTCON_NONE)
+ return -EINVAL;
+
+ return find_cable_index_by_id(edev, id);
+}
+
+static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached)
+{
+ if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) {
+		*attached = ((new >> idx) & 0x1) ? true : false;
+ return true;
+ }
+
+ return false;
+}
+
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -119,11 +173,9 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
if (edev->max_supported == 0)
return sprintf(buf, "%u\n", edev->state);
- for (i = 0; i < SUPPORTED_CABLE_MAX; i++) {
- if (!edev->supported_cable[i])
- break;
+ for (i = 0; i < edev->max_supported; i++) {
count += sprintf(buf + count, "%s=%d\n",
- edev->supported_cable[i],
+ extcon_name[edev->supported_cable[i]],
!!(edev->state & (1 << i)));
}
@@ -155,15 +207,7 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
{
struct extcon_dev *edev = dev_get_drvdata(dev);
- /* Optional callback given by the user */
- if (edev->print_name) {
- int ret = edev->print_name(edev, buf);
-
- if (ret >= 0)
- return ret;
- }
-
- return sprintf(buf, "%s\n", dev_name(&edev->dev));
+ return sprintf(buf, "%s\n", edev->name);
}
static DEVICE_ATTR_RO(name);
@@ -172,9 +216,10 @@ static ssize_t cable_name_show(struct device *dev,
{
struct extcon_cable *cable = container_of(attr, struct extcon_cable,
attr_name);
+ int i = cable->cable_index;
return sprintf(buf, "%s\n",
- cable->edev->supported_cable[cable->cable_index]);
+ extcon_name[cable->edev->supported_cable[i]]);
}
static ssize_t cable_state_show(struct device *dev,
@@ -211,23 +256,27 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
char *envp[3];
int env_offset = 0;
int length;
+ int index;
unsigned long flags;
+ bool attached;
spin_lock_irqsave(&edev->lock, flags);
if (edev->state != ((edev->state & ~mask) | (state & mask))) {
- u32 old_state = edev->state;
-
if (check_mutually_exclusive(edev, (edev->state & ~mask) |
(state & mask))) {
spin_unlock_irqrestore(&edev->lock, flags);
return -EPERM;
}
+ for (index = 0; index < edev->max_supported; index++) {
+ if (is_extcon_changed(edev->state, state, index, &attached))
+ raw_notifier_call_chain(&edev->nh[index], attached, edev);
+ }
+
edev->state &= ~mask;
edev->state |= state & mask;
- raw_notifier_call_chain(&edev->nh, old_state, edev);
/* This could be in interrupt handler */
prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
if (prop_buf) {
@@ -284,39 +333,19 @@ int extcon_set_state(struct extcon_dev *edev, u32 state)
EXPORT_SYMBOL_GPL(extcon_set_state);
/**
- * extcon_find_cable_index() - Get the cable index based on the cable name.
+ * extcon_get_cable_state_() - Get the status of a specific cable.
* @edev: the extcon device that has the cable.
- * @cable_name: cable name to be searched.
- *
- * Note that accessing a cable state based on cable_index is faster than
- * cable_name because using cable_name induces a loop with strncmp().
- * Thus, when get/set_cable_state is repeatedly used, using cable_index
- * is recommended.
+ * @id: the unique id of each external connector in extcon enumeration.
*/
-int extcon_find_cable_index(struct extcon_dev *edev, const char *cable_name)
+int extcon_get_cable_state_(struct extcon_dev *edev, const unsigned int id)
{
- int i;
+ int index;
- if (edev->supported_cable) {
- for (i = 0; edev->supported_cable[i]; i++) {
- if (!strncmp(edev->supported_cable[i],
- cable_name, CABLE_NAME_MAX))
- return i;
- }
- }
+ index = find_cable_index_by_id(edev, id);
+ if (index < 0)
+ return index;
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(extcon_find_cable_index);
-
-/**
- * extcon_get_cable_state_() - Get the status of a specific cable.
- * @edev: the extcon device that has the cable.
- * @index: cable index that can be retrieved by extcon_find_cable_index().
- */
-int extcon_get_cable_state_(struct extcon_dev *edev, int index)
-{
- if (index < 0 || (edev->max_supported && edev->max_supported <= index))
+ if (edev->max_supported && edev->max_supported <= index)
return -EINVAL;
return !!(edev->state & (1 << index));
@@ -332,7 +361,7 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
*/
int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
{
- return extcon_get_cable_state_(edev, extcon_find_cable_index
+ return extcon_get_cable_state_(edev, find_cable_index_by_name
(edev, cable_name));
}
EXPORT_SYMBOL_GPL(extcon_get_cable_state);
@@ -340,17 +369,22 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state);
/**
* extcon_set_cable_state_() - Set the status of a specific cable.
* @edev: the extcon device that has the cable.
- * @index: cable index that can be retrieved by
- * extcon_find_cable_index().
- * @cable_state: the new cable status. The default semantics is
+ * @id: the unique id of each external connector
+ * in extcon enumeration.
+ * @state: the new cable status. The default semantics is
* true: attached / false: detached.
*/
-int extcon_set_cable_state_(struct extcon_dev *edev,
- int index, bool cable_state)
+int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
+ bool cable_state)
{
u32 state;
+ int index;
+
+ index = find_cable_index_by_id(edev, id);
+ if (index < 0)
+ return index;
- if (index < 0 || (edev->max_supported && edev->max_supported <= index))
+ if (edev->max_supported && edev->max_supported <= index)
return -EINVAL;
state = cable_state ? (1 << index) : 0;
@@ -370,7 +404,7 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
int extcon_set_cable_state(struct extcon_dev *edev,
const char *cable_name, bool cable_state)
{
- return extcon_set_cable_state_(edev, extcon_find_cable_index
+ return extcon_set_cable_state_(edev, find_cable_index_by_name
(edev, cable_name), cable_state);
}
EXPORT_SYMBOL_GPL(extcon_set_cable_state);
@@ -395,29 +429,6 @@ out:
}
EXPORT_SYMBOL_GPL(extcon_get_extcon_dev);
-static int _call_per_cable(struct notifier_block *nb, unsigned long val,
- void *ptr)
-{
- struct extcon_specific_cable_nb *obj = container_of(nb,
- struct extcon_specific_cable_nb, internal_nb);
- struct extcon_dev *edev = ptr;
-
- if ((val & (1 << obj->cable_index)) !=
- (edev->state & (1 << obj->cable_index))) {
- bool cable_state = true;
-
- obj->previous_value = val;
-
- if (val & (1 << obj->cable_index))
- cable_state = false;
-
- return obj->user_nb->notifier_call(obj->user_nb,
- cable_state, ptr);
- }
-
- return NOTIFY_OK;
-}
-
/**
* extcon_register_interest() - Register a notifier for a state change of a
 *			specific cable, not an entire set of cables of a
@@ -456,20 +467,18 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
if (!obj->edev)
return -ENODEV;
- obj->cable_index = extcon_find_cable_index(obj->edev,
- cable_name);
+ obj->cable_index = find_cable_index_by_name(obj->edev,
+ cable_name);
if (obj->cable_index < 0)
return obj->cable_index;
obj->user_nb = nb;
- obj->internal_nb.notifier_call = _call_per_cable;
-
spin_lock_irqsave(&obj->edev->lock, flags);
- ret = raw_notifier_chain_register(&obj->edev->nh,
- &obj->internal_nb);
+ ret = raw_notifier_chain_register(
+ &obj->edev->nh[obj->cable_index],
+ obj->user_nb);
spin_unlock_irqrestore(&obj->edev->lock, flags);
- return ret;
} else {
struct class_dev_iter iter;
struct extcon_dev *extd;
@@ -481,7 +490,7 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
while ((dev = class_dev_iter_next(&iter))) {
extd = dev_get_drvdata(dev);
- if (extcon_find_cable_index(extd, cable_name) < 0)
+ if (find_cable_index_by_name(extd, cable_name) < 0)
continue;
class_dev_iter_exit(&iter);
@@ -489,8 +498,10 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
cable_name, nb);
}
- return -ENODEV;
+ ret = -ENODEV;
}
+
+ return ret;
}
EXPORT_SYMBOL_GPL(extcon_register_interest);
@@ -509,7 +520,8 @@ int extcon_unregister_interest(struct extcon_specific_cable_nb *obj)
return -EINVAL;
spin_lock_irqsave(&obj->edev->lock, flags);
- ret = raw_notifier_chain_unregister(&obj->edev->nh, &obj->internal_nb);
+ ret = raw_notifier_chain_unregister(
+ &obj->edev->nh[obj->cable_index], obj->user_nb);
spin_unlock_irqrestore(&obj->edev->lock, flags);
return ret;
@@ -519,21 +531,24 @@ EXPORT_SYMBOL_GPL(extcon_unregister_interest);
/**
* extcon_register_notifier() - Register a notifiee to get notified by
* any attach status changes from the extcon.
- * @edev: the extcon device.
+ * @edev:		the extcon device that has the external connector.
+ * @id: the unique id of each external connector in extcon enumeration.
* @nb: a notifier block to be registered.
*
* Note that the second parameter given to the callback of nb (val) is
* "old_state", not the current state. The current state can be retrieved
 * by looking at the third parameter (edev pointer)'s state value.
*/
-int extcon_register_notifier(struct extcon_dev *edev,
- struct notifier_block *nb)
+int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
+ struct notifier_block *nb)
{
unsigned long flags;
- int ret;
+ int ret, idx;
+
+ idx = find_cable_index_by_id(edev, id);
spin_lock_irqsave(&edev->lock, flags);
- ret = raw_notifier_chain_register(&edev->nh, nb);
+ ret = raw_notifier_chain_register(&edev->nh[idx], nb);
spin_unlock_irqrestore(&edev->lock, flags);
return ret;
@@ -542,17 +557,20 @@ EXPORT_SYMBOL_GPL(extcon_register_notifier);
/**
* extcon_unregister_notifier() - Unregister a notifiee from the extcon device.
- * @edev: the extcon device.
- * @nb: a registered notifier block to be unregistered.
+ * @edev:	the extcon device that has the external connector.
+ * @id:		the unique id of each external connector in extcon enumeration.
+ * @nb:		a notifier block to be unregistered.
*/
-int extcon_unregister_notifier(struct extcon_dev *edev,
- struct notifier_block *nb)
+int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
+ struct notifier_block *nb)
{
unsigned long flags;
- int ret;
+ int ret, idx;
+
+	idx = find_cable_index_by_id(edev, id);
+	if (idx < 0)
+		return idx;
spin_lock_irqsave(&edev->lock, flags);
- ret = raw_notifier_chain_unregister(&edev->nh, nb);
+ ret = raw_notifier_chain_unregister(&edev->nh[idx], nb);
spin_unlock_irqrestore(&edev->lock, flags);
return ret;
@@ -595,7 +613,7 @@ static void dummy_sysfs_dev_release(struct device *dev)
/*
* extcon_dev_allocate() - Allocate the memory of extcon device.
- * @supported_cable: Array of supported cable names ending with NULL.
+ * @supported_cable:	Array of supported cable IDs ending with EXTCON_NONE.
* If supported_cable is NULL, cable name related APIs
* are disabled.
*
@@ -605,7 +623,7 @@ static void dummy_sysfs_dev_release(struct device *dev)
*
* Return the pointer of extcon device if success or ERR_PTR(err) if fail
*/
-struct extcon_dev *extcon_dev_allocate(const char **supported_cable)
+struct extcon_dev *extcon_dev_allocate(const unsigned int *supported_cable)
{
struct extcon_dev *edev;
@@ -647,7 +665,7 @@ static void devm_extcon_dev_release(struct device *dev, void *res)
/**
* devm_extcon_dev_allocate - Allocate managed extcon device
* @dev: device owning the extcon device being created
- * @supported_cable: Array of supported cable names ending with NULL.
+ * @supported_cable:	Array of supported cable IDs ending with EXTCON_NONE.
* If supported_cable is NULL, cable name related APIs
* are disabled.
*
@@ -659,7 +677,7 @@ static void devm_extcon_dev_release(struct device *dev, void *res)
* or ERR_PTR(err) if fail
*/
struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const char **supported_cable)
+ const unsigned int *supported_cable)
{
struct extcon_dev **ptr, *edev;
@@ -701,6 +719,7 @@ EXPORT_SYMBOL_GPL(devm_extcon_dev_free);
int extcon_dev_register(struct extcon_dev *edev)
{
int ret, index = 0;
+ static atomic_t edev_no = ATOMIC_INIT(-1);
if (!extcon_class) {
ret = create_extcon_class();
@@ -708,30 +727,29 @@ int extcon_dev_register(struct extcon_dev *edev)
return ret;
}
- if (edev->supported_cable) {
- /* Get size of array */
- for (index = 0; edev->supported_cable[index]; index++)
- ;
- edev->max_supported = index;
- } else {
- edev->max_supported = 0;
- }
+ if (!edev->supported_cable)
+ return -EINVAL;
+
+ for (; edev->supported_cable[index] != EXTCON_NONE; index++);
+ edev->max_supported = index;
if (index > SUPPORTED_CABLE_MAX) {
- dev_err(&edev->dev, "extcon: maximum number of supported cables exceeded.\n");
+ dev_err(&edev->dev,
+			"exceeded the maximum number of supported cables\n");
return -EINVAL;
}
edev->dev.class = extcon_class;
edev->dev.release = extcon_dev_release;
- edev->name = edev->name ? edev->name : dev_name(edev->dev.parent);
+ edev->name = dev_name(edev->dev.parent);
if (IS_ERR_OR_NULL(edev->name)) {
dev_err(&edev->dev,
"extcon device name is null\n");
return -EINVAL;
}
- dev_set_name(&edev->dev, "%s", edev->name);
+ dev_set_name(&edev->dev, "extcon%lu",
+ (unsigned long)atomic_inc_return(&edev_no));
if (edev->max_supported) {
char buf[10];
@@ -864,7 +882,15 @@ int extcon_dev_register(struct extcon_dev *edev)
spin_lock_init(&edev->lock);
- RAW_INIT_NOTIFIER_HEAD(&edev->nh);
+ edev->nh = devm_kzalloc(&edev->dev,
+ sizeof(*edev->nh) * edev->max_supported, GFP_KERNEL);
+ if (!edev->nh) {
+ ret = -ENOMEM;
+ goto err_dev;
+ }
+
+ for (index = 0; index < edev->max_supported; index++)
+ RAW_INIT_NOTIFIER_HEAD(&edev->nh[index]);
dev_set_drvdata(&edev->dev, edev);
edev->state = 0;
@@ -1044,6 +1070,15 @@ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(extcon_get_edev_by_phandle);
+/**
+ * extcon_get_edev_name() - Get the name of the extcon device.
+ * @edev: the extcon device
+ */
+const char *extcon_get_edev_name(struct extcon_dev *edev)
+{
+ return !edev ? NULL : edev->name;
+}
+
static int __init extcon_class_init(void)
{
return create_extcon_class();
@@ -1059,6 +1094,7 @@ static void __exit extcon_class_exit(void)
}
module_exit(extcon_class_exit);
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index 5e4dfa4cfe22..39c9b2c08d33 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -5,4 +5,4 @@ obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
hv_vmbus-y := vmbus_drv.o \
hv.o connection.o channel.o \
channel_mgmt.o ring_buffer.o
-hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o
+hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o hv_utils_transport.o
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 54da66dc7d16..603ce97e9027 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -73,6 +73,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
unsigned long flags;
int ret, err = 0;
unsigned long t;
+ struct page *page;
spin_lock_irqsave(&newchannel->lock, flags);
if (newchannel->state == CHANNEL_OPEN_STATE) {
@@ -87,8 +88,17 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
newchannel->channel_callback_context = context;
/* Allocate the ring buffer */
- out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
- get_order(send_ringbuffer_size + recv_ringbuffer_size));
+ page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
+ GFP_KERNEL|__GFP_ZERO,
+ get_order(send_ringbuffer_size +
+ recv_ringbuffer_size));
+
+ if (!page)
+ out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
+ get_order(send_ringbuffer_size +
+ recv_ringbuffer_size));
+ else
+ out = (void *)page_address(page);
if (!out) {
err = -ENOMEM;
@@ -178,19 +188,18 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
goto error1;
}
-
- if (open_info->response.open_result.status)
- err = open_info->response.open_result.status;
-
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&open_info->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
- if (err == 0)
- newchannel->state = CHANNEL_OPENED_STATE;
+ if (open_info->response.open_result.status) {
+ err = -EAGAIN;
+ goto error_gpadl;
+ }
+ newchannel->state = CHANNEL_OPENED_STATE;
kfree(open_info);
- return err;
+ return 0;
error1:
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
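
The vmbus_open() change prefers ring-buffer pages on the NUMA node of the channel's target CPU and falls back to an unconstrained allocation only when that node cannot satisfy the request. The allocation pattern in isolation, as a sketch; alloc_ring_local is a hypothetical helper:

static void *alloc_ring_local(int target_cpu, size_t size)
{
	struct page *page;
	int order = get_order(size);

	/* Prefer memory on the node that will service the interrupt. */
	page = alloc_pages_node(cpu_to_node(target_cpu),
				GFP_KERNEL | __GFP_ZERO, order);
	if (page)
		return page_address(page);

	/* Fall back to any node rather than failing outright. */
	return (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
}
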
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 0eeb1b3bc048..4506a6623618 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -32,6 +32,9 @@
#include "hyperv_vmbus.h"
+static void init_vp_index(struct vmbus_channel *channel,
+ const uuid_le *type_guid);
+
/**
* vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
* @icmsghdrp: Pointer to msg header structure
@@ -205,6 +208,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
primary_channel = channel->primary_channel;
spin_lock_irqsave(&primary_channel->lock, flags);
list_del(&channel->sc_list);
+ primary_channel->num_sc--;
spin_unlock_irqrestore(&primary_channel->lock, flags);
}
free_channel(channel);
@@ -212,11 +216,16 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
void vmbus_free_channels(void)
{
- struct vmbus_channel *channel;
+ struct vmbus_channel *channel, *tmp;
+
+ list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
+ listentry) {
+ /* if we don't set rescind to true, vmbus_close_internal()
+ * won't invoke hv_process_channel_removal().
+ */
+ channel->rescind = true;
- list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
vmbus_device_unregister(channel->device_obj);
- free_channel(channel);
}
}
@@ -228,7 +237,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
struct vmbus_channel *channel;
bool fnew = true;
- bool enq = false;
unsigned long flags;
/* Make sure this is a new offer */
@@ -244,25 +252,12 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
}
}
- if (fnew) {
+ if (fnew)
list_add_tail(&newchannel->listentry,
&vmbus_connection.chn_list);
- enq = true;
- }
spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
- if (enq) {
- if (newchannel->target_cpu != get_cpu()) {
- put_cpu();
- smp_call_function_single(newchannel->target_cpu,
- percpu_channel_enq,
- newchannel, true);
- } else {
- percpu_channel_enq(newchannel);
- put_cpu();
- }
- }
if (!fnew) {
/*
* Check to see if this is a sub-channel.
@@ -274,27 +269,22 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
newchannel->primary_channel = channel;
spin_lock_irqsave(&channel->lock, flags);
list_add_tail(&newchannel->sc_list, &channel->sc_list);
- spin_unlock_irqrestore(&channel->lock, flags);
-
- if (newchannel->target_cpu != get_cpu()) {
- put_cpu();
- smp_call_function_single(newchannel->target_cpu,
- percpu_channel_enq,
- newchannel, true);
- } else {
- percpu_channel_enq(newchannel);
- put_cpu();
- }
-
- newchannel->state = CHANNEL_OPEN_STATE;
channel->num_sc++;
- if (channel->sc_creation_callback != NULL)
- channel->sc_creation_callback(newchannel);
+ spin_unlock_irqrestore(&channel->lock, flags);
+ } else
+ goto err_free_chan;
+ }
- return;
- }
+ init_vp_index(newchannel, &newchannel->offermsg.offer.if_type);
- goto err_free_chan;
+ if (newchannel->target_cpu != get_cpu()) {
+ put_cpu();
+ smp_call_function_single(newchannel->target_cpu,
+ percpu_channel_enq,
+ newchannel, true);
+ } else {
+ percpu_channel_enq(newchannel);
+ put_cpu();
}
/*
@@ -304,6 +294,12 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
*/
newchannel->state = CHANNEL_OPEN_STATE;
+ if (!fnew) {
+ if (channel->sc_creation_callback != NULL)
+ channel->sc_creation_callback(newchannel);
+ return;
+ }
+
/*
* Start the process of binding this offer to the driver
* We need to set the DeviceObject field before calling
@@ -374,23 +370,27 @@ static const struct hv_vmbus_device_id hp_devs[] = {
/*
* We use this state to statically distribute the channel interrupt load.
*/
-static u32 next_vp;
+static int next_numa_node_id;
/*
* Starting with Win8, we can statically distribute the incoming
- * channel interrupt load by binding a channel to VCPU. We
- * implement here a simple round robin scheme for distributing
- * the interrupt load.
- * We will bind channels that are not performance critical to cpu 0 and
- * performance critical channels (IDE, SCSI and Network) will be uniformly
- * distributed across all available CPUs.
+ * channel interrupt load by binding a channel to a VCPU.
+ * We do this in a hierarchical fashion:
+ * First distribute the primary channels across available NUMA nodes
+ * and then distribute the subchannels amongst the CPUs in the NUMA
+ * node assigned to the primary channel.
+ *
+ * For pre-win8 hosts or non-performance critical channels we assign the
+ * first CPU in the first NUMA node.
*/
static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid)
{
u32 cur_cpu;
int i;
bool perf_chn = false;
- u32 max_cpus = num_online_cpus();
+ struct vmbus_channel *primary = channel->primary_channel;
+ int next_node;
+ struct cpumask available_mask;
for (i = IDE; i < MAX_PERF_CHN; i++) {
if (!memcmp(type_guid->b, hp_devs[i].guid,
@@ -407,16 +407,77 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
* Also if the channel is not a performance critical
* channel, bind it to cpu 0.
*/
+ channel->numa_node = 0;
+ cpumask_set_cpu(0, &channel->alloced_cpus_in_node);
channel->target_cpu = 0;
- channel->target_vp = 0;
+ channel->target_vp = hv_context.vp_index[0];
return;
}
- cur_cpu = (++next_vp % max_cpus);
+
+ /*
+ * We distribute primary channels evenly across all the available
+ * NUMA nodes and within the assigned NUMA node we will assign the
+ * first available CPU to the primary channel.
+ * The sub-channels will be assigned to the CPUs available in the
+ * NUMA node evenly.
+ */
+ if (!primary) {
+ while (true) {
+ next_node = next_numa_node_id++;
+ if (next_node == nr_node_ids)
+ next_node = next_numa_node_id = 0;
+ if (cpumask_empty(cpumask_of_node(next_node)))
+ continue;
+ break;
+ }
+ channel->numa_node = next_node;
+ primary = channel;
+ }
+
+ if (cpumask_weight(&primary->alloced_cpus_in_node) ==
+ cpumask_weight(cpumask_of_node(primary->numa_node))) {
+ /*
+ * We have cycled through all the CPUs in the node;
+ * reset the alloced map.
+ */
+ cpumask_clear(&primary->alloced_cpus_in_node);
+ }
+
+ cpumask_xor(&available_mask, &primary->alloced_cpus_in_node,
+ cpumask_of_node(primary->numa_node));
+
+ cur_cpu = cpumask_next(-1, &available_mask);
+ cpumask_set_cpu(cur_cpu, &primary->alloced_cpus_in_node);
+
channel->target_cpu = cur_cpu;
channel->target_vp = hv_context.vp_index[cur_cpu];
}
/*
+ * vmbus_unload_response - Handler for the unload response.
+ */
+static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
+{
+ /*
+	 * This is a global event; just wake up the waiting thread.
+ * Once we successfully unload, we can cleanup the monitor state.
+ */
+ complete(&vmbus_connection.unload_event);
+}
+
+void vmbus_initiate_unload(void)
+{
+ struct vmbus_channel_message_header hdr;
+
+ init_completion(&vmbus_connection.unload_event);
+ memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
+ hdr.msgtype = CHANNELMSG_UNLOAD;
+ vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));
+
+ wait_for_completion(&vmbus_connection.unload_event);
+}
+
+/*
* vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
*
*/
@@ -461,8 +522,6 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
offer->connection_id;
}
- init_vp_index(newchannel, &offer->offer.if_type);
-
memcpy(&newchannel->offermsg, offer,
sizeof(struct vmbus_channel_offer_channel));
newchannel->monitor_grp = (u8)offer->monitorid / 32;
@@ -712,6 +771,7 @@ struct vmbus_channel_message_table_entry
{CHANNELMSG_INITIATE_CONTACT, 0, NULL},
{CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response},
{CHANNELMSG_UNLOAD, 0, NULL},
+ {CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response},
};
/*
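
The node-selection step of the new init_vp_index() is worth isolating: primary channels walk the NUMA nodes round-robin, skipping nodes with no online CPUs, and the per-node CPU choice then comes from XOR-ing the node mask against the CPUs already handed out. A sketch of the node walk, using the same next_numa_node_id counter as the patch; pick_next_numa_node is a hypothetical helper:

static int next_numa_node_id;

static int pick_next_numa_node(void)
{
	int next_node;

	/* Round-robin over nodes, skipping ones with no online CPUs. */
	while (true) {
		next_node = next_numa_node_id++;
		if (next_node == nr_node_ids)
			next_node = next_numa_node_id = 0;
		if (!cpumask_empty(cpumask_of_node(next_node)))
			break;
	}
	return next_node;
}
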
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index b27220a425f4..4fc2e8836e60 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -58,6 +58,9 @@ static __u32 vmbus_get_next_version(__u32 current_version)
case (VERSION_WIN8_1):
return VERSION_WIN8;
+ case (VERSION_WIN10):
+ return VERSION_WIN8_1;
+
case (VERSION_WS2008):
default:
return VERSION_INVAL;
@@ -80,7 +83,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
- if (version == VERSION_WIN8_1) {
+ if (version >= VERSION_WIN8_1) {
msg->target_vcpu = hv_context.vp_index[get_cpu()];
put_cpu();
}
@@ -227,6 +230,11 @@ cleanup:
void vmbus_disconnect(void)
{
+ /*
+ * First send the unload request to the host.
+ */
+ vmbus_initiate_unload();
+
if (vmbus_connection.work_queue) {
drain_workqueue(vmbus_connection.work_queue);
destroy_workqueue(vmbus_connection.work_queue);
@@ -371,8 +379,7 @@ void vmbus_on_event(unsigned long data)
int cpu = smp_processor_id();
union hv_synic_event_flags *event;
- if ((vmbus_proto_version == VERSION_WS2008) ||
- (vmbus_proto_version == VERSION_WIN7)) {
+ if (vmbus_proto_version < VERSION_WIN8) {
maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
recv_int_page = vmbus_connection.recv_int_page;
} else {
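
vmbus_get_next_version() encodes a strict downgrade chain, so adding VERSION_WIN10 at the top lets negotiation start from the newest protocol and step down one release per attempt until the host accepts one or VERSION_INVAL ends the walk. Roughly how a caller consumes the chain, as a sketch; try_negotiate stands in for the real message exchange:

__u32 version = VERSION_WIN10;

/* Try the newest protocol first, stepping down until the host agrees. */
while (version != VERSION_INVAL) {
	if (try_negotiate(version) == 0)	/* hypothetical helper */
		break;
	version = vmbus_get_next_version(version);
}
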
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index cb5b7dc9797f..8a725cd69ad7 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -567,7 +567,9 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
case MEM_ONLINE:
dm_device.num_pages_onlined += mem->nr_pages;
case MEM_CANCEL_ONLINE:
- mutex_unlock(&dm_device.ha_region_mutex);
+ if (val == MEM_ONLINE ||
+ mutex_is_locked(&dm_device.ha_region_mutex))
+ mutex_unlock(&dm_device.ha_region_mutex);
if (dm_device.ha_waiting) {
dm_device.ha_waiting = false;
complete(&dm_device.ol_waitevent);
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index cd453e4b2a07..b50dd330cf31 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -19,17 +19,13 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/semaphore.h>
-#include <linux/fs.h>
#include <linux/nls.h>
#include <linux/workqueue.h>
-#include <linux/cdev.h>
#include <linux/hyperv.h>
#include <linux/sched.h>
-#include <linux/uaccess.h>
-#include <linux/miscdevice.h>
#include "hyperv_vmbus.h"
+#include "hv_utils_transport.h"
#define WIN8_SRV_MAJOR 1
#define WIN8_SRV_MINOR 1
@@ -47,39 +43,31 @@
* ensure this by serializing packet processing in this driver - we do not
 * read additional packets from the VMBus until the current packet is fully
* handled.
- *
- * The transaction "active" state is set when we receive a request from the
- * host and we cleanup this state when the transaction is completed - when we
- * respond to the host with our response. When the transaction active state is
- * set, we defer handling incoming packets.
*/
static struct {
- bool active; /* transaction status - active or not */
+ int state; /* hvutil_device_state */
int recv_len; /* number of bytes received. */
struct hv_fcopy_hdr *fcopy_msg; /* current message */
- struct hv_start_fcopy message; /* sent to daemon */
struct vmbus_channel *recv_channel; /* chn we got the request */
u64 recv_req_id; /* request ID. */
void *fcopy_context; /* for the channel callback */
- struct semaphore read_sema;
} fcopy_transaction;
-static bool opened; /* currently device opened */
-
-/*
- * Before we can accept copy messages from the host, we need
- * to handshake with the user level daemon. This state tracks
- * if we are in the handshake phase.
- */
-static bool in_hand_shake = true;
-static void fcopy_send_data(void);
static void fcopy_respond_to_host(int error);
-static void fcopy_work_func(struct work_struct *dummy);
-static DECLARE_DELAYED_WORK(fcopy_work, fcopy_work_func);
+static void fcopy_send_data(struct work_struct *dummy);
+static void fcopy_timeout_func(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(fcopy_timeout_work, fcopy_timeout_func);
+static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
+static const char fcopy_devname[] = "vmbus/hv_fcopy";
static u8 *recv_buffer;
+static struct hvutil_transport *hvt;
+/*
+ * This state maintains the version number registered by the daemon.
+ */
+static int dm_reg_value;
-static void fcopy_work_func(struct work_struct *dummy)
+static void fcopy_timeout_func(struct work_struct *dummy)
{
/*
* If the timer fires, the user-mode component has not responded;
@@ -87,23 +75,28 @@ static void fcopy_work_func(struct work_struct *dummy)
*/
fcopy_respond_to_host(HV_E_FAIL);
- /* In the case the user-space daemon crashes, hangs or is killed, we
- * need to down the semaphore, otherwise, after the daemon starts next
- * time, the obsolete data in fcopy_transaction.message or
- * fcopy_transaction.fcopy_msg will be used immediately.
- *
- * NOTE: fcopy_read() happens to get the semaphore (very rare)? We're
- * still OK, because we've reported the failure to the host.
- */
- if (down_trylock(&fcopy_transaction.read_sema))
- ;
+ /* Transaction is finished, reset the state. */
+ if (fcopy_transaction.state > HVUTIL_READY)
+ fcopy_transaction.state = HVUTIL_READY;
+ hv_poll_channel(fcopy_transaction.fcopy_context,
+ hv_fcopy_onchannelcallback);
}
static int fcopy_handle_handshake(u32 version)
{
+ u32 our_ver = FCOPY_CURRENT_VERSION;
+
switch (version) {
- case FCOPY_CURRENT_VERSION:
+ case FCOPY_VERSION_0:
+ /* Daemon doesn't expect us to reply */
+ dm_reg_value = version;
+ break;
+ case FCOPY_VERSION_1:
+ /* Daemon expects us to reply with our own version */
+ if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver)))
+ return -EFAULT;
+ dm_reg_value = version;
break;
default:
/*
@@ -114,20 +107,20 @@ static int fcopy_handle_handshake(u32 version)
*/
return -EINVAL;
}
- pr_info("FCP: user-mode registering done. Daemon version: %d\n",
- version);
- fcopy_transaction.active = false;
- if (fcopy_transaction.fcopy_context)
- hv_fcopy_onchannelcallback(fcopy_transaction.fcopy_context);
- in_hand_shake = false;
+ pr_debug("FCP: userspace daemon ver. %d registered\n", version);
+ fcopy_transaction.state = HVUTIL_READY;
+ hv_poll_channel(fcopy_transaction.fcopy_context,
+ hv_fcopy_onchannelcallback);
return 0;
}
-static void fcopy_send_data(void)
+static void fcopy_send_data(struct work_struct *dummy)
{
- struct hv_start_fcopy *smsg_out = &fcopy_transaction.message;
+ struct hv_start_fcopy smsg_out;
int operation = fcopy_transaction.fcopy_msg->operation;
struct hv_start_fcopy *smsg_in;
+ void *out_src;
+ int rc, out_len;
/*
* The strings sent from the host are encoded in
@@ -142,26 +135,39 @@ static void fcopy_send_data(void)
switch (operation) {
case START_FILE_COPY:
- memset(smsg_out, 0, sizeof(struct hv_start_fcopy));
- smsg_out->hdr.operation = operation;
+ out_len = sizeof(struct hv_start_fcopy);
+ memset(&smsg_out, 0, out_len);
+ smsg_out.hdr.operation = operation;
smsg_in = (struct hv_start_fcopy *)fcopy_transaction.fcopy_msg;
utf16s_to_utf8s((wchar_t *)smsg_in->file_name, W_MAX_PATH,
UTF16_LITTLE_ENDIAN,
- (__u8 *)smsg_out->file_name, W_MAX_PATH - 1);
+ (__u8 *)&smsg_out.file_name, W_MAX_PATH - 1);
utf16s_to_utf8s((wchar_t *)smsg_in->path_name, W_MAX_PATH,
UTF16_LITTLE_ENDIAN,
- (__u8 *)smsg_out->path_name, W_MAX_PATH - 1);
+ (__u8 *)&smsg_out.path_name, W_MAX_PATH - 1);
- smsg_out->copy_flags = smsg_in->copy_flags;
- smsg_out->file_size = smsg_in->file_size;
+ smsg_out.copy_flags = smsg_in->copy_flags;
+ smsg_out.file_size = smsg_in->file_size;
+ out_src = &smsg_out;
break;
default:
+ out_src = fcopy_transaction.fcopy_msg;
+ out_len = fcopy_transaction.recv_len;
break;
}
- up(&fcopy_transaction.read_sema);
+
+ fcopy_transaction.state = HVUTIL_USERSPACE_REQ;
+ rc = hvutil_transport_send(hvt, out_src, out_len);
+ if (rc) {
+ pr_debug("FCP: failed to communicate to the daemon: %d\n", rc);
+ if (cancel_delayed_work_sync(&fcopy_timeout_work)) {
+ fcopy_respond_to_host(HV_E_FAIL);
+ fcopy_transaction.state = HVUTIL_READY;
+ }
+ }
return;
}
@@ -189,8 +195,6 @@ fcopy_respond_to_host(int error)
channel = fcopy_transaction.recv_channel;
req_id = fcopy_transaction.recv_req_id;
- fcopy_transaction.active = false;
-
icmsghdr = (struct icmsg_hdr *)
&recv_buffer[sizeof(struct vmbuspipe_hdr)];
@@ -218,7 +222,7 @@ void hv_fcopy_onchannelcallback(void *context)
int util_fw_version;
int fcopy_srv_version;
- if (fcopy_transaction.active) {
+ if (fcopy_transaction.state > HVUTIL_READY) {
/*
* We will defer processing this callback once
* the current transaction is complete.
@@ -226,6 +230,7 @@ void hv_fcopy_onchannelcallback(void *context)
fcopy_transaction.fcopy_context = context;
return;
}
+ fcopy_transaction.fcopy_context = NULL;
vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
&requestid);
@@ -249,17 +254,23 @@ void hv_fcopy_onchannelcallback(void *context)
* transaction; note transactions are serialized.
*/
- fcopy_transaction.active = true;
fcopy_transaction.recv_len = recvlen;
fcopy_transaction.recv_channel = channel;
fcopy_transaction.recv_req_id = requestid;
fcopy_transaction.fcopy_msg = fcopy_msg;
+ if (fcopy_transaction.state < HVUTIL_READY) {
+ /* Userspace is not registered yet */
+ fcopy_respond_to_host(HV_E_FAIL);
+ return;
+ }
+ fcopy_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
+
/*
* Send the information to the user-level daemon.
*/
- schedule_delayed_work(&fcopy_work, 5*HZ);
- fcopy_send_data();
+ schedule_work(&fcopy_send_work);
+ schedule_delayed_work(&fcopy_timeout_work, 5*HZ);
return;
}
icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
@@ -267,155 +278,44 @@ void hv_fcopy_onchannelcallback(void *context)
VM_PKT_DATA_INBAND, 0);
}
-/*
- * Create a char device that can support read/write for passing
- * the payload.
- */
-
-static ssize_t fcopy_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- void *src;
- size_t copy_size;
- int operation;
-
- /*
- * Wait until there is something to be read.
- */
- if (down_interruptible(&fcopy_transaction.read_sema))
- return -EINTR;
-
- /*
- * The channel may be rescinded and in this case, we will wakeup the
- * the thread blocked on the semaphore and we will use the opened
- * state to correctly handle this case.
- */
- if (!opened)
- return -ENODEV;
-
- operation = fcopy_transaction.fcopy_msg->operation;
-
- if (operation == START_FILE_COPY) {
- src = &fcopy_transaction.message;
- copy_size = sizeof(struct hv_start_fcopy);
- if (count < copy_size)
- return 0;
- } else {
- src = fcopy_transaction.fcopy_msg;
- copy_size = sizeof(struct hv_do_fcopy);
- if (count < copy_size)
- return 0;
- }
- if (copy_to_user(buf, src, copy_size))
- return -EFAULT;
-
- return copy_size;
-}
-
-static ssize_t fcopy_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+/* Callback when data is received from userspace */
+static int fcopy_on_msg(void *msg, int len)
{
- int response = 0;
+ int *val = (int *)msg;
- if (count != sizeof(int))
+ if (len != sizeof(int))
return -EINVAL;
- if (copy_from_user(&response, buf, sizeof(int)))
- return -EFAULT;
+ if (fcopy_transaction.state == HVUTIL_DEVICE_INIT)
+ return fcopy_handle_handshake(*val);
- if (in_hand_shake) {
- if (fcopy_handle_handshake(response))
- return -EINVAL;
- return sizeof(int);
- }
+ if (fcopy_transaction.state != HVUTIL_USERSPACE_REQ)
+ return -EINVAL;
/*
* Complete the transaction by forwarding the result
* to the host. But first, cancel the timeout.
*/
- if (cancel_delayed_work_sync(&fcopy_work))
- fcopy_respond_to_host(response);
-
- return sizeof(int);
-}
-
-static int fcopy_open(struct inode *inode, struct file *f)
-{
- /*
- * The user level daemon that will open this device is
- * really an extension of this driver. We can have only
- * active open at a time.
- */
- if (opened)
- return -EBUSY;
+ if (cancel_delayed_work_sync(&fcopy_timeout_work)) {
+ fcopy_transaction.state = HVUTIL_USERSPACE_RECV;
+ fcopy_respond_to_host(*val);
+ fcopy_transaction.state = HVUTIL_READY;
+ hv_poll_channel(fcopy_transaction.fcopy_context,
+ hv_fcopy_onchannelcallback);
+ }
- /*
- * The daemon is alive; setup the state.
- */
- opened = true;
return 0;
}
-/* XXX: there are still some tricky corner cases, e.g.,
- * 1) In a SMP guest, when fcopy_release() runs between
- * schedule_delayed_work() and fcopy_send_data(), there is
- * still a chance an obsolete message will be queued.
- *
- * 2) When the fcopy daemon is running, if we unload the driver,
- * we'll notice a kernel oops when we kill the daemon later.
- */
-static int fcopy_release(struct inode *inode, struct file *f)
+static void fcopy_on_reset(void)
{
/*
* The daemon has exited; reset the state.
*/
- in_hand_shake = true;
- opened = false;
+ fcopy_transaction.state = HVUTIL_DEVICE_INIT;
- if (cancel_delayed_work_sync(&fcopy_work)) {
- /* We haven't up()-ed the semaphore(very rare)? */
- if (down_trylock(&fcopy_transaction.read_sema))
- ;
+ if (cancel_delayed_work_sync(&fcopy_timeout_work))
fcopy_respond_to_host(HV_E_FAIL);
- }
- return 0;
-}
-
-
-static const struct file_operations fcopy_fops = {
- .read = fcopy_read,
- .write = fcopy_write,
- .release = fcopy_release,
- .open = fcopy_open,
-};
-
-static struct miscdevice fcopy_misc = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "vmbus/hv_fcopy",
- .fops = &fcopy_fops,
-};
-
-static int fcopy_dev_init(void)
-{
- return misc_register(&fcopy_misc);
-}
-
-static void fcopy_dev_deinit(void)
-{
-
- /*
- * The device is going away - perhaps because the
- * host has rescinded the channel. Setup state so that
- * user level daemon can gracefully exit if it is blocked
- * on the read semaphore.
- */
- opened = false;
- /*
- * Signal the semaphore as the device is
- * going away.
- */
- up(&fcopy_transaction.read_sema);
- misc_deregister(&fcopy_misc);
}
int hv_fcopy_init(struct hv_util_service *srv)
@@ -428,14 +328,19 @@ int hv_fcopy_init(struct hv_util_service *srv)
* Defer processing channel callbacks until the daemon
* has registered.
*/
- fcopy_transaction.active = true;
- sema_init(&fcopy_transaction.read_sema, 0);
+ fcopy_transaction.state = HVUTIL_DEVICE_INIT;
+
+ hvt = hvutil_transport_init(fcopy_devname, 0, 0,
+ fcopy_on_msg, fcopy_on_reset);
+ if (!hvt)
+ return -EFAULT;
- return fcopy_dev_init();
+ return 0;
}
void hv_fcopy_deinit(void)
{
- cancel_delayed_work_sync(&fcopy_work);
- fcopy_dev_deinit();
+ fcopy_transaction.state = HVUTIL_DEVICE_DYING;
+ cancel_delayed_work_sync(&fcopy_timeout_work);
+ hvutil_transport_destroy(hvt);
}
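
The fcopy rewrite collapses the old active/opened/in_hand_shake booleans into a single hvutil_device_state value shared with the other utility drivers through the new hv_utils_transport layer. The transitions visible in this file, summarized as a sketch (the enum itself lives in hv_utils_transport.h from this series):

/*
 * HVUTIL_DEVICE_INIT        driver loaded, daemon not registered yet
 *   -> HVUTIL_READY              after fcopy_handle_handshake()
 * HVUTIL_READY
 *   -> HVUTIL_HOSTMSG_RECEIVED   on a host request in hv_fcopy_onchannelcallback()
 * HVUTIL_HOSTMSG_RECEIVED
 *   -> HVUTIL_USERSPACE_REQ      once fcopy_send_data() hands it to the daemon
 * HVUTIL_USERSPACE_REQ
 *   -> HVUTIL_USERSPACE_RECV     when the daemon replies in fcopy_on_msg()
 *   -> HVUTIL_READY              on timeout (fcopy_timeout_func())
 * HVUTIL_USERSPACE_RECV
 *   -> HVUTIL_READY              after fcopy_respond_to_host()
 * HVUTIL_DEVICE_DYING        set by hv_fcopy_deinit() at teardown
 */
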
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index beb8105c0e7b..d85798d5992c 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -28,6 +28,8 @@
#include <linux/workqueue.h>
#include <linux/hyperv.h>
+#include "hyperv_vmbus.h"
+#include "hv_utils_transport.h"
/*
* Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7)
@@ -45,16 +47,21 @@
#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
/*
- * Global state maintained for transaction that is being processed.
- * Note that only one transaction can be active at any point in time.
+ * Global state maintained for transaction that is being processed. For a class
+ * of integration services, including the "KVP service", the specified protocol
+ * is a "request/response" protocol which means that there can only be single
+ * outstanding transaction from the host at any given point in time. We use
+ * this to simplify memory management in this driver - we cache and process
+ * only one message at a time.
*
- * This state is set when we receive a request from the host; we
- * cleanup this state when the transaction is completed - when we respond
- * to the host with the key value.
+ * While the request/response protocol is guaranteed by the host, we further
+ * ensure this by serializing packet processing in this driver - we do not
+ * read additional packets from the VMBus until the current packet is fully
+ * handled.
*/
static struct {
- bool active; /* transaction status - active or not */
+ int state; /* hvutil_device_state */
int recv_len; /* number of bytes received. */
struct hv_kvp_msg *kvp_msg; /* current message */
struct vmbus_channel *recv_channel; /* chn we got the request */
@@ -63,13 +70,6 @@ static struct {
} kvp_transaction;
/*
- * Before we can accept KVP messages from the host, we need
- * to handshake with the user level daemon. This state tracks
- * if we are in the handshake phase.
- */
-static bool in_hand_shake = true;
-
-/*
* This state maintains the version number registered by the daemon.
*/
static int dm_reg_value;
@@ -78,15 +78,15 @@ static void kvp_send_key(struct work_struct *dummy);
static void kvp_respond_to_host(struct hv_kvp_msg *msg, int error);
-static void kvp_work_func(struct work_struct *dummy);
+static void kvp_timeout_func(struct work_struct *dummy);
static void kvp_register(int);
-static DECLARE_DELAYED_WORK(kvp_work, kvp_work_func);
+static DECLARE_DELAYED_WORK(kvp_timeout_work, kvp_timeout_func);
static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
-static struct cb_id kvp_id = { CN_KVP_IDX, CN_KVP_VAL };
-static const char kvp_name[] = "kvp_kernel_module";
+static const char kvp_devname[] = "vmbus/hv_kvp";
static u8 *recv_buffer;
+static struct hvutil_transport *hvt;
/*
* Register the kernel component with the user-level daemon.
* As part of this registration, pass the LIC version number.
@@ -98,50 +98,39 @@ static void
kvp_register(int reg_value)
{
- struct cn_msg *msg;
struct hv_kvp_msg *kvp_msg;
char *version;
- msg = kzalloc(sizeof(*msg) + sizeof(struct hv_kvp_msg), GFP_ATOMIC);
+ kvp_msg = kzalloc(sizeof(*kvp_msg), GFP_KERNEL);
- if (msg) {
- kvp_msg = (struct hv_kvp_msg *)msg->data;
+ if (kvp_msg) {
version = kvp_msg->body.kvp_register.version;
- msg->id.idx = CN_KVP_IDX;
- msg->id.val = CN_KVP_VAL;
-
kvp_msg->kvp_hdr.operation = reg_value;
strcpy(version, HV_DRV_VERSION);
- msg->len = sizeof(struct hv_kvp_msg);
- cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
- kfree(msg);
+
+ hvutil_transport_send(hvt, kvp_msg, sizeof(*kvp_msg));
+ kfree(kvp_msg);
}
}
-static void
-kvp_work_func(struct work_struct *dummy)
+
+static void kvp_timeout_func(struct work_struct *dummy)
{
/*
* If the timer fires, the user-mode component has not responded;
* process the pending transaction.
*/
kvp_respond_to_host(NULL, HV_E_FAIL);
-}
-static void poll_channel(struct vmbus_channel *channel)
-{
- if (channel->target_cpu != smp_processor_id())
- smp_call_function_single(channel->target_cpu,
- hv_kvp_onchannelcallback,
- channel, true);
- else
- hv_kvp_onchannelcallback(channel);
-}
+ /* Transaction is finished, reset the state. */
+ if (kvp_transaction.state > HVUTIL_READY)
+ kvp_transaction.state = HVUTIL_READY;
+ hv_poll_channel(kvp_transaction.kvp_context,
+ hv_kvp_onchannelcallback);
+}
static int kvp_handle_handshake(struct hv_kvp_msg *msg)
{
- int ret = 1;
-
switch (msg->kvp_hdr.operation) {
case KVP_OP_REGISTER:
dm_reg_value = KVP_OP_REGISTER;
@@ -155,20 +144,18 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
pr_info("KVP: incompatible daemon\n");
pr_info("KVP: KVP version: %d, Daemon version: %d\n",
KVP_OP_REGISTER1, msg->kvp_hdr.operation);
- ret = 0;
+ return -EINVAL;
}
- if (ret) {
- /*
- * We have a compatible daemon; complete the handshake.
- */
- pr_info("KVP: user-mode registering done.\n");
- kvp_register(dm_reg_value);
- kvp_transaction.active = false;
- if (kvp_transaction.kvp_context)
- poll_channel(kvp_transaction.kvp_context);
- }
- return ret;
+ /*
+ * We have a compatible daemon; complete the handshake.
+ */
+ pr_debug("KVP: userspace daemon ver. %d registered\n",
+ dm_reg_value);
+ kvp_register(dm_reg_value);
+ kvp_transaction.state = HVUTIL_READY;
+
+ return 0;
}
@@ -176,26 +163,30 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
* Callback when data is received from user mode.
*/
-static void
-kvp_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
+static int kvp_on_msg(void *msg, int len)
{
- struct hv_kvp_msg *message;
+ struct hv_kvp_msg *message = (struct hv_kvp_msg *)msg;
struct hv_kvp_msg_enumerate *data;
int error = 0;
- message = (struct hv_kvp_msg *)msg->data;
+ if (len < sizeof(*message))
+ return -EINVAL;
/*
* If we are negotiating the version information
* with the daemon; handle that first.
*/
- if (in_hand_shake) {
- if (kvp_handle_handshake(message))
- in_hand_shake = false;
- return;
+ if (kvp_transaction.state < HVUTIL_READY) {
+ return kvp_handle_handshake(message);
}
+ /* We didn't send anything to userspace so the reply is spurious */
+ if (kvp_transaction.state < HVUTIL_USERSPACE_REQ)
+ return -EINVAL;
+
+ kvp_transaction.state = HVUTIL_USERSPACE_RECV;
+
/*
* Based on the version of the daemon, we propagate errors from the
* daemon differently.
@@ -225,8 +216,14 @@ kvp_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
* Complete the transaction by forwarding the key value
* to the host. But first, cancel the timeout.
*/
- if (cancel_delayed_work_sync(&kvp_work))
+ if (cancel_delayed_work_sync(&kvp_timeout_work)) {
kvp_respond_to_host(message, error);
+ kvp_transaction.state = HVUTIL_READY;
+ hv_poll_channel(kvp_transaction.kvp_context,
+ hv_kvp_onchannelcallback);
+ }
+
+ return 0;
}
@@ -343,7 +340,6 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
static void
kvp_send_key(struct work_struct *dummy)
{
- struct cn_msg *msg;
struct hv_kvp_msg *message;
struct hv_kvp_msg *in_msg;
__u8 operation = kvp_transaction.kvp_msg->kvp_hdr.operation;
@@ -352,14 +348,11 @@ kvp_send_key(struct work_struct *dummy)
__u64 val64;
int rc;
- msg = kzalloc(sizeof(*msg) + sizeof(struct hv_kvp_msg) , GFP_ATOMIC);
- if (!msg)
+ /* The transaction state is wrong. */
+ if (kvp_transaction.state != HVUTIL_HOSTMSG_RECEIVED)
return;
- msg->id.idx = CN_KVP_IDX;
- msg->id.val = CN_KVP_VAL;
-
- message = (struct hv_kvp_msg *)msg->data;
+ message = kzalloc(sizeof(*message), GFP_KERNEL);
+ if (!message)
+ return;
message->kvp_hdr.operation = operation;
message->kvp_hdr.pool = pool;
in_msg = kvp_transaction.kvp_msg;
@@ -446,15 +439,17 @@ kvp_send_key(struct work_struct *dummy)
break;
}
- msg->len = sizeof(struct hv_kvp_msg);
- rc = cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
+ kvp_transaction.state = HVUTIL_USERSPACE_REQ;
+ rc = hvutil_transport_send(hvt, message, sizeof(*message));
if (rc) {
pr_debug("KVP: failed to communicate to the daemon: %d\n", rc);
- if (cancel_delayed_work_sync(&kvp_work))
+ if (cancel_delayed_work_sync(&kvp_timeout_work)) {
kvp_respond_to_host(message, HV_E_FAIL);
+ kvp_transaction.state = HVUTIL_READY;
+ }
}
- kfree(msg);
+ kfree(message);
return;
}
@@ -479,17 +474,6 @@ kvp_respond_to_host(struct hv_kvp_msg *msg_to_host, int error)
int ret;
/*
- * If a transaction is not active; log and return.
- */
-
- if (!kvp_transaction.active) {
- /*
- * This is a spurious call!
- */
- pr_warn("KVP: Transaction not active\n");
- return;
- }
- /*
* Copy the global state for completing the transaction. Note that
* only one transaction can be active at a time.
*/
@@ -498,8 +482,6 @@ kvp_respond_to_host(struct hv_kvp_msg *msg_to_host, int error)
channel = kvp_transaction.recv_channel;
req_id = kvp_transaction.recv_req_id;
- kvp_transaction.active = false;
-
icmsghdrp = (struct icmsg_hdr *)
&recv_buffer[sizeof(struct vmbuspipe_hdr)];
@@ -586,7 +568,6 @@ response_done:
vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
VM_PKT_DATA_INBAND, 0);
- poll_channel(channel);
}
/*
@@ -612,7 +593,7 @@ void hv_kvp_onchannelcallback(void *context)
int util_fw_version;
int kvp_srv_version;
- if (kvp_transaction.active) {
+ if (kvp_transaction.state > HVUTIL_READY) {
/*
* We will defer processing this callback once
* the current transaction is complete.
@@ -620,6 +601,7 @@ void hv_kvp_onchannelcallback(void *context)
kvp_transaction.kvp_context = context;
return;
}
+ kvp_transaction.kvp_context = NULL;
vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
&requestid);
@@ -664,9 +646,15 @@ void hv_kvp_onchannelcallback(void *context)
kvp_transaction.recv_len = recvlen;
kvp_transaction.recv_channel = channel;
kvp_transaction.recv_req_id = requestid;
- kvp_transaction.active = true;
kvp_transaction.kvp_msg = kvp_msg;
+ if (kvp_transaction.state < HVUTIL_READY) {
+ /* Userspace is not registered yet */
+ kvp_respond_to_host(NULL, HV_E_FAIL);
+ return;
+ }
+ kvp_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
+
/*
* Get the information from the
* user-mode component.
@@ -677,7 +665,7 @@ void hv_kvp_onchannelcallback(void *context)
* user-mode not responding.
*/
schedule_work(&kvp_sendkey_work);
- schedule_delayed_work(&kvp_work, 5*HZ);
+ schedule_delayed_work(&kvp_timeout_work, 5*HZ);
return;
@@ -693,14 +681,16 @@ void hv_kvp_onchannelcallback(void *context)
}
+static void kvp_on_reset(void)
+{
+ if (cancel_delayed_work_sync(&kvp_timeout_work))
+ kvp_respond_to_host(NULL, HV_E_FAIL);
+ kvp_transaction.state = HVUTIL_DEVICE_INIT;
+}
+
int
hv_kvp_init(struct hv_util_service *srv)
{
- int err;
-
- err = cn_add_callback(&kvp_id, kvp_name, kvp_cn_callback);
- if (err)
- return err;
recv_buffer = srv->recv_buffer;
/*
@@ -709,14 +699,20 @@ hv_kvp_init(struct hv_util_service *srv)
* Defer processing channel callbacks until the daemon
* has registered.
*/
- kvp_transaction.active = true;
+ kvp_transaction.state = HVUTIL_DEVICE_INIT;
+
+ hvt = hvutil_transport_init(kvp_devname, CN_KVP_IDX, CN_KVP_VAL,
+ kvp_on_msg, kvp_on_reset);
+ if (!hvt)
+ return -EFAULT;
return 0;
}
void hv_kvp_deinit(void)
{
- cn_del_callback(&kvp_id);
- cancel_delayed_work_sync(&kvp_work);
+ kvp_transaction.state = HVUTIL_DEVICE_DYING;
+ cancel_delayed_work_sync(&kvp_timeout_work);
cancel_work_sync(&kvp_sendkey_work);
+ hvutil_transport_destroy(hvt);
}
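
One idiom recurs throughout the KVP hunks above: whichever side manages to cancel the pending timeout work owns completing the transaction, so the reply path and kvp_timeout_func() can never both answer the host. Excerpted as a minimal sketch (names from this patch):

	if (cancel_delayed_work_sync(&kvp_timeout_work)) {
		/* We beat the timeout, so completion is ours. */
		kvp_respond_to_host(message, error);
		kvp_transaction.state = HVUTIL_READY;
	}
	/* Otherwise kvp_timeout_func() already responded with HV_E_FAIL. */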
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 9d5e0d1efdb5..815405f2e777 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -24,6 +24,9 @@
#include <linux/workqueue.h>
#include <linux/hyperv.h>
+#include "hyperv_vmbus.h"
+#include "hv_utils_transport.h"
+
#define VSS_MAJOR 5
#define VSS_MINOR 0
#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
@@ -31,28 +34,39 @@
#define VSS_USERSPACE_TIMEOUT (msecs_to_jiffies(10 * 1000))
/*
- * Global state maintained for transaction that is being processed.
- * Note that only one transaction can be active at any point in time.
+ * Global state maintained for transaction that is being processed. For a class
+ * of integration services, including the "VSS service", the specified protocol
+ * is a "request/response" protocol which means that there can only be single
+ * outstanding transaction from the host at any given point in time. We use
+ * this to simplify memory management in this driver - we cache and process
+ * only one message at a time.
*
- * This state is set when we receive a request from the host; we
- * cleanup this state when the transaction is completed - when we respond
- * to the host with the key value.
+ * While the request/response protocol is guaranteed by the host, we further
+ * ensure this by serializing packet processing in this driver - we do not
+ * read additional packets from the VMBus until the current packet is fully
+ * handled.
*/
static struct {
- bool active; /* transaction status - active or not */
+ int state; /* hvutil_device_state */
int recv_len; /* number of bytes received. */
struct vmbus_channel *recv_channel; /* chn we got the request */
u64 recv_req_id; /* request ID. */
struct hv_vss_msg *msg; /* current message */
+ void *vss_context; /* for the channel callback */
} vss_transaction;
static void vss_respond_to_host(int error);
-static struct cb_id vss_id = { CN_VSS_IDX, CN_VSS_VAL };
-static const char vss_name[] = "vss_kernel_module";
+/*
+ * This state maintains the version number registered by the daemon.
+ */
+static int dm_reg_value;
+
+static const char vss_devname[] = "vmbus/hv_vss";
static __u8 *recv_buffer;
+static struct hvutil_transport *hvt;
static void vss_send_op(struct work_struct *dummy);
static void vss_timeout_func(struct work_struct *dummy);
@@ -71,25 +85,69 @@ static void vss_timeout_func(struct work_struct *dummy)
*/
pr_warn("VSS: timeout waiting for daemon to reply\n");
vss_respond_to_host(HV_E_FAIL);
+
+ /* Transaction is finished, reset the state. */
+ if (vss_transaction.state > HVUTIL_READY)
+ vss_transaction.state = HVUTIL_READY;
+
+ hv_poll_channel(vss_transaction.vss_context,
+ hv_vss_onchannelcallback);
}
-static void
-vss_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
+static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
{
- struct hv_vss_msg *vss_msg;
+ u32 our_ver = VSS_OP_REGISTER1;
+
+ switch (vss_msg->vss_hdr.operation) {
+ case VSS_OP_REGISTER:
+ /* Daemon doesn't expect us to reply */
+ dm_reg_value = VSS_OP_REGISTER;
+ break;
+ case VSS_OP_REGISTER1:
+ /* Daemon expects us to reply with our own version */
+ if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver)))
+ return -EFAULT;
+ dm_reg_value = VSS_OP_REGISTER1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ vss_transaction.state = HVUTIL_READY;
+ pr_debug("VSS: userspace daemon ver. %d registered\n", dm_reg_value);
+ return 0;
+}
- vss_msg = (struct hv_vss_msg *)msg->data;
+static int vss_on_msg(void *msg, int len)
+{
+ struct hv_vss_msg *vss_msg = (struct hv_vss_msg *)msg;
- if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER) {
- pr_info("VSS daemon registered\n");
- vss_transaction.active = false;
- if (vss_transaction.recv_channel != NULL)
- hv_vss_onchannelcallback(vss_transaction.recv_channel);
- return;
+ if (len != sizeof(*vss_msg))
+ return -EINVAL;
+ if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER ||
+ vss_msg->vss_hdr.operation == VSS_OP_REGISTER1) {
+ /*
+ * Don't process registration messages if we're in the middle
+ * of processing a transaction.
+ */
+ if (vss_transaction.state > HVUTIL_READY)
+ return -EINVAL;
+ return vss_handle_handshake(vss_msg);
+ } else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
+ vss_transaction.state = HVUTIL_USERSPACE_RECV;
+ if (cancel_delayed_work_sync(&vss_timeout_work)) {
+ vss_respond_to_host(vss_msg->error);
+ /* Transaction is finished, reset the state. */
+ vss_transaction.state = HVUTIL_READY;
+ hv_poll_channel(vss_transaction.vss_context,
+ hv_vss_onchannelcallback);
+ }
+ } else {
+ /* This is a spurious call! */
+ pr_warn("VSS: Transaction not active\n");
+ return -EINVAL;
}
- if (cancel_delayed_work_sync(&vss_timeout_work))
- vss_respond_to_host(vss_msg->error);
+ return 0;
}
@@ -97,28 +155,29 @@ static void vss_send_op(struct work_struct *dummy)
{
int op = vss_transaction.msg->vss_hdr.operation;
int rc;
- struct cn_msg *msg;
struct hv_vss_msg *vss_msg;
- msg = kzalloc(sizeof(*msg) + sizeof(*vss_msg), GFP_ATOMIC);
- if (!msg)
+ /* The transaction state is wrong. */
+ if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED)
return;
- vss_msg = (struct hv_vss_msg *)msg->data;
-
- msg->id.idx = CN_VSS_IDX;
- msg->id.val = CN_VSS_VAL;
+ vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
+ if (!vss_msg)
+ return;
vss_msg->vss_hdr.operation = op;
- msg->len = sizeof(struct hv_vss_msg);
- rc = cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
+ vss_transaction.state = HVUTIL_USERSPACE_REQ;
+ rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg));
if (rc) {
pr_warn("VSS: failed to communicate to the daemon: %d\n", rc);
- if (cancel_delayed_work_sync(&vss_timeout_work))
+ if (cancel_delayed_work_sync(&vss_timeout_work)) {
vss_respond_to_host(HV_E_FAIL);
+ vss_transaction.state = HVUTIL_READY;
+ }
}
- kfree(msg);
+
+ kfree(vss_msg);
return;
}
@@ -136,17 +195,6 @@ vss_respond_to_host(int error)
u64 req_id;
/*
- * If a transaction is not active; log and return.
- */
-
- if (!vss_transaction.active) {
- /*
- * This is a spurious call!
- */
- pr_warn("VSS: Transaction not active\n");
- return;
- }
- /*
* Copy the global state for completing the transaction. Note that
* only one transaction can be active at a time.
*/
@@ -154,7 +202,6 @@ vss_respond_to_host(int error)
buf_len = vss_transaction.recv_len;
channel = vss_transaction.recv_channel;
req_id = vss_transaction.recv_req_id;
- vss_transaction.active = false;
icmsghdrp = (struct icmsg_hdr *)
&recv_buffer[sizeof(struct vmbuspipe_hdr)];
@@ -191,14 +238,15 @@ void hv_vss_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
struct icmsg_negotiate *negop = NULL;
- if (vss_transaction.active) {
+ if (vss_transaction.state > HVUTIL_READY) {
/*
* We will defer processing this callback once
* the current transaction is complete.
*/
- vss_transaction.recv_channel = channel;
+ vss_transaction.vss_context = context;
return;
}
+ vss_transaction.vss_context = NULL;
vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
&requestid);
@@ -224,7 +272,6 @@ void hv_vss_onchannelcallback(void *context)
vss_transaction.recv_len = recvlen;
vss_transaction.recv_channel = channel;
vss_transaction.recv_req_id = requestid;
- vss_transaction.active = true;
vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
switch (vss_msg->vss_hdr.operation) {
@@ -241,6 +288,12 @@ void hv_vss_onchannelcallback(void *context)
*/
case VSS_OP_FREEZE:
case VSS_OP_THAW:
+ if (vss_transaction.state < HVUTIL_READY) {
+ /* Userspace is not registered yet */
+ vss_respond_to_host(HV_E_FAIL);
+ return;
+ }
+ vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
schedule_work(&vss_send_op_work);
schedule_delayed_work(&vss_timeout_work,
VSS_USERSPACE_TIMEOUT);
@@ -275,14 +328,16 @@ void hv_vss_onchannelcallback(void *context)
}
+static void vss_on_reset(void)
+{
+ if (cancel_delayed_work_sync(&vss_timeout_work))
+ vss_respond_to_host(HV_E_FAIL);
+ vss_transaction.state = HVUTIL_DEVICE_INIT;
+}
+
int
hv_vss_init(struct hv_util_service *srv)
{
- int err;
-
- err = cn_add_callback(&vss_id, vss_name, vss_cn_callback);
- if (err)
- return err;
recv_buffer = srv->recv_buffer;
/*
@@ -291,13 +346,20 @@ hv_vss_init(struct hv_util_service *srv)
* Defer processing channel callbacks until the daemon
* has registered.
*/
- vss_transaction.active = true;
+ vss_transaction.state = HVUTIL_DEVICE_INIT;
+
+ hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
+ vss_on_msg, vss_on_reset);
+ if (!hvt)
+ return -EFAULT;
+
return 0;
}
void hv_vss_deinit(void)
{
- cn_del_callback(&vss_id);
+ vss_transaction.state = HVUTIL_DEVICE_DYING;
cancel_delayed_work_sync(&vss_timeout_work);
cancel_work_sync(&vss_send_op_work);
+ hvutil_transport_destroy(hvt);
}
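
The daemon side of the VSS handshake above is symmetric: a VSS_OP_REGISTER1 daemon writes a full hv_vss_msg and reads back the kernel's version, while a VSS_OP_REGISTER daemon expects no reply. A hedged userspace sketch (not part of this patch; the device path follows vss_devname, and the message layout comes from the uapi hyperv header):

#include <fcntl.h>
#include <unistd.h>
#include <linux/hyperv.h>

static int vss_register(void)
{
	struct hv_vss_msg reg = { 0 };
	__u32 kernel_ver;
	int fd = open("/dev/vmbus/hv_vss", O_RDWR);

	if (fd < 0)
		return -1;

	/* vss_on_msg() rejects anything but a full-sized message. */
	reg.vss_hdr.operation = VSS_OP_REGISTER1;
	if (write(fd, &reg, sizeof(reg)) != sizeof(reg)) {
		close(fd);
		return -1;
	}

	/* The kernel replies with its own version, a bare __u32. */
	if (read(fd, &kernel_ver, sizeof(kernel_ver)) != sizeof(kernel_ver)) {
		close(fd);
		return -1;
	}

	return fd;
}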
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
new file mode 100644
index 000000000000..ea7ba5ef16a9
--- /dev/null
+++ b/drivers/hv/hv_utils_transport.c
@@ -0,0 +1,276 @@
+/*
+ * Kernel/userspace transport abstraction for Hyper-V util driver.
+ *
+ * Copyright (C) 2015, Vitaly Kuznetsov <vkuznets@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+
+#include "hyperv_vmbus.h"
+#include "hv_utils_transport.h"
+
+static DEFINE_SPINLOCK(hvt_list_lock);
+static struct list_head hvt_list = LIST_HEAD_INIT(hvt_list);
+
+static void hvt_reset(struct hvutil_transport *hvt)
+{
+ mutex_lock(&hvt->outmsg_lock);
+ kfree(hvt->outmsg);
+ hvt->outmsg = NULL;
+ hvt->outmsg_len = 0;
+ mutex_unlock(&hvt->outmsg_lock);
+ if (hvt->on_reset)
+ hvt->on_reset();
+}
+
+static ssize_t hvt_op_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hvutil_transport *hvt;
+ int ret;
+
+ hvt = container_of(file->f_op, struct hvutil_transport, fops);
+
+ if (wait_event_interruptible(hvt->outmsg_q, hvt->outmsg_len > 0))
+ return -EINTR;
+
+ mutex_lock(&hvt->outmsg_lock);
+ if (!hvt->outmsg) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ if (count < hvt->outmsg_len) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!copy_to_user(buf, hvt->outmsg, hvt->outmsg_len))
+ ret = hvt->outmsg_len;
+ else
+ ret = -EFAULT;
+
+ kfree(hvt->outmsg);
+ hvt->outmsg = NULL;
+ hvt->outmsg_len = 0;
+
+out_unlock:
+ mutex_unlock(&hvt->outmsg_lock);
+ return ret;
+}
+
+static ssize_t hvt_op_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hvutil_transport *hvt;
+ u8 *inmsg;
+
+ hvt = container_of(file->f_op, struct hvutil_transport, fops);
+
+ inmsg = kzalloc(count, GFP_KERNEL);
+ if (!inmsg)
+ return -ENOMEM;
+ if (copy_from_user(inmsg, buf, count)) {
+ kfree(inmsg);
+ return -EFAULT;
+ }
+ if (hvt->on_msg(inmsg, count)) {
+ kfree(inmsg);
+ return -EFAULT;
+ }
+ kfree(inmsg);
+
+ return count;
+}
+
+static unsigned int hvt_op_poll(struct file *file, poll_table *wait)
+{
+ struct hvutil_transport *hvt;
+
+ hvt = container_of(file->f_op, struct hvutil_transport, fops);
+
+ poll_wait(file, &hvt->outmsg_q, wait);
+ if (hvt->outmsg_len > 0)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static int hvt_op_open(struct inode *inode, struct file *file)
+{
+ struct hvutil_transport *hvt;
+
+ hvt = container_of(file->f_op, struct hvutil_transport, fops);
+
+ /*
+ * Switching to CHARDEV mode. We switch back to INIT when the device
+ * gets released.
+ */
+ if (hvt->mode == HVUTIL_TRANSPORT_INIT)
+ hvt->mode = HVUTIL_TRANSPORT_CHARDEV;
+ else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
+ /*
+ * We're switching from netlink communication to using char
+ * device. Issue the reset first.
+ */
+ hvt_reset(hvt);
+ hvt->mode = HVUTIL_TRANSPORT_CHARDEV;
+ } else
+ return -EBUSY;
+
+ return 0;
+}
+
+static int hvt_op_release(struct inode *inode, struct file *file)
+{
+ struct hvutil_transport *hvt;
+
+ hvt = container_of(file->f_op, struct hvutil_transport, fops);
+
+ hvt->mode = HVUTIL_TRANSPORT_INIT;
+ /*
+ * Cleanup message buffers to avoid spurious messages when the daemon
+ * connects back.
+ */
+ hvt_reset(hvt);
+
+ return 0;
+}
+
+static void hvt_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
+{
+ struct hvutil_transport *hvt, *hvt_found = NULL;
+
+ spin_lock(&hvt_list_lock);
+ list_for_each_entry(hvt, &hvt_list, list) {
+ if (hvt->cn_id.idx == msg->id.idx &&
+ hvt->cn_id.val == msg->id.val) {
+ hvt_found = hvt;
+ break;
+ }
+ }
+ spin_unlock(&hvt_list_lock);
+ if (!hvt_found) {
+ pr_warn("hvt_cn_callback: spurious message received!\n");
+ return;
+ }
+
+ /*
+ * Switching to NETLINK mode. Switching to CHARDEV happens when someone
+ * opens the device.
+ */
+ if (hvt_found->mode == HVUTIL_TRANSPORT_INIT)
+ hvt_found->mode = HVUTIL_TRANSPORT_NETLINK;
+
+ if (hvt_found->mode == HVUTIL_TRANSPORT_NETLINK)
+ hvt_found->on_msg(msg->data, msg->len);
+ else
+ pr_warn("hvt_cn_callback: unexpected netlink message!\n");
+}
+
+int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len)
+{
+ struct cn_msg *cn_msg;
+ int ret = 0;
+
+ if (hvt->mode == HVUTIL_TRANSPORT_INIT) {
+ return -EINVAL;
+ } else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
+ cn_msg = kzalloc(sizeof(*cn_msg) + len, GFP_ATOMIC);
+ if (!cn_msg)
+ return -ENOMEM;
+ cn_msg->id.idx = hvt->cn_id.idx;
+ cn_msg->id.val = hvt->cn_id.val;
+ cn_msg->len = len;
+ memcpy(cn_msg->data, msg, len);
+ ret = cn_netlink_send(cn_msg, 0, 0, GFP_ATOMIC);
+ kfree(cn_msg);
+ return ret;
+ }
+ /* HVUTIL_TRANSPORT_CHARDEV */
+ mutex_lock(&hvt->outmsg_lock);
+ if (hvt->outmsg) {
+ /* Previous message wasn't received */
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+ hvt->outmsg = kzalloc(len, GFP_KERNEL);
+ if (!hvt->outmsg) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ memcpy(hvt->outmsg, msg, len);
+ hvt->outmsg_len = len;
+ wake_up_interruptible(&hvt->outmsg_q);
+out_unlock:
+ mutex_unlock(&hvt->outmsg_lock);
+ return ret;
+}
+
+struct hvutil_transport *hvutil_transport_init(const char *name,
+ u32 cn_idx, u32 cn_val,
+ int (*on_msg)(void *, int),
+ void (*on_reset)(void))
+{
+ struct hvutil_transport *hvt;
+
+ hvt = kzalloc(sizeof(*hvt), GFP_KERNEL);
+ if (!hvt)
+ return NULL;
+
+ hvt->cn_id.idx = cn_idx;
+ hvt->cn_id.val = cn_val;
+
+ hvt->mdev.minor = MISC_DYNAMIC_MINOR;
+ hvt->mdev.name = name;
+
+ hvt->fops.owner = THIS_MODULE;
+ hvt->fops.read = hvt_op_read;
+ hvt->fops.write = hvt_op_write;
+ hvt->fops.poll = hvt_op_poll;
+ hvt->fops.open = hvt_op_open;
+ hvt->fops.release = hvt_op_release;
+
+ hvt->mdev.fops = &hvt->fops;
+
+ init_waitqueue_head(&hvt->outmsg_q);
+ mutex_init(&hvt->outmsg_lock);
+
+ spin_lock(&hvt_list_lock);
+ list_add(&hvt->list, &hvt_list);
+ spin_unlock(&hvt_list_lock);
+
+ hvt->on_msg = on_msg;
+ hvt->on_reset = on_reset;
+
+ if (misc_register(&hvt->mdev))
+ goto err_free_hvt;
+
+ /* Use cn_id.idx/cn_id.val to determine if we need to setup netlink */
+ if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0 &&
+ cn_add_callback(&hvt->cn_id, name, hvt_cn_callback))
+ goto err_deregister;
+
+ return hvt;
+
+err_deregister:
+ misc_deregister(&hvt->mdev);
+err_free_hvt:
+ spin_lock(&hvt_list_lock);
+ list_del(&hvt->list);
+ spin_unlock(&hvt_list_lock);
+ kfree(hvt);
+ return NULL;
+}
+
+void hvutil_transport_destroy(struct hvutil_transport *hvt)
+{
+ spin_lock(&hvt_list_lock);
+ list_del(&hvt->list);
+ spin_unlock(&hvt_list_lock);
+ if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0)
+ cn_del_callback(&hvt->cn_id);
+ misc_deregister(&hvt->mdev);
+ kfree(hvt->outmsg);
+ kfree(hvt);
+}
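
Taken together, the char-device half of the transport gives each daemon a conventional poll/read/write loop. A hedged sketch of the consuming side (illustrative only; the service-specific registration message is elided and the buffer size is an assumption):

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int util_daemon_loop(const char *dev)
{
	char buf[4096];
	struct pollfd pfd;
	ssize_t len;
	int fd = open(dev, O_RDWR);	/* e.g. "/dev/vmbus/hv_kvp" */

	if (fd < 0)
		return -1;

	pfd.fd = fd;
	pfd.events = POLLIN;

	/* ... write the service-specific registration message first ... */

	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			break;
		/* hvt_op_read() hands over exactly one kernel message. */
		len = read(fd, buf, sizeof(buf));
		if (len <= 0)
			break;
		/* ... handle the request and build the reply in buf ... */
		/* hvt_op_write() feeds the reply to the on_msg callback. */
		if (write(fd, buf, len) != len)
			break;
	}
	close(fd);
	return 0;
}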
diff --git a/drivers/hv/hv_utils_transport.h b/drivers/hv/hv_utils_transport.h
new file mode 100644
index 000000000000..314c76ce1b07
--- /dev/null
+++ b/drivers/hv/hv_utils_transport.h
@@ -0,0 +1,51 @@
+/*
+ * Kernel/userspace transport abstraction for Hyper-V util driver.
+ *
+ * Copyright (C) 2015, Vitaly Kuznetsov <vkuznets@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ */
+
+#ifndef _HV_UTILS_TRANSPORT_H
+#define _HV_UTILS_TRANSPORT_H
+
+#include <linux/connector.h>
+#include <linux/miscdevice.h>
+
+enum hvutil_transport_mode {
+ HVUTIL_TRANSPORT_INIT = 0,
+ HVUTIL_TRANSPORT_NETLINK,
+ HVUTIL_TRANSPORT_CHARDEV,
+};
+
+struct hvutil_transport {
+ int mode; /* hvutil_transport_mode */
+ struct file_operations fops; /* file operations */
+ struct miscdevice mdev; /* misc device */
+ struct cb_id cn_id; /* CN_*_IDX/CN_*_VAL */
+ struct list_head list; /* hvt_list */
+ int (*on_msg)(void *, int); /* callback on new user message */
+ void (*on_reset)(void); /* callback when userspace drops */
+ u8 *outmsg; /* message to the userspace */
+ int outmsg_len; /* its length */
+ wait_queue_head_t outmsg_q; /* poll/read wait queue */
+ struct mutex outmsg_lock; /* protects outmsg */
+};
+
+struct hvutil_transport *hvutil_transport_init(const char *name,
+ u32 cn_idx, u32 cn_val,
+ int (*on_msg)(void *, int),
+ void (*on_reset)(void));
+int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len);
+void hvutil_transport_destroy(struct hvutil_transport *hvt);
+
+#endif /* _HV_UTILS_TRANSPORT_H */
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 887287ad411f..cddc0c9f6bf9 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -647,6 +647,7 @@ struct vmbus_connection {
atomic_t next_gpadl_handle;
+ struct completion unload_event;
/*
* Represents channel interrupts. Each bit position represents a
* channel. When a channel sends an interrupt via VMBUS, it finds its
@@ -730,9 +731,39 @@ int vmbus_set_event(struct vmbus_channel *channel);
void vmbus_on_event(unsigned long data);
+int hv_kvp_init(struct hv_util_service *);
+void hv_kvp_deinit(void);
+void hv_kvp_onchannelcallback(void *);
+
+int hv_vss_init(struct hv_util_service *);
+void hv_vss_deinit(void);
+void hv_vss_onchannelcallback(void *);
+
int hv_fcopy_init(struct hv_util_service *);
void hv_fcopy_deinit(void);
void hv_fcopy_onchannelcallback(void *);
+void vmbus_initiate_unload(void);
+
+static inline void hv_poll_channel(struct vmbus_channel *channel,
+ void (*cb)(void *))
+{
+ if (!channel)
+ return;
+
+ if (channel->target_cpu != smp_processor_id())
+ smp_call_function_single(channel->target_cpu,
+ cb, channel, true);
+ else
+ cb(channel);
+}
+
+enum hvutil_device_state {
+ HVUTIL_DEVICE_INIT = 0, /* driver is loaded, waiting for userspace */
+ HVUTIL_READY, /* userspace is registered */
+ HVUTIL_HOSTMSG_RECEIVED, /* message from the host was received */
+ HVUTIL_USERSPACE_REQ, /* request to userspace was sent */
+ HVUTIL_USERSPACE_RECV, /* reply from userspace was received */
+ HVUTIL_DEVICE_DYING, /* driver unload is in progress */
+};
#endif /* _HYPERV_VMBUS_H */
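
The hvutil_device_state enum above is the backbone of all three conversions in this patch. Summarized from the hv_kvp.c hunks (an annotation, not itself part of the patch):

/*
 *   HVUTIL_DEVICE_INIT       driver loaded; only handshakes are accepted
 *            | daemon registers (kvp_handle_handshake)
 *            v
 *   HVUTIL_READY             host requests may be taken off the channel
 *            | request arrives (hv_kvp_onchannelcallback)
 *            v
 *   HVUTIL_HOSTMSG_RECEIVED  work item queued, timeout armed
 *            | hvutil_transport_send() to the daemon
 *            v
 *   HVUTIL_USERSPACE_REQ
 *            | daemon replies before the timeout (kvp_on_msg)
 *            v
 *   HVUTIL_USERSPACE_RECV  -> respond to host -> back to HVUTIL_READY
 *
 * kvp_timeout_func() forces any state above HVUTIL_READY back to
 * HVUTIL_READY, and HVUTIL_DEVICE_DYING is set on unload; on_reset()
 * returns a service to HVUTIL_DEVICE_INIT when its daemon drops off.
 */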
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index c85235e9f245..cf204005ee78 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1035,6 +1035,15 @@ acpi_walk_err:
return ret_val;
}
+static int vmbus_acpi_remove(struct acpi_device *device)
+{
+ int ret = 0;
+
+ if (hyperv_mmio.start && hyperv_mmio.end)
+ ret = release_resource(&hyperv_mmio);
+ return ret;
+}
+
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
{"VMBUS", 0},
{"VMBus", 0},
@@ -1047,6 +1056,7 @@ static struct acpi_driver vmbus_acpi_driver = {
.ids = vmbus_acpi_device_ids,
.ops = {
.add = vmbus_acpi_add,
+ .remove = vmbus_acpi_remove,
},
};
@@ -1096,15 +1106,22 @@ static void __exit vmbus_exit(void)
vmbus_connection.conn_state = DISCONNECTED;
hv_synic_clockevents_cleanup();
+ vmbus_disconnect();
hv_remove_vmbus_irq();
+ tasklet_kill(&msg_dpc);
vmbus_free_channels();
+ if (ms_hyperv.features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &hyperv_panic_block);
+ }
bus_unregister(&hv_bus);
hv_cleanup();
- for_each_online_cpu(cpu)
+ for_each_online_cpu(cpu) {
+ tasklet_kill(hv_context.event_dpc[cpu]);
smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
+ }
acpi_bus_unregister_driver(&vmbus_acpi_driver);
hv_cpu_hotplug_quirk(false);
- vmbus_disconnect();
}
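
The reordering in vmbus_exit() above follows one rule: quiesce producers before freeing what they feed. As an annotation (not part of the patch), the relevant teardown order is now:

/*
 *   1. vmbus_disconnect()       - stop accepting messages from the host
 *   2. hv_remove_vmbus_irq()    - no further interrupts are delivered
 *   3. tasklet_kill(&msg_dpc)   - the message tasklet has run to completion
 *   4. vmbus_free_channels()    - channel state can now be freed safely
 *   5. tasklet_kill() each per-cpu event tasklet before hv_synic_cleanup()
 */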
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 54075a07d2a1..7c65b7334738 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -371,6 +371,17 @@ config SENSORS_DS1621
This driver can also be built as a module. If so, the module
will be called ds1621.
+config SENSORS_DELL_SMM
+ tristate "Dell laptop SMM BIOS hwmon driver"
+ depends on X86
+ help
+ This hwmon driver adds support for reporting the temperature of
+ different sensors and for controlling the fans on Dell laptops via
+ the System Management Mode provided by the Dell BIOS.
+
+ When the I8K option is also enabled, this driver provides the legacy
+ /proc/i8k userspace interface for the i8kutils package.
+
config SENSORS_DA9052_ADC
tristate "Dialog DA9052/DA9053 ADC"
depends on PMIC_DA9052
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index ab904027f074..9e0f3dd2841d 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o
obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o
+obj-$(CONFIG_SENSORS_DELL_SMM) += dell-smm-hwmon.o
obj-$(CONFIG_SENSORS_DME1737) += dme1737.o
obj-$(CONFIG_SENSORS_DS620) += ds620.o
obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
diff --git a/drivers/char/i8k.c b/drivers/hwmon/dell-smm-hwmon.c
index a43048b5b05f..2a808822af21 100644
--- a/drivers/char/i8k.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1,12 +1,12 @@
/*
- * i8k.c -- Linux driver for accessing the SMM BIOS on Dell laptops.
+ * dell-smm-hwmon.c -- Linux driver for accessing the SMM BIOS on Dell laptops.
*
* Copyright (C) 2001 Massimo Dal Zotto <dz@debian.org>
*
* Hwmon integration:
* Copyright (C) 2011 Jean Delvare <jdelvare@suse.de>
* Copyright (C) 2013, 2014 Guenter Roeck <linux@roeck-us.net>
- * Copyright (C) 2014 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2014, 2015 Pali Rohár <pali.rohar@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -80,8 +80,10 @@ static uint i8k_fan_max = I8K_FAN_HIGH;
#define I8K_HWMON_HAVE_FAN2 (1 << 5)
MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
-MODULE_DESCRIPTION("Driver for accessing SMM BIOS on Dell laptops");
+MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_DESCRIPTION("Dell laptop SMM BIOS hwmon driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("i8k");
static bool force;
module_param(force, bool, 0);
@@ -91,6 +93,7 @@ static bool ignore_dmi;
module_param(ignore_dmi, bool, 0);
MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
+#if IS_ENABLED(CONFIG_I8K)
static bool restricted;
module_param(restricted, bool, 0);
MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
@@ -98,6 +101,7 @@ MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
static bool power_status;
module_param(power_status, bool, 0600);
MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
+#endif
static uint fan_mult;
module_param(fan_mult, uint, 0);
@@ -107,18 +111,6 @@ static uint fan_max;
module_param(fan_max, uint, 0);
MODULE_PARM_DESC(fan_max, "Maximum configurable fan speed (default: autodetect)");
-static int i8k_open_fs(struct inode *inode, struct file *file);
-static long i8k_ioctl(struct file *, unsigned int, unsigned long);
-
-static const struct file_operations i8k_fops = {
- .owner = THIS_MODULE,
- .open = i8k_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .unlocked_ioctl = i8k_ioctl,
-};
-
struct smm_regs {
unsigned int eax;
unsigned int ebx __packed;
@@ -219,45 +211,6 @@ out:
}
/*
- * Read the Fn key status.
- */
-static int i8k_get_fn_status(void)
-{
- struct smm_regs regs = { .eax = I8K_SMM_FN_STATUS, };
- int rc;
-
- rc = i8k_smm(&regs);
- if (rc < 0)
- return rc;
-
- switch ((regs.eax >> I8K_FN_SHIFT) & I8K_FN_MASK) {
- case I8K_FN_UP:
- return I8K_VOL_UP;
- case I8K_FN_DOWN:
- return I8K_VOL_DOWN;
- case I8K_FN_MUTE:
- return I8K_VOL_MUTE;
- default:
- return 0;
- }
-}
-
-/*
- * Read the power status.
- */
-static int i8k_get_power_status(void)
-{
- struct smm_regs regs = { .eax = I8K_SMM_POWER_STATUS, };
- int rc;
-
- rc = i8k_smm(&regs);
- if (rc < 0)
- return rc;
-
- return (regs.eax & 0xff) == I8K_POWER_AC ? I8K_AC : I8K_BATTERY;
-}
-
-/*
* Read the fan status.
*/
static int i8k_get_fan_status(int fan)
@@ -376,6 +329,51 @@ static int i8k_get_dell_signature(int req_fn)
return regs.eax == 1145651527 && regs.edx == 1145392204 ? 0 : -1;
}
+#if IS_ENABLED(CONFIG_I8K)
+
+/*
+ * Read the Fn key status.
+ */
+static int i8k_get_fn_status(void)
+{
+ struct smm_regs regs = { .eax = I8K_SMM_FN_STATUS, };
+ int rc;
+
+ rc = i8k_smm(&regs);
+ if (rc < 0)
+ return rc;
+
+ switch ((regs.eax >> I8K_FN_SHIFT) & I8K_FN_MASK) {
+ case I8K_FN_UP:
+ return I8K_VOL_UP;
+ case I8K_FN_DOWN:
+ return I8K_VOL_DOWN;
+ case I8K_FN_MUTE:
+ return I8K_VOL_MUTE;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Read the power status.
+ */
+static int i8k_get_power_status(void)
+{
+ struct smm_regs regs = { .eax = I8K_SMM_POWER_STATUS, };
+ int rc;
+
+ rc = i8k_smm(&regs);
+ if (rc < 0)
+ return rc;
+
+ return (regs.eax & 0xff) == I8K_POWER_AC ? I8K_AC : I8K_BATTERY;
+}
+
+/*
+ * Procfs interface
+ */
+
static int
i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
{
@@ -526,6 +524,37 @@ static int i8k_open_fs(struct inode *inode, struct file *file)
return single_open(file, i8k_proc_show, NULL);
}
+static const struct file_operations i8k_fops = {
+ .owner = THIS_MODULE,
+ .open = i8k_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .unlocked_ioctl = i8k_ioctl,
+};
+
+static void __init i8k_init_procfs(void)
+{
+ /* Register the proc entry */
+ proc_create("i8k", 0, NULL, &i8k_fops);
+}
+
+static void __exit i8k_exit_procfs(void)
+{
+ remove_proc_entry("i8k", NULL);
+}
+
+#else
+
+static inline void __init i8k_init_procfs(void)
+{
+}
+
+static inline void __exit i8k_exit_procfs(void)
+{
+}
+
+#endif
/*
* Hwmon interface
@@ -748,8 +777,8 @@ static int __init i8k_init_hwmon(void)
if (err >= 0)
i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
- i8k_hwmon_dev = hwmon_device_register_with_groups(NULL, "i8k", NULL,
- i8k_groups);
+ i8k_hwmon_dev = hwmon_device_register_with_groups(NULL, "dell-smm",
+ NULL, i8k_groups);
if (IS_ERR(i8k_hwmon_dev)) {
err = PTR_ERR(i8k_hwmon_dev);
i8k_hwmon_dev = NULL;
@@ -974,33 +1003,24 @@ static int __init i8k_probe(void)
static int __init i8k_init(void)
{
- struct proc_dir_entry *proc_i8k;
int err;
/* Are we running on a supported laptop? */
if (i8k_probe())
return -ENODEV;
- /* Register the proc entry */
- proc_i8k = proc_create("i8k", 0, NULL, &i8k_fops);
- if (!proc_i8k)
- return -ENOENT;
-
err = i8k_init_hwmon();
if (err)
- goto exit_remove_proc;
+ return err;
+ i8k_init_procfs();
return 0;
-
- exit_remove_proc:
- remove_proc_entry("i8k", NULL);
- return err;
}
static void __exit i8k_exit(void)
{
hwmon_device_unregister(i8k_hwmon_dev);
- remove_proc_entry("i8k", NULL);
+ i8k_exit_procfs();
}
module_init(i8k_init);
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index fc1f1ae7a49d..6c8921140f02 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -58,4 +58,23 @@ config CORESIGHT_SOURCE_ETM3X
which allows tracing the instructions that a processor is executing
This is primarily useful for instruction level tracing. Depending
on the ETM version, data tracing may also be available.
+
+config CORESIGHT_SOURCE_ETM4X
+ bool "CoreSight Embedded Trace Macrocell 4.x driver"
+ depends on ARM64
+ select CORESIGHT_LINKS_AND_SINKS
+ help
+ This driver provides support for the ETM4.x tracer module, tracing the
+ instructions that a processor is executing. This is primarily useful
+ for instruction level tracing. Depending on the implemented version,
+ data tracing may also be available.
+
+config CORESIGHT_QCOM_REPLICATOR
+ bool "Qualcomm CoreSight Replicator driver"
+ depends on CORESIGHT_LINKS_AND_SINKS
+ help
+ This enables support for the Qualcomm CoreSight link driver. The
+ programmable ATB replicator sends the ATB trace stream from the
+ ETB/ETF to the TPIU and ETR.
+
endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 4b4bec890ef5..99f8e5f6256e 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -9,3 +9,5 @@ obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
coresight-replicator.o
obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o
+obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o
+obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 40049869aecd..77d0f9c1118d 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -22,10 +22,11 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
+#include <linux/clk.h>
#include "coresight-priv.h"
@@ -66,9 +67,9 @@
* struct etb_drvdata - specifics associated to an ETB component
* @base: memory mapped base address for this component.
* @dev: the device entity associated to this component.
+ * @atclk: optional clock for the core parts of the ETB.
* @csdev: component vitals needed by the framework.
* @miscdev: specifics to handle "/dev/xyz.etb" entry.
- * @clk: the clock this component is associated to.
* @spinlock: only one at a time pls.
* @in_use: synchronise user space access to etb buffer.
* @buf: area of memory where ETB buffer content gets sent.
@@ -79,9 +80,9 @@
struct etb_drvdata {
void __iomem *base;
struct device *dev;
+ struct clk *atclk;
struct coresight_device *csdev;
struct miscdevice miscdev;
- struct clk *clk;
spinlock_t spinlock;
atomic_t in_use;
u8 *buf;
@@ -92,17 +93,14 @@ struct etb_drvdata {
static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
{
- int ret;
u32 depth = 0;
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
+ pm_runtime_get_sync(drvdata->dev);
/* RO registers don't need locking */
depth = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
return depth;
}
@@ -137,12 +135,9 @@ static void etb_enable_hw(struct etb_drvdata *drvdata)
static int etb_enable(struct coresight_device *csdev)
{
struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- int ret;
unsigned long flags;
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
+ pm_runtime_get_sync(drvdata->dev);
spin_lock_irqsave(&drvdata->spinlock, flags);
etb_enable_hw(drvdata);
@@ -252,7 +247,7 @@ static void etb_disable(struct coresight_device *csdev)
drvdata->enable = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
dev_info(drvdata->dev, "ETB disabled\n");
}
@@ -339,16 +334,12 @@ static const struct file_operations etb_fops = {
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int ret;
unsigned long flags;
u32 etb_rdr, etb_sr, etb_rrp, etb_rwp;
u32 etb_trg, etb_cr, etb_ffsr, etb_ffcr;
struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- goto out;
-
+ pm_runtime_get_sync(drvdata->dev);
spin_lock_irqsave(&drvdata->spinlock, flags);
CS_UNLOCK(drvdata->base);
@@ -364,7 +355,7 @@ static ssize_t status_show(struct device *dev,
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
return sprintf(buf,
"Depth:\t\t0x%x\n"
@@ -377,7 +368,7 @@ static ssize_t status_show(struct device *dev,
"Flush ctrl:\t0x%x\n",
etb_rdr, etb_sr, etb_rrp, etb_rwp,
etb_trg, etb_cr, etb_ffsr, etb_ffcr);
-out:
+
return -EINVAL;
}
static DEVICE_ATTR_RO(status);
@@ -438,6 +429,12 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
return -ENOMEM;
drvdata->dev = &adev->dev;
+ drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
+ if (!IS_ERR(drvdata->atclk)) {
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ return ret;
+ }
dev_set_drvdata(dev, drvdata);
/* validity for the resource is already checked by the AMBA core */
@@ -449,21 +446,19 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&drvdata->spinlock);
- drvdata->clk = adev->pclk;
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
-
drvdata->buffer_depth = etb_get_buffer_depth(drvdata);
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(&adev->dev);
- if (drvdata->buffer_depth < 0)
+ if (drvdata->buffer_depth & 0x80000000)
return -EINVAL;
drvdata->buf = devm_kzalloc(dev,
drvdata->buffer_depth * 4, GFP_KERNEL);
- if (!drvdata->buf)
+ if (!drvdata->buf) {
+ dev_err(dev, "Failed to allocate %u bytes for buffer data\n",
+ drvdata->buffer_depth * 4);
return -ENOMEM;
+ }
desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
if (!desc)
@@ -503,6 +498,32 @@ static int etb_remove(struct amba_device *adev)
return 0;
}
+#ifdef CONFIG_PM
+static int etb_runtime_suspend(struct device *dev)
+{
+ struct etb_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata && !IS_ERR(drvdata->atclk))
+ clk_disable_unprepare(drvdata->atclk);
+
+ return 0;
+}
+
+static int etb_runtime_resume(struct device *dev)
+{
+ struct etb_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata && !IS_ERR(drvdata->atclk))
+ clk_prepare_enable(drvdata->atclk);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops etb_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(etb_runtime_suspend, etb_runtime_resume, NULL)
+};
+
static struct amba_id etb_ids[] = {
{
.id = 0x0003b907,
@@ -515,6 +536,8 @@ static struct amba_driver etb_driver = {
.drv = {
.name = "coresight-etb10",
.owner = THIS_MODULE,
+ .pm = &etb_dev_pm_ops,
},
.probe = etb_probe,
.remove = etb_remove,
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index 501c5fac8a45..098ffbec0a44 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -140,8 +140,8 @@
* struct etm_drvdata - specifics associated to an ETM component
* @base: memory mapped base address for this component.
* @dev: the device entity associated to this component.
+ * @atclk: optional clock for the core parts of the ETM.
* @csdev: component vitals needed by the framework.
- * @clk: the clock this component is associated to.
* @spinlock: only one at a time pls.
* @cpu: the cpu this component is affined to.
* @port_size: port size as reported by ETMCR bit 4-6 and 21.
@@ -192,8 +192,8 @@
struct etm_drvdata {
void __iomem *base;
struct device *dev;
+ struct clk *atclk;
struct coresight_device *csdev;
- struct clk *clk;
spinlock_t spinlock;
int cpu;
int port_size;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index c965f5724abd..018a00fda611 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -23,13 +23,14 @@
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
-#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
+#include <linux/clk.h>
#include <asm/sections.h>
#include "coresight-etm.h"
@@ -325,9 +326,7 @@ static int etm_trace_id(struct coresight_device *csdev)
if (!drvdata->enable)
return drvdata->traceid;
-
- if (clk_prepare_enable(drvdata->clk))
- goto out;
+ pm_runtime_get_sync(csdev->dev.parent);
spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -336,8 +335,8 @@ static int etm_trace_id(struct coresight_device *csdev)
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- clk_disable_unprepare(drvdata->clk);
-out:
+ pm_runtime_put(csdev->dev.parent);
+
return trace_id;
}
@@ -346,10 +345,7 @@ static int etm_enable(struct coresight_device *csdev)
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int ret;
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- goto err_clk;
-
+ pm_runtime_get_sync(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
/*
@@ -373,8 +369,7 @@ static int etm_enable(struct coresight_device *csdev)
return 0;
err:
spin_unlock(&drvdata->spinlock);
- clk_disable_unprepare(drvdata->clk);
-err_clk:
+ pm_runtime_put(csdev->dev.parent);
return ret;
}
@@ -423,8 +418,7 @@ static void etm_disable(struct coresight_device *csdev)
spin_unlock(&drvdata->spinlock);
put_online_cpus();
-
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(csdev->dev.parent);
dev_info(drvdata->dev, "ETM tracing disabled\n");
}
@@ -474,14 +468,10 @@ static DEVICE_ATTR_RO(nr_ctxid_cmp);
static ssize_t etmsr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int ret;
unsigned long flags, val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
-
+ pm_runtime_get_sync(drvdata->dev);
spin_lock_irqsave(&drvdata->spinlock, flags);
CS_UNLOCK(drvdata->base);
@@ -489,7 +479,7 @@ static ssize_t etmsr_show(struct device *dev,
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
return sprintf(buf, "%#lx\n", val);
}
@@ -1317,7 +1307,6 @@ static DEVICE_ATTR_RW(seq_13_event);
static ssize_t seq_curr_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int ret;
unsigned long val, flags;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
@@ -1326,10 +1315,7 @@ static ssize_t seq_curr_state_show(struct device *dev,
goto out;
}
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
-
+ pm_runtime_get_sync(drvdata->dev);
spin_lock_irqsave(&drvdata->spinlock, flags);
CS_UNLOCK(drvdata->base);
@@ -1337,7 +1323,7 @@ static ssize_t seq_curr_state_show(struct device *dev,
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
out:
return sprintf(buf, "%#lx\n", val);
}
@@ -1521,10 +1507,7 @@ static ssize_t status_show(struct device *dev,
unsigned long flags;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
-
+ pm_runtime_get_sync(drvdata->dev);
spin_lock_irqsave(&drvdata->spinlock, flags);
CS_UNLOCK(drvdata->base);
@@ -1550,7 +1533,7 @@ static ssize_t status_show(struct device *dev,
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
return ret;
}
@@ -1559,7 +1542,6 @@ static DEVICE_ATTR_RO(status);
static ssize_t traceid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int ret;
unsigned long val, flags;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
@@ -1568,10 +1550,7 @@ static ssize_t traceid_show(struct device *dev,
goto out;
}
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
-
+ pm_runtime_get_sync(drvdata->dev);
spin_lock_irqsave(&drvdata->spinlock, flags);
CS_UNLOCK(drvdata->base);
@@ -1579,7 +1558,7 @@ static ssize_t traceid_show(struct device *dev,
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
out:
return sprintf(buf, "%#lx\n", val);
}
@@ -1817,10 +1796,12 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&drvdata->spinlock);
- drvdata->clk = adev->pclk;
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
+ drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
+ if (!IS_ERR(drvdata->atclk)) {
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ return ret;
+ }
drvdata->cpu = pdata ? pdata->cpu : 0;
@@ -1845,8 +1826,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
}
etm_init_default_data(drvdata);
- clk_disable_unprepare(drvdata->clk);
-
desc->type = CORESIGHT_DEV_TYPE_SOURCE;
desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
desc->ops = &etm_cs_ops;
@@ -1859,7 +1838,8 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
goto err_arch_supported;
}
- dev_info(dev, "ETM initialized\n");
+ pm_runtime_put(&adev->dev);
+ dev_info(dev, "%s initialized\n", (char *)id->data);
if (boot_enable) {
coresight_enable(drvdata->csdev);
@@ -1869,7 +1849,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
err_arch_supported:
- clk_disable_unprepare(drvdata->clk);
if (--etm_count == 0)
unregister_hotcpu_notifier(&etm_cpu_notifier);
return ret;
@@ -1886,22 +1865,52 @@ static int etm_remove(struct amba_device *adev)
return 0;
}
+#ifdef CONFIG_PM
+static int etm_runtime_suspend(struct device *dev)
+{
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata && !IS_ERR(drvdata->atclk))
+ clk_disable_unprepare(drvdata->atclk);
+
+ return 0;
+}
+
+static int etm_runtime_resume(struct device *dev)
+{
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata && !IS_ERR(drvdata->atclk))
+ clk_prepare_enable(drvdata->atclk);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops etm_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(etm_runtime_suspend, etm_runtime_resume, NULL)
+};
+
static struct amba_id etm_ids[] = {
{ /* ETM 3.3 */
.id = 0x0003b921,
.mask = 0x0003ffff,
+ .data = "ETM 3.3",
},
{ /* ETM 3.5 */
.id = 0x0003b956,
.mask = 0x0003ffff,
+ .data = "ETM 3.5",
},
{ /* PTM 1.0 */
.id = 0x0003b950,
.mask = 0x0003ffff,
+ .data = "PTM 1.0",
},
{ /* PTM 1.1 */
.id = 0x0003b95f,
.mask = 0x0003ffff,
+ .data = "PTM 1.1",
},
{ 0, 0},
};
@@ -1910,23 +1919,14 @@ static struct amba_driver etm_driver = {
.drv = {
.name = "coresight-etm3x",
.owner = THIS_MODULE,
+ .pm = &etm_dev_pm_ops,
},
.probe = etm_probe,
.remove = etm_remove,
.id_table = etm_ids,
};
-int __init etm_init(void)
-{
- return amba_driver_register(&etm_driver);
-}
-module_init(etm_init);
-
-void __exit etm_exit(void)
-{
- amba_driver_unregister(&etm_driver);
-}
-module_exit(etm_exit);
+module_amba_driver(etm_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
new file mode 100644
index 000000000000..1312e993c501
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -0,0 +1,2702 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/sysfs.h>
+#include <linux/stat.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/coresight.h>
+#include <linux/pm_wakeup.h>
+#include <linux/amba/bus.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
+#include <asm/sections.h>
+
+#include "coresight-etm4x.h"
+
+static int boot_enable;
+module_param_named(boot_enable, boot_enable, int, S_IRUGO);
+
+/* The number of ETMv4 currently registered */
+static int etm4_count;
+static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
+
+static void etm4_os_unlock(void *info)
+{
+ struct etmv4_drvdata *drvdata = (struct etmv4_drvdata *)info;
+
+ /* Writing any value to ETMOSLAR unlocks the trace registers */
+ writel_relaxed(0x0, drvdata->base + TRCOSLAR);
+ isb();
+}
+
+static bool etm4_arch_supported(u8 arch)
+{
+ switch (arch) {
+ case ETM_ARCH_V4:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static int etm4_trace_id(struct coresight_device *csdev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ unsigned long flags;
+ int trace_id = -1;
+
+ if (!drvdata->enable)
+ return drvdata->trcid;
+
+ pm_runtime_get_sync(drvdata->dev);
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ CS_UNLOCK(drvdata->base);
+ trace_id = readl_relaxed(drvdata->base + TRCTRACEIDR);
+ trace_id &= ETM_TRACEID_MASK;
+ CS_LOCK(drvdata->base);
+
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ pm_runtime_put(drvdata->dev);
+
+ return trace_id;
+}
+
+static void etm4_enable_hw(void *info)
+{
+ int i;
+ struct etmv4_drvdata *drvdata = info;
+
+ CS_UNLOCK(drvdata->base);
+
+ etm4_os_unlock(drvdata);
+
+ /* Disable the trace unit before programming trace registers */
+ writel_relaxed(0, drvdata->base + TRCPRGCTLR);
+
+ /* wait for TRCSTATR.IDLE to go up */
+ if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
+ dev_err(drvdata->dev,
+ "timeout observed when probing at offset %#x\n",
+ TRCSTATR);
+
+ writel_relaxed(drvdata->pe_sel, drvdata->base + TRCPROCSELR);
+ writel_relaxed(drvdata->cfg, drvdata->base + TRCCONFIGR);
+ /* nothing specific implemented */
+ writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
+ writel_relaxed(drvdata->eventctrl0, drvdata->base + TRCEVENTCTL0R);
+ writel_relaxed(drvdata->eventctrl1, drvdata->base + TRCEVENTCTL1R);
+ writel_relaxed(drvdata->stall_ctrl, drvdata->base + TRCSTALLCTLR);
+ writel_relaxed(drvdata->ts_ctrl, drvdata->base + TRCTSCTLR);
+ writel_relaxed(drvdata->syncfreq, drvdata->base + TRCSYNCPR);
+ writel_relaxed(drvdata->ccctlr, drvdata->base + TRCCCCTLR);
+ writel_relaxed(drvdata->bb_ctrl, drvdata->base + TRCBBCTLR);
+ writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
+ writel_relaxed(drvdata->vinst_ctrl, drvdata->base + TRCVICTLR);
+ writel_relaxed(drvdata->viiectlr, drvdata->base + TRCVIIECTLR);
+ writel_relaxed(drvdata->vissctlr,
+ drvdata->base + TRCVISSCTLR);
+ writel_relaxed(drvdata->vipcssctlr,
+ drvdata->base + TRCVIPCSSCTLR);
+ for (i = 0; i < drvdata->nrseqstate - 1; i++)
+ writel_relaxed(drvdata->seq_ctrl[i],
+ drvdata->base + TRCSEQEVRn(i));
+ writel_relaxed(drvdata->seq_rst, drvdata->base + TRCSEQRSTEVR);
+ writel_relaxed(drvdata->seq_state, drvdata->base + TRCSEQSTR);
+ writel_relaxed(drvdata->ext_inp, drvdata->base + TRCEXTINSELR);
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ writel_relaxed(drvdata->cntrldvr[i],
+ drvdata->base + TRCCNTRLDVRn(i));
+ writel_relaxed(drvdata->cntr_ctrl[i],
+ drvdata->base + TRCCNTCTLRn(i));
+ writel_relaxed(drvdata->cntr_val[i],
+ drvdata->base + TRCCNTVRn(i));
+ }
+ for (i = 0; i < drvdata->nr_resource; i++)
+ writel_relaxed(drvdata->res_ctrl[i],
+ drvdata->base + TRCRSCTLRn(i));
+
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ writel_relaxed(drvdata->ss_ctrl[i],
+ drvdata->base + TRCSSCCRn(i));
+ writel_relaxed(drvdata->ss_status[i],
+ drvdata->base + TRCSSCSRn(i));
+ writel_relaxed(drvdata->ss_pe_cmp[i],
+ drvdata->base + TRCSSPCICRn(i));
+ }
+ for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+ writeq_relaxed(drvdata->addr_val[i],
+ drvdata->base + TRCACVRn(i));
+ writeq_relaxed(drvdata->addr_acc[i],
+ drvdata->base + TRCACATRn(i));
+ }
+ for (i = 0; i < drvdata->numcidc; i++)
+ writeq_relaxed(drvdata->ctxid_val[i],
+ drvdata->base + TRCCIDCVRn(i));
+ writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
+ writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
+
+ for (i = 0; i < drvdata->numvmidc; i++)
+ writeq_relaxed(drvdata->vmid_val[i],
+ drvdata->base + TRCVMIDCVRn(i));
+ writel_relaxed(drvdata->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
+ writel_relaxed(drvdata->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
+
+ /* Enable the trace unit */
+ writel_relaxed(1, drvdata->base + TRCPRGCTLR);
+
+ /* wait for TRCSTATR.IDLE to go back down to '0' */
+ if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
+ dev_err(drvdata->dev,
+ "timeout observed when probing at offset %#x\n",
+ TRCSTATR);
+
+ CS_LOCK(drvdata->base);
+
+ dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
+}
+
+static int etm4_enable(struct coresight_device *csdev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ int ret;
+
+ pm_runtime_get_sync(drvdata->dev);
+ spin_lock(&drvdata->spinlock);
+
+ /*
+ * Executing etm4_enable_hw on the cpu whose ETM is being enabled
+ * ensures that register writes occur while the cpu is powered.
+ */
+ ret = smp_call_function_single(drvdata->cpu,
+ etm4_enable_hw, drvdata, 1);
+ if (ret)
+ goto err;
+ drvdata->enable = true;
+ drvdata->sticky_enable = true;
+
+ spin_unlock(&drvdata->spinlock);
+
+ dev_info(drvdata->dev, "ETM tracing enabled\n");
+ return 0;
+err:
+ spin_unlock(&drvdata->spinlock);
+ pm_runtime_put(drvdata->dev);
+ return ret;
+}
+
+static void etm4_disable_hw(void *info)
+{
+ u32 control;
+ struct etmv4_drvdata *drvdata = info;
+
+ CS_UNLOCK(drvdata->base);
+
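+ /* Read-modify-write so that only the trace enable bit is cleared */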
+ control = readl_relaxed(drvdata->base + TRCPRGCTLR);
+
+ /* EN, bit[0] Trace unit enable bit */
+ control &= ~0x1;
+
+ /* make sure everything completes before disabling */
+ mb();
+ isb();
+ writel_relaxed(control, drvdata->base + TRCPRGCTLR);
+
+ CS_LOCK(drvdata->base);
+
+ dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
+}
+
+static void etm4_disable(struct coresight_device *csdev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ /*
+ * Taking the hotplug lock here protects against the clocks being
+ * disabled while tracing is left on (a crash scenario) if a user
+ * disable occurs after the cpu online mask indicates the cpu is
+ * offline but before the DYING hotplug callback is serviced by the
+ * ETM driver.
+ */
+ get_online_cpus();
+ spin_lock(&drvdata->spinlock);
+
+ /*
+ * Executing etm4_disable_hw on the cpu whose ETM is being disabled
+ * ensures that register writes occur while the cpu is powered.
+ */
+ smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
+ drvdata->enable = false;
+
+ spin_unlock(&drvdata->spinlock);
+ put_online_cpus();
+
+ pm_runtime_put(drvdata->dev);
+
+ dev_info(drvdata->dev, "ETM tracing disabled\n");
+}
+
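+/* Operations the coresight core calls to drive this trace source */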
+static const struct coresight_ops_source etm4_source_ops = {
+ .trace_id = etm4_trace_id,
+ .enable = etm4_enable,
+ .disable = etm4_disable,
+};
+
+static const struct coresight_ops etm4_cs_ops = {
+ .source_ops = &etm4_source_ops,
+};
+
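+/*
+ * Set or clear the ViewInst include/exclude control bits for the
+ * address comparator pair currently selected by addr_idx.
+ */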
+static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
+{
+ u8 idx = drvdata->addr_idx;
+
+ /*
+ * TRCACATRn.TYPE bit[1:0]: type of comparison
+ * the trace unit performs
+ */
+ if (BMVAL(drvdata->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
+ if (idx % 2 != 0)
+ return -EINVAL;
+
+ /*
+ * We are performing instruction address comparison. Set the
+ * relevant bit of ViewInst Include/Exclude Control register
+ * for corresponding address comparator pair.
+ */
+ if (drvdata->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
+ drvdata->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
+ return -EINVAL;
+
+ if (exclude == true) {
+ /*
+ * Set exclude bit and unset the include bit
+ * corresponding to comparator pair
+ */
+ drvdata->viiectlr |= BIT(idx / 2 + 16);
+ drvdata->viiectlr &= ~BIT(idx / 2);
+ } else {
+ /*
+ * Set include bit and unset exclude bit
+ * corresponding to comparator pair
+ */
+ drvdata->viiectlr |= BIT(idx / 2);
+ drvdata->viiectlr &= ~BIT(idx / 2 + 16);
+ }
+ }
+ return 0;
+}
+
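+/*
+ * Read-only attributes reporting how many of each resource this
+ * implementation of the ETM provides.
+ */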
+static ssize_t nr_pe_cmp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_pe_cmp;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_pe_cmp);
+
+static ssize_t nr_addr_cmp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_addr_cmp;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_addr_cmp);
+
+static ssize_t nr_cntr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_cntr;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_cntr);
+
+static ssize_t nr_ext_inp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_ext_inp;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_ext_inp);
+
+static ssize_t numcidc_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->numcidc;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(numcidc);
+
+static ssize_t numvmidc_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->numvmidc;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(numvmidc);
+
+static ssize_t nrseqstate_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nrseqstate;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nrseqstate);
+
+static ssize_t nr_resource_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_resource;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_resource);
+
+static ssize_t nr_ss_cmp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_ss_cmp;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_ss_cmp);
+
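+/* Writing to 'reset' returns the trace configuration to a default state */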
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int i;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ if (val)
+ drvdata->mode = 0x0;
+
+ /* Disable data tracing: do not trace load and store data transfers */
+ drvdata->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
+ drvdata->cfg &= ~(BIT(1) | BIT(2));
+
+ /* Disable data value and data address tracing */
+ drvdata->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
+ ETM_MODE_DATA_TRACE_VAL);
+ drvdata->cfg &= ~(BIT(16) | BIT(17));
+
+ /* Disable all events tracing */
+ drvdata->eventctrl0 = 0x0;
+ drvdata->eventctrl1 = 0x0;
+
+ /* Disable timestamp event */
+ drvdata->ts_ctrl = 0x0;
+
+ /* Disable stalling */
+ drvdata->stall_ctrl = 0x0;
+
+ /* Reset trace synchronization period to 2^8 = 256 bytes */
+ if (drvdata->syncpr == false)
+ drvdata->syncfreq = 0x8;
+
+ /*
+ * Enable ViewInst to trace everything with start-stop logic in
+ * started state. ARM recommends start-stop logic is set before
+ * each trace run.
+ */
+ drvdata->vinst_ctrl |= BIT(0);
+ if (drvdata->nr_addr_cmp > 0) {
+ drvdata->mode |= ETM_MODE_VIEWINST_STARTSTOP;
+ /* SSSTATUS, bit[9] */
+ drvdata->vinst_ctrl |= BIT(9);
+ }
+
+ /* No address range filtering for ViewInst */
+ drvdata->viiectlr = 0x0;
+
+ /* No start-stop filtering for ViewInst */
+ drvdata->vissctlr = 0x0;
+
+ /* Disable seq events */
+ for (i = 0; i < drvdata->nrseqstate-1; i++)
+ drvdata->seq_ctrl[i] = 0x0;
+ drvdata->seq_rst = 0x0;
+ drvdata->seq_state = 0x0;
+
+ /* Disable external input events */
+ drvdata->ext_inp = 0x0;
+
+ drvdata->cntr_idx = 0x0;
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ drvdata->cntrldvr[i] = 0x0;
+ drvdata->cntr_ctrl[i] = 0x0;
+ drvdata->cntr_val[i] = 0x0;
+ }
+
+ drvdata->res_idx = 0x0;
+ for (i = 0; i < drvdata->nr_resource; i++)
+ drvdata->res_ctrl[i] = 0x0;
+
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ drvdata->ss_ctrl[i] = 0x0;
+ drvdata->ss_pe_cmp[i] = 0x0;
+ }
+
+ drvdata->addr_idx = 0x0;
+ for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
+ drvdata->addr_val[i] = 0x0;
+ drvdata->addr_acc[i] = 0x0;
+ drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
+ }
+
+ drvdata->ctxid_idx = 0x0;
+ for (i = 0; i < drvdata->numcidc; i++)
+ drvdata->ctxid_val[i] = 0x0;
+ drvdata->ctxid_mask0 = 0x0;
+ drvdata->ctxid_mask1 = 0x0;
+
+ drvdata->vmid_idx = 0x0;
+ for (i = 0; i < drvdata->numvmidc; i++)
+ drvdata->vmid_val[i] = 0x0;
+ drvdata->vmid_mask0 = 0x0;
+ drvdata->vmid_mask1 = 0x0;
+
+ drvdata->trcid = drvdata->cpu + 1;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_WO(reset);
+
+static ssize_t mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->mode;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
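+/*
+ * Translate the user-supplied mode into the shadow values of the
+ * config, event, stall and ViewInst control registers; the hardware
+ * is programmed from these shadows at enable time.
+ */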
+static ssize_t mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val, mode;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->mode = val & ETMv4_MODE_ALL;
+
+ if (drvdata->mode & ETM_MODE_EXCLUDE)
+ etm4_set_mode_exclude(drvdata, true);
+ else
+ etm4_set_mode_exclude(drvdata, false);
+
+ if (drvdata->instrp0 == true) {
+ /* start by clearing instruction P0 field */
+ drvdata->cfg &= ~(BIT(1) | BIT(2));
+ if (drvdata->mode & ETM_MODE_LOAD)
+ /* 0b01 Trace load instructions as P0 instructions */
+ drvdata->cfg |= BIT(1);
+ if (drvdata->mode & ETM_MODE_STORE)
+ /* 0b10 Trace store instructions as P0 instructions */
+ drvdata->cfg |= BIT(2);
+ if (drvdata->mode & ETM_MODE_LOAD_STORE)
+ /*
+ * 0b11 Trace load and store instructions
+ * as P0 instructions
+ */
+ drvdata->cfg |= BIT(1) | BIT(2);
+ }
+
+ /* bit[3], Branch broadcast mode */
+ if ((drvdata->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
+ drvdata->cfg |= BIT(3);
+ else
+ drvdata->cfg &= ~BIT(3);
+
+ /* bit[4], Cycle counting instruction trace bit */
+ if ((drvdata->mode & ETMv4_MODE_CYCACC) &&
+ (drvdata->trccci == true))
+ drvdata->cfg |= BIT(4);
+ else
+ drvdata->cfg &= ~BIT(4);
+
+ /* bit[6], Context ID tracing bit */
+ if ((drvdata->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
+ drvdata->cfg |= BIT(6);
+ else
+ drvdata->cfg &= ~BIT(6);
+
+ if ((drvdata->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
+ drvdata->cfg |= BIT(7);
+ else
+ drvdata->cfg &= ~BIT(7);
+
+ /* bits[10:8], Conditional instruction tracing bit */
+ mode = ETM_MODE_COND(drvdata->mode);
+ if (drvdata->trccond == true) {
+ drvdata->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
+ drvdata->cfg |= mode << 8;
+ }
+
+ /* bit[11], Global timestamp tracing bit */
+ if ((drvdata->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
+ drvdata->cfg |= BIT(11);
+ else
+ drvdata->cfg &= ~BIT(11);
+
+ /* bit[12], Return stack enable bit */
+ if ((drvdata->mode & ETM_MODE_RETURNSTACK) &&
+ (drvdata->retstack == true))
+ drvdata->cfg |= BIT(12);
+ else
+ drvdata->cfg &= ~BIT(12);
+
+ /* bits[14:13], Q element enable field */
+ mode = ETM_MODE_QELEM(drvdata->mode);
+ /* start by clearing QE bits */
+ drvdata->cfg &= ~(BIT(13) | BIT(14));
+ /* if supported, Q elements with instruction counts are enabled */
+ if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
+ drvdata->cfg |= BIT(13);
+ /*
+ * if supported, Q elements with and without instruction
+ * counts are enabled
+ */
+ if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
+ drvdata->cfg |= BIT(14);
+
+ /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
+ if ((drvdata->mode & ETM_MODE_ATB_TRIGGER) &&
+ (drvdata->atbtrig == true))
+ drvdata->eventctrl1 |= BIT(11);
+ else
+ drvdata->eventctrl1 &= ~BIT(11);
+
+ /* bit[12], Low-power state behavior override bit */
+ if ((drvdata->mode & ETM_MODE_LPOVERRIDE) &&
+ (drvdata->lpoverride == true))
+ drvdata->eventctrl1 |= BIT(12);
+ else
+ drvdata->eventctrl1 &= ~BIT(12);
+
+ /* bit[8], Instruction stall bit */
+ if (drvdata->mode & ETM_MODE_ISTALL_EN)
+ drvdata->stall_ctrl |= BIT(8);
+ else
+ drvdata->stall_ctrl &= ~BIT(8);
+
+ /* bit[10], Prioritize instruction trace bit */
+ if (drvdata->mode & ETM_MODE_INSTPRIO)
+ drvdata->stall_ctrl |= BIT(10);
+ else
+ drvdata->stall_ctrl &= ~BIT(10);
+
+ /* bit[13], Trace overflow prevention bit */
+ if ((drvdata->mode & ETM_MODE_NOOVERFLOW) &&
+ (drvdata->nooverflow == true))
+ drvdata->stall_ctrl |= BIT(13);
+ else
+ drvdata->stall_ctrl &= ~BIT(13);
+
+ /* bit[9] Start/stop logic control bit */
+ if (drvdata->mode & ETM_MODE_VIEWINST_STARTSTOP)
+ drvdata->vinst_ctrl |= BIT(9);
+ else
+ drvdata->vinst_ctrl &= ~BIT(9);
+
+ /* bit[10], Whether a trace unit must trace a Reset exception */
+ if (drvdata->mode & ETM_MODE_TRACE_RESET)
+ drvdata->vinst_ctrl |= BIT(10);
+ else
+ drvdata->vinst_ctrl &= ~BIT(10);
+
+ /* bit[11], Whether a trace unit must trace a system error exception */
+ if ((drvdata->mode & ETM_MODE_TRACE_ERR) &&
+ (drvdata->trc_error == true))
+ drvdata->vinst_ctrl |= BIT(11);
+ else
+ drvdata->vinst_ctrl &= ~BIT(11);
+
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(mode);
+
+static ssize_t pe_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->pe_sel;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t pe_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ if (val > drvdata->nr_pe) {
+ spin_unlock(&drvdata->spinlock);
+ return -EINVAL;
+ }
+
+ drvdata->pe_sel = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(pe);
+
+static ssize_t event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->eventctrl0;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
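+ /* An nr_event of n means event fields 0..n are implemented */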
+ switch (drvdata->nr_event) {
+ case 0x0:
+ /* EVENT0, bits[7:0] */
+ drvdata->eventctrl0 = val & 0xFF;
+ break;
+ case 0x1:
+ /* EVENT1, bits[15:8] */
+ drvdata->eventctrl0 = val & 0xFFFF;
+ break;
+ case 0x2:
+ /* EVENT2, bits[23:16] */
+ drvdata->eventctrl0 = val & 0xFFFFFF;
+ break;
+ case 0x3:
+ /* EVENT3, bits[31:24] */
+ drvdata->eventctrl0 = val;
+ break;
+ default:
+ break;
+ }
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(event);
+
+static ssize_t event_instren_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = BMVAL(drvdata->eventctrl1, 0, 3);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t event_instren_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ /* start by clearing all instruction event enable bits */
+ drvdata->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
+ switch (drvdata->nr_event) {
+ case 0x0:
+ /* generate Event element for event 1 */
+ drvdata->eventctrl1 |= val & BIT(1);
+ break;
+ case 0x1:
+ /* generate Event element for event 1 and 2 */
+ drvdata->eventctrl1 |= val & (BIT(0) | BIT(1));
+ break;
+ case 0x2:
+ /* generate Event element for event 1, 2 and 3 */
+ drvdata->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
+ break;
+ case 0x3:
+ /* generate Event element for all 4 events */
+ drvdata->eventctrl1 |= val & 0xF;
+ break;
+ default:
+ break;
+ }
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(event_instren);
+
+static ssize_t event_ts_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->ts_ctrl;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t event_ts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (!drvdata->ts_size)
+ return -EINVAL;
+
+ drvdata->ts_ctrl = val & ETMv4_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(event_ts);
+
+static ssize_t syncfreq_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->syncfreq;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t syncfreq_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (drvdata->syncpr == true)
+ return -EINVAL;
+
+ drvdata->syncfreq = val & ETMv4_SYNC_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(syncfreq);
+
+static ssize_t cyc_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->ccctlr;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t cyc_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val < drvdata->ccitmin)
+ return -EINVAL;
+
+ drvdata->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(cyc_threshold);
+
+static ssize_t bb_ctrl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->bb_ctrl;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t bb_ctrl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (drvdata->trcbb == false)
+ return -EINVAL;
+ if (!drvdata->nr_addr_cmp)
+ return -EINVAL;
+ /*
+ * Bit[7:0] selects which address range comparator is used for
+ * branch broadcast control.
+ */
+ if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
+ return -EINVAL;
+
+ drvdata->bb_ctrl = val;
+ return size;
+}
+static DEVICE_ATTR_RW(bb_ctrl);
+
+static ssize_t event_vinst_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->vinst_ctrl & ETMv4_EVENT_MASK;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t event_vinst_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ val &= ETMv4_EVENT_MASK;
+ drvdata->vinst_ctrl &= ~ETMv4_EVENT_MASK;
+ drvdata->vinst_ctrl |= val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(event_vinst);
+
+static ssize_t s_exlevel_vinst_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = BMVAL(drvdata->vinst_ctrl, 16, 19);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t s_exlevel_vinst_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
+ drvdata->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
+ /* enable instruction tracing for corresponding exception level */
+ val &= drvdata->s_ex_level;
+ drvdata->vinst_ctrl |= (val << 16);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(s_exlevel_vinst);
+
+static ssize_t ns_exlevel_vinst_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ /* EXLEVEL_NS, bits[23:20] */
+ val = BMVAL(drvdata->vinst_ctrl, 20, 23);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t ns_exlevel_vinst_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ /* clear EXLEVEL_NS bits (bit[23] is never implemented) */
+ drvdata->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
+ /* enable instruction tracing for corresponding exception level */
+ val &= drvdata->ns_ex_level;
+ drvdata->vinst_ctrl |= (val << 20);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(ns_exlevel_vinst);
+
+static ssize_t addr_idx_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->addr_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val >= drvdata->nr_addr_cmp * 2)
+ return -EINVAL;
+
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ drvdata->addr_idx = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_idx);
+
+static ssize_t addr_instdatatype_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t len;
+ u8 val, idx;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ val = BMVAL(drvdata->addr_acc[idx], 0, 1);
+ len = scnprintf(buf, PAGE_SIZE, "%s\n",
+ val == ETM_INSTR_ADDR ? "instr" :
+ (val == ETM_DATA_LOAD_ADDR ? "data_load" :
+ (val == ETM_DATA_STORE_ADDR ? "data_store" :
+ "data_load_store")));
+ spin_unlock(&drvdata->spinlock);
+ return len;
+}
+
+static ssize_t addr_instdatatype_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ char str[20] = "";
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (strlen(buf) >= 20)
+ return -EINVAL;
+ if (sscanf(buf, "%s", str) != 1)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
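+ /* Only instruction address comparison is handled; other type strings are ignored */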
+ if (!strcmp(str, "instr"))
+ /* TYPE, bits[1:0] */
+ drvdata->addr_acc[idx] &= ~(BIT(0) | BIT(1));
+
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_instdatatype);
+
+static ssize_t addr_single_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+ val = (unsigned long)drvdata->addr_val[idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_single_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ drvdata->addr_val[idx] = (u64)val;
+ drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_single);
+
+static ssize_t addr_range_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val1, val2;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ if (idx % 2 != 0) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+ if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+ drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+ (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+ drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ val1 = (unsigned long)drvdata->addr_val[idx];
+ val2 = (unsigned long)drvdata->addr_val[idx + 1];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t addr_range_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val1, val2;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ return -EINVAL;
+ /* lower address comparator cannot have a higher address value */
+ if (val1 > val2)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ if (idx % 2 != 0) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+ drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+ (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+ drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ drvdata->addr_val[idx] = (u64)val1;
+ drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
+ drvdata->addr_val[idx + 1] = (u64)val2;
+ drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
+ /*
+ * Program include or exclude control bits for vinst or vdata
+ * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
+ */
+ if (drvdata->mode & ETM_MODE_EXCLUDE)
+ etm4_set_mode_exclude(drvdata, true);
+ else
+ etm4_set_mode_exclude(drvdata, false);
+
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_range);
+
+static ssize_t addr_start_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+
+ if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ val = (unsigned long)drvdata->addr_val[idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_start_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ if (!drvdata->nr_addr_cmp) {
+ spin_unlock(&drvdata->spinlock);
+ return -EINVAL;
+ }
+ if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ drvdata->addr_val[idx] = (u64)val;
+ drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
+ drvdata->vissctlr |= BIT(idx);
+ /* SSSTATUS, bit[9] - turn on start/stop logic */
+ drvdata->vinst_ctrl |= BIT(9);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_start);
+
+static ssize_t addr_stop_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+
+ if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ val = (unsigned long)drvdata->addr_val[idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_stop_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ if (!drvdata->nr_addr_cmp) {
+ spin_unlock(&drvdata->spinlock);
+ return -EINVAL;
+ }
+ if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ drvdata->addr_val[idx] = (u64)val;
+ drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
+ drvdata->vissctlr |= BIT(idx + 16);
+ /* SSSTATUS, bit[9] - turn on start/stop logic */
+ drvdata->vinst_ctrl |= BIT(9);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_stop);
+
+static ssize_t addr_ctxtype_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t len;
+ u8 idx, val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ /* CONTEXTTYPE, bits[3:2] */
+ val = BMVAL(drvdata->addr_acc[idx], 2, 3);
+ len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
+ (val == ETM_CTX_CTXID ? "ctxid" :
+ (val == ETM_CTX_VMID ? "vmid" : "all")));
+ spin_unlock(&drvdata->spinlock);
+ return len;
+}
+
+static ssize_t addr_ctxtype_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ char str[10] = "";
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (strlen(buf) >= 10)
+ return -EINVAL;
+ if (sscanf(buf, "%s", str) != 1)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ if (!strcmp(str, "none"))
+ /* start by clearing context type bits */
+ drvdata->addr_acc[idx] &= ~(BIT(2) | BIT(3));
+ else if (!strcmp(str, "ctxid")) {
+ /* 0b01 The trace unit performs a Context ID comparison */
+ if (drvdata->numcidc) {
+ drvdata->addr_acc[idx] |= BIT(2);
+ drvdata->addr_acc[idx] &= ~BIT(3);
+ }
+ } else if (!strcmp(str, "vmid")) {
+ /* 0b10 The trace unit performs a VMID comparison */
+ if (drvdata->numvmidc) {
+ drvdata->addr_acc[idx] &= ~BIT(2);
+ drvdata->addr_acc[idx] |= BIT(3);
+ }
+ } else if (!strcmp(str, "all")) {
+ /*
+ * 0b11 The trace unit performs a Context ID
+ * comparison and a VMID comparison
+ */
+ if (drvdata->numcidc)
+ drvdata->addr_acc[idx] |= BIT(2);
+ if (drvdata->numvmidc)
+ drvdata->addr_acc[idx] |= BIT(3);
+ }
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_ctxtype);
+
+static ssize_t addr_context_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ /* context ID comparator bits[6:4] */
+ val = BMVAL(drvdata->addr_acc[idx], 4, 6);
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_context_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
+ return -EINVAL;
+ if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
+ drvdata->numcidc : drvdata->numvmidc))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ /* clear context ID comparator bits[6:4] */
+ drvdata->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
+ drvdata->addr_acc[idx] |= (val << 4);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_context);
+
+static ssize_t seq_idx_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->seq_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t seq_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val >= drvdata->nrseqstate - 1)
+ return -EINVAL;
+
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ drvdata->seq_idx = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(seq_idx);
+
+static ssize_t seq_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->seq_state;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t seq_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val >= drvdata->nrseqstate)
+ return -EINVAL;
+
+ drvdata->seq_state = val;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_state);
+
+static ssize_t seq_event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->seq_idx;
+ val = drvdata->seq_ctrl[idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t seq_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->seq_idx;
+ /* RST, bits[7:0] */
+ drvdata->seq_ctrl[idx] = val & 0xFF;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(seq_event);
+
+static ssize_t seq_reset_event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->seq_rst;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t seq_reset_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (!(drvdata->nrseqstate))
+ return -EINVAL;
+
+ drvdata->seq_rst = val & ETMv4_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_reset_event);
+
+static ssize_t cntr_idx_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->cntr_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t cntr_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val >= drvdata->nr_cntr)
+ return -EINVAL;
+
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ drvdata->cntr_idx = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_idx);
+
+static ssize_t cntrldvr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->cntr_idx;
+ val = drvdata->cntrldvr[idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t cntrldvr_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val > ETM_CNTR_MAX_VAL)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->cntr_idx;
+ drvdata->cntrldvr[idx] = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(cntrldvr);
+
+static ssize_t cntr_val_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->cntr_idx;
+ val = drvdata->cntr_val[idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t cntr_val_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val > ETM_CNTR_MAX_VAL)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->cntr_idx;
+ drvdata->cntr_val[idx] = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_val);
+
+static ssize_t cntr_ctrl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->cntr_idx;
+ val = drvdata->cntr_ctrl[idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t cntr_ctrl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->cntr_idx;
+ drvdata->cntr_ctrl[idx] = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_ctrl);
+
+static ssize_t res_idx_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->res_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t res_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ /* Resource selector pair 0 is always implemented and reserved */
+ if ((val == 0) || (val >= drvdata->nr_resource))
+ return -EINVAL;
+
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ drvdata->res_idx = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(res_idx);
+
+static ssize_t res_ctrl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->res_idx;
+ val = drvdata->res_ctrl[idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t res_ctrl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->res_idx;
+ /* For an odd idx the pair inversion bit is RES0 */
+ if (idx % 2 != 0)
+ /* PAIRINV, bit[21] */
+ val &= ~BIT(21);
+ drvdata->res_ctrl[idx] = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(res_ctrl);
+
+static ssize_t ctxid_idx_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->ctxid_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t ctxid_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val >= drvdata->numcidc)
+ return -EINVAL;
+
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ drvdata->ctxid_idx = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(ctxid_idx);
+
+static ssize_t ctxid_val_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->ctxid_idx;
+ val = (unsigned long)drvdata->ctxid_val[idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t ctxid_val_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ /*
+ * Only implemented when ctxid tracing is supported, i.e. at least
+ * one ctxid comparator is implemented and the ctxid size is
+ * greater than zero.
+ */
+ if (!drvdata->ctxid_size || !drvdata->numcidc)
+ return -EINVAL;
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->ctxid_idx;
+ drvdata->ctxid_val[idx] = (u64)val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(ctxid_val);
+
+static ssize_t ctxid_masks_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val1, val2;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ val1 = drvdata->ctxid_mask0;
+ val2 = drvdata->ctxid_mask1;
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t ctxid_masks_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 i, j, maskbyte;
+ unsigned long val1, val2, mask;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ /*
+ * Only implemented when ctxid tracing is supported, i.e. at least
+ * one ctxid comparator is implemented and the ctxid size is
+ * greater than zero.
+ */
+ if (!drvdata->ctxid_size || !drvdata->numcidc)
+ return -EINVAL;
+ if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ /*
+ * each byte[0..3] controls mask value applied to ctxid
+ * comparator[0..3]
+ */
+ switch (drvdata->numcidc) {
+ case 0x1:
+ /* COMP0, bits[7:0] */
+ drvdata->ctxid_mask0 = val1 & 0xFF;
+ break;
+ case 0x2:
+ /* COMP1, bits[15:8] */
+ drvdata->ctxid_mask0 = val1 & 0xFFFF;
+ break;
+ case 0x3:
+ /* COMP2, bits[23:16] */
+ drvdata->ctxid_mask0 = val1 & 0xFFFFFF;
+ break;
+ case 0x4:
+ /* COMP3, bits[31:24] */
+ drvdata->ctxid_mask0 = val1;
+ break;
+ case 0x5:
+ /* COMP4, bits[7:0] */
+ drvdata->ctxid_mask0 = val1;
+ drvdata->ctxid_mask1 = val2 & 0xFF;
+ break;
+ case 0x6:
+ /* COMP5, bits[15:8] */
+ drvdata->ctxid_mask0 = val1;
+ drvdata->ctxid_mask1 = val2 & 0xFFFF;
+ break;
+ case 0x7:
+ /* COMP6, bits[23:16] */
+ drvdata->ctxid_mask0 = val1;
+ drvdata->ctxid_mask1 = val2 & 0xFFFFFF;
+ break;
+ case 0x8:
+ /* COMP7, bits[31:24] */
+ drvdata->ctxid_mask0 = val1;
+ drvdata->ctxid_mask1 = val2;
+ break;
+ default:
+ break;
+ }
+ /*
+ * If software sets a mask bit to 1, it must program the relevant
+ * byte of the ctxid comparator value to 0x0, otherwise behavior is
+ * unpredictable. For example, if bit[3] of ctxid_mask0 is 1 (mask
+ * byte 0 covers comparator 0), byte 3, i.e. bits[31:24], of the
+ * ctxid comparator 0 value register must be cleared.
+ */
+ mask = drvdata->ctxid_mask0;
+ for (i = 0; i < drvdata->numcidc; i++) {
+ /* mask value of corresponding ctxid comparator */
+ maskbyte = mask & ETMv4_EVENT_MASK;
+ /*
+ * each bit corresponds to a byte of respective ctxid comparator
+ * value register
+ */
+ for (j = 0; j < 8; j++) {
+ if (maskbyte & 1)
+ drvdata->ctxid_val[i] &= ~(0xFFUL << (j * 8));
+ maskbyte >>= 1;
+ }
+ /* Select the next ctxid comparator mask value */
+ if (i == 3)
+ /* ctxid comparators[4-7] */
+ mask = drvdata->ctxid_mask1;
+ else
+ mask >>= 0x8;
+ }
+
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(ctxid_masks);
+
+static ssize_t vmid_idx_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->vmid_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t vmid_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val >= drvdata->numvmidc)
+ return -EINVAL;
+
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ drvdata->vmid_idx = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(vmid_idx);
+
+static ssize_t vmid_val_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ val = (unsigned long)drvdata->vmid_val[drvdata->vmid_idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t vmid_val_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ /*
+ * Only implemented when vmid tracing is supported, i.e. at least
+ * one vmid comparator is implemented and the vmid size is at
+ * least 8 bits.
+ */
+ if (!drvdata->vmid_size || !drvdata->numvmidc)
+ return -EINVAL;
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->vmid_val[drvdata->vmid_idx] = (u64)val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(vmid_val);
+
+static ssize_t vmid_masks_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val1, val2;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ val1 = drvdata->vmid_mask0;
+ val2 = drvdata->vmid_mask1;
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t vmid_masks_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 i, j, maskbyte;
+ unsigned long val1, val2, mask;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ /*
+ * Only implemented when vmid tracing is supported, i.e. at least
+ * one vmid comparator is implemented and the vmid size is at
+ * least 8 bits.
+ */
+ if (!drvdata->vmid_size || !drvdata->numvmidc)
+ return -EINVAL;
+ if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+
+ /*
+ * each byte[0..3] controls mask value applied to vmid
+ * comparator[0..3]
+ */
+ switch (drvdata->numvmidc) {
+ case 0x1:
+ /* COMP0, bits[7:0] */
+ drvdata->vmid_mask0 = val1 & 0xFF;
+ break;
+ case 0x2:
+ /* COMP1, bits[15:8] */
+ drvdata->vmid_mask0 = val1 & 0xFFFF;
+ break;
+ case 0x3:
+ /* COMP2, bits[23:16] */
+ drvdata->vmid_mask0 = val1 & 0xFFFFFF;
+ break;
+ case 0x4:
+ /* COMP3, bits[31:24] */
+ drvdata->vmid_mask0 = val1;
+ break;
+ case 0x5:
+ /* COMP4, bits[7:0] */
+ drvdata->vmid_mask0 = val1;
+ drvdata->vmid_mask1 = val2 & 0xFF;
+ break;
+ case 0x6:
+ /* COMP5, bits[15:8] */
+ drvdata->vmid_mask0 = val1;
+ drvdata->vmid_mask1 = val2 & 0xFFFF;
+ break;
+ case 0x7:
+ /* COMP6, bits[23:16] */
+ drvdata->vmid_mask0 = val1;
+ drvdata->vmid_mask1 = val2 & 0xFFFFFF;
+ break;
+ case 0x8:
+ /* COMP7, bits[31:24] */
+ drvdata->vmid_mask0 = val1;
+ drvdata->vmid_mask1 = val2;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * If software sets a mask bit to 1, it must program the relevant
+ * byte of the vmid comparator value to 0x0, otherwise behavior is
+ * unpredictable. For example, if bit[3] of vmid_mask0 is 1 (mask
+ * byte 0 covers comparator 0), byte 3, i.e. bits[31:24], of the
+ * vmid comparator 0 value register must be cleared.
+ */
+ mask = drvdata->vmid_mask0;
+ for (i = 0; i < drvdata->numvmidc; i++) {
+ /* mask value of corresponding vmid comparator */
+ maskbyte = mask & ETMv4_EVENT_MASK;
+ /*
+ * each bit corresponds to a byte of respective vmid comparator
+ * value register
+ */
+ for (j = 0; j < 8; j++) {
+ if (maskbyte & 1)
+ drvdata->vmid_val[i] &= ~(0xFFUL << (j * 8));
+ maskbyte >>= 1;
+ }
+ /* Select the next vmid comparator mask value */
+ if (i == 3)
+ /* vmid comparators[4-7] */
+ mask = drvdata->vmid_mask1;
+ else
+ mask >>= 0x8;
+ }
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(vmid_masks);
+
+static ssize_t cpu_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->cpu;
+ return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+static DEVICE_ATTR_RO(cpu);
+
+static struct attribute *coresight_etmv4_attrs[] = {
+ &dev_attr_nr_pe_cmp.attr,
+ &dev_attr_nr_addr_cmp.attr,
+ &dev_attr_nr_cntr.attr,
+ &dev_attr_nr_ext_inp.attr,
+ &dev_attr_numcidc.attr,
+ &dev_attr_numvmidc.attr,
+ &dev_attr_nrseqstate.attr,
+ &dev_attr_nr_resource.attr,
+ &dev_attr_nr_ss_cmp.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_pe.attr,
+ &dev_attr_event.attr,
+ &dev_attr_event_instren.attr,
+ &dev_attr_event_ts.attr,
+ &dev_attr_syncfreq.attr,
+ &dev_attr_cyc_threshold.attr,
+ &dev_attr_bb_ctrl.attr,
+ &dev_attr_event_vinst.attr,
+ &dev_attr_s_exlevel_vinst.attr,
+ &dev_attr_ns_exlevel_vinst.attr,
+ &dev_attr_addr_idx.attr,
+ &dev_attr_addr_instdatatype.attr,
+ &dev_attr_addr_single.attr,
+ &dev_attr_addr_range.attr,
+ &dev_attr_addr_start.attr,
+ &dev_attr_addr_stop.attr,
+ &dev_attr_addr_ctxtype.attr,
+ &dev_attr_addr_context.attr,
+ &dev_attr_seq_idx.attr,
+ &dev_attr_seq_state.attr,
+ &dev_attr_seq_event.attr,
+ &dev_attr_seq_reset_event.attr,
+ &dev_attr_cntr_idx.attr,
+ &dev_attr_cntrldvr.attr,
+ &dev_attr_cntr_val.attr,
+ &dev_attr_cntr_ctrl.attr,
+ &dev_attr_res_idx.attr,
+ &dev_attr_res_ctrl.attr,
+ &dev_attr_ctxid_idx.attr,
+ &dev_attr_ctxid_val.attr,
+ &dev_attr_ctxid_masks.attr,
+ &dev_attr_vmid_idx.attr,
+ &dev_attr_vmid_val.attr,
+ &dev_attr_vmid_masks.attr,
+ &dev_attr_cpu.attr,
+ NULL,
+};
+
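+/*
+ * Generate a read-only sysfs attribute that dumps the management
+ * register found at 'offset'.
+ */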
+#define coresight_simple_func(name, offset) \
+static ssize_t name##_show(struct device *_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent); \
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
+ readl_relaxed(drvdata->base + offset)); \
+} \
+DEVICE_ATTR_RO(name)
+
+coresight_simple_func(trcoslsr, TRCOSLSR);
+coresight_simple_func(trcpdcr, TRCPDCR);
+coresight_simple_func(trcpdsr, TRCPDSR);
+coresight_simple_func(trclsr, TRCLSR);
+coresight_simple_func(trcauthstatus, TRCAUTHSTATUS);
+coresight_simple_func(trcdevid, TRCDEVID);
+coresight_simple_func(trcdevtype, TRCDEVTYPE);
+coresight_simple_func(trcpidr0, TRCPIDR0);
+coresight_simple_func(trcpidr1, TRCPIDR1);
+coresight_simple_func(trcpidr2, TRCPIDR2);
+coresight_simple_func(trcpidr3, TRCPIDR3);
+
+static struct attribute *coresight_etmv4_mgmt_attrs[] = {
+ &dev_attr_trcoslsr.attr,
+ &dev_attr_trcpdcr.attr,
+ &dev_attr_trcpdsr.attr,
+ &dev_attr_trclsr.attr,
+ &dev_attr_trcauthstatus.attr,
+ &dev_attr_trcdevid.attr,
+ &dev_attr_trcdevtype.attr,
+ &dev_attr_trcpidr0.attr,
+ &dev_attr_trcpidr1.attr,
+ &dev_attr_trcpidr2.attr,
+ &dev_attr_trcpidr3.attr,
+ NULL,
+};
+
+coresight_simple_func(trcidr0, TRCIDR0);
+coresight_simple_func(trcidr1, TRCIDR1);
+coresight_simple_func(trcidr2, TRCIDR2);
+coresight_simple_func(trcidr3, TRCIDR3);
+coresight_simple_func(trcidr4, TRCIDR4);
+coresight_simple_func(trcidr5, TRCIDR5);
+/* trcidr[6,7] are reserved */
+coresight_simple_func(trcidr8, TRCIDR8);
+coresight_simple_func(trcidr9, TRCIDR9);
+coresight_simple_func(trcidr10, TRCIDR10);
+coresight_simple_func(trcidr11, TRCIDR11);
+coresight_simple_func(trcidr12, TRCIDR12);
+coresight_simple_func(trcidr13, TRCIDR13);
+
+static struct attribute *coresight_etmv4_trcidr_attrs[] = {
+ &dev_attr_trcidr0.attr,
+ &dev_attr_trcidr1.attr,
+ &dev_attr_trcidr2.attr,
+ &dev_attr_trcidr3.attr,
+ &dev_attr_trcidr4.attr,
+ &dev_attr_trcidr5.attr,
+ /* trcidr[6,7] are reserved */
+ &dev_attr_trcidr8.attr,
+ &dev_attr_trcidr9.attr,
+ &dev_attr_trcidr10.attr,
+ &dev_attr_trcidr11.attr,
+ &dev_attr_trcidr12.attr,
+ &dev_attr_trcidr13.attr,
+ NULL,
+};
+
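+/*
+ * Configuration attributes live at the device level; management and
+ * ID registers are grouped under the "mgmt" and "trcidr" directories.
+ */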
+static const struct attribute_group coresight_etmv4_group = {
+ .attrs = coresight_etmv4_attrs,
+};
+
+static const struct attribute_group coresight_etmv4_mgmt_group = {
+ .attrs = coresight_etmv4_mgmt_attrs,
+ .name = "mgmt",
+};
+
+static const struct attribute_group coresight_etmv4_trcidr_group = {
+ .attrs = coresight_etmv4_trcidr_attrs,
+ .name = "trcidr",
+};
+
+static const struct attribute_group *coresight_etmv4_groups[] = {
+ &coresight_etmv4_group,
+ &coresight_etmv4_mgmt_group,
+ &coresight_etmv4_trcidr_group,
+ NULL,
+};
+
+static void etm4_init_arch_data(void *info)
+{
+ u32 etmidr0;
+ u32 etmidr1;
+ u32 etmidr2;
+ u32 etmidr3;
+ u32 etmidr4;
+ u32 etmidr5;
+ struct etmv4_drvdata *drvdata = info;
+
+ CS_UNLOCK(drvdata->base);
+
+ /* find all capabilities of the tracing unit */
+ etmidr0 = readl_relaxed(drvdata->base + TRCIDR0);
+
+ /* INSTP0, bits[2:1] P0 tracing support field */
+ if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
+ drvdata->instrp0 = true;
+ else
+ drvdata->instrp0 = false;
+
+ /* TRCBB, bit[5] Branch broadcast tracing support bit */
+ if (BMVAL(etmidr0, 5, 5))
+ drvdata->trcbb = true;
+ else
+ drvdata->trcbb = false;
+
+ /* TRCCOND, bit[6] Conditional instruction tracing support bit */
+ if (BMVAL(etmidr0, 6, 6))
+ drvdata->trccond = true;
+ else
+ drvdata->trccond = false;
+
+ /* TRCCCI, bit[7] Cycle counting instruction bit */
+ if (BMVAL(etmidr0, 7, 7))
+ drvdata->trccci = true;
+ else
+ drvdata->trccci = false;
+
+ /* RETSTACK, bit[9] Return stack bit */
+ if (BMVAL(etmidr0, 9, 9))
+ drvdata->retstack = true;
+ else
+ drvdata->retstack = false;
+
+ /* NUMEVENT, bits[11:10] Number of events field */
+ drvdata->nr_event = BMVAL(etmidr0, 10, 11);
+ /* QSUPP, bits[16:15] Q element support field */
+ drvdata->q_support = BMVAL(etmidr0, 15, 16);
+ /* TSSIZE, bits[28:24] Global timestamp size field */
+ drvdata->ts_size = BMVAL(etmidr0, 24, 28);
+
+ /* base architecture of trace unit */
+ etmidr1 = readl_relaxed(drvdata->base + TRCIDR1);
+ /*
+	 * TRCARCHMIN, bits[7:4] architecture minor version number
+	 * TRCARCHMAJ, bits[11:8] architecture major version number
+ */
+ drvdata->arch = BMVAL(etmidr1, 4, 11);
+
+ /* maximum size of resources */
+ etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
+ /* CIDSIZE, bits[9:5] Indicates the Context ID size */
+ drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
+ /* VMIDSIZE, bits[14:10] Indicates the VMID size */
+ drvdata->vmid_size = BMVAL(etmidr2, 10, 14);
+ /* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
+ drvdata->ccsize = BMVAL(etmidr2, 25, 28);
+
+ etmidr3 = readl_relaxed(drvdata->base + TRCIDR3);
+ /* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
+ drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
+ /* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
+ drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
+ /* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
+ drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);
+
+ /*
+ * TRCERR, bit[24] whether a trace unit can trace a
+ * system error exception.
+ */
+ if (BMVAL(etmidr3, 24, 24))
+ drvdata->trc_error = true;
+ else
+ drvdata->trc_error = false;
+
+ /* SYNCPR, bit[25] implementation has a fixed synchronization period? */
+ if (BMVAL(etmidr3, 25, 25))
+ drvdata->syncpr = true;
+ else
+ drvdata->syncpr = false;
+
+ /* STALLCTL, bit[26] is stall control implemented? */
+ if (BMVAL(etmidr3, 26, 26))
+ drvdata->stallctl = true;
+ else
+ drvdata->stallctl = false;
+
+ /* SYSSTALL, bit[27] implementation can support stall control? */
+ if (BMVAL(etmidr3, 27, 27))
+ drvdata->sysstall = true;
+ else
+ drvdata->sysstall = false;
+
+ /* NUMPROC, bits[30:28] the number of PEs available for tracing */
+ drvdata->nr_pe = BMVAL(etmidr3, 28, 30);
+
+ /* NOOVERFLOW, bit[31] is trace overflow prevention supported */
+ if (BMVAL(etmidr3, 31, 31))
+ drvdata->nooverflow = true;
+ else
+ drvdata->nooverflow = false;
+
+ /* number of resources trace unit supports */
+ etmidr4 = readl_relaxed(drvdata->base + TRCIDR4);
+	/* NUMACPAIRS, bits[3:0] number of addr comparator pairs for tracing */
+ drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
+ /* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
+ drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
+ /* NUMRSPAIR, bits[19:16] the number of resource pairs for tracing */
+ drvdata->nr_resource = BMVAL(etmidr4, 16, 19);
+ /*
+ * NUMSSCC, bits[23:20] the number of single-shot
+ * comparator control for tracing
+ */
+ drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
+ /* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
+ drvdata->numcidc = BMVAL(etmidr4, 24, 27);
+ /* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
+ drvdata->numvmidc = BMVAL(etmidr4, 28, 31);
+
+ etmidr5 = readl_relaxed(drvdata->base + TRCIDR5);
+ /* NUMEXTIN, bits[8:0] number of external inputs implemented */
+ drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
+ /* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
+ drvdata->trcid_size = BMVAL(etmidr5, 16, 21);
+ /* ATBTRIG, bit[22] implementation can support ATB triggers? */
+ if (BMVAL(etmidr5, 22, 22))
+ drvdata->atbtrig = true;
+ else
+ drvdata->atbtrig = false;
+ /*
+ * LPOVERRIDE, bit[23] implementation supports
+ * low-power state override
+ */
+ if (BMVAL(etmidr5, 23, 23))
+ drvdata->lpoverride = true;
+ else
+ drvdata->lpoverride = false;
+ /* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
+ drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
+ /* NUMCNTR, bits[30:28] number of counters available for tracing */
+ drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
+ CS_LOCK(drvdata->base);
+}
+
+static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
+{
+ int i;
+
+ drvdata->pe_sel = 0x0;
+ drvdata->cfg = (ETMv4_MODE_CTXID | ETM_MODE_VMID |
+ ETMv4_MODE_TIMESTAMP | ETM_MODE_RETURNSTACK);
+
+ /* disable all events tracing */
+ drvdata->eventctrl0 = 0x0;
+ drvdata->eventctrl1 = 0x0;
+
+ /* disable stalling */
+ drvdata->stall_ctrl = 0x0;
+
+ /* disable timestamp event */
+ drvdata->ts_ctrl = 0x0;
+
+	/* enable trace synchronization every 4096 bytes of trace */
+	if (!drvdata->syncpr)
+		drvdata->syncfreq = 0xC;
+
+ /*
+ * enable viewInst to trace everything with start-stop logic in
+ * started state
+ */
+ drvdata->vinst_ctrl |= BIT(0);
+ /* set initial state of start-stop logic */
+ if (drvdata->nr_addr_cmp)
+ drvdata->vinst_ctrl |= BIT(9);
+
+ /* no address range filtering for ViewInst */
+ drvdata->viiectlr = 0x0;
+ /* no start-stop filtering for ViewInst */
+ drvdata->vissctlr = 0x0;
+
+ /* disable seq events */
+	for (i = 0; i < drvdata->nrseqstate - 1; i++)
+ drvdata->seq_ctrl[i] = 0x0;
+ drvdata->seq_rst = 0x0;
+ drvdata->seq_state = 0x0;
+
+ /* disable external input events */
+ drvdata->ext_inp = 0x0;
+
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ drvdata->cntrldvr[i] = 0x0;
+ drvdata->cntr_ctrl[i] = 0x0;
+ drvdata->cntr_val[i] = 0x0;
+ }
+
+ for (i = 2; i < drvdata->nr_resource * 2; i++)
+ drvdata->res_ctrl[i] = 0x0;
+
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ drvdata->ss_ctrl[i] = 0x0;
+ drvdata->ss_pe_cmp[i] = 0x0;
+ }
+
+ if (drvdata->nr_addr_cmp >= 1) {
+ drvdata->addr_val[0] = (unsigned long)_stext;
+ drvdata->addr_val[1] = (unsigned long)_etext;
+ drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
+ drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
+ }
+
+ for (i = 0; i < drvdata->numcidc; i++)
+ drvdata->ctxid_val[i] = 0x0;
+ drvdata->ctxid_mask0 = 0x0;
+ drvdata->ctxid_mask1 = 0x0;
+
+ for (i = 0; i < drvdata->numvmidc; i++)
+ drvdata->vmid_val[i] = 0x0;
+ drvdata->vmid_mask0 = 0x0;
+ drvdata->vmid_mask1 = 0x0;
+
+ /*
+ * A trace ID value of 0 is invalid, so let's start at some
+ * random value that fits in 7 bits. ETMv3.x has 0x10 so let's
+ * start at 0x20.
+ */
+ drvdata->trcid = 0x20 + drvdata->cpu;
+}
+
+static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
+ void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ if (!etmdrvdata[cpu])
+ goto out;
+
+ switch (action & (~CPU_TASKS_FROZEN)) {
+ case CPU_STARTING:
+ spin_lock(&etmdrvdata[cpu]->spinlock);
+ if (!etmdrvdata[cpu]->os_unlock) {
+ etm4_os_unlock(etmdrvdata[cpu]);
+ etmdrvdata[cpu]->os_unlock = true;
+ }
+
+ if (etmdrvdata[cpu]->enable)
+ etm4_enable_hw(etmdrvdata[cpu]);
+ spin_unlock(&etmdrvdata[cpu]->spinlock);
+ break;
+
+ case CPU_ONLINE:
+ if (etmdrvdata[cpu]->boot_enable &&
+ !etmdrvdata[cpu]->sticky_enable)
+ coresight_enable(etmdrvdata[cpu]->csdev);
+ break;
+
+ case CPU_DYING:
+ spin_lock(&etmdrvdata[cpu]->spinlock);
+ if (etmdrvdata[cpu]->enable)
+ etm4_disable_hw(etmdrvdata[cpu]);
+ spin_unlock(&etmdrvdata[cpu]->spinlock);
+ break;
+ }
+out:
+ return NOTIFY_OK;
+}
+
+static struct notifier_block etm4_cpu_notifier = {
+ .notifier_call = etm4_cpu_callback,
+};
+
+static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int ret;
+ void __iomem *base;
+ struct device *dev = &adev->dev;
+ struct coresight_platform_data *pdata = NULL;
+ struct etmv4_drvdata *drvdata;
+ struct resource *res = &adev->res;
+ struct coresight_desc *desc;
+ struct device_node *np = adev->dev.of_node;
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ if (np) {
+ pdata = of_get_coresight_platform_data(dev, np);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ adev->dev.platform_data = pdata;
+ }
+
+ drvdata->dev = &adev->dev;
+ dev_set_drvdata(dev, drvdata);
+
+ /* Validity for the resource is already checked by the AMBA core */
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ drvdata->base = base;
+
+ spin_lock_init(&drvdata->spinlock);
+
+ drvdata->cpu = pdata ? pdata->cpu : 0;
+
+ get_online_cpus();
+ etmdrvdata[drvdata->cpu] = drvdata;
+
+ if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1))
+ drvdata->os_unlock = true;
+
+ if (smp_call_function_single(drvdata->cpu,
+ etm4_init_arch_data, drvdata, 1))
+ dev_err(dev, "ETM arch init failed\n");
+
+ if (!etm4_count++)
+ register_hotcpu_notifier(&etm4_cpu_notifier);
+
+ put_online_cpus();
+
+	if (!etm4_arch_supported(drvdata->arch)) {
+ ret = -EINVAL;
+ goto err_arch_supported;
+ }
+ etm4_init_default_data(drvdata);
+
+ pm_runtime_put(&adev->dev);
+
+ desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+ desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
+ desc->ops = &etm4_cs_ops;
+ desc->pdata = pdata;
+ desc->dev = dev;
+ desc->groups = coresight_etmv4_groups;
+ drvdata->csdev = coresight_register(desc);
+ if (IS_ERR(drvdata->csdev)) {
+ ret = PTR_ERR(drvdata->csdev);
+ goto err_coresight_register;
+ }
+
+ dev_info(dev, "%s initialized\n", (char *)id->data);
+
+ if (boot_enable) {
+ coresight_enable(drvdata->csdev);
+ drvdata->boot_enable = true;
+ }
+
+ return 0;
+
+err_arch_supported:
+ pm_runtime_put(&adev->dev);
+err_coresight_register:
+ if (--etm4_count == 0)
+ unregister_hotcpu_notifier(&etm4_cpu_notifier);
+ return ret;
+}
+
+static int etm4_remove(struct amba_device *adev)
+{
+ struct etmv4_drvdata *drvdata = amba_get_drvdata(adev);
+
+ coresight_unregister(drvdata->csdev);
+ if (--etm4_count == 0)
+ unregister_hotcpu_notifier(&etm4_cpu_notifier);
+
+ return 0;
+}
+
+static struct amba_id etm4_ids[] = {
+ { /* ETM 4.0 - Qualcomm */
+ .id = 0x0003b95d,
+ .mask = 0x0003ffff,
+ .data = "ETM 4.0",
+ },
+ { /* ETM 4.0 - Juno board */
+ .id = 0x000bb95e,
+ .mask = 0x000fffff,
+ .data = "ETM 4.0",
+ },
+ { 0, 0},
+};
+
+static struct amba_driver etm4x_driver = {
+ .drv = {
+ .name = "coresight-etm4x",
+ },
+ .probe = etm4_probe,
+ .remove = etm4_remove,
+ .id_table = etm4_ids,
+};
+
+module_amba_driver(etm4x_driver);
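
For reference, the coresight_simple_func() macro earlier in this file stamps out one read-only sysfs show routine per management register; coresight_simple_func(trcoslsr, TRCOSLSR) expands to roughly the following (a sketch of the expansion, not literal preprocessor output):

	static ssize_t trcoslsr_show(struct device *_dev,
				     struct device_attribute *attr, char *buf)
	{
		struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent);

		return scnprintf(buf, PAGE_SIZE, "0x%x\n",
				 readl_relaxed(drvdata->base + TRCOSLSR));
	}
	DEVICE_ATTR_RO(trcoslsr);

Reading mgmt/trcoslsr from sysfs therefore costs a single relaxed MMIO read, with no serialization against a live trace session.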
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
new file mode 100644
index 000000000000..e08e983dd2d9
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -0,0 +1,391 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CORESIGHT_CORESIGHT_ETM4X_H
+#define _CORESIGHT_CORESIGHT_ETM4X_H
+
+#include <linux/spinlock.h>
+#include "coresight-priv.h"
+
+/*
+ * Device registers:
+ * 0x000 - 0x2FC: Trace registers
+ * 0x300 - 0x314: Management registers
+ * 0x318 - 0xEFC: Trace registers
+ * 0xF00: Management registers
+ * 0xFA0 - 0xFA4: Trace registers
+ * 0xFA8 - 0xFFC: Management registers
+ */
+/* Trace registers (0x000-0x2FC) */
+/* Main control and configuration registers */
+#define TRCPRGCTLR 0x004
+#define TRCPROCSELR 0x008
+#define TRCSTATR 0x00C
+#define TRCCONFIGR 0x010
+#define TRCAUXCTLR 0x018
+#define TRCEVENTCTL0R 0x020
+#define TRCEVENTCTL1R 0x024
+#define TRCSTALLCTLR 0x02C
+#define TRCTSCTLR 0x030
+#define TRCSYNCPR 0x034
+#define TRCCCCTLR 0x038
+#define TRCBBCTLR 0x03C
+#define TRCTRACEIDR 0x040
+#define TRCQCTLR 0x044
+/* Filtering control registers */
+#define TRCVICTLR 0x080
+#define TRCVIIECTLR 0x084
+#define TRCVISSCTLR 0x088
+#define TRCVIPCSSCTLR 0x08C
+#define TRCVDCTLR 0x0A0
+#define TRCVDSACCTLR 0x0A4
+#define TRCVDARCCTLR 0x0A8
+/* Derived resources registers */
+#define TRCSEQEVRn(n) (0x100 + (n * 4))
+#define TRCSEQRSTEVR 0x118
+#define TRCSEQSTR 0x11C
+#define TRCEXTINSELR 0x120
+#define TRCCNTRLDVRn(n) (0x140 + (n * 4))
+#define TRCCNTCTLRn(n) (0x150 + (n * 4))
+#define TRCCNTVRn(n) (0x160 + (n * 4))
+/* ID registers */
+#define TRCIDR8 0x180
+#define TRCIDR9 0x184
+#define TRCIDR10 0x188
+#define TRCIDR11 0x18C
+#define TRCIDR12 0x190
+#define TRCIDR13 0x194
+#define TRCIMSPEC0 0x1C0
+#define TRCIMSPECn(n) (0x1C0 + (n * 4))
+#define TRCIDR0 0x1E0
+#define TRCIDR1 0x1E4
+#define TRCIDR2 0x1E8
+#define TRCIDR3 0x1EC
+#define TRCIDR4 0x1F0
+#define TRCIDR5 0x1F4
+#define TRCIDR6 0x1F8
+#define TRCIDR7 0x1FC
+/* Resource selection registers */
+#define TRCRSCTLRn(n) (0x200 + (n * 4))
+/* Single-shot comparator registers */
+#define TRCSSCCRn(n) (0x280 + (n * 4))
+#define TRCSSCSRn(n) (0x2A0 + (n * 4))
+#define TRCSSPCICRn(n) (0x2C0 + (n * 4))
+/* Management registers (0x300-0x314) */
+#define TRCOSLAR 0x300
+#define TRCOSLSR 0x304
+#define TRCPDCR 0x310
+#define TRCPDSR 0x314
+/* Trace registers (0x318-0xEFC) */
+/* Comparator registers */
+#define TRCACVRn(n) (0x400 + (n * 8))
+#define TRCACATRn(n) (0x480 + (n * 8))
+#define TRCDVCVRn(n) (0x500 + (n * 16))
+#define TRCDVCMRn(n) (0x580 + (n * 16))
+#define TRCCIDCVRn(n) (0x600 + (n * 8))
+#define TRCVMIDCVRn(n) (0x640 + (n * 8))
+#define TRCCIDCCTLR0 0x680
+#define TRCCIDCCTLR1 0x684
+#define TRCVMIDCCTLR0 0x688
+#define TRCVMIDCCTLR1 0x68C
+/* Management register (0xF00) */
+/* Integration control registers */
+#define TRCITCTRL 0xF00
+/* Trace registers (0xFA0-0xFA4) */
+/* Claim tag registers */
+#define TRCCLAIMSET 0xFA0
+#define TRCCLAIMCLR 0xFA4
+/* Management registers (0xFA8-0xFFC) */
+#define TRCDEVAFF0 0xFA8
+#define TRCDEVAFF1 0xFAC
+#define TRCLAR 0xFB0
+#define TRCLSR 0xFB4
+#define TRCAUTHSTATUS 0xFB8
+#define TRCDEVARCH 0xFBC
+#define TRCDEVID 0xFC8
+#define TRCDEVTYPE 0xFCC
+#define TRCPIDR4 0xFD0
+#define TRCPIDR5 0xFD4
+#define TRCPIDR6 0xFD8
+#define TRCPIDR7 0xFDC
+#define TRCPIDR0 0xFE0
+#define TRCPIDR1 0xFE4
+#define TRCPIDR2 0xFE8
+#define TRCPIDR3 0xFEC
+#define TRCCIDR0 0xFF0
+#define TRCCIDR1 0xFF4
+#define TRCCIDR2 0xFF8
+#define TRCCIDR3 0xFFC
+
+/* ETMv4 resources */
+#define ETM_MAX_NR_PE 8
+#define ETMv4_MAX_CNTR 4
+#define ETM_MAX_SEQ_STATES 4
+#define ETM_MAX_EXT_INP_SEL 4
+#define ETM_MAX_EXT_INP 256
+#define ETM_MAX_EXT_OUT 4
+#define ETM_MAX_SINGLE_ADDR_CMP 16
+#define ETM_MAX_ADDR_RANGE_CMP (ETM_MAX_SINGLE_ADDR_CMP / 2)
+#define ETM_MAX_DATA_VAL_CMP 8
+#define ETMv4_MAX_CTXID_CMP 8
+#define ETM_MAX_VMID_CMP 8
+#define ETM_MAX_PE_CMP 8
+#define ETM_MAX_RES_SEL 16
+#define ETM_MAX_SS_CMP 8
+
+#define ETM_ARCH_V4 0x40
+#define ETMv4_SYNC_MASK 0x1F
+#define ETM_CYC_THRESHOLD_MASK 0xFFF
+#define ETMv4_EVENT_MASK 0xFF
+#define ETM_CNTR_MAX_VAL 0xFFFF
+#define ETM_TRACEID_MASK 0x3f
+
+/* ETMv4 programming modes */
+#define ETM_MODE_EXCLUDE BIT(0)
+#define ETM_MODE_LOAD BIT(1)
+#define ETM_MODE_STORE BIT(2)
+#define ETM_MODE_LOAD_STORE BIT(3)
+#define ETM_MODE_BB BIT(4)
+#define ETMv4_MODE_CYCACC BIT(5)
+#define ETMv4_MODE_CTXID BIT(6)
+#define ETM_MODE_VMID BIT(7)
+#define ETM_MODE_COND(val) BMVAL(val, 8, 10)
+#define ETMv4_MODE_TIMESTAMP BIT(11)
+#define ETM_MODE_RETURNSTACK BIT(12)
+#define ETM_MODE_QELEM(val) BMVAL(val, 13, 14)
+#define ETM_MODE_DATA_TRACE_ADDR BIT(15)
+#define ETM_MODE_DATA_TRACE_VAL BIT(16)
+#define ETM_MODE_ISTALL BIT(17)
+#define ETM_MODE_DSTALL BIT(18)
+#define ETM_MODE_ATB_TRIGGER BIT(19)
+#define ETM_MODE_LPOVERRIDE BIT(20)
+#define ETM_MODE_ISTALL_EN BIT(21)
+#define ETM_MODE_DSTALL_EN BIT(22)
+#define ETM_MODE_INSTPRIO BIT(23)
+#define ETM_MODE_NOOVERFLOW BIT(24)
+#define ETM_MODE_TRACE_RESET BIT(25)
+#define ETM_MODE_TRACE_ERR BIT(26)
+#define ETM_MODE_VIEWINST_STARTSTOP BIT(27)
+#define ETMv4_MODE_ALL 0xFFFFFFF
+
+#define TRCSTATR_IDLE_BIT 0
+
+/**
+ * struct etmv4_drvdata - specifics associated to an ETM component
+ * @base: Memory mapped base address for this component.
+ * @dev: The device entity associated to this component.
+ * @csdev: Component vitals needed by the framework.
+ * @spinlock: Only one at a time pls.
+ * @cpu: The cpu this component is affined to.
+ * @arch: ETM version number.
+ * @enable: Is this ETM currently tracing.
+ * @sticky_enable: true if ETM base configuration has been done.
+ * @boot_enable:True if we should start tracing at boot time.
+ * @os_unlock: True if access to management registers is allowed.
+ * @nr_pe: The number of processing entity available for tracing.
+ * @nr_pe_cmp: The number of processing entity comparator inputs that are
+ * available for tracing.
+ * @nr_addr_cmp:Number of pairs of address comparators available
+ * as found in ETMIDR4 0-3.
+ * @nr_cntr: Number of counters as found in ETMIDR5 bit 28-30.
+ * @nr_ext_inp: Number of external inputs.
+ * @numcidc: Number of contextID comparators.
+ * @numvmidc: Number of VMID comparators.
+ * @nrseqstate: The number of sequencer states that are implemented.
+ * @nr_event: Indicates how many events the trace unit supports.
+ * @nr_resource:The number of resource selection pairs available for tracing.
+ * @nr_ss_cmp: Number of single-shot comparator controls that are available.
+ * @mode: Controls various modes supported by this ETM.
+ * @trcid: value of the current ID for this component.
+ * @trcid_size: Indicates the trace ID width.
+ * @instrp0: Tracing of load and store instructions
+ * as P0 elements is supported.
+ * @trccond: If the trace unit supports conditional
+ * instruction tracing.
+ * @retstack: Indicates if the implementation supports a return stack.
+ * @trc_error: Whether a trace unit can trace a system
+ * error exception.
+ * @atbtrig: If the implementation can support ATB triggers
+ * @lpoverride: If the implementation can support low-power state override.
+ * @pe_sel: Controls which PE to trace.
+ * @cfg: Controls the tracing options.
+ * @eventctrl0: Controls the tracing of arbitrary events.
+ * @eventctrl1: Controls the behavior of the events that @eventctrl0 selects.
+ * @stallctl: If functionality that prevents trace unit buffer overflows
+ * is available.
+ * @sysstall: Does the system support stall control of the PE?
+ * @nooverflow: Indicate if overflow prevention is supported.
+ * @stall_ctrl: Enables trace unit functionality that prevents trace
+ * unit buffer overflows.
+ * @ts_size: Global timestamp size field.
+ * @ts_ctrl: Controls the insertion of global timestamps in the
+ * trace streams.
+ * @syncpr: Indicates if an implementation has a fixed
+ * synchronization period.
+ * @syncfreq: Controls how often trace synchronization requests occur.
+ * @trccci: Indicates if the trace unit supports cycle counting
+ * for instructions.
+ * @ccsize: Indicates the size of the cycle counter in bits.
+ * @ccitmin: minimum value that can be programmed in
+ * the TRCCCCTLR register.
+ * @ccctlr: Sets the threshold value for cycle counting.
+ * @trcbb: Indicates if the trace unit supports branch broadcast tracing.
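+ * @bb_ctrl: Controls which regions in the memory map are enabled to use
+ * branch broadcasting.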
+ * @q_support: Q element support characteristics.
+ * @vinst_ctrl: Controls instruction trace filtering.
+ * @viiectlr: Set, or read, the address range comparators.
+ * @vissctlr: Set, or read, the single address comparators that control the
+ * ViewInst start-stop logic.
+ * @vipcssctlr: Set, or read, which PE comparator inputs can control the
+ * ViewInst start-stop logic.
+ * @seq_idx: Sequencer index selector.
+ * @seq_ctrl: Control for the sequencer state transition control register.
+ * @seq_rst: Moves the sequencer to state 0 when a programmed event occurs.
+ * @seq_state: Set, or read the sequencer state.
+ * @cntr_idx: Counter index selector.
+ * @cntrldvr: Sets or returns the reload count value for a counter.
+ * @cntr_ctrl: Controls the operation of a counter.
+ * @cntr_val: Sets or returns the value for a counter.
+ * @res_idx: Resource index selector.
+ * @res_ctrl: Controls the selection of the resources in the trace unit.
+ * @ss_ctrl: Controls the corresponding single-shot comparator resource.
+ * @ss_status: The status of the corresponding single-shot comparator.
+ * @ss_pe_cmp: Selects the PE comparator inputs for Single-shot control.
+ * @addr_idx: Address comparator index selector.
+ * @addr_val: Value for address comparator.
+ * @addr_acc: Address comparator access type.
+ * @addr_type: Current status of the comparator register.
+ * @ctxid_idx: Context ID index selector.
+ * @ctxid_size: Size of the context ID field to consider.
+ * @ctxid_val: Value of the context ID comparator.
+ * @ctxid_mask0:Context ID comparator mask for comparator 0-3.
+ * @ctxid_mask1:Context ID comparator mask for comparator 4-7.
+ * @vmid_idx: VM ID index selector.
+ * @vmid_size: Size of the VM ID comparator to consider.
+ * @vmid_val: Value of the VM ID comparator.
+ * @vmid_mask0: VM ID comparator mask for comparator 0-3.
+ * @vmid_mask1: VM ID comparator mask for comparator 4-7.
+ * @s_ex_level: In secure state, indicates whether instruction tracing is
+ * supported for the corresponding Exception level.
+ * @ns_ex_level:In non-secure state, indicates whether instruction tracing is
+ * supported for the corresponding Exception level.
+ * @ext_inp: External input selection.
+ */
+struct etmv4_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct coresight_device *csdev;
+ spinlock_t spinlock;
+ int cpu;
+ u8 arch;
+ bool enable;
+ bool sticky_enable;
+ bool boot_enable;
+ bool os_unlock;
+ u8 nr_pe;
+ u8 nr_pe_cmp;
+ u8 nr_addr_cmp;
+ u8 nr_cntr;
+ u8 nr_ext_inp;
+ u8 numcidc;
+ u8 numvmidc;
+ u8 nrseqstate;
+ u8 nr_event;
+ u8 nr_resource;
+ u8 nr_ss_cmp;
+ u32 mode;
+ u8 trcid;
+ u8 trcid_size;
+ bool instrp0;
+ bool trccond;
+ bool retstack;
+ bool trc_error;
+ bool atbtrig;
+ bool lpoverride;
+ u32 pe_sel;
+ u32 cfg;
+ u32 eventctrl0;
+ u32 eventctrl1;
+ bool stallctl;
+ bool sysstall;
+ bool nooverflow;
+ u32 stall_ctrl;
+ u8 ts_size;
+ u32 ts_ctrl;
+ bool syncpr;
+ u32 syncfreq;
+ bool trccci;
+ u8 ccsize;
+ u8 ccitmin;
+ u32 ccctlr;
+ bool trcbb;
+ u32 bb_ctrl;
+	u8 q_support;
+ u32 vinst_ctrl;
+ u32 viiectlr;
+ u32 vissctlr;
+ u32 vipcssctlr;
+ u8 seq_idx;
+ u32 seq_ctrl[ETM_MAX_SEQ_STATES];
+ u32 seq_rst;
+ u32 seq_state;
+ u8 cntr_idx;
+ u32 cntrldvr[ETMv4_MAX_CNTR];
+ u32 cntr_ctrl[ETMv4_MAX_CNTR];
+ u32 cntr_val[ETMv4_MAX_CNTR];
+ u8 res_idx;
+ u32 res_ctrl[ETM_MAX_RES_SEL];
+ u32 ss_ctrl[ETM_MAX_SS_CMP];
+ u32 ss_status[ETM_MAX_SS_CMP];
+ u32 ss_pe_cmp[ETM_MAX_SS_CMP];
+ u8 addr_idx;
+ u64 addr_val[ETM_MAX_SINGLE_ADDR_CMP];
+ u64 addr_acc[ETM_MAX_SINGLE_ADDR_CMP];
+ u8 addr_type[ETM_MAX_SINGLE_ADDR_CMP];
+ u8 ctxid_idx;
+ u8 ctxid_size;
+ u64 ctxid_val[ETMv4_MAX_CTXID_CMP];
+ u32 ctxid_mask0;
+ u32 ctxid_mask1;
+ u8 vmid_idx;
+ u8 vmid_size;
+ u64 vmid_val[ETM_MAX_VMID_CMP];
+ u32 vmid_mask0;
+ u32 vmid_mask1;
+ u8 s_ex_level;
+ u8 ns_ex_level;
+ u32 ext_inp;
+};
+
+/* Address comparator access types */
+enum etm_addr_acctype {
+ ETM_INSTR_ADDR,
+ ETM_DATA_LOAD_ADDR,
+ ETM_DATA_STORE_ADDR,
+ ETM_DATA_LOAD_STORE_ADDR,
+};
+
+/* Address comparator context types */
+enum etm_addr_ctxtype {
+ ETM_CTX_NONE,
+ ETM_CTX_CTXID,
+ ETM_CTX_VMID,
+ ETM_CTX_CTXID_VMID,
+};
+
+enum etm_addr_type {
+ ETM_ADDR_TYPE_NONE,
+ ETM_ADDR_TYPE_SINGLE,
+ ETM_ADDR_TYPE_RANGE,
+ ETM_ADDR_TYPE_START,
+ ETM_ADDR_TYPE_STOP,
+};
+#endif
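
The BMVAL() helper used throughout etm4_init_arch_data() is defined in coresight-priv.h; a minimal sketch of the idiom, assuming the usual GENMASK() semantics from linux/bitops.h (bmval_sketch is a hypothetical stand-in, not the literal macro):

	#include <linux/bitops.h>

	/* extract bits [msb:lsb] of val, shifted down to bit 0 */
	static inline u32 bmval_sketch(u32 val, unsigned int lsb, unsigned int msb)
	{
		return (val & GENMASK(msb, lsb)) >> lsb;
	}

	/* e.g. TRCIDR5.NUMCNTR lives in bits[30:28]:
	 *	nr_cntr = bmval_sketch(etmidr5, 28, 30);
	 */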
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 3db36f70b666..2e36bde7fcb4 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -18,9 +18,10 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
-#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
+#include <linux/clk.h>
#include "coresight-priv.h"
@@ -35,15 +36,15 @@
* struct funnel_drvdata - specifics associated to a funnel component
* @base: memory mapped base address for this component.
* @dev: the device entity associated to this component.
+ * @atclk: optional clock for the core parts of the funnel.
* @csdev: component vitals needed by the framework.
- * @clk: the clock this component is associated to.
* @priority: port selection order.
*/
struct funnel_drvdata {
void __iomem *base;
struct device *dev;
+ struct clk *atclk;
struct coresight_device *csdev;
- struct clk *clk;
unsigned long priority;
};
@@ -67,12 +68,8 @@ static int funnel_enable(struct coresight_device *csdev, int inport,
int outport)
{
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- int ret;
-
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
+ pm_runtime_get_sync(drvdata->dev);
funnel_enable_hw(drvdata, inport);
dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
@@ -98,8 +95,7 @@ static void funnel_disable(struct coresight_device *csdev, int inport,
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
funnel_disable_hw(drvdata, inport);
-
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
}
@@ -153,16 +149,14 @@ static u32 get_funnel_ctrl_hw(struct funnel_drvdata *drvdata)
static ssize_t funnel_ctrl_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int ret;
u32 val;
struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent);
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
+ pm_runtime_get_sync(drvdata->dev);
val = get_funnel_ctrl_hw(drvdata);
- clk_disable_unprepare(drvdata->clk);
+
+ pm_runtime_put(drvdata->dev);
return sprintf(buf, "%#x\n", val);
}
@@ -177,6 +171,7 @@ ATTRIBUTE_GROUPS(coresight_funnel);
static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
{
+ int ret;
void __iomem *base;
struct device *dev = &adev->dev;
struct coresight_platform_data *pdata = NULL;
@@ -197,6 +192,12 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
return -ENOMEM;
drvdata->dev = &adev->dev;
+ drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
+ if (!IS_ERR(drvdata->atclk)) {
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ return ret;
+ }
dev_set_drvdata(dev, drvdata);
/* Validity for the resource is already checked by the AMBA core */
@@ -205,8 +206,7 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(base);
drvdata->base = base;
-
- drvdata->clk = adev->pclk;
+ pm_runtime_put(&adev->dev);
desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
if (!desc)
@@ -234,6 +234,32 @@ static int funnel_remove(struct amba_device *adev)
return 0;
}
+#ifdef CONFIG_PM
+static int funnel_runtime_suspend(struct device *dev)
+{
+ struct funnel_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata && !IS_ERR(drvdata->atclk))
+ clk_disable_unprepare(drvdata->atclk);
+
+ return 0;
+}
+
+static int funnel_runtime_resume(struct device *dev)
+{
+ struct funnel_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata && !IS_ERR(drvdata->atclk))
+ clk_prepare_enable(drvdata->atclk);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops funnel_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(funnel_runtime_suspend, funnel_runtime_resume, NULL)
+};
+
static struct amba_id funnel_ids[] = {
{
.id = 0x0003b908,
@@ -246,6 +272,7 @@ static struct amba_driver funnel_driver = {
.drv = {
.name = "coresight-funnel",
.owner = THIS_MODULE,
+ .pm = &funnel_dev_pm_ops,
},
.probe = funnel_probe,
.remove = funnel_remove,
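
The conversion above trades open-coded clk_prepare_enable()/clk_disable_unprepare() pairs for runtime PM references, leaving the optional atclk to the runtime_suspend/resume callbacks. A condensed sketch of the resulting contract (use_funnel_hw is hypothetical, not part of the patch):

	#include <linux/pm_runtime.h>

	static int use_funnel_hw(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);	/* may invoke ->runtime_resume() */
		if (ret < 0) {
			/* get_sync raises the usage count even on failure */
			pm_runtime_put_noidle(dev);
			return ret;
		}

		/* ... program registers; atclk is guaranteed running ... */

		pm_runtime_put(dev);	/* ->runtime_suspend() may now follow */
		return 0;
	}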
diff --git a/drivers/hwtracing/coresight/coresight-replicator-qcom.c b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
new file mode 100644
index 000000000000..584059e9e866
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "coresight-priv.h"
+
+#define REPLICATOR_IDFILTER0 0x000
+#define REPLICATOR_IDFILTER1 0x004
+
+/**
+ * struct replicator_state - specifics associated to a replicator component
+ * @base: memory mapped base address for this component.
+ * @dev: the device entity associated with this component
+ * @atclk: optional clock for the core parts of the replicator.
+ * @csdev: component vitals needed by the framework
+ */
+struct replicator_state {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *atclk;
+ struct coresight_device *csdev;
+};
+
+static int replicator_enable(struct coresight_device *csdev, int inport,
+ int outport)
+{
+ struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ pm_runtime_get_sync(drvdata->dev);
+
+ CS_UNLOCK(drvdata->base);
+
+ /*
+ * Ensure that the other port is disabled
+ * 0x00 - passing through the replicator unimpeded
+ * 0xff - disable (or impede) the flow of ATB data
+ */
+ if (outport == 0) {
+ writel_relaxed(0x00, drvdata->base + REPLICATOR_IDFILTER0);
+ writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
+ } else {
+ writel_relaxed(0x00, drvdata->base + REPLICATOR_IDFILTER1);
+ writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
+ }
+
+ CS_LOCK(drvdata->base);
+
+ dev_info(drvdata->dev, "REPLICATOR enabled\n");
+ return 0;
+}
+
+static void replicator_disable(struct coresight_device *csdev, int inport,
+ int outport)
+{
+ struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ CS_UNLOCK(drvdata->base);
+
+ /* disable the flow of ATB data through port */
+ if (outport == 0)
+ writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
+ else
+ writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
+
+ CS_LOCK(drvdata->base);
+
+ pm_runtime_put(drvdata->dev);
+
+ dev_info(drvdata->dev, "REPLICATOR disabled\n");
+}
+
+static const struct coresight_ops_link replicator_link_ops = {
+ .enable = replicator_enable,
+ .disable = replicator_disable,
+};
+
+static const struct coresight_ops replicator_cs_ops = {
+ .link_ops = &replicator_link_ops,
+};
+
+static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int ret;
+ struct device *dev = &adev->dev;
+ struct resource *res = &adev->res;
+ struct coresight_platform_data *pdata = NULL;
+ struct replicator_state *drvdata;
+ struct coresight_desc *desc;
+ struct device_node *np = adev->dev.of_node;
+ void __iomem *base;
+
+ if (np) {
+ pdata = of_get_coresight_platform_data(dev, np);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ adev->dev.platform_data = pdata;
+ }
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = &adev->dev;
+ drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
+ if (!IS_ERR(drvdata->atclk)) {
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ return ret;
+ }
+
+ /* Validity for the resource is already checked by the AMBA core */
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ drvdata->base = base;
+ dev_set_drvdata(dev, drvdata);
+ pm_runtime_put(&adev->dev);
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->type = CORESIGHT_DEV_TYPE_LINK;
+ desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
+ desc->ops = &replicator_cs_ops;
+ desc->pdata = adev->dev.platform_data;
+ desc->dev = &adev->dev;
+ drvdata->csdev = coresight_register(desc);
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
+
+ dev_info(dev, "%s initialized\n", (char *)id->data);
+ return 0;
+}
+
+static int replicator_remove(struct amba_device *adev)
+{
+ struct replicator_state *drvdata = amba_get_drvdata(adev);
+
+ pm_runtime_disable(&adev->dev);
+ coresight_unregister(drvdata->csdev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int replicator_runtime_suspend(struct device *dev)
+{
+ struct replicator_state *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata && !IS_ERR(drvdata->atclk))
+ clk_disable_unprepare(drvdata->atclk);
+
+ return 0;
+}
+
+static int replicator_runtime_resume(struct device *dev)
+{
+ struct replicator_state *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata && !IS_ERR(drvdata->atclk))
+ clk_prepare_enable(drvdata->atclk);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops replicator_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(replicator_runtime_suspend,
+ replicator_runtime_resume,
+ NULL)
+};
+
+static struct amba_id replicator_ids[] = {
+ {
+ .id = 0x0003b909,
+ .mask = 0x0003ffff,
+ .data = "REPLICATOR 1.0",
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver replicator_driver = {
+ .drv = {
+ .name = "coresight-replicator-qcom",
+ .pm = &replicator_dev_pm_ops,
+ },
+ .probe = replicator_probe,
+ .remove = replicator_remove,
+ .id_table = replicator_ids,
+};
+
+module_amba_driver(replicator_driver);
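
Every register access above is bracketed by CS_UNLOCK()/CS_LOCK() from coresight-priv.h. Their rough shape, assuming the architectural CoreSight lock access register at offset 0xFB0 (TRCLAR in the ETMv4 map) and the standard 0xC5ACCE55 unlock key (helper names here are illustrative):

	#include <linux/io.h>

	#define CS_LAR_OFFSET	0xFB0		/* lock access register */
	#define CS_UNLOCK_KEY	0xC5ACCE55	/* architectural unlock magic */

	static inline void cs_unlock_sketch(void __iomem *base)
	{
		writel_relaxed(CS_UNLOCK_KEY, base + CS_LAR_OFFSET);
	}

	static inline void cs_lock_sketch(void __iomem *base)
	{
		/* writing anything other than the key re-locks the component */
		writel_relaxed(0x0, base + CS_LAR_OFFSET);
	}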
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 75b9abd804e6..7974b7c3da6b 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -18,6 +18,7 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/coresight.h>
@@ -27,10 +28,12 @@
/**
* struct replicator_drvdata - specifics associated to a replicator component
* @dev: the device entity associated with this component
+ * @atclk: optional clock for the core parts of the replicator.
* @csdev: component vitals needed by the framework
*/
struct replicator_drvdata {
struct device *dev;
+ struct clk *atclk;
struct coresight_device *csdev;
};
@@ -39,6 +42,7 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
{
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ pm_runtime_get_sync(drvdata->dev);
dev_info(drvdata->dev, "REPLICATOR enabled\n");
return 0;
}
@@ -48,6 +52,7 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
{
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ pm_runtime_put(drvdata->dev);
dev_info(drvdata->dev, "REPLICATOR disabled\n");
}
@@ -62,6 +67,7 @@ static const struct coresight_ops replicator_cs_ops = {
static int replicator_probe(struct platform_device *pdev)
{
+ int ret;
struct device *dev = &pdev->dev;
struct coresight_platform_data *pdata = NULL;
struct replicator_drvdata *drvdata;
@@ -80,11 +86,22 @@ static int replicator_probe(struct platform_device *pdev)
return -ENOMEM;
drvdata->dev = &pdev->dev;
+ drvdata->atclk = devm_clk_get(&pdev->dev, "atclk"); /* optional */
+ if (!IS_ERR(drvdata->atclk)) {
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ return ret;
+ }
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
platform_set_drvdata(pdev, drvdata);
desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
+ if (!desc) {
+ ret = -ENOMEM;
+ goto out_disable_pm;
+ }
desc->type = CORESIGHT_DEV_TYPE_LINK;
desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
@@ -92,11 +109,23 @@ static int replicator_probe(struct platform_device *pdev)
desc->pdata = pdev->dev.platform_data;
desc->dev = &pdev->dev;
drvdata->csdev = coresight_register(desc);
- if (IS_ERR(drvdata->csdev))
- return PTR_ERR(drvdata->csdev);
+ if (IS_ERR(drvdata->csdev)) {
+ ret = PTR_ERR(drvdata->csdev);
+ goto out_disable_pm;
+ }
+
+ pm_runtime_put(&pdev->dev);
dev_info(dev, "REPLICATOR initialized\n");
return 0;
+
+out_disable_pm:
+ if (!IS_ERR(drvdata->atclk))
+ clk_disable_unprepare(drvdata->atclk);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
}
static int replicator_remove(struct platform_device *pdev)
@@ -104,9 +133,42 @@ static int replicator_remove(struct platform_device *pdev)
struct replicator_drvdata *drvdata = platform_get_drvdata(pdev);
coresight_unregister(drvdata->csdev);
+ pm_runtime_get_sync(&pdev->dev);
+ if (!IS_ERR(drvdata->atclk))
+ clk_disable_unprepare(drvdata->atclk);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int replicator_runtime_suspend(struct device *dev)
+{
+ struct replicator_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata && !IS_ERR(drvdata->atclk))
+ clk_disable_unprepare(drvdata->atclk);
+
return 0;
}
+static int replicator_runtime_resume(struct device *dev)
+{
+ struct replicator_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata && !IS_ERR(drvdata->atclk))
+ clk_prepare_enable(drvdata->atclk);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops replicator_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(replicator_runtime_suspend,
+ replicator_runtime_resume, NULL)
+};
+
static const struct of_device_id replicator_match[] = {
{.compatible = "arm,coresight-replicator"},
{}
@@ -118,6 +180,7 @@ static struct platform_driver replicator_driver = {
.driver = {
.name = "coresight-replicator",
.of_match_table = replicator_match,
+ .pm = &replicator_dev_pm_ops,
},
};
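
The probe path above needs the get_noresume/set_active/enable triple because the clock was enabled by hand before runtime PM was armed. A condensed sketch of the ordering (function name hypothetical):

	static void runtime_pm_bringup_sketch(struct device *dev)
	{
		pm_runtime_get_noresume(dev);	/* hold a reference, no resume callback */
		pm_runtime_set_active(dev);	/* PM core state matches the live clock */
		pm_runtime_enable(dev);		/* callbacks are armed from here on */

		/* ... coresight_register() runs with the device powered ... */

		pm_runtime_put(dev);		/* drop the probe reference */
	}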
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 7147f3dd363c..a57c7ec1661f 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -23,7 +23,7 @@
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
-#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
@@ -104,7 +104,6 @@ enum tmc_mem_intf_width {
* @dev: the device entity associated to this component.
* @csdev: component vitals needed by the framework.
* @miscdev: specifics to handle "/dev/xyz.tmc" entry.
- * @clk: the clock this component is associated to.
* @spinlock: only one at a time pls.
* @read_count: manages preparation of buffer for reading.
* @buf: area of memory where trace data get sent.
@@ -120,7 +119,6 @@ struct tmc_drvdata {
struct device *dev;
struct coresight_device *csdev;
struct miscdevice miscdev;
- struct clk *clk;
spinlock_t spinlock;
int read_count;
bool reading;
@@ -242,17 +240,14 @@ static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
{
- int ret;
unsigned long flags;
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
+ pm_runtime_get_sync(drvdata->dev);
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
return -EBUSY;
}
@@ -386,7 +381,7 @@ out:
drvdata->enable = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
dev_info(drvdata->dev, "TMC disabled\n");
}
@@ -568,17 +563,13 @@ static const struct file_operations tmc_fops = {
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int ret;
unsigned long flags;
u32 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg;
u32 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr;
u32 devid;
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- goto out;
-
+ pm_runtime_get_sync(drvdata->dev);
spin_lock_irqsave(&drvdata->spinlock, flags);
CS_UNLOCK(drvdata->base);
@@ -596,8 +587,7 @@ static ssize_t status_show(struct device *dev,
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
return sprintf(buf,
"Depth:\t\t0x%x\n"
@@ -613,7 +603,7 @@ static ssize_t status_show(struct device *dev,
"DEVID:\t\t0x%x\n",
tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg,
tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr, devid);
-out:
-	return -EINVAL;
}
static DEVICE_ATTR_RO(status);
@@ -700,11 +690,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&drvdata->spinlock);
- drvdata->clk = adev->pclk;
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
-
devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
drvdata->config_type = BMVAL(devid, 6, 7);
@@ -719,7 +704,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
}
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(&adev->dev);
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size,
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 3b33af2416bb..7214efd10db5 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -17,9 +17,10 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/slab.h>
-#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
+#include <linux/clk.h>
#include "coresight-priv.h"
@@ -50,14 +51,14 @@
/**
* @base: memory mapped base address for this component.
* @dev: the device entity associated to this component.
+ * @atclk: optional clock for the core parts of the TPIU.
* @csdev: component vitals needed by the framework.
- * @clk: the clock this component is associated to.
*/
struct tpiu_drvdata {
void __iomem *base;
struct device *dev;
+ struct clk *atclk;
struct coresight_device *csdev;
- struct clk *clk;
};
static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
@@ -72,12 +73,8 @@ static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
static int tpiu_enable(struct coresight_device *csdev)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- int ret;
-
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
+ pm_runtime_get_sync(csdev->dev.parent);
tpiu_enable_hw(drvdata);
dev_info(drvdata->dev, "TPIU enabled\n");
@@ -101,8 +98,7 @@ static void tpiu_disable(struct coresight_device *csdev)
struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
tpiu_disable_hw(drvdata);
-
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(csdev->dev.parent);
dev_info(drvdata->dev, "TPIU disabled\n");
}
@@ -139,6 +135,12 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
return -ENOMEM;
drvdata->dev = &adev->dev;
+ drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
+ if (!IS_ERR(drvdata->atclk)) {
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ return ret;
+ }
dev_set_drvdata(dev, drvdata);
/* Validity for the resource is already checked by the AMBA core */
@@ -148,15 +150,10 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->base = base;
- drvdata->clk = adev->pclk;
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
-
/* Disable tpiu to support older devices */
tpiu_disable_hw(drvdata);
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(&adev->dev);
desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
if (!desc)
@@ -183,11 +180,41 @@ static int tpiu_remove(struct amba_device *adev)
return 0;
}
+#ifdef CONFIG_PM
+static int tpiu_runtime_suspend(struct device *dev)
+{
+ struct tpiu_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata && !IS_ERR(drvdata->atclk))
+ clk_disable_unprepare(drvdata->atclk);
+
+ return 0;
+}
+
+static int tpiu_runtime_resume(struct device *dev)
+{
+ struct tpiu_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata && !IS_ERR(drvdata->atclk))
+ clk_prepare_enable(drvdata->atclk);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops tpiu_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(tpiu_runtime_suspend, tpiu_runtime_resume, NULL)
+};
+
static struct amba_id tpiu_ids[] = {
{
.id = 0x0003b912,
.mask = 0x0003ffff,
},
+ {
+ .id = 0x0004b912,
+ .mask = 0x0007ffff,
+ },
{ 0, 0},
};
@@ -195,6 +222,7 @@ static struct amba_driver tpiu_driver = {
.drv = {
.name = "coresight-tpiu",
.owner = THIS_MODULE,
+ .pm = &tpiu_dev_pm_ops,
},
.probe = tpiu_probe,
.remove = tpiu_remove,
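
For context on the second TPIU entry added above: the AMBA core accepts a device when its peripheral ID, masked, equals the table entry, roughly (a sketch of the matching rule, not the literal amba_lookup() source):

	static bool amba_id_match_sketch(u32 periphid, const struct amba_id *id)
	{
		return (periphid & id->mask) == id->id;
	}

so the wider 0x0007ffff mask lets the 0x0004b912 entry catch a TPIU variant whose ID differs only in the additionally inspected bits.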
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 35e51ce93a5c..b0973617826f 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -37,7 +37,7 @@ of_coresight_get_endpoint_device(struct device_node *endpoint)
struct device *dev = NULL;
/*
- * If we have a non-configuable replicator, it will be found on the
+ * If we have a non-configurable replicator, it will be found on the
* platform bus.
*/
dev = bus_find_device(&platform_bus_type, NULL,
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index a1fac5aa9bae..9b94c3db80ab 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -46,6 +46,9 @@ struct i2c_par {
static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(adapter_list_lock);
+#define MAX_DEVICE 4
+static int parport[MAX_DEVICE] = {0, -1, -1, -1};
+
/* ----- Low-level parallel port access ----------------------------------- */
@@ -163,17 +166,34 @@ static void i2c_parport_irq(void *data)
static void i2c_parport_attach(struct parport *port)
{
struct i2c_par *adapter;
+ int i;
+ struct pardev_cb i2c_parport_cb;
+
+ for (i = 0; i < MAX_DEVICE; i++) {
+ if (parport[i] == -1)
+ continue;
+ if (port->number == parport[i])
+ break;
+ }
+ if (i == MAX_DEVICE) {
+ pr_debug("i2c-parport: Not using parport%d.\n", port->number);
+ return;
+ }
adapter = kzalloc(sizeof(struct i2c_par), GFP_KERNEL);
if (adapter == NULL) {
printk(KERN_ERR "i2c-parport: Failed to kzalloc\n");
return;
}
+ memset(&i2c_parport_cb, 0, sizeof(i2c_parport_cb));
+ i2c_parport_cb.flags = PARPORT_FLAG_EXCL;
+ i2c_parport_cb.irq_func = i2c_parport_irq;
+ i2c_parport_cb.private = adapter;
pr_debug("i2c-parport: attaching to %s\n", port->name);
parport_disable_irq(port);
- adapter->pdev = parport_register_device(port, "i2c-parport",
- NULL, NULL, i2c_parport_irq, PARPORT_FLAG_EXCL, adapter);
+ adapter->pdev = parport_register_dev_model(port, "i2c-parport",
+ &i2c_parport_cb, i);
if (!adapter->pdev) {
printk(KERN_ERR "i2c-parport: Unable to register with parport\n");
goto err_free;
@@ -267,9 +287,10 @@ static void i2c_parport_detach(struct parport *port)
}
static struct parport_driver i2c_parport_driver = {
- .name = "i2c-parport",
- .attach = i2c_parport_attach,
- .detach = i2c_parport_detach,
+ .name = "i2c-parport",
+ .match_port = i2c_parport_attach,
+ .detach = i2c_parport_detach,
+ .devmodel = true,
};
/* ----- Module loading, unloading and information ------------------------ */
@@ -298,5 +319,12 @@ MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("I2C bus over parallel port");
MODULE_LICENSE("GPL");
+module_param_array(parport, int, NULL, 0);
+MODULE_PARM_DESC(parport,
+ "List of parallel ports to bind to, by index.\n"
+ " Atmost " __stringify(MAX_DEVICE) " devices are supported.\n"
+ " Default is one device connected to parport0.\n"
+);
+
module_init(i2c_parport_init);
module_exit(i2c_parport_exit);
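
An illustrative use of the new module parameter (hypothetical invocation, not from the patch):

	modprobe i2c-parport parport=0,2

binds adapters to parport0 and parport2, while the default initializer {0, -1, -1, -1} keeps the historical behaviour of a single device on parport0.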
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 006242c8bca0..42c38525904b 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -520,7 +520,6 @@ source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
source "drivers/misc/ti-st/Kconfig"
source "drivers/misc/lis3lv02d/Kconfig"
-source "drivers/misc/carma/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
source "drivers/misc/mei/Kconfig"
source "drivers/misc/vmw_vmci/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 7d5c4cd118c4..d056fb7186fe 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -44,7 +44,6 @@ obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
obj-$(CONFIG_PCH_PHUB) += pch_phub.o
obj-y += ti-st/
obj-y += lis3lv02d/
-obj-y += carma/
obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
obj-$(CONFIG_INTEL_MEI) += mei/
diff --git a/drivers/misc/carma/Kconfig b/drivers/misc/carma/Kconfig
deleted file mode 100644
index 295882bfb14e..000000000000
--- a/drivers/misc/carma/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-config CARMA_FPGA
- tristate "CARMA DATA-FPGA Access Driver"
- depends on FSL_SOC && PPC_83xx && HAS_DMA && FSL_DMA
- default n
- help
- Say Y here to include support for communicating with the data
- processing FPGAs on the OVRO CARMA board.
-
-config CARMA_FPGA_PROGRAM
- tristate "CARMA DATA-FPGA Programmer"
- depends on FSL_SOC && PPC_83xx && HAS_DMA && FSL_DMA
- default n
- help
- Say Y here to include support for programming the data processing
- FPGAs on the OVRO CARMA board.
diff --git a/drivers/misc/carma/Makefile b/drivers/misc/carma/Makefile
deleted file mode 100644
index ff36ac2ce534..000000000000
--- a/drivers/misc/carma/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_CARMA_FPGA) += carma-fpga.o
-obj-$(CONFIG_CARMA_FPGA_PROGRAM) += carma-fpga-program.o
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
deleted file mode 100644
index 0b1bd85e4ae6..000000000000
--- a/drivers/misc/carma/carma-fpga-program.c
+++ /dev/null
@@ -1,1182 +0,0 @@
-/*
- * CARMA Board DATA-FPGA Programmer
- *
- * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
-#include <linux/completion.h>
-#include <linux/miscdevice.h>
-#include <linux/dmaengine.h>
-#include <linux/fsldma.h>
-#include <linux/interrupt.h>
-#include <linux/highmem.h>
-#include <linux/vmalloc.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/leds.h>
-#include <linux/slab.h>
-#include <linux/kref.h>
-#include <linux/fs.h>
-#include <linux/io.h>
-
-/* MPC8349EMDS specific get_immrbase() */
-#include <sysdev/fsl_soc.h>
-
-static const char drv_name[] = "carma-fpga-program";
-
-/*
- * Firmware images are always this exact size
- *
- * 12849552 bytes for a CARMA Digitizer Board (EP2S90 FPGAs)
- * 18662880 bytes for a CARMA Correlator Board (EP2S130 FPGAs)
- */
-#define FW_SIZE_EP2S90 12849552
-#define FW_SIZE_EP2S130 18662880
-
-struct fpga_dev {
- struct miscdevice miscdev;
-
- /* Reference count */
- struct kref ref;
-
- /* Device Registers */
- struct device *dev;
- void __iomem *regs;
- void __iomem *immr;
-
- /* Freescale DMA Device */
- struct dma_chan *chan;
-
- /* Interrupts */
- int irq, status;
- struct completion completion;
-
- /* FPGA Bitfile */
- struct mutex lock;
-
- void *vaddr;
- struct scatterlist *sglist;
- int sglen;
- int nr_pages;
- bool buf_allocated;
-
- /* max size and written bytes */
- size_t fw_size;
- size_t bytes;
-};
-
-static int fpga_dma_init(struct fpga_dev *priv, int nr_pages)
-{
- struct page *pg;
- int i;
-
- priv->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
- if (NULL == priv->vaddr) {
- pr_debug("vmalloc_32(%d pages) failed\n", nr_pages);
- return -ENOMEM;
- }
-
- pr_debug("vmalloc is at addr 0x%08lx, size=%d\n",
- (unsigned long)priv->vaddr,
- nr_pages << PAGE_SHIFT);
-
- memset(priv->vaddr, 0, nr_pages << PAGE_SHIFT);
- priv->nr_pages = nr_pages;
-
- priv->sglist = vzalloc(priv->nr_pages * sizeof(*priv->sglist));
- if (NULL == priv->sglist)
- goto vzalloc_err;
-
- sg_init_table(priv->sglist, priv->nr_pages);
- for (i = 0; i < priv->nr_pages; i++) {
- pg = vmalloc_to_page(priv->vaddr + i * PAGE_SIZE);
- if (NULL == pg)
- goto vmalloc_to_page_err;
- sg_set_page(&priv->sglist[i], pg, PAGE_SIZE, 0);
- }
- return 0;
-
-vmalloc_to_page_err:
- vfree(priv->sglist);
- priv->sglist = NULL;
-vzalloc_err:
- vfree(priv->vaddr);
- priv->vaddr = NULL;
- return -ENOMEM;
-}
-
-static int fpga_dma_map(struct fpga_dev *priv)
-{
- priv->sglen = dma_map_sg(priv->dev, priv->sglist,
- priv->nr_pages, DMA_TO_DEVICE);
-
- if (0 == priv->sglen) {
- pr_warn("%s: dma_map_sg failed\n", __func__);
- return -ENOMEM;
- }
- return 0;
-}
-
-static int fpga_dma_unmap(struct fpga_dev *priv)
-{
- if (!priv->sglen)
- return 0;
-
- dma_unmap_sg(priv->dev, priv->sglist, priv->sglen, DMA_TO_DEVICE);
- priv->sglen = 0;
- return 0;
-}
-
-/*
- * FPGA Bitfile Helpers
- */
-
-/**
- * fpga_drop_firmware_data() - drop the bitfile image from memory
- * @priv: the driver's private data structure
- *
- * LOCKING: must hold priv->lock
- */
-static void fpga_drop_firmware_data(struct fpga_dev *priv)
-{
- vfree(priv->sglist);
- vfree(priv->vaddr);
- priv->buf_allocated = false;
- priv->bytes = 0;
-}
-
-/*
- * Private Data Reference Count
- */
-
-static void fpga_dev_remove(struct kref *ref)
-{
- struct fpga_dev *priv = container_of(ref, struct fpga_dev, ref);
-
- /* free any firmware image that was not programmed */
- fpga_drop_firmware_data(priv);
-
- mutex_destroy(&priv->lock);
- kfree(priv);
-}
-
-/*
- * LED Trigger (could be a seperate module)
- */
-
-/*
- * NOTE: this whole thing does have the problem that whenever the led's are
- * NOTE: first set to use the fpga trigger, they could be in the wrong state
- */
-
-DEFINE_LED_TRIGGER(ledtrig_fpga);
-
-static void ledtrig_fpga_programmed(bool enabled)
-{
- if (enabled)
- led_trigger_event(ledtrig_fpga, LED_FULL);
- else
- led_trigger_event(ledtrig_fpga, LED_OFF);
-}
-
-/*
- * FPGA Register Helpers
- */
-
-/* Register Definitions */
-#define FPGA_CONFIG_CONTROL 0x40
-#define FPGA_CONFIG_STATUS 0x44
-#define FPGA_CONFIG_FIFO_SIZE 0x48
-#define FPGA_CONFIG_FIFO_USED 0x4C
-#define FPGA_CONFIG_TOTAL_BYTE_COUNT 0x50
-#define FPGA_CONFIG_CUR_BYTE_COUNT 0x54
-
-#define FPGA_FIFO_ADDRESS 0x3000
-
-static int fpga_fifo_size(void __iomem *regs)
-{
- return ioread32be(regs + FPGA_CONFIG_FIFO_SIZE);
-}
-
-#define CFG_STATUS_ERR_MASK 0xfffe
-
-static int fpga_config_error(void __iomem *regs)
-{
- return ioread32be(regs + FPGA_CONFIG_STATUS) & CFG_STATUS_ERR_MASK;
-}
-
-static int fpga_fifo_empty(void __iomem *regs)
-{
- return ioread32be(regs + FPGA_CONFIG_FIFO_USED) == 0;
-}
-
-static void fpga_fifo_write(void __iomem *regs, u32 val)
-{
- iowrite32be(val, regs + FPGA_FIFO_ADDRESS);
-}
-
-static void fpga_set_byte_count(void __iomem *regs, u32 count)
-{
- iowrite32be(count, regs + FPGA_CONFIG_TOTAL_BYTE_COUNT);
-}
-
-#define CFG_CTL_ENABLE (1 << 0)
-#define CFG_CTL_RESET (1 << 1)
-#define CFG_CTL_DMA (1 << 2)
-
-static void fpga_programmer_enable(struct fpga_dev *priv, bool dma)
-{
- u32 val;
-
- val = (dma) ? (CFG_CTL_ENABLE | CFG_CTL_DMA) : CFG_CTL_ENABLE;
- iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL);
-}
-
-static void fpga_programmer_disable(struct fpga_dev *priv)
-{
- iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL);
-}
-
-static void fpga_dump_registers(struct fpga_dev *priv)
-{
- u32 control, status, size, used, total, curr;
-
- /* good status: do nothing */
- if (priv->status == 0)
- return;
-
- /* Dump all status registers */
- control = ioread32be(priv->regs + FPGA_CONFIG_CONTROL);
- status = ioread32be(priv->regs + FPGA_CONFIG_STATUS);
- size = ioread32be(priv->regs + FPGA_CONFIG_FIFO_SIZE);
- used = ioread32be(priv->regs + FPGA_CONFIG_FIFO_USED);
- total = ioread32be(priv->regs + FPGA_CONFIG_TOTAL_BYTE_COUNT);
- curr = ioread32be(priv->regs + FPGA_CONFIG_CUR_BYTE_COUNT);
-
- dev_err(priv->dev, "Configuration failed, dumping status registers\n");
- dev_err(priv->dev, "Control: 0x%.8x\n", control);
- dev_err(priv->dev, "Status: 0x%.8x\n", status);
- dev_err(priv->dev, "FIFO Size: 0x%.8x\n", size);
- dev_err(priv->dev, "FIFO Used: 0x%.8x\n", used);
- dev_err(priv->dev, "FIFO Total: 0x%.8x\n", total);
- dev_err(priv->dev, "FIFO Curr: 0x%.8x\n", curr);
-}
-
-/*
- * FPGA Power Supply Code
- */
-
-#define CTL_PWR_CONTROL 0x2006
-#define CTL_PWR_STATUS 0x200A
-#define CTL_PWR_FAIL 0x200B
-
-#define PWR_CONTROL_ENABLE 0x01
-
-#define PWR_STATUS_ERROR_MASK 0x10
-#define PWR_STATUS_GOOD 0x0f
-
-/*
- * Determine if the FPGA power is good for all supplies
- */
-static bool fpga_power_good(struct fpga_dev *priv)
-{
- u8 val;
-
- val = ioread8(priv->regs + CTL_PWR_STATUS);
- if (val & PWR_STATUS_ERROR_MASK)
- return false;
-
- return val == PWR_STATUS_GOOD;
-}
-
-/*
- * Disable the FPGA power supplies
- */
-static void fpga_disable_power_supplies(struct fpga_dev *priv)
-{
- unsigned long start;
- u8 val;
-
- iowrite8(0x0, priv->regs + CTL_PWR_CONTROL);
-
- /*
- * Wait 500ms for the power rails to discharge
- *
- * Without this delay, the CTL-CPLD state machine can get into a
- * state where it is waiting for the power-goods to assert, but they
- * never do. This only happens when enabling and disabling the
- * power sequencer very rapidly.
- *
- * The loop below will also wait for the power goods to de-assert,
- * but testing has shown that they are always disabled by the time
- * the sleep completes. However, omitting the sleep and only waiting
- * for the power-goods to de-assert was not sufficient to ensure
- * that the power sequencer would not wedge itself.
- */
- msleep(500);
-
- start = jiffies;
- while (time_before(jiffies, start + HZ)) {
- val = ioread8(priv->regs + CTL_PWR_STATUS);
- if (!(val & PWR_STATUS_GOOD))
- break;
-
- usleep_range(5000, 10000);
- }
-
- val = ioread8(priv->regs + CTL_PWR_STATUS);
- if (val & PWR_STATUS_GOOD) {
- dev_err(priv->dev, "power disable failed: "
- "power-good signals still asserted: status 0x%.2x\n", val);
- }
-
- if (val & PWR_STATUS_ERROR_MASK) {
- dev_err(priv->dev, "power disable failed: "
- "alarm bit set: status 0x%.2x\n", val);
- }
-}
-
-/**
- * fpga_enable_power_supplies() - enable the DATA-FPGA power supplies
- * @priv: the driver's private data structure
- *
- * Enable the DATA-FPGA power supplies, waiting up to 1 second for
- * them to enable successfully.
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static int fpga_enable_power_supplies(struct fpga_dev *priv)
-{
- unsigned long start = jiffies;
-
- if (fpga_power_good(priv)) {
- dev_dbg(priv->dev, "power was already good\n");
- return 0;
- }
-
- iowrite8(PWR_CONTROL_ENABLE, priv->regs + CTL_PWR_CONTROL);
- while (time_before(jiffies, start + HZ)) {
- if (fpga_power_good(priv))
- return 0;
-
- usleep_range(5000, 10000);
- }
-
- return fpga_power_good(priv) ? 0 : -ETIMEDOUT;
-}
-
-/*
- * Determine if the FPGA power supplies are all enabled
- */
-static bool fpga_power_enabled(struct fpga_dev *priv)
-{
- u8 val;
-
- val = ioread8(priv->regs + CTL_PWR_CONTROL);
- if (val & PWR_CONTROL_ENABLE)
- return true;
-
- return false;
-}
-
-/*
- * Determine if the FPGAs are programmed and running correctly
- */
-static bool fpga_running(struct fpga_dev *priv)
-{
- if (!fpga_power_good(priv))
- return false;
-
- /* Check the config done bit */
- return ioread32be(priv->regs + FPGA_CONFIG_STATUS) & (1 << 18);
-}
-
-/*
- * FPGA Programming Code
- */
-
-/**
- * fpga_program_block() - put a block of data into the programmer's FIFO
- * @priv: the driver's private data structure
- * @buf: the data to program
- * @count: the length of data to program (must be a multiple of 4 bytes)
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static int fpga_program_block(struct fpga_dev *priv, void *buf, size_t count)
-{
- u32 *data = buf;
- int size = fpga_fifo_size(priv->regs);
- int i, len;
- unsigned long timeout;
-
- /* enforce correct data length for the FIFO */
- BUG_ON(count % 4 != 0);
-
- while (count > 0) {
-
- /* Get the size of the block to write (maximum is FIFO_SIZE) */
- len = min_t(size_t, count, size);
- timeout = jiffies + HZ / 4;
-
- /* Write the block */
- for (i = 0; i < len / 4; i++)
- fpga_fifo_write(priv->regs, data[i]);
-
- /* Update the amounts left */
- count -= len;
- data += len / 4;
-
- /* Wait for the fifo to empty */
- while (true) {
-
- if (fpga_fifo_empty(priv->regs)) {
- break;
- } else {
- dev_dbg(priv->dev, "Fifo not empty\n");
- cpu_relax();
- }
-
- if (fpga_config_error(priv->regs)) {
- dev_err(priv->dev, "Error detected\n");
- return -EIO;
- }
-
- if (time_after(jiffies, timeout)) {
- dev_err(priv->dev, "Fifo drain timeout\n");
- return -ETIMEDOUT;
- }
-
- usleep_range(5000, 10000);
- }
- }
-
- return 0;
-}
-
-/**
- * fpga_program_cpu() - program the DATA-FPGAs using the CPU
- * @priv: the driver's private data structure
- *
- * This is useful when the DMA programming method fails. It is possible to
- * wedge the Freescale DMA controller such that the DMA programming method
- * always fails. This method has always succeeded.
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static noinline int fpga_program_cpu(struct fpga_dev *priv)
-{
- int ret;
- unsigned long timeout;
-
- /* Disable the programmer */
- fpga_programmer_disable(priv);
-
- /* Set the total byte count */
- fpga_set_byte_count(priv->regs, priv->bytes);
- dev_dbg(priv->dev, "total byte count %zu bytes\n", priv->bytes);
-
- /* Enable the controller for programming */
- fpga_programmer_enable(priv, false);
- dev_dbg(priv->dev, "enabled the controller\n");
-
- /* Write each chunk of the FPGA bitfile to FPGA programmer */
- ret = fpga_program_block(priv, priv->vaddr, priv->bytes);
- if (ret)
- goto out_disable_controller;
-
- /* Wait for the interrupt handler to signal that programming finished */
- timeout = wait_for_completion_timeout(&priv->completion, 2 * HZ);
- if (!timeout) {
- dev_err(priv->dev, "Timed out waiting for completion\n");
- ret = -ETIMEDOUT;
- goto out_disable_controller;
- }
-
- /* Retrieve the status from the interrupt handler */
- ret = priv->status;
-
-out_disable_controller:
- fpga_programmer_disable(priv);
- return ret;
-}
-
-#define FIFO_DMA_ADDRESS 0xf0003000
-#define FIFO_MAX_LEN 4096
-
-/**
- * fpga_program_dma() - program the DATA-FPGAs using the DMA engine
- * @priv: the driver's private data structure
- *
- * Program the DATA-FPGAs using the Freescale DMA engine. This requires that
- * the engine is programmed such that the hardware DMA request lines can
- * control the entire DMA transaction. The system controller FPGA then
- * completely offloads the programming from the CPU.
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static noinline int fpga_program_dma(struct fpga_dev *priv)
-{
- struct dma_chan *chan = priv->chan;
- struct dma_async_tx_descriptor *tx;
- size_t num_pages, len, avail = 0;
- struct dma_slave_config config;
- struct scatterlist *sg;
- struct sg_table table;
- dma_cookie_t cookie;
- int ret, i;
- unsigned long timeout;
-
- /* Disable the programmer */
- fpga_programmer_disable(priv);
-
- /* Allocate a scatterlist for the DMA destination */
- num_pages = DIV_ROUND_UP(priv->bytes, FIFO_MAX_LEN);
- ret = sg_alloc_table(&table, num_pages, GFP_KERNEL);
- if (ret) {
- dev_err(priv->dev, "Unable to allocate dst scatterlist\n");
- ret = -ENOMEM;
- goto out_return;
- }
-
- /*
- * This is an ugly hack
- *
- * We fill in a scatterlist as if it were mapped for DMA. This is
- * necessary because there exists no better structure for this
- * inside the kernel code.
- *
- * As an added bonus, we can use the DMAEngine API for all of this,
- * rather than inventing another extremely similar API.
- */
- avail = priv->bytes;
- for_each_sg(table.sgl, sg, num_pages, i) {
- len = min_t(size_t, avail, FIFO_MAX_LEN);
- sg_dma_address(sg) = FIFO_DMA_ADDRESS;
- sg_dma_len(sg) = len;
-
- avail -= len;
- }
-
- /* Map the buffer for DMA */
- ret = fpga_dma_map(priv);
- if (ret) {
- dev_err(priv->dev, "Unable to map buffer for DMA\n");
- goto out_free_table;
- }
-
- /*
- * Configure the DMA channel to transfer FIFO_SIZE / 2 bytes per
- * transaction, and then put it under external control
- */
- memset(&config, 0, sizeof(config));
- config.direction = DMA_MEM_TO_DEV;
- config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4;
- ret = dmaengine_slave_config(chan, &config);
- if (ret) {
- dev_err(priv->dev, "DMA slave configuration failed\n");
- goto out_dma_unmap;
- }
-
- ret = fsl_dma_external_start(chan, 1);
- if (ret) {
- dev_err(priv->dev, "DMA external control setup failed\n");
- goto out_dma_unmap;
- }
-
- /* setup and submit the DMA transaction */
-
- tx = dmaengine_prep_dma_sg(chan, table.sgl, num_pages,
- priv->sglist, priv->sglen, 0);
- if (!tx) {
- dev_err(priv->dev, "Unable to prep DMA transaction\n");
- ret = -ENOMEM;
- goto out_dma_unmap;
- }
-
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- dev_err(priv->dev, "Unable to submit DMA transaction\n");
- ret = -ENOMEM;
- goto out_dma_unmap;
- }
-
- dma_async_issue_pending(chan);
-
- /* Set the total byte count */
- fpga_set_byte_count(priv->regs, priv->bytes);
- dev_dbg(priv->dev, "total byte count %zu bytes\n", priv->bytes);
-
- /* Enable the controller for DMA programming */
- fpga_programmer_enable(priv, true);
- dev_dbg(priv->dev, "enabled the controller\n");
-
- /* Wait for the interrupt handler to signal that programming finished */
- timeout = wait_for_completion_timeout(&priv->completion, 2 * HZ);
- if (!timeout) {
- dev_err(priv->dev, "Timed out waiting for completion\n");
- ret = -ETIMEDOUT;
- goto out_disable_controller;
- }
-
- /* Retrieve the status from the interrupt handler */
- ret = priv->status;
-
-out_disable_controller:
- fpga_programmer_disable(priv);
-out_dma_unmap:
- fpga_dma_unmap(priv);
-out_free_table:
- sg_free_table(&table);
-out_return:
- return ret;
-}
-
-/*
- * Interrupt Handling
- */
-
-static irqreturn_t fpga_irq(int irq, void *dev_id)
-{
- struct fpga_dev *priv = dev_id;
-
- /* Save the status */
- priv->status = fpga_config_error(priv->regs) ? -EIO : 0;
- dev_dbg(priv->dev, "INTERRUPT status %d\n", priv->status);
- fpga_dump_registers(priv);
-
- /* Disabling the programmer clears the interrupt */
- fpga_programmer_disable(priv);
-
- /* Notify any waiters */
- complete(&priv->completion);
-
- return IRQ_HANDLED;
-}
-
-/*
- * SYSFS Helpers
- */
-
-/**
- * fpga_do_stop() - deconfigure (reset) the DATA-FPGAs
- * @priv: the driver's private data structure
- *
- * LOCKING: must hold priv->lock
- */
-static int fpga_do_stop(struct fpga_dev *priv)
-{
- u32 val;
-
- /* Set the led to unprogrammed */
- ledtrig_fpga_programmed(false);
-
- /* Pulse the config line to reset the FPGAs */
- val = CFG_CTL_ENABLE | CFG_CTL_RESET;
- iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL);
- iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL);
-
- return 0;
-}
-
-static noinline int fpga_do_program(struct fpga_dev *priv)
-{
- int ret;
-
- if (priv->bytes != priv->fw_size) {
- dev_err(priv->dev, "Incorrect bitfile size: got %zu bytes, "
- "should be %zu bytes\n",
- priv->bytes, priv->fw_size);
- return -EINVAL;
- }
-
- if (!fpga_power_enabled(priv)) {
- dev_err(priv->dev, "Power not enabled\n");
- return -EINVAL;
- }
-
- if (!fpga_power_good(priv)) {
- dev_err(priv->dev, "Power not good\n");
- return -EINVAL;
- }
-
- /* Set the LED to unprogrammed */
- ledtrig_fpga_programmed(false);
-
- /* Try to program the FPGAs using DMA */
- ret = fpga_program_dma(priv);
-
- /* If DMA fails or is unavailable, fall back to the CPU */
- if (ret) {
- dev_warn(priv->dev, "Falling back to CPU programming\n");
- ret = fpga_program_cpu(priv);
- }
-
- if (ret) {
- dev_err(priv->dev, "Unable to program FPGAs\n");
- return ret;
- }
-
- /* Drop the firmware bitfile from memory */
- fpga_drop_firmware_data(priv);
-
- dev_dbg(priv->dev, "FPGA programming successful\n");
- ledtrig_fpga_programmed(true);
-
- return 0;
-}
-
-/*
- * File Operations
- */
-
-static int fpga_open(struct inode *inode, struct file *filp)
-{
- /*
- * The miscdevice layer puts our struct miscdevice into the
- * filp->private_data field. We use this to find our private
- * data and then overwrite it with our own private structure.
- */
- struct fpga_dev *priv = container_of(filp->private_data,
- struct fpga_dev, miscdev);
- unsigned int nr_pages;
- int ret;
-
- /* We only allow one process at a time */
- ret = mutex_lock_interruptible(&priv->lock);
- if (ret)
- return ret;
-
- filp->private_data = priv;
- kref_get(&priv->ref);
-
- /* Truncation: drop any existing data */
- if (filp->f_flags & O_TRUNC)
- priv->bytes = 0;
-
- /* Check if we have already allocated a buffer */
- if (priv->buf_allocated)
- return 0;
-
- /* Allocate a buffer to hold enough data for the bitfile */
- nr_pages = DIV_ROUND_UP(priv->fw_size, PAGE_SIZE);
- ret = fpga_dma_init(priv, nr_pages);
- if (ret) {
- dev_err(priv->dev, "unable to allocate data buffer\n");
- mutex_unlock(&priv->lock);
- kref_put(&priv->ref, fpga_dev_remove);
- return ret;
- }
-
- priv->buf_allocated = true;
- return 0;
-}
-
-static int fpga_release(struct inode *inode, struct file *filp)
-{
- struct fpga_dev *priv = filp->private_data;
-
- mutex_unlock(&priv->lock);
- kref_put(&priv->ref, fpga_dev_remove);
- return 0;
-}
-
-static ssize_t fpga_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *f_pos)
-{
- struct fpga_dev *priv = filp->private_data;
-
- /* FPGA bitfiles have an exact size: disallow anything else */
- if (priv->bytes >= priv->fw_size)
- return -ENOSPC;
-
- count = min_t(size_t, priv->fw_size - priv->bytes, count);
- if (copy_from_user(priv->vaddr + priv->bytes, buf, count))
- return -EFAULT;
-
- priv->bytes += count;
- return count;
-}
-
-static ssize_t fpga_read(struct file *filp, char __user *buf, size_t count,
- loff_t *f_pos)
-{
- struct fpga_dev *priv = filp->private_data;
- return simple_read_from_buffer(buf, count, f_pos,
- priv->vaddr, priv->bytes);
-}
-
-static loff_t fpga_llseek(struct file *filp, loff_t offset, int origin)
-{
- struct fpga_dev *priv = filp->private_data;
-
- /* only read-only opens are allowed to seek */
- if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
- return -EINVAL;
-
- return fixed_size_llseek(filp, offset, origin, priv->fw_size);
-}
-
-static const struct file_operations fpga_fops = {
- .open = fpga_open,
- .release = fpga_release,
- .write = fpga_write,
- .read = fpga_read,
- .llseek = fpga_llseek,
-};
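
For orientation, here is a minimal userspace sketch of how these file operations are meant to be driven: write the bitfile through the misc device, close it (fpga_release() drops priv->lock), then start programming through the "program" sysfs attribute defined below. The /dev and /sys paths are illustrative assumptions, not taken from the driver; the real node name comes from drv_name and the misc device's sysfs directory.

#include <fcntl.h>
#include <unistd.h>

/* Paths below are illustrative assumptions, not taken from the driver */
#define FPGA_DEV "/dev/carma-fpga-program"
#define FPGA_CTL "/sys/class/misc/carma-fpga-program/program"

int program_bitfile(const char *bitfile)
{
	char buf[4096];
	ssize_t n;
	int in, out = -1, ctl, ret = -1;

	in = open(bitfile, O_RDONLY);
	out = open(FPGA_DEV, O_WRONLY | O_TRUNC); /* O_TRUNC drops stale data */
	if (in < 0 || out < 0)
		goto out_close;

	/* fpga_write() accepts data until exactly fw_size bytes have arrived */
	while ((n = read(in, buf, sizeof(buf))) > 0)
		if (write(out, buf, n) != n)
			goto out_close;

	/*
	 * Close first: fpga_release() drops priv->lock, which
	 * program_store() must take before programming can start
	 */
	close(out);
	out = -1;

	ctl = open(FPGA_CTL, O_WRONLY);
	if (ctl < 0)
		goto out_close;
	ret = (write(ctl, "1", 1) == 1) ? 0 : -1; /* "1" -> fpga_do_program() */
	close(ctl);

out_close:
	if (in >= 0)
		close(in);
	if (out >= 0)
		close(out);
	return ret;
}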
-
-/*
- * Device Attributes
- */
-
-static ssize_t pfail_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct fpga_dev *priv = dev_get_drvdata(dev);
- u8 val;
-
- val = ioread8(priv->regs + CTL_PWR_FAIL);
- return snprintf(buf, PAGE_SIZE, "0x%.2x\n", val);
-}
-
-static ssize_t pgood_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct fpga_dev *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_good(priv));
-}
-
-static ssize_t penable_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct fpga_dev *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_enabled(priv));
-}
-
-static ssize_t penable_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct fpga_dev *priv = dev_get_drvdata(dev);
- unsigned long val;
- int ret;
-
- ret = kstrtoul(buf, 0, &val);
- if (ret)
- return ret;
-
- if (val) {
- ret = fpga_enable_power_supplies(priv);
- if (ret)
- return ret;
- } else {
- fpga_do_stop(priv);
- fpga_disable_power_supplies(priv);
- }
-
- return count;
-}
-
-static ssize_t program_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct fpga_dev *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", fpga_running(priv));
-}
-
-static ssize_t program_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct fpga_dev *priv = dev_get_drvdata(dev);
- unsigned long val;
- int ret;
-
- ret = kstrtoul(buf, 0, &val);
- if (ret)
- return ret;
-
- /* We can't have an image writer and be programming simultaneously */
- if (mutex_lock_interruptible(&priv->lock))
- return -ERESTARTSYS;
-
- /* Program or reset the FPGAs */
- ret = val ? fpga_do_program(priv) : fpga_do_stop(priv);
- if (ret)
- goto out_unlock;
-
- /* Success */
- ret = count;
-
-out_unlock:
- mutex_unlock(&priv->lock);
- return ret;
-}
-
-static DEVICE_ATTR(power_fail, S_IRUGO, pfail_show, NULL);
-static DEVICE_ATTR(power_good, S_IRUGO, pgood_show, NULL);
-static DEVICE_ATTR(power_enable, S_IRUGO | S_IWUSR,
- penable_show, penable_store);
-
-static DEVICE_ATTR(program, S_IRUGO | S_IWUSR,
- program_show, program_store);
-
-static struct attribute *fpga_attributes[] = {
- &dev_attr_power_fail.attr,
- &dev_attr_power_good.attr,
- &dev_attr_power_enable.attr,
- &dev_attr_program.attr,
- NULL,
-};
-
-static const struct attribute_group fpga_attr_group = {
- .attrs = fpga_attributes,
-};
-
-/*
- * OpenFirmware Device Subsystem
- */
-
-#define SYS_REG_VERSION 0x00
-#define SYS_REG_GEOGRAPHIC 0x10
-
-static bool dma_filter(struct dma_chan *chan, void *data)
-{
- /*
- * DMA Channel #0 is the only acceptable device
- *
- * This probably won't survive an unload/load cycle of the Freescale
- * DMAEngine driver, but that won't be a problem
- */
- return chan->chan_id == 0 && chan->device->dev_id == 0;
-}
-
-static int fpga_of_remove(struct platform_device *op)
-{
- struct fpga_dev *priv = platform_get_drvdata(op);
- struct device *this_device = priv->miscdev.this_device;
-
- sysfs_remove_group(&this_device->kobj, &fpga_attr_group);
- misc_deregister(&priv->miscdev);
-
- free_irq(priv->irq, priv);
- irq_dispose_mapping(priv->irq);
-
- /* make sure the power supplies are off */
- fpga_disable_power_supplies(priv);
-
- /* unmap registers */
- iounmap(priv->immr);
- iounmap(priv->regs);
-
- dma_release_channel(priv->chan);
-
- /* drop our reference to the private data structure */
- kref_put(&priv->ref, fpga_dev_remove);
- return 0;
-}
-
-/* CTL-CPLD Version Register */
-#define CTL_CPLD_VERSION 0x2000
-
-static int fpga_of_probe(struct platform_device *op)
-{
- struct device_node *of_node = op->dev.of_node;
- struct device *this_device;
- struct fpga_dev *priv;
- dma_cap_mask_t mask;
- u32 ver;
- int ret;
-
- /* Allocate private data */
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&op->dev, "Unable to allocate private data\n");
- ret = -ENOMEM;
- goto out_return;
- }
-
- /* Setup the miscdevice */
- priv->miscdev.minor = MISC_DYNAMIC_MINOR;
- priv->miscdev.name = drv_name;
- priv->miscdev.fops = &fpga_fops;
-
- kref_init(&priv->ref);
-
- platform_set_drvdata(op, priv);
- priv->dev = &op->dev;
- mutex_init(&priv->lock);
- init_completion(&priv->completion);
-
- dev_set_drvdata(priv->dev, priv);
- dma_cap_zero(mask);
- dma_cap_set(DMA_MEMCPY, mask);
- dma_cap_set(DMA_SLAVE, mask);
- dma_cap_set(DMA_SG, mask);
-
- /* Get control of DMA channel #0 */
- priv->chan = dma_request_channel(mask, dma_filter, NULL);
- if (!priv->chan) {
- dev_err(&op->dev, "Unable to acquire DMA channel #0\n");
- ret = -ENODEV;
- goto out_free_priv;
- }
-
- /* Remap the registers for use */
- priv->regs = of_iomap(of_node, 0);
- if (!priv->regs) {
- dev_err(&op->dev, "Unable to ioremap registers\n");
- ret = -ENOMEM;
- goto out_dma_release_channel;
- }
-
- /* Remap the IMMR for use */
- priv->immr = ioremap(get_immrbase(), 0x100000);
- if (!priv->immr) {
- dev_err(&op->dev, "Unable to ioremap IMMR\n");
- ret = -ENOMEM;
- goto out_unmap_regs;
- }
-
- /*
- * Check that external DMA is configured
- *
- * U-Boot does this for us, but we should check it and bail out if
- * there is a problem. Failing to have this register set up correctly
- * will cause the DMA controller to transfer a single cacheline's
- * worth of data, then wedge itself.
- */
- if ((ioread32be(priv->immr + 0x114) & 0xE00) != 0xE00) {
- dev_err(&op->dev, "External DMA control not configured\n");
- ret = -ENODEV;
- goto out_unmap_immr;
- }
-
- /*
- * Check the CTL-CPLD version
- *
- * This driver uses the CTL-CPLD DATA-FPGA power sequencer, and we
- * don't want to run on any version of the CTL-CPLD that does not use
- * a compatible register layout.
- *
- * v2: changed register layout, added power sequencer
- * v3: added glitch filter on the i2c overcurrent/overtemp outputs
- */
- ver = ioread8(priv->regs + CTL_CPLD_VERSION);
- if (ver != 0x02 && ver != 0x03) {
- dev_err(&op->dev, "CTL-CPLD is not version 0x02 or 0x03!\n");
- ret = -ENODEV;
- goto out_unmap_immr;
- }
-
- /* Set the exact size that the firmware image should be */
- ver = ioread32be(priv->regs + SYS_REG_VERSION);
- priv->fw_size = (ver & (1 << 18)) ? FW_SIZE_EP2S130 : FW_SIZE_EP2S90;
-
- /* Find the correct IRQ number */
- priv->irq = irq_of_parse_and_map(of_node, 0);
- if (priv->irq == NO_IRQ) {
- dev_err(&op->dev, "Unable to find IRQ line\n");
- ret = -ENODEV;
- goto out_unmap_immr;
- }
-
- /* Request the IRQ */
- ret = request_irq(priv->irq, fpga_irq, IRQF_SHARED, drv_name, priv);
- if (ret) {
- dev_err(&op->dev, "Unable to request IRQ %d\n", priv->irq);
- ret = -ENODEV;
- goto out_irq_dispose_mapping;
- }
-
- /* Reset and stop the FPGAs, just in case */
- fpga_do_stop(priv);
-
- /* Register the miscdevice */
- ret = misc_register(&priv->miscdev);
- if (ret) {
- dev_err(&op->dev, "Unable to register miscdevice\n");
- goto out_free_irq;
- }
-
- /* Create the sysfs files */
- this_device = priv->miscdev.this_device;
- dev_set_drvdata(this_device, priv);
- ret = sysfs_create_group(&this_device->kobj, &fpga_attr_group);
- if (ret) {
- dev_err(&op->dev, "Unable to create sysfs files\n");
- goto out_misc_deregister;
- }
-
- dev_info(priv->dev, "CARMA FPGA Programmer: %s rev%s with %s FPGAs\n",
- (ver & (1 << 17)) ? "Correlator" : "Digitizer",
- (ver & (1 << 16)) ? "B" : "A",
- (ver & (1 << 18)) ? "EP2S130" : "EP2S90");
-
- return 0;
-
-out_misc_deregister:
- misc_deregister(&priv->miscdev);
-out_free_irq:
- free_irq(priv->irq, priv);
-out_irq_dispose_mapping:
- irq_dispose_mapping(priv->irq);
-out_unmap_immr:
- iounmap(priv->immr);
-out_unmap_regs:
- iounmap(priv->regs);
-out_dma_release_channel:
- dma_release_channel(priv->chan);
-out_free_priv:
- kref_put(&priv->ref, fpga_dev_remove);
-out_return:
- return ret;
-}
-
-static const struct of_device_id fpga_of_match[] = {
- { .compatible = "carma,fpga-programmer", },
- {},
-};
-
-static struct platform_driver fpga_of_driver = {
- .probe = fpga_of_probe,
- .remove = fpga_of_remove,
- .driver = {
- .name = drv_name,
- .of_match_table = fpga_of_match,
- },
-};
-
-/*
- * Module Init / Exit
- */
-
-static int __init fpga_init(void)
-{
- led_trigger_register_simple("fpga", &ledtrig_fpga);
- return platform_driver_register(&fpga_of_driver);
-}
-
-static void __exit fpga_exit(void)
-{
- platform_driver_unregister(&fpga_of_driver);
- led_trigger_unregister_simple(ledtrig_fpga);
-}
-
-MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
-MODULE_DESCRIPTION("CARMA Board DATA-FPGA Programmer");
-MODULE_LICENSE("GPL");
-
-module_init(fpga_init);
-module_exit(fpga_exit);
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
deleted file mode 100644
index 5aba3fd789de..000000000000
--- a/drivers/misc/carma/carma-fpga.c
+++ /dev/null
@@ -1,1507 +0,0 @@
-/*
- * CARMA DATA-FPGA Access Driver
- *
- * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-/*
- * FPGA Memory Dump Format
- *
- * FPGA #0 control registers (32 x 32-bit words)
- * FPGA #1 control registers (32 x 32-bit words)
- * FPGA #2 control registers (32 x 32-bit words)
- * FPGA #3 control registers (32 x 32-bit words)
- * SYSFPGA control registers (32 x 32-bit words)
- * FPGA #0 correlation array (NUM_CORL0 correlation blocks)
- * FPGA #1 correlation array (NUM_CORL1 correlation blocks)
- * FPGA #2 correlation array (NUM_CORL2 correlation blocks)
- * FPGA #3 correlation array (NUM_CORL3 correlation blocks)
- *
- * Each correlation array consists of:
- *
- * Correlation Data (2 x NUM_LAGSn x 32-bit words)
- * Pipeline Metadata (2 x NUM_METAn x 32-bit words)
- * Quantization Counters (2 x NUM_QCNTn x 32-bit words)
- *
- * The NUM_CORLn, NUM_LAGSn, NUM_METAn, and NUM_QCNTn values come from
- * the FPGA configuration registers. They do not change once the FPGAs
- * have been programmed; they only change on re-programming.
- */
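
As a reading aid, the layout above can be sketched as a C struct. This is purely illustrative: the driver never declares such a struct, since the NUM_* sizes are only known at runtime (see data_calculate_bufsize() below), and NUM_FPGA here mirrors the definition later in this file.

#include <linux/types.h>

#define NUM_FPGA 4	/* matches the definition later in this file */

struct corl_dump {
	u32 fpga_regs[NUM_FPGA][32];	/* FPGA #0..#3 control registers */
	u32 sys_regs[32];		/* SYSFPGA control registers */
	/*
	 * Followed, for each DATA-FPGA n, by NUM_CORLn correlation
	 * blocks, each consisting of:
	 *   u32 data[2 * NUM_LAGSn];   correlation data
	 *   u32 meta[2 * NUM_METAn];   pipeline metadata
	 *   u32 qcnt[2 * NUM_QCNTn];   quantization counters
	 */
};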
-
-/*
- * Basic Description:
- *
- * This driver is used to capture correlation spectra off of the four data
- * processing FPGAs. The FPGAs are often reprogrammed at runtime; therefore,
- * this driver supports dynamic enable/disable of capture while the device
- * remains open.
- *
- * The nominal capture rate is 64Hz (every 15.625ms). To facilitate this fast
- * capture rate, all buffers are pre-allocated to avoid any potentially long
- * running memory allocations while capturing.
- *
- * There are two lists and one pointer which are used to keep track of the
- * different states of data buffers.
- *
- * 1) free list
- * This list holds all empty data buffers which are ready to receive data.
- *
- * 2) inflight pointer
- * This pointer holds the currently inflight data buffer, i.e. the buffer
- * into which the DMA engine is currently copying data.
- *
- * 3) used list
- * This list holds data buffers which have been filled, and are waiting to be
- * read by userspace.
- *
- * All buffers start life on the free list, then move successively to the
- * inflight pointer, and then to the used list. After they have been read by
- * userspace, they are moved back to the free list. The cycle repeats as long
- * as necessary.
- *
- * Note that buffers are mapped and ready for DMA whenever they are on the
- * free list, referenced by the inflight pointer, or on the used list. They
- * are only unmapped while userspace is reading them.
- */
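
A condensed sketch of those transitions, assuming the struct fpga_device and struct data_buf definitions that appear further down; the real code performs each step in data_irq(), data_dma_cb() and data_read() respectively, always under priv->lock.

#include <linux/list.h>

static void buffer_lifecycle_sketch(struct fpga_device *priv)
{
	struct data_buf *buf;

	/* free -> inflight: claimed by the interrupt handler */
	buf = list_first_entry(&priv->free, struct data_buf, entry);
	list_del_init(&buf->entry);
	priv->inflight = buf;

	/* inflight -> used: moved by the DMA completion callback */
	list_move_tail(&priv->inflight->entry, &priv->used);
	priv->inflight = NULL;

	/* used -> free: recycled once userspace has consumed the data */
	buf = list_first_entry(&priv->used, struct data_buf, entry);
	list_move_tail(&buf->entry, &priv->free);
}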
-
-/*
- * Notes on the IRQ masking scheme:
- *
- * The IRQ masking scheme here is different than most other hardware. The only
- * way for the DATA-FPGAs to detect if the kernel has taken too long to copy
- * the data is if the status registers are not cleared before the next
- * correlation data dump is ready.
- *
- * The interrupt line is connected to the status registers, such that when they
- * are cleared, the interrupt is de-asserted. Therein lies our problem. We need
- * to schedule a long-running DMA operation and return from the interrupt
- * handler quickly, but we cannot clear the status registers.
- *
- * To handle this, the system controller FPGA has the capability to connect the
- * interrupt line to a user-controlled GPIO pin. This pin is driven high
- * (unasserted) and left that way. To mask the interrupt, we change the
- * interrupt source to the GPIO pin. Tada, we hid the interrupt. :)
- */
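
Condensed into a single helper, the trick looks like the sketch below. This is illustrative only; the real implementations are data_disable_interrupts() and data_enable_interrupts() further down, which also clear the FPGA status registers on unmask. The 0x24 offset and the 0x2F/0x3F selector values are the ones used in this file.

#include <linux/io.h>
#include <linux/types.h>

/*
 * Select which source drives the CPU interrupt line: 0x2F routes the
 * permanently-deasserted GPIO pin (interrupt hidden), 0x3F routes the
 * FPGA corl_done line (interrupt visible). 0x24 is SYS_IRQ_SOURCE_CTL.
 */
static void irq_source_select(void __iomem *regs, bool mask)
{
	iowrite32be(mask ? 0x2F : 0x3F, regs + 0x24);
}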
-
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
-#include <linux/dma-mapping.h>
-#include <linux/miscdevice.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-#include <linux/seq_file.h>
-#include <linux/highmem.h>
-#include <linux/debugfs.h>
-#include <linux/vmalloc.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/kref.h>
-#include <linux/io.h>
-
-/* system controller registers */
-#define SYS_IRQ_SOURCE_CTL 0x24
-#define SYS_IRQ_OUTPUT_EN 0x28
-#define SYS_IRQ_OUTPUT_DATA 0x2C
-#define SYS_IRQ_INPUT_DATA 0x30
-#define SYS_FPGA_CONFIG_STATUS 0x44
-
-/* GPIO IRQ line assignment */
-#define IRQ_CORL_DONE 0x10
-
-/* FPGA registers */
-#define MMAP_REG_VERSION 0x00
-#define MMAP_REG_CORL_CONF1 0x08
-#define MMAP_REG_CORL_CONF2 0x0C
-#define MMAP_REG_STATUS 0x48
-
-#define SYS_FPGA_BLOCK 0xF0000000
-
-#define DATA_FPGA_START 0x400000
-#define DATA_FPGA_SIZE 0x80000
-
-static const char drv_name[] = "carma-fpga";
-
-#define NUM_FPGA 4
-
-#define MIN_DATA_BUFS 8
-#define MAX_DATA_BUFS 64
-
-struct fpga_info {
- unsigned int num_lag_ram;
- unsigned int blk_size;
-};
-
-struct data_buf {
- struct list_head entry;
- void *vaddr;
- struct scatterlist *sglist;
- int sglen;
- int nr_pages;
- size_t size;
-};
-
-struct fpga_device {
- /* character device */
- struct miscdevice miscdev;
- struct device *dev;
- struct mutex mutex;
-
- /* reference count */
- struct kref ref;
-
- /* FPGA registers and information */
- struct fpga_info info[NUM_FPGA];
- void __iomem *regs;
- int irq;
-
- /* FPGA Physical Address/Size Information */
- resource_size_t phys_addr;
- size_t phys_size;
-
- /* DMA structures */
- struct sg_table corl_table;
- unsigned int corl_nents;
- struct dma_chan *chan;
-
- /* Protection for all members below */
- spinlock_t lock;
-
- /* Device enable/disable flag */
- bool enabled;
-
- /* Correlation data buffers */
- wait_queue_head_t wait;
- struct list_head free;
- struct list_head used;
- struct data_buf *inflight;
-
- /* Information about data buffers */
- unsigned int num_dropped;
- unsigned int num_buffers;
- size_t bufsize;
- struct dentry *dbg_entry;
-};
-
-struct fpga_reader {
- struct fpga_device *priv;
- struct data_buf *buf;
- off_t buf_start;
-};
-
-static void fpga_device_release(struct kref *ref)
-{
- struct fpga_device *priv = container_of(ref, struct fpga_device, ref);
-
- /* the last reader has exited, cleanup the last bits */
- mutex_destroy(&priv->mutex);
- kfree(priv);
-}
-
-/*
- * Data Buffer Allocation Helpers
- */
-
-static int carma_dma_init(struct data_buf *buf, int nr_pages)
-{
- struct page *pg;
- int i;
-
- buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
- if (NULL == buf->vaddr) {
- pr_debug("vmalloc_32(%d pages) failed\n", nr_pages);
- return -ENOMEM;
- }
-
- pr_debug("vmalloc is at addr 0x%08lx, size=%d\n",
- (unsigned long)buf->vaddr,
- nr_pages << PAGE_SHIFT);
-
- memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
- buf->nr_pages = nr_pages;
-
- buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist));
- if (NULL == buf->sglist)
- goto vzalloc_err;
-
- sg_init_table(buf->sglist, buf->nr_pages);
- for (i = 0; i < buf->nr_pages; i++) {
- pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
- if (NULL == pg)
- goto vmalloc_to_page_err;
- sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
- }
- return 0;
-
-vmalloc_to_page_err:
- vfree(buf->sglist);
- buf->sglist = NULL;
-vzalloc_err:
- vfree(buf->vaddr);
- buf->vaddr = NULL;
- return -ENOMEM;
-}
-
-static int carma_dma_map(struct device *dev, struct data_buf *buf)
-{
- buf->sglen = dma_map_sg(dev, buf->sglist,
- buf->nr_pages, DMA_FROM_DEVICE);
-
- if (0 == buf->sglen) {
- pr_warn("%s: dma_map_sg failed\n", __func__);
- return -ENOMEM;
- }
- return 0;
-}
-
-static int carma_dma_unmap(struct device *dev, struct data_buf *buf)
-{
- if (!buf->sglen)
- return 0;
-
- dma_unmap_sg(dev, buf->sglist, buf->sglen, DMA_FROM_DEVICE);
- buf->sglen = 0;
- return 0;
-}
-
-/**
- * data_free_buffer() - free a single data buffer and all allocated memory
- * @buf: the buffer to free
- *
- * This will free all of the pages allocated to the given data buffer, and
- * then free the structure itself
- */
-static void data_free_buffer(struct data_buf *buf)
-{
- /* It is ok to free a NULL buffer */
- if (!buf)
- return;
-
- /* free all memory */
- vfree(buf->sglist);
- vfree(buf->vaddr);
- kfree(buf);
-}
-
-/**
- * data_alloc_buffer() - allocate and fill a data buffer with pages
- * @bytes: the number of bytes required
- *
- * This allocates all space needed for a data buffer. It must be mapped before
- * use in a DMA transaction using carma_dma_map().
- *
- * Returns NULL on failure
- */
-static struct data_buf *data_alloc_buffer(const size_t bytes)
-{
- unsigned int nr_pages;
- struct data_buf *buf;
- int ret;
-
- /* calculate the number of pages necessary */
- nr_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
-
- /* allocate the buffer structure */
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- goto out_return;
-
- /* initialize internal fields */
- INIT_LIST_HEAD(&buf->entry);
- buf->size = bytes;
-
- /* allocate the buffer */
- ret = carma_dma_init(buf, nr_pages);
- if (ret)
- goto out_free_buf;
-
- return buf;
-
-out_free_buf:
- kfree(buf);
-out_return:
- return NULL;
-}
-
-/**
- * data_free_buffers() - free all allocated buffers
- * @priv: the driver's private data structure
- *
- * Free all buffers allocated by the driver (except those currently in the
- * process of being read by userspace).
- *
- * LOCKING: must hold dev->mutex
- * CONTEXT: user
- */
-static void data_free_buffers(struct fpga_device *priv)
-{
- struct data_buf *buf, *tmp;
-
- /* the device should be stopped, no DMA in progress */
- BUG_ON(priv->inflight != NULL);
-
- list_for_each_entry_safe(buf, tmp, &priv->free, entry) {
- list_del_init(&buf->entry);
- carma_dma_unmap(priv->dev, buf);
- data_free_buffer(buf);
- }
-
- list_for_each_entry_safe(buf, tmp, &priv->used, entry) {
- list_del_init(&buf->entry);
- carma_dma_unmap(priv->dev, buf);
- data_free_buffer(buf);
- }
-
- priv->num_buffers = 0;
- priv->bufsize = 0;
-}
-
-/**
- * data_alloc_buffers() - allocate one second's worth of data buffers
- * @priv: the driver's private data structure
- *
- * Allocate enough buffers for a whole second's worth of data
- *
- * This routine will attempt to degrade nicely by succeeding even if a full
- * second's worth of data buffers could not be allocated, as long as a minimum
- * number was allocated. In this case, it will print a message to the kernel
- * log.
- *
- * The device must not be modifying any lists when this is called.
- *
- * CONTEXT: user
- * LOCKING: must hold dev->mutex
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static int data_alloc_buffers(struct fpga_device *priv)
-{
- struct data_buf *buf;
- int i, ret;
-
- for (i = 0; i < MAX_DATA_BUFS; i++) {
-
- /* allocate a buffer */
- buf = data_alloc_buffer(priv->bufsize);
- if (!buf)
- break;
-
- /* map it for DMA */
- ret = carma_dma_map(priv->dev, buf);
- if (ret) {
- data_free_buffer(buf);
- break;
- }
-
- /* add it to the list of free buffers */
- list_add_tail(&buf->entry, &priv->free);
- priv->num_buffers++;
- }
-
- /* Make sure we allocated the minimum required number of buffers */
- if (priv->num_buffers < MIN_DATA_BUFS) {
- dev_err(priv->dev, "Unable to allocate enough data buffers\n");
- data_free_buffers(priv);
- return -ENOMEM;
- }
-
- /* Warn if we are running in a degraded state, but do not fail */
- if (priv->num_buffers < MAX_DATA_BUFS) {
- dev_warn(priv->dev,
- "Unable to allocate %d buffers, using %d buffers instead\n",
- MAX_DATA_BUFS, i);
- }
-
- return 0;
-}
-
-/*
- * DMA Operations Helpers
- */
-
-/**
- * fpga_start_addr() - get the physical address of a DATA-FPGA
- * @priv: the driver's private data structure
- * @fpga: the DATA-FPGA number (zero based)
- */
-static dma_addr_t fpga_start_addr(struct fpga_device *priv, unsigned int fpga)
-{
- return priv->phys_addr + DATA_FPGA_START + (DATA_FPGA_SIZE * fpga);
-}
-
-/**
- * fpga_block_addr() - get the physical address of a correlation data block
- * @priv: the driver's private data structure
- * @fpga: the DATA-FPGA number (zero based)
- * @blknum: the correlation block number (zero based)
- */
-static dma_addr_t fpga_block_addr(struct fpga_device *priv, unsigned int fpga,
- unsigned int blknum)
-{
- return fpga_start_addr(priv, fpga) + (0x10000 * (1 + blknum));
-}
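
A quick worked example of the address arithmetic above, using arbitrary FPGA and block numbers (DATA_FPGA_START is 0x400000 and DATA_FPGA_SIZE is 0x80000, as defined earlier in this file):

/*
 * Example: FPGA #1, correlation block #2
 *
 *   fpga_start_addr(priv, 1)    = phys_addr + 0x400000 + (0x80000 * 1)
 *                               = phys_addr + 0x480000
 *   fpga_block_addr(priv, 1, 2) = fpga_start_addr(priv, 1) + 0x10000 * (1 + 2)
 *                               = phys_addr + 0x4B0000
 */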
-
-#define REG_BLOCK_SIZE (32 * 4)
-
-/**
- * data_setup_corl_table() - create the scatterlist for correlation dumps
- * @priv: the driver's private data structure
- *
- * Create the scatterlist for transferring a correlation dump from the
- * DATA FPGAs. This structure will be reused for each buffer that needs
- * to be filled with correlation data.
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static int data_setup_corl_table(struct fpga_device *priv)
-{
- struct sg_table *table = &priv->corl_table;
- struct scatterlist *sg;
- struct fpga_info *info;
- int i, j, ret;
-
- /* Calculate the number of entries needed */
- priv->corl_nents = (1 + NUM_FPGA) * REG_BLOCK_SIZE;
- for (i = 0; i < NUM_FPGA; i++)
- priv->corl_nents += priv->info[i].num_lag_ram;
-
- /* Allocate the scatterlist table */
- ret = sg_alloc_table(table, priv->corl_nents, GFP_KERNEL);
- if (ret) {
- dev_err(priv->dev, "unable to allocate DMA table\n");
- return ret;
- }
-
- /* Add the DATA FPGA registers to the scatterlist */
- sg = table->sgl;
- for (i = 0; i < NUM_FPGA; i++) {
- sg_dma_address(sg) = fpga_start_addr(priv, i);
- sg_dma_len(sg) = REG_BLOCK_SIZE;
- sg = sg_next(sg);
- }
-
- /* Add the SYS-FPGA registers to the scatterlist */
- sg_dma_address(sg) = SYS_FPGA_BLOCK;
- sg_dma_len(sg) = REG_BLOCK_SIZE;
- sg = sg_next(sg);
-
- /* Add the FPGA correlation data blocks to the scatterlist */
- for (i = 0; i < NUM_FPGA; i++) {
- info = &priv->info[i];
- for (j = 0; j < info->num_lag_ram; j++) {
- sg_dma_address(sg) = fpga_block_addr(priv, i, j);
- sg_dma_len(sg) = info->blk_size;
- sg = sg_next(sg);
- }
- }
-
- /*
- * All physical addresses and lengths are present in the structure
- * now. It can be reused for every FPGA DATA interrupt
- */
- return 0;
-}
-
-/*
- * FPGA Register Access Helpers
- */
-
-static void fpga_write_reg(struct fpga_device *priv, unsigned int fpga,
- unsigned int reg, u32 val)
-{
- const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
- iowrite32be(val, priv->regs + fpga_start + reg);
-}
-
-static u32 fpga_read_reg(struct fpga_device *priv, unsigned int fpga,
- unsigned int reg)
-{
- const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
- return ioread32be(priv->regs + fpga_start + reg);
-}
-
-/**
- * data_calculate_bufsize() - calculate the data buffer size required
- * @priv: the driver's private data structure
- *
- * Calculate the total buffer size needed to hold a single block
- * of correlation data
- *
- * CONTEXT: user
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static int data_calculate_bufsize(struct fpga_device *priv)
-{
- u32 num_corl, num_lags, num_meta, num_qcnt, num_pack;
- u32 conf1, conf2, version;
- u32 num_lag_ram, blk_size;
- int i;
-
- /* Each buffer starts with the 5 FPGA register areas */
- priv->bufsize = (1 + NUM_FPGA) * REG_BLOCK_SIZE;
-
- /* Read and store the configuration data for each FPGA */
- for (i = 0; i < NUM_FPGA; i++) {
- version = fpga_read_reg(priv, i, MMAP_REG_VERSION);
- conf1 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF1);
- conf2 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF2);
-
- /* minor version 2 and later */
- if ((version & 0x000000FF) >= 2) {
- num_corl = (conf1 & 0x000000F0) >> 4;
- num_pack = (conf1 & 0x00000F00) >> 8;
- num_lags = (conf1 & 0x00FFF000) >> 12;
- num_meta = (conf1 & 0x7F000000) >> 24;
- num_qcnt = (conf2 & 0x00000FFF) >> 0;
- } else {
- num_corl = (conf1 & 0x000000F0) >> 4;
- num_pack = 1; /* implied */
- num_lags = (conf1 & 0x000FFF00) >> 8;
- num_meta = (conf1 & 0x7FF00000) >> 20;
- num_qcnt = (conf2 & 0x00000FFF) >> 0;
- }
-
- num_lag_ram = (num_corl + num_pack - 1) / num_pack;
- blk_size = ((num_pack * num_lags) + num_meta + num_qcnt) * 8;
-
- priv->info[i].num_lag_ram = num_lag_ram;
- priv->info[i].blk_size = blk_size;
- priv->bufsize += num_lag_ram * blk_size;
-
- dev_dbg(priv->dev, "FPGA %d NUM_CORL: %d\n", i, num_corl);
- dev_dbg(priv->dev, "FPGA %d NUM_PACK: %d\n", i, num_pack);
- dev_dbg(priv->dev, "FPGA %d NUM_LAGS: %d\n", i, num_lags);
- dev_dbg(priv->dev, "FPGA %d NUM_META: %d\n", i, num_meta);
- dev_dbg(priv->dev, "FPGA %d NUM_QCNT: %d\n", i, num_qcnt);
- dev_dbg(priv->dev, "FPGA %d BLK_SIZE: %d\n", i, blk_size);
- }
-
- dev_dbg(priv->dev, "TOTAL BUFFER SIZE: %zu bytes\n", priv->bufsize);
- return 0;
-}
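
To make the arithmetic concrete, here is a worked example of the computation above with made-up register fields; the five values are illustrative assumptions, not readings from real hardware.

/*
 * Assume, for every FPGA: num_corl = 8, num_pack = 2, num_lags = 64,
 * num_meta = 16, num_qcnt = 32.
 *
 *   num_lag_ram = (8 + 2 - 1) / 2              = 4
 *   blk_size    = ((2 * 64) + 16 + 32) * 8     = 1408 bytes
 *   per FPGA    = num_lag_ram * blk_size       = 5632 bytes
 *   bufsize     = (1 + NUM_FPGA) * REG_BLOCK_SIZE + 4 * 5632
 *               = 5 * 128 + 22528              = 23168 bytes
 */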
-
-/*
- * Interrupt Handling
- */
-
-/**
- * data_disable_interrupts() - stop the device from generating interrupts
- * @priv: the driver's private data structure
- *
- * Hide interrupts by switching to GPIO interrupt source
- *
- * LOCKING: must hold dev->lock
- */
-static void data_disable_interrupts(struct fpga_device *priv)
-{
- /* hide the interrupt by switching the IRQ driver to GPIO */
- iowrite32be(0x2F, priv->regs + SYS_IRQ_SOURCE_CTL);
-}
-
-/**
- * data_enable_interrupts() - allow the device to generate interrupts
- * @priv: the driver's private data structure
- *
- * Unhide interrupts by switching to the FPGA interrupt source. At the
- * same time, clear the DATA-FPGA status registers.
- *
- * LOCKING: must hold dev->lock
- */
-static void data_enable_interrupts(struct fpga_device *priv)
-{
- /* clear the actual FPGA corl_done interrupt */
- fpga_write_reg(priv, 0, MMAP_REG_STATUS, 0x0);
- fpga_write_reg(priv, 1, MMAP_REG_STATUS, 0x0);
- fpga_write_reg(priv, 2, MMAP_REG_STATUS, 0x0);
- fpga_write_reg(priv, 3, MMAP_REG_STATUS, 0x0);
-
- /* flush the writes */
- fpga_read_reg(priv, 0, MMAP_REG_STATUS);
- fpga_read_reg(priv, 1, MMAP_REG_STATUS);
- fpga_read_reg(priv, 2, MMAP_REG_STATUS);
- fpga_read_reg(priv, 3, MMAP_REG_STATUS);
-
- /* switch back to the external interrupt source */
- iowrite32be(0x3F, priv->regs + SYS_IRQ_SOURCE_CTL);
-}
-
-/**
- * data_dma_cb() - DMAEngine callback for DMA completion
- * @data: the driver's private data structure
- *
- * Complete a DMA transfer from the DATA-FPGAs
- *
- * This is called via the DMA callback mechanism, and will handle moving the
- * completed DMA transaction to the used list, and then wake any processes
- * waiting for new data
- *
- * CONTEXT: any, softirq expected
- */
-static void data_dma_cb(void *data)
-{
- struct fpga_device *priv = data;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* If there is no inflight buffer, we've got a bug */
- BUG_ON(priv->inflight == NULL);
-
- /* Move the inflight buffer onto the used list */
- list_move_tail(&priv->inflight->entry, &priv->used);
- priv->inflight = NULL;
-
- /*
- * If data dumping is still enabled, then clear the FPGA
- * status registers and re-enable FPGA interrupts
- */
- if (priv->enabled)
- data_enable_interrupts(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /*
- * We've changed both the inflight and used lists, so we need
- * to wake up any processes that are blocking for those events
- */
- wake_up(&priv->wait);
-}
-
-/**
- * data_submit_dma() - prepare and submit the required DMA to fill a buffer
- * @priv: the driver's private data structure
- * @buf: the data buffer
- *
- * Prepare and submit the necessary DMA transactions to fill a correlation
- * data buffer.
- *
- * LOCKING: must hold dev->lock
- * CONTEXT: hardirq only
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
-{
- struct scatterlist *dst_sg, *src_sg;
- unsigned int dst_nents, src_nents;
- struct dma_chan *chan = priv->chan;
- struct dma_async_tx_descriptor *tx;
- dma_cookie_t cookie;
- dma_addr_t dst, src;
- unsigned long dma_flags = 0;
-
- dst_sg = buf->sglist;
- dst_nents = buf->sglen;
-
- src_sg = priv->corl_table.sgl;
- src_nents = priv->corl_nents;
-
- /*
- * All buffers passed to this function should be ready and mapped
- * for DMA already. Therefore, we don't need to do anything except
- * submit it to the Freescale DMA Engine for processing
- */
-
- /* setup the scatterlist to scatterlist transfer */
- tx = chan->device->device_prep_dma_sg(chan,
- dst_sg, dst_nents,
- src_sg, src_nents,
- 0);
- if (!tx) {
- dev_err(priv->dev, "unable to prep scatterlist DMA\n");
- return -ENOMEM;
- }
-
- /* submit the transaction to the DMA controller */
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- dev_err(priv->dev, "unable to submit scatterlist DMA\n");
- return -ENOMEM;
- }
-
- /* Prepare the re-read of the SYS-FPGA block */
- dst = sg_dma_address(dst_sg) + (NUM_FPGA * REG_BLOCK_SIZE);
- src = SYS_FPGA_BLOCK;
- tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
- REG_BLOCK_SIZE,
- dma_flags);
- if (!tx) {
- dev_err(priv->dev, "unable to prep SYS-FPGA DMA\n");
- return -ENOMEM;
- }
-
- /* Setup the callback */
- tx->callback = data_dma_cb;
- tx->callback_param = priv;
-
- /* submit the transaction to the DMA controller */
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- dev_err(priv->dev, "unable to submit SYS-FPGA DMA\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-#define CORL_DONE 0x1
-#define CORL_ERR 0x2
-
-static irqreturn_t data_irq(int irq, void *dev_id)
-{
- struct fpga_device *priv = dev_id;
- bool submitted = false;
- struct data_buf *buf;
- u32 status;
- int i;
-
- /* detect spurious interrupts via FPGA status */
- for (i = 0; i < NUM_FPGA; i++) {
- status = fpga_read_reg(priv, i, MMAP_REG_STATUS);
- if (!(status & (CORL_DONE | CORL_ERR))) {
- dev_err(priv->dev, "spurious irq detected (FPGA)\n");
- return IRQ_NONE;
- }
- }
-
- /* detect spurious interrupts via raw IRQ pin readback */
- status = ioread32be(priv->regs + SYS_IRQ_INPUT_DATA);
- if (status & IRQ_CORL_DONE) {
- dev_err(priv->dev, "spurious irq detected (IRQ)\n");
- return IRQ_NONE;
- }
-
- spin_lock(&priv->lock);
-
- /*
- * This is an error case that should never happen.
- *
- * If this driver has a bug and manages to re-enable interrupts while
- * a DMA is in progress, then we will hit this statement and should
- * start paying attention immediately.
- */
- BUG_ON(priv->inflight != NULL);
-
- /* hide the interrupt by switching the IRQ driver to GPIO */
- data_disable_interrupts(priv);
-
- /* If there are no free buffers, drop this data */
- if (list_empty(&priv->free)) {
- priv->num_dropped++;
- goto out;
- }
-
- buf = list_first_entry(&priv->free, struct data_buf, entry);
- list_del_init(&buf->entry);
- BUG_ON(buf->size != priv->bufsize);
-
- /* Submit a DMA transfer to get the correlation data */
- if (data_submit_dma(priv, buf)) {
- dev_err(priv->dev, "Unable to setup DMA transfer\n");
- list_move_tail(&buf->entry, &priv->free);
- goto out;
- }
-
- /* Save the buffer for the DMA callback */
- priv->inflight = buf;
- submitted = true;
-
- /* Start the DMA Engine */
- dma_async_issue_pending(priv->chan);
-
-out:
- /* If no DMA was submitted, re-enable interrupts */
- if (!submitted)
- data_enable_interrupts(priv);
-
- spin_unlock(&priv->lock);
- return IRQ_HANDLED;
-}
-
-/*
- * Realtime Device Enable Helpers
- */
-
-/**
- * data_device_enable() - enable the device for buffered dumping
- * @priv: the driver's private data structure
- *
- * Enable the device for buffered dumping. Allocates buffers and hooks up
- * the interrupt handler. When this finishes, data will come pouring in.
- *
- * LOCKING: must hold dev->mutex
- * CONTEXT: user context only
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static int data_device_enable(struct fpga_device *priv)
-{
- bool enabled;
- u32 val;
- int ret;
-
- /* multiple enables are safe: they do nothing */
- spin_lock_irq(&priv->lock);
- enabled = priv->enabled;
- spin_unlock_irq(&priv->lock);
- if (enabled)
- return 0;
-
- /* check that the FPGAs are programmed */
- val = ioread32be(priv->regs + SYS_FPGA_CONFIG_STATUS);
- if (!(val & (1 << 18))) {
- dev_err(priv->dev, "DATA-FPGAs are not enabled\n");
- return -ENODATA;
- }
-
- /* read the FPGAs to calculate the buffer size */
- ret = data_calculate_bufsize(priv);
- if (ret) {
- dev_err(priv->dev, "unable to calculate buffer size\n");
- goto out_error;
- }
-
- /* allocate the correlation data buffers */
- ret = data_alloc_buffers(priv);
- if (ret) {
- dev_err(priv->dev, "unable to allocate buffers\n");
- goto out_error;
- }
-
- /* setup the source scatterlist for dumping correlation data */
- ret = data_setup_corl_table(priv);
- if (ret) {
- dev_err(priv->dev, "unable to setup correlation DMA table\n");
- goto out_error;
- }
-
- /* prevent the FPGAs from generating interrupts */
- data_disable_interrupts(priv);
-
- /* hookup the irq handler */
- ret = request_irq(priv->irq, data_irq, IRQF_SHARED, drv_name, priv);
- if (ret) {
- dev_err(priv->dev, "unable to request IRQ handler\n");
- goto out_error;
- }
-
- /* allow the DMA callback to re-enable FPGA interrupts */
- spin_lock_irq(&priv->lock);
- priv->enabled = true;
- spin_unlock_irq(&priv->lock);
-
- /* allow the FPGAs to generate interrupts */
- data_enable_interrupts(priv);
- return 0;
-
-out_error:
- sg_free_table(&priv->corl_table);
- priv->corl_nents = 0;
-
- data_free_buffers(priv);
- return ret;
-}
-
-/**
- * data_device_disable() - disable the device for buffered dumping
- * @priv: the driver's private data structure
- *
- * Disable the device for buffered dumping. Stops new DMA transactions from
- * being generated, waits for all outstanding DMA to complete, and then frees
- * all buffers.
- *
- * LOCKING: must hold dev->mutex
- * CONTEXT: user only
- *
- * Returns 0 on success, -ERRNO otherwise
- */
-static int data_device_disable(struct fpga_device *priv)
-{
- spin_lock_irq(&priv->lock);
-
- /* allow multiple disable */
- if (!priv->enabled) {
- spin_unlock_irq(&priv->lock);
- return 0;
- }
-
- /*
- * Mark the device disabled
- *
- * This stops DMA callbacks from re-enabling interrupts
- */
- priv->enabled = false;
-
- /* prevent the FPGAs from generating interrupts */
- data_disable_interrupts(priv);
-
- /* wait until all ongoing DMA has finished */
- while (priv->inflight != NULL) {
- spin_unlock_irq(&priv->lock);
- wait_event(priv->wait, priv->inflight == NULL);
- spin_lock_irq(&priv->lock);
- }
-
- spin_unlock_irq(&priv->lock);
-
- /* unhook the irq handler */
- free_irq(priv->irq, priv);
-
- /* free the correlation table */
- sg_free_table(&priv->corl_table);
- priv->corl_nents = 0;
-
- /* free all buffers: the free and used lists are not being changed */
- data_free_buffers(priv);
- return 0;
-}
-
-/*
- * DEBUGFS Interface
- */
-#ifdef CONFIG_DEBUG_FS
-
-/*
- * Count the number of entries in the given list
- */
-static unsigned int list_num_entries(struct list_head *list)
-{
- struct list_head *entry;
- unsigned int ret = 0;
-
- list_for_each(entry, list)
- ret++;
-
- return ret;
-}
-
-static int data_debug_show(struct seq_file *f, void *offset)
-{
- struct fpga_device *priv = f->private;
-
- spin_lock_irq(&priv->lock);
-
- seq_printf(f, "enabled: %d\n", priv->enabled);
- seq_printf(f, "bufsize: %zu\n", priv->bufsize);
- seq_printf(f, "num_buffers: %d\n", priv->num_buffers);
- seq_printf(f, "num_free: %d\n", list_num_entries(&priv->free));
- seq_printf(f, "inflight: %d\n", priv->inflight != NULL);
- seq_printf(f, "num_used: %d\n", list_num_entries(&priv->used));
- seq_printf(f, "num_dropped: %d\n", priv->num_dropped);
-
- spin_unlock_irq(&priv->lock);
- return 0;
-}
-
-static int data_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, data_debug_show, inode->i_private);
-}
-
-static const struct file_operations data_debug_fops = {
- .owner = THIS_MODULE,
- .open = data_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int data_debugfs_init(struct fpga_device *priv)
-{
- priv->dbg_entry = debugfs_create_file(drv_name, S_IRUGO, NULL, priv,
- &data_debug_fops);
- return PTR_ERR_OR_ZERO(priv->dbg_entry);
-}
-
-static void data_debugfs_exit(struct fpga_device *priv)
-{
- debugfs_remove(priv->dbg_entry);
-}
-
-#else
-
-static inline int data_debugfs_init(struct fpga_device *priv)
-{
- return 0;
-}
-
-static inline void data_debugfs_exit(struct fpga_device *priv)
-{
-}
-
-#endif /* CONFIG_DEBUG_FS */
-
-/*
- * SYSFS Attributes
- */
-
-static ssize_t data_en_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct fpga_device *priv = dev_get_drvdata(dev);
- int ret;
-
- spin_lock_irq(&priv->lock);
- ret = snprintf(buf, PAGE_SIZE, "%u\n", priv->enabled);
- spin_unlock_irq(&priv->lock);
-
- return ret;
-}
-
-static ssize_t data_en_set(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct fpga_device *priv = dev_get_drvdata(dev);
- unsigned long enable;
- int ret;
-
- ret = kstrtoul(buf, 0, &enable);
- if (ret) {
- dev_err(priv->dev, "unable to parse enable input\n");
- return ret;
- }
-
- /* protect against concurrent enable/disable */
- ret = mutex_lock_interruptible(&priv->mutex);
- if (ret)
- return ret;
-
- if (enable)
- ret = data_device_enable(priv);
- else
- ret = data_device_disable(priv);
-
- if (ret) {
- dev_err(priv->dev, "device %s failed\n",
- enable ? "enable" : "disable");
- count = ret;
- goto out_unlock;
- }
-
-out_unlock:
- mutex_unlock(&priv->mutex);
- return count;
-}
-
-static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO, data_en_show, data_en_set);
-
-static struct attribute *data_sysfs_attrs[] = {
- &dev_attr_enable.attr,
- NULL,
-};
-
-static const struct attribute_group rt_sysfs_attr_group = {
- .attrs = data_sysfs_attrs,
-};
-
-/*
- * FPGA Realtime Data Character Device
- */
-
-static int data_open(struct inode *inode, struct file *filp)
-{
- /*
- * The miscdevice layer puts our struct miscdevice into the
- * filp->private_data field. We use this to find our private
- * data and then overwrite it with our own private structure.
- */
- struct fpga_device *priv = container_of(filp->private_data,
- struct fpga_device, miscdev);
- struct fpga_reader *reader;
- int ret;
-
- /* allocate private data */
- reader = kzalloc(sizeof(*reader), GFP_KERNEL);
- if (!reader)
- return -ENOMEM;
-
- reader->priv = priv;
- reader->buf = NULL;
-
- filp->private_data = reader;
- ret = nonseekable_open(inode, filp);
- if (ret) {
- dev_err(priv->dev, "nonseekable-open failed\n");
- kfree(reader);
- return ret;
- }
-
- /*
- * success, increase the reference count of the private data structure
- * so that it doesn't disappear if the device is unbound
- */
- kref_get(&priv->ref);
- return 0;
-}
-
-static int data_release(struct inode *inode, struct file *filp)
-{
- struct fpga_reader *reader = filp->private_data;
- struct fpga_device *priv = reader->priv;
-
- /* free the per-reader structure */
- data_free_buffer(reader->buf);
- kfree(reader);
- filp->private_data = NULL;
-
- /* decrement our reference count to the private data */
- kref_put(&priv->ref, fpga_device_release);
- return 0;
-}
-
-static ssize_t data_read(struct file *filp, char __user *ubuf, size_t count,
- loff_t *f_pos)
-{
- struct fpga_reader *reader = filp->private_data;
- struct fpga_device *priv = reader->priv;
- struct list_head *used = &priv->used;
- bool drop_buffer = false;
- struct data_buf *dbuf;
- size_t avail;
- void *data;
- int ret;
-
- /* check if we already have a partial buffer */
- if (reader->buf) {
- dbuf = reader->buf;
- goto have_buffer;
- }
-
- spin_lock_irq(&priv->lock);
-
- /* Block until there is at least one buffer on the used list */
- while (list_empty(used)) {
- spin_unlock_irq(&priv->lock);
-
- if (filp->f_flags & O_NONBLOCK)
- return -EAGAIN;
-
- ret = wait_event_interruptible(priv->wait, !list_empty(used));
- if (ret)
- return ret;
-
- spin_lock_irq(&priv->lock);
- }
-
- /* Grab the first buffer off of the used list */
- dbuf = list_first_entry(used, struct data_buf, entry);
- list_del_init(&dbuf->entry);
-
- spin_unlock_irq(&priv->lock);
-
- /* Buffers are always mapped: unmap it */
- carma_dma_unmap(priv->dev, dbuf);
-
- /* save the buffer for later */
- reader->buf = dbuf;
- reader->buf_start = 0;
-
-have_buffer:
- /* Get the number of bytes available */
- avail = dbuf->size - reader->buf_start;
- data = dbuf->vaddr + reader->buf_start;
-
- /* Get the number of bytes we can transfer */
- count = min(count, avail);
-
- /* Copy the data to the userspace buffer */
- if (copy_to_user(ubuf, data, count))
- return -EFAULT;
-
- /* Update the amount of available space */
- avail -= count;
-
- /*
- * If there is still some data available, save the buffer for the
- * next userspace call to read() and return
- */
- if (avail > 0) {
- reader->buf_start += count;
- reader->buf = dbuf;
- return count;
- }
-
- /*
- * Get the buffer ready to be reused for DMA
- *
- * If it fails, we pretend that the read never happened and return
- * -EFAULT to userspace. The read will be retried.
- */
- ret = carma_dma_map(priv->dev, dbuf);
- if (ret) {
- dev_err(priv->dev, "unable to remap buffer for DMA\n");
- return -EFAULT;
- }
-
- /* Lock against concurrent enable/disable */
- spin_lock_irq(&priv->lock);
-
- /* the reader is finished with this buffer */
- reader->buf = NULL;
-
- /*
- * One of two things has happened: the device is disabled, or the
- * device has been reconfigured underneath us. In either case, we
- * should just throw away the buffer.
- *
- * Lockdep complains if this is done under the spinlock, so we
- * handle it during the unlock path.
- */
- if (!priv->enabled || dbuf->size != priv->bufsize) {
- drop_buffer = true;
- goto out_unlock;
- }
-
- /* The buffer is safe to reuse, so add it back to the free list */
- list_add_tail(&dbuf->entry, &priv->free);
-
-out_unlock:
- spin_unlock_irq(&priv->lock);
-
- if (drop_buffer) {
- carma_dma_unmap(priv->dev, dbuf);
- data_free_buffer(dbuf);
- }
-
- return count;
-}
-
-static unsigned int data_poll(struct file *filp, struct poll_table_struct *tbl)
-{
- struct fpga_reader *reader = filp->private_data;
- struct fpga_device *priv = reader->priv;
- unsigned int mask = 0;
-
- poll_wait(filp, &priv->wait, tbl);
-
- if (!list_empty(&priv->used))
- mask |= POLLIN | POLLRDNORM;
-
- return mask;
-}
-
-static int data_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct fpga_reader *reader = filp->private_data;
- struct fpga_device *priv = reader->priv;
- unsigned long offset, vsize, psize, addr;
-
- /* VMA properties */
- offset = vma->vm_pgoff << PAGE_SHIFT;
- vsize = vma->vm_end - vma->vm_start;
- psize = priv->phys_size - offset;
- addr = (priv->phys_addr + offset) >> PAGE_SHIFT;
-
- /* Check against the FPGA region's physical memory size */
- if (vsize > psize) {
- dev_err(priv->dev, "requested mmap mapping too large\n");
- return -EINVAL;
- }
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- return io_remap_pfn_range(vma, vma->vm_start, addr, vsize,
- vma->vm_page_prot);
-}
-
-static const struct file_operations data_fops = {
- .owner = THIS_MODULE,
- .open = data_open,
- .release = data_release,
- .read = data_read,
- .poll = data_poll,
- .mmap = data_mmap,
- .llseek = no_llseek,
-};
-
-/*
- * OpenFirmware Device Subsystem
- */
-
-static bool dma_filter(struct dma_chan *chan, void *data)
-{
- /*
- * DMA Channel #0 is used for the FPGA Programmer, so ignore it
- *
- * This probably won't survive an unload/load cycle of the Freescale
- * DMAEngine driver, but that won't be a problem
- */
- if (chan->chan_id == 0 && chan->device->dev_id == 0)
- return false;
-
- return true;
-}
-
-static int data_of_probe(struct platform_device *op)
-{
- struct device_node *of_node = op->dev.of_node;
- struct device *this_device;
- struct fpga_device *priv;
- struct resource res;
- dma_cap_mask_t mask;
- int ret;
-
- /* Allocate private data */
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&op->dev, "Unable to allocate device private data\n");
- ret = -ENOMEM;
- goto out_return;
- }
-
- platform_set_drvdata(op, priv);
- priv->dev = &op->dev;
- kref_init(&priv->ref);
- mutex_init(&priv->mutex);
-
- dev_set_drvdata(priv->dev, priv);
- spin_lock_init(&priv->lock);
- INIT_LIST_HEAD(&priv->free);
- INIT_LIST_HEAD(&priv->used);
- init_waitqueue_head(&priv->wait);
-
- /* Setup the misc device */
- priv->miscdev.minor = MISC_DYNAMIC_MINOR;
- priv->miscdev.name = drv_name;
- priv->miscdev.fops = &data_fops;
-
- /* Get the physical address of the FPGA registers */
- ret = of_address_to_resource(of_node, 0, &res);
- if (ret) {
- dev_err(&op->dev, "Unable to find FPGA physical address\n");
- ret = -ENODEV;
- goto out_free_priv;
- }
-
- priv->phys_addr = res.start;
- priv->phys_size = resource_size(&res);
-
- /* ioremap the registers for use */
- priv->regs = of_iomap(of_node, 0);
- if (!priv->regs) {
- dev_err(&op->dev, "Unable to ioremap registers\n");
- ret = -ENOMEM;
- goto out_free_priv;
- }
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_MEMCPY, mask);
- dma_cap_set(DMA_INTERRUPT, mask);
- dma_cap_set(DMA_SLAVE, mask);
- dma_cap_set(DMA_SG, mask);
-
- /* Request a DMA channel */
- priv->chan = dma_request_channel(mask, dma_filter, NULL);
- if (!priv->chan) {
- dev_err(&op->dev, "Unable to request DMA channel\n");
- ret = -ENODEV;
- goto out_unmap_regs;
- }
-
- /* Find the correct IRQ number */
- priv->irq = irq_of_parse_and_map(of_node, 0);
- if (priv->irq == NO_IRQ) {
- dev_err(&op->dev, "Unable to find IRQ line\n");
- ret = -ENODEV;
- goto out_release_dma;
- }
-
- /* Drive the GPIO for FPGA IRQ high (no interrupt) */
- iowrite32be(IRQ_CORL_DONE, priv->regs + SYS_IRQ_OUTPUT_DATA);
-
- /* Register the miscdevice */
- ret = misc_register(&priv->miscdev);
- if (ret) {
- dev_err(&op->dev, "Unable to register miscdevice\n");
- goto out_irq_dispose_mapping;
- }
-
- /* Create the debugfs files */
- ret = data_debugfs_init(priv);
- if (ret) {
- dev_err(&op->dev, "Unable to create debugfs files\n");
- goto out_misc_deregister;
- }
-
- /* Create the sysfs files */
- this_device = priv->miscdev.this_device;
- dev_set_drvdata(this_device, priv);
- ret = sysfs_create_group(&this_device->kobj, &rt_sysfs_attr_group);
- if (ret) {
- dev_err(&op->dev, "Unable to create sysfs files\n");
- goto out_data_debugfs_exit;
- }
-
- dev_info(&op->dev, "CARMA FPGA Realtime Data Driver Loaded\n");
- return 0;
-
-out_data_debugfs_exit:
- data_debugfs_exit(priv);
-out_misc_deregister:
- misc_deregister(&priv->miscdev);
-out_irq_dispose_mapping:
- irq_dispose_mapping(priv->irq);
-out_release_dma:
- dma_release_channel(priv->chan);
-out_unmap_regs:
- iounmap(priv->regs);
-out_free_priv:
- kref_put(&priv->ref, fpga_device_release);
-out_return:
- return ret;
-}
-
-static int data_of_remove(struct platform_device *op)
-{
- struct fpga_device *priv = platform_get_drvdata(op);
- struct device *this_device = priv->miscdev.this_device;
-
- /* remove all sysfs files; now the device cannot be re-enabled */
- sysfs_remove_group(&this_device->kobj, &rt_sysfs_attr_group);
-
- /* remove all debugfs files */
- data_debugfs_exit(priv);
-
- /* disable the device from generating data */
- data_device_disable(priv);
-
- /* remove the character device to stop new readers from appearing */
- misc_deregister(&priv->miscdev);
-
- /* cleanup everything not needed by readers */
- irq_dispose_mapping(priv->irq);
- dma_release_channel(priv->chan);
- iounmap(priv->regs);
-
- /* release our reference */
- kref_put(&priv->ref, fpga_device_release);
- return 0;
-}
-
-static const struct of_device_id data_of_match[] = {
- { .compatible = "carma,carma-fpga", },
- {},
-};
-
-static struct platform_driver data_of_driver = {
- .probe = data_of_probe,
- .remove = data_of_remove,
- .driver = {
- .name = drv_name,
- .of_match_table = data_of_match,
- },
-};
-
-module_platform_driver(data_of_driver);
-
-MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
-MODULE_DESCRIPTION("CARMA DATA-FPGA Access Driver");
-MODULE_LICENSE("GPL");
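For reference, the driver deleted above is a textbook instance of the miscdevice open pattern: the misc core stores the struct miscdevice pointer in filp->private_data, and the driver recovers its containing state with container_of() before swapping in per-open data. A minimal sketch of the pattern follows; foo_device and foo_open are illustrative names, not anything from this patch.

	#include <linux/fs.h>
	#include <linux/miscdevice.h>

	struct foo_device {
		struct miscdevice miscdev;	/* embedded misc device */
		/* ... driver state ... */
	};

	static int foo_open(struct inode *inode, struct file *filp)
	{
		/* the misc core stored &foo->miscdev in filp->private_data */
		struct foo_device *foo = container_of(filp->private_data,
						      struct foo_device, miscdev);

		/* per-open state may now replace the pointer */
		filp->private_data = foo;
		return nonseekable_open(inode, filp);
	}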
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index d2cd53e3fac3..1e42781592d8 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -59,46 +59,29 @@ void mei_amthif_reset_params(struct mei_device *dev)
* mei_amthif_host_init - mei initialization of the amthif client.
*
* @dev: the device structure
+ * @me_cl: me client
*
* Return: 0 on success, <0 on failure.
*/
-int mei_amthif_host_init(struct mei_device *dev)
+int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
{
struct mei_cl *cl = &dev->iamthif_cl;
- struct mei_me_client *me_cl;
int ret;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
mei_cl_init(cl, dev);
- me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
- if (!me_cl) {
- dev_info(dev->dev, "amthif: failed to find the client");
- return -ENOTTY;
- }
-
- cl->me_client_id = me_cl->client_id;
- cl->cl_uuid = me_cl->props.protocol_name;
-
- /* Assign iamthif_mtu to the value received from ME */
-
- dev->iamthif_mtu = me_cl->props.max_msg_length;
- dev_dbg(dev->dev, "IAMTHIF_MTU = %d\n", dev->iamthif_mtu);
-
-
ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
if (ret < 0) {
dev_err(dev->dev, "amthif: failed cl_link %d\n", ret);
- goto out;
+ return ret;
}
- ret = mei_cl_connect(cl, NULL);
+ ret = mei_cl_connect(cl, me_cl, NULL);
dev->iamthif_state = MEI_IAMTHIF_IDLE;
-out:
- mei_me_cl_put(me_cl);
return ret;
}
@@ -250,7 +233,6 @@ static int mei_amthif_read_start(struct mei_cl *cl, struct file *file)
{
struct mei_device *dev = cl->dev;
struct mei_cl_cb *cb;
- size_t length = dev->iamthif_mtu;
int rets;
cb = mei_io_cb_init(cl, MEI_FOP_READ, file);
@@ -259,7 +241,7 @@ static int mei_amthif_read_start(struct mei_cl *cl, struct file *file)
goto err;
}
- rets = mei_io_cb_alloc_buf(cb, length);
+ rets = mei_io_cb_alloc_buf(cb, mei_cl_mtu(cl));
if (rets)
goto err;
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 4cf38c39878a..357b6ae4d207 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -35,18 +35,30 @@ static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
struct mei_cl_device *device = to_mei_cl_device(dev);
struct mei_cl_driver *driver = to_mei_cl_driver(drv);
const struct mei_cl_device_id *id;
+ const uuid_le *uuid;
+ const char *name;
if (!device)
return 0;
+ uuid = mei_me_cl_uuid(device->me_cl);
+ name = device->name;
+
if (!driver || !driver->id_table)
return 0;
id = driver->id_table;
- while (id->name[0]) {
- if (!strncmp(dev_name(dev), id->name, sizeof(id->name)))
- return 1;
+ while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
+
+ if (!uuid_le_cmp(*uuid, id->uuid)) {
+ if (id->name[0]) {
+ if (!strncmp(name, id->name, sizeof(id->name)))
+ return 1;
+ } else {
+ return 1;
+ }
+ }
id++;
}
@@ -69,7 +81,7 @@ static int mei_cl_device_probe(struct device *dev)
dev_dbg(dev, "Device probe\n");
- strlcpy(id.name, dev_name(dev), sizeof(id.name));
+ strlcpy(id.name, device->name, sizeof(id.name));
return driver->probe(device, &id);
}
@@ -97,18 +109,48 @@ static int mei_cl_device_remove(struct device *dev)
return driver->remove(device);
}
+static ssize_t name_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *device = to_mei_cl_device(dev);
+ size_t len;
+
+ len = snprintf(buf, PAGE_SIZE, "%s", device->name);
+
+ return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *device = to_mei_cl_device(dev);
+ const uuid_le *uuid = mei_me_cl_uuid(device->me_cl);
+ size_t len;
+
+ len = snprintf(buf, PAGE_SIZE, "%pUl", uuid);
+
+ return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+static DEVICE_ATTR_RO(uuid);
+
static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
char *buf)
{
- int len;
+ struct mei_cl_device *device = to_mei_cl_device(dev);
+ const uuid_le *uuid = mei_me_cl_uuid(device->me_cl);
+ size_t len;
- len = snprintf(buf, PAGE_SIZE, "mei:%s\n", dev_name(dev));
+ len = snprintf(buf, PAGE_SIZE, "mei:%s:" MEI_CL_UUID_FMT ":",
+ device->name, MEI_CL_UUID_ARGS(uuid->b));
return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *mei_cl_dev_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_uuid.attr,
&dev_attr_modalias.attr,
NULL,
};
@@ -116,7 +158,17 @@ ATTRIBUTE_GROUPS(mei_cl_dev);
static int mei_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- if (add_uevent_var(env, "MODALIAS=mei:%s", dev_name(dev)))
+ struct mei_cl_device *device = to_mei_cl_device(dev);
+ const uuid_le *uuid = mei_me_cl_uuid(device->me_cl);
+
+ if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "MEI_CL_NAME=%s", device->name))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "MODALIAS=mei:%s:" MEI_CL_UUID_FMT ":",
+ device->name, MEI_CL_UUID_ARGS(uuid->b)))
return -ENOMEM;
return 0;
@@ -133,7 +185,13 @@ static struct bus_type mei_cl_bus_type = {
static void mei_cl_dev_release(struct device *dev)
{
- kfree(to_mei_cl_device(dev));
+ struct mei_cl_device *device = to_mei_cl_device(dev);
+
+ if (!device)
+ return;
+
+ mei_me_cl_put(device->me_cl);
+ kfree(device);
}
static struct device_type mei_cl_device_type = {
@@ -141,45 +199,50 @@ static struct device_type mei_cl_device_type = {
};
struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev,
- uuid_le uuid)
+ uuid_le uuid)
{
struct mei_cl *cl;
list_for_each_entry(cl, &dev->device_list, device_link) {
- if (!uuid_le_cmp(uuid, cl->cl_uuid))
+ if (cl->device && cl->device->me_cl &&
+ !uuid_le_cmp(uuid, *mei_me_cl_uuid(cl->device->me_cl)))
return cl;
}
return NULL;
}
+
struct mei_cl_device *mei_cl_add_device(struct mei_device *dev,
- uuid_le uuid, char *name,
- struct mei_cl_ops *ops)
+ struct mei_me_client *me_cl,
+ struct mei_cl *cl,
+ char *name)
{
struct mei_cl_device *device;
- struct mei_cl *cl;
int status;
- cl = mei_cl_bus_find_cl_by_uuid(dev, uuid);
- if (cl == NULL)
- return NULL;
-
device = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL);
if (!device)
return NULL;
- device->cl = cl;
- device->ops = ops;
+ device->me_cl = mei_me_cl_get(me_cl);
+ if (!device->me_cl) {
+ kfree(device);
+ return NULL;
+ }
+ device->cl = cl;
device->dev.parent = dev->dev;
device->dev.bus = &mei_cl_bus_type;
device->dev.type = &mei_cl_device_type;
- dev_set_name(&device->dev, "%s", name);
+ strlcpy(device->name, name, sizeof(device->name));
+
+ dev_set_name(&device->dev, "mei:%s:%pUl", name, mei_me_cl_uuid(me_cl));
status = device_register(&device->dev);
if (status) {
dev_err(dev->dev, "Failed to register MEI device\n");
+ mei_me_cl_put(device->me_cl);
kfree(device);
return NULL;
}
@@ -224,11 +287,10 @@ void mei_cl_driver_unregister(struct mei_cl_driver *driver)
}
EXPORT_SYMBOL_GPL(mei_cl_driver_unregister);
-static ssize_t ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
+ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
bool blocking)
{
struct mei_device *dev;
- struct mei_me_client *me_cl = NULL;
struct mei_cl_cb *cb = NULL;
ssize_t rets;
@@ -244,13 +306,12 @@ static ssize_t ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
}
/* Check if we have an ME client device */
- me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
- if (!me_cl) {
+ if (!mei_me_cl_is_active(cl->me_cl)) {
rets = -ENOTTY;
goto out;
}
- if (length > me_cl->props.max_msg_length) {
+ if (length > mei_cl_mtu(cl)) {
rets = -EFBIG;
goto out;
}
@@ -266,7 +327,6 @@ static ssize_t ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
rets = mei_cl_write(cl, cb, blocking);
out:
- mei_me_cl_put(me_cl);
mutex_unlock(&dev->device_lock);
if (rets < 0)
mei_io_cb_free(cb);
@@ -341,16 +401,6 @@ out:
return rets;
}
-inline ssize_t __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length)
-{
- return ___mei_cl_send(cl, buf, length, 0);
-}
-
-inline ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length)
-{
- return ___mei_cl_send(cl, buf, length, 1);
-}
-
ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length)
{
struct mei_cl *cl = device->cl;
@@ -358,23 +408,17 @@ ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length)
if (cl == NULL)
return -ENODEV;
- if (device->ops && device->ops->send)
- return device->ops->send(device, buf, length);
-
- return __mei_cl_send(cl, buf, length);
+ return __mei_cl_send(cl, buf, length, 1);
}
EXPORT_SYMBOL_GPL(mei_cl_send);
ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length)
{
- struct mei_cl *cl = device->cl;
+ struct mei_cl *cl = device->cl;
if (cl == NULL)
return -ENODEV;
- if (device->ops && device->ops->recv)
- return device->ops->recv(device, buf, length);
-
return __mei_cl_recv(cl, buf, length);
}
EXPORT_SYMBOL_GPL(mei_cl_recv);
@@ -436,7 +480,13 @@ int mei_cl_enable_device(struct mei_cl_device *device)
mutex_lock(&dev->device_lock);
- err = mei_cl_connect(cl, NULL);
+ if (mei_cl_is_connected(cl)) {
+ mutex_unlock(&dev->device_lock);
+ dev_warn(dev->dev, "Already connected");
+ return -EBUSY;
+ }
+
+ err = mei_cl_connect(cl, device->me_cl, NULL);
if (err < 0) {
mutex_unlock(&dev->device_lock);
dev_err(dev->dev, "Could not connect to the ME client");
@@ -449,10 +499,7 @@ int mei_cl_enable_device(struct mei_cl_device *device)
if (device->event_cb)
mei_cl_read_start(device->cl, 0, NULL);
- if (!device->ops || !device->ops->enable)
- return 0;
-
- return device->ops->enable(device);
+ return 0;
}
EXPORT_SYMBOL_GPL(mei_cl_enable_device);
@@ -467,9 +514,6 @@ int mei_cl_disable_device(struct mei_cl_device *device)
dev = cl->dev;
- if (device->ops && device->ops->disable)
- device->ops->disable(device);
-
device->event_cb = NULL;
mutex_lock(&dev->device_lock);
@@ -480,8 +524,6 @@ int mei_cl_disable_device(struct mei_cl_device *device)
goto out;
}
- cl->state = MEI_FILE_DISCONNECTING;
-
err = mei_cl_disconnect(cl);
if (err < 0) {
dev_err(dev->dev, "Could not disconnect from the ME client");
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 1e99ef6a54a2..6decbe136ea7 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -83,7 +83,7 @@ void mei_me_cl_put(struct mei_me_client *me_cl)
}
/**
- * __mei_me_cl_del - delete me client form the list and decrease
+ * __mei_me_cl_del - delete me client from the list and decrease
* reference counter
*
* @dev: mei device
@@ -96,11 +96,25 @@ static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
if (!me_cl)
return;
- list_del(&me_cl->list);
+ list_del_init(&me_cl->list);
mei_me_cl_put(me_cl);
}
/**
+ * mei_me_cl_del - delete me client from the list and decrease
+ * reference counter
+ *
+ * @dev: mei device
+ * @me_cl: me client
+ */
+void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
+{
+ down_write(&dev->me_clients_rwsem);
+ __mei_me_cl_del(dev, me_cl);
+ up_write(&dev->me_clients_rwsem);
+}
+
+/**
* mei_me_cl_add - add me client to the list
*
* @dev: mei device
@@ -317,7 +331,7 @@ static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
{
return cl1 && cl2 &&
(cl1->host_client_id == cl2->host_client_id) &&
- (cl1->me_client_id == cl2->me_client_id);
+ (mei_cl_me_id(cl1) == mei_cl_me_id(cl2));
}
/**
@@ -546,6 +560,7 @@ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
INIT_LIST_HEAD(&cl->link);
INIT_LIST_HEAD(&cl->device_link);
cl->writing_state = MEI_IDLE;
+ cl->state = MEI_FILE_INITIALIZING;
cl->dev = dev;
}
@@ -619,7 +634,7 @@ int mei_cl_link(struct mei_cl *cl, int id)
}
/**
- * mei_cl_unlink - remove me_cl from the list
+ * mei_cl_unlink - remove host client from the list
*
* @cl: host client
*
@@ -667,17 +682,17 @@ void mei_host_client_init(struct work_struct *work)
me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
if (me_cl)
- mei_amthif_host_init(dev);
+ mei_amthif_host_init(dev, me_cl);
mei_me_cl_put(me_cl);
me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid);
if (me_cl)
- mei_wd_host_init(dev);
+ mei_wd_host_init(dev, me_cl);
mei_me_cl_put(me_cl);
me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
if (me_cl)
- mei_nfc_host_init(dev);
+ mei_nfc_host_init(dev, me_cl);
mei_me_cl_put(me_cl);
@@ -699,7 +714,7 @@ void mei_host_client_init(struct work_struct *work)
bool mei_hbuf_acquire(struct mei_device *dev)
{
if (mei_pg_state(dev) == MEI_PG_ON ||
- dev->pg_event == MEI_PG_EVENT_WAIT) {
+ mei_pg_in_transition(dev)) {
dev_dbg(dev->dev, "device is in pg\n");
return false;
}
@@ -715,6 +730,120 @@ bool mei_hbuf_acquire(struct mei_device *dev)
}
/**
+ * mei_cl_set_disconnected - set disconnected state and clear
+ * associated states and resources
+ *
+ * @cl: host client
+ */
+void mei_cl_set_disconnected(struct mei_cl *cl)
+{
+ struct mei_device *dev = cl->dev;
+
+ if (cl->state == MEI_FILE_DISCONNECTED ||
+ cl->state == MEI_FILE_INITIALIZING)
+ return;
+
+ cl->state = MEI_FILE_DISCONNECTED;
+ mei_io_list_flush(&dev->ctrl_rd_list, cl);
+ mei_io_list_flush(&dev->ctrl_wr_list, cl);
+ cl->mei_flow_ctrl_creds = 0;
+ cl->timer_count = 0;
+
+ if (!cl->me_cl)
+ return;
+
+ if (!WARN_ON(cl->me_cl->connect_count == 0))
+ cl->me_cl->connect_count--;
+
+ if (cl->me_cl->connect_count == 0)
+ cl->me_cl->mei_flow_ctrl_creds = 0;
+
+ mei_me_cl_put(cl->me_cl);
+ cl->me_cl = NULL;
+}
+
+static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
+{
+ if (!mei_me_cl_get(me_cl))
+ return -ENOENT;
+
+ /* only one connection is allowed for fixed address clients */
+ if (me_cl->props.fixed_address) {
+ if (me_cl->connect_count) {
+ mei_me_cl_put(me_cl);
+ return -EBUSY;
+ }
+ }
+
+ cl->me_cl = me_cl;
+ cl->state = MEI_FILE_CONNECTING;
+ cl->me_cl->connect_count++;
+
+ return 0;
+}
+
+/**
+ * mei_cl_send_disconnect - send disconnect request
+ *
+ * @cl: host client
+ * @cb: callback block
+ *
+ * Return: 0, OK; otherwise, error.
+ */
+static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
+{
+ struct mei_device *dev;
+ int ret;
+
+ dev = cl->dev;
+
+ ret = mei_hbm_cl_disconnect_req(dev, cl);
+ cl->status = ret;
+ if (ret) {
+ cl->state = MEI_FILE_DISCONNECT_REPLY;
+ return ret;
+ }
+
+ list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
+ cl->timer_count = MEI_CONNECT_TIMEOUT;
+
+ return 0;
+}
+
+/**
+ * mei_cl_irq_disconnect - processes close related operation from
+ * interrupt thread context - send disconnect request
+ *
+ * @cl: client
+ * @cb: callback block.
+ * @cmpl_list: complete list.
+ *
+ * Return: 0, OK; otherwise, error.
+ */
+int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list)
+{
+ struct mei_device *dev = cl->dev;
+ u32 msg_slots;
+ int slots;
+ int ret;
+
+ msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
+ slots = mei_hbuf_empty_slots(dev);
+
+ if (slots < msg_slots)
+ return -EMSGSIZE;
+
+ ret = mei_cl_send_disconnect(cl, cb);
+ if (ret)
+ list_move_tail(&cb->list, &cmpl_list->list);
+
+ return ret;
+}
+
+/**
* mei_cl_disconnect - disconnect host client from the me one
*
* @cl: host client
@@ -736,8 +865,13 @@ int mei_cl_disconnect(struct mei_cl *cl)
cl_dbg(dev, cl, "disconnecting");
- if (cl->state != MEI_FILE_DISCONNECTING)
+ if (!mei_cl_is_connected(cl))
+ return 0;
+
+ if (mei_cl_is_fixed_address(cl)) {
+ mei_cl_set_disconnected(cl);
return 0;
+ }
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
@@ -746,44 +880,41 @@ int mei_cl_disconnect(struct mei_cl *cl)
return rets;
}
+ cl->state = MEI_FILE_DISCONNECTING;
+
cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL);
rets = cb ? 0 : -ENOMEM;
if (rets)
- goto free;
+ goto out;
+
+ cl_dbg(dev, cl, "add disconnect cb to control write list\n");
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
if (mei_hbuf_acquire(dev)) {
- if (mei_hbm_cl_disconnect_req(dev, cl)) {
- rets = -ENODEV;
+ rets = mei_cl_send_disconnect(cl, cb);
+ if (rets) {
cl_err(dev, cl, "failed to disconnect.\n");
- goto free;
+ goto out;
}
- cl->timer_count = MEI_CONNECT_TIMEOUT;
- mdelay(10); /* Wait for hardware disconnection ready */
- list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
- } else {
- cl_dbg(dev, cl, "add disconnect cb to control write list\n");
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
-
}
- mutex_unlock(&dev->device_lock);
-
- wait_event_timeout(cl->wait,
- MEI_FILE_DISCONNECTED == cl->state,
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(cl->wait, cl->state == MEI_FILE_DISCONNECT_REPLY,
+ mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
mutex_lock(&dev->device_lock);
- if (MEI_FILE_DISCONNECTED == cl->state) {
- rets = 0;
- cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
- } else {
+ rets = cl->status;
+ if (cl->state != MEI_FILE_DISCONNECT_REPLY) {
cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
rets = -ETIME;
}
- mei_io_list_flush(&dev->ctrl_rd_list, cl);
- mei_io_list_flush(&dev->ctrl_wr_list, cl);
-free:
+out:
+ /* we also disconnect on error */
+ mei_cl_set_disconnected(cl);
+ if (!rets)
+ cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
+
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -801,53 +932,119 @@ free:
*
* Return: true if other client is connected, false otherwise.
*/
-bool mei_cl_is_other_connecting(struct mei_cl *cl)
+static bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
struct mei_device *dev;
- struct mei_cl *ocl; /* the other client */
-
- if (WARN_ON(!cl || !cl->dev))
- return false;
+ struct mei_cl_cb *cb;
dev = cl->dev;
- list_for_each_entry(ocl, &dev->file_list, link) {
- if (ocl->state == MEI_FILE_CONNECTING &&
- ocl != cl &&
- cl->me_client_id == ocl->me_client_id)
+ list_for_each_entry(cb, &dev->ctrl_rd_list.list, list) {
+ if (cb->fop_type == MEI_FOP_CONNECT &&
+ mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
return true;
-
}
return false;
}
/**
+ * mei_cl_send_connect - send connect request
+ *
+ * @cl: host client
+ * @cb: callback block
+ *
+ * Return: 0, OK; otherwise, error.
+ */
+static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
+{
+ struct mei_device *dev;
+ int ret;
+
+ dev = cl->dev;
+
+ ret = mei_hbm_cl_connect_req(dev, cl);
+ cl->status = ret;
+ if (ret) {
+ cl->state = MEI_FILE_DISCONNECT_REPLY;
+ return ret;
+ }
+
+ list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
+ cl->timer_count = MEI_CONNECT_TIMEOUT;
+ return 0;
+}
+
+/**
+ * mei_cl_irq_connect - send connect request in irq_thread context
+ *
+ * @cl: host client
+ * @cb: callback block
+ * @cmpl_list: complete list
+ *
+ * Return: 0, OK; otherwise, error.
+ */
+int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list)
+{
+ struct mei_device *dev = cl->dev;
+ u32 msg_slots;
+ int slots;
+ int rets;
+
+ msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
+ slots = mei_hbuf_empty_slots(dev);
+
+ if (mei_cl_is_other_connecting(cl))
+ return 0;
+
+ if (slots < msg_slots)
+ return -EMSGSIZE;
+
+ rets = mei_cl_send_connect(cl, cb);
+ if (rets)
+ list_move_tail(&cb->list, &cmpl_list->list);
+
+ return rets;
+}
+
+/**
* mei_cl_connect - connect host client to the me one
*
* @cl: host client
+ * @me_cl: me client
* @file: pointer to file structure
*
* Locking: called under "dev->device_lock" lock
*
* Return: 0 on success, <0 on failure.
*/
-int mei_cl_connect(struct mei_cl *cl, struct file *file)
+int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
+ struct file *file)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
int rets;
- if (WARN_ON(!cl || !cl->dev))
+ if (WARN_ON(!cl || !cl->dev || !me_cl))
return -ENODEV;
dev = cl->dev;
+ rets = mei_cl_set_connecting(cl, me_cl);
+ if (rets)
+ return rets;
+
+ if (mei_cl_is_fixed_address(cl)) {
+ cl->state = MEI_FILE_CONNECTED;
+ return 0;
+ }
+
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
- return rets;
+ goto nortpm;
}
cb = mei_io_cb_init(cl, MEI_FOP_CONNECT, file);
@@ -855,45 +1052,40 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
if (rets)
goto out;
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+
/* run hbuf acquire last so we don't have to undo */
if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
- cl->state = MEI_FILE_CONNECTING;
- if (mei_hbm_cl_connect_req(dev, cl)) {
- rets = -ENODEV;
+ rets = mei_cl_send_connect(cl, cb);
+ if (rets)
goto out;
- }
- cl->timer_count = MEI_CONNECT_TIMEOUT;
- list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
- } else {
- cl->state = MEI_FILE_INITIALIZING;
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
(cl->state == MEI_FILE_CONNECTED ||
- cl->state == MEI_FILE_DISCONNECTED),
+ cl->state == MEI_FILE_DISCONNECT_REPLY),
mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
mutex_lock(&dev->device_lock);
if (!mei_cl_is_connected(cl)) {
- cl->state = MEI_FILE_DISCONNECTED;
- /* something went really wrong */
+ /* timeout or something went really wrong */
if (!cl->status)
cl->status = -EFAULT;
-
- mei_io_list_flush(&dev->ctrl_rd_list, cl);
- mei_io_list_flush(&dev->ctrl_wr_list, cl);
}
rets = cl->status;
-
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
mei_io_cb_free(cb);
+
+nortpm:
+ if (!mei_cl_is_connected(cl))
+ mei_cl_set_disconnected(cl);
+
return rets;
}
@@ -934,36 +1126,29 @@ err:
* @cl: private data of the file object
*
* Return: 1 if mei_flow_ctrl_creds > 0, 0 otherwise.
- * -ENOENT if mei_cl is not present
- * -EINVAL if single_recv_buf == 0
*/
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
- struct mei_device *dev;
- struct mei_me_client *me_cl;
- int rets = 0;
+ int rets;
- if (WARN_ON(!cl || !cl->dev))
+ if (WARN_ON(!cl || !cl->me_cl))
return -EINVAL;
- dev = cl->dev;
-
if (cl->mei_flow_ctrl_creds > 0)
return 1;
- me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
- if (!me_cl) {
- cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
- return -ENOENT;
+ if (mei_cl_is_fixed_address(cl)) {
+ rets = mei_cl_read_start(cl, mei_cl_mtu(cl), NULL);
+ if (rets && rets != -EBUSY)
+ return rets;
+ return 1;
}
- if (me_cl->mei_flow_ctrl_creds > 0) {
- rets = 1;
- if (WARN_ON(me_cl->props.single_recv_buf == 0))
- rets = -EINVAL;
+ if (mei_cl_is_single_recv_buf(cl)) {
+ if (cl->me_cl->mei_flow_ctrl_creds > 0)
+ return 1;
}
- mei_me_cl_put(me_cl);
- return rets;
+ return 0;
}
/**
@@ -973,43 +1158,26 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
*
* Return:
* 0 on success
- * -ENOENT when me client is not found
* -EINVAL when ctrl credits are <= 0
*/
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
- struct mei_device *dev;
- struct mei_me_client *me_cl;
- int rets;
-
- if (WARN_ON(!cl || !cl->dev))
+ if (WARN_ON(!cl || !cl->me_cl))
return -EINVAL;
- dev = cl->dev;
-
- me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
- if (!me_cl) {
- cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
- return -ENOENT;
- }
+ if (mei_cl_is_fixed_address(cl))
+ return 0;
- if (me_cl->props.single_recv_buf) {
- if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) {
- rets = -EINVAL;
- goto out;
- }
- me_cl->mei_flow_ctrl_creds--;
+ if (mei_cl_is_single_recv_buf(cl)) {
+ if (WARN_ON(cl->me_cl->mei_flow_ctrl_creds <= 0))
+ return -EINVAL;
+ cl->me_cl->mei_flow_ctrl_creds--;
} else {
- if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) {
- rets = -EINVAL;
- goto out;
- }
+ if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
+ return -EINVAL;
cl->mei_flow_ctrl_creds--;
}
- rets = 0;
-out:
- mei_me_cl_put(me_cl);
- return rets;
+ return 0;
}
/**
@@ -1025,7 +1193,6 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
- struct mei_me_client *me_cl;
int rets;
if (WARN_ON(!cl || !cl->dev))
@@ -1040,27 +1207,29 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
if (!list_empty(&cl->rd_pending))
return -EBUSY;
- me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
- if (!me_cl) {
- cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
+ if (!mei_me_cl_is_active(cl->me_cl)) {
+ cl_err(dev, cl, "no such me client\n");
return -ENOTTY;
}
+
/* always allocate at least client max message */
- length = max_t(size_t, length, me_cl->props.max_msg_length);
- mei_me_cl_put(me_cl);
+ length = max_t(size_t, length, mei_cl_mtu(cl));
+ cb = mei_cl_alloc_cb(cl, length, MEI_FOP_READ, fp);
+ if (!cb)
+ return -ENOMEM;
+
+ if (mei_cl_is_fixed_address(cl)) {
+ list_add_tail(&cb->list, &cl->rd_pending);
+ return 0;
+ }
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
- return rets;
+ goto nortpm;
}
- cb = mei_cl_alloc_cb(cl, length, MEI_FOP_READ, fp);
- rets = cb ? 0 : -ENOMEM;
- if (rets)
- goto out;
-
if (mei_hbuf_acquire(dev)) {
rets = mei_hbm_cl_flow_control_req(dev, cl);
if (rets < 0)
@@ -1068,6 +1237,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
list_add_tail(&cb->list, &cl->rd_pending);
} else {
+ rets = 0;
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
@@ -1075,7 +1245,7 @@ out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
-
+nortpm:
if (rets)
mei_io_cb_free(cb);
@@ -1102,6 +1272,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
u32 msg_slots;
int slots;
int rets;
+ bool first_chunk;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
@@ -1110,7 +1281,9 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
buf = &cb->buf;
- rets = mei_cl_flow_ctrl_creds(cl);
+ first_chunk = cb->buf_idx == 0;
+
+ rets = first_chunk ? mei_cl_flow_ctrl_creds(cl) : 1;
if (rets < 0)
return rets;
@@ -1123,8 +1296,8 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
len = buf->size - cb->buf_idx;
msg_slots = mei_data2slots(len);
- mei_hdr.host_addr = cl->host_client_id;
- mei_hdr.me_addr = cl->me_client_id;
+ mei_hdr.host_addr = mei_cl_host_addr(cl);
+ mei_hdr.me_addr = mei_cl_me_id(cl);
mei_hdr.reserved = 0;
mei_hdr.internal = cb->internal;
@@ -1157,12 +1330,14 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
cb->buf_idx += mei_hdr.length;
cb->completed = mei_hdr.msg_complete == 1;
- if (mei_hdr.msg_complete) {
+ if (first_chunk) {
if (mei_cl_flow_ctrl_reduce(cl))
return -EIO;
- list_move_tail(&cb->list, &dev->write_waiting_list.list);
}
+ if (mei_hdr.msg_complete)
+ list_move_tail(&cb->list, &dev->write_waiting_list.list);
+
return 0;
}
@@ -1207,8 +1382,8 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
cb->buf_idx = 0;
cl->writing_state = MEI_IDLE;
- mei_hdr.host_addr = cl->host_client_id;
- mei_hdr.me_addr = cl->me_client_id;
+ mei_hdr.host_addr = mei_cl_host_addr(cl);
+ mei_hdr.me_addr = mei_cl_me_id(cl);
mei_hdr.reserved = 0;
mei_hdr.msg_complete = 0;
mei_hdr.internal = cb->internal;
@@ -1241,21 +1416,19 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
if (rets)
goto err;
+ rets = mei_cl_flow_ctrl_reduce(cl);
+ if (rets)
+ goto err;
+
cl->writing_state = MEI_WRITING;
cb->buf_idx = mei_hdr.length;
cb->completed = mei_hdr.msg_complete == 1;
out:
- if (mei_hdr.msg_complete) {
- rets = mei_cl_flow_ctrl_reduce(cl);
- if (rets < 0)
- goto err;
-
+ if (mei_hdr.msg_complete)
list_add_tail(&cb->list, &dev->write_waiting_list.list);
- } else {
+ else
list_add_tail(&cb->list, &dev->write_list.list);
- }
-
if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
@@ -1289,20 +1462,36 @@ err:
*/
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
- if (cb->fop_type == MEI_FOP_WRITE) {
+ struct mei_device *dev = cl->dev;
+
+ switch (cb->fop_type) {
+ case MEI_FOP_WRITE:
mei_io_cb_free(cb);
- cb = NULL;
cl->writing_state = MEI_WRITE_COMPLETE;
- if (waitqueue_active(&cl->tx_wait))
+ if (waitqueue_active(&cl->tx_wait)) {
wake_up_interruptible(&cl->tx_wait);
+ } else {
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_request_autosuspend(dev->dev);
+ }
+ break;
- } else if (cb->fop_type == MEI_FOP_READ) {
+ case MEI_FOP_READ:
list_add_tail(&cb->list, &cl->rd_completed);
if (waitqueue_active(&cl->rx_wait))
wake_up_interruptible_all(&cl->rx_wait);
else
mei_cl_bus_rx_event(cl);
+ break;
+
+ case MEI_FOP_CONNECT:
+ case MEI_FOP_DISCONNECT:
+ if (waitqueue_active(&cl->wait))
+ wake_up(&cl->wait);
+ break;
+ default:
+ BUG();
}
}
@@ -1312,16 +1501,12 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
*
* @dev: mei device
*/
-
void mei_cl_all_disconnect(struct mei_device *dev)
{
struct mei_cl *cl;
- list_for_each_entry(cl, &dev->file_list, link) {
- cl->state = MEI_FILE_DISCONNECTED;
- cl->mei_flow_ctrl_creds = 0;
- cl->timer_count = 0;
- }
+ list_for_each_entry(cl, &dev->file_list, link)
+ mei_cl_set_disconnected(cl);
}
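The flow-control rework in client.c above leaves transmit credits in one of two places: on the host client itself, or on the shared me client when the firmware side exposes a single receive buffer; fixed-address clients bypass HBM flow control entirely. A condensed restatement of the check, as a sketch only (has_tx_credit() is a hypothetical name, and it omits the read-start side effect mei_cl_flow_ctrl_creds() performs for fixed-address clients):

	static int has_tx_credit(const struct mei_cl *cl)
	{
		if (cl->mei_flow_ctrl_creds > 0)
			return 1;		/* per-client credit */

		if (mei_cl_is_fixed_address(cl))
			return 1;		/* no HBM flow control */

		if (mei_cl_is_single_recv_buf(cl))	/* shared fw-side pool */
			return cl->me_cl->mei_flow_ctrl_creds > 0;

		return 0;
	}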
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 0a39e5d45171..8d7f057f1045 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -44,6 +44,30 @@ void mei_me_cl_rm_by_uuid_id(struct mei_device *dev,
const uuid_le *uuid, u8 id);
void mei_me_cl_rm_all(struct mei_device *dev);
+/**
+ * mei_me_cl_is_active - check whether me client is active in the fw
+ *
+ * @me_cl: me client
+ *
+ * Return: true if the me client is active in the firmware
+ */
+static inline bool mei_me_cl_is_active(const struct mei_me_client *me_cl)
+{
+ return !list_empty_careful(&me_cl->list);
+}
+
+/**
+ * mei_me_cl_uuid - return me client protocol name (uuid)
+ *
+ * @me_cl: me client
+ *
+ * Return: me client protocol name
+ */
+static inline const uuid_le *mei_me_cl_uuid(const struct mei_me_client *me_cl)
+{
+ return &me_cl->props.protocol_name;
+}
+
/*
* MEI IO Functions
*/
@@ -94,18 +118,96 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
/**
* mei_cl_is_connected - host client is connected
*
- * @cl: host clinet
+ * @cl: host client
*
- * Return: true if the host clinet is connected
+ * Return: true if the host client is connected
*/
static inline bool mei_cl_is_connected(struct mei_cl *cl)
{
return cl->state == MEI_FILE_CONNECTED;
}
-bool mei_cl_is_other_connecting(struct mei_cl *cl);
+/**
+ * mei_cl_me_id - me client id
+ *
+ * @cl: host client
+ *
+ * Return: me client id or 0 if client is not connected
+ */
+static inline u8 mei_cl_me_id(const struct mei_cl *cl)
+{
+ return cl->me_cl ? cl->me_cl->client_id : 0;
+}
+
+/**
+ * mei_cl_mtu - maximal message size the client can send and receive
+ *
+ * @cl: host client
+ *
+ * Return: mtu
+ */
+static inline size_t mei_cl_mtu(const struct mei_cl *cl)
+{
+ return cl->me_cl->props.max_msg_length;
+}
+
+/**
+ * mei_cl_is_fixed_address - check whether the me client uses fixed address
+ *
+ * @cl: host client
+ *
+ * Return: true if the client is connected and it has fixed me address
+ */
+static inline bool mei_cl_is_fixed_address(const struct mei_cl *cl)
+{
+ return cl->me_cl && cl->me_cl->props.fixed_address;
+}
+
+/**
+ * mei_cl_is_single_recv_buf - check whether the me client
+ * uses single receiving buffer
+ *
+ * @cl: host client
+ *
+ * Return: true if single_recv_buf == 1; false otherwise
+ */
+static inline bool mei_cl_is_single_recv_buf(const struct mei_cl *cl)
+{
+ return cl->me_cl->props.single_recv_buf;
+}
+
+/**
+ * mei_cl_uuid - client's uuid
+ *
+ * @cl: host client
+ *
+ * Return: uuid of the connected me client
+ */
+static inline const uuid_le *mei_cl_uuid(const struct mei_cl *cl)
+{
+ return mei_me_cl_uuid(cl->me_cl);
+}
+
+/**
+ * mei_cl_host_addr - client's host address
+ *
+ * @cl: host client
+ *
+ * Return: 0 for fixed address client, host address for dynamic client
+ */
+static inline u8 mei_cl_host_addr(const struct mei_cl *cl)
+{
+ return mei_cl_is_fixed_address(cl) ? 0 : cl->host_client_id;
+}
+
int mei_cl_disconnect(struct mei_cl *cl);
-int mei_cl_connect(struct mei_cl *cl, struct file *file);
+void mei_cl_set_disconnected(struct mei_cl *cl);
+int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list);
+int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
+ struct file *file);
+int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list);
int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp);
int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
struct mei_cl_cb *cmpl_list);
@@ -117,14 +219,12 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
void mei_host_client_init(struct work_struct *work);
-
-
void mei_cl_all_disconnect(struct mei_device *dev);
void mei_cl_all_wakeup(struct mei_device *dev);
void mei_cl_all_write_clear(struct mei_device *dev);
#define MEI_CL_FMT "cl:host=%02d me=%02d "
-#define MEI_CL_PRM(cl) (cl)->host_client_id, (cl)->me_client_id
+#define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl)
#define cl_dbg(dev, cl, format, arg...) \
dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
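One caveat on the accessors added above: mei_cl_mtu() and mei_cl_is_single_recv_buf() dereference cl->me_cl unconditionally, so they are safe only once a connection has been set up. An illustrative guarded caller (max_len() is a hypothetical helper, not part of this patch):

	static ssize_t max_len(const struct mei_cl *cl)
	{
		/* no fw client behind this host client yet */
		if (!cl->me_cl || !mei_me_cl_is_active(cl->me_cl))
			return -ENOTTY;

		return mei_cl_mtu(cl);
	}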
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index d9cd7e6ee484..eb868341247f 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -116,7 +116,7 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
pos += scnprintf(buf + pos, bufsz - pos,
"%2d|%2d|%4d|%5d|%2d|%2d|\n",
- i, cl->me_client_id, cl->host_client_id, cl->state,
+ i, mei_cl_me_id(cl), cl->host_client_id, cl->state,
!list_empty(&cl->rd_completed), cl->writing_state);
i++;
}
@@ -149,6 +149,13 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
mei_dev_state_str(dev->dev_state));
pos += scnprintf(buf + pos, bufsz - pos, "hbm: %s\n",
mei_hbm_state_str(dev->hbm_state));
+
+ if (dev->hbm_state == MEI_HBM_STARTED) {
+ pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n",
+ dev->hbm_f_pg_supported);
+ }
+
pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n",
mei_pg_is_enabled(dev) ? "ENABLED" : "DISABLED",
mei_pg_state_str(mei_pg_state(dev)));
@@ -209,6 +216,12 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)
dev_err(dev->dev, "devstate: registration failed\n");
goto err;
}
+ f = debugfs_create_bool("allow_fixed_address", S_IRUSR | S_IWUSR, dir,
+ &dev->allow_fixed_address);
+ if (!f) {
+ dev_err(dev->dev, "allow_fixed_address: registration failed\n");
+ goto err;
+ }
dev->dbgfs_dir = dir;
return 0;
err:
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 58da92565c5e..a4f283165a33 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -150,8 +150,8 @@ void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
memset(cmd, 0, len);
cmd->hbm_cmd = hbm_cmd;
- cmd->host_addr = cl->host_client_id;
- cmd->me_addr = cl->me_client_id;
+ cmd->host_addr = mei_cl_host_addr(cl);
+ cmd->me_addr = mei_cl_me_id(cl);
}
/**
@@ -188,8 +188,8 @@ int mei_hbm_cl_write(struct mei_device *dev,
static inline
bool mei_hbm_cl_addr_equal(struct mei_cl *cl, struct mei_hbm_cl_cmd *cmd)
{
- return cl->host_client_id == cmd->host_addr &&
- cl->me_client_id == cmd->me_addr;
+ return mei_cl_host_addr(cl) == cmd->host_addr &&
+ mei_cl_me_id(cl) == cmd->me_addr;
}
/**
@@ -572,7 +572,7 @@ static void mei_hbm_cl_disconnect_res(struct mei_device *dev, struct mei_cl *cl,
cl_dbg(dev, cl, "hbm: disconnect response status=%d\n", rs->status);
if (rs->status == MEI_CL_DISCONN_SUCCESS)
- cl->state = MEI_FILE_DISCONNECTED;
+ cl->state = MEI_FILE_DISCONNECT_REPLY;
cl->status = 0;
}
@@ -611,7 +611,7 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, struct mei_cl *cl,
if (rs->status == MEI_CL_CONN_SUCCESS)
cl->state = MEI_FILE_CONNECTED;
else
- cl->state = MEI_FILE_DISCONNECTED;
+ cl->state = MEI_FILE_DISCONNECT_REPLY;
cl->status = mei_cl_conn_status_to_errno(rs->status);
}
@@ -680,8 +680,8 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
cl = mei_hbm_cl_find_by_cmd(dev, disconnect_req);
if (cl) {
- cl_dbg(dev, cl, "disconnect request received\n");
- cl->state = MEI_FILE_DISCONNECTED;
+ cl_dbg(dev, cl, "fw disconnect request received\n");
+ cl->state = MEI_FILE_DISCONNECTING;
cl->timer_count = 0;
cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL);
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 6fb75e62a764..43d7101ff993 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -663,11 +663,27 @@ int mei_me_pg_exit_sync(struct mei_device *dev)
mutex_lock(&dev->device_lock);
reply:
- if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
- ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
+ if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
+ ret = -ETIME;
+ goto out;
+ }
+
+ dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
+ ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
+ if (ret)
+ return ret;
+
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(dev->wait_pg,
+ dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
+ mutex_lock(&dev->device_lock);
+
+ if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
+ ret = 0;
else
ret = -ETIME;
+out:
dev->pg_event = MEI_PG_EVENT_IDLE;
hw->pg_state = MEI_PG_OFF;
@@ -675,6 +691,19 @@ reply:
}
/**
+ * mei_me_pg_in_transition - is device now in pg transition
+ *
+ * @dev: the device structure
+ *
+ * Return: true if in pg transition, false otherwise
+ */
+static bool mei_me_pg_in_transition(struct mei_device *dev)
+{
+ return dev->pg_event >= MEI_PG_EVENT_WAIT &&
+ dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
+}
+
+/**
* mei_me_pg_is_enabled - detect if PG is supported by HW
*
* @dev: the device structure
@@ -705,6 +734,24 @@ notsupported:
}
/**
+ * mei_me_pg_intr - perform pg processing in interrupt thread handler
+ *
+ * @dev: the device structure
+ */
+static void mei_me_pg_intr(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+
+ if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
+ return;
+
+ dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
+ hw->pg_state = MEI_PG_OFF;
+ if (waitqueue_active(&dev->wait_pg))
+ wake_up(&dev->wait_pg);
+}
+
+/**
* mei_me_irq_quick_handler - The ISR of the MEI device
*
* @irq: The irq number
@@ -761,6 +808,8 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
goto end;
}
+ mei_me_pg_intr(dev);
+
/* check if we need to start the dev */
if (!mei_host_is_ready(dev)) {
if (mei_hw_is_ready(dev)) {
@@ -797,9 +846,10 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/*
* During PG handshake the only allowed write is the reply to the
* PG exit message, so block calling write function
- * if the pg state is not idle
+ * if the pg event is in PG handshake
*/
- if (dev->pg_event == MEI_PG_EVENT_IDLE) {
+ if (dev->pg_event != MEI_PG_EVENT_WAIT &&
+ dev->pg_event != MEI_PG_EVENT_RECEIVED) {
rets = mei_irq_write_handler(dev, &complete_list);
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
}
@@ -824,6 +874,7 @@ static const struct mei_hw_ops mei_me_hw_ops = {
.hw_config = mei_me_hw_config,
.hw_start = mei_me_hw_start,
+ .pg_in_transition = mei_me_pg_in_transition,
.pg_is_enabled = mei_me_pg_is_enabled,
.intr_clear = mei_me_intr_clear,
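The PG exit path above now completes a second handshake through the interrupt thread: the sleeper arms MEI_PG_EVENT_INTR_WAIT before sending the exit response, and mei_me_pg_intr() acknowledges with MEI_PG_EVENT_INTR_RECEIVED and wakes it. The waiter side in isolation, as a sketch (send_reply() is an illustrative stand-in for the mei_hbm_pg() call):

	static int pg_exit_wait(struct mei_device *dev, unsigned long timeout)
	{
		int ret;

		dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
		ret = send_reply(dev);		/* stands in for mei_hbm_pg() */
		if (ret)
			return ret;

		mutex_unlock(&dev->device_lock);
		wait_event_timeout(dev->wait_pg,
				   dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
				   timeout);
		mutex_lock(&dev->device_lock);

		return dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED ? 0 : -ETIME;
	}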
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 7abafe7d120d..bae680c648ff 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -16,6 +16,7 @@
#include <linux/pci.h>
#include <linux/jiffies.h>
+#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/irqreturn.h>
@@ -218,26 +219,25 @@ static u32 mei_txe_aliveness_get(struct mei_device *dev)
*
* Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
*
- * Return: > 0 if the expected value was received, -ETIME otherwise
+ * Return: 0 if the expected value was received, -ETIME otherwise
*/
static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
- int t = 0;
+ ktime_t stop, start;
+ start = ktime_get();
+ stop = ktime_add(start, ms_to_ktime(SEC_ALIVENESS_WAIT_TIMEOUT));
do {
hw->aliveness = mei_txe_aliveness_get(dev);
if (hw->aliveness == expected) {
dev->pg_event = MEI_PG_EVENT_IDLE;
- dev_dbg(dev->dev,
- "aliveness settled after %d msecs\n", t);
- return t;
+ dev_dbg(dev->dev, "aliveness settled after %lld usecs\n",
+ ktime_to_us(ktime_sub(ktime_get(), start)));
+ return 0;
}
- mutex_unlock(&dev->device_lock);
- msleep(MSEC_PER_SEC / 5);
- mutex_lock(&dev->device_lock);
- t += MSEC_PER_SEC / 5;
- } while (t < SEC_ALIVENESS_WAIT_TIMEOUT);
+ usleep_range(20, 50);
+ } while (ktime_before(ktime_get(), stop));
dev->pg_event = MEI_PG_EVENT_IDLE;
dev_err(dev->dev, "aliveness timed out\n");
@@ -302,6 +302,18 @@ int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
}
/**
+ * mei_txe_pg_in_transition - is device now in pg transition
+ *
+ * @dev: the device structure
+ *
+ * Return: true if in pg transition, false otherwise
+ */
+static bool mei_txe_pg_in_transition(struct mei_device *dev)
+{
+ return dev->pg_event == MEI_PG_EVENT_WAIT;
+}
+
+/**
* mei_txe_pg_is_enabled - detect if PG is supported by HW
*
* @dev: the device structure
@@ -1138,6 +1150,7 @@ static const struct mei_hw_ops mei_txe_hw_ops = {
.hw_config = mei_txe_hw_config,
.hw_start = mei_txe_hw_start,
+ .pg_in_transition = mei_txe_pg_in_transition,
.pg_is_enabled = mei_txe_pg_is_enabled,
.intr_clear = mei_txe_intr_clear,
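The aliveness-poll rewrite above trades a coarse msleep() iteration counter for a deadline computed with ktime, which both tightens the poll interval and keeps the timeout accurate under scheduling jitter. The idiom in isolation, as a sketch (read_state(), expected and timeout_ms are illustrative stand-ins):

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/ktime.h>
	#include <linux/types.h>

	static u32 read_state(void);	/* hypothetical register read */

	static int poll_until(u32 expected, unsigned int timeout_ms)
	{
		ktime_t stop = ktime_add(ktime_get(), ms_to_ktime(timeout_ms));

		do {
			if (read_state() == expected)
				return 0;
			usleep_range(20, 50);	/* short, scheduler-friendly */
		} while (ktime_before(ktime_get(), stop));

		return -ETIME;
	}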
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 97353cf8d9b6..94514b2c7a50 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -361,13 +361,15 @@ bool mei_write_is_idle(struct mei_device *dev)
{
bool idle = (dev->dev_state == MEI_DEV_ENABLED &&
list_empty(&dev->ctrl_wr_list.list) &&
- list_empty(&dev->write_list.list));
+ list_empty(&dev->write_list.list) &&
+ list_empty(&dev->write_waiting_list.list));
- dev_dbg(dev->dev, "write pg: is idle[%d] state=%s ctrl=%d write=%d\n",
+ dev_dbg(dev->dev, "write pg: is idle[%d] state=%s ctrl=%01d write=%01d wwait=%01d\n",
idle,
mei_dev_state_str(dev->dev_state),
list_empty(&dev->ctrl_wr_list.list),
- list_empty(&dev->write_list.list));
+ list_empty(&dev->write_list.list),
+ list_empty(&dev->write_waiting_list.list));
return idle;
}
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 3f84d2edcde4..3f3405269c39 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -65,8 +65,8 @@ EXPORT_SYMBOL_GPL(mei_irq_compl_handler);
static inline int mei_cl_hbm_equal(struct mei_cl *cl,
struct mei_msg_hdr *mei_hdr)
{
- return cl->host_client_id == mei_hdr->host_addr &&
- cl->me_client_id == mei_hdr->me_addr;
+ return mei_cl_host_addr(cl) == mei_hdr->host_addr &&
+ mei_cl_me_id(cl) == mei_hdr->me_addr;
}
/**
@@ -180,56 +180,14 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
return -EMSGSIZE;
ret = mei_hbm_cl_disconnect_rsp(dev, cl);
-
- cl->state = MEI_FILE_DISCONNECTED;
- cl->status = 0;
+ mei_cl_set_disconnected(cl);
mei_io_cb_free(cb);
+ mei_me_cl_put(cl->me_cl);
+ cl->me_cl = NULL;
return ret;
}
-
-
-/**
- * mei_cl_irq_disconnect - processes close related operation from
- * interrupt thread context - send disconnect request
- *
- * @cl: client
- * @cb: callback block.
- * @cmpl_list: complete list.
- *
- * Return: 0, OK; otherwise, error.
- */
-static int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
- struct mei_cl_cb *cmpl_list)
-{
- struct mei_device *dev = cl->dev;
- u32 msg_slots;
- int slots;
-
- msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
- slots = mei_hbuf_empty_slots(dev);
-
- if (slots < msg_slots)
- return -EMSGSIZE;
-
- if (mei_hbm_cl_disconnect_req(dev, cl)) {
- cl->status = 0;
- cb->buf_idx = 0;
- list_move_tail(&cb->list, &cmpl_list->list);
- return -EIO;
- }
-
- cl->state = MEI_FILE_DISCONNECTING;
- cl->status = 0;
- cb->buf_idx = 0;
- list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
- cl->timer_count = MEI_CONNECT_TIMEOUT;
-
- return 0;
-}
-
-
/**
* mei_cl_irq_read - processes client read related operation from the
* interrupt thread context - request for flow control credits
@@ -267,49 +225,6 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
return 0;
}
-
-/**
- * mei_cl_irq_connect - send connect request in irq_thread context
- *
- * @cl: client
- * @cb: callback block.
- * @cmpl_list: complete list.
- *
- * Return: 0, OK; otherwise, error.
- */
-static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
- struct mei_cl_cb *cmpl_list)
-{
- struct mei_device *dev = cl->dev;
- u32 msg_slots;
- int slots;
- int ret;
-
- msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
- slots = mei_hbuf_empty_slots(dev);
-
- if (mei_cl_is_other_connecting(cl))
- return 0;
-
- if (slots < msg_slots)
- return -EMSGSIZE;
-
- cl->state = MEI_FILE_CONNECTING;
-
- ret = mei_hbm_cl_connect_req(dev, cl);
- if (ret) {
- cl->status = ret;
- cb->buf_idx = 0;
- list_del_init(&cb->list);
- return ret;
- }
-
- list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
- cl->timer_count = MEI_CONNECT_TIMEOUT;
- return 0;
-}
-
-
/**
* mei_irq_read_handler - bottom half read routine after ISR to
* handle the read processing.
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 3e2968159506..8eb0a9500a90 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -94,7 +94,7 @@ static int mei_release(struct inode *inode, struct file *file)
{
struct mei_cl *cl = file->private_data;
struct mei_device *dev;
- int rets = 0;
+ int rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
@@ -106,11 +106,8 @@ static int mei_release(struct inode *inode, struct file *file)
rets = mei_amthif_release(dev, file);
goto out;
}
- if (mei_cl_is_connected(cl)) {
- cl->state = MEI_FILE_DISCONNECTING;
- cl_dbg(dev, cl, "disconnecting\n");
- rets = mei_cl_disconnect(cl);
- }
+ rets = mei_cl_disconnect(cl);
+
mei_cl_flush_queues(cl, file);
cl_dbg(dev, cl, "removing\n");
@@ -186,8 +183,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
err = mei_cl_read_start(cl, length, file);
if (err && err != -EBUSY) {
- dev_dbg(dev->dev,
- "mei start read failure with status = %d\n", err);
+ cl_dbg(dev, cl, "mei start read failure status = %d\n", err);
rets = err;
goto out;
}
@@ -218,6 +214,11 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
cb = mei_cl_read_cb(cl, file);
if (!cb) {
+ if (mei_cl_is_fixed_address(cl) && dev->allow_fixed_address) {
+ cb = mei_cl_read_cb(cl, NULL);
+ if (cb)
+ goto copy_buffer;
+ }
rets = 0;
goto out;
}
@@ -226,11 +227,11 @@ copy_buffer:
/* now copy the data to user space */
if (cb->status) {
rets = cb->status;
- dev_dbg(dev->dev, "read operation failed %d\n", rets);
+ cl_dbg(dev, cl, "read operation failed %d\n", rets);
goto free;
}
- dev_dbg(dev->dev, "buf.size = %d buf.idx= %ld\n",
+ cl_dbg(dev, cl, "buf.size = %d buf.idx = %ld\n",
cb->buf.size, cb->buf_idx);
if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
rets = -EMSGSIZE;
@@ -256,7 +257,7 @@ free:
mei_io_cb_free(cb);
out:
- dev_dbg(dev->dev, "end mei read rets= %d\n", rets);
+ cl_dbg(dev, cl, "end mei read rets = %d\n", rets);
mutex_unlock(&dev->device_lock);
return rets;
}
@@ -274,7 +275,6 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
size_t length, loff_t *offset)
{
struct mei_cl *cl = file->private_data;
- struct mei_me_client *me_cl = NULL;
struct mei_cl_cb *write_cb = NULL;
struct mei_device *dev;
unsigned long timeout = 0;
@@ -292,27 +292,27 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
goto out;
}
- me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
- if (!me_cl) {
- rets = -ENOTTY;
+ if (!mei_cl_is_connected(cl)) {
+ cl_err(dev, cl, "is not connected");
+ rets = -ENODEV;
goto out;
}
- if (length == 0) {
- rets = 0;
+ if (!mei_me_cl_is_active(cl->me_cl)) {
+ rets = -ENOTTY;
goto out;
}
- if (length > me_cl->props.max_msg_length) {
+ if (length > mei_cl_mtu(cl)) {
rets = -EFBIG;
goto out;
}
- if (!mei_cl_is_connected(cl)) {
- cl_err(dev, cl, "is not connected");
- rets = -ENODEV;
+ if (length == 0) {
+ rets = 0;
goto out;
}
+
if (cl == &dev->iamthif_cl) {
write_cb = mei_amthif_find_read_list_entry(dev, file);
@@ -350,14 +350,12 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
"amthif write failed with status = %d\n", rets);
goto out;
}
- mei_me_cl_put(me_cl);
mutex_unlock(&dev->device_lock);
return length;
}
rets = mei_cl_write(cl, write_cb, false);
out:
- mei_me_cl_put(me_cl);
mutex_unlock(&dev->device_lock);
if (rets < 0)
mei_io_cb_free(write_cb);
@@ -395,17 +393,16 @@ static int mei_ioctl_connect_client(struct file *file,
/* find ME client we're trying to connect to */
me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
- if (!me_cl || me_cl->props.fixed_address) {
+ if (!me_cl ||
+ (me_cl->props.fixed_address && !dev->allow_fixed_address)) {
dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
- &data->in_client_uuid);
+ &data->in_client_uuid);
+ mei_me_cl_put(me_cl);
return -ENOTTY;
}
- cl->me_client_id = me_cl->client_id;
- cl->cl_uuid = me_cl->props.protocol_name;
-
dev_dbg(dev->dev, "Connect to FW Client ID = %d\n",
- cl->me_client_id);
+ me_cl->client_id);
dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n",
me_cl->props.protocol_version);
dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n",
@@ -441,7 +438,7 @@ static int mei_ioctl_connect_client(struct file *file,
client->protocol_version = me_cl->props.protocol_version;
dev_dbg(dev->dev, "Can connect?\n");
- rets = mei_cl_connect(cl, file);
+ rets = mei_cl_connect(cl, me_cl, file);
end:
mei_me_cl_put(me_cl);
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index f066ecd71939..453f6a333b42 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -88,7 +88,8 @@ enum file_state {
MEI_FILE_CONNECTING,
MEI_FILE_CONNECTED,
MEI_FILE_DISCONNECTING,
- MEI_FILE_DISCONNECTED
+ MEI_FILE_DISCONNECT_REPLY,
+ MEI_FILE_DISCONNECTED,
};
/* MEI device states */
@@ -176,6 +177,8 @@ struct mei_fw_status {
* @props: client properties
* @client_id: me client id
* @mei_flow_ctrl_creds: flow control credits
+ * @connect_count: number of connections to this client
+ * @reserved: reserved
*/
struct mei_me_client {
struct list_head list;
@@ -183,6 +186,8 @@ struct mei_me_client {
struct mei_client_properties props;
u8 client_id;
u8 mei_flow_ctrl_creds;
+ u8 connect_count;
+ u8 reserved;
};
@@ -226,11 +231,11 @@ struct mei_cl_cb {
* @rx_wait: wait queue for rx completion
* @wait: wait queue for management operation
* @status: connection status
- * @cl_uuid: client uuid name
+ * @me_cl: fw client connected
* @host_client_id: host id
- * @me_client_id: me/fw id
* @mei_flow_ctrl_creds: transmit flow credentials
* @timer_count: watchdog timer for operation completion
+ * @reserved: reserved for alignment
* @writing_state: state of the tx
* @rd_pending: pending read credits
* @rd_completed: completed read
@@ -246,11 +251,11 @@ struct mei_cl {
wait_queue_head_t rx_wait;
wait_queue_head_t wait;
int status;
- uuid_le cl_uuid;
+ struct mei_me_client *me_cl;
u8 host_client_id;
- u8 me_client_id;
u8 mei_flow_ctrl_creds;
u8 timer_count;
+ u8 reserved;
enum mei_file_transaction_states writing_state;
struct list_head rd_pending;
struct list_head rd_completed;
@@ -271,6 +276,7 @@ struct mei_cl {
* @fw_status : get fw status registers
* @pg_state : power gating state of the device
+ * @pg_in_transition : is device now in pg transition
* @pg_is_enabled : is power gating enabled
* @intr_clear : clear pending interrupts
@@ -300,6 +306,7 @@ struct mei_hw_ops {
int (*fw_status)(struct mei_device *dev, struct mei_fw_status *fw_sts);
enum mei_pg_state (*pg_state)(struct mei_device *dev);
+ bool (*pg_in_transition)(struct mei_device *dev);
bool (*pg_is_enabled)(struct mei_device *dev);
void (*intr_clear)(struct mei_device *dev);
@@ -323,34 +330,14 @@ struct mei_hw_ops {
/* MEI bus API */
-/**
- * struct mei_cl_ops - MEI CL device ops
- * This structure allows ME host clients to implement technology
- * specific operations.
- *
- * @enable: Enable an MEI CL device. Some devices require specific
- * HECI commands to initialize completely.
- * @disable: Disable an MEI CL device.
- * @send: Tx hook for the device. This allows ME host clients to trap
- * the device driver buffers before actually physically
- * pushing it to the ME.
- * @recv: Rx hook for the device. This allows ME host clients to trap the
- * ME buffers before forwarding them to the device driver.
- */
-struct mei_cl_ops {
- int (*enable)(struct mei_cl_device *device);
- int (*disable)(struct mei_cl_device *device);
- int (*send)(struct mei_cl_device *device, u8 *buf, size_t length);
- int (*recv)(struct mei_cl_device *device, u8 *buf, size_t length);
-};
-
struct mei_cl_device *mei_cl_add_device(struct mei_device *dev,
- uuid_le uuid, char *name,
- struct mei_cl_ops *ops);
+ struct mei_me_client *me_cl,
+ struct mei_cl *cl,
+ char *name);
void mei_cl_remove_device(struct mei_cl_device *device);
-ssize_t __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length);
-ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length);
+ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
+ bool blocking);
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
void mei_cl_bus_rx_event(struct mei_cl *cl);
void mei_cl_bus_remove_devices(struct mei_device *dev);
@@ -358,51 +345,21 @@ int mei_cl_bus_init(void);
void mei_cl_bus_exit(void);
struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev, uuid_le uuid);
-
-/**
- * struct mei_cl_device - MEI device handle
- * An mei_cl_device pointer is returned from mei_add_device()
- * and links MEI bus clients to their actual ME host client pointer.
- * Drivers for MEI devices will get an mei_cl_device pointer
- * when being probed and shall use it for doing ME bus I/O.
- *
- * @dev: linux driver model device pointer
- * @cl: mei client
- * @ops: ME transport ops
- * @event_work: async work to execute event callback
- * @event_cb: Drivers register this callback to get asynchronous ME
- * events (e.g. Rx buffer pending) notifications.
- * @event_context: event callback run context
- * @events: Events bitmask sent to the driver.
- * @priv_data: client private data
- */
-struct mei_cl_device {
- struct device dev;
-
- struct mei_cl *cl;
-
- const struct mei_cl_ops *ops;
-
- struct work_struct event_work;
- mei_cl_event_cb_t event_cb;
- void *event_context;
- unsigned long events;
-
- void *priv_data;
-};
-
-
/**
* enum mei_pg_event - power gating transition events
*
* @MEI_PG_EVENT_IDLE: the driver is not in power gating transition
* @MEI_PG_EVENT_WAIT: the driver is waiting for a pg event to complete
* @MEI_PG_EVENT_RECEIVED: the driver received pg event
+ * @MEI_PG_EVENT_INTR_WAIT: the driver is waiting for a pg event interrupt
+ * @MEI_PG_EVENT_INTR_RECEIVED: the driver received a pg event interrupt
*/
enum mei_pg_event {
MEI_PG_EVENT_IDLE,
MEI_PG_EVENT_WAIT,
MEI_PG_EVENT_RECEIVED,
+ MEI_PG_EVENT_INTR_WAIT,
+ MEI_PG_EVENT_INTR_RECEIVED,
};
/**
@@ -467,6 +424,8 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @host_clients_map : host clients id pool
* @me_client_index : last FW client index in enumeration
*
+ * @allow_fixed_address: allow user space to connect to a fixed address client
+ *
* @wd_cl : watchdog client
* @wd_state : watchdog client state
* @wd_pending : watchdog command is pending
@@ -479,7 +438,6 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @iamthif_cl : amthif host client
* @iamthif_current_cb : amthif current operation callback
* @iamthif_open_count : number of opened amthif connections
- * @iamthif_mtu : amthif client max message length
* @iamthif_timer : time stamp of current amthif command completion
* @iamthif_stall_timer : timer to detect amthif hang
* @iamthif_state : amthif processor state
@@ -558,6 +516,8 @@ struct mei_device {
DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
unsigned long me_client_index;
+ u32 allow_fixed_address;
+
struct mei_cl wd_cl;
enum mei_wd_states wd_state;
bool wd_pending;
@@ -573,7 +533,6 @@ struct mei_device {
struct mei_cl iamthif_cl;
struct mei_cl_cb *iamthif_current_cb;
long iamthif_open_count;
- int iamthif_mtu;
unsigned long iamthif_timer;
u32 iamthif_stall_timer;
enum iamthif_states iamthif_state;
@@ -652,7 +611,7 @@ void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list);
*/
void mei_amthif_reset_params(struct mei_device *dev);
-int mei_amthif_host_init(struct mei_device *dev);
+int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
int mei_amthif_read(struct mei_device *dev, struct file *file,
char __user *ubuf, size_t length, loff_t *offset);
@@ -679,7 +638,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
/*
* NFC functions
*/
-int mei_nfc_host_init(struct mei_device *dev);
+int mei_nfc_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
void mei_nfc_host_exit(struct mei_device *dev);
/*
@@ -689,7 +648,7 @@ extern const uuid_le mei_nfc_guid;
int mei_wd_send(struct mei_device *dev);
int mei_wd_stop(struct mei_device *dev);
-int mei_wd_host_init(struct mei_device *dev);
+int mei_wd_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
/*
* mei_watchdog_register - Registering watchdog interface
* once we got connection to the WD Client
@@ -717,6 +676,11 @@ static inline enum mei_pg_state mei_pg_state(struct mei_device *dev)
return dev->ops->pg_state(dev);
}
+static inline bool mei_pg_in_transition(struct mei_device *dev)
+{
+ return dev->ops->pg_in_transition(dev);
+}
+
static inline bool mei_pg_is_enabled(struct mei_device *dev)
{
return dev->ops->pg_is_enabled(dev);
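
The new @pg_in_transition hook pairs with the MEI_PG_EVENT_INTR_* states added above. As a hedged sketch (not part of this hunk), an ME hardware backend could implement it by treating every pg_event between WAIT and INTR_WAIT as a transition in flight, assuming struct mei_device carries a pg_event field:

    static bool mei_me_pg_in_transition(struct mei_device *dev)
    {
            /* WAIT, RECEIVED and INTR_WAIT all mean a power gating
             * handshake has started but is not yet acknowledged. */
            return dev->pg_event >= MEI_PG_EVENT_WAIT &&
                   dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
    }
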
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index c3bcb63686d7..b983c4ecad38 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -91,30 +91,25 @@ struct mei_nfc_hci_hdr {
/**
* struct mei_nfc_dev - NFC mei device
*
+ * @me_cl: NFC me client
* @cl: NFC host client
* @cl_info: NFC info host client
* @init_work: perform connection to the info client
- * @send_wq: send completion wait queue
* @fw_ivn: NFC Interface Version Number
* @vendor_id: NFC manufacturer ID
* @radio_type: NFC radio type
* @bus_name: bus name
*
- * @req_id: message counter
- * @recv_req_id: reception message counter
*/
struct mei_nfc_dev {
+ struct mei_me_client *me_cl;
struct mei_cl *cl;
struct mei_cl *cl_info;
struct work_struct init_work;
- wait_queue_head_t send_wq;
u8 fw_ivn;
u8 vendor_id;
u8 radio_type;
char *bus_name;
-
- u16 req_id;
- u16 recv_req_id;
};
/* UUIDs for NFC F/W clients */
@@ -151,6 +146,7 @@ static void mei_nfc_free(struct mei_nfc_dev *ndev)
kfree(ndev->cl_info);
}
+ mei_me_cl_put(ndev->me_cl);
kfree(ndev);
}
@@ -199,73 +195,6 @@ static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
return 0;
}
-static int mei_nfc_connect(struct mei_nfc_dev *ndev)
-{
- struct mei_device *dev;
- struct mei_cl *cl;
- struct mei_nfc_cmd *cmd, *reply;
- struct mei_nfc_connect *connect;
- struct mei_nfc_connect_resp *connect_resp;
- size_t connect_length, connect_resp_length;
- int bytes_recv, ret;
-
- cl = ndev->cl;
- dev = cl->dev;
-
- connect_length = sizeof(struct mei_nfc_cmd) +
- sizeof(struct mei_nfc_connect);
-
- connect_resp_length = sizeof(struct mei_nfc_cmd) +
- sizeof(struct mei_nfc_connect_resp);
-
- cmd = kzalloc(connect_length, GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
- connect = (struct mei_nfc_connect *)cmd->data;
-
- reply = kzalloc(connect_resp_length, GFP_KERNEL);
- if (!reply) {
- kfree(cmd);
- return -ENOMEM;
- }
-
- connect_resp = (struct mei_nfc_connect_resp *)reply->data;
-
- cmd->command = MEI_NFC_CMD_MAINTENANCE;
- cmd->data_size = 3;
- cmd->sub_command = MEI_NFC_SUBCMD_CONNECT;
- connect->fw_ivn = ndev->fw_ivn;
- connect->vendor_id = ndev->vendor_id;
-
- ret = __mei_cl_send(cl, (u8 *)cmd, connect_length);
- if (ret < 0) {
- dev_err(dev->dev, "Could not send connect cmd\n");
- goto err;
- }
-
- bytes_recv = __mei_cl_recv(cl, (u8 *)reply, connect_resp_length);
- if (bytes_recv < 0) {
- dev_err(dev->dev, "Could not read connect response\n");
- ret = bytes_recv;
- goto err;
- }
-
- dev_info(dev->dev, "IVN 0x%x Vendor ID 0x%x\n",
- connect_resp->fw_ivn, connect_resp->vendor_id);
-
- dev_info(dev->dev, "ME FW %d.%d.%d.%d\n",
- connect_resp->me_major, connect_resp->me_minor,
- connect_resp->me_hotfix, connect_resp->me_build);
-
- ret = 0;
-
-err:
- kfree(reply);
- kfree(cmd);
-
- return ret;
-}
-
static int mei_nfc_if_version(struct mei_nfc_dev *ndev)
{
struct mei_device *dev;
@@ -285,7 +214,7 @@ static int mei_nfc_if_version(struct mei_nfc_dev *ndev)
cmd.data_size = 1;
cmd.sub_command = MEI_NFC_SUBCMD_IF_VERSION;
- ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd));
+ ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd), 1);
if (ret < 0) {
dev_err(dev->dev, "Could not send IF version cmd\n");
return ret;
@@ -317,106 +246,13 @@ err:
return ret;
}
-static int mei_nfc_enable(struct mei_cl_device *cldev)
-{
- struct mei_device *dev;
- struct mei_nfc_dev *ndev;
- int ret;
-
- ndev = (struct mei_nfc_dev *)cldev->priv_data;
- dev = ndev->cl->dev;
-
- ret = mei_nfc_connect(ndev);
- if (ret < 0) {
- dev_err(dev->dev, "Could not connect to NFC");
- return ret;
- }
-
- return 0;
-}
-
-static int mei_nfc_disable(struct mei_cl_device *cldev)
-{
- return 0;
-}
-
-static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
-{
- struct mei_device *dev;
- struct mei_nfc_dev *ndev;
- struct mei_nfc_hci_hdr *hdr;
- u8 *mei_buf;
- int err;
-
- ndev = (struct mei_nfc_dev *) cldev->priv_data;
- dev = ndev->cl->dev;
-
- err = -ENOMEM;
- mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, GFP_KERNEL);
- if (!mei_buf)
- goto out;
-
- hdr = (struct mei_nfc_hci_hdr *) mei_buf;
- hdr->cmd = MEI_NFC_CMD_HCI_SEND;
- hdr->status = 0;
- hdr->req_id = ndev->req_id;
- hdr->reserved = 0;
- hdr->data_size = length;
-
- memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length);
- err = __mei_cl_send(ndev->cl, mei_buf, length + MEI_NFC_HEADER_SIZE);
- if (err < 0)
- goto out;
-
- if (!wait_event_interruptible_timeout(ndev->send_wq,
- ndev->recv_req_id == ndev->req_id, HZ)) {
- dev_err(dev->dev, "NFC MEI command timeout\n");
- err = -ETIME;
- } else {
- ndev->req_id++;
- }
-out:
- kfree(mei_buf);
- return err;
-}
-
-static int mei_nfc_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
-{
- struct mei_nfc_dev *ndev;
- struct mei_nfc_hci_hdr *hci_hdr;
- int received_length;
-
- ndev = (struct mei_nfc_dev *)cldev->priv_data;
-
- received_length = __mei_cl_recv(ndev->cl, buf, length);
- if (received_length < 0)
- return received_length;
-
- hci_hdr = (struct mei_nfc_hci_hdr *) buf;
-
- if (hci_hdr->cmd == MEI_NFC_CMD_HCI_SEND) {
- ndev->recv_req_id = hci_hdr->req_id;
- wake_up(&ndev->send_wq);
-
- return 0;
- }
-
- return received_length;
-}
-
-static struct mei_cl_ops nfc_ops = {
- .enable = mei_nfc_enable,
- .disable = mei_nfc_disable,
- .send = mei_nfc_send,
- .recv = mei_nfc_recv,
-};
-
static void mei_nfc_init(struct work_struct *work)
{
struct mei_device *dev;
struct mei_cl_device *cldev;
struct mei_nfc_dev *ndev;
struct mei_cl *cl_info;
+ struct mei_me_client *me_cl_info;
ndev = container_of(work, struct mei_nfc_dev, init_work);
@@ -425,13 +261,22 @@ static void mei_nfc_init(struct work_struct *work)
mutex_lock(&dev->device_lock);
- if (mei_cl_connect(cl_info, NULL) < 0) {
+ /* check for valid client id */
+ me_cl_info = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid);
+ if (!me_cl_info) {
+ mutex_unlock(&dev->device_lock);
+ dev_info(dev->dev, "nfc: failed to find the info client\n");
+ goto err;
+ }
+
+ if (mei_cl_connect(cl_info, me_cl_info, NULL) < 0) {
+ mei_me_cl_put(me_cl_info);
mutex_unlock(&dev->device_lock);
dev_err(dev->dev, "Could not connect to the NFC INFO ME client");
goto err;
}
-
+ mei_me_cl_put(me_cl_info);
mutex_unlock(&dev->device_lock);
if (mei_nfc_if_version(ndev) < 0) {
@@ -459,7 +304,8 @@ static void mei_nfc_init(struct work_struct *work)
return;
}
- cldev = mei_cl_add_device(dev, mei_nfc_guid, ndev->bus_name, &nfc_ops);
+ cldev = mei_cl_add_device(dev, ndev->me_cl, ndev->cl,
+ ndev->bus_name);
if (!cldev) {
dev_err(dev->dev, "Could not add the NFC device to the MEI bus\n");
@@ -479,11 +325,10 @@ err:
}
-int mei_nfc_host_init(struct mei_device *dev)
+int mei_nfc_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
{
struct mei_nfc_dev *ndev;
struct mei_cl *cl_info, *cl;
- struct mei_me_client *me_cl = NULL;
int ret;
@@ -500,11 +345,9 @@ int mei_nfc_host_init(struct mei_device *dev)
goto err;
}
- /* check for valid client id */
- me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid);
- if (!me_cl) {
- dev_info(dev->dev, "nfc: failed to find the client\n");
- ret = -ENOTTY;
+ ndev->me_cl = mei_me_cl_get(me_cl);
+ if (!ndev->me_cl) {
+ ret = -ENODEV;
goto err;
}
@@ -514,48 +357,26 @@ int mei_nfc_host_init(struct mei_device *dev)
goto err;
}
- cl_info->me_client_id = me_cl->client_id;
- cl_info->cl_uuid = me_cl->props.protocol_name;
- mei_me_cl_put(me_cl);
- me_cl = NULL;
-
list_add_tail(&cl_info->device_link, &dev->device_list);
ndev->cl_info = cl_info;
- /* check for valid client id */
- me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
- if (!me_cl) {
- dev_info(dev->dev, "nfc: failed to find the client\n");
- ret = -ENOTTY;
- goto err;
- }
-
cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
if (IS_ERR(cl)) {
ret = PTR_ERR(cl);
goto err;
}
- cl->me_client_id = me_cl->client_id;
- cl->cl_uuid = me_cl->props.protocol_name;
- mei_me_cl_put(me_cl);
- me_cl = NULL;
-
list_add_tail(&cl->device_link, &dev->device_list);
ndev->cl = cl;
- ndev->req_id = 1;
-
INIT_WORK(&ndev->init_work, mei_nfc_init);
- init_waitqueue_head(&ndev->send_wq);
schedule_work(&ndev->init_work);
return 0;
err:
- mei_me_cl_put(me_cl);
mei_nfc_free(ndev);
return ret;
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index dcfcba44b6f7..0882c0201907 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -338,7 +338,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
* However if device is not wakeable we do not enter
* D-low state and we need to keep the interrupt kicking
*/
- if (!ret && pci_dev_run_wake(pdev))
+ if (!ret && pci_dev_run_wake(pdev))
mei_disable_interrupts(dev);
dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 2725f865c3d6..2bc0f5089f82 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -50,15 +50,15 @@ static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
* mei_wd_host_init - connect to the watchdog client
*
* @dev: the device structure
+ * @me_cl: me client
*
* Return: -ENOTTY if wd client cannot be found
* -EIO if write has failed
* 0 on success
*/
-int mei_wd_host_init(struct mei_device *dev)
+int mei_wd_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
{
struct mei_cl *cl = &dev->wd_cl;
- struct mei_me_client *me_cl;
int ret;
mei_cl_init(cl, dev);
@@ -66,27 +66,13 @@ int mei_wd_host_init(struct mei_device *dev)
dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT;
dev->wd_state = MEI_WD_IDLE;
-
- /* check for valid client id */
- me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid);
- if (!me_cl) {
- dev_info(dev->dev, "wd: failed to find the client\n");
- return -ENOTTY;
- }
-
- cl->me_client_id = me_cl->client_id;
- cl->cl_uuid = me_cl->props.protocol_name;
- mei_me_cl_put(me_cl);
-
ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID);
-
if (ret < 0) {
dev_info(dev->dev, "wd: failed link client\n");
return ret;
}
- ret = mei_cl_connect(cl, NULL);
-
+ ret = mei_cl_connect(cl, me_cl, NULL);
if (ret) {
dev_err(dev->dev, "wd: failed to connect = %d\n", ret);
mei_cl_unlink(cl);
@@ -118,7 +104,7 @@ int mei_wd_send(struct mei_device *dev)
int ret;
hdr.host_addr = cl->host_client_id;
- hdr.me_addr = cl->me_client_id;
+ hdr.me_addr = mei_cl_me_id(cl);
hdr.msg_complete = 1;
hdr.reserved = 0;
hdr.internal = 0;
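
mei_cl_me_id() here, and mei_cl_mtu() in the earlier mei_write() hunk, presumably resolve through the new cl->me_cl pointer now that cl->me_client_id is gone; a minimal sketch consistent with the struct mei_cl changes above (the NULL fallback is an assumption):

    static inline u8 mei_cl_me_id(const struct mei_cl *cl)
    {
            /* a client that never connected has no fw client attached */
            return cl->me_cl ? cl->me_cl->client_id : 0;
    }

    static inline size_t mei_cl_mtu(const struct mei_cl *cl)
    {
            return cl->me_cl->props.max_msg_length;
    }
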
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index cc4eef040c14..e9f2f56c370d 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -15,11 +15,28 @@ config INTEL_MIC_BUS
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
+comment "SCIF Bus Driver"
+
+config SCIF_BUS
+ tristate "SCIF Bus Driver"
+ depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS
+ help
+ This option is selected by any driver which registers a
+ device or driver on the SCIF Bus, such as CONFIG_INTEL_MIC_HOST
+ and CONFIG_INTEL_MIC_CARD.
+
+ If you are building a host/card kernel with an Intel MIC device
+ then say M (recommended) or Y, else say N. If unsure say N.
+
+ More information about the Intel MIC family as well as the Linux
+ OS and tools for MIC to use with this driver are available from
+ <http://software.intel.com/en-us/mic-developer>.
+
comment "Intel MIC Host Driver"
config INTEL_MIC_HOST
tristate "Intel MIC Host Driver"
- depends on 64BIT && PCI && X86 && INTEL_MIC_BUS
+ depends on 64BIT && PCI && X86 && INTEL_MIC_BUS && SCIF_BUS
select VHOST_RING
help
This enables Host Driver support for the Intel Many Integrated
@@ -39,7 +56,7 @@ comment "Intel MIC Card Driver"
config INTEL_MIC_CARD
tristate "Intel MIC Card Driver"
- depends on 64BIT && X86 && INTEL_MIC_BUS
+ depends on 64BIT && X86 && INTEL_MIC_BUS && SCIF_BUS
select VIRTIO
help
This enables card driver support for the Intel Many Integrated
@@ -52,3 +69,22 @@ config INTEL_MIC_CARD
For more information see
<http://software.intel.com/en-us/mic-developer>.
+
+comment "SCIF Driver"
+
+config SCIF
+ tristate "SCIF Driver"
+ depends on 64BIT && PCI && X86 && SCIF_BUS
+ help
+ This enables SCIF Driver support for the Intel Many Integrated
+ Core (MIC) family of PCIe form factor coprocessor devices that
+ run a 64 bit Linux OS. The Symmetric Communication Interface
+ (SCIF, pronounced "skiff") is a low-level communications API
+ across PCIe, currently implemented for MIC.
+
+ If you are building a host kernel with an Intel MIC device then
+ say M (recommended) or Y, else say N. If unsure say N.
+
+ More information about the Intel MIC family as well as the Linux
+ OS and tools for MIC to use with this driver are available from
+ <http://software.intel.com/en-us/mic-developer>.
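
Taken together, a host kernel enabling SCIF ends up with a .config fragment along these lines (modular build, as the help text recommends):

    CONFIG_INTEL_MIC_BUS=m
    CONFIG_SCIF_BUS=m
    CONFIG_INTEL_MIC_HOST=m
    CONFIG_SCIF=m
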
diff --git a/drivers/misc/mic/Makefile b/drivers/misc/mic/Makefile
index e9bf148755e2..a74042c58649 100644
--- a/drivers/misc/mic/Makefile
+++ b/drivers/misc/mic/Makefile
@@ -4,4 +4,5 @@
#
obj-$(CONFIG_INTEL_MIC_HOST) += host/
obj-$(CONFIG_INTEL_MIC_CARD) += card/
-obj-$(CONFIG_INTEL_MIC_BUS) += bus/
+obj-y += bus/
+obj-$(CONFIG_SCIF) += scif/
diff --git a/drivers/misc/mic/bus/Makefile b/drivers/misc/mic/bus/Makefile
index d85c7f2a0af4..1ed37e234c96 100644
--- a/drivers/misc/mic/bus/Makefile
+++ b/drivers/misc/mic/bus/Makefile
@@ -3,3 +3,4 @@
# Copyright(c) 2014, Intel Corporation.
#
obj-$(CONFIG_INTEL_MIC_BUS) += mic_bus.o
+obj-$(CONFIG_SCIF_BUS) += scif_bus.o
diff --git a/drivers/misc/mic/bus/scif_bus.c b/drivers/misc/mic/bus/scif_bus.c
new file mode 100644
index 000000000000..2da7ceed015d
--- /dev/null
+++ b/drivers/misc/mic/bus/scif_bus.c
@@ -0,0 +1,210 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel Symmetric Communications Interface Bus driver.
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <linux/dma-mapping.h>
+
+#include "scif_bus.h"
+
+static ssize_t device_show(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct scif_hw_dev *dev = dev_to_scif(d);
+
+ return sprintf(buf, "0x%04x\n", dev->id.device);
+}
+
+static DEVICE_ATTR_RO(device);
+
+static ssize_t vendor_show(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct scif_hw_dev *dev = dev_to_scif(d);
+
+ return sprintf(buf, "0x%04x\n", dev->id.vendor);
+}
+
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t modalias_show(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct scif_hw_dev *dev = dev_to_scif(d);
+
+ return sprintf(buf, "scif:d%08Xv%08X\n",
+ dev->id.device, dev->id.vendor);
+}
+
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *scif_dev_attrs[] = {
+ &dev_attr_device.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(scif_dev);
+
+static inline int scif_id_match(const struct scif_hw_dev *dev,
+ const struct scif_hw_dev_id *id)
+{
+ if (id->device != dev->id.device && id->device != SCIF_DEV_ANY_ID)
+ return 0;
+
+ return id->vendor == SCIF_DEV_ANY_ID || id->vendor == dev->id.vendor;
+}
+
+/*
+ * This looks through all the IDs a driver claims to support. If any of them
+ * match, we return 1 and the kernel will call scif_dev_probe().
+ */
+static int scif_dev_match(struct device *dv, struct device_driver *dr)
+{
+ unsigned int i;
+ struct scif_hw_dev *dev = dev_to_scif(dv);
+ const struct scif_hw_dev_id *ids;
+
+ ids = drv_to_scif(dr)->id_table;
+ for (i = 0; ids[i].device; i++)
+ if (scif_id_match(dev, &ids[i]))
+ return 1;
+ return 0;
+}
+
+static int scif_uevent(struct device *dv, struct kobj_uevent_env *env)
+{
+ struct scif_hw_dev *dev = dev_to_scif(dv);
+
+ return add_uevent_var(env, "MODALIAS=scif:d%08Xv%08X",
+ dev->id.device, dev->id.vendor);
+}
+
+static int scif_dev_probe(struct device *d)
+{
+ struct scif_hw_dev *dev = dev_to_scif(d);
+ struct scif_driver *drv = drv_to_scif(dev->dev.driver);
+
+ return drv->probe(dev);
+}
+
+static int scif_dev_remove(struct device *d)
+{
+ struct scif_hw_dev *dev = dev_to_scif(d);
+ struct scif_driver *drv = drv_to_scif(dev->dev.driver);
+
+ drv->remove(dev);
+ return 0;
+}
+
+static struct bus_type scif_bus = {
+ .name = "scif_bus",
+ .match = scif_dev_match,
+ .dev_groups = scif_dev_groups,
+ .uevent = scif_uevent,
+ .probe = scif_dev_probe,
+ .remove = scif_dev_remove,
+};
+
+int scif_register_driver(struct scif_driver *driver)
+{
+ driver->driver.bus = &scif_bus;
+ return driver_register(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(scif_register_driver);
+
+void scif_unregister_driver(struct scif_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(scif_unregister_driver);
+
+static void scif_release_dev(struct device *d)
+{
+ struct scif_hw_dev *sdev = dev_to_scif(d);
+
+ kfree(sdev);
+}
+
+struct scif_hw_dev *
+scif_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
+ struct scif_hw_ops *hw_ops, u8 dnode, u8 snode,
+ struct mic_mw *mmio, struct mic_mw *aper, void *dp,
+ void __iomem *rdp, struct dma_chan **chan, int num_chan)
+{
+ int ret;
+ struct scif_hw_dev *sdev;
+
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ if (!sdev)
+ return ERR_PTR(-ENOMEM);
+
+ sdev->dev.parent = pdev;
+ sdev->id.device = id;
+ sdev->id.vendor = SCIF_DEV_ANY_ID;
+ sdev->dev.archdata.dma_ops = dma_ops;
+ sdev->dev.release = scif_release_dev;
+ sdev->hw_ops = hw_ops;
+ sdev->dnode = dnode;
+ sdev->snode = snode;
+ dev_set_drvdata(&sdev->dev, sdev);
+ sdev->dev.bus = &scif_bus;
+ sdev->mmio = mmio;
+ sdev->aper = aper;
+ sdev->dp = dp;
+ sdev->rdp = rdp;
+ sdev->dev.dma_mask = &sdev->dev.coherent_dma_mask;
+ dma_set_mask(&sdev->dev, DMA_BIT_MASK(64));
+ sdev->dma_ch = chan;
+ sdev->num_dma_ch = num_chan;
+ dev_set_name(&sdev->dev, "scif-dev%u", sdev->dnode);
+ /*
+ * device_register() causes the bus infrastructure to look for a
+ * matching driver.
+ */
+ ret = device_register(&sdev->dev);
+ if (ret)
+ goto free_sdev;
+ return sdev;
+free_sdev:
+ kfree(sdev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(scif_register_device);
+
+void scif_unregister_device(struct scif_hw_dev *sdev)
+{
+ device_unregister(&sdev->dev);
+}
+EXPORT_SYMBOL_GPL(scif_unregister_device);
+
+static int __init scif_init(void)
+{
+ return bus_register(&scif_bus);
+}
+
+static void __exit scif_exit(void)
+{
+ bus_unregister(&scif_bus);
+}
+
+core_initcall(scif_init);
+module_exit(scif_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) SCIF Bus driver");
+MODULE_LICENSE("GPL v2");
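
A hedged sketch of a client driver for the new bus (names are hypothetical; the zero-terminated id_table is what scif_dev_match() above iterates until ids[i].device is 0):

    #include <linux/module.h>
    #include "scif_bus.h"

    static const struct scif_hw_dev_id scif_demo_id_table[] = {
            { MIC_SCIF_DEV, SCIF_DEV_ANY_ID },
            { 0 },
    };

    static int scif_demo_probe(struct scif_hw_dev *sdev)
    {
            dev_info(&sdev->dev, "bound to node %u\n", sdev->dnode);
            return 0;
    }

    static void scif_demo_remove(struct scif_hw_dev *sdev)
    {
    }

    static struct scif_driver scif_demo_driver = {
            .driver.name = "scif_demo",
            .driver.owner = THIS_MODULE,
            .id_table = scif_demo_id_table,
            .probe = scif_demo_probe,
            .remove = scif_demo_remove,
    };

    /* module_driver() wires scif_register_driver()/scif_unregister_driver()
     * into module init/exit. */
    module_driver(scif_demo_driver, scif_register_driver,
                  scif_unregister_driver);
    MODULE_LICENSE("GPL v2");
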
diff --git a/drivers/misc/mic/bus/scif_bus.h b/drivers/misc/mic/bus/scif_bus.h
new file mode 100644
index 000000000000..335a228a8236
--- /dev/null
+++ b/drivers/misc/mic/bus/scif_bus.h
@@ -0,0 +1,129 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel Symmetric Communications Interface Bus driver.
+ */
+#ifndef _SCIF_BUS_H_
+#define _SCIF_BUS_H_
+/*
+ * Everything a scif driver needs to work with any particular scif
+ * hardware abstraction layer.
+ */
+#include <linux/dma-mapping.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+
+struct scif_hw_dev_id {
+ u32 device;
+ u32 vendor;
+};
+
+#define MIC_SCIF_DEV 1
+#define SCIF_DEV_ANY_ID 0xffffffff
+
+/**
+ * scif_hw_dev - representation of a hardware device abstracted for scif
+ * @hw_ops: the hardware ops supported by this device
+ * @id: the device type identification (used to match it with a driver)
+ * @mmio: MMIO memory window
+ * @aper: Aperture memory window
+ * @dev: underlying device
+ * @dnode: The destination node which this device will communicate with.
+ * @snode: The source node for this device.
+ * @dp: Self device page
+ * @rdp: Remote device page
+ * @dma_ch: Array of DMA channels
+ * @num_dma_ch: Number of DMA channels available
+ */
+struct scif_hw_dev {
+ struct scif_hw_ops *hw_ops;
+ struct scif_hw_dev_id id;
+ struct mic_mw *mmio;
+ struct mic_mw *aper;
+ struct device dev;
+ u8 dnode;
+ u8 snode;
+ void *dp;
+ void __iomem *rdp;
+ struct dma_chan **dma_ch;
+ int num_dma_ch;
+};
+
+/**
+ * scif_driver - operations for a scif I/O driver
+ * @driver: underlying device driver (populate name and owner).
+ * @id_table: the ids serviced by this driver.
+ * @probe: the function to call when a device is found. Returns 0 or -errno.
+ * @remove: the function to call when a device is removed.
+ */
+struct scif_driver {
+ struct device_driver driver;
+ const struct scif_hw_dev_id *id_table;
+ int (*probe)(struct scif_hw_dev *dev);
+ void (*remove)(struct scif_hw_dev *dev);
+};
+
+/**
+ * scif_hw_ops - Hardware operations for accessing a SCIF device on the SCIF bus.
+ *
+ * @next_db: Obtain the next available doorbell.
+ * @request_irq: Request an interrupt on a particular doorbell.
+ * @free_irq: Free an interrupt requested previously.
+ * @ack_interrupt: Acknowledge an interrupt in the ISR.
+ * @send_intr: Send an interrupt to the remote node on a specified doorbell.
+ * @send_p2p_intr: Send an interrupt to a peer node on a specified doorbell;
+ * used for peer to peer nodes.
+ * @ioremap: Map a buffer with the specified physical address and length.
+ * @iounmap: Unmap a buffer previously mapped.
+ */
+struct scif_hw_ops {
+ int (*next_db)(struct scif_hw_dev *sdev);
+ struct mic_irq * (*request_irq)(struct scif_hw_dev *sdev,
+ irqreturn_t (*func)(int irq,
+ void *data),
+ const char *name, void *data,
+ int db);
+ void (*free_irq)(struct scif_hw_dev *sdev,
+ struct mic_irq *cookie, void *data);
+ void (*ack_interrupt)(struct scif_hw_dev *sdev, int num);
+ void (*send_intr)(struct scif_hw_dev *sdev, int db);
+ void (*send_p2p_intr)(struct scif_hw_dev *sdev, int db,
+ struct mic_mw *mw);
+ void __iomem * (*ioremap)(struct scif_hw_dev *sdev,
+ phys_addr_t pa, size_t len);
+ void (*iounmap)(struct scif_hw_dev *sdev, void __iomem *va);
+};
+
+int scif_register_driver(struct scif_driver *driver);
+void scif_unregister_driver(struct scif_driver *driver);
+struct scif_hw_dev *
+scif_register_device(struct device *pdev, int id,
+ struct dma_map_ops *dma_ops,
+ struct scif_hw_ops *hw_ops, u8 dnode, u8 snode,
+ struct mic_mw *mmio, struct mic_mw *aper,
+ void *dp, void __iomem *rdp,
+ struct dma_chan **chan, int num_chan);
+void scif_unregister_device(struct scif_hw_dev *sdev);
+
+static inline struct scif_hw_dev *dev_to_scif(struct device *dev)
+{
+ return container_of(dev, struct scif_hw_dev, dev);
+}
+
+static inline struct scif_driver *drv_to_scif(struct device_driver *drv)
+{
+ return container_of(drv, struct scif_driver, driver);
+}
+#endif /* _SCIF_BUS_H */
diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c
index 83819eee553b..6338908b2252 100644
--- a/drivers/misc/mic/card/mic_device.c
+++ b/drivers/misc/mic/card/mic_device.c
@@ -28,6 +28,8 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
+#include <linux/dmaengine.h>
+#include <linux/kmod.h>
#include <linux/mic_common.h>
#include "../common/mic_dev.h"
@@ -240,6 +242,111 @@ static void mic_uninit_irq(void)
kfree(mdrv->irq_info.irq_usage_count);
}
+static inline struct mic_driver *scdev_to_mdrv(struct scif_hw_dev *scdev)
+{
+ return dev_get_drvdata(scdev->dev.parent);
+}
+
+static struct mic_irq *
+___mic_request_irq(struct scif_hw_dev *scdev,
+ irqreturn_t (*func)(int irq, void *data),
+ const char *name, void *data,
+ int db)
+{
+ return mic_request_card_irq(func, NULL, name, data, db);
+}
+
+static void
+___mic_free_irq(struct scif_hw_dev *scdev,
+ struct mic_irq *cookie, void *data)
+{
+ return mic_free_card_irq(cookie, data);
+}
+
+static void ___mic_ack_interrupt(struct scif_hw_dev *scdev, int num)
+{
+ struct mic_driver *mdrv = scdev_to_mdrv(scdev);
+
+ mic_ack_interrupt(&mdrv->mdev);
+}
+
+static int ___mic_next_db(struct scif_hw_dev *scdev)
+{
+ return mic_next_card_db();
+}
+
+static void ___mic_send_intr(struct scif_hw_dev *scdev, int db)
+{
+ struct mic_driver *mdrv = scdev_to_mdrv(scdev);
+
+ mic_send_intr(&mdrv->mdev, db);
+}
+
+static void ___mic_send_p2p_intr(struct scif_hw_dev *scdev, int db,
+ struct mic_mw *mw)
+{
+ mic_send_p2p_intr(db, mw);
+}
+
+static void __iomem *
+___mic_ioremap(struct scif_hw_dev *scdev,
+ phys_addr_t pa, size_t len)
+{
+ struct mic_driver *mdrv = scdev_to_mdrv(scdev);
+
+ return mic_card_map(&mdrv->mdev, pa, len);
+}
+
+static void ___mic_iounmap(struct scif_hw_dev *scdev, void __iomem *va)
+{
+ struct mic_driver *mdrv = scdev_to_mdrv(scdev);
+
+ mic_card_unmap(&mdrv->mdev, va);
+}
+
+static struct scif_hw_ops scif_hw_ops = {
+ .request_irq = ___mic_request_irq,
+ .free_irq = ___mic_free_irq,
+ .ack_interrupt = ___mic_ack_interrupt,
+ .next_db = ___mic_next_db,
+ .send_intr = ___mic_send_intr,
+ .send_p2p_intr = ___mic_send_p2p_intr,
+ .ioremap = ___mic_ioremap,
+ .iounmap = ___mic_iounmap,
+};
+
+static int mic_request_dma_chans(struct mic_driver *mdrv)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+
+ request_module("mic_x100_dma");
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ do {
+ chan = dma_request_channel(mask, NULL, NULL);
+ if (chan) {
+ mdrv->dma_ch[mdrv->num_dma_ch++] = chan;
+ if (mdrv->num_dma_ch >= MIC_MAX_DMA_CHAN)
+ break;
+ }
+ } while (chan);
+ dev_info(mdrv->dev, "DMA channels # %d\n", mdrv->num_dma_ch);
+ return mdrv->num_dma_ch;
+}
+
+static void mic_free_dma_chans(struct mic_driver *mdrv)
+{
+ int i = 0;
+
+ for (i = 0; i < mdrv->num_dma_ch; i++) {
+ dma_release_channel(mdrv->dma_ch[i]);
+ mdrv->dma_ch[i] = NULL;
+ }
+ mdrv->num_dma_ch = 0;
+}
+
/*
* mic_driver_init - MIC driver initialization tasks.
*
@@ -248,6 +355,8 @@ static void mic_uninit_irq(void)
int __init mic_driver_init(struct mic_driver *mdrv)
{
int rc;
+ struct mic_bootparam __iomem *bootparam;
+ u8 node_id;
g_drv = mdrv;
/*
@@ -268,13 +377,32 @@ int __init mic_driver_init(struct mic_driver *mdrv)
rc = mic_shutdown_init();
if (rc)
goto irq_uninit;
+ if (!mic_request_dma_chans(mdrv)) {
+ rc = -ENODEV;
+ goto shutdown_uninit;
+ }
rc = mic_devices_init(mdrv);
if (rc)
- goto shutdown_uninit;
+ goto dma_free;
+ bootparam = mdrv->dp;
+ node_id = ioread8(&bootparam->node_id);
+ mdrv->scdev = scif_register_device(mdrv->dev, MIC_SCIF_DEV,
+ NULL, &scif_hw_ops,
+ 0, node_id, &mdrv->mdev.mmio, NULL,
+ NULL, mdrv->dp, mdrv->dma_ch,
+ mdrv->num_dma_ch);
+ if (IS_ERR(mdrv->scdev)) {
+ rc = PTR_ERR(mdrv->scdev);
+ goto device_uninit;
+ }
mic_create_card_debug_dir(mdrv);
atomic_notifier_chain_register(&panic_notifier_list, &mic_panic);
done:
return rc;
+device_uninit:
+ mic_devices_uninit(mdrv);
+dma_free:
+ mic_free_dma_chans(mdrv);
shutdown_uninit:
mic_shutdown_uninit();
irq_uninit:
@@ -294,7 +422,9 @@ put:
void mic_driver_uninit(struct mic_driver *mdrv)
{
mic_delete_card_debug_dir(mdrv);
+ scif_unregister_device(mdrv->scdev);
mic_devices_uninit(mdrv);
+ mic_free_dma_chans(mdrv);
/*
* Inform the host about the shutdown status i.e. poweroff/restart etc.
* The module cannot be unloaded so the only code path to call
diff --git a/drivers/misc/mic/card/mic_device.h b/drivers/misc/mic/card/mic_device.h
index 844be8fc9b22..1dbf83c41289 100644
--- a/drivers/misc/mic/card/mic_device.h
+++ b/drivers/misc/mic/card/mic_device.h
@@ -29,9 +29,9 @@
#include <linux/workqueue.h>
#include <linux/io.h>
-#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/mic_bus.h>
+#include "../bus/scif_bus.h"
/**
* struct mic_intr_info - Contains h/w specific interrupt sources info
@@ -73,6 +73,9 @@ struct mic_device {
* @irq_info: The OS specific irq information
* @intr_info: H/W specific interrupt information.
* @dma_mbdev: dma device on the MIC virtual bus.
+ * @dma_ch: Array of DMA channels
+ * @num_dma_ch: Number of DMA channels available
+ * @scdev: SCIF device on the SCIF virtual bus.
*/
struct mic_driver {
char name[20];
@@ -84,6 +87,9 @@ struct mic_driver {
struct mic_irq_info irq_info;
struct mic_intr_info intr_info;
struct mbus_device *dma_mbdev;
+ struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
+ int num_dma_ch;
+ struct scif_hw_dev *scdev;
};
/**
@@ -122,10 +128,11 @@ void mic_driver_uninit(struct mic_driver *mdrv);
int mic_next_card_db(void);
struct mic_irq *
mic_request_card_irq(irq_handler_t handler, irq_handler_t thread_fn,
- const char *name, void *data, int intr_src);
+ const char *name, void *data, int db);
void mic_free_card_irq(struct mic_irq *cookie, void *data);
u32 mic_read_spad(struct mic_device *mdev, unsigned int idx);
void mic_send_intr(struct mic_device *mdev, int doorbell);
+void mic_send_p2p_intr(int doorbell, struct mic_mw *mw);
int mic_db_to_irq(struct mic_driver *mdrv, int db);
u32 mic_ack_interrupt(struct mic_device *mdev);
void mic_hw_intr_init(struct mic_driver *mdrv);
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c
index e98e537d68e3..77fd41781c2e 100644
--- a/drivers/misc/mic/card/mic_x100.c
+++ b/drivers/misc/mic/card/mic_x100.c
@@ -70,6 +70,41 @@ void mic_send_intr(struct mic_device *mdev, int doorbell)
(MIC_X100_SBOX_SDBIC0 + (4 * doorbell)));
}
+/*
+ * mic_x100_send_sbox_intr - Send an MIC_X100_SBOX interrupt to MIC.
+ */
+static void mic_x100_send_sbox_intr(struct mic_mw *mw, int doorbell)
+{
+ u64 apic_icr_offset = MIC_X100_SBOX_APICICR0 + doorbell * 8;
+ u32 apicicr_low = mic_mmio_read(mw, MIC_X100_SBOX_BASE_ADDRESS +
+ apic_icr_offset);
+
+ /* for MIC we need to make sure we "hit" the send_icr bit (13) */
+ apicicr_low = (apicicr_low | (1 << 13));
+ /*
+ * Ensure that the interrupt is ordered w.r.t. previous stores
+ * to main memory. Fence instructions are not implemented in X100
+ * since execution is in order but a compiler barrier is still
+ * required.
+ */
+ wmb();
+ mic_mmio_write(mw, apicicr_low,
+ MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset);
+}
+
+static void mic_x100_send_rdmasr_intr(struct mic_mw *mw, int doorbell)
+{
+ int rdmasr_offset = MIC_X100_SBOX_RDMASR0 + (doorbell << 2);
+ /*
+ * Ensure that the interrupt is ordered w.r.t. previous stores
+ * to main memory. Fence instructions are not implemented in X100
+ * since execution is in order but a compiler barrier is still
+ * required.
+ */
+ wmb();
+ mic_mmio_write(mw, 0, MIC_X100_SBOX_BASE_ADDRESS + rdmasr_offset);
+}
+
/**
* mic_ack_interrupt - Device specific interrupt handling.
* @mdev: pointer to mic_device instance
@@ -91,6 +126,18 @@ static inline int mic_get_rdmasr_irq(int index)
return MIC_X100_RDMASR_IRQ_BASE + index;
}
+void mic_send_p2p_intr(int db, struct mic_mw *mw)
+{
+ int rdmasr_index;
+
+ if (db < MIC_X100_NUM_SBOX_IRQ) {
+ mic_x100_send_sbox_intr(mw, db);
+ } else {
+ rdmasr_index = db - MIC_X100_NUM_SBOX_IRQ;
+ mic_x100_send_rdmasr_intr(mw, rdmasr_index);
+ }
+}
+
/**
* mic_hw_intr_init - Initialize h/w specific interrupt
* information.
@@ -113,11 +160,15 @@ void mic_hw_intr_init(struct mic_driver *mdrv)
int mic_db_to_irq(struct mic_driver *mdrv, int db)
{
int rdmasr_index;
+
+ /*
+ * The total number of doorbell interrupts on the card is 16. Indices
+ * 0-7 fall in the SBOX category and 8-15 fall in the RDMASR category.
+ */
if (db < MIC_X100_NUM_SBOX_IRQ) {
return mic_get_sbox_irq(db);
} else {
- rdmasr_index = db - MIC_X100_NUM_SBOX_IRQ +
- MIC_X100_RDMASR_IRQ_BASE;
+ rdmasr_index = db - MIC_X100_NUM_SBOX_IRQ;
return mic_get_rdmasr_irq(rdmasr_index);
}
}
@@ -243,10 +294,16 @@ static void mic_platform_shutdown(struct platform_device *pdev)
mic_remove(pdev);
}
+static u64 mic_dma_mask = DMA_BIT_MASK(64);
+
static struct platform_device mic_platform_dev = {
.name = mic_driver_name,
.id = 0,
.num_resources = 0,
+ .dev = {
+ .dma_mask = &mic_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(64),
+ },
};
static struct platform_driver __refdata mic_platform_driver = {
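
Worked through with MIC_X100_NUM_SBOX_IRQ == 8, the reworked mic_db_to_irq() above now maps, for example (assuming mic_get_sbox_irq(i) returns MIC_X100_SBOX_IRQ_BASE + i, mirroring the RDMASR helper shown earlier):

    db 3  -> mic_get_sbox_irq(3)           /* SBOX range,   base + 3 */
    db 11 -> mic_get_rdmasr_irq(11 - 8)    /* RDMASR range, base + 3 */
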
diff --git a/drivers/misc/mic/card/mic_x100.h b/drivers/misc/mic/card/mic_x100.h
index d66ea55639c3..7e2224934ba8 100644
--- a/drivers/misc/mic/card/mic_x100.h
+++ b/drivers/misc/mic/card/mic_x100.h
@@ -35,6 +35,7 @@
#define MIC_X100_SBOX_SDBIC0 0x0000CC90
#define MIC_X100_SBOX_SDBIC0_DBREQ_BIT 0x80000000
#define MIC_X100_SBOX_RDMASR0 0x0000B180
+#define MIC_X100_SBOX_APICICR0 0x0000A9D0
#define MIC_X100_MAX_DOORBELL_IDX 8
diff --git a/drivers/misc/mic/common/mic_dev.h b/drivers/misc/mic/common/mic_dev.h
index 92999c2bbf82..0b58c46045dc 100644
--- a/drivers/misc/mic/common/mic_dev.h
+++ b/drivers/misc/mic/common/mic_dev.h
@@ -48,4 +48,7 @@ struct mic_mw {
#define MIC_VIRTIO_PARAM_DEV_REMOVE 0x1
#define MIC_VIRTIO_PARAM_CONFIG_CHANGED 0x2
+/* Maximum number of DMA channels */
+#define MIC_MAX_DMA_CHAN 4
+
#endif
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index d9fa609da061..e5f6a5e7bca1 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -21,6 +21,7 @@
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/pci.h>
+#include <linux/kmod.h>
#include <linux/mic_common.h>
#include <linux/mic_bus.h>
@@ -29,6 +30,188 @@
#include "mic_smpt.h"
#include "mic_virtio.h"
+static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev)
+{
+ return dev_get_drvdata(scdev->dev.parent);
+}
+
+static void *__mic_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
+{
+ struct scif_hw_dev *scdev = dev_get_drvdata(dev);
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+ dma_addr_t tmp;
+ void *va = kmalloc(size, gfp);
+
+ if (va) {
+ tmp = mic_map_single(mdev, va, size);
+ if (dma_mapping_error(dev, tmp)) {
+ kfree(va);
+ va = NULL;
+ } else {
+ *dma_handle = tmp;
+ }
+ }
+ return va;
+}
+
+static void __mic_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
+{
+ struct scif_hw_dev *scdev = dev_get_drvdata(dev);
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+
+ mic_unmap_single(mdev, dma_handle, size);
+ kfree(vaddr);
+}
+
+static dma_addr_t
+__mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ void *va = phys_to_virt(page_to_phys(page)) + offset;
+ struct scif_hw_dev *scdev = dev_get_drvdata(dev);
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+
+ return mic_map_single(mdev, va, size);
+}
+
+static void
+__mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scif_hw_dev *scdev = dev_get_drvdata(dev);
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+
+ mic_unmap_single(mdev, dma_addr, size);
+}
+
+static int __mic_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scif_hw_dev *scdev = dev_get_drvdata(dev);
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+ struct scatterlist *s;
+ int i, j, ret;
+ dma_addr_t da;
+
+ ret = dma_map_sg(mdev->sdev->parent, sg, nents, dir);
+ if (ret <= 0)
+ return 0;
+
+ for_each_sg(sg, s, nents, i) {
+ da = mic_map(mdev, sg_dma_address(s) + s->offset, s->length);
+ if (!da)
+ goto err;
+ sg_dma_address(s) = da;
+ }
+ return nents;
+err:
+ for_each_sg(sg, s, i, j) {
+ mic_unmap(mdev, sg_dma_address(s), s->length);
+ sg_dma_address(s) = mic_to_dma_addr(mdev, sg_dma_address(s));
+ }
+ dma_unmap_sg(mdev->sdev->parent, sg, nents, dir);
+ return 0;
+}
+
+static void __mic_dma_unmap_sg(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scif_hw_dev *scdev = dev_get_drvdata(dev);
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+ struct scatterlist *s;
+ dma_addr_t da;
+ int i;
+
+ for_each_sg(sg, s, nents, i) {
+ da = mic_to_dma_addr(mdev, sg_dma_address(s));
+ mic_unmap(mdev, sg_dma_address(s), s->length);
+ sg_dma_address(s) = da;
+ }
+ dma_unmap_sg(mdev->sdev->parent, sg, nents, dir);
+}
+
+static struct dma_map_ops __mic_dma_ops = {
+ .alloc = __mic_dma_alloc,
+ .free = __mic_dma_free,
+ .map_page = __mic_dma_map_page,
+ .unmap_page = __mic_dma_unmap_page,
+ .map_sg = __mic_dma_map_sg,
+ .unmap_sg = __mic_dma_unmap_sg,
+};
+
+static struct mic_irq *
+___mic_request_irq(struct scif_hw_dev *scdev,
+ irqreturn_t (*func)(int irq, void *data),
+ const char *name,
+ void *data, int db)
+{
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+
+ return mic_request_threaded_irq(mdev, func, NULL, name, data,
+ db, MIC_INTR_DB);
+}
+
+static void
+___mic_free_irq(struct scif_hw_dev *scdev,
+ struct mic_irq *cookie, void *data)
+{
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+
+ return mic_free_irq(mdev, cookie, data);
+}
+
+static void ___mic_ack_interrupt(struct scif_hw_dev *scdev, int num)
+{
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+
+ mdev->ops->intr_workarounds(mdev);
+}
+
+static int ___mic_next_db(struct scif_hw_dev *scdev)
+{
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+
+ return mic_next_db(mdev);
+}
+
+static void ___mic_send_intr(struct scif_hw_dev *scdev, int db)
+{
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+
+ mdev->ops->send_intr(mdev, db);
+}
+
+static void __iomem *___mic_ioremap(struct scif_hw_dev *scdev,
+ phys_addr_t pa, size_t len)
+{
+ struct mic_device *mdev = scdev_to_mdev(scdev);
+
+ return mdev->aper.va + pa;
+}
+
+static void ___mic_iounmap(struct scif_hw_dev *scdev, void __iomem *va)
+{
+ /* nothing to do */
+}
+
+static struct scif_hw_ops scif_hw_ops = {
+ .request_irq = ___mic_request_irq,
+ .free_irq = ___mic_free_irq,
+ .ack_interrupt = ___mic_ack_interrupt,
+ .next_db = ___mic_next_db,
+ .send_intr = ___mic_send_intr,
+ .ioremap = ___mic_ioremap,
+ .iounmap = ___mic_iounmap,
+};
+
static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev)
{
return dev_get_drvdata(mbdev->dev.parent);
@@ -127,6 +310,58 @@ void mic_bootparam_init(struct mic_device *mdev)
bootparam->h2c_config_db = -1;
bootparam->shutdown_status = 0;
bootparam->shutdown_card = 0;
+ /* Total nodes = number of MICs + 1 for self node */
+ bootparam->tot_nodes = atomic_read(&g_num_mics) + 1;
+ bootparam->node_id = mdev->id + 1;
+ bootparam->scif_host_dma_addr = 0x0;
+ bootparam->scif_card_dma_addr = 0x0;
+ bootparam->c2h_scif_db = -1;
+ bootparam->h2c_scif_db = -1;
+}
+
+/**
+ * mic_request_dma_chans - Request DMA channels
+ * @mdev: pointer to mic_device instance
+ *
+ * returns number of DMA channels acquired
+ */
+static int mic_request_dma_chans(struct mic_device *mdev)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+
+ request_module("mic_x100_dma");
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ do {
+ chan = dma_request_channel(mask, mdev->ops->dma_filter,
+ mdev->sdev->parent);
+ if (chan) {
+ mdev->dma_ch[mdev->num_dma_ch++] = chan;
+ if (mdev->num_dma_ch >= MIC_MAX_DMA_CHAN)
+ break;
+ }
+ } while (chan);
+ dev_info(mdev->sdev->parent, "DMA channels # %d\n", mdev->num_dma_ch);
+ return mdev->num_dma_ch;
+}
+
+/**
+ * mic_free_dma_chans - release DMA channels
+ * @mdev: pointer to mic_device instance
+ *
+ * returns none
+ */
+static void mic_free_dma_chans(struct mic_device *mdev)
+{
+ int i = 0;
+
+ for (i = 0; i < mdev->num_dma_ch; i++) {
+ dma_release_channel(mdev->dma_ch[i]);
+ mdev->dma_ch[i] = NULL;
+ }
+ mdev->num_dma_ch = 0;
}
/**
@@ -141,6 +376,7 @@ int mic_start(struct mic_device *mdev, const char *buf)
{
int rc;
mutex_lock(&mdev->mic_mutex);
+ mic_bootparam_init(mdev);
retry:
if (MIC_OFFLINE != mdev->state) {
rc = -EINVAL;
@@ -161,14 +397,22 @@ retry:
rc = PTR_ERR(mdev->dma_mbdev);
goto unlock_ret;
}
- mdev->dma_ch = mic_request_dma_chan(mdev);
- if (!mdev->dma_ch) {
- rc = -ENXIO;
+ if (!mic_request_dma_chans(mdev)) {
+ rc = -ENODEV;
goto dma_remove;
}
+ mdev->scdev = scif_register_device(mdev->sdev->parent, MIC_SCIF_DEV,
+ &__mic_dma_ops, &scif_hw_ops,
+ mdev->id + 1, 0, &mdev->mmio,
+ &mdev->aper, mdev->dp, NULL,
+ mdev->dma_ch, mdev->num_dma_ch);
+ if (IS_ERR(mdev->scdev)) {
+ rc = PTR_ERR(mdev->scdev);
+ goto dma_free;
+ }
rc = mdev->ops->load_mic_fw(mdev, buf);
if (rc)
- goto dma_release;
+ goto scif_remove;
mic_smpt_restore(mdev);
mic_intr_restore(mdev);
mdev->intr_ops->enable_interrupts(mdev);
@@ -177,8 +421,10 @@ retry:
mdev->ops->send_firmware_intr(mdev);
mic_set_state(mdev, MIC_ONLINE);
goto unlock_ret;
-dma_release:
- dma_release_channel(mdev->dma_ch);
+scif_remove:
+ scif_unregister_device(mdev->scdev);
+dma_free:
+ mic_free_dma_chans(mdev);
dma_remove:
mbus_unregister_device(mdev->dma_mbdev);
unlock_ret:
@@ -197,11 +443,9 @@ void mic_stop(struct mic_device *mdev, bool force)
{
mutex_lock(&mdev->mic_mutex);
if (MIC_OFFLINE != mdev->state || force) {
+ scif_unregister_device(mdev->scdev);
mic_virtio_reset_devices(mdev);
- if (mdev->dma_ch) {
- dma_release_channel(mdev->dma_ch);
- mdev->dma_ch = NULL;
- }
+ mic_free_dma_chans(mdev);
mbus_unregister_device(mdev->dma_mbdev);
mic_bootparam_init(mdev);
mic_reset(mdev);
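
Since scif_register_device() installs __mic_dma_ops as the SCIF device's archdata dma_ops, a host-side SCIF driver can use the generic DMA API and transparently receive card-visible (SMPT-mapped) addresses; a minimal sketch, error handling elided:

    void *va;
    dma_addr_t mic_addr;

    /* routed through __mic_dma_alloc(): kmalloc() + mic_map_single() */
    va = dma_alloc_coherent(&scdev->dev, PAGE_SIZE, &mic_addr, GFP_KERNEL);
    if (va) {
            /* mic_addr is valid in the card's view of host memory */
            dma_free_coherent(&scdev->dev, PAGE_SIZE, va, mic_addr);
    }
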
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c
index 687e9aacf3bb..3c9ea4896f3c 100644
--- a/drivers/misc/mic/host/mic_debugfs.c
+++ b/drivers/misc/mic/host/mic_debugfs.c
@@ -214,6 +214,19 @@ static int mic_dp_show(struct seq_file *s, void *pos)
bootparam->shutdown_status);
seq_printf(s, "Bootparam: shutdown_card %d\n",
bootparam->shutdown_card);
+ seq_printf(s, "Bootparam: tot_nodes %d\n",
+ bootparam->tot_nodes);
+ seq_printf(s, "Bootparam: node_id %d\n",
+ bootparam->node_id);
+ seq_printf(s, "Bootparam: c2h_scif_db %d\n",
+ bootparam->c2h_scif_db);
+ seq_printf(s, "Bootparam: h2c_scif_db %d\n",
+ bootparam->h2c_scif_db);
+ seq_printf(s, "Bootparam: scif_host_dma_addr 0x%llx\n",
+ bootparam->scif_host_dma_addr);
+ seq_printf(s, "Bootparam: scif_card_dma_addr 0x%llx\n",
+ bootparam->scif_card_dma_addr);
+
for (i = sizeof(*bootparam); i < MIC_DP_SIZE;
i += mic_total_desc_size(d)) {
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h
index 016bd15a7bd1..01a7555aa648 100644
--- a/drivers/misc/mic/host/mic_device.h
+++ b/drivers/misc/mic/host/mic_device.h
@@ -27,7 +27,7 @@
#include <linux/irqreturn.h>
#include <linux/dmaengine.h>
#include <linux/mic_bus.h>
-
+#include "../bus/scif_bus.h"
#include "mic_intr.h"
/* The maximum number of MIC devices supported in a single host system. */
@@ -90,7 +90,9 @@ enum mic_stepping {
* @vdev_list: list of virtio devices.
* @pm_notifier: Handles PM notifications from the OS.
* @dma_mbdev: MIC BUS DMA device.
- * @dma_ch: DMA channel reserved by this driver for use by virtio devices.
+ * @dma_ch: Array of DMA channels
+ * @num_dma_ch: Number of DMA channels available
+ * @scdev: SCIF device on the SCIF virtual bus.
*/
struct mic_device {
struct mic_mw mmio;
@@ -129,7 +131,9 @@ struct mic_device {
struct list_head vdev_list;
struct notifier_block pm_notifier;
struct mbus_device *dma_mbdev;
- struct dma_chan *dma_ch;
+ struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
+ int num_dma_ch;
+ struct scif_hw_dev *scdev;
};
/**
@@ -228,4 +232,5 @@ void mic_exit_debugfs(void);
void mic_prepare_suspend(struct mic_device *mdev);
void mic_complete_resume(struct mic_device *mdev);
void mic_suspend(struct mic_device *mdev);
+extern atomic_t g_num_mics;
#endif
diff --git a/drivers/misc/mic/host/mic_intr.h b/drivers/misc/mic/host/mic_intr.h
index 9f783d4ad7f1..cce28824db8a 100644
--- a/drivers/misc/mic/host/mic_intr.h
+++ b/drivers/misc/mic/host/mic_intr.h
@@ -28,8 +28,9 @@
* 3 for virtio network, console and block devices.
* 1 for card shutdown notifications.
* 4 for host owned DMA channels.
+ * 1 for SCIF.
*/
-#define MIC_MIN_MSIX 8
+#define MIC_MIN_MSIX 9
#define MIC_NUM_OFFSETS 32
/**
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c
index ab37a3117d23..456462932151 100644
--- a/drivers/misc/mic/host/mic_main.c
+++ b/drivers/misc/mic/host/mic_main.c
@@ -67,6 +67,8 @@ static struct ida g_mic_ida;
static struct class *g_mic_class;
/* Base device node number for MIC devices */
static dev_t g_mic_devno;
+/* Track the total number of MIC devices */
+atomic_t g_num_mics;
static const struct file_operations mic_fops = {
.open = mic_open,
@@ -408,6 +410,7 @@ static int mic_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "cdev_add err id %d rc %d\n", mdev->id, rc);
goto cleanup_debug_dir;
}
+ atomic_inc(&g_num_mics);
return 0;
cleanup_debug_dir:
mic_delete_debug_dir(mdev);
@@ -459,6 +462,7 @@ static void mic_remove(struct pci_dev *pdev)
return;
mic_stop(mdev, false);
+ atomic_dec(&g_num_mics);
cdev_del(&mdev->cdev);
mic_delete_debug_dir(mdev);
mutex_lock(&mdev->mic_mutex);
@@ -478,6 +482,7 @@ static void mic_remove(struct pci_dev *pdev)
ida_simple_remove(&g_mic_ida, mdev->id);
kfree(mdev);
}
+
static struct pci_driver mic_driver = {
.name = mic_driver_name,
.id_table = mic_pci_tbl,
@@ -512,6 +517,7 @@ static int __init mic_init(void)
}
return ret;
cleanup_debugfs:
+ ida_destroy(&g_mic_ida);
mic_exit_debugfs();
class_destroy(g_mic_class);
cleanup_chrdev:
diff --git a/drivers/misc/mic/host/mic_smpt.c b/drivers/misc/mic/host/mic_smpt.c
index fae474c4899e..cec82034875f 100644
--- a/drivers/misc/mic/host/mic_smpt.c
+++ b/drivers/misc/mic/host/mic_smpt.c
@@ -174,8 +174,7 @@ static int mic_get_smpt_ref_count(struct mic_device *mdev, dma_addr_t dma_addr,
*
* returns a DMA address.
*/
-static dma_addr_t
-mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr)
+dma_addr_t mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr)
{
struct mic_smpt_info *smpt_info = mdev->smpt;
int spt;
@@ -214,7 +213,7 @@ dma_addr_t mic_map(struct mic_device *mdev, dma_addr_t dma_addr, size_t size)
if (!size || size > mic_max_system_memory(mdev))
return mic_addr;
- ref = kmalloc(mdev->smpt->info.num_reg * sizeof(s64), GFP_KERNEL);
+ ref = kmalloc_array(mdev->smpt->info.num_reg, sizeof(s64), GFP_ATOMIC);
if (!ref)
return mic_addr;
@@ -271,7 +270,7 @@ void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size)
}
spt = mic_sys_addr_to_smpt(mdev, mic_addr);
- ref = kmalloc(mdev->smpt->info.num_reg * sizeof(s64), GFP_KERNEL);
+ ref = kmalloc_array(mdev->smpt->info.num_reg, sizeof(s64), GFP_ATOMIC);
if (!ref)
return;
diff --git a/drivers/misc/mic/host/mic_smpt.h b/drivers/misc/mic/host/mic_smpt.h
index 51970abfe7df..68721c6e7455 100644
--- a/drivers/misc/mic/host/mic_smpt.h
+++ b/drivers/misc/mic/host/mic_smpt.h
@@ -78,6 +78,7 @@ void mic_unmap_single(struct mic_device *mdev,
dma_addr_t mic_map(struct mic_device *mdev,
dma_addr_t dma_addr, size_t size);
void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size);
+dma_addr_t mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr);
/**
* mic_map_error - Check a MIC address for errors.
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
index a020e4eb435a..cc08e9f733c9 100644
--- a/drivers/misc/mic/host/mic_virtio.c
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -40,7 +40,7 @@ static int mic_sync_dma(struct mic_device *mdev, dma_addr_t dst,
{
int err = 0;
struct dma_async_tx_descriptor *tx;
- struct dma_chan *mic_ch = mdev->dma_ch;
+ struct dma_chan *mic_ch = mdev->dma_ch[0];
if (!mic_ch) {
err = -EBUSY;
@@ -80,7 +80,7 @@ static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, void __user *ubuf,
struct mic_device *mdev = mvdev->mdev;
void __iomem *dbuf = mdev->aper.va + daddr;
struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
- size_t dma_alignment = 1 << mdev->dma_ch->device->copy_align;
+ size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align;
size_t dma_offset;
size_t partlen;
int err;
@@ -129,7 +129,7 @@ static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, void __user *ubuf,
struct mic_device *mdev = mvdev->mdev;
void __iomem *dbuf = mdev->aper.va + daddr;
struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
- size_t dma_alignment = 1 << mdev->dma_ch->device->copy_align;
+ size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align;
size_t partlen;
int err;
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index b7a21e11dcdf..3341e90dede4 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -167,8 +167,7 @@ static void mic_x100_send_intr(struct mic_device *mdev, int doorbell)
if (doorbell < MIC_X100_NUM_SBOX_IRQ) {
mic_x100_send_sbox_intr(mdev, doorbell);
} else {
- rdmasr_db = doorbell - MIC_X100_NUM_SBOX_IRQ +
- MIC_X100_RDMASR_IRQ_BASE;
+ rdmasr_db = doorbell - MIC_X100_NUM_SBOX_IRQ;
mic_x100_send_rdmasr_intr(mdev, rdmasr_db);
}
}
diff --git a/drivers/misc/mic/scif/Makefile b/drivers/misc/mic/scif/Makefile
new file mode 100644
index 000000000000..bf10bb7e2b91
--- /dev/null
+++ b/drivers/misc/mic/scif/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile - SCIF driver.
+# Copyright(c) 2014, Intel Corporation.
+#
+obj-$(CONFIG_SCIF) += scif.o
+scif-objs := scif_main.o
+scif-objs += scif_peer_bus.o
+scif-objs += scif_ports.o
+scif-objs += scif_debugfs.o
+scif-objs += scif_fd.o
+scif-objs += scif_api.o
+scif-objs += scif_epd.o
+scif-objs += scif_rb.o
+scif-objs += scif_nodeqp.o
+scif-objs += scif_nm.o
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
new file mode 100644
index 000000000000..f39d3135a9ef
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -0,0 +1,1276 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#include <linux/scif.h>
+#include "scif_main.h"
+#include "scif_map.h"
+
+static const char * const scif_ep_states[] = {
+ "Unbound",
+ "Bound",
+ "Listening",
+ "Connected",
+ "Connecting",
+ "Mapping",
+ "Closing",
+ "Close Listening",
+ "Disconnected",
+ "Zombie"};
+
+enum conn_async_state {
+ ASYNC_CONN_IDLE = 1, /* ep setup for async connect */
+ ASYNC_CONN_INPROGRESS, /* async connect in progress */
+ ASYNC_CONN_FLUSH_WORK /* async work flush in progress */
+};
+
+scif_epd_t scif_open(void)
+{
+ struct scif_endpt *ep;
+
+ might_sleep();
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ goto err_ep_alloc;
+
+ ep->qp_info.qp = kzalloc(sizeof(*ep->qp_info.qp), GFP_KERNEL);
+ if (!ep->qp_info.qp)
+ goto err_qp_alloc;
+
+ spin_lock_init(&ep->lock);
+ mutex_init(&ep->sendlock);
+ mutex_init(&ep->recvlock);
+
+ ep->state = SCIFEP_UNBOUND;
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI open: ep %p success\n", ep);
+ return ep;
+
+err_qp_alloc:
+ kfree(ep);
+err_ep_alloc:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(scif_open);
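
For a kernel client, scif_open() pairs with scif_close() much like socket()/close(). A usage sketch against the API added here, not part of the patch itself:

#include <linux/errno.h>
#include <linux/scif.h>

static int scif_open_close_demo(void)
{
	scif_epd_t epd;

	epd = scif_open();	/* NULL on allocation failure */
	if (!epd)
		return -ENOMEM;

	/* ... scif_bind()/scif_listen() or scif_connect() here ... */

	return scif_close(epd);
}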
+
+/*
+ * scif_disconnect_ep - Disconnects the endpoint if found
+ * @ep: The end point returned from scif_open()
+ */
+static struct scif_endpt *scif_disconnect_ep(struct scif_endpt *ep)
+{
+ struct scifmsg msg;
+ struct scif_endpt *fep = NULL;
+ struct scif_endpt *tmpep;
+ struct list_head *pos, *tmpq;
+ int err;
+
+ /*
+ * Wake up any threads blocked in send()/recv() before closing
+ * out the connection. Grabbing and releasing the send/recv lock
+ * will ensure that any blocked senders/receivers have exited for
+ * Ring 0 endpoints. It is a Ring 0 bug to call send/recv after
+ * close. Ring 3 endpoints are not affected since close will not
+ * be called while there are IOCTLs executing.
+ */
+ wake_up_interruptible(&ep->sendwq);
+ wake_up_interruptible(&ep->recvwq);
+ mutex_lock(&ep->sendlock);
+ mutex_unlock(&ep->sendlock);
+ mutex_lock(&ep->recvlock);
+ mutex_unlock(&ep->recvlock);
+
+ /* Remove from the connected list */
+ mutex_lock(&scif_info.connlock);
+ list_for_each_safe(pos, tmpq, &scif_info.connected) {
+ tmpep = list_entry(pos, struct scif_endpt, list);
+ if (tmpep == ep) {
+ list_del(pos);
+ fep = tmpep;
+ spin_lock(&ep->lock);
+ break;
+ }
+ }
+
+ if (!fep) {
+ /*
+ * The other side has completed the disconnect before
+		 * the end point could be removed from the list. Therefore
+		 * the ep lock is not held; traverse the disconnected
+ * list to find the endpoint and release the conn lock.
+ */
+ list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
+ tmpep = list_entry(pos, struct scif_endpt, list);
+ if (tmpep == ep) {
+ list_del(pos);
+ break;
+ }
+ }
+ mutex_unlock(&scif_info.connlock);
+ return NULL;
+ }
+
+ init_completion(&ep->discon);
+ msg.uop = SCIF_DISCNCT;
+ msg.src = ep->port;
+ msg.dst = ep->peer;
+ msg.payload[0] = (u64)ep;
+ msg.payload[1] = ep->remote_ep;
+
+ err = scif_nodeqp_send(ep->remote_dev, &msg);
+ spin_unlock(&ep->lock);
+ mutex_unlock(&scif_info.connlock);
+
+ if (!err)
+ /* Wait for the remote node to respond with SCIF_DISCNT_ACK */
+ wait_for_completion_timeout(&ep->discon,
+ SCIF_NODE_ALIVE_TIMEOUT);
+ return ep;
+}
+
+int scif_close(scif_epd_t epd)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+ struct scif_endpt *tmpep;
+ struct list_head *pos, *tmpq;
+ enum scif_epd_state oldstate;
+ bool flush_conn;
+
+ dev_dbg(scif_info.mdev.this_device, "SCIFAPI close: ep %p %s\n",
+ ep, scif_ep_states[ep->state]);
+ might_sleep();
+ spin_lock(&ep->lock);
+ flush_conn = (ep->conn_async_state == ASYNC_CONN_INPROGRESS);
+ spin_unlock(&ep->lock);
+
+ if (flush_conn)
+ flush_work(&scif_info.conn_work);
+
+ spin_lock(&ep->lock);
+ oldstate = ep->state;
+
+ ep->state = SCIFEP_CLOSING;
+
+ switch (oldstate) {
+ case SCIFEP_ZOMBIE:
+ case SCIFEP_DISCONNECTED:
+ spin_unlock(&ep->lock);
+ /* Remove from the disconnected list */
+ mutex_lock(&scif_info.connlock);
+ list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
+ tmpep = list_entry(pos, struct scif_endpt, list);
+ if (tmpep == ep) {
+ list_del(pos);
+ break;
+ }
+ }
+ mutex_unlock(&scif_info.connlock);
+ break;
+ case SCIFEP_UNBOUND:
+ case SCIFEP_BOUND:
+ case SCIFEP_CONNECTING:
+ spin_unlock(&ep->lock);
+ break;
+ case SCIFEP_MAPPING:
+ case SCIFEP_CONNECTED:
+ case SCIFEP_CLOSING:
+ {
+ spin_unlock(&ep->lock);
+ scif_disconnect_ep(ep);
+ break;
+ }
+ case SCIFEP_LISTENING:
+ case SCIFEP_CLLISTEN:
+ {
+ struct scif_conreq *conreq;
+ struct scifmsg msg;
+ struct scif_endpt *aep;
+
+ spin_unlock(&ep->lock);
+ spin_lock(&scif_info.eplock);
+
+ /* remove from listen list */
+ list_for_each_safe(pos, tmpq, &scif_info.listen) {
+ tmpep = list_entry(pos, struct scif_endpt, list);
+ if (tmpep == ep)
+ list_del(pos);
+ }
+ /* Remove any dangling accepts */
+ while (ep->acceptcnt) {
+ aep = list_first_entry(&ep->li_accept,
+ struct scif_endpt, liacceptlist);
+ list_del(&aep->liacceptlist);
+ scif_put_port(aep->port.port);
+ list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
+ tmpep = list_entry(pos, struct scif_endpt,
+ miacceptlist);
+ if (tmpep == aep) {
+ list_del(pos);
+ break;
+ }
+ }
+ spin_unlock(&scif_info.eplock);
+ mutex_lock(&scif_info.connlock);
+ list_for_each_safe(pos, tmpq, &scif_info.connected) {
+ tmpep = list_entry(pos,
+ struct scif_endpt, list);
+ if (tmpep == aep) {
+ list_del(pos);
+ break;
+ }
+ }
+ list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
+ tmpep = list_entry(pos,
+ struct scif_endpt, list);
+ if (tmpep == aep) {
+ list_del(pos);
+ break;
+ }
+ }
+ mutex_unlock(&scif_info.connlock);
+ scif_teardown_ep(aep);
+ spin_lock(&scif_info.eplock);
+ scif_add_epd_to_zombie_list(aep, SCIF_EPLOCK_HELD);
+ ep->acceptcnt--;
+ }
+
+ spin_lock(&ep->lock);
+ spin_unlock(&scif_info.eplock);
+
+ /* Remove and reject any pending connection requests. */
+ while (ep->conreqcnt) {
+ conreq = list_first_entry(&ep->conlist,
+ struct scif_conreq, list);
+ list_del(&conreq->list);
+
+ msg.uop = SCIF_CNCT_REJ;
+ msg.dst.node = conreq->msg.src.node;
+ msg.dst.port = conreq->msg.src.port;
+ msg.payload[0] = conreq->msg.payload[0];
+ msg.payload[1] = conreq->msg.payload[1];
+ /*
+			 * No error handling on purpose for scif_nodeqp_send().
+			 * If the remote node is lost we still want to free the
+			 * connection requests on the self node.
+ */
+ scif_nodeqp_send(&scif_dev[conreq->msg.src.node],
+ &msg);
+ ep->conreqcnt--;
+ kfree(conreq);
+ }
+
+ spin_unlock(&ep->lock);
+ /* If a kSCIF accept is waiting wake it up */
+ wake_up_interruptible(&ep->conwq);
+ break;
+ }
+ }
+ scif_put_port(ep->port.port);
+ scif_teardown_ep(ep);
+ scif_add_epd_to_zombie_list(ep, !SCIF_EPLOCK_HELD);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scif_close);
+
+/**
+ * __scif_flush() - Wakes up any blocking accepts. The endpoint will no longer
+ * accept new connections.
+ * @epd: The end point returned from scif_open()
+ */
+int __scif_flush(scif_epd_t epd)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+
+ switch (ep->state) {
+ case SCIFEP_LISTENING:
+ {
+ ep->state = SCIFEP_CLLISTEN;
+
+ /* If an accept is waiting wake it up */
+ wake_up_interruptible(&ep->conwq);
+ break;
+ }
+ default:
+ break;
+ }
+ return 0;
+}
+
+int scif_bind(scif_epd_t epd, u16 pn)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+ int ret = 0;
+ int tmp;
+
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI bind: ep %p %s requested port number %d\n",
+ ep, scif_ep_states[ep->state], pn);
+ if (pn) {
+ /*
+ * Similar to IETF RFC 1700, SCIF ports below
+ * SCIF_ADMIN_PORT_END can only be bound by system (or root)
+ * processes or by processes executed by privileged users.
+ */
+ if (pn < SCIF_ADMIN_PORT_END && !capable(CAP_SYS_ADMIN)) {
+ ret = -EACCES;
+ goto scif_bind_admin_exit;
+ }
+ }
+
+ spin_lock(&ep->lock);
+ if (ep->state == SCIFEP_BOUND) {
+ ret = -EINVAL;
+ goto scif_bind_exit;
+ } else if (ep->state != SCIFEP_UNBOUND) {
+ ret = -EISCONN;
+ goto scif_bind_exit;
+ }
+
+ if (pn) {
+ tmp = scif_rsrv_port(pn);
+ if (tmp != pn) {
+ ret = -EINVAL;
+ goto scif_bind_exit;
+ }
+ } else {
+ pn = scif_get_new_port();
+ if (!pn) {
+ ret = -ENOSPC;
+ goto scif_bind_exit;
+ }
+ }
+
+ ep->state = SCIFEP_BOUND;
+ ep->port.node = scif_info.nodeid;
+ ep->port.port = pn;
+ ep->conn_async_state = ASYNC_CONN_IDLE;
+ ret = pn;
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI bind: bound to port number %d\n", pn);
+scif_bind_exit:
+ spin_unlock(&ep->lock);
+scif_bind_admin_exit:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scif_bind);
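
Note the dual return convention: on success scif_bind() returns the bound port number, which matters when port 0 is passed to request automatic assignment. A hedged sketch:

#include <linux/printk.h>
#include <linux/scif.h>

static int scif_bind_demo(scif_epd_t epd)
{
	int port;

	/*
	 * Passing 0 asks SCIF to pick a free port; a non-zero port below
	 * SCIF_ADMIN_PORT_END additionally requires CAP_SYS_ADMIN.
	 */
	port = scif_bind(epd, 0);
	if (port < 0)
		return port;	/* negative errno */

	pr_info("bound to SCIF port %d\n", port);
	return 0;
}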
+
+int scif_listen(scif_epd_t epd, int backlog)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI listen: ep %p %s\n", ep, scif_ep_states[ep->state]);
+ spin_lock(&ep->lock);
+ switch (ep->state) {
+ case SCIFEP_ZOMBIE:
+ case SCIFEP_CLOSING:
+ case SCIFEP_CLLISTEN:
+ case SCIFEP_UNBOUND:
+ case SCIFEP_DISCONNECTED:
+ spin_unlock(&ep->lock);
+ return -EINVAL;
+ case SCIFEP_LISTENING:
+ case SCIFEP_CONNECTED:
+ case SCIFEP_CONNECTING:
+ case SCIFEP_MAPPING:
+ spin_unlock(&ep->lock);
+ return -EISCONN;
+ case SCIFEP_BOUND:
+ break;
+ }
+
+ ep->state = SCIFEP_LISTENING;
+ ep->backlog = backlog;
+
+ ep->conreqcnt = 0;
+ ep->acceptcnt = 0;
+ INIT_LIST_HEAD(&ep->conlist);
+ init_waitqueue_head(&ep->conwq);
+ INIT_LIST_HEAD(&ep->li_accept);
+ spin_unlock(&ep->lock);
+
+ /*
+	 * The endpoint is now listening, so delete the qp information that
+	 * is not needed on a listening endpoint before placing it on the
+	 * list of listening ep's
+ */
+ scif_teardown_ep(ep);
+ ep->qp_info.qp = NULL;
+
+ spin_lock(&scif_info.eplock);
+ list_add_tail(&ep->list, &scif_info.listen);
+ spin_unlock(&scif_info.eplock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scif_listen);
+
+/*
+ ************************************************************************
+ * SCIF connection flow:
+ *
+ * 1) A SCIF listening endpoint can call scif_accept(..) to wait for SCIF
+ * connections via a SCIF_CNCT_REQ message
+ * 2) A SCIF endpoint can initiate a SCIF connection by calling
+ * scif_connect(..) which calls scif_setup_qp_connect(..) which
+ * allocates the local qp for the endpoint ring buffer and then sends
+ * a SCIF_CNCT_REQ to the remote node and waits for a SCIF_CNCT_GNT or
+ * a SCIF_CNCT_REJ message
+ * 3) The peer node handles a SCIF_CNCT_REQ via scif_cnctreq_resp(..) which
+ * wakes up any threads blocked in step 1 or sends a SCIF_CNCT_REJ
+ * message otherwise
+ * 4) A thread blocked waiting for incoming connections allocates its local
+ * endpoint QP and ring buffer following which it sends a SCIF_CNCT_GNT
+ * and waits for a SCIF_CNCT_GNT(N)ACK. If the allocation fails then
+ * the node sends a SCIF_CNCT_REJ message
+ * 5) Upon receipt of a SCIF_CNCT_GNT or a SCIF_CNCT_REJ message the
+ * connecting endpoint is woken up as part of handling
+ * scif_cnctgnt_resp(..) following which it maps the remote endpoints'
+ * QP, updates its outbound QP and sends a SCIF_CNCT_GNTACK message on
+ * success or a SCIF_CNCT_GNTNACK message on failure and completes
+ * the scif_connect(..) API
+ * 6) Upon receipt of a SCIF_CNCT_GNT(N)ACK the accepting endpoint blocked
+ * in step 4 is woken up and completes the scif_accept(..) API
+ * 7) The SCIF connection is now established between the two SCIF endpoints.
+ */
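
From a connecting client's perspective, steps 2 and 5 of this flow collapse into a single blocking scif_connect() call. A sketch of that side, with placeholder node and port values:

#include <linux/errno.h>
#include <linux/scif.h>

static int scif_connect_demo(u16 node, u16 port)
{
	struct scif_port_id dst = {
		.node = node,	/* placeholder remote node id */
		.port = port,	/* placeholder remote port */
	};
	scif_epd_t epd;
	int err;

	epd = scif_open();
	if (!epd)
		return -ENOMEM;

	/* Drives the SCIF_CNCT_REQ/GNT/GNTACK exchange described above */
	err = scif_connect(epd, &dst);
	if (err < 0) {
		scif_close(epd);
		return err;
	}
	return 0;
}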
+static int scif_conn_func(struct scif_endpt *ep)
+{
+ int err = 0;
+ struct scifmsg msg;
+ struct device *spdev;
+
+ /* Initiate the first part of the endpoint QP setup */
+ err = scif_setup_qp_connect(ep->qp_info.qp, &ep->qp_info.qp_offset,
+ SCIF_ENDPT_QP_SIZE, ep->remote_dev);
+ if (err) {
+ dev_err(&ep->remote_dev->sdev->dev,
+ "%s err %d qp_offset 0x%llx\n",
+ __func__, err, ep->qp_info.qp_offset);
+ ep->state = SCIFEP_BOUND;
+ goto connect_error_simple;
+ }
+
+ spdev = scif_get_peer_dev(ep->remote_dev);
+ if (IS_ERR(spdev)) {
+ err = PTR_ERR(spdev);
+ goto cleanup_qp;
+ }
+ /* Format connect message and send it */
+ msg.src = ep->port;
+ msg.dst = ep->conn_port;
+ msg.uop = SCIF_CNCT_REQ;
+ msg.payload[0] = (u64)ep;
+ msg.payload[1] = ep->qp_info.qp_offset;
+ err = _scif_nodeqp_send(ep->remote_dev, &msg);
+ if (err)
+ goto connect_error_dec;
+ scif_put_peer_dev(spdev);
+ /*
+ * Wait for the remote node to respond with SCIF_CNCT_GNT or
+ * SCIF_CNCT_REJ message.
+ */
+ err = wait_event_timeout(ep->conwq, ep->state != SCIFEP_CONNECTING,
+ SCIF_NODE_ALIVE_TIMEOUT);
+ if (!err) {
+ dev_err(&ep->remote_dev->sdev->dev,
+ "%s %d timeout\n", __func__, __LINE__);
+ ep->state = SCIFEP_BOUND;
+ }
+ spdev = scif_get_peer_dev(ep->remote_dev);
+ if (IS_ERR(spdev)) {
+ err = PTR_ERR(spdev);
+ goto cleanup_qp;
+ }
+ if (ep->state == SCIFEP_MAPPING) {
+ err = scif_setup_qp_connect_response(ep->remote_dev,
+ ep->qp_info.qp,
+ ep->qp_info.gnt_pld);
+ /*
+		 * If the resources to map the queue are not available then
+ * we need to tell the other side to terminate the accept
+ */
+ if (err) {
+ dev_err(&ep->remote_dev->sdev->dev,
+ "%s %d err %d\n", __func__, __LINE__, err);
+ msg.uop = SCIF_CNCT_GNTNACK;
+ msg.payload[0] = ep->remote_ep;
+ _scif_nodeqp_send(ep->remote_dev, &msg);
+ ep->state = SCIFEP_BOUND;
+ goto connect_error_dec;
+ }
+
+ msg.uop = SCIF_CNCT_GNTACK;
+ msg.payload[0] = ep->remote_ep;
+ err = _scif_nodeqp_send(ep->remote_dev, &msg);
+ if (err) {
+ ep->state = SCIFEP_BOUND;
+ goto connect_error_dec;
+ }
+ ep->state = SCIFEP_CONNECTED;
+ mutex_lock(&scif_info.connlock);
+ list_add_tail(&ep->list, &scif_info.connected);
+ mutex_unlock(&scif_info.connlock);
+ dev_dbg(&ep->remote_dev->sdev->dev,
+ "SCIFAPI connect: ep %p connected\n", ep);
+ } else if (ep->state == SCIFEP_BOUND) {
+ dev_dbg(&ep->remote_dev->sdev->dev,
+ "SCIFAPI connect: ep %p connection refused\n", ep);
+ err = -ECONNREFUSED;
+ goto connect_error_dec;
+ }
+ scif_put_peer_dev(spdev);
+ return err;
+connect_error_dec:
+ scif_put_peer_dev(spdev);
+cleanup_qp:
+ scif_cleanup_ep_qp(ep);
+connect_error_simple:
+ return err;
+}
+
+/*
+ * scif_conn_handler:
+ *
+ * Workqueue handler for servicing non-blocking SCIF connect
+ *
+ */
+void scif_conn_handler(struct work_struct *work)
+{
+ struct scif_endpt *ep;
+
+ do {
+ ep = NULL;
+ spin_lock(&scif_info.nb_connect_lock);
+ if (!list_empty(&scif_info.nb_connect_list)) {
+ ep = list_first_entry(&scif_info.nb_connect_list,
+ struct scif_endpt, conn_list);
+ list_del(&ep->conn_list);
+ }
+ spin_unlock(&scif_info.nb_connect_lock);
+ if (ep)
+ ep->conn_err = scif_conn_func(ep);
+ } while (ep);
+}
+
+int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+ int err = 0;
+ struct scif_dev *remote_dev;
+ struct device *spdev;
+
+ dev_dbg(scif_info.mdev.this_device, "SCIFAPI connect: ep %p %s\n", ep,
+ scif_ep_states[ep->state]);
+
+ if (!scif_dev || dst->node > scif_info.maxid)
+ return -ENODEV;
+
+ might_sleep();
+
+ remote_dev = &scif_dev[dst->node];
+ spdev = scif_get_peer_dev(remote_dev);
+ if (IS_ERR(spdev)) {
+ err = PTR_ERR(spdev);
+ return err;
+ }
+
+ spin_lock(&ep->lock);
+ switch (ep->state) {
+ case SCIFEP_ZOMBIE:
+ case SCIFEP_CLOSING:
+ err = -EINVAL;
+ break;
+ case SCIFEP_DISCONNECTED:
+ if (ep->conn_async_state == ASYNC_CONN_INPROGRESS)
+ ep->conn_async_state = ASYNC_CONN_FLUSH_WORK;
+ else
+ err = -EINVAL;
+ break;
+ case SCIFEP_LISTENING:
+ case SCIFEP_CLLISTEN:
+ err = -EOPNOTSUPP;
+ break;
+ case SCIFEP_CONNECTING:
+ case SCIFEP_MAPPING:
+ if (ep->conn_async_state == ASYNC_CONN_INPROGRESS)
+ err = -EINPROGRESS;
+ else
+ err = -EISCONN;
+ break;
+ case SCIFEP_CONNECTED:
+ if (ep->conn_async_state == ASYNC_CONN_INPROGRESS)
+ ep->conn_async_state = ASYNC_CONN_FLUSH_WORK;
+ else
+ err = -EISCONN;
+ break;
+ case SCIFEP_UNBOUND:
+ ep->port.port = scif_get_new_port();
+ if (!ep->port.port) {
+ err = -ENOSPC;
+ } else {
+ ep->port.node = scif_info.nodeid;
+ ep->conn_async_state = ASYNC_CONN_IDLE;
+ }
+ /* Fall through */
+ case SCIFEP_BOUND:
+ /*
+		 * If a non-blocking connect has already been initiated
+		 * (conn_async_state is either ASYNC_CONN_INPROGRESS or
+		 * ASYNC_CONN_FLUSH_WORK), the end point could end up in
+		 * SCIFEP_BOUND due to an error in the connection process
+		 * (e.g., connection refused). If conn_async_state is
+		 * ASYNC_CONN_INPROGRESS, transition to ASYNC_CONN_FLUSH_WORK
+		 * so that the error status can be collected. If the state is
+		 * already ASYNC_CONN_FLUSH_WORK then set the error to
+		 * EINPROGRESS since some other thread is waiting to collect
+		 * the error status.
+ */
+ if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
+ ep->conn_async_state = ASYNC_CONN_FLUSH_WORK;
+ } else if (ep->conn_async_state == ASYNC_CONN_FLUSH_WORK) {
+ err = -EINPROGRESS;
+ } else {
+ ep->conn_port = *dst;
+ init_waitqueue_head(&ep->sendwq);
+ init_waitqueue_head(&ep->recvwq);
+ init_waitqueue_head(&ep->conwq);
+ ep->conn_async_state = 0;
+
+ if (unlikely(non_block))
+ ep->conn_async_state = ASYNC_CONN_INPROGRESS;
+ }
+ break;
+ }
+
+ if (err || ep->conn_async_state == ASYNC_CONN_FLUSH_WORK)
+ goto connect_simple_unlock1;
+
+ ep->state = SCIFEP_CONNECTING;
+ ep->remote_dev = &scif_dev[dst->node];
+ ep->qp_info.qp->magic = SCIFEP_MAGIC;
+ if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
+ spin_lock(&scif_info.nb_connect_lock);
+ list_add_tail(&ep->conn_list, &scif_info.nb_connect_list);
+ spin_unlock(&scif_info.nb_connect_lock);
+ err = -EINPROGRESS;
+ schedule_work(&scif_info.conn_work);
+ }
+connect_simple_unlock1:
+ spin_unlock(&ep->lock);
+ scif_put_peer_dev(spdev);
+ if (err) {
+ return err;
+ } else if (ep->conn_async_state == ASYNC_CONN_FLUSH_WORK) {
+ flush_work(&scif_info.conn_work);
+ err = ep->conn_err;
+ spin_lock(&ep->lock);
+ ep->conn_async_state = ASYNC_CONN_IDLE;
+ spin_unlock(&ep->lock);
+ } else {
+ err = scif_conn_func(ep);
+ }
+ return err;
+}
+
+int scif_connect(scif_epd_t epd, struct scif_port_id *dst)
+{
+ return __scif_connect(epd, dst, false);
+}
+EXPORT_SYMBOL_GPL(scif_connect);
+
+/**
+ * scif_accept() - Accept a connection request from the remote node
+ *
+ * The function accepts a connection request from the remote node. Successful
+ * completion is indicated by a new end point being created and passed back
+ * to the caller for future reference.
+ *
+ * Upon successful completion zero will be returned and the peer information
+ * will be filled in.
+ *
+ * If the end point is not in the listening state -EINVAL will be returned.
+ *
+ * If resource allocation fails during the connection sequence, -ENOMEM
+ * will be returned.
+ *
+ * If the function is called without the SCIF_ACCEPT_SYNC flag set and no
+ * connection requests are pending it will return -EAGAIN.
+ *
+ * If the remote side is not sending any connection requests the caller may
+ * terminate this function with a signal. If so, -EINTR will be returned.
+ */
+int scif_accept(scif_epd_t epd, struct scif_port_id *peer,
+ scif_epd_t *newepd, int flags)
+{
+ struct scif_endpt *lep = (struct scif_endpt *)epd;
+ struct scif_endpt *cep;
+ struct scif_conreq *conreq;
+ struct scifmsg msg;
+ int err;
+ struct device *spdev;
+
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI accept: ep %p %s\n", lep, scif_ep_states[lep->state]);
+
+ if (flags & ~SCIF_ACCEPT_SYNC)
+ return -EINVAL;
+
+ if (!peer || !newepd)
+ return -EINVAL;
+
+ might_sleep();
+ spin_lock(&lep->lock);
+ if (lep->state != SCIFEP_LISTENING) {
+ spin_unlock(&lep->lock);
+ return -EINVAL;
+ }
+
+ if (!lep->conreqcnt && !(flags & SCIF_ACCEPT_SYNC)) {
+ /* No connection request present and we do not want to wait */
+ spin_unlock(&lep->lock);
+ return -EAGAIN;
+ }
+
+ lep->files = current->files;
+retry_connection:
+ spin_unlock(&lep->lock);
+ /* Wait for the remote node to send us a SCIF_CNCT_REQ */
+ err = wait_event_interruptible(lep->conwq,
+ (lep->conreqcnt ||
+ (lep->state != SCIFEP_LISTENING)));
+ if (err)
+ return err;
+
+ if (lep->state != SCIFEP_LISTENING)
+ return -EINTR;
+
+ spin_lock(&lep->lock);
+
+ if (!lep->conreqcnt)
+ goto retry_connection;
+
+ /* Get the first connect request off the list */
+ conreq = list_first_entry(&lep->conlist, struct scif_conreq, list);
+ list_del(&conreq->list);
+ lep->conreqcnt--;
+ spin_unlock(&lep->lock);
+
+ /* Fill in the peer information */
+ peer->node = conreq->msg.src.node;
+ peer->port = conreq->msg.src.port;
+
+ cep = kzalloc(sizeof(*cep), GFP_KERNEL);
+ if (!cep) {
+ err = -ENOMEM;
+ goto scif_accept_error_epalloc;
+ }
+ spin_lock_init(&cep->lock);
+ mutex_init(&cep->sendlock);
+ mutex_init(&cep->recvlock);
+ cep->state = SCIFEP_CONNECTING;
+ cep->remote_dev = &scif_dev[peer->node];
+ cep->remote_ep = conreq->msg.payload[0];
+
+ cep->qp_info.qp = kzalloc(sizeof(*cep->qp_info.qp), GFP_KERNEL);
+ if (!cep->qp_info.qp) {
+ err = -ENOMEM;
+ goto scif_accept_error_qpalloc;
+ }
+
+ cep->qp_info.qp->magic = SCIFEP_MAGIC;
+ spdev = scif_get_peer_dev(cep->remote_dev);
+ if (IS_ERR(spdev)) {
+ err = PTR_ERR(spdev);
+ goto scif_accept_error_map;
+ }
+ err = scif_setup_qp_accept(cep->qp_info.qp, &cep->qp_info.qp_offset,
+ conreq->msg.payload[1], SCIF_ENDPT_QP_SIZE,
+ cep->remote_dev);
+ if (err) {
+ dev_dbg(&cep->remote_dev->sdev->dev,
+ "SCIFAPI accept: ep %p new %p scif_setup_qp_accept %d qp_offset 0x%llx\n",
+ lep, cep, err, cep->qp_info.qp_offset);
+ scif_put_peer_dev(spdev);
+ goto scif_accept_error_map;
+ }
+
+ cep->port.node = lep->port.node;
+ cep->port.port = lep->port.port;
+ cep->peer.node = peer->node;
+ cep->peer.port = peer->port;
+ init_waitqueue_head(&cep->sendwq);
+ init_waitqueue_head(&cep->recvwq);
+ init_waitqueue_head(&cep->conwq);
+
+ msg.uop = SCIF_CNCT_GNT;
+ msg.src = cep->port;
+ msg.payload[0] = cep->remote_ep;
+ msg.payload[1] = cep->qp_info.qp_offset;
+ msg.payload[2] = (u64)cep;
+
+ err = _scif_nodeqp_send(cep->remote_dev, &msg);
+ scif_put_peer_dev(spdev);
+ if (err)
+ goto scif_accept_error_map;
+retry:
+ /* Wait for the remote node to respond with SCIF_CNCT_GNT(N)ACK */
+ err = wait_event_timeout(cep->conwq, cep->state != SCIFEP_CONNECTING,
+ SCIF_NODE_ACCEPT_TIMEOUT);
+ if (!err && scifdev_alive(cep))
+ goto retry;
+ err = !err ? -ENODEV : 0;
+ if (err)
+ goto scif_accept_error_map;
+ kfree(conreq);
+
+ spin_lock(&cep->lock);
+
+ if (cep->state == SCIFEP_CLOSING) {
+ /*
+ * Remote failed to allocate resources and NAKed the grant.
+		 * At this point there is nothing referencing the new end point.
+ */
+ spin_unlock(&cep->lock);
+ scif_teardown_ep(cep);
+ kfree(cep);
+
+ /* If call with sync flag then go back and wait. */
+ if (flags & SCIF_ACCEPT_SYNC) {
+ spin_lock(&lep->lock);
+ goto retry_connection;
+ }
+ return -EAGAIN;
+ }
+
+ scif_get_port(cep->port.port);
+ *newepd = (scif_epd_t)cep;
+ spin_unlock(&cep->lock);
+ return 0;
+scif_accept_error_map:
+ scif_teardown_ep(cep);
+scif_accept_error_qpalloc:
+ kfree(cep);
+scif_accept_error_epalloc:
+ msg.uop = SCIF_CNCT_REJ;
+ msg.dst.node = conreq->msg.src.node;
+ msg.dst.port = conreq->msg.src.port;
+ msg.payload[0] = conreq->msg.payload[0];
+ msg.payload[1] = conreq->msg.payload[1];
+ scif_nodeqp_send(&scif_dev[conreq->msg.src.node], &msg);
+ kfree(conreq);
+ return err;
+}
+EXPORT_SYMBOL_GPL(scif_accept);
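
On the listening side, the semantics documented above suggest a loop like the following sketch (the backlog value and logging are illustrative):

#include <linux/printk.h>
#include <linux/scif.h>

static int scif_accept_demo(scif_epd_t lep)
{
	struct scif_port_id peer;
	scif_epd_t newepd;
	int err;

	err = scif_listen(lep, 16);	/* arbitrary backlog */
	if (err)
		return err;

	/* SCIF_ACCEPT_SYNC blocks until a SCIF_CNCT_REQ arrives */
	err = scif_accept(lep, &peer, &newepd, SCIF_ACCEPT_SYNC);
	if (err)
		return err;	/* -EINTR, -ENOMEM, ... as documented */

	pr_info("accepted from node %d port %d\n", peer.node, peer.port);
	return scif_close(newepd);
}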
+
+/*
+ * scif_msg_param_check:
+ * @epd: The end point returned from scif_open()
+ * @len: Length of the message
+ * @flags: blocking or non blocking
+ *
+ * Validate parameters for messaging APIs scif_send(..)/scif_recv(..).
+ */
+static inline int scif_msg_param_check(scif_epd_t epd, int len, int flags)
+{
+ int ret = -EINVAL;
+
+ if (len < 0)
+ goto err_ret;
+ if (flags && (!(flags & SCIF_RECV_BLOCK)))
+ goto err_ret;
+ ret = 0;
+err_ret:
+ return ret;
+}
+
+static int _scif_send(scif_epd_t epd, void *msg, int len, int flags)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+ struct scifmsg notif_msg;
+ int curr_xfer_len = 0, sent_len = 0, write_count;
+ int ret = 0;
+ struct scif_qp *qp = ep->qp_info.qp;
+
+ if (flags & SCIF_SEND_BLOCK)
+ might_sleep();
+
+ spin_lock(&ep->lock);
+ while (sent_len != len && SCIFEP_CONNECTED == ep->state) {
+ write_count = scif_rb_space(&qp->outbound_q);
+ if (write_count) {
+ /* Best effort to send as much data as possible */
+ curr_xfer_len = min(len - sent_len, write_count);
+ ret = scif_rb_write(&qp->outbound_q, msg,
+ curr_xfer_len);
+ if (ret < 0)
+ break;
+ /* Success. Update write pointer */
+ scif_rb_commit(&qp->outbound_q);
+ /*
+ * Send a notification to the peer about the
+ * produced data message.
+ */
+ notif_msg.src = ep->port;
+ notif_msg.uop = SCIF_CLIENT_SENT;
+ notif_msg.payload[0] = ep->remote_ep;
+ ret = _scif_nodeqp_send(ep->remote_dev, &notif_msg);
+ if (ret)
+ break;
+ sent_len += curr_xfer_len;
+ msg = msg + curr_xfer_len;
+ continue;
+ }
+ curr_xfer_len = min(len - sent_len, SCIF_ENDPT_QP_SIZE - 1);
+		/* Not enough RB space. Return in the non-blocking case */
+ if (!(flags & SCIF_SEND_BLOCK))
+ break;
+
+ spin_unlock(&ep->lock);
+ /* Wait for a SCIF_CLIENT_RCVD message in the Blocking case */
+ ret =
+ wait_event_interruptible(ep->sendwq,
+ (SCIFEP_CONNECTED != ep->state) ||
+ (scif_rb_space(&qp->outbound_q) >=
+ curr_xfer_len));
+ spin_lock(&ep->lock);
+ if (ret)
+ break;
+ }
+ if (sent_len)
+ ret = sent_len;
+ else if (!ret && SCIFEP_CONNECTED != ep->state)
+ ret = SCIFEP_DISCONNECTED == ep->state ?
+ -ECONNRESET : -ENOTCONN;
+ spin_unlock(&ep->lock);
+ return ret;
+}
+
+static int _scif_recv(scif_epd_t epd, void *msg, int len, int flags)
+{
+ int read_size;
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+ struct scifmsg notif_msg;
+ int curr_recv_len = 0, remaining_len = len, read_count;
+ int ret = 0;
+ struct scif_qp *qp = ep->qp_info.qp;
+
+ if (flags & SCIF_RECV_BLOCK)
+ might_sleep();
+ spin_lock(&ep->lock);
+ while (remaining_len && (SCIFEP_CONNECTED == ep->state ||
+ SCIFEP_DISCONNECTED == ep->state)) {
+ read_count = scif_rb_count(&qp->inbound_q, remaining_len);
+ if (read_count) {
+ /*
+ * Best effort to recv as much data as there
+			 * are bytes to read in the RB; this is particularly
+			 * important for the non-blocking case.
+ */
+ curr_recv_len = min(remaining_len, read_count);
+ read_size = scif_rb_get_next(&qp->inbound_q,
+ msg, curr_recv_len);
+ if (ep->state == SCIFEP_CONNECTED) {
+ /*
+ * Update the read pointer only if the endpoint
+ * is still connected else the read pointer
+ * might no longer exist since the peer has
+ * freed resources!
+ */
+ scif_rb_update_read_ptr(&qp->inbound_q);
+ /*
+ * Send a notification to the peer about the
+ * consumed data message only if the EP is in
+ * SCIFEP_CONNECTED state.
+ */
+ notif_msg.src = ep->port;
+ notif_msg.uop = SCIF_CLIENT_RCVD;
+ notif_msg.payload[0] = ep->remote_ep;
+ ret = _scif_nodeqp_send(ep->remote_dev,
+ &notif_msg);
+ if (ret)
+ break;
+ }
+ remaining_len -= curr_recv_len;
+ msg = msg + curr_recv_len;
+ continue;
+ }
+ /*
+		 * Bail out now if the EP is in SCIFEP_DISCONNECTED state, else
+ * we will keep looping forever.
+ */
+ if (ep->state == SCIFEP_DISCONNECTED)
+ break;
+ /*
+ * Return in the Non Blocking case if there is no data
+ * to read in this iteration.
+ */
+ if (!(flags & SCIF_RECV_BLOCK))
+ break;
+ curr_recv_len = min(remaining_len, SCIF_ENDPT_QP_SIZE - 1);
+ spin_unlock(&ep->lock);
+ /*
+ * Wait for a SCIF_CLIENT_SEND message in the blocking case
+ * or until other side disconnects.
+ */
+ ret =
+ wait_event_interruptible(ep->recvwq,
+ SCIFEP_CONNECTED != ep->state ||
+ scif_rb_count(&qp->inbound_q,
+ curr_recv_len)
+ >= curr_recv_len);
+ spin_lock(&ep->lock);
+ if (ret)
+ break;
+ }
+ if (len - remaining_len)
+ ret = len - remaining_len;
+ else if (!ret && ep->state != SCIFEP_CONNECTED)
+ ret = ep->state == SCIFEP_DISCONNECTED ?
+ -ECONNRESET : -ENOTCONN;
+ spin_unlock(&ep->lock);
+ return ret;
+}
+
+/**
+ * scif_user_send() - Send data to connection queue
+ * @epd: The end point returned from scif_open()
+ * @msg: Address of the data to send
+ * @len: Length to send
+ * @flags: blocking or non blocking
+ *
+ * This function is called from the driver IOCTL entry point
+ * only and is a wrapper for _scif_send().
+ */
+int scif_user_send(scif_epd_t epd, void __user *msg, int len, int flags)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+ int err = 0;
+ int sent_len = 0;
+ char *tmp;
+ int loop_len;
+ int chunk_len = min(len, (1 << (MAX_ORDER + PAGE_SHIFT - 1)));
+
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI send (U): ep %p %s\n", ep, scif_ep_states[ep->state]);
+ if (!len)
+ return 0;
+
+ err = scif_msg_param_check(epd, len, flags);
+ if (err)
+ goto send_err;
+
+ tmp = kmalloc(chunk_len, GFP_KERNEL);
+ if (!tmp) {
+ err = -ENOMEM;
+ goto send_err;
+ }
+ /*
+ * Grabbing the lock before breaking up the transfer in
+ * multiple chunks is required to ensure that messages do
+ * not get fragmented and reordered.
+ */
+ mutex_lock(&ep->sendlock);
+ while (sent_len != len) {
+ loop_len = len - sent_len;
+ loop_len = min(chunk_len, loop_len);
+ if (copy_from_user(tmp, msg, loop_len)) {
+ err = -EFAULT;
+ goto send_free_err;
+ }
+ err = _scif_send(epd, tmp, loop_len, flags);
+ if (err < 0)
+ goto send_free_err;
+ sent_len += err;
+ msg += err;
+ if (err != loop_len)
+ goto send_free_err;
+ }
+send_free_err:
+ mutex_unlock(&ep->sendlock);
+ kfree(tmp);
+send_err:
+ return err < 0 ? err : sent_len;
+}
+
+/**
+ * scif_user_recv() - Receive data from connection queue
+ * @epd: The end point returned from scif_open()
+ * @msg: Address to place data
+ * @len: Length to receive
+ * @flags: blocking or non blocking
+ *
+ * This function is called from the driver IOCTL entry point
+ * only and is a wrapper for _scif_recv().
+ */
+int scif_user_recv(scif_epd_t epd, void __user *msg, int len, int flags)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+ int err = 0;
+ int recv_len = 0;
+ char *tmp;
+ int loop_len;
+ int chunk_len = min(len, (1 << (MAX_ORDER + PAGE_SHIFT - 1)));
+
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI recv (U): ep %p %s\n", ep, scif_ep_states[ep->state]);
+ if (!len)
+ return 0;
+
+ err = scif_msg_param_check(epd, len, flags);
+ if (err)
+ goto recv_err;
+
+ tmp = kmalloc(chunk_len, GFP_KERNEL);
+ if (!tmp) {
+ err = -ENOMEM;
+ goto recv_err;
+ }
+ /*
+ * Grabbing the lock before breaking up the transfer in
+ * multiple chunks is required to ensure that messages do
+ * not get fragmented and reordered.
+ */
+ mutex_lock(&ep->recvlock);
+ while (recv_len != len) {
+ loop_len = len - recv_len;
+ loop_len = min(chunk_len, loop_len);
+ err = _scif_recv(epd, tmp, loop_len, flags);
+ if (err < 0)
+ goto recv_free_err;
+ if (copy_to_user(msg, tmp, err)) {
+ err = -EFAULT;
+ goto recv_free_err;
+ }
+ recv_len += err;
+ msg += err;
+ if (err != loop_len)
+ goto recv_free_err;
+ }
+recv_free_err:
+ mutex_unlock(&ep->recvlock);
+ kfree(tmp);
+recv_err:
+ return err < 0 ? err : recv_len;
+}
+
+/**
+ * scif_send() - Send data to connection queue
+ * @epd: The end point returned from scif_open()
+ * @msg: Address of the data to send
+ * @len: Length to send
+ * @flags: blocking or non blocking
+ *
+ * This function is called from the kernel mode only and is
+ * a wrapper for _scif_send().
+ */
+int scif_send(scif_epd_t epd, void *msg, int len, int flags)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+ int ret;
+
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI send (K): ep %p %s\n", ep, scif_ep_states[ep->state]);
+ if (!len)
+ return 0;
+
+ ret = scif_msg_param_check(epd, len, flags);
+ if (ret)
+ return ret;
+ if (!ep->remote_dev)
+ return -ENOTCONN;
+ /*
+ * Grab the mutex lock in the blocking case only
+ * to ensure messages do not get fragmented/reordered.
+ * The non blocking mode is protected using spin locks
+ * in _scif_send().
+ */
+ if (flags & SCIF_SEND_BLOCK)
+ mutex_lock(&ep->sendlock);
+
+ ret = _scif_send(epd, msg, len, flags);
+
+ if (flags & SCIF_SEND_BLOCK)
+ mutex_unlock(&ep->sendlock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scif_send);
+
+/**
+ * scif_recv() - Receive data from connection queue
+ * @epd: The end point returned from scif_open()
+ * @msg: Address to place data
+ * @len: Length to receive
+ * @flags: blocking or non blocking
+ *
+ * This function is called from the kernel mode only and is
+ * a wrapper for _scif_recv().
+ */
+int scif_recv(scif_epd_t epd, void *msg, int len, int flags)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)epd;
+ int ret;
+
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI recv (K): ep %p %s\n", ep, scif_ep_states[ep->state]);
+ if (!len)
+ return 0;
+
+ ret = scif_msg_param_check(epd, len, flags);
+ if (ret)
+ return ret;
+ /*
+ * Grab the mutex lock in the blocking case only
+ * to ensure messages do not get fragmented/reordered.
+ * The non blocking mode is protected using spin locks
+	 * in _scif_recv().
+ */
+ if (flags & SCIF_RECV_BLOCK)
+ mutex_lock(&ep->recvlock);
+
+ ret = _scif_recv(epd, msg, len, flags);
+
+ if (flags & SCIF_RECV_BLOCK)
+ mutex_unlock(&ep->recvlock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scif_recv);
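
Putting the two kernel-mode wrappers together, a blocking echo of a fixed-size message might look like this sketch (the 64-byte size is arbitrary):

#include <linux/scif.h>

static int scif_echo_demo(scif_epd_t epd)
{
	char buf[64];
	int ret;

	/* Blocks until the full request completes or the peer disconnects */
	ret = scif_recv(epd, buf, sizeof(buf), SCIF_RECV_BLOCK);
	if (ret < 0)
		return ret;

	/* Echo back exactly the bytes that were received */
	ret = scif_send(epd, buf, ret, SCIF_SEND_BLOCK);
	return ret < 0 ? ret : 0;
}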
+
+int scif_get_node_ids(u16 *nodes, int len, u16 *self)
+{
+ int online = 0;
+ int offset = 0;
+ int node;
+
+ if (!scif_is_mgmt_node())
+ scif_get_node_info();
+
+ *self = scif_info.nodeid;
+ mutex_lock(&scif_info.conflock);
+ len = min_t(int, len, scif_info.total);
+ for (node = 0; node <= scif_info.maxid; node++) {
+ if (_scifdev_alive(&scif_dev[node])) {
+ online++;
+ if (offset < len)
+ nodes[offset++] = node;
+ }
+ }
+ dev_dbg(scif_info.mdev.this_device,
+ "SCIFAPI get_node_ids total %d online %d filled in %d nodes\n",
+ scif_info.total, online, offset);
+ mutex_unlock(&scif_info.conflock);
+
+ return online;
+}
+EXPORT_SYMBOL_GPL(scif_get_node_ids);
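
Because the return value is the number of online nodes even when it exceeds the supplied array length, callers should cap their iteration accordingly. A sketch:

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/scif.h>

static void scif_nodes_demo(void)
{
	u16 nodes[8];	/* arbitrary capacity */
	u16 self;
	int online, i;

	online = scif_get_node_ids(nodes, ARRAY_SIZE(nodes), &self);

	/* Only min(online, capacity) entries of nodes[] are valid */
	for (i = 0; i < min_t(int, online, ARRAY_SIZE(nodes)); i++)
		pr_info("node %u online (self is %u)\n", nodes[i], self);
}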
diff --git a/drivers/misc/mic/scif/scif_debugfs.c b/drivers/misc/mic/scif/scif_debugfs.c
new file mode 100644
index 000000000000..51f14e2a1196
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_debugfs.c
@@ -0,0 +1,85 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "../common/mic_dev.h"
+#include "scif_main.h"
+
+/* Debugfs parent dir */
+static struct dentry *scif_dbg;
+
+static int scif_dev_test(struct seq_file *s, void *unused)
+{
+ int node;
+
+ seq_printf(s, "Total Nodes %d Self Node Id %d Maxid %d\n",
+ scif_info.total, scif_info.nodeid,
+ scif_info.maxid);
+
+ if (!scif_dev)
+ return 0;
+
+ seq_printf(s, "%-16s\t%-16s\n", "node_id", "state");
+
+ for (node = 0; node <= scif_info.maxid; node++)
+ seq_printf(s, "%-16d\t%-16s\n", scif_dev[node].node,
+ _scifdev_alive(&scif_dev[node]) ?
+ "Running" : "Offline");
+ return 0;
+}
+
+static int scif_dev_test_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, scif_dev_test, inode->i_private);
+}
+
+static int scif_dev_test_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations scif_dev_ops = {
+ .owner = THIS_MODULE,
+ .open = scif_dev_test_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = scif_dev_test_release
+};
+
+void __init scif_init_debugfs(void)
+{
+ struct dentry *d;
+
+ scif_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!scif_dbg) {
+ dev_err(scif_info.mdev.this_device,
+ "can't create debugfs dir scif\n");
+ return;
+ }
+
+ d = debugfs_create_file("scif_dev", 0444, scif_dbg,
+ NULL, &scif_dev_ops);
+ debugfs_create_u8("en_msg_log", 0666, scif_dbg, &scif_info.en_msg_log);
+ debugfs_create_u8("p2p_enable", 0666, scif_dbg, &scif_info.p2p_enable);
+}
+
+void scif_exit_debugfs(void)
+{
+ debugfs_remove_recursive(scif_dbg);
+}
diff --git a/drivers/misc/mic/scif/scif_epd.c b/drivers/misc/mic/scif/scif_epd.c
new file mode 100644
index 000000000000..b4bfbb08a8e3
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_epd.c
@@ -0,0 +1,353 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#include "scif_main.h"
+#include "scif_map.h"
+
+void scif_cleanup_ep_qp(struct scif_endpt *ep)
+{
+ struct scif_qp *qp = ep->qp_info.qp;
+
+ if (qp->outbound_q.rb_base) {
+ scif_iounmap((void *)qp->outbound_q.rb_base,
+ qp->outbound_q.size, ep->remote_dev);
+ qp->outbound_q.rb_base = NULL;
+ }
+ if (qp->remote_qp) {
+ scif_iounmap((void *)qp->remote_qp,
+ sizeof(struct scif_qp), ep->remote_dev);
+ qp->remote_qp = NULL;
+ }
+ if (qp->local_qp) {
+ scif_unmap_single(qp->local_qp, ep->remote_dev,
+ sizeof(struct scif_qp));
+ qp->local_qp = 0x0;
+ }
+ if (qp->local_buf) {
+ scif_unmap_single(qp->local_buf, ep->remote_dev,
+ SCIF_ENDPT_QP_SIZE);
+ qp->local_buf = 0;
+ }
+}
+
+void scif_teardown_ep(void *endpt)
+{
+ struct scif_endpt *ep = endpt;
+ struct scif_qp *qp = ep->qp_info.qp;
+
+ if (qp) {
+ spin_lock(&ep->lock);
+ scif_cleanup_ep_qp(ep);
+ spin_unlock(&ep->lock);
+ kfree(qp->inbound_q.rb_base);
+ kfree(qp);
+ }
+}
+
+/*
+ * Enqueue the endpoint to the zombie list for cleanup.
+ * The endpoint should not be accessed once this API returns.
+ */
+void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held)
+{
+ if (!eplock_held)
+ spin_lock(&scif_info.eplock);
+ spin_lock(&ep->lock);
+ ep->state = SCIFEP_ZOMBIE;
+ spin_unlock(&ep->lock);
+ list_add_tail(&ep->list, &scif_info.zombie);
+ scif_info.nr_zombies++;
+ if (!eplock_held)
+ spin_unlock(&scif_info.eplock);
+ schedule_work(&scif_info.misc_work);
+}
+
+static struct scif_endpt *scif_find_listen_ep(u16 port)
+{
+ struct scif_endpt *ep = NULL;
+ struct list_head *pos, *tmpq;
+
+ spin_lock(&scif_info.eplock);
+ list_for_each_safe(pos, tmpq, &scif_info.listen) {
+ ep = list_entry(pos, struct scif_endpt, list);
+ if (ep->port.port == port) {
+ spin_lock(&ep->lock);
+ spin_unlock(&scif_info.eplock);
+ return ep;
+ }
+ }
+ spin_unlock(&scif_info.eplock);
+ return NULL;
+}
+
+void scif_cleanup_zombie_epd(void)
+{
+ struct list_head *pos, *tmpq;
+ struct scif_endpt *ep;
+
+ spin_lock(&scif_info.eplock);
+ list_for_each_safe(pos, tmpq, &scif_info.zombie) {
+ ep = list_entry(pos, struct scif_endpt, list);
+ list_del(pos);
+ scif_info.nr_zombies--;
+ kfree(ep);
+ }
+ spin_unlock(&scif_info.eplock);
+}
+
+/**
+ * scif_cnctreq() - Respond to SCIF_CNCT_REQ interrupt message
+ * @msg: Interrupt message
+ *
+ * This message is initiated by the remote node to request a connection
+ * to the local node. This function looks for an end point in the
+ * listen state on the requested port id.
+ *
+ * If it finds a listening port it places the connect request on the
+ * listening end point's queue and wakes up any pending accept calls.
+ *
+ * If it does not find a listening end point it sends a connection
+ * reject message to the remote node.
+ */
+void scif_cnctreq(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_endpt *ep = NULL;
+ struct scif_conreq *conreq;
+
+ conreq = kmalloc(sizeof(*conreq), GFP_KERNEL);
+ if (!conreq)
+ /* Lack of resources so reject the request. */
+ goto conreq_sendrej;
+
+ ep = scif_find_listen_ep(msg->dst.port);
+ if (!ep)
+ /* Send reject due to no listening ports */
+ goto conreq_sendrej_free;
+
+ if (ep->backlog <= ep->conreqcnt) {
+ /* Send reject due to too many pending requests */
+ spin_unlock(&ep->lock);
+ goto conreq_sendrej_free;
+ }
+
+ conreq->msg = *msg;
+ list_add_tail(&conreq->list, &ep->conlist);
+ ep->conreqcnt++;
+ wake_up_interruptible(&ep->conwq);
+ spin_unlock(&ep->lock);
+ return;
+
+conreq_sendrej_free:
+ kfree(conreq);
+conreq_sendrej:
+ msg->uop = SCIF_CNCT_REJ;
+ scif_nodeqp_send(&scif_dev[msg->src.node], msg);
+}
+
+/**
+ * scif_cnctgnt() - Respond to SCIF_CNCT_GNT interrupt message
+ * @msg: Interrupt message
+ *
+ * An accept() on the remote node has occurred and sent this message
+ * to indicate success. Place the end point in the MAPPING state and
+ * save the remote nodes memory information. Then wake up the connect
+ * request so it can finish.
+ */
+void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
+
+ spin_lock(&ep->lock);
+ if (SCIFEP_CONNECTING == ep->state) {
+ ep->peer.node = msg->src.node;
+ ep->peer.port = msg->src.port;
+ ep->qp_info.gnt_pld = msg->payload[1];
+ ep->remote_ep = msg->payload[2];
+ ep->state = SCIFEP_MAPPING;
+
+ wake_up(&ep->conwq);
+ }
+ spin_unlock(&ep->lock);
+}
+
+/**
+ * scif_cnctgnt_ack() - Respond to SCIF_CNCT_GNTACK interrupt message
+ * @msg: Interrupt message
+ *
+ * The remote connection request has finished mapping the local memory.
+ * Place the connection in the connected state and wake up the pending
+ * accept() call.
+ */
+void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
+
+ mutex_lock(&scif_info.connlock);
+ spin_lock(&ep->lock);
+ /* New ep is now connected with all resources set. */
+ ep->state = SCIFEP_CONNECTED;
+ list_add_tail(&ep->list, &scif_info.connected);
+ wake_up(&ep->conwq);
+ spin_unlock(&ep->lock);
+ mutex_unlock(&scif_info.connlock);
+}
+
+/**
+ * scif_cnctgnt_nack() - Respond to SCIF_CNCT_GNTNACK interrupt message
+ * @msg: Interrupt message
+ *
+ * The remote connection request failed to map the local memory it was sent.
+ * Place the end point in the CLOSING state to indicate this and wake up
+ * the pending accept().
+ */
+void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
+
+ spin_lock(&ep->lock);
+ ep->state = SCIFEP_CLOSING;
+ wake_up(&ep->conwq);
+ spin_unlock(&ep->lock);
+}
+
+/**
+ * scif_cnctrej() - Respond to SCIF_CNCT_REJ interrupt message
+ * @msg: Interrupt message
+ *
+ * The remote end has rejected the connection request. Set the end
+ * point back to the bound state and wake up the pending connect().
+ */
+void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
+
+ spin_lock(&ep->lock);
+ if (SCIFEP_CONNECTING == ep->state) {
+ ep->state = SCIFEP_BOUND;
+ wake_up(&ep->conwq);
+ }
+ spin_unlock(&ep->lock);
+}
+
+/**
+ * scif_discnct() - Respond to SCIF_DISCNCT interrupt message
+ * @msg: Interrupt message
+ *
+ * The remote node has indicated close() has been called on its end
+ * point. Remove the local end point from the connected list, set its
+ * state to disconnected and ensure accesses to the remote node are
+ * shutdown.
+ *
+ * When all accesses to the remote end have completed, send a
+ * DISCNT_ACK to indicate it can remove its resources and complete
+ * the close routine.
+ */
+void scif_discnct(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_endpt *ep = NULL;
+ struct scif_endpt *tmpep;
+ struct list_head *pos, *tmpq;
+
+ mutex_lock(&scif_info.connlock);
+ list_for_each_safe(pos, tmpq, &scif_info.connected) {
+ tmpep = list_entry(pos, struct scif_endpt, list);
+ /*
+		 * The local ep may have sent a disconnect and been closed
+		 * due to a message response time out. It may have been
+		 * allocated again and formed a new connection so we want to
+		 * check if the remote ep matches.
+ */
+ if (((u64)tmpep == msg->payload[1]) &&
+ ((u64)tmpep->remote_ep == msg->payload[0])) {
+ list_del(pos);
+ ep = tmpep;
+ spin_lock(&ep->lock);
+ break;
+ }
+ }
+
+ /*
+ * If the terminated end is not found then this side started closing
+ * before the other side sent the disconnect. If so the ep will no
+	 * longer be on the connected list. Regardless, the other side
+ * needs to be acked to let it know close is complete.
+ */
+ if (!ep) {
+ mutex_unlock(&scif_info.connlock);
+ goto discnct_ack;
+ }
+
+ ep->state = SCIFEP_DISCONNECTED;
+ list_add_tail(&ep->list, &scif_info.disconnected);
+
+ wake_up_interruptible(&ep->sendwq);
+ wake_up_interruptible(&ep->recvwq);
+ spin_unlock(&ep->lock);
+ mutex_unlock(&scif_info.connlock);
+
+discnct_ack:
+ msg->uop = SCIF_DISCNT_ACK;
+ scif_nodeqp_send(&scif_dev[msg->src.node], msg);
+}
+
+/**
+ * scif_discnt_ack() - Respond to SCIF_DISCNT_ACK interrupt message
+ * @msg: Interrupt message
+ *
+ * Remote side has indicated it has no more references to local resources.
+ */
+void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
+
+ spin_lock(&ep->lock);
+ ep->state = SCIFEP_DISCONNECTED;
+ spin_unlock(&ep->lock);
+ complete(&ep->discon);
+}
+
+/**
+ * scif_clientsend() - Respond to SCIF_CLIENT_SEND interrupt message
+ * @msg: Interrupt message
+ *
+ * The remote side has produced new data, so wake up any blocked receiver.
+ */
+void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
+
+ spin_lock(&ep->lock);
+ if (SCIFEP_CONNECTED == ep->state)
+ wake_up_interruptible(&ep->recvwq);
+ spin_unlock(&ep->lock);
+}
+
+/**
+ * scif_clientrcvd() - Respond to SCIF_CLIENT_RCVD interrupt message
+ * @msg: Interrupt message
+ *
+ * The remote side has consumed data, so wake up any blocked sender.
+ */
+void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
+
+ spin_lock(&ep->lock);
+ if (SCIFEP_CONNECTED == ep->state)
+ wake_up_interruptible(&ep->sendwq);
+ spin_unlock(&ep->lock);
+}
diff --git a/drivers/misc/mic/scif/scif_epd.h b/drivers/misc/mic/scif/scif_epd.h
new file mode 100644
index 000000000000..331322a25213
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_epd.h
@@ -0,0 +1,160 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#ifndef SCIF_EPD_H
+#define SCIF_EPD_H
+
+#include <linux/delay.h>
+#include <linux/scif.h>
+#include <linux/scif_ioctl.h>
+
+#define SCIF_EPLOCK_HELD true
+
+enum scif_epd_state {
+ SCIFEP_UNBOUND,
+ SCIFEP_BOUND,
+ SCIFEP_LISTENING,
+ SCIFEP_CONNECTED,
+ SCIFEP_CONNECTING,
+ SCIFEP_MAPPING,
+ SCIFEP_CLOSING,
+ SCIFEP_CLLISTEN,
+ SCIFEP_DISCONNECTED,
+ SCIFEP_ZOMBIE
+};
+
+/*
+ * struct scif_conreq - Data structure added to the connection list.
+ *
+ * @msg: connection request message received
+ * @list: link to list of connection requests
+ */
+struct scif_conreq {
+ struct scifmsg msg;
+ struct list_head list;
+};
+
+/* Size of the RB for the Endpoint QP */
+#define SCIF_ENDPT_QP_SIZE 0x1000
+
+/*
+ * scif_endpt_qp_info - SCIF endpoint queue pair
+ *
+ * @qp - Qpair for this endpoint
+ * @qp_offset - DMA address of the QP
+ * @gnt_pld - Payload in a SCIF_CNCT_GNT message containing the
+ * physical address of the remote_qp.
+ */
+struct scif_endpt_qp_info {
+ struct scif_qp *qp;
+ dma_addr_t qp_offset;
+ dma_addr_t gnt_pld;
+};
+
+/*
+ * struct scif_endpt - The SCIF endpoint data structure
+ *
+ * @state: end point state
+ * @lock: lock synchronizing access to endpoint fields like state etc
+ * @port: self port information
+ * @peer: peer port information
+ * @backlog: maximum pending connection requests
+ * @qp_info: Endpoint QP information for SCIF messaging
+ * @remote_dev: scifdev used by this endpt to communicate with remote node.
+ * @remote_ep: remote endpoint
+ * @conreqcnt: Keep track of number of connection requests.
+ * @files: Open file information used to match the id passed in with
+ * the flush routine.
+ * @conlist: list of connection requests
+ * @conwq: waitqueue for connection processing
+ * @discon: completion used during disconnection
+ * @sendwq: waitqueue used during sending messages
+ * @recvwq: waitqueue used during message receipt
+ * @sendlock: Synchronize ordering of messages sent
+ * @recvlock: Synchronize ordering of messages received
+ * @list: link to list of various endpoints like connected, listening etc
+ * @li_accept: pending ACCEPTREG
+ * @acceptcnt: pending ACCEPTREG cnt
+ * @liacceptlist: link to listen accept
+ * @miacceptlist: link to uaccept
+ * @listenep: associated listen ep
+ * @conn_work: Non blocking connect work
+ * @conn_port: Connection port
+ * @conn_err: Errors during connection
+ * @conn_async_state: Async connection
+ * @conn_list: List of async connection requests
+ */
+struct scif_endpt {
+ enum scif_epd_state state;
+ spinlock_t lock;
+ struct scif_port_id port;
+ struct scif_port_id peer;
+ int backlog;
+ struct scif_endpt_qp_info qp_info;
+ struct scif_dev *remote_dev;
+ u64 remote_ep;
+ int conreqcnt;
+ struct files_struct *files;
+ struct list_head conlist;
+ wait_queue_head_t conwq;
+ struct completion discon;
+ wait_queue_head_t sendwq;
+ wait_queue_head_t recvwq;
+ struct mutex sendlock;
+ struct mutex recvlock;
+ struct list_head list;
+ struct list_head li_accept;
+ int acceptcnt;
+ struct list_head liacceptlist;
+ struct list_head miacceptlist;
+ struct scif_endpt *listenep;
+ struct scif_port_id conn_port;
+ int conn_err;
+ int conn_async_state;
+ struct list_head conn_list;
+};
+
+static inline int scifdev_alive(struct scif_endpt *ep)
+{
+ return _scifdev_alive(ep->remote_dev);
+}
+
+void scif_cleanup_zombie_epd(void);
+void scif_teardown_ep(void *endpt);
+void scif_cleanup_ep_qp(struct scif_endpt *ep);
+void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held);
+void scif_get_node_info(void);
+void scif_send_acks(struct scif_dev *dev);
+void scif_conn_handler(struct work_struct *work);
+int scif_rsrv_port(u16 port);
+void scif_get_port(u16 port);
+int scif_get_new_port(void);
+void scif_put_port(u16 port);
+int scif_user_send(scif_epd_t epd, void __user *msg, int len, int flags);
+int scif_user_recv(scif_epd_t epd, void __user *msg, int len, int flags);
+void scif_cnctreq(struct scif_dev *scifdev, struct scifmsg *msg);
+void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg);
+void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg);
+void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg);
+void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg);
+void scif_discnct(struct scif_dev *scifdev, struct scifmsg *msg);
+void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg);
+void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg);
+void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg);
+int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block);
+int __scif_flush(scif_epd_t epd);
+#endif /* SCIF_EPD_H */
diff --git a/drivers/misc/mic/scif/scif_fd.c b/drivers/misc/mic/scif/scif_fd.c
new file mode 100644
index 000000000000..eccf7e7135f9
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_fd.c
@@ -0,0 +1,303 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#include "scif_main.h"
+
+static int scif_fdopen(struct inode *inode, struct file *f)
+{
+ struct scif_endpt *priv = scif_open();
+
+ if (!priv)
+ return -ENOMEM;
+ f->private_data = priv;
+ return 0;
+}
+
+static int scif_fdclose(struct inode *inode, struct file *f)
+{
+ struct scif_endpt *priv = f->private_data;
+
+ return scif_close(priv);
+}
+
+static int scif_fdflush(struct file *f, fl_owner_t id)
+{
+ struct scif_endpt *ep = f->private_data;
+
+ spin_lock(&ep->lock);
+ /*
+ * The listening endpoint stashes the open file information before
+	 * waiting for incoming connections. The release callback would never
+	 * be called if the application closed the endpoint while waiting for
+	 * incoming connections from a separate thread, since the file
+	 * descriptor reference count is bumped up in the accept IOCTL. Call
+	 * the flush routine if the id matches the endpoint open file
+	 * information so that the listening endpoint can be woken up and the
+	 * fd released.
+ */
+ if (ep->files == id)
+ __scif_flush(ep);
+ spin_unlock(&ep->lock);
+ return 0;
+}
+
+static __always_inline void scif_err_debug(int err, const char *str)
+{
+ /*
+ * ENOTCONN is a common uninteresting error which is
+ * flooding debug messages to the console unnecessarily.
+ */
+ if (err < 0 && err != -ENOTCONN)
+ dev_dbg(scif_info.mdev.this_device, "%s err %d\n", str, err);
+}
+
+static long scif_fdioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ struct scif_endpt *priv = f->private_data;
+ void __user *argp = (void __user *)arg;
+ int err = 0;
+ struct scifioctl_msg request;
+ bool non_block = false;
+
+ non_block = !!(f->f_flags & O_NONBLOCK);
+
+ switch (cmd) {
+ case SCIF_BIND:
+ {
+ int pn;
+
+ if (copy_from_user(&pn, argp, sizeof(pn)))
+ return -EFAULT;
+
+ pn = scif_bind(priv, pn);
+ if (pn < 0)
+ return pn;
+
+ if (copy_to_user(argp, &pn, sizeof(pn)))
+ return -EFAULT;
+
+ return 0;
+ }
+ case SCIF_LISTEN:
+ return scif_listen(priv, arg);
+ case SCIF_CONNECT:
+ {
+ struct scifioctl_connect req;
+ struct scif_endpt *ep = (struct scif_endpt *)priv;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ err = __scif_connect(priv, &req.peer, non_block);
+ if (err < 0)
+ return err;
+
+ req.self.node = ep->port.node;
+ req.self.port = ep->port.port;
+
+ if (copy_to_user(argp, &req, sizeof(req)))
+ return -EFAULT;
+
+ return 0;
+ }
+ /*
+ * Accept is done in two halves. The request ioctl does the basic
+ * functionality of accepting the request and returning the information
+ * about it including the internal ID of the end point. The register
+ * is done with the internal ID on a new file descriptor opened by the
+ * requesting process.
+ */
+ case SCIF_ACCEPTREQ:
+ {
+ struct scifioctl_accept request;
+ scif_epd_t *ep = (scif_epd_t *)&request.endpt;
+
+ if (copy_from_user(&request, argp, sizeof(request)))
+ return -EFAULT;
+
+ err = scif_accept(priv, &request.peer, ep, request.flags);
+ if (err < 0)
+ return err;
+
+ if (copy_to_user(argp, &request, sizeof(request))) {
+ scif_close(*ep);
+ return -EFAULT;
+ }
+ /*
+ * Add to the list of user mode eps where the second half
+ * of the accept is not yet completed.
+ */
+ spin_lock(&scif_info.eplock);
+ list_add_tail(&((*ep)->miacceptlist), &scif_info.uaccept);
+ list_add_tail(&((*ep)->liacceptlist), &priv->li_accept);
+ (*ep)->listenep = priv;
+ priv->acceptcnt++;
+ spin_unlock(&scif_info.eplock);
+
+ return 0;
+ }
+ case SCIF_ACCEPTREG:
+ {
+ struct scif_endpt *priv = f->private_data;
+ struct scif_endpt *newep;
+ struct scif_endpt *lisep;
+ struct scif_endpt *fep = NULL;
+ struct scif_endpt *tmpep;
+ struct list_head *pos, *tmpq;
+
+ /* Fetch the pointer to the accepted endpoint from user space */
+ if (copy_from_user(&newep, argp, sizeof(void *)))
+ return -EFAULT;
+
+ /* Remove from the user accept queue */
+ spin_lock(&scif_info.eplock);
+ list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
+ tmpep = list_entry(pos,
+ struct scif_endpt, miacceptlist);
+ if (tmpep == newep) {
+ list_del(pos);
+ fep = tmpep;
+ break;
+ }
+ }
+
+ if (!fep) {
+ spin_unlock(&scif_info.eplock);
+ return -ENOENT;
+ }
+
+ lisep = newep->listenep;
+ list_for_each_safe(pos, tmpq, &lisep->li_accept) {
+ tmpep = list_entry(pos,
+ struct scif_endpt, liacceptlist);
+ if (tmpep == newep) {
+ list_del(pos);
+ lisep->acceptcnt--;
+ break;
+ }
+ }
+
+ spin_unlock(&scif_info.eplock);
+
+ /* Free the resources automatically created from the open. */
+ scif_teardown_ep(priv);
+ scif_add_epd_to_zombie_list(priv, !SCIF_EPLOCK_HELD);
+ f->private_data = newep;
+ return 0;
+ }
+ case SCIF_SEND:
+ {
+ struct scif_endpt *priv = f->private_data;
+
+ if (copy_from_user(&request, argp,
+ sizeof(struct scifioctl_msg))) {
+ err = -EFAULT;
+ goto send_err;
+ }
+ err = scif_user_send(priv, (void __user *)request.msg,
+ request.len, request.flags);
+ if (err < 0)
+ goto send_err;
+ if (copy_to_user(&
+ ((struct scifioctl_msg __user *)argp)->out_len,
+ &err, sizeof(err))) {
+ err = -EFAULT;
+ goto send_err;
+ }
+ err = 0;
+send_err:
+ scif_err_debug(err, "scif_send");
+ return err;
+ }
+ case SCIF_RECV:
+ {
+ struct scif_endpt *priv = f->private_data;
+
+ if (copy_from_user(&request, argp,
+ sizeof(struct scifioctl_msg))) {
+ err = -EFAULT;
+ goto recv_err;
+ }
+
+ err = scif_user_recv(priv, (void __user *)request.msg,
+ request.len, request.flags);
+ if (err < 0)
+ goto recv_err;
+
+ if (copy_to_user(&
+ ((struct scifioctl_msg __user *)argp)->out_len,
+ &err, sizeof(err))) {
+ err = -EFAULT;
+ goto recv_err;
+ }
+ err = 0;
+recv_err:
+ scif_err_debug(err, "scif_recv");
+ return err;
+ }
+ case SCIF_GET_NODEIDS:
+ {
+ struct scifioctl_node_ids node_ids;
+ int entries;
+ u16 *nodes;
+ void __user *unodes, *uself;
+ u16 self;
+
+ if (copy_from_user(&node_ids, argp, sizeof(node_ids))) {
+ err = -EFAULT;
+ goto getnodes_err2;
+ }
+
+ entries = min_t(int, scif_info.maxid, node_ids.len);
+ nodes = kmalloc_array(entries, sizeof(u16), GFP_KERNEL);
+ if (entries && !nodes) {
+ err = -ENOMEM;
+ goto getnodes_err2;
+ }
+ node_ids.len = scif_get_node_ids(nodes, entries, &self);
+
+ unodes = (void __user *)node_ids.nodes;
+ if (copy_to_user(unodes, nodes, sizeof(u16) * entries)) {
+ err = -EFAULT;
+ goto getnodes_err1;
+ }
+
+ uself = (void __user *)node_ids.self;
+ if (copy_to_user(uself, &self, sizeof(u16))) {
+ err = -EFAULT;
+ goto getnodes_err1;
+ }
+
+ if (copy_to_user(argp, &node_ids, sizeof(node_ids))) {
+ err = -EFAULT;
+ goto getnodes_err1;
+ }
+getnodes_err1:
+ kfree(nodes);
+getnodes_err2:
+ return err;
+ }
+ }
+ return -EINVAL;
+}
+
+const struct file_operations scif_fops = {
+ .open = scif_fdopen,
+ .release = scif_fdclose,
+ .unlocked_ioctl = scif_fdioctl,
+ .flush = scif_fdflush,
+ .owner = THIS_MODULE,
+};
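
For reference, here is a user-space sketch of the two-step accept handled by SCIF_ACCEPTREQ/SCIF_ACCEPTREG above. This is a hedged sketch only: scif_user_accept() is a hypothetical helper, the scifioctl_accept layout is assumed from its usage in the request half, and real applications would normally go through the libscif wrapper rather than raw ioctls.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
/* assumes the SCIF uapi header providing struct scifioctl_accept,
 * SCIF_ACCEPTREQ and SCIF_ACCEPTREG */

static int scif_user_accept(int listen_fd, struct scifioctl_accept *req)
{
	int new_fd;

	/* first half: accept the connection on the listening fd */
	if (ioctl(listen_fd, SCIF_ACCEPTREQ, req) < 0)
		return -1;
	/* open a fresh fd to own the accepted endpoint */
	new_fd = open("/dev/scif", O_RDWR);
	if (new_fd < 0)
		return -1;
	/* second half: register the kernel endpoint from req->endpt */
	if (ioctl(new_fd, SCIF_ACCEPTREG, &req->endpt) < 0) {
		close(new_fd);
		return -1;
	}
	return new_fd;	/* this fd's private_data is now the accepted ep */
}
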
diff --git a/drivers/misc/mic/scif/scif_main.c b/drivers/misc/mic/scif/scif_main.c
new file mode 100644
index 000000000000..6ce851f5c7e6
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_main.c
@@ -0,0 +1,388 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#include <linux/module.h>
+#include <linux/idr.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+#include "../bus/scif_bus.h"
+#include "scif_peer_bus.h"
+#include "scif_main.h"
+#include "scif_map.h"
+
+struct scif_info scif_info = {
+ .mdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "scif",
+ .fops = &scif_fops,
+ }
+};
+
+struct scif_dev *scif_dev;
+static atomic_t g_loopb_cnt;
+
+/* Runs in the context of intr_wq */
+static void scif_intr_bh_handler(struct work_struct *work)
+{
+ struct scif_dev *scifdev =
+ container_of(work, struct scif_dev, intr_bh);
+
+ if (scifdev_self(scifdev))
+ scif_loopb_msg_handler(scifdev, scifdev->qpairs);
+ else
+ scif_nodeqp_intrhandler(scifdev, scifdev->qpairs);
+}
+
+int scif_setup_intr_wq(struct scif_dev *scifdev)
+{
+ if (!scifdev->intr_wq) {
+ snprintf(scifdev->intr_wqname, sizeof(scifdev->intr_wqname),
+ "SCIF INTR %d", scifdev->node);
+ scifdev->intr_wq =
+ alloc_ordered_workqueue(scifdev->intr_wqname, 0);
+ if (!scifdev->intr_wq)
+ return -ENOMEM;
+ INIT_WORK(&scifdev->intr_bh, scif_intr_bh_handler);
+ }
+ return 0;
+}
+
+void scif_destroy_intr_wq(struct scif_dev *scifdev)
+{
+ if (scifdev->intr_wq) {
+ destroy_workqueue(scifdev->intr_wq);
+ scifdev->intr_wq = NULL;
+ }
+}
+
+irqreturn_t scif_intr_handler(int irq, void *data)
+{
+ struct scif_dev *scifdev = data;
+ struct scif_hw_dev *sdev = scifdev->sdev;
+
+ sdev->hw_ops->ack_interrupt(sdev, scifdev->db);
+ queue_work(scifdev->intr_wq, &scifdev->intr_bh);
+ return IRQ_HANDLED;
+}
+
+static int scif_peer_probe(struct scif_peer_dev *spdev)
+{
+ struct scif_dev *scifdev = &scif_dev[spdev->dnode];
+
+ mutex_lock(&scif_info.conflock);
+ scif_info.total++;
+ scif_info.maxid = max_t(u32, spdev->dnode, scif_info.maxid);
+ mutex_unlock(&scif_info.conflock);
+ rcu_assign_pointer(scifdev->spdev, spdev);
+
+ /* In the future SCIF kernel client devices will be added here */
+ return 0;
+}
+
+static void scif_peer_remove(struct scif_peer_dev *spdev)
+{
+ struct scif_dev *scifdev = &scif_dev[spdev->dnode];
+
+ /* In the future SCIF kernel client devices will be removed here */
+ spdev = rcu_dereference(scifdev->spdev);
+ if (spdev)
+ RCU_INIT_POINTER(scifdev->spdev, NULL);
+ synchronize_rcu();
+
+ mutex_lock(&scif_info.conflock);
+ scif_info.total--;
+ mutex_unlock(&scif_info.conflock);
+}
+
+static void scif_qp_setup_handler(struct work_struct *work)
+{
+ struct scif_dev *scifdev = container_of(work, struct scif_dev,
+ qp_dwork.work);
+ struct scif_hw_dev *sdev = scifdev->sdev;
+ dma_addr_t da = 0;
+ int err;
+
+ if (scif_is_mgmt_node()) {
+ struct mic_bootparam *bp = sdev->dp;
+
+ da = bp->scif_card_dma_addr;
+ scifdev->rdb = bp->h2c_scif_db;
+ } else {
+ struct mic_bootparam __iomem *bp = sdev->rdp;
+
+ da = readq(&bp->scif_host_dma_addr);
+ scifdev->rdb = ioread8(&bp->c2h_scif_db);
+ }
+ if (da) {
+ err = scif_qp_response(da, scifdev);
+ if (err)
+ dev_err(&scifdev->sdev->dev,
+ "scif_qp_response err %d\n", err);
+ } else {
+ schedule_delayed_work(&scifdev->qp_dwork,
+ msecs_to_jiffies(1000));
+ }
+}
+
+static int scif_setup_scifdev(struct scif_hw_dev *sdev)
+{
+ int i;
+ u8 num_nodes;
+
+ if (sdev->snode) {
+ struct mic_bootparam __iomem *bp = sdev->rdp;
+
+ num_nodes = ioread8(&bp->tot_nodes);
+ } else {
+ struct mic_bootparam *bp = sdev->dp;
+
+ num_nodes = bp->tot_nodes;
+ }
+ scif_dev = kcalloc(num_nodes, sizeof(*scif_dev), GFP_KERNEL);
+ if (!scif_dev)
+ return -ENOMEM;
+ for (i = 0; i < num_nodes; i++) {
+ struct scif_dev *scifdev = &scif_dev[i];
+
+ scifdev->node = i;
+ scifdev->exit = OP_IDLE;
+ init_waitqueue_head(&scifdev->disconn_wq);
+ mutex_init(&scifdev->lock);
+ INIT_WORK(&scifdev->init_msg_work, scif_qp_response_ack);
+ INIT_DELAYED_WORK(&scifdev->p2p_dwork,
+ scif_poll_qp_state);
+ INIT_DELAYED_WORK(&scifdev->qp_dwork,
+ scif_qp_setup_handler);
+ INIT_LIST_HEAD(&scifdev->p2p);
+ RCU_INIT_POINTER(scifdev->spdev, NULL);
+ }
+ return 0;
+}
+
+static void scif_destroy_scifdev(void)
+{
+ kfree(scif_dev);
+}
+
+static int scif_probe(struct scif_hw_dev *sdev)
+{
+ struct scif_dev *scifdev;
+ int rc;
+
+ dev_set_drvdata(&sdev->dev, sdev);
+ if (atomic_add_return(1, &g_loopb_cnt) == 1) {
+ struct scif_dev *loopb_dev;
+
+ rc = scif_setup_scifdev(sdev);
+ if (rc)
+ goto exit;
+ scifdev = &scif_dev[sdev->dnode];
+ scifdev->sdev = sdev;
+ loopb_dev = &scif_dev[sdev->snode];
+ loopb_dev->sdev = sdev;
+ rc = scif_setup_loopback_qp(loopb_dev);
+ if (rc)
+ goto free_sdev;
+ } else {
+ scifdev = &scif_dev[sdev->dnode];
+ scifdev->sdev = sdev;
+ }
+ rc = scif_setup_intr_wq(scifdev);
+ if (rc)
+ goto destroy_loopb;
+ rc = scif_setup_qp(scifdev);
+ if (rc)
+ goto destroy_intr;
+ scifdev->db = sdev->hw_ops->next_db(sdev);
+ scifdev->cookie = sdev->hw_ops->request_irq(sdev, scif_intr_handler,
+ "SCIF_INTR", scifdev,
+ scifdev->db);
+ if (IS_ERR(scifdev->cookie)) {
+ rc = PTR_ERR(scifdev->cookie);
+ goto free_qp;
+ }
+ if (scif_is_mgmt_node()) {
+ struct mic_bootparam *bp = sdev->dp;
+
+ bp->c2h_scif_db = scifdev->db;
+ bp->scif_host_dma_addr = scifdev->qp_dma_addr;
+ } else {
+ struct mic_bootparam __iomem *bp = sdev->rdp;
+
+ iowrite8(scifdev->db, &bp->h2c_scif_db);
+ writeq(scifdev->qp_dma_addr, &bp->scif_card_dma_addr);
+ }
+ schedule_delayed_work(&scifdev->qp_dwork,
+ msecs_to_jiffies(1000));
+ return rc;
+free_qp:
+ scif_free_qp(scifdev);
+destroy_intr:
+ scif_destroy_intr_wq(scifdev);
+destroy_loopb:
+ if (atomic_dec_and_test(&g_loopb_cnt))
+ scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
+free_sdev:
+ scif_destroy_scifdev();
+exit:
+ return rc;
+}
+
+void scif_stop(struct scif_dev *scifdev)
+{
+ struct scif_dev *dev;
+ int i;
+
+ for (i = scif_info.maxid; i >= 0; i--) {
+ dev = &scif_dev[i];
+ if (scifdev_self(dev))
+ continue;
+ scif_handle_remove_node(i);
+ }
+}
+
+static void scif_remove(struct scif_hw_dev *sdev)
+{
+ struct scif_dev *scifdev = &scif_dev[sdev->dnode];
+
+ if (scif_is_mgmt_node()) {
+ struct mic_bootparam *bp = sdev->dp;
+
+ bp->c2h_scif_db = -1;
+ bp->scif_host_dma_addr = 0x0;
+ } else {
+ struct mic_bootparam __iomem *bp = sdev->rdp;
+
+ iowrite8(-1, &bp->h2c_scif_db);
+ writeq(0x0, &bp->scif_card_dma_addr);
+ }
+ if (scif_is_mgmt_node()) {
+ scif_disconnect_node(scifdev->node, true);
+ } else {
+ scif_info.card_initiated_exit = true;
+ scif_stop(scifdev);
+ }
+ if (atomic_dec_and_test(&g_loopb_cnt))
+ scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
+ if (scifdev->cookie) {
+ sdev->hw_ops->free_irq(sdev, scifdev->cookie, scifdev);
+ scifdev->cookie = NULL;
+ }
+ scif_destroy_intr_wq(scifdev);
+ cancel_delayed_work(&scifdev->qp_dwork);
+ scif_free_qp(scifdev);
+ scifdev->rdb = -1;
+ scifdev->sdev = NULL;
+}
+
+static struct scif_peer_driver scif_peer_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .probe = scif_peer_probe,
+ .remove = scif_peer_remove,
+};
+
+static struct scif_hw_dev_id id_table[] = {
+ { MIC_SCIF_DEV, SCIF_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct scif_driver scif_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = scif_probe,
+ .remove = scif_remove,
+};
+
+static int _scif_init(void)
+{
+ spin_lock_init(&scif_info.eplock);
+ spin_lock_init(&scif_info.nb_connect_lock);
+ spin_lock_init(&scif_info.port_lock);
+ mutex_init(&scif_info.conflock);
+ mutex_init(&scif_info.connlock);
+ INIT_LIST_HEAD(&scif_info.uaccept);
+ INIT_LIST_HEAD(&scif_info.listen);
+ INIT_LIST_HEAD(&scif_info.zombie);
+ INIT_LIST_HEAD(&scif_info.connected);
+ INIT_LIST_HEAD(&scif_info.disconnected);
+ INIT_LIST_HEAD(&scif_info.nb_connect_list);
+ init_waitqueue_head(&scif_info.exitwq);
+ scif_info.en_msg_log = 0;
+ scif_info.p2p_enable = 1;
+ INIT_WORK(&scif_info.misc_work, scif_misc_handler);
+ INIT_WORK(&scif_info.conn_work, scif_conn_handler);
+ idr_init(&scif_ports);
+ return 0;
+}
+
+static void _scif_exit(void)
+{
+ idr_destroy(&scif_ports);
+ scif_destroy_scifdev();
+}
+
+static int __init scif_init(void)
+{
+ struct miscdevice *mdev = &scif_info.mdev;
+ int rc;
+
+ _scif_init();
+ rc = scif_peer_bus_init();
+ if (rc)
+ goto exit;
+ rc = scif_peer_register_driver(&scif_peer_driver);
+ if (rc)
+ goto peer_bus_exit;
+ rc = scif_register_driver(&scif_driver);
+ if (rc)
+ goto unreg_scif_peer;
+ rc = misc_register(mdev);
+ if (rc)
+ goto unreg_scif;
+ scif_init_debugfs();
+ return 0;
+unreg_scif:
+ scif_unregister_driver(&scif_driver);
+unreg_scif_peer:
+ scif_peer_unregister_driver(&scif_peer_driver);
+peer_bus_exit:
+ scif_peer_bus_exit();
+exit:
+ _scif_exit();
+ return rc;
+}
+
+static void __exit scif_exit(void)
+{
+ scif_exit_debugfs();
+ misc_deregister(&scif_info.mdev);
+ scif_unregister_driver(&scif_driver);
+ scif_peer_unregister_driver(&scif_peer_driver);
+ scif_peer_bus_exit();
+ _scif_exit();
+}
+
+module_init(scif_init);
+module_exit(scif_exit);
+
+MODULE_DEVICE_TABLE(scif, id_table);
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) SCIF driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/scif/scif_main.h b/drivers/misc/mic/scif/scif_main.h
new file mode 100644
index 000000000000..580bc63e1b23
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_main.h
@@ -0,0 +1,254 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#ifndef SCIF_MAIN_H
+#define SCIF_MAIN_H
+
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/miscdevice.h>
+#include <linux/dmaengine.h>
+#include <linux/file.h>
+#include <linux/scif.h>
+
+#include "../common/mic_dev.h"
+
+#define SCIF_MGMT_NODE 0
+#define SCIF_DEFAULT_WATCHDOG_TO 30
+#define SCIF_NODE_ACCEPT_TIMEOUT (3 * HZ)
+#define SCIF_NODE_ALIVE_TIMEOUT (SCIF_DEFAULT_WATCHDOG_TO * HZ)
+
+/*
+ * Generic state used for certain node QP message exchanges
+ * like Unregister, Alloc etc.
+ */
+enum scif_msg_state {
+ OP_IDLE = 1,
+ OP_IN_PROGRESS,
+ OP_COMPLETED,
+ OP_FAILED
+};
+
+/*
+ * struct scif_info - Global SCIF information
+ *
+ * @nodeid: Node ID this node is to others
+ * @maxid: Max known node ID
+ * @total: Total number of SCIF nodes
+ * @nr_zombies: number of zombie endpoints
+ * @eplock: Lock to synchronize listening, zombie endpoint lists
+ * @connlock: Lock to synchronize connected and disconnected lists
+ * @nb_connect_lock: Synchronize non blocking connect operations
+ * @port_lock: Synchronize access to SCIF ports
+ * @uaccept: List of user acceptreq waiting for acceptreg
+ * @listen: List of listening end points
+ * @zombie: List of zombie end points with pending RMA's
+ * @connected: List of end points in connected state
+ * @disconnected: List of end points in disconnected state
+ * @nb_connect_list: List for non blocking connections
+ * @misc_work: miscellaneous SCIF tasks
+ * @conflock: Lock to synchronize SCIF node configuration changes
+ * @en_msg_log: Enable debug message logging
+ * @p2p_enable: Enable P2P SCIF network
+ * @mdev: The MISC device
+ * @conn_work: Work for workqueue handling all connections
+ * @exitwq: Wait queue for waiting for an EXIT node QP message response
+ * @loopb_dev: Dummy SCIF device used for loopback
+ * @loopb_wq: Workqueue used for handling loopback messages
+ * @loopb_wqname: Name of the loopback workqueue
+ * @loopb_work: Used for submitting work to loopb_wq
+ * @loopb_recv_q: List of messages received on the loopb_wq
+ * @card_initiated_exit: set when the card has initiated the exit
+ */
+struct scif_info {
+ u8 nodeid;
+ u8 maxid;
+ u8 total;
+ u32 nr_zombies;
+ spinlock_t eplock;
+ struct mutex connlock;
+ spinlock_t nb_connect_lock;
+ spinlock_t port_lock;
+ struct list_head uaccept;
+ struct list_head listen;
+ struct list_head zombie;
+ struct list_head connected;
+ struct list_head disconnected;
+ struct list_head nb_connect_list;
+ struct work_struct misc_work;
+ struct mutex conflock;
+ u8 en_msg_log;
+ u8 p2p_enable;
+ struct miscdevice mdev;
+ struct work_struct conn_work;
+ wait_queue_head_t exitwq;
+ struct scif_dev *loopb_dev;
+ struct workqueue_struct *loopb_wq;
+ char loopb_wqname[16];
+ struct work_struct loopb_work;
+ struct list_head loopb_recv_q;
+ bool card_initiated_exit;
+};
+
+/*
+ * struct scif_p2p_info - SCIF mapping information used for P2P
+ *
+ * @ppi_peer_id: SCIF peer node id
+ * @ppi_sg: Scatter list for bar information (one for mmio and one for aper)
+ * @sg_nentries: Number of entries in the scatterlist
+ * @ppi_da: DMA address for MMIO and APER bars
+ * @ppi_len: Length of MMIO and APER bars
+ * @ppi_list: Link in list of mapping information
+ */
+struct scif_p2p_info {
+ u8 ppi_peer_id;
+ struct scatterlist *ppi_sg[2];
+ u64 sg_nentries[2];
+ dma_addr_t ppi_da[2];
+ u64 ppi_len[2];
+#define SCIF_PPI_MMIO 0
+#define SCIF_PPI_APER 1
+ struct list_head ppi_list;
+};
+
+/*
+ * struct scif_dev - SCIF remote device specific fields
+ *
+ * @node: Node id
+ * @p2p: List of P2P mapping information
+ * @qpairs: The node queue pair for exchanging control messages
+ * @intr_wq: Workqueue for handling Node QP messages
+ * @intr_wqname: Name of node QP workqueue for handling interrupts
+ * @intr_bh: Used for submitting work to intr_wq
+ * @lock: Lock used for synchronizing access to the scif device
+ * @sdev: SCIF hardware device on the SCIF hardware bus
+ * @db: doorbell the peer will trigger to generate an interrupt on self
+ * @rdb: Doorbell to trigger on the peer to generate an interrupt on the peer
+ * @cookie: Cookie received while registering the interrupt handler
+ * @init_msg_work: Work scheduled for SCIF_INIT message processing
+ * @p2p_dwork: Delayed work to enable polling for P2P state
+ * @qp_dwork: Delayed work for enabling polling for remote QP information
+ * @p2p_retry: Number of times to retry polling of P2P state
+ * @base_addr: P2P aperture bar base address
+ * @mmio: The peer MMIO information used for P2P
+ * @spdev: SCIF peer device on the SCIF peer bus
+ * @node_remove_ack_pending: True if a node_remove_ack is pending
+ * @exit_ack_pending: true if an exit_ack is pending
+ * @disconn_wq: Used while waiting for a node remove response
+ * @disconn_rescnt: Keeps track of number of node remove requests sent
+ * @exit: Status of exit message
+ * @qp_dma_addr: Queue pair DMA address passed to the peer
+ */
+struct scif_dev {
+ u8 node;
+ struct list_head p2p;
+ struct scif_qp *qpairs;
+ struct workqueue_struct *intr_wq;
+ char intr_wqname[16];
+ struct work_struct intr_bh;
+ struct mutex lock;
+ struct scif_hw_dev *sdev;
+ int db;
+ int rdb;
+ struct mic_irq *cookie;
+ struct work_struct init_msg_work;
+ struct delayed_work p2p_dwork;
+ struct delayed_work qp_dwork;
+ int p2p_retry;
+ dma_addr_t base_addr;
+ struct mic_mw mmio;
+ struct scif_peer_dev __rcu *spdev;
+ bool node_remove_ack_pending;
+ bool exit_ack_pending;
+ wait_queue_head_t disconn_wq;
+ atomic_t disconn_rescnt;
+ enum scif_msg_state exit;
+ dma_addr_t qp_dma_addr;
+};
+
+extern struct scif_info scif_info;
+extern struct idr scif_ports;
+extern struct scif_dev *scif_dev;
+extern const struct file_operations scif_fops;
+
+/* Size of the RB for the Node QP */
+#define SCIF_NODE_QP_SIZE 0x10000
+
+#include "scif_nodeqp.h"
+
+/*
+ * scifdev_self:
+ * @dev: The remote SCIF Device
+ *
+ * Returns true if the SCIF Device passed is the self aka Loopback SCIF device.
+ */
+static inline int scifdev_self(struct scif_dev *dev)
+{
+ return dev->node == scif_info.nodeid;
+}
+
+static inline bool scif_is_mgmt_node(void)
+{
+ return !scif_info.nodeid;
+}
+
+/*
+ * scifdev_is_p2p:
+ * @dev: The remote SCIF Device
+ *
+ * Returns true if the SCIF Device is a MIC Peer to Peer SCIF device.
+ */
+static inline bool scifdev_is_p2p(struct scif_dev *dev)
+{
+ if (scif_is_mgmt_node())
+ return false;
+ else
+ return dev != &scif_dev[SCIF_MGMT_NODE] &&
+ !scifdev_self(dev);
+}
+
+/*
+ * _scifdev_alive:
+ * @scifdev: The remote SCIF Device
+ *
+ * Returns true if the remote SCIF Device is running or sleeping for
+ * this endpoint.
+ */
+static inline int _scifdev_alive(struct scif_dev *scifdev)
+{
+ struct scif_peer_dev *spdev;
+
+ rcu_read_lock();
+ spdev = rcu_dereference(scifdev->spdev);
+ rcu_read_unlock();
+ return !!spdev;
+}
+
+#include "scif_epd.h"
+
+void __init scif_init_debugfs(void);
+void scif_exit_debugfs(void);
+int scif_setup_intr_wq(struct scif_dev *scifdev);
+void scif_destroy_intr_wq(struct scif_dev *scifdev);
+void scif_cleanup_scifdev(struct scif_dev *dev);
+void scif_handle_remove_node(int node);
+void scif_disconnect_node(u32 node_id, bool mgmt_initiated);
+void scif_free_qp(struct scif_dev *dev);
+void scif_misc_handler(struct work_struct *work);
+void scif_stop(struct scif_dev *scifdev);
+irqreturn_t scif_intr_handler(int irq, void *data);
+#endif /* SCIF_MAIN_H */
diff --git a/drivers/misc/mic/scif/scif_map.h b/drivers/misc/mic/scif/scif_map.h
new file mode 100644
index 000000000000..20e50b4e19b2
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_map.h
@@ -0,0 +1,113 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#ifndef SCIF_MAP_H
+#define SCIF_MAP_H
+
+#include "../bus/scif_bus.h"
+
+static __always_inline void *
+scif_alloc_coherent(dma_addr_t *dma_handle,
+ struct scif_dev *scifdev, size_t size,
+ gfp_t gfp)
+{
+ void *va;
+
+ if (scifdev_self(scifdev)) {
+ va = kmalloc(size, gfp);
+ if (va)
+ *dma_handle = virt_to_phys(va);
+ } else {
+ va = dma_alloc_coherent(&scifdev->sdev->dev,
+ size, dma_handle, gfp);
+ if (va && scifdev_is_p2p(scifdev))
+ *dma_handle = *dma_handle + scifdev->base_addr;
+ }
+ return va;
+}
+
+static __always_inline void
+scif_free_coherent(void *va, dma_addr_t local,
+ struct scif_dev *scifdev, size_t size)
+{
+ if (scifdev_self(scifdev)) {
+ kfree(va);
+ } else {
+ if (scifdev_is_p2p(scifdev) && local > scifdev->base_addr)
+ local = local - scifdev->base_addr;
+ dma_free_coherent(&scifdev->sdev->dev,
+ size, va, local);
+ }
+}
+
+static __always_inline int
+scif_map_single(dma_addr_t *dma_handle,
+ void *local, struct scif_dev *scifdev, size_t size)
+{
+ int err = 0;
+
+ if (scifdev_self(scifdev)) {
+ *dma_handle = virt_to_phys((local));
+ } else {
+ *dma_handle = dma_map_single(&scifdev->sdev->dev,
+ local, size, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&scifdev->sdev->dev, *dma_handle))
+ err = -ENOMEM;
+ else if (scifdev_is_p2p(scifdev))
+ *dma_handle = *dma_handle + scifdev->base_addr;
+ }
+ if (err)
+ *dma_handle = 0;
+ return err;
+}
+
+static __always_inline void
+scif_unmap_single(dma_addr_t local, struct scif_dev *scifdev,
+ size_t size)
+{
+ if (!scifdev_self(scifdev)) {
+ if (scifdev_is_p2p(scifdev) && local > scifdev->base_addr)
+ local = local - scifdev->base_addr;
+ dma_unmap_single(&scifdev->sdev->dev, local,
+ size, DMA_BIDIRECTIONAL);
+ }
+}
+
+static __always_inline void *
+scif_ioremap(dma_addr_t phys, size_t size, struct scif_dev *scifdev)
+{
+ void *out_virt;
+ struct scif_hw_dev *sdev = scifdev->sdev;
+
+ if (scifdev_self(scifdev))
+ out_virt = phys_to_virt(phys);
+ else
+ out_virt = (void __force *)
+ sdev->hw_ops->ioremap(sdev, phys, size);
+ return out_virt;
+}
+
+static __always_inline void
+scif_iounmap(void *virt, size_t len, struct scif_dev *scifdev)
+{
+ if (!scifdev_self(scifdev)) {
+ struct scif_hw_dev *sdev = scifdev->sdev;
+
+ sdev->hw_ops->iounmap(sdev, (void __force __iomem *)virt);
+ }
+}
+#endif /* SCIF_MAP_H */
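
A minimal usage sketch of the mapping helpers above, assuming a hypothetical caller with a kmalloc'd buffer. example_map_buffer()/example_unmap_buffer() are invented names; the sketch only restates the loopback/P2P distinction the helpers encode.

/* Hedged sketch: pairing scif_map_single() with scif_unmap_single(). */
static int example_map_buffer(struct scif_dev *scifdev, void *buf,
			      size_t len, dma_addr_t *da)
{
	int err = scif_map_single(da, buf, scifdev, len);

	if (err)
		return err;	/* *da is zeroed on failure */
	/*
	 * Loopback: *da is simply virt_to_phys(buf). P2P peer: the
	 * aperture base_addr was folded in, so the peer can use *da
	 * directly for DMA.
	 */
	return 0;
}

static void example_unmap_buffer(struct scif_dev *scifdev, dma_addr_t da,
				 size_t len)
{
	/* No-op for loopback; strips base_addr again for a P2P peer */
	scif_unmap_single(da, scifdev, len);
}
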
diff --git a/drivers/misc/mic/scif/scif_nm.c b/drivers/misc/mic/scif/scif_nm.c
new file mode 100644
index 000000000000..9b4c5382d6a7
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_nm.c
@@ -0,0 +1,237 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#include "scif_peer_bus.h"
+
+#include "scif_main.h"
+#include "scif_map.h"
+
+/**
+ * scif_invalidate_ep() - Set state for all connected endpoints
+ * to disconnected and wake up all send/recv waitqueues
+ * @node: Id of the node whose endpoints are being invalidated
+ */
+static void scif_invalidate_ep(int node)
+{
+ struct scif_endpt *ep;
+ struct list_head *pos, *tmpq;
+
+ flush_work(&scif_info.conn_work);
+ mutex_lock(&scif_info.connlock);
+ list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
+ ep = list_entry(pos, struct scif_endpt, list);
+ if (ep->remote_dev->node == node) {
+ spin_lock(&ep->lock);
+ scif_cleanup_ep_qp(ep);
+ spin_unlock(&ep->lock);
+ }
+ }
+ list_for_each_safe(pos, tmpq, &scif_info.connected) {
+ ep = list_entry(pos, struct scif_endpt, list);
+ if (ep->remote_dev->node == node) {
+ list_del(pos);
+ spin_lock(&ep->lock);
+ ep->state = SCIFEP_DISCONNECTED;
+ list_add_tail(&ep->list, &scif_info.disconnected);
+ scif_cleanup_ep_qp(ep);
+ wake_up_interruptible(&ep->sendwq);
+ wake_up_interruptible(&ep->recvwq);
+ spin_unlock(&ep->lock);
+ }
+ }
+ mutex_unlock(&scif_info.connlock);
+}
+
+void scif_free_qp(struct scif_dev *scifdev)
+{
+ struct scif_qp *qp = scifdev->qpairs;
+
+ if (!qp)
+ return;
+ scif_free_coherent((void *)qp->inbound_q.rb_base,
+ qp->local_buf, scifdev, qp->inbound_q.size);
+ scif_unmap_single(qp->local_qp, scifdev, sizeof(struct scif_qp));
+ kfree(scifdev->qpairs);
+ scifdev->qpairs = NULL;
+}
+
+static void scif_cleanup_qp(struct scif_dev *dev)
+{
+ struct scif_qp *qp = &dev->qpairs[0];
+
+ if (!qp)
+ return;
+ scif_iounmap((void *)qp->remote_qp, sizeof(struct scif_qp), dev);
+ scif_iounmap((void *)qp->outbound_q.rb_base,
+ sizeof(struct scif_qp), dev);
+ qp->remote_qp = NULL;
+ qp->local_write = 0;
+ qp->inbound_q.current_write_offset = 0;
+ qp->inbound_q.current_read_offset = 0;
+ if (scifdev_is_p2p(dev))
+ scif_free_qp(dev);
+}
+
+void scif_send_acks(struct scif_dev *dev)
+{
+ struct scifmsg msg;
+
+ if (dev->node_remove_ack_pending) {
+ msg.uop = SCIF_NODE_REMOVE_ACK;
+ msg.src.node = scif_info.nodeid;
+ msg.dst.node = SCIF_MGMT_NODE;
+ msg.payload[0] = dev->node;
+ scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], &msg);
+ dev->node_remove_ack_pending = false;
+ }
+ if (dev->exit_ack_pending) {
+ msg.uop = SCIF_EXIT_ACK;
+ msg.src.node = scif_info.nodeid;
+ msg.dst.node = dev->node;
+ scif_nodeqp_send(dev, &msg);
+ dev->exit_ack_pending = false;
+ }
+}
+
+/*
+ * scif_cleanup_scifdev
+ *
+ * @dev: Remote SCIF device.
+ * Uninitialize SCIF data structures for remote SCIF device.
+ */
+void scif_cleanup_scifdev(struct scif_dev *dev)
+{
+ struct scif_hw_dev *sdev = dev->sdev;
+
+ if (!dev->sdev)
+ return;
+ if (scifdev_is_p2p(dev)) {
+ if (dev->cookie) {
+ sdev->hw_ops->free_irq(sdev, dev->cookie, dev);
+ dev->cookie = NULL;
+ }
+ scif_destroy_intr_wq(dev);
+ }
+ scif_destroy_p2p(dev);
+ scif_invalidate_ep(dev->node);
+ scif_send_acks(dev);
+ if (!dev->node && scif_info.card_initiated_exit) {
+ /*
+ * Send a SCIF_EXIT message, which is the last message from the MIC
+ * to the host, and wait for a SCIF_EXIT_ACK
+ */
+ scif_send_exit(dev);
+ scif_info.card_initiated_exit = false;
+ }
+ scif_cleanup_qp(dev);
+}
+
+/*
+ * scif_handle_remove_node:
+ *
+ * @node: Node to remove
+ */
+void scif_handle_remove_node(int node)
+{
+ struct scif_dev *scifdev = &scif_dev[node];
+ struct scif_peer_dev *spdev;
+
+ rcu_read_lock();
+ spdev = rcu_dereference(scifdev->spdev);
+ rcu_read_unlock();
+ if (spdev)
+ scif_peer_unregister_device(spdev);
+ else
+ scif_send_acks(scifdev);
+}
+
+static int scif_send_rmnode_msg(int node, int remove_node)
+{
+ struct scifmsg notif_msg;
+ struct scif_dev *dev = &scif_dev[node];
+
+ notif_msg.uop = SCIF_NODE_REMOVE;
+ notif_msg.src.node = scif_info.nodeid;
+ notif_msg.dst.node = node;
+ notif_msg.payload[0] = remove_node;
+ return scif_nodeqp_send(dev, &notif_msg);
+}
+
+/**
+ * scif_disconnect_node:
+ *
+ * @node_id: Id of the node to disconnect
+ * @mgmt_initiated: Disconnection initiated from the mgmt node
+ *
+ * Disconnect a node from the SCIF network.
+ */
+void scif_disconnect_node(u32 node_id, bool mgmt_initiated)
+{
+ int ret;
+ int msg_cnt = 0;
+ u32 i = 0;
+ struct scif_dev *scifdev = &scif_dev[node_id];
+
+ if (!node_id)
+ return;
+
+ atomic_set(&scifdev->disconn_rescnt, 0);
+
+ /* Destroy p2p network */
+ for (i = 1; i <= scif_info.maxid; i++) {
+ if (i == node_id)
+ continue;
+ ret = scif_send_rmnode_msg(i, node_id);
+ if (!ret)
+ msg_cnt++;
+ }
+ /* Wait for the remote nodes to respond with SCIF_NODE_REMOVE_ACK */
+ ret = wait_event_timeout(scifdev->disconn_wq,
+ (atomic_read(&scifdev->disconn_rescnt)
+ == msg_cnt), SCIF_NODE_ALIVE_TIMEOUT);
+ /* Tell the card to clean up */
+ if (mgmt_initiated && _scifdev_alive(scifdev))
+ /*
+ * Send a SCIF_EXIT message, which is the last message from the host
+ * to the MIC, and wait for a SCIF_EXIT_ACK
+ */
+ scif_send_exit(scifdev);
+ atomic_set(&scifdev->disconn_rescnt, 0);
+ /* Tell the mgmt node to clean up */
+ ret = scif_send_rmnode_msg(SCIF_MGMT_NODE, node_id);
+ if (!ret)
+ /* Wait for mgmt node to respond with SCIF_NODE_REMOVE_ACK */
+ wait_event_timeout(scifdev->disconn_wq,
+ (atomic_read(&scifdev->disconn_rescnt) == 1),
+ SCIF_NODE_ALIVE_TIMEOUT);
+}
+
+void scif_get_node_info(void)
+{
+ struct scifmsg msg;
+ DECLARE_COMPLETION_ONSTACK(node_info);
+
+ msg.uop = SCIF_GET_NODE_INFO;
+ msg.src.node = scif_info.nodeid;
+ msg.dst.node = SCIF_MGMT_NODE;
+ msg.payload[3] = (u64)&node_info;
+
+ if ((scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], &msg)))
+ return;
+
+ /* Wait for a response with SCIF_GET_NODE_INFO */
+ wait_for_completion(&node_info);
+}
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
new file mode 100644
index 000000000000..41e3bdb10061
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -0,0 +1,1312 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#include "../bus/scif_bus.h"
+#include "scif_peer_bus.h"
+#include "scif_main.h"
+#include "scif_nodeqp.h"
+#include "scif_map.h"
+
+/*
+ ************************************************************************
+ * SCIF node Queue Pair (QP) setup flow:
+ *
+ * 1) SCIF driver gets probed with a scif_hw_dev via the scif_hw_bus
+ * 2) scif_setup_qp(..) allocates the local qp and calls
+ * scif_setup_qp_connect(..) which allocates and maps the local
+ * buffer for the inbound QP
+ * 3) The local node updates the device page with the DMA address of the QP
+ * 4) A delayed work is scheduled (qp_dwork) which periodically reads if
+ * the peer node has updated its QP DMA address
+ * 5) Once a valid non zero address is found in the QP DMA address field
+ * in the device page, the local node maps the remote node's QP,
+ * updates its outbound QP and sends a SCIF_INIT message to the peer
+ * 6) The SCIF_INIT message is received by the peer node QP interrupt bottom
+ * half handler by calling scif_init(..)
+ * 7) scif_init(..) registers a new SCIF peer node by calling
+ * scif_peer_register_device(..) which signifies the addition of a new
+ * SCIF node
+ * 8) On the mgmt node, P2P network setup/teardown is initiated if all the
+ * remote nodes are online via scif_p2p_setup(..)
+ * 9) For P2P setup, the host maps the remote nodes' aperture and memory
+ * bars and sends a SCIF_NODE_ADD message to both nodes
+ * 10) As part of scif_nodeadd, both nodes set up their local inbound
+ * QPs and send a SCIF_NODE_ADD_ACK to the mgmt node
+ * 11) As part of scif_node_add_ack(..) the mgmt node forwards the
+ * SCIF_NODE_ADD_ACK to the remote nodes
+ * 12) As part of scif_node_add_ack(..) the remote nodes update their
+ * outbound QPs, make sure they can access memory on the remote node
+ * and then add a new SCIF peer node by calling
+ * scif_peer_register_device(..) which signifies the addition of a new
+ * SCIF node.
+ * 13) The SCIF network is now established across all nodes.
+ *
+ ************************************************************************
+ * SCIF node QP teardown flow (initiated by non mgmt node):
+ *
+ * 1) SCIF driver gets a remove callback with a scif_hw_dev via the scif_hw_bus
+ * 2) The device page QP DMA address field is updated with 0x0
+ * 3) A non mgmt node now cleans up all local data structures and sends a
+ * SCIF_EXIT message to the peer and waits for a SCIF_EXIT_ACK
+ * 4) As part of scif_exit(..) handling scif_disconnect_node(..) is called
+ * 5) scif_disconnect_node(..) sends a SCIF_NODE_REMOVE message to all the
+ * peers and waits for a SCIF_NODE_REMOVE_ACK
+ * 6) As part of scif_node_remove(..) a remote node unregisters the peer
+ * node from the SCIF network and sends a SCIF_NODE_REMOVE_ACK
+ * 7) When the mgmt node has received all the SCIF_NODE_REMOVE_ACKs
+ * it sends itself a node remove message whose handling cleans up local
+ * data structures and unregisters the peer node from the SCIF network
+ * 8) The mgmt node sends a SCIF_EXIT_ACK
+ * 9) Upon receipt of the SCIF_EXIT_ACK the node initiating the teardown
+ * completes the SCIF remove routine
+ * 10) The SCIF network is now torn down for the node initiating the
+ * teardown sequence
+ *
+ ************************************************************************
+ * SCIF node QP teardown flow (initiated by mgmt node):
+ *
+ * 1) SCIF driver gets a remove callback with a scif_hw_dev via the scif_hw_bus
+ * 2) The device page QP DMA address field is updated with 0x0
+ * 3) The mgmt node calls scif_disconnect_node(..)
+ * 4) scif_disconnect_node(..) sends a SCIF_NODE_REMOVE message to all the peers
+ * and waits for a SCIF_NODE_REMOVE_ACK
+ * 5) As part of scif_node_remove(..) a remote node unregisters the peer
+ * node from the SCIF network and sends a SCIF_NODE_REMOVE_ACK
+ * 6) When the mgmt node has received all the SCIF_NODE_REMOVE_ACKs
+ * it unregisters the peer node from the SCIF network
+ * 7) The mgmt node sends a SCIF_EXIT message and waits for a SCIF_EXIT_ACK.
+ * 8) A non mgmt node upon receipt of a SCIF_EXIT message calls scif_stop(..)
+ * which would clean up local data structures for all SCIF nodes and
+ * then send a SCIF_EXIT_ACK back to the mgmt node
+ * 9) Upon receipt of the SCIF_EXIT_ACK the mgmt node sends itself a node
+ * remove message whose handling cleans up local data structures and
+ * destroys any P2P mappings.
+ * 10) The SCIF hardware device for which a remove callback was received is now
+ * disconnected from the SCIF network.
+ */
+/*
+ * Initializes "local" data structures for the QP. Allocates the QP
+ * ring buffer (rb) and initializes the "inbound" queue.
+ */
+int scif_setup_qp_connect(struct scif_qp *qp, dma_addr_t *qp_offset,
+ int local_size, struct scif_dev *scifdev)
+{
+ void *local_q = NULL;
+ int err = 0;
+ u32 tmp_rd = 0;
+
+ spin_lock_init(&qp->send_lock);
+ spin_lock_init(&qp->recv_lock);
+
+ local_q = kzalloc(local_size, GFP_KERNEL);
+ if (!local_q) {
+ err = -ENOMEM;
+ return err;
+ }
+ err = scif_map_single(&qp->local_buf, local_q, scifdev, local_size);
+ if (err)
+ goto kfree;
+ /*
+ * To set up the inbound_q, the buffer lives locally, the read pointer
+ * is remote and the write pointer is local.
+ */
+ scif_rb_init(&qp->inbound_q,
+ &tmp_rd,
+ &qp->local_write,
+ local_q, get_count_order(local_size));
+ /*
+ * The read pointer is NULL initially and it is unsafe to use the ring
+ * buffer until this changes!
+ */
+ qp->inbound_q.read_ptr = NULL;
+ err = scif_map_single(qp_offset, qp,
+ scifdev, sizeof(struct scif_qp));
+ if (err)
+ goto unmap;
+ qp->local_qp = *qp_offset;
+ return err;
+unmap:
+ scif_unmap_single(qp->local_buf, scifdev, local_size);
+ qp->local_buf = 0;
+kfree:
+ kfree(local_q);
+ return err;
+}
+
+/* Called when the other side has already done its allocation */
+int scif_setup_qp_accept(struct scif_qp *qp, dma_addr_t *qp_offset,
+ dma_addr_t phys, int local_size,
+ struct scif_dev *scifdev)
+{
+ void *local_q;
+ void *remote_q;
+ struct scif_qp *remote_qp;
+ int remote_size;
+ int err = 0;
+
+ spin_lock_init(&qp->send_lock);
+ spin_lock_init(&qp->recv_lock);
+ /* Start by figuring out where we need to point */
+ remote_qp = scif_ioremap(phys, sizeof(struct scif_qp), scifdev);
+ if (!remote_qp)
+ return -EIO;
+ qp->remote_qp = remote_qp;
+ if (qp->remote_qp->magic != SCIFEP_MAGIC) {
+ err = -EIO;
+ goto iounmap;
+ }
+ qp->remote_buf = remote_qp->local_buf;
+ remote_size = qp->remote_qp->inbound_q.size;
+ remote_q = scif_ioremap(qp->remote_buf, remote_size, scifdev);
+ if (!remote_q) {
+ err = -EIO;
+ goto iounmap;
+ }
+ qp->remote_qp->local_write = 0;
+ /*
+ * To set up the outbound_q, the buffer lives in remote memory,
+ * the read pointer is local, the write pointer is remote
+ */
+ scif_rb_init(&qp->outbound_q,
+ &qp->local_read,
+ &qp->remote_qp->local_write,
+ remote_q,
+ get_count_order(remote_size));
+ local_q = kzalloc(local_size, GFP_KERNEL);
+ if (!local_q) {
+ err = -ENOMEM;
+ goto iounmap_1;
+ }
+ err = scif_map_single(&qp->local_buf, local_q, scifdev, local_size);
+ if (err)
+ goto kfree;
+ qp->remote_qp->local_read = 0;
+ /*
+ * To set up the inbound_q, the buffer lives locally, the read pointer
+ * is remote and the write pointer is local
+ */
+ scif_rb_init(&qp->inbound_q,
+ &qp->remote_qp->local_read,
+ &qp->local_write,
+ local_q, get_count_order(local_size));
+ err = scif_map_single(qp_offset, qp, scifdev,
+ sizeof(struct scif_qp));
+ if (err)
+ goto unmap;
+ qp->local_qp = *qp_offset;
+ return err;
+unmap:
+ scif_unmap_single(qp->local_buf, scifdev, local_size);
+ qp->local_buf = 0;
+kfree:
+ kfree(local_q);
+iounmap_1:
+ scif_iounmap(remote_q, remote_size, scifdev);
+ qp->outbound_q.rb_base = NULL;
+iounmap:
+ scif_iounmap(qp->remote_qp, sizeof(struct scif_qp), scifdev);
+ qp->remote_qp = NULL;
+ return err;
+}
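
The pointer wiring in the two scif_rb_init() calls above is easy to misread, so here is the same wiring restated with annotations. This is a commentary sketch of the calls already made in scif_setup_qp_accept(), not new driver code.

/* Hedged restatement of the ring wiring in scif_setup_qp_accept(). */
scif_rb_init(&qp->outbound_q,
	     &qp->local_read,		  /* we consume: our read offset */
	     &qp->remote_qp->local_write, /* peer produces: its write offset */
	     remote_q,			  /* peer's buffer, mapped via aperture */
	     get_count_order(remote_size));
scif_rb_init(&qp->inbound_q,
	     &qp->remote_qp->local_read,  /* peer consumes: its read offset */
	     &qp->local_write,		  /* we produce: our write offset */
	     local_q,			  /* our local buffer */
	     get_count_order(local_size));
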
+
+int scif_setup_qp_connect_response(struct scif_dev *scifdev,
+ struct scif_qp *qp, u64 payload)
+{
+ int err = 0;
+ void *r_buf;
+ int remote_size;
+ phys_addr_t tmp_phys;
+
+ qp->remote_qp = scif_ioremap(payload, sizeof(struct scif_qp), scifdev);
+
+ if (!qp->remote_qp) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ if (qp->remote_qp->magic != SCIFEP_MAGIC) {
+ dev_err(&scifdev->sdev->dev,
+ "SCIFEP_MAGIC mismatch between self %d remote %d\n",
+ scif_dev[scif_info.nodeid].node, scifdev->node);
+ err = -ENODEV;
+ goto error;
+ }
+
+ tmp_phys = qp->remote_qp->local_buf;
+ remote_size = qp->remote_qp->inbound_q.size;
+ r_buf = scif_ioremap(tmp_phys, remote_size, scifdev);
+
+ if (!r_buf)
+ return -EIO;
+
+ qp->local_read = 0;
+ scif_rb_init(&qp->outbound_q,
+ &qp->local_read,
+ &qp->remote_qp->local_write,
+ r_buf,
+ get_count_order(remote_size));
+ /*
+ * Re-initialize the inbound_q now that we know where the
+ * inbound read pointer really is.
+ */
+ scif_rb_init(&qp->inbound_q,
+ &qp->remote_qp->local_read,
+ &qp->local_write,
+ qp->inbound_q.rb_base,
+ get_count_order(qp->inbound_q.size));
+error:
+ return err;
+}
+
+static __always_inline void
+scif_send_msg_intr(struct scif_dev *scifdev)
+{
+ struct scif_hw_dev *sdev = scifdev->sdev;
+
+ if (scifdev_is_p2p(scifdev))
+ sdev->hw_ops->send_p2p_intr(sdev, scifdev->rdb, &scifdev->mmio);
+ else
+ sdev->hw_ops->send_intr(sdev, scifdev->rdb);
+}
+
+int scif_qp_response(phys_addr_t phys, struct scif_dev *scifdev)
+{
+ int err = 0;
+ struct scifmsg msg;
+
+ err = scif_setup_qp_connect_response(scifdev, scifdev->qpairs, phys);
+ if (!err) {
+ /*
+ * Now that everything is set up and mapped, we're ready
+ * to tell the peer about our queue's location
+ */
+ msg.uop = SCIF_INIT;
+ msg.dst.node = scifdev->node;
+ err = scif_nodeqp_send(scifdev, &msg);
+ }
+ return err;
+}
+
+void scif_send_exit(struct scif_dev *scifdev)
+{
+ struct scifmsg msg;
+ int ret;
+
+ scifdev->exit = OP_IN_PROGRESS;
+ msg.uop = SCIF_EXIT;
+ msg.src.node = scif_info.nodeid;
+ msg.dst.node = scifdev->node;
+ ret = scif_nodeqp_send(scifdev, &msg);
+ if (ret)
+ goto done;
+ /* Wait for a SCIF_EXIT_ACK message */
+ wait_event_timeout(scif_info.exitwq, scifdev->exit == OP_COMPLETED,
+ SCIF_NODE_ALIVE_TIMEOUT);
+done:
+ scifdev->exit = OP_IDLE;
+}
+
+int scif_setup_qp(struct scif_dev *scifdev)
+{
+ int err = 0;
+ int local_size;
+ struct scif_qp *qp;
+
+ local_size = SCIF_NODE_QP_SIZE;
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ err = -ENOMEM;
+ return err;
+ }
+ qp->magic = SCIFEP_MAGIC;
+ scifdev->qpairs = qp;
+ err = scif_setup_qp_connect(qp, &scifdev->qp_dma_addr,
+ local_size, scifdev);
+ if (err)
+ goto free_qp;
+ /*
+ * We're as set up as we can be: the inbound_q is ready, but we have no
+ * usable outbound_q yet. When a message arrives, the read_ptr will be
+ * updated, and we will pull the message.
+ */
+ return err;
+free_qp:
+ kfree(scifdev->qpairs);
+ scifdev->qpairs = NULL;
+ return err;
+}
+
+static void scif_p2p_freesg(struct scatterlist *sg)
+{
+ kfree(sg);
+}
+
+static struct scatterlist *
+scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt)
+{
+ struct scatterlist *sg;
+ struct page *page;
+ int i;
+
+ sg = kcalloc(page_cnt, sizeof(struct scatterlist), GFP_KERNEL);
+ if (!sg)
+ return NULL;
+ sg_init_table(sg, page_cnt);
+ for (i = 0; i < page_cnt; i++) {
+ page = vmalloc_to_page((void __force *)va);
+ if (!page)
+ goto p2p_sg_err;
+ sg_set_page(&sg[i], page, page_size, 0);
+ va += page_size;
+ }
+ return sg;
+p2p_sg_err:
+ kfree(sg);
+ return NULL;
+}
+
+/* Init p2p mappings required to access peerdev from scifdev */
+static struct scif_p2p_info *
+scif_init_p2p_info(struct scif_dev *scifdev, struct scif_dev *peerdev)
+{
+ struct scif_p2p_info *p2p;
+ int num_mmio_pages, num_aper_pages, sg_page_shift, err, num_aper_chunks;
+ struct scif_hw_dev *psdev = peerdev->sdev;
+ struct scif_hw_dev *sdev = scifdev->sdev;
+
+ num_mmio_pages = psdev->mmio->len >> PAGE_SHIFT;
+ num_aper_pages = psdev->aper->len >> PAGE_SHIFT;
+
+ p2p = kzalloc(sizeof(*p2p), GFP_KERNEL);
+ if (!p2p)
+ return NULL;
+ p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->va,
+ PAGE_SIZE, num_mmio_pages);
+ if (!p2p->ppi_sg[SCIF_PPI_MMIO])
+ goto free_p2p;
+ p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages;
+ sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30)));
+ num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT);
+ p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->va,
+ 1 << sg_page_shift,
+ num_aper_chunks);
+ /* bail out before dma_map_sg() if the aperture sg could not be built */
+ if (!p2p->ppi_sg[SCIF_PPI_APER])
+ goto scif_p2p_free;
+ p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks;
+ err = dma_map_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_MMIO],
+ num_mmio_pages, PCI_DMA_BIDIRECTIONAL);
+ if (err != num_mmio_pages)
+ goto scif_p2p_free;
+ err = dma_map_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_APER],
+ num_aper_chunks, PCI_DMA_BIDIRECTIONAL);
+ if (err != num_aper_chunks)
+ goto dma_unmap;
+ p2p->ppi_da[SCIF_PPI_MMIO] = sg_dma_address(p2p->ppi_sg[SCIF_PPI_MMIO]);
+ p2p->ppi_da[SCIF_PPI_APER] = sg_dma_address(p2p->ppi_sg[SCIF_PPI_APER]);
+ p2p->ppi_len[SCIF_PPI_MMIO] = num_mmio_pages;
+ p2p->ppi_len[SCIF_PPI_APER] = num_aper_pages;
+ p2p->ppi_peer_id = peerdev->node;
+ return p2p;
+dma_unmap:
+ dma_unmap_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_MMIO],
+ p2p->sg_nentries[SCIF_PPI_MMIO], DMA_BIDIRECTIONAL);
+scif_p2p_free:
+ scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_MMIO]);
+ scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_APER]);
+free_p2p:
+ kfree(p2p);
+ return NULL;
+}
+
+/**
+ * scif_node_connect: Respond to SCIF_NODE_CONNECT interrupt message
+ * @scifdev: Remote SCIF device node
+ * @dst: Destination node
+ *
+ * Connect the src and dst nodes by setting up the p2p connection
+ * between them. The management node here acts like a proxy.
+ */
+static void scif_node_connect(struct scif_dev *scifdev, int dst)
+{
+ struct scif_dev *dev_j = scifdev;
+ struct scif_dev *dev_i = NULL;
+ struct scif_p2p_info *p2p_ij = NULL; /* bus addr for j from i */
+ struct scif_p2p_info *p2p_ji = NULL; /* bus addr for i from j */
+ struct scif_p2p_info *p2p;
+ struct list_head *pos, *tmp;
+ struct scifmsg msg;
+ int err;
+ u64 tmppayload;
+
+ if (dst < 1 || dst > scif_info.maxid)
+ return;
+
+ dev_i = &scif_dev[dst];
+
+ if (!_scifdev_alive(dev_i))
+ return;
+ /*
+ * If the p2p connection is already set up or in the process of being
+ * set up then just ignore this request. The requested node will be
+ * informed by SCIF_NODE_ADD_ACK or SCIF_NODE_ADD_NACK
+ */
+ if (!list_empty(&dev_i->p2p)) {
+ list_for_each_safe(pos, tmp, &dev_i->p2p) {
+ p2p = list_entry(pos, struct scif_p2p_info, ppi_list);
+ if (p2p->ppi_peer_id == dev_j->node)
+ return;
+ }
+ }
+ p2p_ij = scif_init_p2p_info(dev_i, dev_j);
+ if (!p2p_ij)
+ return;
+ p2p_ji = scif_init_p2p_info(dev_j, dev_i);
+ if (!p2p_ji)
+ return;
+ list_add_tail(&p2p_ij->ppi_list, &dev_i->p2p);
+ list_add_tail(&p2p_ji->ppi_list, &dev_j->p2p);
+
+ /*
+ * Send a SCIF_NODE_ADD to dev_i, pass it its bus address
+ * as seen from dev_j
+ */
+ msg.uop = SCIF_NODE_ADD;
+ msg.src.node = dev_j->node;
+ msg.dst.node = dev_i->node;
+
+ msg.payload[0] = p2p_ji->ppi_da[SCIF_PPI_APER];
+ msg.payload[1] = p2p_ij->ppi_da[SCIF_PPI_MMIO];
+ msg.payload[2] = p2p_ij->ppi_da[SCIF_PPI_APER];
+ msg.payload[3] = p2p_ij->ppi_len[SCIF_PPI_APER] << PAGE_SHIFT;
+
+ err = scif_nodeqp_send(dev_i, &msg);
+ if (err) {
+ dev_err(&scifdev->sdev->dev,
+ "%s %d error %d\n", __func__, __LINE__, err);
+ return;
+ }
+
+ /* Same as above but to dev_j */
+ msg.uop = SCIF_NODE_ADD;
+ msg.src.node = dev_i->node;
+ msg.dst.node = dev_j->node;
+
+ tmppayload = msg.payload[0];
+ msg.payload[0] = msg.payload[2];
+ msg.payload[2] = tmppayload;
+ msg.payload[1] = p2p_ji->ppi_da[SCIF_PPI_MMIO];
+ msg.payload[3] = p2p_ji->ppi_len[SCIF_PPI_APER] << PAGE_SHIFT;
+
+ scif_nodeqp_send(dev_j, &msg);
+}
+
+static void scif_p2p_setup(void)
+{
+ int i, j;
+
+ if (!scif_info.p2p_enable)
+ return;
+
+ for (i = 1; i <= scif_info.maxid; i++)
+ if (!_scifdev_alive(&scif_dev[i]))
+ return;
+
+ for (i = 1; i <= scif_info.maxid; i++) {
+ for (j = 1; j <= scif_info.maxid; j++) {
+ struct scif_dev *scifdev = &scif_dev[i];
+
+ if (i == j)
+ continue;
+ scif_node_connect(scifdev, j);
+ }
+ }
+}
+
+void scif_qp_response_ack(struct work_struct *work)
+{
+ struct scif_dev *scifdev = container_of(work, struct scif_dev,
+ init_msg_work);
+ struct scif_peer_dev *spdev;
+
+ /* Drop the INIT message if it has already been received */
+ if (_scifdev_alive(scifdev))
+ return;
+
+ spdev = scif_peer_register_device(scifdev);
+ if (IS_ERR(spdev))
+ return;
+
+ if (scif_is_mgmt_node()) {
+ mutex_lock(&scif_info.conflock);
+ scif_p2p_setup();
+ mutex_unlock(&scif_info.conflock);
+ }
+}
+
+static char *message_types[] = {"BAD",
+ "INIT",
+ "EXIT",
+ "SCIF_EXIT_ACK",
+ "SCIF_NODE_ADD",
+ "SCIF_NODE_ADD_ACK",
+ "SCIF_NODE_ADD_NACK",
+ "REMOVE_NODE",
+ "REMOVE_NODE_ACK",
+ "CNCT_REQ",
+ "CNCT_GNT",
+ "CNCT_GNTACK",
+ "CNCT_GNTNACK",
+ "CNCT_REJ",
+ "DISCNCT",
+ "DISCNT_ACK",
+ "CLIENT_SENT",
+ "CLIENT_RCVD",
+ "SCIF_GET_NODE_INFO"};
+
+static void
+scif_display_message(struct scif_dev *scifdev, struct scifmsg *msg,
+ const char *label)
+{
+ if (!scif_info.en_msg_log)
+ return;
+ if (msg->uop > SCIF_MAX_MSG) {
+ dev_err(&scifdev->sdev->dev,
+ "%s: unknown msg type %d\n", label, msg->uop);
+ return;
+ }
+ dev_info(&scifdev->sdev->dev,
+ "%s: msg type %s, src %d:%d, dest %d:%d payload 0x%llx:0x%llx:0x%llx:0x%llx\n",
+ label, message_types[msg->uop], msg->src.node, msg->src.port,
+ msg->dst.node, msg->dst.port, msg->payload[0], msg->payload[1],
+ msg->payload[2], msg->payload[3]);
+}
+
+int _scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_qp *qp = scifdev->qpairs;
+ int err = -ENOMEM, loop_cnt = 0;
+
+ scif_display_message(scifdev, msg, "Sent");
+ if (!qp) {
+ err = -EINVAL;
+ goto error;
+ }
+ spin_lock(&qp->send_lock);
+
+ while ((err = scif_rb_write(&qp->outbound_q,
+ msg, sizeof(struct scifmsg)))) {
+ mdelay(1);
+#define SCIF_NODEQP_SEND_TO_MSEC (3 * 1000)
+ if (loop_cnt++ > (SCIF_NODEQP_SEND_TO_MSEC)) {
+ err = -ENODEV;
+ break;
+ }
+ }
+ if (!err)
+ scif_rb_commit(&qp->outbound_q);
+ spin_unlock(&qp->send_lock);
+ if (!err) {
+ if (scifdev_self(scifdev))
+ /*
+ * For loopback we need to emulate an interrupt by
+ * queuing work on the workqueue that also handles
+ * real node QP interrupts.
+ */
+ queue_work(scifdev->intr_wq, &scifdev->intr_bh);
+ else
+ scif_send_msg_intr(scifdev);
+ }
+error:
+ if (err)
+ dev_dbg(&scifdev->sdev->dev,
+ "%s %d error %d uop %d\n",
+ __func__, __LINE__, err, msg->uop);
+ return err;
+}
+
+/**
+ * scif_nodeqp_send - Send a message on the node queue pair
+ * @scifdev: Scif Device.
+ * @msg: The message to be sent.
+ */
+int scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ int err;
+ struct device *spdev = NULL;
+
+ if (msg->uop > SCIF_EXIT_ACK) {
+ /* Don't send messages once the exit flow has begun */
+ if (scifdev->exit != OP_IDLE)
+ return -ENODEV;
+ spdev = scif_get_peer_dev(scifdev);
+ if (IS_ERR(spdev)) {
+ err = PTR_ERR(spdev);
+ return err;
+ }
+ }
+ err = _scif_nodeqp_send(scifdev, msg);
+ if (msg->uop > SCIF_EXIT_ACK)
+ scif_put_peer_dev(spdev);
+ return err;
+}
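
For illustration, a hedged sketch of the send pattern the rest of this file relies on. example_notify_mgmt() is an invented name; the uop and payload usage mirror scif_send_acks() in scif_nm.c.

/* Hedged sketch: composing and sending a node QP message. */
static int example_notify_mgmt(int removed_node)
{
	struct scifmsg msg;

	msg.uop = SCIF_NODE_REMOVE_ACK;
	msg.src.node = scif_info.nodeid;
	msg.dst.node = SCIF_MGMT_NODE;
	msg.payload[0] = removed_node;
	/* Retries internally for ~3s; returns -ENODEV if the ring stays full */
	return scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], &msg);
}
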
+
+/*
+ * scif_misc_handler:
+ *
+ * Work queue handler for servicing miscellaneous SCIF tasks.
+ * Examples include:
+ * 1) Cleanup of zombie endpoints.
+ */
+void scif_misc_handler(struct work_struct *work)
+{
+ scif_cleanup_zombie_epd();
+}
+
+/**
+ * scif_init() - Respond to SCIF_INIT interrupt message
+ * @scifdev: Remote SCIF device node
+ * @msg: Interrupt message
+ */
+static __always_inline void
+scif_init(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ /*
+ * Wait for the delayed work polling the device page for the peer QP DMA
+ * address to finish initializing the inbound_q.
+ */
+ flush_delayed_work(&scifdev->qp_dwork);
+ /*
+ * Delegate the peer device registration to a workqueue, otherwise if
+ * SCIF client probe (called during peer device registration) calls
+ * scif_connect(..), it will block the message processing thread causing
+ * a deadlock.
+ */
+ schedule_work(&scifdev->init_msg_work);
+}
+
+/**
+ * scif_exit() - Respond to SCIF_EXIT interrupt message
+ * @scifdev: Remote SCIF device node
+ * @msg: Interrupt message
+ *
+ * This function stops the SCIF interface for the node which sent
+ * the SCIF_EXIT message and starts waiting for that node to
+ * set up the queue pair again.
+ */
+static __always_inline void
+scif_exit(struct scif_dev *scifdev, struct scifmsg *unused)
+{
+ scifdev->exit_ack_pending = true;
+ if (scif_is_mgmt_node())
+ scif_disconnect_node(scifdev->node, false);
+ else
+ scif_stop(scifdev);
+ schedule_delayed_work(&scifdev->qp_dwork,
+ msecs_to_jiffies(1000));
+}
+
+/**
+ * scif_exit_ack() - Respond to SCIF_EXIT_ACK interrupt message
+ * @scifdev: Remote SCIF device node
+ * @msg: Interrupt message
+ *
+ */
+static __always_inline void
+scif_exit_ack(struct scif_dev *scifdev, struct scifmsg *unused)
+{
+ scifdev->exit = OP_COMPLETED;
+ wake_up(&scif_info.exitwq);
+}
+
+/**
+ * scif_node_add() - Respond to SCIF_NODE_ADD interrupt message
+ * @scifdev: Remote SCIF device node
+ * @msg: Interrupt message
+ *
+ * When the mgmt node driver has finished initializing a MIC node queue pair it
+ * marks the node as online. It then looks for all currently online MIC cards
+ * and sends a SCIF_NODE_ADD message to identify the ID of the new card for
+ * peer to peer initialization
+ *
+ * The local node allocates its incoming queue and sends its address in the
+ * SCIF_NODE_ADD_ACK message back to the mgmt node, the mgmt node "reflects"
+ * this message to the new node
+ */
+static __always_inline void
+scif_node_add(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_dev *newdev;
+ dma_addr_t qp_offset;
+ int qp_connect;
+ struct scif_hw_dev *sdev;
+
+ dev_dbg(&scifdev->sdev->dev,
+ "Scifdev %d:%d received NODE_ADD msg for node %d\n",
+ scifdev->node, msg->dst.node, msg->src.node);
+ dev_dbg(&scifdev->sdev->dev,
+ "Remote address for this node's aperture %llx\n",
+ msg->payload[0]);
+ newdev = &scif_dev[msg->src.node];
+ newdev->node = msg->src.node;
+ newdev->sdev = scif_dev[SCIF_MGMT_NODE].sdev;
+ sdev = newdev->sdev;
+
+ if (scif_setup_intr_wq(newdev)) {
+ dev_err(&scifdev->sdev->dev,
+ "failed to setup interrupts for %d\n", msg->src.node);
+ goto interrupt_setup_error;
+ }
+ newdev->mmio.va = ioremap_nocache(msg->payload[1], sdev->mmio->len);
+ if (!newdev->mmio.va) {
+ dev_err(&scifdev->sdev->dev,
+ "failed to map mmio for %d\n", msg->src.node);
+ goto mmio_map_error;
+ }
+ newdev->qpairs = kzalloc(sizeof(*newdev->qpairs), GFP_KERNEL);
+ if (!newdev->qpairs)
+ goto qp_alloc_error;
+ /*
+ * Set the base address of the remote node's memory since it gets
+ * added to qp_offset
+ */
+ newdev->base_addr = msg->payload[0];
+
+ qp_connect = scif_setup_qp_connect(newdev->qpairs, &qp_offset,
+ SCIF_NODE_QP_SIZE, newdev);
+ if (qp_connect) {
+ dev_err(&scifdev->sdev->dev,
+ "failed to setup qp_connect %d\n", qp_connect);
+ goto qp_connect_error;
+ }
+
+ newdev->db = sdev->hw_ops->next_db(sdev);
+ newdev->cookie = sdev->hw_ops->request_irq(sdev, scif_intr_handler,
+ "SCIF_INTR", newdev,
+ newdev->db);
+ if (IS_ERR(newdev->cookie))
+ goto qp_connect_error;
+ newdev->qpairs->magic = SCIFEP_MAGIC;
+ newdev->qpairs->qp_state = SCIF_QP_OFFLINE;
+
+ msg->uop = SCIF_NODE_ADD_ACK;
+ msg->dst.node = msg->src.node;
+ msg->src.node = scif_info.nodeid;
+ msg->payload[0] = qp_offset;
+ msg->payload[2] = newdev->db;
+ scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], msg);
+ return;
+qp_connect_error:
+ kfree(newdev->qpairs);
+ newdev->qpairs = NULL;
+qp_alloc_error:
+ iounmap(newdev->mmio.va);
+ newdev->mmio.va = NULL;
+mmio_map_error:
+interrupt_setup_error:
+ dev_err(&scifdev->sdev->dev,
+ "node add failed for node %d\n", msg->src.node);
+ msg->uop = SCIF_NODE_ADD_NACK;
+ msg->dst.node = msg->src.node;
+ msg->src.node = scif_info.nodeid;
+ scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], msg);
+}
+
+void scif_poll_qp_state(struct work_struct *work)
+{
+#define SCIF_NODE_QP_RETRY 100
+#define SCIF_NODE_QP_TIMEOUT 100
+ struct scif_dev *peerdev = container_of(work, struct scif_dev,
+ p2p_dwork.work);
+ struct scif_qp *qp = &peerdev->qpairs[0];
+
+ if (qp->qp_state != SCIF_QP_ONLINE ||
+ qp->remote_qp->qp_state != SCIF_QP_ONLINE) {
+ if (peerdev->p2p_retry++ == SCIF_NODE_QP_RETRY) {
+ dev_err(&peerdev->sdev->dev,
+ "Warning: QP check timeout with state %d\n",
+ qp->qp_state);
+ goto timeout;
+ }
+ schedule_delayed_work(&peerdev->p2p_dwork,
+ msecs_to_jiffies(SCIF_NODE_QP_TIMEOUT));
+ return;
+ }
+ scif_peer_register_device(peerdev);
+ return;
+timeout:
+ dev_err(&peerdev->sdev->dev,
+ "%s %d remote node %d offline, state = 0x%x\n",
+ __func__, __LINE__, peerdev->node, qp->qp_state);
+ qp->remote_qp->qp_state = SCIF_QP_OFFLINE;
+ scif_cleanup_scifdev(peerdev);
+}
+
+/**
+ * scif_node_add_ack() - Respond to SCIF_NODE_ADD_ACK interrupt message
+ * @scifdev: Remote SCIF device node
+ * @msg: Interrupt message
+ *
+ * After a MIC node receives the SCIF_NODE_ADD_ACK message it sends this
+ * message to the mgmt node to confirm the sequence is finished.
+ *
+ */
+static __always_inline void
+scif_node_add_ack(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_dev *peerdev;
+ struct scif_qp *qp;
+ struct scif_dev *dst_dev = &scif_dev[msg->dst.node];
+
+ dev_dbg(&scifdev->sdev->dev,
+ "Scifdev %d received SCIF_NODE_ADD_ACK msg src %d dst %d\n",
+ scifdev->node, msg->src.node, msg->dst.node);
+ dev_dbg(&scifdev->sdev->dev,
+ "payload %llx %llx %llx %llx\n", msg->payload[0],
+ msg->payload[1], msg->payload[2], msg->payload[3]);
+ if (scif_is_mgmt_node()) {
+ /*
+ * The lock serializes with scif_qp_response_ack. The mgmt node
+ * is forwarding the NODE_ADD_ACK message from src to dst; we
+ * need to make sure that dst has already received a NODE_ADD
+ * for src and set up its end of the qp to dst.
+ */
+ mutex_lock(&scif_info.conflock);
+ msg->payload[1] = scif_info.maxid;
+ scif_nodeqp_send(dst_dev, msg);
+ mutex_unlock(&scif_info.conflock);
+ return;
+ }
+ peerdev = &scif_dev[msg->src.node];
+ peerdev->sdev = scif_dev[SCIF_MGMT_NODE].sdev;
+ peerdev->node = msg->src.node;
+
+ qp = &peerdev->qpairs[0];
+
+ if (scif_setup_qp_connect_response(peerdev, &peerdev->qpairs[0],
+ msg->payload[0]))
+ goto local_error;
+ peerdev->rdb = msg->payload[2];
+ qp->remote_qp->qp_state = SCIF_QP_ONLINE;
+ schedule_delayed_work(&peerdev->p2p_dwork, 0);
+ return;
+local_error:
+ scif_cleanup_scifdev(peerdev);
+}
+
+/**
+ * scif_node_add_nack() - Respond to SCIF_NODE_ADD_NACK interrupt message
+ * @scifdev: Remote SCIF device node
+ * @msg: Interrupt message
+ *
+ * SCIF_NODE_ADD failed, so inform the waiting wq.
+ */
+static __always_inline void
+scif_node_add_nack(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ if (scif_is_mgmt_node()) {
+ struct scif_dev *dst_dev = &scif_dev[msg->dst.node];
+
+ dev_dbg(&scifdev->sdev->dev,
+ "SCIF_NODE_ADD_NACK received from %d\n", scifdev->node);
+ scif_nodeqp_send(dst_dev, msg);
+ }
+}
+
+/**
+ * scif_node_remove() - Handle SCIF_NODE_REMOVE message
+ * @scifdev: Remote SCIF device node
+ * @msg: Interrupt message
+ *
+ * Handle node removal.
+ */
+static __always_inline void
+scif_node_remove(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ int node = msg->payload[0];
+ struct scif_dev *scdev = &scif_dev[node];
+
+ scdev->node_remove_ack_pending = true;
+ scif_handle_remove_node(node);
+}
+
+/**
+ * scif_node_remove_ack() - Handle SCIF_NODE_REMOVE_ACK message
+ * @scifdev: Remote SCIF device node
+ * @msg: Interrupt message
+ *
+ * The peer has acked a SCIF_NODE_REMOVE message.
+ */
+static __always_inline void
+scif_node_remove_ack(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ struct scif_dev *sdev = &scif_dev[msg->payload[0]];
+
+ atomic_inc(&sdev->disconn_rescnt);
+ wake_up(&sdev->disconn_wq);
+}
+
+/**
+ * scif_get_node_info_resp() - Respond to SCIF_GET_NODE_INFO interrupt message
+ * @scifdev: Remote SCIF device node
+ * @msg: Interrupt message
+ *
+ * Retrieve node info, i.e. maxid and total, from the mgmt node.
+ */
+static __always_inline void
+scif_get_node_info_resp(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ if (scif_is_mgmt_node()) {
+ swap(msg->dst.node, msg->src.node);
+ mutex_lock(&scif_info.conflock);
+ msg->payload[1] = scif_info.maxid;
+ msg->payload[2] = scif_info.total;
+ mutex_unlock(&scif_info.conflock);
+ scif_nodeqp_send(scifdev, msg);
+ } else {
+ struct completion *node_info =
+ (struct completion *)msg->payload[3];
+
+ mutex_lock(&scif_info.conflock);
+ scif_info.maxid = msg->payload[1];
+ scif_info.total = msg->payload[2];
+ complete_all(node_info);
+ mutex_unlock(&scif_info.conflock);
+ }
+}
+
+static void
+scif_msg_unknown(struct scif_dev *scifdev, struct scifmsg *msg)
+{
+ /* Bogus Node Qp Message? */
+ dev_err(&scifdev->sdev->dev,
+ "Unknown message 0x%xn scifdev->node 0x%x\n",
+ msg->uop, scifdev->node);
+}
+
+static void (*scif_intr_func[SCIF_MAX_MSG + 1])
+ (struct scif_dev *, struct scifmsg *msg) = {
+ scif_msg_unknown, /* Error */
+ scif_init, /* SCIF_INIT */
+ scif_exit, /* SCIF_EXIT */
+ scif_exit_ack, /* SCIF_EXIT_ACK */
+ scif_node_add, /* SCIF_NODE_ADD */
+ scif_node_add_ack, /* SCIF_NODE_ADD_ACK */
+ scif_node_add_nack, /* SCIF_NODE_ADD_NACK */
+ scif_node_remove, /* SCIF_NODE_REMOVE */
+ scif_node_remove_ack, /* SCIF_NODE_REMOVE_ACK */
+ scif_cnctreq, /* SCIF_CNCT_REQ */
+ scif_cnctgnt, /* SCIF_CNCT_GNT */
+ scif_cnctgnt_ack, /* SCIF_CNCT_GNTACK */
+ scif_cnctgnt_nack, /* SCIF_CNCT_GNTNACK */
+ scif_cnctrej, /* SCIF_CNCT_REJ */
+ scif_discnct, /* SCIF_DISCNCT */
+ scif_discnt_ack, /* SCIF_DISCNT_ACK */
+ scif_clientsend, /* SCIF_CLIENT_SENT */
+ scif_clientrcvd, /* SCIF_CLIENT_RCVD */
+ scif_get_node_info_resp, /* SCIF_GET_NODE_INFO */
+};
+
+static int scif_max_msg_id = SCIF_MAX_MSG;
+
+/**
+ * scif_nodeqp_msg_handler() - Common handler for node messages
+ * @scifdev: Remote device to respond to
+ * @qp: Remote memory pointer
+ * @msg: The message to be handled.
+ *
+ * This routine calls the appropriate routine to handle a Node QP
+ * message receipt.
+ */
+static void
+scif_nodeqp_msg_handler(struct scif_dev *scifdev,
+ struct scif_qp *qp, struct scifmsg *msg)
+{
+ scif_display_message(scifdev, msg, "Rcvd");
+
+ if (msg->uop > (u32)scif_max_msg_id) {
+ scif_msg_unknown(scifdev, msg);
+ return;
+ }
+
+ scif_intr_func[msg->uop](scifdev, msg);
+}
+
+/**
+ * scif_nodeqp_intrhandler() - Interrupt handler for node messages
+ * @scifdev: Remote device to respond to
+ * @qp: Remote memory pointer
+ *
+ * This routine is triggered by the interrupt mechanism. It reads
+ * messages from the node queue RB and calls the Node QP Message handling
+ * routine.
+ */
+void scif_nodeqp_intrhandler(struct scif_dev *scifdev, struct scif_qp *qp)
+{
+ struct scifmsg msg;
+ int read_size;
+
+ do {
+ read_size = scif_rb_get_next(&qp->inbound_q, &msg, sizeof(msg));
+ if (!read_size)
+ break;
+ scif_nodeqp_msg_handler(scifdev, qp, &msg);
+ /*
+ * The node queue pair is unmapped so skip the read pointer
+ * update after receipt of a SCIF_EXIT_ACK
+ */
+ if (msg.uop == SCIF_EXIT_ACK)
+ break;
+ scif_rb_update_read_ptr(&qp->inbound_q);
+ } while (1);
+}
+
+/**
+ * scif_loopb_wq_handler - Loopback Workqueue Handler.
+ * @work: loop back work
+ *
+ * This routine runs on the loopback workqueue. It grabs the recv lock,
+ * dequeues any available message from the head of the loopback message
+ * list, calls the node QP message handler, waits for it to return, then
+ * frees the message and dequeues more elements from the list if
+ * available.
+ */
+static void scif_loopb_wq_handler(struct work_struct *unused)
+{
+ struct scif_dev *scifdev = scif_info.loopb_dev;
+ struct scif_qp *qp = scifdev->qpairs;
+ struct scif_loopb_msg *msg;
+
+ do {
+ msg = NULL;
+ spin_lock(&qp->recv_lock);
+ if (!list_empty(&scif_info.loopb_recv_q)) {
+ msg = list_first_entry(&scif_info.loopb_recv_q,
+ struct scif_loopb_msg,
+ list);
+ list_del(&msg->list);
+ }
+ spin_unlock(&qp->recv_lock);
+
+ if (msg) {
+ scif_nodeqp_msg_handler(scifdev, qp, &msg->msg);
+ kfree(msg);
+ }
+ } while (msg);
+}
+
+/**
+ * scif_loopb_msg_handler() - Handler for loopback messages
+ * @scifdev: SCIF device
+ * @qp: Queue pair.
+ *
+ * This routine is triggered when a loopback message is received.
+ *
+ * Receiving Node QP messages on a loopback SCIF device requires special
+ * handling: two workqueues are used instead of one.
+ *
+ * The reason we need the extra workqueue which is not required with *normal*
+ * non-loopback SCIF devices is the potential classic deadlock described below:
+ *
+ * Thread A tries to send a message on a loopback SCIF device and blocks
+ * since there is no space in the RB, while holding the send_lock or some
+ * other lock (call it lock X).
+ *
+ * Thread B: The Loopback Node QP message receive workqueue receives the message
+ * and tries to send a message (e.g. an ACK) to the loopback SCIF device. It tries
+ * to grab the send lock again or lock X and deadlocks with Thread A. The RB
+ * cannot be drained any further due to this classic deadlock.
+ *
+ * In order to avoid deadlocks as mentioned above we have an extra level of
+ * indirection achieved by having two workqueues.
+ * 1) The first workqueue whose handler is scif_loopb_msg_handler reads
+ * messages from the Node QP RB, adds them to a list and queues work for the
+ * second workqueue.
+ *
+ * 2) The second workqueue whose handler is scif_loopb_wq_handler dequeues
+ * messages from the list, handles them, frees up the memory and dequeues
+ * more elements from the list if possible.
+ */
+int
+scif_loopb_msg_handler(struct scif_dev *scifdev, struct scif_qp *qp)
+{
+ int read_size;
+ struct scif_loopb_msg *msg;
+
+ do {
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ read_size = scif_rb_get_next(&qp->inbound_q, &msg->msg,
+ sizeof(struct scifmsg));
+ if (read_size != sizeof(struct scifmsg)) {
+ kfree(msg);
+ scif_rb_update_read_ptr(&qp->inbound_q);
+ break;
+ }
+ spin_lock(&qp->recv_lock);
+ list_add_tail(&msg->list, &scif_info.loopb_recv_q);
+ spin_unlock(&qp->recv_lock);
+ queue_work(scif_info.loopb_wq, &scif_info.loopb_work);
+ scif_rb_update_read_ptr(&qp->inbound_q);
+ } while (read_size == sizeof(struct scifmsg));
+ return read_size;
+}
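+
+/*
+ * Illustrative sketch (not driver code) of the two-workqueue scheme
+ * described above, reduced to "drain in one context, handle in another":
+ *
+ *	drain context (scif_loopb_msg_handler):
+ *		read message from the RB into a list element;
+ *		append it to loopb_recv_q;
+ *		queue_work(loopb_wq, &loopb_work);
+ *
+ *	handle context (scif_loopb_wq_handler):
+ *		pop a message off loopb_recv_q;
+ *		handle it, possibly blocking on RB space to send a reply;
+ *
+ * The handler may block in scif_nodeqp_send() without deadlocking because
+ * the drain context keeps emptying the RB independently.
+ */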
+
+/**
+ * scif_setup_loopback_qp - One time setup work for Loopback Node Qp.
+ * @scifdev: SCIF device
+ *
+ * Sets up the required loopback workqueues, queue pairs and ring buffers
+ */
+int scif_setup_loopback_qp(struct scif_dev *scifdev)
+{
+ int err = 0;
+ void *local_q;
+ struct scif_qp *qp;
+ struct scif_peer_dev *spdev;
+
+ err = scif_setup_intr_wq(scifdev);
+ if (err)
+ goto exit;
+ INIT_LIST_HEAD(&scif_info.loopb_recv_q);
+ snprintf(scif_info.loopb_wqname, sizeof(scif_info.loopb_wqname),
+ "SCIF LOOPB %d", scifdev->node);
+ scif_info.loopb_wq =
+ alloc_ordered_workqueue(scif_info.loopb_wqname, 0);
+ if (!scif_info.loopb_wq) {
+ err = -ENOMEM;
+ goto destroy_intr;
+ }
+ INIT_WORK(&scif_info.loopb_work, scif_loopb_wq_handler);
+ /* Allocate Self Qpair */
+ scifdev->qpairs = kzalloc(sizeof(*scifdev->qpairs), GFP_KERNEL);
+ if (!scifdev->qpairs) {
+ err = -ENOMEM;
+ goto destroy_loopb_wq;
+ }
+
+ qp = scifdev->qpairs;
+ qp->magic = SCIFEP_MAGIC;
+ spin_lock_init(&qp->send_lock);
+ spin_lock_init(&qp->recv_lock);
+
+ local_q = kzalloc(SCIF_NODE_QP_SIZE, GFP_KERNEL);
+ if (!local_q) {
+ err = -ENOMEM;
+ goto free_qpairs;
+ }
+ /*
+ * For loopback the inbound_q and outbound_q are essentially the same
+ * since the Node sends a message on the loopback interface to the
+ * outbound_q which is then received on the inbound_q.
+ */
+ scif_rb_init(&qp->outbound_q,
+ &qp->local_read,
+ &qp->local_write,
+ local_q, get_count_order(SCIF_NODE_QP_SIZE));
+
+ scif_rb_init(&qp->inbound_q,
+ &qp->local_read,
+ &qp->local_write,
+ local_q, get_count_order(SCIF_NODE_QP_SIZE));
+ scif_info.nodeid = scifdev->node;
+ spdev = scif_peer_register_device(scifdev);
+ if (IS_ERR(spdev)) {
+ err = PTR_ERR(spdev);
+ goto free_local_q;
+ }
+ scif_info.loopb_dev = scifdev;
+ return err;
+free_local_q:
+ kfree(local_q);
+free_qpairs:
+ kfree(scifdev->qpairs);
+destroy_loopb_wq:
+ destroy_workqueue(scif_info.loopb_wq);
+destroy_intr:
+ scif_destroy_intr_wq(scifdev);
+exit:
+ return err;
+}
+
+/**
+ * scif_destroy_loopback_qp - One time uninit work for Loopback Node Qp
+ * @scifdev: SCIF device
+ *
+ * Destroys the workqueues and frees up the Ring Buffer and Queue Pair memory.
+ */
+int scif_destroy_loopback_qp(struct scif_dev *scifdev)
+{
+ struct scif_peer_dev *spdev;
+
+ rcu_read_lock();
+ spdev = rcu_dereference(scifdev->spdev);
+ rcu_read_unlock();
+ if (spdev)
+ scif_peer_unregister_device(spdev);
+ destroy_workqueue(scif_info.loopb_wq);
+ scif_destroy_intr_wq(scifdev);
+ kfree(scifdev->qpairs->outbound_q.rb_base);
+ kfree(scifdev->qpairs);
+ scifdev->sdev = NULL;
+ scif_info.loopb_dev = NULL;
+ return 0;
+}
+
+void scif_destroy_p2p(struct scif_dev *scifdev)
+{
+ struct scif_dev *peer_dev;
+ struct scif_p2p_info *p2p;
+ struct list_head *pos, *tmp;
+ int bd;
+
+ mutex_lock(&scif_info.conflock);
+ /* Free P2P mappings in the given node for all its peer nodes */
+ list_for_each_safe(pos, tmp, &scifdev->p2p) {
+ p2p = list_entry(pos, struct scif_p2p_info, ppi_list);
+ dma_unmap_sg(&scifdev->sdev->dev, p2p->ppi_sg[SCIF_PPI_MMIO],
+ p2p->sg_nentries[SCIF_PPI_MMIO],
+ DMA_BIDIRECTIONAL);
+ dma_unmap_sg(&scifdev->sdev->dev, p2p->ppi_sg[SCIF_PPI_APER],
+ p2p->sg_nentries[SCIF_PPI_APER],
+ DMA_BIDIRECTIONAL);
+ scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_MMIO]);
+ scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_APER]);
+ list_del(pos);
+ kfree(p2p);
+ }
+
+ /* Free P2P mapping created in the peer nodes for the given node */
+ for (bd = SCIF_MGMT_NODE + 1; bd <= scif_info.maxid; bd++) {
+ peer_dev = &scif_dev[bd];
+ list_for_each_safe(pos, tmp, &peer_dev->p2p) {
+ p2p = list_entry(pos, struct scif_p2p_info, ppi_list);
+ if (p2p->ppi_peer_id == scifdev->node) {
+ dma_unmap_sg(&peer_dev->sdev->dev,
+ p2p->ppi_sg[SCIF_PPI_MMIO],
+ p2p->sg_nentries[SCIF_PPI_MMIO],
+ DMA_BIDIRECTIONAL);
+ dma_unmap_sg(&peer_dev->sdev->dev,
+ p2p->ppi_sg[SCIF_PPI_APER],
+ p2p->sg_nentries[SCIF_PPI_APER],
+ DMA_BIDIRECTIONAL);
+ scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_MMIO]);
+ scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_APER]);
+ list_del(pos);
+ kfree(p2p);
+ }
+ }
+ }
+ mutex_unlock(&scif_info.conflock);
+}
diff --git a/drivers/misc/mic/scif/scif_nodeqp.h b/drivers/misc/mic/scif/scif_nodeqp.h
new file mode 100644
index 000000000000..6c0ed6783479
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_nodeqp.h
@@ -0,0 +1,183 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#ifndef SCIF_NODEQP
+#define SCIF_NODEQP
+
+#include "scif_rb.h"
+#include "scif_peer_bus.h"
+
+#define SCIF_INIT 1 /* First message sent to the peer node for discovery */
+#define SCIF_EXIT 2 /* Last message from the peer informing intent to exit */
+#define SCIF_EXIT_ACK 3 /* Response to SCIF_EXIT message */
+#define SCIF_NODE_ADD 4 /* Tell online nodes a new node exists */
+#define SCIF_NODE_ADD_ACK 5 /* Confirm to mgmt node sequence is finished */
+#define SCIF_NODE_ADD_NACK 6 /* SCIF_NODE_ADD failed */
+#define SCIF_NODE_REMOVE 7 /* Request to deactivate a SCIF node */
+#define SCIF_NODE_REMOVE_ACK 8 /* Response to a SCIF_NODE_REMOVE message */
+#define SCIF_CNCT_REQ 9 /* Phys addr of a connection request to a port */
+#define SCIF_CNCT_GNT 10 /* Phys addr of a newly granted connection */
+#define SCIF_CNCT_GNTACK 11 /* Acknowledge a connection grant */
+#define SCIF_CNCT_GNTNACK 12 /* Reject a connection grant */
+#define SCIF_CNCT_REJ 13 /* Reject a connection request */
+#define SCIF_DISCNCT 14 /* Notify peer that connection is being terminated */
+#define SCIF_DISCNT_ACK 15 /* Acknowledge connection termination */
+#define SCIF_CLIENT_SENT 16 /* Notify the peer that data has been written */
+#define SCIF_CLIENT_RCVD 17 /* Notify the peer that data has been read */
+#define SCIF_GET_NODE_INFO 18 /* Get current node info from the mgmt node */
+#define SCIF_MAX_MSG SCIF_GET_NODE_INFO
+
+/*
+ * struct scifmsg - Node QP message format
+ *
+ * @src: Source information
+ * @dst: Destination information
+ * @uop: The message opcode
+ * @payload: Unique payload format for each message
+ */
+struct scifmsg {
+ struct scif_port_id src;
+ struct scif_port_id dst;
+ u32 uop;
+ u64 payload[4];
+} __packed;
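+
+/*
+ * Illustrative example (not driver code; assumes a caller-provided
+ * completion): a node asks the mgmt node for node info by sending
+ * SCIF_GET_NODE_INFO with a completion pointer in payload[3], which
+ * scif_get_node_info_resp() completes on the reply:
+ *
+ *	struct scifmsg msg = { 0 };
+ *	DECLARE_COMPLETION_ONSTACK(done);
+ *
+ *	msg.uop = SCIF_GET_NODE_INFO;
+ *	msg.src.node = scif_info.nodeid;
+ *	msg.dst.node = SCIF_MGMT_NODE;
+ *	msg.payload[3] = (u64)&done;
+ *	scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], &msg);
+ *	wait_for_completion(&done);
+ */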
+
+/*
+ * struct scif_qp - Node Queue Pair
+ *
+ * This structure is a little subtle: we can only write across the PCIe
+ * bus, so any read/write pointer we need to read must be local. We only
+ * need to read the read pointer on the inbound_q and the write pointer
+ * on the outbound_q.
+ *
+ * @magic: Magic value to ensure the peer sees the QP correctly
+ * @outbound_q: The outbound ring buffer for sending messages
+ * @inbound_q: The inbound ring buffer for receiving messages
+ * @local_write: Local write index
+ * @local_read: Local read index
+ * @remote_qp: The remote queue pair
+ * @local_buf: DMA address of local ring buffer
+ * @local_qp: DMA address of the local queue pair data structure
+ * @remote_buf: DMA address of remote ring buffer
+ * @qp_state: QP state i.e. online or offline used for P2P
+ * @send_lock: synchronize access to outbound queue
+ * @recv_lock: Synchronize access to inbound queue
+ */
+struct scif_qp {
+ u64 magic;
+#define SCIFEP_MAGIC 0x5c1f000000005c1fULL
+ struct scif_rb outbound_q;
+ struct scif_rb inbound_q;
+
+ u32 local_write __aligned(64);
+ u32 local_read __aligned(64);
+ struct scif_qp *remote_qp;
+ dma_addr_t local_buf;
+ dma_addr_t local_qp;
+ dma_addr_t remote_buf;
+ u32 qp_state;
+#define SCIF_QP_OFFLINE 0xdead
+#define SCIF_QP_ONLINE 0xc0de
+ spinlock_t send_lock;
+ spinlock_t recv_lock;
+};
+
+/*
+ * struct scif_loopb_msg - An element in the loopback Node QP message list.
+ *
+ * @msg: The SCIF node QP message
+ * @list: Link in the list of messages
+ */
+struct scif_loopb_msg {
+ struct scifmsg msg;
+ struct list_head list;
+};
+
+int scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg);
+int _scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg);
+void scif_nodeqp_intrhandler(struct scif_dev *scifdev, struct scif_qp *qp);
+int scif_loopb_msg_handler(struct scif_dev *scifdev, struct scif_qp *qp);
+int scif_setup_qp(struct scif_dev *scifdev);
+int scif_qp_response(phys_addr_t phys, struct scif_dev *dev);
+int scif_setup_qp_connect(struct scif_qp *qp, dma_addr_t *qp_offset,
+ int local_size, struct scif_dev *scifdev);
+int scif_setup_qp_accept(struct scif_qp *qp, dma_addr_t *qp_offset,
+ dma_addr_t phys, int local_size,
+ struct scif_dev *scifdev);
+int scif_setup_qp_connect_response(struct scif_dev *scifdev,
+ struct scif_qp *qp, u64 payload);
+int scif_setup_loopback_qp(struct scif_dev *scifdev);
+int scif_destroy_loopback_qp(struct scif_dev *scifdev);
+void scif_poll_qp_state(struct work_struct *work);
+void scif_qp_response_ack(struct work_struct *work);
+void scif_destroy_p2p(struct scif_dev *scifdev);
+void scif_send_exit(struct scif_dev *scifdev);
+static inline struct device *scif_get_peer_dev(struct scif_dev *scifdev)
+{
+ struct scif_peer_dev *spdev;
+ struct device *spdev_ret;
+
+ rcu_read_lock();
+ spdev = rcu_dereference(scifdev->spdev);
+ if (spdev)
+ spdev_ret = get_device(&spdev->dev);
+ else
+ spdev_ret = ERR_PTR(-ENODEV);
+ rcu_read_unlock();
+ return spdev_ret;
+}
+
+static inline void scif_put_peer_dev(struct device *dev)
+{
+ put_device(dev);
+}
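+
+/*
+ * Typical (illustrative) pairing of the two helpers above:
+ *
+ *	struct device *dev = scif_get_peer_dev(scifdev);
+ *
+ *	if (!IS_ERR(dev)) {
+ *		... use the peer device ...
+ *		scif_put_peer_dev(dev);
+ *	}
+ */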
+#endif /* SCIF_NODEQP */
diff --git a/drivers/misc/mic/scif/scif_peer_bus.c b/drivers/misc/mic/scif/scif_peer_bus.c
new file mode 100644
index 000000000000..589ae9ad2501
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_peer_bus.c
@@ -0,0 +1,124 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ */
+#include "scif_main.h"
+#include "../bus/scif_bus.h"
+#include "scif_peer_bus.h"
+
+static inline struct scif_peer_dev *
+dev_to_scif_peer(struct device *dev)
+{
+ return container_of(dev, struct scif_peer_dev, dev);
+}
+
+static inline struct scif_peer_driver *
+drv_to_scif_peer(struct device_driver *drv)
+{
+ return container_of(drv, struct scif_peer_driver, driver);
+}
+
+static int scif_peer_dev_match(struct device *dv, struct device_driver *dr)
+{
+ return !strncmp(dev_name(dv), dr->name, 4);
+}
+
+static int scif_peer_dev_probe(struct device *d)
+{
+ struct scif_peer_dev *dev = dev_to_scif_peer(d);
+ struct scif_peer_driver *drv = drv_to_scif_peer(dev->dev.driver);
+
+ return drv->probe(dev);
+}
+
+static int scif_peer_dev_remove(struct device *d)
+{
+ struct scif_peer_dev *dev = dev_to_scif_peer(d);
+ struct scif_peer_driver *drv = drv_to_scif_peer(dev->dev.driver);
+
+ drv->remove(dev);
+ return 0;
+}
+
+static struct bus_type scif_peer_bus = {
+ .name = "scif_peer_bus",
+ .match = scif_peer_dev_match,
+ .probe = scif_peer_dev_probe,
+ .remove = scif_peer_dev_remove,
+};
+
+int scif_peer_register_driver(struct scif_peer_driver *driver)
+{
+ driver->driver.bus = &scif_peer_bus;
+ return driver_register(&driver->driver);
+}
+
+void scif_peer_unregister_driver(struct scif_peer_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+
+static void scif_peer_release_dev(struct device *d)
+{
+ struct scif_peer_dev *sdev = dev_to_scif_peer(d);
+ struct scif_dev *scifdev = &scif_dev[sdev->dnode];
+
+ scif_cleanup_scifdev(scifdev);
+ kfree(sdev);
+}
+
+struct scif_peer_dev *
+scif_peer_register_device(struct scif_dev *scifdev)
+{
+ int ret;
+ struct scif_peer_dev *spdev;
+
+ spdev = kzalloc(sizeof(*spdev), GFP_KERNEL);
+ if (!spdev)
+ return ERR_PTR(-ENOMEM);
+
+ spdev->dev.parent = scifdev->sdev->dev.parent;
+ spdev->dev.release = scif_peer_release_dev;
+ spdev->dnode = scifdev->node;
+ spdev->dev.bus = &scif_peer_bus;
+
+ dev_set_name(&spdev->dev, "scif_peer-dev%u", spdev->dnode);
+ /*
+ * device_register() causes the bus infrastructure to look for a
+ * matching driver.
+ */
+ ret = device_register(&spdev->dev);
+ if (ret)
+ goto free_spdev;
+ return spdev;
+free_spdev:
+ kfree(spdev);
+ return ERR_PTR(ret);
+}
+
+void scif_peer_unregister_device(struct scif_peer_dev *sdev)
+{
+ device_unregister(&sdev->dev);
+}
+
+int scif_peer_bus_init(void)
+{
+ return bus_register(&scif_peer_bus);
+}
+
+void scif_peer_bus_exit(void)
+{
+ bus_unregister(&scif_peer_bus);
+}
diff --git a/drivers/misc/mic/scif/scif_peer_bus.h b/drivers/misc/mic/scif/scif_peer_bus.h
new file mode 100644
index 000000000000..33f0dbb30152
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_peer_bus.h
@@ -0,0 +1,65 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ */
+#ifndef _SCIF_PEER_BUS_H_
+#define _SCIF_PEER_BUS_H_
+
+#include <linux/device.h>
+#include <linux/mic_common.h>
+
+/*
+ * Peer devices show up as PCIe devices for the mgmt node but not the cards.
+ * The mgmt node discovers all the cards on the PCIe bus and informs the other
+ * cards about their peers. Upon notification of a peer, a node adds a peer
+ * device to the peer bus to maintain symmetry in the way devices are
+ * discovered across all nodes in the SCIF network.
+ */
+/**
+ * scif_peer_dev - representation of a peer SCIF device
+ * @dev: underlying device
+ * @dnode: The destination node with which this device communicates.
+ */
+struct scif_peer_dev {
+ struct device dev;
+ u8 dnode;
+};
+
+/**
+ * scif_peer_driver - operations for a scif_peer I/O driver
+ * @driver: underlying device driver (populate name and owner).
+ * @id_table: the ids serviced by this driver.
+ * @probe: the function to call when a device is found. Returns 0 or -errno.
+ * @remove: the function to call when a device is removed.
+ */
+struct scif_peer_driver {
+ struct device_driver driver;
+ const struct scif_peer_dev_id *id_table;
+
+ int (*probe)(struct scif_peer_dev *dev);
+ void (*remove)(struct scif_peer_dev *dev);
+};
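+
+/*
+ * Minimal (hypothetical) peer driver skeleton for this bus; my_probe and
+ * my_remove are placeholder callbacks, not part of the driver:
+ *
+ *	static struct scif_peer_driver my_driver = {
+ *		.driver.name	= "scif_peer",
+ *		.driver.owner	= THIS_MODULE,
+ *		.probe		= my_probe,
+ *		.remove		= my_remove,
+ *	};
+ *
+ *	err = scif_peer_register_driver(&my_driver);
+ */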
+
+struct scif_dev;
+
+int scif_peer_register_driver(struct scif_peer_driver *driver);
+void scif_peer_unregister_driver(struct scif_peer_driver *driver);
+
+struct scif_peer_dev *scif_peer_register_device(struct scif_dev *sdev);
+void scif_peer_unregister_device(struct scif_peer_dev *sdev);
+
+int scif_peer_bus_init(void);
+void scif_peer_bus_exit(void);
+#endif /* _SCIF_PEER_BUS_H_ */
diff --git a/drivers/misc/mic/scif/scif_ports.c b/drivers/misc/mic/scif/scif_ports.c
new file mode 100644
index 000000000000..594e18d279d8
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_ports.c
@@ -0,0 +1,124 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#include <linux/idr.h>
+
+#include "scif_main.h"
+
+#define SCIF_PORT_COUNT 0x10000 /* Ports available */
+
+struct idr scif_ports;
+
+/*
+ * struct scif_port - SCIF port information
+ *
+ * @ref_cnt - Reference count since there can be multiple endpoints
+ * created via scif_accept(..) simultaneously using a port.
+ */
+struct scif_port {
+ int ref_cnt;
+};
+
+/**
+ * __scif_get_port - Reserve a port # for SCIF in the range [start, end)
+ * and add it to the global list.
+ * @start: lowest port # that may be allocated (inclusive)
+ * @end: one past the highest port # that may be allocated
+ *
+ * @return : Allocated SCIF port #, or -ENOSPC if port unavailable.
+ * On memory allocation failure, returns -ENOMEM.
+ */
+static int __scif_get_port(int start, int end)
+{
+ int id;
+ struct scif_port *port = kzalloc(sizeof(*port), GFP_ATOMIC);
+
+ if (!port)
+ return -ENOMEM;
+ spin_lock(&scif_info.port_lock);
+ id = idr_alloc(&scif_ports, port, start, end, GFP_ATOMIC);
+ if (id >= 0)
+ port->ref_cnt++;
+ spin_unlock(&scif_info.port_lock);
+ return id;
+}
+
+/**
+ * scif_rsrv_port - Reserve a specified port # for SCIF.
+ * @port : port # to be reserved.
+ *
+ * @return : Allocated SCIF port #, or -ENOSPC if port unavailable.
+ * On memory allocation failure, returns -ENOMEM.
+ */
+int scif_rsrv_port(u16 port)
+{
+ return __scif_get_port(port, port + 1);
+}
+
+/**
+ * scif_get_new_port - Get and reserve any port # for SCIF in the range
+ * SCIF_PORT_RSVD + 1 to SCIF_PORT_COUNT - 1.
+ *
+ * @return : Allocated SCIF port #, or -ENOSPC if no ports available.
+ * On memory allocation failure, returns -ENOMEM.
+ */
+int scif_get_new_port(void)
+{
+ return __scif_get_port(SCIF_PORT_RSVD + 1, SCIF_PORT_COUNT);
+}
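+
+/*
+ * Illustrative lifecycle (not driver code): a caller typically grabs an
+ * ephemeral port and releases its reference when done:
+ *
+ *	int port = scif_get_new_port();
+ *
+ *	if (port < 0)
+ *		return port;
+ *	...
+ *	scif_put_port(port);
+ */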
+
+/**
+ * scif_get_port - Increment the reference count for a SCIF port
+ * @id : SCIF port
+ *
+ * @return : None
+ */
+void scif_get_port(u16 id)
+{
+ struct scif_port *port;
+
+ if (!id)
+ return;
+ spin_lock(&scif_info.port_lock);
+ port = idr_find(&scif_ports, id);
+ if (port)
+ port->ref_cnt++;
+ spin_unlock(&scif_info.port_lock);
+}
+
+/**
+ * scif_put_port - Release a reserved SCIF port
+ * @id : SCIF port to be released.
+ *
+ * @return : None
+ */
+void scif_put_port(u16 id)
+{
+ struct scif_port *port;
+
+ if (!id)
+ return;
+ spin_lock(&scif_info.port_lock);
+ port = idr_find(&scif_ports, id);
+ if (port) {
+ port->ref_cnt--;
+ if (!port->ref_cnt) {
+ idr_remove(&scif_ports, id);
+ kfree(port);
+ }
+ }
+ spin_unlock(&scif_info.port_lock);
+}
diff --git a/drivers/misc/mic/scif/scif_rb.c b/drivers/misc/mic/scif/scif_rb.c
new file mode 100644
index 000000000000..637cc4686742
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_rb.c
@@ -0,0 +1,249 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#include <linux/circ_buf.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+
+#include "scif_rb.h"
+
+#define scif_rb_ring_cnt(head, tail, size) CIRC_CNT(head, tail, size)
+#define scif_rb_ring_space(head, tail, size) CIRC_SPACE(head, tail, size)
+
+/**
+ * scif_rb_init - Initializes the ring buffer
+ * @rb: ring buffer
+ * @read_ptr: A pointer to the read offset
+ * @write_ptr: A pointer to the write offset
+ * @rb_base: A pointer to the base of the ring buffer
+ * @size: log2 of the ring buffer size in bytes (the buffer is 1 << size bytes)
+ */
+void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
+ void *rb_base, u8 size)
+{
+ rb->rb_base = rb_base;
+ rb->size = (1 << size);
+ rb->read_ptr = read_ptr;
+ rb->write_ptr = write_ptr;
+ rb->current_read_offset = *read_ptr;
+ rb->current_write_offset = *write_ptr;
+}
+
+/* Copies a message to the ring buffer -- handles the wrap around case */
+static void memcpy_torb(struct scif_rb *rb, void *header,
+ void *msg, u32 size)
+{
+ u32 size1, size2;
+
+ if (header + size >= rb->rb_base + rb->size) {
+ /* Need to call two copies if it wraps around */
+ size1 = (u32)(rb->rb_base + rb->size - header);
+ size2 = size - size1;
+ memcpy_toio((void __iomem __force *)header, msg, size1);
+ memcpy_toio((void __iomem __force *)rb->rb_base,
+ msg + size1, size2);
+ } else {
+ memcpy_toio((void __iomem __force *)header, msg, size);
+ }
+}
+
+/* Copies a message from the ring buffer -- handles the wrap around case */
+static void memcpy_fromrb(struct scif_rb *rb, void *header,
+ void *msg, u32 size)
+{
+ u32 size1, size2;
+
+ if (header + size >= rb->rb_base + rb->size) {
+ /* Need to call two copies if it wraps around */
+ size1 = (u32)(rb->rb_base + rb->size - header);
+ size2 = size - size1;
+ memcpy_fromio(msg, (void __iomem __force *)header, size1);
+ memcpy_fromio(msg + size1,
+ (void __iomem __force *)rb->rb_base, size2);
+ } else {
+ memcpy_fromio(msg, (void __iomem __force *)header, size);
+ }
+}
+
+/**
+ * scif_rb_space - Query space available for writing to the RB
+ * @rb: ring buffer
+ *
+ * Return: size available for writing to RB in bytes.
+ */
+u32 scif_rb_space(struct scif_rb *rb)
+{
+ rb->current_read_offset = *rb->read_ptr;
+ /*
+ * Update from the HW read pointer only once the peer has exposed the
+ * new empty slot. This barrier is paired with the memory barrier
+ * scif_rb_update_read_ptr()
+ */
+ mb();
+ return scif_rb_ring_space(rb->current_write_offset,
+ rb->current_read_offset, rb->size);
+}
+
+/**
+ * scif_rb_write - Write a message to the RB
+ * @rb: ring buffer
+ * @msg: buffer holding the message; must be at least size bytes long
+ * @size: the size (in bytes) to be copied to the RB
+ *
+ * This API does not block if there isn't enough space in the RB.
+ * Returns: 0 on success or -ENOMEM on failure
+ */
+int scif_rb_write(struct scif_rb *rb, void *msg, u32 size)
+{
+ void *header;
+
+ if (scif_rb_space(rb) < size)
+ return -ENOMEM;
+ header = rb->rb_base + rb->current_write_offset;
+ memcpy_torb(rb, header, msg, size);
+ /*
+ * Only update the local write offset here; the shared write
+ * pointer is not updated until scif_rb_commit().
+ */
+ rb->current_write_offset =
+ (rb->current_write_offset + size) & (rb->size - 1);
+ return 0;
+}
+
+/**
+ * scif_rb_commit - Commit the message so the peer can fetch it
+ * @rb: ring buffer
+ */
+void scif_rb_commit(struct scif_rb *rb)
+{
+ /*
+ * We must ensure ordering between all the data committed
+ * previously and the write_ptr update that exposes the new
+ * message to the peer. This write barrier is paired with
+ * the read barrier in scif_rb_count(..).
+ */
+ wmb();
+ ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
+#ifdef CONFIG_INTEL_MIC_CARD
+ /*
+ * X100 Si bug: For the case where a Core is performing an EXT_WR
+ * followed by a Doorbell Write, the Core must perform two EXT_WR to the
+ * same address with the same data before it does the Doorbell Write.
+ * This way, if ordering is violated for the Interrupt Message, it will
+ * fall just behind the first Posted associated with the first EXT_WR.
+ */
+ ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
+#endif
+}
+
+/**
+ * scif_rb_get - Get the next message from the ring buffer
+ * @rb: ring buffer
+ * @size: Number of bytes to be read
+ *
+ * Return: NULL if no bytes to be read from the ring buffer, otherwise the
+ * pointer to the next byte
+ */
+static void *scif_rb_get(struct scif_rb *rb, u32 size)
+{
+ void *header = NULL;
+
+ if (scif_rb_count(rb, size) >= size)
+ header = rb->rb_base + rb->current_read_offset;
+ return header;
+}
+
+/**
+ * scif_rb_get_next - Read from ring buffer.
+ * @rb: ring buffer
+ * @msg: buffer to hold the message. Must be at least size bytes long
+ * @size: Number of bytes to be read
+ *
+ * Return: number of bytes read if available bytes are >= size, otherwise
+ * returns zero.
+ */
+u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size)
+{
+ void *header = NULL;
+ int read_size = 0;
+
+ header = scif_rb_get(rb, size);
+ if (header) {
+ u32 next_cmd_offset =
+ (rb->current_read_offset + size) & (rb->size - 1);
+
+ read_size = size;
+ rb->current_read_offset = next_cmd_offset;
+ memcpy_fromrb(rb, header, msg, size);
+ }
+ return read_size;
+}
+
+/**
+ * scif_rb_update_read_ptr - Update the read pointer so the peer can reuse
+ * the space
+ * @rb: ring buffer
+ */
+void scif_rb_update_read_ptr(struct scif_rb *rb)
+{
+ u32 new_offset;
+
+ new_offset = rb->current_read_offset;
+ /*
+ * We must ensure ordering between all the data committed or read
+ * previously and the read_ptr update that exposes the empty slot
+ * to the peer. This barrier is paired with the memory barrier in
+ * scif_rb_space(..).
+ */
+ mb();
+ ACCESS_ONCE(*rb->read_ptr) = new_offset;
+#ifdef CONFIG_INTEL_MIC_CARD
+ /*
+ * X100 Si Bug: For the case where a Core is performing an EXT_WR
+ * followed by a Doorbell Write, the Core must perform two EXT_WR to the
+ * same address with the same data before it does the Doorbell Write.
+ * This way, if ordering is violated for the Interrupt Message, it will
+ * fall just behind the first Posted associated with the first EXT_WR.
+ */
+ ACCESS_ONCE(*rb->read_ptr) = new_offset;
+#endif
+}
+
+/**
+ * scif_rb_count - Query the number of bytes available to read
+ * @rb: ring buffer
+ * @size: Number of bytes expected to be read
+ *
+ * Return: number of bytes that can be read from the RB
+ */
+u32 scif_rb_count(struct scif_rb *rb, u32 size)
+{
+ if (scif_rb_ring_cnt(rb->current_write_offset,
+ rb->current_read_offset,
+ rb->size) < size) {
+ rb->current_write_offset = *rb->write_ptr;
+ /*
+ * The cached count was insufficient, so refresh from the HW write
+ * pointer once the peer has exposed the new message. This read
+ * barrier is paired with the write barrier in scif_rb_commit(..).
+ */
+ smp_rmb();
+ }
+ return scif_rb_ring_cnt(rb->current_write_offset,
+ rb->current_read_offset,
+ rb->size);
+}
diff --git a/drivers/misc/mic/scif/scif_rb.h b/drivers/misc/mic/scif/scif_rb.h
new file mode 100644
index 000000000000..166dffe3093d
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_rb.h
@@ -0,0 +1,100 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel SCIF driver.
+ */
+#ifndef SCIF_RB_H
+#define SCIF_RB_H
+/*
+ * This file describes a general purpose, byte based ring buffer. Writers to the
+ * ring buffer need to synchronize using a lock. The same is true for readers,
+ * although in practice, the ring buffer has a single reader. It is lockless
+ * between producer and consumer so it can handle being used across the PCIe
+ * bus. The ring buffer ensures that there are no reads across the PCIe bus for
+ * performance reasons. Two of these are used to form a single bidirectional
+ * queue-pair across PCIe.
+ */
+/*
+ * struct scif_rb - SCIF Ring Buffer
+ *
+ * @rb_base: The base of the memory used for storing RB messages
+ * @read_ptr: Pointer to the read offset
+ * @write_ptr: Pointer to the write offset
+ * @size: Size of the memory in rb_base
+ * @current_read_offset: Cached read offset for performance
+ * @current_write_offset: Cached write offset for performance
+ */
+struct scif_rb {
+ void *rb_base;
+ u32 *read_ptr;
+ u32 *write_ptr;
+ u32 size;
+ u32 current_read_offset;
+ u32 current_write_offset;
+};
+
+/* methods used by both */
+void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
+ void *rb_base, u8 size);
+/* writer only methods */
+/* write a new command, then scif_rb_commit() */
+int scif_rb_write(struct scif_rb *rb, void *msg, u32 size);
+/* after write(), then scif_rb_commit() */
+void scif_rb_commit(struct scif_rb *rb);
+/* query space available for writing to a RB. */
+u32 scif_rb_space(struct scif_rb *rb);
+
+/* reader only methods */
+/* read a new message from the ring buffer of size bytes */
+u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size);
+/* update the read pointer so that the space can be reused */
+void scif_rb_update_read_ptr(struct scif_rb *rb);
+/* count the number of bytes that can be read */
+u32 scif_rb_count(struct scif_rb *rb, u32 size);
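+
+/*
+ * Illustrative producer/consumer usage of the API above (locking and error
+ * handling elided; handle() is a hypothetical consumer):
+ *
+ *	writer:
+ *		if (!scif_rb_write(rb, &msg, sizeof(msg)))
+ *			scif_rb_commit(rb);
+ *
+ *	reader:
+ *		while (scif_rb_get_next(rb, &msg, sizeof(msg)))
+ *			handle(&msg);
+ *		scif_rb_update_read_ptr(rb);
+ */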
+#endif
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index eeaaf5fca105..15c33cc34a80 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -18,23 +18,20 @@
* MA 02110-1301, USA.
*/
-#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/clk.h>
-#include <linux/err.h>
+#include <linux/genalloc.h>
#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/list.h>
#include <linux/list_sort.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/genalloc.h>
#define SRAM_GRANULARITY 32
struct sram_dev {
+ struct device *dev;
+ void __iomem *virt_base;
+
struct gen_pool *pool;
struct clk *clk;
};
@@ -54,62 +51,27 @@ static int sram_reserve_cmp(void *priv, struct list_head *a,
return ra->start - rb->start;
}
-static int sram_probe(struct platform_device *pdev)
+static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
{
- void __iomem *virt_base;
- struct sram_dev *sram;
- struct resource *res;
- struct device_node *np = pdev->dev.of_node, *child;
+ struct device_node *np = sram->dev->of_node, *child;
unsigned long size, cur_start, cur_size;
struct sram_reserve *rblocks, *block;
struct list_head reserve_list;
unsigned int nblocks;
- int ret;
+ int ret = 0;
INIT_LIST_HEAD(&reserve_list);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "found no memory resource\n");
- return -EINVAL;
- }
-
size = resource_size(res);
- if (!devm_request_mem_region(&pdev->dev,
- res->start, size, pdev->name)) {
- dev_err(&pdev->dev, "could not request region for resource\n");
- return -EBUSY;
- }
-
- virt_base = devm_ioremap_wc(&pdev->dev, res->start, size);
- if (IS_ERR(virt_base))
- return PTR_ERR(virt_base);
-
- sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
- if (!sram)
- return -ENOMEM;
-
- sram->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(sram->clk))
- sram->clk = NULL;
- else
- clk_prepare_enable(sram->clk);
-
- sram->pool = devm_gen_pool_create(&pdev->dev, ilog2(SRAM_GRANULARITY), -1);
- if (!sram->pool)
- return -ENOMEM;
-
/*
* We need an additional block to mark the end of the memory region
* after the reserved blocks from the dt are processed.
*/
nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
rblocks = kmalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL);
- if (!rblocks) {
- ret = -ENOMEM;
- goto err_alloc;
- }
+ if (!rblocks)
+ return -ENOMEM;
block = &rblocks[0];
for_each_available_child_of_node(np, child) {
@@ -117,17 +79,19 @@ static int sram_probe(struct platform_device *pdev)
ret = of_address_to_resource(child, 0, &child_res);
if (ret < 0) {
- dev_err(&pdev->dev,
+ dev_err(sram->dev,
"could not get address for node %s\n",
child->full_name);
+ of_node_put(child);
goto err_chunks;
}
if (child_res.start < res->start || child_res.end > res->end) {
- dev_err(&pdev->dev,
+ dev_err(sram->dev,
"reserved block %s outside the sram area\n",
child->full_name);
ret = -EINVAL;
+ of_node_put(child);
goto err_chunks;
}
@@ -135,9 +99,8 @@ static int sram_probe(struct platform_device *pdev)
block->size = resource_size(&child_res);
list_add_tail(&block->list, &reserve_list);
- dev_dbg(&pdev->dev, "found reserved block 0x%x-0x%x\n",
- block->start,
- block->start + block->size);
+ dev_dbg(sram->dev, "found reserved block 0x%x-0x%x\n",
+ block->start, block->start + block->size);
block++;
}
@@ -154,7 +117,7 @@ static int sram_probe(struct platform_device *pdev)
list_for_each_entry(block, &reserve_list, list) {
/* can only happen if sections overlap */
if (block->start < cur_start) {
- dev_err(&pdev->dev,
+ dev_err(sram->dev,
"block at 0x%x starts after current offset 0x%lx\n",
block->start, cur_start);
ret = -EINVAL;
@@ -174,10 +137,11 @@ static int sram_probe(struct platform_device *pdev)
*/
cur_size = block->start - cur_start;
- dev_dbg(&pdev->dev, "adding chunk 0x%lx-0x%lx\n",
+ dev_dbg(sram->dev, "adding chunk 0x%lx-0x%lx\n",
cur_start, cur_start + cur_size);
+
ret = gen_pool_add_virt(sram->pool,
- (unsigned long)virt_base + cur_start,
+ (unsigned long)sram->virt_base + cur_start,
res->start + cur_start, cur_size, -1);
if (ret < 0)
goto err_chunks;
@@ -186,20 +150,63 @@ static int sram_probe(struct platform_device *pdev)
cur_start = block->start + block->size;
}
+ err_chunks:
kfree(rblocks);
+ return ret;
+}
+
+static int sram_probe(struct platform_device *pdev)
+{
+ struct sram_dev *sram;
+ struct resource *res;
+ size_t size;
+ int ret;
+
+ sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
+ if (!sram)
+ return -ENOMEM;
+
+ sram->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(sram->dev, "found no memory resource\n");
+ return -EINVAL;
+ }
+
+ size = resource_size(res);
+
+ if (!devm_request_mem_region(sram->dev, res->start, size, pdev->name)) {
+ dev_err(sram->dev, "could not request region for resource\n");
+ return -EBUSY;
+ }
+
+ sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size);
+ if (IS_ERR(sram->virt_base))
+ return PTR_ERR(sram->virt_base);
+
+ sram->pool = devm_gen_pool_create(sram->dev,
+ ilog2(SRAM_GRANULARITY), -1);
+ if (!sram->pool)
+ return -ENOMEM;
+
+ ret = sram_reserve_regions(sram, res);
+ if (ret)
+ return ret;
+
+ sram->clk = devm_clk_get(sram->dev, NULL);
+ if (IS_ERR(sram->clk))
+ sram->clk = NULL;
+ else
+ clk_prepare_enable(sram->clk);
+
platform_set_drvdata(pdev, sram);
- dev_dbg(&pdev->dev, "SRAM pool: %ld KiB @ 0x%p\n", size / 1024, virt_base);
+ dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
+ gen_pool_size(sram->pool) / 1024, sram->virt_base);
return 0;
-
-err_chunks:
- kfree(rblocks);
-err_alloc:
- if (sram->clk)
- clk_disable_unprepare(sram->clk);
- return ret;
}
static int sram_remove(struct platform_device *pdev)
@@ -207,7 +214,7 @@ static int sram_remove(struct platform_device *pdev)
struct sram_dev *sram = platform_get_drvdata(pdev);
if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
- dev_dbg(&pdev->dev, "removed while SRAM allocated\n");
+ dev_err(sram->dev, "removed while SRAM allocated\n");
if (sram->clk)
clk_disable_unprepare(sram->clk);
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 18e7a03985d4..5027b8ffae43 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -752,9 +752,8 @@ static struct ti_st_plat_data *get_platform_data(struct device *dev)
int len;
dt_pdata = kzalloc(sizeof(*dt_pdata), GFP_KERNEL);
-
if (!dt_pdata)
- pr_err("Can't allocate device_tree platform data\n");
+ return NULL;
dt_property = of_get_property(np, "dev_name", &len);
if (dt_property)
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 11c7cbdade66..2b77ccf77f81 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -32,6 +32,51 @@ struct mei_nfc_hdr {
u16 data_size;
} __packed;
+struct mei_nfc_cmd {
+ struct mei_nfc_hdr hdr;
+ u8 sub_command;
+ u8 data[];
+} __packed;
+
+struct mei_nfc_reply {
+ struct mei_nfc_hdr hdr;
+ u8 sub_command;
+ u8 reply_status;
+ u8 data[];
+} __packed;
+
+struct mei_nfc_if_version {
+ u8 radio_version_sw[3];
+ u8 reserved[3];
+ u8 radio_version_hw[3];
+ u8 i2c_addr;
+ u8 fw_ivn;
+ u8 vendor_id;
+ u8 radio_type;
+} __packed;
+
+struct mei_nfc_connect {
+ u8 fw_ivn;
+ u8 vendor_id;
+} __packed;
+
+struct mei_nfc_connect_resp {
+ u8 fw_ivn;
+ u8 vendor_id;
+ u16 me_major;
+ u16 me_minor;
+ u16 me_hotfix;
+ u16 me_build;
+} __packed;
+
+
+#define MEI_NFC_CMD_MAINTENANCE 0x00
+#define MEI_NFC_CMD_HCI_SEND 0x01
+#define MEI_NFC_CMD_HCI_RECV 0x02
+
+#define MEI_NFC_SUBCMD_CONNECT 0x00
+#define MEI_NFC_SUBCMD_IF_VERSION 0x01
+
#define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD)
#define MEI_DUMP_SKB_IN(info, skb) \
@@ -45,51 +90,169 @@ do { \
do { \
pr_debug("%s:\n", info); \
print_hex_dump_debug("mei out: ", DUMP_PREFIX_OFFSET, \
- 16, 1, (skb)->data, (skb)->len, false); \
+ 16, 1, (skb)->data, (skb)->len, false); \
} while (0)
-int nfc_mei_phy_enable(void *phy_id)
+#define MEI_DUMP_NFC_HDR(info, _hdr) \
+do { \
+ pr_debug("%s:\n", info); \
+ pr_debug("cmd=%02d status=%d req_id=%d rsvd=%d size=%d\n", \
+ (_hdr)->cmd, (_hdr)->status, (_hdr)->req_id, \
+ (_hdr)->reserved, (_hdr)->data_size); \
+} while (0)
+
+static int mei_nfc_if_version(struct nfc_mei_phy *phy)
{
- int r;
- struct nfc_mei_phy *phy = phy_id;
+ struct mei_nfc_cmd cmd;
+ struct mei_nfc_reply *reply = NULL;
+ struct mei_nfc_if_version *version;
+ size_t if_version_length;
+ int bytes_recv, r;
pr_info("%s\n", __func__);
- if (phy->powered == 1)
- return 0;
+ memset(&cmd, 0, sizeof(struct mei_nfc_cmd));
+ cmd.hdr.cmd = MEI_NFC_CMD_MAINTENANCE;
+ cmd.hdr.data_size = 1;
+ cmd.sub_command = MEI_NFC_SUBCMD_IF_VERSION;
- r = mei_cl_enable_device(phy->device);
+ MEI_DUMP_NFC_HDR("version", &cmd.hdr);
+ r = mei_cl_send(phy->device, (u8 *)&cmd, sizeof(struct mei_nfc_cmd));
if (r < 0) {
- pr_err("Could not enable device\n");
+ pr_err("Could not send IF version cmd\n");
return r;
}
- r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy);
- if (r) {
- pr_err("Event cb registration failed\n");
- mei_cl_disable_device(phy->device);
- phy->powered = 0;
+ /* allocate the reply buffer rather than keeping it on the stack */
+ if_version_length = sizeof(struct mei_nfc_reply) +
+ sizeof(struct mei_nfc_if_version);
- return r;
+ reply = kzalloc(if_version_length, GFP_KERNEL);
+ if (!reply)
+ return -ENOMEM;
+
+ bytes_recv = mei_cl_recv(phy->device, (u8 *)reply, if_version_length);
+ if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
+ pr_err("Could not read IF version\n");
+ r = -EIO;
+ goto err;
}
- phy->powered = 1;
+ version = (struct mei_nfc_if_version *)reply->data;
- return 0;
+ phy->fw_ivn = version->fw_ivn;
+ phy->vendor_id = version->vendor_id;
+ phy->radio_type = version->radio_type;
+ r = 0;
+
+err:
+ kfree(reply);
+ return r;
}
-EXPORT_SYMBOL_GPL(nfc_mei_phy_enable);
-void nfc_mei_phy_disable(void *phy_id)
+static int mei_nfc_connect(struct nfc_mei_phy *phy)
{
- struct nfc_mei_phy *phy = phy_id;
+ struct mei_nfc_cmd *cmd, *reply;
+ struct mei_nfc_connect *connect;
+ struct mei_nfc_connect_resp *connect_resp;
+ size_t connect_length, connect_resp_length;
+ int bytes_recv, r;
pr_info("%s\n", __func__);
- mei_cl_disable_device(phy->device);
+ connect_length = sizeof(struct mei_nfc_cmd) +
+ sizeof(struct mei_nfc_connect);
- phy->powered = 0;
+ connect_resp_length = sizeof(struct mei_nfc_cmd) +
+ sizeof(struct mei_nfc_connect_resp);
+
+ cmd = kzalloc(connect_length, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+ connect = (struct mei_nfc_connect *)cmd->data;
+
+ reply = kzalloc(connect_resp_length, GFP_KERNEL);
+ if (!reply) {
+ kfree(cmd);
+ return -ENOMEM;
+ }
+
+ connect_resp = (struct mei_nfc_connect_resp *)reply->data;
+
+ cmd->hdr.cmd = MEI_NFC_CMD_MAINTENANCE;
+ cmd->hdr.data_size = 3;
+ cmd->sub_command = MEI_NFC_SUBCMD_CONNECT;
+ connect->fw_ivn = phy->fw_ivn;
+ connect->vendor_id = phy->vendor_id;
+
+ MEI_DUMP_NFC_HDR("connect request", &cmd->hdr);
+ r = mei_cl_send(phy->device, (u8 *)cmd, connect_length);
+ if (r < 0) {
+ pr_err("Could not send connect cmd %d\n", r);
+ goto err;
+ }
+
+ bytes_recv = mei_cl_recv(phy->device, (u8 *)reply, connect_resp_length);
+ if (bytes_recv < 0) {
+ r = bytes_recv;
+ pr_err("Could not read connect response %d\n", r);
+ goto err;
+ }
+
+ MEI_DUMP_NFC_HDR("connect reply", &reply->hdr);
+
+ pr_info("IVN 0x%x Vendor ID 0x%x\n",
+ connect_resp->fw_ivn, connect_resp->vendor_id);
+
+ pr_info("ME FW %d.%d.%d.%d\n",
+ connect_resp->me_major, connect_resp->me_minor,
+ connect_resp->me_hotfix, connect_resp->me_build);
+
+ r = 0;
+
+err:
+ kfree(reply);
+ kfree(cmd);
+
+ return r;
+}
+
+static int mei_nfc_send(struct nfc_mei_phy *phy, u8 *buf, size_t length)
+{
+ struct mei_nfc_hdr *hdr;
+ u8 *mei_buf;
+ int err;
+
+ err = -ENOMEM;
+ mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, GFP_KERNEL);
+ if (!mei_buf)
+ goto out;
+
+ hdr = (struct mei_nfc_hdr *)mei_buf;
+ hdr->cmd = MEI_NFC_CMD_HCI_SEND;
+ hdr->status = 0;
+ hdr->req_id = phy->req_id;
+ hdr->reserved = 0;
+ hdr->data_size = length;
+
+ MEI_DUMP_NFC_HDR("send", hdr);
+
+ memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length);
+ err = mei_cl_send(phy->device, mei_buf, length + MEI_NFC_HEADER_SIZE);
+ if (err < 0)
+ goto out;
+
+ if (!wait_event_interruptible_timeout(phy->send_wq,
+ phy->recv_req_id == phy->req_id, HZ)) {
+ pr_err("NFC MEI command timeout\n");
+ err = -ETIME;
+ } else {
+ phy->req_id++;
+ }
+out:
+ kfree(mei_buf);
+ return err;
}
-EXPORT_SYMBOL_GPL(nfc_mei_phy_disable);
/*
* Writing a frame must not return the number of written bytes.
@@ -103,14 +266,38 @@ static int nfc_mei_phy_write(void *phy_id, struct sk_buff *skb)
MEI_DUMP_SKB_OUT("mei frame sent", skb);
- r = mei_cl_send(phy->device, skb->data, skb->len);
+ r = mei_nfc_send(phy, skb->data, skb->len);
if (r > 0)
r = 0;
return r;
}
-void nfc_mei_event_cb(struct mei_cl_device *device, u32 events, void *context)
+static int mei_nfc_recv(struct nfc_mei_phy *phy, u8 *buf, size_t length)
+{
+ struct mei_nfc_hdr *hdr;
+ int received_length;
+
+ received_length = mei_cl_recv(phy->device, buf, length);
+ if (received_length < 0)
+ return received_length;
+
+ hdr = (struct mei_nfc_hdr *) buf;
+
+ MEI_DUMP_NFC_HDR("receive", hdr);
+ if (hdr->cmd == MEI_NFC_CMD_HCI_SEND) {
+ phy->recv_req_id = hdr->req_id;
+ wake_up(&phy->send_wq);
+
+ return 0;
+ }
+
+ return received_length;
+}
+
+
+static void nfc_mei_event_cb(struct mei_cl_device *device, u32 events,
+ void *context)
{
struct nfc_mei_phy *phy = context;
@@ -125,7 +312,7 @@ void nfc_mei_event_cb(struct mei_cl_device *device, u32 events, void *context)
if (!skb)
return;
- reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ);
+ reply_size = mei_nfc_recv(phy, skb->data, MEI_NFC_MAX_READ);
if (reply_size < MEI_NFC_HEADER_SIZE) {
kfree_skb(skb);
return;
@@ -139,7 +326,61 @@ void nfc_mei_event_cb(struct mei_cl_device *device, u32 events, void *context)
nfc_hci_recv_frame(phy->hdev, skb);
}
}
-EXPORT_SYMBOL_GPL(nfc_mei_event_cb);
+
+static int nfc_mei_phy_enable(void *phy_id)
+{
+ int r;
+ struct nfc_mei_phy *phy = phy_id;
+
+ pr_info("%s\n", __func__);
+
+ if (phy->powered == 1)
+ return 0;
+
+ r = mei_cl_enable_device(phy->device);
+ if (r < 0) {
+ pr_err("Could not enable device %d\n", r);
+ return r;
+ }
+
+ r = mei_nfc_if_version(phy);
+ if (r < 0) {
+ pr_err("Could not enable device %d\n", r);
+ goto err;
+ }
+
+ r = mei_nfc_connect(phy);
+ if (r < 0) {
+ pr_err("Could not connect to device %d\n", r);
+ goto err;
+ }
+
+ r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy);
+ if (r) {
+ pr_err("Event cb registration failed %d\n", r);
+ goto err;
+ }
+
+ phy->powered = 1;
+
+ return 0;
+
+err:
+ phy->powered = 0;
+ mei_cl_disable_device(phy->device);
+ return r;
+}
+
+static void nfc_mei_phy_disable(void *phy_id)
+{
+ struct nfc_mei_phy *phy = phy_id;
+
+ pr_info("%s\n", __func__);
+
+ mei_cl_disable_device(phy->device);
+
+ phy->powered = 0;
+}
struct nfc_phy_ops mei_phy_ops = {
.write = nfc_mei_phy_write,
@@ -157,6 +398,7 @@ struct nfc_mei_phy *nfc_mei_phy_alloc(struct mei_cl_device *device)
return NULL;
phy->device = device;
+ init_waitqueue_head(&phy->send_wq);
mei_cl_set_drvdata(device, phy);
return phy;
@@ -165,6 +407,7 @@ EXPORT_SYMBOL_GPL(nfc_mei_phy_alloc);
void nfc_mei_phy_free(struct nfc_mei_phy *phy)
{
+ mei_cl_disable_device(phy->device);
kfree(phy);
}
EXPORT_SYMBOL_GPL(nfc_mei_phy_free);
diff --git a/drivers/nfc/mei_phy.h b/drivers/nfc/mei_phy.h
index d669900f8278..fbfa3e61738f 100644
--- a/drivers/nfc/mei_phy.h
+++ b/drivers/nfc/mei_phy.h
@@ -3,27 +3,49 @@
#include <linux/mei_cl_bus.h>
#include <net/nfc/hci.h>
+#include <linux/uuid.h>
+#define MEI_NFC_UUID UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, \
+ 0x94, 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)
#define MEI_NFC_HEADER_SIZE 10
#define MEI_NFC_MAX_HCI_PAYLOAD 300
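
As an aside, UUID_LE() lays the 16 bytes out in little-endian order; a hedged example of stashing and logging the id (the %pUl printk specifier prints a little-endian uuid):

    static const uuid_le nfc_uuid = MEI_NFC_UUID;

    pr_debug("NFC MEI client uuid: %pUl\n", &nfc_uuid);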
+/**
+ * struct nfc_mei_phy
+ *
+ * @device: mei device
+ * @hdev: nfc hci device
+ *
+ * @send_wq: send completion wait queue
+ * @fw_ivn: NFC Interface Version Number
+ * @vendor_id: NFC manufacturer ID
+ * @radio_type: NFC radio type
+ * @reserved: reserved for alignment
+ * @req_id: message counter
+ * @recv_req_id: reception message counter
+ * @powered: the device is in powered state
+ * @hard_fault: < 0 if hardware error occurred
+ * and prevents normal operation.
+ */
struct nfc_mei_phy {
struct mei_cl_device *device;
struct nfc_hci_dev *hdev;
- int powered;
+ wait_queue_head_t send_wq;
+ u8 fw_ivn;
+ u8 vendor_id;
+ u8 radio_type;
+ u8 reserved;
+
+ u16 req_id;
+ u16 recv_req_id;
- int hard_fault; /*
- * < 0 if hardware error occured
- * and prevents normal operation.
- */
+ int powered;
+ int hard_fault;
};
extern struct nfc_phy_ops mei_phy_ops;
-int nfc_mei_phy_enable(void *phy_id);
-void nfc_mei_phy_disable(void *phy_id);
-void nfc_mei_event_cb(struct mei_cl_device *device, u32 events, void *context);
struct nfc_mei_phy *nfc_mei_phy_alloc(struct mei_cl_device *device);
void nfc_mei_phy_free(struct nfc_mei_phy *phy);
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index 2d1395be64ae..f9f5fc97cdd7 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -67,7 +67,7 @@ static int microread_mei_remove(struct mei_cl_device *device)
}
static struct mei_cl_device_id microread_mei_tbl[] = {
- { MICROREAD_DRIVER_NAME },
+ { MICROREAD_DRIVER_NAME, MEI_NFC_UUID},
/* required last entry */
{ }
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index 330cd4031009..101a37e12efa 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -67,7 +67,7 @@ static int pn544_mei_remove(struct mei_cl_device *device)
}
static struct mei_cl_device_id pn544_mei_tbl[] = {
- { PN544_DRIVER_NAME },
+ { PN544_DRIVER_NAME, MEI_NFC_UUID},
/* required last entry */
{ }
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 53d15b30636a..78530d1714dc 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2255,7 +2255,7 @@ out5:
release_region(base+0x3, 5);
release_region(base, 3);
out4:
- parport_put_port(p);
+ parport_del_port(p);
out3:
kfree(priv);
out2:
@@ -2294,7 +2294,7 @@ void parport_pc_unregister_port(struct parport *p)
priv->dma_handle);
#endif
kfree(p->private_data);
- parport_put_port(p);
+ parport_del_port(p);
kfree(ops); /* hope no-one cached it */
}
EXPORT_SYMBOL(parport_pc_unregister_port);
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
index 3b470801a04f..c776333a68bc 100644
--- a/drivers/parport/procfs.c
+++ b/drivers/parport/procfs.c
@@ -21,6 +21,7 @@
#include <linux/parport.h>
#include <linux/ctype.h>
#include <linux/sysctl.h>
+#include <linux/device.h>
#include <asm/uaccess.h>
@@ -558,8 +559,18 @@ int parport_device_proc_unregister(struct pardevice *device)
static int __init parport_default_proc_register(void)
{
+ int ret;
+
parport_default_sysctl_table.sysctl_header =
register_sysctl_table(parport_default_sysctl_table.dev_dir);
+ if (!parport_default_sysctl_table.sysctl_header)
+ return -ENOMEM;
+ ret = parport_bus_init();
+ if (ret) {
+ unregister_sysctl_table(parport_default_sysctl_table.
+ sysctl_header);
+ return ret;
+ }
return 0;
}
@@ -570,6 +581,7 @@ static void __exit parport_default_proc_unregister(void)
sysctl_header);
parport_default_sysctl_table.sysctl_header = NULL;
}
+ parport_bus_exit();
}
#else /* no sysctl or no procfs*/
@@ -596,11 +608,12 @@ int parport_device_proc_unregister(struct pardevice *device)
static int __init parport_default_proc_register (void)
{
- return 0;
+ return parport_bus_init();
}
static void __exit parport_default_proc_unregister (void)
{
+ parport_bus_exit();
}
#endif
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 3fa66244ce32..8067f54ce050 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kmod.h>
+#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
@@ -100,13 +101,91 @@ static struct parport_operations dead_ops = {
.owner = NULL,
};
+static struct device_type parport_device_type = {
+ .name = "parport",
+};
+
+static int is_parport(struct device *dev)
+{
+ return dev->type == &parport_device_type;
+}
+
+static int parport_probe(struct device *dev)
+{
+ struct parport_driver *drv;
+
+ if (is_parport(dev))
+ return -ENODEV;
+
+ drv = to_parport_driver(dev->driver);
+ if (!drv->probe) {
+ /* if driver has not defined a custom probe */
+ struct pardevice *par_dev = to_pardevice(dev);
+
+ if (strcmp(par_dev->name, drv->name))
+ return -ENODEV;
+ return 0;
+ }
+ /* if driver defined its own probe */
+ return drv->probe(to_pardevice(dev));
+}
+
+static struct bus_type parport_bus_type = {
+ .name = "parport",
+ .probe = parport_probe,
+};
+
+int parport_bus_init(void)
+{
+ return bus_register(&parport_bus_type);
+}
+
+void parport_bus_exit(void)
+{
+ bus_unregister(&parport_bus_type);
+}
+
+/*
+ * iterates through all the drivers registered with the bus and sends the port
+ * details to the match_port callback of the driver, so that the driver can
+ * know about the new port that just registered with the bus and decide if it
+ * wants to use this new port.
+ */
+static int driver_check(struct device_driver *dev_drv, void *_port)
+{
+ struct parport *port = _port;
+ struct parport_driver *drv = to_parport_driver(dev_drv);
+
+ if (drv->match_port)
+ drv->match_port(port);
+ return 0;
+}
+
/* Call attach(port) for each registered driver. */
static void attach_driver_chain(struct parport *port)
{
/* caller has exclusive registration_lock */
struct parport_driver *drv;
+
list_for_each_entry(drv, &drivers, list)
drv->attach(port);
+
+ /*
+ * call the driver_check function of the drivers registered in
+ * new device model
+ */
+
+ bus_for_each_drv(&parport_bus_type, NULL, port, driver_check);
+}
+
+static int driver_detach(struct device_driver *_drv, void *_port)
+{
+ struct parport *port = _port;
+ struct parport_driver *drv = to_parport_driver(_drv);
+
+ if (drv->detach)
+ drv->detach(port);
+ return 0;
}
/* Call detach(port) for each registered driver. */
@@ -116,6 +195,13 @@ static void detach_driver_chain(struct parport *port)
/* caller has exclusive registration_lock */
list_for_each_entry(drv, &drivers, list)
drv->detach (port);
+
+ /*
+ * call the detach function of the drivers registered in
+ * new device model
+ */
+
+ bus_for_each_drv(&parport_bus_type, NULL, port, driver_detach);
}
/* Ask kmod for some lowlevel drivers. */
@@ -126,17 +212,39 @@ static void get_lowlevel_driver (void)
request_module ("parport_lowlevel");
}
+/*
+ * iterates through all the devices connected to the bus and hands each
+ * port to the match_port callback of the driver, so that the driver can
+ * learn which ports are connected to the bus and choose the port to
+ * which it wants to register its device.
+ */
+static int port_check(struct device *dev, void *dev_drv)
+{
+ struct parport_driver *drv = dev_drv;
+
+ /* only send ports, do not send other devices connected to bus */
+ if (is_parport(dev))
+ drv->match_port(to_parport_dev(dev));
+ return 0;
+}
+
/**
* parport_register_driver - register a parallel port device driver
* @drv: structure describing the driver
+ * @owner: owner module of drv
+ * @mod_name: module name string
*
* This can be called by a parallel port device driver in order
* to receive notifications about ports being found in the
* system, as well as ports no longer available.
*
+ * If devmodel is true then the new device model is used
+ * for registration.
+ *
* The @drv structure is allocated by the caller and must not be
* deallocated until after calling parport_unregister_driver().
*
+ * If using the non device model:
* The driver's attach() function may block. The port that
* attach() is given will be valid for the duration of the
* callback, but if the driver wants to take a copy of the
@@ -148,21 +256,57 @@ static void get_lowlevel_driver (void)
* callback, but if the driver wants to take a copy of the
* pointer it must call parport_get_port() to do so.
*
- * Returns 0 on success. Currently it always succeeds.
+ *
+ * Returns 0 on success. Registration with the non device model always
+ * succeeds, but the new device model can fail and will return the error code.
**/
-int parport_register_driver (struct parport_driver *drv)
+int __parport_register_driver(struct parport_driver *drv, struct module *owner,
+ const char *mod_name)
{
- struct parport *port;
-
if (list_empty(&portlist))
get_lowlevel_driver ();
- mutex_lock(&registration_lock);
- list_for_each_entry(port, &portlist, list)
- drv->attach(port);
- list_add(&drv->list, &drivers);
- mutex_unlock(&registration_lock);
+ if (drv->devmodel) {
+ /* using device model */
+ int ret;
+
+ /* initialize common driver fields */
+ drv->driver.name = drv->name;
+ drv->driver.bus = &parport_bus_type;
+ drv->driver.owner = owner;
+ drv->driver.mod_name = mod_name;
+ ret = driver_register(&drv->driver);
+ if (ret)
+ return ret;
+
+ mutex_lock(&registration_lock);
+ if (drv->match_port)
+ bus_for_each_dev(&parport_bus_type, NULL, drv,
+ port_check);
+ mutex_unlock(&registration_lock);
+ } else {
+ struct parport *port;
+
+ drv->devmodel = false;
+
+ mutex_lock(&registration_lock);
+ list_for_each_entry(port, &portlist, list)
+ drv->attach(port);
+ list_add(&drv->list, &drivers);
+ mutex_unlock(&registration_lock);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(__parport_register_driver);
+
+static int port_detach(struct device *dev, void *_drv)
+{
+ struct parport_driver *drv = _drv;
+
+ if (is_parport(dev) && drv->detach)
+ drv->detach(to_parport_dev(dev));
return 0;
}
@@ -189,15 +333,22 @@ void parport_unregister_driver (struct parport_driver *drv)
struct parport *port;
mutex_lock(&registration_lock);
- list_del_init(&drv->list);
- list_for_each_entry(port, &portlist, list)
- drv->detach(port);
+ if (drv->devmodel) {
+ bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
+ driver_unregister(&drv->driver);
+ } else {
+ list_del_init(&drv->list);
+ list_for_each_entry(port, &portlist, list)
+ drv->detach(port);
+ }
mutex_unlock(&registration_lock);
}
-static void free_port (struct parport *port)
+static void free_port(struct device *dev)
{
int d;
+ struct parport *port = to_parport_dev(dev);
+
spin_lock(&full_list_lock);
list_del(&port->full_list);
spin_unlock(&full_list_lock);
@@ -223,25 +374,29 @@ static void free_port (struct parport *port)
struct parport *parport_get_port (struct parport *port)
{
- atomic_inc (&port->ref_count);
- return port;
+ struct device *dev = get_device(&port->bus_dev);
+
+ return to_parport_dev(dev);
+}
+
+void parport_del_port(struct parport *port)
+{
+ device_unregister(&port->bus_dev);
}
+EXPORT_SYMBOL(parport_del_port);
/**
* parport_put_port - decrement a port's reference count
* @port: the port
*
* This should be called once for each call to parport_get_port(),
- * once the port is no longer needed.
+ * once the port is no longer needed. When the reference count reaches
+ * zero (port is no longer used), free_port is called.
**/
void parport_put_port (struct parport *port)
{
- if (atomic_dec_and_test (&port->ref_count))
- /* Can destroy it now. */
- free_port (port);
-
- return;
+ put_device(&port->bus_dev);
}
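
In other words, port lifetime is now tied to the embedded bus_dev. A usage sketch of the pairing (free_port() now runs as the device release callback once the count hits zero):

    struct parport *ref = parport_get_port(port);   /* get_device(&port->bus_dev) */

    /* ... use the port ... */
    parport_put_port(ref);                          /* put_device(); may run free_port() */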
/**
@@ -281,6 +436,7 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma,
int num;
int device;
char *name;
+ int ret;
tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
if (!tmp) {
@@ -333,6 +489,10 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma,
*/
sprintf(name, "parport%d", tmp->portnum = tmp->number);
tmp->name = name;
+ tmp->bus_dev.bus = &parport_bus_type;
+ tmp->bus_dev.release = free_port;
+ dev_set_name(&tmp->bus_dev, "%s", name);
+ tmp->bus_dev.type = &parport_device_type;
for (device = 0; device < 5; device++)
/* assume the worst */
@@ -340,6 +500,12 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma,
tmp->waithead = tmp->waittail = NULL;
+ ret = device_register(&tmp->bus_dev);
+ if (ret) {
+ put_device(&tmp->bus_dev);
+ return NULL;
+ }
+
return tmp;
}
@@ -542,6 +708,20 @@ parport_register_device(struct parport *port, const char *name,
}
}
+ if (flags & PARPORT_DEV_EXCL) {
+ if (port->physport->devices) {
+ /*
+ * If a device is already registered and this new
+ * device wants exclusive access, then no need to
+ * continue as we can not grant exclusive access to
+ * this device.
+ */
+ pr_err("%s: cannot grant exclusive access for device %s\n",
+ port->name, name);
+ return NULL;
+ }
+ }
+
/* We up our own module reference count, and that of the port
on which a device is to be registered, to ensure that
neither of us gets unloaded while we sleep in (e.g.)
@@ -575,6 +755,7 @@ parport_register_device(struct parport *port, const char *name,
tmp->irq_func = irq_func;
tmp->waiting = 0;
tmp->timeout = 5 * HZ;
+ tmp->devmodel = false;
/* Chain this onto the list */
tmp->prev = NULL;
@@ -630,6 +811,150 @@ parport_register_device(struct parport *port, const char *name,
return NULL;
}
+static void free_pardevice(struct device *dev)
+{
+ struct pardevice *par_dev = to_pardevice(dev);
+
+ kfree(par_dev->name);
+ kfree(par_dev);
+}
+
+struct pardevice *
+parport_register_dev_model(struct parport *port, const char *name,
+ const struct pardev_cb *par_dev_cb, int id)
+{
+ struct pardevice *par_dev;
+ int ret;
+ char *devname;
+
+ if (port->physport->flags & PARPORT_FLAG_EXCL) {
+ /* An exclusive device is registered. */
+ pr_err("%s: no more devices allowed\n", port->name);
+ return NULL;
+ }
+
+ if (par_dev_cb->flags & PARPORT_DEV_LURK) {
+ if (!par_dev_cb->preempt || !par_dev_cb->wakeup) {
+ pr_info("%s: refused to register lurking device (%s) without callbacks\n",
+ port->name, name);
+ return NULL;
+ }
+ }
+
+ if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
+ if (port->physport->devices) {
+ /*
+ * If a device is already registered and this new
+ * device wants exclusive access, then no need to
+ * continue as we can not grant exclusive access to
+ * this device.
+ */
+ pr_err("%s: cannot grant exclusive access for device %s\n",
+ port->name, name);
+ return NULL;
+ }
+ }
+
+ if (!try_module_get(port->ops->owner))
+ return NULL;
+
+ parport_get_port(port);
+
+ par_dev = kzalloc(sizeof(*par_dev), GFP_KERNEL);
+ if (!par_dev)
+ goto err_put_port;
+
+ par_dev->state = kzalloc(sizeof(*par_dev->state), GFP_KERNEL);
+ if (!par_dev->state)
+ goto err_put_par_dev;
+
+ devname = kstrdup(name, GFP_KERNEL);
+ if (!devname)
+ goto err_free_par_dev;
+
+ par_dev->name = devname;
+ par_dev->port = port;
+ par_dev->daisy = -1;
+ par_dev->preempt = par_dev_cb->preempt;
+ par_dev->wakeup = par_dev_cb->wakeup;
+ par_dev->private = par_dev_cb->private;
+ par_dev->flags = par_dev_cb->flags;
+ par_dev->irq_func = par_dev_cb->irq_func;
+ par_dev->waiting = 0;
+ par_dev->timeout = 5 * HZ;
+
+ par_dev->dev.parent = &port->bus_dev;
+ par_dev->dev.bus = &parport_bus_type;
+ ret = dev_set_name(&par_dev->dev, "%s.%d", devname, id);
+ if (ret)
+ goto err_free_devname;
+ par_dev->dev.release = free_pardevice;
+ par_dev->devmodel = true;
+ ret = device_register(&par_dev->dev);
+ if (ret)
+ goto err_put_dev;
+
+ /* Chain this onto the list */
+ par_dev->prev = NULL;
+ /*
+ * This function must not run from an irq handler so we don't need
+ * to clear irq on the local CPU. -arca
+ */
+ spin_lock(&port->physport->pardevice_lock);
+
+ if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
+ if (port->physport->devices) {
+ spin_unlock(&port->physport->pardevice_lock);
+ pr_debug("%s: cannot grant exclusive access for device %s\n",
+ port->name, name);
+ goto err_put_dev;
+ }
+ port->flags |= PARPORT_FLAG_EXCL;
+ }
+
+ par_dev->next = port->physport->devices;
+ wmb(); /*
+ * Make sure that tmp->next is written before it's
+ * added to the list; see comments marked 'no locking
+ * required'
+ */
+ if (port->physport->devices)
+ port->physport->devices->prev = par_dev;
+ port->physport->devices = par_dev;
+ spin_unlock(&port->physport->pardevice_lock);
+
+ init_waitqueue_head(&par_dev->wait_q);
+ par_dev->timeslice = parport_default_timeslice;
+ par_dev->waitnext = NULL;
+ par_dev->waitprev = NULL;
+
+ /*
+ * This has to be run as last thing since init_state may need other
+ * pardevice fields. -arca
+ */
+ port->ops->init_state(par_dev, par_dev->state);
+ port->proc_device = par_dev;
+ parport_device_proc_register(par_dev);
+
+ return par_dev;
+
+err_put_dev:
+ put_device(&par_dev->dev);
+err_free_devname:
+ kfree(devname);
+err_free_par_dev:
+ kfree(par_dev->state);
+err_put_par_dev:
+ if (!par_dev->devmodel)
+ kfree(par_dev);
+err_put_port:
+ parport_put_port(port);
+ module_put(port->ops->owner);
+
+ return NULL;
+}
+EXPORT_SYMBOL(parport_register_dev_model);
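
A minimal sketch of a driver written against the new device model (the mydrv_* names are hypothetical; the pattern mirrors the panel.c conversion further down):

    static struct pardevice *mydrv_dev;

    static void mydrv_match_port(struct parport *port)
    {
        struct pardev_cb cb;

        memset(&cb, 0, sizeof(cb));
        cb.private = &mydrv_dev;
        mydrv_dev = parport_register_dev_model(port, "mydrv", &cb, 0);
    }

    static void mydrv_detach(struct parport *port)
    {
        /* release resources tied to this port */
    }

    static struct parport_driver mydrv_driver = {
        .name       = "mydrv",
        .match_port = mydrv_match_port,
        .detach     = mydrv_detach,
        .devmodel   = true,
    };

    /* the macro supplies THIS_MODULE and KBUILD_MODNAME, see parport.h below */
    ret = parport_register_driver(&mydrv_driver);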
+
/**
* parport_unregister_device - deregister a device on a parallel port
* @dev: pointer to structure representing device
@@ -691,7 +1016,10 @@ void parport_unregister_device(struct pardevice *dev)
spin_unlock_irq(&port->waitlist_lock);
kfree(dev->state);
- kfree(dev);
+ if (dev->devmodel)
+ device_unregister(&dev->dev);
+ else
+ kfree(dev);
module_put(port->ops->owner);
parport_put_port (port);
@@ -1019,7 +1347,6 @@ EXPORT_SYMBOL(parport_release);
EXPORT_SYMBOL(parport_register_port);
EXPORT_SYMBOL(parport_announce_port);
EXPORT_SYMBOL(parport_remove_port);
-EXPORT_SYMBOL(parport_register_driver);
EXPORT_SYMBOL(parport_unregister_driver);
EXPORT_SYMBOL(parport_register_device);
EXPORT_SYMBOL(parport_unregister_device);
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 8007bfda720a..c3b615c94b4b 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -621,8 +621,6 @@ static int pccardd(void *__skt)
unsigned int events;
unsigned int sysfs_events;
- set_current_state(TASK_INTERRUPTIBLE);
-
spin_lock_irqsave(&skt->thread_lock, flags);
events = skt->thread_events;
skt->thread_events = 0;
@@ -670,11 +668,15 @@ static int pccardd(void *__skt)
if (kthread_should_stop())
break;
+ set_current_state(TASK_INTERRUPTIBLE);
+
schedule();
+
+ /* make sure we are running */
+ __set_current_state(TASK_RUNNING);
+
try_to_freeze();
}
- /* make sure we are running before we exit */
- set_current_state(TASK_RUNNING);
/* shut down socket, if a device is still present */
if (skt->state & SOCKET_PRESENT) {
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index d9a09d9637d9..a6558409ba45 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -282,7 +282,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
return -EIO;
/* Null reads or writes succeeds */
- if (unlikely(bufflen) == 0)
+ if (unlikely(bufflen == 0))
return 0;
/* Check the buffer range for access */
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
index c8d99563d245..982580af1d16 100644
--- a/drivers/spmi/Kconfig
+++ b/drivers/spmi/Kconfig
@@ -14,6 +14,7 @@ config SPMI_MSM_PMIC_ARB
tristate "Qualcomm MSM SPMI Controller (PMIC Arbiter)"
depends on IRQ_DOMAIN
depends on ARCH_QCOM || COMPILE_TEST
+ depends on HAS_IOMEM
default ARCH_QCOM
help
If you say yes to this option, support will be included for the
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index ea54fb4ec837..592a12241b37 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -2190,6 +2190,8 @@ static struct notifier_block panel_notifier = {
static void panel_attach(struct parport *port)
{
+ struct pardev_cb panel_cb;
+
if (port->number != parport)
return;
@@ -2199,10 +2201,11 @@ static void panel_attach(struct parport *port)
return;
}
- pprt = parport_register_device(port, "panel", NULL, NULL, /* pf, kf */
- NULL,
- /*PARPORT_DEV_EXCL */
- 0, (void *)&pprt);
+ memset(&panel_cb, 0, sizeof(panel_cb));
+ panel_cb.private = &pprt;
+ /* panel_cb.flags = 0 should be PARPORT_DEV_EXCL? */
+
+ pprt = parport_register_dev_model(port, "panel", &panel_cb, 0);
if (pprt == NULL) {
pr_err("%s: port->number=%d parport=%d, parport_register_device() failed\n",
__func__, port->number, parport);
@@ -2270,8 +2273,9 @@ static void panel_detach(struct parport *port)
static struct parport_driver panel_driver = {
.name = "panel",
- .attach = panel_attach,
+ .match_port = panel_attach,
.detach = panel_detach,
+ .devmodel = true,
};
/* init function */
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 8a15c323c030..48fb1d983f6c 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -126,8 +126,8 @@ config UIO_FSL_ELBC_GPCM_NETX5152
config UIO_PRUSS
tristate "Texas Instruments PRUSS driver"
- depends on ARCH_DAVINCI_DA850
select GENERIC_ALLOCATOR
+ depends on HAS_IOMEM
help
PRUSS driver for OMAPL138/DA850/AM18XX devices
PRUSS driver requires user space components, examples and user space
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 65bf0676d54a..3257d4220d01 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -879,7 +879,8 @@ void uio_unregister_device(struct uio_info *info)
uio_dev_del_attributes(idev);
- free_irq(idev->info->irq, idev);
+ if (info->irq && info->irq != UIO_IRQ_CUSTOM)
+ free_irq(info->irq, idev);
device_destroy(&uio_class, MKDEV(uio_major, idev->minor));
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index 818735bb8c3a..ca9e2fafb0b6 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index 2b28443d07b9..b40d6a87d694 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -60,10 +60,11 @@ struct tahvo_usb {
struct extcon_dev extcon;
};
-static const char *tahvo_cable[] = {
- "USB-HOST",
- "USB",
- NULL,
+static const unsigned int tahvo_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+
+ EXTCON_NONE,
};
static ssize_t vbus_state_show(struct device *device,
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index e76a9b39abb2..a674409edfb3 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -93,6 +93,7 @@ static const struct i2c_device_id ds2482_id[] = {
{ "ds2482", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, ds2482_id);
static struct i2c_driver ds2482_driver = {
.driver = {
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 1f11a20a8ab9..2f029e8f4f95 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -59,16 +59,32 @@ MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS28EA00));
static int w1_strong_pullup = 1;
module_param_named(strong_pullup, w1_strong_pullup, int, 0);
+struct w1_therm_family_data {
+ uint8_t rom[9];
+ atomic_t refcnt;
+};
+
+/* return the address of the refcnt in the family data */
+#define THERM_REFCNT(family_data) \
+ (&((struct w1_therm_family_data*)family_data)->refcnt)
+
static int w1_therm_add_slave(struct w1_slave *sl)
{
- sl->family_data = kzalloc(9, GFP_KERNEL);
+ sl->family_data = kzalloc(sizeof(struct w1_therm_family_data),
+ GFP_KERNEL);
if (!sl->family_data)
return -ENOMEM;
+ atomic_set(THERM_REFCNT(sl->family_data), 1);
return 0;
}
static void w1_therm_remove_slave(struct w1_slave *sl)
{
+ int refcnt = atomic_sub_return(1, THERM_REFCNT(sl->family_data));
+
+ /* wait until every reader that pinned the slave drops its reference */
+ while (refcnt) {
+ msleep(1000);
+ refcnt = atomic_read(THERM_REFCNT(sl->family_data));
+ }
kfree(sl->family_data);
sl->family_data = NULL;
}
@@ -76,13 +92,24 @@ static void w1_therm_remove_slave(struct w1_slave *sl)
static ssize_t w1_slave_show(struct device *device,
struct device_attribute *attr, char *buf);
+static ssize_t w1_seq_show(struct device *device,
+ struct device_attribute *attr, char *buf);
+
static DEVICE_ATTR_RO(w1_slave);
+static DEVICE_ATTR_RO(w1_seq);
static struct attribute *w1_therm_attrs[] = {
&dev_attr_w1_slave.attr,
NULL,
};
+
+static struct attribute *w1_ds28ea00_attrs[] = {
+ &dev_attr_w1_slave.attr,
+ &dev_attr_w1_seq.attr,
+ NULL,
+};
ATTRIBUTE_GROUPS(w1_therm);
+ATTRIBUTE_GROUPS(w1_ds28ea00);
static struct w1_family_ops w1_therm_fops = {
.add_slave = w1_therm_add_slave,
@@ -90,6 +117,12 @@ static struct w1_family_ops w1_therm_fops = {
.groups = w1_therm_groups,
};
+static struct w1_family_ops w1_ds28ea00_fops = {
+ .add_slave = w1_therm_add_slave,
+ .remove_slave = w1_therm_remove_slave,
+ .groups = w1_ds28ea00_groups,
+};
+
static struct w1_family w1_therm_family_DS18S20 = {
.fid = W1_THERM_DS18S20,
.fops = &w1_therm_fops,
@@ -107,7 +140,7 @@ static struct w1_family w1_therm_family_DS1822 = {
static struct w1_family w1_therm_family_DS28EA00 = {
.fid = W1_THERM_DS28EA00,
- .fops = &w1_therm_fops,
+ .fops = &w1_ds28ea00_fops,
};
static struct w1_family w1_therm_family_DS1825 = {
@@ -194,13 +227,22 @@ static ssize_t w1_slave_show(struct device *device,
struct w1_slave *sl = dev_to_w1_slave(device);
struct w1_master *dev = sl->master;
u8 rom[9], crc, verdict, external_power;
- int i, max_trying = 10;
+ int i, ret, max_trying = 10;
ssize_t c = PAGE_SIZE;
+ u8 *family_data = sl->family_data;
- i = mutex_lock_interruptible(&dev->bus_mutex);
- if (i != 0)
- return i;
+ ret = mutex_lock_interruptible(&dev->bus_mutex);
+ if (ret != 0)
+ return ret;
+
+ if (!sl->family_data) {
+ /* no reference was taken, so do not exit through post_unlock */
+ mutex_unlock(&dev->bus_mutex);
+ return -ENODEV;
+ }
+ /* prevent the slave from going away while we sleep */
+ atomic_inc(THERM_REFCNT(family_data));
memset(rom, 0, sizeof(rom));
while (max_trying--) {
@@ -230,17 +272,19 @@ static ssize_t w1_slave_show(struct device *device,
mutex_unlock(&dev->bus_mutex);
sleep_rem = msleep_interruptible(tm);
- if (sleep_rem != 0)
- return -EINTR;
+ if (sleep_rem != 0) {
+ ret = -EINTR;
+ goto post_unlock;
+ }
- i = mutex_lock_interruptible(&dev->bus_mutex);
- if (i != 0)
- return i;
+ ret = mutex_lock_interruptible(&dev->bus_mutex);
+ if (ret != 0)
+ goto post_unlock;
} else if (!w1_strong_pullup) {
sleep_rem = msleep_interruptible(tm);
if (sleep_rem != 0) {
- mutex_unlock(&dev->bus_mutex);
- return -EINTR;
+ ret = -EINTR;
+ goto pre_unlock;
}
}
@@ -269,19 +313,107 @@ static ssize_t w1_slave_show(struct device *device,
c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n",
crc, (verdict) ? "YES" : "NO");
if (verdict)
- memcpy(sl->family_data, rom, sizeof(rom));
+ memcpy(family_data, rom, sizeof(rom));
else
dev_warn(device, "Read failed CRC check\n");
for (i = 0; i < 9; ++i)
c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ",
- ((u8 *)sl->family_data)[i]);
+ ((u8 *)family_data)[i]);
c -= snprintf(buf + PAGE_SIZE - c, c, "t=%d\n",
w1_convert_temp(rom, sl->family->fid));
+ ret = PAGE_SIZE - c;
+
+pre_unlock:
mutex_unlock(&dev->bus_mutex);
+post_unlock:
+ atomic_dec(THERM_REFCNT(family_data));
+ return ret;
+}
+
+#define W1_42_CHAIN 0x99
+#define W1_42_CHAIN_OFF 0x3C
+#define W1_42_CHAIN_OFF_INV 0xC3
+#define W1_42_CHAIN_ON 0x5A
+#define W1_42_CHAIN_ON_INV 0xA5
+#define W1_42_CHAIN_DONE 0x96
+#define W1_42_CHAIN_DONE_INV 0x69
+#define W1_42_COND_READ 0x0F
+#define W1_42_SUCCESS_CONFIRM_BYTE 0xAA
+#define W1_42_FINISHED_BYTE 0xFF
+static ssize_t w1_seq_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ ssize_t c = PAGE_SIZE;
+ int rv;
+ int i;
+ u8 ack;
+ u64 rn;
+ struct w1_reg_num *reg_num;
+ int seq = 0;
+
+ mutex_lock(&sl->master->bus_mutex);
+ /* Place all devices in CHAIN state */
+ if (w1_reset_bus(sl->master))
+ goto error;
+ w1_write_8(sl->master, W1_SKIP_ROM);
+ w1_write_8(sl->master, W1_42_CHAIN);
+ w1_write_8(sl->master, W1_42_CHAIN_ON);
+ w1_write_8(sl->master, W1_42_CHAIN_ON_INV);
+ msleep(sl->master->pullup_duration);
+
+ /* check for acknowledgment */
+ ack = w1_read_8(sl->master);
+ if (ack != W1_42_SUCCESS_CONFIRM_BYTE)
+ goto error;
+
+ /* In case the bus fails to send 0xFF, limit the loop iterations */
+ for (i = 0; i <= 64; i++) {
+ if (w1_reset_bus(sl->master))
+ goto error;
+
+ w1_write_8(sl->master, W1_42_COND_READ);
+ rv = w1_read_block(sl->master, (u8 *)&rn, 8);
+ reg_num = (struct w1_reg_num *) &rn;
+ if (reg_num->family == W1_42_FINISHED_BYTE)
+ break;
+ if (sl->reg_num.id == reg_num->id)
+ seq = i;
+
+ w1_write_8(sl->master, W1_42_CHAIN);
+ w1_write_8(sl->master, W1_42_CHAIN_DONE);
+ w1_write_8(sl->master, W1_42_CHAIN_DONE_INV);
+ w1_read_block(sl->master, &ack, sizeof(ack));
+
+ /* check for acknowledgment */
+ ack = w1_read_8(sl->master);
+ if (ack != W1_42_SUCCESS_CONFIRM_BYTE)
+ goto error;
+
+ }
+
+ /* Exit from CHAIN state */
+ if (w1_reset_bus(sl->master))
+ goto error;
+ w1_write_8(sl->master, W1_SKIP_ROM);
+ w1_write_8(sl->master, W1_42_CHAIN);
+ w1_write_8(sl->master, W1_42_CHAIN_OFF);
+ w1_write_8(sl->master, W1_42_CHAIN_OFF_INV);
+
+ /* check for acknowledgment */
+ ack = w1_read_8(sl->master);
+ if (ack != W1_42_SUCCESS_CONFIRM_BYTE)
+ goto error;
+ mutex_unlock(&sl->master->bus_mutex);
+
+ c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", seq);
return PAGE_SIZE - c;
+error:
+ mutex_unlock(&sl->master->bus_mutex);
+ return -EIO;
}
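
With this in place, the DS28EA00's position in the daisy chain is exposed through the new w1_seq sysfs attribute registered above; a read returns the zero-based sequence number computed here, or -EIO if any step of the chain handshake fails.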
static int __init w1_therm_init(void)
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 181f41cb960b..c9a7ff67d395 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -46,11 +46,15 @@ MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol.");
static int w1_timeout = 10;
+static int w1_timeout_us;
int w1_max_slave_count = 64;
int w1_max_slave_ttl = 10;
module_param_named(timeout, w1_timeout, int, 0);
MODULE_PARM_DESC(timeout, "time in seconds between automatic slave searches");
+module_param_named(timeout_us, w1_timeout_us, int, 0);
+MODULE_PARM_DESC(timeout_us,
+ "time in microseconds between automatic slave searches");
/* A search stops when w1_max_slave_count devices have been found in that
* search. The next search will start over and detect the same set of devices
* on a static 1-wire bus. Memory is not allocated based on this number, just
@@ -317,6 +321,14 @@ static ssize_t w1_master_attribute_show_timeout(struct device *dev, struct devic
return count;
}
+static ssize_t w1_master_attribute_show_timeout_us(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t count;
+ count = sprintf(buf, "%d\n", w1_timeout_us);
+ return count;
+}
+
static ssize_t w1_master_attribute_store_max_slave_count(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -543,6 +555,7 @@ static W1_MASTER_ATTR_RO(slave_count, S_IRUGO);
static W1_MASTER_ATTR_RW(max_slave_count, S_IRUGO | S_IWUSR | S_IWGRP);
static W1_MASTER_ATTR_RO(attempts, S_IRUGO);
static W1_MASTER_ATTR_RO(timeout, S_IRUGO);
+static W1_MASTER_ATTR_RO(timeout_us, S_IRUGO);
static W1_MASTER_ATTR_RO(pointer, S_IRUGO);
static W1_MASTER_ATTR_RW(search, S_IRUGO | S_IWUSR | S_IWGRP);
static W1_MASTER_ATTR_RW(pullup, S_IRUGO | S_IWUSR | S_IWGRP);
@@ -556,6 +569,7 @@ static struct attribute *w1_master_default_attrs[] = {
&w1_master_attribute_max_slave_count.attr,
&w1_master_attribute_attempts.attr,
&w1_master_attribute_timeout.attr,
+ &w1_master_attribute_timeout_us.attr,
&w1_master_attribute_pointer.attr,
&w1_master_attribute_search.attr,
&w1_master_attribute_pullup.attr,
@@ -1108,7 +1122,8 @@ int w1_process(void *data)
/* As long as w1_timeout is only set by a module parameter the sleep
* time can be calculated in jiffies once.
*/
- const unsigned long jtime = msecs_to_jiffies(w1_timeout * 1000);
+ const unsigned long jtime =
+ usecs_to_jiffies(w1_timeout * 1000000 + w1_timeout_us);
/* remainder if it woke up early */
unsigned long jremain = 0;
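
To illustrate the combined sleep computation above (a hedged example, not from the patch): with timeout=10 and timeout_us=500000, jtime = usecs_to_jiffies(10 * 1000000 + 500000), i.e. the search thread wakes roughly every 10.5 seconds.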
diff --git a/include/dt-bindings/mfd/arizona.h b/include/dt-bindings/mfd/arizona.h
index 555609910acb..7b2000cead43 100644
--- a/include/dt-bindings/mfd/arizona.h
+++ b/include/dt-bindings/mfd/arizona.h
@@ -104,4 +104,8 @@
#define ARIZONA_MICD_TIME_256MS 11
#define ARIZONA_MICD_TIME_512MS 12
+#define ARIZONA_ACCDET_MODE_MIC 0
+#define ARIZONA_ACCDET_MODE_HPL 1
+#define ARIZONA_ACCDET_MODE_HPR 2
+
#endif
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 36f49c405dfb..b16d929fa75f 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -1,6 +1,9 @@
/*
* External connector (extcon) class driver
*
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
* Copyright (C) 2012 Samsung Electronics
* Author: Donggeun Kim <dg77.kim@samsung.com>
* Author: MyungJoo Ham <myungjoo.ham@samsung.com>
@@ -27,50 +30,35 @@
#include <linux/notifier.h>
#include <linux/sysfs.h>
-#define SUPPORTED_CABLE_MAX 32
-#define CABLE_NAME_MAX 30
-
/*
- * The standard cable name is to help support general notifier
- * and notifiee device drivers to share the common names.
- * Please use standard cable names unless your notifier device has
- * a very unique and abnormal cable or
- * the cable type is supposed to be used with only one unique
- * pair of notifier/notifiee devices.
- *
- * Please add any other "standard" cables used with extcon dev.
- *
- * You may add a dot and number to specify version or specification
- * of the specific cable if it is required. (e.g., "Fast-charger.18"
- * and "Fast-charger.10" for 1.8A and 1.0A chargers)
- * However, the notifiee and notifier should be able to handle such
- * string and if the notifiee can negotiate the protocol or identify,
- * you don't need such convention. This convention is helpful when
- * notifier can distinguish but notifiee cannot.
+ * Define the unique id of supported external connectors
*/
-enum extcon_cable_name {
- EXTCON_USB = 0,
- EXTCON_USB_HOST,
- EXTCON_TA, /* Travel Adaptor */
- EXTCON_FAST_CHARGER,
- EXTCON_SLOW_CHARGER,
- EXTCON_CHARGE_DOWNSTREAM, /* Charging an external device */
- EXTCON_HDMI,
- EXTCON_MHL,
- EXTCON_DVI,
- EXTCON_VGA,
- EXTCON_DOCK,
- EXTCON_LINE_IN,
- EXTCON_LINE_OUT,
- EXTCON_MIC_IN,
- EXTCON_HEADPHONE_OUT,
- EXTCON_SPDIF_IN,
- EXTCON_SPDIF_OUT,
- EXTCON_VIDEO_IN,
- EXTCON_VIDEO_OUT,
- EXTCON_MECHANICAL,
-};
-extern const char extcon_cable_name[][CABLE_NAME_MAX + 1];
+#define EXTCON_NONE 0
+
+#define EXTCON_USB 1 /* USB connector */
+#define EXTCON_USB_HOST 2
+
+#define EXTCON_TA 3 /* Charger connector */
+#define EXTCON_FAST_CHARGER 4
+#define EXTCON_SLOW_CHARGER 5
+#define EXTCON_CHARGE_DOWNSTREAM 6
+
+#define EXTCON_LINE_IN 7 /* Audio/Video connector */
+#define EXTCON_LINE_OUT 8
+#define EXTCON_MICROPHONE 9
+#define EXTCON_HEADPHONE 10
+#define EXTCON_HDMI 11
+#define EXTCON_MHL 12
+#define EXTCON_DVI 13
+#define EXTCON_VGA 14
+#define EXTCON_SPDIF_IN 15
+#define EXTCON_SPDIF_OUT 16
+#define EXTCON_VIDEO_IN 17
+#define EXTCON_VIDEO_OUT 18
+
+#define EXTCON_DOCK 19 /* Misc connector */
+#define EXTCON_JIG 20
+#define EXTCON_MECHANICAL 21
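
Consumers now describe their cables with these ids instead of strings; a minimal sketch (mirroring the tahvo_cable conversion elsewhere in this series, with dev and edev assumed from the surrounding driver):

    static const unsigned int usb_cables[] = {
        EXTCON_USB,
        EXTCON_USB_HOST,
        EXTCON_NONE,            /* terminator, replaces the old NULL entry */
    };

    edev = devm_extcon_dev_allocate(dev, usb_cables);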
struct extcon_cable;
@@ -78,7 +66,7 @@ struct extcon_cable;
* struct extcon_dev - An extcon device represents one external connector.
* @name: The name of this extcon device. Parent device name is
* used if NULL.
- * @supported_cable: Array of supported cable names ending with NULL.
+ * @supported_cable: Array of supported cable names ending with EXTCON_NONE.
* If supported_cable is NULL, cable name related APIs
* are disabled.
* @mutually_exclusive: Array of mutually exclusive set of cables that cannot
@@ -89,16 +77,14 @@ struct extcon_cable;
* be attached simulataneously. {0x7, 0} is equivalent to
* {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
* can be no simultaneous connections.
- * @print_name: An optional callback to override the method to print the
- * name of the extcon device.
* @print_state: An optional callback to override the method to print the
* status of the extcon device.
* @dev: Device of this extcon.
* @state: Attach/detach state of this extcon. Do not provide at
* register-time.
* @nh: Notifier for the state change events from this extcon
- * @entry: To support list of extcon devices so that users can search
- * for extcon devices based on the extcon name.
+ * @entry: To support list of extcon devices so that users can
+ * search for extcon devices based on the extcon name.
* @lock:
* @max_supported: Internal value to store the number of cables.
* @extcon_dev_type: Device_type struct to provide attribute_groups
@@ -113,16 +99,15 @@ struct extcon_cable;
struct extcon_dev {
/* Optional user initializing data */
const char *name;
- const char **supported_cable;
+ const unsigned int *supported_cable;
const u32 *mutually_exclusive;
/* Optional callbacks to override class functions */
- ssize_t (*print_name)(struct extcon_dev *edev, char *buf);
ssize_t (*print_state)(struct extcon_dev *edev, char *buf);
/* Internal data. Please do not set. */
struct device dev;
- struct raw_notifier_head nh;
+ struct raw_notifier_head *nh;
struct list_head entry;
int max_supported;
spinlock_t lock; /* could be called by irq handler */
@@ -161,8 +146,6 @@ struct extcon_cable {
/**
* struct extcon_specific_cable_nb - An internal data for
* extcon_register_interest().
- * @internal_nb: A notifier block bridging extcon notifier
- * and cable notifier.
* @user_nb: user provided notifier block for events from
* a specific cable.
* @cable_index: the target cable.
@@ -170,7 +153,6 @@ struct extcon_cable {
* @previous_value: the saved previous event value.
*/
struct extcon_specific_cable_nb {
- struct notifier_block internal_nb;
struct notifier_block *user_nb;
int cable_index;
struct extcon_dev *edev;
@@ -194,10 +176,10 @@ extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
/*
* Following APIs control the memory of extcon device.
*/
-extern struct extcon_dev *extcon_dev_allocate(const char **cables);
+extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
extern void extcon_dev_free(struct extcon_dev *edev);
extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const char **cables);
+ const unsigned int *cable);
extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
/*
@@ -216,13 +198,10 @@ extern int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state);
/*
* get/set_cable_state access each bit of the 32b encoded state value.
- * They are used to access the status of each cable based on the cable_name
- * or cable_index, which is retrieved by extcon_find_cable_index
+ * They are used to access the status of each cable based on the cable_name.
*/
-extern int extcon_find_cable_index(struct extcon_dev *sdev,
- const char *cable_name);
-extern int extcon_get_cable_state_(struct extcon_dev *edev, int cable_index);
-extern int extcon_set_cable_state_(struct extcon_dev *edev, int cable_index,
+extern int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id);
+extern int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
bool cable_state);
extern int extcon_get_cable_state(struct extcon_dev *edev,
@@ -249,16 +228,21 @@ extern int extcon_unregister_interest(struct extcon_specific_cable_nb *nb);
* we do not recommend to use this for normal 'notifiee' device drivers who
* want to be notified by a specific external port of the notifier.
*/
-extern int extcon_register_notifier(struct extcon_dev *edev,
+extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
+ struct notifier_block *nb);
+extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
-extern int extcon_unregister_notifier(struct extcon_dev *edev,
- struct notifier_block *nb);
/*
* Following API get the extcon device from devicetree.
* This function use phandle of devicetree to get extcon device directly.
*/
-extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index);
+extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
+ int index);
+
+/* Following API to get information of extcon device */
+extern const char *extcon_get_edev_name(struct extcon_dev *edev);
+
#else /* CONFIG_EXTCON */
static inline int extcon_dev_register(struct extcon_dev *edev)
{
@@ -276,7 +260,7 @@ static inline int devm_extcon_dev_register(struct device *dev,
static inline void devm_extcon_dev_unregister(struct device *dev,
struct extcon_dev *edev) { }
-static inline struct extcon_dev *extcon_dev_allocate(const char **cables)
+static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
{
return ERR_PTR(-ENOSYS);
}
@@ -284,7 +268,7 @@ static inline struct extcon_dev *extcon_dev_allocate(const char **cables)
static inline void extcon_dev_free(struct extcon_dev *edev) { }
static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const char **cables)
+ const unsigned int *cable)
{
return ERR_PTR(-ENOSYS);
}
@@ -307,20 +291,14 @@ static inline int extcon_update_state(struct extcon_dev *edev, u32 mask,
return 0;
}
-static inline int extcon_find_cable_index(struct extcon_dev *edev,
- const char *cable_name)
-{
- return 0;
-}
-
static inline int extcon_get_cable_state_(struct extcon_dev *edev,
- int cable_index)
+ unsigned int id)
{
return 0;
}
static inline int extcon_set_cable_state_(struct extcon_dev *edev,
- int cable_index, bool cable_state)
+ unsigned int id, bool cable_state)
{
return 0;
}
@@ -343,13 +321,15 @@ static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
}
static inline int extcon_register_notifier(struct extcon_dev *edev,
- struct notifier_block *nb)
+ unsigned int id,
+ struct notifier_block *nb)
{
return 0;
}
static inline int extcon_unregister_notifier(struct extcon_dev *edev,
- struct notifier_block *nb)
+ unsigned int id,
+ struct notifier_block *nb)
{
return 0;
}
diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h
index 9ca958c4e94c..53c60806bcfb 100644
--- a/include/linux/extcon/extcon-adc-jack.h
+++ b/include/linux/extcon/extcon-adc-jack.h
@@ -44,7 +44,7 @@ struct adc_jack_cond {
* @consumer_channel: Unique name to identify the channel on the consumer
* side. This typically describes the channels used within
* the consumer. E.g. 'battery_voltage'
- * @cable_names: array of cable names ending with null.
+ * @cable_names: array of extcon ids for supported cables.
* @adc_contitions: array of struct adc_jack_cond conditions ending
* with .state = 0 entry. This describes how to decode
* adc values into extcon state.
@@ -58,8 +58,7 @@ struct adc_jack_pdata {
const char *name;
const char *consumer_channel;
- /* The last entry should be NULL */
- const char **cable_names;
+ const unsigned int *cable_names;
/* The last entry's state should be 0 */
struct adc_jack_cond *adc_conditions;
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 902c37aef67e..30d3a1f79450 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -160,16 +160,18 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
* 1 . 1 (Windows 7)
* 2 . 4 (Windows 8)
* 3 . 0 (Windows 8 R2)
+ * 4 . 0 (Windows 10)
*/
#define VERSION_WS2008 ((0 << 16) | (13))
#define VERSION_WIN7 ((1 << 16) | (1))
#define VERSION_WIN8 ((2 << 16) | (4))
#define VERSION_WIN8_1 ((3 << 16) | (0))
+#define VERSION_WIN10 ((4 << 16) | (0))
#define VERSION_INVAL -1
-#define VERSION_CURRENT VERSION_WIN8_1
+#define VERSION_CURRENT VERSION_WIN10
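
Each version constant packs the major number in the upper 16 bits and the minor in the lower 16, so VERSION_WIN10 decodes as 4.0:

    u32 ver = VERSION_WIN10;

    pr_info("vmbus protocol %u.%u\n", ver >> 16, ver & 0xffff);   /* "4.0" */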
/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
@@ -389,10 +391,7 @@ enum vmbus_channel_message_type {
CHANNELMSG_INITIATE_CONTACT = 14,
CHANNELMSG_VERSION_RESPONSE = 15,
CHANNELMSG_UNLOAD = 16,
-#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
- CHANNELMSG_VIEWRANGE_ADD = 17,
- CHANNELMSG_VIEWRANGE_REMOVE = 18,
-#endif
+ CHANNELMSG_UNLOAD_RESPONSE = 17,
CHANNELMSG_COUNT
};
@@ -549,21 +548,6 @@ struct vmbus_channel_gpadl_torndown {
u32 gpadl;
} __packed;
-#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
-struct vmbus_channel_view_range_add {
- struct vmbus_channel_message_header header;
- PHYSICAL_ADDRESS viewrange_base;
- u64 viewrange_length;
- u32 child_relid;
-} __packed;
-
-struct vmbus_channel_view_range_remove {
- struct vmbus_channel_message_header header;
- PHYSICAL_ADDRESS viewrange_base;
- u32 child_relid;
-} __packed;
-#endif
-
struct vmbus_channel_relid_released {
struct vmbus_channel_message_header header;
u32 child_relid;
@@ -713,6 +697,11 @@ struct vmbus_channel {
/* The corresponding CPUID in the guest */
u32 target_cpu;
/*
+ * State to manage the CPU affiliation of channels.
+ */
+ struct cpumask alloced_cpus_in_node;
+ int numa_node;
+ /*
* Support for sub-channels. For high performance devices,
* it will be useful to have multiple sub-channels to support
* a scalable communication infrastructure with the host.
@@ -745,6 +734,15 @@ struct vmbus_channel {
*/
struct list_head sc_list;
/*
+ * Current number of sub-channels.
+ */
+ int num_sc;
+ /*
+ * Number of a sub-channel (position within sc_list) which is supposed
+ * to be used as the next outgoing channel.
+ */
+ int next_oc;
+ /*
* The primary channel this sub-channel belongs to.
* This will be NULL for the primary channel.
*/
@@ -758,9 +756,6 @@ struct vmbus_channel {
* link up channels based on their CPU affinity.
*/
struct list_head percpu_list;
-
- int num_sc;
- int next_oc;
};
static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
@@ -1236,13 +1231,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
struct icmsg_negotiate *, u8 *, int,
int);
-int hv_kvp_init(struct hv_util_service *);
-void hv_kvp_deinit(void);
-void hv_kvp_onchannelcallback(void *);
-
-int hv_vss_init(struct hv_util_service *);
-void hv_vss_deinit(void);
-void hv_vss_onchannelcallback(void *);
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
extern struct resource hyperv_mmio;
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 0819d36a3a74..a16b1f9c1aca 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -7,6 +7,42 @@
struct mei_cl_device;
+typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
+ u32 events, void *context);
+
+/**
+ * struct mei_cl_device - MEI device handle
+ * An mei_cl_device pointer is returned from mei_add_device()
+ * and links MEI bus clients to their actual ME host client pointer.
+ * Drivers for MEI devices will get an mei_cl_device pointer
+ * when being probed and shall use it for doing ME bus I/O.
+ *
+ * @dev: linux driver model device pointer
+ * @me_cl: me client
+ * @cl: mei client
+ * @name: device name
+ * @event_work: async work to execute event callback
+ * @event_cb: Drivers register this callback to get asynchronous ME
+ * events (e.g. Rx buffer pending) notifications.
+ * @event_context: event callback run context
+ * @events: Events bitmask sent to the driver.
+ * @priv_data: client private data
+ */
+struct mei_cl_device {
+ struct device dev;
+
+ struct mei_me_client *me_cl;
+ struct mei_cl *cl;
+ char name[MEI_CL_NAME_SIZE];
+
+ struct work_struct event_work;
+ mei_cl_event_cb_t event_cb;
+ void *event_context;
+ unsigned long events;
+
+ void *priv_data;
+};
+
struct mei_cl_driver {
struct device_driver driver;
const char *name;
@@ -28,8 +64,6 @@ void mei_cl_driver_unregister(struct mei_cl_driver *driver);
ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length);
ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length);
-typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
- u32 events, void *context);
int mei_cl_register_event_cb(struct mei_cl_device *device,
mei_cl_event_cb_t read_cb, void *context);
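
A hedged sketch of the callback flow this API implies (my_ctx and the Rx handling are placeholders; mei_cl_recv() is the read primitive declared above):

    static void my_event_cb(struct mei_cl_device *device, u32 events,
                            void *context)
    {
        struct my_ctx *ctx = context;     /* driver-private state */
        u8 buf[64];
        ssize_t len;

        len = mei_cl_recv(device, buf, sizeof(buf));
        if (len > 0)
            ;                             /* hand buf/len to the protocol layer */
    }

    ret = mei_cl_register_event_cb(device, my_event_cb, ctx);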
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index f6722677e6d0..43db4faad143 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -121,6 +121,9 @@ struct arizona_pdata {
/** GPIO used for mic isolation with HPDET */
int hpdet_id_gpio;
+ /** Channel to use for headphone detection */
+ unsigned int hpdet_channel;
+
/** Extra debounce timeout used during initial mic detection (ms) */
int micd_detect_debounce;
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 02f97dc568ac..c2aa853fb412 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -368,4 +368,9 @@ struct axp20x_chrg_pdata {
int def_cv;
};
+struct axp288_extcon_pdata {
+ /* GPIO pin control to switch D+/D- lines b/w PMIC and SOC */
+ struct gpio_desc *gpio_mux_cntl;
+};
+
#endif /* __LINUX_MFD_AXP20X_H */
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 3bfd56778c29..048c270822f9 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -599,9 +599,22 @@ struct ipack_device_id {
#define MEI_CL_MODULE_PREFIX "mei:"
#define MEI_CL_NAME_SIZE 32
+#define MEI_CL_UUID_FMT "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
+#define MEI_CL_UUID_ARGS(_u) \
+ _u[0], _u[1], _u[2], _u[3], _u[4], _u[5], _u[6], _u[7], \
+ _u[8], _u[9], _u[10], _u[11], _u[12], _u[13], _u[14], _u[15]
+/**
+ * struct mei_cl_device_id - MEI client device identifier
+ * @name: helper name
+ * @uuid: client uuid
+ * @driver_info: information used by the driver.
+ *
+ * identifies mei client device by uuid and name
+ */
struct mei_cl_device_id {
char name[MEI_CL_NAME_SIZE];
+ uuid_le uuid;
kernel_ulong_t driver_info;
};
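
For logging, the format helpers pair up with the id table like this (a sketch; uuid_le exposes its raw bytes as the .b array):

    pr_debug("mei client %s " MEI_CL_UUID_FMT "\n",
             id->name, MEI_CL_UUID_ARGS(id->uuid.b));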
diff --git a/include/linux/parport.h b/include/linux/parport.h
index c22f12547324..58e3c64c6b49 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -13,6 +13,7 @@
#include <linux/wait.h>
#include <linux/irqreturn.h>
#include <linux/semaphore.h>
+#include <linux/device.h>
#include <asm/ptrace.h>
#include <uapi/linux/parport.h>
@@ -145,6 +146,8 @@ struct pardevice {
unsigned int flags;
struct pardevice *next;
struct pardevice *prev;
+ struct device dev;
+ bool devmodel;
struct parport_state *state; /* saved status over preemption */
wait_queue_head_t wait_q;
unsigned long int time;
@@ -156,6 +159,8 @@ struct pardevice {
void * sysctl_table;
};
+#define to_pardevice(n) container_of(n, struct pardevice, dev)
+
/* IEEE1284 information */
/* IEEE1284 phases. These are exposed to userland through ppdev IOCTL
@@ -195,7 +200,7 @@ struct parport {
* This may unfortulately be null if the
* port has a legacy driver.
*/
-
+ struct device bus_dev; /* to link with the bus */
struct parport *physport;
/* If this is a non-default mux
parport, i.e. we're a clone of a real
@@ -245,15 +250,26 @@ struct parport {
struct parport *slaves[3];
};
+#define to_parport_dev(n) container_of(n, struct parport, bus_dev)
+
#define DEFAULT_SPIN_TIME 500 /* us */
struct parport_driver {
const char *name;
void (*attach) (struct parport *);
void (*detach) (struct parport *);
+ void (*match_port)(struct parport *);
+ int (*probe)(struct pardevice *);
+ struct device_driver driver;
+ bool devmodel;
struct list_head list;
};
+#define to_parport_driver(n) container_of(n, struct parport_driver, driver)
+
+int parport_bus_init(void);
+void parport_bus_exit(void);
+
/* parport_register_port registers a new parallel port at the given
address (if one does not already exist) and returns a pointer to it.
This entails claiming the I/O region, IRQ and DMA. NULL is returned
@@ -272,10 +288,20 @@ void parport_announce_port (struct parport *port);
extern void parport_remove_port(struct parport *port);
/* Register a new high-level driver. */
-extern int parport_register_driver (struct parport_driver *);
+
+int __must_check __parport_register_driver(struct parport_driver *,
+ struct module *,
+ const char *mod_name);
+/*
+ * parport_register_driver must be a macro so that KBUILD_MODNAME can
+ * be expanded
+ */
+#define parport_register_driver(driver) \
+ __parport_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
/* Unregister a high-level driver. */
-extern void parport_unregister_driver (struct parport_driver *);
+void parport_unregister_driver(struct parport_driver *);
/* If parport_register_driver doesn't fit your needs, perhaps
* parport_find_xxx does. */
@@ -288,6 +314,15 @@ extern irqreturn_t parport_irq_handler(int irq, void *dev_id);
/* Reference counting for ports. */
extern struct parport *parport_get_port (struct parport *);
extern void parport_put_port (struct parport *);
+void parport_del_port(struct parport *);
+
+struct pardev_cb {
+ int (*preempt)(void *);
+ void (*wakeup)(void *);
+ void *private;
+ void (*irq_func)(void *);
+ unsigned int flags;
+};
/* parport_register_device declares that a device is connected to a
port, and tells the kernel all it needs to know.
@@ -301,6 +336,10 @@ struct pardevice *parport_register_device(struct parport *port,
void (*irq_func)(void *),
int flags, void *handle);
+struct pardevice *
+parport_register_dev_model(struct parport *port, const char *name,
+ const struct pardev_cb *par_dev_cb, int cnt);
+
/* parport_unregister unlinks a device from the chain. */
extern void parport_unregister_device(struct pardevice *dev);
diff --git a/include/linux/scif.h b/include/linux/scif.h
new file mode 100644
index 000000000000..44f4f3898bbe
--- /dev/null
+++ b/include/linux/scif.h
@@ -0,0 +1,993 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#ifndef __SCIF_H__
+#define __SCIF_H__
+
+#include <linux/types.h>
+#include <linux/poll.h>
+#include <linux/scif_ioctl.h>
+
+#define SCIF_ACCEPT_SYNC 1
+#define SCIF_SEND_BLOCK 1
+#define SCIF_RECV_BLOCK 1
+
+enum {
+ SCIF_PROT_READ = (1 << 0),
+ SCIF_PROT_WRITE = (1 << 1)
+};
+
+enum {
+ SCIF_MAP_FIXED = 0x10,
+ SCIF_MAP_KERNEL = 0x20,
+};
+
+enum {
+ SCIF_FENCE_INIT_SELF = (1 << 0),
+ SCIF_FENCE_INIT_PEER = (1 << 1),
+ SCIF_SIGNAL_LOCAL = (1 << 4),
+ SCIF_SIGNAL_REMOTE = (1 << 5)
+};
+
+enum {
+ SCIF_RMA_USECPU = (1 << 0),
+ SCIF_RMA_USECACHE = (1 << 1),
+ SCIF_RMA_SYNC = (1 << 2),
+ SCIF_RMA_ORDERED = (1 << 3)
+};
+
+/* End of SCIF Admin Reserved Ports */
+#define SCIF_ADMIN_PORT_END 1024
+
+/* End of SCIF Reserved Ports */
+#define SCIF_PORT_RSVD 1088
+
+typedef struct scif_endpt *scif_epd_t;
+
+#define SCIF_OPEN_FAILED ((scif_epd_t)-1)
+#define SCIF_REGISTER_FAILED ((off_t)-1)
+#define SCIF_MMAP_FAILED ((void *)-1)
+
+/**
+ * scif_open() - Create an endpoint
+ *
+ * Return:
+ * Upon successful completion, scif_open() returns an endpoint descriptor to
+ * be used in subsequent SCIF functions calls to refer to that endpoint;
+ * otherwise in user mode SCIF_OPEN_FAILED (that is ((scif_epd_t)-1)) is
+ * returned and errno is set to indicate the error; in kernel mode a NULL
+ * scif_epd_t is returned.
+ *
+ * Errors:
+ * ENOMEM - Insufficient kernel memory was available
+ */
+scif_epd_t scif_open(void);
+
+/**
+ * scif_bind() - Bind an endpoint to a port
+ * @epd: endpoint descriptor
+ * @pn: port number
+ *
+ * scif_bind() binds endpoint epd to port pn, where pn is a port number on the
+ * local node. If pn is zero, a port number greater than or equal to
+ * SCIF_PORT_RSVD is assigned and returned. Each endpoint may be bound to
+ * exactly one local port. Ports less than 1024, when requested, can only be
+ * bound by system (root) processes or by processes executed by privileged users.
+ *
+ * Return:
+ * Upon successful completion, scif_bind() returns the port number to which epd
+ * is bound; otherwise in user mode -1 is returned and errno is set to
+ * indicate the error; in kernel mode the negative of one of the following
+ * errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * EINVAL - the endpoint or the port is already bound
+ * EISCONN - The endpoint is already connected
+ * ENOSPC - No port number available for assignment
+ * EACCES - The port requested is protected and the user is not the superuser
+ */
+int scif_bind(scif_epd_t epd, u16 pn);
+
+/**
+ * scif_listen() - Listen for connections on an endpoint
+ * @epd: endpoint descriptor
+ * @backlog: maximum pending connection requests
+ *
+ * scif_listen() marks the endpoint epd as a listening endpoint - that is, as
+ * an endpoint that will be used to accept incoming connection requests. Once
+ * so marked, the endpoint is said to be in the listening state and may not be
+ * used as the endpoint of a connection.
+ *
+ * The endpoint, epd, must have been bound to a port.
+ *
+ * The backlog argument defines the maximum length to which the queue of
+ * pending connections for epd may grow. If a connection request arrives when
+ * the queue is full, the client may receive an error with an indication that
+ * the connection was refused.
+ *
+ * Return:
+ * Upon successful completion, scif_listen() returns 0; otherwise in user mode
+ * -1 is returned and errno is set to indicate the error; in kernel mode the
+ * negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * EINVAL - the endpoint is not bound to a port
+ * EISCONN - The endpoint is already connected or listening
+ */
+int scif_listen(scif_epd_t epd, int backlog);
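+
+/*
+ * Illustrative sketch (not part of this patch): a minimal kernel-mode
+ * listening endpoint built from scif_open()/scif_bind()/scif_listen().
+ * The port number 2000 and backlog of 16 are arbitrary example values
+ * and error handling is abbreviated.
+ *
+ *	scif_epd_t lepd = scif_open();
+ *
+ *	if (!lepd)
+ *		return -ENOMEM;
+ *	if (scif_bind(lepd, 2000) < 0 || scif_listen(lepd, 16) < 0) {
+ *		scif_close(lepd);
+ *		return -EINVAL;
+ *	}
+ */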
+
+/**
+ * scif_connect() - Initiate a connection on a port
+ * @epd: endpoint descriptor
+ * @dst: global id of port to which to connect
+ *
+ * The scif_connect() function requests the connection of endpoint epd to remote
+ * port dst. If the connection is successful, a peer endpoint, bound to dst, is
+ * created on node dst.node. On successful return, the connection is complete.
+ *
+ * If the endpoint epd has not already been bound to a port, scif_connect()
+ * will bind it to an unused local port.
+ *
+ * A connection is terminated when an endpoint of the connection is closed,
+ * either explicitly by scif_close(), or when a process that owns one of the
+ * endpoints of the connection is terminated.
+ *
+ * In user space, scif_connect() supports an asynchronous connection mode
+ * if the application has set the O_NONBLOCK flag on the endpoint via the
+ * fcntl() system call. Setting this flag means the calling process does not
+ * wait during scif_connect().
+ *
+ * Return:
+ * Upon successful completion, scif_connect() returns the port ID to which the
+ * endpoint, epd, is bound; otherwise in user mode -1 is returned and errno is
+ * set to indicate the error; in kernel mode the negative of one of the
+ * following errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNREFUSED - The destination was not listening for connections or refused
+ * the connection request
+ * EINVAL - dst.port is not a valid port ID
+ * EISCONN - The endpoint is already connected
+ * ENOMEM - No buffer space is available
+ * ENODEV - The destination node does not exist, or is lost, or existed but is
+ * no longer in the network (it may have crashed)
+ * ENOSPC - No port number available for assignment
+ * EOPNOTSUPP - The endpoint is listening and cannot be connected
+ */
+int scif_connect(scif_epd_t epd, struct scif_port_id *dst);
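+
+/*
+ * Illustrative sketch: a kernel-mode client connecting to the listener
+ * from the previous example. The destination node id and port are
+ * assumed example values.
+ *
+ *	struct scif_port_id dst = { .node = 1, .port = 2000 };
+ *	scif_epd_t epd = scif_open();
+ *	int ret;
+ *
+ *	if (!epd)
+ *		return -ENOMEM;
+ *	ret = scif_connect(epd, &dst);
+ *	if (ret < 0) {
+ *		scif_close(epd);
+ *		return ret;
+ *	}
+ */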
+
+/**
+ * scif_accept() - Accept a connection on an endpoint
+ * @epd: endpoint descriptor
+ * @peer: global id of port to which connected
+ * @newepd: new connected endpoint descriptor
+ * @flags: flags
+ *
+ * The scif_accept() call extracts the first connection request from the queue
+ * of pending connections for the port on which epd is listening. scif_accept()
+ * creates a new endpoint, bound to the same port as epd, and allocates a new
+ * SCIF endpoint descriptor, returned in newepd, for the endpoint. The new
+ * endpoint is connected to the endpoint through which the connection was
+ * requested. epd is unaffected by this call, and remains in the listening
+ * state.
+ *
+ * On successful return, peer holds the global port identifier (node id and
+ * local port number) of the port which requested the connection.
+ *
+ * A connection is terminated when an endpoint of the connection is closed,
+ * either explicitly by scif_close(), or when a process that owns one of the
+ * endpoints of the connection is terminated.
+ *
+ * The number of connections that can (subsequently) be accepted on epd is only
+ * limited by system resources (memory).
+ *
+ * The flags argument is formed by OR'ing together zero or more of the
+ * following values.
+ * SCIF_ACCEPT_SYNC - block until a connection request is presented. If
+ * SCIF_ACCEPT_SYNC is not in flags, and no pending
+ * connections are present on the queue, scif_accept()
+ * fails with an EAGAIN error
+ *
+ * In user mode, the select() and poll() functions can be used to determine
+ * when there is a connection request. In kernel mode, the scif_poll()
+ * function may be used for this purpose. A readable event will be delivered
+ * when a connection is requested.
+ *
+ * Return:
+ * Upon successful completion, scif_accept() returns 0; otherwise in user mode
+ * -1 is returned and errno is set to indicate the error; in kernel mode the
+ * negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EAGAIN - SCIF_ACCEPT_SYNC is not set and no connections are present to be
+ * accepted, or SCIF_ACCEPT_SYNC is not set and the remote node failed to
+ * complete its connection request
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * EINTR - Interrupted function
+ * EINVAL - epd is not a listening endpoint, or flags is invalid, or peer is
+ * NULL, or newepd is NULL
+ * ENODEV - The requesting node is lost, or existed but is no longer in the
+ * network (it may have crashed)
+ * ENOMEM - Not enough space
+ * ENOENT - Secondary part of epd registration failed
+ */
+int scif_accept(scif_epd_t epd, struct scif_port_id *peer, scif_epd_t
+ *newepd, int flags);
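+
+/*
+ * Illustrative sketch: accepting one connection on the listening
+ * endpoint from the scif_listen() example, blocking until a request
+ * arrives. On success newepd is a connected endpoint and peer holds
+ * the requester's node id and port.
+ *
+ *	struct scif_port_id peer;
+ *	scif_epd_t newepd;
+ *	int ret = scif_accept(lepd, &peer, &newepd, SCIF_ACCEPT_SYNC);
+ *
+ *	if (ret < 0)
+ *		return ret;
+ */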
+
+/**
+ * scif_close() - Close an endpoint
+ * @epd: endpoint descriptor
+ *
+ * scif_close() closes an endpoint and performs necessary teardown of
+ * facilities associated with that endpoint.
+ *
+ * If epd is a listening endpoint then it will no longer accept connection
+ * requests on the port to which it is bound. Any pending connection requests
+ * are rejected.
+ *
+ * If epd is a connected endpoint, then its peer endpoint is also closed. RMAs
+ * which are in-process through epd or its peer endpoint will complete before
+ * scif_close() returns. Registered windows of the local and peer endpoints are
+ * released as if scif_unregister() was called against each window.
+ *
+ * Closing a SCIF endpoint does not affect local registered memory mapped by
+ * a SCIF endpoint on a remote node. The local memory remains mapped by the
+ * peer SCIF endpoint until it is explicitly unmapped by the peer calling
+ * munmap(..).
+ *
+ * If the peer endpoint's receive queue is not empty at the time that epd is
+ * closed, then the peer endpoint can be passed as the endpoint parameter to
+ * scif_recv() until the receive queue is empty.
+ *
+ * epd is freed and may no longer be accessed.
+ *
+ * Return:
+ * Upon successful completion, scif_close() returns 0; otherwise in user mode
+ * -1 is returned and errno is set to indicate the error; in kernel mode the
+ * negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ */
+int scif_close(scif_epd_t epd);
+
+/**
+ * scif_send() - Send a message
+ * @epd: endpoint descriptor
+ * @msg: message buffer address
+ * @len: message length
+ * @flags: blocking mode flags
+ *
+ * scif_send() sends data to the peer of endpoint epd. Up to len bytes of data
+ * are copied from memory starting at address msg. On successful execution the
+ * return value of scif_send() is the number of bytes that were sent, and is
+ * zero if no bytes were sent because len was zero. scif_send() may be called
+ * only when the endpoint is in a connected state.
+ *
+ * If a scif_send() call is non-blocking, then it sends only those bytes which
+ * can be sent without waiting, up to a maximum of len bytes.
+ *
+ * If a scif_send() call is blocking, then it normally returns after sending
+ * all len bytes. If a blocking call is interrupted or the connection is
+ * reset, the call is considered successful if some bytes were sent or len is
+ * zero, otherwise the call is considered unsuccessful.
+ *
+ * In user mode, the select() and poll() functions can be used to determine
+ * when the send queue is not full. In kernel mode, the scif_poll() function
+ * may be used for this purpose.
+ *
+ * It is recommended that scif_send()/scif_recv() only be used for short
+ * control-type message communication between SCIF endpoints. The SCIF RMA
+ * APIs are expected to provide better performance for transfer sizes of
+ * 1024 bytes or longer for the current MIC hardware and software
+ * implementation.
+ *
+ * scif_send() will block until the entire message is sent if SCIF_SEND_BLOCK
+ * is passed as the flags argument.
+ *
+ * Return:
+ * Upon successful completion, scif_send() returns the number of bytes sent;
+ * otherwise in user mode -1 is returned and errno is set to indicate the
+ * error; in kernel mode the negative of one of the following errors is
+ * returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EFAULT - An invalid address was specified for a parameter
+ * EINVAL - flags is invalid, or len is negative
+ * ENODEV - The remote node is lost, or existed but is no longer in the
+ * network (it may have crashed)
+ * ENOMEM - Not enough space
+ * ENOTCONN - The endpoint is not connected
+ */
+int scif_send(scif_epd_t epd, void *msg, int len, int flags);
+
+/**
+ * scif_recv() - Receive a message
+ * @epd: endpoint descriptor
+ * @msg: message buffer address
+ * @len: message buffer length
+ * @flags: blocking mode flags
+ *
+ * scif_recv() receives data from the peer of endpoint epd. Up to len bytes of
+ * data are copied to memory starting at address msg. On successful execution
+ * the return value of scif_recv() is the number of bytes that were received,
+ * and is zero if no bytes were received because len was zero. scif_recv() may
+ * be called only when the endpoint is in a connected state.
+ *
+ * If a scif_recv() call is non-blocking, then it receives only those bytes
+ * which can be received without waiting, up to a maximum of len bytes.
+ *
+ * If a scif_recv() call is blocking, then it normally returns after receiving
+ * all len bytes. If the blocking call was interrupted due to a disconnection,
+ * subsequent calls to scif_recv() will copy all bytes received up to the point
+ * of disconnection.
+ *
+ * In user mode, the select() and poll() functions can be used to determine
+ * when data is available to be received. In kernel mode, the scif_poll()
+ * function may be used for this purpose.
+ *
+ * It is recommended that scif_send()/scif_recv() only be used for short
+ * control-type message communication between SCIF endpoints. The SCIF RMA
+ * APIs are expected to provide better performance for transfer sizes of
+ * 1024 bytes or longer for the current MIC hardware and software
+ * implementation.
+ *
+ * scif_recv() will block until the entire message is received if
+ * SCIF_RECV_BLOCK is passed as the flags argument.
+ *
+ * Return:
+ * Upon successful completion, scif_recv() returns the number of bytes
+ * received; otherwise in user mode -1 is returned and errno is set to
+ * indicate the error; in kernel mode the negative of one of the following
+ * errors is returned.
+ *
+ * Errors:
+ * EAGAIN - The destination node is returning from a low power state
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EFAULT - An invalid address was specified for a parameter
+ * EINVAL - flags is invalid, or len is negative
+ * ENODEV - The remote node is lost, or existed but is no longer in the
+ * network (it may have crashed)
+ * ENOMEM - Not enough space
+ * ENOTCONN - The endpoint is not connected
+ */
+int scif_recv(scif_epd_t epd, void *msg, int len, int flags);
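+
+/*
+ * Illustrative sketch: a blocking request/reply exchange of short
+ * control messages over a connected endpoint. struct ctl_msg is a
+ * hypothetical message type, not part of SCIF.
+ *
+ *	struct ctl_msg req = { 0 }, rsp;
+ *
+ *	if (scif_send(epd, &req, sizeof(req), SCIF_SEND_BLOCK) != sizeof(req))
+ *		return -EIO;
+ *	if (scif_recv(epd, &rsp, sizeof(rsp), SCIF_RECV_BLOCK) != sizeof(rsp))
+ *		return -EIO;
+ */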
+
+/**
+ * scif_register() - Mark a memory region for remote access.
+ * @epd: endpoint descriptor
+ * @addr: starting virtual address
+ * @len: length of range
+ * @offset: offset of window
+ * @prot_flags: read/write protection flags
+ * @map_flags: mapping flags
+ *
+ * The scif_register() function opens a window, a range of whole pages of the
+ * registered address space of the endpoint epd, starting at offset po and
+ * continuing for len bytes. The value of po, further described below, is a
+ * function of the parameters offset and len, and the value of map_flags. Each
+ * page of the window represents the physical memory page which backs the
+ * corresponding page of the range of virtual address pages starting at addr
+ * and continuing for len bytes. addr and len are constrained to be multiples
+ * of the page size. A successful scif_register() call returns po.
+ *
+ * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset
+ * exactly, and offset is constrained to be a multiple of the page size. The
+ * mapping established by scif_register() will not replace any existing
+ * registration; an error is returned if any page within the range [offset,
+ * offset + len - 1] intersects an existing window.
+ *
+ * When SCIF_MAP_FIXED is not set, the implementation uses offset in an
+ * implementation-defined manner to arrive at po. The po value so chosen will
+ * be an area of the registered address space that the implementation deems
+ * suitable for a mapping of len bytes. An offset value of 0 is interpreted as
+ * granting the implementation complete freedom in selecting po, subject to
+ * constraints described below. A non-zero value of offset is taken to be a
+ * suggestion of an offset near which the mapping should be placed. When the
+ * implementation selects a value for po, it does not replace any extant
+ * window. In all cases, po will be a multiple of the page size.
+ *
+ * The physical pages which are so represented by a window are available for
+ * access in calls to mmap(), scif_readfrom(), scif_writeto(),
+ * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the
+ * physical pages represented by the window will not be reused by the memory
+ * subsystem for any other purpose. Note that the same physical page may be
+ * represented by multiple windows.
+ *
+ * Subsequent operations which change the memory pages to which virtual
+ * addresses are mapped (such as mmap(), munmap()) have no effect on
+ * existing windows.
+ *
+ * If the process will fork(), it is recommended that the registered
+ * virtual address range be marked with MADV_DONTFORK. Doing so will prevent
+ * problems due to copy-on-write semantics.
+ *
+ * The prot_flags argument is formed by OR'ing together one or more of the
+ * following values.
+ * SCIF_PROT_READ - allow read operations from the window
+ * SCIF_PROT_WRITE - allow write operations to the window
+ *
+ * The map_flags argument can be set to SCIF_MAP_FIXED which interprets a
+ * fixed offset.
+ *
+ * Return:
+ * Upon successful completion, scif_register() returns the offset at which the
+ * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that
+ * is (off_t)-1) is returned and errno is set to indicate the error; in
+ * kernel mode the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags, and pages in the range
+ * [offset, offset + len - 1] are already registered
+ * EAGAIN - The mapping could not be performed due to lack of resources
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid
+ * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED is
+ * set in map_flags and offset is not a multiple of the page size, or addr is
+ * not a multiple of the page size, or len is not a multiple of the page size,
+ * or is 0, or offset is negative
+ * ENODEV - The remote node is lost, or existed but is no longer in the
+ * network (it may have crashed)
+ * ENOMEM - Not enough space
+ * ENOTCONN - The endpoint is not connected
+ */
+off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
+ int prot_flags, int map_flags);
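+
+/*
+ * Illustrative sketch: registering one page of kernel memory for remote
+ * access and letting the implementation choose the window offset (offset
+ * 0 without SCIF_MAP_FIXED). SCIF_MAP_KERNEL indicates addr is a kernel
+ * virtual address; in kernel mode a negative return is the negated errno.
+ *
+ *	void *buf = (void *)__get_free_page(GFP_KERNEL);
+ *	off_t po = scif_register(epd, buf, PAGE_SIZE, 0,
+ *				 SCIF_PROT_READ | SCIF_PROT_WRITE,
+ *				 SCIF_MAP_KERNEL);
+ *
+ *	if (po < 0)
+ *		return po;
+ */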
+
+/**
+ * scif_unregister() - Release a memory region from remote access.
+ * @epd: endpoint descriptor
+ * @offset: start of range to unregister
+ * @len: length of range to unregister
+ *
+ * The scif_unregister() function closes those previously registered windows
+ * which are entirely within the range [offset, offset + len - 1]. It is an
+ * error to specify a range which intersects only a subrange of a window.
+ *
+ * On a successful return, pages within the window may no longer be specified
+ * in calls to mmap(), scif_readfrom(), scif_writeto(), scif_vreadfrom(),
+ * scif_vwriteto(), scif_get_pages(), and scif_fence_signal(). The window,
+ * however, continues to exist until all previous references against it are
+ * removed. A window is referenced if there is a mapping to it created by
+ * mmap(), or if scif_get_pages() was called against the window
+ * (and the pages have not been returned via scif_put_pages()). A window is
+ * also referenced while an RMA, in which some range of the window is a source
+ * or destination, is in progress. Finally a window is referenced while some
+ * offset in that window was specified to scif_fence_signal(), and the RMAs
+ * marked by that call to scif_fence_signal() have not completed. While a
+ * window is in this state, its registered address space pages are not
+ * available for use in a new registered window.
+ *
+ * When all such references to the window have been removed, its references to
+ * all the physical pages which it represents are removed. Similarly, the
+ * registered address space pages of the window become available for
+ * registration in a new window.
+ *
+ * Return:
+ * Upon successful completion, scif_unregister() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned. In the event of an
+ * error, no windows are unregistered.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - the range [offset, offset + len - 1] intersects a subrange of a
+ * window, or offset is negative
+ * ENODEV - The remote node is lost, or existed but is no longer in the
+ * network (it may have crashed)
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid for the
+ * registered address space of epd
+ */
+int scif_unregister(scif_epd_t epd, off_t offset, size_t len);
+
+/**
+ * scif_readfrom() - Copy from a remote address space
+ * @epd: endpoint descriptor
+ * @loffset: offset in local registered address space to
+ * which to copy
+ * @len: length of range to copy
+ * @roffset: offset in remote registered address space
+ * from which to copy
+ * @rma_flags: transfer mode flags
+ *
+ * scif_readfrom() copies len bytes from the remote registered address space of
+ * the peer of endpoint epd, starting at the offset roffset to the local
+ * registered address space of epd, starting at the offset loffset.
+ *
+ * Each of the specified ranges [loffset, loffset + len - 1] and [roffset,
+ * roffset + len - 1] must be within some registered window or windows of the
+ * local and remote nodes. A range may intersect multiple registered windows,
+ * but only if those windows are contiguous in the registered address space.
+ *
+ * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
+ * programmed read/writes. Otherwise the data is copied using DMA. If
+ * rma_flags includes SCIF_RMA_SYNC, then scif_readfrom() will return after
+ * the transfer is complete. Otherwise, the transfer may be performed
+ * asynchronously. The order in which any two asynchronous RMA operations complete
+ * is non-deterministic. The synchronization functions, scif_fence_mark()/
+ * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
+ * the completion of asynchronous RMA operations on the same endpoint.
+ *
+ * The DMA transfer of individual bytes is not guaranteed to complete in
+ * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
+ * cacheline or partial cacheline of the source range will become visible on
+ * the destination node after all other transferred data in the source
+ * range has become visible on the destination node.
+ *
+ * The optimal DMA performance will likely be realized if both
+ * loffset and roffset are cacheline aligned (are a multiple of 64). Lower
+ * performance will likely be realized if loffset and roffset are not
+ * cacheline aligned but are separated by some multiple of 64. The lowest level
+ * of performance is likely if loffset and roffset are not separated by a
+ * multiple of 64.
+ *
+ * The rma_flags argument is formed by ORing together zero or more of the
+ * following values.
+ * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
+ * engine.
+ * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
+ * transfer has completed. Passing this flag causes the current
+ * implementation to busy wait and consume CPU cycles while the DMA
+ * transfer is in progress, which gives the best performance by
+ * avoiding interrupt latency.
+ * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
+ * the source range becomes visible on the destination node
+ * after all other transferred data in the source range has
+ * become visible on the destination
+ *
+ * Return:
+ * Upon successful completion, scif_readfrom() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EACCES - Attempt to write to a read-only range
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - rma_flags is invalid
+ * ENODEV - The remote node is lost, or existed but is no longer in the
+ * network (it may have crashed)
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered
+ * address space of epd, or the range [roffset, roffset + len - 1] is invalid
+ * for the registered address space of the peer of epd, or loffset or roffset
+ * is negative
+ */
+int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len, off_t
+ roffset, int rma_flags);
+
+/**
+ * scif_writeto() - Copy to a remote address space
+ * @epd: endpoint descriptor
+ * @loffset: offset in local registered address space
+ * from which to copy
+ * @len: length of range to copy
+ * @roffset: offset in remote registered address space to
+ * which to copy
+ * @rma_flags: transfer mode flags
+ *
+ * scif_writeto() copies len bytes from the local registered address space of
+ * epd, starting at the offset loffset to the remote registered address space
+ * of the peer of endpoint epd, starting at the offset roffset.
+ *
+ * Each of the specified ranges [loffset, loffset + len - 1] and [roffset,
+ * roffset + len - 1] must be within some registered window or windows of the
+ * local and remote nodes. A range may intersect multiple registered windows,
+ * but only if those windows are contiguous in the registered address space.
+ *
+ * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
+ * programmed read/writes. Otherwise the data is copied using DMA. If
+ * rma_flags includes SCIF_RMA_SYNC, then scif_writeto() will return after
+ * the transfer is complete. Otherwise, the transfer may be performed
+ * asynchronously. The order in which any two asynchronous RMA operations complete
+ * is non-deterministic. The synchronization functions, scif_fence_mark()/
+ * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
+ * the completion of asynchronous RMA operations on the same endpoint.
+ *
+ * The DMA transfer of individual bytes is not guaranteed to complete in
+ * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
+ * cacheline or partial cacheline of the source range will become visible on
+ * the destination node after all other transferred data in the source
+ * range has become visible on the destination node.
+ *
+ * The optimal DMA performance will likely be realized if both
+ * loffset and roffset are cacheline aligned (are a multiple of 64). Lower
+ * performance will likely be realized if loffset and roffset are not cacheline
+ * aligned but are separated by some multiple of 64. The lowest level of
+ * performance is likely if loffset and roffset are not separated by a multiple
+ * of 64.
+ *
+ * The rma_flags argument is formed by ORing together zero or more of the
+ * following values.
+ * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
+ * engine.
+ * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
+ * transfer has completed. Passing this flag causes the current
+ * implementation to busy wait and consume CPU cycles while the DMA
+ * transfer is in progress, which gives the best performance by
+ * avoiding interrupt latency.
+ * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
+ * the source range becomes visible on the destination node
+ * after all other transferred data in the source range has
+ * become visible on the destination
+ *
+ * Return:
+ * Upon successful completion, scif_writeto() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EACCES - Attempt to write to a read-only range
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - rma_flags is invalid
+ * ENODEV - The remote node is lost, or existed but is no longer in the
+ * network (it may have crashed)
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered
+ * address space of epd, or the range [roffset, roffset + len - 1] is invalid
+ * for the registered address space of the peer of epd, or loffset or roffset
+ * is negative
+ */
+int scif_writeto(scif_epd_t epd, off_t loffset, size_t len, off_t
+ roffset, int rma_flags);
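+
+/*
+ * Illustrative sketch: a synchronous, ordered RMA write of one page from
+ * the local registered window to the peer's registered window. lpo and
+ * rpo are assumed offsets returned by earlier scif_register() calls on
+ * the local and remote endpoints.
+ *
+ *	int ret = scif_writeto(epd, lpo, PAGE_SIZE, rpo,
+ *			       SCIF_RMA_SYNC | SCIF_RMA_ORDERED);
+ *
+ *	if (ret < 0)
+ *		return ret;
+ */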
+
+/**
+ * scif_vreadfrom() - Copy from a remote address space
+ * @epd: endpoint descriptor
+ * @addr: address to which to copy
+ * @len: length of range to copy
+ * @roffset: offset in remote registered address space
+ * from which to copy
+ * @rma_flags: transfer mode flags
+ *
+ * scif_vreadfrom() copies len bytes from the remote registered address
+ * space of the peer of endpoint epd, starting at the offset roffset, to local
+ * memory, starting at addr.
+ *
+ * The specified range [roffset, roffset + len - 1] must be within some
+ * registered window or windows of the remote nodes. The range may
+ * intersect multiple registered windows, but only if those windows are
+ * contiguous in the registered address space.
+ *
+ * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
+ * programmed read/writes. Otherwise the data is copied using DMA. If
+ * rma_flags includes SCIF_RMA_SYNC, then scif_vreadfrom() will return after
+ * the transfer is complete. Otherwise, the transfer may be performed
+ * asynchronously. The order in which any two asynchronous RMA operations complete
+ * is non-deterministic. The synchronization functions, scif_fence_mark()/
+ * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
+ * the completion of asynchronous RMA operations on the same endpoint.
+ *
+ * The DMA transfer of individual bytes is not guaranteed to complete in
+ * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
+ * cacheline or partial cacheline of the source range will become visible on
+ * the destination node after all other transferred data in the source
+ * range has become visible on the destination node.
+ *
+ * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back
+ * the specified local memory range may remain in a pinned state even after
+ * the specified transfer completes. This may reduce overhead if some or all of
+ * the same virtual address range is referenced in a subsequent call of
+ * scif_vreadfrom() or scif_vwriteto().
+ *
+ * The optimal DMA performance will likely be realized if both
+ * addr and roffset are cacheline aligned (are a multiple of 64). Lower
+ * performance will likely be realized if addr and roffset are not
+ * cacheline aligned but are separated by some multiple of 64. The lowest level
+ * of performance is likely if addr and roffset are not separated by a
+ * multiple of 64.
+ *
+ * The rma_flags argument is formed by ORing together zero or more of the
+ * following values.
+ * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
+ * engine.
+ * SCIF_RMA_USECACHE - enable registration caching
+ * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
+ * transfer has completed. Passing this flag causes the current
+ * implementation to busy wait and consume CPU cycles while the DMA
+ * transfer is in progress, which gives the best performance by
+ * avoiding interrupt latency.
+ * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
+ * the source range becomes visible on the destination node
+ * after all other transferred data in the source range has
+ * become visible on the destination
+ *
+ * Return:
+ * Upon successful completion, scif_vreadfrom() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EACCES - Attempt to write to a read-only range
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid
+ * EINVAL - rma_flags is invalid
+ * ENODEV - The remote node is lost, or existed but is no longer in the
+ * network (it may have crashed)
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the
+ * registered address space of epd
+ */
+int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, off_t roffset,
+ int rma_flags);
+
+/**
+ * scif_vwriteto() - Copy to a remote address space
+ * @epd: endpoint descriptor
+ * @addr: address from which to copy
+ * @len: length of range to copy
+ * @roffset: offset in remote registered address space to
+ * which to copy
+ * @rma_flags: transfer mode flags
+ *
+ * scif_vwriteto() copies len bytes from the local memory, starting at addr, to
+ * the remote registered address space of the peer of endpoint epd, starting at
+ * the offset roffset.
+ *
+ * The specified range [roffset, roffset + len - 1] must be within some
+ * registered window or windows of the remote nodes. The range may intersect
+ * multiple registered windows, but only if those windows are contiguous in the
+ * registered address space.
+ *
+ * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
+ * programmed read/writes. Otherwise the data is copied using DMA. If
+ * rma_flags includes SCIF_RMA_SYNC, then scif_vwriteto() will return after
+ * the transfer is complete. Otherwise, the transfer may be performed
+ * asynchronously. The order in which any two asynchronous RMA operations complete
+ * is non-deterministic. The synchronization functions, scif_fence_mark()/
+ * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
+ * the completion of asynchronous RMA operations on the same endpoint.
+ *
+ * The DMA transfer of individual bytes is not guaranteed to complete in
+ * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
+ * cacheline or partial cacheline of the source range will become visible on
+ * the destination node after all other transferred data in the source
+ * range has become visible on the destination node.
+ *
+ * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back
+ * the specified local memory range may remain in a pinned state even after
+ * the specified transfer completes. This may reduce overhead if some or all of
+ * the same virtual address range is referenced in a subsequent call of
+ * scif_vreadfrom() or scif_vwriteto().
+ *
+ * The optimal DMA performance will likely be realized if both
+ * addr and roffset are cacheline aligned (are a multiple of 64). Lower
+ * performance will likely be realized if addr and roffset are not cacheline
+ * aligned but are separated by some multiple of 64. The lowest level of
+ * performance is likely if addr and roffset are not separated by a multiple of
+ * 64.
+ *
+ * The rma_flags argument is formed by ORing together zero or more of the
+ * following values.
+ * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
+ * engine.
+ * SCIF_RMA_USECACHE - allow registration caching
+ * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
+ * transfer has completed. Passing this flag causes the current
+ * implementation to busy wait and consume CPU cycles while the DMA
+ * transfer is in progress, which gives the best performance by
+ * avoiding interrupt latency.
+ * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
+ * the source range becomes visible on the destination node
+ * after all other transferred data in the source range has
+ * become visible on the destination
+ *
+ * Return:
+ * Upon successful completion, scif_vwriteto() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EACCES - Attempt to write to a read-only range
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid
+ * EINVAL - rma_flags is invalid
+ * ENODEV - The remote node is lost, or existed but is no longer in the
+ * network (it may have crashed)
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the
+ * registered address space of epd
+ */
+int scif_vwriteto(scif_epd_t epd, void *addr, size_t len, off_t roffset,
+ int rma_flags);
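+
+/*
+ * Illustrative sketch: pushing an unregistered local buffer directly
+ * into the peer's registered window. SCIF_RMA_USECACHE keeps the local
+ * pages pinned so a later transfer over the same virtual range can avoid
+ * repeated pinning overhead; buf and rpo are assumed values.
+ *
+ *	int ret = scif_vwriteto(epd, buf, PAGE_SIZE, rpo,
+ *				SCIF_RMA_USECACHE | SCIF_RMA_SYNC);
+ *
+ *	if (ret < 0)
+ *		return ret;
+ */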
+
+/**
+ * scif_fence_mark() - Mark previously issued RMAs
+ * @epd: endpoint descriptor
+ * @flags: control flags
+ * @mark: marked value returned as output.
+ *
+ * scif_fence_mark() returns after marking the current set of all uncompleted
+ * RMAs initiated through the endpoint epd or the current set of all
+ * uncompleted RMAs initiated through the peer of endpoint epd. The RMAs are
+ * marked with a value returned at mark. The application may subsequently call
+ * scif_fence_wait(), passing the value returned at mark, to await completion
+ * of all RMAs so marked.
+ *
+ * The flags argument has exactly one of the following values.
+ * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint
+ * epd are marked
+ * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer
+ * of endpoint epd are marked
+ *
+ * Return:
+ * Upon successful completion, scif_fence_mark() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - flags is invalid
+ * ENODEV - The remote node is lost, or existed but is no longer in the
+ * network (it may have crashed)
+ * ENOTCONN - The endpoint is not connected
+ * ENOMEM - Insufficient kernel memory was available
+ */
+int scif_fence_mark(scif_epd_t epd, int flags, int *mark);
+
+/**
+ * scif_fence_wait() - Wait for completion of marked RMAs
+ * @epd: endpoint descriptor
+ * @mark: mark request
+ *
+ * scif_fence_wait() returns after all RMAs marked with mark have completed.
+ * The value passed in mark must have been obtained in a previous call to
+ * scif_fence_mark().
+ *
+ * Return:
+ * Upon successful completion, scif_fence_wait() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * ENODEV - The remote node is lost, or existed but is no longer in the
+ * network (it may have crashed)
+ * ENOTCONN - The endpoint is not connected
+ * ENOMEM - Insufficient kernel memory was available
+ */
+int scif_fence_wait(scif_epd_t epd, int mark);
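+
+/*
+ * Illustrative sketch: fencing a batch of asynchronous RMAs issued on
+ * this endpoint. scif_fence_mark() tags the currently outstanding
+ * transfers and scif_fence_wait() blocks until every tagged transfer
+ * has completed.
+ *
+ *	int mark, ret;
+ *
+ *	ret = scif_fence_mark(epd, SCIF_FENCE_INIT_SELF, &mark);
+ *	if (!ret)
+ *		ret = scif_fence_wait(epd, mark);
+ */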
+
+/**
+ * scif_fence_signal() - Request a memory update on completion of RMAs
+ * @epd: endpoint descriptor
+ * @loff: local offset
+ * @lval: local value to write to loff
+ * @roff: remote offset
+ * @rval: remote value to write to roff
+ * @flags: flags
+ *
+ * scif_fence_signal() returns after marking the current set of all uncompleted
+ * RMAs initiated through the endpoint epd or marking the current set of all
+ * uncompleted RMAs initiated through the peer of endpoint epd.
+ *
+ * If flags includes SCIF_SIGNAL_LOCAL, then on completion of the RMAs in the
+ * marked set, lval is written to memory at the address corresponding to offset
+ * loff in the local registered address space of epd. loff must be within a
+ * registered window. If flags includes SCIF_SIGNAL_REMOTE, then on completion
+ * of the RMAs in the marked set, rval is written to memory at the address
+ * corresponding to offset roff in the remote registered address space of epd.
+ * roff must be within a remote registered window of the peer of epd. Note
+ * that any specified offset must be DWORD (4 byte / 32 bit) aligned.
+ *
+ * The flags argument is formed by OR'ing together the following.
+ * Exactly one of the following values.
+ * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint
+ * epd are marked
+ * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer
+ * of endpoint epd are marked
+ * One or more of the following values.
+ * SCIF_SIGNAL_LOCAL - On completion of the marked set of RMAs, write lval to
+ * memory at the address corresponding to offset loff in the local
+ * registered address space of epd.
+ * SCIF_SIGNAL_REMOTE - On completion of the marked set of RMAs, write rval to
+ * memory at the address corresponding to offset roff in the remote
+ * registered address space of epd.
+ *
+ * Return:
+ * Upon successful completion, scif_fence_signal() returns 0; otherwise in
+ * user mode -1 is returned and errno is set to indicate the error; in kernel
+ * mode the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - flags is invalid, or loff or roff are not DWORD aligned
+ * ENODEV - The remote node is lost, or existed but is no longer in the
+ * network (it may have crashed)
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - loff is invalid for the registered address space of epd, or roff is
+ * invalid for the registered address space of the peer of epd
+ */
+int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval, off_t roff,
+ u64 rval, int flags);
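+
+/*
+ * Illustrative sketch: instead of blocking, request that DWORD-aligned
+ * completion flags in the local and remote registered windows be written
+ * once the locally initiated RMAs finish. loff and roff are assumed
+ * registered offsets.
+ *
+ *	int ret = scif_fence_signal(epd, loff, 1, roff, 1,
+ *				    SCIF_FENCE_INIT_SELF |
+ *				    SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE);
+ */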
+
+/**
+ * scif_get_node_ids() - Return information about online nodes
+ * @nodes: array in which to return online node IDs
+ * @len: number of entries in the nodes array
+ * @self: address to place the node ID of the local node
+ *
+ * scif_get_node_ids() fills in the nodes array with up to len node IDs of the
+ * nodes in the SCIF network. If there is not enough space in nodes, as
+ * indicated by the len parameter, only len node IDs are returned in nodes. The
+ * return value of scif_get_node_ids() is the total number of nodes currently in
+ * the SCIF network. By checking the return value against the len parameter,
+ * the user may determine if enough space for nodes was allocated.
+ *
+ * The node ID of the local node is returned at self.
+ *
+ * Return:
+ * Upon successful completion, scif_get_node_ids() returns the actual number of
+ * online nodes in the SCIF network including 'self'; otherwise in user mode
+ * -1 is returned and errno is set to indicate the error; in kernel mode no
+ * errors are returned.
+ *
+ * Errors:
+ * EFAULT - Bad address
+ */
+int scif_get_node_ids(u16 *nodes, int len, u16 *self);
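+
+/*
+ * Illustrative sketch: sizing the nodes array from the return value. A
+ * first call can use a small array; if the returned total exceeds len,
+ * the caller may retry with a larger array.
+ *
+ *	u16 nodes[8], self;
+ *	int total = scif_get_node_ids(nodes, 8, &self);
+ *
+ *	... total is the network size; min(total, 8) IDs are in nodes ...
+ */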
+
+#endif /* __SCIF_H__ */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 4460e5820b0e..baa7c80da6af 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -352,6 +352,7 @@ header-y += rtc.h
header-y += rtnetlink.h
header-y += scc.h
header-y += sched.h
+header-y += scif_ioctl.h
header-y += screen_info.h
header-y += sctp.h
header-y += sdla.h
diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h
index bb1cb73c927a..e4c0a35d6417 100644
--- a/include/uapi/linux/hyperv.h
+++ b/include/uapi/linux/hyperv.h
@@ -45,6 +45,11 @@
#define VSS_OP_REGISTER 128
+/*
+ * Daemon code with full handshake support.
+ */
+#define VSS_OP_REGISTER1 129
+
enum hv_vss_op {
VSS_OP_CREATE = 0,
VSS_OP_DELETE,
@@ -100,7 +105,8 @@ struct hv_vss_msg {
*/
#define FCOPY_VERSION_0 0
-#define FCOPY_CURRENT_VERSION FCOPY_VERSION_0
+#define FCOPY_VERSION_1 1
+#define FCOPY_CURRENT_VERSION FCOPY_VERSION_1
#define W_MAX_PATH 260
enum hv_fcopy_op {
diff --git a/include/uapi/linux/mic_common.h b/include/uapi/linux/mic_common.h
index 6eb40244e019..302a2ced373c 100644
--- a/include/uapi/linux/mic_common.h
+++ b/include/uapi/linux/mic_common.h
@@ -80,6 +80,12 @@ struct mic_device_ctrl {
* @h2c_config_db: Host to Card Virtio config doorbell set by card
* @shutdown_status: Card shutdown status set by card
* @shutdown_card: Set to 1 by the host when a card shutdown is initiated
+ * @tot_nodes: Total number of nodes in the SCIF network
+ * @node_id: Unique id of the node
+ * @h2c_scif_db: Host to card SCIF doorbell set by card
+ * @c2h_scif_db: Card to host SCIF doorbell set by host
+ * @scif_host_dma_addr: SCIF host queue pair DMA address
+ * @scif_card_dma_addr: SCIF card queue pair DMA address
*/
struct mic_bootparam {
__le32 magic;
@@ -88,6 +94,12 @@ struct mic_bootparam {
__s8 h2c_config_db;
__u8 shutdown_status;
__u8 shutdown_card;
+ __u8 tot_nodes;
+ __u8 node_id;
+ __u8 h2c_scif_db;
+ __u8 c2h_scif_db;
+ __u64 scif_host_dma_addr;
+ __u64 scif_card_dma_addr;
} __attribute__ ((aligned(8)));
/**
diff --git a/include/uapi/linux/scif_ioctl.h b/include/uapi/linux/scif_ioctl.h
new file mode 100644
index 000000000000..4a94d917cf99
--- /dev/null
+++ b/include/uapi/linux/scif_ioctl.h
@@ -0,0 +1,130 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel SCIF driver.
+ *
+ */
+/*
+ * -----------------------------------------
+ * SCIF IOCTL interface information
+ * -----------------------------------------
+ */
+#ifndef SCIF_IOCTL_H
+#define SCIF_IOCTL_H
+
+#include <linux/types.h>
+
+/**
+ * struct scif_port_id - SCIF port information
+ * @node: node on which port resides
+ * @port: local port number
+ */
+struct scif_port_id {
+ __u16 node;
+ __u16 port;
+};
+
+/**
+ * struct scifioctl_connect - used for SCIF_CONNECT IOCTL
+ * @self: used to read back the assigned port_id
+ * @peer: destination node and port to connect to
+ */
+struct scifioctl_connect {
+ struct scif_port_id self;
+ struct scif_port_id peer;
+};
+
+/**
+ * struct scifioctl_accept - used for SCIF_ACCEPTREQ IOCTL
+ * @flags: flags
+ * @peer: global id of peer endpoint
+ * @endpt: new connected endpoint descriptor
+ */
+struct scifioctl_accept {
+ __s32 flags;
+ struct scif_port_id peer;
+ __u64 endpt;
+};
+
+/**
+ * struct scifioctl_msg - used for SCIF_SEND/SCIF_RECV IOCTL
+ * @msg: message buffer address
+ * @len: message length
+ * @flags: flags
+ * @out_len: number of bytes sent/received
+ */
+struct scifioctl_msg {
+ __u64 msg;
+ __s32 len;
+ __s32 flags;
+ __s32 out_len;
+};
+
+/**
+ * struct scifioctl_node_ids - used for SCIF_GET_NODEIDS IOCTL
+ * @nodes: pointer to an array of node_ids
+ * @self: ID of the current node
+ * @len: length of array
+ */
+struct scifioctl_node_ids {
+ __u64 nodes;
+ __u64 self;
+ __s32 len;
+};
+
+#define SCIF_BIND _IOWR('s', 1, __u64)
+#define SCIF_LISTEN _IOW('s', 2, __s32)
+#define SCIF_CONNECT _IOWR('s', 3, struct scifioctl_connect)
+#define SCIF_ACCEPTREQ _IOWR('s', 4, struct scifioctl_accept)
+#define SCIF_ACCEPTREG _IOWR('s', 5, __u64)
+#define SCIF_SEND _IOWR('s', 6, struct scifioctl_msg)
+#define SCIF_RECV _IOWR('s', 7, struct scifioctl_msg)
+#define SCIF_GET_NODEIDS _IOWR('s', 14, struct scifioctl_node_ids)
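+
+/*
+ * Illustrative sketch (user space; assumes the SCIF character device is
+ * exposed as /dev/scif): querying online node IDs directly with
+ * SCIF_GET_NODEIDS rather than through a SCIF library.
+ *
+ *	__u16 nodes[32], self;
+ *	struct scifioctl_node_ids ids = {
+ *		.nodes = (__u64)(uintptr_t)nodes,
+ *		.self = (__u64)(uintptr_t)&self,
+ *		.len = 32,
+ *	};
+ *	int fd = open("/dev/scif", O_RDWR);
+ *
+ *	if (fd >= 0 && ioctl(fd, SCIF_GET_NODEIDS, &ids) == 0)
+ *		... up to ids.len node IDs are in nodes and self is set ...
+ */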
+
+#endif /* SCIF_IOCTL_H */
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 26cc6029b280..6d940c72b5fc 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -140,8 +140,12 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
/* Error: request to write beyond destination buffer */
if (cpy > oend)
goto _output_error;
+#if LZ4_ARCH64
+ if ((ref + COPYLENGTH) > oend)
+#else
if ((ref + COPYLENGTH) > oend ||
(op + COPYLENGTH) > oend)
+#endif
goto _output_error;
LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
while (op < cpy)
@@ -266,7 +270,13 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
if (cpy > oend - COPYLENGTH) {
if (cpy > oend)
goto _output_error; /* write outside of buf */
-
+#if LZ4_ARCH64
+ if ((ref + COPYLENGTH) > oend)
+#else
+ if ((ref + COPYLENGTH) > oend ||
+ (op + COPYLENGTH) > oend)
+#endif
+ goto _output_error;
LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
while (op < cpy)
*op++ = *ref++;
diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py
index 74086a583d8d..c89fdcaf06e8 100755
--- a/scripts/checkkconfigsymbols.py
+++ b/scripts/checkkconfigsymbols.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python2
"""Find Kconfig symbols that are referenced but not defined."""
@@ -58,6 +58,12 @@ def parse_options():
"input format bases on Git log's "
"\'commmit1..commit2\'.")
+ parser.add_option('-i', '--ignore', dest='ignore', action='store',
+ default="",
+ help="Ignore files matching this pattern. Note that "
+ "the pattern needs to be a Python regex. To "
+ "ignore defconfigs, specify -i '.*defconfig'.")
+
parser.add_option('', '--force', dest='force', action='store_true',
default=False,
help="Reset current Git tree even when it's dirty.")
@@ -80,6 +86,12 @@ def parse_options():
"'--force' if you\nwant to ignore this warning and "
"continue.")
+ if opts.ignore:
+ try:
+ re.match(opts.ignore, "this/is/just/a/test.c")
+ except re.error:
+ sys.exit("Please specify a valid Python regex.")
+
return opts
@@ -105,11 +117,11 @@ def main():
# get undefined items before the commit
execute("git reset --hard %s" % commit_a)
- undefined_a = check_symbols()
+ undefined_a = check_symbols(opts.ignore)
# get undefined items for the commit
execute("git reset --hard %s" % commit_b)
- undefined_b = check_symbols()
+ undefined_b = check_symbols(opts.ignore)
# report cases that are present for the commit but not before
for feature in sorted(undefined_b):
@@ -129,7 +141,7 @@ def main():
# default to check the entire tree
else:
- undefined = check_symbols()
+ undefined = check_symbols(opts.ignore)
for feature in sorted(undefined):
files = sorted(undefined.get(feature))
print "%s\t%s" % (feature, ", ".join(files))
@@ -160,9 +172,10 @@ def get_head():
return stdout.strip('\n')
-def check_symbols():
+def check_symbols(ignore):
"""Find undefined Kconfig symbols and return a dict with the symbol as key
- and a list of referencing files as value."""
+ and a list of referencing files as value. Files matching %ignore are not
+ checked for undefined symbols."""
source_files = []
kconfig_files = []
defined_features = set()
@@ -185,10 +198,17 @@ def check_symbols():
source_files.append(gitfile)
for sfile in source_files:
+ if ignore and re.match(ignore, sfile):
+ # do not check files matching %ignore
+ continue
parse_source_file(sfile, referenced_features)
for kfile in kconfig_files:
- parse_kconfig_file(kfile, defined_features, referenced_features)
+ if ignore and re.match(ignore, kfile):
+ # do not collect references for files matching %ignore
+ parse_kconfig_file(kfile, defined_features, dict())
+ else:
+ parse_kconfig_file(kfile, defined_features, referenced_features)
undefined = {} # {feature: [files]}
for feature in sorted(referenced_features):
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index fce36d0f6898..091f6290a651 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -182,6 +182,7 @@ int main(void)
DEVID(mei_cl_device_id);
DEVID_FIELD(mei_cl_device_id, name);
+ DEVID_FIELD(mei_cl_device_id, uuid);
DEVID(rio_device_id);
DEVID_FIELD(rio_device_id, did);
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 78691d51a479..718b2a29bd43 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -34,6 +34,9 @@ typedef Elf64_Addr kernel_ulong_t;
typedef uint32_t __u32;
typedef uint16_t __u16;
typedef unsigned char __u8;
+typedef struct {
+ __u8 b[16];
+} uuid_le;
/* Big exception to the "don't include kernel headers into userspace, which
* even potentially has different endianness and word sizes, since
@@ -131,6 +134,15 @@ static inline void add_wildcard(char *str)
strcat(str + len, "*");
}
+static inline void add_uuid(char *str, uuid_le uuid)
+{
+ int len = strlen(str);
+ int i;
+
+ for (i = 0; i < 16; i++)
+ sprintf(str + len + (i << 1), "%02x", uuid.b[i]);
+}
+
/**
* Check that sizeof(device_id type) are consistent with size of section
* in .o file. If in-consistent then userspace and kernel does not agree
@@ -1160,13 +1172,18 @@ static int do_cpu_entry(const char *filename, void *symval, char *alias)
}
ADD_TO_DEVTABLE("cpu", cpu_feature, do_cpu_entry);
-/* Looks like: mei:S */
+/* Looks like: mei:S:uuid */
static int do_mei_entry(const char *filename, void *symval,
char *alias)
{
DEF_FIELD_ADDR(symval, mei_cl_device_id, name);
+ DEF_FIELD_ADDR(symval, mei_cl_device_id, uuid);
+
+ sprintf(alias, MEI_CL_MODULE_PREFIX);
+ sprintf(alias + strlen(alias), "%s:", (*name)[0] ? *name : "*");
+ add_uuid(alias, *uuid);
- sprintf(alias, MEI_CL_MODULE_PREFIX "%s", *name);
+ strcat(alias, ":*");
return 1;
}
diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
index 9445d8f264a4..5480e4e424eb 100644
--- a/tools/hv/hv_fcopy_daemon.c
+++ b/tools/hv/hv_fcopy_daemon.c
@@ -137,6 +137,8 @@ int main(int argc, char *argv[])
int version = FCOPY_CURRENT_VERSION;
char *buffer[4096 * 2];
struct hv_fcopy_hdr *in_msg;
+ int in_handshake = 1;
+ __u32 kernel_modver;
static struct option long_options[] = {
{"help", no_argument, 0, 'h' },
@@ -191,6 +193,19 @@ int main(int argc, char *argv[])
syslog(LOG_ERR, "pread failed: %s", strerror(errno));
exit(EXIT_FAILURE);
}
+
+ if (in_handshake) {
+ if (len != sizeof(kernel_modver)) {
+ syslog(LOG_ERR, "invalid version negotiation");
+ exit(EXIT_FAILURE);
+ }
+ kernel_modver = *(__u32 *)buffer;
+ in_handshake = 0;
+ syslog(LOG_INFO, "HV_FCOPY: kernel module version: %d",
+ kernel_modver);
+ continue;
+ }
+
in_msg = (struct hv_fcopy_hdr *)buffer;
switch (in_msg->operation) {
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index 408bb076a234..0d9f48ec42bb 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -33,7 +33,6 @@
#include <ctype.h>
#include <errno.h>
#include <arpa/inet.h>
-#include <linux/connector.h>
#include <linux/hyperv.h>
#include <linux/netlink.h>
#include <ifaddrs.h>
@@ -79,7 +78,6 @@ enum {
DNS
};
-static struct sockaddr_nl addr;
static int in_hand_shake = 1;
static char *os_name = "";
@@ -1387,34 +1385,6 @@ kvp_get_domain_name(char *buffer, int length)
freeaddrinfo(info);
}
-static int
-netlink_send(int fd, struct cn_msg *msg)
-{
- struct nlmsghdr nlh = { .nlmsg_type = NLMSG_DONE };
- unsigned int size;
- struct msghdr message;
- struct iovec iov[2];
-
- size = sizeof(struct cn_msg) + msg->len;
-
- nlh.nlmsg_pid = getpid();
- nlh.nlmsg_len = NLMSG_LENGTH(size);
-
- iov[0].iov_base = &nlh;
- iov[0].iov_len = sizeof(nlh);
-
- iov[1].iov_base = msg;
- iov[1].iov_len = size;
-
- memset(&message, 0, sizeof(message));
- message.msg_name = &addr;
- message.msg_namelen = sizeof(addr);
- message.msg_iov = iov;
- message.msg_iovlen = 2;
-
- return sendmsg(fd, &message, 0);
-}
-
void print_usage(char *argv[])
{
fprintf(stderr, "Usage: %s [options]\n"
@@ -1425,22 +1395,17 @@ void print_usage(char *argv[])
int main(int argc, char *argv[])
{
- int fd, len, nl_group;
+ int kvp_fd, len;
int error;
- struct cn_msg *message;
struct pollfd pfd;
- struct nlmsghdr *incoming_msg;
- struct cn_msg *incoming_cn_msg;
- struct hv_kvp_msg *hv_msg;
- char *p;
+ char *p;
+ struct hv_kvp_msg hv_msg[1];
char *key_value;
char *key_name;
int op;
int pool;
char *if_name;
struct hv_kvp_ipaddr_value *kvp_ip_val;
- char *kvp_recv_buffer;
- size_t kvp_recv_buffer_len;
int daemonize = 1, long_index = 0, opt;
static struct option long_options[] = {
@@ -1468,12 +1433,14 @@ int main(int argc, char *argv[])
openlog("KVP", 0, LOG_USER);
syslog(LOG_INFO, "KVP starting; pid is:%d", getpid());
- kvp_recv_buffer_len = NLMSG_LENGTH(0) + sizeof(struct cn_msg) + sizeof(struct hv_kvp_msg);
- kvp_recv_buffer = calloc(1, kvp_recv_buffer_len);
- if (!kvp_recv_buffer) {
- syslog(LOG_ERR, "Failed to allocate netlink buffer");
+ kvp_fd = open("/dev/vmbus/hv_kvp", O_RDWR);
+
+ if (kvp_fd < 0) {
+ syslog(LOG_ERR, "open /dev/vmbus/hv_kvp failed; error: %d %s",
+ errno, strerror(errno));
exit(EXIT_FAILURE);
}
+
/*
* Retrieve OS release information.
*/
@@ -1489,100 +1456,44 @@ int main(int argc, char *argv[])
exit(EXIT_FAILURE);
}
- fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
- if (fd < 0) {
- syslog(LOG_ERR, "netlink socket creation failed; error: %d %s", errno,
- strerror(errno));
- exit(EXIT_FAILURE);
- }
- addr.nl_family = AF_NETLINK;
- addr.nl_pad = 0;
- addr.nl_pid = 0;
- addr.nl_groups = 0;
-
-
- error = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
- if (error < 0) {
- syslog(LOG_ERR, "bind failed; error: %d %s", errno, strerror(errno));
- close(fd);
- exit(EXIT_FAILURE);
- }
- nl_group = CN_KVP_IDX;
-
- if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &nl_group, sizeof(nl_group)) < 0) {
- syslog(LOG_ERR, "setsockopt failed; error: %d %s", errno, strerror(errno));
- close(fd);
- exit(EXIT_FAILURE);
- }
-
/*
* Register ourselves with the kernel.
*/
- message = (struct cn_msg *)kvp_recv_buffer;
- message->id.idx = CN_KVP_IDX;
- message->id.val = CN_KVP_VAL;
-
- hv_msg = (struct hv_kvp_msg *)message->data;
hv_msg->kvp_hdr.operation = KVP_OP_REGISTER1;
- message->ack = 0;
- message->len = sizeof(struct hv_kvp_msg);
-
- len = netlink_send(fd, message);
- if (len < 0) {
- syslog(LOG_ERR, "netlink_send failed; error: %d %s", errno, strerror(errno));
- close(fd);
+ len = write(kvp_fd, hv_msg, sizeof(struct hv_kvp_msg));
+ if (len != sizeof(struct hv_kvp_msg)) {
+ syslog(LOG_ERR, "registration to kernel failed; error: %d %s",
+ errno, strerror(errno));
+ close(kvp_fd);
exit(EXIT_FAILURE);
}
- pfd.fd = fd;
+ pfd.fd = kvp_fd;
while (1) {
- struct sockaddr *addr_p = (struct sockaddr *) &addr;
- socklen_t addr_l = sizeof(addr);
pfd.events = POLLIN;
pfd.revents = 0;
if (poll(&pfd, 1, -1) < 0) {
syslog(LOG_ERR, "poll failed; error: %d %s", errno, strerror(errno));
if (errno == EINVAL) {
- close(fd);
+ close(kvp_fd);
exit(EXIT_FAILURE);
}
else
continue;
}
- len = recvfrom(fd, kvp_recv_buffer, kvp_recv_buffer_len, 0,
- addr_p, &addr_l);
-
- if (len < 0) {
- int saved_errno = errno;
- syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s",
- addr.nl_pid, errno, strerror(errno));
+ len = read(kvp_fd, hv_msg, sizeof(struct hv_kvp_msg));
- if (saved_errno == ENOBUFS) {
- syslog(LOG_ERR, "receive error: ignored");
- continue;
- }
+ if (len != sizeof(struct hv_kvp_msg)) {
+ syslog(LOG_ERR, "read failed; error:%d %s",
+ errno, strerror(errno));
- close(fd);
- return -1;
+ close(kvp_fd);
+ return EXIT_FAILURE;
}
- if (addr.nl_pid) {
- syslog(LOG_WARNING, "Received packet from untrusted pid:%u",
- addr.nl_pid);
- continue;
- }
-
- incoming_msg = (struct nlmsghdr *)kvp_recv_buffer;
-
- if (incoming_msg->nlmsg_type != NLMSG_DONE)
- continue;
-
- incoming_cn_msg = (struct cn_msg *)NLMSG_DATA(incoming_msg);
- hv_msg = (struct hv_kvp_msg *)incoming_cn_msg->data;
-
/*
* We will use the KVP header information to pass back
* the error from this daemon. So, first copy the state
@@ -1603,7 +1514,7 @@ int main(int argc, char *argv[])
if (lic_version) {
strcpy(lic_version, p);
syslog(LOG_INFO, "KVP LIC Version: %s",
- lic_version);
+ lic_version);
} else {
syslog(LOG_ERR, "malloc failed");
}
@@ -1702,7 +1613,6 @@ int main(int argc, char *argv[])
goto kvp_done;
}
- hv_msg = (struct hv_kvp_msg *)incoming_cn_msg->data;
key_name = (char *)hv_msg->body.kvp_enum_data.data.key;
key_value = (char *)hv_msg->body.kvp_enum_data.data.value;
@@ -1753,31 +1663,17 @@ int main(int argc, char *argv[])
hv_msg->error = HV_S_CONT;
break;
}
- /*
- * Send the value back to the kernel. The response is
- * already in the receive buffer. Update the cn_msg header to
- * reflect the key value that has been added to the message
- */
-kvp_done:
-
- incoming_cn_msg->id.idx = CN_KVP_IDX;
- incoming_cn_msg->id.val = CN_KVP_VAL;
- incoming_cn_msg->ack = 0;
- incoming_cn_msg->len = sizeof(struct hv_kvp_msg);
-
- len = netlink_send(fd, incoming_cn_msg);
- if (len < 0) {
- int saved_errno = errno;
- syslog(LOG_ERR, "net_link send failed; error: %d %s", errno,
- strerror(errno));
-
- if (saved_errno == ENOMEM || saved_errno == ENOBUFS) {
- syslog(LOG_ERR, "send error: ignored");
- continue;
- }
+ /* Send the value back to the kernel. */
+kvp_done:
+ len = write(kvp_fd, hv_msg, sizeof(struct hv_kvp_msg));
+ if (len != sizeof(struct hv_kvp_msg)) {
+ syslog(LOG_ERR, "write failed; error: %d %s", errno,
+ strerror(errno));
exit(EXIT_FAILURE);
}
}
+ close(kvp_fd);
+ exit(0);
}
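
The net effect of the hunks above: the daemon drops the connector/netlink plumbing (socket, bind, NETLINK_ADD_MEMBERSHIP, cn_msg framing) and talks to the kernel through /dev/vmbus/hv_kvp with plain open/write/poll/read, always moving whole struct hv_kvp_msg records. Below is a condensed sketch of that skeleton, assuming the same device node; the message struct and opcode are hypothetical stand-ins, not the real hv_kvp_msg layout.

	#include <errno.h>
	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	/* Hypothetical stand-in for struct hv_kvp_msg. */
	struct demo_kvp_msg {
		int operation;			/* e.g. a register opcode */
		char body[512];
	};

	#define DEMO_OP_REGISTER1 100		/* illustrative opcode */

	int main(void)
	{
		struct demo_kvp_msg msg[1];	/* array-of-one: msg acts as a pointer */
		struct pollfd pfd;
		int fd = open("/dev/vmbus/hv_kvp", O_RDWR);

		if (fd < 0) {
			fprintf(stderr, "open: %s\n", strerror(errno));
			return EXIT_FAILURE;
		}

		/* One registration write, then a poll/read/handle/write loop. */
		memset(msg, 0, sizeof(*msg));
		msg->operation = DEMO_OP_REGISTER1;
		if (write(fd, msg, sizeof(*msg)) != (ssize_t)sizeof(*msg)) {
			close(fd);
			return EXIT_FAILURE;
		}

		pfd.fd = fd;
		for (;;) {
			pfd.events = POLLIN;
			pfd.revents = 0;
			if (poll(&pfd, 1, -1) < 0)
				break;
			if (read(fd, msg, sizeof(*msg)) != (ssize_t)sizeof(*msg))
				break;
			/* ... handle msg->operation, fill in the reply ... */
			if (write(fd, msg, sizeof(*msg)) != (ssize_t)sizeof(*msg))
				break;
		}

		close(fd);
		return EXIT_FAILURE;
	}

The `struct hv_kvp_msg hv_msg[1]` declaration in the hunk is the same array-of-one idiom: it provides automatic storage for one message while letting the pre-existing `hv_msg->` pointer dereferences compile unchanged.
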
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index 506dd0148828..96234b638249 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -19,7 +19,6 @@
#include <sys/types.h>
-#include <sys/socket.h>
#include <sys/poll.h>
#include <sys/ioctl.h>
#include <fcntl.h>
@@ -30,21 +29,11 @@
#include <string.h>
#include <ctype.h>
#include <errno.h>
-#include <arpa/inet.h>
#include <linux/fs.h>
-#include <linux/connector.h>
#include <linux/hyperv.h>
-#include <linux/netlink.h>
#include <syslog.h>
#include <getopt.h>
-static struct sockaddr_nl addr;
-
-#ifndef SOL_NETLINK
-#define SOL_NETLINK 270
-#endif
-
-
/* Don't use syslog() in the function since that can cause write to disk */
static int vss_do_freeze(char *dir, unsigned int cmd)
{
@@ -143,33 +132,6 @@ out:
return error;
}
-static int netlink_send(int fd, struct cn_msg *msg)
-{
- struct nlmsghdr nlh = { .nlmsg_type = NLMSG_DONE };
- unsigned int size;
- struct msghdr message;
- struct iovec iov[2];
-
- size = sizeof(struct cn_msg) + msg->len;
-
- nlh.nlmsg_pid = getpid();
- nlh.nlmsg_len = NLMSG_LENGTH(size);
-
- iov[0].iov_base = &nlh;
- iov[0].iov_len = sizeof(nlh);
-
- iov[1].iov_base = msg;
- iov[1].iov_len = size;
-
- memset(&message, 0, sizeof(message));
- message.msg_name = &addr;
- message.msg_namelen = sizeof(addr);
- message.msg_iov = iov;
- message.msg_iovlen = 2;
-
- return sendmsg(fd, &message, 0);
-}
-
void print_usage(char *argv[])
{
fprintf(stderr, "Usage: %s [options]\n"
@@ -180,17 +142,14 @@ void print_usage(char *argv[])
int main(int argc, char *argv[])
{
- int fd, len, nl_group;
+ int vss_fd, len;
int error;
- struct cn_msg *message;
struct pollfd pfd;
- struct nlmsghdr *incoming_msg;
- struct cn_msg *incoming_cn_msg;
int op;
- struct hv_vss_msg *vss_msg;
- char *vss_recv_buffer;
- size_t vss_recv_buffer_len;
+ struct hv_vss_msg vss_msg[1];
int daemonize = 1, long_index = 0, opt;
+ int in_handshake = 1;
+ __u32 kernel_modver;
static struct option long_options[] = {
{"help", no_argument, 0, 'h' },
@@ -217,98 +176,62 @@ int main(int argc, char *argv[])
openlog("Hyper-V VSS", 0, LOG_USER);
syslog(LOG_INFO, "VSS starting; pid is:%d", getpid());
- vss_recv_buffer_len = NLMSG_LENGTH(0) + sizeof(struct cn_msg) + sizeof(struct hv_vss_msg);
- vss_recv_buffer = calloc(1, vss_recv_buffer_len);
- if (!vss_recv_buffer) {
- syslog(LOG_ERR, "Failed to allocate netlink buffers");
- exit(EXIT_FAILURE);
- }
-
- fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
- if (fd < 0) {
- syslog(LOG_ERR, "netlink socket creation failed; error:%d %s",
- errno, strerror(errno));
- exit(EXIT_FAILURE);
- }
- addr.nl_family = AF_NETLINK;
- addr.nl_pad = 0;
- addr.nl_pid = 0;
- addr.nl_groups = 0;
-
-
- error = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
- if (error < 0) {
- syslog(LOG_ERR, "bind failed; error:%d %s", errno, strerror(errno));
- close(fd);
- exit(EXIT_FAILURE);
- }
- nl_group = CN_VSS_IDX;
- if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &nl_group, sizeof(nl_group)) < 0) {
- syslog(LOG_ERR, "setsockopt failed; error:%d %s", errno, strerror(errno));
- close(fd);
+ vss_fd = open("/dev/vmbus/hv_vss", O_RDWR);
+ if (vss_fd < 0) {
+ syslog(LOG_ERR, "open /dev/vmbus/hv_vss failed; error: %d %s",
+ errno, strerror(errno));
exit(EXIT_FAILURE);
}
/*
* Register ourselves with the kernel.
*/
- message = (struct cn_msg *)vss_recv_buffer;
- message->id.idx = CN_VSS_IDX;
- message->id.val = CN_VSS_VAL;
- message->ack = 0;
- vss_msg = (struct hv_vss_msg *)message->data;
- vss_msg->vss_hdr.operation = VSS_OP_REGISTER;
+ vss_msg->vss_hdr.operation = VSS_OP_REGISTER1;
- message->len = sizeof(struct hv_vss_msg);
-
- len = netlink_send(fd, message);
+ len = write(vss_fd, vss_msg, sizeof(struct hv_vss_msg));
if (len < 0) {
- syslog(LOG_ERR, "netlink_send failed; error:%d %s", errno, strerror(errno));
- close(fd);
+ syslog(LOG_ERR, "registration to kernel failed; error: %d %s",
+ errno, strerror(errno));
+ close(vss_fd);
exit(EXIT_FAILURE);
}
- pfd.fd = fd;
+ pfd.fd = vss_fd;
while (1) {
- struct sockaddr *addr_p = (struct sockaddr *) &addr;
- socklen_t addr_l = sizeof(addr);
pfd.events = POLLIN;
pfd.revents = 0;
if (poll(&pfd, 1, -1) < 0) {
syslog(LOG_ERR, "poll failed; error:%d %s", errno, strerror(errno));
if (errno == EINVAL) {
- close(fd);
+ close(vss_fd);
exit(EXIT_FAILURE);
}
else
continue;
}
- len = recvfrom(fd, vss_recv_buffer, vss_recv_buffer_len, 0,
- addr_p, &addr_l);
+ len = read(vss_fd, vss_msg, sizeof(struct hv_vss_msg));
- if (len < 0) {
- syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s",
- addr.nl_pid, errno, strerror(errno));
- close(fd);
- return -1;
- }
-
- if (addr.nl_pid) {
- syslog(LOG_WARNING,
- "Received packet from untrusted pid:%u",
- addr.nl_pid);
+ if (in_handshake) {
+ if (len != sizeof(kernel_modver)) {
+ syslog(LOG_ERR, "invalid version negotiation");
+ exit(EXIT_FAILURE);
+ }
+ kernel_modver = *(__u32 *)vss_msg;
+ in_handshake = 0;
+ syslog(LOG_INFO, "VSS: kernel module version: %d",
+ kernel_modver);
continue;
}
- incoming_msg = (struct nlmsghdr *)vss_recv_buffer;
-
- if (incoming_msg->nlmsg_type != NLMSG_DONE)
- continue;
+ if (len != sizeof(struct hv_vss_msg)) {
+ syslog(LOG_ERR, "read failed; error:%d %s",
+ errno, strerror(errno));
+ close(vss_fd);
+ return EXIT_FAILURE;
+ }
- incoming_cn_msg = (struct cn_msg *)NLMSG_DATA(incoming_msg);
- vss_msg = (struct hv_vss_msg *)incoming_cn_msg->data;
op = vss_msg->vss_hdr.operation;
error = HV_S_OK;
@@ -331,12 +254,14 @@ int main(int argc, char *argv[])
syslog(LOG_ERR, "Illegal op:%d\n", op);
}
vss_msg->error = error;
- len = netlink_send(fd, incoming_cn_msg);
- if (len < 0) {
- syslog(LOG_ERR, "net_link send failed; error:%d %s",
- errno, strerror(errno));
+ len = write(vss_fd, vss_msg, sizeof(struct hv_vss_msg));
+ if (len != sizeof(struct hv_vss_msg)) {
+ syslog(LOG_ERR, "write failed; error: %d %s", errno,
+ strerror(errno));
exit(EXIT_FAILURE);
}
}
+ close(vss_fd);
+ exit(0);
}
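
The VSS conversion mirrors the KVP one but keeps the fcopy-style handshake: after the VSS_OP_REGISTER1 write, the first read returns only the __u32 module version, and every later read (and the reply write of vss_msg) must be a full struct hv_vss_msg. A small self-contained sketch of that length-based classification, with illustrative sizes; the helper name and enum are made up for this example.

	#include <sys/types.h>
	#include <linux/types.h>

	enum vss_read_kind { VSS_READ_VERSION, VSS_READ_MESSAGE, VSS_READ_BAD };

	/*
	 * Classify a read() result from the hv_vss device: while the
	 * handshake is pending, only a bare __u32 version word is valid;
	 * afterwards only a complete message of msg_size bytes is.
	 */
	static enum vss_read_kind classify_read(ssize_t len, size_t msg_size,
						int *in_handshake)
	{
		if (*in_handshake) {
			if (len != (ssize_t)sizeof(__u32))
				return VSS_READ_BAD;
			*in_handshake = 0;
			return VSS_READ_VERSION;
		}
		if (len == (ssize_t)msg_size)
			return VSS_READ_MESSAGE;
		return VSS_READ_BAD;
	}

	int main(void)
	{
		int in_handshake = 1;

		/* First read: 4 bytes -> version; second read: full message. */
		return (classify_read(sizeof(__u32), 128, &in_handshake) == VSS_READ_VERSION &&
			classify_read(128, 128, &in_handshake) == VSS_READ_MESSAGE) ? 0 : 1;
	}
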