Diffstat (limited to 'drivers/misc')
50 files changed, 12754 insertions, 2784 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index b151b7c1bd59..e83fdfe0c8ca 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -127,7 +127,7 @@ config PHANTOM config INTEL_MID_PTI tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard" - depends on PCI + depends on PCI && TTY default n help The PTI (Parallel Trace Interface) driver directs @@ -192,7 +192,7 @@ config ICS932S401 config ATMEL_SSC tristate "Device driver for Atmel SSC peripheral" - depends on AVR32 || ARCH_AT91 + depends on HAS_IOMEM ---help--- This option enables device driver support for Atmel Synchronized Serial Communication peripheral (SSC). @@ -499,6 +499,17 @@ config USB_SWITCH_FSA9480 stereo and mono audio, video, microphone and UART data to use a common connector port. +config LATTICE_ECP3_CONFIG + tristate "Lattice ECP3 FPGA bitstream configuration via SPI" + depends on SPI && SYSFS + select FW_LOADER + default n + help + This option enables support for bitstream configuration (programming + or loading) of the Lattice ECP3 FPGA family via SPI. + + If unsure, say N. + source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" @@ -507,4 +518,5 @@ source "drivers/misc/lis3lv02d/Kconfig" source "drivers/misc/carma/Kconfig" source "drivers/misc/altera-stapl/Kconfig" source "drivers/misc/mei/Kconfig" +source "drivers/misc/vmw_vmci/Kconfig" endmenu diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 2129377c0de6..35a1463c72d9 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -49,3 +49,6 @@ obj-y += carma/ obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/ obj-$(CONFIG_INTEL_MEI) += mei/ +obj-$(CONFIG_MAX8997_MUIC) += max8997-muic.o +obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/ +obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index 3c09cbb70b1d..c09c28f92055 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c @@ -159,11 +159,9 @@ static int ssc_probe(struct platform_device *pdev) return -ENXIO; } - ssc->regs = devm_request_and_ioremap(&pdev->dev, regs); - if (!ssc->regs) { - dev_dbg(&pdev->dev, "ioremap failed\n"); - return -EINVAL; - } + ssc->regs = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(ssc->regs)) + return PTR_ERR(ssc->regs); ssc->phybase = regs->start; @@ -175,7 +173,7 @@ static int ssc_probe(struct platform_device *pdev) /* disable all interrupts */ clk_enable(ssc->clk); - ssc_writel(ssc->regs, IDR, ~0UL); + ssc_writel(ssc->regs, IDR, -1); ssc_readl(ssc->regs, SR); clk_disable(ssc->clk); diff --git a/drivers/misc/cb710/Kconfig b/drivers/misc/cb710/Kconfig index 22429b8b1068..5acb9c5b49c4 100644 --- a/drivers/misc/cb710/Kconfig +++ b/drivers/misc/cb710/Kconfig @@ -1,6 +1,6 @@ config CB710_CORE tristate "ENE CB710/720 Flash memory card reader support" - depends on PCI + depends on PCI && GENERIC_HARDIRQS help This option enables support for PCI ENE CB710/720 Flash memory card reader found in some laptops (ie. some versions of HP Compaq nx9500). 
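[Context note, not part of the commit] The LATTICE_ECP3_CONFIG option added above enables the new lattice-ecp3-config.c SPI driver introduced in the next file. As a rough illustration only, a board file could bind that driver by registering a matching SPI device; the bus number, chip select and clock rate below are placeholders and must be adapted to the actual hardware:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>

/* Hypothetical board wiring -- adjust bus_num/chip_select/speed for the real board. */
static struct spi_board_info lattice_ecp3_board_info[] __initdata = {
	{
		.modalias	= "ecp3-17",		/* matches the driver's lattice_ecp3_id table */
		.max_speed_hz	= 10 * 1000 * 1000,	/* assumed SPI clock, board specific */
		.bus_num	= 0,
		.chip_select	= 0,
		.mode		= SPI_MODE_0,
	},
};

static int __init board_register_ecp3(void)
{
	/* Board info must be registered before the SPI master probes. */
	return spi_register_board_info(lattice_ecp3_board_info,
				       ARRAY_SIZE(lattice_ecp3_board_info));
}
arch_initcall(board_register_ecp3);

Once probed, the driver requests the bitstream "lattice-ecp3.bit" through the firmware loader, so the file is typically expected under /lib/firmware.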
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c new file mode 100644 index 000000000000..155700bfd2b6 --- /dev/null +++ b/drivers/misc/lattice-ecp3-config.c @@ -0,0 +1,243 @@ +/* + * Copyright (C) 2012 Stefan Roese <sr@denx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/device.h> +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/spi/spi.h> +#include <linux/platform_device.h> +#include <linux/delay.h> + +#define FIRMWARE_NAME "lattice-ecp3.bit" + +/* + * The JTAG ID's of the supported FPGA's. The ID is 32bit wide + * reversed as noted in the manual. + */ +#define ID_ECP3_17 0xc2088080 +#define ID_ECP3_35 0xc2048080 + +/* FPGA commands */ +#define FPGA_CMD_READ_ID 0x07 /* plus 24 bits */ +#define FPGA_CMD_READ_STATUS 0x09 /* plus 24 bits */ +#define FPGA_CMD_CLEAR 0x70 +#define FPGA_CMD_REFRESH 0x71 +#define FPGA_CMD_WRITE_EN 0x4a /* plus 2 bits */ +#define FPGA_CMD_WRITE_DIS 0x4f /* plus 8 bits */ +#define FPGA_CMD_WRITE_INC 0x41 /* plus 0 bits */ + +/* + * The status register is 32bit revered, DONE is bit 17 from the TN1222.pdf + * (LatticeECP3 Slave SPI Port User's Guide) + */ +#define FPGA_STATUS_DONE 0x00004000 +#define FPGA_STATUS_CLEARED 0x00010000 + +#define FPGA_CLEAR_TIMEOUT 5000 /* max. 5000ms for FPGA clear */ +#define FPGA_CLEAR_MSLEEP 10 +#define FPGA_CLEAR_LOOP_COUNT (FPGA_CLEAR_TIMEOUT / FPGA_CLEAR_MSLEEP) + +struct fpga_data { + struct completion fw_loaded; +}; + +struct ecp3_dev { + u32 jedec_id; + char *name; +}; + +static const struct ecp3_dev ecp3_dev[] = { + { + .jedec_id = ID_ECP3_17, + .name = "Lattice ECP3-17", + }, + { + .jedec_id = ID_ECP3_35, + .name = "Lattice ECP3-35", + }, +}; + +static void firmware_load(const struct firmware *fw, void *context) +{ + struct spi_device *spi = (struct spi_device *)context; + struct fpga_data *data = dev_get_drvdata(&spi->dev); + u8 *buffer; + int ret; + u8 txbuf[8]; + u8 rxbuf[8]; + int rx_len = 8; + int i; + u32 jedec_id; + u32 status; + + if (fw->size == 0) { + dev_err(&spi->dev, "Error: Firmware size is 0!\n"); + return; + } + + /* Fill dummy data (24 stuffing bits for commands) */ + txbuf[1] = 0x00; + txbuf[2] = 0x00; + txbuf[3] = 0x00; + + /* Trying to speak with the FPGA via SPI... 
*/ + txbuf[0] = FPGA_CMD_READ_ID; + ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); + dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", *(u32 *)&rxbuf[4]); + jedec_id = *(u32 *)&rxbuf[4]; + + for (i = 0; i < ARRAY_SIZE(ecp3_dev); i++) { + if (jedec_id == ecp3_dev[i].jedec_id) + break; + } + if (i == ARRAY_SIZE(ecp3_dev)) { + dev_err(&spi->dev, + "Error: No supported FPGA detected (JEDEC_ID=%08x)!\n", + jedec_id); + return; + } + + dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name); + + txbuf[0] = FPGA_CMD_READ_STATUS; + ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); + dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]); + + buffer = kzalloc(fw->size + 8, GFP_KERNEL); + if (!buffer) { + dev_err(&spi->dev, "Error: Can't allocate memory!\n"); + return; + } + + /* + * Insert WRITE_INC command into stream (one SPI frame) + */ + buffer[0] = FPGA_CMD_WRITE_INC; + buffer[1] = 0xff; + buffer[2] = 0xff; + buffer[3] = 0xff; + memcpy(buffer + 4, fw->data, fw->size); + + txbuf[0] = FPGA_CMD_REFRESH; + ret = spi_write(spi, txbuf, 4); + + txbuf[0] = FPGA_CMD_WRITE_EN; + ret = spi_write(spi, txbuf, 4); + + txbuf[0] = FPGA_CMD_CLEAR; + ret = spi_write(spi, txbuf, 4); + + /* + * Wait for FPGA memory to become cleared + */ + for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) { + txbuf[0] = FPGA_CMD_READ_STATUS; + ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); + status = *(u32 *)&rxbuf[4]; + if (status == FPGA_STATUS_CLEARED) + break; + + msleep(FPGA_CLEAR_MSLEEP); + } + + if (i == FPGA_CLEAR_LOOP_COUNT) { + dev_err(&spi->dev, + "Error: Timeout waiting for FPGA to clear (status=%08x)!\n", + status); + kfree(buffer); + return; + } + + dev_info(&spi->dev, "Configuring the FPGA...\n"); + ret = spi_write(spi, buffer, fw->size + 8); + + txbuf[0] = FPGA_CMD_WRITE_DIS; + ret = spi_write(spi, txbuf, 4); + + txbuf[0] = FPGA_CMD_READ_STATUS; + ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); + dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]); + status = *(u32 *)&rxbuf[4]; + + /* Check result */ + if (status & FPGA_STATUS_DONE) + dev_info(&spi->dev, "FPGA succesfully configured!\n"); + else + dev_info(&spi->dev, "FPGA not configured (DONE not set)\n"); + + /* + * Don't forget to release the firmware again + */ + release_firmware(fw); + + kfree(buffer); + + complete(&data->fw_loaded); +} + +static int lattice_ecp3_probe(struct spi_device *spi) +{ + struct fpga_data *data; + int err; + + data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL); + if (!data) { + dev_err(&spi->dev, "Memory allocation for fpga_data failed\n"); + return -ENOMEM; + } + spi_set_drvdata(spi, data); + + init_completion(&data->fw_loaded); + err = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, + FIRMWARE_NAME, &spi->dev, + GFP_KERNEL, spi, firmware_load); + if (err) { + dev_err(&spi->dev, "Firmware loading failed with %d!\n", err); + return err; + } + + dev_info(&spi->dev, "FPGA bitstream configuration driver registered\n"); + + return 0; +} + +static int lattice_ecp3_remove(struct spi_device *spi) +{ + struct fpga_data *data = spi_get_drvdata(spi); + + wait_for_completion(&data->fw_loaded); + + return 0; +} + +static const struct spi_device_id lattice_ecp3_id[] = { + { "ecp3-17", 0 }, + { "ecp3-35", 0 }, + { } +}; +MODULE_DEVICE_TABLE(spi, lattice_ecp3_id); + +static struct spi_driver lattice_ecp3_driver = { + .driver = { + .name = "lattice-ecp3", + .owner = THIS_MODULE, + }, + .probe = lattice_ecp3_probe, + .remove = lattice_ecp3_remove, + .id_table = lattice_ecp3_id, +}; + 
+module_spi_driver(lattice_ecp3_driver); + +MODULE_AUTHOR("Stefan Roese <sr@denx.de>"); +MODULE_DESCRIPTION("Lattice ECP3 FPGA configuration via SPI"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig index 5a79ccde2fdf..d21b4d006a55 100644 --- a/drivers/misc/mei/Kconfig +++ b/drivers/misc/mei/Kconfig @@ -1,11 +1,22 @@ config INTEL_MEI - tristate "Intel Management Engine Interface (Intel MEI)" + tristate "Intel Management Engine Interface" depends on X86 && PCI && WATCHDOG_CORE help The Intel Management Engine (Intel ME) provides Manageability, Security and Media services for system containing Intel chipsets. if selected /dev/mei misc device will be created. + For more information see + <http://software.intel.com/en-us/manageability/> + +config INTEL_MEI_ME + bool "ME Enabled Intel Chipsets" + depends on INTEL_MEI + depends on X86 && PCI && WATCHDOG_CORE + default y + help + MEI support for ME Enabled Intel chipsets. + Supported Chipsets are: 7 Series Chipset Family 6 Series Chipset Family @@ -24,5 +35,3 @@ config INTEL_MEI 82Q33 Express 82X38/X48 Express - For more information see - <http://software.intel.com/en-us/manageability/> diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile index 0017842e166c..040af6c7b147 100644 --- a/drivers/misc/mei/Makefile +++ b/drivers/misc/mei/Makefile @@ -4,9 +4,11 @@ # obj-$(CONFIG_INTEL_MEI) += mei.o mei-objs := init.o +mei-objs += hbm.o mei-objs += interrupt.o -mei-objs += interface.o -mei-objs += iorw.o +mei-objs += client.o mei-objs += main.o mei-objs += amthif.o mei-objs += wd.o +mei-$(CONFIG_INTEL_MEI_ME) += pci-me.o +mei-$(CONFIG_INTEL_MEI_ME) += hw-me.o diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c index e40ffd9502d1..c86d7e3839a4 100644 --- a/drivers/misc/mei/amthif.c +++ b/drivers/misc/mei/amthif.c @@ -31,15 +31,16 @@ #include <linux/jiffies.h> #include <linux/uaccess.h> +#include <linux/mei.h> #include "mei_dev.h" -#include "hw.h" -#include <linux/mei.h> -#include "interface.h" +#include "hbm.h" +#include "hw-me.h" +#include "client.h" -const uuid_le mei_amthi_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, 0xac, - 0xa8, 0x46, 0xe0, 0xff, 0x65, - 0x81, 0x4c); +const uuid_le mei_amthif_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, + 0xac, 0xa8, 0x46, 0xe0, + 0xff, 0x65, 0x81, 0x4c); /** * mei_amthif_reset_params - initializes mei device iamthif @@ -64,22 +65,24 @@ void mei_amthif_reset_params(struct mei_device *dev) * @dev: the device structure * */ -void mei_amthif_host_init(struct mei_device *dev) +int mei_amthif_host_init(struct mei_device *dev) { - int i; + struct mei_cl *cl = &dev->iamthif_cl; unsigned char *msg_buf; + int ret, i; + + dev->iamthif_state = MEI_IAMTHIF_IDLE; - mei_cl_init(&dev->iamthif_cl, dev); - dev->iamthif_cl.state = MEI_FILE_DISCONNECTED; + mei_cl_init(cl, dev); - /* find ME amthi client */ - i = mei_me_cl_link(dev, &dev->iamthif_cl, - &mei_amthi_guid, MEI_IAMTHIF_HOST_CLIENT_ID); + i = mei_me_cl_by_uuid(dev, &mei_amthif_guid); if (i < 0) { - dev_info(&dev->pdev->dev, "failed to find iamthif client.\n"); - return; + dev_info(&dev->pdev->dev, "amthif: failed to find the client\n"); + return -ENOENT; } + cl->me_client_id = dev->me_clients[i].client_id; + /* Assign iamthif_mtu to the value received from ME */ dev->iamthif_mtu = dev->me_clients[i].props.max_msg_length; @@ -93,19 +96,29 @@ void mei_amthif_host_init(struct mei_device *dev) msg_buf = kcalloc(dev->iamthif_mtu, sizeof(unsigned char), GFP_KERNEL); if (!msg_buf) { - dev_dbg(&dev->pdev->dev, "memory 
allocation for ME message buffer failed.\n"); - return; + dev_err(&dev->pdev->dev, "amthif: memory allocation for ME message buffer failed.\n"); + return -ENOMEM; } dev->iamthif_msg_buf = msg_buf; - if (mei_connect(dev, &dev->iamthif_cl)) { - dev_dbg(&dev->pdev->dev, "Failed to connect to AMTHI client\n"); - dev->iamthif_cl.state = MEI_FILE_DISCONNECTED; - dev->iamthif_cl.host_client_id = 0; + ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID); + + if (ret < 0) { + dev_err(&dev->pdev->dev, "amthif: failed link client\n"); + return -ENOENT; + } + + cl->state = MEI_FILE_CONNECTING; + + if (mei_hbm_cl_connect_req(dev, cl)) { + dev_dbg(&dev->pdev->dev, "amthif: Failed to connect to ME client\n"); + cl->state = MEI_FILE_DISCONNECTED; + cl->host_client_id = 0; } else { - dev->iamthif_cl.timer_count = MEI_CONNECT_TIMEOUT; + cl->timer_count = MEI_CONNECT_TIMEOUT; } + return 0; } /** @@ -168,10 +181,10 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id); if (i < 0) { - dev_dbg(&dev->pdev->dev, "amthi client not found.\n"); + dev_dbg(&dev->pdev->dev, "amthif client not found.\n"); return -ENODEV; } - dev_dbg(&dev->pdev->dev, "checking amthi data\n"); + dev_dbg(&dev->pdev->dev, "checking amthif data\n"); cb = mei_amthif_find_read_list_entry(dev, file); /* Check for if we can block or not*/ @@ -179,7 +192,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, return -EAGAIN; - dev_dbg(&dev->pdev->dev, "waiting for amthi data\n"); + dev_dbg(&dev->pdev->dev, "waiting for amthif data\n"); while (cb == NULL) { /* unlock the Mutex */ mutex_unlock(&dev->device_lock); @@ -197,17 +210,17 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, } - dev_dbg(&dev->pdev->dev, "Got amthi data\n"); + dev_dbg(&dev->pdev->dev, "Got amthif data\n"); dev->iamthif_timer = 0; if (cb) { timeout = cb->read_time + mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); - dev_dbg(&dev->pdev->dev, "amthi timeout = %lud\n", + dev_dbg(&dev->pdev->dev, "amthif timeout = %lud\n", timeout); if (time_after(jiffies, timeout)) { - dev_dbg(&dev->pdev->dev, "amthi Time out\n"); + dev_dbg(&dev->pdev->dev, "amthif Time out\n"); /* 15 sec for the message has expired */ list_del(&cb->list); rets = -ETIMEDOUT; @@ -227,9 +240,9 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, * remove message from deletion list */ - dev_dbg(&dev->pdev->dev, "amthi cb->response_buffer size - %d\n", + dev_dbg(&dev->pdev->dev, "amthif cb->response_buffer size - %d\n", cb->response_buffer.size); - dev_dbg(&dev->pdev->dev, "amthi cb->buf_idx - %lu\n", cb->buf_idx); + dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx); /* length is being turncated to PAGE_SIZE, however, * the buf_idx may point beyond */ @@ -245,7 +258,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, } } free: - dev_dbg(&dev->pdev->dev, "free amthi cb memory.\n"); + dev_dbg(&dev->pdev->dev, "free amthif cb memory.\n"); *offset = 0; mei_io_cb_free(cb); out: @@ -269,7 +282,7 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb) if (!dev || !cb) return -ENODEV; - dev_dbg(&dev->pdev->dev, "write data to amthi client.\n"); + dev_dbg(&dev->pdev->dev, "write data to amthif client.\n"); dev->iamthif_state = MEI_IAMTHIF_WRITING; dev->iamthif_current_cb = cb; @@ -280,15 +293,15 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb) memcpy(dev->iamthif_msg_buf, cb->request_buffer.data, cb->request_buffer.size); - ret = 
mei_flow_ctrl_creds(dev, &dev->iamthif_cl); + ret = mei_cl_flow_ctrl_creds(&dev->iamthif_cl); if (ret < 0) return ret; - if (ret && dev->mei_host_buffer_is_empty) { + if (ret && dev->hbuf_is_ready) { ret = 0; - dev->mei_host_buffer_is_empty = false; - if (cb->request_buffer.size > mei_hbuf_max_data(dev)) { - mei_hdr.length = mei_hbuf_max_data(dev); + dev->hbuf_is_ready = false; + if (cb->request_buffer.size > mei_hbuf_max_len(dev)) { + mei_hdr.length = mei_hbuf_max_len(dev); mei_hdr.msg_complete = 0; } else { mei_hdr.length = cb->request_buffer.size; @@ -300,25 +313,24 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb) mei_hdr.reserved = 0; dev->iamthif_msg_buf_index += mei_hdr.length; if (mei_write_message(dev, &mei_hdr, - (unsigned char *)(dev->iamthif_msg_buf), - mei_hdr.length)) + (unsigned char *)dev->iamthif_msg_buf)) return -ENODEV; if (mei_hdr.msg_complete) { - if (mei_flow_ctrl_reduce(dev, &dev->iamthif_cl)) + if (mei_cl_flow_ctrl_reduce(&dev->iamthif_cl)) return -ENODEV; dev->iamthif_flow_control_pending = true; dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL; - dev_dbg(&dev->pdev->dev, "add amthi cb to write waiting list\n"); + dev_dbg(&dev->pdev->dev, "add amthif cb to write waiting list\n"); dev->iamthif_current_cb = cb; dev->iamthif_file_object = cb->file_object; list_add_tail(&cb->list, &dev->write_waiting_list.list); } else { - dev_dbg(&dev->pdev->dev, "message does not complete, so add amthi cb to write list.\n"); + dev_dbg(&dev->pdev->dev, "message does not complete, so add amthif cb to write list.\n"); list_add_tail(&cb->list, &dev->write_list.list); } } else { - if (!(dev->mei_host_buffer_is_empty)) + if (!dev->hbuf_is_ready) dev_dbg(&dev->pdev->dev, "host buffer is not empty"); dev_dbg(&dev->pdev->dev, "No flow control credentials, so add iamthif cb to write list.\n"); @@ -383,7 +395,7 @@ void mei_amthif_run_next_cmd(struct mei_device *dev) dev->iamthif_timer = 0; dev->iamthif_file_object = NULL; - dev_dbg(&dev->pdev->dev, "complete amthi cmd_list cb.\n"); + dev_dbg(&dev->pdev->dev, "complete amthif cmd_list cb.\n"); list_for_each_entry_safe(pos, next, &dev->amthif_cmd_list.list, list) { list_del(&pos->list); @@ -392,7 +404,7 @@ void mei_amthif_run_next_cmd(struct mei_device *dev) status = mei_amthif_send_cmd(dev, pos); if (status) { dev_dbg(&dev->pdev->dev, - "amthi write failed status = %d\n", + "amthif write failed status = %d\n", status); return; } @@ -412,7 +424,7 @@ unsigned int mei_amthif_poll(struct mei_device *dev, if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE && dev->iamthif_file_object == file) { mask |= (POLLIN | POLLRDNORM); - dev_dbg(&dev->pdev->dev, "run next amthi cb\n"); + dev_dbg(&dev->pdev->dev, "run next amthif cb\n"); mei_amthif_run_next_cmd(dev); } return mask; @@ -434,54 +446,51 @@ unsigned int mei_amthif_poll(struct mei_device *dev, int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots, struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list) { - struct mei_msg_hdr *mei_hdr; + struct mei_msg_hdr mei_hdr; struct mei_cl *cl = cb->cl; size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index; size_t msg_slots = mei_data2slots(len); - mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0]; - mei_hdr->host_addr = cl->host_client_id; - mei_hdr->me_addr = cl->me_client_id; - mei_hdr->reserved = 0; + mei_hdr.host_addr = cl->host_client_id; + mei_hdr.me_addr = cl->me_client_id; + mei_hdr.reserved = 0; if (*slots >= msg_slots) { - mei_hdr->length = len; - mei_hdr->msg_complete = 1; + mei_hdr.length = len; + 
mei_hdr.msg_complete = 1; /* Split the message only if we can write the whole host buffer */ } else if (*slots == dev->hbuf_depth) { msg_slots = *slots; len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); - mei_hdr->length = len; - mei_hdr->msg_complete = 0; + mei_hdr.length = len; + mei_hdr.msg_complete = 0; } else { /* wait for next time the host buffer is empty */ return 0; } - dev_dbg(&dev->pdev->dev, "msg: len = %d complete = %d\n", - mei_hdr->length, mei_hdr->msg_complete); + dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr)); *slots -= msg_slots; - if (mei_write_message(dev, mei_hdr, - dev->iamthif_msg_buf + dev->iamthif_msg_buf_index, - mei_hdr->length)) { + if (mei_write_message(dev, &mei_hdr, + dev->iamthif_msg_buf + dev->iamthif_msg_buf_index)) { dev->iamthif_state = MEI_IAMTHIF_IDLE; cl->status = -ENODEV; list_del(&cb->list); return -ENODEV; } - if (mei_flow_ctrl_reduce(dev, cl)) + if (mei_cl_flow_ctrl_reduce(cl)) return -ENODEV; - dev->iamthif_msg_buf_index += mei_hdr->length; + dev->iamthif_msg_buf_index += mei_hdr.length; cl->status = 0; - if (mei_hdr->msg_complete) { + if (mei_hdr.msg_complete) { dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL; dev->iamthif_flow_control_pending = true; - /* save iamthif cb sent to amthi client */ + /* save iamthif cb sent to amthif client */ cb->buf_idx = dev->iamthif_msg_buf_index; dev->iamthif_current_cb = cb; @@ -494,11 +503,11 @@ int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots, /** * mei_amthif_irq_read_message - read routine after ISR to - * handle the read amthi message + * handle the read amthif message * * @complete_list: An instance of our list structure * @dev: the device structure - * @mei_hdr: header of amthi message + * @mei_hdr: header of amthif message * * returns 0 on success, <0 on failure. 
*/ @@ -522,10 +531,10 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list, return 0; dev_dbg(&dev->pdev->dev, - "amthi_message_buffer_index =%d\n", + "amthif_message_buffer_index =%d\n", mei_hdr->length); - dev_dbg(&dev->pdev->dev, "completed amthi read.\n "); + dev_dbg(&dev->pdev->dev, "completed amthif read.\n "); if (!dev->iamthif_current_cb) return -ENODEV; @@ -540,8 +549,8 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list, cb->read_time = jiffies; if (dev->iamthif_ioctl && cb->cl == &dev->iamthif_cl) { /* found the iamthif cb */ - dev_dbg(&dev->pdev->dev, "complete the amthi read cb.\n "); - dev_dbg(&dev->pdev->dev, "add the amthi read cb to complete.\n "); + dev_dbg(&dev->pdev->dev, "complete the amthif read cb.\n "); + dev_dbg(&dev->pdev->dev, "add the amthif read cb to complete.\n "); list_add_tail(&cb->list, &complete_list->list); } return 0; @@ -563,7 +572,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots) return -EMSGSIZE; } *slots -= mei_data2slots(sizeof(struct hbm_flow_control)); - if (mei_send_flow_control(dev, &dev->iamthif_cl)) { + if (mei_hbm_cl_flow_control_req(dev, &dev->iamthif_cl)) { dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n"); return -EIO; } @@ -574,7 +583,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots) dev->iamthif_msg_buf_index = 0; dev->iamthif_msg_buf_size = 0; dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER; - dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev); + dev->hbuf_is_ready = mei_hbuf_is_ready(dev); return 0; } @@ -593,7 +602,7 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb) dev->iamthif_msg_buf, dev->iamthif_msg_buf_index); list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list); - dev_dbg(&dev->pdev->dev, "amthi read completed\n"); + dev_dbg(&dev->pdev->dev, "amthif read completed\n"); dev->iamthif_timer = jiffies; dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n", dev->iamthif_timer); @@ -601,7 +610,7 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb) mei_amthif_run_next_cmd(dev); } - dev_dbg(&dev->pdev->dev, "completing amthi call back.\n"); + dev_dbg(&dev->pdev->dev, "completing amthif call back.\n"); wake_up_interruptible(&dev->iamthif_cl.wait); } @@ -635,7 +644,8 @@ static bool mei_clear_list(struct mei_device *dev, if (dev->iamthif_current_cb == cb_pos) { dev->iamthif_current_cb = NULL; /* send flow control to iamthif client */ - mei_send_flow_control(dev, &dev->iamthif_cl); + mei_hbm_cl_flow_control_req(dev, + &dev->iamthif_cl); } /* free all allocated buffers */ mei_io_cb_free(cb_pos); @@ -706,11 +716,11 @@ int mei_amthif_release(struct mei_device *dev, struct file *file) if (dev->iamthif_file_object == file && dev->iamthif_state != MEI_IAMTHIF_IDLE) { - dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n", + dev_dbg(&dev->pdev->dev, "amthif canceled iamthif state %d\n", dev->iamthif_state); dev->iamthif_canceled = true; if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) { - dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n"); + dev_dbg(&dev->pdev->dev, "run next amthif iamthif cb\n"); mei_amthif_run_next_cmd(dev); } } diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c new file mode 100644 index 000000000000..1569afe935de --- /dev/null +++ b/drivers/misc/mei/client.c @@ -0,0 +1,729 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/delay.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hbm.h" +#include "client.h" + +/** + * mei_me_cl_by_uuid - locate index of me client + * + * @dev: mei device + * returns me client index or -ENOENT if not found + */ +int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid) +{ + int i, res = -ENOENT; + + for (i = 0; i < dev->me_clients_num; ++i) + if (uuid_le_cmp(*uuid, + dev->me_clients[i].props.protocol_name) == 0) { + res = i; + break; + } + + return res; +} + + +/** + * mei_me_cl_by_id return index to me_clients for client_id + * + * @dev: the device structure + * @client_id: me client id + * + * Locking: called under "dev->device_lock" lock + * + * returns index on success, -ENOENT on failure. + */ + +int mei_me_cl_by_id(struct mei_device *dev, u8 client_id) +{ + int i; + for (i = 0; i < dev->me_clients_num; i++) + if (dev->me_clients[i].client_id == client_id) + break; + if (WARN_ON(dev->me_clients[i].client_id != client_id)) + return -ENOENT; + + if (i == dev->me_clients_num) + return -ENOENT; + + return i; +} + + +/** + * mei_io_list_flush - removes list entry belonging to cl. + * + * @list: An instance of our list structure + * @cl: host client + */ +void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl) +{ + struct mei_cl_cb *cb; + struct mei_cl_cb *next; + + list_for_each_entry_safe(cb, next, &list->list, list) { + if (cb->cl && mei_cl_cmp_id(cl, cb->cl)) + list_del(&cb->list); + } +} + +/** + * mei_io_cb_free - free mei_cb_private related memory + * + * @cb: mei callback struct + */ +void mei_io_cb_free(struct mei_cl_cb *cb) +{ + if (cb == NULL) + return; + + kfree(cb->request_buffer.data); + kfree(cb->response_buffer.data); + kfree(cb); +} + +/** + * mei_io_cb_init - allocate and initialize io callback + * + * @cl - mei client + * @file: pointer to file structure + * + * returns mei_cl_cb pointer or NULL; + */ +struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp) +{ + struct mei_cl_cb *cb; + + cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL); + if (!cb) + return NULL; + + mei_io_list_init(cb); + + cb->file_object = fp; + cb->cl = cl; + cb->buf_idx = 0; + return cb; +} + +/** + * mei_io_cb_alloc_req_buf - allocate request buffer + * + * @cb - io callback structure + * @size: size of the buffer + * + * returns 0 on success + * -EINVAL if cb is NULL + * -ENOMEM if allocation failed + */ +int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length) +{ + if (!cb) + return -EINVAL; + + if (length == 0) + return 0; + + cb->request_buffer.data = kmalloc(length, GFP_KERNEL); + if (!cb->request_buffer.data) + return -ENOMEM; + cb->request_buffer.size = length; + return 0; +} +/** + * mei_io_cb_alloc_req_buf - allocate respose buffer + * + * @cb - io callback structure + * @size: size of the buffer + * + * returns 0 on success + * -EINVAL if cb is NULL + * -ENOMEM if allocation failed + */ +int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length) +{ + if (!cb) 
+ return -EINVAL; + + if (length == 0) + return 0; + + cb->response_buffer.data = kmalloc(length, GFP_KERNEL); + if (!cb->response_buffer.data) + return -ENOMEM; + cb->response_buffer.size = length; + return 0; +} + + + +/** + * mei_cl_flush_queues - flushes queue lists belonging to cl. + * + * @dev: the device structure + * @cl: host client + */ +int mei_cl_flush_queues(struct mei_cl *cl) +{ + if (WARN_ON(!cl || !cl->dev)) + return -EINVAL; + + dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n"); + mei_io_list_flush(&cl->dev->read_list, cl); + mei_io_list_flush(&cl->dev->write_list, cl); + mei_io_list_flush(&cl->dev->write_waiting_list, cl); + mei_io_list_flush(&cl->dev->ctrl_wr_list, cl); + mei_io_list_flush(&cl->dev->ctrl_rd_list, cl); + mei_io_list_flush(&cl->dev->amthif_cmd_list, cl); + mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl); + return 0; +} + + +/** + * mei_cl_init - initializes intialize cl. + * + * @cl: host client to be initialized + * @dev: mei device + */ +void mei_cl_init(struct mei_cl *cl, struct mei_device *dev) +{ + memset(cl, 0, sizeof(struct mei_cl)); + init_waitqueue_head(&cl->wait); + init_waitqueue_head(&cl->rx_wait); + init_waitqueue_head(&cl->tx_wait); + INIT_LIST_HEAD(&cl->link); + cl->reading_state = MEI_IDLE; + cl->writing_state = MEI_IDLE; + cl->dev = dev; +} + +/** + * mei_cl_allocate - allocates cl structure and sets it up. + * + * @dev: mei device + * returns The allocated file or NULL on failure + */ +struct mei_cl *mei_cl_allocate(struct mei_device *dev) +{ + struct mei_cl *cl; + + cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL); + if (!cl) + return NULL; + + mei_cl_init(cl, dev); + + return cl; +} + +/** + * mei_cl_find_read_cb - find this cl's callback in the read list + * + * @dev: device structure + * returns cb on success, NULL on error + */ +struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl) +{ + struct mei_device *dev = cl->dev; + struct mei_cl_cb *cb = NULL; + struct mei_cl_cb *next = NULL; + + list_for_each_entry_safe(cb, next, &dev->read_list.list, list) + if (mei_cl_cmp_id(cl, cb->cl)) + return cb; + return NULL; +} + +/** mei_cl_link: allocte host id in the host map + * + * @cl - host client + * @id - fixed host id or -1 for genereting one + * returns 0 on success + * -EINVAL on incorrect values + * -ENONET if client not found + */ +int mei_cl_link(struct mei_cl *cl, int id) +{ + struct mei_device *dev; + + if (WARN_ON(!cl || !cl->dev)) + return -EINVAL; + + dev = cl->dev; + + /* If Id is not asigned get one*/ + if (id == MEI_HOST_CLIENT_ID_ANY) + id = find_first_zero_bit(dev->host_clients_map, + MEI_CLIENTS_MAX); + + if (id >= MEI_CLIENTS_MAX) { + dev_err(&dev->pdev->dev, "id exceded %d", MEI_CLIENTS_MAX) ; + return -ENOENT; + } + + dev->open_handle_count++; + + cl->host_client_id = id; + list_add_tail(&cl->link, &dev->file_list); + + set_bit(id, dev->host_clients_map); + + cl->state = MEI_FILE_INITIALIZING; + + dev_dbg(&dev->pdev->dev, "link cl host id = %d\n", cl->host_client_id); + return 0; +} + +/** + * mei_cl_unlink - remove me_cl from the list + * + * @dev: the device structure + */ +int mei_cl_unlink(struct mei_cl *cl) +{ + struct mei_device *dev; + struct mei_cl *pos, *next; + + /* don't shout on error exit path */ + if (!cl) + return 0; + + /* wd and amthif might not be initialized */ + if (!cl->dev) + return 0; + + dev = cl->dev; + + list_for_each_entry_safe(pos, next, &dev->file_list, link) { + if (cl->host_client_id == pos->host_client_id) { + dev_dbg(&dev->pdev->dev, "remove host client = %d, 
ME client = %d\n", + pos->host_client_id, pos->me_client_id); + list_del_init(&pos->link); + break; + } + } + return 0; +} + + +void mei_host_client_init(struct work_struct *work) +{ + struct mei_device *dev = container_of(work, + struct mei_device, init_work); + struct mei_client_properties *client_props; + int i; + + mutex_lock(&dev->device_lock); + + bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); + dev->open_handle_count = 0; + + /* + * Reserving the first three client IDs + * 0: Reserved for MEI Bus Message communications + * 1: Reserved for Watchdog + * 2: Reserved for AMTHI + */ + bitmap_set(dev->host_clients_map, 0, 3); + + for (i = 0; i < dev->me_clients_num; i++) { + client_props = &dev->me_clients[i].props; + + if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid)) + mei_amthif_host_init(dev); + else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid)) + mei_wd_host_init(dev); + } + + dev->dev_state = MEI_DEV_ENABLED; + + mutex_unlock(&dev->device_lock); +} + + +/** + * mei_cl_disconnect - disconnect host clinet form the me one + * + * @cl: host client + * + * Locking: called under "dev->device_lock" lock + * + * returns 0 on success, <0 on failure. + */ +int mei_cl_disconnect(struct mei_cl *cl) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + int rets, err; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + if (cl->state != MEI_FILE_DISCONNECTING) + return 0; + + cb = mei_io_cb_init(cl, NULL); + if (!cb) + return -ENOMEM; + + cb->fop_type = MEI_FOP_CLOSE; + if (dev->hbuf_is_ready) { + dev->hbuf_is_ready = false; + if (mei_hbm_cl_disconnect_req(dev, cl)) { + rets = -ENODEV; + dev_err(&dev->pdev->dev, "failed to disconnect.\n"); + goto free; + } + mdelay(10); /* Wait for hardware disconnection ready */ + list_add_tail(&cb->list, &dev->ctrl_rd_list.list); + } else { + dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n"); + list_add_tail(&cb->list, &dev->ctrl_wr_list.list); + + } + mutex_unlock(&dev->device_lock); + + err = wait_event_timeout(dev->wait_recvd_msg, + MEI_FILE_DISCONNECTED == cl->state, + mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + + mutex_lock(&dev->device_lock); + if (MEI_FILE_DISCONNECTED == cl->state) { + rets = 0; + dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n"); + } else { + rets = -ENODEV; + if (MEI_FILE_DISCONNECTED != cl->state) + dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n"); + + if (err) + dev_dbg(&dev->pdev->dev, + "wait failed disconnect err=%08x\n", + err); + + dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n"); + } + + mei_io_list_flush(&dev->ctrl_rd_list, cl); + mei_io_list_flush(&dev->ctrl_wr_list, cl); +free: + mei_io_cb_free(cb); + return rets; +} + + +/** + * mei_cl_is_other_connecting - checks if other + * client with the same me client id is connecting + * + * @cl: private data of the file object + * + * returns ture if other client is connected, 0 - otherwise. 
+ */ +bool mei_cl_is_other_connecting(struct mei_cl *cl) +{ + struct mei_device *dev; + struct mei_cl *pos; + struct mei_cl *next; + + if (WARN_ON(!cl || !cl->dev)) + return false; + + dev = cl->dev; + + list_for_each_entry_safe(pos, next, &dev->file_list, link) { + if ((pos->state == MEI_FILE_CONNECTING) && + (pos != cl) && cl->me_client_id == pos->me_client_id) + return true; + + } + + return false; +} + +/** + * mei_cl_connect - connect host clinet to the me one + * + * @cl: host client + * + * Locking: called under "dev->device_lock" lock + * + * returns 0 on success, <0 on failure. + */ +int mei_cl_connect(struct mei_cl *cl, struct file *file) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT); + int rets; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + cb = mei_io_cb_init(cl, file); + if (!cb) { + rets = -ENOMEM; + goto out; + } + + cb->fop_type = MEI_FOP_IOCTL; + + if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) { + dev->hbuf_is_ready = false; + + if (mei_hbm_cl_connect_req(dev, cl)) { + rets = -ENODEV; + goto out; + } + cl->timer_count = MEI_CONNECT_TIMEOUT; + list_add_tail(&cb->list, &dev->ctrl_rd_list.list); + } else { + list_add_tail(&cb->list, &dev->ctrl_wr_list.list); + } + + mutex_unlock(&dev->device_lock); + rets = wait_event_timeout(dev->wait_recvd_msg, + (cl->state == MEI_FILE_CONNECTED || + cl->state == MEI_FILE_DISCONNECTED), + timeout * HZ); + mutex_lock(&dev->device_lock); + + if (cl->state != MEI_FILE_CONNECTED) { + rets = -EFAULT; + + mei_io_list_flush(&dev->ctrl_rd_list, cl); + mei_io_list_flush(&dev->ctrl_wr_list, cl); + goto out; + } + + rets = cl->status; + +out: + mei_io_cb_free(cb); + return rets; +} + +/** + * mei_cl_flow_ctrl_creds - checks flow_control credits for cl. + * + * @dev: the device structure + * @cl: private data of the file object + * + * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise. + * -ENOENT if mei_cl is not present + * -EINVAL if single_recv_buf == 0 + */ +int mei_cl_flow_ctrl_creds(struct mei_cl *cl) +{ + struct mei_device *dev; + int i; + + if (WARN_ON(!cl || !cl->dev)) + return -EINVAL; + + dev = cl->dev; + + if (!dev->me_clients_num) + return 0; + + if (cl->mei_flow_ctrl_creds > 0) + return 1; + + for (i = 0; i < dev->me_clients_num; i++) { + struct mei_me_client *me_cl = &dev->me_clients[i]; + if (me_cl->client_id == cl->me_client_id) { + if (me_cl->mei_flow_ctrl_creds) { + if (WARN_ON(me_cl->props.single_recv_buf == 0)) + return -EINVAL; + return 1; + } else { + return 0; + } + } + } + return -ENOENT; +} + +/** + * mei_cl_flow_ctrl_reduce - reduces flow_control. 
+ * + * @dev: the device structure + * @cl: private data of the file object + * @returns + * 0 on success + * -ENOENT when me client is not found + * -EINVAL when ctrl credits are <= 0 + */ +int mei_cl_flow_ctrl_reduce(struct mei_cl *cl) +{ + struct mei_device *dev; + int i; + + if (WARN_ON(!cl || !cl->dev)) + return -EINVAL; + + dev = cl->dev; + + if (!dev->me_clients_num) + return -ENOENT; + + for (i = 0; i < dev->me_clients_num; i++) { + struct mei_me_client *me_cl = &dev->me_clients[i]; + if (me_cl->client_id == cl->me_client_id) { + if (me_cl->props.single_recv_buf != 0) { + if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) + return -EINVAL; + dev->me_clients[i].mei_flow_ctrl_creds--; + } else { + if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) + return -EINVAL; + cl->mei_flow_ctrl_creds--; + } + return 0; + } + } + return -ENOENT; +} + +/** + * mei_cl_start_read - the start read client message function. + * + * @cl: host client + * + * returns 0 on success, <0 on failure. + */ +int mei_cl_read_start(struct mei_cl *cl) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + int rets; + int i; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + if (cl->state != MEI_FILE_CONNECTED) + return -ENODEV; + + if (dev->dev_state != MEI_DEV_ENABLED) + return -ENODEV; + + if (cl->read_cb) { + dev_dbg(&dev->pdev->dev, "read is pending.\n"); + return -EBUSY; + } + i = mei_me_cl_by_id(dev, cl->me_client_id); + if (i < 0) { + dev_err(&dev->pdev->dev, "no such me client %d\n", + cl->me_client_id); + return -ENODEV; + } + + cb = mei_io_cb_init(cl, NULL); + if (!cb) + return -ENOMEM; + + rets = mei_io_cb_alloc_resp_buf(cb, + dev->me_clients[i].props.max_msg_length); + if (rets) + goto err; + + cb->fop_type = MEI_FOP_READ; + cl->read_cb = cb; + if (dev->hbuf_is_ready) { + dev->hbuf_is_ready = false; + if (mei_hbm_cl_flow_control_req(dev, cl)) { + rets = -ENODEV; + goto err; + } + list_add_tail(&cb->list, &dev->read_list.list); + } else { + list_add_tail(&cb->list, &dev->ctrl_wr_list.list); + } + return rets; +err: + mei_io_cb_free(cb); + return rets; +} + +/** + * mei_cl_all_disconnect - disconnect forcefully all connected clients + * + * @dev - mei device + */ + +void mei_cl_all_disconnect(struct mei_device *dev) +{ + struct mei_cl *cl, *next; + + list_for_each_entry_safe(cl, next, &dev->file_list, link) { + cl->state = MEI_FILE_DISCONNECTED; + cl->mei_flow_ctrl_creds = 0; + cl->read_cb = NULL; + cl->timer_count = 0; + } +} + + +/** + * mei_cl_all_read_wakeup - wake up all readings so they can be interrupted + * + * @dev - mei device + */ +void mei_cl_all_read_wakeup(struct mei_device *dev) +{ + struct mei_cl *cl, *next; + list_for_each_entry_safe(cl, next, &dev->file_list, link) { + if (waitqueue_active(&cl->rx_wait)) { + dev_dbg(&dev->pdev->dev, "Waking up client!\n"); + wake_up_interruptible(&cl->rx_wait); + } + } +} + +/** + * mei_cl_all_write_clear - clear all pending writes + + * @dev - mei device + */ +void mei_cl_all_write_clear(struct mei_device *dev) +{ + struct mei_cl_cb *cb, *next; + + list_for_each_entry_safe(cb, next, &dev->write_list.list, list) { + list_del(&cb->list); + mei_io_cb_free(cb); + } +} + + diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h new file mode 100644 index 000000000000..214b2397ec3e --- /dev/null +++ b/drivers/misc/mei/client.h @@ -0,0 +1,102 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef _MEI_CLIENT_H_ +#define _MEI_CLIENT_H_ + +#include <linux/types.h> +#include <linux/watchdog.h> +#include <linux/poll.h> +#include <linux/mei.h> + +#include "mei_dev.h" + +int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid); +int mei_me_cl_by_id(struct mei_device *dev, u8 client_id); + +/* + * MEI IO Functions + */ +struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp); +void mei_io_cb_free(struct mei_cl_cb *priv_cb); +int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length); +int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length); + + +/** + * mei_io_list_init - Sets up a queue list. + * + * @list: An instance cl callback structure + */ +static inline void mei_io_list_init(struct mei_cl_cb *list) +{ + INIT_LIST_HEAD(&list->list); +} +void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl); + +/* + * MEI Host Client Functions + */ + +struct mei_cl *mei_cl_allocate(struct mei_device *dev); +void mei_cl_init(struct mei_cl *cl, struct mei_device *dev); + + +int mei_cl_link(struct mei_cl *cl, int id); +int mei_cl_unlink(struct mei_cl *cl); + +int mei_cl_flush_queues(struct mei_cl *cl); +struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl); + +/** + * mei_cl_cmp_id - tells if file private data have same id + * + * @fe1: private data of 1. file object + * @fe2: private data of 2. file object + * + * returns true - if ids are the same and not NULL + */ +static inline bool mei_cl_cmp_id(const struct mei_cl *cl1, + const struct mei_cl *cl2) +{ + return cl1 && cl2 && + (cl1->host_client_id == cl2->host_client_id) && + (cl1->me_client_id == cl2->me_client_id); +} + + +int mei_cl_flow_ctrl_creds(struct mei_cl *cl); + +int mei_cl_flow_ctrl_reduce(struct mei_cl *cl); +/* + * MEI input output function prototype + */ +bool mei_cl_is_other_connecting(struct mei_cl *cl); +int mei_cl_disconnect(struct mei_cl *cl); + +int mei_cl_read_start(struct mei_cl *cl); + +int mei_cl_connect(struct mei_cl *cl, struct file *file); + +void mei_host_client_init(struct work_struct *work); + + +void mei_cl_all_disconnect(struct mei_device *dev); +void mei_cl_all_read_wakeup(struct mei_device *dev); +void mei_cl_all_write_clear(struct mei_device *dev); + + +#endif /* _MEI_CLIENT_H_ */ diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c new file mode 100644 index 000000000000..fb9e63ba3bb1 --- /dev/null +++ b/drivers/misc/mei/hbm.c @@ -0,0 +1,669 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + */ + +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hbm.h" +#include "hw-me.h" + +/** + * mei_hbm_me_cl_allocate - allocates storage for me clients + * + * @dev: the device structure + * + * returns none. + */ +static void mei_hbm_me_cl_allocate(struct mei_device *dev) +{ + struct mei_me_client *clients; + int b; + + /* count how many ME clients we have */ + for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX) + dev->me_clients_num++; + + if (dev->me_clients_num <= 0) + return; + + kfree(dev->me_clients); + dev->me_clients = NULL; + + dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n", + dev->me_clients_num * sizeof(struct mei_me_client)); + /* allocate storage for ME clients representation */ + clients = kcalloc(dev->me_clients_num, + sizeof(struct mei_me_client), GFP_KERNEL); + if (!clients) { + dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n"); + dev->dev_state = MEI_DEV_RESETING; + mei_reset(dev, 1); + return; + } + dev->me_clients = clients; + return; +} + +/** + * mei_hbm_cl_hdr - construct client hbm header + * @cl: - client + * @hbm_cmd: host bus message command + * @buf: buffer for cl header + * @len: buffer length + */ +static inline +void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len) +{ + struct mei_hbm_cl_cmd *cmd = buf; + + memset(cmd, 0, len); + + cmd->hbm_cmd = hbm_cmd; + cmd->host_addr = cl->host_client_id; + cmd->me_addr = cl->me_client_id; +} + +/** + * same_disconn_addr - tells if they have the same address + * + * @file: private data of the file object. + * @disconn: disconnection request. + * + * returns true if addres are same + */ +static inline +bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf) +{ + struct mei_hbm_cl_cmd *cmd = buf; + return cl->host_client_id == cmd->host_addr && + cl->me_client_id == cmd->me_addr; +} + + +/** + * is_treat_specially_client - checks if the message belongs + * to the file private data. + * + * @cl: private data of the file object + * @rs: connect response bus message + * + */ +static bool is_treat_specially_client(struct mei_cl *cl, + struct hbm_client_connect_response *rs) +{ + if (mei_hbm_cl_addr_equal(cl, rs)) { + if (!rs->status) { + cl->state = MEI_FILE_CONNECTED; + cl->status = 0; + + } else { + cl->state = MEI_FILE_DISCONNECTED; + cl->status = -ENODEV; + } + cl->timer_count = 0; + + return true; + } + return false; +} + +/** + * mei_hbm_start_req - sends start request message. + * + * @dev: the device structure + */ +void mei_hbm_start_req(struct mei_device *dev) +{ + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + struct hbm_host_version_request *start_req; + const size_t len = sizeof(struct hbm_host_version_request); + + mei_hbm_hdr(mei_hdr, len); + + /* host start message */ + start_req = (struct hbm_host_version_request *)dev->wr_msg.data; + memset(start_req, 0, len); + start_req->hbm_cmd = HOST_START_REQ_CMD; + start_req->host_version.major_version = HBM_MAJOR_VERSION; + start_req->host_version.minor_version = HBM_MINOR_VERSION; + + dev->recvd_msg = false; + if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { + dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n"); + dev->dev_state = MEI_DEV_RESETING; + mei_reset(dev, 1); + } + dev->init_clients_state = MEI_START_MESSAGE; + dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + return ; +} + +/** + * mei_hbm_enum_clients_req - sends enumeration client request message. 
+ * + * @dev: the device structure + * + * returns none. + */ +static void mei_hbm_enum_clients_req(struct mei_device *dev) +{ + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + struct hbm_host_enum_request *enum_req; + const size_t len = sizeof(struct hbm_host_enum_request); + /* enumerate clients */ + mei_hbm_hdr(mei_hdr, len); + + enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data; + memset(enum_req, 0, len); + enum_req->hbm_cmd = HOST_ENUM_REQ_CMD; + + if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { + dev->dev_state = MEI_DEV_RESETING; + dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n"); + mei_reset(dev, 1); + } + dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE; + dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + return; +} + +/** + * mei_hbm_prop_requsest - request property for a single client + * + * @dev: the device structure + * + * returns none. + */ + +static int mei_hbm_prop_req(struct mei_device *dev) +{ + + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + struct hbm_props_request *prop_req; + const size_t len = sizeof(struct hbm_props_request); + unsigned long next_client_index; + u8 client_num; + + + client_num = dev->me_client_presentation_num; + + next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX, + dev->me_client_index); + + /* We got all client properties */ + if (next_client_index == MEI_CLIENTS_MAX) { + schedule_work(&dev->init_work); + + return 0; + } + + dev->me_clients[client_num].client_id = next_client_index; + dev->me_clients[client_num].mei_flow_ctrl_creds = 0; + + mei_hbm_hdr(mei_hdr, len); + prop_req = (struct hbm_props_request *)dev->wr_msg.data; + + memset(prop_req, 0, sizeof(struct hbm_props_request)); + + + prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD; + prop_req->address = next_client_index; + + if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { + dev->dev_state = MEI_DEV_RESETING; + dev_err(&dev->pdev->dev, "Properties request command failed\n"); + mei_reset(dev, 1); + + return -EIO; + } + + dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + dev->me_client_index = next_client_index; + + return 0; +} + +/** + * mei_hbm_stop_req_prepare - perpare stop request message + * + * @dev - mei device + * @mei_hdr - mei message header + * @data - hbm message body buffer + */ +static void mei_hbm_stop_req_prepare(struct mei_device *dev, + struct mei_msg_hdr *mei_hdr, unsigned char *data) +{ + struct hbm_host_stop_request *req = + (struct hbm_host_stop_request *)data; + const size_t len = sizeof(struct hbm_host_stop_request); + + mei_hbm_hdr(mei_hdr, len); + + memset(req, 0, len); + req->hbm_cmd = HOST_STOP_REQ_CMD; + req->reason = DRIVER_STOP_REQUEST; +} + +/** + * mei_hbm_cl_flow_control_req - sends flow control requst. + * + * @dev: the device structure + * @cl: client info + * + * This function returns -EIO on write failure + */ +int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl) +{ + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + const size_t len = sizeof(struct hbm_flow_control); + + mei_hbm_hdr(mei_hdr, len); + mei_hbm_cl_hdr(cl, MEI_FLOW_CONTROL_CMD, dev->wr_msg.data, len); + + dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n", + cl->host_client_id, cl->me_client_id); + + return mei_write_message(dev, mei_hdr, dev->wr_msg.data); +} + +/** + * add_single_flow_creds - adds single buffer credentials. + * + * @file: private data ot the file object. + * @flow: flow control. 
+ */ +static void mei_hbm_add_single_flow_creds(struct mei_device *dev, + struct hbm_flow_control *flow) +{ + struct mei_me_client *client; + int i; + + for (i = 0; i < dev->me_clients_num; i++) { + client = &dev->me_clients[i]; + if (client && flow->me_addr == client->client_id) { + if (client->props.single_recv_buf) { + client->mei_flow_ctrl_creds++; + dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n", + flow->me_addr); + dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n", + client->mei_flow_ctrl_creds); + } else { + BUG(); /* error in flow control */ + } + } + } +} + +/** + * mei_hbm_cl_flow_control_res - flow control response from me + * + * @dev: the device structure + * @flow_control: flow control response bus message + */ +static void mei_hbm_cl_flow_control_res(struct mei_device *dev, + struct hbm_flow_control *flow_control) +{ + struct mei_cl *cl = NULL; + struct mei_cl *next = NULL; + + if (!flow_control->host_addr) { + /* single receive buffer */ + mei_hbm_add_single_flow_creds(dev, flow_control); + return; + } + + /* normal connection */ + list_for_each_entry_safe(cl, next, &dev->file_list, link) { + if (mei_hbm_cl_addr_equal(cl, flow_control)) { + cl->mei_flow_ctrl_creds++; + dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n", + flow_control->host_addr, flow_control->me_addr); + dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n", + cl->mei_flow_ctrl_creds); + break; + } + } +} + + +/** + * mei_hbm_cl_disconnect_req - sends disconnect message to fw. + * + * @dev: the device structure + * @cl: a client to disconnect from + * + * This function returns -EIO on write failure + */ +int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl) +{ + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + const size_t len = sizeof(struct hbm_client_connect_request); + + mei_hbm_hdr(mei_hdr, len); + mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, dev->wr_msg.data, len); + + return mei_write_message(dev, mei_hdr, dev->wr_msg.data); +} + +/** + * mei_hbm_cl_disconnect_res - disconnect response from ME + * + * @dev: the device structure + * @rs: disconnect response bus message + */ +static void mei_hbm_cl_disconnect_res(struct mei_device *dev, + struct hbm_client_connect_response *rs) +{ + struct mei_cl *cl; + struct mei_cl_cb *pos = NULL, *next = NULL; + + dev_dbg(&dev->pdev->dev, + "disconnect_response:\n" + "ME Client = %d\n" + "Host Client = %d\n" + "Status = %d\n", + rs->me_addr, + rs->host_addr, + rs->status); + + list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) { + cl = pos->cl; + + if (!cl) { + list_del(&pos->list); + return; + } + + dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n"); + if (mei_hbm_cl_addr_equal(cl, rs)) { + list_del(&pos->list); + if (!rs->status) + cl->state = MEI_FILE_DISCONNECTED; + + cl->status = 0; + cl->timer_count = 0; + break; + } + } +} + +/** + * mei_hbm_cl_connect_req - send connection request to specific me client + * + * @dev: the device structure + * @cl: a client to connect to + * + * returns -EIO on write failure + */ +int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl) +{ + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + const size_t len = sizeof(struct hbm_client_connect_request); + + mei_hbm_hdr(mei_hdr, len); + mei_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, dev->wr_msg.data, len); + + return mei_write_message(dev, mei_hdr, dev->wr_msg.data); +} + +/** + * mei_hbm_cl_connect_res - connect resposne from the ME + * + * @dev: the device structure + * 
@rs: connect response bus message + */ +static void mei_hbm_cl_connect_res(struct mei_device *dev, + struct hbm_client_connect_response *rs) +{ + + struct mei_cl *cl; + struct mei_cl_cb *pos = NULL, *next = NULL; + + dev_dbg(&dev->pdev->dev, + "connect_response:\n" + "ME Client = %d\n" + "Host Client = %d\n" + "Status = %d\n", + rs->me_addr, + rs->host_addr, + rs->status); + + /* if WD or iamthif client treat specially */ + + if (is_treat_specially_client(&dev->wd_cl, rs)) { + dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n"); + mei_watchdog_register(dev); + + return; + } + + if (is_treat_specially_client(&dev->iamthif_cl, rs)) { + dev->iamthif_state = MEI_IAMTHIF_IDLE; + return; + } + list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) { + + cl = pos->cl; + if (!cl) { + list_del(&pos->list); + return; + } + if (pos->fop_type == MEI_FOP_IOCTL) { + if (is_treat_specially_client(cl, rs)) { + list_del(&pos->list); + cl->status = 0; + cl->timer_count = 0; + break; + } + } + } +} + + +/** + * mei_client_disconnect_request - disconnect request initiated by me + * host sends disoconnect response + * + * @dev: the device structure. + * @disconnect_req: disconnect request bus message from the me + */ +static void mei_hbm_fw_disconnect_req(struct mei_device *dev, + struct hbm_client_connect_request *disconnect_req) +{ + struct mei_cl *cl, *next; + const size_t len = sizeof(struct hbm_client_connect_response); + + list_for_each_entry_safe(cl, next, &dev->file_list, link) { + if (mei_hbm_cl_addr_equal(cl, disconnect_req)) { + dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n", + disconnect_req->host_addr, + disconnect_req->me_addr); + cl->state = MEI_FILE_DISCONNECTED; + cl->timer_count = 0; + if (cl == &dev->wd_cl) + dev->wd_pending = false; + else if (cl == &dev->iamthif_cl) + dev->iamthif_timer = 0; + + /* prepare disconnect response */ + mei_hbm_hdr(&dev->wr_ext_msg.hdr, len); + mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, + dev->wr_ext_msg.data, len); + break; + } + } +} + + +/** + * mei_hbm_dispatch - bottom half read routine after ISR to + * handle the read bus message cmd processing. 
+ * + * @dev: the device structure + * @mei_hdr: header of bus message + */ +void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) +{ + struct mei_bus_message *mei_msg; + struct mei_me_client *me_client; + struct hbm_host_version_response *version_res; + struct hbm_client_connect_response *connect_res; + struct hbm_client_connect_response *disconnect_res; + struct hbm_client_connect_request *disconnect_req; + struct hbm_flow_control *flow_control; + struct hbm_props_response *props_res; + struct hbm_host_enum_response *enum_res; + + /* read the message to our buffer */ + BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf)); + mei_read_slots(dev, dev->rd_msg_buf, hdr->length); + mei_msg = (struct mei_bus_message *)dev->rd_msg_buf; + + switch (mei_msg->hbm_cmd) { + case HOST_START_RES_CMD: + version_res = (struct hbm_host_version_response *)mei_msg; + if (!version_res->host_version_supported) { + dev->version = version_res->me_max_version; + dev_dbg(&dev->pdev->dev, "version mismatch.\n"); + + mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr, + dev->wr_msg.data); + mei_write_message(dev, &dev->wr_msg.hdr, + dev->wr_msg.data); + return; + } + + dev->version.major_version = HBM_MAJOR_VERSION; + dev->version.minor_version = HBM_MINOR_VERSION; + if (dev->dev_state == MEI_DEV_INIT_CLIENTS && + dev->init_clients_state == MEI_START_MESSAGE) { + dev->init_clients_timer = 0; + mei_hbm_enum_clients_req(dev); + } else { + dev->recvd_msg = false; + dev_dbg(&dev->pdev->dev, "reset due to received hbm: host start\n"); + mei_reset(dev, 1); + return; + } + + dev->recvd_msg = true; + dev_dbg(&dev->pdev->dev, "host start response message received.\n"); + break; + + case CLIENT_CONNECT_RES_CMD: + connect_res = (struct hbm_client_connect_response *) mei_msg; + mei_hbm_cl_connect_res(dev, connect_res); + dev_dbg(&dev->pdev->dev, "client connect response message received.\n"); + wake_up(&dev->wait_recvd_msg); + break; + + case CLIENT_DISCONNECT_RES_CMD: + disconnect_res = (struct hbm_client_connect_response *) mei_msg; + mei_hbm_cl_disconnect_res(dev, disconnect_res); + dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n"); + wake_up(&dev->wait_recvd_msg); + break; + + case MEI_FLOW_CONTROL_CMD: + flow_control = (struct hbm_flow_control *) mei_msg; + mei_hbm_cl_flow_control_res(dev, flow_control); + dev_dbg(&dev->pdev->dev, "client flow control response message received.\n"); + break; + + case HOST_CLIENT_PROPERTIES_RES_CMD: + props_res = (struct hbm_props_response *)mei_msg; + me_client = &dev->me_clients[dev->me_client_presentation_num]; + + if (props_res->status || !dev->me_clients) { + dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message wrong status.\n"); + mei_reset(dev, 1); + return; + } + + if (me_client->client_id != props_res->address) { + dev_err(&dev->pdev->dev, + "Host client properties reply mismatch\n"); + mei_reset(dev, 1); + + return; + } + + if (dev->dev_state != MEI_DEV_INIT_CLIENTS || + dev->init_clients_state != MEI_CLIENT_PROPERTIES_MESSAGE) { + dev_err(&dev->pdev->dev, + "Unexpected client properties reply\n"); + mei_reset(dev, 1); + + return; + } + + me_client->props = props_res->client_properties; + dev->me_client_index++; + dev->me_client_presentation_num++; + + /* request property for the next client */ + mei_hbm_prop_req(dev); + + break; + + case HOST_ENUM_RES_CMD: + enum_res = (struct hbm_host_enum_response *) mei_msg; + memcpy(dev->me_clients_map, enum_res->valid_addresses, 32); + if (dev->dev_state == 
MEI_DEV_INIT_CLIENTS && + dev->init_clients_state == MEI_ENUM_CLIENTS_MESSAGE) { + dev->init_clients_timer = 0; + dev->me_client_presentation_num = 0; + dev->me_client_index = 0; + mei_hbm_me_cl_allocate(dev); + dev->init_clients_state = + MEI_CLIENT_PROPERTIES_MESSAGE; + + /* first property request */ + mei_hbm_prop_req(dev); + } else { + dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n"); + mei_reset(dev, 1); + return; + } + break; + + case HOST_STOP_RES_CMD: + dev->dev_state = MEI_DEV_DISABLED; + dev_dbg(&dev->pdev->dev, "resetting because of FW stop response.\n"); + mei_reset(dev, 1); + break; + + case CLIENT_DISCONNECT_REQ_CMD: + /* search for client */ + disconnect_req = (struct hbm_client_connect_request *)mei_msg; + mei_hbm_fw_disconnect_req(dev, disconnect_req); + break; + + case ME_STOP_REQ_CMD: + + mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr, + dev->wr_ext_msg.data); + break; + default: + BUG(); + break; + + } +} + diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h new file mode 100644 index 000000000000..b552afbaf85c --- /dev/null +++ b/drivers/misc/mei/hbm.h @@ -0,0 +1,39 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef _MEI_HBM_H_ +#define _MEI_HBM_H_ + +void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr); + +static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) +{ + hdr->host_addr = 0; + hdr->me_addr = 0; + hdr->length = length; + hdr->msg_complete = 1; + hdr->reserved = 0; +} + +void mei_hbm_start_req(struct mei_device *dev); + +int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl); +int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl); +int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl); + + +#endif /* _MEI_HBM_H_ */ + diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h new file mode 100644 index 000000000000..6a203b6e8346 --- /dev/null +++ b/drivers/misc/mei/hw-me-regs.h @@ -0,0 +1,167 @@ +/****************************************************************************** + * Intel Management Engine Interface (Intel MEI) Linux driver + * Intel MEI Interface Header + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#ifndef _MEI_HW_MEI_REGS_H_ +#define _MEI_HW_MEI_REGS_H_ + +/* + * MEI device IDs + */ +#define MEI_DEV_ID_82946GZ 0x2974 /* 82946GZ/GL */ +#define MEI_DEV_ID_82G35 0x2984 /* 82G35 Express */ +#define MEI_DEV_ID_82Q965 0x2994 /* 82Q963/Q965 */ +#define MEI_DEV_ID_82G965 0x29A4 /* 82P965/G965 */ + +#define MEI_DEV_ID_82GM965 0x2A04 /* Mobile PM965/GM965 */ +#define MEI_DEV_ID_82GME965 0x2A14 /* Mobile GME965/GLE960 */ + +#define MEI_DEV_ID_ICH9_82Q35 0x29B4 /* 82Q35 Express */ +#define MEI_DEV_ID_ICH9_82G33 0x29C4 /* 82G33/G31/P35/P31 Express */ +#define MEI_DEV_ID_ICH9_82Q33 0x29D4 /* 82Q33 Express */ +#define MEI_DEV_ID_ICH9_82X38 0x29E4 /* 82X38/X48 Express */ +#define MEI_DEV_ID_ICH9_3200 0x29F4 /* 3200/3210 Server */ + +#define MEI_DEV_ID_ICH9_6 0x28B4 /* Bearlake */ +#define MEI_DEV_ID_ICH9_7 0x28C4 /* Bearlake */ +#define MEI_DEV_ID_ICH9_8 0x28D4 /* Bearlake */ +#define MEI_DEV_ID_ICH9_9 0x28E4 /* Bearlake */ +#define MEI_DEV_ID_ICH9_10 0x28F4 /* Bearlake */ + +#define MEI_DEV_ID_ICH9M_1 0x2A44 /* Cantiga */ +#define MEI_DEV_ID_ICH9M_2 0x2A54 /* Cantiga */ +#define MEI_DEV_ID_ICH9M_3 0x2A64 /* Cantiga */ +#define MEI_DEV_ID_ICH9M_4 0x2A74 /* Cantiga */ + +#define MEI_DEV_ID_ICH10_1 0x2E04 /* Eaglelake */ +#define MEI_DEV_ID_ICH10_2 0x2E14 /* Eaglelake */ +#define MEI_DEV_ID_ICH10_3 0x2E24 /* Eaglelake */ +#define MEI_DEV_ID_ICH10_4 0x2E34 /* Eaglelake */ + +#define MEI_DEV_ID_IBXPK_1 0x3B64 /* Calpella */ +#define MEI_DEV_ID_IBXPK_2 0x3B65 /* Calpella */ + +#define MEI_DEV_ID_CPT_1 0x1C3A /* Couger Point */ +#define MEI_DEV_ID_PBG_1 0x1D3A /* C600/X79 Patsburg */ + +#define MEI_DEV_ID_PPT_1 0x1E3A /* Panther Point */ +#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */ +#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */ + +#define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */ +#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */ +/* + * MEI HW Section + */ + +/* MEI registers */ +/* H_CB_WW - Host Circular Buffer (CB) Write Window register */ +#define H_CB_WW 0 +/* H_CSR - Host Control Status register */ +#define H_CSR 4 +/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */ +#define ME_CB_RW 8 +/* ME_CSR_HA - ME Control Status Host Access register (read only) */ +#define ME_CSR_HA 0xC + + +/* register bits of H_CSR (Host Control Status register) */ +/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */ +#define H_CBD 0xFF000000 +/* Host Circular Buffer Write Pointer */ +#define H_CBWP 0x00FF0000 +/* Host Circular Buffer Read Pointer */ +#define H_CBRP 0x0000FF00 +/* Host Reset */ +#define H_RST 0x00000010 +/* Host Ready */ +#define H_RDY 0x00000008 +/* Host Interrupt Generate */ +#define H_IG 0x00000004 +/* Host Interrupt Status */ +#define H_IS 0x00000002 +/* Host Interrupt Enable */ +#define H_IE 0x00000001 + + +/* register bits of ME_CSR_HA (ME Control Status Host Access register) */ +/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only +access to ME_CBD */ +#define ME_CBD_HRA 0xFF000000 +/* ME CB Write Pointer HRA - host read only access to ME_CBWP */ +#define ME_CBWP_HRA 0x00FF0000 +/* ME CB Read Pointer HRA - host read only access to ME_CBRP */ +#define ME_CBRP_HRA 0x0000FF00 +/* ME Reset HRA - host read only access to ME_RST */ +#define ME_RST_HRA 0x00000010 +/* ME Ready HRA - host read only access to ME_RDY */ +#define ME_RDY_HRA 0x00000008 +/* ME Interrupt Generate HRA - host read only access to ME_IG */ +#define ME_IG_HRA 0x00000004 +/* ME 
Interrupt Status HRA - host read only access to ME_IS */ +#define ME_IS_HRA 0x00000002 +/* ME Interrupt Enable HRA - host read only access to ME_IE */ +#define ME_IE_HRA 0x00000001 + +#endif /* _MEI_HW_MEI_REGS_H_ */ diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c new file mode 100644 index 000000000000..45ea7185c003 --- /dev/null +++ b/drivers/misc/mei/hw-me.c @@ -0,0 +1,576 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/pci.h> + +#include <linux/kthread.h> +#include <linux/interrupt.h> + +#include "mei_dev.h" +#include "hw-me.h" + +#include "hbm.h" + + +/** + * mei_reg_read - Reads 32bit data from the mei device + * + * @dev: the device structure + * @offset: offset from which to read the data + * + * returns register value (u32) + */ +static inline u32 mei_reg_read(const struct mei_me_hw *hw, + unsigned long offset) +{ + return ioread32(hw->mem_addr + offset); +} + + +/** + * mei_reg_write - Writes 32bit data to the mei device + * + * @dev: the device structure + * @offset: offset from which to write the data + * @value: register value to write (u32) + */ +static inline void mei_reg_write(const struct mei_me_hw *hw, + unsigned long offset, u32 value) +{ + iowrite32(value, hw->mem_addr + offset); +} + +/** + * mei_mecbrw_read - Reads 32bit data from ME circular buffer + * read window register + * + * @dev: the device structure + * + * returns ME_CB_RW register value (u32) + */ +static u32 mei_me_mecbrw_read(const struct mei_device *dev) +{ + return mei_reg_read(to_me_hw(dev), ME_CB_RW); +} +/** + * mei_mecsr_read - Reads 32bit data from the ME CSR + * + * @dev: the device structure + * + * returns ME_CSR_HA register value (u32) + */ +static inline u32 mei_mecsr_read(const struct mei_me_hw *hw) +{ + return mei_reg_read(hw, ME_CSR_HA); +} + +/** + * mei_hcsr_read - Reads 32bit data from the host CSR + * + * @dev: the device structure + * + * returns H_CSR register value (u32) + */ +static inline u32 mei_hcsr_read(const struct mei_me_hw *hw) +{ + return mei_reg_read(hw, H_CSR); +} + +/** + * mei_hcsr_set - writes H_CSR register to the mei device, + * and ignores the H_IS bit for it is write-one-to-zero. 
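A note on the register access pattern above: H_IS is a write-one-to-clear status bit, so a plain read-modify-write of H_CSR would silently acknowledge a pending interrupt. That is why mei_hcsr_set() below masks H_IS before writing, while mei_me_intr_clear() and the quick IRQ handler deliberately write the bit back to ack it. The following stand-alone sketch models that behaviour with an ordinary variable standing in for the memory-mapped register; the csr_write() model and the sample value are illustrative, not driver code:

#include <stdio.h>
#include <stdint.h>

#define H_IS 0x00000002	/* interrupt status, write-one-to-clear */
#define H_IE 0x00000001	/* interrupt enable */

/* Model of a write to H_CSR: control bits take the written value, but the
 * latched H_IS status is only cleared when a 1 is written to it. */
static uint32_t csr_write(uint32_t current, uint32_t value)
{
	uint32_t next = value & ~H_IS;

	if (!(value & H_IS))
		next |= current & H_IS;	/* status survives a 0 write */
	return next;
}

int main(void)
{
	uint32_t csr = H_IS;	/* interrupt pending, everything else off */
	uint32_t naive = csr_write(csr, csr | H_IE);		/* acks by accident */
	uint32_t masked = csr_write(csr, (csr | H_IE) & ~H_IS);	/* mei_hcsr_set() style */

	printf("naive write:  H_IS=%d\n", !!(naive & H_IS));	/* 0 - event lost */
	printf("masked write: H_IS=%d\n", !!(masked & H_IS));	/* 1 - still pending */
	return 0;
}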
+ * + * @dev: the device structure + */ +static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr) +{ + hcsr &= ~H_IS; + mei_reg_write(hw, H_CSR, hcsr); +} + + +/** + * me_hw_config - configure hw dependent settings + * + * @dev: mei device + */ +static void mei_me_hw_config(struct mei_device *dev) +{ + u32 hcsr = mei_hcsr_read(to_me_hw(dev)); + /* Doesn't change in runtime */ + dev->hbuf_depth = (hcsr & H_CBD) >> 24; +} +/** + * mei_clear_interrupts - clear and stop interrupts + * + * @dev: the device structure + */ +static void mei_me_intr_clear(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 hcsr = mei_hcsr_read(hw); + if ((hcsr & H_IS) == H_IS) + mei_reg_write(hw, H_CSR, hcsr); +} +/** + * mei_me_intr_enable - enables mei device interrupts + * + * @dev: the device structure + */ +static void mei_me_intr_enable(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 hcsr = mei_hcsr_read(hw); + hcsr |= H_IE; + mei_hcsr_set(hw, hcsr); +} + +/** + * mei_disable_interrupts - disables mei device interrupts + * + * @dev: the device structure + */ +static void mei_me_intr_disable(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 hcsr = mei_hcsr_read(hw); + hcsr &= ~H_IE; + mei_hcsr_set(hw, hcsr); +} + +/** + * mei_me_hw_reset - resets fw via mei csr register. + * + * @dev: the device structure + * @interrupts_enabled: if interrupt should be enabled after reset. + */ +static void mei_me_hw_reset(struct mei_device *dev, bool intr_enable) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 hcsr = mei_hcsr_read(hw); + + dev_dbg(&dev->pdev->dev, "before reset HCSR = 0x%08x.\n", hcsr); + + hcsr |= (H_RST | H_IG); + + if (intr_enable) + hcsr |= H_IE; + else + hcsr &= ~H_IE; + + mei_hcsr_set(hw, hcsr); + + hcsr = mei_hcsr_read(hw) | H_IG; + hcsr &= ~H_RST; + + mei_hcsr_set(hw, hcsr); + + hcsr = mei_hcsr_read(hw); + + dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", hcsr); +} + +/** + * mei_me_host_set_ready - enable device + * + * @dev - mei device + * returns bool + */ + +static void mei_me_host_set_ready(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + hw->host_hw_state |= H_IE | H_IG | H_RDY; + mei_hcsr_set(hw, hw->host_hw_state); +} +/** + * mei_me_host_is_ready - check whether the host has turned ready + * + * @dev - mei device + * returns bool + */ +static bool mei_me_host_is_ready(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + hw->host_hw_state = mei_hcsr_read(hw); + return (hw->host_hw_state & H_RDY) == H_RDY; +} + +/** + * mei_me_hw_is_ready - check whether the me(hw) has turned ready + * + * @dev - mei device + * returns bool + */ +static bool mei_me_hw_is_ready(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + hw->me_hw_state = mei_mecsr_read(hw); + return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA; +} + +/** + * mei_hbuf_filled_slots - gets number of device filled buffer slots + * + * @dev: the device structure + * + * returns number of filled slots + */ +static unsigned char mei_hbuf_filled_slots(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + char read_ptr, write_ptr; + + hw->host_hw_state = mei_hcsr_read(hw); + + read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8); + write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16); + + return (unsigned char) (write_ptr - read_ptr); +} + +/** + * mei_hbuf_is_empty - checks if host buffer is empty. + * + * @dev: the device structure + * + * returns true if empty, false - otherwise. 
+ */ +static bool mei_me_hbuf_is_empty(struct mei_device *dev) +{ + return mei_hbuf_filled_slots(dev) == 0; +} + +/** + * mei_me_hbuf_empty_slots - counts write empty slots. + * + * @dev: the device structure + * + * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise empty slots count + */ +static int mei_me_hbuf_empty_slots(struct mei_device *dev) +{ + unsigned char filled_slots, empty_slots; + + filled_slots = mei_hbuf_filled_slots(dev); + empty_slots = dev->hbuf_depth - filled_slots; + + /* check for overflow */ + if (filled_slots > dev->hbuf_depth) + return -EOVERFLOW; + + return empty_slots; +} + +static size_t mei_me_hbuf_max_len(const struct mei_device *dev) +{ + return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr); +} + + +/** + * mei_write_message - writes a message to mei device. + * + * @dev: the device structure + * @header: mei HECI header of message + * @buf: message payload will be written + * + * This function returns -EIO if write has failed + */ +static int mei_me_write_message(struct mei_device *dev, + struct mei_msg_hdr *header, + unsigned char *buf) +{ + struct mei_me_hw *hw = to_me_hw(dev); + unsigned long rem, dw_cnt; + unsigned long length = header->length; + u32 *reg_buf = (u32 *)buf; + u32 hcsr; + int i; + int empty_slots; + + dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header)); + + empty_slots = mei_hbuf_empty_slots(dev); + dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots); + + dw_cnt = mei_data2slots(length); + if (empty_slots < 0 || dw_cnt > empty_slots) + return -EIO; + + mei_reg_write(hw, H_CB_WW, *((u32 *) header)); + + for (i = 0; i < length / 4; i++) + mei_reg_write(hw, H_CB_WW, reg_buf[i]); + + rem = length & 0x3; + if (rem > 0) { + u32 reg = 0; + memcpy(®, &buf[length - rem], rem); + mei_reg_write(hw, H_CB_WW, reg); + } + + hcsr = mei_hcsr_read(hw) | H_IG; + mei_hcsr_set(hw, hcsr); + if (!mei_me_hw_is_ready(dev)) + return -EIO; + + return 0; +} + +/** + * mei_me_count_full_read_slots - counts read full slots. + * + * @dev: the device structure + * + * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise filled slots count + */ +static int mei_me_count_full_read_slots(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + char read_ptr, write_ptr; + unsigned char buffer_depth, filled_slots; + + hw->me_hw_state = mei_mecsr_read(hw); + buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24); + read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8); + write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16); + filled_slots = (unsigned char) (write_ptr - read_ptr); + + /* check for overflow */ + if (filled_slots > buffer_depth) + return -EOVERFLOW; + + dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots); + return (int)filled_slots; +} + +/** + * mei_me_read_slots - reads a message from mei device. 
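The host circular buffer is addressed in 32-bit slots: mei_data2slots() (in hw-me.h further down) rounds the header plus payload up to whole dwords, and mei_me_write_message() above pushes the header dword, the full payload dwords and finally any 1-3 trailing bytes packed into a zero-padded dword through H_CB_WW. A stand-alone sketch of that packing arithmetic; the stand-in header type and the 11-byte sample payload are made up for illustration:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct msg_hdr { uint32_t word; };	/* stand-in for struct mei_msg_hdr */

/* Split a header plus 'length' payload bytes into 32-bit slots the way
 * mei_me_write_message() feeds them to H_CB_WW; returns the slot count. */
static size_t pack_message(const struct msg_hdr *hdr, const uint8_t *buf,
			   size_t length, uint32_t *slots)
{
	size_t n = 0, i;
	size_t rem = length & 0x3;

	slots[n++] = hdr->word;			/* header always takes one slot */
	for (i = 0; i < length / 4; i++)	/* whole payload dwords */
		memcpy(&slots[n++], &buf[i * 4], 4);
	if (rem) {				/* 1-3 byte tail, zero padded */
		uint32_t reg = 0;

		memcpy(&reg, &buf[length - rem], rem);
		slots[n++] = reg;
	}
	return n;
}

int main(void)
{
	const uint8_t payload[11] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
	const struct msg_hdr hdr = { .word = 0 };
	uint32_t slots[8];
	size_t used = pack_message(&hdr, payload, sizeof(payload), slots);

	/* 4 header bytes + 11 payload bytes -> DIV_ROUND_UP(15, 4) = 4 slots */
	printf("used %zu slots, formula says %zu\n", used,
	       (size_t)DIV_ROUND_UP(sizeof(hdr) + sizeof(payload), 4));
	return 0;
}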
+ * + * @dev: the device structure + * @buffer: message buffer will be written + * @buffer_length: message size will be read + */ +static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer, + unsigned long buffer_length) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 *reg_buf = (u32 *)buffer; + u32 hcsr; + + for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32)) + *reg_buf++ = mei_me_mecbrw_read(dev); + + if (buffer_length > 0) { + u32 reg = mei_me_mecbrw_read(dev); + memcpy(reg_buf, ®, buffer_length); + } + + hcsr = mei_hcsr_read(hw) | H_IG; + mei_hcsr_set(hw, hcsr); + return 0; +} + +/** + * mei_me_irq_quick_handler - The ISR of the MEI device + * + * @irq: The irq number + * @dev_id: pointer to the device structure + * + * returns irqreturn_t + */ + +irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id) +{ + struct mei_device *dev = (struct mei_device *) dev_id; + struct mei_me_hw *hw = to_me_hw(dev); + u32 csr_reg = mei_hcsr_read(hw); + + if ((csr_reg & H_IS) != H_IS) + return IRQ_NONE; + + /* clear H_IS bit in H_CSR */ + mei_reg_write(hw, H_CSR, csr_reg); + + return IRQ_WAKE_THREAD; +} + +/** + * mei_me_irq_thread_handler - function called after ISR to handle the interrupt + * processing. + * + * @irq: The irq number + * @dev_id: pointer to the device structure + * + * returns irqreturn_t + * + */ +irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) +{ + struct mei_device *dev = (struct mei_device *) dev_id; + struct mei_cl_cb complete_list; + struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL; + struct mei_cl *cl; + s32 slots; + int rets; + bool bus_message_received; + + + dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n"); + /* initialize our complete list */ + mutex_lock(&dev->device_lock); + mei_io_list_init(&complete_list); + + /* Ack the interrupt here + * In case of MSI we don't go through the quick handler */ + if (pci_dev_msi_enabled(dev->pdev)) + mei_clear_interrupts(dev); + + /* check if ME wants a reset */ + if (!mei_hw_is_ready(dev) && + dev->dev_state != MEI_DEV_RESETING && + dev->dev_state != MEI_DEV_INITIALIZING) { + dev_dbg(&dev->pdev->dev, "FW not ready.\n"); + mei_reset(dev, 1); + mutex_unlock(&dev->device_lock); + return IRQ_HANDLED; + } + + /* check if we need to start the dev */ + if (!mei_host_is_ready(dev)) { + if (mei_hw_is_ready(dev)) { + dev_dbg(&dev->pdev->dev, "we need to start the dev.\n"); + + mei_host_set_ready(dev); + + dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n"); + /* link is established * start sending messages. 
*/ + + dev->dev_state = MEI_DEV_INIT_CLIENTS; + + mei_hbm_start_req(dev); + mutex_unlock(&dev->device_lock); + return IRQ_HANDLED; + } else { + dev_dbg(&dev->pdev->dev, "FW not ready.\n"); + mutex_unlock(&dev->device_lock); + return IRQ_HANDLED; + } + } + /* check slots available for reading */ + slots = mei_count_full_read_slots(dev); + while (slots > 0) { + /* we have urgent data to send so break the read */ + if (dev->wr_ext_msg.hdr.length) + break; + dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots); + dev_dbg(&dev->pdev->dev, "call mei_irq_read_handler.\n"); + rets = mei_irq_read_handler(dev, &complete_list, &slots); + if (rets) + goto end; + } + rets = mei_irq_write_handler(dev, &complete_list); +end: + dev_dbg(&dev->pdev->dev, "end of bottom half function.\n"); + dev->hbuf_is_ready = mei_hbuf_is_ready(dev); + + bus_message_received = false; + if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) { + dev_dbg(&dev->pdev->dev, "received waiting bus message\n"); + bus_message_received = true; + } + mutex_unlock(&dev->device_lock); + if (bus_message_received) { + dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n"); + wake_up_interruptible(&dev->wait_recvd_msg); + bus_message_received = false; + } + if (list_empty(&complete_list.list)) + return IRQ_HANDLED; + + + list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) { + cl = cb_pos->cl; + list_del(&cb_pos->list); + if (cl) { + if (cl != &dev->iamthif_cl) { + dev_dbg(&dev->pdev->dev, "completing call back.\n"); + mei_irq_complete_handler(cl, cb_pos); + cb_pos = NULL; + } else if (cl == &dev->iamthif_cl) { + mei_amthif_complete(dev, cb_pos); + } + } + } + return IRQ_HANDLED; +} +static const struct mei_hw_ops mei_me_hw_ops = { + + .host_set_ready = mei_me_host_set_ready, + .host_is_ready = mei_me_host_is_ready, + + .hw_is_ready = mei_me_hw_is_ready, + .hw_reset = mei_me_hw_reset, + .hw_config = mei_me_hw_config, + + .intr_clear = mei_me_intr_clear, + .intr_enable = mei_me_intr_enable, + .intr_disable = mei_me_intr_disable, + + .hbuf_free_slots = mei_me_hbuf_empty_slots, + .hbuf_is_ready = mei_me_hbuf_is_empty, + .hbuf_max_len = mei_me_hbuf_max_len, + + .write = mei_me_write_message, + + .rdbuf_full_slots = mei_me_count_full_read_slots, + .read_hdr = mei_me_mecbrw_read, + .read = mei_me_read_slots +}; + +/** + * init_mei_device - allocates and initializes the mei device structure + * + * @pdev: The pci device structure + * + * returns The mei_device_device pointer on success, NULL on failure. + */ +struct mei_device *mei_me_dev_init(struct pci_dev *pdev) +{ + struct mei_device *dev; + + dev = kzalloc(sizeof(struct mei_device) + + sizeof(struct mei_me_hw), GFP_KERNEL); + if (!dev) + return NULL; + + mei_device_init(dev); + + INIT_LIST_HEAD(&dev->wd_cl.link); + INIT_LIST_HEAD(&dev->iamthif_cl.link); + mei_io_list_init(&dev->amthif_cmd_list); + mei_io_list_init(&dev->amthif_rd_complete_list); + + INIT_DELAYED_WORK(&dev->timer_work, mei_timer); + INIT_WORK(&dev->init_work, mei_host_client_init); + + dev->ops = &mei_me_hw_ops; + + dev->pdev = pdev; + return dev; +} + diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h new file mode 100644 index 000000000000..8518d3eeb838 --- /dev/null +++ b/drivers/misc/mei/hw-me.h @@ -0,0 +1,48 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. 
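The two handlers defined above are meant to be installed as a primary/threaded pair: mei_me_irq_quick_handler() runs in hard-irq context, only checks and acks H_IS and returns IRQ_WAKE_THREAD, while mei_me_irq_thread_handler() takes the device lock and does the actual read/write list processing. The registration itself is not part of this hunk (the MEI PCI probe path does it); the sketch below shows how such a pair is typically wired up, where the helper name and the IRQF_SHARED/KBUILD_MODNAME choices are illustrative assumptions rather than the driver's actual call:

/* Illustrative sketch only - assumes <linux/pci.h> and <linux/interrupt.h>;
 * the real registration lives in the MEI PCI probe code outside this hunk. */
static int mei_me_irq_setup_sketch(struct pci_dev *pdev, struct mei_device *dev)
{
	/* quick handler: hard-irq context, just acks H_IS and wakes the thread;
	 * thread handler: does the locked request/response processing */
	return request_threaded_irq(pdev->irq,
				    mei_me_irq_quick_handler,
				    mei_me_irq_thread_handler,
				    IRQF_SHARED, KBUILD_MODNAME, dev);
}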
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + + + +#ifndef _MEI_INTERFACE_H_ +#define _MEI_INTERFACE_H_ + +#include <linux/mei.h> +#include "mei_dev.h" +#include "client.h" + +struct mei_me_hw { + void __iomem *mem_addr; + /* + * hw states of host and fw(ME) + */ + u32 host_hw_state; + u32 me_hw_state; +}; + +#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw) + +struct mei_device *mei_me_dev_init(struct pci_dev *pdev); + +/* get slots (dwords) from a message length + header (bytes) */ +static inline unsigned char mei_data2slots(size_t length) +{ + return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4); +} + +irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id); +irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id); + +#endif /* _MEI_INTERFACE_H_ */ diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index be8ca6b333ca..cb2f556b4252 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -31,109 +31,6 @@ #define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */ #define MEI_IAMTHIF_READ_TIMER 10 /* HPS */ -/* - * Internal Clients Number - */ -#define MEI_WD_HOST_CLIENT_ID 1 -#define MEI_IAMTHIF_HOST_CLIENT_ID 2 - -/* - * MEI device IDs - */ -#define MEI_DEV_ID_82946GZ 0x2974 /* 82946GZ/GL */ -#define MEI_DEV_ID_82G35 0x2984 /* 82G35 Express */ -#define MEI_DEV_ID_82Q965 0x2994 /* 82Q963/Q965 */ -#define MEI_DEV_ID_82G965 0x29A4 /* 82P965/G965 */ - -#define MEI_DEV_ID_82GM965 0x2A04 /* Mobile PM965/GM965 */ -#define MEI_DEV_ID_82GME965 0x2A14 /* Mobile GME965/GLE960 */ - -#define MEI_DEV_ID_ICH9_82Q35 0x29B4 /* 82Q35 Express */ -#define MEI_DEV_ID_ICH9_82G33 0x29C4 /* 82G33/G31/P35/P31 Express */ -#define MEI_DEV_ID_ICH9_82Q33 0x29D4 /* 82Q33 Express */ -#define MEI_DEV_ID_ICH9_82X38 0x29E4 /* 82X38/X48 Express */ -#define MEI_DEV_ID_ICH9_3200 0x29F4 /* 3200/3210 Server */ - -#define MEI_DEV_ID_ICH9_6 0x28B4 /* Bearlake */ -#define MEI_DEV_ID_ICH9_7 0x28C4 /* Bearlake */ -#define MEI_DEV_ID_ICH9_8 0x28D4 /* Bearlake */ -#define MEI_DEV_ID_ICH9_9 0x28E4 /* Bearlake */ -#define MEI_DEV_ID_ICH9_10 0x28F4 /* Bearlake */ - -#define MEI_DEV_ID_ICH9M_1 0x2A44 /* Cantiga */ -#define MEI_DEV_ID_ICH9M_2 0x2A54 /* Cantiga */ -#define MEI_DEV_ID_ICH9M_3 0x2A64 /* Cantiga */ -#define MEI_DEV_ID_ICH9M_4 0x2A74 /* Cantiga */ - -#define MEI_DEV_ID_ICH10_1 0x2E04 /* Eaglelake */ -#define MEI_DEV_ID_ICH10_2 0x2E14 /* Eaglelake */ -#define MEI_DEV_ID_ICH10_3 0x2E24 /* Eaglelake */ -#define MEI_DEV_ID_ICH10_4 0x2E34 /* Eaglelake */ - -#define MEI_DEV_ID_IBXPK_1 0x3B64 /* Calpella */ -#define MEI_DEV_ID_IBXPK_2 0x3B65 /* Calpella */ - -#define MEI_DEV_ID_CPT_1 0x1C3A /* Couger Point */ -#define MEI_DEV_ID_PBG_1 0x1D3A /* C600/X79 Patsburg */ - -#define MEI_DEV_ID_PPT_1 0x1E3A /* Panther Point */ -#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */ -#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */ - -#define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */ -#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */ -/* - * MEI HW Section - */ - -/* MEI registers */ -/* H_CB_WW - Host Circular Buffer (CB) Write Window register */ -#define H_CB_WW 0 -/* H_CSR - Host Control Status 
register */ -#define H_CSR 4 -/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */ -#define ME_CB_RW 8 -/* ME_CSR_HA - ME Control Status Host Access register (read only) */ -#define ME_CSR_HA 0xC - - -/* register bits of H_CSR (Host Control Status register) */ -/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */ -#define H_CBD 0xFF000000 -/* Host Circular Buffer Write Pointer */ -#define H_CBWP 0x00FF0000 -/* Host Circular Buffer Read Pointer */ -#define H_CBRP 0x0000FF00 -/* Host Reset */ -#define H_RST 0x00000010 -/* Host Ready */ -#define H_RDY 0x00000008 -/* Host Interrupt Generate */ -#define H_IG 0x00000004 -/* Host Interrupt Status */ -#define H_IS 0x00000002 -/* Host Interrupt Enable */ -#define H_IE 0x00000001 - - -/* register bits of ME_CSR_HA (ME Control Status Host Access register) */ -/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only -access to ME_CBD */ -#define ME_CBD_HRA 0xFF000000 -/* ME CB Write Pointer HRA - host read only access to ME_CBWP */ -#define ME_CBWP_HRA 0x00FF0000 -/* ME CB Read Pointer HRA - host read only access to ME_CBRP */ -#define ME_CBRP_HRA 0x0000FF00 -/* ME Reset HRA - host read only access to ME_RST */ -#define ME_RST_HRA 0x00000010 -/* ME Ready HRA - host read only access to ME_RDY */ -#define ME_RDY_HRA 0x00000008 -/* ME Interrupt Generate HRA - host read only access to ME_IG */ -#define ME_IG_HRA 0x00000004 -/* ME Interrupt Status HRA - host read only access to ME_IS */ -#define ME_IS_HRA 0x00000002 -/* ME Interrupt Enable HRA - host read only access to ME_IE */ -#define ME_IE_HRA 0x00000001 /* * MEI Version @@ -224,6 +121,22 @@ struct mei_bus_message { u8 data[0]; } __packed; +/** + * struct hbm_cl_cmd - client specific host bus command + * CONNECT, DISCONNECT, and FlOW CONTROL + * + * @hbm_cmd - bus message command header + * @me_addr - address of the client in ME + * @host_addr - address of the client in the driver + * @data + */ +struct mei_hbm_cl_cmd { + u8 hbm_cmd; + u8 me_addr; + u8 host_addr; + u8 data; +}; + struct hbm_version { u8 minor_version; u8 major_version; @@ -333,11 +246,5 @@ struct hbm_flow_control { u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH]; } __packed; -struct mei_me_client { - struct mei_client_properties props; - u8 client_id; - u8 mei_flow_ctrl_creds; -} __packed; - #endif diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index a54cd5567ca2..6ec530168afb 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -19,11 +19,11 @@ #include <linux/wait.h> #include <linux/delay.h> -#include "mei_dev.h" -#include "hw.h" -#include "interface.h" #include <linux/mei.h> +#include "mei_dev.h" +#include "client.h" + const char *mei_dev_state_str(int state) { #define MEI_DEV_STATE(state) case MEI_DEV_##state: return #state @@ -42,84 +42,20 @@ const char *mei_dev_state_str(int state) #undef MEI_DEV_STATE } - - -/** - * mei_io_list_flush - removes list entry belonging to cl. - * - * @list: An instance of our list structure - * @cl: private data of the file object - */ -void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl) -{ - struct mei_cl_cb *pos; - struct mei_cl_cb *next; - - list_for_each_entry_safe(pos, next, &list->list, list) { - if (pos->cl) { - if (mei_cl_cmp_id(cl, pos->cl)) - list_del(&pos->list); - } - } -} -/** - * mei_cl_flush_queues - flushes queue lists belonging to cl. 
- * - * @dev: the device structure - * @cl: private data of the file object - */ -int mei_cl_flush_queues(struct mei_cl *cl) +void mei_device_init(struct mei_device *dev) { - if (!cl || !cl->dev) - return -EINVAL; - - dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n"); - mei_io_list_flush(&cl->dev->read_list, cl); - mei_io_list_flush(&cl->dev->write_list, cl); - mei_io_list_flush(&cl->dev->write_waiting_list, cl); - mei_io_list_flush(&cl->dev->ctrl_wr_list, cl); - mei_io_list_flush(&cl->dev->ctrl_rd_list, cl); - mei_io_list_flush(&cl->dev->amthif_cmd_list, cl); - mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl); - return 0; -} - - - -/** - * init_mei_device - allocates and initializes the mei device structure - * - * @pdev: The pci device structure - * - * returns The mei_device_device pointer on success, NULL on failure. - */ -struct mei_device *mei_device_init(struct pci_dev *pdev) -{ - struct mei_device *dev; - - dev = kzalloc(sizeof(struct mei_device), GFP_KERNEL); - if (!dev) - return NULL; - /* setup our list array */ INIT_LIST_HEAD(&dev->file_list); - INIT_LIST_HEAD(&dev->wd_cl.link); - INIT_LIST_HEAD(&dev->iamthif_cl.link); mutex_init(&dev->device_lock); init_waitqueue_head(&dev->wait_recvd_msg); init_waitqueue_head(&dev->wait_stop_wd); dev->dev_state = MEI_DEV_INITIALIZING; - dev->iamthif_state = MEI_IAMTHIF_IDLE; mei_io_list_init(&dev->read_list); mei_io_list_init(&dev->write_list); mei_io_list_init(&dev->write_waiting_list); mei_io_list_init(&dev->ctrl_wr_list); mei_io_list_init(&dev->ctrl_rd_list); - mei_io_list_init(&dev->amthif_cmd_list); - mei_io_list_init(&dev->amthif_rd_complete_list); - dev->pdev = pdev; - return dev; } /** @@ -131,101 +67,64 @@ struct mei_device *mei_device_init(struct pci_dev *pdev) */ int mei_hw_init(struct mei_device *dev) { - int err = 0; - int ret; + int ret = 0; mutex_lock(&dev->device_lock); - dev->host_hw_state = mei_hcsr_read(dev); - dev->me_hw_state = mei_mecsr_read(dev); - dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, mestate = 0x%08x.\n", - dev->host_hw_state, dev->me_hw_state); - /* acknowledge interrupt and stop interupts */ - if ((dev->host_hw_state & H_IS) == H_IS) - mei_reg_write(dev, H_CSR, dev->host_hw_state); + mei_clear_interrupts(dev); - /* Doesn't change in runtime */ - dev->hbuf_depth = (dev->host_hw_state & H_CBD) >> 24; + mei_hw_config(dev); dev->recvd_msg = false; dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n"); mei_reset(dev, 1); - dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n", - dev->host_hw_state, dev->me_hw_state); - /* wait for ME to turn on ME_RDY */ if (!dev->recvd_msg) { mutex_unlock(&dev->device_lock); - err = wait_event_interruptible_timeout(dev->wait_recvd_msg, + ret = wait_event_interruptible_timeout(dev->wait_recvd_msg, dev->recvd_msg, mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT)); mutex_lock(&dev->device_lock); } - if (err <= 0 && !dev->recvd_msg) { + if (ret <= 0 && !dev->recvd_msg) { dev->dev_state = MEI_DEV_DISABLED; dev_dbg(&dev->pdev->dev, "wait_event_interruptible_timeout failed" "on wait for ME to turn on ME_RDY.\n"); - ret = -ENODEV; - goto out; + goto err; } - if (!(((dev->host_hw_state & H_RDY) == H_RDY) && - ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA))) { - dev->dev_state = MEI_DEV_DISABLED; - dev_dbg(&dev->pdev->dev, - "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n", - dev->host_hw_state, dev->me_hw_state); - - if (!(dev->host_hw_state & H_RDY)) - dev_dbg(&dev->pdev->dev, "host turn off H_RDY.\n"); - if (!(dev->me_hw_state & 
ME_RDY_HRA)) - dev_dbg(&dev->pdev->dev, "ME turn off ME_RDY.\n"); + if (!mei_host_is_ready(dev)) { + dev_err(&dev->pdev->dev, "host is not ready.\n"); + goto err; + } - dev_err(&dev->pdev->dev, "link layer initialization failed.\n"); - ret = -ENODEV; - goto out; + if (!mei_hw_is_ready(dev)) { + dev_err(&dev->pdev->dev, "ME is not ready.\n"); + goto err; } if (dev->version.major_version != HBM_MAJOR_VERSION || dev->version.minor_version != HBM_MINOR_VERSION) { dev_dbg(&dev->pdev->dev, "MEI start failed.\n"); - ret = -ENODEV; - goto out; + goto err; } dev->recvd_msg = false; - dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n", - dev->host_hw_state, dev->me_hw_state); - dev_dbg(&dev->pdev->dev, "ME turn on ME_RDY and host turn on H_RDY.\n"); dev_dbg(&dev->pdev->dev, "link layer has been established.\n"); - dev_dbg(&dev->pdev->dev, "MEI start success.\n"); - ret = 0; -out: mutex_unlock(&dev->device_lock); - return ret; -} - -/** - * mei_hw_reset - resets fw via mei csr register. - * - * @dev: the device structure - * @interrupts_enabled: if interrupt should be enabled after reset. - */ -static void mei_hw_reset(struct mei_device *dev, int interrupts_enabled) -{ - dev->host_hw_state |= (H_RST | H_IG); - - if (interrupts_enabled) - mei_enable_interrupts(dev); - else - mei_disable_interrupts(dev); + return 0; +err: + dev_err(&dev->pdev->dev, "link layer initialization failed.\n"); + dev->dev_state = MEI_DEV_DISABLED; + mutex_unlock(&dev->device_lock); + return -ENODEV; } /** @@ -236,56 +135,34 @@ static void mei_hw_reset(struct mei_device *dev, int interrupts_enabled) */ void mei_reset(struct mei_device *dev, int interrupts_enabled) { - struct mei_cl *cl_pos = NULL; - struct mei_cl *cl_next = NULL; - struct mei_cl_cb *cb_pos = NULL; - struct mei_cl_cb *cb_next = NULL; bool unexpected; - if (dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) { - dev->need_reset = true; + if (dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) return; - } unexpected = (dev->dev_state != MEI_DEV_INITIALIZING && dev->dev_state != MEI_DEV_DISABLED && dev->dev_state != MEI_DEV_POWER_DOWN && dev->dev_state != MEI_DEV_POWER_UP); - dev->host_hw_state = mei_hcsr_read(dev); - - dev_dbg(&dev->pdev->dev, "before reset host_hw_state = 0x%08x.\n", - dev->host_hw_state); - mei_hw_reset(dev, interrupts_enabled); - dev->host_hw_state &= ~H_RST; - dev->host_hw_state |= H_IG; - - mei_hcsr_set(dev); - - dev_dbg(&dev->pdev->dev, "currently saved host_hw_state = 0x%08x.\n", - dev->host_hw_state); - - dev->need_reset = false; if (dev->dev_state != MEI_DEV_INITIALIZING) { if (dev->dev_state != MEI_DEV_DISABLED && dev->dev_state != MEI_DEV_POWER_DOWN) dev->dev_state = MEI_DEV_RESETING; - list_for_each_entry_safe(cl_pos, - cl_next, &dev->file_list, link) { - cl_pos->state = MEI_FILE_DISCONNECTED; - cl_pos->mei_flow_ctrl_creds = 0; - cl_pos->read_cb = NULL; - cl_pos->timer_count = 0; - } + mei_cl_all_disconnect(dev); + /* remove entry if already in list */ dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n"); - mei_me_cl_unlink(dev, &dev->wd_cl); - - mei_me_cl_unlink(dev, &dev->iamthif_cl); + mei_cl_unlink(&dev->wd_cl); + if (dev->open_handle_count > 0) + dev->open_handle_count--; + mei_cl_unlink(&dev->iamthif_cl); + if (dev->open_handle_count > 0) + dev->open_handle_count--; mei_amthif_reset_params(dev); memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg)); @@ -295,392 +172,17 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled) dev->rd_msg_hdr = 0; dev->wd_pending = false; - /* update 
the state of the registers after reset */ - dev->host_hw_state = mei_hcsr_read(dev); - dev->me_hw_state = mei_mecsr_read(dev); - - dev_dbg(&dev->pdev->dev, "after reset host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n", - dev->host_hw_state, dev->me_hw_state); - if (unexpected) dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n", mei_dev_state_str(dev->dev_state)); - /* Wake up all readings so they can be interrupted */ - list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) { - if (waitqueue_active(&cl_pos->rx_wait)) { - dev_dbg(&dev->pdev->dev, "Waking up client!\n"); - wake_up_interruptible(&cl_pos->rx_wait); - } - } - /* remove all waiting requests */ - list_for_each_entry_safe(cb_pos, cb_next, &dev->write_list.list, list) { - list_del(&cb_pos->list); - mei_io_cb_free(cb_pos); - } -} - - - -/** - * host_start_message - mei host sends start message. - * - * @dev: the device structure - * - * returns none. - */ -void mei_host_start_message(struct mei_device *dev) -{ - struct mei_msg_hdr *mei_hdr; - struct hbm_host_version_request *start_req; - const size_t len = sizeof(struct hbm_host_version_request); - - mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len); - - /* host start message */ - start_req = (struct hbm_host_version_request *)&dev->wr_msg_buf[1]; - memset(start_req, 0, len); - start_req->hbm_cmd = HOST_START_REQ_CMD; - start_req->host_version.major_version = HBM_MAJOR_VERSION; - start_req->host_version.minor_version = HBM_MINOR_VERSION; - - dev->recvd_msg = false; - if (mei_write_message(dev, mei_hdr, (unsigned char *)start_req, len)) { - dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n"); - dev->dev_state = MEI_DEV_RESETING; - mei_reset(dev, 1); - } - dev->init_clients_state = MEI_START_MESSAGE; - dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; - return ; -} - -/** - * host_enum_clients_message - host sends enumeration client request message. - * - * @dev: the device structure - * - * returns none. - */ -void mei_host_enum_clients_message(struct mei_device *dev) -{ - struct mei_msg_hdr *mei_hdr; - struct hbm_host_enum_request *enum_req; - const size_t len = sizeof(struct hbm_host_enum_request); - /* enumerate clients */ - mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len); - - enum_req = (struct hbm_host_enum_request *) &dev->wr_msg_buf[1]; - memset(enum_req, 0, sizeof(struct hbm_host_enum_request)); - enum_req->hbm_cmd = HOST_ENUM_REQ_CMD; - - if (mei_write_message(dev, mei_hdr, (unsigned char *)enum_req, len)) { - dev->dev_state = MEI_DEV_RESETING; - dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n"); - mei_reset(dev, 1); - } - dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE; - dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; - return; -} - - -/** - * allocate_me_clients_storage - allocates storage for me clients - * - * @dev: the device structure - * - * returns none. 
- */ -void mei_allocate_me_clients_storage(struct mei_device *dev) -{ - struct mei_me_client *clients; - int b; - - /* count how many ME clients we have */ - for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX) - dev->me_clients_num++; - - if (dev->me_clients_num <= 0) - return ; - - - if (dev->me_clients != NULL) { - kfree(dev->me_clients); - dev->me_clients = NULL; - } - dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n", - dev->me_clients_num * sizeof(struct mei_me_client)); - /* allocate storage for ME clients representation */ - clients = kcalloc(dev->me_clients_num, - sizeof(struct mei_me_client), GFP_KERNEL); - if (!clients) { - dev_dbg(&dev->pdev->dev, "memory allocation for ME clients failed.\n"); - dev->dev_state = MEI_DEV_RESETING; - mei_reset(dev, 1); - return ; - } - dev->me_clients = clients; - return ; -} - -void mei_host_client_init(struct work_struct *work) -{ - struct mei_device *dev = container_of(work, - struct mei_device, init_work); - struct mei_client_properties *client_props; - int i; - - mutex_lock(&dev->device_lock); - - bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); - dev->open_handle_count = 0; - - /* - * Reserving the first three client IDs - * 0: Reserved for MEI Bus Message communications - * 1: Reserved for Watchdog - * 2: Reserved for AMTHI - */ - bitmap_set(dev->host_clients_map, 0, 3); - - for (i = 0; i < dev->me_clients_num; i++) { - client_props = &dev->me_clients[i].props; - - if (!uuid_le_cmp(client_props->protocol_name, mei_amthi_guid)) - mei_amthif_host_init(dev); - else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid)) - mei_wd_host_init(dev); - } - - dev->dev_state = MEI_DEV_ENABLED; - - mutex_unlock(&dev->device_lock); -} - -int mei_host_client_enumerate(struct mei_device *dev) -{ - - struct mei_msg_hdr *mei_hdr; - struct hbm_props_request *prop_req; - const size_t len = sizeof(struct hbm_props_request); - unsigned long next_client_index; - u8 client_num; - - - client_num = dev->me_client_presentation_num; - - next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX, - dev->me_client_index); - - /* We got all client properties */ - if (next_client_index == MEI_CLIENTS_MAX) { - schedule_work(&dev->init_work); - - return 0; - } - - dev->me_clients[client_num].client_id = next_client_index; - dev->me_clients[client_num].mei_flow_ctrl_creds = 0; - - mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len); - prop_req = (struct hbm_props_request *)&dev->wr_msg_buf[1]; - - memset(prop_req, 0, sizeof(struct hbm_props_request)); - - - prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD; - prop_req->address = next_client_index; - - if (mei_write_message(dev, mei_hdr, (unsigned char *) prop_req, - mei_hdr->length)) { - dev->dev_state = MEI_DEV_RESETING; - dev_err(&dev->pdev->dev, "Properties request command failed\n"); - mei_reset(dev, 1); - - return -EIO; - } - - dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; - dev->me_client_index = next_client_index; - - return 0; -} - -/** - * mei_init_file_private - initializes private file structure. 
- * - * @priv: private file structure to be initialized - * @file: the file structure - */ -void mei_cl_init(struct mei_cl *priv, struct mei_device *dev) -{ - memset(priv, 0, sizeof(struct mei_cl)); - init_waitqueue_head(&priv->wait); - init_waitqueue_head(&priv->rx_wait); - init_waitqueue_head(&priv->tx_wait); - INIT_LIST_HEAD(&priv->link); - priv->reading_state = MEI_IDLE; - priv->writing_state = MEI_IDLE; - priv->dev = dev; -} - -int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid) -{ - int i, res = -ENOENT; - - for (i = 0; i < dev->me_clients_num; ++i) - if (uuid_le_cmp(*cuuid, - dev->me_clients[i].props.protocol_name) == 0) { - res = i; - break; - } - - return res; -} - - -/** - * mei_me_cl_link - create link between host and me clinet and add - * me_cl to the list - * - * @dev: the device structure - * @cl: link between me and host client assocated with opened file descriptor - * @cuuid: uuid of ME client - * @client_id: id of the host client - * - * returns ME client index if ME client - * -EINVAL on incorrect values - * -ENONET if client not found - */ -int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl, - const uuid_le *cuuid, u8 host_cl_id) -{ - int i; - - if (!dev || !cl || !cuuid) - return -EINVAL; - - /* check for valid client id */ - i = mei_me_cl_by_uuid(dev, cuuid); - if (i >= 0) { - cl->me_client_id = dev->me_clients[i].client_id; - cl->state = MEI_FILE_CONNECTING; - cl->host_client_id = host_cl_id; - - list_add_tail(&cl->link, &dev->file_list); - return (u8)i; - } - - return -ENOENT; -} -/** - * mei_me_cl_unlink - remove me_cl from the list - * - * @dev: the device structure - * @host_client_id: host client id to be removed - */ -void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl) -{ - struct mei_cl *pos, *next; - list_for_each_entry_safe(pos, next, &dev->file_list, link) { - if (cl->host_client_id == pos->host_client_id) { - dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n", - pos->host_client_id, pos->me_client_id); - list_del_init(&pos->link); - break; - } - } -} + /* wake up all readings so they can be interrupted */ + mei_cl_all_read_wakeup(dev); -/** - * mei_alloc_file_private - allocates a private file structure and sets it up. - * @file: the file structure - * - * returns The allocated file or NULL on failure - */ -struct mei_cl *mei_cl_allocate(struct mei_device *dev) -{ - struct mei_cl *cl; - - cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL); - if (!cl) - return NULL; - - mei_cl_init(cl, dev); - - return cl; + /* remove all waiting requests */ + mei_cl_all_write_clear(dev); } -/** - * mei_disconnect_host_client - sends disconnect message to fw from host client. - * - * @dev: the device structure - * @cl: private data of the file object - * - * Locking: called under "dev->device_lock" lock - * - * returns 0 on success, <0 on failure. 
- */ -int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl) -{ - struct mei_cl_cb *cb; - int rets, err; - - if (!dev || !cl) - return -ENODEV; - - if (cl->state != MEI_FILE_DISCONNECTING) - return 0; - - cb = mei_io_cb_init(cl, NULL); - if (!cb) - return -ENOMEM; - - cb->fop_type = MEI_FOP_CLOSE; - if (dev->mei_host_buffer_is_empty) { - dev->mei_host_buffer_is_empty = false; - if (mei_disconnect(dev, cl)) { - rets = -ENODEV; - dev_dbg(&dev->pdev->dev, "failed to call mei_disconnect.\n"); - goto free; - } - mdelay(10); /* Wait for hardware disconnection ready */ - list_add_tail(&cb->list, &dev->ctrl_rd_list.list); - } else { - dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n"); - list_add_tail(&cb->list, &dev->ctrl_wr_list.list); - - } - mutex_unlock(&dev->device_lock); - - err = wait_event_timeout(dev->wait_recvd_msg, - MEI_FILE_DISCONNECTED == cl->state, - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); - - mutex_lock(&dev->device_lock); - if (MEI_FILE_DISCONNECTED == cl->state) { - rets = 0; - dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n"); - } else { - rets = -ENODEV; - if (MEI_FILE_DISCONNECTED != cl->state) - dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n"); - - if (err) - dev_dbg(&dev->pdev->dev, - "wait failed disconnect err=%08x\n", - err); - - dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n"); - } - - mei_io_list_flush(&dev->ctrl_rd_list, cl); - mei_io_list_flush(&dev->ctrl_wr_list, cl); -free: - mei_io_cb_free(cb); - return rets; -} diff --git a/drivers/misc/mei/interface.c b/drivers/misc/mei/interface.c deleted file mode 100644 index 8de854785960..000000000000 --- a/drivers/misc/mei/interface.c +++ /dev/null @@ -1,388 +0,0 @@ -/* - * - * Intel Management Engine Interface (Intel MEI) Linux driver - * Copyright (c) 2003-2012, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - */ - -#include <linux/pci.h> -#include "mei_dev.h" -#include <linux/mei.h> -#include "interface.h" - - - -/** - * mei_set_csr_register - writes H_CSR register to the mei device, - * and ignores the H_IS bit for it is write-one-to-zero. 
- * - * @dev: the device structure - */ -void mei_hcsr_set(struct mei_device *dev) -{ - if ((dev->host_hw_state & H_IS) == H_IS) - dev->host_hw_state &= ~H_IS; - mei_reg_write(dev, H_CSR, dev->host_hw_state); - dev->host_hw_state = mei_hcsr_read(dev); -} - -/** - * mei_csr_enable_interrupts - enables mei device interrupts - * - * @dev: the device structure - */ -void mei_enable_interrupts(struct mei_device *dev) -{ - dev->host_hw_state |= H_IE; - mei_hcsr_set(dev); -} - -/** - * mei_csr_disable_interrupts - disables mei device interrupts - * - * @dev: the device structure - */ -void mei_disable_interrupts(struct mei_device *dev) -{ - dev->host_hw_state &= ~H_IE; - mei_hcsr_set(dev); -} - -/** - * mei_hbuf_filled_slots - gets number of device filled buffer slots - * - * @device: the device structure - * - * returns number of filled slots - */ -static unsigned char mei_hbuf_filled_slots(struct mei_device *dev) -{ - char read_ptr, write_ptr; - - dev->host_hw_state = mei_hcsr_read(dev); - - read_ptr = (char) ((dev->host_hw_state & H_CBRP) >> 8); - write_ptr = (char) ((dev->host_hw_state & H_CBWP) >> 16); - - return (unsigned char) (write_ptr - read_ptr); -} - -/** - * mei_hbuf_is_empty - checks if host buffer is empty. - * - * @dev: the device structure - * - * returns true if empty, false - otherwise. - */ -bool mei_hbuf_is_empty(struct mei_device *dev) -{ - return mei_hbuf_filled_slots(dev) == 0; -} - -/** - * mei_hbuf_empty_slots - counts write empty slots. - * - * @dev: the device structure - * - * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise empty slots count - */ -int mei_hbuf_empty_slots(struct mei_device *dev) -{ - unsigned char filled_slots, empty_slots; - - filled_slots = mei_hbuf_filled_slots(dev); - empty_slots = dev->hbuf_depth - filled_slots; - - /* check for overflow */ - if (filled_slots > dev->hbuf_depth) - return -EOVERFLOW; - - return empty_slots; -} - -/** - * mei_write_message - writes a message to mei device. - * - * @dev: the device structure - * @header: header of message - * @write_buffer: message buffer will be written - * @write_length: message size will be written - * - * This function returns -EIO if write has failed - */ -int mei_write_message(struct mei_device *dev, struct mei_msg_hdr *header, - unsigned char *buf, unsigned long length) -{ - unsigned long rem, dw_cnt; - u32 *reg_buf = (u32 *)buf; - int i; - int empty_slots; - - - dev_dbg(&dev->pdev->dev, - "mei_write_message header=%08x.\n", - *((u32 *) header)); - - empty_slots = mei_hbuf_empty_slots(dev); - dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots); - - dw_cnt = mei_data2slots(length); - if (empty_slots < 0 || dw_cnt > empty_slots) - return -EIO; - - mei_reg_write(dev, H_CB_WW, *((u32 *) header)); - - for (i = 0; i < length / 4; i++) - mei_reg_write(dev, H_CB_WW, reg_buf[i]); - - rem = length & 0x3; - if (rem > 0) { - u32 reg = 0; - memcpy(®, &buf[length - rem], rem); - mei_reg_write(dev, H_CB_WW, reg); - } - - dev->host_hw_state = mei_hcsr_read(dev); - dev->host_hw_state |= H_IG; - mei_hcsr_set(dev); - dev->me_hw_state = mei_mecsr_read(dev); - if ((dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA) - return -EIO; - - return 0; -} - -/** - * mei_count_full_read_slots - counts read full slots. 
- * - * @dev: the device structure - * - * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise filled slots count - */ -int mei_count_full_read_slots(struct mei_device *dev) -{ - char read_ptr, write_ptr; - unsigned char buffer_depth, filled_slots; - - dev->me_hw_state = mei_mecsr_read(dev); - buffer_depth = (unsigned char)((dev->me_hw_state & ME_CBD_HRA) >> 24); - read_ptr = (char) ((dev->me_hw_state & ME_CBRP_HRA) >> 8); - write_ptr = (char) ((dev->me_hw_state & ME_CBWP_HRA) >> 16); - filled_slots = (unsigned char) (write_ptr - read_ptr); - - /* check for overflow */ - if (filled_slots > buffer_depth) - return -EOVERFLOW; - - dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots); - return (int)filled_slots; -} - -/** - * mei_read_slots - reads a message from mei device. - * - * @dev: the device structure - * @buffer: message buffer will be written - * @buffer_length: message size will be read - */ -void mei_read_slots(struct mei_device *dev, unsigned char *buffer, - unsigned long buffer_length) -{ - u32 *reg_buf = (u32 *)buffer; - - for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32)) - *reg_buf++ = mei_mecbrw_read(dev); - - if (buffer_length > 0) { - u32 reg = mei_mecbrw_read(dev); - memcpy(reg_buf, ®, buffer_length); - } - - dev->host_hw_state |= H_IG; - mei_hcsr_set(dev); -} - -/** - * mei_flow_ctrl_creds - checks flow_control credentials. - * - * @dev: the device structure - * @cl: private data of the file object - * - * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise. - * -ENOENT if mei_cl is not present - * -EINVAL if single_recv_buf == 0 - */ -int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl) -{ - int i; - - if (!dev->me_clients_num) - return 0; - - if (cl->mei_flow_ctrl_creds > 0) - return 1; - - for (i = 0; i < dev->me_clients_num; i++) { - struct mei_me_client *me_cl = &dev->me_clients[i]; - if (me_cl->client_id == cl->me_client_id) { - if (me_cl->mei_flow_ctrl_creds) { - if (WARN_ON(me_cl->props.single_recv_buf == 0)) - return -EINVAL; - return 1; - } else { - return 0; - } - } - } - return -ENOENT; -} - -/** - * mei_flow_ctrl_reduce - reduces flow_control. - * - * @dev: the device structure - * @cl: private data of the file object - * @returns - * 0 on success - * -ENOENT when me client is not found - * -EINVAL when ctrl credits are <= 0 - */ -int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl) -{ - int i; - - if (!dev->me_clients_num) - return -ENOENT; - - for (i = 0; i < dev->me_clients_num; i++) { - struct mei_me_client *me_cl = &dev->me_clients[i]; - if (me_cl->client_id == cl->me_client_id) { - if (me_cl->props.single_recv_buf != 0) { - if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) - return -EINVAL; - dev->me_clients[i].mei_flow_ctrl_creds--; - } else { - if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) - return -EINVAL; - cl->mei_flow_ctrl_creds--; - } - return 0; - } - } - return -ENOENT; -} - -/** - * mei_send_flow_control - sends flow control to fw. 
- * - * @dev: the device structure - * @cl: private data of the file object - * - * This function returns -EIO on write failure - */ -int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl) -{ - struct mei_msg_hdr *mei_hdr; - struct hbm_flow_control *flow_ctrl; - const size_t len = sizeof(struct hbm_flow_control); - - mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len); - - flow_ctrl = (struct hbm_flow_control *)&dev->wr_msg_buf[1]; - memset(flow_ctrl, 0, len); - flow_ctrl->hbm_cmd = MEI_FLOW_CONTROL_CMD; - flow_ctrl->host_addr = cl->host_client_id; - flow_ctrl->me_addr = cl->me_client_id; - /* FIXME: reserved !? */ - memset(flow_ctrl->reserved, 0, sizeof(flow_ctrl->reserved)); - dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n", - cl->host_client_id, cl->me_client_id); - - return mei_write_message(dev, mei_hdr, - (unsigned char *) flow_ctrl, len); -} - -/** - * mei_other_client_is_connecting - checks if other - * client with the same client id is connected. - * - * @dev: the device structure - * @cl: private data of the file object - * - * returns 1 if other client is connected, 0 - otherwise. - */ -int mei_other_client_is_connecting(struct mei_device *dev, - struct mei_cl *cl) -{ - struct mei_cl *cl_pos = NULL; - struct mei_cl *cl_next = NULL; - - list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) { - if ((cl_pos->state == MEI_FILE_CONNECTING) && - (cl_pos != cl) && - cl->me_client_id == cl_pos->me_client_id) - return 1; - - } - return 0; -} - -/** - * mei_disconnect - sends disconnect message to fw. - * - * @dev: the device structure - * @cl: private data of the file object - * - * This function returns -EIO on write failure - */ -int mei_disconnect(struct mei_device *dev, struct mei_cl *cl) -{ - struct mei_msg_hdr *mei_hdr; - struct hbm_client_connect_request *req; - const size_t len = sizeof(struct hbm_client_connect_request); - - mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len); - - req = (struct hbm_client_connect_request *)&dev->wr_msg_buf[1]; - memset(req, 0, len); - req->hbm_cmd = CLIENT_DISCONNECT_REQ_CMD; - req->host_addr = cl->host_client_id; - req->me_addr = cl->me_client_id; - req->reserved = 0; - - return mei_write_message(dev, mei_hdr, (unsigned char *)req, len); -} - -/** - * mei_connect - sends connect message to fw. - * - * @dev: the device structure - * @cl: private data of the file object - * - * This function returns -EIO on write failure - */ -int mei_connect(struct mei_device *dev, struct mei_cl *cl) -{ - struct mei_msg_hdr *mei_hdr; - struct hbm_client_connect_request *req; - const size_t len = sizeof(struct hbm_client_connect_request); - - mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len); - - req = (struct hbm_client_connect_request *) &dev->wr_msg_buf[1]; - req->hbm_cmd = CLIENT_CONNECT_REQ_CMD; - req->host_addr = cl->host_client_id; - req->me_addr = cl->me_client_id; - req->reserved = 0; - - return mei_write_message(dev, mei_hdr, (unsigned char *) req, len); -} diff --git a/drivers/misc/mei/interface.h b/drivers/misc/mei/interface.h deleted file mode 100644 index ec6c785a3961..000000000000 --- a/drivers/misc/mei/interface.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * - * Intel Management Engine Interface (Intel MEI) Linux driver - * Copyright (c) 2003-2012, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
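The removed request builders (mei_send_flow_control, mei_disconnect, mei_connect) all share one shape: build a message header, fill a 4-byte HBM client message with a command byte plus the host/ME address pair, and hand both to mei_write_message(). A sketch of that common layout, with hypothetical struct and field names standing in for the hbm_* definitions:

#include <string.h>

/* Sketch of the common HBM client-message body used by the removed
 * connect/disconnect/flow-control builders (hypothetical names).
 */
struct hbm_cl_msg {
	unsigned char cmd;
	unsigned char host_addr;
	unsigned char me_addr;
	unsigned char reserved;
};

static void hbm_cl_msg_init(struct hbm_cl_msg *msg, unsigned char cmd,
			    unsigned char host_addr, unsigned char me_addr)
{
	memset(msg, 0, sizeof(*msg));
	msg->cmd = cmd;
	msg->host_addr = host_addr;
	msg->me_addr = me_addr;
}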
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - */ - - - -#ifndef _MEI_INTERFACE_H_ -#define _MEI_INTERFACE_H_ - -#include <linux/mei.h> -#include "mei_dev.h" - - - -void mei_read_slots(struct mei_device *dev, - unsigned char *buffer, - unsigned long buffer_length); - -int mei_write_message(struct mei_device *dev, - struct mei_msg_hdr *header, - unsigned char *write_buffer, - unsigned long write_length); - -bool mei_hbuf_is_empty(struct mei_device *dev); - -int mei_hbuf_empty_slots(struct mei_device *dev); - -static inline size_t mei_hbuf_max_data(const struct mei_device *dev) -{ - return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr); -} - -/* get slots (dwords) from a message length + header (bytes) */ -static inline unsigned char mei_data2slots(size_t length) -{ - return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4); -} - -int mei_count_full_read_slots(struct mei_device *dev); - - -int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl); - - - -int mei_wd_send(struct mei_device *dev); -int mei_wd_stop(struct mei_device *dev); -int mei_wd_host_init(struct mei_device *dev); -/* - * mei_watchdog_register - Registering watchdog interface - * once we got connection to the WD Client - * @dev - mei device - */ -void mei_watchdog_register(struct mei_device *dev); -/* - * mei_watchdog_unregister - Unregistering watchdog interface - * @dev - mei device - */ -void mei_watchdog_unregister(struct mei_device *dev); - -int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl); - -int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl); - -int mei_disconnect(struct mei_device *dev, struct mei_cl *cl); -int mei_other_client_is_connecting(struct mei_device *dev, struct mei_cl *cl); -int mei_connect(struct mei_device *dev, struct mei_cl *cl); - -#endif /* _MEI_INTERFACE_H_ */ diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 04fa2134615e..3535b2676c97 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -21,41 +21,21 @@ #include <linux/fs.h> #include <linux/jiffies.h> -#include "mei_dev.h" #include <linux/mei.h> -#include "hw.h" -#include "interface.h" - - -/** - * mei_interrupt_quick_handler - The ISR of the MEI device - * - * @irq: The irq number - * @dev_id: pointer to the device structure - * - * returns irqreturn_t - */ -irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id) -{ - struct mei_device *dev = (struct mei_device *) dev_id; - u32 csr_reg = mei_hcsr_read(dev); - - if ((csr_reg & H_IS) != H_IS) - return IRQ_NONE; - /* clear H_IS bit in H_CSR */ - mei_reg_write(dev, H_CSR, csr_reg); +#include "mei_dev.h" +#include "hbm.h" +#include "hw-me.h" +#include "client.h" - return IRQ_WAKE_THREAD; -} /** - * _mei_cmpl - processes completed operation. + * mei_complete_handler - processes completed operation. * * @cl: private data of the file object. * @cb_pos: callback block. 
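mei_data2slots() in the removed interface.h converts a payload length into the number of 32-bit write slots it occupies, including the message header. A standalone sketch with a worked example (the 4-byte header size is an assumption about this ABI):

/* Sketch: slots (32-bit words) needed for a payload plus the message header,
 * mirroring mei_data2slots() from the removed interface.h.
 */
#define MSG_HDR_LEN 4	/* assumed sizeof(struct mei_msg_hdr) */

static unsigned char data2slots(unsigned long length)
{
	return (unsigned char)((MSG_HDR_LEN + length + 3) / 4);
}

/* e.g. a 6-byte payload needs data2slots(6) == 3 slots: one header word plus
 * two (padded) data words.
 */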
*/ -static void _mei_cmpl(struct mei_cl *cl, struct mei_cl_cb *cb_pos) +void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos) { if (cb_pos->fop_type == MEI_FOP_WRITE) { mei_io_cb_free(cb_pos); @@ -150,8 +130,8 @@ quit: dev_dbg(&dev->pdev->dev, "message read\n"); if (!buffer) { mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length); - dev_dbg(&dev->pdev->dev, "discarding message, header =%08x.\n", - *(u32 *) dev->rd_msg_buf); + dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n", + MEI_HDR_PRM(mei_hdr)); } return 0; @@ -179,7 +159,7 @@ static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots, *slots -= mei_data2slots(sizeof(struct hbm_client_connect_request)); - if (mei_disconnect(dev, cl)) { + if (mei_hbm_cl_disconnect_req(dev, cl)) { cl->status = 0; cb_pos->buf_idx = 0; list_move_tail(&cb_pos->list, &cmpl_list->list); @@ -195,440 +175,6 @@ static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots, return 0; } -/** - * is_treat_specially_client - checks if the message belongs - * to the file private data. - * - * @cl: private data of the file object - * @rs: connect response bus message - * - */ -static bool is_treat_specially_client(struct mei_cl *cl, - struct hbm_client_connect_response *rs) -{ - - if (cl->host_client_id == rs->host_addr && - cl->me_client_id == rs->me_addr) { - if (!rs->status) { - cl->state = MEI_FILE_CONNECTED; - cl->status = 0; - - } else { - cl->state = MEI_FILE_DISCONNECTED; - cl->status = -ENODEV; - } - cl->timer_count = 0; - - return true; - } - return false; -} - -/** - * mei_client_connect_response - connects to response irq routine - * - * @dev: the device structure - * @rs: connect response bus message - */ -static void mei_client_connect_response(struct mei_device *dev, - struct hbm_client_connect_response *rs) -{ - - struct mei_cl *cl; - struct mei_cl_cb *pos = NULL, *next = NULL; - - dev_dbg(&dev->pdev->dev, - "connect_response:\n" - "ME Client = %d\n" - "Host Client = %d\n" - "Status = %d\n", - rs->me_addr, - rs->host_addr, - rs->status); - - /* if WD or iamthif client treat specially */ - - if (is_treat_specially_client(&(dev->wd_cl), rs)) { - dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n"); - mei_watchdog_register(dev); - - return; - } - - if (is_treat_specially_client(&(dev->iamthif_cl), rs)) { - dev->iamthif_state = MEI_IAMTHIF_IDLE; - return; - } - list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) { - - cl = pos->cl; - if (!cl) { - list_del(&pos->list); - return; - } - if (pos->fop_type == MEI_FOP_IOCTL) { - if (is_treat_specially_client(cl, rs)) { - list_del(&pos->list); - cl->status = 0; - cl->timer_count = 0; - break; - } - } - } -} - -/** - * mei_client_disconnect_response - disconnects from response irq routine - * - * @dev: the device structure - * @rs: disconnect response bus message - */ -static void mei_client_disconnect_response(struct mei_device *dev, - struct hbm_client_connect_response *rs) -{ - struct mei_cl *cl; - struct mei_cl_cb *pos = NULL, *next = NULL; - - dev_dbg(&dev->pdev->dev, - "disconnect_response:\n" - "ME Client = %d\n" - "Host Client = %d\n" - "Status = %d\n", - rs->me_addr, - rs->host_addr, - rs->status); - - list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) { - cl = pos->cl; - - if (!cl) { - list_del(&pos->list); - return; - } - - dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n"); - if (cl->host_client_id == rs->host_addr && - cl->me_client_id == rs->me_addr) { - - list_del(&pos->list); - if 
(!rs->status) - cl->state = MEI_FILE_DISCONNECTED; - - cl->status = 0; - cl->timer_count = 0; - break; - } - } -} - -/** - * same_flow_addr - tells if they have the same address. - * - * @file: private data of the file object. - * @flow: flow control. - * - * returns !=0, same; 0,not. - */ -static int same_flow_addr(struct mei_cl *cl, struct hbm_flow_control *flow) -{ - return (cl->host_client_id == flow->host_addr && - cl->me_client_id == flow->me_addr); -} - -/** - * add_single_flow_creds - adds single buffer credentials. - * - * @file: private data ot the file object. - * @flow: flow control. - */ -static void add_single_flow_creds(struct mei_device *dev, - struct hbm_flow_control *flow) -{ - struct mei_me_client *client; - int i; - - for (i = 0; i < dev->me_clients_num; i++) { - client = &dev->me_clients[i]; - if (client && flow->me_addr == client->client_id) { - if (client->props.single_recv_buf) { - client->mei_flow_ctrl_creds++; - dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n", - flow->me_addr); - dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n", - client->mei_flow_ctrl_creds); - } else { - BUG(); /* error in flow control */ - } - } - } -} - -/** - * mei_client_flow_control_response - flow control response irq routine - * - * @dev: the device structure - * @flow_control: flow control response bus message - */ -static void mei_client_flow_control_response(struct mei_device *dev, - struct hbm_flow_control *flow_control) -{ - struct mei_cl *cl_pos = NULL; - struct mei_cl *cl_next = NULL; - - if (!flow_control->host_addr) { - /* single receive buffer */ - add_single_flow_creds(dev, flow_control); - } else { - /* normal connection */ - list_for_each_entry_safe(cl_pos, cl_next, - &dev->file_list, link) { - dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in file_list\n"); - - dev_dbg(&dev->pdev->dev, "cl of host client %d ME client %d.\n", - cl_pos->host_client_id, - cl_pos->me_client_id); - dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n", - flow_control->host_addr, - flow_control->me_addr); - if (same_flow_addr(cl_pos, flow_control)) { - dev_dbg(&dev->pdev->dev, "recv ctrl msg for host %d ME %d.\n", - flow_control->host_addr, - flow_control->me_addr); - cl_pos->mei_flow_ctrl_creds++; - dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n", - cl_pos->mei_flow_ctrl_creds); - break; - } - } - } -} - -/** - * same_disconn_addr - tells if they have the same address - * - * @file: private data of the file object. - * @disconn: disconnection request. - * - * returns !=0, same; 0,not. - */ -static int same_disconn_addr(struct mei_cl *cl, - struct hbm_client_connect_request *req) -{ - return (cl->host_client_id == req->host_addr && - cl->me_client_id == req->me_addr); -} - -/** - * mei_client_disconnect_request - disconnects from request irq routine - * - * @dev: the device structure. - * @disconnect_req: disconnect request bus message. 
- */ -static void mei_client_disconnect_request(struct mei_device *dev, - struct hbm_client_connect_request *disconnect_req) -{ - struct hbm_client_connect_response *disconnect_res; - struct mei_cl *pos, *next; - const size_t len = sizeof(struct hbm_client_connect_response); - - list_for_each_entry_safe(pos, next, &dev->file_list, link) { - if (same_disconn_addr(pos, disconnect_req)) { - dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n", - disconnect_req->host_addr, - disconnect_req->me_addr); - pos->state = MEI_FILE_DISCONNECTED; - pos->timer_count = 0; - if (pos == &dev->wd_cl) - dev->wd_pending = false; - else if (pos == &dev->iamthif_cl) - dev->iamthif_timer = 0; - - /* prepare disconnect response */ - (void)mei_hbm_hdr((u32 *)&dev->wr_ext_msg.hdr, len); - disconnect_res = - (struct hbm_client_connect_response *) - &dev->wr_ext_msg.data; - disconnect_res->hbm_cmd = CLIENT_DISCONNECT_RES_CMD; - disconnect_res->host_addr = pos->host_client_id; - disconnect_res->me_addr = pos->me_client_id; - disconnect_res->status = 0; - break; - } - } -} - -/** - * mei_irq_thread_read_bus_message - bottom half read routine after ISR to - * handle the read bus message cmd processing. - * - * @dev: the device structure - * @mei_hdr: header of bus message - */ -static void mei_irq_thread_read_bus_message(struct mei_device *dev, - struct mei_msg_hdr *mei_hdr) -{ - struct mei_bus_message *mei_msg; - struct mei_me_client *me_client; - struct hbm_host_version_response *version_res; - struct hbm_client_connect_response *connect_res; - struct hbm_client_connect_response *disconnect_res; - struct hbm_client_connect_request *disconnect_req; - struct hbm_flow_control *flow_control; - struct hbm_props_response *props_res; - struct hbm_host_enum_response *enum_res; - struct hbm_host_stop_request *stop_req; - - /* read the message to our buffer */ - BUG_ON(mei_hdr->length >= sizeof(dev->rd_msg_buf)); - mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length); - mei_msg = (struct mei_bus_message *)dev->rd_msg_buf; - - switch (mei_msg->hbm_cmd) { - case HOST_START_RES_CMD: - version_res = (struct hbm_host_version_response *) mei_msg; - if (version_res->host_version_supported) { - dev->version.major_version = HBM_MAJOR_VERSION; - dev->version.minor_version = HBM_MINOR_VERSION; - if (dev->dev_state == MEI_DEV_INIT_CLIENTS && - dev->init_clients_state == MEI_START_MESSAGE) { - dev->init_clients_timer = 0; - mei_host_enum_clients_message(dev); - } else { - dev->recvd_msg = false; - dev_dbg(&dev->pdev->dev, "IMEI reset due to received host start response bus message.\n"); - mei_reset(dev, 1); - return; - } - } else { - u32 *buf = dev->wr_msg_buf; - const size_t len = sizeof(struct hbm_host_stop_request); - - dev->version = version_res->me_max_version; - - /* send stop message */ - mei_hdr = mei_hbm_hdr(&buf[0], len); - stop_req = (struct hbm_host_stop_request *)&buf[1]; - memset(stop_req, 0, len); - stop_req->hbm_cmd = HOST_STOP_REQ_CMD; - stop_req->reason = DRIVER_STOP_REQUEST; - - mei_write_message(dev, mei_hdr, - (unsigned char *)stop_req, len); - dev_dbg(&dev->pdev->dev, "version mismatch.\n"); - return; - } - - dev->recvd_msg = true; - dev_dbg(&dev->pdev->dev, "host start response message received.\n"); - break; - - case CLIENT_CONNECT_RES_CMD: - connect_res = (struct hbm_client_connect_response *) mei_msg; - mei_client_connect_response(dev, connect_res); - dev_dbg(&dev->pdev->dev, "client connect response message received.\n"); - wake_up(&dev->wait_recvd_msg); - break; - - case 
CLIENT_DISCONNECT_RES_CMD: - disconnect_res = (struct hbm_client_connect_response *) mei_msg; - mei_client_disconnect_response(dev, disconnect_res); - dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n"); - wake_up(&dev->wait_recvd_msg); - break; - - case MEI_FLOW_CONTROL_CMD: - flow_control = (struct hbm_flow_control *) mei_msg; - mei_client_flow_control_response(dev, flow_control); - dev_dbg(&dev->pdev->dev, "client flow control response message received.\n"); - break; - - case HOST_CLIENT_PROPERTIES_RES_CMD: - props_res = (struct hbm_props_response *)mei_msg; - me_client = &dev->me_clients[dev->me_client_presentation_num]; - - if (props_res->status || !dev->me_clients) { - dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message wrong status.\n"); - mei_reset(dev, 1); - return; - } - - if (me_client->client_id != props_res->address) { - dev_err(&dev->pdev->dev, - "Host client properties reply mismatch\n"); - mei_reset(dev, 1); - - return; - } - - if (dev->dev_state != MEI_DEV_INIT_CLIENTS || - dev->init_clients_state != MEI_CLIENT_PROPERTIES_MESSAGE) { - dev_err(&dev->pdev->dev, - "Unexpected client properties reply\n"); - mei_reset(dev, 1); - - return; - } - - me_client->props = props_res->client_properties; - dev->me_client_index++; - dev->me_client_presentation_num++; - - mei_host_client_enumerate(dev); - - break; - - case HOST_ENUM_RES_CMD: - enum_res = (struct hbm_host_enum_response *) mei_msg; - memcpy(dev->me_clients_map, enum_res->valid_addresses, 32); - if (dev->dev_state == MEI_DEV_INIT_CLIENTS && - dev->init_clients_state == MEI_ENUM_CLIENTS_MESSAGE) { - dev->init_clients_timer = 0; - dev->me_client_presentation_num = 0; - dev->me_client_index = 0; - mei_allocate_me_clients_storage(dev); - dev->init_clients_state = - MEI_CLIENT_PROPERTIES_MESSAGE; - - mei_host_client_enumerate(dev); - } else { - dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n"); - mei_reset(dev, 1); - return; - } - break; - - case HOST_STOP_RES_CMD: - dev->dev_state = MEI_DEV_DISABLED; - dev_dbg(&dev->pdev->dev, "resetting because of FW stop response.\n"); - mei_reset(dev, 1); - break; - - case CLIENT_DISCONNECT_REQ_CMD: - /* search for client */ - disconnect_req = (struct hbm_client_connect_request *)mei_msg; - mei_client_disconnect_request(dev, disconnect_req); - break; - - case ME_STOP_REQ_CMD: - { - /* prepare stop request: sent in next interrupt event */ - - const size_t len = sizeof(struct hbm_host_stop_request); - - mei_hdr = mei_hbm_hdr((u32 *)&dev->wr_ext_msg.hdr, len); - stop_req = (struct hbm_host_stop_request *)&dev->wr_ext_msg.data; - memset(stop_req, 0, len); - stop_req->hbm_cmd = HOST_STOP_REQ_CMD; - stop_req->reason = DRIVER_STOP_REQUEST; - break; - } - default: - BUG(); - break; - - } -} - /** * _mei_hb_read - processes read related operation. 
@@ -655,7 +201,7 @@ static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots, *slots -= mei_data2slots(sizeof(struct hbm_flow_control)); - if (mei_send_flow_control(dev, cl)) { + if (mei_hbm_cl_flow_control_req(dev, cl)) { cl->status = -ENODEV; cb_pos->buf_idx = 0; list_move_tail(&cb_pos->list, &cmpl_list->list); @@ -691,8 +237,8 @@ static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots, } cl->state = MEI_FILE_CONNECTING; - *slots -= mei_data2slots(sizeof(struct hbm_client_connect_request)); - if (mei_connect(dev, cl)) { + *slots -= mei_data2slots(sizeof(struct hbm_client_connect_request)); + if (mei_hbm_cl_connect_req(dev, cl)) { cl->status = -ENODEV; cb_pos->buf_idx = 0; list_del(&cb_pos->list); @@ -717,25 +263,24 @@ static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots, static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots, struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list) { - struct mei_msg_hdr *mei_hdr; + struct mei_msg_hdr mei_hdr; struct mei_cl *cl = cb->cl; size_t len = cb->request_buffer.size - cb->buf_idx; size_t msg_slots = mei_data2slots(len); - mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0]; - mei_hdr->host_addr = cl->host_client_id; - mei_hdr->me_addr = cl->me_client_id; - mei_hdr->reserved = 0; + mei_hdr.host_addr = cl->host_client_id; + mei_hdr.me_addr = cl->me_client_id; + mei_hdr.reserved = 0; if (*slots >= msg_slots) { - mei_hdr->length = len; - mei_hdr->msg_complete = 1; + mei_hdr.length = len; + mei_hdr.msg_complete = 1; /* Split the message only if we can write the whole host buffer */ } else if (*slots == dev->hbuf_depth) { msg_slots = *slots; len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); - mei_hdr->length = len; - mei_hdr->msg_complete = 0; + mei_hdr.length = len; + mei_hdr.msg_complete = 0; } else { /* wait for next time the host buffer is empty */ return 0; @@ -743,23 +288,22 @@ static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots, dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n", cb->request_buffer.size, cb->buf_idx); - dev_dbg(&dev->pdev->dev, "msg: len = %d complete = %d\n", - mei_hdr->length, mei_hdr->msg_complete); + dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr)); *slots -= msg_slots; - if (mei_write_message(dev, mei_hdr, - cb->request_buffer.data + cb->buf_idx, len)) { + if (mei_write_message(dev, &mei_hdr, + cb->request_buffer.data + cb->buf_idx)) { cl->status = -ENODEV; list_move_tail(&cb->list, &cmpl_list->list); return -ENODEV; } - if (mei_flow_ctrl_reduce(dev, cl)) + if (mei_cl_flow_ctrl_reduce(cl)) return -ENODEV; cl->status = 0; - cb->buf_idx += mei_hdr->length; - if (mei_hdr->msg_complete) + cb->buf_idx += mei_hdr.length; + if (mei_hdr.msg_complete) list_move_tail(&cb->list, &dev->write_waiting_list.list); return 0; @@ -769,15 +313,14 @@ static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots, * mei_irq_thread_read_handler - bottom half read routine after ISR to * handle the read processing. * - * @cmpl_list: An instance of our list structure * @dev: the device structure + * @cmpl_list: An instance of our list structure * @slots: slots to read. * * returns 0 on success, <0 on failure. 
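The rewritten mei_irq_thread_write_complete() keeps the earlier split policy: emit the whole message if it fits in the free slots, split it only when the entire (empty) host buffer is available, and otherwise wait for the next buffer-empty interrupt. A sketch of that decision in isolation (hypothetical helper; slots are 32-bit words, lengths are bytes, 4-byte header assumed):

enum tx_action { TX_COMPLETE, TX_PARTIAL, TX_WAIT };

static enum tx_action pick_tx(int slots, int hbuf_depth,
			      unsigned long len, unsigned long *out_len)
{
	unsigned long msg_slots = (4 + len + 3) / 4;	/* header + payload, rounded up */

	if (slots >= (int)msg_slots) {
		*out_len = len;			/* whole message fits */
		return TX_COMPLETE;
	}
	if (slots == hbuf_depth) {
		*out_len = slots * 4 - 4;	/* fill the buffer minus the header */
		return TX_PARTIAL;
	}
	return TX_WAIT;				/* retry when the buffer drains */
}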
*/ -static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list, - struct mei_device *dev, - s32 *slots) +int mei_irq_read_handler(struct mei_device *dev, + struct mei_cl_cb *cmpl_list, s32 *slots) { struct mei_msg_hdr *mei_hdr; struct mei_cl *cl_pos = NULL; @@ -785,13 +328,13 @@ static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list, int ret = 0; if (!dev->rd_msg_hdr) { - dev->rd_msg_hdr = mei_mecbrw_read(dev); + dev->rd_msg_hdr = mei_read_hdr(dev); dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots); (*slots)--; dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots); } mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr; - dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n", mei_hdr->length); + dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); if (mei_hdr->reserved || !dev->rd_msg_hdr) { dev_dbg(&dev->pdev->dev, "corrupted message header.\n"); @@ -830,19 +373,18 @@ static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list, /* decide where to read the message too */ if (!mei_hdr->host_addr) { dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n"); - mei_irq_thread_read_bus_message(dev, mei_hdr); + mei_hbm_dispatch(dev, mei_hdr); dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n"); } else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id && (MEI_FILE_CONNECTED == dev->iamthif_cl.state) && (dev->iamthif_state == MEI_IAMTHIF_READING)) { dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n"); - dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n", - mei_hdr->length); + + dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); ret = mei_amthif_irq_read_message(cmpl_list, dev, mei_hdr); if (ret) goto end; - } else { dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_client_message.\n"); ret = mei_irq_thread_read_client_message(cmpl_list, @@ -869,15 +411,15 @@ end: /** - * mei_irq_thread_write_handler - bottom half write routine after - * ISR to handle the write processing. + * mei_irq_write_handler - dispatch write requests + * after irq received * * @dev: the device structure * @cmpl_list: An instance of our list structure * * returns 0 on success, <0 on failure. 
*/ -static int mei_irq_thread_write_handler(struct mei_device *dev, +int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) { @@ -887,7 +429,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev, s32 slots; int ret; - if (!mei_hbuf_is_empty(dev)) { + if (!mei_hbuf_is_ready(dev)) { dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n"); return 0; } @@ -930,16 +472,16 @@ static int mei_irq_thread_write_handler(struct mei_device *dev, if (dev->wr_ext_msg.hdr.length) { mei_write_message(dev, &dev->wr_ext_msg.hdr, - dev->wr_ext_msg.data, dev->wr_ext_msg.hdr.length); + dev->wr_ext_msg.data); slots -= mei_data2slots(dev->wr_ext_msg.hdr.length); dev->wr_ext_msg.hdr.length = 0; } if (dev->dev_state == MEI_DEV_ENABLED) { if (dev->wd_pending && - mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) { + mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) { if (mei_wd_send(dev)) dev_dbg(&dev->pdev->dev, "wd send failed.\n"); - else if (mei_flow_ctrl_reduce(dev, &dev->wd_cl)) + else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl)) return -ENODEV; dev->wd_pending = false; @@ -978,7 +520,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev, break; case MEI_FOP_IOCTL: /* connect message */ - if (mei_other_client_is_connecting(dev, cl)) + if (mei_cl_is_other_connecting(cl)) continue; ret = _mei_irq_thread_ioctl(dev, &slots, pos, cl, cmpl_list); @@ -998,7 +540,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev, cl = pos->cl; if (cl == NULL) continue; - if (mei_flow_ctrl_creds(dev, cl) <= 0) { + if (mei_cl_flow_ctrl_creds(cl) <= 0) { dev_dbg(&dev->pdev->dev, "No flow control credentials for client %d, not sending.\n", cl->host_client_id); @@ -1123,115 +665,3 @@ out: mutex_unlock(&dev->device_lock); } -/** - * mei_interrupt_thread_handler - function called after ISR to handle the interrupt - * processing. - * - * @irq: The irq number - * @dev_id: pointer to the device structure - * - * returns irqreturn_t - * - */ -irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id) -{ - struct mei_device *dev = (struct mei_device *) dev_id; - struct mei_cl_cb complete_list; - struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL; - struct mei_cl *cl; - s32 slots; - int rets; - bool bus_message_received; - - - dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n"); - /* initialize our complete list */ - mutex_lock(&dev->device_lock); - mei_io_list_init(&complete_list); - dev->host_hw_state = mei_hcsr_read(dev); - - /* Ack the interrupt here - * In case of MSI we don't go through the quick handler */ - if (pci_dev_msi_enabled(dev->pdev)) - mei_reg_write(dev, H_CSR, dev->host_hw_state); - - dev->me_hw_state = mei_mecsr_read(dev); - - /* check if ME wants a reset */ - if ((dev->me_hw_state & ME_RDY_HRA) == 0 && - dev->dev_state != MEI_DEV_RESETING && - dev->dev_state != MEI_DEV_INITIALIZING) { - dev_dbg(&dev->pdev->dev, "FW not ready.\n"); - mei_reset(dev, 1); - mutex_unlock(&dev->device_lock); - return IRQ_HANDLED; - } - - /* check if we need to start the dev */ - if ((dev->host_hw_state & H_RDY) == 0) { - if ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA) { - dev_dbg(&dev->pdev->dev, "we need to start the dev.\n"); - dev->host_hw_state |= (H_IE | H_IG | H_RDY); - mei_hcsr_set(dev); - dev->dev_state = MEI_DEV_INIT_CLIENTS; - dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n"); - /* link is established - * start sending messages. 
- */ - mei_host_start_message(dev); - mutex_unlock(&dev->device_lock); - return IRQ_HANDLED; - } else { - dev_dbg(&dev->pdev->dev, "FW not ready.\n"); - mutex_unlock(&dev->device_lock); - return IRQ_HANDLED; - } - } - /* check slots available for reading */ - slots = mei_count_full_read_slots(dev); - while (slots > 0) { - /* we have urgent data to send so break the read */ - if (dev->wr_ext_msg.hdr.length) - break; - dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots); - dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_handler.\n"); - rets = mei_irq_thread_read_handler(&complete_list, dev, &slots); - if (rets) - goto end; - } - rets = mei_irq_thread_write_handler(dev, &complete_list); -end: - dev_dbg(&dev->pdev->dev, "end of bottom half function.\n"); - dev->host_hw_state = mei_hcsr_read(dev); - dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev); - - bus_message_received = false; - if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) { - dev_dbg(&dev->pdev->dev, "received waiting bus message\n"); - bus_message_received = true; - } - mutex_unlock(&dev->device_lock); - if (bus_message_received) { - dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n"); - wake_up_interruptible(&dev->wait_recvd_msg); - bus_message_received = false; - } - if (list_empty(&complete_list.list)) - return IRQ_HANDLED; - - - list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) { - cl = cb_pos->cl; - list_del(&cb_pos->list); - if (cl) { - if (cl != &dev->iamthif_cl) { - dev_dbg(&dev->pdev->dev, "completing call back.\n"); - _mei_cmpl(cl, cb_pos); - cb_pos = NULL; - } else if (cl == &dev->iamthif_cl) { - mei_amthif_complete(dev, cb_pos); - } - } - } - return IRQ_HANDLED; -} diff --git a/drivers/misc/mei/iorw.c b/drivers/misc/mei/iorw.c deleted file mode 100644 index eb93a1b53b9b..000000000000 --- a/drivers/misc/mei/iorw.c +++ /dev/null @@ -1,366 +0,0 @@ -/* - * - * Intel Management Engine Interface (Intel MEI) Linux driver - * Copyright (c) 2003-2012, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - */ - - -#include <linux/kernel.h> -#include <linux/fs.h> -#include <linux/errno.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/aio.h> -#include <linux/pci.h> -#include <linux/init.h> -#include <linux/ioctl.h> -#include <linux/cdev.h> -#include <linux/list.h> -#include <linux/delay.h> -#include <linux/sched.h> -#include <linux/uuid.h> -#include <linux/jiffies.h> -#include <linux/uaccess.h> - - -#include "mei_dev.h" -#include "hw.h" -#include <linux/mei.h> -#include "interface.h" - -/** - * mei_io_cb_free - free mei_cb_private related memory - * - * @cb: mei callback struct - */ -void mei_io_cb_free(struct mei_cl_cb *cb) -{ - if (cb == NULL) - return; - - kfree(cb->request_buffer.data); - kfree(cb->response_buffer.data); - kfree(cb); -} -/** - * mei_io_cb_init - allocate and initialize io callback - * - * @cl - mei client - * @file: pointer to file structure - * - * returns mei_cl_cb pointer or NULL; - */ -struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp) -{ - struct mei_cl_cb *cb; - - cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL); - if (!cb) - return NULL; - - mei_io_list_init(cb); - - cb->file_object = fp; - cb->cl = cl; - cb->buf_idx = 0; - return cb; -} - - -/** - * mei_io_cb_alloc_req_buf - allocate request buffer - * - * @cb - io callback structure - * @size: size of the buffer - * - * returns 0 on success - * -EINVAL if cb is NULL - * -ENOMEM if allocation failed - */ -int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length) -{ - if (!cb) - return -EINVAL; - - if (length == 0) - return 0; - - cb->request_buffer.data = kmalloc(length, GFP_KERNEL); - if (!cb->request_buffer.data) - return -ENOMEM; - cb->request_buffer.size = length; - return 0; -} -/** - * mei_io_cb_alloc_req_buf - allocate respose buffer - * - * @cb - io callback structure - * @size: size of the buffer - * - * returns 0 on success - * -EINVAL if cb is NULL - * -ENOMEM if allocation failed - */ -int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length) -{ - if (!cb) - return -EINVAL; - - if (length == 0) - return 0; - - cb->response_buffer.data = kmalloc(length, GFP_KERNEL); - if (!cb->response_buffer.data) - return -ENOMEM; - cb->response_buffer.size = length; - return 0; -} - - -/** - * mei_me_cl_by_id return index to me_clients for client_id - * - * @dev: the device structure - * @client_id: me client id - * - * Locking: called under "dev->device_lock" lock - * - * returns index on success, -ENOENT on failure. - */ - -int mei_me_cl_by_id(struct mei_device *dev, u8 client_id) -{ - int i; - for (i = 0; i < dev->me_clients_num; i++) - if (dev->me_clients[i].client_id == client_id) - break; - if (WARN_ON(dev->me_clients[i].client_id != client_id)) - return -ENOENT; - - if (i == dev->me_clients_num) - return -ENOENT; - - return i; -} - -/** - * mei_ioctl_connect_client - the connect to fw client IOCTL function - * - * @dev: the device structure - * @data: IOCTL connect data, input and output parameters - * @file: private data of the file object - * - * Locking: called under "dev->device_lock" lock - * - * returns 0 on success, <0 on failure. 
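The removed iorw.c helpers above follow an allocate/initialize/free pattern for the I/O callback blocks; the sketch below shows the intended call sequence for queuing a read, with a hypothetical caller and error paths kept, assuming the driver's mei_dev.h still declares these helpers after the move:

#include <linux/errno.h>
#include <linux/list.h>
#include "mei_dev.h"	/* assumed to declare mei_io_cb_* and struct mei_cl */

static int queue_read_request(struct mei_cl *cl, size_t max_msg_len,
			      struct list_head *queue)
{
	struct mei_cl_cb *cb;
	int ret;

	cb = mei_io_cb_init(cl, NULL);		/* no originating file */
	if (!cb)
		return -ENOMEM;

	ret = mei_io_cb_alloc_resp_buf(cb, max_msg_len);
	if (ret) {
		mei_io_cb_free(cb);		/* frees both buffers and the cb */
		return ret;
	}

	cb->fop_type = MEI_FOP_READ;
	list_add_tail(&cb->list, queue);
	return 0;
}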
- */ -int mei_ioctl_connect_client(struct file *file, - struct mei_connect_client_data *data) -{ - struct mei_device *dev; - struct mei_cl_cb *cb; - struct mei_client *client; - struct mei_cl *cl; - long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT); - int i; - int err; - int rets; - - cl = file->private_data; - if (WARN_ON(!cl || !cl->dev)) - return -ENODEV; - - dev = cl->dev; - - dev_dbg(&dev->pdev->dev, "mei_ioctl_connect_client() Entry\n"); - - /* buffered ioctl cb */ - cb = mei_io_cb_init(cl, file); - if (!cb) { - rets = -ENOMEM; - goto end; - } - - cb->fop_type = MEI_FOP_IOCTL; - - if (dev->dev_state != MEI_DEV_ENABLED) { - rets = -ENODEV; - goto end; - } - if (cl->state != MEI_FILE_INITIALIZING && - cl->state != MEI_FILE_DISCONNECTED) { - rets = -EBUSY; - goto end; - } - - /* find ME client we're trying to connect to */ - i = mei_me_cl_by_uuid(dev, &data->in_client_uuid); - if (i >= 0 && !dev->me_clients[i].props.fixed_address) { - cl->me_client_id = dev->me_clients[i].client_id; - cl->state = MEI_FILE_CONNECTING; - } - - dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n", - cl->me_client_id); - dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n", - dev->me_clients[i].props.protocol_version); - dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n", - dev->me_clients[i].props.max_msg_length); - - /* if we're connecting to amthi client then we will use the - * existing connection - */ - if (uuid_le_cmp(data->in_client_uuid, mei_amthi_guid) == 0) { - dev_dbg(&dev->pdev->dev, "FW Client is amthi\n"); - if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) { - rets = -ENODEV; - goto end; - } - clear_bit(cl->host_client_id, dev->host_clients_map); - mei_me_cl_unlink(dev, cl); - - kfree(cl); - cl = NULL; - file->private_data = &dev->iamthif_cl; - - client = &data->out_client_properties; - client->max_msg_length = - dev->me_clients[i].props.max_msg_length; - client->protocol_version = - dev->me_clients[i].props.protocol_version; - rets = dev->iamthif_cl.status; - - goto end; - } - - if (cl->state != MEI_FILE_CONNECTING) { - rets = -ENODEV; - goto end; - } - - - /* prepare the output buffer */ - client = &data->out_client_properties; - client->max_msg_length = dev->me_clients[i].props.max_msg_length; - client->protocol_version = dev->me_clients[i].props.protocol_version; - dev_dbg(&dev->pdev->dev, "Can connect?\n"); - if (dev->mei_host_buffer_is_empty - && !mei_other_client_is_connecting(dev, cl)) { - dev_dbg(&dev->pdev->dev, "Sending Connect Message\n"); - dev->mei_host_buffer_is_empty = false; - if (mei_connect(dev, cl)) { - dev_dbg(&dev->pdev->dev, "Sending connect message - failed\n"); - rets = -ENODEV; - goto end; - } else { - dev_dbg(&dev->pdev->dev, "Sending connect message - succeeded\n"); - cl->timer_count = MEI_CONNECT_TIMEOUT; - list_add_tail(&cb->list, &dev->ctrl_rd_list.list); - } - - - } else { - dev_dbg(&dev->pdev->dev, "Queuing the connect request due to device busy\n"); - dev_dbg(&dev->pdev->dev, "add connect cb to control write list.\n"); - list_add_tail(&cb->list, &dev->ctrl_wr_list.list); - } - mutex_unlock(&dev->device_lock); - err = wait_event_timeout(dev->wait_recvd_msg, - (MEI_FILE_CONNECTED == cl->state || - MEI_FILE_DISCONNECTED == cl->state), timeout); - - mutex_lock(&dev->device_lock); - if (MEI_FILE_CONNECTED == cl->state) { - dev_dbg(&dev->pdev->dev, "successfully connected to FW client.\n"); - rets = cl->status; - goto end; - } else { - dev_dbg(&dev->pdev->dev, "failed to connect to FW client.cl->state = %d.\n", - cl->state); - if 
(!err) { - dev_dbg(&dev->pdev->dev, - "wait_event_interruptible_timeout failed on client" - " connect message fw response message.\n"); - } - rets = -EFAULT; - - mei_io_list_flush(&dev->ctrl_rd_list, cl); - mei_io_list_flush(&dev->ctrl_wr_list, cl); - goto end; - } - rets = 0; -end: - dev_dbg(&dev->pdev->dev, "free connect cb memory."); - mei_io_cb_free(cb); - return rets; -} - -/** - * mei_start_read - the start read client message function. - * - * @dev: the device structure - * @if_num: minor number - * @cl: private data of the file object - * - * returns 0 on success, <0 on failure. - */ -int mei_start_read(struct mei_device *dev, struct mei_cl *cl) -{ - struct mei_cl_cb *cb; - int rets; - int i; - - if (cl->state != MEI_FILE_CONNECTED) - return -ENODEV; - - if (dev->dev_state != MEI_DEV_ENABLED) - return -ENODEV; - - if (cl->read_pending || cl->read_cb) { - dev_dbg(&dev->pdev->dev, "read is pending.\n"); - return -EBUSY; - } - i = mei_me_cl_by_id(dev, cl->me_client_id); - if (i < 0) { - dev_err(&dev->pdev->dev, "no such me client %d\n", - cl->me_client_id); - return -ENODEV; - } - - cb = mei_io_cb_init(cl, NULL); - if (!cb) - return -ENOMEM; - - rets = mei_io_cb_alloc_resp_buf(cb, - dev->me_clients[i].props.max_msg_length); - if (rets) - goto err; - - cb->fop_type = MEI_FOP_READ; - cl->read_cb = cb; - if (dev->mei_host_buffer_is_empty) { - dev->mei_host_buffer_is_empty = false; - if (mei_send_flow_control(dev, cl)) { - rets = -ENODEV; - goto err; - } - list_add_tail(&cb->list, &dev->read_list.list); - } else { - list_add_tail(&cb->list, &dev->ctrl_wr_list.list); - } - return rets; -err: - mei_io_cb_free(cb); - return rets; -} - diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 43fb52ff98ad..903f809b21f7 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -37,79 +37,11 @@ #include <linux/interrupt.h> #include <linux/miscdevice.h> -#include "mei_dev.h" #include <linux/mei.h> -#include "interface.h" - -/* AMT device is a singleton on the platform */ -static struct pci_dev *mei_pdev; - -/* mei_pci_tbl - PCI Device ID Table */ -static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = { - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)}, - 
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)}, - - /* required last entry */ - {0, } -}; - -MODULE_DEVICE_TABLE(pci, mei_pci_tbl); -static DEFINE_MUTEX(mei_mutex); - - -/** - * find_read_list_entry - find read list entry - * - * @dev: device structure - * @file: pointer to file structure - * - * returns cb on success, NULL on error - */ -static struct mei_cl_cb *find_read_list_entry( - struct mei_device *dev, - struct mei_cl *cl) -{ - struct mei_cl_cb *pos = NULL; - struct mei_cl_cb *next = NULL; - - dev_dbg(&dev->pdev->dev, "remove read_list CB\n"); - list_for_each_entry_safe(pos, next, &dev->read_list.list, list) - if (mei_cl_cmp_id(cl, pos->cl)) - return pos; - return NULL; -} +#include "mei_dev.h" +#include "hw-me.h" +#include "client.h" /** * mei_open - the open function @@ -121,16 +53,20 @@ static struct mei_cl_cb *find_read_list_entry( */ static int mei_open(struct inode *inode, struct file *file) { + struct miscdevice *misc = file->private_data; + struct pci_dev *pdev; struct mei_cl *cl; struct mei_device *dev; - unsigned long cl_id; + int err; err = -ENODEV; - if (!mei_pdev) + if (!misc->parent) goto out; - dev = pci_get_drvdata(mei_pdev); + pdev = container_of(misc->parent, struct pci_dev, dev); + + dev = pci_get_drvdata(pdev); if (!dev) goto out; @@ -153,24 +89,9 @@ static int mei_open(struct inode *inode, struct file *file) goto out_unlock; } - cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX); - if (cl_id >= MEI_CLIENTS_MAX) { - dev_err(&dev->pdev->dev, "client_id exceded %d", - MEI_CLIENTS_MAX) ; + err = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY); + if (err) goto out_unlock; - } - - cl->host_client_id = cl_id; - - dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id); - - dev->open_handle_count++; - - list_add_tail(&cl->link, &dev->file_list); - - set_bit(cl->host_client_id, dev->host_clients_map); - cl->state = MEI_FILE_INITIALIZING; - cl->sm_state = 0; file->private_data = cl; mutex_unlock(&dev->device_lock); @@ -216,7 +137,7 @@ static int mei_release(struct inode *inode, struct file *file) "ME client = %d\n", cl->host_client_id, cl->me_client_id); - rets = mei_disconnect_host_client(dev, cl); + rets = mei_cl_disconnect(cl); } mei_cl_flush_queues(cl); dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n", @@ -227,12 +148,13 @@ static int mei_release(struct inode *inode, struct file *file) clear_bit(cl->host_client_id, dev->host_clients_map); dev->open_handle_count--; } - mei_me_cl_unlink(dev, cl); + mei_cl_unlink(cl); + /* free read cb */ cb = NULL; if (cl->read_cb) { - cb = find_read_list_entry(dev, cl); + cb = mei_cl_find_read_cb(cl); /* Remove entry from read list */ if (cb) list_del(&cb->list); @@ -322,7 +244,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, goto out; } - err = mei_start_read(dev, cl); + err = mei_cl_read_start(cl); if (err && err != -EBUSY) { dev_dbg(&dev->pdev->dev, "mei start read failure with status = %d\n", err); @@ -393,14 +315,13 @@ copy_buffer: goto out; free: - cb_pos = find_read_list_entry(dev, cl); + cb_pos = mei_cl_find_read_cb(cl); /* 
Remove entry from read list */ if (cb_pos) list_del(&cb_pos->list); mei_io_cb_free(cb); cl->reading_state = MEI_IDLE; cl->read_cb = NULL; - cl->read_pending = 0; out: dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets); mutex_unlock(&dev->device_lock); @@ -475,16 +396,15 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, /* free entry used in read */ if (cl->reading_state == MEI_READ_COMPLETE) { *offset = 0; - write_cb = find_read_list_entry(dev, cl); + write_cb = mei_cl_find_read_cb(cl); if (write_cb) { list_del(&write_cb->list); mei_io_cb_free(write_cb); write_cb = NULL; cl->reading_state = MEI_IDLE; cl->read_cb = NULL; - cl->read_pending = 0; } - } else if (cl->reading_state == MEI_IDLE && !cl->read_pending) + } else if (cl->reading_state == MEI_IDLE) *offset = 0; @@ -519,7 +439,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, if (rets) { dev_err(&dev->pdev->dev, - "amthi write failed with status = %d\n", rets); + "amthif write failed with status = %d\n", rets); goto err; } mutex_unlock(&dev->device_lock); @@ -530,20 +450,20 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n", cl->host_client_id, cl->me_client_id); - rets = mei_flow_ctrl_creds(dev, cl); + rets = mei_cl_flow_ctrl_creds(cl); if (rets < 0) goto err; - if (rets == 0 || dev->mei_host_buffer_is_empty == false) { + if (rets == 0 || !dev->hbuf_is_ready) { write_cb->buf_idx = 0; mei_hdr.msg_complete = 0; cl->writing_state = MEI_WRITING; goto out; } - dev->mei_host_buffer_is_empty = false; - if (length > mei_hbuf_max_data(dev)) { - mei_hdr.length = mei_hbuf_max_data(dev); + dev->hbuf_is_ready = false; + if (length > mei_hbuf_max_len(dev)) { + mei_hdr.length = mei_hbuf_max_len(dev); mei_hdr.msg_complete = 0; } else { mei_hdr.length = length; @@ -552,10 +472,10 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, mei_hdr.host_addr = cl->host_client_id; mei_hdr.me_addr = cl->me_client_id; mei_hdr.reserved = 0; - dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n", - *((u32 *) &mei_hdr)); - if (mei_write_message(dev, &mei_hdr, - write_cb->request_buffer.data, mei_hdr.length)) { + + dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n", + MEI_HDR_PRM(&mei_hdr)); + if (mei_write_message(dev, &mei_hdr, write_cb->request_buffer.data)) { rets = -ENODEV; goto err; } @@ -564,7 +484,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, out: if (mei_hdr.msg_complete) { - if (mei_flow_ctrl_reduce(dev, cl)) { + if (mei_cl_flow_ctrl_reduce(cl)) { rets = -ENODEV; goto err; } @@ -582,6 +502,103 @@ err: return rets; } +/** + * mei_ioctl_connect_client - the connect to fw client IOCTL function + * + * @dev: the device structure + * @data: IOCTL connect data, input and output parameters + * @file: private data of the file object + * + * Locking: called under "dev->device_lock" lock + * + * returns 0 on success, <0 on failure. 
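Earlier in this main.c hunk, mei_open() stops relying on the mei_pdev singleton and instead walks from the miscdevice (which the misc core has already placed in file->private_data) through its parent device to the PCI device and its driver data. A minimal sketch of that lookup chain, under a hypothetical wrapper name:

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/pci.h>

/* Sketch: recover driver data in open() via the miscdevice's parent device,
 * as the reworked mei_open() does (error handling trimmed).
 */
static void *drvdata_from_misc_open(struct file *file)
{
	struct miscdevice *misc = file->private_data;	/* set by the misc core */
	struct pci_dev *pdev;

	if (!misc->parent)
		return NULL;
	pdev = container_of(misc->parent, struct pci_dev, dev);
	return pci_get_drvdata(pdev);
}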
+ */ +static int mei_ioctl_connect_client(struct file *file, + struct mei_connect_client_data *data) +{ + struct mei_device *dev; + struct mei_client *client; + struct mei_cl *cl; + int i; + int rets; + + cl = file->private_data; + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + if (dev->dev_state != MEI_DEV_ENABLED) { + rets = -ENODEV; + goto end; + } + + if (cl->state != MEI_FILE_INITIALIZING && + cl->state != MEI_FILE_DISCONNECTED) { + rets = -EBUSY; + goto end; + } + + /* find ME client we're trying to connect to */ + i = mei_me_cl_by_uuid(dev, &data->in_client_uuid); + if (i >= 0 && !dev->me_clients[i].props.fixed_address) { + cl->me_client_id = dev->me_clients[i].client_id; + cl->state = MEI_FILE_CONNECTING; + } + + dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n", + cl->me_client_id); + dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n", + dev->me_clients[i].props.protocol_version); + dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n", + dev->me_clients[i].props.max_msg_length); + + /* if we're connecting to amthif client then we will use the + * existing connection + */ + if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) { + dev_dbg(&dev->pdev->dev, "FW Client is amthi\n"); + if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) { + rets = -ENODEV; + goto end; + } + clear_bit(cl->host_client_id, dev->host_clients_map); + mei_cl_unlink(cl); + + kfree(cl); + cl = NULL; + file->private_data = &dev->iamthif_cl; + + client = &data->out_client_properties; + client->max_msg_length = + dev->me_clients[i].props.max_msg_length; + client->protocol_version = + dev->me_clients[i].props.protocol_version; + rets = dev->iamthif_cl.status; + + goto end; + } + + if (cl->state != MEI_FILE_CONNECTING) { + rets = -ENODEV; + goto end; + } + + + /* prepare the output buffer */ + client = &data->out_client_properties; + client->max_msg_length = dev->me_clients[i].props.max_msg_length; + client->protocol_version = dev->me_clients[i].props.protocol_version; + dev_dbg(&dev->pdev->dev, "Can connect?\n"); + + + rets = mei_cl_connect(cl, file); + +end: + dev_dbg(&dev->pdev->dev, "free connect cb memory."); + return rets; +} + /** * mei_ioctl - the IOCTL function @@ -630,6 +647,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) rets = -EFAULT; goto out; } + rets = mei_ioctl_connect_client(file, connect_data); /* if all is ok, copying the data back to user. */ @@ -726,7 +744,6 @@ static const struct file_operations mei_fops = { .llseek = no_llseek }; - /* * Misc Device Struct */ @@ -736,300 +753,17 @@ static struct miscdevice mei_misc_device = { .minor = MISC_DYNAMIC_MINOR, }; -/** - * mei_quirk_probe - probe for devices that doesn't valid ME interface - * @pdev: PCI device structure - * @ent: entry into pci_device_table - * - * returns true if ME Interface is valid, false otherwise - */ -static bool mei_quirk_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) +int mei_register(struct device *dev) { - u32 reg; - if (ent->device == MEI_DEV_ID_PBG_1) { - pci_read_config_dword(pdev, 0x48, ®); - /* make sure that bit 9 is up and bit 10 is down */ - if ((reg & 0x600) == 0x200) { - dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n"); - return false; - } - } - return true; -} -/** - * mei_probe - Device Initialization Routine - * - * @pdev: PCI device structure - * @ent: entry in kcs_pci_tbl - * - * returns 0 on success, <0 on failure. 
- */ -static int mei_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct mei_device *dev; - int err; - - mutex_lock(&mei_mutex); - - if (!mei_quirk_probe(pdev, ent)) { - err = -ENODEV; - goto end; - } - - if (mei_pdev) { - err = -EEXIST; - goto end; - } - /* enable pci dev */ - err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "failed to enable pci device.\n"); - goto end; - } - /* set PCI host mastering */ - pci_set_master(pdev); - /* pci request regions for mei driver */ - err = pci_request_regions(pdev, KBUILD_MODNAME); - if (err) { - dev_err(&pdev->dev, "failed to get pci regions.\n"); - goto disable_device; - } - /* allocates and initializes the mei dev structure */ - dev = mei_device_init(pdev); - if (!dev) { - err = -ENOMEM; - goto release_regions; - } - /* mapping IO device memory */ - dev->mem_addr = pci_iomap(pdev, 0, 0); - if (!dev->mem_addr) { - dev_err(&pdev->dev, "mapping I/O device memory failure.\n"); - err = -ENOMEM; - goto free_device; - } - pci_enable_msi(pdev); - - /* request and enable interrupt */ - if (pci_dev_msi_enabled(pdev)) - err = request_threaded_irq(pdev->irq, - NULL, - mei_interrupt_thread_handler, - IRQF_ONESHOT, KBUILD_MODNAME, dev); - else - err = request_threaded_irq(pdev->irq, - mei_interrupt_quick_handler, - mei_interrupt_thread_handler, - IRQF_SHARED, KBUILD_MODNAME, dev); - - if (err) { - dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n", - pdev->irq); - goto disable_msi; - } - INIT_DELAYED_WORK(&dev->timer_work, mei_timer); - INIT_WORK(&dev->init_work, mei_host_client_init); - - if (mei_hw_init(dev)) { - dev_err(&pdev->dev, "init hw failure.\n"); - err = -ENODEV; - goto release_irq; - } - - err = misc_register(&mei_misc_device); - if (err) - goto release_irq; - - mei_pdev = pdev; - pci_set_drvdata(pdev, dev); - - - schedule_delayed_work(&dev->timer_work, HZ); - - mutex_unlock(&mei_mutex); - - pr_debug("initialization successful.\n"); - - return 0; - -release_irq: - /* disable interrupts */ - dev->host_hw_state = mei_hcsr_read(dev); - mei_disable_interrupts(dev); - flush_scheduled_work(); - free_irq(pdev->irq, dev); -disable_msi: - pci_disable_msi(pdev); - pci_iounmap(pdev, dev->mem_addr); -free_device: - kfree(dev); -release_regions: - pci_release_regions(pdev); -disable_device: - pci_disable_device(pdev); -end: - mutex_unlock(&mei_mutex); - dev_err(&pdev->dev, "initialization failed.\n"); - return err; + mei_misc_device.parent = dev; + return misc_register(&mei_misc_device); } -/** - * mei_remove - Device Removal Routine - * - * @pdev: PCI device structure - * - * mei_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. 
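With the PCI probe/remove logic dropped from main.c, the character-device side exports only mei_register()/mei_deregister(), which attach and detach the parent of the misc node; the bus-specific glue that calls them is not part of this file. A sketch of the expected calling convention (the probe/remove bodies and their eventual location are assumptions, not shown in this diff):

/* Hypothetical bus glue using the new entry points. */
static int example_probe(struct pci_dev *pdev)
{
	int err;

	/* ... map resources, request the IRQ, run mei_hw_init() ... */

	err = mei_register(&pdev->dev);	/* parents the misc node to this device */
	if (err)
		return err;		/* caller unwinds its own setup */
	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	mei_deregister();		/* detach before tearing the device down */
}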
- */ -static void mei_remove(struct pci_dev *pdev) +void mei_deregister(void) { - struct mei_device *dev; - - if (mei_pdev != pdev) - return; - - dev = pci_get_drvdata(pdev); - if (!dev) - return; - - mutex_lock(&dev->device_lock); - - cancel_delayed_work(&dev->timer_work); - - mei_wd_stop(dev); - - mei_pdev = NULL; - - if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) { - dev->iamthif_cl.state = MEI_FILE_DISCONNECTING; - mei_disconnect_host_client(dev, &dev->iamthif_cl); - } - if (dev->wd_cl.state == MEI_FILE_CONNECTED) { - dev->wd_cl.state = MEI_FILE_DISCONNECTING; - mei_disconnect_host_client(dev, &dev->wd_cl); - } - - /* Unregistering watchdog device */ - mei_watchdog_unregister(dev); - - /* remove entry if already in list */ - dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n"); - mei_me_cl_unlink(dev, &dev->wd_cl); - mei_me_cl_unlink(dev, &dev->iamthif_cl); - - dev->iamthif_current_cb = NULL; - dev->me_clients_num = 0; - - mutex_unlock(&dev->device_lock); - - flush_scheduled_work(); - - /* disable interrupts */ - mei_disable_interrupts(dev); - - free_irq(pdev->irq, dev); - pci_disable_msi(pdev); - pci_set_drvdata(pdev, NULL); - - if (dev->mem_addr) - pci_iounmap(pdev, dev->mem_addr); - - kfree(dev); - - pci_release_regions(pdev); - pci_disable_device(pdev); - misc_deregister(&mei_misc_device); -} -#ifdef CONFIG_PM -static int mei_pci_suspend(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct mei_device *dev = pci_get_drvdata(pdev); - int err; - - if (!dev) - return -ENODEV; - mutex_lock(&dev->device_lock); - - cancel_delayed_work(&dev->timer_work); - - /* Stop watchdog if exists */ - err = mei_wd_stop(dev); - /* Set new mei state */ - if (dev->dev_state == MEI_DEV_ENABLED || - dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) { - dev->dev_state = MEI_DEV_POWER_DOWN; - mei_reset(dev, 0); - } - mutex_unlock(&dev->device_lock); - - free_irq(pdev->irq, dev); - pci_disable_msi(pdev); - - return err; + mei_misc_device.parent = NULL; } -static int mei_pci_resume(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct mei_device *dev; - int err; - - dev = pci_get_drvdata(pdev); - if (!dev) - return -ENODEV; - - pci_enable_msi(pdev); - - /* request and enable interrupt */ - if (pci_dev_msi_enabled(pdev)) - err = request_threaded_irq(pdev->irq, - NULL, - mei_interrupt_thread_handler, - IRQF_ONESHOT, KBUILD_MODNAME, dev); - else - err = request_threaded_irq(pdev->irq, - mei_interrupt_quick_handler, - mei_interrupt_thread_handler, - IRQF_SHARED, KBUILD_MODNAME, dev); - - if (err) { - dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n", - pdev->irq); - return err; - } - - mutex_lock(&dev->device_lock); - dev->dev_state = MEI_DEV_POWER_UP; - mei_reset(dev, 1); - mutex_unlock(&dev->device_lock); - - /* Start timer if stopped in suspend */ - schedule_delayed_work(&dev->timer_work, HZ); - - return err; -} -static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume); -#define MEI_PM_OPS (&mei_pm_ops) -#else -#define MEI_PM_OPS NULL -#endif /* CONFIG_PM */ -/* - * PCI driver structure - */ -static struct pci_driver mei_driver = { - .name = KBUILD_MODNAME, - .id_table = mei_pci_tbl, - .probe = mei_probe, - .remove = mei_remove, - .shutdown = mei_remove, - .driver.pm = MEI_PM_OPS, -}; - -module_pci_driver(mei_driver); - -MODULE_AUTHOR("Intel Corporation"); -MODULE_DESCRIPTION("Intel(R) Management Engine Interface"); MODULE_LICENSE("GPL v2"); + diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 
25da04549d04..cb80166161f0 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -21,7 +21,9 @@ #include <linux/watchdog.h> #include <linux/poll.h> #include <linux/mei.h> + #include "hw.h" +#include "hw-me-regs.h" /* * watch dog definition @@ -44,7 +46,7 @@ /* * AMTHI Client UUID */ -extern const uuid_le mei_amthi_guid; +extern const uuid_le mei_amthif_guid; /* * Watchdog Client UUID @@ -65,12 +67,18 @@ extern const u8 mei_wd_state_independence_msg[3][4]; * Number of File descriptors/handles * that can be opened to the driver. * - * Limit to 253: 256 Total Clients + * Limit to 255: 256 Total Clients * minus internal client for MEI Bus Messags - * minus internal client for AMTHI - * minus internal client for Watchdog */ -#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 3) +#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1) + +/* + * Internal Clients Number + */ +#define MEI_HOST_CLIENT_ID_ANY (-1) +#define MEI_HBM_HOST_CLIENT_ID 0 /* not used, just for documentation */ +#define MEI_WD_HOST_CLIENT_ID 1 +#define MEI_IAMTHIF_HOST_CLIENT_ID 2 /* File state */ @@ -150,6 +158,19 @@ struct mei_message_data { unsigned char *data; }; +/** + * struct mei_me_client - representation of me (fw) client + * + * @props - client properties + * @client_id - me client id + * @mei_flow_ctrl_creds - flow control credits + */ +struct mei_me_client { + struct mei_client_properties props; + u8 client_id; + u8 mei_flow_ctrl_creds; +}; + struct mei_cl; @@ -178,7 +199,6 @@ struct mei_cl { wait_queue_head_t tx_wait; wait_queue_head_t rx_wait; wait_queue_head_t wait; - int read_pending; int status; /* ID of client connected */ u8 host_client_id; @@ -191,10 +211,67 @@ struct mei_cl { struct mei_cl_cb *read_cb; }; +/** struct mei_hw_ops + * + * @host_set_ready - notify FW that host side is ready + * @host_is_ready - query for host readiness + + * @hw_is_ready - query if hw is ready + * @hw_reset - reset hw + * @hw_config - configure hw + + * @intr_clear - clear pending interrupts + * @intr_enable - enable interrupts + * @intr_disable - disable interrupts + + * @hbuf_free_slots - query for write buffer empty slots + * @hbuf_is_ready - query if write buffer is empty + * @hbuf_max_len - query for write buffer max len + + * @write - write a message to FW + + * @rdbuf_full_slots - query how many slots are filled + + * @read_hdr - get first 4 bytes (header) + * @read - read a buffer from the FW + */ +struct mei_hw_ops { + + void (*host_set_ready) (struct mei_device *dev); + bool (*host_is_ready) (struct mei_device *dev); + + bool (*hw_is_ready) (struct mei_device *dev); + void (*hw_reset) (struct mei_device *dev, bool enable); + void (*hw_config) (struct mei_device *dev); + + void (*intr_clear) (struct mei_device *dev); + void (*intr_enable) (struct mei_device *dev); + void (*intr_disable) (struct mei_device *dev); + + int (*hbuf_free_slots) (struct mei_device *dev); + bool (*hbuf_is_ready) (struct mei_device *dev); + size_t (*hbuf_max_len) (const struct mei_device *dev); + + int (*write)(struct mei_device *dev, + struct mei_msg_hdr *hdr, + unsigned char *buf); + + int (*rdbuf_full_slots)(struct mei_device *dev); + + u32 (*read_hdr)(const struct mei_device *dev); + int (*read) (struct mei_device *dev, + unsigned char *buf, unsigned long len); +}; + /** * struct mei_device - MEI private device struct - * @hbuf_depth - depth of host(write) buffer - * @wr_ext_msg - buffer for hbm control responses (set in read cycle) + + * @mem_addr - mem mapped base register address + + * @hbuf_depth - depth of 
hardware host/write buffer in slots + * @hbuf_is_ready - query if the host/write buffer is ready + * @wr_msg - the buffer for hbm control messages + * @wr_ext_msg - the buffer for hbm control responses (set in read cycle) */ struct mei_device { struct pci_dev *pdev; /* pointer to pci device struct */ @@ -213,24 +290,14 @@ struct mei_device { */ struct list_head file_list; long open_handle_count; - /* - * memory of device - */ - unsigned int mem_base; - unsigned int mem_length; - void __iomem *mem_addr; + /* * lock for the device */ struct mutex device_lock; /* device lock */ struct delayed_work timer_work; /* MEI timer delayed work (timeouts) */ bool recvd_msg; - /* - * hw states of host and fw(ME) - */ - u32 host_hw_state; - u32 me_hw_state; - u8 hbuf_depth; + /* * waiting queue for receive message from FW */ @@ -243,11 +310,20 @@ struct mei_device { enum mei_dev_state dev_state; enum mei_init_clients_states init_clients_state; u16 init_clients_timer; - bool need_reset; unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */ u32 rd_msg_hdr; - u32 wr_msg_buf[128]; /* used for control messages */ + + /* write buffer */ + u8 hbuf_depth; + bool hbuf_is_ready; + + /* used for control messages */ + struct { + struct mei_msg_hdr hdr; + unsigned char data[128]; + } wr_msg; + struct { struct mei_msg_hdr hdr; unsigned char data[4]; /* All HBM messages are 4 bytes */ @@ -261,7 +337,6 @@ struct mei_device { u8 me_clients_num; u8 me_client_presentation_num; u8 me_client_index; - bool mei_host_buffer_is_empty; struct mei_cl wd_cl; enum mei_wd_states wd_state; @@ -289,6 +364,9 @@ struct mei_device { bool iamthif_canceled; struct work_struct init_work; + + const struct mei_hw_ops *ops; + char hw[0] __aligned(sizeof(void *)); }; static inline unsigned long mei_secs_to_jiffies(unsigned long sec) @@ -300,96 +378,28 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec) /* * mei init function prototypes */ -struct mei_device *mei_device_init(struct pci_dev *pdev); +void mei_device_init(struct mei_device *dev); void mei_reset(struct mei_device *dev, int interrupts); int mei_hw_init(struct mei_device *dev); -int mei_task_initialize_clients(void *data); -int mei_initialize_clients(struct mei_device *dev); -int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl); -void mei_allocate_me_clients_storage(struct mei_device *dev); - - -int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl, - const uuid_le *cguid, u8 host_client_id); -void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl); -int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid); -int mei_me_cl_by_id(struct mei_device *dev, u8 client_id); - -/* - * MEI IO Functions - */ -struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp); -void mei_io_cb_free(struct mei_cl_cb *priv_cb); -int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length); -int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length); - - -/** - * mei_io_list_init - Sets up a queue list. 
- * - * @list: An instance cl callback structure - */ -static inline void mei_io_list_init(struct mei_cl_cb *list) -{ - INIT_LIST_HEAD(&list->list); -} -void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl); - -/* - * MEI ME Client Functions - */ - -struct mei_cl *mei_cl_allocate(struct mei_device *dev); -void mei_cl_init(struct mei_cl *cl, struct mei_device *dev); -int mei_cl_flush_queues(struct mei_cl *cl); -/** - * mei_cl_cmp_id - tells if file private data have same id - * - * @fe1: private data of 1. file object - * @fe2: private data of 2. file object - * - * returns true - if ids are the same and not NULL - */ -static inline bool mei_cl_cmp_id(const struct mei_cl *cl1, - const struct mei_cl *cl2) -{ - return cl1 && cl2 && - (cl1->host_client_id == cl2->host_client_id) && - (cl1->me_client_id == cl2->me_client_id); -} - - - -/* - * MEI Host Client Functions - */ -void mei_host_start_message(struct mei_device *dev); -void mei_host_enum_clients_message(struct mei_device *dev); -int mei_host_client_enumerate(struct mei_device *dev); -void mei_host_client_init(struct work_struct *work); /* * MEI interrupt functions prototype */ -irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id); -irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id); -void mei_timer(struct work_struct *work); -/* - * MEI input output function prototype - */ -int mei_ioctl_connect_client(struct file *file, - struct mei_connect_client_data *data); +void mei_timer(struct work_struct *work); +int mei_irq_read_handler(struct mei_device *dev, + struct mei_cl_cb *cmpl_list, s32 *slots); -int mei_start_read(struct mei_device *dev, struct mei_cl *cl); +int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list); +void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos); /* * AMTHIF - AMT Host Interface Functions */ void mei_amthif_reset_params(struct mei_device *dev); -void mei_amthif_host_init(struct mei_device *dev); +int mei_amthif_host_init(struct mei_device *dev); int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *priv_cb); @@ -407,9 +417,6 @@ struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev, void mei_amthif_run_next_cmd(struct mei_device *dev); -int mei_amthif_read_message(struct mei_cl_cb *complete_list, - struct mei_device *dev, struct mei_msg_hdr *mei_hdr); - int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots, struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list); @@ -418,92 +425,107 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list, struct mei_device *dev, struct mei_msg_hdr *mei_hdr); int mei_amthif_irq_read(struct mei_device *dev, s32 *slots); + +int mei_wd_send(struct mei_device *dev); +int mei_wd_stop(struct mei_device *dev); +int mei_wd_host_init(struct mei_device *dev); /* - * Register Access Function + * mei_watchdog_register - Registering watchdog interface + * once we got connection to the WD Client + * @dev - mei device + */ +void mei_watchdog_register(struct mei_device *dev); +/* + * mei_watchdog_unregister - Unregistering watchdog interface + * @dev - mei device */ +void mei_watchdog_unregister(struct mei_device *dev); -/** - * mei_reg_read - Reads 32bit data from the mei device - * - * @dev: the device structure - * @offset: offset from which to read the data - * - * returns register value (u32) +/* + * Register Access Function */ -static inline u32 mei_reg_read(const struct mei_device *dev, - unsigned long offset) + +static inline void mei_hw_config(struct mei_device 
*dev) +{ + dev->ops->hw_config(dev); +} +static inline void mei_hw_reset(struct mei_device *dev, bool enable) { - return ioread32(dev->mem_addr + offset); + dev->ops->hw_reset(dev, enable); } -/** - * mei_reg_write - Writes 32bit data to the mei device - * - * @dev: the device structure - * @offset: offset from which to write the data - * @value: register value to write (u32) - */ -static inline void mei_reg_write(const struct mei_device *dev, - unsigned long offset, u32 value) +static inline void mei_clear_interrupts(struct mei_device *dev) { - iowrite32(value, dev->mem_addr + offset); + dev->ops->intr_clear(dev); } -/** - * mei_hcsr_read - Reads 32bit data from the host CSR - * - * @dev: the device structure - * - * returns the byte read. - */ -static inline u32 mei_hcsr_read(const struct mei_device *dev) +static inline void mei_enable_interrupts(struct mei_device *dev) { - return mei_reg_read(dev, H_CSR); + dev->ops->intr_enable(dev); } -/** - * mei_mecsr_read - Reads 32bit data from the ME CSR - * - * @dev: the device structure - * - * returns ME_CSR_HA register value (u32) - */ -static inline u32 mei_mecsr_read(const struct mei_device *dev) +static inline void mei_disable_interrupts(struct mei_device *dev) { - return mei_reg_read(dev, ME_CSR_HA); + dev->ops->intr_disable(dev); } -/** - * get_me_cb_rw - Reads 32bit data from the mei ME_CB_RW register - * - * @dev: the device structure - * - * returns ME_CB_RW register value (u32) - */ -static inline u32 mei_mecbrw_read(const struct mei_device *dev) +static inline void mei_host_set_ready(struct mei_device *dev) { - return mei_reg_read(dev, ME_CB_RW); + dev->ops->host_set_ready(dev); +} +static inline bool mei_host_is_ready(struct mei_device *dev) +{ + return dev->ops->host_is_ready(dev); +} +static inline bool mei_hw_is_ready(struct mei_device *dev) +{ + return dev->ops->hw_is_ready(dev); } +static inline bool mei_hbuf_is_ready(struct mei_device *dev) +{ + return dev->ops->hbuf_is_ready(dev); +} -/* - * mei interface function prototypes - */ -void mei_hcsr_set(struct mei_device *dev); -void mei_csr_clear_his(struct mei_device *dev); +static inline int mei_hbuf_empty_slots(struct mei_device *dev) +{ + return dev->ops->hbuf_free_slots(dev); +} + +static inline size_t mei_hbuf_max_len(const struct mei_device *dev) +{ + return dev->ops->hbuf_max_len(dev); +} -void mei_enable_interrupts(struct mei_device *dev); -void mei_disable_interrupts(struct mei_device *dev); +static inline int mei_write_message(struct mei_device *dev, + struct mei_msg_hdr *hdr, + unsigned char *buf) +{ + return dev->ops->write(dev, hdr, buf); +} -static inline struct mei_msg_hdr *mei_hbm_hdr(u32 *buf, size_t length) +static inline u32 mei_read_hdr(const struct mei_device *dev) { - struct mei_msg_hdr *hdr = (struct mei_msg_hdr *)buf; - hdr->host_addr = 0; - hdr->me_addr = 0; - hdr->length = length; - hdr->msg_complete = 1; - hdr->reserved = 0; - return hdr; + return dev->ops->read_hdr(dev); } +static inline void mei_read_slots(struct mei_device *dev, + unsigned char *buf, unsigned long len) +{ + dev->ops->read(dev, buf, len); +} + +static inline int mei_count_full_read_slots(struct mei_device *dev) +{ + return dev->ops->rdbuf_full_slots(dev); +} + +int mei_register(struct device *dev); +void mei_deregister(void); + +#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d comp=%1d" +#define MEI_HDR_PRM(hdr) \ + (hdr)->host_addr, (hdr)->me_addr, \ + (hdr)->length, (hdr)->msg_complete + #endif diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c new file mode 
100644 index 000000000000..b40ec0601ab0 --- /dev/null +++ b/drivers/misc/mei/pci-me.c @@ -0,0 +1,396 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/aio.h> +#include <linux/pci.h> +#include <linux/poll.h> +#include <linux/init.h> +#include <linux/ioctl.h> +#include <linux/cdev.h> +#include <linux/sched.h> +#include <linux/uuid.h> +#include <linux/compat.h> +#include <linux/jiffies.h> +#include <linux/interrupt.h> +#include <linux/miscdevice.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hw-me.h" +#include "client.h" + +/* AMT device is a singleton on the platform */ +static struct pci_dev *mei_pdev; + +/* mei_pci_tbl - PCI Device ID Table */ +static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)}, + + /* required last entry */ + {0, } +}; + +MODULE_DEVICE_TABLE(pci, mei_pci_tbl); + +static DEFINE_MUTEX(mei_mutex); + +/** + * 
mei_quirk_probe - probe for devices that don't have a valid ME interface + * @pdev: PCI device structure + * @ent: entry into pci_device_table + * + * returns true if ME Interface is valid, false otherwise + */ +static bool mei_quirk_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + u32 reg; + if (ent->device == MEI_DEV_ID_PBG_1) { + pci_read_config_dword(pdev, 0x48, &reg); + /* make sure that bit 9 is up and bit 10 is down */ + if ((reg & 0x600) == 0x200) { + dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n"); + return false; + } + } + return true; +} +/** + * mei_probe - Device Initialization Routine + * + * @pdev: PCI device structure + * @ent: entry in mei_pci_tbl + * + * returns 0 on success, <0 on failure. + */ +static int mei_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct mei_device *dev; + struct mei_me_hw *hw; + int err; + + mutex_lock(&mei_mutex); + + if (!mei_quirk_probe(pdev, ent)) { + err = -ENODEV; + goto end; + } + + if (mei_pdev) { + err = -EEXIST; + goto end; + } + /* enable pci dev */ + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "failed to enable pci device.\n"); + goto end; + } + /* set PCI host mastering */ + pci_set_master(pdev); + /* pci request regions for mei driver */ + err = pci_request_regions(pdev, KBUILD_MODNAME); + if (err) { + dev_err(&pdev->dev, "failed to get pci regions.\n"); + goto disable_device; + } + /* allocates and initializes the mei dev structure */ + dev = mei_me_dev_init(pdev); + if (!dev) { + err = -ENOMEM; + goto release_regions; + } + hw = to_me_hw(dev); + /* mapping IO device memory */ + hw->mem_addr = pci_iomap(pdev, 0, 0); + if (!hw->mem_addr) { + dev_err(&pdev->dev, "mapping I/O device memory failure.\n"); + err = -ENOMEM; + goto free_device; + } + pci_enable_msi(pdev); + + /* request and enable interrupt */ + if (pci_dev_msi_enabled(pdev)) + err = request_threaded_irq(pdev->irq, + NULL, + mei_me_irq_thread_handler, + IRQF_ONESHOT, KBUILD_MODNAME, dev); + else + err = request_threaded_irq(pdev->irq, + mei_me_irq_quick_handler, + mei_me_irq_thread_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + + if (err) { + dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n", + pdev->irq); + goto disable_msi; + } + + if (mei_hw_init(dev)) { + dev_err(&pdev->dev, "init hw failure.\n"); + err = -ENODEV; + goto release_irq; + } + + err = mei_register(&pdev->dev); + if (err) + goto release_irq; + + mei_pdev = pdev; + pci_set_drvdata(pdev, dev); + + + schedule_delayed_work(&dev->timer_work, HZ); + + mutex_unlock(&mei_mutex); + + pr_debug("initialization successful.\n"); + + return 0; + +release_irq: + mei_disable_interrupts(dev); + flush_scheduled_work(); + free_irq(pdev->irq, dev); +disable_msi: + pci_disable_msi(pdev); + pci_iounmap(pdev, hw->mem_addr); +free_device: + kfree(dev); +release_regions: + pci_release_regions(pdev); +disable_device: + pci_disable_device(pdev); +end: + mutex_unlock(&mei_mutex); + dev_err(&pdev->dev, "initialization failed.\n"); + return err; +} + +/** + * mei_remove - Device Removal Routine + * + * @pdev: PCI device structure + * + * mei_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. 
+ */ +static void mei_remove(struct pci_dev *pdev) +{ + struct mei_device *dev; + struct mei_me_hw *hw; + + if (mei_pdev != pdev) + return; + + dev = pci_get_drvdata(pdev); + if (!dev) + return; + + hw = to_me_hw(dev); + + mutex_lock(&dev->device_lock); + + cancel_delayed_work(&dev->timer_work); + + mei_wd_stop(dev); + + mei_pdev = NULL; + + if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) { + dev->iamthif_cl.state = MEI_FILE_DISCONNECTING; + mei_cl_disconnect(&dev->iamthif_cl); + } + if (dev->wd_cl.state == MEI_FILE_CONNECTED) { + dev->wd_cl.state = MEI_FILE_DISCONNECTING; + mei_cl_disconnect(&dev->wd_cl); + } + + /* Unregistering watchdog device */ + mei_watchdog_unregister(dev); + + /* remove entry if already in list */ + dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n"); + + if (dev->open_handle_count > 0) + dev->open_handle_count--; + mei_cl_unlink(&dev->wd_cl); + + if (dev->open_handle_count > 0) + dev->open_handle_count--; + mei_cl_unlink(&dev->iamthif_cl); + + dev->iamthif_current_cb = NULL; + dev->me_clients_num = 0; + + mutex_unlock(&dev->device_lock); + + flush_scheduled_work(); + + /* disable interrupts */ + mei_disable_interrupts(dev); + + free_irq(pdev->irq, dev); + pci_disable_msi(pdev); + pci_set_drvdata(pdev, NULL); + + if (hw->mem_addr) + pci_iounmap(pdev, hw->mem_addr); + + kfree(dev); + + pci_release_regions(pdev); + pci_disable_device(pdev); + + mei_deregister(); + +} +#ifdef CONFIG_PM +static int mei_pci_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev = pci_get_drvdata(pdev); + int err; + + if (!dev) + return -ENODEV; + mutex_lock(&dev->device_lock); + + cancel_delayed_work(&dev->timer_work); + + /* Stop watchdog if exists */ + err = mei_wd_stop(dev); + /* Set new mei state */ + if (dev->dev_state == MEI_DEV_ENABLED || + dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) { + dev->dev_state = MEI_DEV_POWER_DOWN; + mei_reset(dev, 0); + } + mutex_unlock(&dev->device_lock); + + free_irq(pdev->irq, dev); + pci_disable_msi(pdev); + + return err; +} + +static int mei_pci_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev; + int err; + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + + pci_enable_msi(pdev); + + /* request and enable interrupt */ + if (pci_dev_msi_enabled(pdev)) + err = request_threaded_irq(pdev->irq, + NULL, + mei_me_irq_thread_handler, + IRQF_ONESHOT, KBUILD_MODNAME, dev); + else + err = request_threaded_irq(pdev->irq, + mei_me_irq_quick_handler, + mei_me_irq_thread_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + + if (err) { + dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n", + pdev->irq); + return err; + } + + mutex_lock(&dev->device_lock); + dev->dev_state = MEI_DEV_POWER_UP; + mei_reset(dev, 1); + mutex_unlock(&dev->device_lock); + + /* Start timer if stopped in suspend */ + schedule_delayed_work(&dev->timer_work, HZ); + + return err; +} +static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume); +#define MEI_PM_OPS (&mei_pm_ops) +#else +#define MEI_PM_OPS NULL +#endif /* CONFIG_PM */ +/* + * PCI driver structure + */ +static struct pci_driver mei_driver = { + .name = KBUILD_MODNAME, + .id_table = mei_pci_tbl, + .probe = mei_probe, + .remove = mei_remove, + .shutdown = mei_remove, + .driver.pm = MEI_PM_OPS, +}; + +module_pci_driver(mei_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) Management Engine Interface"); +MODULE_LICENSE("GPL v2"); diff --git 
a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c index 9299a8c29a6f..2413247fc392 100644 --- a/drivers/misc/mei/wd.c +++ b/drivers/misc/mei/wd.c @@ -21,11 +21,13 @@ #include <linux/sched.h> #include <linux/watchdog.h> -#include "mei_dev.h" -#include "hw.h" -#include "interface.h" #include <linux/mei.h> +#include "mei_dev.h" +#include "hbm.h" +#include "hw-me.h" +#include "client.h" + static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 }; static const u8 mei_stop_wd_params[] = { 0x02, 0x02, 0x14, 0x10 }; @@ -62,30 +64,41 @@ static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout) */ int mei_wd_host_init(struct mei_device *dev) { - int id; - mei_cl_init(&dev->wd_cl, dev); + struct mei_cl *cl = &dev->wd_cl; + int i; + int ret; + + mei_cl_init(cl, dev); - /* look for WD client and connect to it */ - dev->wd_cl.state = MEI_FILE_DISCONNECTED; dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT; dev->wd_state = MEI_WD_IDLE; - /* Connect WD ME client to the host client */ - id = mei_me_cl_link(dev, &dev->wd_cl, - &mei_wd_guid, MEI_WD_HOST_CLIENT_ID); - if (id < 0) { + /* check for valid client id */ + i = mei_me_cl_by_uuid(dev, &mei_wd_guid); + if (i < 0) { dev_info(&dev->pdev->dev, "wd: failed to find the client\n"); return -ENOENT; } - if (mei_connect(dev, &dev->wd_cl)) { + cl->me_client_id = dev->me_clients[i].client_id; + + ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID); + + if (ret < 0) { + dev_info(&dev->pdev->dev, "wd: failed link client\n"); + return -ENOENT; + } + + cl->state = MEI_FILE_CONNECTING; + + if (mei_hbm_cl_connect_req(dev, cl)) { dev_err(&dev->pdev->dev, "wd: failed to connect to the client\n"); - dev->wd_cl.state = MEI_FILE_DISCONNECTED; - dev->wd_cl.host_client_id = 0; + cl->state = MEI_FILE_DISCONNECTED; + cl->host_client_id = 0; return -EIO; } - dev->wd_cl.timer_count = MEI_CONNECT_TIMEOUT; + cl->timer_count = MEI_CONNECT_TIMEOUT; return 0; } @@ -101,22 +114,21 @@ int mei_wd_host_init(struct mei_device *dev) */ int mei_wd_send(struct mei_device *dev) { - struct mei_msg_hdr *mei_hdr; + struct mei_msg_hdr hdr; - mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0]; - mei_hdr->host_addr = dev->wd_cl.host_client_id; - mei_hdr->me_addr = dev->wd_cl.me_client_id; - mei_hdr->msg_complete = 1; - mei_hdr->reserved = 0; + hdr.host_addr = dev->wd_cl.host_client_id; + hdr.me_addr = dev->wd_cl.me_client_id; + hdr.msg_complete = 1; + hdr.reserved = 0; if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE)) - mei_hdr->length = MEI_WD_START_MSG_SIZE; + hdr.length = MEI_WD_START_MSG_SIZE; else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE)) - mei_hdr->length = MEI_WD_STOP_MSG_SIZE; + hdr.length = MEI_WD_STOP_MSG_SIZE; else return -EINVAL; - return mei_write_message(dev, mei_hdr, dev->wd_data, mei_hdr->length); + return mei_write_message(dev, &hdr, dev->wd_data); } /** @@ -141,16 +153,16 @@ int mei_wd_stop(struct mei_device *dev) dev->wd_state = MEI_WD_STOPPING; - ret = mei_flow_ctrl_creds(dev, &dev->wd_cl); + ret = mei_cl_flow_ctrl_creds(&dev->wd_cl); if (ret < 0) goto out; - if (ret && dev->mei_host_buffer_is_empty) { + if (ret && dev->hbuf_is_ready) { ret = 0; - dev->mei_host_buffer_is_empty = false; + dev->hbuf_is_ready = false; if (!mei_wd_send(dev)) { - ret = mei_flow_ctrl_reduce(dev, &dev->wd_cl); + ret = mei_cl_flow_ctrl_reduce(&dev->wd_cl); if (ret) goto out; } else { @@ -270,10 +282,9 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev) dev->wd_state = MEI_WD_RUNNING; /* Check if we can send the ping to HW*/ - if 
(dev->mei_host_buffer_is_empty && - mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) { + if (dev->hbuf_is_ready && mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) { - dev->mei_host_buffer_is_empty = false; + dev->hbuf_is_ready = false; dev_dbg(&dev->pdev->dev, "wd: sending ping\n"); if (mei_wd_send(dev)) { @@ -282,9 +293,9 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev) goto end; } - if (mei_flow_ctrl_reduce(dev, &dev->wd_cl)) { + if (mei_cl_flow_ctrl_reduce(&dev->wd_cl)) { dev_err(&dev->pdev->dev, - "wd: mei_flow_ctrl_reduce() failed.\n"); + "wd: mei_cl_flow_ctrl_reduce() failed.\n"); ret = -EIO; goto end; } diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index 492c8cac69ac..44d273c5e19d 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c @@ -517,7 +517,7 @@ static int __init gru_init(void) { int ret; - if (!is_uv_system()) + if (!is_uv_system() || (is_uvx_hub() && !is_uv2_hub())) return 0; #if defined CONFIG_IA64 diff --git a/drivers/misc/ti-st/Kconfig b/drivers/misc/ti-st/Kconfig index abb5de1afce3..f34dcc514730 100644 --- a/drivers/misc/ti-st/Kconfig +++ b/drivers/misc/ti-st/Kconfig @@ -5,7 +5,7 @@ menu "Texas Instruments shared transport line discipline" config TI_ST tristate "Shared transport core driver" - depends on NET && GPIOLIB + depends on NET && GPIOLIB && TTY select FW_LOADER help This enables the shared transport core driver for TI diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c index b90a2241d79c..0a1428016350 100644 --- a/drivers/misc/ti-st/st_core.c +++ b/drivers/misc/ti-st/st_core.c @@ -240,7 +240,8 @@ void st_int_recv(void *disc_data, char *ptr; struct st_proto_s *proto; unsigned short payload_len = 0; - int len = 0, type = 0; + int len = 0; + unsigned char type = 0; unsigned char *plen; struct st_data_s *st_gdata = (struct st_data_s *)disc_data; unsigned long flags; diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig new file mode 100644 index 000000000000..39c2ecadb273 --- /dev/null +++ b/drivers/misc/vmw_vmci/Kconfig @@ -0,0 +1,16 @@ +# +# VMware VMCI device +# + +config VMWARE_VMCI + tristate "VMware VMCI Driver" + depends on X86 && PCI + help + This is VMware's Virtual Machine Communication Interface. It enables + high-speed communication between host and guest in a virtual + environment via the VMCI virtual device. + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called vmw_vmci. diff --git a/drivers/misc/vmw_vmci/Makefile b/drivers/misc/vmw_vmci/Makefile new file mode 100644 index 000000000000..4da9893c3942 --- /dev/null +++ b/drivers/misc/vmw_vmci/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci.o +vmw_vmci-y += vmci_context.o vmci_datagram.o vmci_doorbell.o \ + vmci_driver.o vmci_event.o vmci_guest.o vmci_handle_array.o \ + vmci_host.o vmci_queue_pair.o vmci_resource.o vmci_route.o diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c new file mode 100644 index 000000000000..f866a4baecb5 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_context.c @@ -0,0 +1,1214 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/highmem.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/slab.h> + +#include "vmci_queue_pair.h" +#include "vmci_datagram.h" +#include "vmci_doorbell.h" +#include "vmci_context.h" +#include "vmci_driver.h" +#include "vmci_event.h" + +/* + * List of current VMCI contexts. Contexts can be added by + * vmci_ctx_create() and removed via vmci_ctx_destroy(). + * These, along with context lookup, are protected by the + * list structure's lock. + */ +static struct { + struct list_head head; + spinlock_t lock; /* Spinlock for context list operations */ +} ctx_list = { + .head = LIST_HEAD_INIT(ctx_list.head), + .lock = __SPIN_LOCK_UNLOCKED(ctx_list.lock), +}; + +/* Used by contexts that did not set up notify flag pointers */ +static bool ctx_dummy_notify; + +static void ctx_signal_notify(struct vmci_ctx *context) +{ + *context->notify = true; +} + +static void ctx_clear_notify(struct vmci_ctx *context) +{ + *context->notify = false; +} + +/* + * If nothing requires the attention of the guest, clears both + * notify flag and call. + */ +static void ctx_clear_notify_call(struct vmci_ctx *context) +{ + if (context->pending_datagrams == 0 && + vmci_handle_arr_get_size(context->pending_doorbell_array) == 0) + ctx_clear_notify(context); +} + +/* + * Sets the context's notify flag iff datagrams are pending for this + * context. Called from vmci_setup_notify(). + */ +void vmci_ctx_check_signal_notify(struct vmci_ctx *context) +{ + spin_lock(&context->lock); + if (context->pending_datagrams) + ctx_signal_notify(context); + spin_unlock(&context->lock); +} + +/* + * Allocates and initializes a VMCI context. + */ +struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags, + uintptr_t event_hnd, + int user_version, + const struct cred *cred) +{ + struct vmci_ctx *context; + int error; + + if (cid == VMCI_INVALID_ID) { + pr_devel("Invalid context ID for VMCI context\n"); + error = -EINVAL; + goto err_out; + } + + if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS) { + pr_devel("Invalid flag (flags=0x%x) for VMCI context\n", + priv_flags); + error = -EINVAL; + goto err_out; + } + + if (user_version == 0) { + pr_devel("Invalid user_version %d\n", user_version); + error = -EINVAL; + goto err_out; + } + + context = kzalloc(sizeof(*context), GFP_KERNEL); + if (!context) { + pr_warn("Failed to allocate memory for VMCI context\n"); + error = -EINVAL; + goto err_out; + } + + kref_init(&context->kref); + spin_lock_init(&context->lock); + INIT_LIST_HEAD(&context->list_item); + INIT_LIST_HEAD(&context->datagram_queue); + INIT_LIST_HEAD(&context->notifier_list); + + /* Initialize host-specific VMCI context. 
*/ + init_waitqueue_head(&context->host_context.wait_queue); + + context->queue_pair_array = vmci_handle_arr_create(0); + if (!context->queue_pair_array) { + error = -ENOMEM; + goto err_free_ctx; + } + + context->doorbell_array = vmci_handle_arr_create(0); + if (!context->doorbell_array) { + error = -ENOMEM; + goto err_free_qp_array; + } + + context->pending_doorbell_array = vmci_handle_arr_create(0); + if (!context->pending_doorbell_array) { + error = -ENOMEM; + goto err_free_db_array; + } + + context->user_version = user_version; + + context->priv_flags = priv_flags; + + if (cred) + context->cred = get_cred(cred); + + context->notify = &ctx_dummy_notify; + context->notify_page = NULL; + + /* + * If we collide with an existing context we generate a new + * and use it instead. The VMX will determine if regeneration + * is okay. Since there isn't 4B - 16 VMs running on a given + * host, the below loop will terminate. + */ + spin_lock(&ctx_list.lock); + + while (vmci_ctx_exists(cid)) { + /* We reserve the lowest 16 ids for fixed contexts. */ + cid = max(cid, VMCI_RESERVED_CID_LIMIT - 1) + 1; + if (cid == VMCI_INVALID_ID) + cid = VMCI_RESERVED_CID_LIMIT; + } + context->cid = cid; + + list_add_tail_rcu(&context->list_item, &ctx_list.head); + spin_unlock(&ctx_list.lock); + + return context; + + err_free_db_array: + vmci_handle_arr_destroy(context->doorbell_array); + err_free_qp_array: + vmci_handle_arr_destroy(context->queue_pair_array); + err_free_ctx: + kfree(context); + err_out: + return ERR_PTR(error); +} + +/* + * Destroy VMCI context. + */ +void vmci_ctx_destroy(struct vmci_ctx *context) +{ + spin_lock(&ctx_list.lock); + list_del_rcu(&context->list_item); + spin_unlock(&ctx_list.lock); + synchronize_rcu(); + + vmci_ctx_put(context); +} + +/* + * Fire notification for all contexts interested in given cid. + */ +static int ctx_fire_notification(u32 context_id, u32 priv_flags) +{ + u32 i, array_size; + struct vmci_ctx *sub_ctx; + struct vmci_handle_arr *subscriber_array; + struct vmci_handle context_handle = + vmci_make_handle(context_id, VMCI_EVENT_HANDLER); + + /* + * We create an array to hold the subscribers we find when + * scanning through all contexts. + */ + subscriber_array = vmci_handle_arr_create(0); + if (subscriber_array == NULL) + return VMCI_ERROR_NO_MEM; + + /* + * Scan all contexts to find who is interested in being + * notified about given contextID. + */ + rcu_read_lock(); + list_for_each_entry_rcu(sub_ctx, &ctx_list.head, list_item) { + struct vmci_handle_list *node; + + /* + * We only deliver notifications of the removal of + * contexts, if the two contexts are allowed to + * interact. + */ + if (vmci_deny_interaction(priv_flags, sub_ctx->priv_flags)) + continue; + + list_for_each_entry_rcu(node, &sub_ctx->notifier_list, node) { + if (!vmci_handle_is_equal(node->handle, context_handle)) + continue; + + vmci_handle_arr_append_entry(&subscriber_array, + vmci_make_handle(sub_ctx->cid, + VMCI_EVENT_HANDLER)); + } + } + rcu_read_unlock(); + + /* Fire event to all subscribers. 
*/ + array_size = vmci_handle_arr_get_size(subscriber_array); + for (i = 0; i < array_size; i++) { + int result; + struct vmci_event_ctx ev; + + ev.msg.hdr.dst = vmci_handle_arr_get_entry(subscriber_array, i); + ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_CONTEXT_RESOURCE_ID); + ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr); + ev.msg.event_data.event = VMCI_EVENT_CTX_REMOVED; + ev.payload.context_id = context_id; + + result = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID, + &ev.msg.hdr, false); + if (result < VMCI_SUCCESS) { + pr_devel("Failed to enqueue event datagram (type=%d) for context (ID=0x%x)\n", + ev.msg.event_data.event, + ev.msg.hdr.dst.context); + /* We continue to enqueue on next subscriber. */ + } + } + vmci_handle_arr_destroy(subscriber_array); + + return VMCI_SUCCESS; +} + +/* + * Returns the current number of pending datagrams. The call may + * also serve as a synchronization point for the datagram queue, + * as no enqueue operations can occur concurrently. + */ +int vmci_ctx_pending_datagrams(u32 cid, u32 *pending) +{ + struct vmci_ctx *context; + + context = vmci_ctx_get(cid); + if (context == NULL) + return VMCI_ERROR_INVALID_ARGS; + + spin_lock(&context->lock); + if (pending) + *pending = context->pending_datagrams; + spin_unlock(&context->lock); + vmci_ctx_put(context); + + return VMCI_SUCCESS; +} + +/* + * Queues a VMCI datagram for the appropriate target VM context. + */ +int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg) +{ + struct vmci_datagram_queue_entry *dq_entry; + struct vmci_ctx *context; + struct vmci_handle dg_src; + size_t vmci_dg_size; + + vmci_dg_size = VMCI_DG_SIZE(dg); + if (vmci_dg_size > VMCI_MAX_DG_SIZE) { + pr_devel("Datagram too large (bytes=%Zu)\n", vmci_dg_size); + return VMCI_ERROR_INVALID_ARGS; + } + + /* Get the target VM's VMCI context. */ + context = vmci_ctx_get(cid); + if (!context) { + pr_devel("Invalid context (ID=0x%x)\n", cid); + return VMCI_ERROR_INVALID_ARGS; + } + + /* Allocate guest call entry and add it to the target VM's queue. */ + dq_entry = kmalloc(sizeof(*dq_entry), GFP_KERNEL); + if (dq_entry == NULL) { + pr_warn("Failed to allocate memory for datagram\n"); + vmci_ctx_put(context); + return VMCI_ERROR_NO_MEM; + } + dq_entry->dg = dg; + dq_entry->dg_size = vmci_dg_size; + dg_src = dg->src; + INIT_LIST_HEAD(&dq_entry->list_item); + + spin_lock(&context->lock); + + /* + * We put a higher limit on datagrams from the hypervisor. If + * the pending datagram is not from hypervisor, then we check + * if enqueueing it would exceed the + * VMCI_MAX_DATAGRAM_QUEUE_SIZE limit on the destination. If + * the pending datagram is from hypervisor, we allow it to be + * queued at the destination side provided we don't reach the + * VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE limit. 
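+ *
+ * As a simplified sketch (assuming VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE
+ * is the larger of the two limits, and with "from_hypervisor" standing in
+ * for the dg_src comparison in the condition below), the check amounts to:
+ *
+ *	limit = from_hypervisor ? VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE
+ *				: VMCI_MAX_DATAGRAM_QUEUE_SIZE;
+ *	if (context->datagram_queue_size + vmci_dg_size >= limit)
+ *		reject the datagram with VMCI_ERROR_NO_RESOURCES;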
+ */ + if (context->datagram_queue_size + vmci_dg_size >= + VMCI_MAX_DATAGRAM_QUEUE_SIZE && + (!vmci_handle_is_equal(dg_src, + vmci_make_handle + (VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_CONTEXT_RESOURCE_ID)) || + context->datagram_queue_size + vmci_dg_size >= + VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE)) { + spin_unlock(&context->lock); + vmci_ctx_put(context); + kfree(dq_entry); + pr_devel("Context (ID=0x%x) receive queue is full\n", cid); + return VMCI_ERROR_NO_RESOURCES; + } + + list_add(&dq_entry->list_item, &context->datagram_queue); + context->pending_datagrams++; + context->datagram_queue_size += vmci_dg_size; + ctx_signal_notify(context); + wake_up(&context->host_context.wait_queue); + spin_unlock(&context->lock); + vmci_ctx_put(context); + + return vmci_dg_size; +} + +/* + * Verifies whether a context with the specified context ID exists. + * FIXME: utility is dubious as no decisions can be reliably made + * using this data as context can appear and disappear at any time. + */ +bool vmci_ctx_exists(u32 cid) +{ + struct vmci_ctx *context; + bool exists = false; + + rcu_read_lock(); + + list_for_each_entry_rcu(context, &ctx_list.head, list_item) { + if (context->cid == cid) { + exists = true; + break; + } + } + + rcu_read_unlock(); + return exists; +} + +/* + * Retrieves VMCI context corresponding to the given cid. + */ +struct vmci_ctx *vmci_ctx_get(u32 cid) +{ + struct vmci_ctx *c, *context = NULL; + + if (cid == VMCI_INVALID_ID) + return NULL; + + rcu_read_lock(); + list_for_each_entry_rcu(c, &ctx_list.head, list_item) { + if (c->cid == cid) { + /* + * The context owner drops its own reference to the + * context only after removing it from the list and + * waiting for RCU grace period to expire. This + * means that we are not about to increase the + * reference count of something that is in the + * process of being destroyed. + */ + context = c; + kref_get(&context->kref); + break; + } + } + rcu_read_unlock(); + + return context; +} + +/* + * Deallocates all parts of a context data structure. This + * function doesn't lock the context, because it assumes that + * the caller was holding the last reference to context. + */ +static void ctx_free_ctx(struct kref *kref) +{ + struct vmci_ctx *context = container_of(kref, struct vmci_ctx, kref); + struct vmci_datagram_queue_entry *dq_entry, *dq_entry_tmp; + struct vmci_handle temp_handle; + struct vmci_handle_list *notifier, *tmp; + + /* + * Fire event to all contexts interested in knowing this + * context is dying. + */ + ctx_fire_notification(context->cid, context->priv_flags); + + /* + * Cleanup all queue pair resources attached to context. If + * the VM dies without cleaning up, this code will make sure + * that no resources are leaked. + */ + temp_handle = vmci_handle_arr_get_entry(context->queue_pair_array, 0); + while (!vmci_handle_is_equal(temp_handle, VMCI_INVALID_HANDLE)) { + if (vmci_qp_broker_detach(temp_handle, + context) < VMCI_SUCCESS) { + /* + * When vmci_qp_broker_detach() succeeds it + * removes the handle from the array. If + * detach fails, we must remove the handle + * ourselves. + */ + vmci_handle_arr_remove_entry(context->queue_pair_array, + temp_handle); + } + temp_handle = + vmci_handle_arr_get_entry(context->queue_pair_array, 0); + } + + /* + * It is fine to destroy this without locking the callQueue, as + * this is the only thread having a reference to the context. 
+ */ + list_for_each_entry_safe(dq_entry, dq_entry_tmp, + &context->datagram_queue, list_item) { + WARN_ON(dq_entry->dg_size != VMCI_DG_SIZE(dq_entry->dg)); + list_del(&dq_entry->list_item); + kfree(dq_entry->dg); + kfree(dq_entry); + } + + list_for_each_entry_safe(notifier, tmp, + &context->notifier_list, node) { + list_del(&notifier->node); + kfree(notifier); + } + + vmci_handle_arr_destroy(context->queue_pair_array); + vmci_handle_arr_destroy(context->doorbell_array); + vmci_handle_arr_destroy(context->pending_doorbell_array); + vmci_ctx_unset_notify(context); + if (context->cred) + put_cred(context->cred); + kfree(context); +} + +/* + * Drops reference to VMCI context. If this is the last reference to + * the context it will be deallocated. A context is created with + * a reference count of one, and on destroy, it is removed from + * the context list before its reference count is decremented. Thus, + * if we reach zero, we are sure that nobody else is about to increment + * it (they need the entry in the context list for that), and so there + * is no need for locking. + */ +void vmci_ctx_put(struct vmci_ctx *context) +{ + kref_put(&context->kref, ctx_free_ctx); +} + +/* + * Dequeues the next datagram and returns it to caller. + * The caller passes in a pointer to the max size datagram + * it can handle and the datagram is only unqueued if the + * size is less than max_size. If larger, max_size is set to + * the size of the datagram to give the caller a chance to + * set up a larger buffer for the guestcall. + */ +int vmci_ctx_dequeue_datagram(struct vmci_ctx *context, + size_t *max_size, + struct vmci_datagram **dg) +{ + struct vmci_datagram_queue_entry *dq_entry; + struct list_head *list_item; + int rv; + + /* Dequeue the next datagram entry. */ + spin_lock(&context->lock); + if (context->pending_datagrams == 0) { + ctx_clear_notify_call(context); + spin_unlock(&context->lock); + pr_devel("No datagrams pending\n"); + return VMCI_ERROR_NO_MORE_DATAGRAMS; + } + + list_item = context->datagram_queue.next; + + dq_entry = + list_entry(list_item, struct vmci_datagram_queue_entry, list_item); + + /* Check size of caller's buffer. */ + if (*max_size < dq_entry->dg_size) { + *max_size = dq_entry->dg_size; + spin_unlock(&context->lock); + pr_devel("Caller's buffer should be at least (size=%u bytes)\n", + (u32) *max_size); + return VMCI_ERROR_NO_MEM; + } + + list_del(list_item); + context->pending_datagrams--; + context->datagram_queue_size -= dq_entry->dg_size; + if (context->pending_datagrams == 0) { + ctx_clear_notify_call(context); + rv = VMCI_SUCCESS; + } else { + /* + * Return the size of the next datagram. + */ + struct vmci_datagram_queue_entry *next_entry; + + list_item = context->datagram_queue.next; + next_entry = + list_entry(list_item, struct vmci_datagram_queue_entry, + list_item); + + /* + * The following size_t -> int truncation is fine as + * the maximum size of a (routable) datagram is 68KB. + */ + rv = (int)next_entry->dg_size; + } + spin_unlock(&context->lock); + + /* Caller must free datagram. */ + *dg = dq_entry->dg; + dq_entry->dg = NULL; + kfree(dq_entry); + + return rv; +} + +/* + * Reverts actions set up by vmci_setup_notify(). Unmaps and unlocks the + * page mapped/locked by vmci_setup_notify(). 
+ */ +void vmci_ctx_unset_notify(struct vmci_ctx *context) +{ + struct page *notify_page; + + spin_lock(&context->lock); + + notify_page = context->notify_page; + context->notify = &ctx_dummy_notify; + context->notify_page = NULL; + + spin_unlock(&context->lock); + + if (notify_page) { + kunmap(notify_page); + put_page(notify_page); + } +} + +/* + * Add remote_cid to the list of contexts the current context wants + * notifications from/about. + */ +int vmci_ctx_add_notification(u32 context_id, u32 remote_cid) +{ + struct vmci_ctx *context; + struct vmci_handle_list *notifier, *n; + int result; + bool exists = false; + + context = vmci_ctx_get(context_id); + if (!context) + return VMCI_ERROR_NOT_FOUND; + + if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(remote_cid)) { + pr_devel("Context removed notifications for other VMs not supported (src=0x%x, remote=0x%x)\n", + context_id, remote_cid); + result = VMCI_ERROR_DST_UNREACHABLE; + goto out; + } + + if (context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) { + result = VMCI_ERROR_NO_ACCESS; + goto out; + } + + notifier = kmalloc(sizeof(struct vmci_handle_list), GFP_KERNEL); + if (!notifier) { + result = VMCI_ERROR_NO_MEM; + goto out; + } + + INIT_LIST_HEAD(&notifier->node); + notifier->handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER); + + spin_lock(&context->lock); + + list_for_each_entry(n, &context->notifier_list, node) { + if (vmci_handle_is_equal(n->handle, notifier->handle)) { + exists = true; + break; + } + } + + if (exists) { + kfree(notifier); + result = VMCI_ERROR_ALREADY_EXISTS; + } else { + list_add_tail_rcu(&notifier->node, &context->notifier_list); + context->n_notifiers++; + result = VMCI_SUCCESS; + } + + spin_unlock(&context->lock); + + out: + vmci_ctx_put(context); + return result; +} + +/* + * Remove remote_cid from current context's list of contexts it is + * interested in getting notifications from/about. + */ +int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid) +{ + struct vmci_ctx *context; + struct vmci_handle_list *notifier, *tmp; + struct vmci_handle handle; + bool found = false; + + context = vmci_ctx_get(context_id); + if (!context) + return VMCI_ERROR_NOT_FOUND; + + handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER); + + spin_lock(&context->lock); + list_for_each_entry_safe(notifier, tmp, + &context->notifier_list, node) { + if (vmci_handle_is_equal(notifier->handle, handle)) { + list_del_rcu(&notifier->node); + context->n_notifiers--; + found = true; + break; + } + } + spin_unlock(&context->lock); + + if (found) { + synchronize_rcu(); + kfree(notifier); + } + + vmci_ctx_put(context); + + return found ? 
VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND; +} + +static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context, + u32 *buf_size, void **pbuf) +{ + u32 *notifiers; + size_t data_size; + struct vmci_handle_list *entry; + int i = 0; + + if (context->n_notifiers == 0) { + *buf_size = 0; + *pbuf = NULL; + return VMCI_SUCCESS; + } + + data_size = context->n_notifiers * sizeof(*notifiers); + if (*buf_size < data_size) { + *buf_size = data_size; + return VMCI_ERROR_MORE_DATA; + } + + notifiers = kmalloc(data_size, GFP_ATOMIC); /* FIXME: want GFP_KERNEL */ + if (!notifiers) + return VMCI_ERROR_NO_MEM; + + list_for_each_entry(entry, &context->notifier_list, node) + notifiers[i++] = entry->handle.context; + + *buf_size = data_size; + *pbuf = notifiers; + return VMCI_SUCCESS; +} + +static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context, + u32 *buf_size, void **pbuf) +{ + struct dbell_cpt_state *dbells; + size_t n_doorbells; + int i; + + n_doorbells = vmci_handle_arr_get_size(context->doorbell_array); + if (n_doorbells > 0) { + size_t data_size = n_doorbells * sizeof(*dbells); + if (*buf_size < data_size) { + *buf_size = data_size; + return VMCI_ERROR_MORE_DATA; + } + + dbells = kmalloc(data_size, GFP_ATOMIC); + if (!dbells) + return VMCI_ERROR_NO_MEM; + + for (i = 0; i < n_doorbells; i++) + dbells[i].handle = vmci_handle_arr_get_entry( + context->doorbell_array, i); + + *buf_size = data_size; + *pbuf = dbells; + } else { + *buf_size = 0; + *pbuf = NULL; + } + + return VMCI_SUCCESS; +} + +/* + * Get current context's checkpoint state of given type. + */ +int vmci_ctx_get_chkpt_state(u32 context_id, + u32 cpt_type, + u32 *buf_size, + void **pbuf) +{ + struct vmci_ctx *context; + int result; + + context = vmci_ctx_get(context_id); + if (!context) + return VMCI_ERROR_NOT_FOUND; + + spin_lock(&context->lock); + + switch (cpt_type) { + case VMCI_NOTIFICATION_CPT_STATE: + result = vmci_ctx_get_chkpt_notifiers(context, buf_size, pbuf); + break; + + case VMCI_WELLKNOWN_CPT_STATE: + /* + * For compatibility with VMX'en with VM to VM communication, we + * always return zero wellknown handles. + */ + + *buf_size = 0; + *pbuf = NULL; + result = VMCI_SUCCESS; + break; + + case VMCI_DOORBELL_CPT_STATE: + result = vmci_ctx_get_chkpt_doorbells(context, buf_size, pbuf); + break; + + default: + pr_devel("Invalid cpt state (type=%d)\n", cpt_type); + result = VMCI_ERROR_INVALID_ARGS; + break; + } + + spin_unlock(&context->lock); + vmci_ctx_put(context); + + return result; +} + +/* + * Set current context's checkpoint state of given type. + */ +int vmci_ctx_set_chkpt_state(u32 context_id, + u32 cpt_type, + u32 buf_size, + void *cpt_buf) +{ + u32 i; + u32 current_id; + int result = VMCI_SUCCESS; + u32 num_ids = buf_size / sizeof(u32); + + if (cpt_type == VMCI_WELLKNOWN_CPT_STATE && num_ids > 0) { + /* + * We would end up here if VMX with VM to VM communication + * attempts to restore a checkpoint with wellknown handles. 
+ */ + pr_warn("Attempt to restore checkpoint with obsolete wellknown handles\n"); + return VMCI_ERROR_OBSOLETE; + } + + if (cpt_type != VMCI_NOTIFICATION_CPT_STATE) { + pr_devel("Invalid cpt state (type=%d)\n", cpt_type); + return VMCI_ERROR_INVALID_ARGS; + } + + for (i = 0; i < num_ids && result == VMCI_SUCCESS; i++) { + current_id = ((u32 *)cpt_buf)[i]; + result = vmci_ctx_add_notification(context_id, current_id); + if (result != VMCI_SUCCESS) + break; + } + if (result != VMCI_SUCCESS) + pr_devel("Failed to set cpt state (type=%d) (error=%d)\n", + cpt_type, result); + + return result; +} + +/* + * Retrieves the specified context's pending notifications in the + * form of a handle array. The handle arrays returned are the + * actual data - not a copy and should not be modified by the + * caller. They must be released using + * vmci_ctx_rcv_notifications_release. + */ +int vmci_ctx_rcv_notifications_get(u32 context_id, + struct vmci_handle_arr **db_handle_array, + struct vmci_handle_arr **qp_handle_array) +{ + struct vmci_ctx *context; + int result = VMCI_SUCCESS; + + context = vmci_ctx_get(context_id); + if (context == NULL) + return VMCI_ERROR_NOT_FOUND; + + spin_lock(&context->lock); + + *db_handle_array = context->pending_doorbell_array; + context->pending_doorbell_array = vmci_handle_arr_create(0); + if (!context->pending_doorbell_array) { + context->pending_doorbell_array = *db_handle_array; + *db_handle_array = NULL; + result = VMCI_ERROR_NO_MEM; + } + *qp_handle_array = NULL; + + spin_unlock(&context->lock); + vmci_ctx_put(context); + + return result; +} + +/* + * Releases handle arrays with pending notifications previously + * retrieved using vmci_ctx_rcv_notifications_get. If the + * notifications were not successfully handed over to the guest, + * success must be false. + */ +void vmci_ctx_rcv_notifications_release(u32 context_id, + struct vmci_handle_arr *db_handle_array, + struct vmci_handle_arr *qp_handle_array, + bool success) +{ + struct vmci_ctx *context = vmci_ctx_get(context_id); + + spin_lock(&context->lock); + if (!success) { + struct vmci_handle handle; + + /* + * New notifications may have been added while we were not + * holding the context lock, so we transfer any new pending + * doorbell notifications to the old array, and reinstate the + * old array. + */ + + handle = vmci_handle_arr_remove_tail( + context->pending_doorbell_array); + while (!vmci_handle_is_invalid(handle)) { + if (!vmci_handle_arr_has_entry(db_handle_array, + handle)) { + vmci_handle_arr_append_entry( + &db_handle_array, handle); + } + handle = vmci_handle_arr_remove_tail( + context->pending_doorbell_array); + } + vmci_handle_arr_destroy(context->pending_doorbell_array); + context->pending_doorbell_array = db_handle_array; + db_handle_array = NULL; + } else { + ctx_clear_notify_call(context); + } + spin_unlock(&context->lock); + vmci_ctx_put(context); + + if (db_handle_array) + vmci_handle_arr_destroy(db_handle_array); + + if (qp_handle_array) + vmci_handle_arr_destroy(qp_handle_array); +} + +/* + * Registers that a new doorbell handle has been allocated by the + * context. Only doorbell handles registered can be notified. 
+ */ +int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle) +{ + struct vmci_ctx *context; + int result; + + if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle)) + return VMCI_ERROR_INVALID_ARGS; + + context = vmci_ctx_get(context_id); + if (context == NULL) + return VMCI_ERROR_NOT_FOUND; + + spin_lock(&context->lock); + if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) { + vmci_handle_arr_append_entry(&context->doorbell_array, handle); + result = VMCI_SUCCESS; + } else { + result = VMCI_ERROR_DUPLICATE_ENTRY; + } + + spin_unlock(&context->lock); + vmci_ctx_put(context); + + return result; +} + +/* + * Unregisters a doorbell handle that was previously registered + * with vmci_ctx_dbell_create. + */ +int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle) +{ + struct vmci_ctx *context; + struct vmci_handle removed_handle; + + if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle)) + return VMCI_ERROR_INVALID_ARGS; + + context = vmci_ctx_get(context_id); + if (context == NULL) + return VMCI_ERROR_NOT_FOUND; + + spin_lock(&context->lock); + removed_handle = + vmci_handle_arr_remove_entry(context->doorbell_array, handle); + vmci_handle_arr_remove_entry(context->pending_doorbell_array, handle); + spin_unlock(&context->lock); + + vmci_ctx_put(context); + + return vmci_handle_is_invalid(removed_handle) ? + VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS; +} + +/* + * Unregisters all doorbell handles that were previously + * registered with vmci_ctx_dbell_create. + */ +int vmci_ctx_dbell_destroy_all(u32 context_id) +{ + struct vmci_ctx *context; + struct vmci_handle handle; + + if (context_id == VMCI_INVALID_ID) + return VMCI_ERROR_INVALID_ARGS; + + context = vmci_ctx_get(context_id); + if (context == NULL) + return VMCI_ERROR_NOT_FOUND; + + spin_lock(&context->lock); + do { + struct vmci_handle_arr *arr = context->doorbell_array; + handle = vmci_handle_arr_remove_tail(arr); + } while (!vmci_handle_is_invalid(handle)); + do { + struct vmci_handle_arr *arr = context->pending_doorbell_array; + handle = vmci_handle_arr_remove_tail(arr); + } while (!vmci_handle_is_invalid(handle)); + spin_unlock(&context->lock); + + vmci_ctx_put(context); + + return VMCI_SUCCESS; +} + +/* + * Registers a notification of a doorbell handle initiated by the + * specified source context. The notification of doorbells are + * subject to the same isolation rules as datagram delivery. To + * allow host side senders of notifications a finer granularity + * of sender rights than those assigned to the sending context + * itself, the host context is required to specify a different + * set of privilege flags that will override the privileges of + * the source context. + */ +int vmci_ctx_notify_dbell(u32 src_cid, + struct vmci_handle handle, + u32 src_priv_flags) +{ + struct vmci_ctx *dst_context; + int result; + + if (vmci_handle_is_invalid(handle)) + return VMCI_ERROR_INVALID_ARGS; + + /* Get the target VM's VMCI context. 
*/ + dst_context = vmci_ctx_get(handle.context); + if (!dst_context) { + pr_devel("Invalid context (ID=0x%x)\n", handle.context); + return VMCI_ERROR_NOT_FOUND; + } + + if (src_cid != handle.context) { + u32 dst_priv_flags; + + if (VMCI_CONTEXT_IS_VM(src_cid) && + VMCI_CONTEXT_IS_VM(handle.context)) { + pr_devel("Doorbell notification from VM to VM not supported (src=0x%x, dst=0x%x)\n", + src_cid, handle.context); + result = VMCI_ERROR_DST_UNREACHABLE; + goto out; + } + + result = vmci_dbell_get_priv_flags(handle, &dst_priv_flags); + if (result < VMCI_SUCCESS) { + pr_warn("Failed to get privilege flags for destination (handle=0x%x:0x%x)\n", + handle.context, handle.resource); + goto out; + } + + if (src_cid != VMCI_HOST_CONTEXT_ID || + src_priv_flags == VMCI_NO_PRIVILEGE_FLAGS) { + src_priv_flags = vmci_context_get_priv_flags(src_cid); + } + + if (vmci_deny_interaction(src_priv_flags, dst_priv_flags)) { + result = VMCI_ERROR_NO_ACCESS; + goto out; + } + } + + if (handle.context == VMCI_HOST_CONTEXT_ID) { + result = vmci_dbell_host_context_notify(src_cid, handle); + } else { + spin_lock(&dst_context->lock); + + if (!vmci_handle_arr_has_entry(dst_context->doorbell_array, + handle)) { + result = VMCI_ERROR_NOT_FOUND; + } else { + if (!vmci_handle_arr_has_entry( + dst_context->pending_doorbell_array, + handle)) { + vmci_handle_arr_append_entry( + &dst_context->pending_doorbell_array, + handle); + + ctx_signal_notify(dst_context); + wake_up(&dst_context->host_context.wait_queue); + + } + result = VMCI_SUCCESS; + } + spin_unlock(&dst_context->lock); + } + + out: + vmci_ctx_put(dst_context); + + return result; +} + +bool vmci_ctx_supports_host_qp(struct vmci_ctx *context) +{ + return context && context->user_version >= VMCI_VERSION_HOSTQP; +} + +/* + * Registers that a new queue pair handle has been allocated by + * the context. + */ +int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle) +{ + int result; + + if (context == NULL || vmci_handle_is_invalid(handle)) + return VMCI_ERROR_INVALID_ARGS; + + if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) { + vmci_handle_arr_append_entry(&context->queue_pair_array, + handle); + result = VMCI_SUCCESS; + } else { + result = VMCI_ERROR_DUPLICATE_ENTRY; + } + + return result; +} + +/* + * Unregisters a queue pair handle that was previously registered + * with vmci_ctx_qp_create. + */ +int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle) +{ + struct vmci_handle hndl; + + if (context == NULL || vmci_handle_is_invalid(handle)) + return VMCI_ERROR_INVALID_ARGS; + + hndl = vmci_handle_arr_remove_entry(context->queue_pair_array, handle); + + return vmci_handle_is_invalid(hndl) ? + VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS; +} + +/* + * Determines whether a given queue pair handle is registered + * with the given context. + */ +bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle) +{ + if (context == NULL || vmci_handle_is_invalid(handle)) + return false; + + return vmci_handle_arr_has_entry(context->queue_pair_array, handle); +} + +/* + * vmci_context_get_priv_flags() - Retrieve privilege flags. + * @context_id: The context ID of the VMCI context. + * + * Retrieves privilege flags of the given VMCI context ID. 
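A rough sketch of how these flags are consumed. vmci_deny_interaction() is the driver-internal helper from vmci_context.h, and the two context IDs are hypothetical:

static bool example_may_interact(u32 src_cid, u32 dst_cid)
{
	u32 src = vmci_context_get_priv_flags(src_cid);
	u32 dst = vmci_context_get_priv_flags(dst_cid);

	/* Interaction is denied only when one side is RESTRICTED
	 * and the other side is not TRUSTED. */
	return !vmci_deny_interaction(src, dst);
}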
+ */ +u32 vmci_context_get_priv_flags(u32 context_id) +{ + if (vmci_host_code_active()) { + u32 flags; + struct vmci_ctx *context; + + context = vmci_ctx_get(context_id); + if (!context) + return VMCI_LEAST_PRIVILEGE_FLAGS; + + flags = context->priv_flags; + vmci_ctx_put(context); + return flags; + } + return VMCI_NO_PRIVILEGE_FLAGS; +} +EXPORT_SYMBOL_GPL(vmci_context_get_priv_flags); + +/* + * vmci_is_context_owner() - Determimnes if user is the context owner + * @context_id: The context ID of the VMCI context. + * @uid: The host user id (real kernel value). + * + * Determines whether a given UID is the owner of given VMCI context. + */ +bool vmci_is_context_owner(u32 context_id, kuid_t uid) +{ + bool is_owner = false; + + if (vmci_host_code_active()) { + struct vmci_ctx *context = vmci_ctx_get(context_id); + if (context) { + if (context->cred) + is_owner = uid_eq(context->cred->uid, uid); + vmci_ctx_put(context); + } + } + + return is_owner; +} +EXPORT_SYMBOL_GPL(vmci_is_context_owner); diff --git a/drivers/misc/vmw_vmci/vmci_context.h b/drivers/misc/vmw_vmci/vmci_context.h new file mode 100644 index 000000000000..24a88e68a1e6 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_context.h @@ -0,0 +1,182 @@ +/* + * VMware VMCI driver (vmciContext.h) + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#ifndef _VMCI_CONTEXT_H_ +#define _VMCI_CONTEXT_H_ + +#include <linux/vmw_vmci_defs.h> +#include <linux/atomic.h> +#include <linux/kref.h> +#include <linux/types.h> +#include <linux/wait.h> + +#include "vmci_handle_array.h" +#include "vmci_datagram.h" + +/* Used to determine what checkpoint state to get and set. */ +enum { + VMCI_NOTIFICATION_CPT_STATE = 1, + VMCI_WELLKNOWN_CPT_STATE = 2, + VMCI_DG_OUT_STATE = 3, + VMCI_DG_IN_STATE = 4, + VMCI_DG_IN_SIZE_STATE = 5, + VMCI_DOORBELL_CPT_STATE = 6, +}; + +/* Host specific struct used for signalling */ +struct vmci_host { + wait_queue_head_t wait_queue; +}; + +struct vmci_handle_list { + struct list_head node; + struct vmci_handle handle; +}; + +struct vmci_ctx { + struct list_head list_item; /* For global VMCI list. */ + u32 cid; + struct kref kref; + struct list_head datagram_queue; /* Head of per VM queue. */ + u32 pending_datagrams; + size_t datagram_queue_size; /* Size of datagram queue in bytes. */ + + /* + * Version of the code that created + * this context; e.g., VMX. + */ + int user_version; + spinlock_t lock; /* Locks callQueue and handle_arrays. */ + + /* + * queue_pairs attached to. The array of + * handles for queue pairs is accessed + * from the code for QP API, and there + * it is protected by the QP lock. It + * is also accessed from the context + * clean up path, which does not + * require a lock. VMCILock is not + * used to protect the QP array field. + */ + struct vmci_handle_arr *queue_pair_array; + + /* Doorbells created by context. */ + struct vmci_handle_arr *doorbell_array; + + /* Doorbells pending for context. */ + struct vmci_handle_arr *pending_doorbell_array; + + /* Contexts current context is subscribing to. 
*/ + struct list_head notifier_list; + unsigned int n_notifiers; + + struct vmci_host host_context; + u32 priv_flags; + + const struct cred *cred; + bool *notify; /* Notify flag pointer - hosted only. */ + struct page *notify_page; /* Page backing the notify UVA. */ +}; + +/* VMCINotifyAddRemoveInfo: Used to add/remove remote context notifications. */ +struct vmci_ctx_info { + u32 remote_cid; + int result; +}; + +/* VMCICptBufInfo: Used to set/get current context's checkpoint state. */ +struct vmci_ctx_chkpt_buf_info { + u64 cpt_buf; + u32 cpt_type; + u32 buf_size; + s32 result; + u32 _pad; +}; + +/* + * VMCINotificationReceiveInfo: Used to recieve pending notifications + * for doorbells and queue pairs. + */ +struct vmci_ctx_notify_recv_info { + u64 db_handle_buf_uva; + u64 db_handle_buf_size; + u64 qp_handle_buf_uva; + u64 qp_handle_buf_size; + s32 result; + u32 _pad; +}; + +/* + * Utilility function that checks whether two entities are allowed + * to interact. If one of them is restricted, the other one must + * be trusted. + */ +static inline bool vmci_deny_interaction(u32 part_one, u32 part_two) +{ + return ((part_one & VMCI_PRIVILEGE_FLAG_RESTRICTED) && + !(part_two & VMCI_PRIVILEGE_FLAG_TRUSTED)) || + ((part_two & VMCI_PRIVILEGE_FLAG_RESTRICTED) && + !(part_one & VMCI_PRIVILEGE_FLAG_TRUSTED)); +} + +struct vmci_ctx *vmci_ctx_create(u32 cid, u32 flags, + uintptr_t event_hnd, int version, + const struct cred *cred); +void vmci_ctx_destroy(struct vmci_ctx *context); + +bool vmci_ctx_supports_host_qp(struct vmci_ctx *context); +int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg); +int vmci_ctx_dequeue_datagram(struct vmci_ctx *context, + size_t *max_size, struct vmci_datagram **dg); +int vmci_ctx_pending_datagrams(u32 cid, u32 *pending); +struct vmci_ctx *vmci_ctx_get(u32 cid); +void vmci_ctx_put(struct vmci_ctx *context); +bool vmci_ctx_exists(u32 cid); + +int vmci_ctx_add_notification(u32 context_id, u32 remote_cid); +int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid); +int vmci_ctx_get_chkpt_state(u32 context_id, u32 cpt_type, + u32 *num_cids, void **cpt_buf_ptr); +int vmci_ctx_set_chkpt_state(u32 context_id, u32 cpt_type, + u32 num_cids, void *cpt_buf); + +int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle); +int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle); +bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle); + +void vmci_ctx_check_signal_notify(struct vmci_ctx *context); +void vmci_ctx_unset_notify(struct vmci_ctx *context); + +int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle); +int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle); +int vmci_ctx_dbell_destroy_all(u32 context_id); +int vmci_ctx_notify_dbell(u32 cid, struct vmci_handle handle, + u32 src_priv_flags); + +int vmci_ctx_rcv_notifications_get(u32 context_id, struct vmci_handle_arr + **db_handle_array, struct vmci_handle_arr + **qp_handle_array); +void vmci_ctx_rcv_notifications_release(u32 context_id, struct vmci_handle_arr + *db_handle_array, struct vmci_handle_arr + *qp_handle_array, bool success); + +static inline u32 vmci_ctx_get_id(struct vmci_ctx *context) +{ + if (!context) + return VMCI_INVALID_ID; + return context->cid; +} + +#endif /* _VMCI_CONTEXT_H_ */ diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c new file mode 100644 index 000000000000..ed5c433cd493 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_datagram.c @@ -0,0 +1,500 @@ 
+/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/bug.h> + +#include "vmci_datagram.h" +#include "vmci_resource.h" +#include "vmci_context.h" +#include "vmci_driver.h" +#include "vmci_event.h" +#include "vmci_route.h" + +/* + * struct datagram_entry describes the datagram entity. It is used for datagram + * entities created only on the host. + */ +struct datagram_entry { + struct vmci_resource resource; + u32 flags; + bool run_delayed; + vmci_datagram_recv_cb recv_cb; + void *client_data; + u32 priv_flags; +}; + +struct delayed_datagram_info { + struct datagram_entry *entry; + struct vmci_datagram msg; + struct work_struct work; + bool in_dg_host_queue; +}; + +/* Number of in-flight host->host datagrams */ +static atomic_t delayed_dg_host_queue_size = ATOMIC_INIT(0); + +/* + * Create a datagram entry given a handle pointer. + */ +static int dg_create_handle(u32 resource_id, + u32 flags, + u32 priv_flags, + vmci_datagram_recv_cb recv_cb, + void *client_data, struct vmci_handle *out_handle) +{ + int result; + u32 context_id; + struct vmci_handle handle; + struct datagram_entry *entry; + + if ((flags & VMCI_FLAG_WELLKNOWN_DG_HND) != 0) + return VMCI_ERROR_INVALID_ARGS; + + if ((flags & VMCI_FLAG_ANYCID_DG_HND) != 0) { + context_id = VMCI_INVALID_ID; + } else { + context_id = vmci_get_context_id(); + if (context_id == VMCI_INVALID_ID) + return VMCI_ERROR_NO_RESOURCES; + } + + handle = vmci_make_handle(context_id, resource_id); + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + pr_warn("Failed allocating memory for datagram entry\n"); + return VMCI_ERROR_NO_MEM; + } + + entry->run_delayed = (flags & VMCI_FLAG_DG_DELAYED_CB) ? true : false; + entry->flags = flags; + entry->recv_cb = recv_cb; + entry->client_data = client_data; + entry->priv_flags = priv_flags; + + /* Make datagram resource live. */ + result = vmci_resource_add(&entry->resource, + VMCI_RESOURCE_TYPE_DATAGRAM, + handle); + if (result != VMCI_SUCCESS) { + pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n", + handle.context, handle.resource, result); + kfree(entry); + return result; + } + + *out_handle = vmci_resource_handle(&entry->resource); + return VMCI_SUCCESS; +} + +/* + * Internal utility function with the same purpose as + * vmci_datagram_get_priv_flags that also takes a context_id. 
+ */ +static int vmci_datagram_get_priv_flags(u32 context_id, + struct vmci_handle handle, + u32 *priv_flags) +{ + if (context_id == VMCI_INVALID_ID) + return VMCI_ERROR_INVALID_ARGS; + + if (context_id == VMCI_HOST_CONTEXT_ID) { + struct datagram_entry *src_entry; + struct vmci_resource *resource; + + resource = vmci_resource_by_handle(handle, + VMCI_RESOURCE_TYPE_DATAGRAM); + if (!resource) + return VMCI_ERROR_INVALID_ARGS; + + src_entry = container_of(resource, struct datagram_entry, + resource); + *priv_flags = src_entry->priv_flags; + vmci_resource_put(resource); + } else if (context_id == VMCI_HYPERVISOR_CONTEXT_ID) + *priv_flags = VMCI_MAX_PRIVILEGE_FLAGS; + else + *priv_flags = vmci_context_get_priv_flags(context_id); + + return VMCI_SUCCESS; +} + +/* + * Calls the specified callback in a delayed context. + */ +static void dg_delayed_dispatch(struct work_struct *work) +{ + struct delayed_datagram_info *dg_info = + container_of(work, struct delayed_datagram_info, work); + + dg_info->entry->recv_cb(dg_info->entry->client_data, &dg_info->msg); + + vmci_resource_put(&dg_info->entry->resource); + + if (dg_info->in_dg_host_queue) + atomic_dec(&delayed_dg_host_queue_size); + + kfree(dg_info); +} + +/* + * Dispatch datagram as a host, to the host, or other vm context. This + * function cannot dispatch to hypervisor context handlers. This should + * have been handled before we get here by vmci_datagram_dispatch. + * Returns number of bytes sent on success, error code otherwise. + */ +static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg) +{ + int retval; + size_t dg_size; + u32 src_priv_flags; + + dg_size = VMCI_DG_SIZE(dg); + + /* Host cannot send to the hypervisor. */ + if (dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID) + return VMCI_ERROR_DST_UNREACHABLE; + + /* Check that source handle matches sending context. */ + if (dg->src.context != context_id) { + pr_devel("Sender context (ID=0x%x) is not owner of src datagram entry (handle=0x%x:0x%x)\n", + context_id, dg->src.context, dg->src.resource); + return VMCI_ERROR_NO_ACCESS; + } + + /* Get hold of privileges of sending endpoint. */ + retval = vmci_datagram_get_priv_flags(context_id, dg->src, + &src_priv_flags); + if (retval != VMCI_SUCCESS) { + pr_warn("Couldn't get privileges (handle=0x%x:0x%x)\n", + dg->src.context, dg->src.resource); + return retval; + } + + /* Determine if we should route to host or guest destination. */ + if (dg->dst.context == VMCI_HOST_CONTEXT_ID) { + /* Route to host datagram entry. */ + struct datagram_entry *dst_entry; + struct vmci_resource *resource; + + if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID && + dg->dst.resource == VMCI_EVENT_HANDLER) { + return vmci_event_dispatch(dg); + } + + resource = vmci_resource_by_handle(dg->dst, + VMCI_RESOURCE_TYPE_DATAGRAM); + if (!resource) { + pr_devel("Sending to invalid destination (handle=0x%x:0x%x)\n", + dg->dst.context, dg->dst.resource); + return VMCI_ERROR_INVALID_RESOURCE; + } + dst_entry = container_of(resource, struct datagram_entry, + resource); + if (vmci_deny_interaction(src_priv_flags, + dst_entry->priv_flags)) { + vmci_resource_put(resource); + return VMCI_ERROR_NO_ACCESS; + } + + /* + * If a VMCI datagram destined for the host is also sent by the + * host, we always run it delayed. This ensures that no locks + * are held when the datagram callback runs. 
+ */ + if (dst_entry->run_delayed || + dg->src.context == VMCI_HOST_CONTEXT_ID) { + struct delayed_datagram_info *dg_info; + + if (atomic_add_return(1, &delayed_dg_host_queue_size) + == VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE) { + atomic_dec(&delayed_dg_host_queue_size); + vmci_resource_put(resource); + return VMCI_ERROR_NO_MEM; + } + + dg_info = kmalloc(sizeof(*dg_info) + + (size_t) dg->payload_size, GFP_ATOMIC); + if (!dg_info) { + atomic_dec(&delayed_dg_host_queue_size); + vmci_resource_put(resource); + return VMCI_ERROR_NO_MEM; + } + + dg_info->in_dg_host_queue = true; + dg_info->entry = dst_entry; + memcpy(&dg_info->msg, dg, dg_size); + + INIT_WORK(&dg_info->work, dg_delayed_dispatch); + schedule_work(&dg_info->work); + retval = VMCI_SUCCESS; + + } else { + retval = dst_entry->recv_cb(dst_entry->client_data, dg); + vmci_resource_put(resource); + if (retval < VMCI_SUCCESS) + return retval; + } + } else { + /* Route to destination VM context. */ + struct vmci_datagram *new_dg; + + if (context_id != dg->dst.context) { + if (vmci_deny_interaction(src_priv_flags, + vmci_context_get_priv_flags + (dg->dst.context))) { + return VMCI_ERROR_NO_ACCESS; + } else if (VMCI_CONTEXT_IS_VM(context_id)) { + /* + * If the sending context is a VM, it + * cannot reach another VM. + */ + + pr_devel("Datagram communication between VMs not supported (src=0x%x, dst=0x%x)\n", + context_id, dg->dst.context); + return VMCI_ERROR_DST_UNREACHABLE; + } + } + + /* We make a copy to enqueue. */ + new_dg = kmalloc(dg_size, GFP_KERNEL); + if (new_dg == NULL) + return VMCI_ERROR_NO_MEM; + + memcpy(new_dg, dg, dg_size); + retval = vmci_ctx_enqueue_datagram(dg->dst.context, new_dg); + if (retval < VMCI_SUCCESS) { + kfree(new_dg); + return retval; + } + } + + /* + * We currently truncate the size to signed 32 bits. This doesn't + * matter for this handler as it only support 4Kb messages. + */ + return (int)dg_size; +} + +/* + * Dispatch datagram as a guest, down through the VMX and potentially to + * the host. + * Returns number of bytes sent on success, error code otherwise. + */ +static int dg_dispatch_as_guest(struct vmci_datagram *dg) +{ + int retval; + struct vmci_resource *resource; + + resource = vmci_resource_by_handle(dg->src, + VMCI_RESOURCE_TYPE_DATAGRAM); + if (!resource) + return VMCI_ERROR_NO_HANDLE; + + retval = vmci_send_datagram(dg); + vmci_resource_put(resource); + return retval; +} + +/* + * Dispatch datagram. This will determine the routing for the datagram + * and dispatch it accordingly. + * Returns number of bytes sent on success, error code otherwise. 
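As a hedged illustration of these routing entry points, a sender built on the exported API might look roughly like this; the resource ID and peer context are made up, and vmci_datagram_send() (shown later in this file) simply feeds vmci_datagram_dispatch():

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/string.h>

#define EXAMPLE_RESOURCE_ID 0x42	/* hypothetical peer endpoint */

static int example_send_ping(struct vmci_handle src, u32 dst_cid)
{
	struct {
		struct vmci_datagram hdr;
		u8 payload[4];
	} msg;
	int sent;

	msg.hdr.dst = vmci_make_handle(dst_cid, EXAMPLE_RESOURCE_ID);
	msg.hdr.src = src;		/* must be owned by the sender */
	msg.hdr.payload_size = sizeof(msg.payload);
	memcpy(msg.payload, "ping", sizeof(msg.payload));

	/* A positive return is VMCI_DG_SIZE(&msg.hdr), i.e. header plus
	 * payload bytes; anything below VMCI_SUCCESS is an error. */
	sent = vmci_datagram_send(&msg.hdr);
	return sent < VMCI_SUCCESS ? sent : 0;
}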
+ */ +int vmci_datagram_dispatch(u32 context_id, + struct vmci_datagram *dg, bool from_guest) +{ + int retval; + enum vmci_route route; + + BUILD_BUG_ON(sizeof(struct vmci_datagram) != 24); + + if (VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE) { + pr_devel("Payload (size=%llu bytes) too big to send\n", + (unsigned long long)dg->payload_size); + return VMCI_ERROR_INVALID_ARGS; + } + + retval = vmci_route(&dg->src, &dg->dst, from_guest, &route); + if (retval < VMCI_SUCCESS) { + pr_devel("Failed to route datagram (src=0x%x, dst=0x%x, err=%d)\n", + dg->src.context, dg->dst.context, retval); + return retval; + } + + if (VMCI_ROUTE_AS_HOST == route) { + if (VMCI_INVALID_ID == context_id) + context_id = VMCI_HOST_CONTEXT_ID; + return dg_dispatch_as_host(context_id, dg); + } + + if (VMCI_ROUTE_AS_GUEST == route) + return dg_dispatch_as_guest(dg); + + pr_warn("Unknown route (%d) for datagram\n", route); + return VMCI_ERROR_DST_UNREACHABLE; +} + +/* + * Invoke the handler for the given datagram. This is intended to be + * called only when acting as a guest and receiving a datagram from the + * virtual device. + */ +int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg) +{ + struct vmci_resource *resource; + struct datagram_entry *dst_entry; + + resource = vmci_resource_by_handle(dg->dst, + VMCI_RESOURCE_TYPE_DATAGRAM); + if (!resource) { + pr_devel("destination (handle=0x%x:0x%x) doesn't exist\n", + dg->dst.context, dg->dst.resource); + return VMCI_ERROR_NO_HANDLE; + } + + dst_entry = container_of(resource, struct datagram_entry, resource); + if (dst_entry->run_delayed) { + struct delayed_datagram_info *dg_info; + + dg_info = kmalloc(sizeof(*dg_info) + (size_t)dg->payload_size, + GFP_ATOMIC); + if (!dg_info) { + vmci_resource_put(resource); + return VMCI_ERROR_NO_MEM; + } + + dg_info->in_dg_host_queue = false; + dg_info->entry = dst_entry; + memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg)); + + INIT_WORK(&dg_info->work, dg_delayed_dispatch); + schedule_work(&dg_info->work); + } else { + dst_entry->recv_cb(dst_entry->client_data, dg); + vmci_resource_put(resource); + } + + return VMCI_SUCCESS; +} + +/* + * vmci_datagram_create_handle_priv() - Create host context datagram endpoint + * @resource_id: The resource ID. + * @flags: Datagram Flags. + * @priv_flags: Privilege Flags. + * @recv_cb: Callback when receiving datagrams. + * @client_data: Pointer for a datagram_entry struct + * @out_handle: vmci_handle that is populated as a result of this function. + * + * Creates a host context datagram endpoint and returns a handle to it. + */ +int vmci_datagram_create_handle_priv(u32 resource_id, + u32 flags, + u32 priv_flags, + vmci_datagram_recv_cb recv_cb, + void *client_data, + struct vmci_handle *out_handle) +{ + if (out_handle == NULL) + return VMCI_ERROR_INVALID_ARGS; + + if (recv_cb == NULL) { + pr_devel("Client callback needed when creating datagram\n"); + return VMCI_ERROR_INVALID_ARGS; + } + + if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS) + return VMCI_ERROR_INVALID_ARGS; + + return dg_create_handle(resource_id, flags, priv_flags, recv_cb, + client_data, out_handle); +} +EXPORT_SYMBOL_GPL(vmci_datagram_create_handle_priv); + +/* + * vmci_datagram_create_handle() - Create host context datagram endpoint + * @resource_id: Resource ID. + * @flags: Datagram Flags. + * @recv_cb: Callback when receiving datagrams. + * @client_ata: Pointer for a datagram_entry struct + * @out_handle: vmci_handle that is populated as a result of this function. 
+ * + * Creates a host context datagram endpoint and returns a handle to + * it. Same as vmci_datagram_create_handle_priv without the priviledge + * flags argument. + */ +int vmci_datagram_create_handle(u32 resource_id, + u32 flags, + vmci_datagram_recv_cb recv_cb, + void *client_data, + struct vmci_handle *out_handle) +{ + return vmci_datagram_create_handle_priv( + resource_id, flags, + VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS, + recv_cb, client_data, + out_handle); +} +EXPORT_SYMBOL_GPL(vmci_datagram_create_handle); + +/* + * vmci_datagram_destroy_handle() - Destroys datagram handle + * @handle: vmci_handle to be destroyed and reaped. + * + * Use this function to destroy any datagram handles created by + * vmci_datagram_create_handle{,Priv} functions. + */ +int vmci_datagram_destroy_handle(struct vmci_handle handle) +{ + struct datagram_entry *entry; + struct vmci_resource *resource; + + resource = vmci_resource_by_handle(handle, VMCI_RESOURCE_TYPE_DATAGRAM); + if (!resource) { + pr_devel("Failed to destroy datagram (handle=0x%x:0x%x)\n", + handle.context, handle.resource); + return VMCI_ERROR_NOT_FOUND; + } + + entry = container_of(resource, struct datagram_entry, resource); + + vmci_resource_put(&entry->resource); + vmci_resource_remove(&entry->resource); + kfree(entry); + + return VMCI_SUCCESS; +} +EXPORT_SYMBOL_GPL(vmci_datagram_destroy_handle); + +/* + * vmci_datagram_send() - Send a datagram + * @msg: The datagram to send. + * + * Sends the provided datagram on its merry way. + */ +int vmci_datagram_send(struct vmci_datagram *msg) +{ + if (msg == NULL) + return VMCI_ERROR_INVALID_ARGS; + + return vmci_datagram_dispatch(VMCI_INVALID_ID, msg, false); +} +EXPORT_SYMBOL_GPL(vmci_datagram_send); diff --git a/drivers/misc/vmw_vmci/vmci_datagram.h b/drivers/misc/vmw_vmci/vmci_datagram.h new file mode 100644 index 000000000000..eb4aab7f64ec --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_datagram.h @@ -0,0 +1,52 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#ifndef _VMCI_DATAGRAM_H_ +#define _VMCI_DATAGRAM_H_ + +#include <linux/types.h> +#include <linux/list.h> + +#include "vmci_context.h" + +#define VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE 256 + +/* + * The struct vmci_datagram_queue_entry is a queue header for the in-kernel VMCI + * datagram queues. It is allocated in non-paged memory, as the + * content is accessed while holding a spinlock. The pending datagram + * itself may be allocated from paged memory. We shadow the size of + * the datagram in the non-paged queue entry as this size is used + * while holding the same spinlock as above. + */ +struct vmci_datagram_queue_entry { + struct list_head list_item; /* For queuing. */ + size_t dg_size; /* Size of datagram. */ + struct vmci_datagram *dg; /* Pending datagram. */ +}; + +/* VMCIDatagramSendRecvInfo */ +struct vmci_datagram_snd_rcv_info { + u64 addr; + u32 len; + s32 result; +}; + +/* Datagram API for non-public use. 
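A minimal sketch of the exported endpoint lifecycle above; the resource ID and callback are hypothetical and error handling is trimmed:

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/kernel.h>

static struct vmci_handle example_ep;	/* endpoint handle, filled below */

/* Receive callback. Without VMCI_FLAG_DG_DELAYED_CB it may be invoked
 * directly from the dispatch path, so it must not sleep in that case. */
static int example_recv(void *client_data, struct vmci_datagram *dg)
{
	pr_info("dg from 0x%x:0x%x, %llu payload bytes\n",
		dg->src.context, dg->src.resource,
		(unsigned long long)dg->payload_size);
	return VMCI_SUCCESS;
}

static int example_open(void)
{
	/* 0x2001 is an arbitrary, assumed-free resource ID. */
	return vmci_datagram_create_handle(0x2001,
					   VMCI_FLAG_DG_DELAYED_CB,
					   example_recv, NULL,
					   &example_ep);
}

static void example_close(void)
{
	vmci_datagram_destroy_handle(example_ep);
}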
*/ +int vmci_datagram_dispatch(u32 context_id, struct vmci_datagram *dg, + bool from_guest); +int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg); + +#endif /* _VMCI_DATAGRAM_H_ */ diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c new file mode 100644 index 000000000000..c3e8397f62ed --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_doorbell.c @@ -0,0 +1,604 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/completion.h> +#include <linux/hash.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/slab.h> + +#include "vmci_datagram.h" +#include "vmci_doorbell.h" +#include "vmci_resource.h" +#include "vmci_driver.h" +#include "vmci_route.h" + + +#define VMCI_DOORBELL_INDEX_BITS 6 +#define VMCI_DOORBELL_INDEX_TABLE_SIZE (1 << VMCI_DOORBELL_INDEX_BITS) +#define VMCI_DOORBELL_HASH(_idx) hash_32(_idx, VMCI_DOORBELL_INDEX_BITS) + +/* + * DoorbellEntry describes the a doorbell notification handle allocated by the + * host. + */ +struct dbell_entry { + struct vmci_resource resource; + struct hlist_node node; + struct work_struct work; + vmci_callback notify_cb; + void *client_data; + u32 idx; + u32 priv_flags; + bool run_delayed; + atomic_t active; /* Only used by guest personality */ +}; + +/* The VMCI index table keeps track of currently registered doorbells. */ +struct dbell_index_table { + spinlock_t lock; /* Index table lock */ + struct hlist_head entries[VMCI_DOORBELL_INDEX_TABLE_SIZE]; +}; + +static struct dbell_index_table vmci_doorbell_it = { + .lock = __SPIN_LOCK_UNLOCKED(vmci_doorbell_it.lock), +}; + +/* + * The max_notify_idx is one larger than the currently known bitmap index in + * use, and is used to determine how much of the bitmap needs to be scanned. + */ +static u32 max_notify_idx; + +/* + * The notify_idx_count is used for determining whether there are free entries + * within the bitmap (if notify_idx_count + 1 < max_notify_idx). + */ +static u32 notify_idx_count; + +/* + * The last_notify_idx_reserved is used to track the last index handed out - in + * the case where multiple handles share a notification index, we hand out + * indexes round robin based on last_notify_idx_reserved. + */ +static u32 last_notify_idx_reserved; + +/* This is a one entry cache used to by the index allocation. */ +static u32 last_notify_idx_released = PAGE_SIZE; + + +/* + * Utility function that retrieves the privilege flags associated + * with a given doorbell handle. For guest endpoints, the + * privileges are determined by the context ID, but for host + * endpoints privileges are associated with the complete + * handle. Hypervisor endpoints are not yet supported. 
+ */ +int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags) +{ + if (priv_flags == NULL || handle.context == VMCI_INVALID_ID) + return VMCI_ERROR_INVALID_ARGS; + + if (handle.context == VMCI_HOST_CONTEXT_ID) { + struct dbell_entry *entry; + struct vmci_resource *resource; + + resource = vmci_resource_by_handle(handle, + VMCI_RESOURCE_TYPE_DOORBELL); + if (!resource) + return VMCI_ERROR_NOT_FOUND; + + entry = container_of(resource, struct dbell_entry, resource); + *priv_flags = entry->priv_flags; + vmci_resource_put(resource); + } else if (handle.context == VMCI_HYPERVISOR_CONTEXT_ID) { + /* + * Hypervisor endpoints for notifications are not + * supported (yet). + */ + return VMCI_ERROR_INVALID_ARGS; + } else { + *priv_flags = vmci_context_get_priv_flags(handle.context); + } + + return VMCI_SUCCESS; +} + +/* + * Find doorbell entry by bitmap index. + */ +static struct dbell_entry *dbell_index_table_find(u32 idx) +{ + u32 bucket = VMCI_DOORBELL_HASH(idx); + struct dbell_entry *dbell; + struct hlist_node *node; + + hlist_for_each_entry(dbell, node, &vmci_doorbell_it.entries[bucket], + node) { + if (idx == dbell->idx) + return dbell; + } + + return NULL; +} + +/* + * Add the given entry to the index table. This willi take a reference to the + * entry's resource so that the entry is not deleted before it is removed from + * the * table. + */ +static void dbell_index_table_add(struct dbell_entry *entry) +{ + u32 bucket; + u32 new_notify_idx; + + vmci_resource_get(&entry->resource); + + spin_lock_bh(&vmci_doorbell_it.lock); + + /* + * Below we try to allocate an index in the notification + * bitmap with "not too much" sharing between resources. If we + * use less that the full bitmap, we either add to the end if + * there are no unused flags within the currently used area, + * or we search for unused ones. If we use the full bitmap, we + * allocate the index round robin. + */ + if (max_notify_idx < PAGE_SIZE || notify_idx_count < PAGE_SIZE) { + if (last_notify_idx_released < max_notify_idx && + !dbell_index_table_find(last_notify_idx_released)) { + new_notify_idx = last_notify_idx_released; + last_notify_idx_released = PAGE_SIZE; + } else { + bool reused = false; + new_notify_idx = last_notify_idx_reserved; + if (notify_idx_count + 1 < max_notify_idx) { + do { + if (!dbell_index_table_find + (new_notify_idx)) { + reused = true; + break; + } + new_notify_idx = (new_notify_idx + 1) % + max_notify_idx; + } while (new_notify_idx != + last_notify_idx_released); + } + if (!reused) { + new_notify_idx = max_notify_idx; + max_notify_idx++; + } + } + } else { + new_notify_idx = (last_notify_idx_reserved + 1) % PAGE_SIZE; + } + + last_notify_idx_reserved = new_notify_idx; + notify_idx_count++; + + entry->idx = new_notify_idx; + bucket = VMCI_DOORBELL_HASH(entry->idx); + hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]); + + spin_unlock_bh(&vmci_doorbell_it.lock); +} + +/* + * Remove the given entry from the index table. This will release() the + * entry's resource. + */ +static void dbell_index_table_remove(struct dbell_entry *entry) +{ + spin_lock_bh(&vmci_doorbell_it.lock); + + hlist_del_init(&entry->node); + + notify_idx_count--; + if (entry->idx == max_notify_idx - 1) { + /* + * If we delete an entry with the maximum known + * notification index, we take the opportunity to + * prune the current max. As there might be other + * unused indices immediately below, we lower the + * maximum until we hit an index in use. 
+ */ + while (max_notify_idx > 0 && + !dbell_index_table_find(max_notify_idx - 1)) + max_notify_idx--; + } + + last_notify_idx_released = entry->idx; + + spin_unlock_bh(&vmci_doorbell_it.lock); + + vmci_resource_put(&entry->resource); +} + +/* + * Creates a link between the given doorbell handle and the given + * index in the bitmap in the device backend. A notification state + * is created in hypervisor. + */ +static int dbell_link(struct vmci_handle handle, u32 notify_idx) +{ + struct vmci_doorbell_link_msg link_msg; + + link_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_DOORBELL_LINK); + link_msg.hdr.src = VMCI_ANON_SRC_HANDLE; + link_msg.hdr.payload_size = sizeof(link_msg) - VMCI_DG_HEADERSIZE; + link_msg.handle = handle; + link_msg.notify_idx = notify_idx; + + return vmci_send_datagram(&link_msg.hdr); +} + +/* + * Unlinks the given doorbell handle from an index in the bitmap in + * the device backend. The notification state is destroyed in hypervisor. + */ +static int dbell_unlink(struct vmci_handle handle) +{ + struct vmci_doorbell_unlink_msg unlink_msg; + + unlink_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_DOORBELL_UNLINK); + unlink_msg.hdr.src = VMCI_ANON_SRC_HANDLE; + unlink_msg.hdr.payload_size = sizeof(unlink_msg) - VMCI_DG_HEADERSIZE; + unlink_msg.handle = handle; + + return vmci_send_datagram(&unlink_msg.hdr); +} + +/* + * Notify another guest or the host. We send a datagram down to the + * host via the hypervisor with the notification info. + */ +static int dbell_notify_as_guest(struct vmci_handle handle, u32 priv_flags) +{ + struct vmci_doorbell_notify_msg notify_msg; + + notify_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_DOORBELL_NOTIFY); + notify_msg.hdr.src = VMCI_ANON_SRC_HANDLE; + notify_msg.hdr.payload_size = sizeof(notify_msg) - VMCI_DG_HEADERSIZE; + notify_msg.handle = handle; + + return vmci_send_datagram(¬ify_msg.hdr); +} + +/* + * Calls the specified callback in a delayed context. + */ +static void dbell_delayed_dispatch(struct work_struct *work) +{ + struct dbell_entry *entry = container_of(work, + struct dbell_entry, work); + + entry->notify_cb(entry->client_data); + vmci_resource_put(&entry->resource); +} + +/* + * Dispatches a doorbell notification to the host context. + */ +int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle) +{ + struct dbell_entry *entry; + struct vmci_resource *resource; + + if (vmci_handle_is_invalid(handle)) { + pr_devel("Notifying an invalid doorbell (handle=0x%x:0x%x)\n", + handle.context, handle.resource); + return VMCI_ERROR_INVALID_ARGS; + } + + resource = vmci_resource_by_handle(handle, + VMCI_RESOURCE_TYPE_DOORBELL); + if (!resource) { + pr_devel("Notifying an unknown doorbell (handle=0x%x:0x%x)\n", + handle.context, handle.resource); + return VMCI_ERROR_NOT_FOUND; + } + + entry = container_of(resource, struct dbell_entry, resource); + if (entry->run_delayed) { + schedule_work(&entry->work); + } else { + entry->notify_cb(entry->client_data); + vmci_resource_put(resource); + } + + return VMCI_SUCCESS; +} + +/* + * Register the notification bitmap with the host. 
+ */ +bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn) +{ + int result; + struct vmci_notify_bm_set_msg bitmap_set_msg; + + bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_SET_NOTIFY_BITMAP); + bitmap_set_msg.hdr.src = VMCI_ANON_SRC_HANDLE; + bitmap_set_msg.hdr.payload_size = sizeof(bitmap_set_msg) - + VMCI_DG_HEADERSIZE; + bitmap_set_msg.bitmap_ppn = bitmap_ppn; + + result = vmci_send_datagram(&bitmap_set_msg.hdr); + if (result != VMCI_SUCCESS) { + pr_devel("Failed to register (PPN=%u) as notification bitmap (error=%d)\n", + bitmap_ppn, result); + return false; + } + return true; +} + +/* + * Executes or schedules the handlers for a given notify index. + */ +static void dbell_fire_entries(u32 notify_idx) +{ + u32 bucket = VMCI_DOORBELL_HASH(notify_idx); + struct dbell_entry *dbell; + struct hlist_node *node; + + spin_lock_bh(&vmci_doorbell_it.lock); + + hlist_for_each_entry(dbell, node, + &vmci_doorbell_it.entries[bucket], node) { + if (dbell->idx == notify_idx && + atomic_read(&dbell->active) == 1) { + if (dbell->run_delayed) { + vmci_resource_get(&dbell->resource); + schedule_work(&dbell->work); + } else { + dbell->notify_cb(dbell->client_data); + } + } + } + + spin_unlock_bh(&vmci_doorbell_it.lock); +} + +/* + * Scans the notification bitmap, collects pending notifications, + * resets the bitmap and invokes appropriate callbacks. + */ +void vmci_dbell_scan_notification_entries(u8 *bitmap) +{ + u32 idx; + + for (idx = 0; idx < max_notify_idx; idx++) { + if (bitmap[idx] & 0x1) { + bitmap[idx] &= ~1; + dbell_fire_entries(idx); + } + } +} + +/* + * vmci_doorbell_create() - Creates a doorbell + * @handle: A handle used to track the resource. Can be invalid. + * @flags: Flag that determines context of callback. + * @priv_flags: Privileges flags. + * @notify_cb: The callback to be ivoked when the doorbell fires. + * @client_data: A parameter to be passed to the callback. + * + * Creates a doorbell with the given callback. If the handle is + * VMCI_INVALID_HANDLE, a free handle will be assigned, if + * possible. The callback can be run immediately (potentially with + * locks held - the default) or delayed (in a kernel thread) by + * specifying the flag VMCI_FLAG_DELAYED_CB. If delayed execution + * is selected, a given callback may not be run if the kernel is + * unable to allocate memory for the delayed execution (highly + * unlikely). + */ +int vmci_doorbell_create(struct vmci_handle *handle, + u32 flags, + u32 priv_flags, + vmci_callback notify_cb, void *client_data) +{ + struct dbell_entry *entry; + struct vmci_handle new_handle; + int result; + + if (!handle || !notify_cb || flags & ~VMCI_FLAG_DELAYED_CB || + priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS) + return VMCI_ERROR_INVALID_ARGS; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (entry == NULL) { + pr_warn("Failed allocating memory for datagram entry\n"); + return VMCI_ERROR_NO_MEM; + } + + if (vmci_handle_is_invalid(*handle)) { + u32 context_id = vmci_get_context_id(); + + /* Let resource code allocate a free ID for us */ + new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID); + } else { + bool valid_context = false; + + /* + * Validate the handle. We must do both of the checks below + * because we can be acting as both a host and a guest at the + * same time. We always allow the host context ID, since the + * host functionality is in practice always there with the + * unified driver. 
+ */ + if (handle->context == VMCI_HOST_CONTEXT_ID || + (vmci_guest_code_active() && + vmci_get_context_id() == handle->context)) { + valid_context = true; + } + + if (!valid_context || handle->resource == VMCI_INVALID_ID) { + pr_devel("Invalid argument (handle=0x%x:0x%x)\n", + handle->context, handle->resource); + result = VMCI_ERROR_INVALID_ARGS; + goto free_mem; + } + + new_handle = *handle; + } + + entry->idx = 0; + INIT_HLIST_NODE(&entry->node); + entry->priv_flags = priv_flags; + INIT_WORK(&entry->work, dbell_delayed_dispatch); + entry->run_delayed = flags & VMCI_FLAG_DELAYED_CB; + entry->notify_cb = notify_cb; + entry->client_data = client_data; + atomic_set(&entry->active, 0); + + result = vmci_resource_add(&entry->resource, + VMCI_RESOURCE_TYPE_DOORBELL, + new_handle); + if (result != VMCI_SUCCESS) { + pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n", + new_handle.context, new_handle.resource, result); + goto free_mem; + } + + new_handle = vmci_resource_handle(&entry->resource); + if (vmci_guest_code_active()) { + dbell_index_table_add(entry); + result = dbell_link(new_handle, entry->idx); + if (VMCI_SUCCESS != result) + goto destroy_resource; + + atomic_set(&entry->active, 1); + } + + *handle = new_handle; + + return result; + + destroy_resource: + dbell_index_table_remove(entry); + vmci_resource_remove(&entry->resource); + free_mem: + kfree(entry); + return result; +} +EXPORT_SYMBOL_GPL(vmci_doorbell_create); + +/* + * vmci_doorbell_destroy() - Destroy a doorbell. + * @handle: The handle tracking the resource. + * + * Destroys a doorbell previously created with vmcii_doorbell_create. This + * operation may block waiting for a callback to finish. + */ +int vmci_doorbell_destroy(struct vmci_handle handle) +{ + struct dbell_entry *entry; + struct vmci_resource *resource; + + if (vmci_handle_is_invalid(handle)) + return VMCI_ERROR_INVALID_ARGS; + + resource = vmci_resource_by_handle(handle, + VMCI_RESOURCE_TYPE_DOORBELL); + if (!resource) { + pr_devel("Failed to destroy doorbell (handle=0x%x:0x%x)\n", + handle.context, handle.resource); + return VMCI_ERROR_NOT_FOUND; + } + + entry = container_of(resource, struct dbell_entry, resource); + + if (vmci_guest_code_active()) { + int result; + + dbell_index_table_remove(entry); + + result = dbell_unlink(handle); + if (VMCI_SUCCESS != result) { + + /* + * The only reason this should fail would be + * an inconsistency between guest and + * hypervisor state, where the guest believes + * it has an active registration whereas the + * hypervisor doesn't. One case where this may + * happen is if a doorbell is unregistered + * following a hibernation at a time where the + * doorbell state hasn't been restored on the + * hypervisor side yet. Since the handle has + * now been removed in the guest, we just + * print a warning and return success. + */ + pr_devel("Unlink of doorbell (handle=0x%x:0x%x) unknown by hypervisor (error=%d)\n", + handle.context, handle.resource, result); + } + } + + /* + * Now remove the resource from the table. It might still be in use + * after this, in a callback or still on the delayed work queue. + */ + vmci_resource_put(&entry->resource); + vmci_resource_remove(&entry->resource); + + kfree(entry); + + return VMCI_SUCCESS; +} +EXPORT_SYMBOL_GPL(vmci_doorbell_destroy); + +/* + * vmci_doorbell_notify() - Ring the doorbell (and hide in the bushes). + * @dst: The handlle identifying the doorbell resource + * @priv_flags: Priviledge flags. 
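For orientation, a hedged sketch of the exported doorbell lifecycle; the peer handle is illustrative and error handling is trimmed:

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/kernel.h>

static struct vmci_handle example_db;

/* With VMCI_FLAG_DELAYED_CB this runs from a work item and may sleep;
 * without the flag it can be called with locks held. */
static void example_db_fired(void *client_data)
{
	pr_info("doorbell rang\n");
}

static int example_db_setup(void)
{
	/* VMCI_INVALID_HANDLE asks the driver to pick a free resource
	 * ID in the local context. */
	example_db = VMCI_INVALID_HANDLE;
	return vmci_doorbell_create(&example_db, VMCI_FLAG_DELAYED_CB,
				    VMCI_NO_PRIVILEGE_FLAGS,
				    example_db_fired, NULL);
}

static int example_db_ring(struct vmci_handle peer)
{
	/* Routed as host or guest depending on vmci_route(). */
	return vmci_doorbell_notify(peer, VMCI_NO_PRIVILEGE_FLAGS);
}

static void example_db_teardown(void)
{
	vmci_doorbell_destroy(example_db);
}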
+ * + * Generates a notification on the doorbell identified by the + * handle. For host side generation of notifications, the caller + * can specify what the privilege of the calling side is. + */ +int vmci_doorbell_notify(struct vmci_handle dst, u32 priv_flags) +{ + int retval; + enum vmci_route route; + struct vmci_handle src; + + if (vmci_handle_is_invalid(dst) || + (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)) + return VMCI_ERROR_INVALID_ARGS; + + src = VMCI_INVALID_HANDLE; + retval = vmci_route(&src, &dst, false, &route); + if (retval < VMCI_SUCCESS) + return retval; + + if (VMCI_ROUTE_AS_HOST == route) + return vmci_ctx_notify_dbell(VMCI_HOST_CONTEXT_ID, + dst, priv_flags); + + if (VMCI_ROUTE_AS_GUEST == route) + return dbell_notify_as_guest(dst, priv_flags); + + pr_warn("Unknown route (%d) for doorbell\n", route); + return VMCI_ERROR_DST_UNREACHABLE; +} +EXPORT_SYMBOL_GPL(vmci_doorbell_notify); diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.h b/drivers/misc/vmw_vmci/vmci_doorbell.h new file mode 100644 index 000000000000..e4c0b17486a5 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_doorbell.h @@ -0,0 +1,51 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#ifndef VMCI_DOORBELL_H +#define VMCI_DOORBELL_H + +#include <linux/vmw_vmci_defs.h> +#include <linux/types.h> + +#include "vmci_driver.h" + +/* + * VMCINotifyResourceInfo: Used to create and destroy doorbells, and + * generate a notification for a doorbell or queue pair. + */ +struct vmci_dbell_notify_resource_info { + struct vmci_handle handle; + u16 resource; + u16 action; + s32 result; +}; + +/* + * Structure used for checkpointing the doorbell mappings. It is + * written to the checkpoint as is, so changing this structure will + * break checkpoint compatibility. + */ +struct dbell_cpt_state { + struct vmci_handle handle; + u64 bitmap_idx; +}; + +int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle); +int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags); + +bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn); +void vmci_dbell_scan_notification_entries(u8 *bitmap); + +#endif /* VMCI_DOORBELL_H */ diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c new file mode 100644 index 000000000000..7b3fce2da6c3 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_driver.c @@ -0,0 +1,117 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/atomic.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> + +#include "vmci_driver.h" +#include "vmci_event.h" + +static bool vmci_disable_host; +module_param_named(disable_host, vmci_disable_host, bool, 0); +MODULE_PARM_DESC(disable_host, + "Disable driver host personality (default=enabled)"); + +static bool vmci_disable_guest; +module_param_named(disable_guest, vmci_disable_guest, bool, 0); +MODULE_PARM_DESC(disable_guest, + "Disable driver guest personality (default=enabled)"); + +static bool vmci_guest_personality_initialized; +static bool vmci_host_personality_initialized; + +/* + * vmci_get_context_id() - Gets the current context ID. + * + * Returns the current context ID. Note that since this is accessed only + * from code running in the host, this always returns the host context ID. + */ +u32 vmci_get_context_id(void) +{ + if (vmci_guest_code_active()) + return vmci_get_vm_context_id(); + else if (vmci_host_code_active()) + return VMCI_HOST_CONTEXT_ID; + + return VMCI_INVALID_ID; +} +EXPORT_SYMBOL_GPL(vmci_get_context_id); + +static int __init vmci_drv_init(void) +{ + int vmci_err; + int error; + + vmci_err = vmci_event_init(); + if (vmci_err < VMCI_SUCCESS) { + pr_err("Failed to initialize VMCIEvent (result=%d)\n", + vmci_err); + return -EINVAL; + } + + if (!vmci_disable_guest) { + error = vmci_guest_init(); + if (error) { + pr_warn("Failed to initialize guest personality (err=%d)\n", + error); + } else { + vmci_guest_personality_initialized = true; + pr_info("Guest personality initialized and is %s\n", + vmci_guest_code_active() ? + "active" : "inactive"); + } + } + + if (!vmci_disable_host) { + error = vmci_host_init(); + if (error) { + pr_warn("Unable to initialize host personality (err=%d)\n", + error); + } else { + vmci_host_personality_initialized = true; + pr_info("Initialized host personality\n"); + } + } + + if (!vmci_guest_personality_initialized && + !vmci_host_personality_initialized) { + vmci_event_exit(); + return -ENODEV; + } + + return 0; +} +module_init(vmci_drv_init); + +static void __exit vmci_drv_exit(void) +{ + if (vmci_guest_personality_initialized) + vmci_guest_exit(); + + if (vmci_host_personality_initialized) + vmci_host_exit(); + + vmci_event_exit(); +} +module_exit(vmci_drv_exit); + +MODULE_AUTHOR("VMware, Inc."); +MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface."); +MODULE_VERSION("1.0.0.0-k"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h new file mode 100644 index 000000000000..f69156a1f30c --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_driver.h @@ -0,0 +1,50 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#ifndef _VMCI_DRIVER_H_ +#define _VMCI_DRIVER_H_ + +#include <linux/vmw_vmci_defs.h> +#include <linux/wait.h> + +#include "vmci_queue_pair.h" +#include "vmci_context.h" + +enum vmci_obj_type { + VMCIOBJ_VMX_VM = 10, + VMCIOBJ_CONTEXT, + VMCIOBJ_SOCKET, + VMCIOBJ_NOT_SET, +}; + +/* For storing VMCI structures in file handles. */ +struct vmci_obj { + void *ptr; + enum vmci_obj_type type; +}; + +u32 vmci_get_context_id(void); +int vmci_send_datagram(struct vmci_datagram *dg); + +int vmci_host_init(void); +void vmci_host_exit(void); +bool vmci_host_code_active(void); + +int vmci_guest_init(void); +void vmci_guest_exit(void); +bool vmci_guest_code_active(void); +u32 vmci_get_vm_context_id(void); + +#endif /* _VMCI_DRIVER_H_ */ diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c new file mode 100644 index 000000000000..8449516d6ac6 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_event.c @@ -0,0 +1,224 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/slab.h> + +#include "vmci_driver.h" +#include "vmci_event.h" + +#define EVENT_MAGIC 0xEABE0000 +#define VMCI_EVENT_MAX_ATTEMPTS 10 + +struct vmci_subscription { + u32 id; + u32 event; + vmci_event_cb callback; + void *callback_data; + struct list_head node; /* on one of subscriber lists */ +}; + +static struct list_head subscriber_array[VMCI_EVENT_MAX]; +static DEFINE_MUTEX(subscriber_mutex); + +int __init vmci_event_init(void) +{ + int i; + + for (i = 0; i < VMCI_EVENT_MAX; i++) + INIT_LIST_HEAD(&subscriber_array[i]); + + return VMCI_SUCCESS; +} + +void vmci_event_exit(void) +{ + int e; + + /* We free all memory at exit. */ + for (e = 0; e < VMCI_EVENT_MAX; e++) { + struct vmci_subscription *cur, *p2; + list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) { + + /* + * We should never get here because all events + * should have been unregistered before we try + * to unload the driver module. + */ + pr_warn("Unexpected free events occurring\n"); + list_del(&cur->node); + kfree(cur); + } + } +} + +/* + * Find entry. Assumes subscriber_mutex is held. + */ +static struct vmci_subscription *event_find(u32 sub_id) +{ + int e; + + for (e = 0; e < VMCI_EVENT_MAX; e++) { + struct vmci_subscription *cur; + list_for_each_entry(cur, &subscriber_array[e], node) { + if (cur->id == sub_id) + return cur; + } + } + return NULL; +} + +/* + * Actually delivers the events to the subscribers. + * The callback function for each subscriber is invoked. 
+ */ +static void event_deliver(struct vmci_event_msg *event_msg) +{ + struct vmci_subscription *cur; + struct list_head *subscriber_list; + + rcu_read_lock(); + subscriber_list = &subscriber_array[event_msg->event_data.event]; + list_for_each_entry_rcu(cur, subscriber_list, node) { + cur->callback(cur->id, &event_msg->event_data, + cur->callback_data); + } + rcu_read_unlock(); +} + +/* + * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all + * subscribers for given event. + */ +int vmci_event_dispatch(struct vmci_datagram *msg) +{ + struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg; + + if (msg->payload_size < sizeof(u32) || + msg->payload_size > sizeof(struct vmci_event_data_max)) + return VMCI_ERROR_INVALID_ARGS; + + if (!VMCI_EVENT_VALID(event_msg->event_data.event)) + return VMCI_ERROR_EVENT_UNKNOWN; + + event_deliver(event_msg); + return VMCI_SUCCESS; +} + +/* + * vmci_event_subscribe() - Subscribe to a given event. + * @event: The event to subscribe to. + * @callback: The callback to invoke upon the event. + * @callback_data: Data to pass to the callback. + * @subscription_id: ID used to track subscription. Used with + * vmci_event_unsubscribe() + * + * Subscribes to the provided event. The callback specified will be + * fired from RCU critical section and therefore must not sleep. + */ +int vmci_event_subscribe(u32 event, + vmci_event_cb callback, + void *callback_data, + u32 *new_subscription_id) +{ + struct vmci_subscription *sub; + int attempts; + int retval; + bool have_new_id = false; + + if (!new_subscription_id) { + pr_devel("%s: Invalid subscription (NULL)\n", __func__); + return VMCI_ERROR_INVALID_ARGS; + } + + if (!VMCI_EVENT_VALID(event) || !callback) { + pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n", + __func__, event, callback, callback_data); + return VMCI_ERROR_INVALID_ARGS; + } + + sub = kzalloc(sizeof(*sub), GFP_KERNEL); + if (!sub) + return VMCI_ERROR_NO_MEM; + + sub->id = VMCI_EVENT_MAX; + sub->event = event; + sub->callback = callback; + sub->callback_data = callback_data; + INIT_LIST_HEAD(&sub->node); + + mutex_lock(&subscriber_mutex); + + /* Creation of a new event is always allowed. */ + for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) { + static u32 subscription_id; + /* + * We try to get an id a couple of time before + * claiming we are out of resources. + */ + + /* Test for duplicate id. */ + if (!event_find(++subscription_id)) { + sub->id = subscription_id; + have_new_id = true; + break; + } + } + + if (have_new_id) { + list_add_rcu(&sub->node, &subscriber_array[event]); + retval = VMCI_SUCCESS; + } else { + retval = VMCI_ERROR_NO_RESOURCES; + } + + mutex_unlock(&subscriber_mutex); + + *new_subscription_id = sub->id; + return retval; +} +EXPORT_SYMBOL_GPL(vmci_event_subscribe); + +/* + * vmci_event_unsubscribe() - unsubscribe from an event. + * @sub_id: A subscription ID as provided by vmci_event_subscribe() + * + * Unsubscribe from given event. Removes it from list and frees it. + * Will return callback_data if requested by caller. 
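A short sketch of the subscribe/unsubscribe pairing. VMCI_EVENT_CTX_ID_UPDATE is assumed here as a representative event type from vmw_vmci_defs.h; any value accepted by VMCI_EVENT_VALID() would do:

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/kernel.h>

static u32 example_sub_id;

/* Called from event_deliver() under rcu_read_lock(); must not sleep. */
static void example_on_event(u32 sub_id,
			     const struct vmci_event_data *ed,
			     void *client_data)
{
	pr_info("event %u on subscription 0x%x\n", ed->event, sub_id);
}

static int example_subscribe(void)
{
	return vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
				    example_on_event, NULL,
				    &example_sub_id);
}

static void example_unsubscribe(void)
{
	/* Waits for readers via synchronize_rcu() before freeing. */
	vmci_event_unsubscribe(example_sub_id);
}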
+ */ +int vmci_event_unsubscribe(u32 sub_id) +{ + struct vmci_subscription *s; + + mutex_lock(&subscriber_mutex); + s = event_find(sub_id); + if (s) + list_del_rcu(&s->node); + mutex_unlock(&subscriber_mutex); + + if (!s) + return VMCI_ERROR_NOT_FOUND; + + synchronize_rcu(); + kfree(s); + + return VMCI_SUCCESS; +} +EXPORT_SYMBOL_GPL(vmci_event_unsubscribe); diff --git a/drivers/misc/vmw_vmci/vmci_event.h b/drivers/misc/vmw_vmci/vmci_event.h new file mode 100644 index 000000000000..7df9b1c0a96c --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_event.h @@ -0,0 +1,25 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#ifndef __VMCI_EVENT_H__ +#define __VMCI_EVENT_H__ + +#include <linux/vmw_vmci_api.h> + +int vmci_event_init(void); +void vmci_event_exit(void); +int vmci_event_dispatch(struct vmci_datagram *msg); + +#endif /*__VMCI_EVENT_H__ */ diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c new file mode 100644 index 000000000000..60c01999f489 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -0,0 +1,759 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/moduleparam.h> +#include <linux/interrupt.h> +#include <linux/highmem.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/smp.h> +#include <linux/io.h> +#include <linux/vmalloc.h> + +#include "vmci_datagram.h" +#include "vmci_doorbell.h" +#include "vmci_context.h" +#include "vmci_driver.h" +#include "vmci_event.h" + +#define PCI_VENDOR_ID_VMWARE 0x15AD +#define PCI_DEVICE_ID_VMWARE_VMCI 0x0740 + +#define VMCI_UTIL_NUM_RESOURCES 1 + +static bool vmci_disable_msi; +module_param_named(disable_msi, vmci_disable_msi, bool, 0); +MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)"); + +static bool vmci_disable_msix; +module_param_named(disable_msix, vmci_disable_msix, bool, 0); +MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)"); + +static u32 ctx_update_sub_id = VMCI_INVALID_ID; +static u32 vm_context_id = VMCI_INVALID_ID; + +struct vmci_guest_device { + struct device *dev; /* PCI device we are attached to */ + void __iomem *iobase; + + unsigned int irq; + unsigned int intr_type; + bool exclusive_vectors; + struct msix_entry msix_entries[VMCI_MAX_INTRS]; + + struct tasklet_struct datagram_tasklet; + struct tasklet_struct bm_tasklet; + + void *data_buffer; + void *notification_bitmap; +}; + +/* vmci_dev singleton device and supporting data*/ +static struct vmci_guest_device *vmci_dev_g; +static DEFINE_SPINLOCK(vmci_dev_spinlock); + +static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0); + +bool vmci_guest_code_active(void) +{ + return atomic_read(&vmci_num_guest_devices) != 0; +} + +u32 vmci_get_vm_context_id(void) +{ + if (vm_context_id == VMCI_INVALID_ID) { + struct vmci_datagram get_cid_msg; + get_cid_msg.dst = + vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_GET_CONTEXT_ID); + get_cid_msg.src = VMCI_ANON_SRC_HANDLE; + get_cid_msg.payload_size = 0; + vm_context_id = vmci_send_datagram(&get_cid_msg); + } + return vm_context_id; +} + +/* + * VM to hypervisor call mechanism. We use the standard VMware naming + * convention since shared code is calling this function as well. + */ +int vmci_send_datagram(struct vmci_datagram *dg) +{ + unsigned long flags; + int result; + + /* Check args. */ + if (dg == NULL) + return VMCI_ERROR_INVALID_ARGS; + + /* + * Need to acquire spinlock on the device because the datagram + * data may be spread over multiple pages and the monitor may + * interleave device user rpc calls from multiple + * VCPUs. Acquiring the spinlock precludes that + * possibility. Disabling interrupts to avoid incoming + * datagrams during a "rep out" and possibly landing up in + * this function. + */ + spin_lock_irqsave(&vmci_dev_spinlock, flags); + + if (vmci_dev_g) { + iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR, + dg, VMCI_DG_SIZE(dg)); + result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR); + } else { + result = VMCI_ERROR_UNAVAILABLE; + } + + spin_unlock_irqrestore(&vmci_dev_spinlock, flags); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_send_datagram); + +/* + * Gets called with the new context id if updated or resumed. + * Context id. 
+ */ +static void vmci_guest_cid_update(u32 sub_id, + const struct vmci_event_data *event_data, + void *client_data) +{ + const struct vmci_event_payld_ctx *ev_payload = + vmci_event_data_const_payload(event_data); + + if (sub_id != ctx_update_sub_id) { + pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id); + return; + } + + if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) { + pr_devel("Invalid event data\n"); + return; + } + + pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n", + vm_context_id, ev_payload->context_id, event_data->event); + + vm_context_id = ev_payload->context_id; +} + +/* + * Verify that the host supports the hypercalls we need. If it does not, + * try to find fallback hypercalls and use those instead. Returns + * true if required hypercalls (or fallback hypercalls) are + * supported by the host, false otherwise. + */ +static bool vmci_check_host_caps(struct pci_dev *pdev) +{ + bool result; + struct vmci_resource_query_msg *msg; + u32 msg_size = sizeof(struct vmci_resource_query_hdr) + + VMCI_UTIL_NUM_RESOURCES * sizeof(u32); + struct vmci_datagram *check_msg; + + check_msg = kmalloc(msg_size, GFP_KERNEL); + if (!check_msg) { + dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__); + return false; + } + + check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_RESOURCES_QUERY); + check_msg->src = VMCI_ANON_SRC_HANDLE; + check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE; + msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg); + + msg->num_resources = VMCI_UTIL_NUM_RESOURCES; + msg->resources[0] = VMCI_GET_CONTEXT_ID; + + /* Checks that hyper calls are supported */ + result = vmci_send_datagram(check_msg) == 0x01; + kfree(check_msg); + + dev_dbg(&pdev->dev, "%s: Host capability check: %s\n", + __func__, result ? "PASSED" : "FAILED"); + + /* We need the vector. There are no fallbacks. */ + return result; +} + +/* + * Reads datagrams from the data in port and dispatches them. We + * always start reading datagrams into only the first page of the + * datagram buffer. If the datagrams don't fit into one page, we + * use the maximum datagram buffer size for the remainder of the + * invocation. This is a simple heuristic for not penalizing + * small datagrams. + * + * This function assumes that it has exclusive access to the data + * in port for the duration of the call. + */ +static void vmci_dispatch_dgs(unsigned long data) +{ + struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data; + u8 *dg_in_buffer = vmci_dev->data_buffer; + struct vmci_datagram *dg; + size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE; + size_t current_dg_in_buffer_size = PAGE_SIZE; + size_t remaining_bytes; + + BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE); + + ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR, + vmci_dev->data_buffer, current_dg_in_buffer_size); + dg = (struct vmci_datagram *)dg_in_buffer; + remaining_bytes = current_dg_in_buffer_size; + + while (dg->dst.resource != VMCI_INVALID_ID || + remaining_bytes > PAGE_SIZE) { + unsigned dg_in_size; + + /* + * When the input buffer spans multiple pages, a datagram can + * start on any page boundary in the buffer. 
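+	 * An entry with an invalid destination resource marks unused
+	 * space, so skip ahead to the next page boundary and recompute
+	 * the number of remaining bytes before looking for the next
+	 * datagram.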
+ */ + if (dg->dst.resource == VMCI_INVALID_ID) { + dg = (struct vmci_datagram *)roundup( + (uintptr_t)dg + 1, PAGE_SIZE); + remaining_bytes = + (size_t)(dg_in_buffer + + current_dg_in_buffer_size - + (u8 *)dg); + continue; + } + + dg_in_size = VMCI_DG_SIZE_ALIGNED(dg); + + if (dg_in_size <= dg_in_buffer_size) { + int result; + + /* + * If the remaining bytes in the datagram + * buffer doesn't contain the complete + * datagram, we first make sure we have enough + * room for it and then we read the reminder + * of the datagram and possibly any following + * datagrams. + */ + if (dg_in_size > remaining_bytes) { + if (remaining_bytes != + current_dg_in_buffer_size) { + + /* + * We move the partial + * datagram to the front and + * read the reminder of the + * datagram and possibly + * following calls into the + * following bytes. + */ + memmove(dg_in_buffer, dg_in_buffer + + current_dg_in_buffer_size - + remaining_bytes, + remaining_bytes); + dg = (struct vmci_datagram *) + dg_in_buffer; + } + + if (current_dg_in_buffer_size != + dg_in_buffer_size) + current_dg_in_buffer_size = + dg_in_buffer_size; + + ioread8_rep(vmci_dev->iobase + + VMCI_DATA_IN_ADDR, + vmci_dev->data_buffer + + remaining_bytes, + current_dg_in_buffer_size - + remaining_bytes); + } + + /* + * We special case event datagrams from the + * hypervisor. + */ + if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID && + dg->dst.resource == VMCI_EVENT_HANDLER) { + result = vmci_event_dispatch(dg); + } else { + result = vmci_datagram_invoke_guest_handler(dg); + } + if (result < VMCI_SUCCESS) + dev_dbg(vmci_dev->dev, + "Datagram with resource (ID=0x%x) failed (err=%d)\n", + dg->dst.resource, result); + + /* On to the next datagram. */ + dg = (struct vmci_datagram *)((u8 *)dg + + dg_in_size); + } else { + size_t bytes_to_skip; + + /* + * Datagram doesn't fit in datagram buffer of maximal + * size. We drop it. + */ + dev_dbg(vmci_dev->dev, + "Failed to receive datagram (size=%u bytes)\n", + dg_in_size); + + bytes_to_skip = dg_in_size - remaining_bytes; + if (current_dg_in_buffer_size != dg_in_buffer_size) + current_dg_in_buffer_size = dg_in_buffer_size; + + for (;;) { + ioread8_rep(vmci_dev->iobase + + VMCI_DATA_IN_ADDR, + vmci_dev->data_buffer, + current_dg_in_buffer_size); + if (bytes_to_skip <= current_dg_in_buffer_size) + break; + + bytes_to_skip -= current_dg_in_buffer_size; + } + dg = (struct vmci_datagram *)(dg_in_buffer + + bytes_to_skip); + } + + remaining_bytes = + (size_t) (dg_in_buffer + current_dg_in_buffer_size - + (u8 *)dg); + + if (remaining_bytes < VMCI_DG_HEADERSIZE) { + /* Get the next batch of datagrams. */ + + ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR, + vmci_dev->data_buffer, + current_dg_in_buffer_size); + dg = (struct vmci_datagram *)dg_in_buffer; + remaining_bytes = current_dg_in_buffer_size; + } + } +} + +/* + * Scans the notification bitmap for raised flags, clears them + * and handles the notifications. + */ +static void vmci_process_bitmap(unsigned long data) +{ + struct vmci_guest_device *dev = (struct vmci_guest_device *)data; + + if (!dev->notification_bitmap) { + dev_dbg(dev->dev, "No bitmap present in %s\n", __func__); + return; + } + + vmci_dbell_scan_notification_entries(dev->notification_bitmap); +} + +/* + * Enable MSI-X. Try exclusive vectors first, then shared vectors. 
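+ *
+ * pci_enable_msix() returns 0 when all requested vectors were
+ * allocated, a positive count when fewer are available (in which
+ * case we fall back to a single, shared vector), and a negative
+ * errno on failure.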
+ */ +static int vmci_enable_msix(struct pci_dev *pdev, + struct vmci_guest_device *vmci_dev) +{ + int i; + int result; + + for (i = 0; i < VMCI_MAX_INTRS; ++i) { + vmci_dev->msix_entries[i].entry = i; + vmci_dev->msix_entries[i].vector = i; + } + + result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS); + if (result == 0) + vmci_dev->exclusive_vectors = true; + else if (result > 0) + result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1); + + return result; +} + +/* + * Interrupt handler for legacy or MSI interrupt, or for first MSI-X + * interrupt (vector VMCI_INTR_DATAGRAM). + */ +static irqreturn_t vmci_interrupt(int irq, void *_dev) +{ + struct vmci_guest_device *dev = _dev; + + /* + * If we are using MSI-X with exclusive vectors then we simply schedule + * the datagram tasklet, since we know the interrupt was meant for us. + * Otherwise we must read the ICR to determine what to do. + */ + + if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) { + tasklet_schedule(&dev->datagram_tasklet); + } else { + unsigned int icr; + + /* Acknowledge interrupt and determine what needs doing. */ + icr = ioread32(dev->iobase + VMCI_ICR_ADDR); + if (icr == 0 || icr == ~0) + return IRQ_NONE; + + if (icr & VMCI_ICR_DATAGRAM) { + tasklet_schedule(&dev->datagram_tasklet); + icr &= ~VMCI_ICR_DATAGRAM; + } + + if (icr & VMCI_ICR_NOTIFICATION) { + tasklet_schedule(&dev->bm_tasklet); + icr &= ~VMCI_ICR_NOTIFICATION; + } + + if (icr != 0) + dev_warn(dev->dev, + "Ignoring unknown interrupt cause (%d)\n", + icr); + } + + return IRQ_HANDLED; +} + +/* + * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION, + * which is for the notification bitmap. Will only get called if we are + * using MSI-X with exclusive vectors. + */ +static irqreturn_t vmci_interrupt_bm(int irq, void *_dev) +{ + struct vmci_guest_device *dev = _dev; + + /* For MSI-X we can just assume it was meant for us. */ + tasklet_schedule(&dev->bm_tasklet); + + return IRQ_HANDLED; +} + +/* + * Most of the initialization at module load time is done here. 
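+ *
+ * In outline: enable and map the PCI device, verify the mandatory
+ * datagram capability, optionally set up the notification bitmap,
+ * publish the device so vmci_send_datagram() works, check host
+ * capabilities, subscribe to VMCI_EVENT_CTX_ID_UPDATE, and finally
+ * set up MSI-X/MSI/INTx interrupts before enabling them.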
+ */ +static int vmci_guest_probe_device(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct vmci_guest_device *vmci_dev; + void __iomem *iobase; + unsigned int capabilities; + unsigned long cmd; + int vmci_err; + int error; + + dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n"); + + error = pcim_enable_device(pdev); + if (error) { + dev_err(&pdev->dev, + "Failed to enable VMCI device: %d\n", error); + return error; + } + + error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME); + if (error) { + dev_err(&pdev->dev, "Failed to reserve/map IO regions\n"); + return error; + } + + iobase = pcim_iomap_table(pdev)[0]; + + dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n", + (unsigned long)iobase, pdev->irq); + + vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL); + if (!vmci_dev) { + dev_err(&pdev->dev, + "Can't allocate memory for VMCI device\n"); + return -ENOMEM; + } + + vmci_dev->dev = &pdev->dev; + vmci_dev->intr_type = VMCI_INTR_TYPE_INTX; + vmci_dev->exclusive_vectors = false; + vmci_dev->iobase = iobase; + + tasklet_init(&vmci_dev->datagram_tasklet, + vmci_dispatch_dgs, (unsigned long)vmci_dev); + tasklet_init(&vmci_dev->bm_tasklet, + vmci_process_bitmap, (unsigned long)vmci_dev); + + vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE); + if (!vmci_dev->data_buffer) { + dev_err(&pdev->dev, + "Can't allocate memory for datagram buffer\n"); + return -ENOMEM; + } + + pci_set_master(pdev); /* To enable queue_pair functionality. */ + + /* + * Verify that the VMCI Device supports the capabilities that + * we need. If the device is missing capabilities that we would + * like to use, check for fallback capabilities and use those + * instead (so we can run a new VM on old hosts). Fail the load if + * a required capability is missing and there is no fallback. + * + * Right now, we need datagrams. There are no fallbacks. + */ + capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR); + if (!(capabilities & VMCI_CAPS_DATAGRAM)) { + dev_err(&pdev->dev, "Device does not support datagrams\n"); + error = -ENXIO; + goto err_free_data_buffer; + } + + /* + * If the hardware supports notifications, we will use that as + * well. + */ + if (capabilities & VMCI_CAPS_NOTIFICATIONS) { + vmci_dev->notification_bitmap = vmalloc(PAGE_SIZE); + if (!vmci_dev->notification_bitmap) { + dev_warn(&pdev->dev, + "Unable to allocate notification bitmap\n"); + } else { + memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE); + capabilities |= VMCI_CAPS_NOTIFICATIONS; + } + } + + dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities); + + /* Let the host know which capabilities we intend to use. */ + iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR); + + /* Set up global device so that we can start sending datagrams */ + spin_lock_irq(&vmci_dev_spinlock); + vmci_dev_g = vmci_dev; + spin_unlock_irq(&vmci_dev_spinlock); + + /* + * Register notification bitmap with device if that capability is + * used. + */ + if (capabilities & VMCI_CAPS_NOTIFICATIONS) { + struct page *page = + vmalloc_to_page(vmci_dev->notification_bitmap); + unsigned long bitmap_ppn = page_to_pfn(page); + if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) { + dev_warn(&pdev->dev, + "VMCI device unable to register notification bitmap with PPN 0x%x\n", + (u32) bitmap_ppn); + goto err_remove_vmci_dev_g; + } + } + + /* Check host capabilities. */ + if (!vmci_check_host_caps(pdev)) + goto err_remove_bitmap; + + /* Enable device. 
*/ + + /* + * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can + * update the internal context id when needed. + */ + vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE, + vmci_guest_cid_update, NULL, + &ctx_update_sub_id); + if (vmci_err < VMCI_SUCCESS) + dev_warn(&pdev->dev, + "Failed to subscribe to event (type=%d): %d\n", + VMCI_EVENT_CTX_ID_UPDATE, vmci_err); + + /* + * Enable interrupts. Try MSI-X first, then MSI, and then fallback on + * legacy interrupts. + */ + if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) { + vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX; + vmci_dev->irq = vmci_dev->msix_entries[0].vector; + } else if (!vmci_disable_msi && !pci_enable_msi(pdev)) { + vmci_dev->intr_type = VMCI_INTR_TYPE_MSI; + vmci_dev->irq = pdev->irq; + } else { + vmci_dev->intr_type = VMCI_INTR_TYPE_INTX; + vmci_dev->irq = pdev->irq; + } + + /* + * Request IRQ for legacy or MSI interrupts, or for first + * MSI-X vector. + */ + error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED, + KBUILD_MODNAME, vmci_dev); + if (error) { + dev_err(&pdev->dev, "Irq %u in use: %d\n", + vmci_dev->irq, error); + goto err_disable_msi; + } + + /* + * For MSI-X with exclusive vectors we need to request an + * interrupt for each vector so that we get a separate + * interrupt handler routine. This allows us to distinguish + * between the vectors. + */ + if (vmci_dev->exclusive_vectors) { + error = request_irq(vmci_dev->msix_entries[1].vector, + vmci_interrupt_bm, 0, KBUILD_MODNAME, + vmci_dev); + if (error) { + dev_err(&pdev->dev, + "Failed to allocate irq %u: %d\n", + vmci_dev->msix_entries[1].vector, error); + goto err_free_irq; + } + } + + dev_dbg(&pdev->dev, "Registered device\n"); + + atomic_inc(&vmci_num_guest_devices); + + /* Enable specific interrupt bits. */ + cmd = VMCI_IMR_DATAGRAM; + if (capabilities & VMCI_CAPS_NOTIFICATIONS) + cmd |= VMCI_IMR_NOTIFICATION; + iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR); + + /* Enable interrupts. 
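+	 * The individual causes were unmasked via VMCI_IMR_ADDR above;
+	 * this control write turns on interrupt delivery itself.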
*/ + iowrite32(VMCI_CONTROL_INT_ENABLE, + vmci_dev->iobase + VMCI_CONTROL_ADDR); + + pci_set_drvdata(pdev, vmci_dev); + return 0; + +err_free_irq: + free_irq(vmci_dev->irq, &vmci_dev); + tasklet_kill(&vmci_dev->datagram_tasklet); + tasklet_kill(&vmci_dev->bm_tasklet); + +err_disable_msi: + if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) + pci_disable_msix(pdev); + else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) + pci_disable_msi(pdev); + + vmci_err = vmci_event_unsubscribe(ctx_update_sub_id); + if (vmci_err < VMCI_SUCCESS) + dev_warn(&pdev->dev, + "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n", + VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err); + +err_remove_bitmap: + if (vmci_dev->notification_bitmap) { + iowrite32(VMCI_CONTROL_RESET, + vmci_dev->iobase + VMCI_CONTROL_ADDR); + vfree(vmci_dev->notification_bitmap); + } + +err_remove_vmci_dev_g: + spin_lock_irq(&vmci_dev_spinlock); + vmci_dev_g = NULL; + spin_unlock_irq(&vmci_dev_spinlock); + +err_free_data_buffer: + vfree(vmci_dev->data_buffer); + + /* The rest are managed resources and will be freed by PCI core */ + return error; +} + +static void vmci_guest_remove_device(struct pci_dev *pdev) +{ + struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev); + int vmci_err; + + dev_dbg(&pdev->dev, "Removing device\n"); + + atomic_dec(&vmci_num_guest_devices); + + vmci_qp_guest_endpoints_exit(); + + vmci_err = vmci_event_unsubscribe(ctx_update_sub_id); + if (vmci_err < VMCI_SUCCESS) + dev_warn(&pdev->dev, + "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n", + VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err); + + spin_lock_irq(&vmci_dev_spinlock); + vmci_dev_g = NULL; + spin_unlock_irq(&vmci_dev_spinlock); + + dev_dbg(&pdev->dev, "Resetting vmci device\n"); + iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR); + + /* + * Free IRQ and then disable MSI/MSI-X as appropriate. For + * MSI-X, we might have multiple vectors, each with their own + * IRQ, which we must free too. + */ + free_irq(vmci_dev->irq, vmci_dev); + if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) { + if (vmci_dev->exclusive_vectors) + free_irq(vmci_dev->msix_entries[1].vector, vmci_dev); + pci_disable_msix(pdev); + } else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) { + pci_disable_msi(pdev); + } + + tasklet_kill(&vmci_dev->datagram_tasklet); + tasklet_kill(&vmci_dev->bm_tasklet); + + if (vmci_dev->notification_bitmap) { + /* + * The device reset above cleared the bitmap state of the + * device, so we can safely free it here. 
+ */ + + vfree(vmci_dev->notification_bitmap); + } + + vfree(vmci_dev->data_buffer); + + /* The rest are managed resources and will be freed by PCI core */ +} + +static DEFINE_PCI_DEVICE_TABLE(vmci_ids) = { + { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), }, + { 0 }, +}; +MODULE_DEVICE_TABLE(pci, vmci_ids); + +static struct pci_driver vmci_guest_driver = { + .name = KBUILD_MODNAME, + .id_table = vmci_ids, + .probe = vmci_guest_probe_device, + .remove = vmci_guest_remove_device, +}; + +int __init vmci_guest_init(void) +{ + return pci_register_driver(&vmci_guest_driver); +} + +void __exit vmci_guest_exit(void) +{ + pci_unregister_driver(&vmci_guest_driver); +} diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c new file mode 100644 index 000000000000..344973a0fb0a --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_handle_array.c @@ -0,0 +1,142 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/slab.h> +#include "vmci_handle_array.h" + +static size_t handle_arr_calc_size(size_t capacity) +{ + return sizeof(struct vmci_handle_arr) + + capacity * sizeof(struct vmci_handle); +} + +struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity) +{ + struct vmci_handle_arr *array; + + if (capacity == 0) + capacity = VMCI_HANDLE_ARRAY_DEFAULT_SIZE; + + array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC); + if (!array) + return NULL; + + array->capacity = capacity; + array->size = 0; + + return array; +} + +void vmci_handle_arr_destroy(struct vmci_handle_arr *array) +{ + kfree(array); +} + +void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr, + struct vmci_handle handle) +{ + struct vmci_handle_arr *array = *array_ptr; + + if (unlikely(array->size >= array->capacity)) { + /* reallocate. */ + struct vmci_handle_arr *new_array; + size_t new_capacity = array->capacity * VMCI_ARR_CAP_MULT; + size_t new_size = handle_arr_calc_size(new_capacity); + + new_array = krealloc(array, new_size, GFP_ATOMIC); + if (!new_array) + return; + + new_array->capacity = new_capacity; + *array_ptr = array = new_array; + } + + array->entries[array->size] = handle; + array->size++; +} + +/* + * Handle that was removed, VMCI_INVALID_HANDLE if entry not found. + */ +struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array, + struct vmci_handle entry_handle) +{ + struct vmci_handle handle = VMCI_INVALID_HANDLE; + size_t i; + + for (i = 0; i < array->size; i++) { + if (vmci_handle_is_equal(array->entries[i], entry_handle)) { + handle = array->entries[i]; + array->size--; + array->entries[i] = array->entries[array->size]; + array->entries[array->size] = VMCI_INVALID_HANDLE; + break; + } + } + + return handle; +} + +/* + * Handle that was removed, VMCI_INVALID_HANDLE if array was empty. 
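+ *
+ * A caller draining an array might do (sketch; "arr" being any
+ * non-NULL handle array):
+ *
+ *	while (vmci_handle_arr_get_size(arr) > 0) {
+ *		struct vmci_handle h = vmci_handle_arr_remove_tail(arr);
+ *		...
+ *	}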
+ */ +struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array) +{ + struct vmci_handle handle = VMCI_INVALID_HANDLE; + + if (array->size) { + array->size--; + handle = array->entries[array->size]; + array->entries[array->size] = VMCI_INVALID_HANDLE; + } + + return handle; +} + +/* + * Handle at given index, VMCI_INVALID_HANDLE if invalid index. + */ +struct vmci_handle +vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index) +{ + if (unlikely(index >= array->size)) + return VMCI_INVALID_HANDLE; + + return array->entries[index]; +} + +bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array, + struct vmci_handle entry_handle) +{ + size_t i; + + for (i = 0; i < array->size; i++) + if (vmci_handle_is_equal(array->entries[i], entry_handle)) + return true; + + return false; +} + +/* + * NULL if the array is empty. Otherwise, a pointer to the array + * of VMCI handles in the handle array. + */ +struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array) +{ + if (array->size) + return array->entries; + + return NULL; +} diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h new file mode 100644 index 000000000000..b5f3a7f98cf1 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_handle_array.h @@ -0,0 +1,52 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#ifndef _VMCI_HANDLE_ARRAY_H_ +#define _VMCI_HANDLE_ARRAY_H_ + +#include <linux/vmw_vmci_defs.h> +#include <linux/types.h> + +#define VMCI_HANDLE_ARRAY_DEFAULT_SIZE 4 +#define VMCI_ARR_CAP_MULT 2 /* Array capacity multiplier */ + +struct vmci_handle_arr { + size_t capacity; + size_t size; + struct vmci_handle entries[]; +}; + +struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity); +void vmci_handle_arr_destroy(struct vmci_handle_arr *array); +void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr, + struct vmci_handle handle); +struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array, + struct vmci_handle + entry_handle); +struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array); +struct vmci_handle +vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index); +bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array, + struct vmci_handle entry_handle); +struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array); + +static inline size_t vmci_handle_arr_get_size( + const struct vmci_handle_arr *array) +{ + return array->size; +} + + +#endif /* _VMCI_HANDLE_ARRAY_H_ */ diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c new file mode 100644 index 000000000000..d4722b3dc8ec --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_host.c @@ -0,0 +1,1043 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/moduleparam.h> +#include <linux/miscdevice.h> +#include <linux/interrupt.h> +#include <linux/highmem.h> +#include <linux/atomic.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/file.h> +#include <linux/init.h> +#include <linux/poll.h> +#include <linux/pci.h> +#include <linux/smp.h> +#include <linux/fs.h> +#include <linux/io.h> + +#include "vmci_handle_array.h" +#include "vmci_queue_pair.h" +#include "vmci_datagram.h" +#include "vmci_doorbell.h" +#include "vmci_resource.h" +#include "vmci_context.h" +#include "vmci_driver.h" +#include "vmci_event.h" + +#define VMCI_UTIL_NUM_RESOURCES 1 + +enum { + VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0, + VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1, +}; + +enum { + VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0, + VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1, + VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2, +}; + +/* + * VMCI driver initialization. This block can also be used to + * pass initial group membership etc. + */ +struct vmci_init_blk { + u32 cid; + u32 flags; +}; + +/* VMCIqueue_pairAllocInfo_VMToVM */ +struct vmci_qp_alloc_info_vmvm { + struct vmci_handle handle; + u32 peer; + u32 flags; + u64 produce_size; + u64 consume_size; + u64 produce_page_file; /* User VA. */ + u64 consume_page_file; /* User VA. */ + u64 produce_page_file_size; /* Size of the file name array. */ + u64 consume_page_file_size; /* Size of the file name array. */ + s32 result; + u32 _pad; +}; + +/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */ +struct vmci_set_notify_info { + u64 notify_uva; + s32 result; + u32 _pad; +}; + +/* + * Per-instance host state + */ +struct vmci_host_dev { + struct vmci_ctx *context; + int user_version; + enum vmci_obj_type ct_type; + struct mutex lock; /* Mutex lock for vmci context access */ +}; + +static struct vmci_ctx *host_context; +static bool vmci_host_device_initialized; +static atomic_t vmci_host_active_users = ATOMIC_INIT(0); + +/* + * Determines whether the VMCI host personality is + * available. Since the core functionality of the host driver is + * always present, all guests could possibly use the host + * personality. However, to minimize the deviation from the + * pre-unified driver state of affairs, we only consider the host + * device active if there is no active guest device or if there + * are VMX'en with active VMCI contexts using the host device. + */ +bool vmci_host_code_active(void) +{ + return vmci_host_device_initialized && + (!vmci_guest_code_active() || + atomic_read(&vmci_host_active_users) > 0); +} + +/* + * Called on open of /dev/vmci. 
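+ *
+ * The per-open state starts out as VMCIOBJ_NOT_SET and only becomes
+ * a usable VMCIOBJ_CONTEXT after a successful IOCTL_VMCI_INIT_CONTEXT;
+ * the ioctl handlers that operate on a context check for this before
+ * doing any work.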
+ */ +static int vmci_host_open(struct inode *inode, struct file *filp) +{ + struct vmci_host_dev *vmci_host_dev; + + vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL); + if (vmci_host_dev == NULL) + return -ENOMEM; + + vmci_host_dev->ct_type = VMCIOBJ_NOT_SET; + mutex_init(&vmci_host_dev->lock); + filp->private_data = vmci_host_dev; + + return 0; +} + +/* + * Called on close of /dev/vmci, most often when the process + * exits. + */ +static int vmci_host_close(struct inode *inode, struct file *filp) +{ + struct vmci_host_dev *vmci_host_dev = filp->private_data; + + if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) { + vmci_ctx_destroy(vmci_host_dev->context); + vmci_host_dev->context = NULL; + + /* + * The number of active contexts is used to track whether any + * VMX'en are using the host personality. It is incremented when + * a context is created through the IOCTL_VMCI_INIT_CONTEXT + * ioctl. + */ + atomic_dec(&vmci_host_active_users); + } + vmci_host_dev->ct_type = VMCIOBJ_NOT_SET; + + kfree(vmci_host_dev); + filp->private_data = NULL; + return 0; +} + +/* + * This is used to wake up the VMX when a VMCI call arrives, or + * to wake up select() or poll() at the next clock tick. + */ +static unsigned int vmci_host_poll(struct file *filp, poll_table *wait) +{ + struct vmci_host_dev *vmci_host_dev = filp->private_data; + struct vmci_ctx *context = vmci_host_dev->context; + unsigned int mask = 0; + + if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) { + /* Check for VMCI calls to this VM context. */ + if (wait) + poll_wait(filp, &context->host_context.wait_queue, + wait); + + spin_lock(&context->lock); + if (context->pending_datagrams > 0 || + vmci_handle_arr_get_size( + context->pending_doorbell_array) > 0) { + mask = POLLIN; + } + spin_unlock(&context->lock); + } + return mask; +} + +/* + * Copies the handles of a handle array into a user buffer, and + * returns the new length in userBufferSize. If the copy to the + * user buffer fails, the functions still returns VMCI_SUCCESS, + * but retval != 0. + */ +static int drv_cp_harray_to_user(void __user *user_buf_uva, + u64 *user_buf_size, + struct vmci_handle_arr *handle_array, + int *retval) +{ + u32 array_size = 0; + struct vmci_handle *handles; + + if (handle_array) + array_size = vmci_handle_arr_get_size(handle_array); + + if (array_size * sizeof(*handles) > *user_buf_size) + return VMCI_ERROR_MORE_DATA; + + *user_buf_size = array_size * sizeof(*handles); + if (*user_buf_size) + *retval = copy_to_user(user_buf_uva, + vmci_handle_arr_get_handles + (handle_array), *user_buf_size); + + return VMCI_SUCCESS; +} + +/* + * Sets up a given context for notify to work. Calls drv_map_bool_ptr() + * which maps the notify boolean in user VA in kernel space. + */ +static int vmci_host_setup_notify(struct vmci_ctx *context, + unsigned long uva) +{ + struct page *page; + int retval; + + if (context->notify_page) { + pr_devel("%s: Notify mechanism is already set up\n", __func__); + return VMCI_ERROR_DUPLICATE_ENTRY; + } + + /* + * We are using 'bool' internally, but let's make sure we explicit + * about the size. + */ + BUILD_BUG_ON(sizeof(bool) != sizeof(u8)); + if (!access_ok(VERIFY_WRITE, (void __user *)uva, sizeof(u8))) + return VMCI_ERROR_GENERIC; + + /* + * Lock physical page backing a given user VA. 
+ */ + down_read(¤t->mm->mmap_sem); + retval = get_user_pages(current, current->mm, + PAGE_ALIGN(uva), + 1, 1, 0, &page, NULL); + up_read(¤t->mm->mmap_sem); + if (retval != 1) + return VMCI_ERROR_GENERIC; + + /* + * Map the locked page and set up notify pointer. + */ + context->notify = kmap(page) + (uva & (PAGE_SIZE - 1)); + vmci_ctx_check_signal_notify(context); + + return VMCI_SUCCESS; +} + +static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev, + unsigned int cmd, void __user *uptr) +{ + if (cmd == IOCTL_VMCI_VERSION2) { + int __user *vptr = uptr; + if (get_user(vmci_host_dev->user_version, vptr)) + return -EFAULT; + } + + /* + * The basic logic here is: + * + * If the user sends in a version of 0 tell it our version. + * If the user didn't send in a version, tell it our version. + * If the user sent in an old version, tell it -its- version. + * If the user sent in an newer version, tell it our version. + * + * The rationale behind telling the caller its version is that + * Workstation 6.5 required that VMX and VMCI kernel module were + * version sync'd. All new VMX users will be programmed to + * handle the VMCI kernel module version. + */ + + if (vmci_host_dev->user_version > 0 && + vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) { + return vmci_host_dev->user_version; + } + + return VMCI_VERSION; +} + +#define vmci_ioctl_err(fmt, ...) \ + pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__) + +static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_init_blk init_block; + const struct cred *cred; + int retval; + + if (copy_from_user(&init_block, uptr, sizeof(init_block))) { + vmci_ioctl_err("error reading init block\n"); + return -EFAULT; + } + + mutex_lock(&vmci_host_dev->lock); + + if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) { + vmci_ioctl_err("received VMCI init on initialized handle\n"); + retval = -EINVAL; + goto out; + } + + if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) { + vmci_ioctl_err("unsupported VMCI restriction flag\n"); + retval = -EINVAL; + goto out; + } + + cred = get_current_cred(); + vmci_host_dev->context = vmci_ctx_create(init_block.cid, + init_block.flags, 0, + vmci_host_dev->user_version, + cred); + put_cred(cred); + if (IS_ERR(vmci_host_dev->context)) { + retval = PTR_ERR(vmci_host_dev->context); + vmci_ioctl_err("error initializing context\n"); + goto out; + } + + /* + * Copy cid to userlevel, we do this to allow the VMX + * to enforce its policy on cid generation. 
+ */ + init_block.cid = vmci_ctx_get_id(vmci_host_dev->context); + if (copy_to_user(uptr, &init_block, sizeof(init_block))) { + vmci_ctx_destroy(vmci_host_dev->context); + vmci_host_dev->context = NULL; + vmci_ioctl_err("error writing init block\n"); + retval = -EFAULT; + goto out; + } + + vmci_host_dev->ct_type = VMCIOBJ_CONTEXT; + atomic_inc(&vmci_host_active_users); + + retval = 0; + +out: + mutex_unlock(&vmci_host_dev->lock); + return retval; +} + +static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_datagram_snd_rcv_info send_info; + struct vmci_datagram *dg = NULL; + u32 cid; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&send_info, uptr, sizeof(send_info))) + return -EFAULT; + + if (send_info.len > VMCI_MAX_DG_SIZE) { + vmci_ioctl_err("datagram is too big (size=%d)\n", + send_info.len); + return -EINVAL; + } + + if (send_info.len < sizeof(*dg)) { + vmci_ioctl_err("datagram is too small (size=%d)\n", + send_info.len); + return -EINVAL; + } + + dg = kmalloc(send_info.len, GFP_KERNEL); + if (!dg) { + vmci_ioctl_err( + "cannot allocate memory to dispatch datagram\n"); + return -ENOMEM; + } + + if (copy_from_user(dg, (void __user *)(uintptr_t)send_info.addr, + send_info.len)) { + vmci_ioctl_err("error getting datagram\n"); + kfree(dg); + return -EFAULT; + } + + pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n", + dg->dst.context, dg->dst.resource, + dg->src.context, dg->src.resource, + (unsigned long long)dg->payload_size); + + /* Get source context id. */ + cid = vmci_ctx_get_id(vmci_host_dev->context); + send_info.result = vmci_datagram_dispatch(cid, dg, true); + kfree(dg); + + return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0; +} + +static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_datagram_snd_rcv_info recv_info; + struct vmci_datagram *dg = NULL; + int retval; + size_t size; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&recv_info, uptr, sizeof(recv_info))) + return -EFAULT; + + size = recv_info.len; + recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context, + &size, &dg); + + if (recv_info.result >= VMCI_SUCCESS) { + void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr; + retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg)); + kfree(dg); + if (retval != 0) + return -EFAULT; + } + + return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? 
-EFAULT : 0; +} + +static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_handle handle; + int vmci_status; + int __user *retptr; + u32 cid; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + cid = vmci_ctx_get_id(vmci_host_dev->context); + + if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) { + struct vmci_qp_alloc_info_vmvm alloc_info; + struct vmci_qp_alloc_info_vmvm __user *info = uptr; + + if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info))) + return -EFAULT; + + handle = alloc_info.handle; + retptr = &info->result; + + vmci_status = vmci_qp_broker_alloc(alloc_info.handle, + alloc_info.peer, + alloc_info.flags, + VMCI_NO_PRIVILEGE_FLAGS, + alloc_info.produce_size, + alloc_info.consume_size, + NULL, + vmci_host_dev->context); + + if (vmci_status == VMCI_SUCCESS) + vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE; + } else { + struct vmci_qp_alloc_info alloc_info; + struct vmci_qp_alloc_info __user *info = uptr; + struct vmci_qp_page_store page_store; + + if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info))) + return -EFAULT; + + handle = alloc_info.handle; + retptr = &info->result; + + page_store.pages = alloc_info.ppn_va; + page_store.len = alloc_info.num_ppns; + + vmci_status = vmci_qp_broker_alloc(alloc_info.handle, + alloc_info.peer, + alloc_info.flags, + VMCI_NO_PRIVILEGE_FLAGS, + alloc_info.produce_size, + alloc_info.consume_size, + &page_store, + vmci_host_dev->context); + } + + if (put_user(vmci_status, retptr)) { + if (vmci_status >= VMCI_SUCCESS) { + vmci_status = vmci_qp_broker_detach(handle, + vmci_host_dev->context); + } + return -EFAULT; + } + + return 0; +} + +static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_qp_set_va_info set_va_info; + struct vmci_qp_set_va_info __user *info = uptr; + s32 result; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) { + vmci_ioctl_err("is not allowed\n"); + return -EINVAL; + } + + if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info))) + return -EFAULT; + + if (set_va_info.va) { + /* + * VMX is passing down a new VA for the queue + * pair mapping. + */ + result = vmci_qp_broker_map(set_va_info.handle, + vmci_host_dev->context, + set_va_info.va); + } else { + /* + * The queue pair is about to be unmapped by + * the VMX. + */ + result = vmci_qp_broker_unmap(set_va_info.handle, + vmci_host_dev->context, 0); + } + + return put_user(result, &info->result) ? -EFAULT : 0; +} + +static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_qp_page_file_info page_file_info; + struct vmci_qp_page_file_info __user *info = uptr; + s32 result; + + if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP || + vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) { + vmci_ioctl_err("not supported on this VMX (version=%d)\n", + vmci_host_dev->user_version); + return -EINVAL; + } + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&page_file_info, uptr, sizeof(*info))) + return -EFAULT; + + /* + * Communicate success pre-emptively to the caller. 
Note that the + * basic premise is that it is incumbent upon the caller not to look at + * the info.result field until after the ioctl() returns. And then, + * only if the ioctl() result indicates no error. We send up the + * SUCCESS status before calling SetPageStore() store because failing + * to copy up the result code means unwinding the SetPageStore(). + * + * It turns out the logic to unwind a SetPageStore() opens a can of + * worms. For example, if a host had created the queue_pair and a + * guest attaches and SetPageStore() is successful but writing success + * fails, then ... the host has to be stopped from writing (anymore) + * data into the queue_pair. That means an additional test in the + * VMCI_Enqueue() code path. Ugh. + */ + + if (put_user(VMCI_SUCCESS, &info->result)) { + /* + * In this case, we can't write a result field of the + * caller's info block. So, we don't even try to + * SetPageStore(). + */ + return -EFAULT; + } + + result = vmci_qp_broker_set_page_store(page_file_info.handle, + page_file_info.produce_va, + page_file_info.consume_va, + vmci_host_dev->context); + if (result < VMCI_SUCCESS) { + if (put_user(result, &info->result)) { + /* + * Note that in this case the SetPageStore() + * call failed but we were unable to + * communicate that to the caller (because the + * copy_to_user() call failed). So, if we + * simply return an error (in this case + * -EFAULT) then the caller will know that the + * SetPageStore failed even though we couldn't + * put the result code in the result field and + * indicate exactly why it failed. + * + * That says nothing about the issue where we + * were once able to write to the caller's info + * memory and now can't. Something more + * serious is probably going on than the fact + * that SetPageStore() didn't work. + */ + return -EFAULT; + } + } + + return 0; +} + +static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_qp_dtch_info detach_info; + struct vmci_qp_dtch_info __user *info = uptr; + s32 result; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&detach_info, uptr, sizeof(detach_info))) + return -EFAULT; + + result = vmci_qp_broker_detach(detach_info.handle, + vmci_host_dev->context); + if (result == VMCI_SUCCESS && + vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) { + result = VMCI_SUCCESS_LAST_DETACH; + } + + return put_user(result, &info->result) ? -EFAULT : 0; +} + +static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_ctx_info ar_info; + struct vmci_ctx_info __user *info = uptr; + s32 result; + u32 cid; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&ar_info, uptr, sizeof(ar_info))) + return -EFAULT; + + cid = vmci_ctx_get_id(vmci_host_dev->context); + result = vmci_ctx_add_notification(cid, ar_info.remote_cid); + + return put_user(result, &info->result) ? 
-EFAULT : 0; +} + +static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_ctx_info ar_info; + struct vmci_ctx_info __user *info = uptr; + u32 cid; + int result; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&ar_info, uptr, sizeof(ar_info))) + return -EFAULT; + + cid = vmci_ctx_get_id(vmci_host_dev->context); + result = vmci_ctx_remove_notification(cid, + ar_info.remote_cid); + + return put_user(result, &info->result) ? -EFAULT : 0; +} + +static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_ctx_chkpt_buf_info get_info; + u32 cid; + void *cpt_buf; + int retval; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&get_info, uptr, sizeof(get_info))) + return -EFAULT; + + cid = vmci_ctx_get_id(vmci_host_dev->context); + get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type, + &get_info.buf_size, &cpt_buf); + if (get_info.result == VMCI_SUCCESS && get_info.buf_size) { + void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf; + retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size); + kfree(cpt_buf); + + if (retval) + return -EFAULT; + } + + return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0; +} + +static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_ctx_chkpt_buf_info set_info; + u32 cid; + void *cpt_buf; + int retval; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&set_info, uptr, sizeof(set_info))) + return -EFAULT; + + cpt_buf = kmalloc(set_info.buf_size, GFP_KERNEL); + if (!cpt_buf) { + vmci_ioctl_err( + "cannot allocate memory to set cpt state (type=%d)\n", + set_info.cpt_type); + return -ENOMEM; + } + + if (copy_from_user(cpt_buf, (void __user *)(uintptr_t)set_info.cpt_buf, + set_info.buf_size)) { + retval = -EFAULT; + goto out; + } + + cid = vmci_ctx_get_id(vmci_host_dev->context); + set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type, + set_info.buf_size, cpt_buf); + + retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0; + +out: + kfree(cpt_buf); + return retval; +} + +static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + u32 __user *u32ptr = uptr; + + return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0; +} + +static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_set_notify_info notify_info; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(¬ify_info, uptr, sizeof(notify_info))) + return -EFAULT; + + if (notify_info.notify_uva) { + notify_info.result = + vmci_host_setup_notify(vmci_host_dev->context, + notify_info.notify_uva); + } else { + vmci_ctx_unset_notify(vmci_host_dev->context); + notify_info.result = VMCI_SUCCESS; + } + + return copy_to_user(uptr, ¬ify_info, sizeof(notify_info)) ? 
+ -EFAULT : 0; +} + +static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_dbell_notify_resource_info info; + u32 cid; + + if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) { + vmci_ioctl_err("invalid for current VMX versions\n"); + return -EINVAL; + } + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&info, uptr, sizeof(info))) + return -EFAULT; + + cid = vmci_ctx_get_id(vmci_host_dev->context); + + switch (info.action) { + case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY: + if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) { + u32 flags = VMCI_NO_PRIVILEGE_FLAGS; + info.result = vmci_ctx_notify_dbell(cid, info.handle, + flags); + } else { + info.result = VMCI_ERROR_UNAVAILABLE; + } + break; + + case VMCI_NOTIFY_RESOURCE_ACTION_CREATE: + info.result = vmci_ctx_dbell_create(cid, info.handle); + break; + + case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY: + info.result = vmci_ctx_dbell_destroy(cid, info.handle); + break; + + default: + vmci_ioctl_err("got unknown action (action=%d)\n", + info.action); + info.result = VMCI_ERROR_INVALID_ARGS; + } + + return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0; +} + +static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_ctx_notify_recv_info info; + struct vmci_handle_arr *db_handle_array; + struct vmci_handle_arr *qp_handle_array; + void __user *ubuf; + u32 cid; + int retval = 0; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) { + vmci_ioctl_err("not supported for the current vmx version\n"); + return -EINVAL; + } + + if (copy_from_user(&info, uptr, sizeof(info))) + return -EFAULT; + + if ((info.db_handle_buf_size && !info.db_handle_buf_uva) || + (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) { + return -EINVAL; + } + + cid = vmci_ctx_get_id(vmci_host_dev->context); + + info.result = vmci_ctx_rcv_notifications_get(cid, + &db_handle_array, &qp_handle_array); + if (info.result != VMCI_SUCCESS) + return copy_to_user(uptr, &info, sizeof(info)) ? 
-EFAULT : 0; + + ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva; + info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size, + db_handle_array, &retval); + if (info.result == VMCI_SUCCESS && !retval) { + ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva; + info.result = drv_cp_harray_to_user(ubuf, + &info.qp_handle_buf_size, + qp_handle_array, &retval); + } + + if (!retval && copy_to_user(uptr, &info, sizeof(info))) + retval = -EFAULT; + + vmci_ctx_rcv_notifications_release(cid, + db_handle_array, qp_handle_array, + info.result == VMCI_SUCCESS && !retval); + + return retval; +} + +static long vmci_host_unlocked_ioctl(struct file *filp, + unsigned int iocmd, unsigned long ioarg) +{ +#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do { \ + char *name = __stringify(IOCTL_VMCI_ ## ioctl_name); \ + return vmci_host_do_ ## ioctl_fn( \ + vmci_host_dev, name, uptr); \ + } while (0) + + struct vmci_host_dev *vmci_host_dev = filp->private_data; + void __user *uptr = (void __user *)ioarg; + + switch (iocmd) { + case IOCTL_VMCI_INIT_CONTEXT: + VMCI_DO_IOCTL(INIT_CONTEXT, init_context); + case IOCTL_VMCI_DATAGRAM_SEND: + VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram); + case IOCTL_VMCI_DATAGRAM_RECEIVE: + VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram); + case IOCTL_VMCI_QUEUEPAIR_ALLOC: + VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair); + case IOCTL_VMCI_QUEUEPAIR_SETVA: + VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva); + case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE: + VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf); + case IOCTL_VMCI_QUEUEPAIR_DETACH: + VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach); + case IOCTL_VMCI_CTX_ADD_NOTIFICATION: + VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify); + case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION: + VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify); + case IOCTL_VMCI_CTX_GET_CPT_STATE: + VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state); + case IOCTL_VMCI_CTX_SET_CPT_STATE: + VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state); + case IOCTL_VMCI_GET_CONTEXT_ID: + VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id); + case IOCTL_VMCI_SET_NOTIFY: + VMCI_DO_IOCTL(SET_NOTIFY, set_notify); + case IOCTL_VMCI_NOTIFY_RESOURCE: + VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource); + case IOCTL_VMCI_NOTIFICATIONS_RECEIVE: + VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications); + + case IOCTL_VMCI_VERSION: + case IOCTL_VMCI_VERSION2: + return vmci_host_get_version(vmci_host_dev, iocmd, uptr); + + default: + pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd); + return -EINVAL; + } + +#undef VMCI_DO_IOCTL +} + +static const struct file_operations vmuser_fops = { + .owner = THIS_MODULE, + .open = vmci_host_open, + .release = vmci_host_close, + .poll = vmci_host_poll, + .unlocked_ioctl = vmci_host_unlocked_ioctl, + .compat_ioctl = vmci_host_unlocked_ioctl, +}; + +static struct miscdevice vmci_host_miscdev = { + .name = "vmci", + .minor = MISC_DYNAMIC_MINOR, + .fops = &vmuser_fops, +}; + +int __init vmci_host_init(void) +{ + int error; + + host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID, + VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS, + -1, VMCI_VERSION, NULL); + if (IS_ERR(host_context)) { + error = PTR_ERR(host_context); + pr_warn("Failed to initialize VMCIContext (error%d)\n", + error); + return error; + } + + error = misc_register(&vmci_host_miscdev); + if (error) { + pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n", + vmci_host_miscdev.name, + MISC_MAJOR, vmci_host_miscdev.minor, + error); + pr_warn("Unable to initialize 
host personality\n"); + vmci_ctx_destroy(host_context); + return error; + } + + pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n", + vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor); + + vmci_host_device_initialized = true; + return 0; +} + +void __exit vmci_host_exit(void) +{ + int error; + + vmci_host_device_initialized = false; + + error = misc_deregister(&vmci_host_miscdev); + if (error) + pr_warn("Error unregistering character device: %d\n", error); + + vmci_ctx_destroy(host_context); + vmci_qp_broker_exit(); + + pr_debug("VMCI host driver module unloaded\n"); +} diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c new file mode 100644 index 000000000000..d94245dbd765 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -0,0 +1,3425 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/highmem.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pagemap.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/socket.h> +#include <linux/wait.h> +#include <linux/vmalloc.h> + +#include "vmci_handle_array.h" +#include "vmci_queue_pair.h" +#include "vmci_datagram.h" +#include "vmci_resource.h" +#include "vmci_context.h" +#include "vmci_driver.h" +#include "vmci_event.h" +#include "vmci_route.h" + +/* + * In the following, we will distinguish between two kinds of VMX processes - + * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized + * VMCI page files in the VMX and supporting VM to VM communication and the + * newer ones that use the guest memory directly. We will in the following + * refer to the older VMX versions as old-style VMX'en, and the newer ones as + * new-style VMX'en. + * + * The state transition datagram is as follows (the VMCIQPB_ prefix has been + * removed for readability) - see below for more details on the transtions: + * + * -------------- NEW ------------- + * | | + * \_/ \_/ + * CREATED_NO_MEM <-----------------> CREATED_MEM + * | | | + * | o-----------------------o | + * | | | + * \_/ \_/ \_/ + * ATTACHED_NO_MEM <----------------> ATTACHED_MEM + * | | | + * | o----------------------o | + * | | | + * \_/ \_/ \_/ + * SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM + * | | + * | | + * -------------> gone <------------- + * + * In more detail. When a VMCI queue pair is first created, it will be in the + * VMCIQPB_NEW state. It will then move into one of the following states: + * + * - VMCIQPB_CREATED_NO_MEM: this state indicates that either: + * + * - the created was performed by a host endpoint, in which case there is + * no backing memory yet. + * + * - the create was initiated by an old-style VMX, that uses + * vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at + * a later point in time. This state can be distinguished from the one + * above by the context ID of the creator. 
A host side is not allowed to + * attach until the page store has been set. + * + * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair + * is created by a VMX using the queue pair device backend that + * sets the UVAs of the queue pair immediately and stores the + * information for later attachers. At this point, it is ready for + * the host side to attach to it. + * + * Once the queue pair is in one of the created states (with the exception of + * the case mentioned for older VMX'en above), it is possible to attach to the + * queue pair. Again we have two new states possible: + * + * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following + * paths: + * + * - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue + * pair, and attaches to a queue pair previously created by the host side. + * + * - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair + * already created by a guest. + * + * - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls + * vmci_qp_broker_set_page_store (see below). + * + * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the + * VMCIQPB_CREATED_NO_MEM due to a host side create, an old-style VMX will + * bring the queue pair into this state. Once vmci_qp_broker_set_page_store + * is called to register the user memory, the VMCIQPB_ATTACH_MEM state + * will be entered. + * + * From the attached queue pair, the queue pair can enter the shutdown states + * when either side of the queue pair detaches. If the guest side detaches + * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where + * the content of the queue pair will no longer be available. If the host + * side detaches first, the queue pair will either enter the + * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or + * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped + * (e.g., the host detaches while a guest is stunned). + * + * New-style VMX'en will also unmap guest memory, if the guest is + * quiesced, e.g., during a snapshot operation. In that case, the guest + * memory will no longer be available, and the queue pair will transition from + * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more, + * in which case the queue pair will transition from the *_NO_MEM state at that + * point back to the *_MEM state. Note that the *_NO_MEM state may have changed, + * since the peer may have either attached or detached in the meantime. The + * values are laid out such that ++ on a state will move from a *_NO_MEM to a + * *_MEM state, and vice versa. + */ + +/* + * VMCIMemcpy{To,From}QueueFunc() prototypes. Functions of these + * types are passed around to enqueue and dequeue routines. Note that + * often the functions passed are simply wrappers around memcpy + * itself. + * + * Note: In order for the memcpy typedefs to be compatible with the VMKernel, + * there's an unused last parameter for the hosted side. In + * ESX, that parameter holds a buffer type. + */ +typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue, + u64 queue_offset, const void *src, + size_t src_offset, size_t size); +typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset, + const struct vmci_queue *queue, + u64 queue_offset, size_t size); + +/* The Kernel specific component of the struct vmci_queue structure. */ +struct vmci_queue_kern_if { + struct page **page; + struct page **header_page; + void *va; + struct mutex __mutex; /* Protects the queue. 
*/ + struct mutex *mutex; /* Shared by producer and consumer queues. */ + bool host; + size_t num_pages; + bool mapped; +}; + +/* + * This structure is opaque to the clients. + */ +struct vmci_qp { + struct vmci_handle handle; + struct vmci_queue *produce_q; + struct vmci_queue *consume_q; + u64 produce_q_size; + u64 consume_q_size; + u32 peer; + u32 flags; + u32 priv_flags; + bool guest_endpoint; + unsigned int blocked; + unsigned int generation; + wait_queue_head_t event; +}; + +enum qp_broker_state { + VMCIQPB_NEW, + VMCIQPB_CREATED_NO_MEM, + VMCIQPB_CREATED_MEM, + VMCIQPB_ATTACHED_NO_MEM, + VMCIQPB_ATTACHED_MEM, + VMCIQPB_SHUTDOWN_NO_MEM, + VMCIQPB_SHUTDOWN_MEM, + VMCIQPB_GONE +}; + +#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \ + _qpb->state == VMCIQPB_ATTACHED_MEM || \ + _qpb->state == VMCIQPB_SHUTDOWN_MEM) + +/* + * In the queue pair broker, we always use the guest point of view for + * the produce and consume queue values and references, e.g., the + * produce queue size stored is the guests produce queue size. The + * host endpoint will need to swap these around. The only exception is + * the local queue pairs on the host, in which case the host endpoint + * that creates the queue pair will have the right orientation, and + * the attaching host endpoint will need to swap. + */ +struct qp_entry { + struct list_head list_item; + struct vmci_handle handle; + u32 peer; + u32 flags; + u64 produce_size; + u64 consume_size; + u32 ref_count; +}; + +struct qp_broker_entry { + struct vmci_resource resource; + struct qp_entry qp; + u32 create_id; + u32 attach_id; + enum qp_broker_state state; + bool require_trusted_attach; + bool created_by_trusted; + bool vmci_page_files; /* Created by VMX using VMCI page files */ + struct vmci_queue *produce_q; + struct vmci_queue *consume_q; + struct vmci_queue_header saved_produce_q; + struct vmci_queue_header saved_consume_q; + vmci_event_release_cb wakeup_cb; + void *client_data; + void *local_mem; /* Kernel memory for local queue pair */ +}; + +struct qp_guest_endpoint { + struct vmci_resource resource; + struct qp_entry qp; + u64 num_ppns; + void *produce_q; + void *consume_q; + struct ppn_set ppn_set; +}; + +struct qp_list { + struct list_head head; + struct mutex mutex; /* Protect queue list. */ +}; + +static struct qp_list qp_broker_list = { + .head = LIST_HEAD_INIT(qp_broker_list.head), + .mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex), +}; + +static struct qp_list qp_guest_endpoints = { + .head = LIST_HEAD_INIT(qp_guest_endpoints.head), + .mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex), +}; + +#define INVALID_VMCI_GUEST_MEM_ID 0 +#define QPE_NUM_PAGES(_QPE) ((u32) \ + (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \ + DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2)) + + +/* + * Frees kernel VA space for a given queue and its queue header, and + * frees physical data pages. + */ +static void qp_free_queue(void *q, u64 size) +{ + struct vmci_queue *queue = q; + + if (queue) { + u64 i = DIV_ROUND_UP(size, PAGE_SIZE); + + if (queue->kernel_if->mapped) { + vunmap(queue->kernel_if->va); + queue->kernel_if->va = NULL; + } + + while (i) + __free_page(queue->kernel_if->page[--i]); + + vfree(queue->q_header); + } +} + +/* + * Allocates kernel VA space of specified size, plus space for the + * queue structure/kernel interface and the queue header. Allocates + * physical pages for the queue data pages. 
+ * + * PAGE m: struct vmci_queue_header (struct vmci_queue->q_header) + * PAGE m+1: struct vmci_queue + * PAGE m+1+q: struct vmci_queue_kern_if (struct vmci_queue->kernel_if) + * PAGE n-size: Data pages (struct vmci_queue->kernel_if->page[]) + */ +static void *qp_alloc_queue(u64 size, u32 flags) +{ + u64 i; + struct vmci_queue *queue; + struct vmci_queue_header *q_header; + const u64 num_data_pages = DIV_ROUND_UP(size, PAGE_SIZE); + const uint queue_size = + PAGE_SIZE + + sizeof(*queue) + sizeof(*(queue->kernel_if)) + + num_data_pages * sizeof(*(queue->kernel_if->page)); + + q_header = vmalloc(queue_size); + if (!q_header) + return NULL; + + queue = (void *)q_header + PAGE_SIZE; + queue->q_header = q_header; + queue->saved_header = NULL; + queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1); + queue->kernel_if->header_page = NULL; /* Unused in guest. */ + queue->kernel_if->page = (struct page **)(queue->kernel_if + 1); + queue->kernel_if->host = false; + queue->kernel_if->va = NULL; + queue->kernel_if->mapped = false; + + for (i = 0; i < num_data_pages; i++) { + queue->kernel_if->page[i] = alloc_pages(GFP_KERNEL, 0); + if (!queue->kernel_if->page[i]) + goto fail; + } + + if (vmci_qp_pinned(flags)) { + queue->kernel_if->va = + vmap(queue->kernel_if->page, num_data_pages, VM_MAP, + PAGE_KERNEL); + if (!queue->kernel_if->va) + goto fail; + + queue->kernel_if->mapped = true; + } + + return (void *)queue; + + fail: + qp_free_queue(queue, i * PAGE_SIZE); + return NULL; +} + +/* + * Copies from a given buffer or iovector to a VMCI Queue. Uses + * kmap()/kunmap() to dynamically map/unmap required portions of the queue + * by traversing the offset -> page translation structure for the queue. + * Assumes that offset + size does not wrap around in the queue. + */ +static int __qp_memcpy_to_queue(struct vmci_queue *queue, + u64 queue_offset, + const void *src, + size_t size, + bool is_iovec) +{ + struct vmci_queue_kern_if *kernel_if = queue->kernel_if; + size_t bytes_copied = 0; + + while (bytes_copied < size) { + u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE; + size_t page_offset = + (queue_offset + bytes_copied) & (PAGE_SIZE - 1); + void *va; + size_t to_copy; + + if (!kernel_if->mapped) + va = kmap(kernel_if->page[page_index]); + else + va = (void *)((u8 *)kernel_if->va + + (page_index * PAGE_SIZE)); + + if (size - bytes_copied > PAGE_SIZE - page_offset) + /* Enough payload to fill up from this page. */ + to_copy = PAGE_SIZE - page_offset; + else + to_copy = size - bytes_copied; + + if (is_iovec) { + struct iovec *iov = (struct iovec *)src; + int err; + + /* The iovec will track bytes_copied internally. */ + err = memcpy_fromiovec((u8 *)va + page_offset, + iov, to_copy); + if (err != 0) { + kunmap(kernel_if->page[page_index]); + return VMCI_ERROR_INVALID_ARGS; + } + } else { + memcpy((u8 *)va + page_offset, + (u8 *)src + bytes_copied, to_copy); + } + + bytes_copied += to_copy; + if (!kernel_if->mapped) + kunmap(kernel_if->page[page_index]); + } + + return VMCI_SUCCESS; +} + +/* + * Copies to a given buffer or iovector from a VMCI Queue. Uses + * kmap()/kunmap() to dynamically map/unmap required portions of the queue + * by traversing the offset -> page translation structure for the queue. + * Assumes that offset + size does not wrap around in the queue. 
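+ *
+ * For example, with 4 KiB pages (an illustrative assumption, not part
+ * of the patch), copying 0x1000 bytes starting at queue_offset 0x1800
+ * takes two iterations of the loop below: page[1] at page_offset 0x800
+ * for the first 0x800 bytes, then page[2] at page_offset 0 for the
+ * remaining 0x800 bytes.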
+ */ +static int __qp_memcpy_from_queue(void *dest, + const struct vmci_queue *queue, + u64 queue_offset, + size_t size, + bool is_iovec) +{ + struct vmci_queue_kern_if *kernel_if = queue->kernel_if; + size_t bytes_copied = 0; + + while (bytes_copied < size) { + u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE; + size_t page_offset = + (queue_offset + bytes_copied) & (PAGE_SIZE - 1); + void *va; + size_t to_copy; + + if (!kernel_if->mapped) + va = kmap(kernel_if->page[page_index]); + else + va = (void *)((u8 *)kernel_if->va + + (page_index * PAGE_SIZE)); + + if (size - bytes_copied > PAGE_SIZE - page_offset) + /* Enough payload to fill up this page. */ + to_copy = PAGE_SIZE - page_offset; + else + to_copy = size - bytes_copied; + + if (is_iovec) { + struct iovec *iov = (struct iovec *)dest; + int err; + + /* The iovec will track bytes_copied internally. */ + err = memcpy_toiovec(iov, (u8 *)va + page_offset, + to_copy); + if (err != 0) { + kunmap(kernel_if->page[page_index]); + return VMCI_ERROR_INVALID_ARGS; + } + } else { + memcpy((u8 *)dest + bytes_copied, + (u8 *)va + page_offset, to_copy); + } + + bytes_copied += to_copy; + if (!kernel_if->mapped) + kunmap(kernel_if->page[page_index]); + } + + return VMCI_SUCCESS; +} + +/* + * Allocates two list of PPNs --- one for the pages in the produce queue, + * and the other for the pages in the consume queue. Intializes the list + * of PPNs with the page frame numbers of the KVA for the two queues (and + * the queue headers). + */ +static int qp_alloc_ppn_set(void *prod_q, + u64 num_produce_pages, + void *cons_q, + u64 num_consume_pages, struct ppn_set *ppn_set) +{ + u32 *produce_ppns; + u32 *consume_ppns; + struct vmci_queue *produce_q = prod_q; + struct vmci_queue *consume_q = cons_q; + u64 i; + + if (!produce_q || !num_produce_pages || !consume_q || + !num_consume_pages || !ppn_set) + return VMCI_ERROR_INVALID_ARGS; + + if (ppn_set->initialized) + return VMCI_ERROR_ALREADY_EXISTS; + + produce_ppns = + kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL); + if (!produce_ppns) + return VMCI_ERROR_NO_MEM; + + consume_ppns = + kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL); + if (!consume_ppns) { + kfree(produce_ppns); + return VMCI_ERROR_NO_MEM; + } + + produce_ppns[0] = page_to_pfn(vmalloc_to_page(produce_q->q_header)); + for (i = 1; i < num_produce_pages; i++) { + unsigned long pfn; + + produce_ppns[i] = + page_to_pfn(produce_q->kernel_if->page[i - 1]); + pfn = produce_ppns[i]; + + /* Fail allocation if PFN isn't supported by hypervisor. */ + if (sizeof(pfn) > sizeof(*produce_ppns) + && pfn != produce_ppns[i]) + goto ppn_error; + } + + consume_ppns[0] = page_to_pfn(vmalloc_to_page(consume_q->q_header)); + for (i = 1; i < num_consume_pages; i++) { + unsigned long pfn; + + consume_ppns[i] = + page_to_pfn(consume_q->kernel_if->page[i - 1]); + pfn = consume_ppns[i]; + + /* Fail allocation if PFN isn't supported by hypervisor. */ + if (sizeof(pfn) > sizeof(*consume_ppns) + && pfn != consume_ppns[i]) + goto ppn_error; + } + + ppn_set->num_produce_pages = num_produce_pages; + ppn_set->num_consume_pages = num_consume_pages; + ppn_set->produce_ppns = produce_ppns; + ppn_set->consume_ppns = consume_ppns; + ppn_set->initialized = true; + return VMCI_SUCCESS; + + ppn_error: + kfree(produce_ppns); + kfree(consume_ppns); + return VMCI_ERROR_INVALID_ARGS; +} + +/* + * Frees the two list of PPNs for a queue pair. 
+ */ +static void qp_free_ppn_set(struct ppn_set *ppn_set) +{ + if (ppn_set->initialized) { + /* Do not call these functions on NULL inputs. */ + kfree(ppn_set->produce_ppns); + kfree(ppn_set->consume_ppns); + } + memset(ppn_set, 0, sizeof(*ppn_set)); +} + +/* + * Populates the list of PPNs in the hypercall structure with the PPNS + * of the produce queue and the consume queue. + */ +static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set) +{ + memcpy(call_buf, ppn_set->produce_ppns, + ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns)); + memcpy(call_buf + + ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns), + ppn_set->consume_ppns, + ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns)); + + return VMCI_SUCCESS; +} + +static int qp_memcpy_to_queue(struct vmci_queue *queue, + u64 queue_offset, + const void *src, size_t src_offset, size_t size) +{ + return __qp_memcpy_to_queue(queue, queue_offset, + (u8 *)src + src_offset, size, false); +} + +static int qp_memcpy_from_queue(void *dest, + size_t dest_offset, + const struct vmci_queue *queue, + u64 queue_offset, size_t size) +{ + return __qp_memcpy_from_queue((u8 *)dest + dest_offset, + queue, queue_offset, size, false); +} + +/* + * Copies from a given iovec from a VMCI Queue. + */ +static int qp_memcpy_to_queue_iov(struct vmci_queue *queue, + u64 queue_offset, + const void *src, + size_t src_offset, size_t size) +{ + + /* + * We ignore src_offset because src is really a struct iovec * and will + * maintain offset internally. + */ + return __qp_memcpy_to_queue(queue, queue_offset, src, size, true); +} + +/* + * Copies to a given iovec from a VMCI Queue. + */ +static int qp_memcpy_from_queue_iov(void *dest, + size_t dest_offset, + const struct vmci_queue *queue, + u64 queue_offset, size_t size) +{ + /* + * We ignore dest_offset because dest is really a struct iovec * and + * will maintain offset internally. + */ + return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true); +} + +/* + * Allocates kernel VA space of specified size plus space for the queue + * and kernel interface. This is different from the guest queue allocator, + * because we do not allocate our own queue header/data pages here but + * share those of the guest. + */ +static struct vmci_queue *qp_host_alloc_queue(u64 size) +{ + struct vmci_queue *queue; + const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; + const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); + const size_t queue_page_size = + num_pages * sizeof(*queue->kernel_if->page); + + queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL); + if (queue) { + queue->q_header = NULL; + queue->saved_header = NULL; + queue->kernel_if = + (struct vmci_queue_kern_if *)((u8 *)queue + + sizeof(*queue)); + queue->kernel_if->host = true; + queue->kernel_if->mutex = NULL; + queue->kernel_if->num_pages = num_pages; + queue->kernel_if->header_page = + (struct page **)((u8 *)queue + queue_size); + queue->kernel_if->page = &queue->kernel_if->header_page[1]; + queue->kernel_if->va = NULL; + queue->kernel_if->mapped = false; + } + + return queue; +} + +/* + * Frees kernel memory for a given queue (header plus translation + * structure). + */ +static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size) +{ + kfree(queue); +} + +/* + * Initialize the mutex for the pair of queues. This mutex is used to + * protect the q_header and the buffer from changing out from under any + * users of either queue. 
Of course, it's only any good if the mutexes + * are actually acquired. Queue structure must lie on non-paged memory + * or we cannot guarantee access to the mutex. + */ +static void qp_init_queue_mutex(struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + /* + * Only the host queue has shared state - the guest queues do not + * need to synchronize access using a queue mutex. + */ + + if (produce_q->kernel_if->host) { + produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; + consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; + mutex_init(produce_q->kernel_if->mutex); + } +} + +/* + * Cleans up the mutex for the pair of queues. + */ +static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + if (produce_q->kernel_if->host) { + produce_q->kernel_if->mutex = NULL; + consume_q->kernel_if->mutex = NULL; + } +} + +/* + * Acquire the mutex for the queue. Note that the produce_q and + * the consume_q share a mutex. So, only one of the two need to + * be passed in to this routine. Either will work just fine. + */ +static void qp_acquire_queue_mutex(struct vmci_queue *queue) +{ + if (queue->kernel_if->host) + mutex_lock(queue->kernel_if->mutex); +} + +/* + * Release the mutex for the queue. Note that the produce_q and + * the consume_q share a mutex. So, only one of the two need to + * be passed in to this routine. Either will work just fine. + */ +static void qp_release_queue_mutex(struct vmci_queue *queue) +{ + if (queue->kernel_if->host) + mutex_unlock(queue->kernel_if->mutex); +} + +/* + * Helper function to release pages in the PageStoreAttachInfo + * previously obtained using get_user_pages. + */ +static void qp_release_pages(struct page **pages, + u64 num_pages, bool dirty) +{ + int i; + + for (i = 0; i < num_pages; i++) { + if (dirty) + set_page_dirty(pages[i]); + + page_cache_release(pages[i]); + pages[i] = NULL; + } +} + +/* + * Lock the user pages referenced by the {produce,consume}Buffer + * struct into memory and populate the {produce,consume}Pages + * arrays in the attach structure with them. + */ +static int qp_host_get_user_memory(u64 produce_uva, + u64 consume_uva, + struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + int retval; + int err = VMCI_SUCCESS; + + down_write(¤t->mm->mmap_sem); + retval = get_user_pages(current, + current->mm, + (uintptr_t) produce_uva, + produce_q->kernel_if->num_pages, + 1, 0, produce_q->kernel_if->header_page, NULL); + if (retval < produce_q->kernel_if->num_pages) { + pr_warn("get_user_pages(produce) failed (retval=%d)", retval); + qp_release_pages(produce_q->kernel_if->header_page, retval, + false); + err = VMCI_ERROR_NO_MEM; + goto out; + } + + retval = get_user_pages(current, + current->mm, + (uintptr_t) consume_uva, + consume_q->kernel_if->num_pages, + 1, 0, consume_q->kernel_if->header_page, NULL); + if (retval < consume_q->kernel_if->num_pages) { + pr_warn("get_user_pages(consume) failed (retval=%d)", retval); + qp_release_pages(consume_q->kernel_if->header_page, retval, + false); + qp_release_pages(produce_q->kernel_if->header_page, + produce_q->kernel_if->num_pages, false); + err = VMCI_ERROR_NO_MEM; + } + + out: + up_write(¤t->mm->mmap_sem); + + return err; +} + +/* + * Registers the specification of the user pages used for backing a queue + * pair. Enough information to map in pages is stored in the OS specific + * part of the struct vmci_queue structure. 
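+ *
+ * The single UVA range supplied by the VMX covers both queues: the
+ * produce queue (its header page followed by its data pages) comes
+ * first and the consume queue follows immediately after, so the
+ * consume UVA is the produce UVA offset by the produce queue's
+ * num_pages * PAGE_SIZE.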
+ */ +static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store, + struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + u64 produce_uva; + u64 consume_uva; + + /* + * The new style and the old style mapping only differs in + * that we either get a single or two UVAs, so we split the + * single UVA range at the appropriate spot. + */ + produce_uva = page_store->pages; + consume_uva = page_store->pages + + produce_q->kernel_if->num_pages * PAGE_SIZE; + return qp_host_get_user_memory(produce_uva, consume_uva, produce_q, + consume_q); +} + +/* + * Releases and removes the references to user pages stored in the attach + * struct. Pages are released from the page cache and may become + * swappable again. + */ +static void qp_host_unregister_user_memory(struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + qp_release_pages(produce_q->kernel_if->header_page, + produce_q->kernel_if->num_pages, true); + memset(produce_q->kernel_if->header_page, 0, + sizeof(*produce_q->kernel_if->header_page) * + produce_q->kernel_if->num_pages); + qp_release_pages(consume_q->kernel_if->header_page, + consume_q->kernel_if->num_pages, true); + memset(consume_q->kernel_if->header_page, 0, + sizeof(*consume_q->kernel_if->header_page) * + consume_q->kernel_if->num_pages); +} + +/* + * Once qp_host_register_user_memory has been performed on a + * queue, the queue pair headers can be mapped into the + * kernel. Once mapped, they must be unmapped with + * qp_host_unmap_queues prior to calling + * qp_host_unregister_user_memory. + * Pages are pinned. + */ +static int qp_host_map_queues(struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + int result; + + if (!produce_q->q_header || !consume_q->q_header) { + struct page *headers[2]; + + if (produce_q->q_header != consume_q->q_header) + return VMCI_ERROR_QUEUEPAIR_MISMATCH; + + if (produce_q->kernel_if->header_page == NULL || + *produce_q->kernel_if->header_page == NULL) + return VMCI_ERROR_UNAVAILABLE; + + headers[0] = *produce_q->kernel_if->header_page; + headers[1] = *consume_q->kernel_if->header_page; + + produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL); + if (produce_q->q_header != NULL) { + consume_q->q_header = + (struct vmci_queue_header *)((u8 *) + produce_q->q_header + + PAGE_SIZE); + result = VMCI_SUCCESS; + } else { + pr_warn("vmap failed\n"); + result = VMCI_ERROR_NO_MEM; + } + } else { + result = VMCI_SUCCESS; + } + + return result; +} + +/* + * Unmaps previously mapped queue pair headers from the kernel. + * Pages are unpinned. + */ +static int qp_host_unmap_queues(u32 gid, + struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + if (produce_q->q_header) { + if (produce_q->q_header < consume_q->q_header) + vunmap(produce_q->q_header); + else + vunmap(consume_q->q_header); + + produce_q->q_header = NULL; + consume_q->q_header = NULL; + } + + return VMCI_SUCCESS; +} + +/* + * Finds the entry in the list corresponding to a given handle. Assumes + * that the list is locked. + */ +static struct qp_entry *qp_list_find(struct qp_list *qp_list, + struct vmci_handle handle) +{ + struct qp_entry *entry; + + if (vmci_handle_is_invalid(handle)) + return NULL; + + list_for_each_entry(entry, &qp_list->head, list_item) { + if (vmci_handle_is_equal(entry->handle, handle)) + return entry; + } + + return NULL; +} + +/* + * Finds the entry in the list corresponding to a given handle. 
+ */ +static struct qp_guest_endpoint * +qp_guest_handle_to_entry(struct vmci_handle handle) +{ + struct qp_guest_endpoint *entry; + struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle); + + entry = qp ? container_of( + qp, struct qp_guest_endpoint, qp) : NULL; + return entry; +} + +/* + * Finds the entry in the list corresponding to a given handle. + */ +static struct qp_broker_entry * +qp_broker_handle_to_entry(struct vmci_handle handle) +{ + struct qp_broker_entry *entry; + struct qp_entry *qp = qp_list_find(&qp_broker_list, handle); + + entry = qp ? container_of( + qp, struct qp_broker_entry, qp) : NULL; + return entry; +} + +/* + * Dispatches a queue pair event message directly into the local event + * queue. + */ +static int qp_notify_peer_local(bool attach, struct vmci_handle handle) +{ + u32 context_id = vmci_get_context_id(); + struct vmci_event_qp ev; + + ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER); + ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_CONTEXT_RESOURCE_ID); + ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr); + ev.msg.event_data.event = + attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH; + ev.payload.peer_id = context_id; + ev.payload.handle = handle; + + return vmci_event_dispatch(&ev.msg.hdr); +} + +/* + * Allocates and initializes a qp_guest_endpoint structure. + * Allocates a queue_pair rid (and handle) iff the given entry has + * an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX + * are reserved handles. Assumes that the QP list mutex is held + * by the caller. + */ +static struct qp_guest_endpoint * +qp_guest_endpoint_create(struct vmci_handle handle, + u32 peer, + u32 flags, + u64 produce_size, + u64 consume_size, + void *produce_q, + void *consume_q) +{ + int result; + struct qp_guest_endpoint *entry; + /* One page each for the queue headers. */ + const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) + + DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2; + + if (vmci_handle_is_invalid(handle)) { + u32 context_id = vmci_get_context_id(); + + handle = vmci_make_handle(context_id, VMCI_INVALID_ID); + } + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (entry) { + entry->qp.peer = peer; + entry->qp.flags = flags; + entry->qp.produce_size = produce_size; + entry->qp.consume_size = consume_size; + entry->qp.ref_count = 0; + entry->num_ppns = num_ppns; + entry->produce_q = produce_q; + entry->consume_q = consume_q; + INIT_LIST_HEAD(&entry->qp.list_item); + + /* Add resource obj */ + result = vmci_resource_add(&entry->resource, + VMCI_RESOURCE_TYPE_QPAIR_GUEST, + handle); + entry->qp.handle = vmci_resource_handle(&entry->resource); + if ((result != VMCI_SUCCESS) || + qp_list_find(&qp_guest_endpoints, entry->qp.handle)) { + pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d", + handle.context, handle.resource, result); + kfree(entry); + entry = NULL; + } + } + return entry; +} + +/* + * Frees a qp_guest_endpoint structure. + */ +static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry) +{ + qp_free_ppn_set(&entry->ppn_set); + qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); + qp_free_queue(entry->produce_q, entry->qp.produce_size); + qp_free_queue(entry->consume_q, entry->qp.consume_size); + /* Unlink from resource hash table and free callback */ + vmci_resource_remove(&entry->resource); + + kfree(entry); +} + +/* + * Helper to make a queue_pairAlloc hypercall when the driver is + * supporting a guest device. 
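+ *
+ * The hypercall datagram is a struct vmci_qp_alloc_msg immediately
+ * followed by num_ppns 32-bit PPNs: first the PPNs of the produce
+ * queue (header page, then data pages), then those of the consume
+ * queue, as laid out by qp_populate_ppn_set().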
+ */ +static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry) +{ + struct vmci_qp_alloc_msg *alloc_msg; + size_t msg_size; + int result; + + if (!entry || entry->num_ppns <= 2) + return VMCI_ERROR_INVALID_ARGS; + + msg_size = sizeof(*alloc_msg) + + (size_t) entry->num_ppns * sizeof(u32); + alloc_msg = kmalloc(msg_size, GFP_KERNEL); + if (!alloc_msg) + return VMCI_ERROR_NO_MEM; + + alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_QUEUEPAIR_ALLOC); + alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE; + alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE; + alloc_msg->handle = entry->qp.handle; + alloc_msg->peer = entry->qp.peer; + alloc_msg->flags = entry->qp.flags; + alloc_msg->produce_size = entry->qp.produce_size; + alloc_msg->consume_size = entry->qp.consume_size; + alloc_msg->num_ppns = entry->num_ppns; + + result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg), + &entry->ppn_set); + if (result == VMCI_SUCCESS) + result = vmci_send_datagram(&alloc_msg->hdr); + + kfree(alloc_msg); + + return result; +} + +/* + * Helper to make a queue_pairDetach hypercall when the driver is + * supporting a guest device. + */ +static int qp_detatch_hypercall(struct vmci_handle handle) +{ + struct vmci_qp_detach_msg detach_msg; + + detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_QUEUEPAIR_DETACH); + detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE; + detach_msg.hdr.payload_size = sizeof(handle); + detach_msg.handle = handle; + + return vmci_send_datagram(&detach_msg.hdr); +} + +/* + * Adds the given entry to the list. Assumes that the list is locked. + */ +static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry) +{ + if (entry) + list_add(&entry->list_item, &qp_list->head); +} + +/* + * Removes the given entry from the list. Assumes that the list is locked. + */ +static void qp_list_remove_entry(struct qp_list *qp_list, + struct qp_entry *entry) +{ + if (entry) + list_del(&entry->list_item); +} + +/* + * Helper for VMCI queue_pair detach interface. Frees the physical + * pages for the queue pair. + */ +static int qp_detatch_guest_work(struct vmci_handle handle) +{ + int result; + struct qp_guest_endpoint *entry; + u32 ref_count = ~0; /* To avoid compiler warning below */ + + mutex_lock(&qp_guest_endpoints.mutex); + + entry = qp_guest_handle_to_entry(handle); + if (!entry) { + mutex_unlock(&qp_guest_endpoints.mutex); + return VMCI_ERROR_NOT_FOUND; + } + + if (entry->qp.flags & VMCI_QPFLAG_LOCAL) { + result = VMCI_SUCCESS; + + if (entry->qp.ref_count > 1) { + result = qp_notify_peer_local(false, handle); + /* + * We can fail to notify a local queuepair + * because we can't allocate. We still want + * to release the entry if that happens, so + * don't bail out yet. + */ + } + } else { + result = qp_detatch_hypercall(handle); + if (result < VMCI_SUCCESS) { + /* + * We failed to notify a non-local queuepair. + * That other queuepair might still be + * accessing the shared memory, so don't + * release the entry yet. It will get cleaned + * up by VMCIqueue_pair_Exit() if necessary + * (assuming we are going away, otherwise why + * did this fail?). + */ + + mutex_unlock(&qp_guest_endpoints.mutex); + return result; + } + } + + /* + * If we get here then we either failed to notify a local queuepair, or + * we succeeded in all cases. Release the entry if required. 
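+ * (The entry itself is only destroyed further below, once its
+ * ref_count has dropped to zero.)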
+ */ + + entry->qp.ref_count--; + if (entry->qp.ref_count == 0) + qp_list_remove_entry(&qp_guest_endpoints, &entry->qp); + + /* If we didn't remove the entry, this could change once we unlock. */ + if (entry) + ref_count = entry->qp.ref_count; + + mutex_unlock(&qp_guest_endpoints.mutex); + + if (ref_count == 0) + qp_guest_endpoint_destroy(entry); + + return result; +} + +/* + * This functions handles the actual allocation of a VMCI queue + * pair guest endpoint. Allocates physical pages for the queue + * pair. It makes OS dependent calls through generic wrappers. + */ +static int qp_alloc_guest_work(struct vmci_handle *handle, + struct vmci_queue **produce_q, + u64 produce_size, + struct vmci_queue **consume_q, + u64 consume_size, + u32 peer, + u32 flags, + u32 priv_flags) +{ + const u64 num_produce_pages = + DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1; + const u64 num_consume_pages = + DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1; + void *my_produce_q = NULL; + void *my_consume_q = NULL; + int result; + struct qp_guest_endpoint *queue_pair_entry = NULL; + + if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS) + return VMCI_ERROR_NO_ACCESS; + + mutex_lock(&qp_guest_endpoints.mutex); + + queue_pair_entry = qp_guest_handle_to_entry(*handle); + if (queue_pair_entry) { + if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) { + /* Local attach case. */ + if (queue_pair_entry->qp.ref_count > 1) { + pr_devel("Error attempting to attach more than once\n"); + result = VMCI_ERROR_UNAVAILABLE; + goto error_keep_entry; + } + + if (queue_pair_entry->qp.produce_size != consume_size || + queue_pair_entry->qp.consume_size != + produce_size || + queue_pair_entry->qp.flags != + (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) { + pr_devel("Error mismatched queue pair in local attach\n"); + result = VMCI_ERROR_QUEUEPAIR_MISMATCH; + goto error_keep_entry; + } + + /* + * Do a local attach. We swap the consume and + * produce queues for the attacher and deliver + * an attach event. + */ + result = qp_notify_peer_local(true, *handle); + if (result < VMCI_SUCCESS) + goto error_keep_entry; + + my_produce_q = queue_pair_entry->consume_q; + my_consume_q = queue_pair_entry->produce_q; + goto out; + } + + result = VMCI_ERROR_ALREADY_EXISTS; + goto error_keep_entry; + } + + my_produce_q = qp_alloc_queue(produce_size, flags); + if (!my_produce_q) { + pr_warn("Error allocating pages for produce queue\n"); + result = VMCI_ERROR_NO_MEM; + goto error; + } + + my_consume_q = qp_alloc_queue(consume_size, flags); + if (!my_consume_q) { + pr_warn("Error allocating pages for consume queue\n"); + result = VMCI_ERROR_NO_MEM; + goto error; + } + + queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags, + produce_size, consume_size, + my_produce_q, my_consume_q); + if (!queue_pair_entry) { + pr_warn("Error allocating memory in %s\n", __func__); + result = VMCI_ERROR_NO_MEM; + goto error; + } + + result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q, + num_consume_pages, + &queue_pair_entry->ppn_set); + if (result < VMCI_SUCCESS) { + pr_warn("qp_alloc_ppn_set failed\n"); + goto error; + } + + /* + * It's only necessary to notify the host if this queue pair will be + * attached to from another context. + */ + if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) { + /* Local create case. */ + u32 context_id = vmci_get_context_id(); + + /* + * Enforce similar checks on local queue pairs as we + * do for regular ones. 
The handle's context must + * match the creator or attacher context id (here they + * are both the current context id) and the + * attach-only flag cannot exist during create. We + * also ensure specified peer is this context or an + * invalid one. + */ + if (queue_pair_entry->qp.handle.context != context_id || + (queue_pair_entry->qp.peer != VMCI_INVALID_ID && + queue_pair_entry->qp.peer != context_id)) { + result = VMCI_ERROR_NO_ACCESS; + goto error; + } + + if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) { + result = VMCI_ERROR_NOT_FOUND; + goto error; + } + } else { + result = qp_alloc_hypercall(queue_pair_entry); + if (result < VMCI_SUCCESS) { + pr_warn("qp_alloc_hypercall result = %d\n", result); + goto error; + } + } + + qp_init_queue_mutex((struct vmci_queue *)my_produce_q, + (struct vmci_queue *)my_consume_q); + + qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp); + + out: + queue_pair_entry->qp.ref_count++; + *handle = queue_pair_entry->qp.handle; + *produce_q = (struct vmci_queue *)my_produce_q; + *consume_q = (struct vmci_queue *)my_consume_q; + + /* + * We should initialize the queue pair header pages on a local + * queue pair create. For non-local queue pairs, the + * hypervisor initializes the header pages in the create step. + */ + if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) && + queue_pair_entry->qp.ref_count == 1) { + vmci_q_header_init((*produce_q)->q_header, *handle); + vmci_q_header_init((*consume_q)->q_header, *handle); + } + + mutex_unlock(&qp_guest_endpoints.mutex); + + return VMCI_SUCCESS; + + error: + mutex_unlock(&qp_guest_endpoints.mutex); + if (queue_pair_entry) { + /* The queues will be freed inside the destroy routine. */ + qp_guest_endpoint_destroy(queue_pair_entry); + } else { + qp_free_queue(my_produce_q, produce_size); + qp_free_queue(my_consume_q, consume_size); + } + return result; + + error_keep_entry: + /* This path should only be used when an existing entry was found. */ + mutex_unlock(&qp_guest_endpoints.mutex); + return result; +} + +/* + * The first endpoint issuing a queue pair allocation will create the state + * of the queue pair in the queue pair broker. + * + * If the creator is a guest, it will associate a VMX virtual address range + * with the queue pair as specified by the page_store. For compatibility with + * older VMX'en, that would use a separate step to set the VMX virtual + * address range, the virtual address range can be registered later using + * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be + * used. + * + * If the creator is the host, a page_store of NULL should be used as well, + * since the host is not able to supply a page store for the queue pair. + * + * For older VMX and host callers, the queue pair will be created in the + * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be + * created in VMCOQPB_CREATED_MEM state. + */ +static int qp_broker_create(struct vmci_handle handle, + u32 peer, + u32 flags, + u32 priv_flags, + u64 produce_size, + u64 consume_size, + struct vmci_qp_page_store *page_store, + struct vmci_ctx *context, + vmci_event_release_cb wakeup_cb, + void *client_data, struct qp_broker_entry **ent) +{ + struct qp_broker_entry *entry = NULL; + const u32 context_id = vmci_ctx_get_id(context); + bool is_local = flags & VMCI_QPFLAG_LOCAL; + int result; + u64 guest_produce_size; + u64 guest_consume_size; + + /* Do not create if the caller asked not to. 
*/ + if (flags & VMCI_QPFLAG_ATTACH_ONLY) + return VMCI_ERROR_NOT_FOUND; + + /* + * Creator's context ID should match handle's context ID or the creator + * must allow the context in handle's context ID as the "peer". + */ + if (handle.context != context_id && handle.context != peer) + return VMCI_ERROR_NO_ACCESS; + + if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer)) + return VMCI_ERROR_DST_UNREACHABLE; + + /* + * Creator's context ID for local queue pairs should match the + * peer, if a peer is specified. + */ + if (is_local && peer != VMCI_INVALID_ID && context_id != peer) + return VMCI_ERROR_NO_ACCESS; + + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + return VMCI_ERROR_NO_MEM; + + if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) { + /* + * The queue pair broker entry stores values from the guest + * point of view, so a creating host side endpoint should swap + * produce and consume values -- unless it is a local queue + * pair, in which case no swapping is necessary, since the local + * attacher will swap queues. + */ + + guest_produce_size = consume_size; + guest_consume_size = produce_size; + } else { + guest_produce_size = produce_size; + guest_consume_size = consume_size; + } + + entry->qp.handle = handle; + entry->qp.peer = peer; + entry->qp.flags = flags; + entry->qp.produce_size = guest_produce_size; + entry->qp.consume_size = guest_consume_size; + entry->qp.ref_count = 1; + entry->create_id = context_id; + entry->attach_id = VMCI_INVALID_ID; + entry->state = VMCIQPB_NEW; + entry->require_trusted_attach = + !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED); + entry->created_by_trusted = + !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED); + entry->vmci_page_files = false; + entry->wakeup_cb = wakeup_cb; + entry->client_data = client_data; + entry->produce_q = qp_host_alloc_queue(guest_produce_size); + if (entry->produce_q == NULL) { + result = VMCI_ERROR_NO_MEM; + goto error; + } + entry->consume_q = qp_host_alloc_queue(guest_consume_size); + if (entry->consume_q == NULL) { + result = VMCI_ERROR_NO_MEM; + goto error; + } + + qp_init_queue_mutex(entry->produce_q, entry->consume_q); + + INIT_LIST_HEAD(&entry->qp.list_item); + + if (is_local) { + u8 *tmp; + + entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp), + PAGE_SIZE, GFP_KERNEL); + if (entry->local_mem == NULL) { + result = VMCI_ERROR_NO_MEM; + goto error; + } + entry->state = VMCIQPB_CREATED_MEM; + entry->produce_q->q_header = entry->local_mem; + tmp = (u8 *)entry->local_mem + PAGE_SIZE * + (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1); + entry->consume_q->q_header = (struct vmci_queue_header *)tmp; + } else if (page_store) { + /* + * The VMX already initialized the queue pair headers, so no + * need for the kernel side to do that. + */ + result = qp_host_register_user_memory(page_store, + entry->produce_q, + entry->consume_q); + if (result < VMCI_SUCCESS) + goto error; + + entry->state = VMCIQPB_CREATED_MEM; + } else { + /* + * A create without a page_store may be either a host + * side create (in which case we are waiting for the + * guest side to supply the memory) or an old style + * queue pair create (in which case we will expect a + * set page store call as the next step). 
+ */ + entry->state = VMCIQPB_CREATED_NO_MEM; + } + + qp_list_add_entry(&qp_broker_list, &entry->qp); + if (ent != NULL) + *ent = entry; + + /* Add to resource obj */ + result = vmci_resource_add(&entry->resource, + VMCI_RESOURCE_TYPE_QPAIR_HOST, + handle); + if (result != VMCI_SUCCESS) { + pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d", + handle.context, handle.resource, result); + goto error; + } + + entry->qp.handle = vmci_resource_handle(&entry->resource); + if (is_local) { + vmci_q_header_init(entry->produce_q->q_header, + entry->qp.handle); + vmci_q_header_init(entry->consume_q->q_header, + entry->qp.handle); + } + + vmci_ctx_qp_create(context, entry->qp.handle); + + return VMCI_SUCCESS; + + error: + if (entry != NULL) { + qp_host_free_queue(entry->produce_q, guest_produce_size); + qp_host_free_queue(entry->consume_q, guest_consume_size); + kfree(entry); + } + + return result; +} + +/* + * Enqueues an event datagram to notify the peer VM attached to + * the given queue pair handle about attach/detach event by the + * given VM. Returns Payload size of datagram enqueued on + * success, error code otherwise. + */ +static int qp_notify_peer(bool attach, + struct vmci_handle handle, + u32 my_id, + u32 peer_id) +{ + int rv; + struct vmci_event_qp ev; + + if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID || + peer_id == VMCI_INVALID_ID) + return VMCI_ERROR_INVALID_ARGS; + + /* + * In vmci_ctx_enqueue_datagram() we enforce the upper limit on + * number of pending events from the hypervisor to a given VM + * otherwise a rogue VM could do an arbitrary number of attach + * and detach operations causing memory pressure in the host + * kernel. + */ + + ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER); + ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_CONTEXT_RESOURCE_ID); + ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr); + ev.msg.event_data.event = attach ? + VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH; + ev.payload.handle = handle; + ev.payload.peer_id = my_id; + + rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID, + &ev.msg.hdr, false); + if (rv < VMCI_SUCCESS) + pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n", + attach ? "ATTACH" : "DETACH", peer_id); + + return rv; +} + +/* + * The second endpoint issuing a queue pair allocation will attach to + * the queue pair registered with the queue pair broker. + * + * If the attacher is a guest, it will associate a VMX virtual address + * range with the queue pair as specified by the page_store. At this + * point, the already attach host endpoint may start using the queue + * pair, and an attach event is sent to it. For compatibility with + * older VMX'en, that used a separate step to set the VMX virtual + * address range, the virtual address range can be registered later + * using vmci_qp_broker_set_page_store. In that case, a page_store of + * NULL should be used, and the attach event will be generated once + * the actual page store has been set. + * + * If the attacher is the host, a page_store of NULL should be used as + * well, since the page store information is already set by the guest. + * + * For new VMX and host callers, the queue pair will be moved to the + * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be + * moved to the VMCOQPB_ATTACHED_NO_MEM state. 
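+ *
+ * To summarize the cases described above, a successful attach leaves
+ * the broker entry in the following state:
+ *
+ *   attacher          page_store   new state
+ *   new-style VMX     supplied     VMCIQPB_ATTACHED_MEM
+ *   old-style VMX     NULL         VMCIQPB_ATTACHED_NO_MEM
+ *   host endpoint     NULL         VMCIQPB_ATTACHED_MEM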
+ */ +static int qp_broker_attach(struct qp_broker_entry *entry, + u32 peer, + u32 flags, + u32 priv_flags, + u64 produce_size, + u64 consume_size, + struct vmci_qp_page_store *page_store, + struct vmci_ctx *context, + vmci_event_release_cb wakeup_cb, + void *client_data, + struct qp_broker_entry **ent) +{ + const u32 context_id = vmci_ctx_get_id(context); + bool is_local = flags & VMCI_QPFLAG_LOCAL; + int result; + + if (entry->state != VMCIQPB_CREATED_NO_MEM && + entry->state != VMCIQPB_CREATED_MEM) + return VMCI_ERROR_UNAVAILABLE; + + if (is_local) { + if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) || + context_id != entry->create_id) { + return VMCI_ERROR_INVALID_ARGS; + } + } else if (context_id == entry->create_id || + context_id == entry->attach_id) { + return VMCI_ERROR_ALREADY_EXISTS; + } + + if (VMCI_CONTEXT_IS_VM(context_id) && + VMCI_CONTEXT_IS_VM(entry->create_id)) + return VMCI_ERROR_DST_UNREACHABLE; + + /* + * If we are attaching from a restricted context then the queuepair + * must have been created by a trusted endpoint. + */ + if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) && + !entry->created_by_trusted) + return VMCI_ERROR_NO_ACCESS; + + /* + * If we are attaching to a queuepair that was created by a restricted + * context then we must be trusted. + */ + if (entry->require_trusted_attach && + (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED))) + return VMCI_ERROR_NO_ACCESS; + + /* + * If the creator specifies VMCI_INVALID_ID in "peer" field, access + * control check is not performed. + */ + if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id) + return VMCI_ERROR_NO_ACCESS; + + if (entry->create_id == VMCI_HOST_CONTEXT_ID) { + /* + * Do not attach if the caller doesn't support Host Queue Pairs + * and a host created this queue pair. + */ + + if (!vmci_ctx_supports_host_qp(context)) + return VMCI_ERROR_INVALID_RESOURCE; + + } else if (context_id == VMCI_HOST_CONTEXT_ID) { + struct vmci_ctx *create_context; + bool supports_host_qp; + + /* + * Do not attach a host to a user created queue pair if that + * user doesn't support host queue pair end points. + */ + + create_context = vmci_ctx_get(entry->create_id); + supports_host_qp = vmci_ctx_supports_host_qp(create_context); + vmci_ctx_put(create_context); + + if (!supports_host_qp) + return VMCI_ERROR_INVALID_RESOURCE; + } + + if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER)) + return VMCI_ERROR_QUEUEPAIR_MISMATCH; + + if (context_id != VMCI_HOST_CONTEXT_ID) { + /* + * The queue pair broker entry stores values from the guest + * point of view, so an attaching guest should match the values + * stored in the entry. + */ + + if (entry->qp.produce_size != produce_size || + entry->qp.consume_size != consume_size) { + return VMCI_ERROR_QUEUEPAIR_MISMATCH; + } + } else if (entry->qp.produce_size != consume_size || + entry->qp.consume_size != produce_size) { + return VMCI_ERROR_QUEUEPAIR_MISMATCH; + } + + if (context_id != VMCI_HOST_CONTEXT_ID) { + /* + * If a guest attached to a queue pair, it will supply + * the backing memory. If this is a pre NOVMVM vmx, + * the backing memory will be supplied by calling + * vmci_qp_broker_set_page_store() following the + * return of the vmci_qp_broker_alloc() call. If it is + * a vmx of version NOVMVM or later, the page store + * must be supplied as part of the + * vmci_qp_broker_alloc call. Under all circumstances + * must the initially created queue pair not have any + * memory associated with it already. 
+ */ + + if (entry->state != VMCIQPB_CREATED_NO_MEM) + return VMCI_ERROR_INVALID_ARGS; + + if (page_store != NULL) { + /* + * Patch up host state to point to guest + * supplied memory. The VMX already + * initialized the queue pair headers, so no + * need for the kernel side to do that. + */ + + result = qp_host_register_user_memory(page_store, + entry->produce_q, + entry->consume_q); + if (result < VMCI_SUCCESS) + return result; + + /* + * Preemptively load in the headers if non-blocking to + * prevent blocking later. + */ + if (entry->qp.flags & VMCI_QPFLAG_NONBLOCK) { + result = qp_host_map_queues(entry->produce_q, + entry->consume_q); + if (result < VMCI_SUCCESS) { + qp_host_unregister_user_memory( + entry->produce_q, + entry->consume_q); + return result; + } + } + + entry->state = VMCIQPB_ATTACHED_MEM; + } else { + entry->state = VMCIQPB_ATTACHED_NO_MEM; + } + } else if (entry->state == VMCIQPB_CREATED_NO_MEM) { + /* + * The host side is attempting to attach to a queue + * pair that doesn't have any memory associated with + * it. This must be a pre NOVMVM vmx that hasn't set + * the page store information yet, or a quiesced VM. + */ + + return VMCI_ERROR_UNAVAILABLE; + } else { + /* + * For non-blocking queue pairs, we cannot rely on + * enqueue/dequeue to map in the pages on the + * host-side, since it may block, so we make an + * attempt here. + */ + + if (flags & VMCI_QPFLAG_NONBLOCK) { + result = + qp_host_map_queues(entry->produce_q, + entry->consume_q); + if (result < VMCI_SUCCESS) + return result; + + entry->qp.flags |= flags & + (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED); + } + + /* The host side has successfully attached to a queue pair. */ + entry->state = VMCIQPB_ATTACHED_MEM; + } + + if (entry->state == VMCIQPB_ATTACHED_MEM) { + result = + qp_notify_peer(true, entry->qp.handle, context_id, + entry->create_id); + if (result < VMCI_SUCCESS) + pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n", + entry->create_id, entry->qp.handle.context, + entry->qp.handle.resource); + } + + entry->attach_id = context_id; + entry->qp.ref_count++; + if (wakeup_cb) { + entry->wakeup_cb = wakeup_cb; + entry->client_data = client_data; + } + + /* + * When attaching to local queue pairs, the context already has + * an entry tracking the queue pair, so don't add another one. + */ + if (!is_local) + vmci_ctx_qp_create(context, entry->qp.handle); + + if (ent != NULL) + *ent = entry; + + return VMCI_SUCCESS; +} + +/* + * queue_pair_Alloc for use when setting up queue pair endpoints + * on the host. + */ +static int qp_broker_alloc(struct vmci_handle handle, + u32 peer, + u32 flags, + u32 priv_flags, + u64 produce_size, + u64 consume_size, + struct vmci_qp_page_store *page_store, + struct vmci_ctx *context, + vmci_event_release_cb wakeup_cb, + void *client_data, + struct qp_broker_entry **ent, + bool *swap) +{ + const u32 context_id = vmci_ctx_get_id(context); + bool create; + struct qp_broker_entry *entry = NULL; + bool is_local = flags & VMCI_QPFLAG_LOCAL; + int result; + + if (vmci_handle_is_invalid(handle) || + (flags & ~VMCI_QP_ALL_FLAGS) || is_local || + !(produce_size || consume_size) || + !context || context_id == VMCI_INVALID_ID || + handle.context == VMCI_INVALID_ID) { + return VMCI_ERROR_INVALID_ARGS; + } + + if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store)) + return VMCI_ERROR_INVALID_ARGS; + + /* + * In the initial argument check, we ensure that non-vmkernel hosts + * are not allowed to create local queue pairs. 
+ */ + + mutex_lock(&qp_broker_list.mutex); + + if (!is_local && vmci_ctx_qp_exists(context, handle)) { + pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n", + context_id, handle.context, handle.resource); + mutex_unlock(&qp_broker_list.mutex); + return VMCI_ERROR_ALREADY_EXISTS; + } + + if (handle.resource != VMCI_INVALID_ID) + entry = qp_broker_handle_to_entry(handle); + + if (!entry) { + create = true; + result = + qp_broker_create(handle, peer, flags, priv_flags, + produce_size, consume_size, page_store, + context, wakeup_cb, client_data, ent); + } else { + create = false; + result = + qp_broker_attach(entry, peer, flags, priv_flags, + produce_size, consume_size, page_store, + context, wakeup_cb, client_data, ent); + } + + mutex_unlock(&qp_broker_list.mutex); + + if (swap) + *swap = (context_id == VMCI_HOST_CONTEXT_ID) && + !(create && is_local); + + return result; +} + +/* + * This function implements the kernel API for allocating a queue + * pair. + */ +static int qp_alloc_host_work(struct vmci_handle *handle, + struct vmci_queue **produce_q, + u64 produce_size, + struct vmci_queue **consume_q, + u64 consume_size, + u32 peer, + u32 flags, + u32 priv_flags, + vmci_event_release_cb wakeup_cb, + void *client_data) +{ + struct vmci_handle new_handle; + struct vmci_ctx *context; + struct qp_broker_entry *entry; + int result; + bool swap; + + if (vmci_handle_is_invalid(*handle)) { + new_handle = vmci_make_handle( + VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID); + } else + new_handle = *handle; + + context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID); + entry = NULL; + result = + qp_broker_alloc(new_handle, peer, flags, priv_flags, + produce_size, consume_size, NULL, context, + wakeup_cb, client_data, &entry, &swap); + if (result == VMCI_SUCCESS) { + if (swap) { + /* + * If this is a local queue pair, the attacher + * will swap around produce and consume + * queues. + */ + + *produce_q = entry->consume_q; + *consume_q = entry->produce_q; + } else { + *produce_q = entry->produce_q; + *consume_q = entry->consume_q; + } + + *handle = vmci_resource_handle(&entry->resource); + } else { + *handle = VMCI_INVALID_HANDLE; + pr_devel("queue pair broker failed to alloc (result=%d)\n", + result); + } + vmci_ctx_put(context); + return result; +} + +/* + * Allocates a VMCI queue_pair. Only checks validity of input + * arguments. The real work is done in the host or guest + * specific function. + */ +int vmci_qp_alloc(struct vmci_handle *handle, + struct vmci_queue **produce_q, + u64 produce_size, + struct vmci_queue **consume_q, + u64 consume_size, + u32 peer, + u32 flags, + u32 priv_flags, + bool guest_endpoint, + vmci_event_release_cb wakeup_cb, + void *client_data) +{ + if (!handle || !produce_q || !consume_q || + (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS)) + return VMCI_ERROR_INVALID_ARGS; + + if (guest_endpoint) { + return qp_alloc_guest_work(handle, produce_q, + produce_size, consume_q, + consume_size, peer, + flags, priv_flags); + } else { + return qp_alloc_host_work(handle, produce_q, + produce_size, consume_q, + consume_size, peer, flags, + priv_flags, wakeup_cb, client_data); + } +} + +/* + * This function implements the host kernel API for detaching from + * a queue pair. 
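+ * It resolves the host context and forwards the request to
+ * vmci_qp_broker_detach().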
+ */ +static int qp_detatch_host_work(struct vmci_handle handle) +{ + int result; + struct vmci_ctx *context; + + context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID); + + result = vmci_qp_broker_detach(handle, context); + + vmci_ctx_put(context); + return result; +} + +/* + * Detaches from a VMCI queue_pair. Only checks validity of input argument. + * Real work is done in the host or guest specific function. + */ +static int qp_detatch(struct vmci_handle handle, bool guest_endpoint) +{ + if (vmci_handle_is_invalid(handle)) + return VMCI_ERROR_INVALID_ARGS; + + if (guest_endpoint) + return qp_detatch_guest_work(handle); + else + return qp_detatch_host_work(handle); +} + +/* + * Returns the entry from the head of the list. Assumes that the list is + * locked. + */ +static struct qp_entry *qp_list_get_head(struct qp_list *qp_list) +{ + if (!list_empty(&qp_list->head)) { + struct qp_entry *entry = + list_first_entry(&qp_list->head, struct qp_entry, + list_item); + return entry; + } + + return NULL; +} + +void vmci_qp_broker_exit(void) +{ + struct qp_entry *entry; + struct qp_broker_entry *be; + + mutex_lock(&qp_broker_list.mutex); + + while ((entry = qp_list_get_head(&qp_broker_list))) { + be = (struct qp_broker_entry *)entry; + + qp_list_remove_entry(&qp_broker_list, entry); + kfree(be); + } + + mutex_unlock(&qp_broker_list.mutex); +} + +/* + * Requests that a queue pair be allocated with the VMCI queue + * pair broker. Allocates a queue pair entry if one does not + * exist. Attaches to one if it exists, and retrieves the page + * files backing that queue_pair. Assumes that the queue pair + * broker lock is held. + */ +int vmci_qp_broker_alloc(struct vmci_handle handle, + u32 peer, + u32 flags, + u32 priv_flags, + u64 produce_size, + u64 consume_size, + struct vmci_qp_page_store *page_store, + struct vmci_ctx *context) +{ + return qp_broker_alloc(handle, peer, flags, priv_flags, + produce_size, consume_size, + page_store, context, NULL, NULL, NULL, NULL); +} + +/* + * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate + * step to add the UVAs of the VMX mapping of the queue pair. This function + * provides backwards compatibility with such VMX'en, and takes care of + * registering the page store for a queue pair previously allocated by the + * VMX during create or attach. This function will move the queue pair state + * to either from VMCIQBP_CREATED_NO_MEM to VMCIQBP_CREATED_MEM or + * VMCIQBP_ATTACHED_NO_MEM to VMCIQBP_ATTACHED_MEM. If moving to the + * attached state with memory, the queue pair is ready to be used by the + * host peer, and an attached event will be generated. + * + * Assumes that the queue pair broker lock is held. + * + * This function is only used by the hosted platform, since there is no + * issue with backwards compatibility for vmkernel. + */ +int vmci_qp_broker_set_page_store(struct vmci_handle handle, + u64 produce_uva, + u64 consume_uva, + struct vmci_ctx *context) +{ + struct qp_broker_entry *entry; + int result; + const u32 context_id = vmci_ctx_get_id(context); + + if (vmci_handle_is_invalid(handle) || !context || + context_id == VMCI_INVALID_ID) + return VMCI_ERROR_INVALID_ARGS; + + /* + * We only support guest to host queue pairs, so the VMX must + * supply UVAs for the mapped page files. 
+ */ + + if (produce_uva == 0 || consume_uva == 0) + return VMCI_ERROR_INVALID_ARGS; + + mutex_lock(&qp_broker_list.mutex); + + if (!vmci_ctx_qp_exists(context, handle)) { + pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", + context_id, handle.context, handle.resource); + result = VMCI_ERROR_NOT_FOUND; + goto out; + } + + entry = qp_broker_handle_to_entry(handle); + if (!entry) { + result = VMCI_ERROR_NOT_FOUND; + goto out; + } + + /* + * If I'm the owner then I can set the page store. + * + * Or, if a host created the queue_pair and I'm the attached peer + * then I can set the page store. + */ + if (entry->create_id != context_id && + (entry->create_id != VMCI_HOST_CONTEXT_ID || + entry->attach_id != context_id)) { + result = VMCI_ERROR_QUEUEPAIR_NOTOWNER; + goto out; + } + + if (entry->state != VMCIQPB_CREATED_NO_MEM && + entry->state != VMCIQPB_ATTACHED_NO_MEM) { + result = VMCI_ERROR_UNAVAILABLE; + goto out; + } + + result = qp_host_get_user_memory(produce_uva, consume_uva, + entry->produce_q, entry->consume_q); + if (result < VMCI_SUCCESS) + goto out; + + result = qp_host_map_queues(entry->produce_q, entry->consume_q); + if (result < VMCI_SUCCESS) { + qp_host_unregister_user_memory(entry->produce_q, + entry->consume_q); + goto out; + } + + if (entry->state == VMCIQPB_CREATED_NO_MEM) + entry->state = VMCIQPB_CREATED_MEM; + else + entry->state = VMCIQPB_ATTACHED_MEM; + + entry->vmci_page_files = true; + + if (entry->state == VMCIQPB_ATTACHED_MEM) { + result = + qp_notify_peer(true, handle, context_id, entry->create_id); + if (result < VMCI_SUCCESS) { + pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n", + entry->create_id, entry->qp.handle.context, + entry->qp.handle.resource); + } + } + + result = VMCI_SUCCESS; + out: + mutex_unlock(&qp_broker_list.mutex); + return result; +} + +/* + * Resets saved queue headers for the given QP broker + * entry. Should be used when guest memory becomes available + * again, or the guest detaches. + */ +static void qp_reset_saved_headers(struct qp_broker_entry *entry) +{ + entry->produce_q->saved_header = NULL; + entry->consume_q->saved_header = NULL; +} + +/* + * The main entry point for detaching from a queue pair registered with the + * queue pair broker. If more than one endpoint is attached to the queue + * pair, the first endpoint will mainly decrement a reference count and + * generate a notification to its peer. The last endpoint will clean up + * the queue pair state registered with the broker. + * + * When a guest endpoint detaches, it will unmap and unregister the guest + * memory backing the queue pair. If the host is still attached, it will + * no longer be able to access the queue pair content. + * + * If the queue pair is already in a state where there is no memory + * registered for the queue pair (any *_NO_MEM state), it will transition to + * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen, if a guest + * endpoint is the first of two endpoints to detach. If the host endpoint is + * the first out of two to detach, the queue pair will move to the + * VMCIQPB_SHUTDOWN_MEM state. 
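For illustration only (not part of the patch): vmci_qp_broker_set_page_store() above exists to support a two-step flow used by pre-NOVMVM VMX'en, in which the queue pair is first allocated without backing memory and the mapped-file UVAs are registered afterwards. The helper below is hypothetical; the peer and flag values are placeholders, and only interfaces declared in this series are used.

/*
 * Hypothetical host-side helper (not part of the patch) showing the call
 * ordering for an old VMX: allocate the broker entry with no page store,
 * then hand over the user VAs of the mmap()'d backing files.
 */
static int example_legacy_qp_create(struct vmci_ctx *context,
                                    struct vmci_handle handle,
                                    u64 produce_size, u64 consume_size,
                                    u64 produce_uva, u64 consume_uva)
{
        int result;

        /* Step 1: entry starts out in one of the *_NO_MEM states. */
        result = vmci_qp_broker_alloc(handle, VMCI_HOST_CONTEXT_ID,
                                      0 /* flags */, 0 /* priv_flags */,
                                      produce_size, consume_size,
                                      NULL /* page store comes later */,
                                      context);
        if (result < VMCI_SUCCESS)
                return result;

        /* Step 2: register the UVAs; state moves to *_MEM on success. */
        return vmci_qp_broker_set_page_store(handle, produce_uva,
                                             consume_uva, context);
}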
+ */ +int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context) +{ + struct qp_broker_entry *entry; + const u32 context_id = vmci_ctx_get_id(context); + u32 peer_id; + bool is_local = false; + int result; + + if (vmci_handle_is_invalid(handle) || !context || + context_id == VMCI_INVALID_ID) { + return VMCI_ERROR_INVALID_ARGS; + } + + mutex_lock(&qp_broker_list.mutex); + + if (!vmci_ctx_qp_exists(context, handle)) { + pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", + context_id, handle.context, handle.resource); + result = VMCI_ERROR_NOT_FOUND; + goto out; + } + + entry = qp_broker_handle_to_entry(handle); + if (!entry) { + pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n", + context_id, handle.context, handle.resource); + result = VMCI_ERROR_NOT_FOUND; + goto out; + } + + if (context_id != entry->create_id && context_id != entry->attach_id) { + result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; + goto out; + } + + if (context_id == entry->create_id) { + peer_id = entry->attach_id; + entry->create_id = VMCI_INVALID_ID; + } else { + peer_id = entry->create_id; + entry->attach_id = VMCI_INVALID_ID; + } + entry->qp.ref_count--; + + is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; + + if (context_id != VMCI_HOST_CONTEXT_ID) { + bool headers_mapped; + + /* + * Pre NOVMVM vmx'en may detach from a queue pair + * before setting the page store, and in that case + * there is no user memory to detach from. Also, more + * recent VMX'en may detach from a queue pair in the + * quiesced state. + */ + + qp_acquire_queue_mutex(entry->produce_q); + headers_mapped = entry->produce_q->q_header || + entry->consume_q->q_header; + if (QPBROKERSTATE_HAS_MEM(entry)) { + result = + qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID, + entry->produce_q, + entry->consume_q); + if (result < VMCI_SUCCESS) + pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", + handle.context, handle.resource, + result); + + if (entry->vmci_page_files) + qp_host_unregister_user_memory(entry->produce_q, + entry-> + consume_q); + else + qp_host_unregister_user_memory(entry->produce_q, + entry-> + consume_q); + + } + + if (!headers_mapped) + qp_reset_saved_headers(entry); + + qp_release_queue_mutex(entry->produce_q); + + if (!headers_mapped && entry->wakeup_cb) + entry->wakeup_cb(entry->client_data); + + } else { + if (entry->wakeup_cb) { + entry->wakeup_cb = NULL; + entry->client_data = NULL; + } + } + + if (entry->qp.ref_count == 0) { + qp_list_remove_entry(&qp_broker_list, &entry->qp); + + if (is_local) + kfree(entry->local_mem); + + qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); + qp_host_free_queue(entry->produce_q, entry->qp.produce_size); + qp_host_free_queue(entry->consume_q, entry->qp.consume_size); + /* Unlink from resource hash table and free callback */ + vmci_resource_remove(&entry->resource); + + kfree(entry); + + vmci_ctx_qp_destroy(context, handle); + } else { + qp_notify_peer(false, handle, context_id, peer_id); + if (context_id == VMCI_HOST_CONTEXT_ID && + QPBROKERSTATE_HAS_MEM(entry)) { + entry->state = VMCIQPB_SHUTDOWN_MEM; + } else { + entry->state = VMCIQPB_SHUTDOWN_NO_MEM; + } + + if (!is_local) + vmci_ctx_qp_destroy(context, handle); + + } + result = VMCI_SUCCESS; + out: + mutex_unlock(&qp_broker_list.mutex); + return result; +} + +/* + * Establishes the necessary mappings for a queue pair given a + * reference to the queue pair guest memory. 
This is usually + * called when a guest is unquiesced and the VMX is allowed to + * map guest memory once again. + */ +int vmci_qp_broker_map(struct vmci_handle handle, + struct vmci_ctx *context, + u64 guest_mem) +{ + struct qp_broker_entry *entry; + const u32 context_id = vmci_ctx_get_id(context); + bool is_local = false; + int result; + + if (vmci_handle_is_invalid(handle) || !context || + context_id == VMCI_INVALID_ID) + return VMCI_ERROR_INVALID_ARGS; + + mutex_lock(&qp_broker_list.mutex); + + if (!vmci_ctx_qp_exists(context, handle)) { + pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", + context_id, handle.context, handle.resource); + result = VMCI_ERROR_NOT_FOUND; + goto out; + } + + entry = qp_broker_handle_to_entry(handle); + if (!entry) { + pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n", + context_id, handle.context, handle.resource); + result = VMCI_ERROR_NOT_FOUND; + goto out; + } + + if (context_id != entry->create_id && context_id != entry->attach_id) { + result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; + goto out; + } + + is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; + result = VMCI_SUCCESS; + + if (context_id != VMCI_HOST_CONTEXT_ID) { + struct vmci_qp_page_store page_store; + + page_store.pages = guest_mem; + page_store.len = QPE_NUM_PAGES(entry->qp); + + qp_acquire_queue_mutex(entry->produce_q); + qp_reset_saved_headers(entry); + result = + qp_host_register_user_memory(&page_store, + entry->produce_q, + entry->consume_q); + qp_release_queue_mutex(entry->produce_q); + if (result == VMCI_SUCCESS) { + /* Move state from *_NO_MEM to *_MEM */ + + entry->state++; + + if (entry->wakeup_cb) + entry->wakeup_cb(entry->client_data); + } + } + + out: + mutex_unlock(&qp_broker_list.mutex); + return result; +} + +/* + * Saves a snapshot of the queue headers for the given QP broker + * entry. Should be used when guest memory is unmapped. + * Results: + * VMCI_SUCCESS on success, appropriate error code if guest memory + * can't be accessed.. + */ +static int qp_save_headers(struct qp_broker_entry *entry) +{ + int result; + + if (entry->produce_q->saved_header != NULL && + entry->consume_q->saved_header != NULL) { + /* + * If the headers have already been saved, we don't need to do + * it again, and we don't want to map in the headers + * unnecessarily. + */ + + return VMCI_SUCCESS; + } + + if (NULL == entry->produce_q->q_header || + NULL == entry->consume_q->q_header) { + result = qp_host_map_queues(entry->produce_q, entry->consume_q); + if (result < VMCI_SUCCESS) + return result; + } + + memcpy(&entry->saved_produce_q, entry->produce_q->q_header, + sizeof(entry->saved_produce_q)); + entry->produce_q->saved_header = &entry->saved_produce_q; + memcpy(&entry->saved_consume_q, entry->consume_q->q_header, + sizeof(entry->saved_consume_q)); + entry->consume_q->saved_header = &entry->saved_consume_q; + + return VMCI_SUCCESS; +} + +/* + * Removes all references to the guest memory of a given queue pair, and + * will move the queue pair from state *_MEM to *_NO_MEM. It is usually + * called when a VM is being quiesced where access to guest memory should + * avoided. 
+ */ +int vmci_qp_broker_unmap(struct vmci_handle handle, + struct vmci_ctx *context, + u32 gid) +{ + struct qp_broker_entry *entry; + const u32 context_id = vmci_ctx_get_id(context); + bool is_local = false; + int result; + + if (vmci_handle_is_invalid(handle) || !context || + context_id == VMCI_INVALID_ID) + return VMCI_ERROR_INVALID_ARGS; + + mutex_lock(&qp_broker_list.mutex); + + if (!vmci_ctx_qp_exists(context, handle)) { + pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", + context_id, handle.context, handle.resource); + result = VMCI_ERROR_NOT_FOUND; + goto out; + } + + entry = qp_broker_handle_to_entry(handle); + if (!entry) { + pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n", + context_id, handle.context, handle.resource); + result = VMCI_ERROR_NOT_FOUND; + goto out; + } + + if (context_id != entry->create_id && context_id != entry->attach_id) { + result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; + goto out; + } + + is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; + + if (context_id != VMCI_HOST_CONTEXT_ID) { + qp_acquire_queue_mutex(entry->produce_q); + result = qp_save_headers(entry); + if (result < VMCI_SUCCESS) + pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", + handle.context, handle.resource, result); + + qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q); + + /* + * On hosted, when we unmap queue pairs, the VMX will also + * unmap the guest memory, so we invalidate the previously + * registered memory. If the queue pair is mapped again at a + * later point in time, we will need to reregister the user + * memory with a possibly new user VA. + */ + qp_host_unregister_user_memory(entry->produce_q, + entry->consume_q); + + /* + * Move state from *_MEM to *_NO_MEM. + */ + entry->state--; + + qp_release_queue_mutex(entry->produce_q); + } + + result = VMCI_SUCCESS; + + out: + mutex_unlock(&qp_broker_list.mutex); + return result; +} + +/* + * Destroys all guest queue pair endpoints. If active guest queue + * pairs still exist, hypercalls to attempt detach from these + * queue pairs will be made. Any failure to detach is silently + * ignored. + */ +void vmci_qp_guest_endpoints_exit(void) +{ + struct qp_entry *entry; + struct qp_guest_endpoint *ep; + + mutex_lock(&qp_guest_endpoints.mutex); + + while ((entry = qp_list_get_head(&qp_guest_endpoints))) { + ep = (struct qp_guest_endpoint *)entry; + + /* Don't make a hypercall for local queue_pairs. */ + if (!(entry->flags & VMCI_QPFLAG_LOCAL)) + qp_detatch_hypercall(entry->handle); + + /* We cannot fail the exit, so let's reset ref_count. */ + entry->ref_count = 0; + qp_list_remove_entry(&qp_guest_endpoints, entry); + + qp_guest_endpoint_destroy(ep); + } + + mutex_unlock(&qp_guest_endpoints.mutex); +} + +/* + * Helper routine that will lock the queue pair before subsequent + * operations. + * Note: Non-blocking on the host side is currently only implemented in ESX. + * Since non-blocking isn't yet implemented on the host personality we + * have no reason to acquire a spin lock. So to avoid the use of an + * unnecessary lock only acquire the mutex if we can block. + * Note: It is assumed that QPFLAG_PINNED implies QPFLAG_NONBLOCK. Therefore + * we can use the same locking function for access to both the queue + * and the queue headers as it is the same logic. Assert this behvior. 
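The "entry->state++" / "entry->state--" transitions in vmci_qp_broker_map() and vmci_qp_broker_unmap() above rely on the broker state enum (defined earlier in this file, outside this hunk) placing each *_MEM value directly after its *_NO_MEM counterpart. A sketch of that inferred ordering, for orientation only:

/*
 * Not part of the patch: inferred adjacency that makes the state++/state--
 * arithmetic valid. Unmap (guest quiesce) steps *_MEM -> *_NO_MEM, map
 * (guest resume) steps *_NO_MEM -> *_MEM. The real enum also contains
 * additional states (e.g. the SHUTDOWN and GONE states used above) that
 * are omitted here.
 */
enum example_qpb_state_ordering {
        EXAMPLE_VMCIQPB_CREATED_NO_MEM,
        EXAMPLE_VMCIQPB_CREATED_MEM,    /* == CREATED_NO_MEM + 1 */
        EXAMPLE_VMCIQPB_ATTACHED_NO_MEM,
        EXAMPLE_VMCIQPB_ATTACHED_MEM,   /* == ATTACHED_NO_MEM + 1 */
};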
+ */ +static void qp_lock(const struct vmci_qp *qpair) +{ + if (vmci_can_block(qpair->flags)) + qp_acquire_queue_mutex(qpair->produce_q); +} + +/* + * Helper routine that unlocks the queue pair after calling + * qp_lock. Respects non-blocking and pinning flags. + */ +static void qp_unlock(const struct vmci_qp *qpair) +{ + if (vmci_can_block(qpair->flags)) + qp_release_queue_mutex(qpair->produce_q); +} + +/* + * The queue headers may not be mapped at all times. If a queue is + * currently not mapped, it will be attempted to do so. + */ +static int qp_map_queue_headers(struct vmci_queue *produce_q, + struct vmci_queue *consume_q, + bool can_block) +{ + int result; + + if (NULL == produce_q->q_header || NULL == consume_q->q_header) { + if (can_block) + result = qp_host_map_queues(produce_q, consume_q); + else + result = VMCI_ERROR_QUEUEPAIR_NOT_READY; + + if (result < VMCI_SUCCESS) + return (produce_q->saved_header && + consume_q->saved_header) ? + VMCI_ERROR_QUEUEPAIR_NOT_READY : + VMCI_ERROR_QUEUEPAIR_NOTATTACHED; + } + + return VMCI_SUCCESS; +} + +/* + * Helper routine that will retrieve the produce and consume + * headers of a given queue pair. If the guest memory of the + * queue pair is currently not available, the saved queue headers + * will be returned, if these are available. + */ +static int qp_get_queue_headers(const struct vmci_qp *qpair, + struct vmci_queue_header **produce_q_header, + struct vmci_queue_header **consume_q_header) +{ + int result; + + result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q, + vmci_can_block(qpair->flags)); + if (result == VMCI_SUCCESS) { + *produce_q_header = qpair->produce_q->q_header; + *consume_q_header = qpair->consume_q->q_header; + } else if (qpair->produce_q->saved_header && + qpair->consume_q->saved_header) { + *produce_q_header = qpair->produce_q->saved_header; + *consume_q_header = qpair->consume_q->saved_header; + result = VMCI_SUCCESS; + } + + return result; +} + +/* + * Callback from VMCI queue pair broker indicating that a queue + * pair that was previously not ready, now either is ready or + * gone forever. + */ +static int qp_wakeup_cb(void *client_data) +{ + struct vmci_qp *qpair = (struct vmci_qp *)client_data; + + qp_lock(qpair); + while (qpair->blocked > 0) { + qpair->blocked--; + qpair->generation++; + wake_up(&qpair->event); + } + qp_unlock(qpair); + + return VMCI_SUCCESS; +} + +/* + * Makes the calling thread wait for the queue pair to become + * ready for host side access. Returns true when thread is + * woken up after queue pair state change, false otherwise. + */ +static bool qp_wait_for_ready_queue(struct vmci_qp *qpair) +{ + unsigned int generation; + + if (qpair->flags & VMCI_QPFLAG_NONBLOCK) + return false; + + qpair->blocked++; + generation = qpair->generation; + qp_unlock(qpair); + wait_event(qpair->event, generation != qpair->generation); + qp_lock(qpair); + + return true; +} + +/* + * Enqueues a given buffer to the produce queue using the provided + * function. As many bytes as possible (space available in the queue) + * are enqueued. Assumes the queue->mutex has been acquired. Returns + * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue + * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the + * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if + * an error occured when accessing the buffer, + * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't + * available. Otherwise, the number of bytes written to the queue is + * returned. 
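qp_enqueue_locked() and qp_dequeue_locked(), which follow, copy data in at most two chunks when the tail or head offset wraps past the end of the queue. A standalone sketch of just that split calculation (not part of the patch):

/*
 * Not part of the patch: the wrap-around split used below. Copying
 * 'nbytes' starting at 'offset' into a ring of 'size' bytes needs either
 * one contiguous copy or two copies, the second starting at offset 0.
 */
static void example_ring_split(u64 offset, size_t nbytes, u64 size,
                               size_t *first, size_t *second)
{
        if (offset + nbytes < size) {
                *first = nbytes;                /* fits without wrapping */
                *second = 0;
        } else {
                *first = (size_t)(size - offset);       /* up to end of ring */
                *second = nbytes - *first;              /* remainder from 0 */
        }
}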
Updates the tail pointer of the produce queue. + */ +static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q, + struct vmci_queue *consume_q, + const u64 produce_q_size, + const void *buf, + size_t buf_size, + vmci_memcpy_to_queue_func memcpy_to_queue, + bool can_block) +{ + s64 free_space; + u64 tail; + size_t written; + ssize_t result; + + result = qp_map_queue_headers(produce_q, consume_q, can_block); + if (unlikely(result != VMCI_SUCCESS)) + return result; + + free_space = vmci_q_header_free_space(produce_q->q_header, + consume_q->q_header, + produce_q_size); + if (free_space == 0) + return VMCI_ERROR_QUEUEPAIR_NOSPACE; + + if (free_space < VMCI_SUCCESS) + return (ssize_t) free_space; + + written = (size_t) (free_space > buf_size ? buf_size : free_space); + tail = vmci_q_header_producer_tail(produce_q->q_header); + if (likely(tail + written < produce_q_size)) { + result = memcpy_to_queue(produce_q, tail, buf, 0, written); + } else { + /* Tail pointer wraps around. */ + + const size_t tmp = (size_t) (produce_q_size - tail); + + result = memcpy_to_queue(produce_q, tail, buf, 0, tmp); + if (result >= VMCI_SUCCESS) + result = memcpy_to_queue(produce_q, 0, buf, tmp, + written - tmp); + } + + if (result < VMCI_SUCCESS) + return result; + + vmci_q_header_add_producer_tail(produce_q->q_header, written, + produce_q_size); + return written; +} + +/* + * Dequeues data (if available) from the given consume queue. Writes data + * to the user provided buffer using the provided function. + * Assumes the queue->mutex has been acquired. + * Results: + * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue. + * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue + * (as defined by the queue size). + * VMCI_ERROR_INVALID_ARGS, if an error occured when accessing the buffer. + * Otherwise the number of bytes dequeued is returned. + * Side effects: + * Updates the head pointer of the consume queue. + */ +static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q, + struct vmci_queue *consume_q, + const u64 consume_q_size, + void *buf, + size_t buf_size, + vmci_memcpy_from_queue_func memcpy_from_queue, + bool update_consumer, + bool can_block) +{ + s64 buf_ready; + u64 head; + size_t read; + ssize_t result; + + result = qp_map_queue_headers(produce_q, consume_q, can_block); + if (unlikely(result != VMCI_SUCCESS)) + return result; + + buf_ready = vmci_q_header_buf_ready(consume_q->q_header, + produce_q->q_header, + consume_q_size); + if (buf_ready == 0) + return VMCI_ERROR_QUEUEPAIR_NODATA; + + if (buf_ready < VMCI_SUCCESS) + return (ssize_t) buf_ready; + + read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready); + head = vmci_q_header_consumer_head(produce_q->q_header); + if (likely(head + read < consume_q_size)) { + result = memcpy_from_queue(buf, 0, consume_q, head, read); + } else { + /* Head pointer wraps around. */ + + const size_t tmp = (size_t) (consume_q_size - head); + + result = memcpy_from_queue(buf, 0, consume_q, head, tmp); + if (result >= VMCI_SUCCESS) + result = memcpy_from_queue(buf, tmp, consume_q, 0, + read - tmp); + + } + + if (result < VMCI_SUCCESS) + return result; + + if (update_consumer) + vmci_q_header_add_consumer_head(produce_q->q_header, + read, consume_q_size); + + return read; +} + +/* + * vmci_qpair_alloc() - Allocates a queue pair. + * @qpair: Pointer for the new vmci_qp struct. + * @handle: Handle to track the resource. + * @produce_qsize: Desired size of the producer queue. + * @consume_qsize: Desired size of the consumer queue. 
+ * @peer: ContextID of the peer. + * @flags: VMCI flags. + * @priv_flags: VMCI priviledge flags. + * + * This is the client interface for allocating the memory for a + * vmci_qp structure and then attaching to the underlying + * queue. If an error occurs allocating the memory for the + * vmci_qp structure no attempt is made to attach. If an + * error occurs attaching, then the structure is freed. + */ +int vmci_qpair_alloc(struct vmci_qp **qpair, + struct vmci_handle *handle, + u64 produce_qsize, + u64 consume_qsize, + u32 peer, + u32 flags, + u32 priv_flags) +{ + struct vmci_qp *my_qpair; + int retval; + struct vmci_handle src = VMCI_INVALID_HANDLE; + struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID); + enum vmci_route route; + vmci_event_release_cb wakeup_cb; + void *client_data; + + /* + * Restrict the size of a queuepair. The device already + * enforces a limit on the total amount of memory that can be + * allocated to queuepairs for a guest. However, we try to + * allocate this memory before we make the queuepair + * allocation hypercall. On Linux, we allocate each page + * separately, which means rather than fail, the guest will + * thrash while it tries to allocate, and will become + * increasingly unresponsive to the point where it appears to + * be hung. So we place a limit on the size of an individual + * queuepair here, and leave the device to enforce the + * restriction on total queuepair memory. (Note that this + * doesn't prevent all cases; a user with only this much + * physical memory could still get into trouble.) The error + * used by the device is NO_RESOURCES, so use that here too. + */ + + if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) || + produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY) + return VMCI_ERROR_NO_RESOURCES; + + retval = vmci_route(&src, &dst, false, &route); + if (retval < VMCI_SUCCESS) + route = vmci_guest_code_active() ? + VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST; + + /* If NONBLOCK or PINNED is set, we better be the guest personality. */ + if ((!vmci_can_block(flags) || vmci_qp_pinned(flags)) && + VMCI_ROUTE_AS_GUEST != route) { + pr_devel("Not guest personality w/ NONBLOCK OR PINNED set"); + return VMCI_ERROR_INVALID_ARGS; + } + + /* + * Limit the size of pinned QPs and check sanity. + * + * Pinned pages implies non-blocking mode. Mutexes aren't acquired + * when the NONBLOCK flag is set in qpair code; and also should not be + * acquired when the PINNED flagged is set. Since pinning pages + * implies we want speed, it makes no sense not to have NONBLOCK + * set if PINNED is set. Hence enforce this implication. 
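The first size check in vmci_qpair_alloc() above, "produce_qsize + consume_qsize < max(produce_qsize, consume_qsize)", doubles as an unsigned-overflow test: if the u64 sum wraps, it is necessarily smaller than the larger addend. A minimal restatement (not part of the patch, assuming the max() macro already used by this file):

/*
 * Not part of the patch: the total-size check rejects both oversized and
 * overflowing requests. E.g. produce_qsize = consume_qsize = 1ULL << 63
 * sums to 0, which is < max() of the two, so the request fails with
 * VMCI_ERROR_NO_RESOURCES instead of slipping past the
 * VMCI_MAX_GUEST_QP_MEMORY limit.
 */
static bool example_qp_size_invalid(u64 produce_qsize, u64 consume_qsize)
{
        return produce_qsize + consume_qsize <
                        max(produce_qsize, consume_qsize) ||
               produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY;
}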
+ */ + if (vmci_qp_pinned(flags)) { + if (vmci_can_block(flags)) { + pr_err("Attempted to enable pinning w/o non-blocking"); + return VMCI_ERROR_INVALID_ARGS; + } + + if (produce_qsize + consume_qsize > VMCI_MAX_PINNED_QP_MEMORY) + return VMCI_ERROR_NO_RESOURCES; + } + + my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL); + if (!my_qpair) + return VMCI_ERROR_NO_MEM; + + my_qpair->produce_q_size = produce_qsize; + my_qpair->consume_q_size = consume_qsize; + my_qpair->peer = peer; + my_qpair->flags = flags; + my_qpair->priv_flags = priv_flags; + + wakeup_cb = NULL; + client_data = NULL; + + if (VMCI_ROUTE_AS_HOST == route) { + my_qpair->guest_endpoint = false; + if (!(flags & VMCI_QPFLAG_LOCAL)) { + my_qpair->blocked = 0; + my_qpair->generation = 0; + init_waitqueue_head(&my_qpair->event); + wakeup_cb = qp_wakeup_cb; + client_data = (void *)my_qpair; + } + } else { + my_qpair->guest_endpoint = true; + } + + retval = vmci_qp_alloc(handle, + &my_qpair->produce_q, + my_qpair->produce_q_size, + &my_qpair->consume_q, + my_qpair->consume_q_size, + my_qpair->peer, + my_qpair->flags, + my_qpair->priv_flags, + my_qpair->guest_endpoint, + wakeup_cb, client_data); + + if (retval < VMCI_SUCCESS) { + kfree(my_qpair); + return retval; + } + + *qpair = my_qpair; + my_qpair->handle = *handle; + + return retval; +} +EXPORT_SYMBOL_GPL(vmci_qpair_alloc); + +/* + * vmci_qpair_detach() - Detatches the client from a queue pair. + * @qpair: Reference of a pointer to the qpair struct. + * + * This is the client interface for detaching from a VMCIQPair. + * Note that this routine will free the memory allocated for the + * vmci_qp structure too. + */ +int vmci_qpair_detach(struct vmci_qp **qpair) +{ + int result; + struct vmci_qp *old_qpair; + + if (!qpair || !(*qpair)) + return VMCI_ERROR_INVALID_ARGS; + + old_qpair = *qpair; + result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint); + + /* + * The guest can fail to detach for a number of reasons, and + * if it does so, it will cleanup the entry (if there is one). + * The host can fail too, but it won't cleanup the entry + * immediately, it will do that later when the context is + * freed. Either way, we need to release the qpair struct + * here; there isn't much the caller can do, and we don't want + * to leak. + */ + + memset(old_qpair, 0, sizeof(*old_qpair)); + old_qpair->handle = VMCI_INVALID_HANDLE; + old_qpair->peer = VMCI_INVALID_ID; + kfree(old_qpair); + *qpair = NULL; + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_detach); + +/* + * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer. + * @qpair: Pointer to the queue pair struct. + * @producer_tail: Reference used for storing producer tail index. + * @consumer_head: Reference used for storing the consumer head index. + * + * This is the client interface for getting the current indexes of the + * QPair from the point of the view of the caller as the producer. 
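For orientation (not part of the patch), a sketch of how a kernel client might use vmci_qpair_alloc()/vmci_qpair_detach() above. The peer context ID, queue sizes and zero flag values are placeholders, and the declarations are assumed to come from the public VMCI header used elsewhere in this series.

/* Hypothetical client setup/teardown using only the exported calls above. */
static int example_qpair_setup(u32 peer_cid, struct vmci_qp **qpair,
                               struct vmci_handle *handle)
{
        /*
         * An invalid handle asks VMCI to assign a resource id, as
         * qp_alloc_host_work() above does for host endpoints.
         */
        *handle = VMCI_INVALID_HANDLE;

        return vmci_qpair_alloc(qpair, handle,
                                4096 /* produce_qsize */,
                                4096 /* consume_qsize */,
                                peer_cid,
                                0 /* flags: blocking, not pinned, not local */,
                                0 /* priv_flags */);
}

static int example_qpair_teardown(struct vmci_qp **qpair)
{
        /* Also frees the vmci_qp struct, as documented above. */
        return vmci_qpair_detach(qpair);
}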
+ */ +int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair, + u64 *producer_tail, + u64 *consumer_head) +{ + struct vmci_queue_header *produce_q_header; + struct vmci_queue_header *consume_q_header; + int result; + + if (!qpair) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + result = + qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); + if (result == VMCI_SUCCESS) + vmci_q_header_get_pointers(produce_q_header, consume_q_header, + producer_tail, consumer_head); + qp_unlock(qpair); + + if (result == VMCI_SUCCESS && + ((producer_tail && *producer_tail >= qpair->produce_q_size) || + (consumer_head && *consumer_head >= qpair->produce_q_size))) + return VMCI_ERROR_INVALID_SIZE; + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes); + +/* + * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the comsumer. + * @qpair: Pointer to the queue pair struct. + * @consumer_tail: Reference used for storing consumer tail index. + * @producer_head: Reference used for storing the producer head index. + * + * This is the client interface for getting the current indexes of the + * QPair from the point of the view of the caller as the consumer. + */ +int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair, + u64 *consumer_tail, + u64 *producer_head) +{ + struct vmci_queue_header *produce_q_header; + struct vmci_queue_header *consume_q_header; + int result; + + if (!qpair) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + result = + qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); + if (result == VMCI_SUCCESS) + vmci_q_header_get_pointers(consume_q_header, produce_q_header, + consumer_tail, producer_head); + qp_unlock(qpair); + + if (result == VMCI_SUCCESS && + ((consumer_tail && *consumer_tail >= qpair->consume_q_size) || + (producer_head && *producer_head >= qpair->consume_q_size))) + return VMCI_ERROR_INVALID_SIZE; + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes); + +/* + * vmci_qpair_produce_free_space() - Retrieves free space in producer queue. + * @qpair: Pointer to the queue pair struct. + * + * This is the client interface for getting the amount of free + * space in the QPair from the point of the view of the caller as + * the producer which is the common case. Returns < 0 if err, else + * available bytes into which data can be enqueued if > 0. + */ +s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair) +{ + struct vmci_queue_header *produce_q_header; + struct vmci_queue_header *consume_q_header; + s64 result; + + if (!qpair) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + result = + qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); + if (result == VMCI_SUCCESS) + result = vmci_q_header_free_space(produce_q_header, + consume_q_header, + qpair->produce_q_size); + else + result = 0; + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space); + +/* + * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue. + * @qpair: Pointer to the queue pair struct. + * + * This is the client interface for getting the amount of free + * space in the QPair from the point of the view of the caller as + * the consumer which is not the common case. Returns < 0 if err, else + * available bytes into which data can be enqueued if > 0. 
+ */ +s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair) +{ + struct vmci_queue_header *produce_q_header; + struct vmci_queue_header *consume_q_header; + s64 result; + + if (!qpair) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + result = + qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); + if (result == VMCI_SUCCESS) + result = vmci_q_header_free_space(consume_q_header, + produce_q_header, + qpair->consume_q_size); + else + result = 0; + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space); + +/* + * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from + * producer queue. + * @qpair: Pointer to the queue pair struct. + * + * This is the client interface for getting the amount of + * enqueued data in the QPair from the point of the view of the + * caller as the producer which is not the common case. Returns < 0 if err, + * else available bytes that may be read. + */ +s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair) +{ + struct vmci_queue_header *produce_q_header; + struct vmci_queue_header *consume_q_header; + s64 result; + + if (!qpair) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + result = + qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); + if (result == VMCI_SUCCESS) + result = vmci_q_header_buf_ready(produce_q_header, + consume_q_header, + qpair->produce_q_size); + else + result = 0; + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready); + +/* + * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from + * consumer queue. + * @qpair: Pointer to the queue pair struct. + * + * This is the client interface for getting the amount of + * enqueued data in the QPair from the point of the view of the + * caller as the consumer which is the normal case. Returns < 0 if err, + * else available bytes that may be read. + */ +s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair) +{ + struct vmci_queue_header *produce_q_header; + struct vmci_queue_header *consume_q_header; + s64 result; + + if (!qpair) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + result = + qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); + if (result == VMCI_SUCCESS) + result = vmci_q_header_buf_ready(consume_q_header, + produce_q_header, + qpair->consume_q_size); + else + result = 0; + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready); + +/* + * vmci_qpair_enqueue() - Throw data on the queue. + * @qpair: Pointer to the queue pair struct. + * @buf: Pointer to buffer containing data + * @buf_size: Length of buffer. + * @buf_type: Buffer type (Unused). + * + * This is the client interface for enqueueing data into the queue. + * Returns number of bytes enqueued or < 0 on error. + */ +ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair, + const void *buf, + size_t buf_size, + int buf_type) +{ + ssize_t result; + + if (!qpair || !buf) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + + do { + result = qp_enqueue_locked(qpair->produce_q, + qpair->consume_q, + qpair->produce_q_size, + buf, buf_size, + qp_memcpy_to_queue, + vmci_can_block(qpair->flags)); + + if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && + !qp_wait_for_ready_queue(qpair)) + result = VMCI_ERROR_WOULD_BLOCK; + + } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_enqueue); + +/* + * vmci_qpair_dequeue() - Get data from the queue. 
+ * @qpair: Pointer to the queue pair struct. + * @buf: Pointer to buffer for the data + * @buf_size: Length of buffer. + * @buf_type: Buffer type (Unused). + * + * This is the client interface for dequeueing data from the queue. + * Returns number of bytes dequeued or < 0 on error. + */ +ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair, + void *buf, + size_t buf_size, + int buf_type) +{ + ssize_t result; + + if (!qpair || !buf) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + + do { + result = qp_dequeue_locked(qpair->produce_q, + qpair->consume_q, + qpair->consume_q_size, + buf, buf_size, + qp_memcpy_from_queue, true, + vmci_can_block(qpair->flags)); + + if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && + !qp_wait_for_ready_queue(qpair)) + result = VMCI_ERROR_WOULD_BLOCK; + + } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_dequeue); + +/* + * vmci_qpair_peek() - Peek at the data in the queue. + * @qpair: Pointer to the queue pair struct. + * @buf: Pointer to buffer for the data + * @buf_size: Length of buffer. + * @buf_type: Buffer type (Unused on Linux). + * + * This is the client interface for peeking into a queue. (I.e., + * copy data from the queue without updating the head pointer.) + * Returns number of bytes dequeued or < 0 on error. + */ +ssize_t vmci_qpair_peek(struct vmci_qp *qpair, + void *buf, + size_t buf_size, + int buf_type) +{ + ssize_t result; + + if (!qpair || !buf) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + + do { + result = qp_dequeue_locked(qpair->produce_q, + qpair->consume_q, + qpair->consume_q_size, + buf, buf_size, + qp_memcpy_from_queue, false, + vmci_can_block(qpair->flags)); + + if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && + !qp_wait_for_ready_queue(qpair)) + result = VMCI_ERROR_WOULD_BLOCK; + + } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_peek); + +/* + * vmci_qpair_enquev() - Throw data on the queue using iov. + * @qpair: Pointer to the queue pair struct. + * @iov: Pointer to buffer containing data + * @iov_size: Length of buffer. + * @buf_type: Buffer type (Unused). + * + * This is the client interface for enqueueing data into the queue. + * This function uses IO vectors to handle the work. Returns number + * of bytes enqueued or < 0 on error. + */ +ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, + void *iov, + size_t iov_size, + int buf_type) +{ + ssize_t result; + + if (!qpair || !iov) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + + do { + result = qp_enqueue_locked(qpair->produce_q, + qpair->consume_q, + qpair->produce_q_size, + iov, iov_size, + qp_memcpy_to_queue_iov, + vmci_can_block(qpair->flags)); + + if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && + !qp_wait_for_ready_queue(qpair)) + result = VMCI_ERROR_WOULD_BLOCK; + + } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_enquev); + +/* + * vmci_qpair_dequev() - Get data from the queue using iov. + * @qpair: Pointer to the queue pair struct. + * @iov: Pointer to buffer for the data + * @iov_size: Length of buffer. + * @buf_type: Buffer type (Unused). + * + * This is the client interface for dequeueing data from the queue. + * This function uses IO vectors to handle the work. Returns number + * of bytes dequeued or < 0 on error. 
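A short producer/consumer sketch (not part of the patch) tying together the free-space, buf-ready and enqueue/dequeue calls above; buf_type is unused on Linux and passed as 0, and the message contents are placeholders.

/* Illustrative only: send a small message and drain any pending reply. */
static int example_qpair_ping(struct vmci_qp *qpair)
{
        const char msg[] = "ping";
        char reply[64];
        s64 space;
        ssize_t n;

        /* Optional pre-check; enqueue itself also reports NOSPACE. */
        space = vmci_qpair_produce_free_space(qpair);
        if (space < 0)
                return (int)space;
        if (space < (s64)sizeof(msg))
                return VMCI_ERROR_QUEUEPAIR_NOSPACE;

        n = vmci_qpair_enqueue(qpair, msg, sizeof(msg), 0 /* buf_type */);
        if (n < 0)              /* VMCI_ERROR_* codes are negative */
                return (int)n;

        /* Drain whatever the peer has produced so far, if anything. */
        if (vmci_qpair_consume_buf_ready(qpair) > 0) {
                n = vmci_qpair_dequeue(qpair, reply, sizeof(reply), 0);
                if (n < 0)
                        return (int)n;
        }

        return VMCI_SUCCESS;
}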
+ */ +ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, + void *iov, + size_t iov_size, + int buf_type) +{ + ssize_t result; + + if (!qpair || !iov) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + + do { + result = qp_dequeue_locked(qpair->produce_q, + qpair->consume_q, + qpair->consume_q_size, + iov, iov_size, + qp_memcpy_from_queue_iov, + true, vmci_can_block(qpair->flags)); + + if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && + !qp_wait_for_ready_queue(qpair)) + result = VMCI_ERROR_WOULD_BLOCK; + + } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_dequev); + +/* + * vmci_qpair_peekv() - Peek at the data in the queue using iov. + * @qpair: Pointer to the queue pair struct. + * @iov: Pointer to buffer for the data + * @iov_size: Length of buffer. + * @buf_type: Buffer type (Unused on Linux). + * + * This is the client interface for peeking into a queue. (I.e., + * copy data from the queue without updating the head pointer.) + * This function uses IO vectors to handle the work. Returns number + * of bytes peeked or < 0 on error. + */ +ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, + void *iov, + size_t iov_size, + int buf_type) +{ + ssize_t result; + + if (!qpair || !iov) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + + do { + result = qp_dequeue_locked(qpair->produce_q, + qpair->consume_q, + qpair->consume_q_size, + iov, iov_size, + qp_memcpy_from_queue_iov, + false, vmci_can_block(qpair->flags)); + + if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && + !qp_wait_for_ready_queue(qpair)) + result = VMCI_ERROR_WOULD_BLOCK; + + } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); + + qp_unlock(qpair); + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_peekv); diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h new file mode 100644 index 000000000000..58c6959f6b6d --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h @@ -0,0 +1,191 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#ifndef _VMCI_QUEUE_PAIR_H_ +#define _VMCI_QUEUE_PAIR_H_ + +#include <linux/vmw_vmci_defs.h> +#include <linux/types.h> + +#include "vmci_context.h" + +/* Callback needed for correctly waiting on events. */ +typedef int (*vmci_event_release_cb) (void *client_data); + +/* Guest device port I/O. */ +struct ppn_set { + u64 num_produce_pages; + u64 num_consume_pages; + u32 *produce_ppns; + u32 *consume_ppns; + bool initialized; +}; + +/* VMCIqueue_pairAllocInfo */ +struct vmci_qp_alloc_info { + struct vmci_handle handle; + u32 peer; + u32 flags; + u64 produce_size; + u64 consume_size; + u64 ppn_va; /* Start VA of queue pair PPNs. */ + u64 num_ppns; + s32 result; + u32 version; +}; + +/* VMCIqueue_pairSetVAInfo */ +struct vmci_qp_set_va_info { + struct vmci_handle handle; + u64 va; /* Start VA of queue pair PPNs. 
*/ + u64 num_ppns; + u32 version; + s32 result; +}; + +/* + * For backwards compatibility, here is a version of the + * VMCIqueue_pairPageFileInfo before host support end-points was added. + * Note that the current version of that structure requires VMX to + * pass down the VA of the mapped file. Before host support was added + * there was nothing of the sort. So, when the driver sees the ioctl + * with a parameter that is the sizeof + * VMCIqueue_pairPageFileInfo_NoHostQP then it can infer that the version + * of VMX running can't attach to host end points because it doesn't + * provide the VA of the mapped files. + * + * The Linux driver doesn't get an indication of the size of the + * structure passed down from user space. So, to fix a long standing + * but unfiled bug, the _pad field has been renamed to version. + * Existing versions of VMX always initialize the PageFileInfo + * structure so that _pad, er, version is set to 0. + * + * A version value of 1 indicates that the size of the structure has + * been increased to include two UVA's: produce_uva and consume_uva. + * These UVA's are of the mmap()'d queue contents backing files. + * + * In addition, if when VMX is sending down the + * VMCIqueue_pairPageFileInfo structure it gets an error then it will + * try again with the _NoHostQP version of the file to see if an older + * VMCI kernel module is running. + */ + +/* VMCIqueue_pairPageFileInfo */ +struct vmci_qp_page_file_info { + struct vmci_handle handle; + u64 produce_page_file; /* User VA. */ + u64 consume_page_file; /* User VA. */ + u64 produce_page_file_size; /* Size of the file name array. */ + u64 consume_page_file_size; /* Size of the file name array. */ + s32 result; + u32 version; /* Was _pad. */ + u64 produce_va; /* User VA of the mapped file. */ + u64 consume_va; /* User VA of the mapped file. */ +}; + +/* vmci queuepair detach info */ +struct vmci_qp_dtch_info { + struct vmci_handle handle; + s32 result; + u32 _pad; +}; + +/* + * struct vmci_qp_page_store describes how the memory of a given queue pair + * is backed. When the queue pair is between the host and a guest, the + * page store consists of references to the guest pages. On vmkernel, + * this is a list of PPNs, and on hosted, it is a user VA where the + * queue pair is mapped into the VMX address space. + */ +struct vmci_qp_page_store { + /* Reference to pages backing the queue pair. */ + u64 pages; + /* Length of pageList/virtual addres range (in pages). */ + u32 len; +}; + +/* + * This data type contains the information about a queue. + * There are two queues (hence, queue pairs) per transaction model between a + * pair of end points, A & B. One queue is used by end point A to transmit + * commands and responses to B. The other queue is used by B to transmit + * commands and responses. + * + * struct vmci_queue_kern_if is a per-OS defined Queue structure. It contains + * either a direct pointer to the linear address of the buffer contents or a + * pointer to structures which help the OS locate those data pages. See + * vmciKernelIf.c for each platform for its definition. + */ +struct vmci_queue { + struct vmci_queue_header *q_header; + struct vmci_queue_header *saved_header; + struct vmci_queue_kern_if *kernel_if; +}; + +/* + * Utility function that checks whether the fields of the page + * store contain valid values. + * Result: + * true if the page store is wellformed. false otherwise. 
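The versioning scheme described in the vmci_qp_page_file_info comment above keys off the size of the structure the VMX passes down. A sketch of that check (not part of the patch; the helper and the size comparison are hypothetical, and only vmci_qp_page_file_info itself is defined in this header):

/*
 * Hypothetical: the pre-host-endpoint layout ends before the
 * produce_va/consume_va fields, so an ioctl handler can tell the two
 * layouts apart by the number of bytes supplied from user space.
 */
static bool example_vmx_supplied_mapped_vas(size_t copied_size)
{
        return copied_size >= sizeof(struct vmci_qp_page_file_info);
}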
+ */ +static inline bool +VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store *page_store) +{ + return page_store->len >= 2; +} + +/* + * Helper function to check if the non-blocking flag + * is set for a given queue pair. + */ +static inline bool vmci_can_block(u32 flags) +{ + return !(flags & VMCI_QPFLAG_NONBLOCK); +} + +/* + * Helper function to check if the queue pair is pinned + * into memory. + */ +static inline bool vmci_qp_pinned(u32 flags) +{ + return flags & VMCI_QPFLAG_PINNED; +} + +void vmci_qp_broker_exit(void); +int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer, + u32 flags, u32 priv_flags, + u64 produce_size, u64 consume_size, + struct vmci_qp_page_store *page_store, + struct vmci_ctx *context); +int vmci_qp_broker_set_page_store(struct vmci_handle handle, + u64 produce_uva, u64 consume_uva, + struct vmci_ctx *context); +int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context); + +void vmci_qp_guest_endpoints_exit(void); + +int vmci_qp_alloc(struct vmci_handle *handle, + struct vmci_queue **produce_q, u64 produce_size, + struct vmci_queue **consume_q, u64 consume_size, + u32 peer, u32 flags, u32 priv_flags, + bool guest_endpoint, vmci_event_release_cb wakeup_cb, + void *client_data); +int vmci_qp_broker_map(struct vmci_handle handle, + struct vmci_ctx *context, u64 guest_mem); +int vmci_qp_broker_unmap(struct vmci_handle handle, + struct vmci_ctx *context, u32 gid); + +#endif /* _VMCI_QUEUE_PAIR_H_ */ diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c new file mode 100644 index 000000000000..a196f84a4fd2 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_resource.c @@ -0,0 +1,229 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/hash.h> +#include <linux/types.h> +#include <linux/rculist.h> + +#include "vmci_resource.h" +#include "vmci_driver.h" + + +#define VMCI_RESOURCE_HASH_BITS 7 +#define VMCI_RESOURCE_HASH_BUCKETS (1 << VMCI_RESOURCE_HASH_BITS) + +struct vmci_hash_table { + spinlock_t lock; + struct hlist_head entries[VMCI_RESOURCE_HASH_BUCKETS]; +}; + +static struct vmci_hash_table vmci_resource_table = { + .lock = __SPIN_LOCK_UNLOCKED(vmci_resource_table.lock), +}; + +static unsigned int vmci_resource_hash(struct vmci_handle handle) +{ + return hash_32(handle.resource, VMCI_RESOURCE_HASH_BITS); +} + +/* + * Gets a resource (if one exists) matching given handle from the hash table. 
+ */ +static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle, + enum vmci_resource_type type) +{ + struct vmci_resource *r, *resource = NULL; + struct hlist_node *node; + unsigned int idx = vmci_resource_hash(handle); + + rcu_read_lock(); + hlist_for_each_entry_rcu(r, node, + &vmci_resource_table.entries[idx], node) { + u32 cid = r->handle.context; + u32 rid = r->handle.resource; + + if (r->type == type && + rid == handle.resource && + (cid == handle.context || cid == VMCI_INVALID_ID)) { + resource = r; + break; + } + } + rcu_read_unlock(); + + return resource; +} + +/* + * Find an unused resource ID and return it. The first + * VMCI_RESERVED_RESOURCE_ID_MAX are reserved so we start from + * its value + 1. + * Returns VMCI resource id on success, VMCI_INVALID_ID on failure. + */ +static u32 vmci_resource_find_id(u32 context_id, + enum vmci_resource_type resource_type) +{ + static u32 resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1; + u32 old_rid = resource_id; + u32 current_rid; + + /* + * Generate a unique resource ID. Keep on trying until we wrap around + * in the RID space. + */ + do { + struct vmci_handle handle; + + current_rid = resource_id; + resource_id++; + if (unlikely(resource_id == VMCI_INVALID_ID)) { + /* Skip the reserved rids. */ + resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1; + } + + handle = vmci_make_handle(context_id, current_rid); + if (!vmci_resource_lookup(handle, resource_type)) + return current_rid; + } while (resource_id != old_rid); + + return VMCI_INVALID_ID; +} + + +int vmci_resource_add(struct vmci_resource *resource, + enum vmci_resource_type resource_type, + struct vmci_handle handle) + +{ + unsigned int idx; + int result; + + spin_lock(&vmci_resource_table.lock); + + if (handle.resource == VMCI_INVALID_ID) { + handle.resource = vmci_resource_find_id(handle.context, + resource_type); + if (handle.resource == VMCI_INVALID_ID) { + result = VMCI_ERROR_NO_HANDLE; + goto out; + } + } else if (vmci_resource_lookup(handle, resource_type)) { + result = VMCI_ERROR_ALREADY_EXISTS; + goto out; + } + + resource->handle = handle; + resource->type = resource_type; + INIT_HLIST_NODE(&resource->node); + kref_init(&resource->kref); + init_completion(&resource->done); + + idx = vmci_resource_hash(resource->handle); + hlist_add_head_rcu(&resource->node, &vmci_resource_table.entries[idx]); + + result = VMCI_SUCCESS; + +out: + spin_unlock(&vmci_resource_table.lock); + return result; +} + +void vmci_resource_remove(struct vmci_resource *resource) +{ + struct vmci_handle handle = resource->handle; + unsigned int idx = vmci_resource_hash(handle); + struct vmci_resource *r; + struct hlist_node *node; + + /* Remove resource from hash table. 
*/ + spin_lock(&vmci_resource_table.lock); + + hlist_for_each_entry(r, node, &vmci_resource_table.entries[idx], node) { + if (vmci_handle_is_equal(r->handle, resource->handle)) { + hlist_del_init_rcu(&r->node); + break; + } + } + + spin_unlock(&vmci_resource_table.lock); + synchronize_rcu(); + + vmci_resource_put(resource); + wait_for_completion(&resource->done); +} + +struct vmci_resource * +vmci_resource_by_handle(struct vmci_handle resource_handle, + enum vmci_resource_type resource_type) +{ + struct vmci_resource *r, *resource = NULL; + + rcu_read_lock(); + + r = vmci_resource_lookup(resource_handle, resource_type); + if (r && + (resource_type == r->type || + resource_type == VMCI_RESOURCE_TYPE_ANY)) { + resource = vmci_resource_get(r); + } + + rcu_read_unlock(); + + return resource; +} + +/* + * Get a reference to given resource. + */ +struct vmci_resource *vmci_resource_get(struct vmci_resource *resource) +{ + kref_get(&resource->kref); + + return resource; +} + +static void vmci_release_resource(struct kref *kref) +{ + struct vmci_resource *resource = + container_of(kref, struct vmci_resource, kref); + + /* Verify the resource has been unlinked from hash table */ + WARN_ON(!hlist_unhashed(&resource->node)); + + /* Signal that container of this resource can now be destroyed */ + complete(&resource->done); +} + +/* + * Resource's release function will get called if last reference. + * If it is the last reference, then we are sure that nobody else + * can increment the count again (it's gone from the resource hash + * table), so there's no need for locking here. + */ +int vmci_resource_put(struct vmci_resource *resource) +{ + /* + * We propagate the information back to caller in case it wants to know + * whether entry was freed. + */ + return kref_put(&resource->kref, vmci_release_resource) ? + VMCI_SUCCESS_ENTRY_DEAD : VMCI_SUCCESS; +} + +struct vmci_handle vmci_resource_handle(struct vmci_resource *resource) +{ + return resource->handle; +} diff --git a/drivers/misc/vmw_vmci/vmci_resource.h b/drivers/misc/vmw_vmci/vmci_resource.h new file mode 100644 index 000000000000..9190cd298bee --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_resource.h @@ -0,0 +1,59 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
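An illustrative lifecycle (not part of the patch) for an object embedding struct vmci_resource, using only the vmci_resource_* calls defined above and the types declared in vmci_resource.h below; the embedding "example_object" type and the chosen resource type are placeholders.

struct example_object {
        struct vmci_resource resource;
        /* ... client payload ... */
};

static int example_object_register(struct example_object *obj, u32 cid)
{
        /* An invalid resource id lets vmci_resource_add() pick a free one. */
        struct vmci_handle h = vmci_make_handle(cid, VMCI_INVALID_ID);

        return vmci_resource_add(&obj->resource,
                                 VMCI_RESOURCE_TYPE_DOORBELL, h);
}

static void example_object_use(struct vmci_handle h)
{
        struct vmci_resource *r;

        /* Lookup returns the resource with an extra reference held. */
        r = vmci_resource_by_handle(h, VMCI_RESOURCE_TYPE_DOORBELL);
        if (!r)
                return;
        /* ... container_of(r, struct example_object, resource) ... */
        vmci_resource_put(r);           /* drop the lookup reference */
}

static void example_object_unregister(struct example_object *obj)
{
        /*
         * Unlinks from the hash table, drops the initial reference and
         * waits on resource->done until concurrent users have called
         * vmci_resource_put(); only then may the object be freed.
         */
        vmci_resource_remove(&obj->resource);
}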
+ */ + +#ifndef _VMCI_RESOURCE_H_ +#define _VMCI_RESOURCE_H_ + +#include <linux/vmw_vmci_defs.h> +#include <linux/types.h> + +#include "vmci_context.h" + + +enum vmci_resource_type { + VMCI_RESOURCE_TYPE_ANY, + VMCI_RESOURCE_TYPE_API, + VMCI_RESOURCE_TYPE_GROUP, + VMCI_RESOURCE_TYPE_DATAGRAM, + VMCI_RESOURCE_TYPE_DOORBELL, + VMCI_RESOURCE_TYPE_QPAIR_GUEST, + VMCI_RESOURCE_TYPE_QPAIR_HOST +}; + +struct vmci_resource { + struct vmci_handle handle; + enum vmci_resource_type type; + struct hlist_node node; + struct kref kref; + struct completion done; +}; + + +int vmci_resource_add(struct vmci_resource *resource, + enum vmci_resource_type resource_type, + struct vmci_handle handle); + +void vmci_resource_remove(struct vmci_resource *resource); + +struct vmci_resource * +vmci_resource_by_handle(struct vmci_handle resource_handle, + enum vmci_resource_type resource_type); + +struct vmci_resource *vmci_resource_get(struct vmci_resource *resource); +int vmci_resource_put(struct vmci_resource *resource); + +struct vmci_handle vmci_resource_handle(struct vmci_resource *resource); + +#endif /* _VMCI_RESOURCE_H_ */ diff --git a/drivers/misc/vmw_vmci/vmci_route.c b/drivers/misc/vmw_vmci/vmci_route.c new file mode 100644 index 000000000000..91090658b929 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_route.c @@ -0,0 +1,226 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> + +#include "vmci_context.h" +#include "vmci_driver.h" +#include "vmci_route.h" + +/* + * Make a routing decision for the given source and destination handles. + * This will try to determine the route using the handles and the available + * devices. Will set the source context if it is invalid. + */ +int vmci_route(struct vmci_handle *src, + const struct vmci_handle *dst, + bool from_guest, + enum vmci_route *route) +{ + bool has_host_device = vmci_host_code_active(); + bool has_guest_device = vmci_guest_code_active(); + + *route = VMCI_ROUTE_NONE; + + /* + * "from_guest" is only ever set to true by + * IOCTL_VMCI_DATAGRAM_SEND (or by the vmkernel equivalent), + * which comes from the VMX, so we know it is coming from a + * guest. + * + * To avoid inconsistencies, test these once. We will test + * them again when we do the actual send to ensure that we do + * not touch a non-existent device. + */ + + /* Must have a valid destination context. */ + if (VMCI_INVALID_ID == dst->context) + return VMCI_ERROR_INVALID_ARGS; + + /* Anywhere to hypervisor. */ + if (VMCI_HYPERVISOR_CONTEXT_ID == dst->context) { + + /* + * If this message already came from a guest then we + * cannot send it to the hypervisor. It must come + * from a local client. + */ + if (from_guest) + return VMCI_ERROR_DST_UNREACHABLE; + + /* + * We must be acting as a guest in order to send to + * the hypervisor. + */ + if (!has_guest_device) + return VMCI_ERROR_DEVICE_NOT_FOUND; + + /* And we cannot send if the source is the host context. 
*/ + if (VMCI_HOST_CONTEXT_ID == src->context) + return VMCI_ERROR_INVALID_ARGS; + + /* + * If the client passed the ANON source handle then + * respect it (both context and resource are invalid). + * However, if they passed only an invalid context, + * then they probably mean ANY, in which case we + * should set the real context here before passing it + * down. + */ + if (VMCI_INVALID_ID == src->context && + VMCI_INVALID_ID != src->resource) + src->context = vmci_get_context_id(); + + /* Send from local client down to the hypervisor. */ + *route = VMCI_ROUTE_AS_GUEST; + return VMCI_SUCCESS; + } + + /* Anywhere to local client on host. */ + if (VMCI_HOST_CONTEXT_ID == dst->context) { + /* + * If it is not from a guest but we are acting as a + * guest, then we need to send it down to the host. + * Note that if we are also acting as a host then this + * will prevent us from sending from local client to + * local client, but we accept that restriction as a + * way to remove any ambiguity from the host context. + */ + if (src->context == VMCI_HYPERVISOR_CONTEXT_ID) { + /* + * If the hypervisor is the source, this is + * host local communication. The hypervisor + * may send vmci event datagrams to the host + * itself, but it will never send datagrams to + * an "outer host" through the guest device. + */ + + if (has_host_device) { + *route = VMCI_ROUTE_AS_HOST; + return VMCI_SUCCESS; + } else { + return VMCI_ERROR_DEVICE_NOT_FOUND; + } + } + + if (!from_guest && has_guest_device) { + /* If no source context then use the current. */ + if (VMCI_INVALID_ID == src->context) + src->context = vmci_get_context_id(); + + /* Send it from local client down to the host. */ + *route = VMCI_ROUTE_AS_GUEST; + return VMCI_SUCCESS; + } + + /* + * Otherwise we already received it from a guest and + * it is destined for a local client on this host, or + * it is from another local client on this host. We + * must be acting as a host to service it. + */ + if (!has_host_device) + return VMCI_ERROR_DEVICE_NOT_FOUND; + + if (VMCI_INVALID_ID == src->context) { + /* + * If it came from a guest then it must have a + * valid context. Otherwise we can use the + * host context. + */ + if (from_guest) + return VMCI_ERROR_INVALID_ARGS; + + src->context = VMCI_HOST_CONTEXT_ID; + } + + /* Route to local client. */ + *route = VMCI_ROUTE_AS_HOST; + return VMCI_SUCCESS; + } + + /* + * If we are acting as a host then this might be destined for + * a guest. + */ + if (has_host_device) { + /* It will have a context if it is meant for a guest. */ + if (vmci_ctx_exists(dst->context)) { + if (VMCI_INVALID_ID == src->context) { + /* + * If it came from a guest then it + * must have a valid context. + * Otherwise we can use the host + * context. + */ + + if (from_guest) + return VMCI_ERROR_INVALID_ARGS; + + src->context = VMCI_HOST_CONTEXT_ID; + } else if (VMCI_CONTEXT_IS_VM(src->context) && + src->context != dst->context) { + /* + * VM to VM communication is not + * allowed. Since we catch all + * communication destined for the host + * above, this must be destined for a + * VM since there is a valid context. + */ + + return VMCI_ERROR_DST_UNREACHABLE; + } + + /* Pass it up to the guest. */ + *route = VMCI_ROUTE_AS_HOST; + return VMCI_SUCCESS; + } else if (!has_guest_device) { + /* + * The host is attempting to reach a CID + * without an active context, and we can't + * send it down, since we have no guest + * device. 
+ */ + + return VMCI_ERROR_DST_UNREACHABLE; + } + } + + /* + * We must be a guest trying to send to another guest, which means + * we need to send it down to the host. We do not filter out VM to + * VM communication here, since we want to be able to use the guest + * driver on older versions that do support VM to VM communication. + */ + if (!has_guest_device) { + /* + * Ending up here means we have neither guest nor host + * device. + */ + return VMCI_ERROR_DEVICE_NOT_FOUND; + } + + /* If no source context then use the current context. */ + if (VMCI_INVALID_ID == src->context) + src->context = vmci_get_context_id(); + + /* + * Send it from local client down to the host, which will + * route it to the other guest for us. + */ + *route = VMCI_ROUTE_AS_GUEST; + return VMCI_SUCCESS; +} diff --git a/drivers/misc/vmw_vmci/vmci_route.h b/drivers/misc/vmw_vmci/vmci_route.h new file mode 100644 index 000000000000..3b30e82419c3 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_route.h @@ -0,0 +1,30 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#ifndef _VMCI_ROUTE_H_ +#define _VMCI_ROUTE_H_ + +#include <linux/vmw_vmci_defs.h> + +enum vmci_route { + VMCI_ROUTE_NONE, + VMCI_ROUTE_AS_HOST, + VMCI_ROUTE_AS_GUEST, +}; + +int vmci_route(struct vmci_handle *src, const struct vmci_handle *dst, + bool from_guest, enum vmci_route *route); + +#endif /* _VMCI_ROUTE_H_ */ |
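Finally, for orientation (not part of the patch), a minimal caller of vmci_route() mirroring how vmci_qpair_alloc() earlier in this series invokes it for a request from a local client:

/*
 * Not part of the patch: resolve how a request from a local client to
 * context 'dst_cid' would be routed. On VMCI_SUCCESS, *route is
 * VMCI_ROUTE_AS_GUEST when the message must go down through the guest
 * device (to the hypervisor, to the host as seen from a guest, or to
 * another VM) and VMCI_ROUTE_AS_HOST when this side services it directly.
 */
static int example_pick_route(u32 dst_cid, enum vmci_route *route)
{
        struct vmci_handle src = VMCI_INVALID_HANDLE;
        struct vmci_handle dst = vmci_make_handle(dst_cid, VMCI_INVALID_ID);

        /* from_guest == false: the request originates from a local client. */
        return vmci_route(&src, &dst, false, route);
}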